Merge tag 'pstore-v4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore updates from Kees Cook:
 "Various fixes and tweaks for the pstore subsystem.

  Highlights:

   - use memdup_user() instead of open-coded copies (Geliang Tang)

   - fix record memory leak during initialization (Douglas Anderson)

   - avoid confused compressed record warning (Ankit Kumar)

   - prepopulate record timestamp and remove redundant logic from
     backends"

* tag 'pstore-v4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  powerpc/nvram: use memdup_user
  pstore: use memdup_user
  pstore: Fix format string to use %u for record id
  pstore: Populate pstore record->time field
  pstore: Create common record initializer
  efi-pstore: Refactor erase routine
  pstore: Avoid potential infinite loop
  pstore: Fix leaked pstore_record in pstore_get_backend_records()
  pstore: Don't warn if data is uncompressed and type is not PSTORE_TYPE_DMESG
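
For context, the "common record initializer" and record->time items above
amount to one shared helper that zeroes a record and prepopulates its
timestamp so that backends stop duplicating that logic. A minimal,
self-contained sketch of the pattern (hypothetical struct and helper names,
not the actual pstore code from this pull):

    /* Illustrative only: mimics a shared "record init" that fills the
     * timestamp once, instead of each backend open-coding it. */
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    struct example_record {
            int type;               /* record type (dmesg, console, ...) */
            struct timespec time;   /* prepopulated creation timestamp   */
            char *buf;
            size_t size;
    };

    static void example_record_init(struct example_record *rec)
    {
            memset(rec, 0, sizeof(*rec));
            clock_gettime(CLOCK_REALTIME, &rec->time);
    }

    int main(void)
    {
            struct example_record rec;

            example_record_init(&rec);
            printf("record created at %lld s\n", (long long)rec.time.tv_sec);
            return 0;
    }
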
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index ed3e5e9..f35473f 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -24,8 +24,6 @@
 	- How to do DMA with ISA (and LPC) devices.
 DMA-attributes.txt
 	- listing of the various possible attributes a DMA region can have
-DocBook/
-	- directory with DocBook templates etc. for kernel documentation.
 EDID/
 	- directory with info on customizing EDID for broken gfx/displays.
 IPMI.txt
@@ -40,8 +38,6 @@
 	- basic info on the Intel IOMMU virtualization support.
 Makefile
 	- It's not of interest for those who aren't touching the build system.
-Makefile.sphinx
-	- It's not of interest for those who aren't touching the build system.
 PCI/
 	- info related to PCI drivers.
 RCU/
@@ -264,6 +260,8 @@
 	- full colour GIF image of Linux logo (penguin - Tux).
 logo.txt
 	- info on creator of above logo & site to get additional images from.
+lsm.txt
+	- Linux Security Modules: General Security Hooks for Linux
 lzo.txt
 	- kernel LZO decompressor input formats
 m68k/
diff --git a/Documentation/ABI/stable/sysfs-class-udc b/Documentation/ABI/stable/sysfs-class-udc
index 85d3dac..d1e2f3e 100644
--- a/Documentation/ABI/stable/sysfs-class-udc
+++ b/Documentation/ABI/stable/sysfs-class-udc
@@ -55,14 +55,6 @@
 		Indicates the maximum USB speed supported by this port.
 Users:
 
-What:		/sys/class/udc/<udc>/maximum_speed
-Date:		June 2011
-KernelVersion:	3.1
-Contact:	Felipe Balbi <balbi@kernel.org>
-Description:
-		Indicates the maximum USB speed supported by this port.
-Users:
-
 What:		/sys/class/udc/<udc>/soft_connect
 Date:		June 2011
 KernelVersion:	3.1
@@ -91,3 +83,11 @@
 		'configured', and 'suspended'; however not all USB Device
 		Controllers support reporting all states.
 Users:
+
+What:		/sys/class/udc/<udc>/function
+Date:		June 2017
+KernelVersion:	4.13
+Contact:	Felipe Balbi <balbi@kernel.org>
+Description:
+		Prints out the name of the currently running USB Gadget Driver.
+Users:
diff --git a/Documentation/ABI/stable/sysfs-driver-aspeed-vuart b/Documentation/ABI/stable/sysfs-driver-aspeed-vuart
new file mode 100644
index 0000000..8062953
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-aspeed-vuart
@@ -0,0 +1,15 @@
+What:		/sys/bus/platform/drivers/aspeed-vuart/*/lpc_address
+Date:		April 2017
+Contact:	Jeremy Kerr <jk@ozlabs.org>
+Description:	Configures the IO port at which the host side of the UART
+		appears on the host <-> BMC LPC bus.
+Users:		OpenBMC.  Proposed changes should be mailed to
+		openbmc@lists.ozlabs.org
+
+What:		/sys/bus/platform/drivers/aspeed-vuart/*/sirq
+Date:		April 2017
+Contact:	Jeremy Kerr <jk@ozlabs.org>
+Description:	Configures which interrupt number the host side of
+		the UART will use on the host <-> BMC LPC bus.
+Users:		OpenBMC.  Proposed changes should be mailed to
+		openbmc@lists.ozlabs.org
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1 b/Documentation/ABI/testing/configfs-usb-gadget-uac1
index 8ba9a12..abfe447 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac1
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1
@@ -1,12 +1,14 @@
 What:		/config/usb-gadget/gadget/functions/uac1.name
-Date:		Sep 2014
-KernelVersion:	3.18
+Date:		June 2017
+KernelVersion:	4.14
 Description:
 		The attributes:
 
-		audio_buf_size - audio buffer size
-		fn_cap - capture pcm device file name
-		fn_cntl - control device file name
-		fn_play - playback pcm device file name
-		req_buf_size - ISO OUT endpoint request buffer size
-		req_count - ISO OUT endpoint request count
+		c_chmask - capture channel mask
+		c_srate - capture sampling rate
+		c_ssize - capture sample size (bytes)
+		p_chmask - playback channel mask
+		p_srate - playback sampling rate
+		p_ssize - playback sample size (bytes)
+		req_number - the number of pre-allocated requests
+			for both capture and playback
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1_legacy b/Documentation/ABI/testing/configfs-usb-gadget-uac1_legacy
new file mode 100644
index 0000000..b2eaefd
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1_legacy
@@ -0,0 +1,12 @@
+What:		/config/usb-gadget/gadget/functions/uac1_legacy.name
+Date:		Sep 2014
+KernelVersion:	3.18
+Description:
+		The attributes:
+
+		audio_buf_size - audio buffer size
+		fn_cap - capture pcm device file name
+		fn_cntl - control device file name
+		fn_play - playback pcm device file name
+		req_buf_size - ISO OUT endpoint request buffer size
+		req_count - ISO OUT endpoint request count
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index bb0f9a1..e76432b 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -34,9 +34,10 @@
 			fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
 			uid:= decimal value
 			euid:= decimal value
-			fowner:=decimal value
+			fowner:= decimal value
 		lsm:  	are LSM specific
 		option:	appraise_type:= [imasig]
+			pcr:= decimal value
 
 		default policy:
 			# PROC_SUPER_MAGIC
@@ -96,3 +97,8 @@
 
 		Smack:
 			measure subj_user=_ func=FILE_CHECK mask=MAY_READ
+
+		Example of measure rules using alternate PCRs:
+
+			measure func=KEXEC_KERNEL_CHECK pcr=4
+			measure func=KEXEC_INITRAMFS_CHECK pcr=5
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi b/Documentation/ABI/testing/sysfs-bus-fsi
new file mode 100644
index 0000000..57c8063
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsi
@@ -0,0 +1,38 @@
+What:           /sys/bus/platform/devices/fsi-master/rescan
+Date:		May 2017
+KernelVersion:  4.12
+Contact:        cbostic@linux.vnet.ibm.com
+Description:
+                Initiates an FSI master scan for all connected slave devices
+		on its links.
+
+What:           /sys/bus/platform/devices/fsi-master/break
+Date:		May 2017
+KernelVersion:  4.12
+Contact:        cbostic@linux.vnet.ibm.com
+Description:
+		Sends an FSI BREAK command on a master's communication
+		link to any connected slaves.  A BREAK resets a connected
+		device's logic and prepares it to receive further commands
+		from the master.
+
+What:           /sys/bus/platform/devices/fsi-master/slave@00:00/term
+Date:		May 2017
+KernelVersion:  4.12
+Contact:        cbostic@linux.vnet.ibm.com
+Description:
+		Sends an FSI terminate command from the master to its
+		connected slave. A terminate resets the slave's state machines
+		that control access to the internally connected engines.  In
+		addition the slave freezes its internal error register for
+		debugging purposes.  This command is also needed to abort any
+		ongoing operation in case of an expired 'Master Time Out'
+		timer.
+
+What:           /sys/bus/platform/devices/fsi-master/slave@00:00/raw
+Date:		May 2017
+KernelVersion:  4.12
+Contact:        cbostic@linux.vnet.ibm.com
+Description:
+		Provides a means of reading/writing a 32 bit value from/to a
+		specified FSI bus address.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 8c24d08..2db2cdf 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1425,6 +1425,17 @@
 		guarantees that the hardware fifo is flushed to the device
 		buffer.
 
+What:		/sys/bus/iio/devices/iio:device*/buffer/hwfifo_timeout
+KernelVersion:	4.12
+Contact:	linux-iio@vger.kernel.org
+Description:
+		A read/write property that allows reporting of samples to be
+		delayed until a timeout is reached. This allows host processors
+		to sleep while the sensor stores samples in its internal fifo.
+		The maximum timeout in seconds can be specified by setting
+		hwfifo_timeout. The current delay can be read by reading
+		hwfifo_timeout. A value of 0 means that there is no timeout.
+
 What:		/sys/bus/iio/devices/iio:deviceX/buffer/hwfifo_watermark
 KernelVersion: 4.2
 Contact:	linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-meas-spec b/Documentation/ABI/testing/sysfs-bus-iio-meas-spec
index 1a6265e..6d47e54 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-meas-spec
+++ b/Documentation/ABI/testing/sysfs-bus-iio-meas-spec
@@ -5,4 +5,3 @@
                 Reading returns either '1' or '0'. '1' means that the
                 battery level supplied to sensor is below 2.25V.
                 This ABI is available for tsys02d, htu21, ms8607
-		This ABI is available for htu21, ms8607
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
index 230020e..161c147 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
+++ b/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
@@ -16,6 +16,54 @@
 		- "OC2REF"    : OC2REF signal is used as trigger output.
 		- "OC3REF"    : OC3REF signal is used as trigger output.
 		- "OC4REF"    : OC4REF signal is used as trigger output.
+		Additional modes (on TRGO2 only):
+		- "OC5REF"    : OC5REF signal is used as trigger output.
+		- "OC6REF"    : OC6REF signal is used as trigger output.
+		- "compare_pulse_OC4REF":
+		  OC4REF rising or falling edges generate pulses.
+		- "compare_pulse_OC6REF":
+		  OC6REF rising or falling edges generate pulses.
+		- "compare_pulse_OC4REF_r_or_OC6REF_r":
+		  OC4REF or OC6REF rising edges generate pulses.
+		- "compare_pulse_OC4REF_r_or_OC6REF_f":
+		  OC4REF rising or OC6REF falling edges generate pulses.
+		- "compare_pulse_OC5REF_r_or_OC6REF_r":
+		  OC5REF or OC6REF rising edges generate pulses.
+		- "compare_pulse_OC5REF_r_or_OC6REF_f":
+		  OC5REF rising or OC6REF falling edges generate pulses.
+
+		+-----------+   +-------------+            +---------+
+		| Prescaler +-> | Counter     |        +-> | Master  | TRGO(2)
+		+-----------+   +--+--------+-+        |-> | Control +-->
+		                   |        |          ||  +---------+
+		                +--v--------+-+ OCxREF ||  +---------+
+		                | Chx compare +----------> | Output  | ChX
+		                +-----------+-+         |  | Control +-->
+		                      .     |           |  +---------+
+		                      .     |           |    .
+		                +-----------v-+ OC6REF  |    .
+		                | Ch6 compare +---------+>
+		                +-------------+
+
+		Example with: "compare_pulse_OC4REF_r_or_OC6REF_r":
+
+		                X
+		              X   X
+		            X .   . X
+		          X   .   .   X
+		        X     .   .     X
+		count X .     .   .     . X
+		        .     .   .     .
+		        .     .   .     .
+		        +---------------+
+		OC4REF  |     .   .     |
+		      +-+     .   .     +-+
+		        .     +---+     .
+		OC6REF  .     |   |     .
+		      +-------+   +-------+
+		        +-+   +-+
+		TRGO2   | |   | |
+		      +-+ +---+ +---------+
 
 What:		/sys/bus/iio/devices/triggerX/master_mode
 KernelVersion:	4.11
@@ -90,3 +138,18 @@
 			Counting is enabled on rising edge of the connected
 			trigger, and remains enabled for the duration of this
 			selected mode.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_trigger_mode_available
+KernelVersion:	4.13
+Contact:	benjamin.gaignard@st.com
+Description:
+		Reading returns the list of possible trigger modes.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_count0_trigger_mode
+KernelVersion:	4.13
+Contact:	benjamin.gaignard@st.com
+Description:
+		Configure the device counter trigger mode. The counting
+		direction is set by the in_count0_count_direction attribute,
+		and the counter is clocked by the rising edges of the
+		connected trigger.
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
new file mode 100644
index 0000000..2a98149
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -0,0 +1,110 @@
+What: /sys/bus/thunderbolt/devices/.../domainX/security
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute holds current Thunderbolt security level
+		set by the system BIOS. Possible values are:
+
+		none: All devices are automatically authorized
+		user: Devices are only authorized based on writing
+		      appropriate value to the authorized attribute
+		secure: Require devices that support secure connect at
+			minimum. User needs to authorize each device.
+		dponly: Automatically tunnel Display port (and USB). No
+			PCIe tunnels are created.
+
+What: /sys/bus/thunderbolt/devices/.../authorized
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute is used to authorize Thunderbolt devices
+		after they have been connected. If the device is not
+		authorized, no devices such as PCIe and Display port are
+		available to the system.
+
+		Contents of this attribute will be 0 when the device is not
+		yet authorized.
+
+		The following values are supported:
+		1: The device will be authorized and connected
+
+		When the key attribute contains a 32 byte hex string the
+		possible values are:
+		1: The 32 byte hex string is added to the device NVM and
+		   the device is authorized.
+		2: Send a challenge based on the 32 byte hex string. If the
+		   challenge response from device is valid, the device is
+		   authorized. In case of failure errno will be ENOKEY if
+		   the device did not contain a key at all, and
+		   EKEYREJECTED if the challenge response did not match.
+
+What: /sys/bus/thunderbolt/devices/.../key
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	When a device supports Thunderbolt secure connect it will
+		have this attribute. Writing a 32 byte hex string changes
+		authorization to use the secure connection method instead.
+
+What:		/sys/bus/thunderbolt/devices/.../device
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains the id of this device extracted from
+		the device DROM.
+
+What:		/sys/bus/thunderbolt/devices/.../device_name
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains the name of this device extracted from
+		the device DROM.
+
+What:		/sys/bus/thunderbolt/devices/.../vendor
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains the vendor id of this device extracted
+		from the device DROM.
+
+What:		/sys/bus/thunderbolt/devices/.../vendor_name
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains the vendor name of this device extracted
+		from the device DROM.
+
+What:		/sys/bus/thunderbolt/devices/.../unique_id
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute contains the unique_id string of this device.
+		This is either read from hardware registers (UUID on
+		newer hardware) or based on UID from the device DROM.
+		Can be used to uniquely identify a particular device.
+
+What:		/sys/bus/thunderbolt/devices/.../nvm_version
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	If the device has upgradeable firmware the version
+		number is available here. Format: %x.%x, major.minor.
+		If the device is in safe mode reading the file returns
+		-ENODATA instead as the NVM version is not available.
+
+What:		/sys/bus/thunderbolt/devices/.../nvm_authenticate
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	When a new NVM image is written to the non-active NVM
+		area (through non_activeX NVMem device), the
+		authentication procedure is started by writing 1 to
+		this file. If everything goes well, the device is
+		restarted with the new NVM firmware. If the image
+		verification fails an error code is returned instead.
+
+		When read, holds the status of the last authentication
+		operation if an error occurred during the process. This
+		is directly the status value from the DMA configuration
+		based mailbox before the device is power cycled. Writing
+		0 here clears the status.
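
Usage note for the "authorized" attribute documented above: user space
authorizes a connected device by writing "1" (or "2" for the challenge-based
method once a key has been written) to the device's authorized file. A
minimal sketch with a hypothetical device path (real paths depend on the
connected topology):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical path; enumerate /sys/bus/thunderbolt/devices/
             * to find real ones. */
            const char *attr = "/sys/bus/thunderbolt/devices/0-1/authorized";
            FILE *f = fopen(attr, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* "1" = authorize; "2" = challenge the stored key instead. */
            if (fputs("1", f) == EOF)
                    perror("fputs");
            return fclose(f) ? 1 : 0;
    }
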
diff --git a/Documentation/ABI/testing/sysfs-class-mux b/Documentation/ABI/testing/sysfs-class-mux
new file mode 100644
index 0000000..8715f9c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mux
@@ -0,0 +1,16 @@
+What:		/sys/class/mux/
+Date:		April 2017
+KernelVersion:	4.13
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The mux/ class sub-directory belongs to the Generic MUX
+		Framework and provides a sysfs interface for using MUX
+		controllers.
+
+What:		/sys/class/mux/muxchipN/
+Date:		April 2017
+KernelVersion:	4.13
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		A /sys/class/mux/muxchipN directory is created for each
+		probed MUX chip where N is a simple enumeration.
diff --git a/Documentation/ABI/testing/sysfs-class-power-twl4030 b/Documentation/ABI/testing/sysfs-class-power-twl4030
index be26af0..b4fd32d 100644
--- a/Documentation/ABI/testing/sysfs-class-power-twl4030
+++ b/Documentation/ABI/testing/sysfs-class-power-twl4030
@@ -1,20 +1,3 @@
-What: /sys/class/power_supply/twl4030_ac/max_current
-      /sys/class/power_supply/twl4030_usb/max_current
-Description:
-	Read/Write limit on current which may
-	be drawn from the ac (Accessory Charger) or
-	USB port.
-
-	Value is in micro-Amps.
-
-	Value is set automatically to an appropriate
-	value when a cable is plugged or unplugged.
-
-	Value can the set by writing to the attribute.
-	The change will only persist until the next
-	plug event.  These event are reported via udev.
-
-
 What: /sys/class/power_supply/twl4030_usb/mode
 Description:
 	Changing mode for USB port.
diff --git a/Documentation/ABI/testing/sysfs-class-typec b/Documentation/ABI/testing/sysfs-class-typec
index d4a3d23..5be552e 100644
--- a/Documentation/ABI/testing/sysfs-class-typec
+++ b/Documentation/ABI/testing/sysfs-class-typec
@@ -30,6 +30,21 @@
 
 		Valid values: source, sink
 
+What:           /sys/class/typec/<port>/port_type
+Date:           May 2017
+Contact:	Badhri Jagan Sridharan <Badhri@google.com>
+Description:
+		Indicates the type of the port. This attribute can be used for
+		requesting a change in the port type. Port type change is
+		supported as a synchronous operation, so write(2) to the
+		attribute will not return until the operation has finished.
+
+		Valid values:
+		- source (The port will behave as a source-only DFP port)
+		- sink (The port will behave as a sink-only UFP port)
+		- dual (The port will behave as a dual-role-data and
+			dual-role-power port)
+
 What:		/sys/class/typec/<port>/vconn_source
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
diff --git a/Documentation/ABI/testing/sysfs-uevent b/Documentation/ABI/testing/sysfs-uevent
new file mode 100644
index 0000000..aa39f8d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-uevent
@@ -0,0 +1,47 @@
+What:           /sys/.../uevent
+Date:           May 2017
+KernelVersion:  4.13
+Contact:        Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+                Enable passing additional variables for synthetic uevents that
+                are generated by writing to the /sys/.../uevent file.
+
+                The recognized extended format is ACTION [UUID [KEY=VALUE ...]].
+
+                The ACTION is compulsory - it is the name of the uevent action
+                ("add", "change", "remove"). There is no change compared to
+                previous functionality here. The rest of the extended format
+                is optional.
+
+                You need to pass the UUID before any KEY=VALUE pairs.
+                The UUID must be in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+                format where 'x' is a hex digit. The UUID is considered to be
+                a transaction identifier so it's possible to use the same UUID
+                value for one or more synthetic uevents in which case we
+                logically group these uevents together for any userspace
+                listeners. The UUID value appears in uevent as
+                "SYNTH_UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" environment
+                variable.
+
+                If UUID is not passed in, the generated synthetic uevent gains
+                "SYNTH_UUID=0" environment variable automatically.
+
+                The KEY=VALUE pairs can contain alphanumeric characters only.
+                It's possible to define zero or more pairs - each pair is then
+                delimited by a space character ' '. Each pair appears in
+                synthetic uevent as "SYNTH_ARG_KEY=VALUE". That means the KEY
+                name gains the "SYNTH_ARG_" prefix to avoid possible collisions
+                with existing variables.
+
+                Example of valid sequence written to the uevent file:
+
+                    add fe4d7c9d-b8c6-4a70-9ef1-3d8a58d18eed A=1 B=abc
+
+                This generates synthetic uevent including these variables:
+
+                    ACTION=add
+                    SYNTH_ARG_A=1
+                    SYNTH_ARG_B=abc
+                    SYNTH_UUID=fe4d7c9d-b8c6-4a70-9ef1-3d8a58d18eed
+Users:
+                udev, userspace tools generating synthetic uevents
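
The extended synthetic-uevent format above can be exercised from user space
by writing the whole string to any device's uevent file; a small sketch
(the loopback path below is just an assumed example):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/net/lo/uevent";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* ACTION, optional UUID, then KEY=VALUE pairs that become
             * SYNTH_ARG_* variables in the emitted uevent. */
            fprintf(f, "add fe4d7c9d-b8c6-4a70-9ef1-3d8a58d18eed A=1 B=abc\n");
            return fclose(f) ? 1 : 0;
    }
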
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 6b20128..71200df 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -692,7 +692,7 @@
 boot with 'dma_debug_entries=<your_desired_number>' to overwrite the
 architectural default.
 
-void debug_dmap_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 dma-debug interface debug_dma_mapping_error() to debug drivers that fail
 to check DMA mapping errors on addresses returned by dma_map_single() and
diff --git a/Documentation/DocBook/.gitignore b/Documentation/DocBook/.gitignore
deleted file mode 100644
index e05da3f..0000000
--- a/Documentation/DocBook/.gitignore
+++ /dev/null
@@ -1,17 +0,0 @@
-*.xml
-*.ps
-*.pdf
-*.html
-*.9.gz
-*.9
-*.aux
-*.dvi
-*.log
-*.out
-*.png
-*.gif
-*.svg
-*.proc
-*.db
-media-indices.tmpl
-media-entities.tmpl
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
deleted file mode 100644
index 85916f1..0000000
--- a/Documentation/DocBook/Makefile
+++ /dev/null
@@ -1,282 +0,0 @@
-###
-# This makefile is used to generate the kernel documentation,
-# primarily based on in-line comments in various source files.
-# See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
-# to document the SRC - and how to read it.
-# To add a new book the only step required is to add the book to the
-# list of DOCBOOKS.
-
-DOCBOOKS := z8530book.xml  \
-	    kernel-hacking.xml kernel-locking.xml \
-	    networking.xml \
-	    filesystems.xml lsm.xml kgdb.xml \
-	    libata.xml mtdnand.xml librs.xml rapidio.xml \
-	    s390-drivers.xml scsi.xml \
-	    sh.xml w1.xml
-
-ifeq ($(DOCBOOKS),)
-
-# Skip DocBook build if the user explicitly requested no DOCBOOKS.
-.DEFAULT:
-	@echo "  SKIP    DocBook $@ target (DOCBOOKS=\"\" specified)."
-else
-ifneq ($(SPHINXDIRS),)
-
-# Skip DocBook build if the user explicitly requested a sphinx dir
-.DEFAULT:
-	@echo "  SKIP    DocBook $@ target (SPHINXDIRS specified)."
-else
-
-
-###
-# The build process is as follows (targets):
-#              (xmldocs) [by docproc]
-# file.tmpl --> file.xml +--> file.ps   (psdocs)   [by db2ps or xmlto]
-#                        +--> file.pdf  (pdfdocs)  [by db2pdf or xmlto]
-#                        +--> DIR=file  (htmldocs) [by xmlto]
-#                        +--> man/      (mandocs)  [by xmlto]
-
-
-# for PDF and PS output you can choose between xmlto and docbook-utils tools
-PDF_METHOD	= $(prefer-db2x)
-PS_METHOD	= $(prefer-db2x)
-
-
-targets += $(DOCBOOKS)
-BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
-xmldocs: $(BOOKS)
-sgmldocs: xmldocs
-
-PS := $(patsubst %.xml, %.ps, $(BOOKS))
-psdocs: $(PS)
-
-PDF := $(patsubst %.xml, %.pdf, $(BOOKS))
-pdfdocs: $(PDF)
-
-HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
-htmldocs: $(HTML)
-	$(call cmd,build_main_index)
-
-MAN := $(patsubst %.xml, %.9, $(BOOKS))
-mandocs: $(MAN)
-	find $(obj)/man -name '*.9' | xargs gzip -nf
-
-# Default location for installed man pages
-export INSTALL_MAN_PATH = $(objtree)/usr
-
-installmandocs: mandocs
-	mkdir -p $(INSTALL_MAN_PATH)/man/man9/
-	find $(obj)/man -name '*.9.gz' -printf '%h %f\n' | \
-		sort -k 2 -k 1 | uniq -f 1 | sed -e 's: :/:' | \
-		xargs install -m 644 -t $(INSTALL_MAN_PATH)/man/man9/
-
-# no-op for the DocBook toolchain
-epubdocs:
-latexdocs:
-linkcheckdocs:
-
-###
-#External programs used
-KERNELDOCXMLREF = $(srctree)/scripts/kernel-doc-xml-ref
-KERNELDOC       = $(srctree)/scripts/kernel-doc
-DOCPROC         = $(objtree)/scripts/docproc
-CHECK_LC_CTYPE = $(objtree)/scripts/check-lc_ctype
-
-# Use a fixed encoding - UTF-8 if the C library has support built-in
-# or ASCII if not
-LC_CTYPE := $(call try-run, LC_CTYPE=C.UTF-8 $(CHECK_LC_CTYPE),C.UTF-8,C)
-export LC_CTYPE
-
-XMLTOFLAGS = -m $(srctree)/$(src)/stylesheet.xsl
-XMLTOFLAGS += --skip-validation
-
-###
-# DOCPROC is used for two purposes:
-# 1) To generate a dependency list for a .tmpl file
-# 2) To preprocess a .tmpl file and call kernel-doc with
-#     appropriate parameters.
-# The following rules are used to generate the .xml documentation
-# required to generate the final targets. (ps, pdf, html).
-quiet_cmd_docproc = DOCPROC $@
-      cmd_docproc = SRCTREE=$(srctree)/ $(DOCPROC) doc $< >$@
-define rule_docproc
-	set -e;								\
-        $(if $($(quiet)cmd_$(1)),echo '  $($(quiet)cmd_$(1))';) 	\
-        $(cmd_$(1)); 							\
-        ( 								\
-          echo 'cmd_$@ := $(cmd_$(1))'; 				\
-          echo $@: `SRCTREE=$(srctree) $(DOCPROC) depend $<`; 		\
-        ) > $(dir $@).$(notdir $@).cmd
-endef
-
-%.xml: %.tmpl $(KERNELDOC) $(DOCPROC) $(KERNELDOCXMLREF) FORCE
-	$(call if_changed_rule,docproc)
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-notfoundtemplate = echo "*** You have to install docbook-utils or xmlto ***"; \
-		   exit 1
-db2xtemplate = db2TYPE -o $(dir $@) $<
-xmltotemplate = xmlto TYPE $(XMLTOFLAGS) -o $(dir $@) $<
-
-# determine which methods are available
-ifeq ($(shell which db2ps >/dev/null 2>&1 && echo found),found)
-	use-db2x = db2x
-	prefer-db2x = db2x
-else
-	use-db2x = notfound
-	prefer-db2x = $(use-xmlto)
-endif
-ifeq ($(shell which xmlto >/dev/null 2>&1 && echo found),found)
-	use-xmlto = xmlto
-	prefer-xmlto = xmlto
-else
-	use-xmlto = notfound
-	prefer-xmlto = $(use-db2x)
-endif
-
-# the commands, generated from the chosen template
-quiet_cmd_db2ps = PS      $@
-      cmd_db2ps = $(subst TYPE,ps, $($(PS_METHOD)template))
-%.ps : %.xml
-	$(call cmd,db2ps)
-
-quiet_cmd_db2pdf = PDF     $@
-      cmd_db2pdf = $(subst TYPE,pdf, $($(PDF_METHOD)template))
-%.pdf : %.xml
-	$(call cmd,db2pdf)
-
-
-index = index.html
-main_idx = $(obj)/$(index)
-quiet_cmd_build_main_index = HTML    $(main_idx)
-      cmd_build_main_index = rm -rf $(main_idx); \
-		   echo '<h1>Linux Kernel HTML Documentation</h1>' >> $(main_idx) && \
-		   echo '<h2>Kernel Version: $(KERNELVERSION)</h2>' >> $(main_idx) && \
-		   cat $(HTML) >> $(main_idx)
-
-quiet_cmd_db2html = HTML    $@
-      cmd_db2html = xmlto html $(XMLTOFLAGS) -o $(patsubst %.html,%,$@) $< && \
-		echo '<a HREF="$(patsubst %.html,%,$(notdir $@))/index.html"> \
-		$(patsubst %.html,%,$(notdir $@))</a><p>' > $@
-
-###
-# Rules to create an aux XML and .db, and use them to re-process the DocBook XML
-# to fill internal hyperlinks
-       gen_aux_xml = :
- quiet_gen_aux_xml = echo '  XMLREF  $@'
-silent_gen_aux_xml = :
-%.aux.xml: %.xml
-	@$($(quiet)gen_aux_xml)
-	@rm -rf $@
-	@(cat $< | egrep "^<refentry id" | egrep -o "\".*\"" | cut -f 2 -d \" > $<.db)
-	@$(KERNELDOCXMLREF) -db $<.db $< > $@
-.PRECIOUS: %.aux.xml
-
-%.html:	%.aux.xml
-	@(which xmlto > /dev/null 2>&1) || \
-	 (echo "*** You need to install xmlto ***"; \
-	  exit 1)
-	@rm -rf $@ $(patsubst %.html,%,$@)
-	$(call cmd,db2html)
-	@if [ ! -z "$(PNG-$(basename $(notdir $@)))" ]; then \
-            cp $(PNG-$(basename $(notdir $@))) $(patsubst %.html,%,$@); fi
-
-quiet_cmd_db2man = MAN     $@
-      cmd_db2man = if grep -q refentry $<; then xmlto man $(XMLTOFLAGS) -o $(obj)/man/$(*F) $< ; fi
-%.9 : %.xml
-	@(which xmlto > /dev/null 2>&1) || \
-	 (echo "*** You need to install xmlto ***"; \
-	  exit 1)
-	$(Q)mkdir -p $(obj)/man/$(*F)
-	$(call cmd,db2man)
-	@touch $@
-
-###
-# Rules to generate postscripts and PNG images from .fig format files
-quiet_cmd_fig2eps = FIG2EPS $@
-      cmd_fig2eps = fig2dev -Leps $< $@
-
-%.eps: %.fig
-	@(which fig2dev > /dev/null 2>&1) || \
-	 (echo "*** You need to install transfig ***"; \
-	  exit 1)
-	$(call cmd,fig2eps)
-
-quiet_cmd_fig2png = FIG2PNG $@
-      cmd_fig2png = fig2dev -Lpng $< $@
-
-%.png: %.fig
-	@(which fig2dev > /dev/null 2>&1) || \
-	 (echo "*** You need to install transfig ***"; \
-	  exit 1)
-	$(call cmd,fig2png)
-
-###
-# Rule to convert a .c file to inline XML documentation
-       gen_xml = :
- quiet_gen_xml = echo '  GEN     $@'
-silent_gen_xml = :
-%.xml: %.c
-	@$($(quiet)gen_xml)
-	@(                            \
-	   echo "<programlisting>";   \
-	   expand --tabs=8 < $< |     \
-	   sed -e "s/&/\\&amp;/g"     \
-	       -e "s/</\\&lt;/g"      \
-	       -e "s/>/\\&gt;/g";     \
-	   echo "</programlisting>")  > $@
-
-endif # DOCBOOKS=""
-endif # SPHINDIR=...
-
-###
-# Help targets as used by the top-level makefile
-dochelp:
-	@echo  ' Linux kernel internal documentation in different formats (DocBook):'
-	@echo  '  htmldocs        - HTML'
-	@echo  '  pdfdocs         - PDF'
-	@echo  '  psdocs          - Postscript'
-	@echo  '  xmldocs         - XML DocBook'
-	@echo  '  mandocs         - man pages'
-	@echo  '  installmandocs  - install man pages generated by mandocs to INSTALL_MAN_PATH'; \
-	 echo  '                    (default: $(INSTALL_MAN_PATH))'; \
-	 echo  ''
-	@echo  '  cleandocs       - clean all generated DocBook files'
-	@echo
-	@echo  '  make DOCBOOKS="s1.xml s2.xml" [target] Generate only docs s1.xml s2.xml'
-	@echo  '  valid values for DOCBOOKS are: $(DOCBOOKS)'
-	@echo
-	@echo  "  make DOCBOOKS=\"\" [target] Don't generate docs from Docbook"
-	@echo  '     This is useful to generate only the ReST docs (Sphinx)'
-
-
-###
-# Temporary files left by various tools
-clean-files := $(DOCBOOKS) \
-	$(patsubst %.xml, %.dvi,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.aux,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.tex,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.log,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.out,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.ps,      $(DOCBOOKS)) \
-	$(patsubst %.xml, %.pdf,     $(DOCBOOKS)) \
-	$(patsubst %.xml, %.html,    $(DOCBOOKS)) \
-	$(patsubst %.xml, %.9,       $(DOCBOOKS)) \
-	$(patsubst %.xml, %.aux.xml, $(DOCBOOKS)) \
-	$(patsubst %.xml, %.xml.db,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.xml,     $(DOCBOOKS)) \
-	$(patsubst %.xml, .%.xml.cmd, $(DOCBOOKS)) \
-	$(index)
-
-clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
-
-cleandocs:
-	$(Q)rm -f $(call objectify, $(clean-files))
-	$(Q)rm -rf $(call objectify, $(clean-dirs))
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable so we can use it in if_changed and friends.
-
-.PHONY: $(PHONY)
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
deleted file mode 100644
index 6006b63..0000000
--- a/Documentation/DocBook/filesystems.tmpl
+++ /dev/null
@@ -1,381 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="Linux-filesystems-API">
- <bookinfo>
-  <title>Linux Filesystems API</title>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="vfs">
-     <title>The Linux VFS</title>
-     <sect1 id="the_filesystem_types"><title>The Filesystem types</title>
-!Iinclude/linux/fs.h
-     </sect1>
-     <sect1 id="the_directory_cache"><title>The Directory Cache</title>
-!Efs/dcache.c
-!Iinclude/linux/dcache.h
-     </sect1>
-     <sect1 id="inode_handling"><title>Inode Handling</title>
-!Efs/inode.c
-!Efs/bad_inode.c
-     </sect1>
-     <sect1 id="registration_and_superblocks"><title>Registration and Superblocks</title>
-!Efs/super.c
-     </sect1>
-     <sect1 id="file_locks"><title>File Locks</title>
-!Efs/locks.c
-!Ifs/locks.c
-     </sect1>
-     <sect1 id="other_functions"><title>Other Functions</title>
-!Efs/mpage.c
-!Efs/namei.c
-!Efs/buffer.c
-!Eblock/bio.c
-!Efs/seq_file.c
-!Efs/filesystems.c
-!Efs/fs-writeback.c
-!Efs/block_dev.c
-     </sect1>
-  </chapter>
-
-  <chapter id="proc">
-     <title>The proc filesystem</title>
-
-     <sect1 id="sysctl_interface"><title>sysctl interface</title>
-!Ekernel/sysctl.c
-     </sect1>
-
-     <sect1 id="proc_filesystem_interface"><title>proc filesystem interface</title>
-!Ifs/proc/base.c
-     </sect1>
-  </chapter>
-
-  <chapter id="fs_events">
-     <title>Events based on file descriptors</title>
-!Efs/eventfd.c
-  </chapter>
-
-  <chapter id="sysfs">
-     <title>The Filesystem for Exporting Kernel Objects</title>
-!Efs/sysfs/file.c
-!Efs/sysfs/symlink.c
-  </chapter>
-
-  <chapter id="debugfs">
-     <title>The debugfs filesystem</title>
-
-     <sect1 id="debugfs_interface"><title>debugfs interface</title>
-!Efs/debugfs/inode.c
-!Efs/debugfs/file.c
-     </sect1>
-  </chapter>
-
-  <chapter id="LinuxJDBAPI">
-  <chapterinfo>
-  <title>The Linux Journalling API</title>
-
-  <authorgroup>
-  <author>
-     <firstname>Roger</firstname>
-     <surname>Gammans</surname>
-     <affiliation>
-     <address>
-      <email>rgammans@computer-surgery.co.uk</email>
-     </address>
-    </affiliation>
-     </author>
-  </authorgroup>
-
-  <authorgroup>
-   <author>
-    <firstname>Stephen</firstname>
-    <surname>Tweedie</surname>
-    <affiliation>
-     <address>
-      <email>sct@redhat.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2002</year>
-   <holder>Roger Gammans</holder>
-  </copyright>
-  </chapterinfo>
-
-  <title>The Linux Journalling API</title>
-
-    <sect1 id="journaling_overview">
-     <title>Overview</title>
-    <sect2 id="journaling_details">
-     <title>Details</title>
-<para>
-The journalling layer is  easy to use. You need to
-first of all create a journal_t data structure. There are
-two calls to do this dependent on how you decide to allocate the physical
-media on which the journal resides. The jbd2_journal_init_inode() call
-is for journals stored in filesystem inodes, or the jbd2_journal_init_dev()
-call can be used for journal stored on a raw device (in a continuous range
-of blocks). A journal_t is a typedef for a struct pointer, so when
-you are finally finished make sure you call jbd2_journal_destroy() on it
-to free up any used kernel memory.
-</para>
-
-<para>
-Once you have got your journal_t object you need to 'mount' or load the journal
-file. The journalling layer expects the space for the journal was already
-allocated and initialized properly by the userspace tools.  When loading the
-journal you must call jbd2_journal_load() to process journal contents.  If the
-client file system detects the journal contents does not need to be processed
-(or even need not have valid contents), it may call jbd2_journal_wipe() to
-clear the journal contents before calling jbd2_journal_load().
-</para>
-
-<para>
-Note that jbd2_journal_wipe(..,0) calls jbd2_journal_skip_recovery() for you if
-it detects any outstanding transactions in the journal and similarly
-jbd2_journal_load() will call jbd2_journal_recover() if necessary.  I would
-advise reading ext4_load_journal() in fs/ext4/super.c for examples on this
-stage.
-</para>
-
-<para>
-Now you can go ahead and start modifying the underlying
-filesystem. Almost.
-</para>
-
-<para>
-
-You still need to actually journal your filesystem changes, this
-is done by wrapping them into transactions. Additionally you
-also need to wrap the modification of each of the buffers
-with calls to the journal layer, so it knows what the modifications
-you are actually making are. To do this use jbd2_journal_start() which
-returns a transaction handle.
-</para>
-
-<para>
-jbd2_journal_start()
-and its counterpart jbd2_journal_stop(), which indicates the end of a
-transaction are nestable calls, so you can reenter a transaction if necessary,
-but remember you must call jbd2_journal_stop() the same number of times as
-jbd2_journal_start() before the transaction is completed (or more accurately
-leaves the update phase). Ext4/VFS makes use of this feature to simplify
-handling of inode dirtying, quota support, etc.
-</para>
-
-<para>
-Inside each transaction you need to wrap the modifications to the
-individual buffers (blocks). Before you start to modify a buffer you
-need to call jbd2_journal_get_{create,write,undo}_access() as appropriate,
-this allows the journalling layer to copy the unmodified data if it
-needs to. After all the buffer may be part of a previously uncommitted
-transaction.
-At this point you are at last ready to modify a buffer, and once
-you are have done so you need to call jbd2_journal_dirty_{meta,}data().
-Or if you've asked for access to a buffer you now know is now longer
-required to be pushed back on the device you can call jbd2_journal_forget()
-in much the same way as you might have used bforget() in the past.
-</para>
-
-<para>
-A jbd2_journal_flush() may be called at any time to commit and checkpoint
-all your transactions.
-</para>
-
-<para>
-Then at umount time , in your put_super() you can then call jbd2_journal_destroy()
-to clean up your in-core journal object.
-</para>
-
-<para>
-Unfortunately there a couple of ways the journal layer can cause a deadlock.
-The first thing to note is that each task can only have
-a single outstanding transaction at any one time, remember nothing
-commits until the outermost jbd2_journal_stop(). This means
-you must complete the transaction at the end of each file/inode/address
-etc. operation you perform, so that the journalling system isn't re-entered
-on another journal. Since transactions can't be nested/batched
-across differing journals, and another filesystem other than
-yours (say ext4) may be modified in a later syscall.
-</para>
-
-<para>
-The second case to bear in mind is that jbd2_journal_start() can
-block if there isn't enough space in the journal for your transaction
-(based on the passed nblocks param) - when it blocks it merely(!) needs to
-wait for transactions to complete and be committed from other tasks,
-so essentially we are waiting for jbd2_journal_stop(). So to avoid
-deadlocks you must treat jbd2_journal_start/stop() as if they
-were semaphores and include them in your semaphore ordering rules to prevent
-deadlocks. Note that jbd2_journal_extend() has similar blocking behaviour to
-jbd2_journal_start() so you can deadlock here just as easily as on
-jbd2_journal_start().
-</para>
-
-<para>
-Try to reserve the right number of blocks the first time. ;-). This will
-be the maximum number of blocks you are going to touch in this transaction.
-I advise having a look at at least ext4_jbd.h to see the basis on which
-ext4 uses to make these decisions.
-</para>
-
-<para>
-Another wriggle to watch out for is your on-disk block allocation strategy.
-Why? Because, if you do a delete, you need to ensure you haven't reused any
-of the freed blocks until the transaction freeing these blocks commits. If you
-reused these blocks and crash happens, there is no way to restore the contents
-of the reallocated blocks at the end of the last fully committed transaction.
-
-One simple way of doing this is to mark blocks as free in internal in-memory
-block allocation structures only after the transaction freeing them commits.
-Ext4 uses journal commit callback for this purpose.
-</para>
-
-<para>
-With journal commit callbacks you can ask the journalling layer to call a
-callback function when the transaction is finally committed to disk, so that
-you can do some of your own management. You ask the journalling layer for
-calling the callback by simply setting journal->j_commit_callback function
-pointer and that function is called after each transaction commit. You can also
-use transaction->t_private_list for attaching entries to a transaction that
-need processing when the transaction commits.
-</para>
-
-<para>
-JBD2 also provides a way to block all transaction updates via
-jbd2_journal_{un,}lock_updates(). Ext4 uses this when it wants a window with a
-clean and stable fs for a moment.  E.g.
-</para>
-
-<programlisting>
-
-	jbd2_journal_lock_updates() //stop new stuff happening..
-	jbd2_journal_flush()        // checkpoint everything.
-	..do stuff on stable fs
-	jbd2_journal_unlock_updates() // carry on with filesystem use.
-</programlisting>
-
-<para>
-The opportunities for abuse and DOS attacks with this should be obvious,
-if you allow unprivileged userspace to trigger codepaths containing these
-calls.
-</para>
-
-    </sect2>
-
-    <sect2 id="jbd_summary">
-     <title>Summary</title>
-<para>
-Using the journal is a matter of wrapping the different context changes,
-being each mount, each modification (transaction) and each changed buffer
-to tell the journalling layer about them.
-</para>
-
-    </sect2>
-
-    </sect1>
-
-    <sect1 id="data_types">
-     <title>Data Types</title>
-     <para>
-	The journalling layer uses typedefs to 'hide' the concrete definitions
-	of the structures used. As a client of the JBD2 layer you can
-	just rely on the using the pointer as a magic cookie  of some sort.
-
-	Obviously the hiding is not enforced as this is 'C'.
-     </para>
-	<sect2 id="structures"><title>Structures</title>
-!Iinclude/linux/jbd2.h
-	</sect2>
-    </sect1>
-
-    <sect1 id="functions">
-     <title>Functions</title>
-     <para>
-	The functions here are split into two groups those that
-	affect a journal as a whole, and those which are used to
-	manage transactions
-     </para>
-	<sect2 id="journal_level"><title>Journal Level</title>
-!Efs/jbd2/journal.c
-!Ifs/jbd2/recovery.c
-	</sect2>
-	<sect2 id="transaction_level"><title>Transasction Level</title>
-!Efs/jbd2/transaction.c
-	</sect2>
-    </sect1>
-    <sect1 id="see_also">
-     <title>See also</title>
-	<para>
-	  <citation>
-	   <ulink url="http://kernel.org/pub/linux/kernel/people/sct/ext3/journal-design.ps.gz">
-	   	Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen Tweedie
-	   </ulink>
-	  </citation>
-	</para>
-	<para>
-	   <citation>
-	   <ulink url="http://olstrans.sourceforge.net/release/OLS2000-ext3/OLS2000-ext3.html">
-	   	Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen Tweedie
-	   </ulink>
-	   </citation>
-	</para>
-    </sect1>
-
-  </chapter>
-
-  <chapter id="splice">
-      <title>splice API</title>
-  <para>
-	splice is a method for moving blocks of data around inside the
-	kernel, without continually transferring them between the kernel
-	and user space.
-  </para>
-!Ffs/splice.c
-  </chapter>
-
-  <chapter id="pipes">
-      <title>pipes API</title>
-  <para>
-	Pipe interfaces are all for in-kernel (builtin image) use.
-	They are not exported for use by modules.
-  </para>
-!Iinclude/linux/pipe_fs_i.h
-!Ffs/pipe.c
-  </chapter>
-
-</book>
diff --git a/Documentation/DocBook/kernel-hacking.tmpl b/Documentation/DocBook/kernel-hacking.tmpl
deleted file mode 100644
index da5c087..0000000
--- a/Documentation/DocBook/kernel-hacking.tmpl
+++ /dev/null
@@ -1,1312 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="lk-hacking-guide">
- <bookinfo>
-  <title>Unreliable Guide To Hacking The Linux Kernel</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Rusty</firstname>
-    <surname>Russell</surname>
-    <affiliation>
-     <address>
-      <email>rusty@rustcorp.com.au</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2005</year>
-   <holder>Rusty Russell</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-    This documentation is free software; you can redistribute
-    it and/or modify it under the terms of the GNU General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later
-    version.
-   </para>
-   
-   <para>
-    This program is distributed in the hope that it will be
-    useful, but WITHOUT ANY WARRANTY; without even the implied
-    warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-    See the GNU General Public License for more details.
-   </para>
-   
-   <para>
-    You should have received a copy of the GNU General Public
-    License along with this program; if not, write to the Free
-    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-    MA 02111-1307 USA
-   </para>
-   
-   <para>
-    For more details see the file COPYING in the source
-    distribution of Linux.
-   </para>
-  </legalnotice>
-
-  <releaseinfo>
-   This is the first release of this document as part of the kernel tarball.
-  </releaseinfo>
-
- </bookinfo>
-
- <toc></toc>
-
- <chapter id="introduction">
-  <title>Introduction</title>
-  <para>
-   Welcome, gentle reader, to Rusty's Remarkably Unreliable Guide to Linux
-   Kernel Hacking.  This document describes the common routines and
-   general requirements for kernel code: its goal is to serve as a
-   primer for Linux kernel development for experienced C
-   programmers.  I avoid implementation details: that's what the
-   code is for, and I ignore whole tracts of useful routines.
-  </para>
-  <para>
-   Before you read this, please understand that I never wanted to
-   write this document, being grossly under-qualified, but I always
-   wanted to read it, and this was the only way.  I hope it will
-   grow into a compendium of best practice, common starting points
-   and random information.
-  </para>
- </chapter>
-
- <chapter id="basic-players">
-  <title>The Players</title>
-
-  <para>
-   At any time each of the CPUs in a system can be:
-  </para>
-
-  <itemizedlist>
-   <listitem>
-    <para>
-     not associated with any process, serving a hardware interrupt;
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     not associated with any process, serving a softirq or tasklet;
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     running in kernel space, associated with a process (user context);
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     running a process in user space.
-    </para>
-   </listitem>
-  </itemizedlist>
-
-  <para>
-   There is an ordering between these.  The bottom two can preempt
-   each other, but above that is a strict hierarchy: each can only be
-   preempted by the ones above it.  For example, while a softirq is
-   running on a CPU, no other softirq will preempt it, but a hardware
-   interrupt can.  However, any other CPUs in the system execute
-   independently.
-  </para>
-
-  <para>
-   We'll see a number of ways that the user context can block
-   interrupts, to become truly non-preemptable.
-  </para>
-  
-  <sect1 id="basics-usercontext">
-   <title>User Context</title>
-
-   <para>
-    User context is when you are coming in from a system call or other
-    trap: like userspace, you can be preempted by more important tasks
-    and by interrupts.  You can sleep, by calling
-    <function>schedule()</function>.
-   </para>
-
-   <note>
-    <para>
-     You are always in user context on module load and unload,
-     and on operations on the block device layer.
-    </para>
-   </note>
-
-   <para>
-    In user context, the <varname>current</varname> pointer (indicating 
-    the task we are currently executing) is valid, and
-    <function>in_interrupt()</function>
-    (<filename>include/linux/interrupt.h</filename>) is <returnvalue>false
-    </returnvalue>.  
-   </para>
-
-   <caution>
-    <para>
-     Beware that if you have preemption or softirqs disabled
-     (see below), <function>in_interrupt()</function> will return a 
-     false positive.
-    </para>
-   </caution>
-  </sect1>
-
-  <sect1 id="basics-hardirqs">
-   <title>Hardware Interrupts (Hard IRQs)</title>
-
-   <para>
-    Timer ticks, <hardware>network cards</hardware> and 
-    <hardware>keyboard</hardware> are examples of real
-    hardware which produce interrupts at any time.  The kernel runs
-    interrupt handlers, which services the hardware.  The kernel
-    guarantees that this handler is never re-entered: if the same
-    interrupt arrives, it is queued (or dropped).  Because it
-    disables interrupts, this handler has to be fast: frequently it
-    simply acknowledges the interrupt, marks a 'software interrupt'
-    for execution and exits.
-   </para>
-
-   <para>
-    You can tell you are in a hardware interrupt, because 
-    <function>in_irq()</function> returns <returnvalue>true</returnvalue>.  
-   </para>
-   <caution>
-    <para>
-     Beware that this will return a false positive if interrupts are disabled 
-     (see below).
-    </para>
-   </caution>
-  </sect1>
-
-  <sect1 id="basics-softirqs">
-   <title>Software Interrupt Context: Softirqs and Tasklets</title>
-
-   <para>
-    Whenever a system call is about to return to userspace, or a
-    hardware interrupt handler exits, any 'software interrupts'
-    which are marked pending (usually by hardware interrupts) are
-    run (<filename>kernel/softirq.c</filename>).
-   </para>
-
-   <para>
-    Much of the real interrupt handling work is done here.  Early in
-    the transition to <acronym>SMP</acronym>, there were only 'bottom
-    halves' (BHs), which didn't take advantage of multiple CPUs.  Shortly 
-    after we switched from wind-up computers made of match-sticks and snot,
-    we abandoned this limitation and switched to 'softirqs'.
-   </para>
-
-   <para>
-    <filename class="headerfile">include/linux/interrupt.h</filename> lists the
-    different softirqs.  A very important softirq is the
-    timer softirq (<filename
-    class="headerfile">include/linux/timer.h</filename>): you can
-    register to have it call functions for you in a given length of
-    time.
-   </para>
-
-   <para>
-    Softirqs are often a pain to deal with, since the same softirq
-    will run simultaneously on more than one CPU.  For this reason,
-    tasklets (<filename
-    class="headerfile">include/linux/interrupt.h</filename>) are more
-    often used: they are dynamically-registrable (meaning you can have
-    as many as you want), and they also guarantee that any tasklet
-    will only run on one CPU at any time, although different tasklets
-    can run simultaneously.
-   </para>
-   <caution>
-    <para>
-     The name 'tasklet' is misleading: they have nothing to do with 'tasks',
-     and probably more to do with some bad vodka Alexey Kuznetsov had at the 
-     time.
-    </para>
-   </caution>
-
-   <para>
-    You can tell you are in a softirq (or tasklet)
-    using the <function>in_softirq()</function> macro 
-    (<filename class="headerfile">include/linux/interrupt.h</filename>).
-   </para>
-   <caution>
-    <para>
-     Beware that this will return a false positive if a bh lock (see below)
-     is held.
-    </para>
-   </caution>
-  </sect1>
- </chapter>
-
- <chapter id="basic-rules">
-  <title>Some Basic Rules</title>
-
-  <variablelist>
-   <varlistentry>
-    <term>No memory protection</term>
-    <listitem>
-     <para>
-      If you corrupt memory, whether in user context or
-      interrupt context, the whole machine will crash.  Are you
-      sure you can't do what you want in userspace?
-     </para>
-    </listitem>
-   </varlistentry>
-
-   <varlistentry>
-    <term>No floating point or <acronym>MMX</acronym></term>
-    <listitem>
-     <para>
-      The <acronym>FPU</acronym> context is not saved; even in user
-      context the <acronym>FPU</acronym> state probably won't
-      correspond with the current process: you would mess with some
-      user process' <acronym>FPU</acronym> state.  If you really want
-      to do this, you would have to explicitly save/restore the full
-      <acronym>FPU</acronym> state (and avoid context switches).  It
-      is generally a bad idea; use fixed point arithmetic first.
-     </para>
-    </listitem>
-   </varlistentry>
-
-   <varlistentry>
-    <term>A rigid stack limit</term>
-    <listitem>
-     <para>
-      Depending on configuration options the kernel stack is about 3K to 6K for most 32-bit architectures: it's
-      about 14K on most 64-bit archs, and often shared with interrupts
-      so you can't use it all.  Avoid deep recursion and huge local
-      arrays on the stack (allocate them dynamically instead).
-     </para>
-    </listitem>
-   </varlistentry>
-
-   <varlistentry>
-    <term>The Linux kernel is portable</term>
-    <listitem>
-     <para>
-      Let's keep it that way.  Your code should be 64-bit clean,
-      and endian-independent.  You should also minimize CPU
-      specific stuff, e.g. inline assembly should be cleanly
-      encapsulated and minimized to ease porting.  Generally it
-      should be restricted to the architecture-dependent part of
-      the kernel tree.
-     </para>
-    </listitem>
-   </varlistentry>
-  </variablelist>
- </chapter>
-
- <chapter id="ioctls">
-  <title>ioctls: Not writing a new system call</title>
-
-  <para>
-   A system call generally looks like this
-  </para>
-
-  <programlisting>
-asmlinkage long sys_mycall(int arg)
-{
-        return 0; 
-}
-  </programlisting>
-
-  <para>
-   First, in most cases you don't want to create a new system call.
-   You create a character device and implement an appropriate ioctl
-   for it.  This is much more flexible than system calls, doesn't have
-   to be entered in every architecture's
-   <filename class="headerfile">include/asm/unistd.h</filename> and
-   <filename>arch/kernel/entry.S</filename> file, and is much more
-   likely to be accepted by Linus.
-  </para>
-
-  <para>
-   If all your routine does is read or write some parameter, consider
-   implementing a <function>sysfs</function> interface instead.
-  </para>
-
-  <para>
-   Inside the ioctl you're in user context to a process.  When a
-   error occurs you return a negated errno (see
-   <filename class="headerfile">include/linux/errno.h</filename>),
-   otherwise you return <returnvalue>0</returnvalue>.
-  </para>
-
-  <para>
-   After you slept you should check if a signal occurred: the
-   Unix/Linux way of handling signals is to temporarily exit the
-   system call with the <constant>-ERESTARTSYS</constant> error.  The
-   system call entry code will switch back to user context, process
-   the signal handler and then your system call will be restarted
-   (unless the user disabled that).  So you should be prepared to
-   process the restart, e.g. if you're in the middle of manipulating
-   some data structure.
-  </para>
-
-  <programlisting>
-if (signal_pending(current))
-        return -ERESTARTSYS;
-  </programlisting>
-
-  <para>
-   If you're doing longer computations: first think userspace. If you
-   <emphasis>really</emphasis> want to do it in kernel you should
-   regularly check if you need to give up the CPU (remember there is
-   cooperative multitasking per CPU).  Idiom:
-  </para>
-
-  <programlisting>
-cond_resched(); /* Will sleep */ 
-  </programlisting>
-
-  <para>
-   A short note on interface design: the UNIX system call motto is
-   "Provide mechanism not policy".
-  </para>
- </chapter>
-
- <chapter id="deadlock-recipes">
-  <title>Recipes for Deadlock</title>
-
-  <para>
-   You cannot call any routines which may sleep, unless:
-  </para>
-  <itemizedlist>
-   <listitem>
-    <para>
-     You are in user context.
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     You do not own any spinlocks.
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     You have interrupts enabled (actually, Andi Kleen says
-     that the scheduling code will enable them for you, but
-     that's probably not what you wanted).
-    </para>
-   </listitem>
-  </itemizedlist>
-
-  <para>
-   Note that some functions may sleep implicitly: common ones are
-   the user space access functions (*_user) and memory allocation
-   functions without <symbol>GFP_ATOMIC</symbol>.
-  </para>
-
-  <para>
-   You should always compile your kernel
-   <symbol>CONFIG_DEBUG_ATOMIC_SLEEP</symbol> on, and it will warn
-   you if you break these rules.  If you <emphasis>do</emphasis> break
-   the rules, you will eventually lock up your box.
-  </para>
-
-  <para>
-   Really.
-  </para>
- </chapter>
-
- <chapter id="common-routines">
-  <title>Common Routines</title>
-
-  <sect1 id="routines-printk">
-   <title>
-    <function>printk()</function>
-    <filename class="headerfile">include/linux/kernel.h</filename>
-   </title>
-
-   <para>
-    <function>printk()</function> feeds kernel messages to the
-    console, dmesg, and the syslog daemon.  It is useful for debugging
-    and reporting errors, and can be used inside interrupt context,
-    but use with caution: a machine which has its console flooded with
-    printk messages is unusable.  It uses a format string mostly
-    compatible with ANSI C printf, and C string concatenation to give
-    it a first "priority" argument:
-   </para>
-
-   <programlisting>
-printk(KERN_INFO "i = %u\n", i);
-   </programlisting>
-
-   <para>
-    See <filename class="headerfile">include/linux/kernel.h</filename>
-    for the other KERN_ values; these are interpreted by syslog as the
-    level.  Special case: for printing an IP address use
-   </para>
-
-   <programlisting>
-__be32 ipaddress;
-printk(KERN_INFO "my ip: %pI4\n", &amp;ipaddress);
-   </programlisting>
-
-   <para>
-    <function>printk()</function> internally uses a 1K buffer and does
-    not catch overruns.  Make sure that will be enough.
-   </para>
-
-   <note>
-    <para>
-     You will know when you are a real kernel hacker
-     when you start typoing printf as printk in your user programs :)
-    </para>
-   </note>
-
-   <!--- From the Lions book reader department --> 
-
-   <note>
-    <para>
-     Another sidenote: the original Unix Version 6 sources had a
-     comment on top of its printf function: "Printf should not be
-     used for chit-chat".  You should follow that advice.
-    </para>
-   </note>
-  </sect1>
-
-  <sect1 id="routines-copy">
-   <title>
-    <function>copy_[to/from]_user()</function>
-    /
-    <function>get_user()</function>
-    /
-    <function>put_user()</function>
-    <filename class="headerfile">include/linux/uaccess.h</filename>
-   </title>  
-
-   <para>
-    <emphasis>[SLEEPS]</emphasis>
-   </para>
-
-   <para>
-    <function>put_user()</function> and <function>get_user()</function>
-    are used to get and put single values (such as an int, char, or
-    long) from and to userspace.  A pointer into userspace should
-    never be simply dereferenced: data should be copied using these
-    routines.  Both return <constant>-EFAULT</constant> or 0.
-   </para>
-   <para>
-    <function>copy_to_user()</function> and
-    <function>copy_from_user()</function> are more general: they copy
-    an arbitrary amount of data to and from userspace.
-    <caution>
-     <para>
-      Unlike <function>put_user()</function> and
-      <function>get_user()</function>, they return the amount of
-      uncopied data (ie. <returnvalue>0</returnvalue> still means
-      success).
-     </para>
-    </caution>
-    [Yes, this moronic interface makes me cringe.  The flamewar comes up every year or so. --RR.]
-   </para>
-   <para>
-    These functions may sleep implicitly.  They should never be called
-    outside user context (it makes no sense), with interrupts
-    disabled, or with a spinlock held.
-   </para>
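-
-   <para>
-    As a rough sketch (the helper and variable names are invented for
-    this example):
-   </para>
-
-   <programlisting>
-/* put_user()/get_user() return -EFAULT or 0: */
-static int mydev_report(int __user *uptr, int value)
-{
-        if (put_user(value, uptr))
-                return -EFAULT;
-        return 0;
-}
-
-/* copy_to_user() returns the number of bytes NOT copied: */
-static int mydev_fill(char __user *ubuf, const char *kbuf, size_t len)
-{
-        if (copy_to_user(ubuf, kbuf, len))
-                return -EFAULT;
-        return 0;
-}
-   </programlisting>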
-  </sect1>
-
-  <sect1 id="routines-kmalloc">
-   <title><function>kmalloc()</function>/<function>kfree()</function>
-    <filename class="headerfile">include/linux/slab.h</filename></title>
-
-   <para>
-    <emphasis>[MAY SLEEP: SEE BELOW]</emphasis>
-   </para>
-
-   <para>
-    These routines are used to dynamically request pointer-aligned
-    chunks of memory, like malloc and free do in userspace, but
-    <function>kmalloc()</function> takes an extra flag word.
-    Important values:
-   </para>
-
-   <variablelist>
-    <varlistentry>
-     <term>
-      <constant>
-       GFP_KERNEL
-      </constant>
-     </term>
-     <listitem>
-      <para>
-       May sleep and swap to free memory. Only allowed in user
-       context, but is the most reliable way to allocate memory.
-      </para>
-     </listitem>
-    </varlistentry>
-    
-    <varlistentry>
-     <term>
-      <constant>
-       GFP_ATOMIC
-      </constant>
-     </term>
-     <listitem>
-      <para>
-       Don't sleep. Less reliable than <constant>GFP_KERNEL</constant>,
-       but may be called from interrupt context. You should
-       <emphasis>really</emphasis> have a good out-of-memory
-       error-handling strategy.
-      </para>
-     </listitem>
-    </varlistentry>
-    
-    <varlistentry>
-     <term>
-      <constant>
-       GFP_DMA
-      </constant>
-     </term>
-     <listitem>
-      <para>
-       Allocate ISA DMA lower than 16MB. If you don't know what that
-       is you don't need it.  Very unreliable.
-      </para>
-     </listitem>
-    </varlistentry>
-   </variablelist>
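-
-   <para>
-    For example (a sketch only; <structname>foo</structname> is an
-    invented structure):
-   </para>
-
-   <programlisting>
-struct foo *f;
-
-f = kmalloc(sizeof(*f), GFP_KERNEL);    /* user context: may sleep */
-if (!f)
-        return -ENOMEM;
-/* ... use f ... */
-kfree(f);
-
-/* From interrupt context you would have to use GFP_ATOMIC instead,
-   and have a plan for when it fails. */
-   </programlisting>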
-
-   <para>
-    If you see a <errorname>sleeping function called from invalid
-    context</errorname> warning message, then maybe you called a
-    sleeping allocation function from interrupt context without
-    <constant>GFP_ATOMIC</constant>.  You should really fix that.
-    Run, don't walk.
-   </para>
-
-   <para>
-    If you are allocating at least <constant>PAGE_SIZE</constant>
-    (<filename class="headerfile">include/asm/page.h</filename>) bytes,
-    consider using <function>__get_free_pages()</function>
-
-    (<filename class="headerfile">include/linux/mm.h</filename>).  It
-    takes an order argument (0 for page sized, 1 for double page, 2
-    for four pages etc.) and the same memory priority flag word as
-    above.
-   </para>
-
-   <para>
-    If you are allocating more than a page worth of bytes you can use
-    <function>vmalloc()</function>.  It'll allocate virtual memory in
-    the kernel map.  This block is not contiguous in physical memory,
-    but the <acronym>MMU</acronym> makes it look like it is for you
-    (so it'll only look contiguous to the CPUs, not to external device
-    drivers).  If you really need large physically contiguous memory
-    for some weird device, you have a problem: it is poorly supported
-    in Linux because after some time memory fragmentation in a running
-    kernel makes it hard.  The best way is to allocate the block early
-    in the boot process via the <function>alloc_bootmem()</function>
-    routine.
-   </para>
-
-   <para>
-    Before inventing your own cache of often-used objects consider
-    using a slab cache in
-    <filename class="headerfile">include/linux/slab.h</filename>
-   </para>
-  </sect1>
-
-  <sect1 id="routines-current">
-   <title><function>current</function>
-    <filename class="headerfile">include/asm/current.h</filename></title>
-
-   <para>
-    This global variable (really a macro) contains a pointer to
-    the current task structure, so is only valid in user context.
-    For example, when a process makes a system call, this will
-    point to the task structure of the calling process.  It is
-    <emphasis>not NULL</emphasis> in interrupt context, but it points
-    at whatever task happened to be interrupted, so it is not useful
-    there.
-   </para>
-  </sect1>
-
-  <sect1 id="routines-udelay">
-   <title><function>mdelay()</function>/<function>udelay()</function>
-     <filename class="headerfile">include/asm/delay.h</filename>
-     <filename class="headerfile">include/linux/delay.h</filename>
-   </title>
-
-   <para>
-    The <function>udelay()</function> and <function>ndelay()</function>
-    functions can be used for small pauses.
-    Do not use large values with them as you risk
-    overflow - the helper function <function>mdelay()</function> is useful
-    here, or consider <function>msleep()</function>.
-   </para> 
-  </sect1>
- 
-  <sect1 id="routines-endian">
-   <title><function>cpu_to_be32()</function>/<function>be32_to_cpu()</function>/<function>cpu_to_le32()</function>/<function>le32_to_cpu()</function>
-     <filename class="headerfile">include/asm/byteorder.h</filename>
-   </title>
-
-   <para>
-    The <function>cpu_to_be32()</function> family (where the "32" can
-    be replaced by 64 or 16, and the "be" can be replaced by "le") are
-    the general way to do endian conversions in the kernel: they
-    return the converted value.  All variations supply the reverse as
-    well: <function>be32_to_cpu()</function>, etc.
-   </para>
-
-   <para>
-    There are two major variations of these functions: the pointer
-    variation, such as <function>cpu_to_be32p()</function>, which takes
-    a pointer to the given type and returns the converted value.  The
-    other variation is the "in-situ" family, such as
-    <function>cpu_to_be32s()</function>, which converts the value
-    referred to by the pointer, and returns void.
-   </para> 
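-
-   <para>
-    A quick sketch (the variable names are invented):
-   </para>
-
-   <programlisting>
-__be32 wire;                      /* big-endian, as seen on the wire */
-u32 host;
-
-wire = cpu_to_be32(0x12345678);   /* CPU order to big-endian */
-host = be32_to_cpu(wire);         /* big-endian back to CPU order */
-host = be32_to_cpup(&amp;wire);       /* the pointer variation */
-   </programlisting>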
-  </sect1>
-
-  <sect1 id="routines-local-irqs">
-   <title><function>local_irq_save()</function>/<function>local_irq_restore()</function>
-    <filename class="headerfile">include/linux/irqflags.h</filename>
-   </title>
-
-   <para>
-    These routines disable hard interrupts on the local CPU, and
-    restore them.  They are reentrant, saving the previous state in
-    their one <varname>unsigned long flags</varname> argument.  If you
-    know that interrupts are enabled, you can simply use
-    <function>local_irq_disable()</function> and
-    <function>local_irq_enable()</function>.
-   </para>
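-
-   <para>
-    Typical usage (a sketch):
-   </para>
-
-   <programlisting>
-unsigned long flags;
-
-local_irq_save(flags);       /* hard interrupts off, previous state saved */
-/* ... touch data shared with an interrupt handler ... */
-local_irq_restore(flags);    /* back to whatever state it was in before */
-   </programlisting>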
-  </sect1>
-
-  <sect1 id="routines-softirqs">
-   <title><function>local_bh_disable()</function>/<function>local_bh_enable()</function>
-    <filename class="headerfile">include/linux/interrupt.h</filename></title>
-
-   <para>
-    These routines disable soft interrupts on the local CPU, and
-    restore them.  They are reentrant; if soft interrupts were
-    disabled before, they will still be disabled after this pair
-    of functions has been called.  They prevent softirqs and tasklets
-    from running on the current CPU.
-   </para>
-  </sect1>
-
-  <sect1 id="routines-processorids">
-   <title><function>smp_processor_id</function>()
-    <filename class="headerfile">include/asm/smp.h</filename></title>
-   
-   <para>
-    <function>get_cpu()</function> disables preemption (so you won't
-    suddenly get moved to another CPU) and returns the current
-    processor number, between 0 and <symbol>NR_CPUS</symbol>.  Note
-    that the CPU numbers are not necessarily contiguous.  You return
-    it again with <function>put_cpu()</function> when you are done.
-   </para>
-   <para>
-    If you know you cannot be preempted by another task (ie. you are
-    in interrupt context, or have preemption disabled) you can use
-    smp_processor_id().
-   </para>
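-
-   <para>
-    For example (a sketch; the per-CPU counter is invented):
-   </para>
-
-   <programlisting>
-int cpu;
-
-cpu = get_cpu();             /* preemption disabled from here on */
-my_counters[cpu]++;
-put_cpu();                   /* preemption enabled again */
-   </programlisting>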
-  </sect1>
-
-  <sect1 id="routines-init">
-   <title><type>__init</type>/<type>__exit</type>/<type>__initdata</type>
-    <filename class="headerfile">include/linux/init.h</filename></title>
-
-   <para>
-    After boot, the kernel frees up a special section; functions
-    marked with <type>__init</type> and data structures marked with
-    <type>__initdata</type> are dropped after boot is complete: similarly
-    modules discard this memory after initialization.  <type>__exit</type>
-    is used to declare a function which is only required on exit: the
-    function will be dropped if this file is not compiled as a module.
-    See the header file for use. Note that it makes no sense for a function
-    marked with <type>__init</type> to be exported to modules with 
-    <function>EXPORT_SYMBOL()</function> - this will break.
-   </para>
-
-  </sect1>
-
-  <sect1 id="routines-init-again">
-   <title><function>__initcall()</function>/<function>module_init()</function>
-    <filename class="headerfile">include/linux/init.h</filename></title>
-   <para>
-    Many parts of the kernel are well served as a module
-    (dynamically-loadable parts of the kernel).  Using the
-    <function>module_init()</function> and
-    <function>module_exit()</function> macros it is easy to write code
-    without #ifdefs which can operate both as a module or built into
-    the kernel.
-   </para>
-
-   <para>
-    The <function>module_init()</function> macro defines which
-    function is to be called at module insertion time (if the file is
-    compiled as a module), or at boot time: if the file is not
-    compiled as a module the <function>module_init()</function> macro
-    becomes equivalent to <function>__initcall()</function>, which
-    through linker magic ensures that the function is called on boot.
-   </para>
-
-   <para>
-    The function can return a negative error number to cause
-    module loading to fail (unfortunately, this has no effect if
-    the module is compiled into the kernel).  This function is
-    called in user context with interrupts enabled, so it can sleep.
-   </para>
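-
-   <para>
-    A minimal skeleton (the names are invented) looks like:
-   </para>
-
-   <programlisting>
-static int __init mymodule_init(void)
-{
-        /* In user context, interrupts enabled: sleeping is fine. */
-        return 0;    /* a negative errno here makes the load fail */
-}
-
-static void __exit mymodule_exit(void)
-{
-        /* Clean everything up: this function cannot fail. */
-}
-
-module_init(mymodule_init);
-module_exit(mymodule_exit);
-   </programlisting>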
-  </sect1>
-  
-  <sect1 id="routines-moduleexit">
-   <title> <function>module_exit()</function>
-    <filename class="headerfile">include/linux/init.h</filename> </title>
-
-   <para>
-    This macro defines the function to be called at module removal
-    time (or never, in the case of the file compiled into the
-    kernel).  It will only be called if the module usage count has
-    reached zero.  This function can also sleep, but cannot fail:
-    everything must be cleaned up by the time it returns.
-   </para>
-
-   <para>
-    Note that this macro is optional: if it is not present, your
-    module will not be removable (except for 'rmmod -f').
-   </para>
-  </sect1>
-
-  <sect1 id="routines-module-use-counters">
-   <title> <function>try_module_get()</function>/<function>module_put()</function>
-    <filename class="headerfile">include/linux/module.h</filename></title>
-
-   <para>
-    These manipulate the module usage count, to protect against
-    removal (a module also can't be removed if another module uses one
-    of its exported symbols: see below).  Before calling into module
-    code, you should call <function>try_module_get()</function> on
-    that module: if it fails, then the module is being removed and you
-    should act as if it wasn't there.  Otherwise, you can safely enter
-    the module, and call <function>module_put()</function> when you're
-    finished.
-   </para>
-
-   <para>
-   Most registerable structures have an
-   <structfield>owner</structfield> field, such as in the
-   <structname>file_operations</structname> structure. Set this field
-   to the macro <symbol>THIS_MODULE</symbol>.
-   </para>
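-
-   <para>
-    A sketch of both idioms (the structure instance and the helpers
-    are invented):
-   </para>
-
-   <programlisting>
-static const struct file_operations mydev_fops = {
-        .owner = THIS_MODULE,      /* let the VFS hold the count for us */
-        .open  = mydev_open,
-};
-
-/* Calling into another module's code by hand: */
-if (!try_module_get(other_module))
-        return -ENODEV;            /* it is being removed: act as if absent */
-other_module_do_something();
-module_put(other_module);
-   </programlisting>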
-  </sect1>
-
- <!-- add info on new-style module refcounting here -->
- </chapter>
-
- <chapter id="queues">
-  <title>Wait Queues
-   <filename class="headerfile">include/linux/wait.h</filename>
-  </title>
-  <para>
-   <emphasis>[SLEEPS]</emphasis>
-  </para>
-
-  <para>
-   A wait queue is used to wait for someone to wake you up when a
-   certain condition is true.  They must be used carefully to ensure
-   there is no race condition.  You declare a
-   <type>wait_queue_head_t</type>, and then processes which want to
-   wait for that condition declare a <type>wait_queue_t</type>
-   referring to themselves, and place that in the queue.
-  </para>
-
-  <sect1 id="queue-declaring">
-   <title>Declaring</title>
-   
-   <para>
-    You declare a <type>wait_queue_head_t</type> using the
-    <function>DECLARE_WAIT_QUEUE_HEAD()</function> macro, or using the
-    <function>init_waitqueue_head()</function> routine in your
-    initialization code.
-   </para>
-  </sect1>
-  
-  <sect1 id="queue-waitqueue">
-   <title>Queuing</title>
-   
-   <para>
-    Placing yourself in the waitqueue is fairly complex, because you
-    must put yourself in the queue before checking the condition.
-    There is a macro to do this:
-    <function>wait_event_interruptible()</function>
-
-    (<filename class="headerfile">include/linux/wait.h</filename>).  The
-    first argument is the wait queue head, and the second is an
-    expression which is evaluated; the macro returns
-    <returnvalue>0</returnvalue> when this expression is true, or
-    <returnvalue>-ERESTARTSYS</returnvalue> if a signal is received.
-    The <function>wait_event()</function> version ignores signals.
-   </para>
- 
-  </sect1>
-
-  <sect1 id="queue-waking">
-   <title>Waking Up Queued Tasks</title>
-   
-   <para>
-    Call <function>wake_up()</function>
-
-    (<filename class="headerfile">include/linux/wait.h</filename>),
-    which will wake up every process in the queue.  The exception is
-    if one has <constant>TASK_EXCLUSIVE</constant> set, in which case
-    the remainder of the queue will not be woken.  There are other variants
-    of this basic function available in the same header.
-   </para>
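-
-   <para>
-    Putting the pieces together (the names are invented for this
-    sketch):
-   </para>
-
-   <programlisting>
-static DECLARE_WAIT_QUEUE_HEAD(mydev_wait);
-static int mydev_ready;
-
-/* The sleeping side (user context): */
-if (wait_event_interruptible(mydev_wait, mydev_ready))
-        return -ERESTARTSYS;       /* a signal arrived while we slept */
-
-/* The waking side (eg. an interrupt handler): */
-mydev_ready = 1;
-wake_up(&amp;mydev_wait);
-   </programlisting>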
-  </sect1>
- </chapter>
-
- <chapter id="atomic-ops">
-  <title>Atomic Operations</title>
-
-  <para>
-   Certain operations are guaranteed atomic on all platforms.  The
-   first class of operations work on <type>atomic_t</type>
-
-   <filename class="headerfile">include/asm/atomic.h</filename>; this
-   contains a signed integer (at least 32 bits long), and you must use
-   these functions to manipulate or read atomic_t variables.
-   <function>atomic_read()</function> and
-   <function>atomic_set()</function> get and set the counter; to
-   modify it there are
-   <function>atomic_add()</function>,
-   <function>atomic_sub()</function>,
-   <function>atomic_inc()</function>,
-   <function>atomic_dec()</function>, and
-   <function>atomic_dec_and_test()</function> (which returns
-   <returnvalue>true</returnvalue> if it was decremented to zero).
-  </para>
-
-  <para>
-   Yes.  It returns <returnvalue>true</returnvalue> (i.e. != 0) if the
-   atomic variable is zero.
-  </para>
-
-  <para>
-   Note that these functions are slower than normal arithmetic, and
-   so should not be used unnecessarily.
-  </para>
-
-  <para>
-   The second class of atomic operations is atomic bit operations on an
-   <type>unsigned long</type>, defined in
-
-   <filename class="headerfile">include/linux/bitops.h</filename>.  These
-   operations generally take a pointer to the bit pattern, and a bit
-   number: 0 is the least significant bit.
-   <function>set_bit()</function>, <function>clear_bit()</function>
-   and <function>change_bit()</function> set, clear, and flip the
-   given bit.  <function>test_and_set_bit()</function>,
-   <function>test_and_clear_bit()</function> and
-   <function>test_and_change_bit()</function> do the same thing,
-   except return true if the bit was previously set; these are
-   particularly useful for atomically setting flags.
-  </para>
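-
-  <para>
-   For instance (a sketch; the variables and the cleanup helper are
-   invented):
-  </para>
-
-  <programlisting>
-static atomic_t refcount = ATOMIC_INIT(1);
-static unsigned long my_flags;
-
-atomic_inc(&amp;refcount);
-if (atomic_dec_and_test(&amp;refcount))
-        do_cleanup();                 /* we were the last user */
-
-if (test_and_set_bit(0, &amp;my_flags))
-        return -EBUSY;                /* bit was already set: someone beat us */
-  </programlisting>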
-  
-  <para>
-   It is possible to call these operations with bit indices greater
-   than BITS_PER_LONG.  The resulting behavior is strange on big-endian
-   platforms, though, so it is a good idea not to do this.
-  </para>
- </chapter>
-
- <chapter id="symbols">
-  <title>Symbols</title>
-
-  <para>
-   Within the kernel proper, the normal linking rules apply
-   (ie. unless a symbol is declared to be file scope with the
-   <type>static</type> keyword, it can be used anywhere in the
-   kernel).  However, for modules, a special exported symbol table is
-   kept which limits the entry points to the kernel proper.  Modules
-   can also export symbols.
-  </para>
-
-  <sect1 id="sym-exportsymbols">
-   <title><function>EXPORT_SYMBOL()</function>
-    <filename class="headerfile">include/linux/export.h</filename></title>
-
-   <para>
-    This is the classic method of exporting a symbol: dynamically
-    loaded modules will be able to use the symbol as normal.
-   </para>
-  </sect1>
-
-  <sect1 id="sym-exportsymbols-gpl">
-   <title><function>EXPORT_SYMBOL_GPL()</function>
-    <filename class="headerfile">include/linux/export.h</filename></title>
-
-   <para>
-    Similar to <function>EXPORT_SYMBOL()</function> except that the
-    symbols exported by <function>EXPORT_SYMBOL_GPL()</function> can
-    only be seen by modules with a
-    <function>MODULE_LICENSE()</function> that specifies a GPL
-    compatible license.  It implies that the function is considered
-    an internal implementation issue, and not really an interface.
-    Some maintainers and developers may however
-    require EXPORT_SYMBOL_GPL() when adding any new APIs or functionality.
-   </para>
-  </sect1>
- </chapter>
-
- <chapter id="conventions">
-  <title>Routines and Conventions</title>
-
-  <sect1 id="conventions-doublelinkedlist">
-   <title>Double-linked lists
-    <filename class="headerfile">include/linux/list.h</filename></title>
-
-   <para>
-    There used to be three sets of linked-list routines in the kernel
-    headers, but this one is the winner.  If you don't have some
-    particular pressing need for a single list, it's a good choice.
-   </para>
-
-   <para>
-    In particular, <function>list_for_each_entry</function> is useful.
-   </para>
-  </sect1>
-
-  <sect1 id="convention-returns">
-   <title>Return Conventions</title>
-
-   <para>
-    For code called in user context, it's very common to defy C
-    convention, and return <returnvalue>0</returnvalue> for success,
-    and a negative error number
-    (eg. <returnvalue>-EFAULT</returnvalue>) for failure.  This can be
-    unintuitive at first, but it's fairly widespread in the kernel.
-   </para>
-
-   <para>
-    Use <function>ERR_PTR()</function>
-    (<filename class="headerfile">include/linux/err.h</filename>) to
-    encode a negative error number into a pointer, and
-    <function>IS_ERR()</function> and <function>PTR_ERR()</function>
-    to get it back out again: this avoids a separate pointer parameter
-    for the error number.  Icky, but in a good way.
-   </para>
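-
-   <para>
-    For example (a sketch; <function>find_thing()</function> and the
-    names around it are invented):
-   </para>
-
-   <programlisting>
-static struct thing *find_thing(int id)
-{
-        if (id &lt; 0 || id &gt;= MAX_THINGS)
-                return ERR_PTR(-EINVAL);   /* error encoded in the pointer */
-        return &amp;things[id];
-}
-
-struct thing *t = find_thing(id);
-if (IS_ERR(t))
-        return PTR_ERR(t);                 /* get the errno back out */
-   </programlisting>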
-  </sect1>
-
-  <sect1 id="conventions-borkedcompile">
-   <title>Breaking Compilation</title>
-
-   <para>
-    Linus and the other developers sometimes change function or
-    structure names in development kernels; this is not done just to
-    keep everyone on their toes: it reflects a fundamental change
-    (eg. can no longer be called with interrupts on, or does extra
-    checks, or doesn't do checks which were caught before).  Usually
-    this is accompanied by a fairly complete note to the linux-kernel
-    mailing list; search the archive.  Simply doing a global replace
-    on the file usually makes things <emphasis>worse</emphasis>.
-   </para>
-  </sect1>
-
-  <sect1 id="conventions-initialising">
-   <title>Initializing structure members</title>
-
-   <para>
-    The preferred method of initializing structures is to use
-    designated initialisers, as defined by ISO C99, eg:
-   </para>
-   <programlisting>
-static struct block_device_operations opt_fops = {
-        .open               = opt_open,
-        .release            = opt_release,
-        .ioctl              = opt_ioctl,
-        .check_media_change = opt_media_change,
-};
-   </programlisting>
-   <para>
-    This makes it easy to grep for, and makes it clear which
-    structure fields are set.  You should do this because it looks
-    cool.
-   </para>
-  </sect1>
-
-  <sect1 id="conventions-gnu-extns">
-   <title>GNU Extensions</title>
-
-   <para>
-    GNU Extensions are explicitly allowed in the Linux kernel.
-    Note that some of the more complex ones are not very well
-    supported, due to lack of general use, but the following are
-    considered standard (see the GCC info page section "C
-    Extensions" for more details - Yes, really the info page, the
-    man page is only a short summary of the stuff in info).
-   </para>
-   <itemizedlist>
-    <listitem>
-     <para>
-      Inline functions
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Statement expressions (ie. the ({ and }) constructs).
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Declaring attributes of a function / variable / type
-      (__attribute__)
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      typeof
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Zero length arrays
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Macro varargs
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Arithmetic on void pointers
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Non-Constant initializers
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Assembler Instructions (not outside arch/ and include/asm/)
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      Function names as strings (__func__).
-     </para>
-    </listitem>
-    <listitem>
-     <para>
-      __builtin_constant_p()
-     </para>
-    </listitem>
-   </itemizedlist>
-
-   <para>
-    Be wary when using long long in the kernel: the code gcc generates
-    for it is horrible and, worse, division and multiplication do not
-    work on i386 because the GCC runtime functions for them are missing
-    from the kernel environment.
-   </para>
-
-    <!-- FIXME: add a note about ANSI aliasing cleanness -->
-  </sect1>
-
-  <sect1 id="conventions-cplusplus">
-   <title>C++</title>
-   
-   <para>
-    Using C++ in the kernel is usually a bad idea, because the
-    kernel does not provide the necessary runtime environment
-    and the include files are not tested for it.  It is still
-    possible, but not recommended.  If you really want to do
-    this, forget about exceptions at least.
-   </para>
-  </sect1>
-
-  <sect1 id="conventions-ifdef">
-   <title>&num;if</title>
-   
-   <para>
-    It is generally considered cleaner to use macros in header files
-    (or at the top of .c files) to abstract away functions rather than
-    using `#if' pre-processor statements throughout the source code.
-   </para>
-  </sect1>
- </chapter>
-
- <chapter id="submitting">
-  <title>Putting Your Stuff in the Kernel</title>
-
-  <para>
-   In order to get your stuff into shape for official inclusion, or
-   even to make a neat patch, there's administrative work to be
-   done:
-  </para>
-  <itemizedlist>
-   <listitem>
-    <para>
-     Figure out whose pond you've been pissing in.  Look at the top of
-     the source files, inside the <filename>MAINTAINERS</filename>
-     file, and last of all in the <filename>CREDITS</filename> file.
-     You should coordinate with this person to make sure you're not
-     duplicating effort, or trying something that's already been
-     rejected.
-    </para>
-
-    <para>
-     Make sure you put your name and EMail address at the top of
-     any files you create or mangle significantly.  This is the
-     first place people will look when they find a bug, or when
-     <emphasis>they</emphasis> want to make a change.
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     Usually you want a configuration option for your kernel hack.
-     Edit <filename>Kconfig</filename> in the appropriate directory.
-     The Config language is simple to use by cut and paste, and there's
-     complete documentation in
-     <filename>Documentation/kbuild/kconfig-language.txt</filename>.
-    </para>
-
-    <para>
-     In your description of the option, make sure you address both the
-     expert user and the user who knows nothing about your feature.  Mention
-     incompatibilities and issues here.  <emphasis> Definitely
-     </emphasis> end your description with <quote> if in doubt, say N
-     </quote> (or, occasionally, `Y'); this is for people who have no
-     idea what you are talking about.
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     Edit the <filename>Makefile</filename>: the CONFIG variables are
-     exported here so you can usually just add a "obj-$(CONFIG_xxx) +=
-     xxx.o" line.  The syntax is documented in
-     <filename>Documentation/kbuild/makefiles.txt</filename>.
-    </para>
-   </listitem>
-
-   <listitem>
-    <para>
-     Put yourself in <filename>CREDITS</filename> if you've done
-     something noteworthy, usually beyond a single file (your name
-     should be at the top of the source files anyway).
-     <filename>MAINTAINERS</filename> means you want to be consulted
-     when changes are made to a subsystem, and hear about bugs; it
-     implies a more-than-passing commitment to some part of the code.
-    </para>
-   </listitem>
-   
-   <listitem>
-    <para>
-     Finally, don't forget to read <filename>Documentation/process/submitting-patches.rst</filename>
-     and possibly <filename>Documentation/process/submitting-drivers.rst</filename>.
-    </para>
-   </listitem>
-  </itemizedlist>
- </chapter>
-
- <chapter id="cantrips">
-  <title>Kernel Cantrips</title>
-
-  <para>
-   Some favorites from browsing the source.  Feel free to add to this
-   list.
-  </para>
-
-  <para>
-   <filename>arch/x86/include/asm/delay.h:</filename>
-  </para>
-  <programlisting>
-#define ndelay(n) (__builtin_constant_p(n) ? \
-        ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
-        __ndelay(n))
-  </programlisting>
-
-  <para>
-   <filename>include/linux/fs.h</filename>:
-  </para>
-  <programlisting>
-/*
- * Kernel pointers have redundant information, so we can use a
- * scheme where we can return either an error code or a dentry
- * pointer with the same return value.
- *
- * This should be a per-architecture thing, to allow different
- * error and pointer decisions.
- */
- #define ERR_PTR(err)    ((void *)((long)(err)))
- #define PTR_ERR(ptr)    ((long)(ptr))
- #define IS_ERR(ptr)     ((unsigned long)(ptr) > (unsigned long)(-1000))
-</programlisting>
-
-  <para>
-   <filename>arch/x86/include/asm/uaccess_32.h:</filename>
-  </para>
-
-  <programlisting>
-#define copy_to_user(to,from,n)                         \
-        (__builtin_constant_p(n) ?                      \
-         __constant_copy_to_user((to),(from),(n)) :     \
-         __generic_copy_to_user((to),(from),(n)))
-  </programlisting>
-
-  <para>
-   <filename>arch/sparc/kernel/head.S:</filename>
-  </para>
-
-  <programlisting>
-/*
- * Sun people can't spell worth damn. "compatability" indeed.
- * At least we *know* we can't spell, and use a spell-checker.
- */
-
-/* Uh, actually Linus it is I who cannot spell. Too much murky
- * Sparc assembly will do this to ya.
- */
-C_LABEL(cputypvar):
-        .asciz "compatibility"
-
-/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
-        .align 4
-C_LABEL(cputypvar_sun4m):
-        .asciz "compatible"
-  </programlisting>
-
-  <para>
-   <filename>arch/sparc/lib/checksum.S:</filename>
-  </para>
-
-  <programlisting>
-        /* Sun, you just can't beat me, you just can't.  Stop trying,
-         * give up.  I'm serious, I am going to kick the living shit
-         * out of you, game over, lights out.
-         */
-  </programlisting>
- </chapter>
-
- <chapter id="credits">
-  <title>Thanks</title>
-
-  <para>
-   Thanks to Andi Kleen for the idea, answering my questions, fixing
-   my mistakes, filling content, etc.  Philipp Rumpf for more spelling
-   and clarity fixes, and some excellent non-obvious points.  Werner
-   Almesberger for giving me a great summary of
-   <function>disable_irq()</function>, and Jes Sorensen and Andrea
-   Arcangeli added caveats. Michael Elizabeth Chastain for checking
-   and adding to the Configure section. <!-- Rusty insisted on this
-   bit; I didn't do it! --> Telsa Gwynne for teaching me DocBook. 
-  </para>
- </chapter>
-</book>
-
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
deleted file mode 100644
index 7c9cc484..0000000
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ /dev/null
@@ -1,2151 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="LKLockingGuide">
- <bookinfo>
-  <title>Unreliable Guide To Locking</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Rusty</firstname>
-    <surname>Russell</surname>
-    <affiliation>
-     <address>
-      <email>rusty@rustcorp.com.au</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2003</year>
-   <holder>Rusty Russell</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-      
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-      
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-      
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
- <toc></toc>
-  <chapter id="intro">
-   <title>Introduction</title>
-   <para>
-     Welcome, to Rusty's Remarkably Unreliable Guide to Kernel
-     Locking issues.  This document describes the locking systems in
-     the Linux Kernel in 2.6.
-   </para>
-   <para>
-     With the wide availability of HyperThreading, and <firstterm
-     linkend="gloss-preemption">preemption </firstterm> in the Linux
-     Kernel, everyone hacking on the kernel needs to know the
-     fundamentals of concurrency and locking for
-     <firstterm linkend="gloss-smp"><acronym>SMP</acronym></firstterm>.
-   </para>
-  </chapter>
-
-   <chapter id="races">
-    <title>The Problem With Concurrency</title>
-    <para>
-      (Skip this if you know what a Race Condition is).
-    </para>
-    <para>
-      In a normal program, you can increment a counter like so:
-    </para>
-    <programlisting>
-      very_important_count++;
-    </programlisting>
-
-    <para>
-      This is what you would expect to happen:
-    </para>
-
-    <table>
-     <title>Expected Results</title>
-
-     <tgroup cols="2" align="left">
-
-      <thead>
-       <row>
-        <entry>Instance 1</entry>
-        <entry>Instance 2</entry>
-       </row>
-      </thead>
-
-      <tbody>
-       <row>
-        <entry>read very_important_count (5)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry>add 1 (6)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry>write very_important_count (6)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>read very_important_count (6)</entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>add 1 (7)</entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>write very_important_count (7)</entry>
-       </row>
-      </tbody>
-
-     </tgroup>
-    </table>
-
-    <para>
-     This is what might happen:
-    </para>
-
-    <table>
-     <title>Possible Results</title>
-
-     <tgroup cols="2" align="left">
-      <thead>
-       <row>
-        <entry>Instance 1</entry>
-        <entry>Instance 2</entry>
-       </row>
-      </thead>
-
-      <tbody>
-       <row>
-        <entry>read very_important_count (5)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>read very_important_count (5)</entry>
-       </row>
-       <row>
-        <entry>add 1 (6)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>add 1 (6)</entry>
-       </row>
-       <row>
-        <entry>write very_important_count (6)</entry>
-        <entry></entry>
-       </row>
-       <row>
-        <entry></entry>
-        <entry>write very_important_count (6)</entry>
-       </row>
-      </tbody>
-     </tgroup>
-    </table>
-
-    <sect1 id="race-condition">
-    <title>Race Conditions and Critical Regions</title>
-    <para>
-      This overlap, where the result depends on the
-      relative timing of multiple tasks, is called a <firstterm>race condition</firstterm>.
-      The piece of code containing the concurrency issue is called a
-      <firstterm>critical region</firstterm>.  And especially since
-      Linux started running on SMP machines, race conditions became
-      one of the major issues in kernel
-      design and implementation.
-    </para>
-    <para>
-      Preemption can have the same effect, even if there is only one
-      CPU: by preempting one task during the critical region, we have
-      exactly the same race condition.  In this case the thread which
-      preempts might run the critical region itself.
-    </para>
-    <para>
-      The solution is to recognize when these simultaneous accesses
-      occur, and use locks to make sure that only one instance can
-      enter the critical region at any time.  There are many
-      friendly primitives in the Linux kernel to help you do this.
-      And then there are the unfriendly primitives, but I'll pretend
-      they don't exist.
-    </para>
-    </sect1>
-  </chapter>
-
-  <chapter id="locks">
-   <title>Locking in the Linux Kernel</title>
-
-   <para>
-     If I could give you one piece of advice: never sleep with anyone
-     crazier than yourself.  But if I had to give you advice on
-     locking: <emphasis>keep it simple</emphasis>.
-   </para>
-
-   <para>
-     Be reluctant to introduce new locks.
-   </para>
-
-   <para>
-     Strangely enough, this last one is the exact reverse of my advice when
-     you <emphasis>have</emphasis> slept with someone crazier than yourself.
-     And you should think about getting a big dog.
-   </para>
-
-   <sect1 id="lock-intro">
-   <title>Two Main Types of Kernel Locks: Spinlocks and Mutexes</title>
-
-   <para>
-     There are two main types of kernel locks.  The fundamental type
-     is the spinlock 
-     (<filename class="headerfile">include/asm/spinlock.h</filename>),
-     which is a very simple single-holder lock: if you can't get the 
-     spinlock, you keep trying (spinning) until you can.  Spinlocks are 
-     very small and fast, and can be used anywhere.
-   </para>
-   <para>
-     The second type is a mutex
-     (<filename class="headerfile">include/linux/mutex.h</filename>): it
-     is like a spinlock, but you may block holding a mutex.
-     If you can't lock a mutex, your task will suspend itself, and be woken
-     up when the mutex is released.  This means the CPU can do something
-     else while you are waiting.  There are many cases when you simply
-     can't sleep (see <xref linkend="sleeping-things"/>), and so have to
-     use a spinlock instead.
-   </para>
-   <para>
-     Neither type of lock is recursive: see
-     <xref linkend="deadlock"/>.
-   </para>
-   </sect1>
- 
-   <sect1 id="uniprocessor">
-    <title>Locks and Uniprocessor Kernels</title>
-
-    <para>
-      For kernels compiled without <symbol>CONFIG_SMP</symbol>, and
-      without <symbol>CONFIG_PREEMPT</symbol>, spinlocks do not exist at
-      all.  This is an excellent design decision: when no-one else can
-      run at the same time, there is no reason to have a lock.
-    </para>
-
-    <para>
-      If the kernel is compiled without <symbol>CONFIG_SMP</symbol>,
-      but <symbol>CONFIG_PREEMPT</symbol> is set, then spinlocks
-      simply disable preemption, which is sufficient to prevent any
-      races.  For most purposes, we can think of preemption as
-      equivalent to SMP, and not worry about it separately.
-    </para>
-
-    <para>
-      You should always test your locking code with <symbol>CONFIG_SMP</symbol>
-      and <symbol>CONFIG_PREEMPT</symbol> enabled, even if you don't
-      have an SMP test box, because it
-      will still catch some kinds of locking bugs.
-    </para>
-
-    <para>
-      Mutexes still exist, because they are required for
-      synchronization between <firstterm linkend="gloss-usercontext">user 
-      contexts</firstterm>, as we will see below.
-    </para>
-   </sect1>
-
-    <sect1 id="usercontextlocking">
-     <title>Locking Only In User Context</title>
-
-     <para>
-       If you have a data structure which is only ever accessed from
-       user context, then you can use a simple mutex
-       (<filename>include/linux/mutex.h</filename>) to protect it.  This
-       is the most trivial case: you initialize the mutex.  Then you can
-       call <function>mutex_lock_interruptible()</function> to grab the mutex,
-       and <function>mutex_unlock()</function> to release it.  There is also a 
-       <function>mutex_lock()</function>, which should be avoided, because it 
-       will not return if a signal is received.
-     </para>
-
-     <para>
-       Example: <filename>net/netfilter/nf_sockopt.c</filename> allows 
-       registration of new <function>setsockopt()</function> and 
-       <function>getsockopt()</function> calls, with
-       <function>nf_register_sockopt()</function>.  Registration and 
-       de-registration are only done on module load and unload (and boot 
-       time, where there is no concurrency), and the list of registrations 
-       is only consulted for an unknown <function>setsockopt()</function>
-       or <function>getsockopt()</function> system call.  The 
-       <varname>nf_sockopt_mutex</varname> is perfect to protect this,
-       especially since the setsockopt and getsockopt calls may well
-       sleep.
-     </para>
-   </sect1>
-
-   <sect1 id="lock-user-bh">
-    <title>Locking Between User Context and Softirqs</title>
-
-    <para>
-      If a <firstterm linkend="gloss-softirq">softirq</firstterm> shares
-      data with user context, you have two problems.  Firstly, the current 
-      user context can be interrupted by a softirq, and secondly, the
-      critical region could be entered from another CPU.  This is where
-      <function>spin_lock_bh()</function> 
-      (<filename class="headerfile">include/linux/spinlock.h</filename>) is
-      used.  It disables softirqs on that CPU, then grabs the lock.
-      <function>spin_unlock_bh()</function> does the reverse.  (The
-      '_bh' suffix is a historical reference to "Bottom Halves", the
-      old name for software interrupts.  It should really be
-      called 'spin_lock_softirq()' in a perfect world).
-    </para>
-
-    <para>
-      Note that you can also use <function>spin_lock_irq()</function>
-      or <function>spin_lock_irqsave()</function> here, which stop
-      hardware interrupts as well: see <xref linkend="hardirq-context"/>.
-    </para>
-
-    <para>
-      This works perfectly for <firstterm linkend="gloss-up"><acronym>UP
-      </acronym></firstterm> as well: the spin lock vanishes, and this macro 
-      simply becomes <function>local_bh_disable()</function>
-      (<filename class="headerfile">include/linux/interrupt.h</filename>), which
-      protects you from the softirq being run.
-    </para>
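-
-    <para>
-      As a sketch (the data and lock names here are invented):
-    </para>
-
-    <programlisting>
-static DEFINE_SPINLOCK(mydata_lock);
-static unsigned int mydata_count;
-
-/* User context side: */
-spin_lock_bh(&amp;mydata_lock);      /* softirqs off on this CPU, lock held */
-mydata_count++;
-spin_unlock_bh(&amp;mydata_lock);
-
-/* The softirq side just uses spin_lock()/spin_unlock(). */
-    </programlisting>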
-   </sect1>
-
-   <sect1 id="lock-user-tasklet">
-    <title>Locking Between User Context and Tasklets</title>
-
-    <para>
-      This is exactly the same as above, because <firstterm
-      linkend="gloss-tasklet">tasklets</firstterm> are actually run
-      from a softirq.
-    </para>
-   </sect1>
-
-   <sect1 id="lock-user-timers">
-    <title>Locking Between User Context and Timers</title>
-
-    <para>
-      This, too, is exactly the same as above, because <firstterm
-      linkend="gloss-timers">timers</firstterm> are actually run from
-      a softirq.  From a locking point of view, tasklets and timers
-      are identical.
-    </para>
-   </sect1>
-
-   <sect1 id="lock-tasklets">
-    <title>Locking Between Tasklets/Timers</title>
-
-    <para>
-      Sometimes a tasklet or timer might want to share data with
-      another tasklet or timer.
-    </para>
-
-    <sect2 id="lock-tasklets-same">
-     <title>The Same Tasklet/Timer</title>
-     <para>
-       Since a tasklet is never run on two CPUs at once, you don't
-       need to worry about your tasklet being reentrant (running
-       twice at once), even on SMP.
-     </para>
-    </sect2>
-
-    <sect2 id="lock-tasklets-different">
-     <title>Different Tasklets/Timers</title>
-     <para>
-       If another tasklet/timer wants
-       to share data with your tasklet or timer, you will both need to use
-       <function>spin_lock()</function> and
-       <function>spin_unlock()</function> calls.
-       <function>spin_lock_bh()</function> is
-       unnecessary here, as you are already in a tasklet, and
-       none will be run at the same time on the same CPU.
-     </para>
-    </sect2>
-   </sect1>
-
-   <sect1 id="lock-softirqs">
-    <title>Locking Between Softirqs</title>
-
-    <para>
-      Often a softirq might
-      want to share data with itself or a tasklet/timer.
-    </para>
-
-    <sect2 id="lock-softirqs-same">
-     <title>The Same Softirq</title>
-
-     <para>
-       The same softirq can run on the other CPUs: you can use a
-       per-CPU array (see <xref linkend="per-cpu"/>) for better
-       performance.  If you're going so far as to use a softirq,
-       you probably care about scalable performance enough
-       to justify the extra complexity.
-     </para>
-
-     <para>
-       You'll need to use <function>spin_lock()</function> and 
-       <function>spin_unlock()</function> for shared data.
-     </para>
-    </sect2>
-
-    <sect2 id="lock-softirqs-different">
-     <title>Different Softirqs</title>
-
-     <para>
-       You'll need to use <function>spin_lock()</function> and
-       <function>spin_unlock()</function> for shared data, whether it
-       be a timer, tasklet, the same softirq or a different
-       softirq: any of them could be running on a different CPU.
-     </para>
-    </sect2>
-   </sect1>
-  </chapter>
-
-  <chapter id="hardirq-context">
-   <title>Hard IRQ Context</title>
-
-   <para>
-     Hardware interrupts usually communicate with a
-     tasklet or softirq.  Frequently this involves putting work in a
-     queue, which the softirq will take out.
-   </para>
-
-   <sect1 id="hardirq-softirq">
-    <title>Locking Between Hard IRQ and Softirqs/Tasklets</title>
-
-    <para>
-      If a hardware irq handler shares data with a softirq, you have
-      two concerns.  Firstly, the softirq processing can be
-      interrupted by a hardware interrupt, and secondly, the
-      critical region could be entered by a hardware interrupt on
-      another CPU.  This is where <function>spin_lock_irq()</function> is 
-      used.  It is defined to disable interrupts on that cpu, then grab 
-      the lock. <function>spin_unlock_irq()</function> does the reverse.
-    </para>
-
-    <para>
-      The irq handler does not need to use
-      <function>spin_lock_irq()</function>, because the softirq cannot
-      run while the irq handler is running: it can use
-      <function>spin_lock()</function>, which is slightly faster.  The
-      only exception would be if a different hardware irq handler uses
-      the same lock: <function>spin_lock_irq()</function> will stop
-      that from interrupting us.
-    </para>
-
-    <para>
-      This works perfectly for UP as well: the spin lock vanishes,
-      and this macro simply becomes <function>local_irq_disable()</function>
-      (<filename class="headerfile">include/asm/smp.h</filename>), which
-      protects you from the softirq/tasklet/BH being run.
-    </para>
-
-    <para>
-      <function>spin_lock_irqsave()</function> 
-      (<filename>include/linux/spinlock.h</filename>) is a variant
-      which saves whether interrupts were on or off in a flags word,
-      which is passed to <function>spin_unlock_irqrestore()</function>.  This
-      means that the same code can be used inside a hard irq handler (where
-      interrupts are already off) and in softirqs (where the irq
-      disabling is required).
-    </para>
-
-    <para>
-      Note that softirqs (and hence tasklets and timers) are run on
-      return from hardware interrupts, so
-      <function>spin_lock_irq()</function> also stops these.  In that
-      sense, <function>spin_lock_irqsave()</function> is the most
-      general and powerful locking function.
-    </para>
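-
-    <para>
-      For instance (a sketch, with invented names):
-    </para>
-
-    <programlisting>
-static DEFINE_SPINLOCK(mydev_lock);
-
-/* Code that may be called with interrupts on or off: */
-unsigned long flags;
-
-spin_lock_irqsave(&amp;mydev_lock, flags);
-mydev_queue_work();              /* touch data shared with the irq handler */
-spin_unlock_irqrestore(&amp;mydev_lock, flags);
-    </programlisting>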
-
-   </sect1>
-   <sect1 id="hardirq-hardirq">
-    <title>Locking Between Two Hard IRQ Handlers</title>
-    <para>
-      It is rare to have to share data between two IRQ handlers, but
-      if you do, <function>spin_lock_irqsave()</function> should be
-      used: it is architecture-specific whether all interrupts are
-      disabled inside irq handlers themselves.
-    </para>
-   </sect1>
-
-  </chapter>
-
-  <chapter id="cheatsheet">
-   <title>Cheat Sheet For Locking</title>
-   <para>
-     Pete Zaitcev gives the following summary:
-   </para>
-   <itemizedlist>
-      <listitem>
-	<para>
-          If you are in a process context (any syscall) and want to
-	lock other processes out, use a mutex.  You can take a mutex
-	and sleep (<function>copy_from_user*()</function> or
-	<function>kmalloc(x,GFP_KERNEL)</function>).
-      </para>
-      </listitem>
-      <listitem>
-	<para>
-	Otherwise (== data can be touched in an interrupt), use
-	<function>spin_lock_irqsave()</function> and
-	<function>spin_unlock_irqrestore()</function>.
-	</para>
-      </listitem>
-      <listitem>
-	<para>
-	Avoid holding spinlock for more than 5 lines of code and
-	across any function call (except accessors like
-	<function>readb</function>).
-	</para>
-      </listitem>
-    </itemizedlist>
-
-   <sect1 id="minimum-lock-reqirements">
-   <title>Table of Minimum Requirements</title>
-
-   <para> The following table lists the <emphasis>minimum</emphasis>
-	locking requirements between various contexts.  In some cases,
-	the same context can only be running on one CPU at a time, so
-	no locking is required for that context (eg. a particular
-	thread can only run on one CPU at a time, but if it needs
-	to share data with another thread, locking is required).
-   </para>
-   <para>
-	Remember the advice above: you can always use
-	<function>spin_lock_irqsave()</function>, which is a superset
-	of all other spinlock primitives.
-   </para>
-
-   <table>
-<title>Table of Locking Requirements</title>
-<tgroup cols="11">
-<tbody>
-
-<row>
-<entry></entry>
-<entry>IRQ Handler A</entry>
-<entry>IRQ Handler B</entry>
-<entry>Softirq A</entry>
-<entry>Softirq B</entry>
-<entry>Tasklet A</entry>
-<entry>Tasklet B</entry>
-<entry>Timer A</entry>
-<entry>Timer B</entry>
-<entry>User Context A</entry>
-<entry>User Context B</entry>
-</row>
-
-<row>
-<entry>IRQ Handler A</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>IRQ Handler B</entry>
-<entry>SLIS</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>Softirq A</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-</row>
-
-<row>
-<entry>Softirq B</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-</row>
-
-<row>
-<entry>Tasklet A</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>Tasklet B</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>Timer A</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>Timer B</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>SL</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>User Context A</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>None</entry>
-</row>
-
-<row>
-<entry>User Context B</entry>
-<entry>SLI</entry>
-<entry>SLI</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>SLBH</entry>
-<entry>MLI</entry>
-<entry>None</entry>
-</row>
-
-</tbody>
-</tgroup>
-</table>
-
-   <table>
-<title>Legend for Locking Requirements Table</title>
-<tgroup cols="2">
-<tbody>
-
-<row>
-<entry>SLIS</entry>
-<entry>spin_lock_irqsave</entry>
-</row>
-<row>
-<entry>SLI</entry>
-<entry>spin_lock_irq</entry>
-</row>
-<row>
-<entry>SL</entry>
-<entry>spin_lock</entry>
-</row>
-<row>
-<entry>SLBH</entry>
-<entry>spin_lock_bh</entry>
-</row>
-<row>
-<entry>MLI</entry>
-<entry>mutex_lock_interruptible</entry>
-</row>
-
-</tbody>
-</tgroup>
-</table>
-
-</sect1>
-</chapter>
-
-<chapter id="trylock-functions">
- <title>The trylock Functions</title>
-  <para>
-   There are functions that try to acquire a lock only once and immediately
-   return a value indicating whether or not they succeeded.
-   They can be used if you do not need access to the data protected by the
-   lock while some other thread is holding the lock. You can acquire the
-   lock later if you then need access to the protected data.
-  </para>
-
-  <para>
-    <function>spin_trylock()</function> does not spin but returns non-zero if
-    it acquires the spinlock on the first try or 0 if not. This function can
-    be used in all contexts like <function>spin_lock</function>: you must have
-    disabled the contexts that might interrupt you and acquire the spin lock.
-  </para>
-
-  <para>
-    <function>mutex_trylock()</function> does not suspend your task
-    but returns non-zero if it could lock the mutex on the first try
-    or 0 if not. This function cannot be safely used in hardware or software
-    interrupt contexts despite not sleeping.
-  </para>
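-
-  <para>
-   A sketch of typical usage (the lock and the helpers are invented):
-  </para>
-
-  <programlisting>
-if (spin_trylock(&amp;mydata_lock)) {
-        do_quick_update();       /* got the lock without spinning */
-        spin_unlock(&amp;mydata_lock);
-} else {
-        defer_the_update();      /* somebody else holds it: try again later */
-}
-  </programlisting>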
-</chapter>
-
-  <chapter id="Examples">
-   <title>Common Examples</title>
-    <para>
-Let's step through a simple example: a cache of number to name
-mappings.  The cache keeps a count of how often each of the objects is
-used, and when it gets full, throws out the least used one.
-
-    </para>
-
-   <sect1 id="examples-usercontext">
-    <title>All In User Context</title>
-    <para>
-For our first example, we assume that all operations are in user
-context (ie. from system calls), so we can sleep.  This means we can
-use a mutex to protect the cache and all the objects within
-it.  Here's the code:
-    </para>
-
-    <programlisting>
-#include &lt;linux/list.h&gt;
-#include &lt;linux/slab.h&gt;
-#include &lt;linux/string.h&gt;
-#include &lt;linux/mutex.h&gt;
-#include &lt;asm/errno.h&gt;
-
-struct object
-{
-        struct list_head list;
-        int id;
-        char name[32];
-        int popularity;
-};
-
-/* Protects the cache, cache_num, and the objects within it */
-static DEFINE_MUTEX(cache_lock);
-static LIST_HEAD(cache);
-static unsigned int cache_num = 0;
-#define MAX_CACHE_SIZE 10
-
-/* Must be holding cache_lock */
-static struct object *__cache_find(int id)
-{
-        struct object *i;
-
-        list_for_each_entry(i, &amp;cache, list)
-                if (i-&gt;id == id) {
-                        i-&gt;popularity++;
-                        return i;
-                }
-        return NULL;
-}
-
-/* Must be holding cache_lock */
-static void __cache_delete(struct object *obj)
-{
-        BUG_ON(!obj);
-        list_del(&amp;obj-&gt;list);
-        kfree(obj);
-        cache_num--;
-}
-
-/* Must be holding cache_lock */
-static void __cache_add(struct object *obj)
-{
-        list_add(&amp;obj-&gt;list, &amp;cache);
-        if (++cache_num > MAX_CACHE_SIZE) {
-                struct object *i, *outcast = NULL;
-                list_for_each_entry(i, &amp;cache, list) {
-                        if (!outcast || i-&gt;popularity &lt; outcast-&gt;popularity)
-                                outcast = i;
-                }
-                __cache_delete(outcast);
-        }
-}
-
-int cache_add(int id, const char *name)
-{
-        struct object *obj;
-
-        if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
-                return -ENOMEM;
-
-        strlcpy(obj-&gt;name, name, sizeof(obj-&gt;name));
-        obj-&gt;id = id;
-        obj-&gt;popularity = 0;
-
-        mutex_lock(&amp;cache_lock);
-        __cache_add(obj);
-        mutex_unlock(&amp;cache_lock);
-        return 0;
-}
-
-void cache_delete(int id)
-{
-        mutex_lock(&amp;cache_lock);
-        __cache_delete(__cache_find(id));
-        mutex_unlock(&amp;cache_lock);
-}
-
-int cache_find(int id, char *name)
-{
-        struct object *obj;
-        int ret = -ENOENT;
-
-        mutex_lock(&amp;cache_lock);
-        obj = __cache_find(id);
-        if (obj) {
-                ret = 0;
-                strcpy(name, obj-&gt;name);
-        }
-        mutex_unlock(&amp;cache_lock);
-        return ret;
-}
-</programlisting>
-
-    <para>
-Note that we always make sure we have the cache_lock when we add,
-delete, or look up the cache: both the cache infrastructure itself and
-the contents of the objects are protected by the lock.  In this case
-it's easy, since we copy the data for the user, and never let them
-access the objects directly.
-    </para>
-    <para>
-There is a slight (and common) optimization here: in
-<function>cache_add</function> we set up the fields of the object
-before grabbing the lock.  This is safe, as no-one else can access it
-until we put it in cache.
-    </para>
-    </sect1>
-
-   <sect1 id="examples-interrupt">
-    <title>Accessing From Interrupt Context</title>
-    <para>
-Now consider the case where <function>cache_find</function> can be
-called from interrupt context: either a hardware interrupt or a
-softirq.  An example would be a timer which deletes an object from the
-cache.
-    </para>
-    <para>
-The change is shown below, in standard patch format: lines prefixed
-with <symbol>-</symbol> are removed, and lines prefixed with
-<symbol>+</symbol> are added.
-    </para>
-<programlisting>
---- cache.c.usercontext	2003-12-09 13:58:54.000000000 +1100
-+++ cache.c.interrupt	2003-12-09 14:07:49.000000000 +1100
-@@ -12,7 +12,7 @@
-         int popularity;
- };
-
--static DEFINE_MUTEX(cache_lock);
-+static DEFINE_SPINLOCK(cache_lock);
- static LIST_HEAD(cache);
- static unsigned int cache_num = 0;
- #define MAX_CACHE_SIZE 10
-@@ -55,6 +55,7 @@
- int cache_add(int id, const char *name)
- {
-         struct object *obj;
-+        unsigned long flags;
-
-         if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
-                 return -ENOMEM;
-@@ -63,30 +64,33 @@
-         obj-&gt;id = id;
-         obj-&gt;popularity = 0;
-
--        mutex_lock(&amp;cache_lock);
-+        spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
--        mutex_unlock(&amp;cache_lock);
-+        spin_unlock_irqrestore(&amp;cache_lock, flags);
-         return 0;
- }
-
- void cache_delete(int id)
- {
--        mutex_lock(&amp;cache_lock);
-+        unsigned long flags;
-+
-+        spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_delete(__cache_find(id));
--        mutex_unlock(&amp;cache_lock);
-+        spin_unlock_irqrestore(&amp;cache_lock, flags);
- }
-
- int cache_find(int id, char *name)
- {
-         struct object *obj;
-         int ret = -ENOENT;
-+        unsigned long flags;
-
--        mutex_lock(&amp;cache_lock);
-+        spin_lock_irqsave(&amp;cache_lock, flags);
-         obj = __cache_find(id);
-         if (obj) {
-                 ret = 0;
-                 strcpy(name, obj-&gt;name);
-         }
--        mutex_unlock(&amp;cache_lock);
-+        spin_unlock_irqrestore(&amp;cache_lock, flags);
-         return ret;
- }
-</programlisting>
-
-    <para>
-Note that <function>spin_lock_irqsave()</function> will turn off
-interrupts if they are on, and does nothing if they are already off
-(as when we are already in an interrupt handler); hence these
-functions are safe to call from any context.
-    </para>
-    <para>
-Unfortunately, <function>cache_add</function> calls
-<function>kmalloc</function> with the <symbol>GFP_KERNEL</symbol>
-flag, which is only legal in user context.  I have assumed that
-<function>cache_add</function> is still only called in user context,
-otherwise this should become a parameter to
-<function>cache_add</function>.
-    </para>
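-
-    <para>
-If <function>cache_add</function> did need to work from both user and
-interrupt context, a minimal sketch of that change (not part of the
-original example) is to let the caller pass the allocation flags in:
-    </para>
-
-<programlisting>
-int cache_add(int id, const char *name, gfp_t gfp)
-{
-        struct object *obj;
-        unsigned long flags;
-
-        /* gfp is GFP_KERNEL from user context, GFP_ATOMIC from
-           interrupt context. */
-        obj = kmalloc(sizeof(*obj), gfp);
-        if (!obj)
-                return -ENOMEM;
-
-        strlcpy(obj-&gt;name, name, sizeof(obj-&gt;name));
-        obj-&gt;id = id;
-        obj-&gt;popularity = 0;
-
-        spin_lock_irqsave(&amp;cache_lock, flags);
-        __cache_add(obj);
-        spin_unlock_irqrestore(&amp;cache_lock, flags);
-        return 0;
-}
-</programlisting>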
-  </sect1>
-   <sect1 id="examples-refcnt">
-    <title>Exposing Objects Outside This File</title>
-    <para>
-If our objects contained more information, it might not be sufficient
-to copy the information in and out: other parts of the code might want
-to keep pointers to these objects, for example, rather than looking up
-the id every time.  This produces two problems.
-    </para>
-    <para>
-The first problem is that we use the <symbol>cache_lock</symbol> to
-protect objects: we'd need to make this non-static so the rest of the
-code can use it.  This makes locking trickier, as it is no longer all
-in one place.
-    </para>
-    <para>
-The second problem is the lifetime problem: if another structure keeps
-a pointer to an object, it presumably expects that pointer to remain
-valid.  Unfortunately, this is only guaranteed while you hold the
-lock, otherwise someone might call <function>cache_delete</function>
-and even worse, add another object, re-using the same address.
-    </para>
-    <para>
-As there is only one lock, you can't hold it forever: no-one else would
-get any work done.
-    </para>
-    <para>
-The solution to this problem is to use a reference count: everyone who
-has a pointer to the object increases it when they first get the
-object, and drops the reference count when they're finished with it.
-Whoever drops it to zero knows it is unused, and can actually delete it.
-    </para>
-    <para>
-Here is the code:
-    </para>
-
-<programlisting>
---- cache.c.interrupt	2003-12-09 14:25:43.000000000 +1100
-+++ cache.c.refcnt	2003-12-09 14:33:05.000000000 +1100
-@@ -7,6 +7,7 @@
- struct object
- {
-         struct list_head list;
-+        unsigned int refcnt;
-         int id;
-         char name[32];
-         int popularity;
-@@ -17,6 +18,35 @@
- static unsigned int cache_num = 0;
- #define MAX_CACHE_SIZE 10
-
-+static void __object_put(struct object *obj)
-+{
-+        if (--obj-&gt;refcnt == 0)
-+                kfree(obj);
-+}
-+
-+static void __object_get(struct object *obj)
-+{
-+        obj-&gt;refcnt++;
-+}
-+
-+void object_put(struct object *obj)
-+{
-+        unsigned long flags;
-+
-+        spin_lock_irqsave(&amp;cache_lock, flags);
-+        __object_put(obj);
-+        spin_unlock_irqrestore(&amp;cache_lock, flags);
-+}
-+
-+void object_get(struct object *obj)
-+{
-+        unsigned long flags;
-+
-+        spin_lock_irqsave(&amp;cache_lock, flags);
-+        __object_get(obj);
-+        spin_unlock_irqrestore(&amp;cache_lock, flags);
-+}
-+
- /* Must be holding cache_lock */
- static struct object *__cache_find(int id)
- {
-@@ -35,6 +65,7 @@
- {
-         BUG_ON(!obj);
-         list_del(&amp;obj-&gt;list);
-+        __object_put(obj);
-         cache_num--;
- }
-
-@@ -63,6 +94,7 @@
-         strlcpy(obj-&gt;name, name, sizeof(obj-&gt;name));
-         obj-&gt;id = id;
-         obj-&gt;popularity = 0;
-+        obj-&gt;refcnt = 1; /* The cache holds a reference */
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
-@@ -79,18 +111,15 @@
-         spin_unlock_irqrestore(&amp;cache_lock, flags);
- }
-
--int cache_find(int id, char *name)
-+struct object *cache_find(int id)
- {
-         struct object *obj;
--        int ret = -ENOENT;
-         unsigned long flags;
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         obj = __cache_find(id);
--        if (obj) {
--                ret = 0;
--                strcpy(name, obj-&gt;name);
--        }
-+        if (obj)
-+                __object_get(obj);
-         spin_unlock_irqrestore(&amp;cache_lock, flags);
--        return ret;
-+        return obj;
- }
-</programlisting>
-
-<para>
-We encapsulate the reference counting in the standard 'get' and 'put'
-functions.  Now we can return the object itself from
-<function>cache_find</function> which has the advantage that the user
-can now sleep while holding the object (eg. using
-<function>copy_to_user()</function> to copy the name to userspace).
-</para>
-<para>
-The other point to note is that I said a reference should be held for
-every pointer to the object: thus the reference count is 1 when first
-inserted into the cache.  In some versions the framework does not hold
-a reference count, but they are more complicated.
-</para>
-
-   <sect2 id="examples-refcnt-atomic">
-    <title>Using Atomic Operations For The Reference Count</title>
-<para>
-In practice, <type>atomic_t</type> would usually be used for
-<structfield>refcnt</structfield>.  There are a number of atomic
-operations defined in
-
-<filename class="headerfile">include/asm/atomic.h</filename>: these are
-guaranteed to be seen atomically from all CPUs in the system, so no
-lock is required.  In this case, it is simpler than using spinlocks,
-although for anything non-trivial using spinlocks is clearer.  The
-<function>atomic_inc</function> and
-<function>atomic_dec_and_test</function> are used instead of the
-standard increment and decrement operators, and the lock is no longer
-used to protect the reference count itself.
-</para>
-
-<programlisting>
---- cache.c.refcnt	2003-12-09 15:00:35.000000000 +1100
-+++ cache.c.refcnt-atomic	2003-12-11 15:49:42.000000000 +1100
-@@ -7,7 +7,7 @@
- struct object
- {
-         struct list_head list;
--        unsigned int refcnt;
-+        atomic_t refcnt;
-         int id;
-         char name[32];
-         int popularity;
-@@ -18,33 +18,15 @@
- static unsigned int cache_num = 0;
- #define MAX_CACHE_SIZE 10
-
--static void __object_put(struct object *obj)
--{
--        if (--obj-&gt;refcnt == 0)
--                kfree(obj);
--}
--
--static void __object_get(struct object *obj)
--{
--        obj-&gt;refcnt++;
--}
--
- void object_put(struct object *obj)
- {
--        unsigned long flags;
--
--        spin_lock_irqsave(&amp;cache_lock, flags);
--        __object_put(obj);
--        spin_unlock_irqrestore(&amp;cache_lock, flags);
-+        if (atomic_dec_and_test(&amp;obj-&gt;refcnt))
-+                kfree(obj);
- }
-
- void object_get(struct object *obj)
- {
--        unsigned long flags;
--
--        spin_lock_irqsave(&amp;cache_lock, flags);
--        __object_get(obj);
--        spin_unlock_irqrestore(&amp;cache_lock, flags);
-+        atomic_inc(&amp;obj-&gt;refcnt);
- }
-
- /* Must be holding cache_lock */
-@@ -65,7 +47,7 @@
- {
-         BUG_ON(!obj);
-         list_del(&amp;obj-&gt;list);
--        __object_put(obj);
-+        object_put(obj);
-         cache_num--;
- }
-
-@@ -94,7 +76,7 @@
-         strlcpy(obj-&gt;name, name, sizeof(obj-&gt;name));
-         obj-&gt;id = id;
-         obj-&gt;popularity = 0;
--        obj-&gt;refcnt = 1; /* The cache holds a reference */
-+        atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
-@@ -119,7 +101,7 @@
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         obj = __cache_find(id);
-         if (obj)
--                __object_get(obj);
-+                object_get(obj);
-         spin_unlock_irqrestore(&amp;cache_lock, flags);
-         return obj;
- }
-</programlisting>
-</sect2>
-</sect1>
-
-   <sect1 id="examples-lock-per-obj">
-    <title>Protecting The Objects Themselves</title>
-    <para>
-In these examples, we assumed that the objects (except the reference
-counts) never changed once they are created.  If we wanted to allow
-the name to change, there are three possibilities:
-    </para>
-    <itemizedlist>
-      <listitem>
-	<para>
-You can make <symbol>cache_lock</symbol> non-static, and tell people
-to grab that lock before changing the name in any object.
-        </para>
-      </listitem>
-      <listitem>
-        <para>
-You can provide a <function>cache_obj_rename</function> which grabs
-this lock and changes the name for the caller, and tell everyone to
-use that function.
-        </para>
-      </listitem>
-      <listitem>
-        <para>
-You can make the <symbol>cache_lock</symbol> protect only the cache
-itself, and use another lock to protect the name.
-        </para>
-      </listitem>
-    </itemizedlist>
-
-      <para>
-Theoretically, you can make the locks as fine-grained as one lock for
-every field, for every object.  In practice, the most common variants
-are:
-</para>
-    <itemizedlist>
-      <listitem>
-	<para>
-One lock which protects the infrastructure (the <symbol>cache</symbol>
-list in this example) and all the objects.  This is what we have done
-so far.
-	</para>
-      </listitem>
-      <listitem>
-        <para>
-One lock which protects the infrastructure (including the list
-pointers inside the objects), and one lock inside the object which
-protects the rest of that object.
-        </para>
-      </listitem>
-      <listitem>
-        <para>
-Multiple locks to protect the infrastructure (eg. one lock per hash
-chain), possibly with a separate per-object lock.
-        </para>
-      </listitem>
-    </itemizedlist>
-
-<para>
-Here is the "lock-per-object" implementation:
-</para>
-<programlisting>
---- cache.c.refcnt-atomic	2003-12-11 15:50:54.000000000 +1100
-+++ cache.c.perobjectlock	2003-12-11 17:15:03.000000000 +1100
-@@ -6,11 +6,17 @@
-
- struct object
- {
-+        /* These two protected by cache_lock. */
-         struct list_head list;
-+        int popularity;
-+
-         atomic_t refcnt;
-+
-+        /* Doesn't change once created. */
-         int id;
-+
-+        spinlock_t lock; /* Protects the name */
-         char name[32];
--        int popularity;
- };
-
- static DEFINE_SPINLOCK(cache_lock);
-@@ -77,6 +84,7 @@
-         obj-&gt;id = id;
-         obj-&gt;popularity = 0;
-         atomic_set(&amp;obj-&gt;refcnt, 1); /* The cache holds a reference */
-+        spin_lock_init(&amp;obj-&gt;lock);
-
-         spin_lock_irqsave(&amp;cache_lock, flags);
-         __cache_add(obj);
-</programlisting>
-
-<para>
-Note that I decided that the <structfield>popularity</structfield>
-count should be protected by the <symbol>cache_lock</symbol> rather
-than the per-object lock: this is because it (like the
-<structname>struct list_head</structname> inside the object) is
-logically part of the infrastructure.  This way, I don't need to grab
-the lock of every object in <function>__cache_add</function> when
-seeking the least popular.
-</para>
-
-<para>
-I also decided that the <structfield>id</structfield> member is
-unchangeable, so I don't need to grab each object lock in
-<function>__cache_find()</function> to examine the
-<structfield>id</structfield>: the object lock is only used by a
-caller who wants to read or write the <structfield>name</structfield>
-field.
-</para>
-
-<para>
-Note also that I added a comment describing what data was protected by
-which locks.  This is extremely important, as it describes the runtime
-behavior of the code, which can be hard to gain from reading alone.  And
-as Alan Cox says, <quote>Lock data, not code</quote>.
-</para>
-</sect1>
-</chapter>
-
-   <chapter id="common-problems">
-    <title>Common Problems</title>
-    <sect1 id="deadlock">
-    <title>Deadlock: Simple and Advanced</title>
-
-    <para>
-      There is a coding bug where a piece of code tries to grab a
-      spinlock twice: it will spin forever, waiting for the lock to
-      be released (spinlocks, rwlocks and mutexes are not
-      recursive in Linux).  This is trivial to diagnose: not a
-      stay-up-five-nights-talk-to-fluffy-code-bunnies kind of
-      problem.
-    </para>
-
-    <para>
-      For a slightly more complex case, imagine you have a region
-      shared by a softirq and user context.  If you use a
-      <function>spin_lock()</function> call to protect it, it is 
-      possible that the user context will be interrupted by the softirq
-      while it holds the lock, and the softirq will then spin
-      forever trying to get the same lock.
-    </para>
-
-    <para>
-      Both of these are called deadlock, and as shown above, it can
-      occur even with a single CPU (although not on UP compiles,
-      since spinlocks vanish on kernel compiles with 
-      <symbol>CONFIG_SMP</symbol>=n. You'll still get data corruption 
-      in the second example).
-    </para>
-
-    <para>
-      This complete lockup is easy to diagnose: on SMP boxes the
-      watchdog timer or compiling with <symbol>DEBUG_SPINLOCK</symbol> set
-      (<filename>include/linux/spinlock.h</filename>) will show this up 
-      immediately when it happens.
-    </para>
-
-    <para>
-      A more complex problem is the so-called 'deadly embrace',
-      involving two or more locks.  Say you have a hash table: each
-      entry in the table is a spinlock, and a chain of hashed
-      objects.  Inside a softirq handler, you sometimes want to
-      alter an object from one place in the hash to another: you
-      grab the spinlock of the old hash chain and the spinlock of
-      the new hash chain, and delete the object from the old one,
-      and insert it in the new one.
-    </para>
-
-    <para>
-      There are two problems here.  First, if your code ever
-      tries to move the object to the same chain, it will deadlock
-      with itself as it tries to lock it twice.  Secondly, if the
-      same softirq on another CPU is trying to move another object
-      in the reverse direction, the following could happen:
-    </para>
-
-    <table>
-     <title>Consequences</title>
-
-     <tgroup cols="2" align="left">
-
-      <thead>
-       <row>
-        <entry>CPU 1</entry>
-        <entry>CPU 2</entry>
-       </row>
-      </thead>
-
-      <tbody>
-       <row>
-        <entry>Grab lock A -&gt; OK</entry>
-        <entry>Grab lock B -&gt; OK</entry>
-       </row>
-       <row>
-        <entry>Grab lock B -&gt; spin</entry>
-        <entry>Grab lock A -&gt; spin</entry>
-       </row>
-      </tbody>
-     </tgroup>
-    </table>
-
-    <para>
-      The two CPUs will spin forever, waiting for the other to give up
-      their lock.  It will look, smell, and feel like a crash.
-    </para>
-    </sect1>
-
-    <sect1 id="techs-deadlock-prevent">
-     <title>Preventing Deadlock</title>
-
-     <para>
-       Textbooks will tell you that if you always lock in the same
-       order, you will never get this kind of deadlock.  Practice
-       will tell you that this approach doesn't scale: when I
-       create a new lock, I don't understand enough of the kernel
-       to figure out where in the 5000 lock hierarchy it will fit.
-     </para>
-
-     <para>
-       The best locks are encapsulated: they never get exposed in
-       headers, and are never held around calls to non-trivial
-       functions outside the same file.  You can read through this
-       code and see that it will never deadlock, because it never
-       tries to grab another lock while it has that one.  People
-       using your code don't even need to know you are using a
-       lock.
-     </para>
-
-     <para>
-       A classic problem here is when you provide callbacks or
-       hooks: if you call these with the lock held, you risk simple
-       deadlock, or a deadly embrace (who knows what the callback
-       will do?).  Remember, the other programmers are out to get
-       you, so don't do this.
-     </para>
-
-    <sect2 id="techs-deadlock-overprevent">
-     <title>Overzealous Prevention Of Deadlocks</title>
-
-     <para>
-       Deadlocks are problematic, but not as bad as data
-       corruption.  Code which grabs a read lock, searches a list,
-       fails to find what it wants, drops the read lock, grabs a
-       write lock and inserts the object has a race condition.
-     </para>
-
-     <para>
-       If you don't see why, please stay the fuck away from my code.
-     </para>
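-
-     <para>
-       As a sketch of the race (using hypothetical
-       <function>__find()</function>, <function>__insert()</function> and
-       <symbol>list_lock</symbol> helpers), compare the broken version with
-       one that re-checks under the write lock:
-     </para>
-
-    <programlisting>
-        /* Racy: another CPU can insert the same id between the read
-           section and the write section. */
-        read_lock(&amp;list_lock);
-        obj = __find(id);
-        read_unlock(&amp;list_lock);
-        if (!obj) {
-                write_lock(&amp;list_lock);
-                __insert(new);          /* may create a duplicate */
-                write_unlock(&amp;list_lock);
-        }
-
-        /* Correct: check again while holding the write lock. */
-        write_lock(&amp;list_lock);
-        if (!__find(id))
-                __insert(new);
-        write_unlock(&amp;list_lock);
-    </programlisting>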
-    </sect2>
-    </sect1>
-
-   <sect1 id="racing-timers">
-    <title>Racing Timers: A Kernel Pastime</title>
-
-    <para>
-      Timers can produce their own special problems with races.
-      Consider a collection of objects (list, hash, etc) where each
-      object has a timer which is due to destroy it.
-    </para>
-
-    <para>
-      If you want to destroy the entire collection (say on module
-      removal), you might do the following:
-    </para>
-
-    <programlisting>
-        /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE
-           HUNGARIAN NOTATION */
-        spin_lock_bh(&amp;list_lock);
-
-        while (list) {
-                struct foo *next = list-&gt;next;
-                del_timer(&amp;list-&gt;timer);
-                kfree(list);
-                list = next;
-        }
-
-        spin_unlock_bh(&amp;list_lock);
-    </programlisting>
-
-    <para>
-      Sooner or later, this will crash on SMP, because a timer can
-      have just gone off before the <function>spin_lock_bh()</function>,
-      and it will only get the lock after we
-      <function>spin_unlock_bh()</function>, and then try to free
-      the element (which has already been freed!).
-    </para>
-
-    <para>
-      This can be avoided by checking the result of
-      <function>del_timer()</function>: if it returns
-      <returnvalue>1</returnvalue>, the timer has been deleted.
-      If <returnvalue>0</returnvalue>, it means (in this
-      case) that it is currently running, so we can do:
-    </para>
-
-    <programlisting>
-        retry:
-                spin_lock_bh(&amp;list_lock);
-
-                while (list) {
-                        struct foo *next = list-&gt;next;
-                        if (!del_timer(&amp;list-&gt;timer)) {
-                                /* Give timer a chance to delete this */
-                                spin_unlock_bh(&amp;list_lock);
-                                goto retry;
-                        }
-                        kfree(list);
-                        list = next;
-                }
-
-                spin_unlock_bh(&amp;list_lock);
-    </programlisting>
-
-    <para>
-      Another common problem is deleting timers which restart
-      themselves (by calling <function>add_timer()</function> at the end
-      of their timer function).  Because this is a fairly common case
-      which is prone to races, you should use <function>del_timer_sync()</function>
-      (<filename class="headerfile">include/linux/timer.h</filename>)
-      to handle this case.  It returns the number of times the timer
-      had to be deleted before we finally stopped it from adding itself back
-      in.
-    </para>
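-
-    <para>
-      As a small sketch (assuming a hypothetical self-rearming
-      <symbol>poll_timer</symbol>), module cleanup code would typically do:
-    </para>
-
-    <programlisting>
-        /* Waits for a timer function running on another CPU to finish
-           before returning.  Must not be called while holding any lock
-           that the timer function itself takes, or it may deadlock. */
-        del_timer_sync(&amp;poll_timer);
-    </programlisting>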
-   </sect1>
-
-  </chapter>
-
- <chapter id="Efficiency">
-    <title>Locking Speed</title>
-
-    <para>
-There are three main things to worry about when considering speed of
-some code which does locking.  First is concurrency: how many things
-are going to be waiting while someone else is holding a lock.  Second
-is the time taken to actually acquire and release an uncontended lock.
-Third is using fewer, or smarter locks.  I'm assuming that the lock is
-used fairly often: otherwise, you wouldn't be concerned about
-efficiency.
-</para>
-    <para>
-Concurrency depends on how long the lock is usually held: you should
-hold the lock for as long as needed, but no longer.  In the cache
-example, we always create the object without the lock held, and then
-grab the lock only when we are ready to insert it in the list.
-</para>
-    <para>
-Acquisition times depend on how much damage the lock operations do to
-the pipeline (pipeline stalls) and how likely it is that this CPU was
-the last one to grab the lock (ie. is the lock cache-hot for this
-CPU): on a machine with more CPUs, this likelihood drops fast.
-Consider a 700MHz Intel Pentium III: an instruction takes about 0.7ns,
-an atomic increment takes about 58ns, a lock which is cache-hot on
-this CPU takes 160ns, and a cacheline transfer from another CPU takes
-an additional 170 to 360ns.  (These figures from Paul McKenney's
-<ulink url="http://www.linuxjournal.com/article.php?sid=6993"> Linux
-Journal RCU article</ulink>).
-</para>
-    <para>
-These two aims conflict: holding a lock for a short time might be done
-by splitting locks into parts (such as in our final per-object-lock
-example), but this increases the number of lock acquisitions, and the
-results are often slower than having a single lock.  This is another
-reason to advocate locking simplicity.
-</para>
-    <para>
-The third concern is addressed below: there are some methods to reduce
-the amount of locking which needs to be done.
-</para>
-
-  <sect1 id="efficiency-rwlocks">
-   <title>Read/Write Lock Variants</title>
-
-   <para>
-      Both spinlocks and mutexes have read/write variants:
-      <type>rwlock_t</type> and <structname>struct rw_semaphore</structname>.
-      These divide users into two classes: the readers and the writers.  If
-      you are only reading the data, you can get a read lock, but to write to
-      the data you need the write lock.  Many people can hold a read lock,
-      but a writer must be the sole holder.
-    </para>
-
-   <para>
-      If your code divides neatly along reader/writer lines (as our
-      cache code does), and the lock is held by readers for
-      significant lengths of time, using these locks can help.  They
-      are slightly slower than the normal locks though, so in practice
-      <type>rwlock_t</type> is not usually worthwhile.
-    </para>
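-
-   <para>
-      As a minimal sketch (with a hypothetical table protected by
-      <symbol>map_lock</symbol>), the reader and writer sides look like this:
-   </para>
-
-    <programlisting>
-static DEFINE_RWLOCK(map_lock);
-static int map[16];
-
-int map_read(int idx)
-{
-        int val;
-
-        read_lock(&amp;map_lock);          /* many readers may hold this at once */
-        val = map[idx];
-        read_unlock(&amp;map_lock);
-        return val;
-}
-
-void map_write(int idx, int val)
-{
-        write_lock(&amp;map_lock);         /* the writer is the sole holder */
-        map[idx] = val;
-        write_unlock(&amp;map_lock);
-}
-    </programlisting>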
-   </sect1>
-
-   <sect1 id="efficiency-read-copy-update">
-    <title>Avoiding Locks: Read Copy Update</title>
-
-    <para>
-      There is a special method of read/write locking called Read Copy
-      Update.  Using RCU, the readers can avoid taking a lock
-      altogether: as we expect our cache to be read more often than
-      updated (otherwise the cache is a waste of time), it is a
-      candidate for this optimization.
-    </para>
-
-    <para>
-      How do we get rid of read locks?  Getting rid of read locks
-      means that writers may be changing the list underneath the
-      readers.  That is actually quite simple: we can read a linked
-      list while an element is being added if the writer adds the
-      element very carefully.  For example, adding
-      <symbol>new</symbol> to a single linked list called
-      <symbol>list</symbol>:
-    </para>
-
-    <programlisting>
-        new-&gt;next = list-&gt;next;
-        wmb();
-        list-&gt;next = new;
-    </programlisting>
-
-    <para>
-      The <function>wmb()</function> is a write memory barrier.  It
-      ensures that the first operation (setting the new element's
-      <symbol>next</symbol> pointer) is complete and will be seen by
-      all CPUs before the second operation (putting the new element
-      into the list) happens.  This is important, since modern
-      compilers and modern CPUs can both reorder instructions unless
-      told otherwise: we want a reader to either not see the new
-      element at all, or see the new element with the
-      <symbol>next</symbol> pointer correctly pointing at the rest of
-      the list.
-    </para>
-    <para>
-      Fortunately, there is a function to do this for standard
-      <structname>struct list_head</structname> lists:
-      <function>list_add_rcu()</function>
-      (<filename>include/linux/list.h</filename>).
-    </para>
-    <para>
-      Removing an element from the list is even simpler: we replace
-      the pointer to the old element with a pointer to its successor,
-      and readers will either see it, or skip over it.
-    </para>
-    <programlisting>
-        list-&gt;next = old-&gt;next;
-    </programlisting>
-    <para>
-      There is <function>list_del_rcu()</function>
-      (<filename>include/linux/list.h</filename>) which does this (the
-      normal version poisons the old object, which we don't want).
-    </para>
-    <para>
-      The reader must also be careful: some CPUs can look through the
-      <symbol>next</symbol> pointer to start reading the contents of
-      the next element early, but don't realize that the pre-fetched
-      contents are wrong when the <symbol>next</symbol> pointer changes
-      underneath them.  Once again, there is a
-      <function>list_for_each_entry_rcu()</function>
-      (<filename>include/linux/list.h</filename>) to help you.  Of
-      course, writers can just use
-      <function>list_for_each_entry()</function>, since there cannot
-      be two simultaneous writers.
-    </para>
-    <para>
-      Our final dilemma is this: when can we actually destroy the
-      removed element?  Remember, a reader might be stepping through
-      this element in the list right now: if we free this element and
-      the <symbol>next</symbol> pointer changes, the reader will jump
-      off into garbage and crash.  We need to wait until we know that
-      all the readers who were traversing the list when we deleted the
-      element are finished.  We use <function>call_rcu()</function> to
-      register a callback which will actually destroy the object once
-      all pre-existing readers are finished.  Alternatively,
-      <function>synchronize_rcu()</function> may be used to block until
-      all pre-existing readers are finished.
-    </para>
-    <para>
-      But how does Read Copy Update know when the readers are
-      finished?  The method is this: firstly, the readers always
-      traverse the list inside
-      <function>rcu_read_lock()</function>/<function>rcu_read_unlock()</function>
-      pairs: these simply disable preemption so the reader won't go to
-      sleep while reading the list.
-    </para>
-    <para>
-      RCU then waits until every other CPU has slept at least once:
-      since readers cannot sleep, we know that any readers which were
-      traversing the list during the deletion are finished, and the
-      callback is triggered.  The real Read Copy Update code is a
-      little more optimized than this, but this is the fundamental
-      idea.
-    </para>
-
-<programlisting>
---- cache.c.perobjectlock	2003-12-11 17:15:03.000000000 +1100
-+++ cache.c.rcupdate	2003-12-11 17:55:14.000000000 +1100
-@@ -1,15 +1,18 @@
- #include &lt;linux/list.h&gt;
- #include &lt;linux/slab.h&gt;
- #include &lt;linux/string.h&gt;
-+#include &lt;linux/rcupdate.h&gt;
- #include &lt;linux/mutex.h&gt;
- #include &lt;asm/errno.h&gt;
-
- struct object
- {
--        /* These two protected by cache_lock. */
-+        /* This is protected by RCU */
-         struct list_head list;
-         int popularity;
-
-+        struct rcu_head rcu;
-+
-         atomic_t refcnt;
-
-         /* Doesn't change once created. */
-@@ -40,7 +43,7 @@
- {
-         struct object *i;
-
--        list_for_each_entry(i, &amp;cache, list) {
-+        list_for_each_entry_rcu(i, &amp;cache, list) {
-                 if (i-&gt;id == id) {
-                         i-&gt;popularity++;
-                         return i;
-@@ -49,19 +52,25 @@
-         return NULL;
- }
-
-+/* Final discard done once we know no readers are looking. */
-+static void cache_delete_rcu(void *arg)
-+{
-+        object_put(arg);
-+}
-+
- /* Must be holding cache_lock */
- static void __cache_delete(struct object *obj)
- {
-         BUG_ON(!obj);
--        list_del(&amp;obj-&gt;list);
--        object_put(obj);
-+        list_del_rcu(&amp;obj-&gt;list);
-         cache_num--;
-+        call_rcu(&amp;obj-&gt;rcu, cache_delete_rcu);
- }
-
- /* Must be holding cache_lock */
- static void __cache_add(struct object *obj)
- {
--        list_add(&amp;obj-&gt;list, &amp;cache);
-+        list_add_rcu(&amp;obj-&gt;list, &amp;cache);
-         if (++cache_num > MAX_CACHE_SIZE) {
-                 struct object *i, *outcast = NULL;
-                 list_for_each_entry(i, &amp;cache, list) {
-@@ -104,12 +114,11 @@
- struct object *cache_find(int id)
- {
-         struct object *obj;
--        unsigned long flags;
-
--        spin_lock_irqsave(&amp;cache_lock, flags);
-+        rcu_read_lock();
-         obj = __cache_find(id);
-         if (obj)
-                 object_get(obj);
--        spin_unlock_irqrestore(&amp;cache_lock, flags);
-+        rcu_read_unlock();
-         return obj;
- }
-</programlisting>
-
-<para>
-Note that the reader will alter the
-<structfield>popularity</structfield> member in
-<function>__cache_find()</function>, and now it doesn't hold a lock.
-One solution would be to make it an <type>atomic_t</type>, but for
-this usage, we don't really care about races: an approximate result is
-good enough, so I didn't change it.
-</para>
-
-<para>
-The result is that <function>cache_find()</function> requires no
-synchronization with any other functions, so is almost as fast on SMP
-as it would be on UP.
-</para>
-
-<para>
-There is a further optimization possible here: remember our original
-cache code, where there were no reference counts and the caller simply
-held the lock whenever using the object?  This is still possible: if
-you hold the lock, no one can delete the object, so you don't need to
-get and put the reference count.
-</para>
-
-<para>
-Now, because the 'read lock' in RCU is simply disabling preemption, a
-caller which always has preemption disabled between calling
-<function>cache_find()</function> and
-<function>object_put()</function> does not need to actually get and
-put the reference count: we could expose
-<function>__cache_find()</function> by making it non-static, and
-such callers could simply call that.
-</para>
-<para>
-The benefit here is that the reference count is not written to: the
-object is not altered in any way, which is much faster on SMP
-machines due to caching.
-</para>
-  </sect1>
-
-   <sect1 id="per-cpu">
-    <title>Per-CPU Data</title>
-
-    <para>
-      Another technique for avoiding locking which is used fairly
-      widely is to duplicate information for each CPU.  For example,
-      if you wanted to keep a count of a common condition, you could
-      use a spin lock and a single counter.  Nice and simple.
-    </para>
-
-    <para>
-      If that was too slow (it's usually not, but if you've got a
-      really big machine to test on and can show that it is), you
-      could instead use a counter for each CPU, then none of them need
-      an exclusive lock.  See <function>DEFINE_PER_CPU()</function>,
-      <function>get_cpu_var()</function> and
-      <function>put_cpu_var()</function>
-      (<filename class="headerfile">include/linux/percpu.h</filename>).
-    </para>
-
-    <para>
-      Of particular use for simple per-cpu counters is the
-      <type>local_t</type> type, and the
-      <function>cpu_local_inc()</function> and related functions,
-      which are more efficient than simple code on some architectures
-      (<filename class="headerfile">include/asm/local.h</filename>).
-    </para>
-
-    <para>
-      Note that there is no simple, reliable way of getting an exact
-      value of such a counter, without introducing more locks.  This
-      is not a problem for some uses.
-    </para>
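-
-    <para>
-      As a small sketch (with a hypothetical per-CPU
-      <symbol>hit_count</symbol>), bumping such a counter looks like this:
-    </para>
-
-    <programlisting>
-static DEFINE_PER_CPU(unsigned long, hit_count);
-
-static void count_hit(void)
-{
-        /* get_cpu_var() disables preemption, so this CPU's copy is
-           ours until put_cpu_var(). */
-        get_cpu_var(hit_count)++;
-        put_cpu_var(hit_count);
-}
-    </programlisting>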
-   </sect1>
-
-   <sect1 id="mostly-hardirq">
-    <title>Data Which Is Mostly Used By An IRQ Handler</title>
-
-    <para>
-      If data is always accessed from within the same IRQ handler, you
-      don't need a lock at all: the kernel already guarantees that the
-      irq handler will not run simultaneously on multiple CPUs.
-    </para>
-    <para>
-      Manfred Spraul points out that you can still do this, even if
-      the data is very occasionally accessed in user context or
-      softirqs/tasklets.  The irq handler doesn't use a lock, and
-      all other accesses are done as follows:
-    </para>
-
-<programlisting>
-	spin_lock(&amp;lock);
-	disable_irq(irq);
-	...
-	enable_irq(irq);
-	spin_unlock(&amp;lock);
-</programlisting>
-    <para>
-      The <function>disable_irq()</function> prevents the irq handler
-      from running (and waits for it to finish if it's currently
-      running on other CPUs).  The spinlock prevents any other
-      accesses happening at the same time.  Naturally, this is slower
-      than just a <function>spin_lock_irq()</function> call, so it
-      only makes sense if this type of access happens extremely
-      rarely.
-    </para>
-   </sect1>
-  </chapter>
-
- <chapter id="sleeping-things">
-    <title>What Functions Are Safe To Call From Interrupts?</title>
-
-    <para>
-      Many functions in the kernel sleep (ie. call schedule())
-      directly or indirectly: you can never call them while holding a
-      spinlock, or with preemption disabled.  This also means you need
-      to be in user context: calling them from an interrupt is illegal.
-    </para>
-
-   <sect1 id="sleeping">
-    <title>Some Functions Which Sleep</title>
-
-    <para>
-      The most common ones are listed below, but you usually have to
-      read the code to find out if other calls are safe.  If everyone
-      else who calls it can sleep, you probably need to be able to
-      sleep, too.  In particular, registration and deregistration
-      functions usually expect to be called from user context, and can
-      sleep.
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-        Accesses to 
-        <firstterm linkend="gloss-userspace">userspace</firstterm>:
-      </para>
-      <itemizedlist>
-       <listitem>
-        <para>
-          <function>copy_from_user()</function>
-        </para>
-       </listitem>
-       <listitem>
-        <para>
-          <function>copy_to_user()</function>
-        </para>
-       </listitem>
-       <listitem>
-        <para>
-          <function>get_user()</function>
-        </para>
-       </listitem>
-       <listitem>
-        <para>
-          <function>put_user()</function>
-        </para>
-       </listitem>
-      </itemizedlist>
-     </listitem>
-
-     <listitem>
-      <para>
-        <function>kmalloc(GFP_KERNEL)</function>
-      </para>
-     </listitem>
-
-     <listitem>
-      <para>
-      <function>mutex_lock_interruptible()</function> and
-      <function>mutex_lock()</function>
-      </para>
-      <para>
-       There is a <function>mutex_trylock()</function> which does not
-       sleep.  Still, it must not be used inside interrupt context since
-       its implementation is not safe for that.
-       <function>mutex_unlock()</function> will also never sleep.
-       It cannot be used in interrupt context either since a mutex
-       must be released by the same task that acquired it.
-      </para>
-     </listitem>
-    </itemizedlist>
-   </sect1>
-
-   <sect1 id="dont-sleep">
-    <title>Some Functions Which Don't Sleep</title>
-
-    <para>
-     Some functions are safe to call from any context, or holding
-     almost any lock.
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-	<function>printk()</function>
-      </para>
-     </listitem>
-     <listitem>
-      <para>
-        <function>kfree()</function>
-      </para>
-     </listitem>
-     <listitem>
-      <para>
-	<function>add_timer()</function> and <function>del_timer()</function>
-      </para>
-     </listitem>
-    </itemizedlist>
-   </sect1>
-  </chapter>
-
-  <chapter id="apiref-mutex">
-   <title>Mutex API reference</title>
-!Iinclude/linux/mutex.h
-!Ekernel/locking/mutex.c
-  </chapter>
-
-  <chapter id="apiref-futex">
-   <title>Futex API reference</title>
-!Ikernel/futex.c
-  </chapter>
-
-  <chapter id="references">
-   <title>Further reading</title>
-
-   <itemizedlist>
-    <listitem>
-     <para>
-       <filename>Documentation/locking/spinlocks.txt</filename>:
-       Linus Torvalds' spinlocking tutorial in the kernel sources.
-     </para>
-    </listitem>
-
-    <listitem>
-     <para>
-       Unix Systems for Modern Architectures: Symmetric
-       Multiprocessing and Caching for Kernel Programmers:
-     </para>
-
-     <para>
-       Curt Schimmel's very good introduction to kernel level
-       locking (not written for Linux, but nearly everything
-       applies).  The book is expensive, but really worth every
-       penny to understand SMP locking. [ISBN: 0201633388]
-     </para>
-    </listitem>
-   </itemizedlist>
-  </chapter>
-
-  <chapter id="thanks">
-    <title>Thanks</title>
-
-    <para>
-      Thanks to Telsa Gwynne for DocBooking, neatening and adding
-      style.
-    </para>
-
-    <para>
-      Thanks to Martin Pool, Philipp Rumpf, Stephen Rothwell, Paul
-      Mackerras, Ruedi Aschwanden, Alan Cox, Manfred Spraul, Tim
-      Waugh, Pete Zaitcev, James Morris, Robert Love, Paul McKenney,
-      John Ashby for proofreading, correcting, flaming, commenting.
-    </para>
-
-    <para>
-      Thanks to the cabal for having no influence on this document.
-    </para>
-  </chapter>
-
-  <glossary id="glossary">
-   <title>Glossary</title>
-
-   <glossentry id="gloss-preemption">
-    <glossterm>preemption</glossterm>
-     <glossdef>
-      <para>
-        Prior to 2.5, or when <symbol>CONFIG_PREEMPT</symbol> is
-        unset, processes in user context inside the kernel would not
-        preempt each other (ie. you had that CPU until you gave it up,
-        except for interrupts).  With the addition of
-        <symbol>CONFIG_PREEMPT</symbol> in 2.5.4, this changed: when
-        in user context, higher priority tasks can "cut in": spinlocks
-        were changed to disable preemption, even on UP.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-bh">
-    <glossterm>bh</glossterm>
-     <glossdef>
-      <para>
-        Bottom Half: for historical reasons, functions with
-        '_bh' in them often now refer to any software interrupt, e.g.
-        <function>spin_lock_bh()</function> blocks any software interrupt 
-        on the current CPU.  Bottom halves are deprecated, and will 
-        eventually be replaced by tasklets.  Only one bottom half will be 
-        running at any time.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-hwinterrupt">
-    <glossterm>Hardware Interrupt / Hardware IRQ</glossterm>
-    <glossdef>
-     <para>
-       Hardware interrupt request.  <function>in_irq()</function> returns 
-       <returnvalue>true</returnvalue> in a hardware interrupt handler.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-interruptcontext">
-    <glossterm>Interrupt Context</glossterm>
-    <glossdef>
-     <para>
-       Not user context: processing a hardware irq or software irq.
-       Indicated by the <function>in_interrupt()</function> macro 
-       returning <returnvalue>true</returnvalue>.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-smp">
-    <glossterm><acronym>SMP</acronym></glossterm>
-    <glossdef>
-     <para>
-       Symmetric Multi-Processor: kernels compiled for multiple-CPU
-       machines.  (CONFIG_SMP=y).
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-softirq">
-    <glossterm>Software Interrupt / softirq</glossterm>
-    <glossdef>
-     <para>
-       Software interrupt handler.  <function>in_irq()</function> returns
-       <returnvalue>false</returnvalue>; <function>in_softirq()</function>
-       returns <returnvalue>true</returnvalue>.  Tasklets and softirqs
-	both fall into the category of 'software interrupts'.
-     </para>
-     <para>
-       Strictly speaking a softirq is one of up to 32 enumerated software
-       interrupts which can run on multiple CPUs at once.
-       Sometimes used to refer to tasklets as
-       well (ie. all software interrupts).
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-tasklet">
-    <glossterm>tasklet</glossterm>
-    <glossdef>
-     <para>
-       A dynamically-registrable software interrupt,
-       which is guaranteed to only run on one CPU at a time.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-timers">
-    <glossterm>timer</glossterm>
-    <glossdef>
-     <para>
-       A dynamically-registrable software interrupt, which is run at
-       (or close to) a given time.  When running, it is just like a
-       tasklet (in fact, they are called from the TIMER_SOFTIRQ).
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-up">
-    <glossterm><acronym>UP</acronym></glossterm>
-    <glossdef>
-     <para>
-       Uni-Processor: Non-SMP.  (CONFIG_SMP=n).
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-usercontext">
-    <glossterm>User Context</glossterm>
-    <glossdef>
-     <para>
-       The kernel executing on behalf of a particular process (ie. a
-       system call or trap) or kernel thread.  You can tell which
-       process with the <symbol>current</symbol> macro.  Not to
-       be confused with userspace.  Can be interrupted by software or
-       hardware interrupts.
-     </para>
-    </glossdef>
-   </glossentry>
-
-   <glossentry id="gloss-userspace">
-    <glossterm>Userspace</glossterm>
-    <glossdef>
-     <para>
-       A process executing its own code outside the kernel.
-     </para>
-    </glossdef>
-   </glossentry>      
-
-  </glossary>
-</book>
-
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
deleted file mode 100644
index 856ac20..0000000
--- a/Documentation/DocBook/kgdb.tmpl
+++ /dev/null
@@ -1,918 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="kgdbOnLinux">
- <bookinfo>
-  <title>Using kgdb, kdb and the kernel debugger internals</title>
-
-  <authorgroup>
-   <author>
-    <firstname>Jason</firstname>
-    <surname>Wessel</surname>
-    <affiliation>
-     <address>
-      <email>jason.wessel@windriver.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-  <copyright>
-   <year>2008,2010</year>
-   <holder>Wind River Systems, Inc.</holder>
-  </copyright>
-  <copyright>
-   <year>2004-2005</year>
-   <holder>MontaVista Software, Inc.</holder>
-  </copyright>
-  <copyright>
-   <year>2004</year>
-   <holder>Amit S. Kale</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-   This file is licensed under the terms of the GNU General Public License
-   version 2. This program is licensed "as is" without any warranty of any
-   kind, whether express or implied.
-   </para>
-
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-  <chapter id="Introduction">
-    <title>Introduction</title>
-    <para>
-    The kernel has two different debugger front ends (kdb and kgdb)
-    which interface to the debug core.  It is possible to use either
-    of the debugger front ends and dynamically transition between them
-    if you configure the kernel properly at compile and runtime.
-    </para>
-    <para>
-    Kdb is a simplistic shell-style interface which you can use on a
-    system console with a keyboard or serial console.  You can use it
-    to inspect memory, registers, process lists, dmesg, and even set
-    breakpoints to stop in a certain location.  Kdb is not a source
-    level debugger, although you can set breakpoints and execute some
-    basic kernel run control.  Kdb is mainly aimed at doing some
-    analysis to aid in development or diagnosing kernel problems.  You
-    can access some symbols by name in kernel built-ins or in kernel
-    modules if the code was built
-    with <symbol>CONFIG_KALLSYMS</symbol>.
-    </para>
-    <para>
-    Kgdb is intended to be used as a source level debugger for the
-    Linux kernel. It is used along with gdb to debug a Linux kernel.
-    The expectation is that gdb can be used to "break in" to the
-    kernel to inspect memory, variables and look through call stack
-    information similar to the way an application developer would use
-    gdb to debug an application.  It is possible to place breakpoints
-    in kernel code and perform some limited execution stepping.
-    </para>
-    <para>
-    Two machines are required for using kgdb. One of these machines is
-    a development machine and the other is the target machine.  The
-    kernel to be debugged runs on the target machine. The development
-    machine runs an instance of gdb against the vmlinux file which
-    contains the symbols (not a boot image such as bzImage, zImage,
-    uImage...).  In gdb the developer specifies the connection
-    parameters and connects to kgdb.  The type of connection a
-    developer makes with gdb depends on the availability of kgdb I/O
-    modules compiled as built-ins or loadable kernel modules in the test
-    machine's kernel.
-    </para>
-  </chapter>
-  <chapter id="CompilingAKernel">
-  <title>Compiling a kernel</title>
-  <para>
-  <itemizedlist>
-  <listitem><para>In order to enable compilation of kdb, you must first enable kgdb.</para></listitem>
-  <listitem><para>The kgdb test compile options are described in the kgdb test suite chapter.</para></listitem>
-  </itemizedlist>
-  </para>
-  <sect1 id="CompileKGDB">
-    <title>Kernel config options for kgdb</title>
-    <para>
-    To enable <symbol>CONFIG_KGDB</symbol> you should look under
-    "Kernel hacking" / "Kernel debugging" and select "KGDB: kernel debugger".
-    </para>
-    <para>
-    While it is not a hard requirement that you have symbols in your
-    vmlinux file, gdb tends not to be very useful without the symbolic
-    data, so you will want to turn
-    on <symbol>CONFIG_DEBUG_INFO</symbol> which is called "Compile the
-    kernel with debug info" in the config menu.
-    </para>
-    <para>
-    It is advised, but not required, that you turn on the
-    <symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
-    kernel with frame pointers" in the config menu.  This option
-    inserts code into the compiled executable which saves the frame
-    information in registers or on the stack at different points which
-    allows a debugger such as gdb to more accurately construct
-    stack back traces while debugging the kernel.
-    </para>
-    <para>
-    If the architecture that you are using supports the kernel option
-    CONFIG_STRICT_KERNEL_RWX, you should consider turning it off.  This
-    option will prevent the use of software breakpoints because it
-    marks certain regions of the kernel's memory space as read-only.
-    If kgdb supports it for the architecture you are using, you can
-    use hardware breakpoints if you desire to run with the
-    CONFIG_STRICT_KERNEL_RWX option turned on, else you need to turn off
-    this option.
-    </para>
-    <para>
-    Next you should choose one or more I/O drivers to interconnect
-    debugging host and debugged target.  Early boot debugging requires
-    a KGDB I/O driver that supports early debugging and the driver
-    must be built into the kernel directly. Kgdb I/O driver
-    configuration takes place via kernel or module parameters which
-    you can learn more about in the section that describes the
-    parameter "kgdboc".
-    </para>
-    <para>Here is an example set of .config symbols to enable or
-    disable for kgdb:
-    <itemizedlist>
-    <listitem><para># CONFIG_STRICT_KERNEL_RWX is not set</para></listitem>
-    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
-    <listitem><para>CONFIG_KGDB=y</para></listitem>
-    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
-    </itemizedlist>
-    </para>
-  </sect1>
-  <sect1 id="CompileKDB">
-    <title>Kernel config options for kdb</title>
-    <para>Kdb is quite a bit more complex than the simple gdbstub
-    sitting on top of the kernel's debug core.  Kdb must implement a
-    shell, and also adds some helper functions in other parts of the
-    kernel, responsible for printing out interesting data such as what
-    you would see if you ran "lsmod", or "ps".  In order to build kdb
-    into the kernel you follow the same steps as you would for kgdb.
-    </para>
-    <para>The main config option for kdb
-    is <symbol>CONFIG_KGDB_KDB</symbol> which is called "KGDB_KDB:
-    include kdb frontend for kgdb" in the config menu.  In theory you
-    would have already also selected an I/O driver such as the
-    CONFIG_KGDB_SERIAL_CONSOLE interface if you plan on using kdb on a
-    serial port, when you were configuring kgdb.
-    </para>
-    <para>If you want to use a PS/2-style keyboard with kdb, you would
-    select CONFIG_KDB_KEYBOARD which is called "KGDB_KDB: keyboard as
-    input device" in the config menu.  The CONFIG_KDB_KEYBOARD option
-    is not used for anything in the gdb interface to kgdb.  The
-    CONFIG_KDB_KEYBOARD option only works with kdb.
-    </para>
-    <para>Here is an example set of .config symbols to enable/disable kdb:
-    <itemizedlist>
-    <listitem><para># CONFIG_STRICT_KERNEL_RWX is not set</para></listitem>
-    <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
-    <listitem><para>CONFIG_KGDB=y</para></listitem>
-    <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
-    <listitem><para>CONFIG_KGDB_KDB=y</para></listitem>
-    <listitem><para>CONFIG_KDB_KEYBOARD=y</para></listitem>
-    </itemizedlist>
-    </para>
-  </sect1>
-  </chapter>
-  <chapter id="kgdbKernelArgs">
-  <title>Kernel Debugger Boot Arguments</title>
-  <para>This section describes the various runtime kernel
-  parameters that affect the configuration of the kernel debugger.
-  The following chapter covers using kdb and kgdb as well as
-  providing some examples of the configuration parameters.</para>
-   <sect1 id="kgdboc">
-   <title>Kernel parameter: kgdboc</title>
-   <para>The name of the kgdboc driver was originally an abbreviation
-   for "kgdb over console".  Today it is the primary mechanism
-   to configure how to communicate from gdb to kgdb as well as the
-   devices you want to use to interact with the kdb shell.
-   </para>
-   <para>For kgdb/gdb, kgdboc is designed to work with a single serial
-   port. It is intended to cover the circumstance where you want to
-   use a serial console as your primary console as well as using it to
-   perform kernel debugging.  It is also possible to use kgdb on a
-   serial port which is not designated as a system console.  Kgdboc
-   may be configured as a kernel built-in or a kernel loadable module.
-   You can only make use of <constant>kgdbwait</constant> and early
-   debugging if you build kgdboc into the kernel as a built-in.
-   </para>
-   <para>Optionally you can elect to activate kms (Kernel Mode
-   Setting) integration.  When you use kms with kgdboc and you have a
-   video driver that has atomic mode setting hooks, it is possible to
-   enter the debugger on the graphics console.  When the kernel
-   execution is resumed, the previous graphics mode will be restored.
-   This integration can serve as a useful tool to aid in diagnosing
-   crashes or doing analysis of memory with kdb while allowing the
-   full graphics console applications to run.
-   </para>
-   <sect2 id="kgdbocArgs">
-   <title>kgdboc arguments</title>
-   <para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
-   <para>The order listed above must be observed if you use any of the
-   optional configurations together.
-   </para>
-   <para>Abbreviations:
-   <itemizedlist>
-   <listitem><para>kms = Kernel Mode Setting</para></listitem>
-   <listitem><para>kbd = Keyboard</para></listitem>
-   </itemizedlist>
-   </para>
-   <para>You can configure kgdboc to use the keyboard and/or a serial
-   device, depending on whether you are using kdb and/or kgdb, in one of the
-   following scenarios.  The order listed above must be observed if
-   you use any of the optional configurations together.  Using kms +
-   only gdb is generally not a useful combination.</para>
-   <sect3 id="kgdbocArgs1">
-   <title>Using loadable module or built-in</title>
-   <para>
-   <orderedlist>
-   <listitem><para>As a kernel built-in:</para>
-   <para>Use the kernel boot argument: <constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para></listitem>
-   <listitem>
-   <para>As a kernel loadable module:</para>
-   <para>Use the command: <constant>modprobe kgdboc kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
-   <para>Here are two examples of how you might format the kgdboc
-   string. The first is for an x86 target using the first serial port.
-   The second example is for the ARM Versatile AB using the second
-   serial port.
-   <orderedlist>
-   <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
-   <listitem><para><constant>kgdboc=ttyAMA1,115200</constant></para></listitem>
-   </orderedlist>
-   </para>
-   </listitem>
-   </orderedlist></para>
-   </sect3>
-   <sect3 id="kgdbocArgs2">
-   <title>Configure kgdboc at runtime with sysfs</title>
-   <para>At run time you can enable or disable kgdboc by echoing
-   parameters into sysfs.  Here are two examples:</para>
-   <orderedlist>
-   <listitem><para>Enable kgdboc on ttyS0</para>
-   <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
-   <listitem><para>Disable kgdboc</para>
-   <para><constant>echo "" &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
-   </orderedlist>
-   <para>NOTE: You do not need to specify the baud if you are
-   configuring the console on a tty which is already configured or
-   open.</para>
-   </sect3>
-   <sect3 id="kgdbocArgs3">
-   <title>More examples</title>
-   <para>You can configure kgdboc to use the keyboard and/or a serial device,
-   depending on whether you are using kdb and/or kgdb, in one of the
-   following scenarios.
-   <orderedlist>
-   <listitem><para>kdb and kgdb over only a serial port</para>
-   <para><constant>kgdboc=&lt;serial_device&gt;[,baud]</constant></para>
-   <para>Example: <constant>kgdboc=ttyS0,115200</constant></para>
-   </listitem>
-   <listitem><para>kdb and kgdb with keyboard and a serial port</para>
-   <para><constant>kgdboc=kbd,&lt;serial_device&gt;[,baud]</constant></para>
-   <para>Example: <constant>kgdboc=kbd,ttyS0,115200</constant></para>
-   </listitem>
-   <listitem><para>kdb with a keyboard</para>
-   <para><constant>kgdboc=kbd</constant></para>
-   </listitem>
-   <listitem><para>kdb with kernel mode setting</para>
-   <para><constant>kgdboc=kms,kbd</constant></para>
-   </listitem>
-   <listitem><para>kdb with kernel mode setting and kgdb over a serial port</para>
-   <para><constant>kgdboc=kms,kbd,ttyS0,115200</constant></para>
-   </listitem>
-   </orderedlist>
-   </para>
-   <para>NOTE: Kgdboc does not support interrupting the target via the
-   gdb remote protocol.  You must manually send a sysrq-g unless you
-   have a proxy that splits console output to a terminal program.
-   A console proxy has a separate TCP port for the debugger and a separate
-   TCP port for the "human" console.  The proxy can take care of sending
-   the sysrq-g for you.
-   </para>
-   <para>When using kgdboc with no debugger proxy, you can end up
-    connecting the debugger at one of two entry points.  If an
-    exception occurs after you have loaded kgdboc, a message should
-    print on the console stating it is waiting for the debugger.  In
-    this case you disconnect your terminal program and then connect the
-    debugger in its place.  If you want to interrupt the target system
-    and forcibly enter a debug session you have to issue a Sysrq
-    sequence and then type the letter <constant>g</constant>.  Then
-    you disconnect the terminal session and connect gdb.  Your options
-    if you don't like this are to hack gdb to send the sysrq-g for you
-    as well as on the initial connect, or to use a debugger proxy that
-    allows an unmodified gdb to do the debugging.
-   </para>
-   </sect3>
-   </sect2>
-   </sect1>
-   <sect1 id="kgdbwait">
-   <title>Kernel parameter: kgdbwait</title>
-   <para>
-   The Kernel command line option <constant>kgdbwait</constant> makes
-   kgdb wait for a debugger connection during booting of a kernel.  You
-   can only use this option if you compiled a kgdb I/O driver into the
-   kernel and you specified the I/O driver configuration as a kernel
-   command line option.  The kgdbwait parameter should always follow the
-   configuration parameter for the kgdb I/O driver in the kernel
-   command line; otherwise the I/O driver will not be configured before
-   the kernel is asked to use it to wait for a debugger.
-   </para>
-   <para>
-   The kernel will stop and wait as early as the I/O driver and
-   architecture allows when you use this option.  If you build the
-   kgdb I/O driver as a loadable kernel module kgdbwait will not do
-   anything.
-   </para>
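-   <para>
-   For example, a boot command line that configures kgdboc on the first
-   serial port and then waits for a debugger to attach might look like:
-   </para>
-   <para>
-   <constant>console=ttyS0,115200 kgdboc=ttyS0,115200 kgdbwait</constant>
-   </para>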
-   </sect1>
-   <sect1 id="kgdbcon">
-   <title>Kernel parameter: kgdbcon</title>
-   <para> The kgdbcon feature allows you to see printk() messages
-   inside gdb while gdb is connected to the kernel.  Kdb does not make
-    use of the kgdbcon feature.
-   </para>
-   <para>Kgdb supports using the gdb serial protocol to send console
-   messages to the debugger when the debugger is connected and running.
-   There are two ways to activate this feature.
-   <orderedlist>
-   <listitem><para>Activate with the kernel command line option:</para>
-   <para><constant>kgdbcon</constant></para>
-   </listitem>
-   <listitem><para>Use sysfs before configuring an I/O driver</para>
-   <para>
-   <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
-   </para>
-   <para>
-   NOTE: If you do this after you configure the kgdb I/O driver, the
-   setting will not take effect until the next time the I/O driver is
-   reconfigured.
-   </para>
-   </listitem>
-   </orderedlist>
-  </para>
-   <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
-   active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
-   </para>
-   <para>It is possible to use this option with kgdboc on a tty that is not a system console.
-   </para>
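-   <para>For example, a working combination would place kgdboc and
-   kgdbcon on a serial port that is not the system console, such as:
-   <constant>console=ttyS0,115200 kgdboc=ttyS1,115200 kgdbcon</constant>
-   </para>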
-  </sect1>
-   <sect1 id="kgdbreboot">
-   <title>Run time parameter: kgdbreboot</title>
-   <para> The kgdbreboot feature allows you to change how the debugger
-   deals with the reboot notification.  You have 3 choices for the
-   behavior.  The default behavior is always set to 0.</para>
-   <orderedlist>
-   <listitem><para>echo -1 > /sys/module/debug_core/parameters/kgdbreboot</para>
-   <para>Ignore the reboot notification entirely.</para>
-   </listitem>
-   <listitem><para>echo 0 > /sys/module/debug_core/parameters/kgdbreboot</para>
-   <para>Send the detach message to any attached debugger client.</para>
-   </listitem>
-   <listitem><para>echo 1 > /sys/module/debug_core/parameters/kgdbreboot</para>
-   <para>Enter the debugger on reboot notify.</para>
-   </listitem>
-   </orderedlist>
-  </sect1>
-  </chapter>
-  <chapter id="usingKDB">
-  <title>Using kdb</title>
-  <para>
-  </para>
-  <sect1 id="quickKDBserial">
-  <title>Quick start for kdb on a serial port</title>
-  <para>This is a quick example of how to use kdb.</para>
-  <para><orderedlist>
-  <listitem><para>Configure kgdboc at boot using kernel parameters:
-  <itemizedlist>
-  <listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
-  </itemizedlist></para>
-  <para>OR</para>
-  <para>Configure kgdboc after the kernel has booted; assuming you are using a serial port console:
-  <itemizedlist>
-  <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
-  </itemizedlist>
-  </para>
-  </listitem>
-  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
-  <itemizedlist>
-  <listitem><para>When logged in as root or with a super user session you can run:</para>
-   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
-  <listitem><para>Example using minicom 2.2</para>
-  <para>Press: <constant>Control-a</constant></para>
-  <para>Press: <constant>f</constant></para>
-  <para>Press: <constant>g</constant></para>
-  </listitem>
-  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
-  <para>Press: <constant>Control-]</constant></para>
-  <para>Type in: <constant>send break</constant></para>
-  <para>Press: <constant>Enter</constant></para>
-  <para>Press: <constant>g</constant></para>
-  </listitem>
-  </itemizedlist>
-  </listitem>
-  <listitem><para>From the kdb prompt you can run the "help" command to see a complete list of the commands that are available.</para>
-  <para>Some useful commands in kdb include:
-  <itemizedlist>
-  <listitem><para>lsmod  -- Shows where kernel modules are loaded</para></listitem>
-  <listitem><para>ps -- Displays only the active processes</para></listitem>
-  <listitem><para>ps A -- Shows all the processes</para></listitem>
-  <listitem><para>summary -- Shows kernel version info and memory usage</para></listitem>
-  <listitem><para>bt -- Get a backtrace of the current process using dump_stack()</para></listitem>
-  <listitem><para>dmesg -- View the kernel syslog buffer</para></listitem>
-  <listitem><para>go -- Continue the system</para></listitem>
-  </itemizedlist>
-  </para>
-  </listitem>
-  <listitem>
-  <para>When you are done using kdb you need to consider rebooting the
-  system or using the "go" command to resume normal kernel
-  execution.  If you have paused the kernel for a lengthy period of
-  time, applications that rely on timely networking or anything to do
-  with real wall clock time could be adversely affected, so you
-  should take this into consideration when using the kernel
-  debugger.</para>
-  </listitem>
-  </orderedlist></para>
-  </sect1>
-  <sect1 id="quickKDBkeyboard">
-  <title>Quick start for kdb using a keyboard connected console</title>
-  <para>This is a quick example of how to use kdb with a keyboard.</para>
-  <para><orderedlist>
-  <listitem><para>Configure kgdboc at boot using kernel parameters:
-  <itemizedlist>
-  <listitem><para><constant>kgdboc=kbd</constant></para></listitem>
-  </itemizedlist></para>
-  <para>OR</para>
-  <para>Configure kgdboc after the kernel has booted:
-  <itemizedlist>
-  <listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
-  </itemizedlist>
-  </para>
-  </listitem>
-  <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault.  There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
-  <itemizedlist>
-  <listitem><para>When logged in as root or with a super user session you can run:</para>
-   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
-  <listitem><para>Example using a laptop keyboard</para>
-  <para>Press and hold down: <constant>Alt</constant></para>
-  <para>Press and hold down: <constant>Fn</constant></para>
-  <para>Press and release the key with the label: <constant>SysRq</constant></para>
-  <para>Release: <constant>Fn</constant></para>
-  <para>Press and release: <constant>g</constant></para>
-  <para>Release: <constant>Alt</constant></para>
-  </listitem>
-  <listitem><para>Example using a PS/2 101-key keyboard</para>
-  <para>Press and hold down: <constant>Alt</constant></para>
-  <para>Press and release the key with the label: <constant>SysRq</constant></para>
-  <para>Press and release: <constant>g</constant></para>
-  <para>Release: <constant>Alt</constant></para>
-  </listitem>
-  </itemizedlist>
-  </listitem>
-  <listitem>
-  <para>Now type in a kdb command such as "help", "dmesg", "bt" or "go" to continue kernel execution.</para>
-  </listitem>
-  </orderedlist></para>
-  </sect1>
-  </chapter>
-  <chapter id="EnableKGDB">
-   <title>Using kgdb / gdb</title>
-   <para>In order to use kgdb you must activate it by passing
-   configuration information to one of the kgdb I/O drivers.  If you
-   do not pass any configuration information kgdb will not do anything
-   at all.  Kgdb will only actively hook up to the kernel trap hooks
-   if a kgdb I/O driver is loaded and configured.  If you unconfigure
-   a kgdb I/O driver, kgdb will unregister all the kernel hook points.
-   </para>
-   <para> All kgdb I/O drivers can be reconfigured at run time, if
-   <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
-   are enabled, by echo'ing a new config string to
-   <constant>/sys/module/&lt;driver&gt;/parameters/&lt;option&gt;</constant>.
-   The driver can be unconfigured by passing an empty string.  You cannot
-   change the configuration while the debugger is attached.  Make sure
-   to detach the debugger with the <constant>detach</constant> command
-   prior to trying to unconfigure a kgdb I/O driver.
-   </para>
-  <sect1 id="ConnectingGDB">
-  <title>Connecting with gdb to a serial port</title>
-  <orderedlist>
-  <listitem><para>Configure kgdboc</para>
-   <para>Configure kgdboc at boot using kernel parameters:
-   <itemizedlist>
-    <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
-   </itemizedlist></para>
-   <para>OR</para>
-   <para>Configure kgdboc after the kernel has booted:
-   <itemizedlist>
-    <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
-   </itemizedlist></para>
-  </listitem>
-  <listitem>
-  <para>Stop kernel execution (break into the debugger)</para>
-  <para>In order to connect to gdb via kgdboc, the kernel must
-  first be stopped.  There are several ways to stop the kernel which
-  include using kgdbwait as a boot argument, via a sysrq-g, or running
-  the kernel until it takes an exception where it waits for the
-  debugger to attach.
-  <itemizedlist>
-  <listitem><para>When logged in as root or with a super user session you can run:</para>
-   <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
-  <listitem><para>Example using minicom 2.2</para>
-  <para>Press: <constant>Control-a</constant></para>
-  <para>Press: <constant>f</constant></para>
-  <para>Press: <constant>g</constant></para>
-  </listitem>
-  <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
-  <para>Press: <constant>Control-]</constant></para>
-  <para>Type in: <constant>send break</constant></para>
-  <para>Press: <constant>Enter</constant></para>
-  <para>Press: <constant>g</constant></para>
-  </listitem>
-  </itemizedlist>
-  </para>
-  </listitem>
-  <listitem>
-    <para>Connect from gdb</para>
-    <para>
-    Example (using a directly connected port):
-    </para>
-    <programlisting>
-    % gdb ./vmlinux
-    (gdb) set remotebaud 115200
-    (gdb) target remote /dev/ttyS0
-    </programlisting>
-    <para>
-    Example (kgdb to a terminal server on TCP port 2012):
-    </para>
-    <programlisting>
-    % gdb ./vmlinux
-    (gdb) target remote 192.168.2.2:2012
-    </programlisting>
-    <para>
-    Once connected, you can debug a kernel the way you would debug an
-    application program.
-    </para>
-    <para>
-    If you are having problems connecting or something is going
-    seriously wrong while debugging, it will most often be the case
-    that you want to enable gdb to be verbose about its target
-    communications.  You do this prior to issuing the <constant>target
-    remote</constant> command by typing in: <constant>set debug remote 1</constant>
-    </para>
-  </listitem>
-  </orderedlist>
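-  <para>NOTE: newer gdb releases have replaced the
-  <constant>set remotebaud</constant> command with
-  <constant>set serial baud</constant>.  If your copy of gdb rejects
-  <constant>set remotebaud</constant>, the equivalent of the first
-  example above would be:
-  </para>
-  <programlisting>
-    % gdb ./vmlinux
-    (gdb) set serial baud 115200
-    (gdb) target remote /dev/ttyS0
-  </programlisting>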
-  <para>Remember if you continue in gdb, and need to "break in" again,
-  you need to issue another sysrq-g.  It is easy to create a simple
-  entry point by putting a breakpoint at <constant>sys_sync</constant>
-  and then you can run "sync" from a shell or script to break into the
-  debugger.</para>
-  </sect1>
-  </chapter>
-  <chapter id="switchKdbKgdb">
-  <title>kgdb and kdb interoperability</title>
-  <para>It is possible to transition between kdb and kgdb dynamically.
-  The debug core will remember which you used the last time and
-  automatically start in the same mode.</para>
-  <sect1>
-  <title>Switching between kdb and kgdb</title>
-  <sect2>
-  <title>Switching from kgdb to kdb</title>
-  <para>
-  There are two ways to switch from kgdb to kdb: you can use gdb to
-  issue a maintenance packet, or you can blindly type the command $3#33.
-  Whenever the kernel debugger stops in kgdb mode it will print the
-  message <constant>KGDB or $3#33 for KDB</constant>.  It is important
-  to note that you have to type the sequence correctly in one pass.
-  You cannot type a backspace or delete because kgdb will interpret
-  that as part of the debug stream.
-  <orderedlist>
-  <listitem><para>Change from kgdb to kdb by blindly typing:</para>
-  <para><constant>$3#33</constant></para></listitem>
-  <listitem><para>Change from kgdb to kdb with gdb</para>
-  <para><constant>maintenance packet 3</constant></para>
-  <para>NOTE: Now you must kill gdb. Typically you press control-z and
-  issue the command: kill -9 %</para></listitem>
-  </orderedlist>
-  </para>
-  </sect2>
-  <sect2>
-  <title>Change from kdb to kgdb</title>
-  <para>There are two ways you can change from kdb to kgdb.  You can
-  manually enter kgdb mode by issuing the kgdb command from the kdb
-  shell prompt, or you can connect gdb while the kdb shell prompt is
-  active.  The kdb shell looks for the typical first commands that gdb
-  would issue with the gdb remote protocol and if it sees one of those
-  commands it automatically changes into kgdb mode.</para>
-  <orderedlist>
-  <listitem><para>From kdb issue the command:</para>
-  <para><constant>kgdb</constant></para>
-  <para>Now disconnect your terminal program and connect gdb in its place</para></listitem>
-  <listitem><para>At the kdb prompt, disconnect the terminal program and connect gdb in its place.</para></listitem>
-  </orderedlist>
-  </sect2>
-  </sect1>
-  <sect1>
-  <title>Running kdb commands from gdb</title>
-  <para>It is possible to run a limited set of kdb commands from gdb,
-  using the gdb monitor command.  You don't want to execute any of the
-  run control or breakpoint operations, because it can disrupt the
-  state of the kernel debugger.  You should be using gdb for
-  breakpoints and run control operations if you have gdb connected.
-  The more useful commands to run are things like lsmod, dmesg, ps or
-  possibly some of the memory information commands.  To see all the kdb
-  commands you can run <constant>monitor help</constant>.</para>
-  <para>Example:
-  <informalexample><programlisting>
-(gdb) monitor ps
-1 idle process (state I) and
-27 sleeping system daemon (state M) processes suppressed,
-use 'ps A' to see all.
-Task Addr       Pid   Parent [*] cpu State Thread     Command
-
-0xc78291d0        1        0  0    0   S  0xc7829404  init
-0xc7954150      942        1  0    0   S  0xc7954384  dropbear
-0xc78789c0      944        1  0    0   S  0xc7878bf4  sh
-(gdb)
-  </programlisting></informalexample>
-  </para>
-  </sect1>
-  </chapter>
-  <chapter id="KGDBTestSuite">
-    <title>kgdb Test Suite</title>
-    <para>
-    When kgdb is enabled in the kernel config you can also elect to
-    enable the config parameter KGDB_TESTS.  Turning this on will
-    enable a special kgdb I/O module which is designed to test the
-    kgdb internal functions.
-    </para>
-    <para>
-    The kgdb tests are mainly intended for developers to test the kgdb
-    internals as well as a tool for developing a new kgdb architecture
-    specific implementation.  These tests are not really for end users
-    of the Linux kernel.  The primary source of documentation would be
-    to look in the drivers/misc/kgdbts.c file.
-    </para>
-    <para>
-    The kgdb test suite can also be configured at compile time to run
-    the core set of tests by setting the kernel config parameter
-    KGDB_TESTS_ON_BOOT.  This particular option is aimed at automated
-    regression testing and does not require modifying the kernel boot
-    config arguments.  If this is turned on, the kgdb test suite can
-    be disabled by specifying "kgdbts=" as a kernel boot argument.
-    </para>
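-    <para>
-    As a rough illustration, a kernel configured for automated boot-time
-    testing of kgdb might carry the following symbols (the authoritative
-    names are in the Kconfig entries under lib/Kconfig.kgdb):
-    <itemizedlist>
-    <listitem><para>CONFIG_KGDB_TESTS=y</para></listitem>
-    <listitem><para>CONFIG_KGDB_TESTS_ON_BOOT=y</para></listitem>
-    </itemizedlist>
-    With such a kernel, passing <constant>kgdbts=</constant> on the boot
-    command line disables the boot-time tests, as noted above.
-    </para>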
-  </chapter>
-  <chapter id="CommonBackEndReq">
-  <title>Kernel Debugger Internals</title>
-  <sect1 id="kgdbArchitecture">
-    <title>Architecture Specifics</title>
-      <para>
-      The kernel debugger is organized into a number of components:
-      <orderedlist>
-      <listitem><para>The debug core</para>
-      <para>
-      The debug core is found in kernel/debug/debug_core.c.  It contains:
-      <itemizedlist>
-      <listitem><para>A generic OS exception handler which includes
-      sync'ing the processors into a stopped state on a multi-CPU
-      system.</para></listitem>
-      <listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
-      <listitem><para>The API to make calls to the arch-specific kgdb implementation</para></listitem>
-      <listitem><para>The logic to perform safe memory reads and writes to memory while using the debugger</para></listitem>
-      <listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
-      <listitem><para>The API to invoke either the kdb or kgdb frontend to the debug core.</para></listitem>
-      <listitem><para>The structures and callback API for atomic kernel mode setting.</para>
-      <para>NOTE: kgdboc is where the kms callbacks are invoked.</para></listitem>
-      </itemizedlist>
-      </para>
-      </listitem>
-      <listitem><para>kgdb arch-specific implementation</para>
-      <para>
-      This implementation is generally found in arch/*/kernel/kgdb.c.
-      As an example, arch/x86/kernel/kgdb.c contains the specifics to
-      implement HW breakpoint as well as the initialization to
-      dynamically register and unregister for the trap handlers on
-      this architecture.  The arch-specific portion implements:
-      <itemizedlist>
-      <listitem><para>An arch-specific trap catcher which
-      invokes kgdb_handle_exception() to start kgdb doing its
-      work</para></listitem>
-      <listitem><para>Translation between the gdb-specific packet format and pt_regs</para></listitem>
-      <listitem><para>Registration and unregistration of architecture specific trap hooks</para></listitem>
-      <listitem><para>Any special exception handling and cleanup</para></listitem>
-      <listitem><para>NMI exception handling and cleanup</para></listitem>
-      <listitem><para>(optional) HW breakpoints</para></listitem>
-      </itemizedlist>
-      </para>
-      </listitem>
-      <listitem><para>gdbstub frontend (aka kgdb)</para>
-      <para>The gdbstub is located in kernel/debug/gdbstub.c. It contains:</para>
-      <itemizedlist>
-        <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
-      </itemizedlist>
-      </listitem>
-      <listitem><para>kdb frontend</para>
-      <para>The kdb debugger shell is broken down into a number of
-      components.  The kdb core is located in kernel/debug/kdb.  There
-      are a number of helper functions in some of the other kernel
-      components to make it possible for kdb to examine and report
-      information about the kernel without taking locks that could
-      cause a kernel deadlock.  The kdb core implements the following functionality.</para>
-      <itemizedlist>
-        <listitem><para>A simple shell</para></listitem>
-        <listitem><para>The kdb core command set</para></listitem>
-        <listitem><para>A registration API to register additional kdb shell commands.</para>
-	<itemizedlist>
-        <listitem><para>A good example of a self-contained kdb module
-        is the "ftdump" command for dumping the ftrace buffer.  See:
-        kernel/trace/trace_kdb.c</para></listitem>
-        <listitem><para>For an example of how to dynamically register
-        a new kdb command you can build the kdb_hello.ko kernel module
-        from samples/kdb/kdb_hello.c.  To build this example you can
-        set CONFIG_SAMPLES=y and CONFIG_SAMPLE_KDB=m in your kernel
-        config.  Later run "modprobe kdb_hello" and the next time you
-        enter the kdb shell, you can run the "hello"
-        command.</para></listitem>
-	</itemizedlist></listitem>
-        <listitem><para>The implementation for kdb_printf() which
-        emits messages directly to I/O drivers, bypassing the kernel
-        log.</para></listitem>
-        <listitem><para>SW / HW breakpoint management for the kdb shell</para></listitem>
-      </itemizedlist>
-      </listitem>
-      <listitem><para>kgdb I/O driver</para>
-      <para>
-      Each kgdb I/O driver has to provide an implementation for the following:
-      <itemizedlist>
-      <listitem><para>configuration via built-in or module</para></listitem>
-      <listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
-      <listitem><para>read and write character interface</para></listitem>
-      <listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
-      <listitem><para>(optional) Early debug methodology</para></listitem>
-      </itemizedlist>
-      Any given kgdb I/O driver has to operate very closely with the
-      hardware and must do so in a way that does not enable
-      interrupts or change other parts of the system context without
-      completely restoring them. The kgdb core will repeatedly "poll"
-      a kgdb I/O driver for characters when it needs input.  The I/O
-      driver is expected to return immediately if there is no data
-      available.  Returning immediately leaves open the future
-      possibility of touching watchdog hardware so that an enabled
-      watchdog does not reset the target system.  A rough sketch of
-      such a driver's callback structure follows this list.
-      </para>
-      </listitem>
-      </orderedlist>
-      </para>
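-      <para>
-      As a rough sketch (not a complete driver), the read/write
-      character interface and the setup/cleanup handlers are supplied
-      to the debug core through <constant>struct kgdb_io</constant>;
-      the authoritative member list is in include/linux/kgdb.h:
-      <informalexample>
-      <programlisting>
-/* hypothetical kgdb I/O driver callbacks */
-static struct kgdb_io my_kgdb_io_ops = {
-	.name		= "my_kgdb_io",		/* hypothetical driver name */
-	.read_char	= my_read_char,		/* poll one character; must not block */
-	.write_char	= my_write_char,	/* emit one character */
-	.init		= my_init,		/* called when the driver is configured */
-	.deinit		= my_deinit,		/* cleanup when unconfigured */
-};
-
-/* registered and unregistered with the debug core via
-   kgdb_register_io_module() / kgdb_unregister_io_module() */
-      </programlisting>
-      </informalexample>
-      </para>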
-      <para>
-      If you are intent on adding kgdb architecture specific support
-      for a new architecture, the architecture should define
-      <constant>HAVE_ARCH_KGDB</constant> in the architecture specific
-      Kconfig file.  This will enable kgdb for the architecture, and
-      at that point you must create an architecture specific kgdb
-      implementation.
-      </para>
-      <para>
-      There are a few flags which must be set by every architecture in
-      its &lt;asm/kgdb.h&gt; file.  These are (an illustrative example
-      follows the list):
-      <itemizedlist>
-        <listitem>
-          <para>
-          NUMREGBYTES: The size in bytes of all of the registers, so
-          that we can ensure they will all fit into a packet.
-          </para>
-        </listitem>
-        <listitem>
-          <para>
-          BUFMAX: The size in bytes of the buffer GDB will read into.
-          This must be larger than NUMREGBYTES.
-          </para>
-        </listitem>
-        <listitem>
-          <para>
-          CACHE_FLUSH_IS_SAFE: Set to 1 if it is always safe to call
-          flush_cache_range or flush_icache_range.  On some architectures,
-          these functions may not be safe to call on SMP since we keep other
-          CPUs in a holding pattern.
-          </para>
-        </listitem>
-      </itemizedlist>
-      </para>
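-      <para>
-      As an illustrative (hypothetical) fragment of an architecture's
-      &lt;asm/kgdb.h&gt;, these might look like:
-      <informalexample>
-      <programlisting>
-/* hypothetical values; each architecture chooses its own */
-#define NUMREGBYTES		(sizeof(struct pt_regs))
-#define BUFMAX			2048	/* must be larger than NUMREGBYTES */
-#define CACHE_FLUSH_IS_SAFE	1	/* cache flushes are safe while stopped */
-      </programlisting>
-      </informalexample>
-      </para>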
-      <para>
-      There are also the following functions for the common backend,
-      found in kernel/kgdb.c, that must be supplied by the
-      architecture-specific backend unless marked as (optional), in
-      which case a default function may be used if the architecture
-      does not need to provide a specific implementation.
-      </para>
-!Iinclude/linux/kgdb.h
-  </sect1>
-  <sect1 id="kgdbocDesign">
-  <title>kgdboc internals</title>
-  <sect2>
-  <title>kgdboc and uarts</title>
-  <para>
-  The kgdboc driver is actually a very thin driver that relies on the
-  underlying low-level hardware driver having "polling hooks"
-  to which the tty driver is attached.  In the initial
-  implementation of kgdboc the serial_core was changed to expose a
-  low level UART hook for doing polled mode reading and writing of a
-  single character while in an atomic context.  When kgdb makes an I/O
-  request to the debugger, kgdboc invokes a callback in the serial
-  core which in turn uses the callback in the UART driver.</para>
-  <para>
-  When using kgdboc with a UART, the UART driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
-#ifdef CONFIG_CONSOLE_POLL
-	.poll_get_char = serial8250_get_poll_char,
-	.poll_put_char = serial8250_put_poll_char,
-#endif
-  </programlisting>
-  Any implementation specifics for creating a polling driver should be
-  guarded by <constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as shown above.
-  Keep in mind that polling hooks have to be implemented in such a way
-  that they can be called from an atomic context and have to restore
-  the state of the UART chip on return such that the system can return
-  to normal when the debugger detaches.  You need to be very careful
-  with any kind of lock you consider, because failing here is most likely
-  going to mean pressing the reset button.
-  </para>
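-  <para>
-  For reference, the two polling callbacks in <constant>struct
-  uart_ops</constant> have roughly the following shape; the
-  authoritative prototypes are in include/linux/serial_core.h:
-  <informalexample>
-  <programlisting>
-/* poll one character from the UART; called in atomic context */
-int  (*poll_get_char)(struct uart_port *port);
-/* emit one character to the UART; called in atomic context */
-void (*poll_put_char)(struct uart_port *port, unsigned char c);
-  </programlisting>
-  </informalexample>
-  </para>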
-  </sect2>
-  <sect2 id="kgdbocKbd">
-  <title>kgdboc and keyboards</title>
-  <para>The kgdboc driver contains logic to configure communications
-  with an attached keyboard.  The keyboard infrastructure is only
-  compiled into the kernel when CONFIG_KDB_KEYBOARD=y is set in the
-  kernel configuration.</para>
-  <para>The core polled keyboard driver for PS/2 type keyboards
-  is in drivers/char/kdb_keyboard.c.  This driver is hooked into the
-  debug core when kgdboc populates the callback in the array
-  called <constant>kdb_poll_funcs[]</constant>.
-  kdb_get_kbd_char() is the top-level function which polls the hardware
-  for single-character input.
-  </para>
-  </sect2>
-  <sect2 id="kgdbocKms">
-  <title>kgdboc and kms</title>
-  <para>The kgdboc driver contains logic to request the graphics
-  display to switch to a text context when you are using
-  "kgdboc=kms,kbd", provided that you have a video driver which has a
-  frame buffer console and atomic kernel mode setting support.</para>
-  <para>
-  Every time the kernel
-  debugger is entered it calls kgdboc_pre_exp_handler() which in turn
-  calls con_debug_enter() in the virtual console layer.  On resuming kernel
-  execution, the kernel debugger calls kgdboc_post_exp_handler() which
-  in turn calls con_debug_leave().</para>
-  <para>Any video driver that wants to be compatible with the kernel
-  debugger and the atomic kms callbacks must implement the
-  mode_set_base_atomic, fb_debug_enter and fb_debug_leave operations.
-  For the fb_debug_enter and fb_debug_leave the option exists to use
-  the generic drm fb helper functions or implement something custom for
-  the hardware.  The following example shows the initialization of the
-  .mode_set_base_atomic operation in
-  drivers/gpu/drm/i915/intel_display.c:
-  <informalexample>
-  <programlisting>
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-[...]
-        .mode_set_base_atomic = intel_pipe_set_base_atomic,
-[...]
-};
-  </programlisting>
-  </informalexample>
-  </para>
-  <para>Here is an example of how the i915 driver initializes the fb_debug_enter and fb_debug_leave functions to use the generic drm helpers in
-  drivers/gpu/drm/i915/intel_fb.c:
-  <informalexample>
-  <programlisting>
-static struct fb_ops intelfb_ops = {
-[...]
-       .fb_debug_enter = drm_fb_helper_debug_enter,
-       .fb_debug_leave = drm_fb_helper_debug_leave,
-[...]
-};
-  </programlisting>
-  </informalexample>
-  </para>
-  </sect2>
-  </sect1>
-  </chapter>
-  <chapter id="credits">
-     <title>Credits</title>
-	<para>
-		The following people have contributed to this document:
-		<orderedlist>
-			<listitem><para>Amit Kale<email>amitkale@linsyssoft.com</email></para></listitem>
-			<listitem><para>Tom Rini<email>trini@kernel.crashing.org</email></para></listitem>
-		</orderedlist>
-                In March 2008 this document was completely rewritten by:
-		<itemizedlist>
-		<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
-		</itemizedlist>
-                In Jan 2010 this document was updated to include kdb.
-		<itemizedlist>
-		<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
-		</itemizedlist>
-	</para>
-  </chapter>
-</book>
-
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
deleted file mode 100644
index 0320910..0000000
--- a/Documentation/DocBook/libata.tmpl
+++ /dev/null
@@ -1,1625 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="libataDevGuide">
- <bookinfo>
-  <title>libATA Developer's Guide</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Jeff</firstname>
-    <surname>Garzik</surname>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2003-2006</year>
-   <holder>Jeff Garzik</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-   The contents of this file are subject to the Open
-   Software License version 1.1 that can be found at
-   <ulink url="http://fedoraproject.org/wiki/Licensing:OSL1.1">http://fedoraproject.org/wiki/Licensing:OSL1.1</ulink>
-   and is included herein by reference.
-   </para>
-
-   <para>
-   Alternatively, the contents of this file may be used under the terms
-   of the GNU General Public License version 2 (the "GPL") as distributed
-   in the kernel source COPYING file, in which case the provisions of
-   the GPL are applicable instead of the above.  If you wish to allow
-   the use of your version of this file only under the terms of the
-   GPL and not to allow others to use your version of this file under
-   the OSL, indicate your decision by deleting the provisions above and
-   replace them with the notice and other provisions required by the GPL.
-   If you do not delete the provisions above, a recipient may use your
-   version of this file under either the OSL or the GPL.
-   </para>
-
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="libataIntroduction">
-     <title>Introduction</title>
-  <para>
-  libATA is a library used inside the Linux kernel to support ATA host
-  controllers and devices.  libATA provides an ATA driver API, class
-  transports for ATA and ATAPI devices, and SCSI&lt;-&gt;ATA translation
-  for ATA devices according to the T10 SAT specification.
-  </para>
-  <para>
-  This Guide documents the libATA driver API, library functions, library
-  internals, and a couple sample ATA low-level drivers.
-  </para>
-  </chapter>
-
-  <chapter id="libataDriverApi">
-     <title>libata Driver API</title>
-     <para>
-     struct ata_port_operations is defined for every low-level libata
-     hardware driver, and it controls how the low-level driver
-     interfaces with the ATA and SCSI layers.
-     </para>
-     <para>
-     FIS-based drivers will hook into the system with ->qc_prep() and
-     ->qc_issue() high-level hooks.  Hardware which behaves in a manner
-     similar to PCI IDE hardware may utilize several generic helpers,
-     defining at a bare minimum the bus I/O addresses of the ATA shadow
-     register blocks.
-     </para>
-     <sect1>
-        <title>struct ata_port_operations</title>
-
-	<sect2><title>Disable ATA port</title>
-	<programlisting>
-void (*port_disable) (struct ata_port *);
-	</programlisting>
-
-	<para>
-	Called from ata_bus_probe() error path, as well as when
-	unregistering from the SCSI module (rmmod, hot unplug).
-	This function should do whatever needs to be done to take the
-	port out of use.  In most cases, ata_port_disable() can be used
-	as this hook.
-	</para>
-	<para>
-	Called from ata_bus_probe() on a failed probe.
-	Called from ata_scsi_release().
-	</para>
-
-	</sect2>
-
-	<sect2><title>Post-IDENTIFY device configuration</title>
-	<programlisting>
-void (*dev_config) (struct ata_port *, struct ata_device *);
-	</programlisting>
-
-	<para>
-	Called after IDENTIFY [PACKET] DEVICE is issued to each device
-	found.  Typically used to apply device-specific fixups prior to
-	issue of SET FEATURES - XFER MODE, and prior to operation.
-	</para>
-	<para>
-	This entry may be specified as NULL in ata_port_operations.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Set PIO/DMA mode</title>
-	<programlisting>
-void (*set_piomode) (struct ata_port *, struct ata_device *);
-void (*set_dmamode) (struct ata_port *, struct ata_device *);
-void (*post_set_mode) (struct ata_port *);
-unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned int);
-	</programlisting>
-
-	<para>
-	Hooks called prior to the issue of SET FEATURES - XFER MODE
-	command.  The optional ->mode_filter() hook is called when libata
-	has built a mask of the possible modes. This is passed to the 
-	->mode_filter() function which should return a mask of valid modes
-	after filtering those unsuitable due to hardware limits. It is not
-	valid to use this interface to add modes.
-	</para>
-	<para>
-	dev->pio_mode and dev->dma_mode are guaranteed to be valid when
-	->set_piomode() and ->set_dmamode() are called. The timings for
-	any other drive sharing the cable will also be valid at this point.
-	That is, the library records the decisions for the modes of each
-	drive on a channel before it attempts to set any of them.
-	</para>
-	<para>
-	->post_set_mode() is
-	called unconditionally, after the SET FEATURES - XFER MODE
-	command completes successfully.
-	</para>
-
-	<para>
-	->set_piomode() is always called (if present), but
-	->set_dmamode() is only called if DMA is possible.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Taskfile read/write</title>
-	<programlisting>
-void (*sff_tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
-void (*sff_tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
-	</programlisting>
-
-	<para>
-	->tf_load() is called to load the given taskfile into hardware
-	registers / DMA buffers.  ->tf_read() is called to read the
-	hardware registers / DMA buffers, to obtain the current set of
-	taskfile register values.
-	Most drivers for taskfile-based hardware (PIO or MMIO) use
-	ata_sff_tf_load() and ata_sff_tf_read() for these hooks.
-	</para>
-
-	</sect2>
-
-	<sect2><title>PIO data read/write</title>
-	<programlisting>
-void (*sff_data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
-	</programlisting>
-
-	<para>
-All bmdma-style drivers must implement this hook.  This is the low-level
-operation that actually copies the data bytes during a PIO data
-transfer.
-Typically the driver will choose one of ata_sff_data_xfer_noirq(),
-ata_sff_data_xfer(), or ata_sff_data_xfer32().
-	</para>
-
-	</sect2>
-
-	<sect2><title>ATA command execute</title>
-	<programlisting>
-void (*sff_exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
-	</programlisting>
-
-	<para>
-	Causes an ATA command, previously loaded with
-	->tf_load(), to be initiated in hardware.
-	Most drivers for taskfile-based hardware use ata_sff_exec_command()
-	for this hook.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Per-cmd ATAPI DMA capabilities filter</title>
-	<programlisting>
-int (*check_atapi_dma) (struct ata_queued_cmd *qc);
-	</programlisting>
-
-	<para>
-Allow low-level driver to filter ATA PACKET commands, returning a status
-indicating whether or not it is OK to use DMA for the supplied PACKET
-command.
-	</para>
-	<para>
-	This hook may be specified as NULL, in which case libata will
-	assume that ATAPI DMA can be supported.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Read specific ATA shadow registers</title>
-	<programlisting>
-u8   (*sff_check_status)(struct ata_port *ap);
-u8   (*sff_check_altstatus)(struct ata_port *ap);
-	</programlisting>
-
-	<para>
-	Reads the Status/AltStatus ATA shadow register from
-	hardware.  On some hardware, reading the Status register has
-	the side effect of clearing the interrupt condition.
-	Most drivers for taskfile-based hardware use
-	ata_sff_check_status() for this hook.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Write specific ATA shadow register</title>
-	<programlisting>
-void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
-	</programlisting>
-
-	<para>
-	Write the device control ATA shadow register to the hardware.
-	Most drivers don't need to define this.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Select ATA device on bus</title>
-	<programlisting>
-void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
-	</programlisting>
-
-	<para>
-	Issues the low-level hardware command(s) that causes one of N
-	hardware devices to be considered 'selected' (active and
-	available for use) on the ATA bus.  This generally has no
-	meaning on FIS-based devices.
-	</para>
-	<para>
-	Most drivers for taskfile-based hardware use
-	ata_sff_dev_select() for this hook.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Private tuning method</title>
-	<programlisting>
-void (*set_mode) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-	By default libata performs drive and controller tuning in
-	accordance with the ATA timing rules and also applies blacklists
-	and cable limits. Some controllers need special handling and have
-	custom tuning rules, typically raid controllers that use ATA
-	commands but do not actually do drive timing.
-	</para>
-
-	<warning>
-	<para>
-	This hook should not be used to replace the standard controller
-	tuning logic when a controller has quirks. Replacing the default
-	tuning logic in that case would bypass handling for drive and
-	bridge quirks that may be important to data reliability. If a
-	controller needs to filter the mode selection it should use the
-	mode_filter hook instead.
-	</para>
-	</warning>
-
-	</sect2>
-
-	<sect2><title>Control PCI IDE BMDMA engine</title>
-	<programlisting>
-void (*bmdma_setup) (struct ata_queued_cmd *qc);
-void (*bmdma_start) (struct ata_queued_cmd *qc);
-void (*bmdma_stop) (struct ata_port *ap);
-u8   (*bmdma_status) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-When setting up an IDE BMDMA transaction, these hooks arm
-(->bmdma_setup), fire (->bmdma_start), and halt (->bmdma_stop)
-the hardware's DMA engine.  ->bmdma_status is used to read the standard
-PCI IDE DMA Status register.
-	</para>
-
-	<para>
-These hooks are typically either no-ops, or simply not implemented, in
-FIS-based drivers.
-	</para>
-	<para>
-Most legacy IDE drivers use ata_bmdma_setup() for the bmdma_setup()
-hook.  ata_bmdma_setup() will write the pointer to the PRD table to
-the IDE PRD Table Address register, enable DMA in the DMA Command
-register, and call exec_command() to begin the transfer.
-	</para>
-	<para>
-Most legacy IDE drivers use ata_bmdma_start() for the bmdma_start()
-hook.  ata_bmdma_start() will write the ATA_DMA_START flag to the DMA
-Command register.
-	</para>
-	<para>
-Many legacy IDE drivers use ata_bmdma_stop() for the bmdma_stop()
-hook.  ata_bmdma_stop() clears the ATA_DMA_START flag in the DMA
-command register.
-	</para>
-	<para>
-Many legacy IDE drivers use ata_bmdma_status() as the bmdma_status() hook.
-	</para>
-
-	</sect2>
-
-	<sect2><title>High-level taskfile hooks</title>
-	<programlisting>
-void (*qc_prep) (struct ata_queued_cmd *qc);
-int (*qc_issue) (struct ata_queued_cmd *qc);
-	</programlisting>
-
-	<para>
-	Higher-level hooks; these two can potentially supersede
-	several of the above taskfile/DMA engine hooks.  ->qc_prep is
-	called after the buffers have been DMA-mapped, and is typically
-	used to populate the hardware's DMA scatter-gather table.
-	Most drivers use the standard ata_qc_prep() helper function, but
-	more advanced drivers roll their own.
-	</para>
-	<para>
-	->qc_issue is used to make a command active, once the hardware
-	and S/G tables have been prepared.  IDE BMDMA drivers use the
-	helper function ata_qc_issue_prot() for taskfile protocol-based
-	dispatch.  More advanced drivers implement their own ->qc_issue.
-	</para>
-	<para>
-	ata_qc_issue_prot() calls ->tf_load(), ->bmdma_setup(), and
-	->bmdma_start() as necessary to initiate a transfer.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Exception and probe handling (EH)</title>
-	<programlisting>
-void (*eng_timeout) (struct ata_port *ap);
-void (*phy_reset) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-Deprecated.  Use ->error_handler() instead.
-	</para>
-
-	<programlisting>
-void (*freeze) (struct ata_port *ap);
-void (*thaw) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-ata_port_freeze() is called when HSM violations or some other
-condition disrupts normal operation of the port.  A frozen port
-is not allowed to perform any operation until the port is
-thawed, which usually follows a successful reset.
-	</para>
-
-	<para>
-The optional ->freeze() callback can be used for freezing the port
-hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
-port cannot be frozen hardware-wise, the interrupt handler
-must ack and clear interrupts unconditionally while the port
-is frozen.
-	</para>
-	<para>
-The optional ->thaw() callback is called to perform the opposite of ->freeze():
-prepare the port for normal operation once again.  Unmask interrupts,
-start DMA engine, etc.
-	</para>
-
-	<programlisting>
-void (*error_handler) (struct ata_port *ap);
-	</programlisting>
-
-	<para>
-->error_handler() is a driver's hook into probe, hotplug, and recovery
-and other exceptional conditions.  The primary responsibility of an
-implementation is to call ata_do_eh() or ata_bmdma_drive_eh() with a set
-of EH hooks as arguments:
-	</para>
-
-	<para>
-'prereset' hook (may be NULL) is called during an EH reset, before any other actions
-are taken.
-	</para>
-
-	<para>
-'postreset' hook (may be NULL) is called after the EH reset is performed.
-	</para>
-
-	<para>
-Based on existing conditions, the severity of the problem, and hardware
-capabilities, either 'softreset' (may be NULL) or 'hardreset' (may be
-NULL) will be called to perform the low-level EH reset.
-	</para>
-
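-	<para>
-As a rough, hypothetical sketch (the exact helper names should be
-checked against libata itself), a BMDMA driver's hook might simply
-forward to ata_bmdma_drive_eh() with the standard reset callbacks:
-	</para>
-
-	<programlisting>
-static void my_error_handler(struct ata_port *ap)
-{
-	/* let the BMDMA EH helper drive probe, hotplug and recovery,
-	 * using the standard prereset/softreset/hardreset/postreset
-	 * callbacks */
-	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
-			   sata_std_hardreset, ata_std_postreset);
-}
-	</programlisting>
-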
-	<programlisting>
-void (*post_internal_cmd) (struct ata_queued_cmd *qc);
-	</programlisting>
-
-	<para>
-Perform any hardware-specific actions necessary to finish processing
-after executing a probe-time or EH-time command via ata_exec_internal().
-	</para>
-
-	</sect2>
-
-	<sect2><title>Hardware interrupt handling</title>
-	<programlisting>
-irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
-void (*irq_clear) (struct ata_port *);
-	</programlisting>
-
-	<para>
-	->irq_handler is the interrupt handling routine registered with
-	the system by libata.  ->irq_clear is called during probe just
-	before the interrupt handler is registered, to be sure hardware
-	is quiet.
-	</para>
-	<para>
-	The second argument, dev_instance, should be cast to a pointer
-	to struct ata_host_set.
-	</para>
-	<para>
-	Most legacy IDE drivers use ata_sff_interrupt() for the
-	irq_handler hook, which scans all ports in the host_set,
-	determines which queued command was active (if any), and calls
-	ata_sff_host_intr(ap,qc).
-	</para>
-	<para>
-	Most legacy IDE drivers use ata_sff_irq_clear() for the
-	irq_clear() hook, which simply clears the interrupt and error
-	flags in the DMA status register.
-	</para>
-
-	</sect2>
-
-	<sect2><title>SATA phy read/write</title>
-	<programlisting>
-int (*scr_read) (struct ata_port *ap, unsigned int sc_reg,
-		 u32 *val);
-int (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
-                   u32 val);
-	</programlisting>
-
-	<para>
-	Read and write standard SATA phy registers.  Currently only used
-	if ->phy_reset hook called the sata_phy_reset() helper function.
-	sc_reg is one of SCR_STATUS, SCR_CONTROL, SCR_ERROR, or SCR_ACTIVE.
-	</para>
-
-	</sect2>
-
-	<sect2><title>Init and shutdown</title>
-	<programlisting>
-int (*port_start) (struct ata_port *ap);
-void (*port_stop) (struct ata_port *ap);
-void (*host_stop) (struct ata_host_set *host_set);
-	</programlisting>
-
-	<para>
-	->port_start() is called just after the data structures for each
-	port are initialized.  Typically this is used to alloc per-port
-	DMA buffers / tables / rings, enable DMA engines, and similar
-	tasks.  Some drivers also use this entry point as a chance to
-	allocate driver-private memory for ap->private_data.
-	</para>
-	<para>
-	Many drivers use ata_port_start() as this hook or call
-	it from their own port_start() hooks.  ata_port_start()
-	allocates space for a legacy IDE PRD table and returns.
-	</para>
-	<para>
-	->port_stop() is called after ->host_stop().  Its sole function
-	is to release DMA/memory resources, now that they are no longer
-	actively being used.  Many drivers also free driver-private
-	data from port at this time.
-	</para>
-	<para>
-	->host_stop() is called after all ->port_stop() calls
-have completed.  The hook must finalize hardware shutdown, release DMA
-and other resources, etc.
-	This hook may be specified as NULL, in which case it is not called.
-	</para>
-
-	</sect2>
-
-     </sect1>
-  </chapter>
-
-  <chapter id="libataEH">
-        <title>Error handling</title>
-
-	<para>
-	This chapter describes how errors are handled under libata.
-	Readers are advised to read SCSI EH
-	(Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first.
-	</para>
-
-	<sect1><title>Origins of commands</title>
-	<para>
-	In libata, a command is represented with struct ata_queued_cmd
-	or qc.  qc's are preallocated during port initialization and
-	repetitively used for command executions.  Currently only one
-	qc is allocated per port but yet-to-be-merged NCQ branch
-	allocates one for each tag and maps each qc to NCQ tag 1-to-1.
-	</para>
-	<para>
-	libata commands can originate from two sources - libata itself
-	and SCSI midlayer.  libata internal commands are used for
-	initialization and error handling.  All normal blk requests
-	and commands for SCSI emulation are passed as SCSI commands
-	through queuecommand callback of SCSI host template.
-	</para>
-	</sect1>
-
-	<sect1><title>How commands are issued</title>
-
-	<variablelist>
-
-	<varlistentry><term>Internal commands</term>
-	<listitem>
-	<para>
-	First, qc is allocated and initialized using
-	ata_qc_new_init().  Although ata_qc_new_init() doesn't
-	implement any wait or retry mechanism when qc is not
-	available, internal commands are currently issued only during
-	initialization and error recovery, so no other command is
-	active and allocation is guaranteed to succeed.
-	</para>
-	<para>
-	Once allocated, the qc's taskfile is initialized for the command to
-	be executed.  qc currently has two mechanisms to notify
-	completion.  One is via the qc->complete_fn() callback and the
-	other is the completion qc->waiting.  qc->complete_fn() callback
-	is the asynchronous path used by normal SCSI translated
-	commands and qc->waiting is the synchronous (issuer sleeps in
-	process context) path used by internal commands.
-	</para>
-	<para>
-	Once initialization is complete, host_set lock is acquired
-	and the qc is issued.
-	</para>
-	</listitem>
-	</varlistentry>
-
-	<varlistentry><term>SCSI commands</term>
-	<listitem>
-	<para>
-	All libata drivers use ata_scsi_queuecmd() as
-	hostt->queuecommand callback.  scmds can either be simulated
-	or translated.  No qc is involved in processing a simulated
-	scmd.  The result is computed right away and the scmd is
-	completed.
-	</para>
-	<para>
-	For a translated scmd, ata_qc_new_init() is invoked to
-	allocate a qc and the scmd is translated into the qc.  SCSI
-	midlayer's completion notification function pointer is stored
-	into qc->scsidone.
-	</para>
-	<para>
-	qc->complete_fn() callback is used for completion
-	notification.  ATA commands use ata_scsi_qc_complete() while
-	ATAPI commands use atapi_qc_complete().  Both functions end up
-	calling qc->scsidone to notify upper layer when the qc is
-	finished.  After translation is completed, the qc is issued
-	with ata_qc_issue().
-	</para>
-	<para>
-	Note that SCSI midlayer invokes hostt->queuecommand while
-	holding host_set lock, so all above occur while holding
-	host_set lock.
-	</para>
-	</listitem>
-	</varlistentry>
-
-	</variablelist>
-	</sect1>
-
-	<sect1><title>How commands are processed</title>
-	<para>
-	Depending on which protocol and which controller are used,
-	commands are processed differently.  For the purpose of
-	discussion, a controller which uses taskfile interface and all
-	standard callbacks is assumed.
-	</para>
-	<para>
-	Currently 6 ATA command protocols are used.  They can be
-	sorted into the following four categories according to how
-	they are processed.
-	</para>
-
-	<variablelist>
-	   <varlistentry><term>ATA NO DATA or DMA</term>
-	   <listitem>
-	   <para>
-	   ATA_PROT_NODATA and ATA_PROT_DMA fall into this category.
-	   These types of commands don't require any software
-	   intervention once issued.  Device will raise interrupt on
-	   completion.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>ATA PIO</term>
-	   <listitem>
-	   <para>
-	   ATA_PROT_PIO is in this category.  libata currently
-	   implements PIO with polling.  ATA_NIEN bit is set to turn
-	   off interrupt and pio_task on ata_wq performs polling and
-	   IO.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>ATAPI NODATA or DMA</term>
-	   <listitem>
-	   <para>
-	   ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
-	   category.  packet_task is used to poll BSY bit after
-	   issuing PACKET command.  Once BSY is turned off by the
-	   device, packet_task transfers CDB and hands off processing
-	   to interrupt handler.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>ATAPI PIO</term>
-	   <listitem>
-	   <para>
-	   ATA_PROT_ATAPI is in this category.  ATA_NIEN bit is set
-	   and, as in ATAPI NODATA or DMA, packet_task submits cdb.
-	   However, after submitting cdb, further processing (data
-	   transfer) is handed off to pio_task.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-	</variablelist>
-        </sect1>
-
-	<sect1><title>How commands are completed</title>
-	<para>
-	Once issued, all qc's are either completed with
-	ata_qc_complete() or time out.  For commands which are handled
-	by interrupts, ata_host_intr() invokes ata_qc_complete(), and,
-	for PIO tasks, pio_task invokes ata_qc_complete().  In error
-	cases, packet_task may also complete commands.
-	</para>
-	<para>
-	ata_qc_complete() does the following.
-	</para>
-
-	<orderedlist>
-
-	<listitem>
-	<para>
-	DMA memory is unmapped.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	ATA_QCFLAG_ACTIVE is cleared from qc->flags.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	qc->complete_fn() callback is invoked.  If the return value of
-	the callback is not zero, completion is short-circuited and
-	ata_qc_complete() returns.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	__ata_qc_complete() is called, which does
-	   <orderedlist>
-
-	   <listitem>
-	   <para>
-	   qc->flags is cleared to zero.
-	   </para>
-	   </listitem>
-
-	   <listitem>
-	   <para>
-	   ap->active_tag and qc->tag are poisoned.
-	   </para>
-	   </listitem>
-
-	   <listitem>
-	   <para>
-	   qc->waiting is cleared &amp; completed (in that order).
-	   </para>
-	   </listitem>
-
-	   <listitem>
-	   <para>
-	   qc is deallocated by clearing appropriate bit in ap->qactive.
-	   </para>
-	   </listitem>
-
-	   </orderedlist>
-	</para>
-	</listitem>
-
-	</orderedlist>
-
-	<para>
-	So, it basically notifies the upper layer and deallocates the qc.  One
-	exception is the short-circuit path in step #3, which is used by
-	atapi_qc_complete().
-	</para>
-	<para>
-	For all non-ATAPI commands, whether they fail or not, almost
-	the same code path is taken and very little error handling
-	takes place.  A qc is completed with success status if it
-	succeeded, with failed status otherwise.
-	</para>
-	<para>
-	However, failed ATAPI commands require more handling as
-	REQUEST SENSE is needed to acquire sense data.  If an ATAPI
-	command fails, ata_qc_complete() is invoked with error status,
-	which in turn invokes atapi_qc_complete() via
-	qc->complete_fn() callback.
-	</para>
-	<para>
-	This makes atapi_qc_complete() set scmd->result to
-	SAM_STAT_CHECK_CONDITION, complete the scmd and return 1.  As
-	the sense data is empty but scmd->result is CHECK CONDITION,
-	the SCSI midlayer will invoke EH for the scmd, and returning 1
-	makes ata_qc_complete() return without deallocating the qc.
-	This leads us to ata_scsi_error() with a partially completed qc.
-	</para>
-
-	</sect1>
-
-	<sect1><title>ata_scsi_error()</title>
-	<para>
-	ata_scsi_error() is the current transportt->eh_strategy_handler()
-	for libata.  As discussed above, this will be entered in two
-	cases - timeout and ATAPI error completion.  This function
-	calls low level libata driver's eng_timeout() callback, the
-	standard callback for which is ata_eng_timeout().  It checks
-	if a qc is active and calls ata_qc_timeout() on the qc if so.
-	Actual error handling occurs in ata_qc_timeout().
-	</para>
-	<para>
-	If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and
-	completes the qc.  Note that as we're currently in EH, we
-	cannot call scsi_done.  As described in SCSI EH doc, a
-	recovered scmd should be either retried with
-	scsi_queue_insert() or finished with scsi_finish_command().
-	Here, we override qc->scsidone with scsi_finish_command() and
-	call ata_qc_complete().
-	</para>
-	<para>
-	If EH is invoked due to a failed ATAPI qc, the qc here is
-	completed but not deallocated.  The purpose of this
-	half-completion is to use the qc as place holder to make EH
-	code reach this place.  This is a bit hackish, but it works.
-	</para>
-	<para>
-	Once control reaches here, the qc is deallocated by invoking
-	__ata_qc_complete() explicitly.  Then, internal qc for REQUEST
-	SENSE is issued.  Once sense data is acquired, scmd is
-	finished by directly invoking scsi_finish_command() on the
-	scmd.  Note that as we already have completed and deallocated
-	the qc which was associated with the scmd, we don't need
-	to/cannot call ata_qc_complete() again.
-	</para>
-
-	</sect1>
-
-	<sect1><title>Problems with the current EH</title>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	Error representation is too crude.  Currently any and all
-	error conditions are represented with ATA STATUS and ERROR
-	registers.  Errors which aren't ATA device errors are treated
-	as ATA device errors by setting ATA_ERR bit.  Better error
-	descriptor which can properly represent ATA and other
-	errors/exceptions is needed.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	When handling timeouts, no action is taken to make the device
-	forget about the timed out command and become ready for new
-	commands.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	EH handling via ata_scsi_error() is not properly protected
-	from usual command processing.  On EH entrance, the device is
-	not in quiescent state.  Timed out commands may succeed or
-	fail any time.  pio_task and atapi_task may still be running.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Error recovery is too weak.  Devices / controllers causing HSM
-	mismatch errors and other errors quite often require a reset to
-	return to a known state.  Also, advanced error handling is
-	necessary to support features like NCQ and hotplug.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	ATA errors are directly handled in the interrupt handler and
-	PIO errors in pio_task.  This is problematic for advanced
-	error handling for the following reasons.
-	</para>
-	<para>
-	First, advanced error handling often requires context and
-	internal qc execution.
-	</para>
-	<para>
-	Second, even a simple failure (say, CRC error) needs
-	information gathering and could trigger complex error handling
-	(say, resetting &amp; reconfiguring).  Having multiple code
-	paths to gather information, enter EH and trigger actions
-	makes life painful.
-	</para>
-	<para>
-	Third, scattered EH code makes implementing low level drivers
-	difficult.  Low level drivers override libata callbacks.  If
-	EH is scattered over several places, each affected callback
-	should perform its part of error handling.  This can be error
-	prone and painful.
-	</para>
-	</listitem>
-
-	</itemizedlist>
-	</sect1>
-  </chapter>
-
-  <chapter id="libataExt">
-     <title>libata Library</title>
-!Edrivers/ata/libata-core.c
-  </chapter>
-
-  <chapter id="libataInt">
-     <title>libata Core Internals</title>
-!Idrivers/ata/libata-core.c
-  </chapter>
-
-  <chapter id="libataScsiInt">
-     <title>libata SCSI translation/emulation</title>
-!Edrivers/ata/libata-scsi.c
-!Idrivers/ata/libata-scsi.c
-  </chapter>
-
-  <chapter id="ataExceptions">
-     <title>ATA errors and exceptions</title>
-
-  <para>
-  This chapter tries to identify what error/exception conditions exist
-  for ATA/ATAPI devices and describe how they should be handled in
-  implementation-neutral way.
-  </para>
-
-  <para>
-  The term 'error' is used to describe conditions where either an
-  explicit error condition is reported from device or a command has
-  timed out.
-  </para>
-
-  <para>
-  The term 'exception' is either used to describe exceptional
-  conditions which are not errors (say, power or hotplug events), or
-  to describe both errors and non-error exceptional conditions.  Where
-  explicit distinction between error and exception is necessary, the
-  term 'non-error exception' is used.
-  </para>
-
-  <sect1 id="excat">
-     <title>Exception categories</title>
-     <para>
-     Exceptions are described primarily with respect to legacy
-     taskfile + bus master IDE interface.  If a controller provides
-     other, better mechanisms for error reporting, mapping those into
-     the categories described below shouldn't be difficult.
-     </para>
-
-     <para>
-     In the following sections, two recovery actions - reset and
-     reconfiguring transport - are mentioned.  These are described
-     further in <xref linkend="exrec"/>.
-     </para>
-
-     <sect2 id="excatHSMviolation">
-        <title>HSM violation</title>
-        <para>
-        This error is indicated when the STATUS value doesn't match the
-        HSM requirement while issuing or executing any ATA/ATAPI command.
-        </para>
-
-	<itemizedlist>
-	<title>Examples</title>
-
-        <listitem>
-	<para>
-	ATA_STATUS doesn't contain !BSY &amp;&amp; DRDY &amp;&amp; !DRQ while trying
-	to issue a command.
-        </para>
-	</listitem>
-
-        <listitem>
-	<para>
-	!BSY &amp;&amp; !DRQ during PIO data transfer.
-        </para>
-	</listitem>
-
-        <listitem>
-	<para>
-	DRQ on command completion.
-        </para>
-	</listitem>
-
-        <listitem>
-	<para>
-	!BSY &amp;&amp; ERR after CDB transfer starts but before the
-        last byte of CDB is transferred.  ATA/ATAPI standard states
-        that &quot;The device shall not terminate the PACKET command
-        with an error before the last byte of the command packet has
-        been written&quot; in the error outputs description of PACKET
-        command and the state diagram doesn't include such
-        transitions.
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	In these cases, HSM is violated and not much information
-	regarding the error can be acquired from STATUS or ERROR
-	register.  IOW, this error can be anything - driver bug,
-	faulty device, controller and/or cable.
-	</para>
-
-	<para>
-	As the HSM is violated, a reset is necessary to restore a known
-	state.  Reconfiguring the transport for a lower speed might be
-	helpful too, as transmission errors sometimes cause this kind of
-	error.
-	</para>
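-
-	<para>
-	As a minimal illustration of the first example above (the status
-	bit masks are the standard ATA ones; the helper itself is
-	hypothetical):
-	</para>
-
-	<programlisting>
-/* Hypothetical check: is it safe to issue a new command? */
-static int hsm_ok_to_issue(u8 status)
-{
-	/* !BSY &amp;&amp; DRDY &amp;&amp; !DRQ must hold before issuing */
-	return !(status &amp; ATA_BUSY) &amp;&amp;
-		(status &amp; ATA_DRDY) &amp;&amp;
-	       !(status &amp; ATA_DRQ);
-}
-	</programlisting>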
-     </sect2>
-     
-     <sect2 id="excatDevErr">
-        <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title>
-
-	<para>
-	These are errors detected and reported by ATA/ATAPI devices
-	indicating device problems.  For this type of error, the STATUS
-	and ERROR register values are valid and describe the error
-	condition.  Note that some ATA bus errors are detected by
-	ATA/ATAPI devices and reported using the same mechanism as
-	device errors.  Those cases are described later in this
-	section.
-	</para>
-
-	<para>
-	For ATA commands, this type of error is indicated by !BSY
-	&amp;&amp; ERR during command execution and on completion.
-	</para>
-
-	<para>For ATAPI commands,</para>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	!BSY &amp;&amp; ERR &amp;&amp; ABRT right after issuing PACKET
-	indicates that PACKET command is not supported and falls in
-	this category.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	!BSY &amp;&amp; ERR(==CHK) &amp;&amp; !ABRT after the last
-	byte of CDB is transferred indicates CHECK CONDITION and
-	doesn't fall in this category.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	!BSY &amp;&amp; ERR(==CHK) &amp;&amp; ABRT after the last byte
-        of CDB is transferred *probably* indicates CHECK CONDITION and
-        doesn't fall in this category.
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	Of errors detected as above, the following are not ATA/ATAPI
-	device errors but ATA bus errors and should be handled
-	according to <xref linkend="excatATAbusErr"/>.
-	</para>
-
-	<variablelist>
-
-	   <varlistentry>
-	   <term>CRC error during data transfer</term>
-	   <listitem>
-	   <para>
-	   This is indicated by ICRC bit in the ERROR register and
-	   means that corruption occurred during data transfer.  Up to
-	   ATA/ATAPI-7, the standard specifies that this bit is only
-	   applicable to UDMA transfers but ATA/ATAPI-8 draft revision
-	   1f says that the bit may be applicable to multiword DMA and
-	   PIO.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry>
-	   <term>ABRT error during data transfer or on completion</term>
-	   <listitem>
-	   <para>
-	   Up to ATA/ATAPI-7, the standard specifies that ABRT could be
-	   set on ICRC errors and on cases where a device is not able
-	   to complete a command.  Combined with the fact that MWDMA
-	   and PIO transfer errors aren't allowed to use ICRC bit up to
-	   ATA/ATAPI-7, it seems to imply that ABRT bit alone could
-	   indicate transfer errors.
-	   </para>
-	   <para>
-	   However, ATA/ATAPI-8 draft revision 1f removes the part
-	   that ICRC errors can turn on ABRT.  So, this is kind of
-	   gray area.  Some heuristics are needed here.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	</variablelist>
-
-	<para>
-	ATA/ATAPI device errors can be further categorized as follows.
-	</para>
-
-	<variablelist>
-
-	   <varlistentry>
-	   <term>Media errors</term>
-	   <listitem>
-	   <para>
-	   This is indicated by the UNC bit in the ERROR register.  An ATA
-	   device reports a UNC error only after a certain number of
-	   retries cannot recover the data, so there's not much else to do
-	   other than notifying the upper layer.
-	   </para>
-	   <para>
-	   READ and WRITE commands report CHS or LBA of the first
-	   failed sector but ATA/ATAPI standard specifies that the
-	   amount of transferred data on error completion is
-	   indeterminate, so we cannot assume that sectors preceding
-	   the failed sector have been transferred and thus cannot
-	   complete those sectors successfully as SCSI does.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry>
-	   <term>Media changed / media change requested error</term>
-	   <listitem>
-	   <para>
-	   &lt;&lt;TODO: fill here&gt;&gt;
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>Address error</term>
-	   <listitem>
-	   <para>
-	   This is indicated by IDNF bit in the ERROR register.
-	   Report to upper layer.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>Other errors</term>
-	   <listitem>
-	   <para>
-	   This can be invalid command or parameter indicated by ABRT
-	   ERROR bit or some other error condition.  Note that ABRT
-	   bit can indicate a lot of things including ICRC and Address
-	   errors.  Heuristics needed.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	</variablelist>
-
-	<para>
-	Depending on commands, not all STATUS/ERROR bits are
-	applicable.  These non-applicable bits are marked with
-	&quot;na&quot; in the output descriptions but up to ATA/ATAPI-7
-	no definition of &quot;na&quot; can be found.  However,
-	ATA/ATAPI-8 draft revision 1f describes &quot;N/A&quot; as
-	follows.
-	</para>
-
-	<blockquote>
-	<variablelist>
-	   <varlistentry><term>3.2.3.3a N/A</term>
-	   <listitem>
-	   <para>
-	   A keyword that indicates a field has no defined value in
-	   this standard and should not be checked by the host or
-	   device. N/A fields should be cleared to zero.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-	</variablelist>
-	</blockquote>
-
-	<para>
-	So, it seems reasonable to assume that &quot;na&quot; bits are
-	cleared to zero by devices and thus need no explicit masking.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatATAPIcc">
-        <title>ATAPI device CHECK CONDITION</title>
-
-	<para>
-	ATAPI device CHECK CONDITION error is indicated by set CHK bit
-	(ERR bit) in the STATUS register after the last byte of CDB is
-	transferred for a PACKET command.  For this kind of error,
-	sense data should be acquired to gather information regarding
-	the errors.  REQUEST SENSE packet command should be used to
-	acquire sense data.
-	</para>
-
-	<para>
-	Once sense data is acquired, this type of errors can be
-	handled similarly to other SCSI errors.  Note that sense data
-	may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
-	&amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR).  In such
-	cases, the error should be considered as an ATA bus error and
-	handled according to <xref linkend="excatATAbusErr"/>.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatNCQerr">
-        <title>ATA device error (NCQ)</title>
-
-	<para>
-	NCQ command error is indicated by cleared BSY and set ERR bit
-	during NCQ command phase (one or more NCQ commands
-	outstanding).  Although STATUS and ERROR registers will
-	contain valid values describing the error, READ LOG EXT is
-	required to clear the error condition, determine which command
-	has failed and acquire more information.
-	</para>
-
-	<para>
-	READ LOG EXT Log Page 10h reports which tag has failed and
-	taskfile register values describing the error.  With this
-	information the failed command can be handled as a normal ATA
-	command error as in <xref linkend="excatDevErr"/> and all
-	other in-flight commands must be retried.  Note that this
-	retry should not be counted - it's likely that commands
-	retried this way would have completed normally if it were not
-	for the failed command.
-	</para>
-
-	<para>
-	Note that ATA bus errors can be reported as ATA device NCQ
-	errors.  This should be handled as described in <xref
-	linkend="excatATAbusErr"/>.
-	</para>
-
-	<para>
-	If READ LOG EXT Log Page 10h fails or reports NQ, we're
-	thoroughly screwed.  This condition should be treated
-	according to <xref linkend="excatHSMviolation"/>.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatATAbusErr">
-        <title>ATA bus error</title>
-
-	<para>
-	ATA bus error means that data corruption occurred during
-	transmission over ATA bus (SATA or PATA).  This type of errors
-	can be indicated by
-	</para>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	ICRC or ABRT error as described in <xref linkend="excatDevErr"/>.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Controller-specific error completion with error information
-	indicating transmission error.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	On some controllers, command timeout.  In this case, there may
-	be a mechanism to determine that the timeout is due to
-	transmission error.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Unknown/random errors, timeouts and all sorts of weirdities.
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	As described above, transmission errors can cause a wide variety
-	of symptoms ranging from device ICRC errors to random device
-	lockups, and, in many cases, there is no way to tell if an
-	error condition is due to a transmission error or not;
-	therefore, it's necessary to employ some kind of heuristic
-	when dealing with errors and timeouts.  For example,
-	encountering repetitive ABRT errors for a known supported
-	command is likely to indicate an ATA bus error.
-	</para>
-
-	<para>
-	Once it's determined that ATA bus errors have possibly
-	occurred, lowering the ATA bus transmission speed is one of the
-	actions which may alleviate the problem.  See <xref
-	linkend="exrecReconf"/> for more information.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatPCIbusErr">
-        <title>PCI bus error</title>
-
-	<para>
-	Data corruption or other failures during transmission over PCI
-	(or other system bus).  For standard BMDMA, this is indicated
-	by the Error bit in the BMDMA Status register.  This type of
-	error must be logged as it indicates something is very wrong
-	with the system.  Resetting the host controller is recommended.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatLateCompletion">
-        <title>Late completion</title>
-
-	<para>
-	This occurs when a timeout fires and the timeout handler finds
-	out that the timed out command has completed successfully or
-	with error.  This is usually caused by lost interrupts.  This
-	type of error must be logged.  Resetting the host controller is
-	recommended.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatUnknown">
-        <title>Unknown error (timeout)</title>
-
-	<para>
-	This is when a timeout occurs and the command is still being
-	processed, or the host and device are in an unknown state.  When
-	this occurs, HSM could be in any valid or invalid state.  To
-	bring the device to known state and make it forget about the
-	timed out command, resetting is necessary.  The timed out
-	command may be retried.
-	</para>
-
-	<para>
-	Timeouts can also be caused by transmission errors.  Refer to
-	<xref linkend="excatATAbusErr"/> for more details.
-	</para>
-
-     </sect2>
-
-     <sect2 id="excatHoplugPM">
-        <title>Hotplug and power management exceptions</title>
-
-	<para>
-	&lt;&lt;TODO: fill here&gt;&gt;
-	</para>
-
-     </sect2>
-
-  </sect1>
-
-  <sect1 id="exrec">
-     <title>EH recovery actions</title>
-
-     <para>
-     This section discusses several important recovery actions.
-     </para>
-
-     <sect2 id="exrecClr">
-        <title>Clearing error condition</title>
-
-	<para>
-	Many controllers require their error registers to be cleared by
-	the error handler.  Different controllers may have different
-	requirements.
-	</para>
-
-	<para>
-	For SATA, it's strongly recommended to clear at least the SError
-	register during error handling.
-	</para>
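-
-	<para>
-	For example (illustrative only; scr_write() stands in for
-	whatever SCR register accessor the controller driver provides):
-	</para>
-
-	<programlisting>
-/* Clear all latched SError bits during error handling. */
-scr_write(ap, SCR_ERROR, 0xffffffff);
-	</programlisting>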
-     </sect2>
-
-     <sect2 id="exrecRst">
-        <title>Reset</title>
-
-	<para>
-	During EH, resetting is necessary in the following cases.
-	</para>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	HSM is in unknown or invalid state
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	HBA is in unknown or invalid state
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	EH needs to make HBA/device forget about in-flight commands
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	HBA/device behaves weirdly
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	Resetting during EH might be a good idea regardless of error
-	condition to improve EH robustness.  Whether to reset both or
-	either one of HBA and device depends on situation but the
-	following scheme is recommended.
-	</para>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	When it's known that HBA is in ready state but ATA/ATAPI
-	device is in unknown state, reset only device.
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	If HBA is in unknown state, reset both HBA and device.
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	HBA resetting is implementation specific.  For a controller
-	complying with taskfile/BMDMA PCI IDE, stopping the active DMA
-	transaction may be sufficient iff BMDMA state is the only HBA
-	context.  But even mostly taskfile/BMDMA PCI IDE compliant
-	controllers may have implementation specific requirements and
-	mechanisms to reset themselves.  This must be addressed by
-	specific drivers.
-	</para>
-
-	<para>
-	OTOH, ATA/ATAPI standard describes in detail ways to reset
-	ATA/ATAPI devices.
-	</para>
-
-	<variablelist>
-
-	   <varlistentry><term>PATA hardware reset</term>
-	   <listitem>
-	   <para>
-	   This is hardware initiated device reset signalled with
-	   asserted PATA RESET- signal.  There is no standard way to
-	   initiate hardware reset from software although some
-	   hardware provides registers that allow driver to directly
-	   tweak the RESET- signal.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>Software reset</term>
-	   <listitem>
-	   <para>
-	   This is achieved by turning CONTROL SRST bit on for at
-	   least 5us.  Both PATA and SATA support it but, in case of
-	   SATA, this may require controller-specific support as the
-	   second Register FIS to clear SRST should be transmitted
-	   while BSY bit is still set.  Note that on PATA, this resets
-	   both master and slave devices on a channel.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term>
-	   <listitem>
-	   <para>
-	   Although the ATA/ATAPI standard doesn't describe it exactly, EDD
-	   implies some level of resetting, possibly at a level similar
-	   to software reset.  The host-side EDD protocol can be handled
-	   with normal command processing and most SATA controllers
-	   should be able to handle EDD's just like other commands.
-	   As in software reset, EDD affects both devices on a PATA
-	   bus.
-	   </para>
-	   <para>
-	   Although EDD does reset devices, this doesn't suit error
-	   handling as EDD cannot be issued while BSY is set and it's
-	   unclear how it will act when the device is in an unknown/weird
-	   state.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>ATAPI DEVICE RESET command</term>
-	   <listitem>
-	   <para>
-	   This is very similar to software reset except that reset
-	   can be restricted to the selected device without affecting
-	   the other device sharing the cable.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	   <varlistentry><term>SATA phy reset</term>
-	   <listitem>
-	   <para>
-	   This is the preferred way of resetting a SATA device.  In
-	   effect, it's identical to PATA hardware reset.  Note that
-	   this can be done with the standard SCR Control register.
-	   As such, it's usually easier to implement than software
-	   reset.
-	   </para>
-	   </listitem>
-	   </varlistentry>
-
-	</variablelist>
-
-	<para>
-	One more thing to consider when resetting devices is that
-	resetting clears certain configuration parameters and they
-	need to be set to their previous or newly adjusted values
-	after reset.
-	</para>
-
-	<para>
-	The affected parameters are:
-	</para>
-
-	<itemizedlist>
-
-	<listitem>
-	<para>
-	CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Parameters set with SET FEATURES including transfer mode setting
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Block count set with SET MULTIPLE MODE
-	</para>
-	</listitem>
-
-	<listitem>
-	<para>
-	Other parameters (SET MAX, MEDIA LOCK...)
-	</para>
-	</listitem>
-
-	</itemizedlist>
-
-	<para>
-	ATA/ATAPI standard specifies that some parameters must be
-	maintained across hardware or software reset, but doesn't
-	strictly specify all of them.  Always reconfiguring needed
-	parameters after reset is required for robustness.  Note that
-	this also applies when resuming from deep sleep (power-off).
-	</para>
-
-	<para>
-	Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE /
-	IDENTIFY PACKET DEVICE be issued after any configuration
-	parameter is updated or after a hardware reset, and that the
-	result be used for further operation.  The OS driver is required
-	to implement a revalidation mechanism to support this.
-	</para>
-
-     </sect2>
-
-     <sect2 id="exrecReconf">
-        <title>Reconfigure transport</title>
-
-	<para>
-	For both PATA and SATA, a lot of corners are cut for cheap
-	connectors, cables or controllers and it's quite common to see
-	a high transmission error rate.  This can be mitigated by
-	lowering transmission speed.
-	</para>
-
-	<para>
-	The following is a possible scheme Jeff Garzik suggested.
-	</para>
-
-	<blockquote>
-	<para>
-	If more than $N (3?) transmission errors happen in 15 minutes,
-	</para>	
-	<itemizedlist>
-	<listitem>
-	<para>
-	if SATA, decrease SATA PHY speed.  if speed cannot be decreased,
-	</para>
-	</listitem>
-	<listitem>
-	<para>
-	decrease UDMA xfer speed.  if at UDMA0, switch to PIO4,
-	</para>
-	</listitem>
-	<listitem>
-	<para>
-	decrease PIO xfer speed.  if at PIO3, complain, but continue
-	</para>
-	</listitem>
-	</itemizedlist>
-	</blockquote>
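-
-	<para>
-	A hypothetical implementation of that ladder (all names are
-	illustrative, not existing libata functions) could look like:
-	</para>
-
-	<programlisting>
-/* Illustrative downgrade ladder following the quoted scheme. */
-static void example_downgrade_link(struct example_link *link)
-{
-	/* 1. SATA: try to lower the PHY speed first. */
-	if (link->is_sata &amp;&amp; example_lower_phy_speed(link) == 0)
-		return;
-
-	/* 2. Lower the UDMA transfer speed; at UDMA0 switch to PIO4. */
-	if (example_using_udma(link)) {
-		if (example_udma_mode(link) > 0)
-			example_lower_udma_speed(link);
-		else
-			example_set_pio_mode(link, 4);
-		return;
-	}
-
-	/* 3. Lower the PIO transfer speed; at PIO3 complain but continue. */
-	if (example_pio_mode(link) > 3)
-		example_lower_pio_speed(link);
-	else
-		example_complain_but_continue(link);
-}
-	</programlisting>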
-
-     </sect2>
-
-  </sect1>
-
-  </chapter>
-
-  <chapter id="PiixInt">
-     <title>ata_piix Internals</title>
-!Idrivers/ata/ata_piix.c
-  </chapter>
-
-  <chapter id="SILInt">
-     <title>sata_sil Internals</title>
-!Idrivers/ata/sata_sil.c
-  </chapter>
-
-  <chapter id="libataThanks">
-     <title>Thanks</title>
-  <para>
-  The bulk of the ATA knowledge comes thanks to long conversations with
-  Andre Hedrick (www.linux-ide.org), and long hours pondering the ATA
-  and SCSI specifications.
-  </para>
-  <para>
-  Thanks to Alan Cox for pointing out similarities 
-  between SATA and SCSI, and in general for motivation to hack on
-  libata.
-  </para>
-  <para>
-  libata's device detection
-  method, ata_pio_devchk, and in general all the early probing was
-  based on extensive study of Hale Landis's probe/reset code in his
-  ATADRVR driver (www.ata-atapi.com).
-  </para>
-  </chapter>
-
-</book>
diff --git a/Documentation/DocBook/librs.tmpl b/Documentation/DocBook/librs.tmpl
deleted file mode 100644
index 94f2136..0000000
--- a/Documentation/DocBook/librs.tmpl
+++ /dev/null
@@ -1,289 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="Reed-Solomon-Library-Guide">
- <bookinfo>
-  <title>Reed-Solomon Library Programming Interface</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Thomas</firstname>
-    <surname>Gleixner</surname>
-    <affiliation>
-     <address>
-      <email>tglx@linutronix.de</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2004</year>
-   <holder>Thomas Gleixner</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License version 2 as published by the Free Software Foundation.
-   </para>
-      
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-      
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-      
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="intro">
-      <title>Introduction</title>
-  <para>
-  	The generic Reed-Solomon Library provides encoding, decoding
-	and error correction functions.
-  </para>
-  <para>
-  	Reed-Solomon codes are used in communication and storage
-	applications to ensure data integrity. 
-  </para>
-  <para>
-  	This documentation is provided for developers who want to utilize
-	the functions provided by the library.
-  </para>
-  </chapter>
-  
-  <chapter id="bugs">
-     <title>Known Bugs And Assumptions</title>
-  <para>
-	None.	
-  </para>
-  </chapter>
-
-  <chapter id="usage">
-     	<title>Usage</title>
-	<para>
-		This chapter provides examples of how to use the library.
-	</para>
-	<sect1>
-		<title>Initializing</title>
-		<para>
-			The init function init_rs returns a pointer to an
-			rs decoder structure, which holds the necessary
-			information for encoding, decoding and error correction
-			with the given polynomial. It either uses an existing
-			matching decoder or creates a new one. On creation all
-			the lookup tables for fast en/decoding are created.
-			The function may take a while, so make sure not to 
-			call it in critical code paths.
-		</para>
-		<programlisting>
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/* Symbolsize is 10 (bits)
- * Primitive polynomial is x^10+x^3+1
- * first consecutive root is 0
- * primitive element to generate roots = 1
- * generator polynomial degree (number of roots) = 6
- */
-rs_decoder = init_rs (10, 0x409, 0, 1, 6);
-		</programlisting>
-	</sect1>
-	<sect1>
-		<title>Encoding</title>
-		<para>
-			The encoder calculates the Reed-Solomon code over
-			the given data length and stores the result in 
-			the parity buffer. Note that the parity buffer must
-			be initialized before calling the encoder.
-		</para>
-		<para>
-			The expanded data can be inverted on the fly by
-			providing a non-zero inversion mask. The expanded data is
-			XOR'ed with the mask. This is used e.g. for FLASH
-			ECC, where an all-0xFF buffer is inverted to an all-0x00 one.
-			The Reed-Solomon code for all 0x00 is all 0x00. The
-			code is inverted before being stored to FLASH so it is 0xFF
-			too. This prevents reading from an erased FLASH from
-			resulting in ECC errors.
-		</para>
-		<para>
-			The databytes are expanded to the given symbol size
-			on the fly. There is no support for encoding continuous
-			bitstreams with a symbol size != 8 at the moment. If
-			it becomes necessary, it should not be a big deal to implement
-			such functionality.
-		</para>
-		<programlisting>
-/* Parity buffer. Size = number of roots */
-uint16_t par[6];
-/* Initialize the parity buffer */
-memset(par, 0, sizeof(par));
-/* Encode 512 byte in data8. Store parity in buffer par */
-encode_rs8 (rs_decoder, data8, 512, par, 0);
-		</programlisting>
-	</sect1>
-	<sect1>
-		<title>Decoding</title>
-		<para>
-			The decoder calculates the syndrome over
-			the given data length and the received parity symbols
-			and corrects errors in the data.
-		</para>
-		<para>
-			If a syndrome is available from a hardware decoder
-			then the syndrome calculation is skipped.
-		</para>
-		<para>
-			The correction of the data buffer can be suppressed
-			by providing a correction pattern buffer and an error
-			location buffer to the decoder. The decoder stores the
-			calculated error location and the correction bitmask
-			in the given buffers. This is useful for hardware
-			decoders which use a weird bit ordering scheme.
-		</para>
-		<para>
-			The databytes are expanded to the given symbol size
-			on the fly. There is no support for decoding continuous
-			bitstreams with a symbol size != 8 at the moment. If
-			it becomes necessary, it should not be a big deal to implement
-			such functionality.
-		</para>
-		
-		<sect2>
-		<title>
-			Decoding with syndrome calculation, direct data correction
-		</title>
-		<programlisting>
-/* Parity buffer. Size = number of roots */
-uint16_t par[6];
-uint8_t  data[512];
-int numerr;
-/* Receive data */
-.....
-/* Receive parity */
-.....
-/* Decode 512 byte in data8.*/
-numerr = decode_rs8 (rs_decoder, data8, par, 512, NULL, 0, NULL, 0, NULL);
-		</programlisting>
-		</sect2>
-
-		<sect2>
-		<title>
-			Decoding with syndrome given by hardware decoder, direct data correction
-		</title>
-		<programlisting>
-/* Parity buffer. Size = number of roots */
-uint16_t par[6], syn[6];
-uint8_t  data[512];
-int numerr;
-/* Receive data */
-.....
-/* Receive parity */
-.....
-/* Get syndrome from hardware decoder */
-.....
-/* Decode 512 byte in data8.*/
-numerr = decode_rs8 (rs_decoder, data8, par, 512, syn, 0, NULL, 0, NULL);
-		</programlisting>
-		</sect2>
-
-		<sect2>
-		<title>
-			Decoding with syndrome given by hardware decoder, no direct data correction.
-		</title>
-		<para>
-			Note: It's not necessary to give data and received parity to the decoder.
-		</para>
-		<programlisting>
-/* Parity buffer. Size = number of roots */
-uint16_t par[6], syn[6], corr[8];
-uint8_t  data[512];
-int numerr, errpos[8];
-/* Receive data */
-.....
-/* Receive parity */
-.....
-/* Get syndrome from hardware decoder */
-.....
-/* Decode 512 byte in data8.*/
-numerr = decode_rs8 (rs_decoder, NULL, NULL, 512, syn, 0, errpos, 0, corr);
-for (i = 0; i &lt; numerr; i++) {
-	do_error_correction_in_your_buffer(errpos[i], corr[i]);
-}
-		</programlisting>
-		</sect2>
-	</sect1>
-	<sect1>
-		<title>Cleanup</title>
-		<para>
-			The function free_rs frees the allocated resources,
-			if the caller is the last user of the decoder.
-		</para>
-		<programlisting>
-/* Release resources */
-free_rs(rs_decoder);
-		</programlisting>
-	</sect1>
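-
-	<sect1>
-		<title>Putting it together</title>
-		<para>
-			The following sketch combines the calls shown above into a
-			single encode/decode round trip. It only illustrates the
-			order of the calls; buffer handling and error checking are
-			kept to a minimum.
-		</para>
-		<programlisting>
-static struct rs_control *rs_decoder;
-uint16_t par[6];
-uint8_t data8[512];
-int numerr;
-
-/* Symbolsize 10 bits, polynomial x^10+x^3+1, 6 roots (see above) */
-rs_decoder = init_rs (10, 0x409, 0, 1, 6);
-
-/* Encode: the parity buffer must be cleared first */
-memset(par, 0, sizeof(par));
-encode_rs8 (rs_decoder, data8, 512, par, 0);
-
-/* ... data8 and par are written out and read back ... */
-
-/* Decode and correct the data buffer in place */
-numerr = decode_rs8 (rs_decoder, data8, par, 512, NULL, 0, NULL, 0, NULL);
-
-/* Release the decoder when done */
-free_rs(rs_decoder);
-		</programlisting>
-	</sect1>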
-
-  </chapter>
-	
-  <chapter id="structs">
-     <title>Structures</title>
-     <para>
-     This chapter contains the autogenerated documentation of the structures which are
-     used in the Reed-Solomon Library and are relevant for a developer.
-     </para>
-!Iinclude/linux/rslib.h
-  </chapter>
-
-  <chapter id="pubfunctions">
-     <title>Public Functions Provided</title>
-     <para>
-     This chapter contains the autogenerated documentation of the Reed-Solomon functions
-     which are exported.
-     </para>
-!Elib/reed_solomon/reed_solomon.c
-  </chapter>
-  
-  <chapter id="credits">
-     <title>Credits</title>
-	<para>
-		The library code for encoding and decoding was written by Phil Karn.
-	</para>
-	<programlisting>
-		Copyright 2002, Phil Karn, KA9Q
- 		May be used under the terms of the GNU General Public License (GPL)
-	</programlisting>
-	<para>
-		The wrapper functions and interfaces are written by Thomas Gleixner.
-	</para>
-	<para>
-		Many users have provided bugfixes, improvements and helping hands for testing.
-		Thanks a lot.
-	</para>
-	<para>
-		The following people have contributed to this document:
-	</para>
-	<para>
-		Thomas Gleixner<email>tglx@linutronix.de</email>
-	</para>
-  </chapter>
-</book>
diff --git a/Documentation/DocBook/lsm.tmpl b/Documentation/DocBook/lsm.tmpl
deleted file mode 100644
index fe7664c..0000000
--- a/Documentation/DocBook/lsm.tmpl
+++ /dev/null
@@ -1,265 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<article class="whitepaper" id="LinuxSecurityModule" lang="en">
- <articleinfo>
- <title>Linux Security Modules:  General Security Hooks for Linux</title>
- <authorgroup>
- <author>
- <firstname>Stephen</firstname> 
- <surname>Smalley</surname>
- <affiliation>
- <orgname>NAI Labs</orgname>
- <address><email>ssmalley@nai.com</email></address>
- </affiliation>
- </author>
- <author>
- <firstname>Timothy</firstname> 
- <surname>Fraser</surname>
- <affiliation>
- <orgname>NAI Labs</orgname>
- <address><email>tfraser@nai.com</email></address>
- </affiliation>
- </author>
- <author>
- <firstname>Chris</firstname> 
- <surname>Vance</surname>
- <affiliation>
- <orgname>NAI Labs</orgname>
- <address><email>cvance@nai.com</email></address>
- </affiliation>
- </author>
- </authorgroup>
- </articleinfo>
-
-<sect1 id="Introduction"><title>Introduction</title>
-
-<para>
-In March 2001, the National Security Agency (NSA) gave a presentation
-about Security-Enhanced Linux (SELinux) at the 2.5 Linux Kernel
-Summit.  SELinux is an implementation of flexible and fine-grained
-nondiscretionary access controls in the Linux kernel, originally
-implemented as its own particular kernel patch.  Several other
-security projects (e.g. RSBAC, Medusa) have also developed flexible
-access control architectures for the Linux kernel, and various
-projects have developed particular access control models for Linux
-(e.g. LIDS, DTE, SubDomain).  Each project has developed and
-maintained its own kernel patch to support its security needs.
-</para>
-
-<para>
-In response to the NSA presentation, Linus Torvalds made a set of
-remarks that described a security framework he would be willing to
-consider for inclusion in the mainstream Linux kernel.  He described a
-general framework that would provide a set of security hooks to
-control operations on kernel objects and a set of opaque security
-fields in kernel data structures for maintaining security attributes.
-This framework could then be used by loadable kernel modules to
-implement any desired model of security.  Linus also suggested the
-possibility of migrating the Linux capabilities code into such a
-module.
-</para>
-
-<para>
-The Linux Security Modules (LSM) project was started by WireX to
-develop such a framework.  LSM is a joint development effort by
-several security projects, including Immunix, SELinux, SGI and Janus,
-and several individuals, including Greg Kroah-Hartman and James
-Morris, to develop a Linux kernel patch that implements this
-framework.  The patch is currently tracking the 2.4 series and is
-targeted for integration into the 2.5 development series.  This
-technical report provides an overview of the framework and the example
-capabilities security module provided by the LSM kernel patch.
-</para>
-
-</sect1>
-
-<sect1 id="framework"><title>LSM Framework</title>
-
-<para>
-The LSM kernel patch provides a general kernel framework to support
-security modules.  In particular, the LSM framework is primarily
-focused on supporting access control modules, although future
-development is likely to address other security needs such as
-auditing.  By itself, the framework does not provide any additional
-security; it merely provides the infrastructure to support security
-modules.  The LSM kernel patch also moves most of the capabilities
-logic into an optional security module, with the system defaulting
-to the traditional superuser logic.  This capabilities module
-is discussed further in <xref linkend="cap"/>.
-</para>
-
-<para>
-The LSM kernel patch adds security fields to kernel data structures
-and inserts calls to hook functions at critical points in the kernel
-code to manage the security fields and to perform access control.  It
-also adds functions for registering and unregistering security
-modules, and adds a general <function>security</function> system call
-to support new system calls for security-aware applications.
-</para>
-
-<para>
-The LSM security fields are simply <type>void*</type> pointers.  For
-process and program execution security information, security fields
-were added to <structname>struct task_struct</structname> and 
-<structname>struct linux_binprm</structname>.  For filesystem security
-information, a security field was added to 
-<structname>struct super_block</structname>.  For pipe, file, and socket
-security information, security fields were added to 
-<structname>struct inode</structname> and 
-<structname>struct file</structname>.  For packet and network device security
-information, security fields were added to
-<structname>struct sk_buff</structname> and
-<structname>struct net_device</structname>.  For System V IPC security
-information, security fields were added to
-<structname>struct kern_ipc_perm</structname> and
-<structname>struct msg_msg</structname>; additionally, the definitions
-for <structname>struct msg_msg</structname>, <structname>struct 
-msg_queue</structname>, and <structname>struct 
-shmid_kernel</structname> were moved to header files
-(<filename>include/linux/msg.h</filename> and
-<filename>include/linux/shm.h</filename> as appropriate) to allow
-the security modules to use these definitions.
-</para>
-
-<para>
-Each LSM hook is a function pointer in a global table,
-security_ops. This table is a
-<structname>security_operations</structname> structure as defined by
-<filename>include/linux/security.h</filename>.  Detailed documentation
-for each hook is included in this header file.  At present, this
-structure consists of a collection of substructures that group related
-hooks based on the kernel object (e.g. task, inode, file, sk_buff,
-etc) as well as some top-level hook function pointers for system
-operations.  This structure is likely to be flattened in the future
-for performance.  The placement of the hook calls in the kernel code
-is described by the "called:" lines in the per-hook documentation in
-the header file.  The hook calls can also be easily found in the
-kernel code by looking for the string "security_ops->".
-
-</para>
-
-<para>
-Linus mentioned per-process security hooks in his original remarks as a
-possible alternative to global security hooks.  However, if LSM were
-to start from the perspective of per-process hooks, then the base
-framework would have to deal with how to handle operations that
-involve multiple processes (e.g. kill), since each process might have
-its own hook for controlling the operation.  This would require a
-general mechanism for composing hooks in the base framework.
-Additionally, LSM would still need global hooks for operations that
-have no process context (e.g. network input operations).
-Consequently, LSM provides global security hooks, but a security
-module is free to implement per-process hooks (where that makes sense)
-by storing a security_ops table in each process' security field and
-then invoking these per-process hooks from the global hooks.
-The problem of composition is thus deferred to the module.
-</para>
-
-<para>
-The global security_ops table is initialized to a set of hook
-functions provided by a dummy security module that provides
-traditional superuser logic.  A <function>register_security</function>
-function (in <filename>security/security.c</filename>) is provided to
-allow a security module to set security_ops to refer to its own hook
-functions, and an <function>unregister_security</function> function is
-provided to revert security_ops to the dummy module hooks.  This
-mechanism is used to set the primary security module, which is
-responsible for making the final decision for each hook.
-</para>
-
-<para>
-LSM also provides a simple mechanism for stacking additional security
-modules with the primary security module.  It defines
-<function>register_security</function> and
-<function>unregister_security</function> hooks in the
-<structname>security_operations</structname> structure and provides
-<function>mod_reg_security</function> and
-<function>mod_unreg_security</function> functions that invoke these
-hooks after performing some sanity checking.  A security module can
-call these functions in order to stack with other modules.  However,
-the actual details of how this stacking is handled are deferred to the
-module, which can implement these hooks in any way it wishes
-(including always returning an error if it does not wish to support
-stacking).  In this manner, LSM again defers the problem of
-composition to the module.
-</para>
-
-<para>
-Although the LSM hooks are organized into substructures based on
-kernel object, all of the hooks can be viewed as falling into two
-major categories: hooks that are used to manage the security fields
-and hooks that are used to perform access control.  Examples of the
-first category of hooks include the
-<function>alloc_security</function> and
-<function>free_security</function> hooks defined for each kernel data
-structure that has a security field.  These hooks are used to allocate
-and free security structures for kernel objects.  The first category
-of hooks also includes hooks that set information in the security
-field after allocation, such as the <function>post_lookup</function>
-hook in <structname>struct inode_security_ops</structname>.  This hook
-is used to set security information for inodes after successful lookup
-operations.  An example of the second category of hooks is the
-<function>permission</function> hook in 
-<structname>struct inode_security_ops</structname>.  This hook checks
-permission when accessing an inode.
-</para>
-
-</sect1>
-
-<sect1 id="cap"><title>LSM Capabilities Module</title>
-
-<para>
-The LSM kernel patch moves most of the existing POSIX.1e capabilities
-logic into an optional security module stored in the file
-<filename>security/capability.c</filename>.  This change allows
-users who do not want to use capabilities to omit this code entirely
-from their kernel, instead using the dummy module for traditional
-superuser logic or any other module that they desire.  This change
-also allows the developers of the capabilities logic to maintain and
-enhance their code more freely, without needing to integrate patches
-back into the base kernel.
-</para>
-
-<para>
-In addition to moving the capabilities logic, the LSM kernel patch
-could move the capability-related fields from the kernel data
-structures into the new security fields managed by the security
-modules.  However, at present, the LSM kernel patch leaves the
-capability fields in the kernel data structures.  In his original
-remarks, Linus suggested that this might be preferable so that other
-security modules can be easily stacked with the capabilities module
-without needing to chain multiple security structures on the security field.
-It also avoids imposing extra overhead on the capabilities module
-to manage the security fields.  However, the LSM framework could
-certainly support such a move if it is determined to be desirable,
-with only a few additional changes described below.
-</para>
-
-<para>
-At present, the capabilities logic for computing process capabilities
-on <function>execve</function> and <function>set*uid</function>,
-checking capabilities for a particular process, saving and checking
-capabilities for netlink messages, and handling the
-<function>capget</function> and <function>capset</function> system
-calls have been moved into the capabilities module.  There are still a
-few locations in the base kernel where capability-related fields are
-directly examined or modified, but the current version of the LSM
-patch does allow a security module to completely replace the
-assignment and testing of capabilities.  These few locations would
-need to be changed if the capability-related fields were moved into
-the security field.  The following is a list of known locations that
-still perform such direct examination or modification of
-capability-related fields:
-<itemizedlist>
-<listitem><para><filename>fs/open.c</filename>:<function>sys_access</function></para></listitem>
-<listitem><para><filename>fs/lockd/host.c</filename>:<function>nlm_bind_host</function></para></listitem>
-<listitem><para><filename>fs/nfsd/auth.c</filename>:<function>nfsd_setuser</function></para></listitem>
-<listitem><para><filename>fs/proc/array.c</filename>:<function>task_cap</function></para></listitem>
-</itemizedlist>
-</para>
-
-</sect1>
-
-</article>
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
deleted file mode 100644
index b442921..0000000
--- a/Documentation/DocBook/mtdnand.tmpl
+++ /dev/null
@@ -1,1291 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="MTD-NAND-Guide">
- <bookinfo>
-  <title>MTD NAND Driver Programming Interface</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Thomas</firstname>
-    <surname>Gleixner</surname>
-    <affiliation>
-     <address>
-      <email>tglx@linutronix.de</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2004</year>
-   <holder>Thomas Gleixner</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License version 2 as published by the Free Software Foundation.
-   </para>
-      
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-      
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-      
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="intro">
-      <title>Introduction</title>
-  <para>
-  	The generic NAND driver supports almost all NAND and AG-AND based
-	chips and connects them to the Memory Technology Devices (MTD)
-	subsystem of the Linux Kernel.
-  </para>
-  <para>
-  	This documentation is provided for developers who want to implement
-	board drivers or filesystem drivers suitable for NAND devices.
-  </para>
-  </chapter>
-  
-  <chapter id="bugs">
-     <title>Known Bugs And Assumptions</title>
-  <para>
-	None.	
-  </para>
-  </chapter>
-
-  <chapter id="dochints">
-     <title>Documentation hints</title>
-     <para>
-     The function and structure docs are autogenerated. Each function and 
-     struct member has a short description which is marked with an [XXX] identifier.
-     The following chapters explain the meaning of those identifiers.
-     </para>
-     <sect1 id="Function_identifiers_XXX">
-	<title>Function identifiers [XXX]</title>
-     	<para>
-	The functions are marked with [XXX] identifiers in the short
-	comment. The identifiers explain the usage and scope of the
-	functions. Following identifiers are used:
-     	</para>
-	<itemizedlist>
-		<listitem><para>
-	  	[MTD Interface]</para><para>
-		These functions provide the interface to the MTD kernel API. 
-		They are not replaceable and provide functionality
-		which is completely hardware independent.
-		</para></listitem>
-		<listitem><para>
-	  	[NAND Interface]</para><para>
-		These functions are exported and provide the interface to the NAND kernel API. 
-		</para></listitem>
-		<listitem><para>
-	  	[GENERIC]</para><para>
-		Generic functions are not replaceable and provide functionality
-		which is completely hardware independent.
-		</para></listitem>
-		<listitem><para>
-	  	[DEFAULT]</para><para>
-		Default functions provide hardware related functionality which is suitable
-		for most of the implementations. These functions can be replaced by the
-		board driver if necessary. Those functions are called via pointers in the
-		NAND chip description structure. The board driver can set the functions which
-		should be replaced by board dependent functions before calling nand_scan().
-		If the function pointer is NULL on entry to nand_scan() then the pointer
-		is set to the default function which is suitable for the detected chip type.
-		</para></listitem>
-	</itemizedlist>
-     </sect1>
-     <sect1 id="Struct_member_identifiers_XXX">
-	<title>Struct member identifiers [XXX]</title>
-     	<para>
-	The struct members are marked with [XXX] identifiers in the 
-	comment. The identifiers explain the usage and scope of the
-	members. Following identifiers are used:
-     	</para>
-	<itemizedlist>
-		<listitem><para>
-	  	[INTERN]</para><para>
-		These members are for NAND driver internal use only and must not be
-		modified. Most of these values are calculated from the chip geometry
-		information which is evaluated during nand_scan().
-		</para></listitem>
-		<listitem><para>
-	  	[REPLACEABLE]</para><para>
-		Replaceable members hold hardware related functions which can be 
-		provided by the board driver. The board driver can set the functions which
-		should be replaced by board dependent functions before calling nand_scan().
-		If the function pointer is NULL on entry to nand_scan() then the pointer
-		is set to the default function which is suitable for the detected chip type.
-		</para></listitem>
-		<listitem><para>
-	  	[BOARDSPECIFIC]</para><para>
-		Board specific members hold hardware related information which must
-		be provided by the board driver. The board driver must set the function
-		pointers and datafields before calling nand_scan().
-		</para></listitem>
-		<listitem><para>
-	  	[OPTIONAL]</para><para>
-		Optional members can hold information relevant for the board driver. The
-		generic NAND driver code does not use this information.
-		</para></listitem>
-	</itemizedlist>
-     </sect1>
-  </chapter>   
-
-  <chapter id="basicboarddriver">
-     	<title>Basic board driver</title>
-	<para>
-		For most boards it will be sufficient to provide just the
-		basic functions and fill out some board dependent
-		members in the nand chip description structure.
-	</para>
-	<sect1 id="Basic_defines">
-		<title>Basic defines</title>
-		<para>
-			At least you have to provide a nand_chip structure
-			and storage for the ioremap'ed chip address.
-			You can allocate the nand_chip structure using
-			kmalloc or you can allocate it statically.
-			The NAND chip structure embeds an mtd structure
-			which will be registered to the MTD subsystem.
-			You can extract a pointer to the mtd structure
-			from a nand_chip pointer using the nand_to_mtd()
-			helper.
-		</para>
-		<para>
-			Kmalloc based example
-		</para>
-		<programlisting>
-static struct mtd_info *board_mtd;
-static void __iomem *baseaddr;
-		</programlisting>
-		<para>
-			Static example
-		</para>
-		<programlisting>
-static struct nand_chip board_chip;
-static void __iomem *baseaddr;
-		</programlisting>
-	</sect1>
-	<sect1 id="Partition_defines">
-		<title>Partition defines</title>
-		<para>
-			If you want to divide your device into partitions, then
-			define a partitioning scheme suitable to your board.
-		</para>
-		<programlisting>
-#define NUM_PARTITIONS 2
-static struct mtd_partition partition_info[] = {
-	{ .name = "Flash partition 1",
-	  .offset =  0,
-	  .size =    8 * 1024 * 1024 },
-	{ .name = "Flash partition 2",
-	  .offset =  MTDPART_OFS_NEXT,
-	  .size =    MTDPART_SIZ_FULL },
-};
-		</programlisting>
-	</sect1>
-	<sect1 id="Hardware_control_functions">
-		<title>Hardware control function</title>
-		<para>
-			The hardware control function provides access to the 
-			control pins of the NAND chip(s). 
-			The access can be done by GPIO pins or by address lines.
-			If you use address lines, make sure that the timing
-			requirements are met.
-		</para>
-		<para>
-			<emphasis>GPIO based example</emphasis>
-		</para>
-		<programlisting>
-static void board_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-	switch(cmd){
-		case NAND_CTL_SETCLE: /* Set CLE pin high */ break;
-		case NAND_CTL_CLRCLE: /* Set CLE pin low */ break;
-		case NAND_CTL_SETALE: /* Set ALE pin high */ break;
-		case NAND_CTL_CLRALE: /* Set ALE pin low */ break;
-		case NAND_CTL_SETNCE: /* Set nCE pin low */ break;
-		case NAND_CTL_CLRNCE: /* Set nCE pin high */ break;
-	}
-}
-		</programlisting>
-		<para>
-			<emphasis>Address lines based example.</emphasis> It's assumed that the
-			nCE pin is driven by a chip select decoder.
-		</para>
-		<programlisting>
-static void board_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	switch(cmd){
-		case NAND_CTL_SETCLE: this->IO_ADDR_W |= CLE_ADRR_BIT;  break;
-		case NAND_CTL_CLRCLE: this->IO_ADDR_W &amp;= ~CLE_ADRR_BIT; break;
-		case NAND_CTL_SETALE: this->IO_ADDR_W |= ALE_ADRR_BIT;  break;
-		case NAND_CTL_CLRALE: this->IO_ADDR_W &amp;= ~ALE_ADRR_BIT; break;
-	}
-}
-		</programlisting>
-	</sect1>
-	<sect1 id="Device_ready_function">
-		<title>Device ready function</title>
-		<para>
-			If the hardware interface has the ready busy pin of the NAND chip connected to a
-			GPIO or other accessible I/O pin, this function is used to read back the state of the
-			pin. The function takes the mtd info structure as its argument and should return 0 if
-			the device is busy (R/B pin is low) and 1 if the device is ready (R/B pin is high).
-			If the hardware interface does not give access to the ready busy pin, then
-			the function must not be defined and the function pointer this->dev_ready is set to NULL.		
-		</para>
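-		<para>
-			A minimal sketch, assuming the ready/busy line is wired to a
-			GPIO (the GPIO number is a board specific placeholder):
-		</para>
-		<programlisting>
-static int board_dev_ready(struct mtd_info *mtd)
-{
-	/* BOARD_NAND_RB_GPIO is board specific */
-	return gpio_get_value(BOARD_NAND_RB_GPIO) ? 1 : 0;
-}
-		</programlisting>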
-	</sect1>
-	<sect1 id="Init_function">
-		<title>Init function</title>
-		<para>
-			The init function allocates memory and sets up all the board
-			specific parameters and function pointers. When everything
-			is set up, nand_scan() is called. This function tries to
-			detect and identify the chip. If a chip is found, all the
-			internal data fields are initialized accordingly.
-			The structure(s) have to be zeroed out first and then filled with the necessary
-			information about the device.
-		</para>
-		<programlisting>
-static int __init board_init (void)
-{
-	struct nand_chip *this;
-	int err = 0;
-
-	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
-		printk ("Unable to allocate NAND MTD device structure.\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	board_mtd = nand_to_mtd(this);
-
-	/* map physical address */
-	baseaddr = ioremap(CHIP_PHYSICAL_ADDRESS, 1024);
-	if (!baseaddr) {
-		printk("Ioremap to access NAND chip failed\n");
-		err = -EIO;
-		goto out_mtd;
-	}
-
-	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = baseaddr;
-	this->IO_ADDR_W = baseaddr;
-	/* Reference hardware control function */
-	this->hwcontrol = board_hwcontrol;
-	/* Set command delay time, see datasheet for correct value */
-	this->chip_delay = CHIP_DEPENDEND_COMMAND_DELAY;
-	/* Assign the device ready function, if available */
-	this->dev_ready = board_dev_ready;
-	this->eccmode = NAND_ECC_SOFT;
-
-	/* Scan to find existence of the device */
-	if (nand_scan (board_mtd, 1)) {
-		err = -ENXIO;
-		goto out_ior;
-	}
-	
-	add_mtd_partitions(board_mtd, partition_info, NUM_PARTITIONS);
-	goto out;
-
-out_ior:
-	iounmap(baseaddr);
-out_mtd:
-	kfree (this);
-out:
-	return err;
-}
-module_init(board_init);
-		</programlisting>
-	</sect1>
-	<sect1 id="Exit_function">
-		<title>Exit function</title>
-		<para>
-			The exit function is only necessary if the driver is
-			compiled as a module. It releases all resources which
-			are held by the chip driver and unregisters the partitions
-			in the MTD layer.
-		</para>
-		<programlisting>
-#ifdef MODULE
-static void __exit board_cleanup (void)
-{
-	/* Release resources, unregister device */
-	nand_release (board_mtd);
-
-	/* unmap physical address */
-	iounmap(baseaddr);
-	
-	/* Free the MTD device structure */
-	kfree (mtd_to_nand(board_mtd));
-}
-module_exit(board_cleanup);
-#endif
-		</programlisting>
-	</sect1>
-  </chapter>
-
-  <chapter id="boarddriversadvanced">
-     	<title>Advanced board driver functions</title>
-	<para>
-		This chapter describes the advanced functionality of the NAND
-		driver. For a list of functions which can be overridden by the board
-		driver see the documentation of the nand_chip structure.
-	</para>
-	<sect1 id="Multiple_chip_control">
-		<title>Multiple chip control</title>
-		<para>
-			The nand driver can control chip arrays. Therefore the
-			board driver must provide its own select_chip function. This
-			function must (de)select the requested chip.
-			The function pointer in the nand_chip structure must
-			be set before calling nand_scan(). The maxchip parameter
-			of nand_scan() defines the maximum number of chips to
-			scan for. Make sure that the select_chip function can
-			handle the requested number of chips.
-		</para>
-		<para>
-			The nand driver concatenates the chips to one virtual
-			chip and provides this virtual chip to the MTD layer.
-		</para>
-		<para>
-			<emphasis>Note: The driver can only handle linear chip arrays
-			of equally sized chips. There is no support for
-			parallel arrays which extend the buswidth.</emphasis>
-		</para>
-		<para>
-			<emphasis>GPIO based example</emphasis>
-		</para>
-		<programlisting>
-static void board_select_chip (struct mtd_info *mtd, int chip)
-{
-	/* Deselect all chips, set all nCE pins high */
-	GPIO(BOARD_NAND_NCE) |= 0xff;	
-	if (chip >= 0)
-		GPIO(BOARD_NAND_NCE) &amp;= ~ (1 &lt;&lt; chip);
-}
-		</programlisting>
-		<para>
-			<emphasis>Address lines based example.</emphasis>
-			It's assumed that the nCE pins are connected to an
-			address decoder.
-		</para>
-		<programlisting>
-static void board_select_chip (struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	
-	/* Deselect all chips */
-	this->IO_ADDR_R &amp;= ~BOARD_NAND_ADDR_MASK;
-	this->IO_ADDR_W &amp;= ~BOARD_NAND_ADDR_MASK;
-	switch (chip) {
-	case 0:
-		this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIP0;
-		this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIP0;
-		break;
-	....	
-	case n:
-		this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIPn;
-		this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIPn;
-		break;
-	}	
-}
-		</programlisting>
-	</sect1>
-	<sect1 id="Hardware_ECC_support">
-		<title>Hardware ECC support</title>
-		<sect2 id="Functions_and_constants">
-			<title>Functions and constants</title>
-			<para>
-				The nand driver supports four different types of
-				hardware ECC.
-				<itemizedlist>
-				<listitem><para>NAND_ECC_HW3_256</para><para>
-				Hardware ECC generator providing 3 bytes ECC per
-				256 bytes.
-				</para>	</listitem>
-				<listitem><para>NAND_ECC_HW3_512</para><para>
-				Hardware ECC generator providing 3 bytes ECC per
-				512 bytes.
-				</para>	</listitem>
-				<listitem><para>NAND_ECC_HW6_512</para><para>
-				Hardware ECC generator providing 6 bytes ECC per
-				512 bytes.
-				</para>	</listitem>
-				<listitem><para>NAND_ECC_HW8_512</para><para>
-				Hardware ECC generator providing 8 bytes ECC per
-				512 bytes.
-				</para>	</listitem>
-				</itemizedlist>
-				If your hardware generator has different functionality,
-				add it at the appropriate place in nand_base.c.
-			</para>
-			<para>
-				The board driver must provide the following functions
-				(a hedged skeleton of these callbacks is shown after this list):
-				<itemizedlist>
-				<listitem><para>enable_hwecc</para><para>
-				This function is called before reading / writing to
-				the chip. Reset or initialize the hardware generator
-				in this function. The function is called with an
-				argument which lets you distinguish between read 
-				and write operations.
-				</para>	</listitem>
-				<listitem><para>calculate_ecc</para><para>
-				This function is called after read / write from / to
-				the chip. Transfer the ECC from the hardware to
-				the buffer. If the option NAND_HWECC_SYNDROME is set
-				then the function is only called on write. See below.
-				</para>	</listitem>
-				<listitem><para>correct_data</para><para>
-				In case of an ECC error this function is called for
-				error detection and correction. Return 1 or 2 if the
-				error could be corrected. If the error is
-				not correctable, return -1. If your hardware generator
-				matches the default algorithm of the nand_ecc software
-				generator then use the correction function provided
-				by nand_ecc instead of implementing duplicated code.
-				</para>	</listitem>
-				</itemizedlist>
-			</para>
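-			<para>
-				A hedged skeleton of these callbacks is shown below. The
-				function names follow the naming style of the board driver
-				examples in this document; the actual register accesses are
-				left as comments because they are entirely board specific.
-			</para>
-			<programlisting>
-static void board_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	/* Reset / arm the hardware ECC generator. The mode argument
-	 * distinguishes between read and write operations. */
-}
-
-static int board_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-			       u_char *ecc_code)
-{
-	/* Copy the ECC bytes computed by the hardware into ecc_code */
-	return 0;
-}
-
-static int board_correct_data(struct mtd_info *mtd, u_char *dat,
-			      u_char *read_ecc, u_char *calc_ecc)
-{
-	/* Return the number of corrected bits, or -1 if the error
-	 * is not correctable */
-	return 0;
-}
-			</programlisting>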
-		</sect2>
-		<sect2 id="Hardware_ECC_with_syndrome_calculation">
-		<title>Hardware ECC with syndrome calculation</title>
-			<para>
-				Many hardware ECC implementations provide Reed-Solomon
-				codes and calculate an error syndrome on read. The syndrome
-				must be converted to a standard Reed-Solomon syndrome
-				before calling the error correction code in the generic
-				Reed-Solomon library.
-			</para>
-			<para>
-				The ECC bytes must be placed immediately after the data
-				bytes in order to make the syndrome generator work. This
-				is contrary to the usual layout used by software ECC. The
-				separation of data and out of band area is no longer
-				possible. The nand driver code handles this layout and
-				the remaining free bytes in the oob area are managed by 
-				the autoplacement code. Provide a matching oob-layout
-				in this case. See rts_from4.c and diskonchip.c for 
-				implementation reference. In those cases we must also
-				use bad block tables on FLASH, because the ECC layout
-				interferes with the bad block marker positions.
-				See bad block table support for details.
-			</para>
-		</sect2>
-	</sect1>
-	<sect1 id="Bad_Block_table_support">
-		<title>Bad block table support</title>
-		<para>
-			Most NAND chips mark the bad blocks at a defined
-			position in the spare area. Those blocks must 
-			not be erased under any circumstances as the bad 
-			block information would be lost.
-			It is possible to check the bad block mark each
-			time a block is accessed by reading the
-			spare area of the first page in the block, but this
-			is time consuming, so a bad block table is used instead.
-		</para>
-		<para>
-			The nand driver supports various types of bad block
-			tables.
-			<itemizedlist>
-			<listitem><para>Per device</para><para>
-			The bad block table contains all bad block information
-			of the device which can consist of multiple chips.
-			</para>	</listitem>
-			<listitem><para>Per chip</para><para>
-			A bad block table is used per chip and contains the
-			bad block information for this particular chip.
-			</para>	</listitem>
-			<listitem><para>Fixed offset</para><para>
-			The bad block table is located at a fixed offset
-			in the chip (device). This applies to various
-			DiskOnChip devices.
-			</para>	</listitem>
-			<listitem><para>Automatically placed</para><para>
-			The bad block table is automatically placed and
-			detected either at the end or at the beginning
-			of a chip (device).
-			</para>	</listitem>
-			<listitem><para>Mirrored tables</para><para>
-			The bad block table is mirrored on the chip (device) to
-			allow updates of the bad block table without data loss.
-			</para>	</listitem>
-			</itemizedlist>
-		</para>
-		<para>	
-			nand_scan() calls the function nand_default_bbt(). 
-			nand_default_bbt() selects appropriate default
-			bad block table descriptors depending on the chip information
-			which was retrieved by nand_scan().
-		</para>
-		<para>
-			The standard policy is to scan the device for bad
-			blocks and build a RAM based bad block table, which
-			allows faster access than checking the bad block
-			information on the flash chip itself on every access.
-		</para>
-		<sect2 id="Flash_based_tables">
-			<title>Flash based tables</title>
-			<para>
-				It may be desired or necessary to keep a bad block table in FLASH.
-				For AG-AND chips this is mandatory, as they have no factory marked
-				bad blocks; they have factory marked good blocks. The marker pattern
-				is erased when the block is erased to be reused. So in case of
-				power loss before the pattern is written back to the chip, this block
-				would be lost and added to the bad blocks. Therefore we scan the
-				chip(s) for good blocks the first time we detect them and
-				store this information in a bad block table before erasing any
-				of the blocks.
-			</para>
-			<para>
-				The blocks in which the tables are stored are protected against
-				accidental access by marking them bad in the memory bad block
-				table. The bad block table management functions are allowed
-				to circumvent this protection.
-			</para>
-			<para>
-				The simplest way to activate the FLASH based bad block table support
-				is to set the option NAND_BBT_USE_FLASH in the bbt_options field of
-				the nand chip structure before calling nand_scan(). For AG-AND
-				chips this is done by default.
-				This activates the default FLASH based bad block table functionality 
-				of the NAND driver. The default bad block table options are
-				<itemizedlist>
-				<listitem><para>Store bad block table per chip</para></listitem>
-				<listitem><para>Use 2 bits per block</para></listitem>
-				<listitem><para>Automatic placement at the end of the chip</para></listitem>
-				<listitem><para>Use mirrored tables with version numbers</para></listitem>
-				<listitem><para>Reserve 4 blocks at the end of the chip</para></listitem>
-				</itemizedlist>
-			</para>
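-			<para>
-				A minimal sketch of the activation described above, in the
-				board init function (assuming the bbt_options member):
-			</para>
-			<programlisting>
-	/* Activate the flash based bad block table support */
-	this->bbt_options |= NAND_BBT_USE_FLASH;
-	/* ... then call nand_scan() as usual */
-			</programlisting>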
-		</sect2>
-		<sect2 id="User_defined_tables">
-			<title>User defined tables</title>
-			<para>
-				User defined tables are created by filling out a 
-				nand_bbt_descr structure and storing the pointer in the
-				nand_chip structure member bbt_td before calling nand_scan(). 
-				If a mirror table is necessary a second structure must be
-				created and a pointer to this structure must be stored
-				in bbt_md inside the nand_chip structure. If the bbt_md 
-				member is set to NULL then only the main table is used
-				and no scan for the mirrored table is performed.
-			</para>
-			<para>
-				The most important field in the nand_bbt_descr structure
-				is the options field. The options define most of the
-				table properties. Use the predefined constants from
-				nand.h to define the options; an example descriptor is
-				shown after this list.
-				<itemizedlist>
-				<listitem><para>Number of bits per block</para>
-				<para>The supported number of bits is 1, 2, 4, 8.</para></listitem>
-				<listitem><para>Table per chip</para>
-				<para>Setting the constant NAND_BBT_PERCHIP selects that
-				a bad block table is managed for each chip in a chip array.
-				If this option is not set then a per device bad block table
-				is used.</para></listitem>
-				<listitem><para>Table location is absolute</para>
-				<para>Use the option constant NAND_BBT_ABSPAGE and
-				define the absolute page number where the bad block
-				table starts in the field pages. If you have selected bad block
-				tables per chip and you have a multi chip array then the start page
-				must be given for each chip in the chip array. Note: no scan
-				for a table ident pattern is performed, so the fields
-				pattern, veroffs, offs, len can be left uninitialized.</para></listitem>
-				<listitem><para>Table location is automatically detected</para>
-				<para>The table can either be located in the first or the last good
-				blocks of the chip (device). Set NAND_BBT_LASTBLOCK to place
-				the bad block table at the end of the chip (device). The
-				bad block tables are marked and identified by a pattern which
-				is stored in the spare area of the first page in the block which
-				holds the bad block table. Store a pointer to the pattern  
-				in the pattern field. Furthermore, the length of the pattern has to be
-				stored in len and the offset in the spare area must be given
-				in the offs member of the nand_bbt_descr structure. For mirrored
-				bad block tables different patterns are mandatory.</para></listitem>
-				<listitem><para>Table creation</para>
-				<para>Set the option NAND_BBT_CREATE to enable the table creation
-				if no table can be found during the scan. Usually this is done only 
-				once if a new chip is found. </para></listitem>
-				<listitem><para>Table write support</para>
-				<para>Set the option NAND_BBT_WRITE to enable the table write support.
-				This allows the update of the bad block table(s) in case a block has
-				to be marked bad due to wear. The MTD interface function block_markbad
-				calls the update function of the bad block table. If the write
-				support is enabled then the table is updated on FLASH.</para>
-				<para>
-				Note: Write support should only be enabled for mirrored tables with
-				version control.
-				</para></listitem>
-				<listitem><para>Table version control</para>
-				<para>Set the option NAND_BBT_VERSION to enable the table version control.
-				It's highly recommended to enable this for mirrored tables with write
-				support. It ensures that the risk of losing the bad block
-				table information is reduced to losing only the information about the
-				one worn out block which is being marked bad. The version is stored in
-				4 consecutive bytes in the spare area of the device. The position of
-				the version number is defined by the member veroffs in the bad block table
-				descriptor.</para></listitem>
-				<listitem><para>Save block contents on write</para>
-				<para>
-				In case the block which holds the bad block table contains
-				other useful information, set the option NAND_BBT_SAVECONTENT. When
-				the bad block table is written, the whole block is read, the bad
-				block table is updated, the block is erased, and everything is
-				written back. If this option is not set, only the bad block table
-				is written and everything else in the block is ignored and erased.
-				</para></listitem>
-				<listitem><para>Number of reserved blocks</para>
-				<para>
-				For automatic placement some blocks must be reserved for
-				bad block table storage. The number of reserved blocks is defined 
-				in the maxblocks member of the bad block table description structure.
-				Reserving 4 blocks for mirrored tables is a reasonable default.
-				This also limits the number of blocks which are scanned for the bad
-				block table ident pattern.
-				</para></listitem>
-				</itemizedlist>
-			</para>
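-			<para>
-				A hedged example descriptor using the options above. The
-				offsets, the pattern and the reserved block count are
-				illustrative values, not requirements.
-			</para>
-			<programlisting>
-static uint8_t board_bbt_pattern[] = { 'B', 'b', 't', '0' };
-
-static struct nand_bbt_descr board_bbt_main_descr = {
-	/* table in the last good blocks, 2 bits per block, versioned */
-	.options	= NAND_BBT_LASTBLOCK | NAND_BBT_CREATE
-			  | NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs		= 8,	/* pattern offset in the spare area */
-	.veroffs	= 12,	/* version byte offset */
-	.len		= 4,	/* pattern length */
-	.maxblocks	= 4,	/* blocks reserved for the table */
-	.pattern	= board_bbt_pattern,
-};
-
-/* In the board init function, before nand_scan(): */
-this->bbt_td = &amp;board_bbt_main_descr;
-this->bbt_md = NULL;	/* add a mirror descriptor with its own pattern
-			 * before enabling NAND_BBT_WRITE */
-			</programlisting>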
-		</sect2>
-	</sect1>
-	<sect1 id="Spare_area_placement">
-		<title>Spare area (auto)placement</title>
-		<para>
-			The nand driver implements different possibilities for
-			placement of filesystem data in the spare area:
-			<itemizedlist>
-			<listitem><para>Placement defined by fs driver</para></listitem>
-			<listitem><para>Automatic placement</para></listitem>
-			</itemizedlist>
-			The default placement function is automatic placement. The
-			nand driver has built in default placement schemes for the
-			various chip types. If the default placement does not fit
-			due to hardware ECC functionality, then the board driver can
-			provide its own placement scheme.
-		</para>
-		<para>
-			File system drivers can provide their own placement scheme which
-			is used instead of the default placement scheme.
-		</para>
-		<para>
-			Placement schemes are defined by a nand_oobinfo structure
-	     		<programlisting>
-struct nand_oobinfo {
-	int	useecc;
-	int	eccbytes;
-	int	eccpos[24];
-	int	oobfree[8][2];
-};
-	     		</programlisting>
-			<itemizedlist>
-			<listitem><para>useecc</para><para>
-				The useecc member controls the ecc and placement function. The header
-				file include/mtd/mtd-abi.h contains constants to select ecc and
-				placement. MTD_NANDECC_OFF switches off ECC completely. This is
-				not recommended and is available for testing and diagnosis only.
-				MTD_NANDECC_PLACE selects caller defined placement, MTD_NANDECC_AUTOPLACE
-				selects automatic placement.
-			</para></listitem>
-			<listitem><para>eccbytes</para><para>
-				The eccbytes member defines the number of ecc bytes per page.
-			</para></listitem>
-			<listitem><para>eccpos</para><para>
-				The eccpos array holds the byte offsets in the spare area where
-				the ecc codes are placed.
-			</para></listitem>
-			<listitem><para>oobfree</para><para>
-				The oobfree array defines the areas in the spare area which can be
-				used for automatic placement. The information is given in the format
-				{offset, size}. offset defines the start of the usable area, size the
-				length in bytes. More than one area can be defined. The list is terminated
-				by a {0, 0} entry. An example scheme is shown after this list.
-			</para></listitem>
-			</itemizedlist>
-		</para>
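-		<para>
-			A hedged example: the following placement scheme mirrors the
-			built in default for 512 byte pages described later in this
-			chapter.
-		</para>
-		<programlisting>
-static struct nand_oobinfo board_oobinfo = {
-	.useecc		= MTD_NANDECC_AUTOPLACE,
-	.eccbytes	= 6,
-	.eccpos		= { 0, 1, 2, 3, 6, 7 },
-	.oobfree	= { { 8, 8 } },	/* bytes 8-15 free for fs data */
-};
-		</programlisting>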
-		<sect2 id="Placement_defined_by_fs_driver">
-			<title>Placement defined by fs driver</title>
-			<para>
-				The calling function provides a pointer to a nand_oobinfo
-				structure which defines the ecc placement. For writes the
-				caller must provide a spare area buffer along with the
-				data buffer. The spare area buffer size is (number of pages) *
-				(size of spare area). For reads the buffer size is
-				(number of pages) * ((size of spare area) + (number of ecc
-				steps per page) * sizeof (int)). The driver stores the
-				result of the ecc check for each tuple in the spare buffer.
-				The storage sequence is 
-			</para>
-			<para>
-				&lt;spare data page 0&gt;&lt;ecc result 0&gt;...&lt;ecc result n&gt;
-			</para>
-			<para>
-				...
-			</para>
-			<para>
-				&lt;spare data page n&gt;&lt;ecc result 0&gt;...&lt;ecc result n&gt;
-			</para>
-			<para>
-				This is a legacy mode used by YAFFS1.
-			</para>
-			<para>
-				If the spare area buffer is NULL then only the ECC placement is
-				done according to the given scheme in the nand_oobinfo structure.
-			</para>
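-			<para>
-				As a worked example of the buffer sizes above, assuming
-				512 byte pages with a 16 byte spare area, two ECC steps of
-				256 bytes per page and a 4 byte int: the spare area buffer
-				for writes needs 16 bytes per page, while the buffer for
-				reads needs 16 + 2 * 4 = 24 bytes per page.
-			</para>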
-		</sect2>
-		<sect2 id="Automatic_placement">
-			<title>Automatic placement</title>
-			<para>
-				Automatic placement uses the built in defaults to place the
-				ecc bytes in the spare area. If filesystem data has to be stored
-				to or read from the spare area, then the calling function must provide a
-				buffer. The buffer size per page is determined by the oobfree array in
-				the nand_oobinfo structure.
-			</para>
-			<para>
-				If the spare area buffer is NULL then only the ECC placement is
-				done according to the default builtin scheme.
-			</para>
-		</sect2>
-	</sect1>	
-	<sect1 id="Spare_area_autoplacement_default">
-		<title>Spare area autoplacement default schemes</title>
-		<sect2 id="pagesize_256">
-			<title>256 byte pagesize</title>
-<informaltable><tgroup cols="3"><tbody>
-<row>
-<entry>Offset</entry>
-<entry>Content</entry>
-<entry>Comment</entry>
-</row>
-<row>
-<entry>0x00</entry>
-<entry>ECC byte 0</entry>
-<entry>Error correction code byte 0</entry>
-</row>
-<row>
-<entry>0x01</entry>
-<entry>ECC byte 1</entry>
-<entry>Error correction code byte 1</entry>
-</row>
-<row>
-<entry>0x02</entry>
-<entry>ECC byte 2</entry>
-<entry>Error correction code byte 2</entry>
-</row>
-<row>
-<entry>0x03</entry>
-<entry>Autoplace 0</entry>
-<entry></entry>
-</row>
-<row>
-<entry>0x04</entry>
-<entry>Autoplace 1</entry>
-<entry></entry>
-</row>
-<row>
-<entry>0x05</entry>
-<entry>Bad block marker</entry>
-<entry>If any bit in this byte is zero, then this block is bad.
-This applies only to the first page in a block. In the remaining
-pages this byte is reserved</entry>
-</row>
-<row>
-<entry>0x06</entry>
-<entry>Autoplace 2</entry>
-<entry></entry>
-</row>
-<row>
-<entry>0x07</entry>
-<entry>Autoplace 3</entry>
-<entry></entry>
-</row>
-</tbody></tgroup></informaltable>
-		</sect2>
-		<sect2 id="pagesize_512">
-			<title>512 byte pagesize</title>
-<informaltable><tgroup cols="3"><tbody>
-<row>
-<entry>Offset</entry>
-<entry>Content</entry>
-<entry>Comment</entry>
-</row>
-<row>
-<entry>0x00</entry>
-<entry>ECC byte 0</entry>
-<entry>Error correction code byte 0 of the lower 256 Byte data in
-this page</entry>
-</row>
-<row>
-<entry>0x01</entry>
-<entry>ECC byte 1</entry>
-<entry>Error correction code byte 1 of the lower 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x02</entry>
-<entry>ECC byte 2</entry>
-<entry>Error correction code byte 2 of the lower 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x03</entry>
-<entry>ECC byte 3</entry>
-<entry>Error correction code byte 0 of the upper 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x04</entry>
-<entry>reserved</entry>
-<entry>reserved</entry>
-</row>
-<row>
-<entry>0x05</entry>
-<entry>Bad block marker</entry>
-<entry>If any bit in this byte is zero, then this block is bad.
-This applies only to the first page in a block. In the remaining
-pages this byte is reserved</entry>
-</row>
-<row>
-<entry>0x06</entry>
-<entry>ECC byte 4</entry>
-<entry>Error correction code byte 1 of the upper 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x07</entry>
-<entry>ECC byte 5</entry>
-<entry>Error correction code byte 2 of the upper 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x08 - 0x0F</entry>
-<entry>Autoplace 0 - 7</entry>
-<entry></entry>
-</row>
-</tbody></tgroup></informaltable>
-		</sect2>
-		<sect2 id="pagesize_2048">
-			<title>2048 byte pagesize</title>
-<informaltable><tgroup cols="3"><tbody>
-<row>
-<entry>Offset</entry>
-<entry>Content</entry>
-<entry>Comment</entry>
-</row>
-<row>
-<entry>0x00</entry>
-<entry>Bad block marker</entry>
-<entry>If any bit in this byte is zero, then this block is bad.
-This applies only to the first page in a block. In the remaining
-pages this byte is reserved</entry>
-</row>
-<row>
-<entry>0x01</entry>
-<entry>Reserved</entry>
-<entry>Reserved</entry>
-</row>
-<row>
-<entry>0x02-0x27</entry>
-<entry>Autoplace 0 - 37</entry>
-<entry></entry>
-</row>
-<row>
-<entry>0x28</entry>
-<entry>ECC byte 0</entry>
-<entry>Error correction code byte 0 of the first 256 Byte data in
-this page</entry>
-</row>
-<row>
-<entry>0x29</entry>
-<entry>ECC byte 1</entry>
-<entry>Error correction code byte 1 of the first 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x2A</entry>
-<entry>ECC byte 2</entry>
-<entry>Error correction code byte 2 of the first 256 Bytes data in
-this page</entry>
-</row>
-<row>
-<entry>0x2B</entry>
-<entry>ECC byte 3</entry>
-<entry>Error correction code byte 0 of the second 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x2C</entry>
-<entry>ECC byte 4</entry>
-<entry>Error correction code byte 1 of the second 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x2D</entry>
-<entry>ECC byte 5</entry>
-<entry>Error correction code byte 2 of the second 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x2E</entry>
-<entry>ECC byte 6</entry>
-<entry>Error correction code byte 0 of the third 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x2F</entry>
-<entry>ECC byte 7</entry>
-<entry>Error correction code byte 1 of the third 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x30</entry>
-<entry>ECC byte 8</entry>
-<entry>Error correction code byte 2 of the third 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x31</entry>
-<entry>ECC byte 9</entry>
-<entry>Error correction code byte 0 of the fourth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x32</entry>
-<entry>ECC byte 10</entry>
-<entry>Error correction code byte 1 of the fourth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x33</entry>
-<entry>ECC byte 11</entry>
-<entry>Error correction code byte 2 of the fourth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x34</entry>
-<entry>ECC byte 12</entry>
-<entry>Error correction code byte 0 of the fifth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x35</entry>
-<entry>ECC byte 13</entry>
-<entry>Error correction code byte 1 of the fifth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x36</entry>
-<entry>ECC byte 14</entry>
-<entry>Error correction code byte 2 of the fifth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x37</entry>
-<entry>ECC byte 15</entry>
-<entry>Error correction code byte 0 of the sixth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x38</entry>
-<entry>ECC byte 16</entry>
-<entry>Error correction code byte 1 of the sixth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x39</entry>
-<entry>ECC byte 17</entry>
-<entry>Error correction code byte 2 of the sixth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x3A</entry>
-<entry>ECC byte 18</entry>
-<entry>Error correction code byte 0 of the seventh 256 Bytes of
-data in this page</entry>
-</row>
-<row>
-<entry>0x3B</entry>
-<entry>ECC byte 19</entry>
-<entry>Error correction code byte 1 of the seventh 256 Bytes of
-data in this page</entry>
-</row>
-<row>
-<entry>0x3C</entry>
-<entry>ECC byte 20</entry>
-<entry>Error correction code byte 2 of the seventh 256 Bytes of
-data in this page</entry>
-</row>
-<row>
-<entry>0x3D</entry>
-<entry>ECC byte 21</entry>
-<entry>Error correction code byte 0 of the eighth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x3E</entry>
-<entry>ECC byte 22</entry>
-<entry>Error correction code byte 1 of the eighth 256 Bytes of data
-in this page</entry>
-</row>
-<row>
-<entry>0x3F</entry>
-<entry>ECC byte 23</entry>
-<entry>Error correction code byte 2 of the eighth 256 Bytes of data
-in this page</entry>
-</row>
-</tbody></tgroup></informaltable>
-		</sect2>
-     	</sect1>
-  </chapter>
-
-  <chapter id="filesystems">
-     	<title>Filesystem support</title>
-	<para>
-		The NAND driver provides all necessary functions for a
-		filesystem via the MTD interface.
-	</para>
-	<para>
-		Filesystems must be aware of the NAND peculiarities and
-		restrictions. One major restriction of NAND flash is that you cannot
-		write to a page as often as you want. The number of consecutive writes
-		to a page, before it has to be erased again, is restricted to 1-3,
-		depending on the manufacturer's specification. The same applies to the spare area.
-	</para>
-	<para>
-		Therefore NAND aware filesystems must either write in page size chunks
-		or hold a write buffer to collect smaller writes until they add up to
-		a full page. Available NAND aware filesystems: JFFS2, YAFFS.
-	</para>
-	<para>
-		The spare area usage to store filesystem data is controlled by
-		the spare area placement functionality which is described in one
-		of the earlier chapters.
-	</para>
-  </chapter>	
-  <chapter id="tools">
-     	<title>Tools</title>
-	<para>
-		The MTD project provides a couple of helpful tools to handle NAND Flash.
-		<itemizedlist>
-		<listitem><para>flash_erase, flash_eraseall: Erase and format FLASH partitions</para></listitem>
-		<listitem><para>nandwrite: write filesystem images to NAND FLASH</para></listitem>
-		<listitem><para>nanddump: dump the contents of a NAND FLASH partition</para></listitem>
-		</itemizedlist>
-	</para>
-	<para>
-		These tools are aware of the NAND restrictions. Please use those tools
-		instead of complaining about errors which are caused by non-NAND-aware
-		access methods.
-	</para>
-  </chapter>	
-
-  <chapter id="defines">
-     <title>Constants</title>
-     <para>
-     This chapter describes the constants which might be relevant for a driver developer.
-     </para>
-     <sect1 id="Chip_option_constants">
-	<title>Chip option constants</title>
-     	<sect2 id="Constants_for_chip_id_table">
-		<title>Constants for chip id table</title>
-     		<para>
-		These constants are defined in nand.h. They are ORed together to describe
-		the chip functionality.
-     		<programlisting>
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16	0x00000002
-/* Device supports partial programming without padding */
-#define NAND_NO_PADDING		0x00000004
-/* Chip has cache program function */
-#define NAND_CACHEPRG		0x00000008
-/* Chip has copy back function */
-#define NAND_COPYBACK		0x00000010
-/* AND Chip which has 4 banks and a confusing page / block 
- * assignment. See Renesas datasheet for further information */
-#define NAND_IS_AND		0x00000020
-/* Chip has an array of 4 pages which can be read without
- * additional ready/busy waits */
-#define NAND_4PAGE_ARRAY	0x00000040 
-		</programlisting>
-     		</para>
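-     		<para>
-		A hedged usage sketch; the chosen combination of options is illustrative only.
-     		<programlisting>
-/* Example: 16 bit wide chip with cache program support, set before nand_scan() */
-this->options = NAND_BUSWIDTH_16 | NAND_CACHEPRG;
-		</programlisting>
-     		</para>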
-     	</sect2>
-     	<sect2 id="Constants_for_runtime_options">
-		<title>Constants for runtime options</title>
-     		<para>
-		These constants are defined in nand.h. They are ORed together to describe
-		the functionality.
-     		<programlisting>
-/* The hw ecc generator provides a syndrome instead of an ecc value on read.
- * This can only work if we have the ecc bytes directly behind the 
- * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
-#define NAND_HWECC_SYNDROME	0x00020000
-		</programlisting>
-     		</para>
-     	</sect2>
-     </sect1>	
-
-     <sect1 id="EEC_selection_constants">
-	<title>ECC selection constants</title>
-	<para>
-	Use these constants to select the ECC algorithm.
-  	<programlisting>
-/* No ECC. Usage is not recommended ! */
-#define NAND_ECC_NONE		0
-/* Software ECC 3 byte ECC per 256 Byte data */
-#define NAND_ECC_SOFT		1
-/* Hardware ECC 3 byte ECC per 256 Byte data */
-#define NAND_ECC_HW3_256	2
-/* Hardware ECC 3 byte ECC per 512 Byte data */
-#define NAND_ECC_HW3_512	3
-/* Hardware ECC 6 byte ECC per 512 Byte data */
-#define NAND_ECC_HW6_512	4
-/* Hardware ECC 8 byte ECC per 512 Byte data */
-#define NAND_ECC_HW8_512	6
-	</programlisting>
-	</para>
-     </sect1>	
-
-     <sect1 id="Hardware_control_related_constants">
-	<title>Hardware control related constants</title>
-	<para>
-	These constants describe the requested hardware access function when
-	the board-specific hardware control function is called.
-  	<programlisting>
-/* Select the chip by setting nCE to low */
-#define NAND_CTL_SETNCE 	1
-/* Deselect the chip by setting nCE to high */
-#define NAND_CTL_CLRNCE		2
-/* Select the command latch by setting CLE to high */
-#define NAND_CTL_SETCLE		3
-/* Deselect the command latch by setting CLE to low */
-#define NAND_CTL_CLRCLE		4
-/* Select the address latch by setting ALE to high */
-#define NAND_CTL_SETALE		5
-/* Deselect the address latch by setting ALE to low */
-#define NAND_CTL_CLRALE		6
-/* Set write protection by setting WP to high. Not used! */
-#define NAND_CTL_SETWP		7
-/* Clear write protection by setting WP to low. Not used! */
-#define NAND_CTL_CLRWP		8
-	</programlisting>
-	</para>
-     </sect1>	
-
-     <sect1 id="Bad_block_table_constants">
-	<title>Bad block table related constants</title>
-	<para>
-	These constants describe the options used for bad block
-	table descriptors.
-  	<programlisting>
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK	0x0000000F
-#define NAND_BBT_1BIT		0x00000001
-#define NAND_BBT_2BIT		0x00000002
-#define NAND_BBT_4BIT		0x00000004
-#define NAND_BBT_8BIT		0x00000008
-/* The bad block table is in the last good block of the device */
-#define	NAND_BBT_LASTBLOCK	0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE	0x00000020
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP	0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION	0x00000100
-/* Create a bbt if none exists */
-#define NAND_BBT_CREATE		0x00000200
-/* Write bbt if necessary */
-#define NAND_BBT_WRITE		0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT	0x00002000
-	</programlisting>
-	</para>
-     </sect1>	
-
-  </chapter>
-  	
-  <chapter id="structs">
-     <title>Structures</title>
-     <para>
-     This chapter contains the autogenerated documentation of the structures which are
-     used in the NAND driver and might be relevant for a driver developer. Each  
-     struct member has a short description which is marked with an [XXX] identifier.
-     See the chapter "Documentation hints" for an explanation.
-     </para>
-!Iinclude/linux/mtd/nand.h
-  </chapter>
-
-  <chapter id="pubfunctions">
-     <title>Public Functions Provided</title>
-     <para>
-     This chapter contains the autogenerated documentation of the NAND kernel API functions
-      which are exported. Each function has a short description which is marked with an [XXX] identifier.
-     See the chapter "Documentation hints" for an explanation.
-     </para>
-!Edrivers/mtd/nand/nand_base.c
-!Edrivers/mtd/nand/nand_bbt.c
-!Edrivers/mtd/nand/nand_ecc.c
-  </chapter>
-  
-  <chapter id="intfunctions">
-     <title>Internal Functions Provided</title>
-     <para>
-     This chapter contains the autogenerated documentation of the NAND driver internal functions.
-     Each function has a short description which is marked with an [XXX] identifier.
-     See the chapter "Documentation hints" for an explanation.
-     The functions marked with [DEFAULT] might be relevant for a board driver developer.
-     </para>
-!Idrivers/mtd/nand/nand_base.c
-!Idrivers/mtd/nand/nand_bbt.c
-<!-- No internal functions for kernel-doc:
-X!Idrivers/mtd/nand/nand_ecc.c
--->
-  </chapter>
-
-  <chapter id="credits">
-     <title>Credits</title>
-	<para>
-		The following people have contributed to the NAND driver:
-		<orderedlist>
-			<listitem><para>Steven J. Hill<email>sjhill@realitydiluted.com</email></para></listitem>
-			<listitem><para>David Woodhouse<email>dwmw2@infradead.org</email></para></listitem>
-			<listitem><para>Thomas Gleixner<email>tglx@linutronix.de</email></para></listitem>
-		</orderedlist>
-		A lot of users have provided bugfixes, improvements and helping hands for testing.
-		Thanks a lot.
-	</para>
-	<para>
-		The following people have contributed to this document:
-		<orderedlist>
-			<listitem><para>Thomas Gleixner<email>tglx@linutronix.de</email></para></listitem>
-		</orderedlist>
-	</para>
-  </chapter>
-</book>
diff --git a/Documentation/DocBook/networking.tmpl b/Documentation/DocBook/networking.tmpl
deleted file mode 100644
index 29df250..0000000
--- a/Documentation/DocBook/networking.tmpl
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="LinuxNetworking">
- <bookinfo>
-  <title>Linux Networking and Network Devices APIs</title>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="netcore">
-     <title>Linux Networking</title>
-     <sect1><title>Networking Base Types</title>
-!Iinclude/linux/net.h
-     </sect1>
-     <sect1><title>Socket Buffer Functions</title>
-!Iinclude/linux/skbuff.h
-!Iinclude/net/sock.h
-!Enet/socket.c
-!Enet/core/skbuff.c
-!Enet/core/sock.c
-!Enet/core/datagram.c
-!Enet/core/stream.c
-     </sect1>
-     <sect1><title>Socket Filter</title>
-!Enet/core/filter.c
-     </sect1>
-     <sect1><title>Generic Network Statistics</title>
-!Iinclude/uapi/linux/gen_stats.h
-!Enet/core/gen_stats.c
-!Enet/core/gen_estimator.c
-     </sect1>
-     <sect1><title>SUN RPC subsystem</title>
-<!-- The !D functionality is not perfect, garbage has to be protected by comments
-!Dnet/sunrpc/sunrpc_syms.c
--->
-!Enet/sunrpc/xdr.c
-!Enet/sunrpc/svc_xprt.c
-!Enet/sunrpc/xprt.c
-!Enet/sunrpc/sched.c
-!Enet/sunrpc/socklib.c
-!Enet/sunrpc/stats.c
-!Enet/sunrpc/rpc_pipe.c
-!Enet/sunrpc/rpcb_clnt.c
-!Enet/sunrpc/clnt.c
-     </sect1>
-     <sect1><title>WiMAX</title>
-!Enet/wimax/op-msg.c
-!Enet/wimax/op-reset.c
-!Enet/wimax/op-rfkill.c
-!Enet/wimax/stack.c
-!Iinclude/net/wimax.h
-!Iinclude/uapi/linux/wimax.h
-     </sect1>
-  </chapter>
-
-  <chapter id="netdev">
-     <title>Network device support</title>
-     <sect1><title>Driver Support</title>
-!Enet/core/dev.c
-!Enet/ethernet/eth.c
-!Enet/sched/sch_generic.c
-!Iinclude/linux/etherdevice.h
-!Iinclude/linux/netdevice.h
-     </sect1>
-     <sect1><title>PHY Support</title>
-!Edrivers/net/phy/phy.c
-!Idrivers/net/phy/phy.c
-!Edrivers/net/phy/phy_device.c
-!Idrivers/net/phy/phy_device.c
-!Edrivers/net/phy/mdio_bus.c
-!Idrivers/net/phy/mdio_bus.c
-     </sect1>
-<!-- FIXME: Removed for now since no structured comments in source
-     <sect1><title>Wireless</title>
-X!Enet/core/wireless.c
-     </sect1>
--->
-  </chapter>
-
-</book>
diff --git a/Documentation/DocBook/rapidio.tmpl b/Documentation/DocBook/rapidio.tmpl
deleted file mode 100644
index ac3cca3..0000000
--- a/Documentation/DocBook/rapidio.tmpl
+++ /dev/null
@@ -1,155 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-        "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
-	<!ENTITY rapidio SYSTEM "rapidio.xml">
-	]>
-
-<book id="RapidIO-Guide">
- <bookinfo>
-  <title>RapidIO Subsystem Guide</title>
-
-  <authorgroup>
-   <author>
-    <firstname>Matt</firstname>
-    <surname>Porter</surname>
-    <affiliation>
-     <address>
-      <email>mporter@kernel.crashing.org</email>
-      <email>mporter@mvista.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2005</year>
-   <holder>MontaVista Software, Inc.</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License version 2 as published by the Free Software Foundation.
-   </para>
-
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="intro">
-      <title>Introduction</title>
-  <para>
-	RapidIO is a high speed switched fabric interconnect with
-	features aimed at the embedded market.  RapidIO provides
-	support for memory-mapped I/O as well as message-based
-	transactions over the switched fabric network. RapidIO has
-	a standardized discovery mechanism not unlike the PCI bus
-	standard that allows simple detection of devices in a
-	network.
-  </para>
-  <para>
-  	This documentation is provided for developers intending
-	to support RapidIO on new architectures, write new drivers,
-	or to understand the subsystem internals.
-  </para>
-  </chapter>
-
-  <chapter id="bugs">
-     <title>Known Bugs and Limitations</title>
-
-     <sect1 id="known_bugs">
-     	<title>Bugs</title>
-	  <para>None. ;)</para>
-     </sect1>
-     <sect1 id="Limitations">
-     	<title>Limitations</title>
-	  <para>
-	    <orderedlist>
-	      <listitem><para>Access/management of RapidIO memory regions is not supported</para></listitem>
-	      <listitem><para>Multiple host enumeration is not supported</para></listitem>
-	    </orderedlist>
-	 </para>
-     </sect1>
-  </chapter>
-
-  <chapter id="drivers">
-     	<title>RapidIO driver interface</title>
-	<para>
-		Drivers are provided a set of calls in order
-		to interface with the subsystem to gather info
-		on devices, request/map memory region resources,
-		and manage mailboxes/doorbells.
-	</para>
-	<sect1 id="Functions">
-		<title>Functions</title>
-!Iinclude/linux/rio_drv.h
-!Edrivers/rapidio/rio-driver.c
-!Edrivers/rapidio/rio.c
-	</sect1>
-  </chapter>
-
-  <chapter id="internals">
-     <title>Internals</title>
-
-     <para>
-     This chapter contains the autogenerated documentation of the RapidIO
-     subsystem.
-     </para>
-
-     <sect1 id="Structures"><title>Structures</title>
-!Iinclude/linux/rio.h
-     </sect1>
-     <sect1 id="Enumeration_and_Discovery"><title>Enumeration and Discovery</title>
-!Idrivers/rapidio/rio-scan.c
-     </sect1>
-     <sect1 id="Driver_functionality"><title>Driver functionality</title>
-!Idrivers/rapidio/rio.c
-!Idrivers/rapidio/rio-access.c
-     </sect1>
-     <sect1 id="Device_model_support"><title>Device model support</title>
-!Idrivers/rapidio/rio-driver.c
-     </sect1>
-     <sect1 id="PPC32_support"><title>PPC32 support</title>
-!Iarch/powerpc/sysdev/fsl_rio.c
-     </sect1>
-  </chapter>
-
-  <chapter id="credits">
-     <title>Credits</title>
-	<para>
-		The following people have contributed to the RapidIO
-		subsystem directly or indirectly:
-		<orderedlist>
-			<listitem><para>Matt Porter<email>mporter@kernel.crashing.org</email></para></listitem>
-			<listitem><para>Randy Vinson<email>rvinson@mvista.com</email></para></listitem>
-			<listitem><para>Dan Malek<email>dan@embeddedalley.com</email></para></listitem>
-		</orderedlist>
-	</para>
-	<para>
-		The following people have contributed to this document:
-		<orderedlist>
-			<listitem><para>Matt Porter<email>mporter@kernel.crashing.org</email></para></listitem>
-		</orderedlist>
-	</para>
-  </chapter>
-</book>
diff --git a/Documentation/DocBook/s390-drivers.tmpl b/Documentation/DocBook/s390-drivers.tmpl
deleted file mode 100644
index 95bfc12..0000000
--- a/Documentation/DocBook/s390-drivers.tmpl
+++ /dev/null
@@ -1,161 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="s390drivers">
- <bookinfo>
-  <title>Writing s390 channel device drivers</title>
-
-  <authorgroup>
-   <author>
-    <firstname>Cornelia</firstname>
-    <surname>Huck</surname>
-    <affiliation>
-     <address>
-       <email>cornelia.huck@de.ibm.com</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2007</year>
-   <holder>IBM Corp.</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="intro">
-   <title>Introduction</title>
-  <para>
-    This document describes the interfaces available for device drivers that
-    drive s390 based channel attached I/O devices. This includes interfaces for
-    interaction with the hardware and interfaces for interacting with the
-    common driver core. Those interfaces are provided by the s390 common I/O
-    layer.
-  </para>
-  <para>
-    The document assumes familiarity with the technical terms associated
-    with the s390 channel I/O architecture. For a description of this
-    architecture, please refer to the "z/Architecture: Principles of
-    Operation", IBM publication no. SA22-7832.
-  </para>
-  <para>
-    While most I/O devices on an s390 system are typically driven through the
-    channel I/O mechanism described here, there are various other methods
-    (like the diag interface). These are out of the scope of this document.
-  </para>
-  <para>
-    Some additional information can also be found in the kernel source
-    under Documentation/s390/driver-model.txt.
-  </para>
-  </chapter>
-  <chapter id="ccw">
-   <title>The ccw bus</title>
-  <para>
-	The ccw bus typically contains the majority of devices available to
-	an s390 system. Named after the channel command word (ccw), the basic
-	command structure used to address its devices, the ccw bus contains
-	so-called channel attached devices. They are addressed via I/O
-	subchannels, visible on the css bus. A device driver for
-	channel-attached devices, however, will never interact	with the
-	subchannel directly, but only via the I/O device on the ccw bus,
-	the ccw device.
-  </para>
-    <sect1 id="channelIO">
-     <title>I/O functions for channel-attached devices</title>
-    <para>
-      Some hardware structures have been translated into C structures for use
-      by the common I/O layer and device drivers. For more information on
-      the hardware structures represented here, please consult the Principles
-      of Operation.
-    </para>
-!Iarch/s390/include/asm/cio.h
-    </sect1>
-    <sect1 id="ccwdev">
-     <title>ccw devices</title>
-    <para>
-      Devices that want to initiate channel I/O need to attach to the ccw bus.
-      Interaction with the driver core is done via the common I/O layer, which
-      provides the abstractions of ccw devices and ccw device drivers.
-    </para>
-    <para>
-      The functions that initiate or terminate channel I/O all act upon a
-      ccw device structure. Device drivers must not bypass those functions
-      or strange side effects may happen.
-    </para>
-!Iarch/s390/include/asm/ccwdev.h
-!Edrivers/s390/cio/device.c
-!Edrivers/s390/cio/device_ops.c
-    </sect1>
-    <sect1 id="cmf">
-     <title>The channel-measurement facility</title>
-  <para>
-	The channel-measurement facility provides a means to collect
-	measurement data which is made available by the channel subsystem
-	for each channel attached device.
-  </para>
-!Iarch/s390/include/asm/cmb.h
-!Edrivers/s390/cio/cmf.c
-    </sect1>
-  </chapter>
-
-  <chapter id="ccwgroup">
-   <title>The ccwgroup bus</title>
-  <para>
-	The ccwgroup bus only contains artificial devices, created by the user.
-	Many networking devices (e.g. qeth) are in fact composed of several
-	ccw devices (like read, write and data channel for qeth). The
-	ccwgroup bus provides a mechanism to create a meta-device which
-	contains those ccw devices as slave devices and can be associated
-	with the netdevice.
-  </para>
-   <sect1 id="ccwgroupdevices">
-    <title>ccw group devices</title>
-!Iarch/s390/include/asm/ccwgroup.h
-!Edrivers/s390/cio/ccwgroup.c
-   </sect1>
-  </chapter>
-
-  <chapter id="genericinterfaces">
-   <title>Generic interfaces</title>
-  <para>
-	Some interfaces are available to other drivers that do not necessarily
-	have anything to do with the busses described above, but still are
-	indirectly using basic infrastructure in the common I/O layer.
-	One example is the support for adapter interrupts.
-  </para>
-!Edrivers/s390/cio/airq.c
-  </chapter>
-
-</book>
diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
deleted file mode 100644
index 4b9b9b2..0000000
--- a/Documentation/DocBook/scsi.tmpl
+++ /dev/null
@@ -1,409 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="scsimid">
-  <bookinfo>
-    <title>SCSI Interfaces Guide</title>
-
-    <authorgroup>
-      <author>
-        <firstname>James</firstname>
-        <surname>Bottomley</surname>
-        <affiliation>
-          <address>
-            <email>James.Bottomley@hansenpartnership.com</email>
-          </address>
-        </affiliation>
-      </author>
-
-      <author>
-        <firstname>Rob</firstname>
-        <surname>Landley</surname>
-        <affiliation>
-          <address>
-            <email>rob@landley.net</email>
-          </address>
-        </affiliation>
-      </author>
-
-    </authorgroup>
-
-    <copyright>
-      <year>2007</year>
-      <holder>Linux Foundation</holder>
-    </copyright>
-
-    <legalnotice>
-      <para>
-        This documentation is free software; you can redistribute
-        it and/or modify it under the terms of the GNU General Public
-        License version 2.
-      </para>
-
-      <para>
-        This program is distributed in the hope that it will be
-        useful, but WITHOUT ANY WARRANTY; without even the implied
-        warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-        For more details see the file COPYING in the source
-        distribution of Linux.
-      </para>
-    </legalnotice>
-  </bookinfo>
-
-  <toc></toc>
-
-  <chapter id="intro">
-    <title>Introduction</title>
-    <sect1 id="protocol_vs_bus">
-      <title>Protocol vs bus</title>
-      <para>
-        Once upon a time, the Small Computer Systems Interface defined both
-        a parallel I/O bus and a data protocol to connect a wide variety of
-        peripherals (disk drives, tape drives, modems, printers, scanners,
-        optical drives, test equipment, and medical devices) to a host
-        computer.
-      </para>
-      <para>
-        Although the old parallel (fast/wide/ultra) SCSI bus has largely
-        fallen out of use, the SCSI command set is more widely used than ever
-        to communicate with devices over a number of different busses.
-      </para>
-      <para>
-        The <ulink url='http://www.t10.org/scsi-3.htm'>SCSI protocol</ulink>
-        is a big-endian peer-to-peer packet based protocol.  SCSI commands
-        are 6, 10, 12, or 16 bytes long, often followed by an associated data
-        payload.
-      </para>
-      <para>
-        SCSI commands can be transported over just about any kind of bus, and
-        are the default protocol for storage devices attached to USB, SATA,
-        SAS, Fibre Channel, FireWire, and ATAPI devices.  SCSI packets are
-        also commonly exchanged over Infiniband,
-        <ulink url='http://i2o.shadowconnect.com/faq.php'>I2O</ulink>, TCP/IP
-        (<ulink url='https://en.wikipedia.org/wiki/ISCSI'>iSCSI</ulink>), even
-        <ulink url='http://cyberelk.net/tim/parport/parscsi.html'>Parallel
-        ports</ulink>.
-      </para>
-    </sect1>
-    <sect1 id="subsystem_design">
-      <title>Design of the Linux SCSI subsystem</title>
-      <para>
-        The SCSI subsystem uses a three layer design, with upper, mid, and low
-        layers.  Every operation involving the SCSI subsystem (such as reading
-        a sector from a disk) uses one driver at each of the 3 levels: one
-        upper layer driver, one lower layer driver, and the SCSI midlayer.
-      </para>
-      <para>
-        The SCSI upper layer provides the interface between userspace and the
-        kernel, in the form of block and char device nodes for I/O and
-        ioctl().  The SCSI lower layer contains drivers for specific hardware
-        devices.
-      </para>
-      <para>
-        In between is the SCSI mid-layer, analogous to a network routing
-        layer such as the IPv4 stack.  The SCSI mid-layer routes a packet
-        based data protocol between the upper layer's /dev nodes and the
-        corresponding devices in the lower layer.  It manages command queues,
-        provides error handling and power management functions, and responds
-        to ioctl() requests.
-      </para>
-    </sect1>
-  </chapter>
-
-  <chapter id="upper_layer">
-    <title>SCSI upper layer</title>
-    <para>
-      The upper layer supports the user-kernel interface by providing
-      device nodes.
-    </para>
-    <sect1 id="sd">
-      <title>sd (SCSI Disk)</title>
-      <para>sd (sd_mod.o)</para>
-<!-- !Idrivers/scsi/sd.c -->
-    </sect1>
-    <sect1 id="sr">
-      <title>sr (SCSI CD-ROM)</title>
-      <para>sr (sr_mod.o)</para>
-    </sect1>
-    <sect1 id="st">
-      <title>st (SCSI Tape)</title>
-      <para>st (st.o)</para>
-    </sect1>
-    <sect1 id="sg">
-      <title>sg (SCSI Generic)</title>
-      <para>sg (sg.o)</para>
-    </sect1>
-    <sect1 id="ch">
-      <title>ch (SCSI Media Changer)</title>
-      <para>ch (ch.c)</para>
-    </sect1>
-  </chapter>
-
-  <chapter id="mid_layer">
-    <title>SCSI mid layer</title>
-
-    <sect1 id="midlayer_implementation">
-      <title>SCSI midlayer implementation</title>
-      <sect2 id="scsi_device.h">
-        <title>include/scsi/scsi_device.h</title>
-        <para>
-        </para>
-!Iinclude/scsi/scsi_device.h
-      </sect2>
-
-      <sect2 id="scsi.c">
-        <title>drivers/scsi/scsi.c</title>
-        <para>Main file for the SCSI midlayer.</para>
-!Edrivers/scsi/scsi.c
-      </sect2>
-      <sect2 id="scsicam.c">
-        <title>drivers/scsi/scsicam.c</title>
-        <para>
-          <ulink url='http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf'>SCSI
-          Common Access Method</ulink> support functions, for use with
-          HDIO_GETGEO, etc.
-        </para>
-!Edrivers/scsi/scsicam.c
-      </sect2>
-      <sect2 id="scsi_error.c">
-        <title>drivers/scsi/scsi_error.c</title>
-        <para>Common SCSI error/timeout handling routines.</para>
-!Edrivers/scsi/scsi_error.c
-      </sect2>
-      <sect2 id="scsi_devinfo.c">
-        <title>drivers/scsi/scsi_devinfo.c</title>
-        <para>
-          Manage scsi_dev_info_list, which tracks blacklisted and whitelisted
-          devices.
-        </para>
-!Idrivers/scsi/scsi_devinfo.c
-      </sect2>
-      <sect2 id="scsi_ioctl.c">
-        <title>drivers/scsi/scsi_ioctl.c</title>
-        <para>
-          Handle ioctl() calls for SCSI devices.
-        </para>
-!Edrivers/scsi/scsi_ioctl.c
-      </sect2>
-      <sect2 id="scsi_lib.c">
-        <title>drivers/scsi/scsi_lib.c</title>
-        <para>
-          SCSI queuing library.
-        </para>
-!Edrivers/scsi/scsi_lib.c
-      </sect2>
-      <sect2 id="scsi_lib_dma.c">
-        <title>drivers/scsi/scsi_lib_dma.c</title>
-        <para>
-          SCSI library functions depending on DMA
-          (map and unmap scatter-gather lists).
-        </para>
-!Edrivers/scsi/scsi_lib_dma.c
-      </sect2>
-      <sect2 id="scsi_module.c">
-        <title>drivers/scsi/scsi_module.c</title>
-        <para>
-          The file drivers/scsi/scsi_module.c contains legacy support for
-          old-style host templates.  It should never be used by any new driver.
-        </para>
-      </sect2>
-      <sect2 id="scsi_proc.c">
-        <title>drivers/scsi/scsi_proc.c</title>
-        <para>
-          The functions in this file provide an interface between
-          the PROC file system and the SCSI device drivers
-          It is mainly used for debugging, statistics and to pass
-          information directly to the lowlevel driver.
-
-          I.E. plumbing to manage /proc/scsi/*
-        </para>
-!Idrivers/scsi/scsi_proc.c
-      </sect2>
-      <sect2 id="scsi_netlink.c">
-        <title>drivers/scsi/scsi_netlink.c</title>
-        <para>
-          Infrastructure to provide async events from transports to userspace
-          via netlink, using a single NETLINK_SCSITRANSPORT protocol for all
-          transports.
-
-          See <ulink url='http://marc.info/?l=linux-scsi&amp;m=115507374832500&amp;w=2'>the
-          original patch submission</ulink> for more details.
-        </para>
-!Idrivers/scsi/scsi_netlink.c
-      </sect2>
-      <sect2 id="scsi_scan.c">
-        <title>drivers/scsi/scsi_scan.c</title>
-        <para>
-          Scan a host to determine which (if any) devices are attached.
-
-          The general scanning/probing algorithm is as follows, exceptions are
-          made to it depending on device specific flags, compilation options,
-          and global variable (boot or module load time) settings.
-
-          A specific LUN is scanned via an INQUIRY command; if the LUN has a
-          device attached, a scsi_device is allocated and setup for it.
-
-          For every id of every channel on the given host, start by scanning
-          LUN 0.  Skip hosts that don't respond at all to a scan of LUN 0.
-          Otherwise, if LUN 0 has a device attached, allocate and setup a
-          scsi_device for it.  If target is SCSI-3 or up, issue a REPORT LUN,
-          and scan all of the LUNs returned by the REPORT LUN; else,
-          sequentially scan LUNs up until some maximum is reached, or a LUN is
-          seen that cannot have a device attached to it.
-        </para>
-!Idrivers/scsi/scsi_scan.c
-      </sect2>
-      <sect2 id="scsi_sysctl.c">
-        <title>drivers/scsi/scsi_sysctl.c</title>
-        <para>
-          Set up the sysctl entry: "/dev/scsi/logging_level"
-          (DEV_SCSI_LOGGING_LEVEL) which sets/returns scsi_logging_level.
-        </para>
-      </sect2>
-      <sect2 id="scsi_sysfs.c">
-        <title>drivers/scsi/scsi_sysfs.c</title>
-        <para>
-          SCSI sysfs interface routines.
-        </para>
-!Edrivers/scsi/scsi_sysfs.c
-      </sect2>
-      <sect2 id="hosts.c">
-        <title>drivers/scsi/hosts.c</title>
-        <para>
-          mid to lowlevel SCSI driver interface
-        </para>
-!Edrivers/scsi/hosts.c
-      </sect2>
-      <sect2 id="constants.c">
-        <title>drivers/scsi/constants.c</title>
-        <para>
-          mid to lowlevel SCSI driver interface
-        </para>
-!Edrivers/scsi/constants.c
-      </sect2>
-    </sect1>
-
-    <sect1 id="Transport_classes">
-      <title>Transport classes</title>
-      <para>
-        Transport classes are service libraries for drivers in the SCSI
-        lower layer, which expose transport attributes in sysfs.
-      </para>
-      <sect2 id="Fibre_Channel_transport">
-        <title>Fibre Channel transport</title>
-        <para>
-          The file drivers/scsi/scsi_transport_fc.c defines transport attributes
-          for Fibre Channel.
-        </para>
-!Edrivers/scsi/scsi_transport_fc.c
-      </sect2>
-      <sect2 id="iSCSI_transport">
-        <title>iSCSI transport class</title>
-        <para>
-          The file drivers/scsi/scsi_transport_iscsi.c defines transport
-          attributes for the iSCSI class, which sends SCSI packets over TCP/IP
-          connections.
-        </para>
-!Edrivers/scsi/scsi_transport_iscsi.c
-      </sect2>
-      <sect2 id="SAS_transport">
-        <title>Serial Attached SCSI (SAS) transport class</title>
-        <para>
-          The file drivers/scsi/scsi_transport_sas.c defines transport
-          attributes for Serial Attached SCSI, a variant of SATA aimed at
-          large high-end systems.
-        </para>
-        <para>
-          The SAS transport class contains common code to deal with SAS HBAs,
-          an aproximated representation of SAS topologies in the driver model,
-          and various sysfs attributes to expose these topologies and management
-          interfaces to userspace.
-        </para>
-        <para>
-          In addition to the basic SCSI core objects this transport class
-          introduces two additional intermediate objects:  The SAS PHY
-          as represented by struct sas_phy defines an "outgoing" PHY on
-          a SAS HBA or Expander, and the SAS remote PHY represented by
-          struct sas_rphy defines an "incoming" PHY on a SAS Expander or
-          end device.  Note that this is purely a software concept, the
-          underlying hardware for a PHY and a remote PHY is the exactly
-          the same.
-        </para>
-        <para>
-          There is no concept of a SAS port in this code, users can see
-          what PHYs form a wide port based on the port_identifier attribute,
-          which is the same for all PHYs in a port.
-        </para>
-!Edrivers/scsi/scsi_transport_sas.c
-      </sect2>
-      <sect2 id="SATA_transport">
-        <title>SATA transport class</title>
-        <para>
-          The SATA transport is handled by libata, which has its own book of
-          documentation in this directory.
-        </para>
-      </sect2>
-      <sect2 id="SPI_transport">
-        <title>Parallel SCSI (SPI) transport class</title>
-        <para>
-          The file drivers/scsi/scsi_transport_spi.c defines transport
-          attributes for traditional (fast/wide/ultra) SCSI busses.
-        </para>
-!Edrivers/scsi/scsi_transport_spi.c
-      </sect2>
-      <sect2 id="SRP_transport">
-        <title>SCSI RDMA (SRP) transport class</title>
-        <para>
-          The file drivers/scsi/scsi_transport_srp.c defines transport
-          attributes for SCSI over Remote Direct Memory Access.
-        </para>
-!Edrivers/scsi/scsi_transport_srp.c
-      </sect2>
-    </sect1>
-
-  </chapter>
-
-  <chapter id="lower_layer">
-    <title>SCSI lower layer</title>
-    <sect1 id="hba_drivers">
-      <title>Host Bus Adapter transport types</title>
-      <para>
-        Many modern device controllers use the SCSI command set as a protocol to
-        communicate with their devices through many different types of physical
-        connections.
-      </para>
-      <para>
-        In SCSI language a bus capable of carrying SCSI commands is
-        called a "transport", and a controller connecting to such a bus is
-        called a "host bus adapter" (HBA).
-      </para>
-      <sect2 id="scsi_debug.c">
-        <title>Debug transport</title>
-        <para>
-          The file drivers/scsi/scsi_debug.c simulates a host adapter with a
-          variable number of disks (or disk like devices) attached, sharing a
-          common amount of RAM.  Does a lot of checking to make sure that we are
-          not getting blocks mixed up, and panics the kernel if anything out of
-          the ordinary is seen.
-        </para>
-        <para>
-          To be more realistic, the simulated devices have the transport
-          attributes of SAS disks.
-        </para>
-        <para>
-          For documentation see
-          <ulink url='http://sg.danny.cz/sg/sdebug26.html'>http://sg.danny.cz/sg/sdebug26.html</ulink>
-        </para>
-<!-- !Edrivers/scsi/scsi_debug.c -->
-      </sect2>
-      <sect2 id="todo">
-        <title>todo</title>
-        <para>Parallel (fast/wide/ultra) SCSI, USB, SATA,
-        SAS, Fibre Channel, FireWire, ATAPI devices, Infiniband,
-        I20, iSCSI, Parallel ports, netlink...
-        </para>
-      </sect2>
-    </sect1>
-  </chapter>
-</book>
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl
deleted file mode 100644
index 4a38f60..0000000
--- a/Documentation/DocBook/sh.tmpl
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="sh-drivers">
- <bookinfo>
-  <title>SuperH Interfaces Guide</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Paul</firstname>
-    <surname>Mundt</surname>
-    <affiliation>
-     <address>
-      <email>lethal@linux-sh.org</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2008-2010</year>
-   <holder>Paul Mundt</holder>
-  </copyright>
-  <copyright>
-   <year>2008-2010</year>
-   <holder>Renesas Technology Corp.</holder>
-  </copyright>
-  <copyright>
-   <year>2010</year>
-   <holder>Renesas Electronics Corp.</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License version 2 as published by the Free Software Foundation.
-   </para>
-      
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-      
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-      
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="mm">
-    <title>Memory Management</title>
-    <sect1 id="sh4">
-    <title>SH-4</title>
-      <sect2 id="sq">
-        <title>Store Queue API</title>
-!Earch/sh/kernel/cpu/sh4/sq.c
-      </sect2>
-    </sect1>
-    <sect1 id="sh5">
-      <title>SH-5</title>
-      <sect2 id="tlb">
-	<title>TLB Interfaces</title>
-!Iarch/sh/mm/tlb-sh5.c
-!Iarch/sh/include/asm/tlb_64.h
-      </sect2>
-    </sect1>
-  </chapter>
-  <chapter id="mach">
-    <title>Machine Specific Interfaces</title>
-    <sect1 id="dreamcast">
-      <title>mach-dreamcast</title>
-!Iarch/sh/boards/mach-dreamcast/rtc.c
-    </sect1>
-    <sect1 id="x3proto">
-      <title>mach-x3proto</title>
-!Earch/sh/boards/mach-x3proto/ilsel.c
-    </sect1>
-  </chapter>
-  <chapter id="busses">
-    <title>Busses</title>
-    <sect1 id="superhyway">
-      <title>SuperHyway</title>
-!Edrivers/sh/superhyway/superhyway.c
-    </sect1>
-
-    <sect1 id="maple">
-      <title>Maple</title>
-!Edrivers/sh/maple/maple.c
-    </sect1>
-  </chapter>
-</book>
diff --git a/Documentation/DocBook/stylesheet.xsl b/Documentation/DocBook/stylesheet.xsl
deleted file mode 100644
index 3bf4ecf..0000000
--- a/Documentation/DocBook/stylesheet.xsl
+++ /dev/null
@@ -1,11 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<param name="chunk.quietly">1</param>
-<param name="funcsynopsis.style">ansi</param>
-<param name="funcsynopsis.tabular.threshold">80</param>
-<param name="callout.graphics">0</param>
-<!-- <param name="paper.type">A4</param> -->
-<param name="generate.consistent.ids">1</param>
-<param name="generate.section.toc.level">2</param>
-<param name="use.id.as.filename">1</param>
-</stylesheet>
diff --git a/Documentation/DocBook/w1.tmpl b/Documentation/DocBook/w1.tmpl
deleted file mode 100644
index b0228d4..0000000
--- a/Documentation/DocBook/w1.tmpl
+++ /dev/null
@@ -1,101 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="w1id">
-  <bookinfo>
-    <title>W1: Dallas' 1-wire bus</title>
-
-    <authorgroup>
-      <author>
-        <firstname>David</firstname>
-        <surname>Fries</surname>
-        <affiliation>
-          <address>
-            <email>David@Fries.net</email>
-          </address>
-        </affiliation>
-      </author>
-
-    </authorgroup>
-
-    <copyright>
-      <year>2013</year>
-      <!--
-      <holder></holder>
-      -->
-    </copyright>
-
-    <legalnotice>
-      <para>
-        This documentation is free software; you can redistribute
-        it and/or modify it under the terms of the GNU General Public
-        License version 2.
-      </para>
-
-      <para>
-        This program is distributed in the hope that it will be
-        useful, but WITHOUT ANY WARRANTY; without even the implied
-        warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-        For more details see the file COPYING in the source
-        distribution of Linux.
-      </para>
-    </legalnotice>
-  </bookinfo>
-
-  <toc></toc>
-
-  <chapter id="w1_internal">
-    <title>W1 API internal to the kernel</title>
-
-    <sect1 id="w1_internal_api">
-      <title>W1 API internal to the kernel</title>
-      <sect2 id="w1.h">
-        <title>drivers/w1/w1.h</title>
-        <para>W1 core functions.</para>
-!Idrivers/w1/w1.h
-      </sect2>
-
-      <sect2 id="w1.c">
-        <title>drivers/w1/w1.c</title>
-        <para>W1 core functions.</para>
-!Idrivers/w1/w1.c
-      </sect2>
-
-      <sect2 id="w1_family.h">
-        <title>drivers/w1/w1_family.h</title>
-        <para>Allows registering device family operations.</para>
-!Idrivers/w1/w1_family.h
-      </sect2>
-
-      <sect2 id="w1_family.c">
-        <title>drivers/w1/w1_family.c</title>
-        <para>Allows registering device family operations.</para>
-!Edrivers/w1/w1_family.c
-      </sect2>
-
-      <sect2 id="w1_int.c">
-        <title>drivers/w1/w1_int.c</title>
-        <para>W1 internal initialization for master devices.</para>
-!Edrivers/w1/w1_int.c
-      </sect2>
-
-      <sect2 id="w1_netlink.h">
-        <title>drivers/w1/w1_netlink.h</title>
-        <para>W1 external netlink API structures and commands.</para>
-!Idrivers/w1/w1_netlink.h
-      </sect2>
-
-      <sect2 id="w1_io.c">
-        <title>drivers/w1/w1_io.c</title>
-        <para>W1 input/output.</para>
-!Edrivers/w1/w1_io.c
-!Idrivers/w1/w1_io.c
-      </sect2>
-
-    </sect1>
-
-
-  </chapter>
-
-</book>
diff --git a/Documentation/DocBook/z8530book.tmpl b/Documentation/DocBook/z8530book.tmpl
deleted file mode 100644
index 6f3883b..0000000
--- a/Documentation/DocBook/z8530book.tmpl
+++ /dev/null
@@ -1,371 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="Z85230Guide">
- <bookinfo>
-  <title>Z8530 Programming Guide</title>
-  
-  <authorgroup>
-   <author>
-    <firstname>Alan</firstname>
-    <surname>Cox</surname>
-    <affiliation>
-     <address>
-      <email>alan@lxorguk.ukuu.org.uk</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2000</year>
-   <holder>Alan Cox</holder>
-  </copyright>
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-      
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-      
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-      
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
-<toc></toc>
-
-  <chapter id="intro">
-      <title>Introduction</title>
-  <para>
-	The Z85x30 family synchronous/asynchronous controller chips are
-	used on a large number of cheap network interface cards. The
-	kernel provides a core interface layer that is designed to make
-	it easy to provide WAN services using this chip.
-  </para>
-  <para>
-	The current driver only support synchronous operation. Merging the
-	asynchronous driver support into this code to allow any Z85x30
-	device to be used as both a tty interface and as a synchronous 
-	controller is a project for Linux post the 2.4 release
-  </para>
-  </chapter>
-  
-  <chapter id="Driver_Modes">
- 	<title>Driver Modes</title>
-  <para>
-	The Z85230 driver layer can drive Z8530, Z85C30 and Z85230 devices
-	in three different modes. Each mode can be applied to an individual
-	channel on the chip (each chip has two channels).
-  </para>
-  <para>
-	The PIO synchronous mode supports the most common Z8530 wiring. Here
-	the chip is interface to the I/O and interrupt facilities of the
-	host machine but not to the DMA subsystem. When running PIO the
-	Z8530 has extremely tight timing requirements. Doing high speeds,
-	even with a Z85230 will be tricky. Typically you should expect to
-	achieve at best 9600 baud with a Z8C530 and 64Kbits with a Z85230.
-  </para>
-  <para>
-	The DMA mode supports the chip when it is configured to use dual DMA
-	channels on an ISA bus. The better cards tend to support this mode
-	of operation for a single channel. With DMA running the Z85230 tops
-	out when it starts to hit ISA DMA constraints at about 512Kbits. It
-	is worth noting here that many PC machines hang or crash when the
-	chip is driven fast enough to hold the ISA bus solid.
-  </para>
-  <para>
-	Transmit DMA mode uses a single DMA channel. The DMA channel is used
-	for transmission as the transmit FIFO is smaller than the receive
-	FIFO. it gives better performance than pure PIO mode but is nowhere
-	near as ideal as pure DMA mode. 
-  </para>
-  </chapter>
-
-  <chapter id="Using_the_Z85230_driver">
- 	<title>Using the Z85230 driver</title>
-  <para>
-	The Z85230 driver provides the back end interface to your board. To
-	configure a Z8530 interface you need to detect the board and to 
-	identify its ports and interrupt resources. It is also your problem
-	to verify the resources are available.
-  </para>
-  <para>
-	Having identified the chip you need to fill in a struct z8530_dev,
-	which describes each chip. This object must exist until you finally
-	shutdown the board. Firstly zero the active field. This ensures 
-	nothing goes off without you intending it. The irq field should
-	be set to the interrupt number of the chip. (Each chip has a single
-	interrupt source rather than each channel). You are responsible
-	for allocating the interrupt line. The interrupt handler should be
-	set to <function>z8530_interrupt</function>. The device id should
-	be set to the z8530_dev structure pointer. Whether the interrupt can
-	be shared or not is board dependent, and up to you to initialise.
-  </para>
-  <para>
-	The structure holds two channel structures. 
-	Initialise chanA.ctrlio and chanA.dataio with the address of the
-	control and data ports. You can or this with Z8530_PORT_SLEEP to
-	indicate your interface needs the 5uS delay for chip settling done
-	in software. The PORT_SLEEP option is architecture specific. Other
-	flags may become available on future platforms, eg for MMIO.
-	Initialise the chanA.irqs to &amp;z8530_nop to start the chip up
-	as disabled and discarding interrupt events. This ensures that
-	stray interrupts will be mopped up and not hang the bus. Set
-	chanA.dev to point to the device structure itself. The
-	private and name field you may use as you wish. The private field
-	is unused by the Z85230 layer. The name is used for error reporting
-	and it may thus make sense to make it match the network name.
-  </para>
-  <para>
-	Repeat the same operation with the B channel if your chip has
-	both channels wired to something useful. This isn't always the
-	case. If it is not wired then the I/O values do not matter, but
-	you must initialise chanB.dev.
-  </para>
-  <para>
-	If your board has DMA facilities then initialise the txdma and
-	rxdma fields for the relevant channels. You must also allocate the
-	ISA DMA channels and do any necessary board level initialisation
-	to configure them. The low level driver will do the Z8530 and
-	DMA controller programming but not board specific magic.
-  </para>
-  <para>
-	Having initialised the device you can then call
-	<function>z8530_init</function>. This will probe the chip and 
-	reset it into a known state. An identification sequence is then
-	run to identify the chip type. If the checks fail to pass the
-	function returns a non zero error code. Typically this indicates
-	that the port given is not valid. After this call the
-	type field of the z8530_dev structure is initialised to either
-	Z8530, Z85C30 or Z85230 according to the chip found.
-  </para>
-  <para>
-	Once you have called z8530_init you can also make use of the utility
-	function <function>z8530_describe</function>. This provides a 
-	consistent reporting format for the Z8530 devices, and allows all
-	the drivers to provide consistent reporting.
-  </para>
-  </chapter>
-
-  <chapter id="Attaching_Network_Interfaces">
- 	<title>Attaching Network Interfaces</title>
-  <para>
-	If you wish to use the network interface facilities of the driver,
-	then you need to attach a network device to each channel that is
-	present and in use. In addition to use the generic HDLC
-	you need to follow some additional plumbing rules. They may seem 
-	complex but a look at the example hostess_sv11 driver should
-	reassure you.
-  </para>
-  <para>
-	The network device used for each channel should be pointed to by
-	the netdevice field of each channel. The hdlc-&gt; priv field of the
-	network device points to your private data - you will need to be
-	able to find your private data from this.
-  </para>
-  <para>
-	The way most drivers approach this particular problem is to
-	create a structure holding the Z8530 device definition and
-	put that into the private field of the network device. The
-	network device fields of the channels then point back to the
-	network devices.
-  </para>
-  <para>
-	If you wish to use the generic HDLC then you need to register
-	the HDLC device.
-  </para>
-  <para>
-	Before you register your network device you will also need to
-	provide suitable handlers for most of the network device callbacks. 
-	See the network device documentation for more details on this.
-  </para>
-  </chapter>
-
-  <chapter id="Configuring_And_Activating_The_Port">
- 	<title>Configuring And Activating The Port</title>
-  <para>
-	The Z85230 driver provides helper functions and tables to load the
-	port registers on the Z8530 chips. When programming the register
-	settings for a channel be aware that the documentation recommends
-	initialisation orders. Strange things happen when these are not
-	followed. 
-  </para>
-  <para>
-	<function>z8530_channel_load</function> takes an array of
-	pairs of initialisation values in an array of u8 type. The first
-	value is the Z8530 register number. Add 16 to indicate the alternate
-	register bank on the later chips. The array is terminated by a 255.
-  </para>
-  <para>
-	The driver provides a pair of public tables. The
-	z8530_hdlc_kilostream table is for the UK 'Kilostream' service and
-	also happens to cover most other end host configurations. The
-	z8530_hdlc_kilostream_85230 table is the same configuration using
-	the enhancements of the 85230 chip. The configuration loaded is
-	standard NRZ encoded synchronous data with HDLC bitstuffing. All
-	of the timing is taken from the other end of the link.
-  </para>
-  <para>
-	When writing your own tables be aware that the driver internally
-	tracks register values. It may need to reload values. You should
-	therefore be sure to set registers 1-7, 9-11, 14 and 15 in all
-	configurations. Where the register settings depend on DMA selection
-	the driver will update the bits itself when you open or close.
-	Loading a new table with the interface open is not recommended.
-  </para>
-  <para>
-	There are three standard configurations supported by the core
-	code. In PIO mode the interface is programmed up to use
-	interrupt driven PIO. This places high demands on the host processor
-	to avoid latency. The driver is written to take account of latency
-	issues but it cannot avoid latencies caused by other drivers,
-	notably IDE in PIO mode. Because the drivers allocate buffers you
-	must also prevent MTU changes while the port is open.
-  </para>
-  <para>
-	Once the port is open it will call the rx_function of each channel
-	whenever a completed packet arrived. This is invoked from
-	interrupt context and passes you the channel and a network	
-	buffer (struct sk_buff) holding the data. The data includes
-	the CRC bytes so most users will want to trim the last two
-	bytes before processing the data. This function is very timing
-	critical. When you wish to simply discard data the support
-	code provides the function <function>z8530_null_rx</function>
-	to discard the data.
-  </para>
-  <para>
-	To active PIO mode sending and receiving the <function>
-	z8530_sync_open</function> is called. This expects to be passed
-	the network device and the channel. Typically this is called from
-	your network device open callback. On a failure a non zero error
-	status is returned. The <function>z8530_sync_close</function> 
-	function shuts down a PIO channel. This must be done before the 
-	channel is opened again	and before the driver shuts down 
-	and unloads.
-  </para>
-  <para>
-	The ideal mode of operation is dual channel DMA mode. Here the
-	kernel driver will configure the board for DMA in both directions.
-	The driver also handles ISA DMA issues such as controller
-	programming and the memory range limit for you. This mode is
-	activated by calling the <function>z8530_sync_dma_open</function>
-	function. On failure a non zero error value is returned.
-	Once this mode is activated it can be shut down by calling the
-	<function>z8530_sync_dma_close</function>. You must call the close
-	function matching the open mode you used.
-  </para>
-  <para>
-	The final supported mode uses a single DMA channel to drive the
-	transmit side. As the Z85C30 has a larger FIFO on the receive
-	channel	this tends to increase the maximum speed a little. 
-	This is activated by calling the <function>z8530_sync_txdma_open
-	</function>. This returns a non zero error code on failure. The
-	<function>z8530_sync_txdma_close</function> function closes down
-	the Z8530 interface from this mode.
-  </para>
-  </chapter>
-
-  <chapter id="Network_Layer_Functions">
- 	<title>Network Layer Functions</title>
-  <para>
-	The Z8530 layer provides functions to queue packets for
-	transmission. The driver internally buffers the frame currently
-	being transmitted and one further frame (in order to keep back
-	to back transmission running). Any further buffering is up to
-	the caller.
-  </para>
-  <para>
-	The function <function>z8530_queue_xmit</function> takes a network
-	buffer in sk_buff format and queues it for transmission. The
-	caller must provide the entire packet with the exception of the
-	bitstuffing and CRC. This is normally done by the caller via
-	the generic HDLC interface layer. It returns 0 if the buffer has been
-	queued and non zero values for queue full. If the function accepts
-	the buffer it becomes property of the Z8530 layer and the caller
-	should not free it.
-  </para>
-  <para>
-	The function <function>z8530_get_stats</function> returns a pointer
-	to an internally maintained per interface statistics block. This
-	provides most of the interface code needed to implement the network
-	layer get_stats callback.
-  </para>
-  </chapter>
-
-  <chapter id="Porting_The_Z8530_Driver">
-     <title>Porting The Z8530 Driver</title>
-  <para>
-	The Z8530 driver is written to be portable. In DMA mode it makes
-	assumptions about the use of ISA DMA. These are probably warranted
-	in most cases as the Z85230 in particular was designed to glue to PC
-	type machines. The PIO mode makes no real assumptions.
-  </para>
-  <para>
-	Should you need to retarget the Z8530 driver to another architecture
-	the only code that should need changing are the port I/O functions.
-	At the moment these assume PC I/O port accesses. This may not be
-	appropriate for all platforms. Replacing 
-	<function>z8530_read_port</function> and <function>z8530_write_port
-	</function> is intended to be all that is required to port this
-	driver layer.
-  </para>
-  </chapter>
-
-  <chapter id="bugs">
-     <title>Known Bugs And Assumptions</title>
-  <para>
-  <variablelist>
-    <varlistentry><term>Interrupt Locking</term>
-    <listitem>
-    <para>
-	The locking in the driver is done via the global cli/sti lock. This
-	makes for relatively poor SMP performance. Switching this to use a
-	per device spin lock would probably materially improve performance.
-    </para>
-    </listitem></varlistentry>
-
-    <varlistentry><term>Occasional Failures</term>
-    <listitem>
-    <para>
-	We have reports of occasional failures when run for very long
-	periods of time and the driver starts to receive junk frames. At
-	the moment the cause of this is not clear.
-    </para>
-    </listitem></varlistentry>
-  </variablelist>
-	
-  </para>
-  </chapter>
-
-  <chapter id="pubfunctions">
-     <title>Public Functions Provided</title>
-!Edrivers/net/wan/z85230.c
-  </chapter>
-
-  <chapter id="intfunctions">
-     <title>Internal Functions</title>
-!Idrivers/net/wan/z85230.c
-  </chapter>
-
-</book>
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 82001a2..1f246eb 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -231,5 +231,42 @@
 4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap,
    they are unused with hierarchy irq_domain.
 
-Hierarchy irq_domain may also be used to support other architectures,
-such as ARM, ARM64 etc.
+Hierarchy irq_domain is in no way x86 specific, and is heavily used to
+support other architectures, such as ARM, ARM64 etc.
+
+=== Debugging ===
+
+If you switch on CONFIG_IRQ_DOMAIN_DEBUG (which depends on
+CONFIG_IRQ_DOMAIN and CONFIG_DEBUG_FS), you will find a new file in
+your debugfs mount point, called irq_domain_mapping. This file
+contains a live snapshot of all the IRQ domains in the system:
+
+ name              mapped  linear-max  direct-max  devtree-node
+ pl061                  8           8           0  /smb/gpio@e0080000
+ pl061                  8           8           0  /smb/gpio@e1050000
+ pMSI                   0           0           0  /interrupt-controller@e1101000/v2m@e0080000
+ MSI                   37           0           0  /interrupt-controller@e1101000/v2m@e0080000
+ GICv2m                37           0           0  /interrupt-controller@e1101000/v2m@e0080000
+ GICv2                448         448           0  /interrupt-controller@e1101000
+
+It also iterates over the interrupts to display their mapping in the
+domains, and makes the domain stacking visible:
+
+
+irq    hwirq    chip name        chip data           active  type            domain
+    1  0x00019  GICv2            0xffff00000916bfd8     *    LINEAR          GICv2
+    2  0x0001d  GICv2            0xffff00000916bfd8          LINEAR          GICv2
+    3  0x0001e  GICv2            0xffff00000916bfd8     *    LINEAR          GICv2
+    4  0x0001b  GICv2            0xffff00000916bfd8     *    LINEAR          GICv2
+    5  0x0001a  GICv2            0xffff00000916bfd8          LINEAR          GICv2
+[...]
+   96  0x81808  MSI              0x          (null)           RADIX          MSI
+   96+ 0x00063  GICv2m           0xffff8003ee116980           RADIX          GICv2m
+   96+ 0x00063  GICv2            0xffff00000916bfd8          LINEAR          GICv2
+   97  0x08800  MSI              0x          (null)     *     RADIX          MSI
+   97+ 0x00064  GICv2m           0xffff8003ee116980     *     RADIX          GICv2m
+   97+ 0x00064  GICv2            0xffff00000916bfd8     *    LINEAR          GICv2
+
+Here, interrupts 1-5 are only using a single domain, while 96 and 97
+are built out of a stack of three domains, each level performing a
+particular function.
diff --git a/Documentation/Makefile b/Documentation/Makefile
index c2a4691..a423203 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1 +1,126 @@
+# -*- makefile -*-
+# Makefile for Sphinx documentation
+#
+
 subdir-y :=
+
+# You can set these variables from the command line.
+SPHINXBUILD   = sphinx-build
+SPHINXOPTS    =
+SPHINXDIRS    = .
+_SPHINXDIRS   = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(srctree)/Documentation/*/conf.py))
+SPHINX_CONF   = conf.py
+PAPER         =
+BUILDDIR      = $(obj)/output
+PDFLATEX      = xelatex
+LATEXOPTS     = -interaction=batchmode
+
+# User-friendly check for sphinx-build
+HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)
+
+ifeq ($(HAVE_SPHINX),0)
+
+.DEFAULT:
+	$(warning The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed and in PATH, or set the SPHINXBUILD make variable to point to the full path of the '$(SPHINXBUILD)' executable.)
+	@echo "  SKIP    Sphinx $@ target."
+
+else # HAVE_SPHINX
+
+# User-friendly check for pdflatex
+HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+KERNELDOC       = $(srctree)/scripts/kernel-doc
+KERNELDOC_CONF  = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
+ALLSPHINXOPTS   =  $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) $(SPHINXOPTS)
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+# commands; the 'cmd' from scripts/Kbuild.include is not *loopable*
+loop_cmd = $(echo-cmd) $(cmd_$(1)) || exit;
+
+# $2 sphinx builder e.g. "html"
+# $3 name of the build subfolder / e.g. "media", used as:
+#    * dest folder relative to $(BUILDDIR) and
+#    * cache folder relative to $(BUILDDIR)/.doctrees
+# $4 dest subfolder e.g. "man" for man pages at media/man
+# $5 reST source folder relative to $(srctree)/$(src),
+#    e.g. "media" for the linux-tv book-set at ./Documentation/media
+
+quiet_cmd_sphinx = SPHINX  $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
+      cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media $2 && \
+	PYTHONDONTWRITEBYTECODE=1 \
+	BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+	$(SPHINXBUILD) \
+	-b $2 \
+	-c $(abspath $(srctree)/$(src)) \
+	-d $(abspath $(BUILDDIR)/.doctrees/$3) \
+	-D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) \
+	$(ALLSPHINXOPTS) \
+	$(abspath $(srctree)/$(src)/$5) \
+	$(abspath $(BUILDDIR)/$3/$4)
+
+htmldocs:
+	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,html,$(var),,$(var)))
+
+linkcheckdocs:
+	@$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,linkcheck,$(var),,$(var)))
+
+latexdocs:
+	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,latex,$(var),latex,$(var)))
+
+ifeq ($(HAVE_PDFLATEX),0)
+
+pdfdocs:
+	$(warning The '$(PDFLATEX)' command was not found. Make sure you have it installed and in PATH to produce PDF output.)
+	@echo "  SKIP    Sphinx $@ target."
+
+else # HAVE_PDFLATEX
+
+pdfdocs: latexdocs
+	$(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=$(PDFLATEX) LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex || exit;)
+
+endif # HAVE_PDFLATEX
+
+epubdocs:
+	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,epub,$(var),epub,$(var)))
+
+xmldocs:
+	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,xml,$(var),xml,$(var)))
+
+endif # HAVE_SPHINX
+
+# The following targets are independent of HAVE_SPHINX, and the rules should
+# work or silently pass without Sphinx.
+
+# no-ops for the Sphinx toolchain
+sgmldocs:
+	@:
+psdocs:
+	@:
+mandocs:
+	@:
+installmandocs:
+	@:
+
+cleandocs:
+	$(Q)rm -rf $(BUILDDIR)
+	$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media clean
+
+dochelp:
+	@echo  ' Linux kernel internal documentation in different formats from ReST:'
+	@echo  '  htmldocs        - HTML'
+	@echo  '  latexdocs       - LaTeX'
+	@echo  '  pdfdocs         - PDF'
+	@echo  '  epubdocs        - EPUB'
+	@echo  '  xmldocs         - XML'
+	@echo  '  linkcheckdocs   - check for broken external links (will connect to external hosts)'
+	@echo  '  cleandocs       - clean all generated files'
+	@echo
+	@echo  '  make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
+	@echo  '  valid values for SPHINXDIRS are: $(_SPHINXDIRS)'
+	@echo
+	@echo  '  make SPHINX_CONF={conf-file} [target] use *additional* sphinx-build'
+	@echo  '  configuration. This is e.g. useful to build with nit-picking config.'
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
deleted file mode 100644
index bcf529f..0000000
--- a/Documentation/Makefile.sphinx
+++ /dev/null
@@ -1,130 +0,0 @@
-# -*- makefile -*-
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXBUILD   = sphinx-build
-SPHINXOPTS    =
-SPHINXDIRS    = .
-_SPHINXDIRS   = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(srctree)/Documentation/*/conf.py))
-SPHINX_CONF   = conf.py
-PAPER         =
-BUILDDIR      = $(obj)/output
-PDFLATEX      = xelatex
-LATEXOPTS     = -interaction=batchmode
-
-# User-friendly check for sphinx-build
-HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)
-
-ifeq ($(HAVE_SPHINX),0)
-
-.DEFAULT:
-	$(warning The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed and in PATH, or set the SPHINXBUILD make variable to point to the full path of the '$(SPHINXBUILD)' executable.)
-	@echo "  SKIP    Sphinx $@ target."
-
-else ifneq ($(DOCBOOKS),)
-
-# Skip Sphinx build if the user explicitly requested DOCBOOKS.
-.DEFAULT:
-	@echo "  SKIP    Sphinx $@ target (DOCBOOKS specified)."
-
-else # HAVE_SPHINX
-
-# User-friendly check for pdflatex
-HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-KERNELDOC       = $(srctree)/scripts/kernel-doc
-KERNELDOC_CONF  = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
-ALLSPHINXOPTS   =  $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) $(SPHINXOPTS)
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-# commands; the 'cmd' from scripts/Kbuild.include is not *loopable*
-loop_cmd = $(echo-cmd) $(cmd_$(1)) || exit;
-
-# $2 sphinx builder e.g. "html"
-# $3 name of the build subfolder / e.g. "media", used as:
-#    * dest folder relative to $(BUILDDIR) and
-#    * cache folder relative to $(BUILDDIR)/.doctrees
-# $4 dest subfolder e.g. "man" for man pages at media/man
-# $5 reST source folder relative to $(srctree)/$(src),
-#    e.g. "media" for the linux-tv book-set at ./Documentation/media
-
-quiet_cmd_sphinx = SPHINX  $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
-      cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media $2 && \
-	PYTHONDONTWRITEBYTECODE=1 \
-	BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
-	$(SPHINXBUILD) \
-	-b $2 \
-	-c $(abspath $(srctree)/$(src)) \
-	-d $(abspath $(BUILDDIR)/.doctrees/$3) \
-	-D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) \
-	$(ALLSPHINXOPTS) \
-	$(abspath $(srctree)/$(src)/$5) \
-	$(abspath $(BUILDDIR)/$3/$4)
-
-htmldocs:
-	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,html,$(var),,$(var)))
-
-linkcheckdocs:
-	@$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,linkcheck,$(var),,$(var)))
-
-latexdocs:
-	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,latex,$(var),latex,$(var)))
-
-ifeq ($(HAVE_PDFLATEX),0)
-
-pdfdocs:
-	$(warning The '$(PDFLATEX)' command was not found. Make sure you have it installed and in PATH to produce PDF output.)
-	@echo "  SKIP    Sphinx $@ target."
-
-else # HAVE_PDFLATEX
-
-pdfdocs: latexdocs
-	$(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=$(PDFLATEX) LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex || exit;)
-
-endif # HAVE_PDFLATEX
-
-epubdocs:
-	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,epub,$(var),epub,$(var)))
-
-xmldocs:
-	@+$(foreach var,$(SPHINXDIRS),$(call loop_cmd,sphinx,xml,$(var),xml,$(var)))
-
-endif # HAVE_SPHINX
-
-# The following targets are independent of HAVE_SPHINX, and the rules should
-# work or silently pass without Sphinx.
-
-# no-ops for the Sphinx toolchain
-sgmldocs:
-	@:
-psdocs:
-	@:
-mandocs:
-	@:
-installmandocs:
-	@:
-
-cleandocs:
-	$(Q)rm -rf $(BUILDDIR)
-	$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media clean
-
-dochelp:
-	@echo  ' Linux kernel internal documentation in different formats (Sphinx):'
-	@echo  '  htmldocs        - HTML'
-	@echo  '  latexdocs       - LaTeX'
-	@echo  '  pdfdocs         - PDF'
-	@echo  '  epubdocs        - EPUB'
-	@echo  '  xmldocs         - XML'
-	@echo  '  linkcheckdocs   - check for broken external links (will connect to external hosts)'
-	@echo  '  cleandocs       - clean all generated files'
-	@echo
-	@echo  '  make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
-	@echo  '  valid values for SPHINXDIRS are: $(_SPHINXDIRS)'
-	@echo
-	@echo  '  make SPHINX_CONF={conf-file} [target] use *additional* sphinx-build'
-	@echo  '  configuration. This is e.g. useful to build with nit-picking config.'
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 1e37138..618e13d 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -186,7 +186,7 @@
 a different interrupt, the driver will deadlock trying to recursively
 acquire the spinlock.  Such deadlocks can be avoided by using
 spin_lock_irqsave() or spin_lock_irq() which disable local interrupts
-and acquire the lock (see Documentation/DocBook/kernel-locking).
+and acquire the lock (see Documentation/kernel-hacking/locking.rst).
 
 4.5 How to tell whether MSI/MSI-X is enabled on a device
 
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1672573..f46980c 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -28,8 +28,6 @@
 	- RCU CPU stall warnings (module parameter rcu_cpu_stall_suppress)
 torture.txt
 	- RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
-trace.txt
-	- CONFIG_RCU_TRACE debugfs files and formats
 UP.txt
 	- RCU on Uniprocessor Systems
 whatisRCU.txt
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index f60adf1..95b30fa 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -559,9 +559,7 @@
 	For <tt>remove_gp_synchronous()</tt>, as long as all modifications
 	to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
 	the above optimizations are harmless.
-	However,
-	with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
-	<tt>sparse</tt> will complain if you
+	However, <tt>sparse</tt> will complain if you
 	define <tt>gp</tt> with <tt>__rcu</tt> and then
 	access it without using
 	either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
@@ -1849,7 +1847,8 @@
 If the nesting is not visible to the compiler, as is the case with
 mutually recursive functions each in its own translation unit,
 stack overflow will result.
-If the nesting takes the form of loops, either the control variable
+If the nesting takes the form of loops, perhaps in the guise of tail
+recursion, either the control variable
 will overflow or (in the Linux kernel) you will get an RCU CPU stall warning.
 Nevertheless, this class of RCU implementations is one
 of the most composable constructs in existence.
@@ -1977,9 +1976,8 @@
 	and <tt>rcu_dereference()</tt>, perhaps (incorrectly)
 	substituting a simple assignment.
 	To catch this sort of error, a given RCU-protected pointer may be
-	tagged with <tt>__rcu</tt>, after which running sparse
-	with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt> will complain
-	about simple-assignment accesses to that pointer.
+	tagged with <tt>__rcu</tt>, after which sparse
+	will complain about simple-assignment accesses to that pointer.
 	Arnd Bergmann made me aware of this requirement, and also
 	supplied the needed
 	<a href="https://lwn.net/Articles/376011/">patch series</a>.
@@ -2036,7 +2034,7 @@
 	some other synchronization mechanism, for example, reference
 	counting.
 <li>	In kernels built with <tt>CONFIG_RCU_TRACE=y</tt>, RCU-related
-	information is provided via both debugfs and event tracing.
+	information is provided via event tracing.
 <li>	Open-coded use of <tt>rcu_assign_pointer()</tt> and
 	<tt>rcu_dereference()</tt> to create typical linked
 	data structures can be surprisingly error-prone.
@@ -2519,11 +2517,7 @@
 <tt>nohz_full</tt> CPU running in userspace.
 RCU must therefore track <tt>nohz_full</tt> userspace
 execution.
-And in
-<a href="https://lwn.net/Articles/558284/"><tt>CONFIG_NO_HZ_FULL_SYSIDLE=y</tt></a>
-kernels, RCU must separately track idle CPUs on the one hand and
-CPUs that are either idle or executing in userspace on the other.
-In both cases, RCU must be able to sample state at two points in
+RCU must therefore be able to sample state at two points in
 time, and be able to determine whether or not some other CPU spent
 any time idle and/or executing in userspace.
 
@@ -2936,6 +2930,20 @@
 need not exclude CPU-hotplug operations.
 
 <p>
+SRCU also differs from other RCU flavors in that SRCU's expedited and
+non-expedited grace periods are implemented by the same mechanism.
+This means that in the current SRCU implementation, expediting a
+future grace period has the side effect of expediting all prior
+grace periods that have not yet completed.
+(But please note that this is a property of the current implementation,
+not necessarily of future implementations.)
+In addition, if SRCU has been idle for longer than the interval
+specified by the <tt>srcutree.exp_holdoff</tt> kernel boot parameter
+(25&nbsp;microseconds by default),
+and if a <tt>synchronize_srcu()</tt> invocation ends this idle period,
+that invocation will be automatically expedited.
+
+<p>
 As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating
 a locking bottleneck present in prior kernel versions.
 Although this will allow users to put much heavier stress on
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 8779471..6beda55 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -413,11 +413,11 @@
 	read-side critical sections.  It is the responsibility of the
 	RCU update-side primitives to deal with this.
 
-17.	Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
-	__rcu sparse checks (enabled by CONFIG_SPARSE_RCU_POINTER) to
-	validate your RCU code.  These can help find problems as follows:
+17.	Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
+	__rcu sparse checks to validate your RCU code.	These can help
+	find problems as follows:
 
-	CONFIG_PROVE_RCU: check that accesses to RCU-protected data
+	CONFIG_PROVE_LOCKING: check that accesses to RCU-protected data
 		structures are carried out under the proper RCU
 		read-side critical section, while holding the right
 		combination of locks, or whatever other conditions
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
deleted file mode 100644
index 6549012..0000000
--- a/Documentation/RCU/trace.txt
+++ /dev/null
@@ -1,535 +0,0 @@
-CONFIG_RCU_TRACE debugfs Files and Formats
-
-
-The rcutree and rcutiny implementations of RCU provide debugfs trace
-output that summarizes counters and state.  This information is useful for
-debugging RCU itself, and can sometimes also help to debug abuses of RCU.
-The following sections describe the debugfs files and formats, first
-for rcutree and next for rcutiny.
-
-
-CONFIG_TREE_RCU and CONFIG_PREEMPT_RCU debugfs Files and Formats
-
-These implementations of RCU provide several debugfs directories under the
-top-level directory "rcu":
-
-rcu/rcu_bh
-rcu/rcu_preempt
-rcu/rcu_sched
-
-Each directory contains files for the corresponding flavor of RCU.
-Note that rcu/rcu_preempt is only present for CONFIG_PREEMPT_RCU.
-For CONFIG_TREE_RCU, the RCU flavor maps onto the RCU-sched flavor,
-so that activity for both appears in rcu/rcu_sched.
-
-In addition, the following file appears in the top-level directory:
-rcu/rcutorture.  This file displays rcutorture test progress.  The output
-of "cat rcu/rcutorture" looks as follows:
-
-rcutorture test sequence: 0 (test in progress)
-rcutorture update version number: 615
-
-The first line shows the number of rcutorture tests that have completed
-since boot.  If a test is currently running, the "(test in progress)"
-string will appear as shown above.  The second line shows the number of
-update cycles that the current test has started, or zero if there is
-no test in progress.
-
-
-Within each flavor directory (rcu/rcu_bh, rcu/rcu_sched, and possibly
-also rcu/rcu_preempt) the following files will be present:
-
-rcudata:
-	Displays fields in struct rcu_data.
-rcuexp:
-	Displays statistics for expedited grace periods.
-rcugp:
-	Displays grace-period counters.
-rcuhier:
-	Displays the struct rcu_node hierarchy.
-rcu_pending:
-	Displays counts of the reasons rcu_pending() decided that RCU had
-	work to do.
-rcuboost:
-	Displays RCU boosting statistics.  Only present if
-	CONFIG_RCU_BOOST=y.
-
-The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
-
-  0!c=30455 g=30456 cnq=1/0:1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 cnq=1/0:0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 cnq=1/1:1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 cnq=1/1:0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 cnq=1/0:1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 cnq=1/0:1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 cnq=1/0:1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 cnq=1/0:0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
-
-This file has one line per CPU, or eight for this 8-CPU system.
-The fields are as follows:
-
-o	The number at the beginning of each line is the CPU number.
-	CPUs numbers followed by an exclamation mark are offline,
-	but have been online at least once since boot.	There will be
-	no output for CPUs that have never been online, which can be
-	a good thing in the surprisingly common case where NR_CPUS is
-	substantially larger than the number of actual CPUs.
-
-o	"c" is the count of grace periods that this CPU believes have
-	completed.  Offlined CPUs and CPUs in dynticks idle mode may lag
-	quite a ways behind, for example, CPU 4 under "rcu_sched" above,
-	which has been offline through 16 RCU grace periods.  It is not
-	unusual to see offline CPUs lagging by thousands of grace periods.
-	Note that although the grace-period number is an unsigned long,
-	it is printed out as a signed long to allow more human-friendly
-	representation near boot time.
-
-o	"g" is the count of grace periods that this CPU believes have
-	started.  Again, offlined CPUs and CPUs in dynticks idle mode
-	may lag behind.  If the "c" and "g" values are equal, this CPU
-	has already reported a quiescent state for the last RCU grace
-	period that it is aware of, otherwise, the CPU believes that it
-	owes RCU a quiescent state.
-
-o	"pq" indicates that this CPU has passed through a quiescent state
-	for the current grace period.  It is possible for "pq" to be
-	"1" and "c" different than "g", which indicates that although
-	the CPU has passed through a quiescent state, either (1) this
-	CPU has not yet reported that fact, (2) some other CPU has not
-	yet reported for this grace period, or (3) both.
-
-o	"qp" indicates that RCU still expects a quiescent state from
-	this CPU.  Offlined CPUs and CPUs in dyntick idle mode might
-	well have qp=1, which is OK: RCU is still ignoring them.
-
-o	"dt" is the current value of the dyntick counter that is incremented
-	when entering or leaving idle, either due to a context switch or
-	due to an interrupt.  This number is even if the CPU is in idle
-	from RCU's viewpoint and odd otherwise.  The number after the
-	first "/" is the interrupt nesting depth when in idle state,
-	or a large number added to the interrupt-nesting depth when
-	running a non-idle task.  Some architectures do not accurately
-	count interrupt nesting when running in non-idle kernel context,
-	which can result in interesting anomalies such as negative
-	interrupt-nesting levels.  The number after the second "/"
-	is the NMI nesting depth.
-
-o	"df" is the number of times that some other CPU has forced a
-	quiescent state on behalf of this CPU due to this CPU being in
-	idle state.
-
-o	"of" is the number of times that some other CPU has forced a
-	quiescent state on behalf of this CPU due to this CPU being
-	offline.  In a perfect world, this might never happen, but it
-	turns out that offlining and onlining a CPU can take several grace
-	periods, and so there is likely to be an extended period of time
-	when RCU believes that the CPU is online when it really is not.
-	Please note that erring in the other direction (RCU believing a
-	CPU is offline when it is really alive and kicking) is a fatal
-	error, so it makes sense to err conservatively.
-
-o	"ql" is the number of RCU callbacks currently residing on
-	this CPU.  The first number is the number of "lazy" callbacks
-	that are known to RCU to only be freeing memory, and the number
-	after the "/" is the total number of callbacks, lazy or not.
-	These counters count callbacks regardless of what phase of
-	grace-period processing that they are in (new, waiting for
-	grace period to start, waiting for grace period to end, ready
-	to invoke).
-
-o	"qs" gives an indication of the state of the callback queue
-	with four characters:
-
-	"N"	Indicates that there are callbacks queued that are not
-		ready to be handled by the next grace period, and thus
-		will be handled by the grace period following the next
-		one.
-
-	"R"	Indicates that there are callbacks queued that are
-		ready to be handled by the next grace period.
-
-	"W"	Indicates that there are callbacks queued that are
-		waiting on the current grace period.
-
-	"D"	Indicates that there are callbacks queued that have
-		already been handled by a prior grace period, and are
-		thus waiting to be invoked.  Note that callbacks in
-		the process of being invoked are not counted here.
-		Callbacks in the process of being invoked are those
-		that have been removed from the rcu_data structures
-		queues by rcu_do_batch(), but which have not yet been
-		invoked.
-
-	If there are no callbacks in a given one of the above states,
-	the corresponding character is replaced by ".".
-
-o	"b" is the batch limit for this CPU.  If more than this number
-	of RCU callbacks is ready to invoke, then the remainder will
-	be deferred.
-
-o	"ci" is the number of RCU callbacks that have been invoked for
-	this CPU.  Note that ci+nci+ql is the number of callbacks that have
-	been registered in absence of CPU-hotplug activity.
-
-o	"nci" is the number of RCU callbacks that have been offloaded from
-	this CPU.  This will always be zero unless the kernel was built
-	with CONFIG_RCU_NOCB_CPU=y and the "rcu_nocbs=" kernel boot
-	parameter was specified.
-
-o	"co" is the number of RCU callbacks that have been orphaned due to
-	this CPU going offline.  These orphaned callbacks have been moved
-	to an arbitrarily chosen online CPU.
-
-o	"ca" is the number of RCU callbacks that have been adopted by this
-	CPU due to other CPUs going offline.  Note that ci+co-ca+ql is
-	the number of RCU callbacks registered on this CPU.
-
-
-Kernels compiled with CONFIG_RCU_BOOST=y display the following from
-/debug/rcu/rcu_preempt/rcudata:
-
-  0!c=12865 g=12866 cnq=1/0:1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 cnq=1/0:0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 cnq=1/0:0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 cnq=1/0:0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 cnq=1/0:1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 cnq=1/0:0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 cnq=1/0:0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 cnq=1/0:1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
-
-This is similar to the output discussed above, but contains the following
-additional fields:
-
-o	"kt" is the per-CPU kernel-thread state.  The digit preceding
-	the first slash is zero if there is no work pending and 1
-	otherwise.  The character between the first pair of slashes is
-	as follows:
-
-	"S"	The kernel thread is stopped, in other words, all
-		CPUs corresponding to this rcu_node structure are
-		offline.
-
-	"R"	The kernel thread is running.
-
-	"W"	The kernel thread is waiting because there is no work
-		for it to do.
-
-	"O"	The kernel thread is waiting because it has been
-		forced off of its designated CPU or because its
-		->cpus_allowed mask permits it to run on other than
-		its designated CPU.
-
-	"Y"	The kernel thread is yielding to avoid hogging CPU.
-
-	"?"	Unknown value, indicates a bug.
-
-	The number after the final slash is the CPU that the kthread
-	is actually running on.
-
-	This field is displayed only for CONFIG_RCU_BOOST kernels.
-
-o	"ktl" is the low-order 16 bits (in hexadecimal) of the count of
-	the number of times that this CPU's per-CPU kthread has gone
-	through its loop servicing invoke_rcu_cpu_kthread() requests.
-
-	This field is displayed only for CONFIG_RCU_BOOST kernels.
-
-
-The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
-
-s=21872 wd1=0 wd2=0 wd3=5 enq=0 sc=21872
-
-These fields are as follows:
-
-o	"s" is the sequence number, with an odd number indicating that
-	an expedited grace period is in progress.
-
-o	"wd1", "wd2", and "wd3" are the number of times that an attempt
-	to start an expedited grace period found that someone else had
-	completed an expedited grace period that satisfies the attempted
-	request.  "Our work is done."
-
-o	"enq" is the number of quiescent states still outstanding.
-
-o	"sc" is the number of times that the attempt to start a
-	new expedited grace period succeeded.
-
-
-The output of "cat rcu/rcu_preempt/rcugp" looks as follows:
-
-completed=31249  gpnum=31250  age=1  max=18
-
-These fields are taken from the rcu_state structure, and are as follows:
-
-o	"completed" is the number of grace periods that have completed.
-	It is comparable to the "c" field from rcu/rcudata in that a
-	CPU whose "c" field matches the value of "completed" is aware
-	that the corresponding RCU grace period has completed.
-
-o	"gpnum" is the number of grace periods that have started.  It is
-	similarly comparable to the "g" field from rcu/rcudata in that
-	a CPU whose "g" field matches the value of "gpnum" is aware that
-	the corresponding RCU grace period has started.
-
-	If these two fields are equal, then there is no grace period
-	in progress, in other words, RCU is idle.  On the other hand,
-	if the two fields differ (as they are above), then an RCU grace
-	period is in progress.
-
-o	"age" is the number of jiffies that the current grace period
-	has extended for, or zero if there is no grace period currently
-	in effect.
-
-o	"max" is the age in jiffies of the longest-duration grace period
-	thus far.
-
-The output of "cat rcu/rcu_preempt/rcuhier" looks as follows:
-
-c=14407 g=14408 s=0 jfq=2 j=c863 nfqs=12040/nfqsng=0(12040) fqlh=1051 oqlen=0/0
-3/3 ..>. 0:7 ^0
-e/e ..>. 0:3 ^0    d/d ..>. 4:7 ^1
-
-The fields are as follows:
-
-o	"c" is exactly the same as "completed" under rcu/rcu_preempt/rcugp.
-
-o	"g" is exactly the same as "gpnum" under rcu/rcu_preempt/rcugp.
-
-o	"s" is the current state of the force_quiescent_state()
-	state machine.
-
-o	"jfq" is the number of jiffies remaining for this grace period
-	before force_quiescent_state() is invoked to help push things
-	along.	Note that CPUs in idle mode throughout the grace period
-	will not report on their own, but rather must be checked by some
-	other CPU via force_quiescent_state().
-
-o	"j" is the low-order four hex digits of the jiffies counter.
-	Yes, Paul did run into a number of problems that turned out to
-	be due to the jiffies counter no longer counting.  Why do you ask?
-
-o	"nfqs" is the number of calls to force_quiescent_state() since
-	boot.
-
-o	"nfqsng" is the number of useless calls to force_quiescent_state(),
-	where there wasn't actually a grace period active.  This can
-	no longer happen due to grace-period processing being pushed
-	into a kthread.  The number in parentheses is the difference
-	between "nfqs" and "nfqsng", or the number of times that
-	force_quiescent_state() actually did some real work.
-
-o	"fqlh" is the number of calls to force_quiescent_state() that
-	exited immediately (without even being counted in nfqs above)
-	due to contention on ->fqslock.
-
-o	Each element of the form "3/3 ..>. 0:7 ^0" represents one rcu_node
-	structure.  Each line represents one level of the hierarchy,
-	from root to leaves.  It is best to think of the rcu_data
-	structures as forming yet another level after the leaves.
-	Note that there might be either one, two, three, or even four
-	levels of rcu_node structures, depending on the relationship
-	between CONFIG_RCU_FANOUT, CONFIG_RCU_FANOUT_LEAF (possibly
-	adjusted using the rcu_fanout_leaf kernel boot parameter), and
-	CONFIG_NR_CPUS (possibly adjusted using the nr_cpu_ids count of
-	possible CPUs for the booting hardware).
-
-	o	The numbers separated by the "/" are the qsmask followed
-		by the qsmaskinit.  The qsmask will have one bit
-		set for each entity in the next lower level that has
-		not yet checked in for the current grace period ("e"
-		indicating CPUs 5, 6, and 7 in the example above).
-		The qsmaskinit will have one bit for each entity that is
-		currently expected to check in during each grace period.
-		The value of qsmaskinit is assigned to that of qsmask
-		at the beginning of each grace period.
-
-	o	The characters separated by the ">" indicate the state
-		of the blocked-tasks lists.  A "G" preceding the ">"
-		indicates that at least one task blocked in an RCU
-		read-side critical section blocks the current grace
-		period, while a "E" preceding the ">" indicates that
-		at least one task blocked in an RCU read-side critical
-		section blocks the current expedited grace period.
-		A "T" character following the ">" indicates that at
-		least one task is blocked within an RCU read-side
-		critical section, regardless of whether any current
-		grace period (expedited or normal) is inconvenienced.
-		A "." character appears if the corresponding condition
-		does not hold, so that "..>." indicates that no tasks
-		are blocked.  In contrast, "GE>T" indicates maximal
-		inconvenience from blocked tasks.  CONFIG_TREE_RCU
-		builds of the kernel will always show "..>.".
-
-	o	The numbers separated by the ":" are the range of CPUs
-		served by this struct rcu_node.  This can be helpful
-		in working out how the hierarchy is wired together.
-
-		For example, the example rcu_node structure shown above
-		has "0:7", indicating that it covers CPUs 0 through 7.
-
-	o	The number after the "^" indicates the bit in the
-		next higher level rcu_node structure that this rcu_node
-		structure corresponds to.  For example, the "d/d ..>. 4:7
-		^1" has a "1" in this position, indicating that it
-		corresponds to the "1" bit in the "3" shown in the
-		"3/3 ..>. 0:7 ^0" entry on the next level up.
-
-
-The output of "cat rcu/rcu_sched/rcu_pending" looks as follows:
-
-  0!np=26111 qsp=29 rpq=5386 cbr=1 cng=570 gpc=3674 gps=577 nn=15903 ndw=0
-  1!np=28913 qsp=35 rpq=6097 cbr=1 cng=448 gpc=3700 gps=554 nn=18113 ndw=0
-  2!np=32740 qsp=37 rpq=6202 cbr=0 cng=476 gpc=4627 gps=546 nn=20889 ndw=0
-  3 np=23679 qsp=22 rpq=5044 cbr=1 cng=415 gpc=3403 gps=347 nn=14469 ndw=0
-  4!np=30714 qsp=4 rpq=5574 cbr=0 cng=528 gpc=3931 gps=639 nn=20042 ndw=0
-  5 np=28910 qsp=2 rpq=5246 cbr=0 cng=428 gpc=4105 gps=709 nn=18422 ndw=0
-  6!np=38648 qsp=5 rpq=7076 cbr=0 cng=840 gpc=4072 gps=961 nn=25699 ndw=0
-  7 np=37275 qsp=2 rpq=6873 cbr=0 cng=868 gpc=3416 gps=971 nn=25147 ndw=0
-
-The fields are as follows:
-
-o	The leading number is the CPU number, with "!" indicating
-	an offline CPU.
-
-o	"np" is the number of times that __rcu_pending() has been invoked
-	for the corresponding flavor of RCU.
-
-o	"qsp" is the number of times that the RCU was waiting for a
-	quiescent state from this CPU.
-
-o	"rpq" is the number of times that the CPU had passed through
-	a quiescent state, but not yet reported it to RCU.
-
-o	"cbr" is the number of times that this CPU had RCU callbacks
-	that had passed through a grace period, and were thus ready
-	to be invoked.
-
-o	"cng" is the number of times that this CPU needed another
-	grace period while RCU was idle.
-
-o	"gpc" is the number of times that an old grace period had
-	completed, but this CPU was not yet aware of it.
-
-o	"gps" is the number of times that a new grace period had started,
-	but this CPU was not yet aware of it.
-
-o	"ndw" is the number of times that a wakeup of an rcuo
-	callback-offload kthread had to be deferred in order to avoid
-	deadlock.
-
-o	"nn" is the number of times that this CPU needed nothing.
-
-
-The output of "cat rcu/rcuboost" looks as follows:
-
-0:3 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=c864 bt=c894
-    balk: nt=0 egt=4695 bt=0 nb=0 ny=56 nos=0
-4:7 tasks=.... kt=W ntb=0 neb=0 nnb=0 j=c864 bt=c894
-    balk: nt=0 egt=6541 bt=0 nb=0 ny=126 nos=0
-
-This information is output only for rcu_preempt.  Each two-line entry
-corresponds to a leaf rcu_node structure.  The fields are as follows:
-
-o	"n:m" is the CPU-number range for the corresponding two-line
-	entry.  In the sample output above, the first entry covers
-	CPUs zero through three and the second entry covers CPUs four
-	through seven.
-
-o	"tasks=TNEB" gives the state of the various segments of the
-	rnp->blocked_tasks list:
-
-	"T"	This indicates that there are some tasks that blocked
-		while running on one of the corresponding CPUs while
-		in an RCU read-side critical section.
-
-	"N"	This indicates that some of the blocked tasks are preventing
-		the current normal (non-expedited) grace period from
-		completing.
-
-	"E"	This indicates that some of the blocked tasks are preventing
-		the current expedited grace period from completing.
-
-	"B"	This indicates that some of the blocked tasks are in
-		need of RCU priority boosting.
-
-	Each character is replaced with "." if the corresponding
-	condition does not hold.
-
-o	"kt" is the state of the RCU priority-boosting kernel
-	thread associated with the corresponding rcu_node structure.
-	The state can be one of the following:
-
-	"S"	The kernel thread is stopped, in other words, all
-		CPUs corresponding to this rcu_node structure are
-		offline.
-
-	"R"	The kernel thread is running.
-
-	"W"	The kernel thread is waiting because there is no work
-		for it to do.
-
-	"Y"	The kernel thread is yielding to avoid hogging CPU.
-
-	"?"	Unknown value, indicates a bug.
-
-o	"ntb" is the number of tasks boosted.
-
-o	"neb" is the number of tasks boosted in order to complete an
-	expedited grace period.
-
-o	"nnb" is the number of tasks boosted in order to complete a
-	normal (non-expedited) grace period.  When boosting a task
-	that was blocking both an expedited and a normal grace period,
-	it is counted against the expedited total above.
-
-o	"j" is the low-order 16 bits of the jiffies counter in
-	hexadecimal.
-
-o	"bt" is the low-order 16 bits of the value that the jiffies
-	counter will have when we next start boosting, assuming that
-	the current grace period does not end beforehand.  This is
-	also in hexadecimal.
-
-o	"balk: nt" counts the number of times we didn't boost (in
-	other words, we balked) even though it was time to boost because
-	there were no blocked tasks to boost.  This situation occurs
-	when there is one blocked task on one rcu_node structure and
-	none on some other rcu_node structure.
-
-o	"egt" counts the number of times we balked because although
-	there were blocked tasks, none of them were blocking the
-	current grace period, whether expedited or otherwise.
-
-o	"bt" counts the number of times we balked because boosting
-	had already been initiated for the current grace period.
-
-o	"nb" counts the number of times we balked because there
-	was at least one task blocking the current non-expedited grace
-	period that never had blocked.  If it is already running, it
-	just won't help to boost its priority!
-
-o	"ny" counts the number of times we balked because it was
-	not yet time to start boosting.
-
-o	"nos" counts the number of times we balked for other
-	reasons, e.g., the grace period ended first.
-
-
-CONFIG_TINY_RCU debugfs Files and Formats
-
-These implementations of RCU provide a single debugfs file under the
-top-level directory RCU, namely rcu/rcudata, which displays fields in
-rcu_bh_ctrlblk and rcu_sched_ctrlblk.
-
-The output of "cat rcu/rcudata" is as follows:
-
-rcu_sched: qlen: 0
-rcu_bh: qlen: 0
-
-This is split into rcu_sched and rcu_bh sections.  The field is as
-follows:
-
-o	"qlen" is the number of RCU callbacks currently waiting either
-	for an RCU grace period or waiting to be invoked.  This is the
-	only field present for rcu_sched and rcu_bh, due to the
-	short-circuiting of grace period in those two cases.
diff --git a/Documentation/security/LoadPin.txt b/Documentation/admin-guide/LSM/LoadPin.rst
similarity index 72%
rename from Documentation/security/LoadPin.txt
rename to Documentation/admin-guide/LSM/LoadPin.rst
index e11877f..3207076 100644
--- a/Documentation/security/LoadPin.txt
+++ b/Documentation/admin-guide/LSM/LoadPin.rst
@@ -1,3 +1,7 @@
+=======
+LoadPin
+=======
+
 LoadPin is a Linux Security Module that ensures all kernel-loaded files
 (modules, firmware, etc) all originate from the same filesystem, with
 the expectation that such a filesystem is backed by a read-only device
@@ -5,13 +9,13 @@
 and/or unchangeable filesystem to enforce module and firmware loading
 restrictions without needing to sign the files individually.
 
-The LSM is selectable at build-time with CONFIG_SECURITY_LOADPIN, and
+The LSM is selectable at build-time with ``CONFIG_SECURITY_LOADPIN``, and
 can be controlled at boot-time with the kernel command line option
-"loadpin.enabled". By default, it is enabled, but can be disabled at
-boot ("loadpin.enabled=0").
+"``loadpin.enabled``". By default, it is enabled, but can be disabled at
+boot ("``loadpin.enabled=0``").
 
 LoadPin starts pinning when it sees the first file loaded. If the
 block device backing the filesystem is not read-only, a sysctl is
-created to toggle pinning: /proc/sys/kernel/loadpin/enabled. (Having
+created to toggle pinning: ``/proc/sys/kernel/loadpin/enabled``. (Having
 a mutable filesystem means pinning is mutable too, but having the
 sysctl allows for easy testing on systems with a mutable filesystem.)
diff --git a/Documentation/security/SELinux.txt b/Documentation/admin-guide/LSM/SELinux.rst
similarity index 70%
rename from Documentation/security/SELinux.txt
rename to Documentation/admin-guide/LSM/SELinux.rst
index 07eae00f..f722c9b 100644
--- a/Documentation/security/SELinux.txt
+++ b/Documentation/admin-guide/LSM/SELinux.rst
@@ -1,27 +1,33 @@
+=======
+SELinux
+=======
+
 If you want to use SELinux, chances are you will want
 to use the distro-provided policies, or install the
 latest reference policy release from
+
 	http://oss.tresys.com/projects/refpolicy
 
 However, if you want to install a dummy policy for
-testing, you can do using 'mdp' provided under
+testing, you can do so using ``mdp`` provided under
 scripts/selinux.  Note that this requires the selinux
 userspace to be installed - in particular you will
 need checkpolicy to compile the policy, and setfiles and
 fixfiles to label the filesystem.
 
 	1. Compile the kernel with selinux enabled.
-	2. Type 'make' to compile mdp.
+	2. Type ``make`` to compile ``mdp``.
 	3. Make sure that you are not running with
 	   SELinux enabled and a real policy.  If
 	   you are, reboot with selinux disabled
 	   before continuing.
-	4. Run install_policy.sh:
+	4. Run install_policy.sh::
+
 		cd scripts/selinux
 		sh install_policy.sh
 
 Step 4 will create a new dummy policy valid for your
 kernel, with a single selinux user, role, and type.
-It will compile the policy, will set your SELINUXTYPE to
-dummy in /etc/selinux/config, install the compiled policy
-as 'dummy', and relabel your filesystem.
+It will compile the policy, will set your ``SELINUXTYPE`` to
+``dummy`` in ``/etc/selinux/config``, install the compiled policy
+as ``dummy``, and relabel your filesystem.
diff --git a/Documentation/security/Smack.txt b/Documentation/admin-guide/LSM/Smack.rst
similarity index 84%
rename from Documentation/security/Smack.txt
rename to Documentation/admin-guide/LSM/Smack.rst
index 945cc63..6a5826a 100644
--- a/Documentation/security/Smack.txt
+++ b/Documentation/admin-guide/LSM/Smack.rst
@@ -1,3 +1,6 @@
+=====
+Smack
+=====
 
 
     "Good for you, you've decided to clean the elevator!"
@@ -14,6 +17,7 @@
 at hand.
 
 Smack consists of three major components:
+
     - The kernel
     - Basic utilities, which are helpful but not required
     - Configuration data
@@ -39,16 +43,24 @@
 This should make and install on most modern distributions.
 There are five commands included in smackutil:
 
-chsmack    - display or set Smack extended attribute values
-smackctl   - load the Smack access rules
-smackaccess - report if a process with one label has access
-              to an object with another
+chsmack:
+	display or set Smack extended attribute values
+
+smackctl:
+	load the Smack access rules
+
+smackaccess:
+	report if a process with one label has access
+	to an object with another
 
 These two commands are obsolete with the introduction of
 the smackfs/load2 and smackfs/cipso2 interfaces.
 
-smackload  - properly formats data for writing to smackfs/load
-smackcipso - properly formats data for writing to smackfs/cipso
+smackload:
+	properly formats data for writing to smackfs/load
+
+smackcipso:
+	properly formats data for writing to smackfs/cipso
 
 In keeping with the intent of Smack, configuration data is
 minimal and not strictly required. The most important
@@ -56,15 +68,15 @@
 If smackutil is installed the startup script will take care
 of this, but it can be done manually as well.
 
-Add this line to /etc/fstab:
+Add this line to ``/etc/fstab``::
 
     smackfs /sys/fs/smackfs smackfs defaults 0 0
 
-The /sys/fs/smackfs directory is created by the kernel.
+The ``/sys/fs/smackfs`` directory is created by the kernel.
 
 Smack uses extended attributes (xattrs) to store labels on filesystem
 objects. The attributes are stored in the extended attribute security
-name space. A process must have CAP_MAC_ADMIN to change any of these
+name space. A process must have ``CAP_MAC_ADMIN`` to change any of these
 attributes.
 
 The extended attributes that Smack uses are:
@@ -73,14 +85,17 @@
 	Used to make access control decisions. In almost all cases
 	the label given to a new filesystem object will be the label
 	of the process that created it.
+
 SMACK64EXEC
 	The Smack label of a process that execs a program file with
 	this attribute set will run with this attribute's value.
+
 SMACK64MMAP
 	Don't allow the file to be mmapped by a process whose Smack
 	label does not allow all of the access permitted to a process
 	with the label contained in this attribute. This is a very
 	specific use case for shared libraries.
+
 SMACK64TRANSMUTE
 	Can only have the value "TRUE". If this attribute is present
 	on a directory when an object is created in the directory and
@@ -89,27 +104,29 @@
 	gets the label of the directory instead of the label of the
 	creating process. If the object being created is a directory
 	the SMACK64TRANSMUTE attribute is set as well.
+
 SMACK64IPIN
 	This attribute is only available on file descriptors for sockets.
 	Use the Smack label in this attribute for access control
 	decisions on packets being delivered to this socket.
+
 SMACK64IPOUT
 	This attribute is only available on file descriptors for sockets.
 	Use the Smack label in this attribute for access control
 	decisions on packets coming from this socket.
 
-There are multiple ways to set a Smack label on a file:
+There are multiple ways to set a Smack label on a file::
 
     # attr -S -s SMACK64 -V "value" path
     # chsmack -a value path
 
 A process can see the Smack label it is running with by
-reading /proc/self/attr/current. A process with CAP_MAC_ADMIN
+reading ``/proc/self/attr/current``. A process with ``CAP_MAC_ADMIN``
 can set the process Smack by writing there.
 
 Most Smack configuration is accomplished by writing to files
 in the smackfs filesystem. This pseudo-filesystem is mounted
-on /sys/fs/smackfs.
+on ``/sys/fs/smackfs``.
 
 access
 	Provided for backward compatibility. The access2 interface
@@ -120,6 +137,7 @@
 	this file. The next read will indicate whether the access
 	would be permitted. The text will be either "1" indicating
 	access, or "0" indicating denial.
+
 access2
 	This interface reports whether a subject with the specified
 	Smack label has a particular access to an object with a
@@ -127,13 +145,17 @@
 	this file. The next read will indicate whether the access
 	would be permitted. The text will be either "1" indicating
 	access, or "0" indicating denial.
+
 ambient
 	This contains the Smack label applied to unlabeled network
 	packets.
+
 change-rule
 	This interface allows modification of existing access control rules.
-	The format accepted on write is:
+	The format accepted on write is::
+
 		"%s %s %s %s"
+
 	where the first string is the subject label, the second the
 	object label, the third the access to allow and the fourth the
 	access to deny. The access strings may contain only the characters
@@ -141,47 +163,63 @@
 	modified by enabling the permissions in the third string and disabling
 	those in the fourth string. If there is no such rule it will be
 	created using the access specified in the third and the fourth strings.
+
 cipso
 	Provided for backward compatibility. The cipso2 interface
 	is preferred and should be used instead.
 	This interface allows a specific CIPSO header to be assigned
-	to a Smack label. The format accepted on write is:
+	to a Smack label. The format accepted on write is::
+
 		"%24s%4d%4d"["%4d"]...
+
 	The first string is a fixed Smack label. The first number is
 	the level to use. The second number is the number of categories.
-	The following numbers are the categories.
-	"level-3-cats-5-19          3   2   5  19"
+	The following numbers are the categories::
+
+		"level-3-cats-5-19          3   2   5  19"
+
 cipso2
 	This interface allows a specific CIPSO header to be assigned
-	to a Smack label. The format accepted on write is:
-	"%s%4d%4d"["%4d"]...
+	to a Smack label. The format accepted on write is::
+
+		"%s%4d%4d"["%4d"]...
+
 	The first string is a long Smack label. The first number is
 	the level to use. The second number is the number of categories.
-	The following numbers are the categories.
-	"level-3-cats-5-19   3   2   5  19"
+	The following numbers are the categories::
+
+		"level-3-cats-5-19   3   2   5  19"
+
 direct
 	This contains the CIPSO level used for Smack direct label
 	representation in network packets.
+
 doi
 	This contains the CIPSO domain of interpretation used in
 	network packets.
+
 ipv6host
 	This interface allows specific IPv6 internet addresses to be
 	treated as single label hosts. Packets are sent to single
 	label hosts only from processes that have Smack write access
 	to the host label. All packets received from single label hosts
-	are given the specified label. The format accepted on write is:
+	are given the specified label. The format accepted on write is::
+
 		"%h:%h:%h:%h:%h:%h:%h:%h label" or
 		"%h:%h:%h:%h:%h:%h:%h:%h/%d label".
+
 	The "::" address shortcut is not supported.
 	If label is "-DELETE" a matched entry will be deleted.
+
 load
 	Provided for backward compatibility. The load2 interface
 	is preferred and should be used instead.
 	This interface allows access control rules in addition to
 	the system defined rules to be specified. The format accepted
-	on write is:
+	on write is::
+
 		"%24s%24s%5s"
+
 	where the first string is the subject label, the second the
 	object label, and the third the requested access. The access
 	string may contain only the characters "rwxat-", and specifies
@@ -189,17 +227,21 @@
 	permissions that are not allowed. The string "r-x--" would
 	specify read and execute access. Labels are limited to 23
 	characters in length.
+
 load2
 	This interface allows access control rules in addition to
 	the system defined rules to be specified. The format accepted
-	on write is:
+	on write is::
+
 		"%s %s %s"
+
 	where the first string is the subject label, the second the
 	object label, and the third the requested access. The access
 	string may contain only the characters "rwxat-", and specifies
 	which sort of access is allowed. The "-" is a placeholder for
 	permissions that are not allowed. The string "r-x--" would
 	specify read and execute access.
+
 load-self
 	Provided for backward compatibility. The load-self2 interface
 	is preferred and should be used instead.
@@ -208,66 +250,83 @@
 	otherwise be permitted, and are intended to provide additional
 	restrictions on the process. The format is the same as for
 	the load interface.
+
 load-self2
 	This interface allows process specific access rules to be
 	defined. These rules are only consulted if access would
 	otherwise be permitted, and are intended to provide additional
 	restrictions on the process. The format is the same as for
 	the load2 interface.
+
 logging
 	This contains the Smack logging state.
+
 mapped
 	This contains the CIPSO level used for Smack mapped label
 	representation in network packets.
+
 netlabel
 	This interface allows specific internet addresses to be
 	treated as single label hosts. Packets are sent to single
 	label hosts without CIPSO headers, but only from processes
 	that have Smack write access to the host label. All packets
 	received from single label hosts are given the specified
-	label. The format accepted on write is:
+	label. The format accepted on write is::
+
 		"%d.%d.%d.%d label" or "%d.%d.%d.%d/%d label".
+
 	If the label specified is "-CIPSO" the address is treated
 	as a host that supports CIPSO headers.
+
 onlycap
 	This contains labels processes must have for CAP_MAC_ADMIN
-	and CAP_MAC_OVERRIDE to be effective. If this file is empty
+	and ``CAP_MAC_OVERRIDE`` to be effective. If this file is empty
 	these capabilities are effective for processes with any
 	label. The values are set by writing the desired labels, separated
 	by spaces, to the file or cleared by writing "-" to the file.
+
 ptrace
 	This is used to define the current ptrace policy
-	0 - default: this is the policy that relies on Smack access rules.
-	    For the PTRACE_READ a subject needs to have a read access on
-	    object. For the PTRACE_ATTACH a read-write access is required.
-	1 - exact: this is the policy that limits PTRACE_ATTACH. Attach is
+
+	0 - default:
+	    this is the policy that relies on Smack access rules.
+	    For the ``PTRACE_READ`` a subject needs to have a read access on
+	    object. For the ``PTRACE_ATTACH`` a read-write access is required.
+
+	1 - exact:
+	    this is the policy that limits ``PTRACE_ATTACH``. Attach is
 	    only allowed when subject's and object's labels are equal.
-	    PTRACE_READ is not affected. Can be overridden with CAP_SYS_PTRACE.
-	2 - draconian: this policy behaves like the 'exact' above with an
-	    exception that it can't be overridden with CAP_SYS_PTRACE.
+	    ``PTRACE_READ`` is not affected. Can be overridden with ``CAP_SYS_PTRACE``.
+
+	2 - draconian:
+	    this policy behaves like the 'exact' above with an
+	    exception that it can't be overridden with ``CAP_SYS_PTRACE``.
+
 revoke-subject
 	Writing a Smack label here sets the access to '-' for all access
 	rules with that subject label.
+
 unconfined
-	If the kernel is configured with CONFIG_SECURITY_SMACK_BRINGUP
-	a process with CAP_MAC_ADMIN can write a label into this interface.
+	If the kernel is configured with ``CONFIG_SECURITY_SMACK_BRINGUP``
+	a process with ``CAP_MAC_ADMIN`` can write a label into this interface.
 	Thereafter, accesses that involve that label will be logged and
 	the access permitted if it wouldn't be otherwise. Note that this
 	is dangerous and can ruin the proper labeling of your system.
 	It should never be used in production.
+
 relabel-self
 	This interface contains a list of labels to which the process can
-	transition to, by writing to /proc/self/attr/current.
+	transition, by writing to ``/proc/self/attr/current``.
 	Normally a process can change its own label to any legal value, but only
-	if it has CAP_MAC_ADMIN. This interface allows a process without
-	CAP_MAC_ADMIN to relabel itself to one of labels from predefined list.
-	A process without CAP_MAC_ADMIN can change its label only once. When it
+	if it has ``CAP_MAC_ADMIN``. This interface allows a process without
+	``CAP_MAC_ADMIN`` to relabel itself to one of the labels from a predefined list.
+	A process without ``CAP_MAC_ADMIN`` can change its label only once. When it
 	does, this list will be cleared.
 	The values are set by writing the desired labels, separated
 	by spaces, to the file or cleared by writing "-" to the file.
 
 If you are using the smackload utility
-you can add access rules in /etc/smack/accesses. They take the form:
+you can add access rules in ``/etc/smack/accesses``. They take the form::
 
     subjectlabel objectlabel access
 
@@ -277,14 +336,14 @@
 
 Look for additional programs on http://schaufler-ca.com
 
-From the Smack Whitepaper:
-
-The Simplified Mandatory Access Control Kernel
+The Simplified Mandatory Access Control Kernel (Whitepaper)
+===========================================================
 
 Casey Schaufler
 casey@schaufler-ca.com
 
 Mandatory Access Control
+------------------------
 
 Computer systems employ a variety of schemes to constrain how information is
 shared among the people and services using the machine. Some of these schemes
@@ -297,6 +356,7 @@
 or programs that have access to pieces of data.
 
 Bell & LaPadula
+---------------
 
 From the middle of the 1980's until the turn of the century Mandatory Access
 Control (MAC) was very closely associated with the Bell & LaPadula security
@@ -306,6 +366,7 @@
 often cited as failing to address general needs.
 
 Domain Type Enforcement
+-----------------------
 
 Around the turn of the century Domain Type Enforcement (DTE) became popular.
 This scheme organizes users, programs, and data into domains that are
@@ -316,6 +377,7 @@
 disabled or used in limited ways in the majority of cases.
 
 Smack
+-----
 
 Smack is a Mandatory Access Control mechanism designed to provide useful MAC
 while avoiding the pitfalls of its predecessors. The limitations of Bell &
@@ -326,46 +388,55 @@
 modes already in use.
 
 Smack Terminology
+-----------------
 
 The jargon used to talk about Smack will be familiar to those who have dealt
 with other MAC systems and shouldn't be too difficult for the uninitiated to
 pick up. There are four terms that are used in a specific way and that are
 especially important:
 
-	Subject: A subject is an active entity on the computer system.
+  Subject:
+	A subject is an active entity on the computer system.
 	On Smack a subject is a task, which is in turn the basic unit
 	of execution.
 
-	Object: An object is a passive entity on the computer system.
+  Object:
+	An object is a passive entity on the computer system.
 	On Smack files of all types, IPC, and tasks can be objects.
 
-	Access: Any attempt by a subject to put information into or get
+  Access:
+	Any attempt by a subject to put information into or get
 	information from an object is an access.
 
-	Label: Data that identifies the Mandatory Access Control
+  Label:
+	Data that identifies the Mandatory Access Control
 	characteristics of a subject or an object.
 
 These definitions are consistent with the traditional use in the security
 community. There are also some terms from Linux that are likely to crop up:
 
-	Capability: A task that possesses a capability has permission to
+  Capability:
+	A task that possesses a capability has permission to
 	violate an aspect of the system security policy, as identified by
 	the specific capability. A task that possesses one or more
 	capabilities is a privileged task, whereas a task with no
 	capabilities is an unprivileged task.
 
-	Privilege: A task that is allowed to violate the system security
+  Privilege:
+	A task that is allowed to violate the system security
 	policy is said to have privilege. As of this writing a task can
 	have privilege either by possessing capabilities or by having an
 	effective user of root.
 
 Smack Basics
+------------
 
 Smack is an extension to a Linux system. It enforces additional restrictions
 on what subjects can access which objects, based on the labels attached to
 each of the subject and the object.
 
 Labels
+~~~~~~
 
 Smack labels are ASCII character strings. They can be up to 255 characters
 long, but keeping them to twenty-three characters is recommended.
@@ -377,7 +448,7 @@
 (quote) and '"' (double-quote) characters.
 Smack labels cannot begin with a '-'. This is reserved for special options.
 
-There are some predefined labels:
+There are some predefined labels::
 
 	_ 	Pronounced "floor", a single underscore character.
 	^ 	Pronounced "hat", a single circumflex character.
@@ -390,14 +461,18 @@
 mechanism.
 
 Access Rules
+~~~~~~~~~~~~
 
 Smack uses the traditional access modes of Linux. These modes are read,
 execute, write, and occasionally append. There are a few cases where the
 access mode may not be obvious. These include:
 
-	Signals: A signal is a write operation from the subject task to
+  Signals:
+	A signal is a write operation from the subject task to
 	the object task.
-	Internet Domain IPC: Transmission of a packet is considered a
+
+  Internet Domain IPC:
+	Transmission of a packet is considered a
 	write operation from the source task to the destination task.
 
 Smack restricts access based on the label attached to a subject and the label
@@ -417,6 +492,7 @@
 	7. Any other access is denied.
 
 Smack Access Rules
+~~~~~~~~~~~~~~~~~~
 
 With the isolation provided by Smack access separation is simple. There are
 many interesting cases where limited access by subjects to objects with
@@ -427,8 +503,9 @@
 mechanism for specifying rules allowing access between labels.
 
 Access Rule Format
+~~~~~~~~~~~~~~~~~~
 
-The format of an access rule is:
+The format of an access rule is::
 
 	subject-label object-label access
 
@@ -446,7 +523,7 @@
 
 Uppercase values for the specification letters are allowed as well.
 Access mode specifications can be in any order. Examples of acceptable rules
-are:
+are::
 
 	TopSecret Secret  rx
 	Secret    Unclass R
@@ -456,7 +533,7 @@
 	New       Old     rRrRr
 	Closed    Off     -
 
-Examples of unacceptable rules are:
+Examples of unacceptable rules are::
 
 	Top Secret Secret     rx
 	Ace        Ace        r
@@ -469,6 +546,7 @@
 as "ar". A lone dash is used to specify that no access should be allowed.
 
 Applying Access Rules
+~~~~~~~~~~~~~~~~~~~~~
 
 The developers of Linux rarely define new sorts of things, usually importing
 schemes and concepts from other systems. Most often, the other systems are
@@ -511,6 +589,7 @@
 receiver. The receiver is not required to have read access to the sender.
 
 Setting Access Rules
+~~~~~~~~~~~~~~~~~~~~
 
 The configuration file /etc/smack/accesses contains the rules to be set at
 system startup. The contents are written to the special file
@@ -520,6 +599,7 @@
 specification.
 
 Task Attribute
+~~~~~~~~~~~~~~
 
 The Smack label of a process can be read from /proc/<pid>/attr/current. A
 process can read its own Smack label from /proc/self/attr/current. A
@@ -527,12 +607,14 @@
 /proc/self/attr/current but not the label of another process.
 
 File Attribute
+~~~~~~~~~~~~~~
 
 The Smack label of a filesystem object is stored as an extended attribute
 named SMACK64 on the file. This attribute is in the security namespace. It can
 only be changed by a process with privilege.
 
 Privilege
+~~~~~~~~~
 
 A process with CAP_MAC_OVERRIDE or CAP_MAC_ADMIN is privileged.
 CAP_MAC_OVERRIDE allows the process access to objects it would
@@ -540,6 +622,7 @@
 Smack data, including rules and attributes.
 
 Smack Networking
+~~~~~~~~~~~~~~~~
 
 As mentioned before, Smack enforces access control on network protocol
 transmissions. Every packet sent by a Smack process is tagged with its Smack
@@ -551,6 +634,7 @@
 the packet is dropped.
 
 CIPSO Configuration
+~~~~~~~~~~~~~~~~~~~
 
 It is normally unnecessary to specify the CIPSO configuration. The default
 values used by the system handle all internal cases. Smack will compose CIPSO
@@ -571,13 +655,13 @@
 The label and category set are mapped to a Smack label as defined in
 /etc/smack/cipso.
 
-A Smack/CIPSO mapping has the form:
+A Smack/CIPSO mapping has the form::
 
 	smack level [category [category]*]
 
 Smack does not expect the level or category sets to be related in any
 particular way and does not assume or assign accesses based on them. Some
-examples of mappings:
+examples of mappings::
 
 	TopSecret 7
 	TS:A,B    7 1 2
@@ -597,25 +681,30 @@
 /sys/fs/smackfs/direct.
 
 Socket Attributes
+~~~~~~~~~~~~~~~~~
 
 There are two attributes that are associated with sockets. These attributes
 can only be set by privileged tasks, but any task can read them for their own
 sockets.
 
-	SMACK64IPIN: The Smack label of the task object. A privileged
+  SMACK64IPIN:
+	The Smack label of the task object. A privileged
 	program that will enforce policy may set this to the star label.
 
-	SMACK64IPOUT: The Smack label transmitted with outgoing packets.
+  SMACK64IPOUT:
+	The Smack label transmitted with outgoing packets.
 	A privileged program may set this to match the label of another
 	task with which it hopes to communicate.
 
 Smack Netlabel Exceptions
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
 You will often find that your labeled application has to talk to the outside,
 unlabeled world. To do this there's a special file /sys/fs/smackfs/netlabel
-where you can add some exceptions in the form of :
-@IP1	   LABEL1 or
-@IP2/MASK  LABEL2
+where you can add some exceptions in the form of::
+
+	@IP1	   LABEL1 or
+	@IP2/MASK  LABEL2
 
 It means that your application will have unlabeled access to @IP1 if it has
 write access on LABEL1, and access to the subnet @IP2/MASK if it has write
@@ -624,28 +713,32 @@
 Entries in the /sys/fs/smackfs/netlabel file are matched by longest mask
 first, like in classless IPv4 routing.
 
-A special label '@' and an option '-CIPSO' can be used there :
-@      means Internet, any application with any label has access to it
--CIPSO means standard CIPSO networking
+A special label '@' and an option '-CIPSO' can be used there::
 
-If you don't know what CIPSO is and don't plan to use it, you can just do :
-echo 127.0.0.1 -CIPSO > /sys/fs/smackfs/netlabel
-echo 0.0.0.0/0 @      > /sys/fs/smackfs/netlabel
+	@      means Internet, any application with any label has access to it
+	-CIPSO means standard CIPSO networking
+
+If you don't know what CIPSO is and don't plan to use it, you can just do::
+
+	echo 127.0.0.1 -CIPSO > /sys/fs/smackfs/netlabel
+	echo 0.0.0.0/0 @      > /sys/fs/smackfs/netlabel
 
 If you use CIPSO on your 192.168.0.0/16 local network and need also unlabeled
-Internet access, you can have :
-echo 127.0.0.1      -CIPSO > /sys/fs/smackfs/netlabel
-echo 192.168.0.0/16 -CIPSO > /sys/fs/smackfs/netlabel
-echo 0.0.0.0/0      @      > /sys/fs/smackfs/netlabel
+Internet access, you can have::
 
+	echo 127.0.0.1      -CIPSO > /sys/fs/smackfs/netlabel
+	echo 192.168.0.0/16 -CIPSO > /sys/fs/smackfs/netlabel
+	echo 0.0.0.0/0      @      > /sys/fs/smackfs/netlabel
 
 Writing Applications for Smack
+------------------------------
 
 There are three sorts of applications that will run on a Smack system. How an
 application interacts with Smack will determine what it will have to do to
 work properly under Smack.
 
 Smack Ignorant Applications
+---------------------------
 
 By far the majority of applications have no reason whatever to care about the
 unique properties of Smack. Since invoking a program has no impact on the
@@ -653,12 +746,14 @@
 whether the process has execute access to the program.
 
 Smack Relevant Applications
+---------------------------
 
 Some programs can be improved by teaching them about Smack, but do not make
 any security decisions themselves. The utility ls(1) is one example of such a
 program.
 
 Smack Enforcing Applications
+----------------------------
 
 These are special programs that not only know about Smack, but participate in
 the enforcement of system policy. In most cases these are the programs that
@@ -666,15 +761,16 @@
 to processes running with various labels.
 
 File System Interfaces
+----------------------
 
 Smack maintains labels on file system objects using extended attributes. The
 Smack label of a file, directory, or other file system object can be obtained
-using getxattr(2).
+using getxattr(2)::
 
 	len = getxattr("/", "security.SMACK64", value, sizeof (value));
 
 will put the Smack label of the root directory into value. A privileged
-process can set the Smack label of a file system object with setxattr(2).
+process can set the Smack label of a file system object with setxattr(2)::
 
 	len = strlen("Rubble");
 	rc = setxattr("/foo", "security.SMACK64", "Rubble", len, 0);
@@ -683,17 +779,18 @@
 privilege.
 
 Socket Interfaces
+-----------------
 
 The socket attributes can be read using fgetxattr(2).
 
 A privileged process can set the Smack label of outgoing packets with
-fsetxattr(2).
+fsetxattr(2)::
 
 	len = strlen("Rubble");
 	rc = fsetxattr(fd, "security.SMACK64IPOUT", "Rubble", len, 0);
 
 will set the Smack label "Rubble" on packets going out from the socket if the
-program has appropriate privilege.
+program has appropriate privilege::
 
 	rc = fsetxattr(fd, "security.SMACK64IPIN", "*", strlen("*"), 0);
 
@@ -701,33 +798,40 @@
 packets will be checked if the program has appropriate privilege.
 
 Administration
+--------------
 
 Smack supports some mount options:
 
-	smackfsdef=label: specifies the label to give files that lack
+  smackfsdef=label:
+	specifies the label to give files that lack
 	the Smack label extended attribute.
 
-	smackfsroot=label: specifies the label to assign the root of the
+  smackfsroot=label:
+	specifies the label to assign the root of the
 	file system if it lacks the Smack extended attribute.
 
-	smackfshat=label: specifies a label that must have read access to
+  smackfshat=label:
+	specifies a label that must have read access to
 	all labels set on the filesystem. Not yet enforced.
 
-	smackfsfloor=label: specifies a label to which all labels set on the
+  smackfsfloor=label:
+	specifies a label to which all labels set on the
 	filesystem must have read access. Not yet enforced.
 
 These mount options apply to all file system types.
 
 Smack auditing
+--------------
 
 If you want Smack auditing of security events, you need to set CONFIG_AUDIT
 in your kernel configuration.
 By default, all denied events will be audited. You can change this behavior by
-writing a single character to the /sys/fs/smackfs/logging file :
-0 : no logging
-1 : log denied (default)
-2 : log accepted
-3 : log denied & accepted
+writing a single character to the /sys/fs/smackfs/logging file::
+
+	0 : no logging
+	1 : log denied (default)
+	2 : log accepted
+	3 : log denied & accepted
 
 Events are logged as 'key=value' pairs; for each event you will at least get
 the subject, the object, the rights requested, the action, the kernel function
@@ -735,6 +839,7 @@
 audited.
 
 Bringup Mode
+------------
 
 Bringup mode provides logging features that can make application
 configuration and system bringup easier. Configure the kernel with
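
To make the label interfaces above concrete, here is a minimal user-space
sketch that prints the caller's own Smack label from
``/proc/self/attr/current`` and reads the ``security.SMACK64`` attribute of a
file with getxattr(2).  It assumes a Smack-enabled kernel; the path
``/etc/passwd`` is only an illustrative target, not something the Smack
documentation prescribes::

	/*
	 * Sketch only: print this process's Smack label and the label of a
	 * file.  "/etc/passwd" is an arbitrary example path.
	 */
	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char label[256];
		ssize_t len;
		FILE *f = fopen("/proc/self/attr/current", "r");

		if (f) {
			if (fgets(label, sizeof(label), f))
				printf("process label: %s\n", label);
			fclose(f);
		}

		len = getxattr("/etc/passwd", "security.SMACK64",
			       label, sizeof(label) - 1);
		if (len >= 0) {
			label[len] = '\0';
			printf("file label: %s\n", label);
		} else {
			perror("getxattr");
		}
		return 0;
	}
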
diff --git a/Documentation/security/Yama.txt b/Documentation/admin-guide/LSM/Yama.rst
similarity index 60%
rename from Documentation/security/Yama.txt
rename to Documentation/admin-guide/LSM/Yama.rst
index d9ee7d7..13468ea 100644
--- a/Documentation/security/Yama.txt
+++ b/Documentation/admin-guide/LSM/Yama.rst
@@ -1,13 +1,14 @@
+====
+Yama
+====
+
 Yama is a Linux Security Module that collects system-wide DAC security
 protections that are not handled by the core kernel itself. This is
-selectable at build-time with CONFIG_SECURITY_YAMA, and can be controlled
-at run-time through sysctls in /proc/sys/kernel/yama:
+selectable at build-time with ``CONFIG_SECURITY_YAMA``, and can be controlled
+at run-time through sysctls in ``/proc/sys/kernel/yama``:
 
-- ptrace_scope
-
-==============================================================
-
-ptrace_scope:
+ptrace_scope
+============
 
 As Linux grows in popularity, it will become a larger target for
 malware. One particularly troubling weakness of the Linux process
@@ -25,47 +26,49 @@
 Since ptrace is not commonly used by non-developers and non-admins, system
 builders should be allowed the option to disable this debugging system.
 
-For a solution, some applications use prctl(PR_SET_DUMPABLE, ...) to
+For a solution, some applications use ``prctl(PR_SET_DUMPABLE, ...)`` to
 specifically disallow such ptrace attachment (e.g. ssh-agent), but many
 do not. A more general solution is to only allow ptrace directly from a
 parent to a child process (i.e. direct "gdb EXE" and "strace EXE" still
-work), or with CAP_SYS_PTRACE (i.e. "gdb --pid=PID", and "strace -p PID"
+work), or with ``CAP_SYS_PTRACE`` (i.e. "gdb --pid=PID", and "strace -p PID"
 still work as root).
 
 In mode 1, software that has defined application-specific relationships
 between a debugging process and its inferior (crash handlers, etc),
-prctl(PR_SET_PTRACER, pid, ...) can be used. An inferior can declare which
-other process (and its descendants) are allowed to call PTRACE_ATTACH
+``prctl(PR_SET_PTRACER, pid, ...)`` can be used. An inferior can declare which
+other process (and its descendants) are allowed to call ``PTRACE_ATTACH``
 against it. Only one such declared debugging process can exist for
 each inferior at a time. For example, this is used by KDE, Chromium, and
 Firefox's crash handlers, and by Wine for allowing only Wine processes
 to ptrace each other. If a process wishes to entirely disable these ptrace
-restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...)
+restrictions, it can call ``prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...)``
 so that any otherwise allowed process (even those in external pid namespaces)
 may attach.
 
-The sysctl settings (writable only with CAP_SYS_PTRACE) are:
+The sysctl settings (writable only with ``CAP_SYS_PTRACE``) are:
 
-0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other
+0 - classic ptrace permissions:
+    a process can ``PTRACE_ATTACH`` to any other
     process running under the same uid, as long as it is dumpable (i.e.
     did not transition uids, start privileged, or have called
-    prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is
+    ``prctl(PR_SET_DUMPABLE...)`` already). Similarly, ``PTRACE_TRACEME`` is
     unchanged.
 
-1 - restricted ptrace: a process must have a predefined relationship
-    with the inferior it wants to call PTRACE_ATTACH on. By default,
+1 - restricted ptrace:
+    a process must have a predefined relationship
+    with the inferior it wants to call ``PTRACE_ATTACH`` on. By default,
     this relationship is that of only its descendants when the above
     classic criteria are also met. To change the relationship, an
-    inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare
-    an allowed debugger PID to call PTRACE_ATTACH on the inferior.
-    Using PTRACE_TRACEME is unchanged.
+    inferior can call ``prctl(PR_SET_PTRACER, debugger, ...)`` to declare
+    an allowed debugger PID to call ``PTRACE_ATTACH`` on the inferior.
+    Using ``PTRACE_TRACEME`` is unchanged.
 
-2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace
-    with PTRACE_ATTACH, or through children calling PTRACE_TRACEME.
+2 - admin-only attach:
+    only processes with ``CAP_SYS_PTRACE`` may use ptrace
+    with ``PTRACE_ATTACH``, or through children calling ``PTRACE_TRACEME``.
 
-3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via
-    PTRACE_TRACEME. Once set, this sysctl value cannot be changed.
+3 - no attach:
+    no processes may use ptrace with ``PTRACE_ATTACH`` nor via
+    ``PTRACE_TRACEME``. Once set, this sysctl value cannot be changed.
 
 The original children-only logic was based on the restrictions in grsecurity.
-
-==============================================================
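
As a small illustration of the mode 1 behaviour described above, an inferior
that wants one specific helper (a crash handler, say) to be able to attach can
nominate it with prctl(2).  This is only a sketch; the debugger PID below is
hypothetical, and ``PR_SET_PTRACER`` is only meaningful on a kernel with Yama
enabled::

	/*
	 * Sketch: under ptrace_scope=1, nominate one process that may
	 * PTRACE_ATTACH to us.  The PID below is purely illustrative.
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>

	int main(void)
	{
		pid_t debugger_pid = 1234;	/* hypothetical crash handler */

		if (prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0) != 0)
			perror("PR_SET_PTRACER");

		/* Or allow any otherwise-permitted process to attach:
		 * prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);
		 */
		return 0;
	}
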
diff --git a/Documentation/security/apparmor.txt b/Documentation/admin-guide/LSM/apparmor.rst
similarity index 65%
rename from Documentation/security/apparmor.txt
rename to Documentation/admin-guide/LSM/apparmor.rst
index 93c1fd7..3e9734b 100644
--- a/Documentation/security/apparmor.txt
+++ b/Documentation/admin-guide/LSM/apparmor.rst
@@ -1,4 +1,9 @@
---- What is AppArmor? ---
+========
+AppArmor
+========
+
+What is AppArmor?
+=================
 
 AppArmor is a MAC style security extension for the Linux kernel.  It implements
 a task centered policy, with task "profiles" being created and loaded
@@ -6,34 +11,41 @@
 them run in an unconfined state which is equivalent to standard Linux DAC
 permissions.
 
---- How to enable/disable ---
+How to enable/disable
+=====================
 
-set CONFIG_SECURITY_APPARMOR=y
+set ``CONFIG_SECURITY_APPARMOR=y``
 
-If AppArmor should be selected as the default security module then
-   set CONFIG_DEFAULT_SECURITY="apparmor"
-   and CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
+If AppArmor should be selected as the default security module then set::
+
+   CONFIG_DEFAULT_SECURITY="apparmor"
+   CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
 
 Build the kernel
 
 If AppArmor is not the default security module it can be enabled by passing
-security=apparmor on the kernel's command line.
+``security=apparmor`` on the kernel's command line.
 
 If AppArmor is the default security module it can be disabled by passing
-apparmor=0, security=XXXX (where XXX is valid security module), on the
-kernel's command line
+``apparmor=0, security=XXXX`` (where ``XXXX`` is a valid security module) on the
+kernel's command line.
 
 For AppArmor to enforce any restrictions beyond standard Linux DAC permissions
 policy must be loaded into the kernel from user space (see the Documentation
 and tools links).
 
---- Documentation ---
+Documentation
+=============
 
-Documentation can be found on the wiki.
+Documentation can be found on the wiki, linked below.
 
---- Links ---
+Links
+=====
 
 Mailing List - apparmor@lists.ubuntu.com
+
 Wiki - http://apparmor.wiki.kernel.org/
+
 User space tools - https://launchpad.net/apparmor
+
 Kernel module - git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git
diff --git a/Documentation/security/LSM.txt b/Documentation/admin-guide/LSM/index.rst
similarity index 62%
rename from Documentation/security/LSM.txt
rename to Documentation/admin-guide/LSM/index.rst
index c2683f2..c980dfe 100644
--- a/Documentation/security/LSM.txt
+++ b/Documentation/admin-guide/LSM/index.rst
@@ -1,12 +1,13 @@
-Linux Security Module framework
--------------------------------
+===========================
+Linux Security Module Usage
+===========================
 
 The Linux Security Module (LSM) framework provides a mechanism for
 various security checks to be hooked by new kernel extensions. The name
 "module" is a bit of a misnomer since these extensions are not actually
 loadable kernel modules. Instead, they are selectable at build-time via
 CONFIG_DEFAULT_SECURITY and can be overridden at boot-time via the
-"security=..." kernel command line argument, in the case where multiple
+``"security=..."`` kernel command line argument, in the case where multiple
 LSMs were built into a given kernel.
 
 The primary users of the LSM interface are Mandatory Access Control
@@ -19,23 +20,22 @@
 Without a specific LSM built into the kernel, the default LSM will be the
 Linux capabilities system. Most LSMs choose to extend the capabilities
 system, building their checks on top of the defined capability hooks.
-For more details on capabilities, see capabilities(7) in the Linux
+For more details on capabilities, see ``capabilities(7)`` in the Linux
 man-pages project.
 
 A list of the active security modules can be found by reading
-/sys/kernel/security/lsm. This is a comma separated list, and
+``/sys/kernel/security/lsm``. This is a comma separated list, and
 will always include the capability module. The list reflects the
 order in which checks are made. The capability module will always
 be first, followed by any "minor" modules (e.g. Yama) and then
 the one "major" module (e.g. SELinux) if there is one configured.
 
-Based on https://lkml.org/lkml/2007/10/26/215,
-a new LSM is accepted into the kernel when its intent (a description of
-what it tries to protect against and in what cases one would expect to
-use it) has been appropriately documented in Documentation/security/.
-This allows an LSM's code to be easily compared to its goals, and so
-that end users and distros can make a more informed decision about which
-LSMs suit their requirements.
+.. toctree::
+   :maxdepth: 1
 
-For extensive documentation on the available LSM hook interfaces, please
-see include/linux/security.h.
+   apparmor
+   LoadPin
+   SELinux
+   Smack
+   tomoyo
+   Yama
diff --git a/Documentation/security/tomoyo.txt b/Documentation/admin-guide/LSM/tomoyo.rst
similarity index 85%
rename from Documentation/security/tomoyo.txt
rename to Documentation/admin-guide/LSM/tomoyo.rst
index 200a2d3..a594721 100644
--- a/Documentation/security/tomoyo.txt
+++ b/Documentation/admin-guide/LSM/tomoyo.rst
@@ -1,21 +1,30 @@
---- What is TOMOYO? ---
+======
+TOMOYO
+======
+
+What is TOMOYO?
+===============
 
 TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel.
 
 LiveCD-based tutorials are available at
+
 http://tomoyo.sourceforge.jp/1.7/1st-step/ubuntu10.04-live/
-http://tomoyo.sourceforge.jp/1.7/1st-step/centos5-live/ .
+http://tomoyo.sourceforge.jp/1.7/1st-step/centos5-live/
+
 Though these tutorials use non-LSM version of TOMOYO, they are useful for you
 to know what TOMOYO is.
 
---- How to enable TOMOYO? ---
+How to enable TOMOYO?
+=====================
 
-Build the kernel with CONFIG_SECURITY_TOMOYO=y and pass "security=tomoyo" on
+Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on
 kernel's command line.
 
 Please see http://tomoyo.sourceforge.jp/2.3/ for details.
 
---- Where is documentation? ---
+Where is the documentation?
+===========================
 
 User <-> Kernel interface documentation is available at
 http://tomoyo.sourceforge.jp/2.3/policy-reference.html .
@@ -42,7 +51,8 @@
   Realities of Mainlining
     http://sourceforge.jp/projects/tomoyo/docs/lfj2008.pdf
 
---- What is future plan? ---
+What is the future plan?
+========================
 
 We believe that inode based security and name based security are complementary
 and both should be used together. But unfortunately, so far, we cannot enable
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index b96e80f..b5343c5 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -55,12 +55,6 @@
    contains information about the problems, which may result by upgrading
    your kernel.
 
- - The Documentation/DocBook/ subdirectory contains several guides for
-   kernel developers and users.  These guides can be rendered in a
-   number of formats:  PostScript (.ps), PDF, HTML, & man-pages, among others.
-   After installation, ``make psdocs``, ``make pdfdocs``, ``make htmldocs``,
-   or ``make mandocs`` will render the documentation in the requested format.
-
 Installing the kernel source
 ----------------------------
 
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index c9cea2e..6b71852 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -369,8 +369,10 @@
 		237 = /dev/loop-control Loopback control device
 		238 = /dev/vhost-net	Host kernel accelerator for virtio net
 		239 = /dev/uhid		User-space I/O driver support for HID subsystem
+		240 = /dev/userio	Serio driver testing device
+		241 = /dev/vhost-vsock	Host kernel driver for virtio vsock
 
-		240-254			Reserved for local use
+		242-254			Reserved for local use
 		255			Reserved for MISC_DYNAMIC_MINOR
 
   11 char	Raw keyboard device	(Linux/SPARC only)
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 8c60a8a..5bb9161 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -61,6 +61,8 @@
    java
    ras
    pm/index
+   thunderbolt
+   LSM/index
 
 .. only::  subproject and html
 
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 15f79c2..f24ee1c 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -649,6 +649,13 @@
 			/proc/<pid>/coredump_filter.
 			See also Documentation/filesystems/proc.txt.
 
+	coresight_cpu_debug.enable
+			[ARM,ARM64]
+			Format: <bool>
+			Enable/disable the CPU sampling based debugging.
+			0: default value, disable debugging
+			1: enable debugging at boot time
+
 	cpuidle.off=1	[CPU_IDLE]
 			disable the cpuidle sub-system
 
@@ -720,7 +727,8 @@
 			See also Documentation/input/joystick-parport.txt
 
 	ddebug_query=   [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
-			time. See Documentation/dynamic-debug-howto.txt for
+			time. See
+			Documentation/admin-guide/dynamic-debug-howto.rst for
 			details.  Deprecated, see dyndbg.
 
 	debug		[KNL] Enable kernel debugging (events log level).
@@ -866,6 +874,15 @@
 
 	dscc4.setup=	[NET]
 
+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs.  Useful for driver authors to determine
@@ -874,7 +891,8 @@
 	dyndbg[="val"]		[KNL,DYNAMIC_DEBUG]
 	module.dyndbg[="val"]
 			Enable debug messages at boot time.  See
-			Documentation/dynamic-debug-howto.txt for details.
+			Documentation/admin-guide/dynamic-debug-howto.rst
+			for details.
 
 	nompx		[X86] Disables Intel Memory Protection Extensions.
 			See Documentation/x86/intel_mpx.txt for more
@@ -945,6 +963,12 @@
 			must already be setup and configured. Options are not
 			yet supported.
 
+		owl,<addr>
+			Start an early, polled-mode console on a serial port
+			of an Actions Semi SoC, such as S500 or S900, at the
+			specified address. The serial port must already be
+			setup and configured. Options are not yet supported.
+
 		smh	Use ARM semihosting calls for early console.
 
 		s3c2410,<addr>
@@ -1477,12 +1501,21 @@
 			in crypto/hash_info.h.
 
 	ima_policy=	[IMA]
-			The builtin measurement policy to load during IMA
-			setup.  Specyfing "tcb" as the value, measures all
-			programs exec'd, files mmap'd for exec, and all files
-			opened with the read mode bit set by either the
-			effective uid (euid=0) or uid=0.
-			Format: "tcb"
+			The builtin policies to load during IMA setup.
+			Format: "tcb | appraise_tcb | secure_boot"
+
+			The "tcb" policy measures all programs exec'd, files
+			mmap'd for exec, and all files opened with the read
+			mode bit set by either the effective uid (euid=0) or
+			uid=0.
+
+			The "appraise_tcb" policy appraises the integrity of
+			all files owned by root. (This is the equivalent
+			of ima_appraise_tcb.)
+
+			The "secure_boot" policy appraises the integrity
+			of files (e.g. kexec kernel image, kernel modules,
+			firmware, policy, etc) based on file signatures.
 
 	ima_tcb		[IMA] Deprecated.  Use ima_policy= instead.
 			Load a policy which meets the needs of the Trusted
@@ -2127,6 +2160,12 @@
 	memmap=nn[KMG]@ss[KMG]
 			[KNL] Force usage of a specific region of memory.
 			Region of memory to be used is from ss to ss+nn.
+			If @ss[KMG] is omitted, it is equivalent to mem=nn[KMG],
+			which limits max address to nn[KMG].
+			Multiple different regions can be specified,
+			comma delimited.
+			Example:
+				memmap=100M@2G,100M#3G,1G!1024G
 
 	memmap=nn[KMG]#ss[KMG]
 			[KNL,ACPI] Mark specific memory as ACPI data.
@@ -2139,6 +2178,9 @@
 			         memmap=64K$0x18690000
 			         or
 			         memmap=0x10000$0x18690000
+			Some bootloaders, such as Grub2, may need an escape
+			character before '$'; otherwise '$' and the following
+			number will be eaten.
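+			         For example, in a Grub2 config file the first
+			         example above would typically need to be
+			         written as:
+			         memmap=64K\$0x18690000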
 
 	memmap=nn[KMG]!ss[KMG]
 			[KNL,X86] Mark specific memory as protected.
@@ -3229,21 +3271,17 @@
 
 	rcutree.gp_cleanup_delay=	[KNL]
 			Set the number of jiffies to delay each step of
-			RCU grace-period cleanup.  This only has effect
-			when CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP is set.
+			RCU grace-period cleanup.
 
 	rcutree.gp_init_delay=	[KNL]
 			Set the number of jiffies to delay each step of
-			RCU grace-period initialization.  This only has
-			effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT
-			is set.
+			RCU grace-period initialization.
 
 	rcutree.gp_preinit_delay=	[KNL]
 			Set the number of jiffies to delay each step of
 			RCU grace-period pre-initialization, that is,
 			the propagation of recent CPU-hotplug changes up
-			the rcu_node combining tree.  This only has effect
-			when CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT is set.
+			the rcu_node combining tree.
 
 	rcutree.rcu_fanout_exact= [KNL]
 			Disable autobalancing of the rcu_node combining
@@ -3319,6 +3357,17 @@
 			This wake_up() will be accompanied by a
 			WARN_ONCE() splat and an ftrace_dump().
 
+	rcuperf.gp_async= [KNL]
+			Measure performance of asynchronous
+			grace-period primitives such as call_rcu().
+
+	rcuperf.gp_async_max= [KNL]
+			Specify the maximum number of outstanding
+			callbacks per writer thread.  When a writer
+			thread exceeds this limit, it invokes the
+			corresponding flavor of rcu_barrier() to allow
+			previously posted callbacks to drain.
+
 	rcuperf.gp_exp= [KNL]
 			Measure performance of expedited synchronous
 			grace-period primitives.
@@ -3346,17 +3395,22 @@
 	rcuperf.perf_runnable= [BOOT]
 			Start rcuperf running at boot time.
 
+	rcuperf.perf_type= [KNL]
+			Specify the RCU implementation to test.
+
 	rcuperf.shutdown= [KNL]
 			Shut the system down after performance tests
 			complete.  This is useful for hands-off automated
 			testing.
 
-	rcuperf.perf_type= [KNL]
-			Specify the RCU implementation to test.
-
 	rcuperf.verbose= [KNL]
 			Enable additional printk() statements.
 
+	rcuperf.writer_holdoff= [KNL]
+			Write-side holdoff between grace periods,
+			in microseconds.  The default of zero says
+			no holdoff.
+
 	rcutorture.cbflood_inter_holdoff= [KNL]
 			Set holdoff time (jiffies) between successive
 			callback-flood tests.
@@ -3794,6 +3848,15 @@
 	spia_pedr=
 	spia_peddr=
 
+	srcutree.counter_wrap_check [KNL]
+			Specifies how frequently to check for
+			grace-period sequence counter wrap for the
+			srcu_data structure's ->srcu_gp_seq_needed field.
+			The greater the number of bits set in this kernel
+			parameter, the less frequently counter wrap will
+			be checked for.  Note that the bottom two bits
+			are ignored.
+
 	srcutree.exp_holdoff [KNL]
 			Specifies how many nanoseconds must elapse
 			since the end of the last SRCU grace period for
@@ -3802,6 +3865,13 @@
 			expediting.  Set to zero to disable automatic
 			expediting.
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
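+			Example (a hypothetical value doubling the default
+			gap):
+				stack_guard_gap=512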
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
 
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 09aa2e9..463cf7e 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -269,16 +269,16 @@
 ``scaling_cur_freq``
 	Current frequency of all of the CPUs belonging to this policy (in kHz).
 
-	For the majority of scaling drivers, this is the frequency of the last
-	P-state requested by the driver from the hardware using the scaling
+	In the majority of cases, this is the frequency of the last P-state
+	requested by the scaling driver from the hardware using the scaling
 	interface provided by it, which may or may not reflect the frequency
 	the CPU is actually running at (due to hardware design and other
 	limitations).
 
-	Some scaling drivers (e.g. |intel_pstate|) attempt to provide
-	information more precisely reflecting the current CPU frequency through
-	this attribute, but that still may not be the exact current CPU
-	frequency as seen by the hardware at the moment.
+	Some architectures (e.g. ``x86``) may attempt to provide information
+	more precisely reflecting the current CPU frequency through this
+	attribute, but that still may not be the exact current CPU frequency as
+	seen by the hardware at the moment.
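+
+	For example, the value can be read from sysfs (the CPU number and
+	the value shown are only an illustration)::
+
+	  # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
+	  2100000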
 
 ``scaling_driver``
 	The scaling driver currently in use.
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 33d7039..1d62498 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -157,10 +157,8 @@
 the processor model and platform configuration.
 
 It selects the maximum P-state it is allowed to use, subject to limits set via
-``sysfs``, every time the P-state selection computations are carried out by the
-driver's utilization update callback for the given CPU (that does not happen
-more often than every 10 ms), but the hardware configuration will not be changed
-if the new P-state is the same as the current one.
+``sysfs``, every time the driver configuration for the given CPU is updated
+(e.g. via ``sysfs``).
 
 This is the default P-state selection algorithm if the
 :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
diff --git a/Documentation/admin-guide/ras.rst b/Documentation/admin-guide/ras.rst
index 8c7bbf2..1978967 100644
--- a/Documentation/admin-guide/ras.rst
+++ b/Documentation/admin-guide/ras.rst
@@ -344,9 +344,9 @@
 controllers. The following example will assume 2 channels:
 
 	+------------+-----------------------+
-	| Chip       |       Channels        |
-	| Select     +-----------+-----------+
-	| rows       |  ``ch0``  |  ``ch1``  |
+	| CS Rows    |       Channels        |
+	+------------+-----------+-----------+
+	|            |  ``ch0``  |  ``ch1``  |
 	+============+===========+===========+
 	| ``csrow0`` |  DIMM_A0  |  DIMM_B0  |
 	+------------+           |           |
@@ -698,7 +698,7 @@
 The structure of the message is:
 
 	+---------------------------------------+-------------+
-	| Content                               + Example     |
+	| Content                               | Example     |
 	+=======================================+=============+
 	| The memory controller                 | MC0         |
 	+---------------------------------------+-------------+
@@ -713,7 +713,7 @@
 	+---------------------------------------+-------------+
 	| The error syndrome                    | 0xb741      |
 	+---------------------------------------+-------------+
-	| Memory row                            | row 0       +
+	| Memory row                            | row 0       |
 	+---------------------------------------+-------------+
 	| Memory channel                        | channel 1   |
 	+---------------------------------------+-------------+
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
new file mode 100644
index 0000000..6a4cd1f
--- /dev/null
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -0,0 +1,199 @@
+=============
+ Thunderbolt
+=============
+The interface presented here is not meant for end users. Instead there
+should be a userspace tool that handles all the low-level details, keeps a
+database of the authorized devices and prompts the user for new connections.
+
+More details about the sysfs interface for Thunderbolt devices can be
+found in ``Documentation/ABI/testing/sysfs-bus-thunderbolt``.
+
+Those users who just want to connect any device without any sort of
+manual work can add the following line to
+``/etc/udev/rules.d/99-local.rules``::
+
+  ACTION=="add", SUBSYSTEM=="thunderbolt", ATTR{authorized}=="0", ATTR{authorized}="1"
+
+This will authorize all devices automatically when they appear. However,
+keep in mind that this bypasses the security levels and makes the system
+vulnerable to DMA attacks.
+
+Security levels and how to use them
+-----------------------------------
+Starting from the Intel Falcon Ridge Thunderbolt controller there are 4
+security levels available. The reason for these is that connected
+devices can be DMA masters and thus read the contents of the host
+memory without the CPU and OS knowing about it. There are ways to
+prevent this by setting up an IOMMU, but it is not always available for
+various reasons.
+
+The security levels are as follows:
+
+  none
+    All devices are automatically connected by the firmware. No user
+    approval is needed. In BIOS settings this is typically called
+    *Legacy mode*.
+
+  user
+    User is asked whether the device is allowed to be connected.
+    Based on the device identification information available through
+    ``/sys/bus/thunderbolt/devices``, the user can then make the
+    decision. In BIOS settings this is typically called *Unique ID*.
+
+  secure
+    User is asked whether the device is allowed to be connected. In
+    addition to the UUID, the device (if it supports secure connect) is
+    sent a challenge that should match the expected one based on a random
+    key written to the ``key`` sysfs attribute. In BIOS settings this is
+    typically called *One time saved key*.
+
+  dponly
+    The firmware automatically creates tunnels for Display Port and
+    USB. No PCIe tunneling is done. In BIOS settings this is
+    typically called *Display Port Only*.
+
+The current security level can be read from
+``/sys/bus/thunderbolt/devices/domainX/security`` where ``domainX`` is
+the Thunderbolt domain the host controller manages. There is typically
+one domain per Thunderbolt host controller.
+
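+For example, assuming the first (and typically only) domain, the current
+level can be checked with (the ``user`` output is just an illustration)::
+
+  # cat /sys/bus/thunderbolt/devices/domain0/security
+  user
+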
+If the security level reads as ``user`` or ``secure``, the connected
+device must be authorized by the user before PCIe tunnels are created
+(i.e. the PCIe device appears).
+
+Each Thunderbolt device plugged in will appear in sysfs under
+``/sys/bus/thunderbolt/devices``. The device directory carries
+information that can be used to identify the particular device,
+including its name and UUID.
+
+Authorizing devices when security level is ``user`` or ``secure``
+-----------------------------------------------------------------
+When a device is plugged in it will appear in sysfs as follows::
+
+  /sys/bus/thunderbolt/devices/0-1/authorized	- 0
+  /sys/bus/thunderbolt/devices/0-1/device	- 0x8004
+  /sys/bus/thunderbolt/devices/0-1/device_name	- Thunderbolt to FireWire Adapter
+  /sys/bus/thunderbolt/devices/0-1/vendor	- 0x1
+  /sys/bus/thunderbolt/devices/0-1/vendor_name	- Apple, Inc.
+  /sys/bus/thunderbolt/devices/0-1/unique_id	- e0376f00-0300-0100-ffff-ffffffffffff
+
+The ``authorized`` attribute reads 0, which means no PCIe tunnels have
+been created yet. The user can authorize the device simply by running::
+
+  # echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
+
+This will create the PCIe tunnels and the device is now connected.
+
+If the device supports secure connect, and the domain security level is
+set to ``secure``, it has an additional attribute ``key`` which can hold
+a random 32-byte value used for authorization and challenging the device in
+future connects::
+
+  /sys/bus/thunderbolt/devices/0-3/authorized	- 0
+  /sys/bus/thunderbolt/devices/0-3/device	- 0x305
+  /sys/bus/thunderbolt/devices/0-3/device_name	- AKiTiO Thunder3 PCIe Box
+  /sys/bus/thunderbolt/devices/0-3/key		-
+  /sys/bus/thunderbolt/devices/0-3/vendor	- 0x41
+  /sys/bus/thunderbolt/devices/0-3/vendor_name	- inXtron
+  /sys/bus/thunderbolt/devices/0-3/unique_id	- dc010000-0000-8508-a22d-32ca6421cb16
+
+Notice the key is empty by default.
+
+If the user does not want to use secure connect, they can just ``echo 1``
+to the ``authorized`` attribute and the PCIe tunnels will be created in
+the same way as in the ``user`` security level.
+
+If the user wants to use secure connect, the first time the device is
+plugged in, a key needs to be created and sent to the device::
+
+  # key=$(openssl rand -hex 32)
+  # echo $key > /sys/bus/thunderbolt/devices/0-3/key
+  # echo 1 > /sys/bus/thunderbolt/devices/0-3/authorized
+
+Now the device is connected (PCIe tunnels are created) and in addition
+the key is stored on the device NVM.
+
+Next time the device is plugged in the user can verify (challenge) the
+device using the same key::
+
+  # echo $key > /sys/bus/thunderbolt/devices/0-3/key
+  # echo 2 > /sys/bus/thunderbolt/devices/0-3/authorized
+
+If the challenge the device returns matches the one we expect based
+on the key, the device is connected and the PCIe tunnels are created.
+However, if the challenge fails, no tunnels are created and an error is
+returned to the user.
+
+If the user still wants to connect the device, they can either approve
+the device without a key or write a new key and write 1 to the
+``authorized`` file to get the new key stored on the device NVM.
+
+Upgrading NVM on Thunderbolt device or host
+-------------------------------------------
+Since most of the functionality is handled in firmware running on a
+host controller or a device, it is important that the firmware can be
+upgraded to the latest version, in which possible bugs have been fixed.
+Typically OEMs provide this firmware on their support site.
+
+There is also a central site with links to firmware downloads for some
+machines:
+
+  `Thunderbolt Updates <https://thunderbolttechnology.net/updates>`_
+
+Before you upgrade firmware on a device or host, please make sure it is
+the suitable one. Failing to do that may leave the device (or host) in a
+state where it cannot be used properly anymore without special tools!
+
+Host NVM upgrade on Apple Macs is not supported.
+
+Once the NVM image has been downloaded, you need to plug in a
+Thunderbolt device so that the host controller appears. It does not
+matter which device is connected (unless you are upgrading NVM on a
+device - then you need to connect that particular device).
+
+Note that an OEM-specific method to power the controller up ("force
+power") may be available for your system, in which case there is no need
+to plug in a Thunderbolt device.
+
+After that we can write the firmware to the non-active parts of the NVM
+of the host or device. As an example, here is how the Intel NUC6i7KYK
+(Skull Canyon) Thunderbolt controller NVM is upgraded::
+
+  # dd if=KYK_TBT_FW_0018.bin of=/sys/bus/thunderbolt/devices/0-0/nvm_non_active0/nvmem
+
+Once the operation completes, we can trigger the NVM authentication and
+upgrade process as follows::
+
+  # echo 1 > /sys/bus/thunderbolt/devices/0-0/nvm_authenticate
+
+If no errors are returned, the host controller shortly disappears. Once
+it comes back the driver notices it and initiates a full power cycle.
+After a while the host controller appears again and this time it should
+be fully functional.
+
+We can verify that the new NVM firmware is active by running the
+following commands::
+
+  # cat /sys/bus/thunderbolt/devices/0-0/nvm_authenticate
+  0x0
+  # cat /sys/bus/thunderbolt/devices/0-0/nvm_version
+  18.0
+
+If ``nvm_authenticate`` contains anything other than 0x0, it is the error
+code from the last authentication cycle, which means the authentication
+of the NVM image failed.
+
+Note that the names of the NVMem devices ``nvm_activeN`` and
+``nvm_non_activeN`` depend on the order in which they are registered in
+the NVMem subsystem. N in the name is the identifier added by the NVMem
+subsystem.
+
+Upgrading NVM when host controller is in safe mode
+--------------------------------------------------
+If the existing NVM is not properly authenticated (or is missing), the
+host controller goes into safe mode, which means that the only available
+functionality is flashing a new NVM image. When in this mode, reading
+``nvm_version`` fails with ``ENODATA`` and the device identification
+information is missing.
+
+To recover from this mode, one needs to flash a valid NVM image to the
+host controller in the same way as described in the previous chapter.
diff --git a/Documentation/arm/Atmel/README b/Documentation/arm/Atmel/README
index 6ca78f8..afb13c1 100644
--- a/Documentation/arm/Atmel/README
+++ b/Documentation/arm/Atmel/README
@@ -16,7 +16,7 @@
 
 AT91 SoCs
 ---------
-Documentation and detailled datasheet for each product are available on
+Documentation and detailed datasheet for each product are available on
 the Atmel website: http://www.atmel.com.
 
   Flavors:
@@ -101,6 +101,42 @@
         + Datasheet
           http://www.atmel.com/Images/Atmel-11267-32-bit-Cortex-A5-Microcontroller-SAMA5D2_Datasheet.pdf
 
+    * ARM Cortex-M7 MCUs
+      - sams70 family
+        - sams70j19
+        - sams70j20
+        - sams70j21
+        - sams70n19
+        - sams70n20
+        - sams70n21
+        - sams70q19
+        - sams70q20
+        - sams70q21
+        + Datasheet
+          http://www.atmel.com/Images/Atmel-11242-32-bit-Cortex-M7-Microcontroller-SAM-S70Q-SAM-S70N-SAM-S70J_Datasheet.pdf
+
+      - samv70 family
+        - samv70j19
+        - samv70j20
+        - samv70n19
+        - samv70n20
+        - samv70q19
+        - samv70q20
+        + Datasheet
+          http://www.atmel.com/Images/Atmel-11297-32-bit-Cortex-M7-Microcontroller-SAM-V70Q-SAM-V70N-SAM-V70J_Datasheet.pdf
+
+      - samv71 family
+        - samv71j19
+        - samv71j20
+        - samv71j21
+        - samv71n19
+        - samv71n20
+        - samv71n21
+        - samv71q19
+        - samv71q20
+        - samv71q21
+        + Datasheet
+          http://www.atmel.com/Images/Atmel-44003-32-bit-Cortex-M7-Microcontroller-SAM-V71Q-SAM-V71N-SAM-V71J_Datasheet.pdf
 
 Linux kernel information
 ------------------------
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 01ddeaf..9490f28 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -632,7 +632,7 @@
 i/o is issued (since the bio may otherwise get freed in case i/o completion
 happens in the meantime).
 
-The bio_clone() routine may be used to duplicate a bio, where the clone
+The bio_clone_fast() routine may be used to duplicate a bio, where the clone
 shares the bio_vec_list with the original bio (i.e. both point to the
 same bio_vec_list). This would typically be used for splitting i/o requests
 in lvm or md.
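+
+A minimal sketch of such a call site (assuming the 4.13-era prototype
+struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask,
+struct bio_set *bs); the bio_set and error handling are up to the caller):
+
+	struct bio *clone;
+
+	clone = bio_clone_fast(bio, GFP_NOIO, bs);
+	if (!clone)
+		return -ENOMEM;
+	/* clone shares the original bio's bio_vec list; do not modify it */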
diff --git a/Documentation/conf.py b/Documentation/conf.py
index bacf9d3..71b032b 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -271,8 +271,7 @@
 
 # Additional stuff for the LaTeX preamble.
     'preamble': '''
-	% Adjust margins
-	\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}
+        \\usepackage{ifthen}
 
         % Allow generate some pages in landscape
         \\usepackage{lscape}
@@ -281,6 +280,7 @@
 	\\definecolor{NoteColor}{RGB}{204,255,255}
 	\\definecolor{WarningColor}{RGB}{255,204,204}
 	\\definecolor{AttentionColor}{RGB}{255,255,204}
+	\\definecolor{ImportantColor}{RGB}{192,255,204}
 	\\definecolor{OtherColor}{RGB}{204,204,204}
         \\newlength{\\mynoticelength}
         \\makeatletter\\newenvironment{coloredbox}[1]{%
@@ -301,7 +301,12 @@
 	            \\ifthenelse%
 	            {\\equal{\\py@noticetype}{attention}}%
 	            {\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
-	            {\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
+		    {%
+	               \\ifthenelse%
+	               {\\equal{\\py@noticetype}{important}}%
+	               {\\colorbox{ImportantColor}{\\usebox{\\@tempboxa}}}%
+	               {\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
+		    }%
 		 }%
 	      }%
         }\\makeatother
@@ -336,30 +341,51 @@
 if major == 1 and minor > 3:
     latex_elements['preamble']  += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
 
+if major == 1 and minor <= 4:
+    latex_elements['preamble']  += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}'
+elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)):
+    latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=0.5in'
+
+
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
+# Sorted in alphabetical order
 latex_documents = [
-    ('doc-guide/index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
-     'The kernel development community', 'manual'),
     ('admin-guide/index', 'linux-user.tex', 'Linux Kernel User Documentation',
      'The kernel development community', 'manual'),
     ('core-api/index', 'core-api.tex', 'The kernel core API manual',
      'The kernel development community', 'manual'),
+    ('crypto/index', 'crypto-api.tex', 'Linux Kernel Crypto API manual',
+     'The kernel development community', 'manual'),
+    ('dev-tools/index', 'dev-tools.tex', 'Development tools for the Kernel',
+     'The kernel development community', 'manual'),
+    ('doc-guide/index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
+     'The kernel development community', 'manual'),
     ('driver-api/index', 'driver-api.tex', 'The kernel driver API manual',
      'The kernel development community', 'manual'),
-    ('input/index', 'linux-input.tex', 'The Linux input driver subsystem',
-     'The kernel development community', 'manual'),
-    ('kernel-documentation', 'kernel-documentation.tex', 'The Linux Kernel Documentation',
-     'The kernel development community', 'manual'),
-    ('process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
+    ('filesystems/index', 'filesystems.tex', 'Linux Filesystems API',
      'The kernel development community', 'manual'),
     ('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
      'The kernel development community', 'manual'),
+    ('input/index', 'linux-input.tex', 'The Linux input driver subsystem',
+     'The kernel development community', 'manual'),
+    ('kernel-hacking/index', 'kernel-hacking.tex', 'Unreliable Guide To Hacking The Linux Kernel',
+     'The kernel development community', 'manual'),
     ('media/index', 'media.tex', 'Linux Media Subsystem Documentation',
      'The kernel development community', 'manual'),
+    ('networking/index', 'networking.tex', 'Linux Networking Documentation',
+     'The kernel development community', 'manual'),
+    ('process/index', 'development-process.tex', 'Linux Kernel Development Documentation',
+     'The kernel development community', 'manual'),
     ('security/index', 'security.tex', 'The kernel security subsystem manual',
      'The kernel development community', 'manual'),
+    ('sh/index', 'sh.tex', 'SuperH architecture implementation manual',
+     'The kernel development community', 'manual'),
+    ('sound/index', 'sound.tex', 'Linux Sound Subsystem Documentation',
+     'The kernel development community', 'manual'),
+    ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide',
+     'The kernel development community', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
diff --git a/Documentation/core-api/assoc_array.rst b/Documentation/core-api/assoc_array.rst
index d83cfff..8231b91 100644
--- a/Documentation/core-api/assoc_array.rst
+++ b/Documentation/core-api/assoc_array.rst
@@ -10,7 +10,10 @@
 
 1. Objects are opaque pointers.  The implementation does not care where they
    point (if anywhere) or what they point to (if anything).
-.. note:: Pointers to objects _must_ be zero in the least significant bit.
+
+   .. note::
+
+      Pointers to objects _must_ be zero in the least significant bit.
 
 2. Objects do not need to contain linkage blocks for use by the array.  This
    permits an object to be located in multiple arrays simultaneously.
diff --git a/Documentation/core-api/atomic_ops.rst b/Documentation/core-api/atomic_ops.rst
index 55e43f1..fce9291 100644
--- a/Documentation/core-api/atomic_ops.rst
+++ b/Documentation/core-api/atomic_ops.rst
@@ -303,6 +303,11 @@
 	void smp_mb__before_atomic(void);
 	void smp_mb__after_atomic(void);
 
+Preceding a non-value-returning read-modify-write atomic operation with
+smp_mb__before_atomic() and following it with smp_mb__after_atomic()
+provides the same full ordering that is provided by value-returning
+read-modify-write atomic operations.
+
 For example, smp_mb__before_atomic() can be used like so::
 
 	obj->dead = 1;
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 62abd36..0606be3 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -19,6 +19,7 @@
    workqueue
    genericirq
    flexible-arrays
+   librs
 
 Interfaces for kernel debugging
 ===============================
diff --git a/Documentation/core-api/librs.rst b/Documentation/core-api/librs.rst
new file mode 100644
index 0000000..6010f5b
--- /dev/null
+++ b/Documentation/core-api/librs.rst
@@ -0,0 +1,212 @@
+==========================================
+Reed-Solomon Library Programming Interface
+==========================================
+
+:Author: Thomas Gleixner
+
+Introduction
+============
+
+The generic Reed-Solomon Library provides encoding, decoding and error
+correction functions.
+
+Reed-Solomon codes are used in communication and storage applications to
+ensure data integrity.
+
+This documentation is provided for developers who want to utilize the
+functions provided by the library.
+
+Known Bugs And Assumptions
+==========================
+
+None.
+
+Usage
+=====
+
+This chapter provides examples of how to use the library.
+
+Initializing
+------------
+
+The init function init_rs returns a pointer to an rs decoder structure,
+which holds the necessary information for encoding, decoding and error
+correction with the given polynomial. It either uses an existing
+matching decoder or creates a new one. On creation all the lookup tables
+for fast en/decoding are created. The function may take a while, so make
+sure not to call it in critical code paths.
+
+::
+
+    /* the Reed Solomon control structure */
+    static struct rs_control *rs_decoder;
+
+    /* Symbolsize is 10 (bits)
+     * Primitive polynomial is x^10+x^3+1
+     * first consecutive root is 0
+     * primitive element to generate roots = 1
+     * generator polynomial degree (number of roots) = 6
+     */
+    rs_decoder = init_rs (10, 0x409, 0, 1, 6);
+
+
+Encoding
+--------
+
+The encoder calculates the Reed-Solomon code over the given data length
+and stores the result in the parity buffer. Note that the parity buffer
+must be initialized before calling the encoder.
+
+The expanded data can be inverted on the fly by providing a non-zero
+inversion mask. The expanded data is XOR'ed with the mask. This is used
+e.g. for FLASH ECC, where an all-0xFF block is inverted to an all-0x00
+block. The Reed-Solomon code for all 0x00 is all 0x00. The code is
+inverted before storing to FLASH so it is 0xFF too. This prevents an
+erased FLASH from producing ECC errors when read.
+
+The databytes are expanded to the given symbol size on the fly. There is
+no support for encoding continuous bitstreams with a symbol size != 8 at
+the moment. If it becomes necessary, it should not be a big deal to
+implement such functionality.
+
+::
+
+    /* Parity buffer. Size = number of roots */
+    uint16_t par[6];
+    /* Initialize the parity buffer */
+    memset(par, 0, sizeof(par));
+    /* Encode 512 byte in data8. Store parity in buffer par */
+    encode_rs8 (rs_decoder, data8, 512, par, 0);
+
+
+Decoding
+--------
+
+The decoder calculates the syndrome over the given data length and the
+received parity symbols and corrects errors in the data.
+
+If a syndrome is available from a hardware decoder then the syndrome
+calculation is skipped.
+
+The correction of the data buffer can be suppressed by providing a
+correction pattern buffer and an error location buffer to the decoder.
+The decoder stores the calculated error location and the correction
+bitmask in the given buffers. This is useful for hardware decoders which
+use a weird bit ordering scheme.
+
+The databytes are expanded to the given symbol size on the fly. There is
+no support for decoding continuous bitstreams with a symbolsize != 8 at
+the moment. If it becomes necessary, it should not be a big deal to
+implement such functionality.
+
+Decoding with syndrome calculation, direct data correction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    /* Parity buffer. Size = number of roots */
+    uint16_t par[6];
+    uint8_t  data[512];
+    int numerr;
+    /* Receive data */
+    .....
+    /* Receive parity */
+    .....
+    /* Decode 512 byte in data8.*/
+    numerr = decode_rs8 (rs_decoder, data8, par, 512, NULL, 0, NULL, 0, NULL);
+
+
+Decoding with syndrome given by hardware decoder, direct data correction
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    /* Parity buffer. Size = number of roots */
+    uint16_t par[6], syn[6];
+    uint8_t  data[512];
+    int numerr;
+    /* Receive data */
+    .....
+    /* Receive parity */
+    .....
+    /* Get syndrome from hardware decoder */
+    .....
+    /* Decode 512 byte in data8.*/
+    numerr = decode_rs8 (rs_decoder, data8, par, 512, syn, 0, NULL, 0, NULL);
+
+
+Decoding with syndrome given by hardware decoder, no direct data correction.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Note: It's not necessary to give data and received parity to the
+decoder.
+
+::
+
+    /* Parity buffer. Size = number of roots */
+    uint16_t par[6], syn[6], corr[8];
+    uint8_t  data[512];
+    int numerr, errpos[8];
+    /* Receive data */
+    .....
+    /* Receive parity */
+    .....
+    /* Get syndrome from hardware decoder */
+    .....
+    /* Decode 512 byte in data8.*/
+    numerr = decode_rs8 (rs_decoder, NULL, NULL, 512, syn, 0, errpos, 0, corr);
+    for (i = 0; i < numerr; i++) {
+        do_error_correction_in_your_buffer(errpos[i], corr[i]);
+    }
+
+
+Cleanup
+-------
+
+The function free_rs frees the allocated resources, if the caller is
+the last user of the decoder.
+
+::
+
+    /* Release resources */
+    free_rs(rs_decoder);
+
+
+Structures
+==========
+
+This chapter contains the autogenerated documentation of the structures
+which are used in the Reed-Solomon Library and are relevant for a
+developer.
+
+.. kernel-doc:: include/linux/rslib.h
+   :internal:
+
+Public Functions Provided
+=========================
+
+This chapter contains the autogenerated documentation of the
+Reed-Solomon functions which are exported.
+
+.. kernel-doc:: lib/reed_solomon/reed_solomon.c
+   :export:
+
+Credits
+=======
+
+The library code for encoding and decoding was written by Phil Karn.
+
+::
+
+            Copyright 2002, Phil Karn, KA9Q
+            May be used under the terms of the GNU General Public License (GPL)
+
+
+The wrapper functions and interfaces are written by Thomas Gleixner.
+
+Many users have provided bugfixes, improvements and helping hands for
+testing. Thanks a lot.
+
+The following people have contributed to this document:
+
+Thomas Gleixner <tglx@linutronix.de>
diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt
index 5ad6480..b82b6ad 100644
--- a/Documentation/crypto/asymmetric-keys.txt
+++ b/Documentation/crypto/asymmetric-keys.txt
@@ -265,7 +265,7 @@
 
      The caller passes a pointer to the following struct with all of the fields
      cleared, except for data, datalen and quotalen [see
-     Documentation/security/keys.txt].
+     Documentation/security/keys/core.rst].
 
 	struct key_preparsed_payload {
 		char		*description;
diff --git a/Documentation/crypto/conf.py b/Documentation/crypto/conf.py
new file mode 100644
index 0000000..4335d25
--- /dev/null
+++ b/Documentation/crypto/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = 'Linux Kernel Crypto API'
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'crypto-api.tex', 'Linux Kernel Crypto API manual',
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 07d8811..4ac991d 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -23,6 +23,7 @@
    kmemleak
    kmemcheck
    gdb-kernel-debugging
+   kgdb
 
 
 .. only::  subproject and html
diff --git a/Documentation/dev-tools/kgdb.rst b/Documentation/dev-tools/kgdb.rst
new file mode 100644
index 0000000..7527320
--- /dev/null
+++ b/Documentation/dev-tools/kgdb.rst
@@ -0,0 +1,907 @@
+=================================================
+Using kgdb, kdb and the kernel debugger internals
+=================================================
+
+:Author: Jason Wessel
+
+Introduction
+============
+
+The kernel has two different debugger front ends (kdb and kgdb) which
+interface to the debug core. It is possible to use either of the
+debugger front ends and dynamically transition between them if you
+configure the kernel properly at compile and runtime.
+
+Kdb is a simplistic shell-style interface which you can use on a system
+console with a keyboard or serial console. You can use it to inspect
+memory, registers, process lists, dmesg, and even set breakpoints to
+stop in a certain location. Kdb is not a source level debugger, although
+you can set breakpoints and execute some basic kernel run control. Kdb
+is mainly aimed at doing some analysis to aid in development or
+diagnosing kernel problems. You can access some symbols by name in
+kernel built-ins or in kernel modules if the code was built with
+``CONFIG_KALLSYMS``.
+
+Kgdb is intended to be used as a source level debugger for the Linux
+kernel. It is used along with gdb to debug a Linux kernel. The
+expectation is that gdb can be used to "break in" to the kernel to
+inspect memory, variables and look through call stack information
+similar to the way an application developer would use gdb to debug an
+application. It is possible to place breakpoints in kernel code and
+perform some limited execution stepping.
+
+Two machines are required for using kgdb. One of these machines is a
+development machine and the other is the target machine. The kernel to
+be debugged runs on the target machine. The development machine runs an
+instance of gdb against the vmlinux file which contains the symbols (not
+a boot image such as bzImage, zImage, uImage...). In gdb the developer
+specifies the connection parameters and connects to kgdb. The type of
+connection a developer makes with gdb depends on the availability of
+kgdb I/O modules compiled as built-ins or loadable kernel modules in the
+test machine's kernel.
+
+Compiling a kernel
+==================
+
+-  In order to enable compilation of kdb, you must first enable kgdb.
+
+-  The kgdb test compile options are described in the kgdb test suite
+   chapter.
+
+Kernel config options for kgdb
+------------------------------
+
+To enable ``CONFIG_KGDB`` you should look under
+:menuselection:`Kernel hacking --> Kernel debugging` and select
+:menuselection:`KGDB: kernel debugger`.
+
+While it is not a hard requirement that you have symbols in your vmlinux
+file, gdb tends not to be very useful without the symbolic data, so you
+will want to turn on ``CONFIG_DEBUG_INFO`` which is called
+:menuselection:`Compile the kernel with debug info` in the config menu.
+
+It is advised, but not required, that you turn on the
+``CONFIG_FRAME_POINTER`` kernel option which is called :menuselection:`Compile
+the kernel with frame pointers` in the config menu. This option inserts code
+into the compiled executable which saves the frame information in
+registers or on the stack at different points which allows a debugger
+such as gdb to more accurately construct stack back traces while
+debugging the kernel.
+
+If the architecture that you are using supports the kernel option
+``CONFIG_STRICT_KERNEL_RWX``, you should consider turning it off. This
+option will prevent the use of software breakpoints because it marks
+certain regions of the kernel's memory space as read-only. If kgdb
+supports it for the architecture you are using, you can use hardware
+breakpoints if you desire to run with the ``CONFIG_STRICT_KERNEL_RWX``
+option turned on, else you need to turn off this option.
+
+Next you should choose one or more I/O drivers to interconnect the debugging
+host and debugged target. Early boot debugging requires a KGDB I/O
+driver that supports early debugging and the driver must be built into
+the kernel directly. Kgdb I/O driver configuration takes place via
+kernel or module parameters which you can learn more about in the
+section that describes the parameter kgdboc.
+
+Here is an example set of ``.config`` symbols to enable or disable for kgdb::
+
+  # CONFIG_STRICT_KERNEL_RWX is not set
+  CONFIG_FRAME_POINTER=y
+  CONFIG_KGDB=y
+  CONFIG_KGDB_SERIAL_CONSOLE=y
+
+Kernel config options for kdb
+-----------------------------
+
+Kdb is quite a bit more complex than the simple gdbstub sitting on top
+of the kernel's debug core. Kdb must implement a shell, and also adds
+some helper functions in other parts of the kernel, responsible for
+printing out interesting data such as what you would see if you ran
+``lsmod``, or ``ps``. In order to build kdb into the kernel you follow the
+same steps as you would for kgdb.
+
+The main config option for kdb is ``CONFIG_KGDB_KDB`` which is called
+:menuselection:`KGDB_KDB: include kdb frontend for kgdb` in the config menu.
+In theory you would have already also selected an I/O driver such as the
+``CONFIG_KGDB_SERIAL_CONSOLE`` interface if you plan on using kdb on a
+serial port, when you were configuring kgdb.
+
+If you want to use a PS/2-style keyboard with kdb, you would select
+``CONFIG_KDB_KEYBOARD`` which is called :menuselection:`KGDB_KDB: keyboard as
+input device` in the config menu. The ``CONFIG_KDB_KEYBOARD`` option is not
+used for anything in the gdb interface to kgdb. The ``CONFIG_KDB_KEYBOARD``
+option only works with kdb.
+
+Here is an example set of ``.config`` symbols to enable/disable kdb::
+
+  # CONFIG_STRICT_KERNEL_RWX is not set
+  CONFIG_FRAME_POINTER=y
+  CONFIG_KGDB=y
+  CONFIG_KGDB_SERIAL_CONSOLE=y
+  CONFIG_KGDB_KDB=y
+  CONFIG_KDB_KEYBOARD=y
+
+Kernel Debugger Boot Arguments
+==============================
+
+This section describes the various runtime kernel parameters that affect
+the configuration of the kernel debugger. The following chapter covers
+using kdb and kgdb as well as providing some examples of the
+configuration parameters.
+
+Kernel parameter: kgdboc
+------------------------
+
+The kgdboc driver was originally an abbreviation meant to stand for
+"kgdb over console". Today it is the primary mechanism to configure how
+to communicate from gdb to kgdb as well as the devices you want to use
+to interact with the kdb shell.
+
+For kgdb/gdb, kgdboc is designed to work with a single serial port. It
+is intended to cover the circumstance where you want to use a serial
+console as your primary console as well as using it to perform kernel
+debugging. It is also possible to use kgdb on a serial port which is not
+designated as a system console. Kgdboc may be configured as a kernel
+built-in or a kernel loadable module. You can only make use of
+``kgdbwait`` and early debugging if you build kgdboc into the kernel as
+a built-in.
+
+Optionally you can elect to activate kms (Kernel Mode Setting)
+integration. When you use kms with kgdboc and you have a video driver
+that has atomic mode setting hooks, it is possible to enter the debugger
+on the graphics console. When the kernel execution is resumed, the
+previous graphics mode will be restored. This integration can serve as a
+useful tool to aid in diagnosing crashes or doing analysis of memory
+with kdb while allowing the full graphics console applications to run.
+
+kgdboc arguments
+~~~~~~~~~~~~~~~~
+
+Usage::
+
+	kgdboc=[kms][[,]kbd][[,]serial_device][,baud]
+
+The order listed above must be observed if you use any of the optional
+configurations together.
+
+Abbreviations:
+
+-  kms = Kernel Mode Setting
+
+-  kbd = Keyboard
+
+You can configure kgdboc to use the keyboard, and/or a serial device
+depending on whether you are using kdb and/or kgdb, in one of the following
+scenarios. The order listed above must be observed if you use any of the
+optional configurations together. Using kms + only gdb is generally not
+a useful combination.
+
+Using loadable module or built-in
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. As a kernel built-in:
+
+   Use the kernel boot argument::
+
+	kgdboc=<tty-device>,[baud]
+
+2. As a kernel loadable module:
+
+   Use the command::
+
+	modprobe kgdboc kgdboc=<tty-device>,[baud]
+
+   Here are two examples of how you might format the kgdboc string. The
+   first is for an x86 target using the first serial port. The second
+   example is for the ARM Versatile AB using the second serial port.
+
+   1. ``kgdboc=ttyS0,115200``
+
+   2. ``kgdboc=ttyAMA1,115200``
+
+Configure kgdboc at runtime with sysfs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+At run time you can enable or disable kgdboc by echoing parameters
+into sysfs. Here are two examples:
+
+1. Enable kgdboc on ttyS0::
+
+	echo ttyS0 > /sys/module/kgdboc/parameters/kgdboc
+
+2. Disable kgdboc::
+
+	echo "" > /sys/module/kgdboc/parameters/kgdboc
+
+.. note::
+
+   You do not need to specify the baud if you are configuring the
+   console on a tty which is already configured or open.
+
+More examples
+^^^^^^^^^^^^^
+
+You can configure kgdboc to use the keyboard, and/or a serial device
+depending on whether you are using kdb and/or kgdb, in one of the following
+scenarios.
+
+1. kdb and kgdb over only a serial port::
+
+	kgdboc=<serial_device>[,baud]
+
+   Example::
+
+	kgdboc=ttyS0,115200
+
+2. kdb and kgdb with keyboard and a serial port::
+
+	kgdboc=kbd,<serial_device>[,baud]
+
+   Example::
+
+	kgdboc=kbd,ttyS0,115200
+
+3. kdb with a keyboard::
+
+	kgdboc=kbd
+
+4. kdb with kernel mode setting::
+
+	kgdboc=kms,kbd
+
+5. kdb with kernel mode setting and kgdb over a serial port::
+
+	kgdboc=kms,kbd,ttyS0,115200
+
+.. note::
+
+   Kgdboc does not support interrupting the target via the gdb remote
+   protocol. You must manually send a :kbd:`SysRq-G` unless you have a proxy
+   that splits console output to a terminal program. A console proxy has a
+   separate TCP port for the debugger and a separate TCP port for the
+   "human" console. The proxy can take care of sending the :kbd:`SysRq-G`
+   for you.
+
+When using kgdboc with no debugger proxy, you can end up connecting the
+debugger at one of two entry points. If an exception occurs after you
+have loaded kgdboc, a message should print on the console stating it is
+waiting for the debugger. In this case you disconnect your terminal
+program and then connect the debugger in its place. If you want to
+interrupt the target system and forcibly enter a debug session you have
+to issue a :kbd:`Sysrq` sequence and then type the letter :kbd:`g`. Then you
+disconnect the terminal session and connect gdb. Your options if you
+don't like this are to hack gdb to send the :kbd:`SysRq-G` for you as well as
+on the initial connect, or to use a debugger proxy that allows an
+unmodified gdb to do the debugging.
+
+Kernel parameter: ``kgdbwait``
+------------------------------
+
+The Kernel command line option ``kgdbwait`` makes kgdb wait for a
+debugger connection during booting of a kernel. You can only use this
+option if you compiled a kgdb I/O driver into the kernel and you
+specified the I/O driver configuration as a kernel command line option.
+The kgdbwait parameter should always follow the configuration parameter
+for the kgdb I/O driver in the kernel command line, or else the I/O driver
+will not be configured prior to asking the kernel to use it to wait.
+
+The kernel will stop and wait as early as the I/O driver and
+architecture allows when you use this option. If you build the kgdb I/O
+driver as a loadable kernel module, kgdbwait will not do anything.
+
+Kernel parameter: ``kgdbcon``
+-----------------------------
+
+The ``kgdbcon`` feature allows you to see :c:func:`printk` messages inside gdb
+while gdb is connected to the kernel. Kdb does not make use of the kgdbcon
+feature.
+
+Kgdb supports using the gdb serial protocol to send console messages to
+the debugger when the debugger is connected and running. There are two
+ways to activate this feature.
+
+1. Activate with the kernel command line option::
+
+	kgdbcon
+
+2. Use sysfs before configuring an I/O driver::
+
+	echo 1 > /sys/module/kgdb/parameters/kgdb_use_con
+
+.. note::
+
+   If you do this after you configure the kgdb I/O driver, the
+   setting will not take effect until the next time the I/O driver is
+   reconfigured.
+
+.. important::
+
+   You cannot use kgdboc + kgdbcon on a tty that is an
+   active system console. An example of incorrect usage is::
+
+	console=ttyS0,115200 kgdboc=ttyS0 kgdbcon
+
+It is possible to use this option with kgdboc on a tty that is not a
+system console.
+
+Run time parameter: ``kgdbreboot``
+----------------------------------
+
+The kgdbreboot feature allows you to change how the debugger deals with
+the reboot notification. You have 3 choices for the behavior. The
+default behavior is always set to 0.
+
+.. tabularcolumns:: |p{0.4cm}|p{11.5cm}|p{5.6cm}|
+
+.. flat-table::
+  :widths: 1 10 8
+
+  * - 1
+    - ``echo -1 > /sys/module/debug_core/parameters/kgdbreboot``
+    - Ignore the reboot notification entirely.
+
+  * - 2
+    - ``echo 0 > /sys/module/debug_core/parameters/kgdbreboot``
+    - Send the detach message to any attached debugger client.
+
+  * - 3
+    - ``echo 1 > /sys/module/debug_core/parameters/kgdbreboot``
+    - Enter the debugger on reboot notify.
+
+Using kdb
+=========
+
+Quick start for kdb on a serial port
+------------------------------------
+
+This is a quick example of how to use kdb.
+
+1. Configure kgdboc at boot using kernel parameters::
+
+	console=ttyS0,115200 kgdboc=ttyS0,115200
+
+   OR
+
+   Configure kgdboc after the kernel has booted; assuming you are using
+   a serial port console::
+
+	echo ttyS0 > /sys/module/kgdboc/parameters/kgdboc
+
+2. Enter the kernel debugger manually or by waiting for an oops or
+   fault. There are several ways you can enter the kernel debugger
+   manually; all involve using the :kbd:`SysRq-G`, which means you must have
+   enabled ``CONFIG_MAGIC_SysRq=y`` in your kernel config.
+
+   -  When logged in as root or with a super user session you can run::
+
+	echo g > /proc/sysrq-trigger
+
+   -  Example using minicom 2.2
+
+      Press: :kbd:`CTRL-A` :kbd:`f` :kbd:`g`
+
+   -  When you have telneted to a terminal server that supports sending
+      a remote break
+
+      Press: :kbd:`CTRL-]`
+
+      Type in: ``send break``
+
+      Press: :kbd:`Enter` :kbd:`g`
+
+3. From the kdb prompt you can run the ``help`` command to see a complete
+   list of the commands that are available.
+
+   Some useful commands in kdb include:
+
+   =========== =================================================================
+   ``lsmod``   Shows where kernel modules are loaded
+   ``ps``      Displays only the active processes
+   ``ps A``    Shows all the processes
+   ``summary`` Shows kernel version info and memory usage
+   ``bt``      Get a backtrace of the current process using :c:func:`dump_stack`
+   ``dmesg``   View the kernel syslog buffer
+   ``go``      Continue the system
+   =========== =================================================================
+
+4. When you are done using kdb you need to consider rebooting the system
+   or using the ``go`` command to resume normal kernel execution. If you
+   have paused the kernel for a lengthy period of time, applications
+   that rely on timely networking or anything to do with real wall clock
+   time could be adversely affected, so you should take this into
+   consideration when using the kernel debugger.
+
+Quick start for kdb using a keyboard connected console
+------------------------------------------------------
+
+This is a quick example of how to use kdb with a keyboard.
+
+1. Configure kgdboc at boot using kernel parameters::
+
+	kgdboc=kbd
+
+   OR
+
+   Configure kgdboc after the kernel has booted::
+
+	echo kbd > /sys/module/kgdboc/parameters/kgdboc
+
+2. Enter the kernel debugger manually or by waiting for an oops or
+   fault. There are several ways you can enter the kernel debugger
+   manually; all involve using the :kbd:`SysRq-G`, which means you must have
+   enabled ``CONFIG_MAGIC_SysRq=y`` in your kernel config.
+
+   -  When logged in as root or with a super user session you can run::
+
+	echo g > /proc/sysrq-trigger
+
+   -  Example using a laptop keyboard:
+
+      Press and hold down: :kbd:`Alt`
+
+      Press and hold down: :kbd:`Fn`
+
+      Press and release the key with the label: :kbd:`SysRq`
+
+      Release: :kbd:`Fn`
+
+      Press and release: :kbd:`g`
+
+      Release: :kbd:`Alt`
+
+   -  Example using a PS/2 101-key keyboard
+
+      Press and hold down: :kbd:`Alt`
+
+      Press and release the key with the label: :kbd:`SysRq`
+
+      Press and release: :kbd:`g`
+
+      Release: :kbd:`Alt`
+
+3. Now type in a kdb command such as ``help``, ``dmesg``, ``bt`` or ``go`` to
+   continue kernel execution.
+
+Using kgdb / gdb
+================
+
+In order to use kgdb you must activate it by passing configuration
+information to one of the kgdb I/O drivers. If you do not pass any
+configuration information kgdb will not do anything at all. Kgdb will
+only actively hook up to the kernel trap hooks if a kgdb I/O driver is
+loaded and configured. If you unconfigure a kgdb I/O driver, kgdb will
+unregister all the kernel hook points.
+
+All kgdb I/O drivers can be reconfigured at run time, if
+``CONFIG_SYSFS`` and ``CONFIG_MODULES`` are enabled, by echoing a new
+config string to ``/sys/module/<driver>/parameters/<option>``. The driver
+can be unconfigured by passing an empty string. You cannot change the
+configuration while the debugger is attached. Make sure to detach the
+debugger with the ``detach`` command prior to trying to unconfigure a
+kgdb I/O driver.
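+
+For example, assuming the kgdboc I/O driver, a serial debug connection could
+be reconfigured and later unconfigured at run time roughly as follows::
+
+	# Point kgdb at ttyS0 (sketch; adjust the tty and baud rate as needed).
+	echo ttyS0,115200 > /sys/module/kgdboc/parameters/kgdboc
+
+	# Detach the debugger first, then pass an empty string to unconfigure.
+	echo "" > /sys/module/kgdboc/parameters/kgdboc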
+
+Connecting with gdb to a serial port
+------------------------------------
+
+1. Configure kgdboc
+
+   Configure kgdboc at boot using kernel parameters::
+
+	kgdboc=ttyS0,115200
+
+   OR
+
+   Configure kgdboc after the kernel has booted::
+
+	echo ttyS0 > /sys/module/kgdboc/parameters/kgdboc
+
+2. Stop kernel execution (break into the debugger)
+
+   In order to connect to gdb via kgdboc, the kernel must first be
+   stopped. There are several ways to stop the kernel which include
+   using kgdbwait as a boot argument, via a :kbd:`SysRq-G`, or running the
+   kernel until it takes an exception where it waits for the debugger to
+   attach.
+
+   -  When logged in as root or with a super user session you can run::
+
+	echo g > /proc/sysrq-trigger
+
+   -  Example using minicom 2.2
+
+      Press: :kbd:`CTRL-A` :kbd:`f` :kbd:`g`
+
+   -  When you have telneted to a terminal server that supports sending
+      a remote break
+
+      Press: :kbd:`CTRL-]`
+
+      Type in: ``send break``
+
+      Press: :kbd:`Enter` :kbd:`g`
+
+3. Connect from gdb
+
+   Example (using a directly connected port)::
+
+           % gdb ./vmlinux
+           (gdb) set remotebaud 115200
+           (gdb) target remote /dev/ttyS0
+
+
+   Example (kgdb to a terminal server on TCP port 2012)::
+
+           % gdb ./vmlinux
+           (gdb) target remote 192.168.2.2:2012
+
+
+   Once connected, you can debug a kernel the way you would debug an
+   application program.
+
+   If you are having problems connecting or something is going seriously
+   wrong while debugging, it will most often be the case that you want
+   to enable gdb to be verbose about its target communications. You do
+   this prior to issuing the ``target remote`` command by typing in::
+
+	set debug remote 1
+
+Remember if you continue in gdb, and need to "break in" again, you need
+to issue another :kbd:`SysRq-G`. It is easy to create a simple entry point by
+putting a breakpoint at ``sys_sync`` and then you can run ``sync`` from a
+shell or script to break into the debugger.
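+
+A minimal sketch of that entry point, assuming gdb is already attached as
+described above, might look like::
+
+	(gdb) break sys_sync
+	(gdb) continue
+
+Running ``sync`` from a shell on the target then hits the breakpoint and
+returns control to gdb.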
+
+kgdb and kdb interoperability
+=============================
+
+It is possible to transition between kdb and kgdb dynamically. The debug
+core will remember which you used the last time and automatically start
+in the same mode.
+
+Switching between kdb and kgdb
+------------------------------
+
+Switching from kgdb to kdb
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two ways to switch from kgdb to kdb: you can use gdb to issue
+a maintenance packet, or you can blindly type the command ``$3#33``.
+Whenever the kernel debugger stops in kgdb mode it will print the
+message ``KGDB or $3#33 for KDB``. It is important to note that you have
+to type the sequence correctly in one pass. You cannot type a backspace
+or delete because kgdb will interpret that as part of the debug stream.
+
+1. Change from kgdb to kdb by blindly typing::
+
+	$3#33
+
+2. Change from kgdb to kdb with gdb::
+
+	maintenance packet 3
+
+   .. note::
+
+     Now you must kill gdb. Typically you press :kbd:`CTRL-Z` and issue
+     the command::
+
+	kill -9 %
+
+Change from kdb to kgdb
+~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two ways you can change from kdb to kgdb. You can manually
+enter kgdb mode by issuing the kgdb command from the kdb shell prompt,
+or you can connect gdb while the kdb shell prompt is active. The kdb
+shell looks for the typical first commands that gdb would issue with the
+gdb remote protocol and if it sees one of those commands it
+automatically changes into kgdb mode.
+
+1. From kdb issue the command::
+
+	kgdb
+
+   Now disconnect your terminal program and connect gdb in its place
+
+2. At the kdb prompt, disconnect the terminal program and connect gdb in
+   its place.
+
+Running kdb commands from gdb
+-----------------------------
+
+It is possible to run a limited set of kdb commands from gdb, using the
+gdb monitor command. You don't want to execute any of the run control or
+breakpoint operations, because it can disrupt the state of the kernel
+debugger. You should be using gdb for breakpoints and run control
+operations if you have gdb connected. The more useful commands to run
+are things like lsmod, dmesg, ps or possibly some of the memory
+information commands. To see all the kdb commands you can run
+``monitor help``.
+
+Example::
+
+    (gdb) monitor ps
+    1 idle process (state I) and
+    27 sleeping system daemon (state M) processes suppressed,
+    use 'ps A' to see all.
+    Task Addr       Pid   Parent [*] cpu State Thread     Command
+
+    0xc78291d0        1        0  0    0   S  0xc7829404  init
+    0xc7954150      942        1  0    0   S  0xc7954384  dropbear
+    0xc78789c0      944        1  0    0   S  0xc7878bf4  sh
+    (gdb)
+
+kgdb Test Suite
+===============
+
+When kgdb is enabled in the kernel config you can also elect to enable
+the config parameter ``KGDB_TESTS``. Turning this on will enable a special
+kgdb I/O module which is designed to test the kgdb internal functions.
+
+The kgdb tests are mainly intended for developers to test the kgdb
+internals as well as a tool for developing a new kgdb architecture
+specific implementation. These tests are not really for end users of the
+Linux kernel. The primary source of documentation would be to look in
+the ``drivers/misc/kgdbts.c`` file.
+
+The kgdb test suite can also be configured at compile time to run the
+core set of tests by setting the kernel config parameter
+``KGDB_TESTS_ON_BOOT``. This particular option is aimed at automated
+regression testing and does not require modifying the kernel boot config
+arguments. If this is turned on, the kgdb test suite can be disabled by
+specifying ``kgdbts=`` as a kernel boot argument.
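+
+As an illustration, a config fragment for such automated testing might look
+like this (a sketch; adjust to taste)::
+
+	CONFIG_KGDB=y
+	CONFIG_KGDB_TESTS=y
+	CONFIG_KGDB_TESTS_ON_BOOT=y
+
+With this in place, adding ``kgdbts=`` to the kernel command line skips the
+tests on a particular boot, as noted above.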
+
+Kernel Debugger Internals
+=========================
+
+Architecture Specifics
+----------------------
+
+The kernel debugger is organized into a number of components:
+
+1. The debug core
+
+   The debug core is found in ``kernel/debug/debug_core.c``. It
+   contains:
+
+   -  A generic OS exception handler which includes sync'ing the
+      processors into a stopped state on a multi-CPU system.
+
+   -  The API to talk to the kgdb I/O drivers
+
+   -  The API to make calls to the arch-specific kgdb implementation
+
+   -  The logic to perform safe memory reads and writes to memory while
+      using the debugger
+
+   -  A full implementation for software breakpoints unless overridden
+      by the arch
+
+   -  The API to invoke either the kdb or kgdb frontend to the debug
+      core.
+
+   -  The structures and callback API for atomic kernel mode setting.
+
+      .. note:: kgdboc is where the kms callbacks are invoked.
+
+2. kgdb arch-specific implementation
+
+   This implementation is generally found in ``arch/*/kernel/kgdb.c``. As
+   an example, ``arch/x86/kernel/kgdb.c`` contains the specifics to
+   implement HW breakpoint as well as the initialization to dynamically
+   register and unregister for the trap handlers on this architecture.
+   The arch-specific portion implements:
+
+   -  An arch-specific trap catcher which invokes
+      :c:func:`kgdb_handle_exception` to start kgdb doing its work
+
+   -  Translation between the gdb-specific packet format and :c:type:`pt_regs`
+
+   -  Registration and unregistration of architecture specific trap
+      hooks
+
+   -  Any special exception handling and cleanup
+
+   -  NMI exception handling and cleanup
+
+   -  (optional) HW breakpoints
+
+3. gdbstub frontend (aka kgdb)
+
+   The gdbstub is located in ``kernel/debug/gdbstub.c``. It contains:
+
+   -  All the logic to implement the gdb serial protocol
+
+4. kdb frontend
+
+   The kdb debugger shell is broken down into a number of components.
+   The kdb core is located in kernel/debug/kdb. There are a number of
+   helper functions in some of the other kernel components to make it
+   possible for kdb to examine and report information about the kernel
+   without taking locks that could cause a kernel deadlock. The kdb core
+   implements the following functionality.
+
+   -  A simple shell
+
+   -  The kdb core command set
+
+   -  A registration API to register additional kdb shell commands.
+
+      -  A good example of a self-contained kdb module is the ``ftdump``
+         command for dumping the ftrace buffer. See:
+         ``kernel/trace/trace_kdb.c``
+
+      -  For an example of how to dynamically register a new kdb command
+         you can build the kdb_hello.ko kernel module from
+         ``samples/kdb/kdb_hello.c``. To build this example you can set
+         ``CONFIG_SAMPLES=y`` and ``CONFIG_SAMPLE_KDB=m`` in your kernel
+         config. Later run ``modprobe kdb_hello`` and the next time you
+         enter the kdb shell, you can run the ``hello`` command.
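+         A short sketch of this registration call appears after this
+         component list.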
+
+   -  The implementation for :c:func:`kdb_printf` which emits messages directly
+      to I/O drivers, bypassing the kernel log.
+
+   -  SW / HW breakpoint management for the kdb shell
+
+5. kgdb I/O driver
+
+   Each kgdb I/O driver has to provide an implementation for the
+   following:
+
+   -  configuration via built-in or module
+
+   -  dynamic configuration and kgdb hook registration calls
+
+   -  read and write character interface
+
+   -  A cleanup handler for unconfiguring from the kgdb core
+
+   -  (optional) Early debug methodology
+
+   Any given kgdb I/O driver has to operate very closely with the
+   hardware and must do it in such a way that it does not enable interrupts
+   or change other parts of the system context without completely
+   restoring them. The kgdb core will repeatedly "poll" a kgdb I/O
+   driver for characters when it needs input. The I/O driver is expected
+   to return immediately if there is no data available. Doing so leaves
+   open the future possibility of touching watchdog hardware in such a
+   way that the target system does not reset while these are enabled.
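+
+Picking up the kdb command registration API mentioned under the kdb frontend
+above, a minimal module modeled on ``samples/kdb/kdb_hello.c`` (condensed
+here, so treat the details as illustrative) might look like::
+
+    #include <linux/kdb.h>
+    #include <linux/module.h>
+
+    static int kdb_hello_cmd(int argc, const char **argv)
+    {
+            if (argc > 1)
+                    return KDB_ARGCOUNT;
+
+            if (argc)
+                    kdb_printf("Hello %s.\n", argv[1]);
+            else
+                    kdb_printf("Hello world!\n");
+
+            return 0;
+    }
+
+    static int __init kdb_hello_cmd_init(void)
+    {
+            /* name, handler, usage string, help text, minimum match length */
+            kdb_register("hello", kdb_hello_cmd, "[string]",
+                         "Say Hello World or Hello [string]", 0);
+            return 0;
+    }
+
+    static void __exit kdb_hello_cmd_exit(void)
+    {
+            kdb_unregister("hello");
+    }
+
+    module_init(kdb_hello_cmd_init);
+    module_exit(kdb_hello_cmd_exit);
+    MODULE_LICENSE("GPL");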
+
+If you are intent on adding kgdb architecture specific support for a new
+architecture, the architecture should define ``HAVE_ARCH_KGDB`` in the
+architecture specific Kconfig file. This will enable kgdb for the
+architecture, and at that point you must create an architecture specific
+kgdb implementation.
+
+There are a few flags which must be set on every architecture in their
+``asm/kgdb.h`` file. These are:
+
+-  ``NUMREGBYTES``:
+     The size in bytes of all of the registers, so that we
+     can ensure they will all fit into a packet.
+
+-  ``BUFMAX``:
+     The size in bytes of the buffer GDB will read into. This must
+     be larger than NUMREGBYTES.
+
+-  ``CACHE_FLUSH_IS_SAFE``:
+     Set to 1 if it is always safe to call
+     flush_cache_range or flush_icache_range. On some architectures,
+     these functions may not be safe to call on SMP since we keep other
+     CPUs in a holding pattern.
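+
+As an illustration only (the values below are invented; every architecture
+picks its own), an ``asm/kgdb.h`` fragment covering these flags might look
+like::
+
+    /* Illustrative values -- not taken from any real port. */
+    #define NUMREGBYTES		(17 * 4)	/* total size of the GDB register file */
+    #define BUFMAX		1024		/* must be larger than NUMREGBYTES */
+    #define CACHE_FLUSH_IS_SAFE	1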
+
+There are also the following functions for the common backend, found in
+``kernel/debug/debug_core.c``, that must be supplied by the architecture-specific
+backend unless marked as (optional), in which case a default function
+may be used if the architecture does not need to provide a specific
+implementation.
+
+.. kernel-doc:: include/linux/kgdb.h
+   :internal:
+
+kgdboc internals
+----------------
+
+kgdboc and uarts
+~~~~~~~~~~~~~~~~
+
+The kgdboc driver is actually a very thin driver that relies on the
+underlying low-level hardware driver having "polling hooks" to
+which the tty driver is attached. In the initial implementation of
+kgdboc the serial_core was changed to expose a low level UART hook for
+doing polled mode reading and writing of a single character while in an
+atomic context. When kgdb makes an I/O request to the debugger, kgdboc
+invokes a callback in the serial core which in turn uses the callback in
+the UART driver.
+
+When using kgdboc with a UART, the UART driver must implement two
+callbacks in the :c:type:`struct uart_ops <uart_ops>`.
+Example from ``drivers/tty/serial/8250/8250_port.c``::
+
+
+    #ifdef CONFIG_CONSOLE_POLL
+        .poll_get_char = serial8250_get_poll_char,
+        .poll_put_char = serial8250_put_poll_char,
+    #endif
+
+
+Any implementation specifics around creating a polling driver should be
+guarded by ``#ifdef CONFIG_CONSOLE_POLL``, as shown above. Keep in mind that
+polling hooks have to be implemented in such a way that they can be
+called from an atomic context and have to restore the state of the UART
+chip on return such that the system can return to normal when the
+debugger detaches. You need to be very careful with any kind of lock you
+consider, because failing here is most likely going to mean pressing the
+reset button.
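+
+As a rough sketch of what such hooks can look like for a hypothetical
+memory-mapped UART (the ``MYUART_*`` register names and bits are invented for
+illustration), the two callbacks might be structured as::
+
+    #ifdef CONFIG_CONSOLE_POLL
+    static int myuart_poll_get_char(struct uart_port *port)
+    {
+            /* Return immediately if no character is pending. */
+            if (!(readl(port->membase + MYUART_STATUS) & MYUART_RX_READY))
+                    return NO_POLL_CHAR;
+
+            return readl(port->membase + MYUART_RXDATA) & 0xff;
+    }
+
+    static void myuart_poll_put_char(struct uart_port *port, unsigned char c)
+    {
+            /* Busy-wait without sleeping or enabling interrupts. */
+            while (!(readl(port->membase + MYUART_STATUS) & MYUART_TX_EMPTY))
+                    cpu_relax();
+
+            writel(c, port->membase + MYUART_TXDATA);
+    }
+    #endif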
+
+kgdboc and keyboards
+~~~~~~~~~~~~~~~~~~~~
+
+The kgdboc driver contains logic to configure communications with an
+attached keyboard. The keyboard infrastructure is only compiled into the
+kernel when ``CONFIG_KDB_KEYBOARD=y`` is set in the kernel configuration.
+
+The core polled keyboard driver for PS/2 type keyboards is in
+``kernel/debug/kdb/kdb_keyboard.c``. This driver is hooked into the debug core
+when kgdboc populates the callback in the array called
+:c:type:`kdb_poll_funcs[]`. :c:func:`kdb_get_kbd_char` is the top-level
+function which polls the hardware for single character input.
+
+kgdboc and kms
+~~~~~~~~~~~~~~
+
+The kgdboc driver contains logic to request the graphics display to
+switch to a text context when you are using ``kgdboc=kms,kbd``, provided
+that you have a video driver which has a frame buffer console and atomic
+kernel mode setting support.
+
+Every time the kernel debugger is entered it calls
+:c:func:`kgdboc_pre_exp_handler` which in turn calls :c:func:`con_debug_enter`
+in the virtual console layer. On resuming kernel execution, the kernel
+debugger calls :c:func:`kgdboc_post_exp_handler` which in turn calls
+:c:func:`con_debug_leave`.
+
+Any video driver that wants to be compatible with the kernel debugger
+and the atomic kms callbacks must implement the ``mode_set_base_atomic``,
+``fb_debug_enter`` and ``fb_debug_leave`` operations. For the
+``fb_debug_enter`` and ``fb_debug_leave`` the option exists to use the
+generic drm fb helper functions or implement something custom for the
+hardware. The following example shows the initialization of the
+``.mode_set_base_atomic`` operation in
+``drivers/gpu/drm/i915/intel_display.c``::
+
+
+    static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+    [...]
+            .mode_set_base_atomic = intel_pipe_set_base_atomic,
+    [...]
+    };
+
+
+Here is an example of how the i915 driver initializes the
+``fb_debug_enter`` and ``fb_debug_leave`` functions to use the generic drm
+helpers in ``drivers/gpu/drm/i915/intel_fb.c``::
+
+
+    static struct fb_ops intelfb_ops = {
+    [...]
+           .fb_debug_enter = drm_fb_helper_debug_enter,
+           .fb_debug_leave = drm_fb_helper_debug_leave,
+    [...]
+    };
+
+
+Credits
+=======
+
+The following people have contributed to this document:
+
+1. Amit Kale <amitkale@linsyssoft.com>
+
+2. Tom Rini <trini@kernel.crashing.org>
+
+In March 2008 this document was completely rewritten by:
+
+-  Jason Wessel <jason.wessel@windriver.com>
+
+In Jan 2010 this document was updated to include kdb.
+
+-  Jason Wessel <jason.wessel@windriver.com>
diff --git a/Documentation/dev-tools/sparse.rst b/Documentation/dev-tools/sparse.rst
index ffdcc97..78aa00a 100644
--- a/Documentation/dev-tools/sparse.rst
+++ b/Documentation/dev-tools/sparse.rst
@@ -103,9 +103,3 @@
 
 The optional make variable CF can be used to pass arguments to sparse.  The
 build system passes -Wbitwise to sparse automatically.
-
-Checking RCU annotations
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-RCU annotations are not checked by default.  To enable RCU annotation
-checks, include -DCONFIG_SPARSE_RCU_POINTER in your CF flags.
diff --git a/Documentation/devicetree/bindings/arm/actions.txt b/Documentation/devicetree/bindings/arm/actions.txt
new file mode 100644
index 0000000..3bc7ea5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/actions.txt
@@ -0,0 +1,39 @@
+Actions Semi platforms device tree bindings
+-------------------------------------------
+
+
+S500 SoC
+========
+
+Required root node properties:
+
+ - compatible :  must contain "actions,s500"
+
+
+Modules:
+
+Root node property compatible must contain, depending on module:
+
+ - LeMaker Guitar: "lemaker,guitar"
+
+
+Boards:
+
+Root node property compatible must contain, depending on board:
+
+ - LeMaker Guitar Base Board rev. B: "lemaker,guitar-bb-rev-b", "lemaker,guitar"
+
+
+S900 SoC
+========
+
+Required root node properties:
+
+- compatible :  must contain "actions,s900"
+
+
+Boards:
+
+Root node property compatible must contain, depending on board:
+
+ - uCRobotics Bubblegum-96: "ucrobotics,bubblegum-96"
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index bfd5b55..0fff40a 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -29,26 +29,35 @@
   Required root node property:
     compatible: "amlogic,s912", "amlogic,meson-gxm";
 
-Board compatible values:
+Board compatible values (alphabetically, grouped by SoC):
+
   - "geniatech,atv1200" (Meson6)
+
   - "minix,neo-x8" (Meson8)
-  - "tronfy,mxq" (Meson8b)
+
   - "hardkernel,odroid-c1" (Meson8b)
+  - "tronfy,mxq" (Meson8b)
+
+  - "amlogic,p200" (Meson gxbb)
+  - "amlogic,p201" (Meson gxbb)
+  - "friendlyarm,nanopi-k2" (Meson gxbb)
+  - "hardkernel,odroid-c2" (Meson gxbb)
+  - "nexbox,a95x" (Meson gxbb or Meson gxl s905x)
   - "tronsmart,vega-s95-pro", "tronsmart,vega-s95" (Meson gxbb)
   - "tronsmart,vega-s95-meta", "tronsmart,vega-s95" (Meson gxbb)
   - "tronsmart,vega-s95-telos", "tronsmart,vega-s95" (Meson gxbb)
-  - "hardkernel,odroid-c2" (Meson gxbb)
-  - "amlogic,p200" (Meson gxbb)
-  - "amlogic,p201" (Meson gxbb)
   - "wetek,hub" (Meson gxbb)
   - "wetek,play2" (Meson gxbb)
+
   - "amlogic,p212" (Meson gxl s905x)
+  - "hwacom,amazetv" (Meson gxl s905x)
   - "khadas,vim" (Meson gxl s905x)
+  - "libretech,cc" (Meson gxl s905x)
 
   - "amlogic,p230" (Meson gxl s905d)
   - "amlogic,p231" (Meson gxl s905d)
-  - "hwacom,amazetv" (Meson gxl s905x)
+
   - "amlogic,q200" (Meson gxm s912)
   - "amlogic,q201" (Meson gxm s912)
-  - "nexbox,a95x" (Meson gxbb or Meson gxl s905x)
+  - "kingnovel,r-box-pro" (Meson gxm S912)
   - "nexbox,a1" (Meson gxm s912)
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 799af90..91cb8e4 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -41,6 +41,36 @@
        - "atmel,sama5d43"
        - "atmel,sama5d44"
 
+ * "atmel,samv7" for MCUs using a Cortex-M7, shall be extended with the specific
+   SoC family:
+    o "atmel,sams70" shall be extended with the specific MCU compatible:
+       - "atmel,sams70j19"
+       - "atmel,sams70j20"
+       - "atmel,sams70j21"
+       - "atmel,sams70n19"
+       - "atmel,sams70n20"
+       - "atmel,sams70n21"
+       - "atmel,sams70q19"
+       - "atmel,sams70q20"
+       - "atmel,sams70q21"
+    o "atmel,samv70" shall be extended with the specific MCU compatible:
+       - "atmel,samv70j19"
+       - "atmel,samv70j20"
+       - "atmel,samv70n19"
+       - "atmel,samv70n20"
+       - "atmel,samv70q19"
+       - "atmel,samv70q20"
+    o "atmel,samv71" shall be extended with the specific MCU compatible:
+       - "atmel,samv71j19"
+       - "atmel,samv71j20"
+       - "atmel,samv71j21"
+       - "atmel,samv71n19"
+       - "atmel,samv71n20"
+       - "atmel,samv71n21"
+       - "atmel,samv71q19"
+       - "atmel,samv71q20"
+       - "atmel,samv71q21"
+
 Chipid required properties:
 - compatible: Should be "atmel,sama5d2-chipid"
 - reg : Should contain registers location and length
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt
new file mode 100644
index 0000000..23a0217
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,stingray.txt
@@ -0,0 +1,12 @@
+Broadcom Stingray device tree bindings
+------------------------------------------------
+
+Boards with Stingray shall have the following properties:
+
+Required root node property:
+
+Stingray Combo SVK board
+compatible = "brcm,bcm958742k", "brcm,stingray";
+
+Stingray SST100 board
+compatible = "brcm,bcm958742t", "brcm,stingray";
diff --git a/Documentation/devicetree/bindings/arm/ccn.txt b/Documentation/devicetree/bindings/arm/ccn.txt
index b100d38..2980145 100644
--- a/Documentation/devicetree/bindings/arm/ccn.txt
+++ b/Documentation/devicetree/bindings/arm/ccn.txt
@@ -3,6 +3,7 @@
 Required properties:
 
 - compatible: (standard compatible string) should be one of:
+	"arm,ccn-502"
 	"arm,ccn-504"
 	"arm,ccn-508"
 
diff --git a/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt b/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
new file mode 100644
index 0000000..2982912
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
@@ -0,0 +1,49 @@
+* CoreSight CPU Debug Component:
+
+The CoreSight CPU debug component is compliant with the ARMv8 architecture
+reference manual (ARM DDI 0487A.k), Chapter 'Part H: External debug'. The
+external debug module is mainly used for two modes: self-hosted debug and
+external debug. It is accessed through an MMIO region from CoreSight and
+connects to the CPU that it debugs. The debug module also provides a
+sample-based profiling extension, which can be used to sample the CPU
+program counter, secure state, exception level, etc; usually every CPU has
+one dedicated debug module connected to it.
+
+Required properties:
+
+- compatible : should be "arm,coresight-cpu-debug"; supplemented with
+               "arm,primecell" since this driver is using the AMBA bus
+	       interface.
+
+- reg : physical base address and length of the register set.
+
+- clocks : the clock associated to this component.
+
+- clock-names : the name of the clock referenced by the code. Since we are
+                using the AMBA framework, the name of the clock providing
+		the interconnect should be "apb_pclk" and the clock is
+		mandatory. The interface between the debug logic and the
+		processor core is clocked by the internal CPU clock, so it
+		is enabled with CPU clock by default.
+
+- cpu : the CPU phandle the debug module is affined to. When omitted
+	the module is considered to belong to CPU0.
+
+Optional properties:
+
+- power-domains: a phandle to the debug power domain. Use the "power-domains"
+                 binding to turn on the debug logic if it has its own dedicated
+		 power domain; if necessary, use "cpuidle.off=1" or "nohlt" on
+		 the kernel command line, or the corresponding sysfs node, to
+		 constrain idle states and ensure that registers in the CPU
+		 power domain remain accessible.
+
+Example:
+
+	debug@f6590000 {
+		compatible = "arm,coresight-cpu-debug","arm,primecell";
+		reg = <0 0xf6590000 0 0x1000>;
+		clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+		clock-names = "apb_pclk";
+		cpu = <&cpu0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 1030f5f..ad1913b 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -193,6 +193,7 @@
 			     "spin-table"
 			# On ARM 32-bit systems this property is optional and
 			  can be one of:
+			    "actions,s500-smp"
 			    "allwinner,sun6i-a31"
 			    "allwinner,sun8i-a23"
 			    "arm,realview-smp"
@@ -249,7 +250,7 @@
 		Usage: Optional
 		Value type: <u32>
 		Definition:
-			# u32 value representing CPU capacity [3] in
+			# u32 value representing CPU capacity [4] in
 			  DMIPS/MHz, relative to highest capacity-dmips-mhz
 			  in the system.
 
@@ -476,5 +477,5 @@
 [2] arm/msm/qcom,kpss-acc.txt
 [3] ARM Linux kernel documentation - idle states bindings
     Documentation/devicetree/bindings/arm/idle-states.txt
-[3] ARM Linux kernel documentation - cpu capacity bindings
+[4] ARM Linux kernel documentation - cpu capacity bindings
     Documentation/devicetree/bindings/arm/cpu-capacity.txt
diff --git a/Documentation/devicetree/bindings/arm/gemini.txt b/Documentation/devicetree/bindings/arm/gemini.txt
index 0041eb0..55bf7ce 100644
--- a/Documentation/devicetree/bindings/arm/gemini.txt
+++ b/Documentation/devicetree/bindings/arm/gemini.txt
@@ -24,6 +24,19 @@
   global control registers, with the compatible string
   "cortina,gemini-syscon", "syscon";
 
+  Required properties on the syscon:
+  - reg: syscon register location and size.
+  - #clock-cells: should be set to <1> - the system controller is also a
+                  clock provider.
+  - #reset-cells: should be set to <1> - the system controller is also a
+                  reset line provider.
+
+  The clock sources have shorthand defines in the include file:
+  <dt-bindings/clock/cortina,gemini-clock.h>
+
+  The reset lines have shorthand defines in the include file:
+  <dt-bindings/reset/cortina,gemini-reset.h>
+
 - timer: the soc bus node must have a timer node pointing to the SoC timer
   block, with the compatible string "cortina,gemini-timer"
   See: clocksource/cortina,gemini-timer.txt
@@ -56,12 +69,15 @@
 		syscon: syscon@40000000 {
 			compatible = "cortina,gemini-syscon", "syscon";
 			reg = <0x40000000 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
 		};
 
 		uart0: serial@42000000 {
 			compatible = "ns16550a";
 			reg = <0x42000000 0x100>;
-			clock-frequency = <48000000>;
+			resets = <&syscon GEMINI_RESET_UART>;
+			clocks = <&syscon GEMINI_CLK_UART>;
 			interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 		};
@@ -73,12 +89,18 @@
 			interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */
 				     <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */
 				     <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */
+			resets = <&syscon GEMINI_RESET_TIMER>;
+			/* APB clock or RTC clock */
+			clocks = <&syscon GEMINI_CLK_APB>,
+				 <&syscon GEMINI_CLK_RTC>;
+			clock-names = "PCLK", "EXTCLK";
 			syscon = <&syscon>;
 		};
 
 		intcon: interrupt-controller@48000000 {
 			compatible = "cortina,gemini-interrupt-controller";
 			reg = <0x48000000 0x1000>;
+			resets = <&syscon GEMINI_RESET_INTCON0>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 		};
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index 2e73215..7111fbc8 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -4,6 +4,10 @@
 Required root node properties:
 	- compatible = "hisilicon,hi3660";
 
+HiKey960 Board
+Required root node properties:
+	- compatible = "hisilicon,hi3660-hikey960", "hisilicon,hi3660";
+
 Hi3798cv200 SoC
 Required root node properties:
 	- compatible = "hisilicon,hi3798cv200";
diff --git a/Documentation/devicetree/bindings/arm/keystone/keystone.txt b/Documentation/devicetree/bindings/arm/keystone/keystone.txt
index 48f6703..f310bad 100644
--- a/Documentation/devicetree/bindings/arm/keystone/keystone.txt
+++ b/Documentation/devicetree/bindings/arm/keystone/keystone.txt
@@ -37,3 +37,6 @@
 
 -  K2G EVM
    compatible = "ti,k2g-evm", "ti,k2g", "ti-keystone"
+
+-  K2G Industrial Communication Engine EVM
+   compatible = "ti,k2g-ice", "ti,k2g", "ti-keystone"
diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt
index c860b24..da7bd13 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek.txt
@@ -12,6 +12,8 @@
    "mediatek,mt6592"
    "mediatek,mt6755"
    "mediatek,mt6795"
+   "mediatek,mt6797"
+   "mediatek,mt7622"
    "mediatek,mt7623"
    "mediatek,mt8127"
    "mediatek,mt8135"
@@ -38,6 +40,12 @@
 - Evaluation board for MT6795(Helio X10):
     Required root node properties:
       - compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
+- Evaluation board for MT6797(Helio X20):
+    Required root node properties:
+      - compatible = "mediatek,mt6797-evb", "mediatek,mt6797";
+- Reference board variant 1 for MT7622:
+    Required root node properties:
+      - compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 - Evaluation board for MT7623:
     Required root node properties:
       - compatible = "mediatek,mt7623-evb", "mediatek,mt7623";
diff --git a/Documentation/devicetree/bindings/arm/realtek.txt b/Documentation/devicetree/bindings/arm/realtek.txt
new file mode 100644
index 0000000..13d7557
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/realtek.txt
@@ -0,0 +1,20 @@
+Realtek platforms device tree bindings
+--------------------------------------
+
+
+RTD1295 SoC
+===========
+
+Required root node properties:
+
+ - compatible :  must contain "realtek,rtd1295"
+
+
+Root node property compatible must contain, depending on board:
+
+ - Zidoo X9S: "zidoo,x9s"
+
+
+Example:
+
+    compatible = "zidoo,x9s", "realtek,rtd1295";
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index c965d99e..11c0ac4 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -42,6 +42,10 @@
     Required root node properties:
       - compatible = "firefly,firefly-rk3288-reload", "rockchip,rk3288";
 
+- Firefly Firefly-RK3399 board:
+    Required root node properties:
+      - compatible = "firefly,firefly-rk3399", "rockchip,rk3399";
+
 - ChipSPARK PopMetal-RK3288 board:
     Required root node properties:
       - compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
@@ -138,9 +142,9 @@
     Required root node properties:
       - compatible = "rockchip,px5-evb", "rockchip,px5", "rockchip,rk3368";
 
-- Rockchip RK1108 Evaluation board
+- Rockchip RV1108 Evaluation board
     Required root node properties:
-      - compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+      - compatible = "rockchip,rv1108-evb", "rockchip,rv1108";
 
 - Rockchip RK3368 evb:
     Required root node properties:
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index 170fe05..1a671e3 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -55,12 +55,19 @@
     compatible = "renesas,bockw", "renesas,r8a7778"
   - Genmai (RTK772100BC00000BR)
     compatible = "renesas,genmai", "renesas,r7s72100"
+  - GR-Peach (X28A-M01-E/F)
+    compatible = "renesas,gr-peach", "renesas,r7s72100"
   - Gose (RTP0RC7793SEB00010S)
     compatible = "renesas,gose", "renesas,r8a7793"
-  - H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKB00010S)
+  - H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKBX0010SA00 (H3 ES1.1))
+    H3ULCB (R-Car Starter Kit Premier, RTP0RC77951SKBX010SA00 (H3 ES2.0))
     compatible = "renesas,h3ulcb", "renesas,r8a7795";
   - Henninger
     compatible = "renesas,henninger", "renesas,r8a7791"
+  - iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven)
+    compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7743"
+  - iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven)
+    compatible = "iwave,g20m", "renesas,r8a7743"
   - Koelsch (RTP0RC7791SEB00010S)
     compatible = "renesas,koelsch", "renesas,r8a7791"
   - Kyoto Microcomputer Co. KZM-A9-Dual
@@ -69,7 +76,7 @@
     compatible = "renesas,kzm9g", "renesas,sh73a0"
   - Lager (RTP0RC7790SEB00010S)
     compatible = "renesas,lager", "renesas,r8a7790"
-  - M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKB00010S)
+  - M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKBX0010SA09 (M3 ES1.0))
     compatible = "renesas,m3ulcb", "renesas,r8a7796";
   - Marzen (R0P7779A00010S)
     compatible = "renesas,marzen", "renesas,r8a7779"
@@ -81,6 +88,8 @@
     compatible = "renesas,salvator-x", "renesas,r8a7795";
   - Salvator-X (RTP0RC7796SIPB0011S)
     compatible = "renesas,salvator-x", "renesas,r8a7796";
+  - Salvator-XS (Salvator-X 2nd version, RTP0RC7795SIPB0012S)
+    compatible = "renesas,salvator-xs", "renesas,r8a7795";
   - SILK (RTP0RC7794LCB00011S)
     compatible = "renesas,silk", "renesas,r8a7794"
   - SK-RZG1E (YR8A77450S000BE)
diff --git a/Documentation/devicetree/bindings/arm/tegra.txt b/Documentation/devicetree/bindings/arm/tegra.txt
index b5a4342..7f1411b 100644
--- a/Documentation/devicetree/bindings/arm/tegra.txt
+++ b/Documentation/devicetree/bindings/arm/tegra.txt
@@ -29,7 +29,6 @@
   nvidia,harmony
   nvidia,seaboard
   nvidia,ventana
-  nvidia,whistler
   toradex,apalis_t30
   toradex,apalis_t30-eval
   toradex,apalis-tk1
diff --git a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
index fc33ca0..7c3ca0e 100644
--- a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
@@ -3,7 +3,7 @@
 Required properties:
   - reg: Physical base address and size of the controller's register area.
   - compatible: Compatibility string. Must be 'fsl,<chip>-ahci', where
-    chip could be ls1021a, ls1043a, ls1046a, ls2080a etc.
+    chip could be ls1021a, ls1043a, ls1046a, ls1088a, ls2080a etc.
   - clocks: Input clock specifier. Refer to common clock bindings.
   - interrupts: Interrupt specifier. Refer to interrupt binding.
 
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
index 1eceefb..8a6c3c2 100644
--- a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
+++ b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
@@ -3,7 +3,8 @@
 Required properties:
 
 - compatible:
-    "brcm,gisb-arb" or "brcm,bcm7445-gisb-arb" for 28nm chips
+    "brcm,bcm7278-gisb-arb" for V7 28nm chips
+    "brcm,gisb-arb" or "brcm,bcm7445-gisb-arb" for other 28nm chips
     "brcm,bcm7435-gisb-arb" for newer 40nm chips
     "brcm,bcm7400-gisb-arb" for older 40nm chips and all 65nm chips
     "brcm,bcm7038-gisb-arb" for 130nm chips
diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
index 6f66e9a..f2c5f0e4a 100644
--- a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt
@@ -219,3 +219,79 @@
 --------
 PLL and leaf clock compatible strings for BCM63138 are:
     "brcm,bcm63138-armpll"
+
+Stingray
+-----------
+PLL and leaf clock compatible strings for Stingray are:
+    "brcm,sr-genpll0"
+    "brcm,sr-genpll1"
+    "brcm,sr-genpll2"
+    "brcm,sr-genpll3"
+    "brcm,sr-genpll4"
+    "brcm,sr-genpll5"
+    "brcm,sr-genpll6"
+
+    "brcm,sr-lcpll0"
+    "brcm,sr-lcpll1"
+    "brcm,sr-lcpll-pcie"
+
+
+The following table defines the set of PLL/clock index and ID for Stingray.
+These clock IDs are defined in:
+    "include/dt-bindings/clock/bcm-sr.h"
+
+    Clock		Source		Index	ID
+    ---			-----		-----	---------
+    crystal		N/A		N/A	N/A
+    crmu_ref25m		crystal		N/A	N/A
+
+    genpll0		crystal		0	BCM_SR_GENPLL0
+    clk_125m		genpll0		1	BCM_SR_GENPLL0_125M_CLK
+    clk_scr		genpll0		2	BCM_SR_GENPLL0_SCR_CLK
+    clk_250		genpll0		3	BCM_SR_GENPLL0_250M_CLK
+    clk_pcie_axi	genpll0		4	BCM_SR_GENPLL0_PCIE_AXI_CLK
+    clk_paxc_axi_x2	genpll0		5	BCM_SR_GENPLL0_PAXC_AXI_X2_CLK
+    clk_paxc_axi	genpll0		6	BCM_SR_GENPLL0_PAXC_AXI_CLK
+
+    genpll1		crystal		0	BCM_SR_GENPLL1
+    clk_pcie_tl		genpll1		1	BCM_SR_GENPLL1_PCIE_TL_CLK
+    clk_mhb_apb		genpll1		2	BCM_SR_GENPLL1_MHB_APB_CLK
+
+    genpll2		crystal		0	BCM_SR_GENPLL2
+    clk_nic		genpll2		1	BCM_SR_GENPLL2_NIC_CLK
+    clk_ts_500_ref	genpll2		2	BCM_SR_GENPLL2_TS_500_REF_CLK
+    clk_125_nitro	genpll2		3	BCM_SR_GENPLL2_125_NITRO_CLK
+    clk_chimp		genpll2		4	BCM_SR_GENPLL2_CHIMP_CLK
+    clk_nic_flash	genpll2		5	BCM_SR_GENPLL2_NIC_FLASH
+
+    genpll3		crystal		0	BCM_SR_GENPLL3
+    clk_hsls		genpll3		1	BCM_SR_GENPLL3_HSLS_CLK
+    clk_sdio		genpll3		2	BCM_SR_GENPLL3_SDIO_CLK
+
+    genpll4		crystal		0	BCM_SR_GENPLL4
+    ccn			genpll4		1	BCM_SR_GENPLL4_CCN_CLK
+    clk_tpiu_pll	genpll4		2	BCM_SR_GENPLL4_TPIU_PLL_CLK
+    noc_clk		genpll4		3	BCM_SR_GENPLL4_NOC_CLK
+    clk_chclk_fs4	genpll4		4	BCM_SR_GENPLL4_CHCLK_FS4_CLK
+    clk_bridge_fscpu	genpll4		5	BCM_SR_GENPLL4_BRIDGE_FSCPU_CLK
+
+
+    genpll5		crystal		0	BCM_SR_GENPLL5
+    fs4_hf_clk		genpll5		1	BCM_SR_GENPLL5_FS4_HF_CLK
+    crypto_ae_clk	genpll5		2	BCM_SR_GENPLL5_CRYPTO_AE_CLK
+    raid_ae_clk		genpll5		3	BCM_SR_GENPLL5_RAID_AE_CLK
+
+    genpll6		crystal		0	BCM_SR_GENPLL6
+    48_usb		genpll6		1	BCM_SR_GENPLL6_48_USB_CLK
+
+    lcpll0		crystal		0	BCM_SR_LCPLL0
+    clk_sata_refp 	lcpll0		1	BCM_SR_LCPLL0_SATA_REFP_CLK
+    clk_sata_refn	lcpll0		2	BCM_SR_LCPLL0_SATA_REFN_CLK
+    clk_usb_ref		lcpll0		3	BCM_SR_LCPLL0_USB_REF_CLK
+    sata_refpn		lcpll0		3	BCM_SR_LCPLL0_SATA_REFPN_CLK
+
+    lcpll1		crystal		0	BCM_SR_LCPLL1
+    wan 		lcpll1		1	BCM_SR_LCPLL0_WAN_CLK
+
+    lcpll_pcie		crystal		0	BCM_SR_LCPLL_PCIE
+    pcie_phy_ref 	lcpll1		1	BCM_SR_LCPLL_PCIE_PHY_REF_CLK
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index e9c5a1d..f465647 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -22,7 +22,8 @@
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1
 
-For the PRCM CCUs on H3/A64, one more clock is needed:
+For the PRCM CCUs on H3/A64, two more clocks are needed:
+- "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator
 
 Example for generic CCU:
@@ -39,8 +40,8 @@
 r_ccu: clock@01f01400 {
 	compatible = "allwinner,sun50i-a64-r-ccu";
 	reg = <0x01f01400 0x100>;
-	clocks = <&osc24M>, <&osc32k>, <&iosc>;
-	clock-names = "hosc", "losc", "iosc";
+	clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
+	clock-names = "hosc", "losc", "iosc", "pll-periph";
 	#clock-cells = <1>;
 	#reset-cells = <1>;
 };
diff --git a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
index ba0e15a..0c38e4b 100644
--- a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
@@ -63,64 +63,64 @@
 	 * because they can not be enabled simultaneously on a
 	 * single SoC.
 	 */
-	opp50@300000000 {
+	opp50-300000000 {
 		opp-hz = /bits/ 64 <300000000>;
 		opp-microvolt = <950000 931000 969000>;
 		opp-supported-hw = <0x06 0x0010>;
 		opp-suspend;
 	};
 
-	opp100@275000000 {
+	opp100-275000000 {
 		opp-hz = /bits/ 64 <275000000>;
 		opp-microvolt = <1100000 1078000 1122000>;
 		opp-supported-hw = <0x01 0x00FF>;
 		opp-suspend;
 	};
 
-	opp100@300000000 {
+	opp100-300000000 {
 		opp-hz = /bits/ 64 <300000000>;
 		opp-microvolt = <1100000 1078000 1122000>;
 		opp-supported-hw = <0x06 0x0020>;
 		opp-suspend;
 	};
 
-	opp100@500000000 {
+	opp100-500000000 {
 		opp-hz = /bits/ 64 <500000000>;
 		opp-microvolt = <1100000 1078000 1122000>;
 		opp-supported-hw = <0x01 0xFFFF>;
 	};
 
-	opp100@600000000 {
+	opp100-600000000 {
 		opp-hz = /bits/ 64 <600000000>;
 		opp-microvolt = <1100000 1078000 1122000>;
 		opp-supported-hw = <0x06 0x0040>;
 	};
 
-	opp120@600000000 {
+	opp120-600000000 {
 		opp-hz = /bits/ 64 <600000000>;
 		opp-microvolt = <1200000 1176000 1224000>;
 		opp-supported-hw = <0x01 0xFFFF>;
 	};
 
-	opp120@720000000 {
+	opp120-720000000 {
 		opp-hz = /bits/ 64 <720000000>;
 		opp-microvolt = <1200000 1176000 1224000>;
 		opp-supported-hw = <0x06 0x0080>;
 	};
 
-	oppturbo@720000000 {
+	oppturbo-720000000 {
 		opp-hz = /bits/ 64 <720000000>;
 		opp-microvolt = <1260000 1234800 1285200>;
 		opp-supported-hw = <0x01 0xFFFF>;
 	};
 
-	oppturbo@800000000 {
+	oppturbo-800000000 {
 		opp-hz = /bits/ 64 <800000000>;
 		opp-microvolt = <1260000 1234800 1285200>;
 		opp-supported-hw = <0x06 0x0100>;
 	};
 
-	oppnitro@1000000000 {
+	oppnitro-1000000000 {
 		opp-hz = /bits/ 64 <1000000000>;
 		opp-microvolt = <1325000 1298500 1351500>;
 		opp-supported-hw = <0x04 0x0200>;
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
new file mode 100644
index 0000000..a767259
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
@@ -0,0 +1,24 @@
+Device-tree bindings for gpio-based FSI master driver
+-----------------------------------------------------
+
+Required properties:
+ - compatible = "fsi-master-gpio";
+ - clock-gpios = <gpio-descriptor>;	: GPIO for FSI clock
+ - data-gpios = <gpio-descriptor>;	: GPIO for FSI data signal
+
+Optional properties:
+ - enable-gpios = <gpio-descriptor>;	: GPIO for enable signal
+ - trans-gpios = <gpio-descriptor>;	: GPIO for voltage translator enable
+ - mux-gpios = <gpio-descriptor>;	: GPIO for pin multiplexing with other
+                                          functions (e.g., external FSI masters)
+
+Examples:
+
+    fsi-master {
+        compatible = "fsi-master-gpio", "fsi-master";
+        clock-gpios = <&gpio 0>;
+        data-gpios = <&gpio 1>;
+        enable-gpios = <&gpio 2>;
+        trans-gpios = <&gpio 3>;
+        mux-gpios = <&gpio 4>;
+    }
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2..01e331a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@
 		};
 
 		gpio1: gpio@18140 {
-			compatible = "marvell,armada-370-xp-gpio";
+			compatible = "marvell,armada-370-gpio";
 			reg = <0x18140 0x40>, <0x181c8 0x08>;
 			reg-names = "gpio", "pwm";
 			ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio_atmel.txt b/Documentation/devicetree/bindings/gpio/gpio_atmel.txt
index 85f8c0d..29416f9 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_atmel.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio_atmel.txt
@@ -5,9 +5,13 @@
 - reg: Should contain GPIO controller registers location and length
 - interrupts: Should be the port interrupt shared by all the pins.
 - #gpio-cells: Should be two.  The first cell is the pin number and
-  the second cell is used to specify optional parameters (currently
-  unused).
+  the second cell is used to specify optional parameters to declare if the GPIO
+  is active high or low. See gpio.txt.
 - gpio-controller: Marks the device node as a GPIO controller.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Should be two. The first cell is the pin number and the
+  second cell is used to specify irq type flags, see the two cell description
+  in interrupt-controller/interrupts.txt for details.
 
 optional properties:
 - #gpio-lines: Number of gpio if absent 32.
@@ -21,5 +25,7 @@
 		#gpio-cells = <2>;
 		gpio-controller;
 		#gpio-lines = <19>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
new file mode 100644
index 0000000..d3b6e1a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
@@ -0,0 +1,86 @@
+ARM Mali Midgard GPU
+====================
+
+Required properties:
+
+- compatible :
+  * Must contain one of the following:
+    + "arm,mali-t604"
+    + "arm,mali-t624"
+    + "arm,mali-t628"
+    + "arm,mali-t720"
+    + "arm,mali-t760"
+    + "arm,mali-t820"
+    + "arm,mali-t830"
+    + "arm,mali-t860"
+    + "arm,mali-t880"
+  * which must be preceded by one of the following vendor specifics:
+    + "amlogic,meson-gxm-mali"
+    + "rockchip,rk3288-mali"
+
+- reg : Physical base address of the device and length of the register area.
+
+- interrupts : Contains the three IRQ lines required by Mali Midgard devices.
+
+- interrupt-names : Contains the names of IRQ resources in the order they were
+  provided in the interrupts property. Must contain: "job", "mmu", "gpu".
+
+
+Optional properties:
+
+- clocks : Phandle to clock for the Mali Midgard device.
+
+- mali-supply : Phandle to regulator for the Mali device. Refer to
+  Documentation/devicetree/bindings/regulator/regulator.txt for details.
+
+- operating-points-v2 : Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details.
+
+
+Example for a Mali-T760:
+
+gpu@ffa30000 {
+	compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard";
+	reg = <0xffa30000 0x10000>;
+	interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-names = "job", "mmu", "gpu";
+	clocks = <&cru ACLK_GPU>;
+	mali-supply = <&vdd_gpu>;
+	operating-points-v2 = <&gpu_opp_table>;
+	power-domains = <&power RK3288_PD_GPU>;
+};
+
+gpu_opp_table: opp_table0 {
+	compatible = "operating-points-v2";
+
+	opp@533000000 {
+		opp-hz = /bits/ 64 <533000000>;
+		opp-microvolt = <1250000>;
+	};
+	opp@450000000 {
+		opp-hz = /bits/ 64 <450000000>;
+		opp-microvolt = <1150000>;
+	};
+	opp@400000000 {
+		opp-hz = /bits/ 64 <400000000>;
+		opp-microvolt = <1125000>;
+	};
+	opp@350000000 {
+		opp-hz = /bits/ 64 <350000000>;
+		opp-microvolt = <1075000>;
+	};
+	opp@266000000 {
+		opp-hz = /bits/ 64 <266000000>;
+		opp-microvolt = <1025000>;
+	};
+	opp@160000000 {
+		opp-hz = /bits/ 64 <160000000>;
+		opp-microvolt = <925000>;
+	};
+	opp@100000000 {
+		opp-hz = /bits/ 64 <100000000>;
+		opp-microvolt = <912500>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt6577.txt b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
similarity index 77%
rename from Documentation/devicetree/bindings/i2c/i2c-mt6577.txt
rename to Documentation/devicetree/bindings/i2c/i2c-mtk.txt
index 0ce6fa3..bd5a7be 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt6577.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
@@ -4,11 +4,11 @@
 
 Required properties:
   - compatible: value should be either of the following.
-      (a) "mediatek,mt6577-i2c", for i2c compatible with mt6577 i2c.
-      (b) "mediatek,mt6589-i2c", for i2c compatible with mt6589 i2c.
-      (c) "mediatek,mt8127-i2c", for i2c compatible with mt8127 i2c.
-      (d) "mediatek,mt8135-i2c", for i2c compatible with mt8135 i2c.
-      (e) "mediatek,mt8173-i2c", for i2c compatible with mt8173 i2c.
+      "mediatek,mt2701-i2c", "mediatek,mt6577-i2c": for Mediatek mt2701
+      "mediatek,mt6577-i2c": for i2c compatible with mt6577.
+      "mediatek,mt6589-i2c": for i2c compatible with mt6589.
+      "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for i2c compatible with mt7623.
+      "mediatek,mt8173-i2c": for i2c compatible with mt8173.
   - reg: physical base address of the controller and dma base, length of memory
     mapped region.
   - interrupts: interrupt number to the cpu.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
new file mode 100644
index 0000000..2907dab
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
@@ -0,0 +1,99 @@
+General Purpose I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses a mux controller
+from the mux subsystem to route the I2C signals.
+
+                                  .-----.  .-----.
+                                  | dev |  | dev |
+    .------------.                '-----'  '-----'
+    | SoC        |                   |        |
+    |            |          .--------+--------'
+    |   .------. |  .------+    child bus A, on MUX value set to 0
+    |   | I2C  |-|--| Mux  |
+    |   '------' |  '--+---+    child bus B, on MUX value set to 1
+    |   .------. |     |    '----------+--------+--------.
+    |   | MUX- | |     |               |        |        |
+    |   | Ctrl |-|-----+            .-----.  .-----.  .-----.
+    |   '------' |                  | dev |  | dev |  | dev |
+    '------------'                  '-----'  '-----'  '-----'
+
+Required properties:
+- compatible: i2c-mux
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+- mux-controls: The phandle of the mux controller to use for operating the
+  mux.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory. The sub-bus number
+  is also the mux-controller state described in ../mux/mux-controller.txt
+
+Optional properties:
+- mux-locked: If present, explicitly allow unrelated I2C transactions on the
+  parent I2C adapter at these times:
+   + during setup of the multiplexer
+   + between setup of the multiplexer and the child bus I2C transaction
+   + between the child bus I2C transaction and releasing of the multiplexer
+   + during releasing of the multiplexer
+  However, I2C transactions to devices behind all I2C multiplexers connected
+  to the same parent adapter that this multiplexer is connected to are blocked
+  for the full duration of the complete multiplexed I2C transaction (i.e.
+  including the times covered by the above list).
+  If mux-locked is not present, the multiplexer is assumed to be parent-locked.
+  This means that no unrelated I2C transactions are allowed on the parent I2C
+  adapter for the complete multiplexed I2C transaction.
+  The properties of mux-locked and parent-locked multiplexers are discussed
+  in more detail in Documentation/i2c/i2c-topology.
+
+For each i2c child node, an I2C child bus will be created. They will
+be numbered based on their order in the device tree.
+
+Whenever an access is made to a device on a child bus, the value set
+in the relevant node's reg property will be set as the state in the
+mux controller.
+
+Example:
+	mux: mux-controller {
+		compatible = "gpio-mux";
+		#mux-control-cells = <0>;
+
+		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+			    <&pioA 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	i2c-mux {
+		compatible = "i2c-mux";
+		mux-locked;
+		i2c-parent = <&i2c1>;
+
+		mux-controls = <&mux>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ssd1307: oled@3c {
+				compatible = "solomon,ssd1307fb-i2c";
+				reg = <0x3c>;
+				pwms = <&pwm 4 3000>;
+				reset-gpios = <&gpio2 7 1>;
+				reset-active-low;
+			};
+		};
+
+		i2c@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			pca9555: pca9555@20 {
+				compatible = "nxp,pca9555";
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x20>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index 0471891..f413e82 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -2,6 +2,8 @@
 
 Required properties:
 - compatible:	depending on the SoC this should be one of:
+			- "amlogic,meson8-saradc" for Meson8
+			- "amlogic,meson8b-saradc" for Meson8b
 			- "amlogic,meson-gxbb-saradc" for GXBB
 			- "amlogic,meson-gxl-saradc" for GXL
 			- "amlogic,meson-gxm-saradc" for GXM
diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt b/Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt
index f5b0ada..df5b9f2 100644
--- a/Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt
@@ -1,4 +1,4 @@
-* Renesas RCar GyroADC device driver
+* Renesas R-Car GyroADC device driver
 
 The GyroADC block is a reduced SPI block with up to 8 chipselect lines,
 which supports the SPI protocol of a selected few SPI ADCs. The SPI ADCs
@@ -16,8 +16,7 @@
 - clocks:	References to all the clocks specified in the clock-names
 		property as specified in
 		Documentation/devicetree/bindings/clock/clock-bindings.txt.
-- clock-names:	Shall contain "fck" and "if". The "fck" is the GyroADC block
-		clock, the "if" is the interface clock.
+- clock-names:	Shall contain "fck". The "fck" is the GyroADC block clock.
 - power-domains: Must contain a reference to the PM domain, if available.
 - #address-cells: Should be <1> (setting for the subnodes) for all ADCs
 		except for "fujitsu,mb88101a". Should be <0> (setting for
@@ -75,8 +74,8 @@
 	adc@e6e54000 {
 		compatible = "renesas,r8a7791-gyroadc", "renesas,rcar-gyroadc";
 		reg = <0 0xe6e54000 0 64>;
-		clocks = <&mstp9_clks R8A7791_CLK_GYROADC>, <&clk_65m>;
-		clock-names = "fck", "if";
+		clocks = <&mstp9_clks R8A7791_CLK_GYROADC>;
+		clock-names = "fck";
 		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 
 		pinctrl-0 = <&adc_pins>;
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index e35f9f1..8310073 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -21,11 +21,19 @@
 Contents of a stm32 adc root node:
 -----------------------------------
 Required properties:
-- compatible: Should be "st,stm32f4-adc-core".
+- compatible: Should be one of:
+  "st,stm32f4-adc-core"
+  "st,stm32h7-adc-core"
 - reg: Offset and length of the ADC block register set.
 - interrupts: Must contain the interrupt for ADC block.
-- clocks: Clock for the analog circuitry (common to all ADCs).
-- clock-names: Must be "adc".
+- clocks: Core can use up to two clocks, depending on part used:
+  - "adc" clock: for the analog circuitry, common to all ADCs.
+    It's required on stm32f4.
+    It's optional on stm32h7.
+  - "bus" clock: for registers access, common to all ADCs.
+    It's not present on stm32f4.
+    It's required on stm32h7.
+- clock-names: Must be "adc" and/or "bus" depending on part used.
 - interrupt-controller: Identifies the controller node as interrupt-parent
 - vref-supply: Phandle to the vref input analog reference voltage.
 - #interrupt-cells = <1>;
@@ -42,14 +50,18 @@
 ADC instance available on the machine.
 
 Required properties:
-- compatible: Should be "st,stm32f4-adc".
+- compatible: Should be one of:
+  "st,stm32f4-adc"
+  "st,stm32h7-adc"
 - reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
-- clocks: Input clock private to this ADC instance.
+- clocks: Input clock private to this ADC instance. It's required only on
+  stm32f4, that has per instance clock input for registers access.
 - interrupt-parent: Phandle to the parent interrupt controller.
 - interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
   2 for adc@200).
 - st,adc-channels: List of single-ended channels muxed for this ADC.
-  It can have up to 16 channels, numbered from 0 to 15 (resp. for in0..in15).
+  It can have up to 16 channels on stm32f4 or 20 channels on stm32h7, numbered
+  from 0 to 15 or 19 (resp. for in0..in15 or in0..in19).
 - #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
   Documentation/devicetree/bindings/iio/iio-bindings.txt
 
@@ -58,7 +70,9 @@
   See ../../dma/dma.txt for details.
 - dma-names: Must be "rx" when dmas property is being used.
 - assigned-resolution-bits: Resolution (bits) to use for conversions. Must
-  match device available resolutions (e.g. can be 6, 8, 10 or 12 on stm32f4).
+  match device available resolutions:
+  * can be 6, 8, 10 or 12 on stm32f4
+  * can be 8, 10, 12, 14 or 16 on stm32h7
   Default is maximum resolution if unset.
 
 Example:
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc084s021.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc084s021.txt
new file mode 100644
index 0000000..4259e50
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc084s021.txt
@@ -0,0 +1,19 @@
+* Texas Instruments' ADC084S021
+
+Required properties:
+ - compatible        : Must be "ti,adc084s021"
+ - reg               : SPI chip select number for the device
+ - vref-supply       : The regulator supply for ADC reference voltage
+ - spi-cpol          : Per spi-bus bindings
+ - spi-cpha          : Per spi-bus bindings
+ - spi-max-frequency : Per spi-bus bindings
+
+Example:
+adc@0 {
+	compatible = "ti,adc084s021";
+	reg = <0>;
+	vref-supply = <&adc_vref>;
+	spi-cpol;
+	spi-cpha;
+	spi-max-frequency = <16000000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc108s102.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc108s102.txt
new file mode 100644
index 0000000..bbbbb4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc108s102.txt
@@ -0,0 +1,18 @@
+* Texas Instruments' ADC108S102 and ADC128S102 ADC chip
+
+Required properties:
+ - compatible: Should be "ti,adc108s102"
+ - reg: spi chip select number for the device
+ - vref-supply: The regulator supply for ADC reference voltage
+
+Recommended properties:
+ - spi-max-frequency: Definition as per
+		Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+adc@0 {
+	compatible = "ti,adc108s102";
+	reg = <0>;
+	vref-supply = <&vdd_supply>;
+	spi-max-frequency = <1000000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index 8305fb0..6f28ff5 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -13,7 +13,8 @@
   "data ready" (valid values: 1 or 2).
 - interrupt-parent: should be the phandle for the interrupt controller
 - interrupts: interrupt mapping for IRQ. It should be configured with
-  flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
+  flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
+  IRQ_TYPE_EDGE_FALLING.
 
   Refer to interrupt-controller/interrupts.txt for generic interrupt
   client node bindings.
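
A minimal sketch of a sensor node using one of the newly allowed falling-edge
interrupt types (the I2C address, GPIO controller and pin number are
hypothetical, not taken from this patch):

	lsm6ds3@6b {
		compatible = "st,lsm6ds3";
		reg = <0x6b>;
		interrupt-parent = <&gpio0>;
		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
	};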
diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
new file mode 100644
index 0000000..c827940
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
@@ -0,0 +1,39 @@
+I/O channel multiplexer bindings
+
+If a multiplexer is used to select which hardware signal is fed to
+e.g. an ADC channel, these bindings describe that situation.
+
+Required properties:
+- compatible : "io-channel-mux"
+- io-channels : Channel node of the parent channel that has multiplexed
+		input.
+- io-channel-names : Should be "parent".
+- #address-cells = <1>;
+- #size-cells = <0>;
+- mux-controls : Mux controller node to use for operating the mux
+- channels : List of strings, labeling the mux controller states.
+
+For each non-empty string in the channels property, an io-channel will
+be created. The number of this io-channel is the same as the index into
+the list of strings in the channels property, and also matches the mux
+controller state. The mux controller state is described in
+../mux/mux-controller.txt
+
+Example:
+	mux: mux-controller {
+		compatible = "mux-gpio";
+		#mux-control-cells = <0>;
+
+		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+			    <&pioA 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&mux>;
+
+		channels = "sync", "in", "system-regulator";
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
index 81cd369..4ae553e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
@@ -3,8 +3,11 @@
 
 Required properties:
 
-- compatible : should be "allwinner,sun7i-a20-sc-nmi" or
-  "allwinner,sun6i-a31-sc-nmi" or "allwinner,sun9i-a80-nmi"
+- compatible : should be one of the following:
+  - "allwinner,sun7i-a20-sc-nmi"
+  - "allwinner,sun6i-a31-sc-nmi" (deprecated)
+  - "allwinner,sun6i-a31-r-intc"
+  - "allwinner,sun9i-a80-nmi"
 - reg : Specifies base physical address and size of the registers.
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
new file mode 100644
index 0000000..033cc82
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
@@ -0,0 +1,25 @@
+Device tree configuration for the I2C Interrupt Controller on the AST24XX and
+AST25XX SoCs.
+
+Required Properties:
+- #address-cells	: should be 1
+- #size-cells 		: should be 1
+- #interrupt-cells 	: should be 1
+- compatible 		: should be "aspeed,ast2400-i2c-ic"
+			  or "aspeed,ast2500-i2c-ic"
+- reg			: address start and range of controller
+- interrupts		: interrupt number
+- interrupt-controller	: denotes that the controller receives and fires
+			  new interrupts for child busses
+
+Example:
+
+i2c_ic: interrupt-controller@0 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	#interrupt-cells = <1>;
+	compatible = "aspeed,ast2400-i2c-ic";
+	reg = <0x0 0x40>;
+	interrupts = <12>;
+	interrupt-controller;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
index 6c6e853..e3fea07 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
@@ -1,12 +1,13 @@
 Aspeed Vectored Interrupt Controller
 
-These bindings are for the Aspeed AST2400 interrupt controller register layout.
-The SoC has an legacy register layout, but this driver does not support that
-mode of operation.
+These bindings are for the Aspeed interrupt controller. The AST2400 and
+AST2500 SoC families include a legacy register layout before a re-designed
+layout, but the bindings do not prescribe the use of one or the other.
 
 Required properties:
 
-- compatible : should be "aspeed,ast2400-vic".
+- compatible : "aspeed,ast2400-vic"
+               "aspeed,ast2500-vic"
 
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt
new file mode 100644
index 0000000..64a00ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,gicp.txt
@@ -0,0 +1,27 @@
+Marvell GICP Controller
+-----------------------
+
+GICP is a Marvell extension of the GIC that allows triggering GIC SPI
+interrupts by doing a memory transaction. It is used by the ICU
+located in the Marvell CP110 to turn wired interrupts inside the CP
+into GIC SPI interrupts.
+
+Required properties:
+
+- compatible: Must be "marvell,ap806-gicp"
+
+- reg: Must be the address and size of the GICP SPI registers
+
+- marvell,spi-ranges: tuples of GIC SPI interrupts ranges available
+  for this GICP
+
+- msi-controller: indicates that this is an MSI controller
+
+Example:
+
+gicp_spi: gicp-spi@3f0040 {
+	compatible = "marvell,ap806-gicp";
+	reg = <0x3f0040 0x10>;
+	marvell,spi-ranges = <64 64>, <288 64>;
+	msi-controller;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
new file mode 100644
index 0000000..aa8bf2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
@@ -0,0 +1,51 @@
+Marvell ICU Interrupt Controller
+--------------------------------
+
+The Marvell ICU (Interrupt Consolidation Unit) controller is
+responsible for collecting all wired-interrupt sources in the CP and
+communicating them to the GIC in the AP. The unit translates interrupt
+requests on input wires into MSG memory-mapped transactions to the GIC.
+
+Required properties:
+
+- compatible: Should be "marvell,cp110-icu"
+
+- reg: Should contain ICU registers location and length.
+
+- #interrupt-cells: Specifies the number of cells needed to encode an
+  interrupt source. The value shall be 3.
+
+  The 1st cell is the group type of the ICU interrupt. Possible group
+  types are:
+
+   ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
+   ICU_GRP_SR  (0x1) : Shared peripheral interrupt, secure
+   ICU_GRP_SEI (0x4) : System error interrupt
+   ICU_GRP_REI (0x5) : RAM error interrupt
+
+  The 2nd cell is the index of the interrupt in the ICU unit.
+
+  The 3rd cell is the type of the interrupt. See arm,gic.txt for
+  details.
+
+- interrupt-controller: Identifies the node as an interrupt
+  controller.
+
+- msi-parent: Should point to the GICP controller, the GIC extension
+  that allows to trigger interrupts using MSG memory mapped
+  transactions.
+
+Example:
+
+icu: interrupt-controller@1e0000 {
+	compatible = "marvell,cp110-icu";
+	reg = <0x1e0000 0x10>;
+	#interrupt-cells = <3>;
+	interrupt-controller;
+	msi-parent = <&gicp>;
+};
+
+usb3h0: usb3@500000 {
+	interrupt-parent = <&icu>;
+	interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index a89c03b..11cc87a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -1,21 +1,23 @@
-+Mediatek 65xx/67xx/81xx sysirq
+Mediatek MT65xx/MT67xx/MT81xx sysirq
 
 Mediatek SOCs sysirq support controllable irq inverter for each GIC SPI
 interrupt.
 
 Required properties:
-- compatible: should be one of:
-	"mediatek,mt8173-sysirq"
-	"mediatek,mt8135-sysirq"
-	"mediatek,mt8127-sysirq"
-	"mediatek,mt6795-sysirq"
-	"mediatek,mt6755-sysirq"
-	"mediatek,mt6592-sysirq"
-	"mediatek,mt6589-sysirq"
-	"mediatek,mt6582-sysirq"
-	"mediatek,mt6580-sysirq"
-	"mediatek,mt6577-sysirq"
-	"mediatek,mt2701-sysirq"
+- compatible: should be
+	"mediatek,mt8173-sysirq", "mediatek,mt6577-sysirq": for MT8173
+	"mediatek,mt8135-sysirq", "mediatek,mt6577-sysirq": for MT8135
+	"mediatek,mt8127-sysirq", "mediatek,mt6577-sysirq": for MT8127
+	"mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622
+	"mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795
+	"mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797
+	"mediatek,mt6755-sysirq", "mediatek,mt6577-sysirq": for MT6755
+	"mediatek,mt6592-sysirq", "mediatek,mt6577-sysirq": for MT6592
+	"mediatek,mt6589-sysirq", "mediatek,mt6577-sysirq": for MT6589
+	"mediatek,mt6582-sysirq", "mediatek,mt6577-sysirq": for MT6582
+	"mediatek,mt6580-sysirq", "mediatek,mt6577-sysirq": for MT6580
+	"mediatek,mt6577-sysirq": for MT6577
+	"mediatek,mt2701-sysirq", "mediatek,mt6577-sysirq": for MT2701
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Use the same format as specified by GIC in arm,gic.txt.
 - interrupt-parent: phandle of irq parent for sysirq. The parent must
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 24b6560..1d4afe9 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -1,4 +1,4 @@
-Common leds properties.
+* Common leds properties.
 
 LED and flash LED devices provide the same basic functionality as current
 regulators, but extended with LED and flash LED specific features like
@@ -49,6 +49,22 @@
 - panic-indicator : This property specifies that the LED should be used,
 		    if at all possible, as a panic indicator.
 
+- trigger-sources : List of devices which should be used as a source triggering
+		    this LED activity. Some LEDs can be related to a specific
+		    device and should somehow indicate its state. E.g. a USB 2.0
+		    LED may react to device(s) in USB 2.0 port(s).
+		    Another common example is a switch or router with multiple
+		    Ethernet ports, each of them having its own LED assigned
+		    (assuming they are not hardwired). In such cases this
+		    property should contain phandle(s) of the related source
+		    device(s).
+		    In many cases an LED can be related to more than one device
+		    (e.g. one USB LED vs. multiple USB ports). Each source
+		    should be represented by a node in the device tree and be
+		    referenced by a phandle and a set of phandle arguments. The
+		    number of arguments should be specified by the
+		    #trigger-source-cells property in the source node.
+
 Required properties for flash LED child nodes:
 - flash-max-microamp : Maximum flash LED supply current in microamperes.
 - flash-max-timeout-us : Maximum timeout in microseconds after which the flash
@@ -59,7 +75,17 @@
 For controllers that have no configurable timeout the flash-max-timeout-us
 property can be omitted.
 
-Examples:
+* Trigger source providers
+
+Each trigger source should be represented by a device tree node. It may be e.g.
+a USB port or an Ethernet device.
+
+Required properties for trigger source:
+- #trigger-source-cells : Number of cells in a source trigger. Typically 0 for
+			  nodes of simple trigger sources (e.g. a specific USB
+			  port).
+
+* Examples
 
 gpio-leds {
 	compatible = "gpio-leds";
@@ -69,6 +95,11 @@
 		linux,default-trigger = "heartbeat";
 		gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
 	};
+
+	usb {
+		gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+		trigger-sources = <&ohci_port1>, <&ehci_port1>;
+	};
 };
 
 max77693-led {
diff --git a/Documentation/devicetree/bindings/mfd/hi6421.txt b/Documentation/devicetree/bindings/mfd/hi6421.txt
index 0d5a446..22da96d 100644
--- a/Documentation/devicetree/bindings/mfd/hi6421.txt
+++ b/Documentation/devicetree/bindings/mfd/hi6421.txt
@@ -1,7 +1,9 @@
 * HI6421 Multi-Functional Device (MFD), by HiSilicon Ltd.
 
 Required parent device properties:
-- compatible	: contains "hisilicon,hi6421-pmic";
+- compatible    : One of the following chip-specific strings:
+	"hisilicon,hi6421-pmic";
+	"hisilicon,hi6421v530-pmic";
 - reg		: register range space of hi6421;
 
 Supported Hi6421 sub-devices include:
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f..1db6e00 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@
 		compatible = "st,stm32-timers";
 		reg = <0x40010000 0x400>;
 		clocks = <&rcc 0 160>;
-		clock-names = "clk_int";
+		clock-names = "int";
 
 		pwm {
 			compatible = "st,stm32-pwm";
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index 38833e6..8af1202 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -61,6 +61,10 @@
   There should be 9 entries here, one for each gpio.
 - ti,system-power-controller: Telling whether or not this pmic is controlling
   the system power.
+- ti,sleep-enable: Enable SLEEP state.
+- ti,sleep-keep-therm: Keep thermal monitoring on in sleep state.
+- ti,sleep-keep-ck32k: Keep the 32KHz clock output on in sleep state.
+- ti,sleep-keep-hsclk: Keep high speed internal clock on in sleep state.
 
 Regulator Optional properties:
 - ti,regulator-ext-sleep-control: enable external sleep
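
A minimal fragment showing the new sleep properties in context (the I2C
address and node name are hypothetical; only the property names come from
this binding):

	pmu: tps65910@2d {
		compatible = "ti,tps65910";
		reg = <0x2d>;
		ti,system-power-controller;
		ti,sleep-enable;
		ti,sleep-keep-ck32k;
	};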
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index dedfb02..a2cf5e1 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -7,6 +7,20 @@
 by mmc.txt and the properties used by the sdhci-esdhc driver.
 
 Required properties:
+  - compatible : should be "fsl,esdhc", or "fsl,<chip>-esdhc".
+    Possible compatibles for PowerPC:
+	"fsl,mpc8536-esdhc"
+	"fsl,mpc8378-esdhc"
+	"fsl,p2020-esdhc"
+	"fsl,p4080-esdhc"
+	"fsl,t1040-esdhc"
+	"fsl,t4240-esdhc"
+    Possible compatibles for ARM:
+	"fsl,ls1012a-esdhc"
+	"fsl,ls1088a-esdhc"
+	"fsl,ls1043a-esdhc"
+	"fsl,ls1046a-esdhc"
+	"fsl,ls2080a-esdhc"
   - interrupt-parent : interrupt source phandle.
   - clock-frequency : specifies eSDHC base clock frequency.
 
diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
index df37058..8af1afc 100644
--- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
@@ -12,6 +12,7 @@
 Required Properties:
 
 * compatible: should be one of the following.
+  - "hisilicon,hi3660-dw-mshc": for controllers with hi3660 specific extensions.
   - "hisilicon,hi4511-dw-mshc": for controllers with hi4511 specific extensions.
   - "hisilicon,hi6220-dw-mshc": for controllers with hi6220 specific extensions.
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 520d61d..49ed3ad 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -15,6 +15,7 @@
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
 	- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
+	- "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
 
@@ -31,6 +32,10 @@
   probing, low speeds or in case where all phases work at tuning time.
   If not specified 0 deg will be used.
 
+* rockchip,desired-num-phases: The desired number of times that the host
+  executes tuning when needed. If not specified, the host will do tuning
+  360 times, i.e. tuning for each degree.
+
 Example:
 
 	rkdwmmc0@12200000 {
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 74166a0..0e026c1 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -18,7 +18,7 @@
 Optional properties:
 ti,dual-volt: boolean, supports dual voltage cards
 <supply-name>-supply: phandle to the regulator device tree node
-"supply-name" examples are "vmmc", "vmmc_aux" etc
+"supply-name" examples are "vmmc", "vmmc_aux"(deprecated)/"vqmmc" etc
 ti,non-removable: non-removable slot (like eMMC)
 ti,needs-special-reset: Requires a special softreset sequence
 ti,needs-special-hs-handling: HSMMC IP needs special setting for handling High Speed
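
A fragment illustrating the preferred "vqmmc" supply name alongside "vmmc"
(node name, address and regulator phandles are hypothetical):

	mmc1: mmc@4809c000 {
		compatible = "ti,omap4-hsmmc";
		reg = <0x4809c000 0x400>;
		vmmc-supply = <&vmmc>;
		vqmmc-supply = <&vmmc_aux>;
	};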
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index f6bee57..9bb66e47 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -59,8 +59,22 @@
 - reg: should contain 2 register ranges. The first one is pointing to the PMECC
        block, and the second one to the PMECC_ERRLOC block.
 
+* SAMA5 NFC I/O bindings:
+
+SAMA5 SoCs embed advanced NAND controller logic to automate READ/WRITE page
+operations. The interface to this logic is placed in a separate I/O range and
+should thus have its own DT node.
+
+- compatible: should be "atmel,sama5d3-nfc-io", "syscon".
+- reg: should contain the I/O range used to interact with the NFC logic.
+
 Example:
 
+	nfc_io: nfc-io@70000000 {
+		compatible = "atmel,sama5d3-nfc-io", "syscon";
+		reg = <0x70000000 0x8000000>;
+	};
+
 	pmecc: ecc-engine@ffffc070 {
 		compatible = "atmel,at91sam9g45-pmecc";
                 reg = <0xffffc070 0x490>,
diff --git a/Documentation/devicetree/bindings/mux/adi,adg792a.txt b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
new file mode 100644
index 0000000..96b787a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
@@ -0,0 +1,75 @@
+Bindings for Analog Devices ADG792A/G Triple 4:1 Multiplexers
+
+Required properties:
+- compatible : "adi,adg792a" or "adi,adg792g"
+- #mux-control-cells : <0> if parallel (the three muxes are bound together
+  with a single mux controller controlling all three muxes), or <1> if
+  not (one mux controller for each mux).
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties for ADG792G:
+- gpio-controller : if present, #gpio-cells below is required.
+- #gpio-cells : should be <2>
+			  - First cell is the GPO line number, i.e. 0 or 1
+			  - Second cell is used to specify active high (0)
+			    or active low (1)
+
+Optional properties:
+- idle-state : if present, array of states that the mux controllers will have
+  when idle. The special state MUX_IDLE_AS_IS is the default and
+  MUX_IDLE_DISCONNECT is also supported.
+
+States 0 through 3 correspond to signals A through D in the datasheet.
+
+Example:
+
+	/*
+	 * Three independent mux controllers (of which one is used).
+	 * Mux 0 is disconnected when idle, mux 1 idles in the previously
+	 * selected state and mux 2 idles with signal B.
+	 */
+	&i2c0 {
+		mux: mux-controller@50 {
+			compatible = "adi,adg792a";
+			reg = <0x50>;
+			#mux-control-cells = <1>;
+
+			idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 1>;
+		};
+	};
+
+	adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&mux 2>;
+
+		channels = "sync-1", "", "out";
+	};
+
+
+	/*
+	 * Three parallel muxes with one mux controller, useful e.g. if
+	 * the adc is differential, thus needing two signals to be muxed
+	 * simultaneously for correct operation.
+	 */
+	&i2c0 {
+		pmux: mux-controller@50 {
+			compatible = "adi,adg792a";
+			reg = <0x50>;
+			#mux-control-cells = <0>;
+
+			idle-state = <1>;
+		};
+	};
+
+	diff-adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&pmux>;
+
+		channels = "sync-1", "", "out";
+	};
diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.txt b/Documentation/devicetree/bindings/mux/gpio-mux.txt
new file mode 100644
index 0000000..b8f7463
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/gpio-mux.txt
@@ -0,0 +1,69 @@
+GPIO-based multiplexer controller bindings
+
+Define what GPIO pins are used to control a multiplexer. Or several
+multiplexers, if the same pins control more than one multiplexer.
+
+Required properties:
+- compatible : "gpio-mux"
+- mux-gpios : list of gpios used to control the multiplexer, least
+	      significant bit first.
+- #mux-control-cells : <0>
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties:
+- idle-state : if present, the state the mux will have when idle. The
+	       special state MUX_IDLE_AS_IS is the default.
+
+The multiplexer state is defined as the number represented by the
+multiplexer GPIO pins, where the first pin is the least significant
+bit. An active pin is a binary 1, an inactive pin is a binary 0.
+
+Example:
+
+	mux: mux-controller {
+		compatible = "gpio-mux";
+		#mux-control-cells = <0>;
+
+		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+			    <&pioA 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&mux>;
+
+		channels = "sync-1", "in", "out", "sync-2";
+	};
+
+	i2c-mux {
+		compatible = "i2c-mux";
+		i2c-parent = <&i2c1>;
+
+		mux-controls = <&mux>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ssd1307: oled@3c {
+				/* ... */
+			};
+		};
+
+		i2c@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			pca9555: pca9555@20 {
+				/* ... */
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mux/mmio-mux.txt b/Documentation/devicetree/bindings/mux/mmio-mux.txt
new file mode 100644
index 0000000..a9bfb4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mmio-mux.txt
@@ -0,0 +1,60 @@
+MMIO register bitfield-based multiplexer controller bindings
+
+Define register bitfields to be used to control multiplexers. The parent
+device tree node must be a syscon node to provide register access.
+
+Required properties:
+- compatible : "mmio-mux"
+- #mux-control-cells : <1>
+- mux-reg-masks : an array of register offset and pre-shifted bitfield mask
+                  pairs, each describing a single mux control.
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties:
+- idle-states : if present, the state the muxes will have when idle. The
+		special state MUX_IDLE_AS_IS is the default.
+
+The multiplexer state of each multiplexer is defined as the value of the
+bitfield described by the corresponding register offset and bitfield mask pair
+in the mux-reg-masks array, accessed through the parent syscon.
+
+Example:
+
+	syscon {
+		compatible = "syscon";
+
+		mux: mux-controller {
+			compatible = "mmio-mux";
+			#mux-control-cells = <1>;
+
+			mux-reg-masks = <0x3 0x30>, /* 0: reg 0x3, bits 5:4 */
+					<0x3 0x40>, /* 1: reg 0x3, bit 6 */
+			idle-states = <MUX_IDLE_AS_IS>, <0>;
+		};
+	};
+
+	video-mux {
+		compatible = "video-mux";
+		mux-controls = <&mux 0>;
+
+		ports {
+			/* inputs 0..3 */
+			port@0 {
+				reg = <0>;
+			};
+			port@1 {
+				reg = <1>;
+			};
+			port@2 {
+				reg = <2>;
+			};
+			port@3 {
+				reg = <3>;
+			};
+
+			/* output */
+			port@4 {
+				reg = <4>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mux/mux-controller.txt b/Documentation/devicetree/bindings/mux/mux-controller.txt
new file mode 100644
index 0000000..4f47e4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mux-controller.txt
@@ -0,0 +1,157 @@
+Common multiplexer controller bindings
+======================================
+
+A multiplexer (or mux) controller will have one, or several, consumer devices
+that use the mux controller. Thus, a mux controller can possibly control
+several parallel multiplexers. Presumably there will be at least one
+multiplexer needed by each consumer, but a single mux controller can of course
+control several multiplexers for a single consumer.
+
+A mux controller provides a number of states to its consumers, and the state
+space is a simple zero-based enumeration. I.e. 0-1 for a 2-way multiplexer,
+0-7 for an 8-way multiplexer, etc.
+
+
+Consumers
+---------
+
+Mux controller consumers should specify a list of mux controllers that they
+want to use with a property containing a 'mux-ctrl-list':
+
+	mux-ctrl-list ::= <single-mux-ctrl> [mux-ctrl-list]
+	single-mux-ctrl ::= <mux-ctrl-phandle> [mux-ctrl-specifier]
+	mux-ctrl-phandle : phandle to mux controller node
+	mux-ctrl-specifier : array of #mux-control-cells specifying the
+			     given mux controller (controller specific)
+
+Mux controller properties should be named "mux-controls". The exact meaning of
+each mux controller property must be documented in the device tree binding for
+each consumer. An optional property "mux-control-names" may contain a list of
+strings to label each of the mux controllers listed in the "mux-controls"
+property.
+
+Drivers for devices that use more than a single mux controller can use the
+"mux-control-names" property to map the name of the requested mux controller
+to an index into the list given by the "mux-controls" property.
+
+mux-ctrl-specifier typically encodes the chip-relative mux controller number.
+If the mux controller chip only provides a single mux controller, the
+mux-ctrl-specifier can typically be left out.
+
+Example:
+
+	/* One consumer of a 2-way mux controller (one GPIO-line) */
+	mux: mux-controller {
+		compatible = "gpio-mux";
+		#mux-control-cells = <0>;
+
+		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>;
+	};
+
+	adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&mux>;
+		mux-control-names = "adc";
+
+		channels = "sync", "in";
+	};
+
+Note that in the example above, specifying the "mux-control-names" is redundant
+because there is only one mux controller in the list. However, if the driver
+for the consumer node in fact asks for a named mux controller, that name is of
+course still required.
+
+	/*
+	 * Two consumers (one for an ADC line and one for an i2c bus) of
+	 * parallel 4-way multiplexers controlled by the same two GPIO-lines.
+	 */
+	mux: mux-controller {
+		compatible = "gpio-mux";
+		#mux-control-cells = <0>;
+
+		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+			    <&pioA 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	adc-mux {
+		compatible = "io-channel-mux";
+		io-channels = <&adc 0>;
+		io-channel-names = "parent";
+
+		mux-controls = <&mux>;
+
+		channels = "sync-1", "in", "out", "sync-2";
+	};
+
+	i2c-mux {
+		compatible = "i2c-mux";
+		i2c-parent = <&i2c1>;
+
+		mux-controls = <&mux>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ssd1307: oled@3c {
+				/* ... */
+			};
+		};
+
+		i2c@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			pca9555: pca9555@20 {
+				/* ... */
+			};
+		};
+	};
+
+
+Mux controller nodes
+--------------------
+
+Mux controller nodes must specify the number of cells used for the
+specifier using the '#mux-control-cells' property.
+
+Optionally, mux controller nodes can also specify the state the mux should
+have when it is idle. The idle-state property is used for this. If the
+idle-state is not present, the mux controller is typically left as is when
+it is idle. For multiplexer chips that expose several mux controllers, the
+idle-state property is an array with one idle state for each mux controller.
+
+The special value (-1) may be used to indicate that the mux should be left
+as is when it is idle. This is the default, but can still be useful for
+mux controller chips with more than one mux controller, particularly when
+there is a need to "step past" a mux controller and set some other idle
+state for a mux controller with a higher index.
+
+Some mux controllers have the ability to disconnect the input/output of the
+multiplexer. Using this disconnected high-impedance state as the idle state
+is indicated with idle state (-2).
+
+These constants are available in
+
+      #include <dt-bindings/mux/mux.h>
+
+as MUX_IDLE_AS_IS (-1) and MUX_IDLE_DISCONNECT (-2).
+
+An example mux controller node looks like this (the adg792a chip is a triple
+4-way multiplexer):
+
+	mux: mux-controller@50 {
+		compatible = "adi,adg792a";
+		reg = <0x50>;
+		#mux-control-cells = <1>;
+
+		idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 2>;
+	};
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41..8ec2ca2 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -34,7 +34,7 @@
       "brcm,bcm6328-switch"
       "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
 
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb..1d4d0f4 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@
 - interrupt-controller	: Indicates the switch is itself an interrupt
 			  controller. This is used for the PHY interrupts.
 #interrupt-cells = <2>	: Controller uses two cells, number and flag
+- eeprom-length		: Set to the length of an EEPROM connected to the
+			  switch. Must be set if the switch can not detect
+			  the presence and/or size of a connected EEPROM,
+			  otherwise optional.
 - mdio			: Container of PHY and devices on the switches MDIO
 			  bus.
 - mdio?		: Container of PHYs and devices on the external MDIO
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 16c3a950..acfafc8 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -27,6 +27,7 @@
   of the device. On many systems this is wired high so the device goes
   out of reset at power-on, but if it is under program control, this
   optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 94aeeea..194926f 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -4,6 +4,7 @@
 - compatible: Should be one of the following.
   - "rockchip,rk3066a-efuse" - for RK3066a SoCs.
   - "rockchip,rk3188-efuse" - for RK3188 SoCs.
+  - "rockchip,rk322x-efuse" - for RK322x SoCs.
   - "rockchip,rk3288-efuse" - for RK3288 SoCs.
   - "rockchip,rk3399-efuse" - for RK3399 SoCs.
 - reg: Should contain the registers location and exact eFuse size
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 6372549..e36d261 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -186,20 +186,20 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
-		opp@1100000000 {
+		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			clock-latency-ns = <290000>;
@@ -265,20 +265,20 @@
 		 * independently.
 		 */
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
-		opp@1100000000 {
+		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			opp-microamp = <90000;
@@ -341,20 +341,20 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
-		opp@1100000000 {
+		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt = <1025000>;
 			opp-microamp = <90000>;
@@ -367,20 +367,20 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@1300000000 {
+		opp-1300000000 {
 			opp-hz = /bits/ 64 <1300000000>;
 			opp-microvolt = <1050000 1045000 1055000>;
 			opp-microamp = <95000>;
 			clock-latency-ns = <400000>;
 			opp-suspend;
 		};
-		opp@1400000000 {
+		opp-1400000000 {
 			opp-hz = /bits/ 64 <1400000000>;
 			opp-microvolt = <1075000>;
 			opp-microamp = <100000>;
 			clock-latency-ns = <400000>;
 		};
-		opp@1500000000 {
+		opp-1500000000 {
 			opp-hz = /bits/ 64 <1500000000>;
 			opp-microvolt = <1100000 1010000 1110000>;
 			opp-microamp = <95000>;
@@ -409,7 +409,7 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <970000>, /* Supply 0 */
 					<960000>, /* Supply 1 */
@@ -422,7 +422,7 @@
 
 		/* OR */
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <975000 970000 985000>, /* Supply 0 */
 					<965000 960000 975000>, /* Supply 1 */
@@ -435,7 +435,7 @@
 
 		/* OR */
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <975000 970000 985000>, /* Supply 0 */
 					<965000 960000 975000>, /* Supply 1 */
@@ -467,7 +467,7 @@
 		status = "okay";
 		opp-shared;
 
-		opp@600000000 {
+		opp-600000000 {
 			/*
 			 * Supports all substrate and process versions for 0xF
 			 * cuts, i.e. only first four cuts.
@@ -478,7 +478,7 @@
 			...
 		};
 
-		opp@800000000 {
+		opp-800000000 {
 			/*
 			 * Supports:
 			 * - cuts: only one, 6th cut (represented by 6th bit).
@@ -510,7 +510,7 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt-slow = <915000 900000 925000>;
 			opp-microvolt-fast = <975000 970000 985000>;
@@ -518,7 +518,7 @@
 			opp-microamp-fast =  <71000>;
 		};
 
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
 					      <925000 910000 935000>; /* Supply vcc1 */
diff --git a/Documentation/devicetree/bindings/pci/kirin-pcie.txt b/Documentation/devicetree/bindings/pci/kirin-pcie.txt
new file mode 100644
index 0000000..68ffa0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/kirin-pcie.txt
@@ -0,0 +1,50 @@
+HiSilicon Kirin SoCs PCIe host DT description
+
+Kirin PCIe host controller is based on the DesignWare PCIe core.
+It shares common functions with the PCIe DesignWare core driver
+and inherits common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
+
+Additional properties are described here:
+
+Required properties
+- compatible:
+	"hisilicon,kirin960-pcie" for PCIe of Kirin960 SoC
+- reg: Should contain rc_dbi, apb, phy, config registers location and length.
+- reg-names: Must include the following entries:
+  "dbi": controller configuration registers;
+  "apb": apb Ctrl register defined by Kirin;
+  "phy": apb PHY register defined by Kirin;
+  "config": PCIe configuration space registers.
+- reset-gpios: The gpio to generate PCIe perst assert and deassert signal.
+
+Optional properties:
+
+Example based on kirin960:
+
+	pcie@f4000000 {
+		compatible = "hisilicon,kirin-pcie";
+		reg = <0x0 0xf4000000 0x0 0x1000>, <0x0 0xff3fe000 0x0 0x1000>,
+		      <0x0 0xf3f20000 0x0 0x40000>, <0x0 0xF4000000 0 0x2000>;
+		reg-names = "dbi","apb","phy", "config";
+		bus-range = <0x0  0x1>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		device_type = "pci";
+		ranges = <0x02000000 0x0 0x00000000 0x0 0xf5000000 0x0 0x2000000>;
+		num-lanes = <1>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xf800 0 0 7>;
+		interrupt-map = <0x0 0 0 1 &gic 0 0 0  282 4>,
+				<0x0 0 0 2 &gic 0 0 0  283 4>,
+				<0x0 0 0 3 &gic 0 0 0  284 4>,
+				<0x0 0 0 4 &gic 0 0 0  285 4>;
+		clocks = <&crg_ctrl HI3660_PCIEPHY_REF>,
+			 <&crg_ctrl HI3660_CLK_GATE_PCIEAUX>,
+			 <&crg_ctrl HI3660_PCLK_GATE_PCIE_PHY>,
+			 <&crg_ctrl HI3660_PCLK_GATE_PCIE_SYS>,
+			 <&crg_ctrl HI3660_ACLK_GATE_PCIE>;
+		clock-names = "pcie_phy_ref", "pcie_aux",
+			      "pcie_apb_phy", "pcie_apb_sys", "pcie_aclk";
+		reset-gpios = <&gpio11 1 0 >;
+	};
diff --git a/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
index 09aeba9..32f0572 100644
--- a/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
+++ b/Documentation/devicetree/bindings/phy/bcm-ns-usb3-phy.txt
@@ -3,9 +3,10 @@
 Required properties:
 
 - compatible: one of: "brcm,ns-ax-usb3-phy", "brcm,ns-bx-usb3-phy".
-- reg: register mappings for DMP (Device Management Plugin) and ChipCommon B
-       MMI.
-- reg-names: "dmp" and "ccb-mii"
+- reg: address of MDIO bus device
+- usb3-dmp-syscon: phandle to syscon with DMP (Device Management Plugin)
+		   registers
+- #phy-cells: must be 0
 
 Initialization of USB 3.0 PHY depends on Northstar version. There are currently
 three known series: Ax, Bx and Cx.
@@ -15,9 +16,19 @@
 Known C0: BCM47094 rev 0
 
 Example:
-	usb3-phy {
-		compatible = "brcm,ns-ax-usb3-phy";
-		reg = <0x18105000 0x1000>, <0x18003000 0x1000>;
-		reg-names = "dmp", "ccb-mii";
-		#phy-cells = <0>;
+	mdio: mdio@0 {
+		reg = <0x0>;
+		#size-cells = <1>;
+		#address-cells = <0>;
+
+		usb3-phy@10 {
+			compatible = "brcm,ns-ax-usb3-phy";
+			reg = <0x10>;
+			usb3-dmp-syscon = <&usb3_dmp>;
+			#phy-cells = <0>;
+		};
+	};
+
+	usb3_dmp: syscon@18105000 {
+		reg = <0x18105000 0x1000>;
 	};
diff --git a/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt b/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt
new file mode 100644
index 0000000..04f063a
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,ns2-drd-phy.txt
@@ -0,0 +1,30 @@
+BROADCOM NORTHSTAR2 USB2 (DUAL ROLE DEVICE) PHY
+
+Required properties:
+ - compatible: brcm,ns2-drd-phy
+ - reg: offset and length of the NS2 PHY related registers.
+ - reg-names
+   The below registers must be provided.
+   icfg - for DRD ICFG configurations
+   rst-ctrl - for DRD IDM reset
+   crmu-ctrl - for CRMU core vdd, PHY and PHY PLL reset
+   usb2-strap - for port over current polarity reversal
+ - #phy-cells: Must be 0. No args required.
+ - vbus-gpios: vbus gpio binding
+ - id-gpios: id gpio binding
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties
+
+Example:
+	usbdrd_phy: phy@66000960 {
+			#phy-cells = <0>;
+			compatible = "brcm,ns2-drd-phy";
+			reg = <0x66000960 0x24>,
+			      <0x67012800 0x4>,
+			      <0x6501d148 0x4>,
+			      <0x664d0700 0x4>;
+			reg-names = "icfg", "rst-ctrl",
+				    "crmu-ctrl", "usb2-strap";
+			id-gpios = <&gpio_g 30 0>;
+			vbus-gpios = <&gpio_g 31 0>;
+	};
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
index 6ccce09..97977cd 100644
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
@@ -7,12 +7,13 @@
      "brcm,iproc-ns2-sata-phy"
      "brcm,iproc-nsp-sata-phy"
      "brcm,phy-sata3"
+     "brcm,iproc-sr-sata-phy"
 - address-cells: should be 1
 - size-cells: should be 0
 - reg: register ranges for the PHY PCB interface
 - reg-names: should be "phy" and "phy-ctrl"
      The "phy-ctrl" registers are only required for
-     "brcm,iproc-ns2-sata-phy".
+     "brcm,iproc-ns2-sata-phy" and "brcm,iproc-sr-sata-phy".
 
 Sub-nodes:
   Each port's PHY should be represented as a sub-node.
@@ -23,8 +24,8 @@
 
 Sub-nodes optional properties:
 - brcm,enable-ssc: use spread spectrum clocking (SSC) on this port
-     This property is not applicable for "brcm,iproc-ns2-sata-phy" and
-     "brcm,iproc-nsp-sata-phy".
+     This property is not applicable for "brcm,iproc-ns2-sata-phy",
+     "brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
 
 Example:
 	sata-phy@f0458100 {
diff --git a/Documentation/devicetree/bindings/phy/meson-gxl-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson-gxl-usb2-phy.txt
new file mode 100644
index 0000000..a105494
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/meson-gxl-usb2-phy.txt
@@ -0,0 +1,17 @@
+* Amlogic Meson GXL and GXM USB2 PHY binding
+
+Required properties:
+- compatible:	Should be "amlogic,meson-gxl-usb2-phy"
+- reg:		The base address and length of the registers
+- #phy-cells:	must be 0 (see phy-bindings.txt in this directory)
+
+Optional properties:
+- phy-supply:	see phy-bindings.txt in this directory
+
+
+Example:
+	usb2_phy0: phy@78000 {
+		compatible = "amlogic,meson-gxl-usb2-phy";
+		#phy-cells = <0>;
+		reg = <0x0 0x78000 0x0 0x20>;
+	};
diff --git a/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
index 5fa73b9..d81d73a 100644
--- a/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
@@ -1,7 +1,8 @@
-* Amlogic Meson8b and GXBB USB2 PHY
+* Amlogic Meson8, Meson8b and GXBB USB2 PHY
 
 Required properties:
 - compatible:	Depending on the platform this should be one of:
+	"amlogic,meson8-usb2-phy"
 	"amlogic,meson8b-usb2-phy"
 	"amlogic,meson-gxbb-usb2-phy"
 - reg:		The base address and length of the registers
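
A sketch of a PHY node using the new Meson8 compatible (the register address
is hypothetical, not taken from this patch):

	usb0_phy: phy@c1108400 {
		compatible = "amlogic,meson8-usb2-phy";
		reg = <0xc1108400 0x20>;
		#phy-cells = <0>;
	};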
diff --git a/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt b/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
new file mode 100644
index 0000000..2eb9b2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
@@ -0,0 +1,40 @@
+Motorola CPCAP PMIC USB PHY binding
+
+Required properties:
+compatible: Shall be either "motorola,cpcap-usb-phy" or
+	    "motorola,mapphone-cpcap-usb-phy"
+#phy-cells: Shall be 0
+interrupts: CPCAP PMIC interrupts used by the USB PHY
+interrupt-names: Interrupt names
+io-channels: IIO ADC channels used by the USB PHY
+io-channel-names: IIO ADC channel names
+vusb-supply: Regulator for the PHY
+
+Optional properties:
+pinctrl: Optional alternate pin modes for the PHY
+pinctrl-names: Names for optional pin modes
+mode-gpios: Optional GPIOs for configuring alternate modes
+
+Example:
+cpcap_usb2_phy: phy {
+	compatible = "motorola,mapphone-cpcap-usb-phy";
+	pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>;
+	pinctrl-1 = <&usb_ulpi_pins>;
+	pinctrl-2 = <&usb_utmi_pins>;
+	pinctrl-3 = <&uart3_pins>;
+	pinctrl-names = "default", "ulpi", "utmi", "uart";
+	#phy-cells = <0>;
+	interrupts-extended = <
+		&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
+		&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
+		&cpcap 48 1
+	>;
+	interrupt-names =
+		"id_ground", "id_float", "se0conn", "vbusvld",
+		"sessvld", "sessend", "se1", "dm", "dp";
+	mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH
+		      &gpio1 0 GPIO_ACTIVE_HIGH>;
+	io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>;
+	io-channel-names = "vbus", "id";
+	vusb-supply = <&vusb>;
+};
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index e71a8d2..84d59b0 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -2,6 +2,7 @@
 
 Required properties (phy (parent) node):
  - compatible : should be one of the listed compatibles:
+	* "rockchip,rk3228-usb2phy"
 	* "rockchip,rk3328-usb2phy"
 	* "rockchip,rk3366-usb2phy"
 	* "rockchip,rk3399-usb2phy"
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
new file mode 100644
index 0000000..f94cea4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
@@ -0,0 +1,46 @@
+* Renesas R-Car generation 3 USB 3.0 PHY
+
+This file provides information on what the device node for the R-Car generation
+3 USB 3.0 PHY contains.
+If you want to enable spread spectrum clock (ssc), you should use USB_EXTAL
+instead of USB3_CLK. However, if you don't want to use these features, you don't
+need this driver.
+
+Required properties:
+- compatible: "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
+	      SoC.
+	      "renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
+	      SoC.
+	      "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 compatible
+	      device.
+
+	      When compatible with the generic version, nodes must list the
+	      SoC-specific version corresponding to the platform first
+	      followed by the generic version.
+
+- reg: offset and length of the USB 3.0 PHY register block.
+- clocks: A list of phandles and clock-specifier pairs.
+- clock-names: Name of the clocks.
+  - The functional clock must be "usb3-if".
+  - The usb3's external clock must be "usb3s_clk".
+  - The usb2's external clock must be "usb_extal". If you want to use the ssc,
+    the clock-frequency must not be 0.
+- #phy-cells: see phy-bindings.txt in the same directory, must be <0>.
+
+Optional properties:
+- renesas,ssc-range: Enable/disable spread spectrum clock (ssc) by using
+		     the following values as u32:
+			- 0 (or the property doesn't exist): disable the ssc
+			- 4980: enable the ssc as -4980 ppm
+			- 4492: enable the ssc as -4492 ppm
+			- 4003: enable the ssc as -4003 ppm
+
+Example (R-Car H3):
+
+	usb-phy@e65ee000 {
+		compatible = "renesas,r8a7795-usb3-phy",
+			     "renesas,rcar-gen3-usb3-phy";
+		reg = <0 0xe65ee000 0 0x90>;
+		clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>, <&usb_extal>;
+		clock-names = "usb3-if", "usb3s_clk", "usb_extal";
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 71a3c13..f01d154 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -247,7 +247,6 @@
 bias-pull-up		- pull up the pin
 bias-pull-down		- pull down the pin
 bias-pull-pin-default	- use pin-default pull state
-bi-directional		- pin supports simultaneous input/output operations
 drive-push-pull		- drive actively high and low
 drive-open-drain	- drive with open drain
 drive-open-source	- drive with open source
@@ -260,7 +259,6 @@
 power-source		- select between different power supplies
 low-power-enable	- enable low power mode
 low-power-disable	- disable low power mode
-output-enable		- enable output on pin regardless of output value
 output-low		- set the pin to output mode with low level
 output-high		- set the pin to output mode with high level
 slew-rate		- set the slew rate
diff --git a/Documentation/devicetree/bindings/power/actions,owl-sps.txt b/Documentation/devicetree/bindings/power/actions,owl-sps.txt
new file mode 100644
index 0000000..007b9a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/actions,owl-sps.txt
@@ -0,0 +1,17 @@
+Actions Semi Owl Smart Power System (SPS)
+
+Required properties:
+- compatible          :  "actions,s500-sps" for S500
+- reg                 :  Offset and length of the register set for the device.
+- #power-domain-cells :  Must be 1.
+                         See macros in:
+                          include/dt-bindings/power/owl-s500-powergate.h for S500
+
+
+Example:
+
+		sps: power-controller@b01b0100 {
+			compatible = "actions,s500-sps";
+			reg = <0xb01b0100 0x100>;
+			#power-domain-cells = <1>;
+		};
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index d3a5a93..43c21fb 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -32,6 +32,7 @@
 Required properties:
 - compatible: should be one of:
   - "rockchip,rk3188-io-voltage-domain" for rk3188
+  - "rockchip,rk3228-io-voltage-domain" for rk3228
   - "rockchip,rk3288-io-voltage-domain" for rk3288
   - "rockchip,rk3328-io-voltage-domain" for rk3328
   - "rockchip,rk3368-io-voltage-domain" for rk3368
@@ -59,6 +60,12 @@
 - vccio1-supply: The supply connected to VCCIO1.
                  Sometimes also labeled VCCIO1 and VCCIO2.
 
+Possible supplies for rk3228:
+- vccio1-supply: The supply connected to VCCIO1.
+- vccio2-supply: The supply connected to VCCIO2.
+- vccio3-supply: The supply connected to VCCIO3.
+- vccio4-supply: The supply connected to VCCIO4.
+
 Possible supplies for rk3288:
 - audio-supply:  The supply connected to APIO4_VDD.
 - bb-supply:     The supply connected to APIO5_VDD.
diff --git a/Documentation/devicetree/bindings/power/supply/battery.txt b/Documentation/devicetree/bindings/power/supply/battery.txt
new file mode 100644
index 0000000..f4d3b4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/battery.txt
@@ -0,0 +1,57 @@
+Battery Characteristics
+
+The devicetree battery node provides static battery characteristics.
+In smart batteries, these are typically stored in non-volatile memory
+on a fuel gauge chip. The battery node should be used where there is
+no appropriate non-volatile memory, or it is unprogrammed/incorrect.
+
+Upstream dts files should not include battery nodes, unless the battery
+represented cannot easily be replaced in the system by one of a
+different type. This prevents unpredictable, potentially harmful,
+behavior should a replacement that changes the battery type occur
+without a corresponding update to the dtb.
+
+Required Properties:
+ - compatible: Must be "simple-battery"
+
+Optional Properties:
+ - voltage-min-design-microvolt: drained battery voltage
+ - energy-full-design-microwatt-hours: battery design energy
+ - charge-full-design-microamp-hours: battery design capacity
+ - precharge-current-microamp: current for pre-charge phase
+ - charge-term-current-microamp: current for charge termination phase
+ - constant-charge-current-max-microamp: maximum constant input current
+ - constant-charge-voltage-max-microvolt: maximum constant input voltage
+
+Battery properties are named, where possible, for the corresponding
+elements in enum power_supply_property, defined in
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/power_supply.h
+
+Batteries must be referenced by chargers and/or fuel-gauges
+using a phandle. The phandle's property should be named
+"monitored-battery".
+
+Example:
+
+	bat: battery {
+		compatible = "simple-battery";
+		voltage-min-design-microvolt = <3200000>;
+		energy-full-design-microwatt-hours = <5290000>;
+		charge-full-design-microamp-hours = <1430000>;
+		precharge-current-microamp = <256000>;
+		charge-term-current-microamp = <128000>;
+		constant-charge-current-max-microamp = <900000>;
+		constant-charge-voltage-max-microvolt = <4200000>;
+	};
+
+	charger: charger@11 {
+		....
+		monitored-battery = <&bat>;
+		...
+	};
+
+	fuel_gauge: fuel-gauge@22 {
+		....
+		monitored-battery = <&bat>;
+		...
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index b0c95ef..6858e1a 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -1,7 +1,7 @@
-Binding for TI BQ27XXX fuel gauge family
+TI BQ27XXX fuel gauge family
 
 Required properties:
-- compatible: Should contain one of the following:
+- compatible: contains one of the following:
  * "ti,bq27200" - BQ27200
  * "ti,bq27210" - BQ27210
  * "ti,bq27500" - deprecated, use revision specific property below
@@ -26,11 +26,28 @@
  * "ti,bq27425" - BQ27425
  * "ti,bq27441" - BQ27441
  * "ti,bq27621" - BQ27621
-- reg: integer, i2c address of the device.
+- reg: integer, I2C address of the fuel gauge.
+
+Optional properties:
+- monitored-battery: phandle of battery characteristics node
+    The fuel gauge uses the following battery properties:
+    + energy-full-design-microwatt-hours
+    + charge-full-design-microamp-hours
+    + voltage-min-design-microvolt
+  Both or neither of the *-full-design-*-hours properties must be set.
+  See Documentation/devicetree/bindings/power/supply/battery.txt
 
 Example:
 
-bq27510g3 {
-    compatible = "ti,bq27510g3";
-    reg = <0x55>;
-};
+	bat: battery {
+		compatible = "simple-battery";
+		voltage-min-design-microvolt = <3200000>;
+		energy-full-design-microwatt-hours = <5290000>;
+		charge-full-design-microamp-hours = <1430000>;
+	};
+
+	bq27510g3: fuel-gauge@55 {
+		compatible = "ti,bq27510g3";
+		reg = <0x55>;
+		monitored-battery = <&bat>;
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/cpcap-battery.txt b/Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
new file mode 100644
index 0000000..a04efa2
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
@@ -0,0 +1,31 @@
+Motorola CPCAP PMIC battery driver binding
+
+Required properties:
+- compatible: Shall be "motorola,cpcap-battery"
+- interrupts: Interrupt specifier for each name in interrupt-names
+- interrupt-names: Should contain the following entries:
+		   "lowbph", "lowbpl", "chrgcurr1", "battdetb"
+- io-channels: IIO ADC channel specifier for each name in io-channel-names
+- io-channel-names: Should contain the following entries:
+		    "battdetb", "battp", "chg_isense", "batti"
+- power-supplies: List of phandles for power-supplying devices, as
+		  described in power_supply.txt. Typically a reference
+		  to cpcap_charger.
+
+Example:
+
+cpcap_battery: battery {
+	compatible = "motorola,cpcap-battery";
+	interrupts-extended = <
+		&cpcap 5 0 &cpcap 3 0
+		&cpcap 20 0 &cpcap 54 0
+	>;
+	interrupt-names =
+		"lowbph", "lowbpl",
+		"chrgcurr1", "battdetb";
+	io-channels = <&cpcap_adc 0 &cpcap_adc 1
+		       &cpcap_adc 5 &cpcap_adc 6>;
+	io-channel-names = "battdetb", "battp",
+			   "chg_isense", "batti";
+	power-supplies = <&cpcap_charger>;
+};
diff --git a/Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt b/Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt
new file mode 100644
index 0000000..71f2840
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt
@@ -0,0 +1,27 @@
+ltc3651-charger
+
+Required properties:
+ - compatible: "lltc,ltc3651-charger"
+ - lltc,acpr-gpios: Connect to ACPR output. See remark below.
+
+Optional properties:
+ - lltc,fault-gpios: Connect to FAULT output. See remark below.
+ - lltc,chrg-gpios: Connect to CHRG output. See remark below.
+
+The ltc3651 outputs are open-drain type and active low. The driver assumes the
+GPIO reports "active" when the output is asserted, so if the pins have been
+connected directly, the GPIO flags should be set to active low also.
+
+The driver will attempt to acquire interrupts for all GPIOs to detect changes in
+line state. If the system is not capable of providing interrupts, the driver
+cannot report changes and userspace will need to periodically read the sysfs
+attributes to detect changes.
+
+Example:
+
+	charger: battery-charger {
+		compatible = "lltc,ltc3651-charger";
+		lltc,acpr-gpios = <&gpio0 68 GPIO_ACTIVE_LOW>;
+		lltc,fault-gpios = <&gpio0 64 GPIO_ACTIVE_LOW>;
+		lltc,chrg-gpios = <&gpio0 63 GPIO_ACTIVE_LOW>;
+	};
diff --git a/Documentation/devicetree/bindings/power/max8903-charger.txt b/Documentation/devicetree/bindings/power/supply/max8903-charger.txt
similarity index 100%
rename from Documentation/devicetree/bindings/power/max8903-charger.txt
rename to Documentation/devicetree/bindings/power/supply/max8903-charger.txt
diff --git a/Documentation/devicetree/bindings/power_supply/maxim,max14656.txt b/Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
similarity index 100%
rename from Documentation/devicetree/bindings/power_supply/maxim,max14656.txt
rename to Documentation/devicetree/bindings/power/supply/maxim,max14656.txt
diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt
index 12278d7..0849618 100644
--- a/Documentation/devicetree/bindings/property-units.txt
+++ b/Documentation/devicetree/bindings/property-units.txt
@@ -25,8 +25,10 @@
 Electricity
 ----------------------------------------
 -microamp	: micro amps
+-microamp-hours : micro amp-hours
 -ohms		: Ohms
 -micro-ohms	: micro Ohms
+-microwatt-hours: micro Watt-hours
 -microvolt	: micro volts
 
 Temperature
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index d18edb0..378f6dc 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -24,6 +24,14 @@
 - regulator-settling-time-us: Settling time, in microseconds, for voltage
   change if regulator have the constant time for any level voltage change.
   This is useful when regulator have exponential voltage change.
+- regulator-settling-time-up-us: Settling time, in microseconds, for voltage
+  increase if the regulator needs a constant time to settle after voltage
+  increases of any level. This is useful for regulators with exponential
+  voltage changes.
+- regulator-settling-time-down-us: Settling time, in microseconds, for voltage
+  decrease if the regulator needs a constant time to settle after voltage
+  decreases of any level. This is useful for regulators with exponential
+  voltage changes.
 - regulator-soft-start: Enable soft start so that voltage ramps slowly
 - regulator-state-mem sub-root node for Suspend-to-RAM mode
   : suspend to memory, the device goes to sleep, but all data stored in memory,
diff --git a/Documentation/devicetree/bindings/reset/ti,sci-reset.txt b/Documentation/devicetree/bindings/reset/ti,sci-reset.txt
new file mode 100644
index 0000000..8b1cf02
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/ti,sci-reset.txt
@@ -0,0 +1,62 @@
+Texas Instruments System Control Interface (TI-SCI) Reset Controller
+=====================================================================
+
+Some TI SoCs contain a system controller (like the Power Management Micro
+Controller (PMMC) on Keystone 66AK2G SoC) that is responsible for controlling
+the state of the various hardware modules present on the SoC. Communication
+between the host processor running an OS and the system controller happens
+through a protocol called TI System Control Interface (TI-SCI protocol).
+For TI SCI details, please refer to the document,
+Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+TI-SCI Reset Controller Node
+============================
+This reset controller node uses the TI SCI protocol to perform the reset
+management of various hardware modules present on the SoC. Must be a child
+node of the associated TI-SCI system controller node.
+
+Required properties:
+--------------------
+ - compatible	: Should be "ti,sci-reset"
+ - #reset-cells	: Should be 2. Please see the reset consumer node below for
+		  usage details.
+
+TI-SCI Reset Consumer Nodes
+===========================
+Each of the reset consumer nodes should have the following properties,
+in addition to their own properties.
+
+Required properties:
+--------------------
+ - resets	: A phandle and reset specifier pair, one pair for each reset
+		  signal that affects the device, or that the device manages.
+		  The phandle should point to the TI-SCI reset controller node,
+		  and the reset specifier should have 2 cell-values. The first
+		  cell should contain the device ID. The second cell should
+		  contain the reset mask value used by the system controller.
+		  Please refer to the protocol documentation for the values
+		  to be used for different devices:
+		  http://processors.wiki.ti.com/index.php/TISCI#66AK2G02_Data
+
+Please also refer to Documentation/devicetree/bindings/reset/reset.txt for
+common reset controller usage by consumers.
+
+Example:
+--------
+The following example demonstrates both a TI-SCI reset controller node and a
+consumer (a DSP device) on the 66AK2G SoC.
+
+pmmc: pmmc {
+	compatible = "ti,k2g-sci";
+
+	k2g_reset: reset-controller {
+		compatible = "ti,sci-reset";
+		#reset-cells = <2>;
+	};
+};
+
+dsp0: dsp@10800000 {
+	...
+	resets = <&k2g_reset 0x0046 0x1>;
+	...
+};
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 10276a4..419ff6c 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -20,6 +20,8 @@
 	- "fsl,16550-FIFO64"
 	- "fsl,ns16550"
 	- "ti,da830-uart"
+	- "aspeed,ast2400-vuart"
+	- "aspeed,ast2500-vuart"
 	- "serial" if the port type is unknown.
 - reg : offset and length of the register set for the device.
 - interrupts : should contain uart interrupt.
@@ -45,6 +47,7 @@
   property.
 - tx-threshold: Specify the TX FIFO low water indication for parts with
   programmable TX FIFO thresholds.
+- resets : phandle + reset specifier pairs
 
 Note:
 * fsl,ns16550:
diff --git a/Documentation/devicetree/bindings/serial/actions,owl-uart.txt b/Documentation/devicetree/bindings/serial/actions,owl-uart.txt
new file mode 100644
index 0000000..aa873ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/actions,owl-uart.txt
@@ -0,0 +1,16 @@
+Actions Semi Owl UART
+
+Required properties:
+- compatible :  "actions,s500-uart", "actions,owl-uart" for S500
+                "actions,s900-uart", "actions,owl-uart" for S900
+- reg        :  Offset and length of the register set for the device.
+- interrupts :  Should contain UART interrupt.
+
+
+Example:
+
+		uart3: serial@b0126000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0126000 0x1000>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		};
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
new file mode 100644
index 0000000..8ff65fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
@@ -0,0 +1,38 @@
+Amlogic Meson SoC UART Serial Interface
+=======================================
+
+The Amlogic Meson SoC UART Serial Interface is present on a large range
+of SoCs, and can be present either in the "Always-On" power domain or the
+"Everything-Else" power domain.
+
+The particularity of the "Always-On" Serial Interface is that the hardware
+is active from power-on, does not need any clock gating, and is usable
+as a very early serial console.
+
+Required properties:
+- compatible : value should be different for each SoC family, as follows:
+	- Meson6 : "amlogic,meson6-uart"
+	- Meson8 : "amlogic,meson8-uart"
+	- Meson8b : "amlogic,meson8b-uart"
+	- GX (GXBB, GXL, GXM) : "amlogic,meson-gx-uart"
+	optionally followed by "amlogic,meson-ao-uart" if this UART interface
+	is in the "Always-On" power domain.
+- reg : offset and length of the register set for the device.
+- interrupts : identifier for the device interrupt
+- clocks : a list of phandle + clock-specifier pairs, one for each
+	   entry in clock-names.
+- clock-names :
+   * "xtal" for external xtal clock identifier
+   * "pclk" for the bus core clock, either the clk81 clock or the gate clock
+   * "baud" for the source of the baudrate generator, can be either the xtal
+	or the pclk.
+
+e.g.
+uart_A: serial@84c0 {
+	compatible = "amlogic,meson-gx-uart";
+	reg = <0x0 0x84c0 0x0 0x14>;
+	interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+	/* Use xtal as baud rate clock source */
+	clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
+	clock-names = "xtal", "pclk", "baud";
+};
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 574c3a2..e6b5724 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,6 +9,7 @@
 - fsl,irda-mode : Indicate the uart supports irda mode
 - fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
                   in DCE mode by default.
+- fsl,dma-size : Indicate the size of the DMA buffer and its periods
 
 Please check Documentation/devicetree/bindings/serial/serial.txt
 for the complete list of generic properties.
@@ -28,4 +29,5 @@
 	interrupts = <31>;
 	uart-has-rtscts;
 	fsl,dte-mode;
+	fsl,dma-size = <1024 4>;
 };
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index c95005e..a1252a0 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
@@ -6,6 +6,8 @@
     on Vybrid vf610 SoC with 8-bit register organization
   - "fsl,ls1021a-lpuart" for lpuart compatible with the one integrated
     on LS1021A SoC with 32-bit big-endian register organization
+  - "fsl,imx7ulp-lpuart" for lpuart compatible with the one integrated
+    on i.MX7ULP SoC with 32-bit little-endian register organization
 - reg : Address and length of the register set for the device
 - interrupts : Should contain uart interrupt
 - clocks : phandle + clock specifier pairs, one for each entry in clock-names
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 0015c72..b6cf384 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -8,6 +8,8 @@
   * "mediatek,mt6589-uart" for MT6589 compatible UARTS
   * "mediatek,mt6755-uart" for MT6755 compatible UARTS
   * "mediatek,mt6795-uart" for MT6795 compatible UARTS
+  * "mediatek,mt6797-uart" for MT6797 compatible UARTS
+  * "mediatek,mt7622-uart" for MT7622 compatible UARTS
   * "mediatek,mt7623-uart" for MT7623 compatible UARTS
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index 16fe94d..b1d165b 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -9,11 +9,14 @@
 
 The driver implements the Generic PM domain bindings described in
 power/power_domain.txt. It provides the power domains defined in
-include/dt-bindings/power/mt8173-power.h and mt2701-power.h.
+- include/dt-bindings/power/mt8173-power.h
+- include/dt-bindings/power/mt6797-power.h
+- include/dt-bindings/power/mt2701-power.h
 
 Required properties:
 - compatible: Should be one of:
 	- "mediatek,mt2701-scpsys"
+	- "mediatek,mt6797-scpsys"
 	- "mediatek,mt8173-scpsys"
 - #power-domain-cells: Must be 1
 - reg: Address range of the SCPSYS unit
@@ -22,6 +25,7 @@
                       These are clocks which hardware needs to be
                       enabled before enabling certain power domains.
 	Required clocks for MT2701: "mm", "mfg", "ethif"
+	Required clocks for MT6797: "mm", "mfg", "vdec"
 	Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt"
 
 Optional properties:
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index dc97506..64ee489 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -38,6 +38,8 @@
 			 specifiers, one for transmission, and one for
 			 reception.
 - dma-names            : Must contain a list of two DMA names, "tx" and "rx".
+- spi-slave            : Empty property indicating the SPI controller is used
+			 in slave mode.
 - renesas,dtdl         : delay sync signal (setup) in transmit mode.
 			 Must contain one of the following values:
 			 0   (no bit delay)
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 4b1d6e7..1f6e86f 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -1,17 +1,23 @@
 SPI (Serial Peripheral Interface) busses
 
-SPI busses can be described with a node for the SPI master device
-and a set of child nodes for each SPI slave on the bus.  For this
-discussion, it is assumed that the system's SPI controller is in
-SPI master mode.  This binding does not describe SPI controllers
-in slave mode.
+SPI busses can be described with a node for the SPI controller device
+and a set of child nodes for each SPI slave on the bus.  The system's SPI
+controller may be described for use in SPI master mode or in SPI slave mode,
+but not for both at the same time.
 
-The SPI master node requires the following properties:
+The SPI controller node requires the following properties:
+- compatible      - Name of SPI bus controller following generic names
+		    recommended practice.
+
+In master mode, the SPI controller node requires the following additional
+properties:
 - #address-cells  - number of cells required to define a chip select
 		address on the SPI bus.
 - #size-cells     - should be zero.
-- compatible      - name of SPI bus controller following generic names
-		recommended practice.
+
+In slave mode, the SPI controller node requires one additional property:
+- spi-slave       - Empty property.
+
 No other properties are required in the SPI bus node.  It is assumed
 that a driver for an SPI bus device will understand that it is an SPI bus.
 However, the binding does not attempt to define the specific method for
@@ -21,7 +27,7 @@
 chip selects.  Individual drivers can define additional properties to
 support describing the chip select layout.
 
-Optional properties:
+Optional properties (master mode only):
 - cs-gpios	  - gpios chip select.
 - num-cs	  - total number of chipselects.
 
@@ -41,28 +47,36 @@
 cs2 : &gpio1 1 0
 cs3 : &gpio1 2 0
 
-SPI slave nodes must be children of the SPI master node and can
-contain the following properties.
-- reg             - (required) chip select address of device.
-- compatible      - (required) name of SPI device following generic names
-		recommended practice.
-- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz.
-- spi-cpol        - (optional) Empty property indicating device requires
-		inverse clock polarity (CPOL) mode.
-- spi-cpha        - (optional) Empty property indicating device requires
-		shifted clock phase (CPHA) mode.
-- spi-cs-high     - (optional) Empty property indicating device requires
-		chip select active high.
-- spi-3wire       - (optional) Empty property indicating device requires
-		3-wire mode.
-- spi-lsb-first   - (optional) Empty property indicating device requires
-		LSB first mode.
-- spi-tx-bus-width - (optional) The bus width (number of data wires) that is
-                      used for MOSI. Defaults to 1 if not present.
-- spi-rx-bus-width - (optional) The bus width (number of data wires) that is
-                      used for MISO. Defaults to 1 if not present.
-- spi-rx-delay-us  - (optional) Microsecond delay after a read transfer.
-- spi-tx-delay-us  - (optional) Microsecond delay after a write transfer.
+
+SPI slave nodes must be children of the SPI controller node.
+
+In master mode, one or more slave nodes (up to the number of chip selects) can
+be present.  Required properties are:
+- compatible      - Name of SPI device following generic names recommended
+		    practice.
+- reg             - Chip select address of device.
+- spi-max-frequency - Maximum SPI clocking speed of device in Hz.
+
+In slave mode, the (single) slave node is optional.
+If present, it must be called "slave".  Required properties are:
+- compatible      - Name of SPI device following generic names recommended
+		    practice.
+
+All slave nodes can contain the following optional properties:
+- spi-cpol        - Empty property indicating device requires inverse clock
+		    polarity (CPOL) mode.
+- spi-cpha        - Empty property indicating device requires shifted clock
+		    phase (CPHA) mode.
+- spi-cs-high     - Empty property indicating device requires chip select
+		    active high.
+- spi-3wire       - Empty property indicating device requires 3-wire mode.
+- spi-lsb-first   - Empty property indicating device requires LSB first mode.
+- spi-tx-bus-width - The bus width (number of data wires) that is used for MOSI.
+		    Defaults to 1 if not present.
+- spi-rx-bus-width - The bus width (number of data wires) that is used for MISO.
+		    Defaults to 1 if not present.
+- spi-rx-delay-us - Microsecond delay after a read transfer.
+- spi-tx-delay-us - Microsecond delay after a write transfer.
 
 Some SPI controllers and devices support Dual and Quad SPI transfer mode.
 It allows data in the SPI system to be transferred using 2 wires (DUAL) or 4
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
index dc6d031..825c39c 100644
--- a/Documentation/devicetree/bindings/spi/spi-meson.txt
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -20,3 +20,34 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 	};
+
+* SPICC (SPI Communication Controller)
+
+The Meson SPICC is a generic SPI controller for general-purpose full-duplex
+communications with dedicated 16-word RX/TX PIO FIFOs.
+
+Required properties:
+ - compatible: should be "amlogic,meson-gx-spicc" on Amlogic GX SoCs.
+ - reg: physical base address and length of the controller registers
+ - interrupts: The interrupt specifier
+ - clock-names: Must contain "core"
+ - clocks: phandle of the input clock for the baud rate generator
+ - #address-cells: should be 1
+ - #size-cells: should be 0
+
+Optional properties:
+ - resets: phandle of the internal reset line
+
+See ../spi/spi-bus.txt for more details on the required and optional
+properties of SPI bus master and slave devices.
+
+Example :
+	spi@c1108d80 {
+		compatible = "amlogic,meson-gx-spicc";
+		reg = <0xc1108d80 0x80>;
+		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+		clock-names = "core";
+		clocks = <&clk81>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index e43f4cf..e0318cf 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -3,7 +3,9 @@
 Required properties:
 - compatible: should be one of the following.
     - mediatek,mt2701-spi: for mt2701 platforms
+    - mediatek,mt2712-spi: for mt2712 platforms
     - mediatek,mt6589-spi: for mt6589 platforms
+    - mediatek,mt7622-spi: for mt7622 platforms
     - mediatek,mt8135-spi: for mt8135 platforms
     - mediatek,mt8173-spi: for mt8173 platforms
 
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt
new file mode 100644
index 0000000..1b3fa2c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-stm32.txt
@@ -0,0 +1,59 @@
+STMicroelectronics STM32 SPI Controller
+
+The STM32 SPI controller is used to communicate with external devices using
+the Serial Peripheral Interface. It supports full-duplex, half-duplex and
+simplex synchronous serial communication with external devices. It supports
+data sizes from 4 to 32 bits. Although it can be configured as master or slave,
+only master is supported by the driver.
+
+Required properties:
+- compatible: Must be "st,stm32h7-spi".
+- reg: Offset and length of the device's register set.
+- interrupts: Must contain the interrupt id.
+- clocks: Must contain an entry for spiclk (which feeds the internal clock
+	  generator).
+- #address-cells:  Number of cells required to define a chip select address.
+- #size-cells: Should be zero.
+
+Optional properties:
+- resets: Must contain the phandle to the reset controller.
+- A pinctrl state named "default" may be defined to configure the pins for
+  SPI operation.
+- dmas: DMA specifiers for tx and rx dma. DMA fifo mode must be used. See the
+  STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt.
+- dma-names: DMA request names should include "tx" and "rx" if present.
+- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
+  Documentation/devicetree/bindings/spi/spi-bus.txt
+
+
+Child nodes represent devices on the SPI bus
+  See ../spi/spi-bus.txt
+
+Optional properties:
+- st,spi-midi-ns: (Master Inter-Data Idleness) minimum time delay in
+		  nanoseconds inserted between two consecutive data frames.
+
+
+Example:
+	spi2: spi@40003800 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "st,stm32h7-spi";
+		reg = <0x40003800 0x400>;
+		interrupts = <36>;
+		clocks = <&rcc SPI2_CK>;
+		resets = <&rcc 1166>;
+		dmas = <&dmamux1 0 39 0x400 0x01>,
+		       <&dmamux1 1 40 0x400 0x01>;
+		dma-names = "rx", "tx";
+		pinctrl-0 = <&spi2_pins_b>;
+		pinctrl-names = "default";
+		cs-gpios = <&gpioa 11 0>;
+
+		aardvark@0 {
+			compatible = "totalphase,aardvark";
+			reg = <0>;
+			spi-max-frequency = <4000000>;
+			st,spi-midi-ns = <4000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/timer/actions,owl-timer.txt b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
new file mode 100644
index 0000000..e3c28da
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -0,0 +1,20 @@
+Actions Semi Owl Timer
+
+Required properties:
+- compatible      :  "actions,s500-timer" for S500
+                     "actions,s900-timer" for S900
+- reg             :  Offset and length of the register set for the device.
+- interrupts      :  Should contain the interrupts.
+- interrupt-names :  Valid names are: "2hz0", "2hz1",
+                                      "timer0", "timer1", "timer2", "timer3"
+                     See ../resource-names.txt
+
+Example:
+
+		timer@b0168000 {
+			compatible = "actions,s500-timer";
+			reg = <0xb0168000 0x100>;
+			interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+			             <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "timer0", "timer1";
+		};
diff --git a/Documentation/devicetree/bindings/timer/faraday,fttmr010.txt b/Documentation/devicetree/bindings/timer/faraday,fttmr010.txt
index b73ca6c..1957922 100644
--- a/Documentation/devicetree/bindings/timer/faraday,fttmr010.txt
+++ b/Documentation/devicetree/bindings/timer/faraday,fttmr010.txt
@@ -7,7 +7,11 @@
 
 - compatible : Must be one of
   "faraday,fttmr010"
-  "cortina,gemini-timer"
+  "cortina,gemini-timer", "faraday,fttmr010"
+  "moxa,moxart-timer", "faraday,fttmr010"
+  "aspeed,ast2400-timer"
+  "aspeed,ast2500-timer"
+
 - reg : Should contain registers location and length
 - interrupts : Should contain the three timer interrupts usually with
   flags for falling edge
diff --git a/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt b/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt
deleted file mode 100644
index e207c11..0000000
--- a/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-MOXA ART timer
-
-Required properties:
-
-- compatible : Must be one of:
- 	- "moxa,moxart-timer"
- 	- "aspeed,ast2400-timer"
-- reg : Should contain registers location and length
-- interrupts : Should contain the timer interrupt number
-- clocks : Should contain phandle for the clock that drives the counter
-
-Example:
-
-	timer: timer@98400000 {
-		compatible = "moxa,moxart-timer";
-		reg = <0x98400000 0x42>;
-		interrupts = <19 1>;
-		clocks = <&coreclk>;
-	};
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index 3e0a34c..35f406d 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -55,6 +55,7 @@
 infineon,slb9635tt	Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt	Infineon SLB9645 I2C TPM (new protocol, max 400khz)
 isil,isl29028		Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl29030		Intersil ISL29030 Ambient Light and Proximity Sensor
 maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 00bea03..fcf199b 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -10,6 +10,7 @@
   - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
   - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
   - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+  - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
   - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
   - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
   - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index f658f39..52fb410 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -45,6 +45,8 @@
 			a free-running PHY clock.
  - snps,dis-del-phy-power-chg-quirk: when set core will change PHY power
 			from P0 to P1/P2/P3 without delay.
+ - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
+			during HS transmit.
  - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
 			utmi_l1_suspend_n, false when asserts utmi_sleep_n
  - snps,hird-threshold: HIRD threshold
diff --git a/Documentation/devicetree/bindings/usb/iproc-udc.txt b/Documentation/devicetree/bindings/usb/iproc-udc.txt
new file mode 100644
index 0000000..272d7fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/iproc-udc.txt
@@ -0,0 +1,21 @@
+Broadcom IPROC USB Device controller.
+
+The device node is used for UDCs integrated into Broadcom's
+iProc family (Northstar2, Cygnus) of SoCs. The UDC is based
+on Synopsys Designware Cores AHB Subsystem Device Controller
+IP.
+
+Required properties:
+ - compatible: Add the compatibility strings for supported platforms.
+   For Broadcom NS2 platform, add "brcm,ns2-udc","brcm,iproc-udc".
+   For Broadcom Cygnus platform, add "brcm,cygnus-udc", "brcm,iproc-udc".
+ - reg: Offset and length of UDC register set
+ - interrupts: description of interrupt line
+ - phys: phandle to phy node.
+
+Example:
+	udc_dwc: usb@664e0000 {
+		compatible = "brcm,ns2-udc", "brcm,iproc-udc";
+		reg = <0x664e0000 0x2000>;
+		interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+		phys = <&usbdrd_phy>;
+	};
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
index 9df4569..e8766b0 100644
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ohci.txt
@@ -10,6 +10,7 @@
 - big-endian-desc : boolean, set this for hcds with big-endian descriptors
 - big-endian : boolean, for hcds with big-endian-regs + big-endian-desc
 - no-big-frame-no : boolean, set if frame_no lives in bits [15:0] of HCCA
+- remote-wakeup-connected: remote wakeup is wired on the platform
 - num-ports : u32, to override the detected port count
 - clocks : a list of phandle + clock specifier pairs
 - phys : phandle + phy specifier pair
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index c03d201..286f4e1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -5,6 +5,7 @@
 
 abcn	Abracon Corporation
 abilis	Abilis Systems
+actions	Actions Semiconductor Co., Ltd.
 active-semi	Active-Semi International Inc
 ad	Avionic Design GmbH
 adapteva	Adapteva, Inc.
@@ -165,6 +166,7 @@
 keymile	Keymile GmbH
 khadas	Khadas
 kinetic Kinetic Technologies
+kingnovel	Kingnovel Technology Co., Ltd.
 kosagi	Sutajio Ko-Usagi PTE Ltd.
 kyo	Kyocera Corporation
 lacie	LaCie
@@ -172,6 +174,7 @@
 lego	LEGO Systems A/S
 lenovo	Lenovo Group Ltd.
 lg	LG Corporation
+libretech	Shenzhen Libre Technology Co., Ltd
 licheepi	Lichee Pi
 linaro	Linaro Limited
 linux	Linux-specific binding
@@ -331,6 +334,7 @@
 tronsmart	Tronsmart
 truly	Truly Semiconductors Limited
 tyan	Tyan Computer Corporation
+ucrobotics	uCRobotics
 udoo	Udoo
 uniwest	United Western Technologies Corp (UniWest)
 upisemi	uPI Semiconductor Corp.
@@ -356,6 +360,7 @@
 xunlong	Shenzhen Xunlong Software CO.,Limited
 zarlink	Zarlink Semiconductor
 zeitec	ZEITEC Semiconductor Co., LTD.
+zidoo	Shenzhen Zidoo Technology Co., Ltd.
 zii	Zodiac Inflight Innovations
 zte	ZTE Corp.
 zyxel	ZyXEL Communications Corp.
diff --git a/Documentation/doc-guide/docbook.rst b/Documentation/doc-guide/docbook.rst
deleted file mode 100644
index d8bf043..0000000
--- a/Documentation/doc-guide/docbook.rst
+++ /dev/null
@@ -1,90 +0,0 @@
-DocBook XML [DEPRECATED]
-========================
-
-.. attention::
-
-   This section describes the deprecated DocBook XML toolchain. Please do not
-   create new DocBook XML template files. Please consider converting existing
-   DocBook XML templates files to Sphinx/reStructuredText.
-
-Converting DocBook to Sphinx
-----------------------------
-
-Over time, we expect all of the documents under ``Documentation/DocBook`` to be
-converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
-enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
-which uses ``pandoc`` under the hood. For example::
-
-  $ cd Documentation/sphinx
-  $ ./tmplcvt ../DocBook/in.tmpl ../out.rst
-
-Then edit the resulting rst files to fix any remaining issues, and add the
-document in the ``toctree`` in ``Documentation/index.rst``.
-
-Components of the kernel-doc system
------------------------------------
-
-Many places in the source tree have extractable documentation in the form of
-block comments above functions. The components of this system are:
-
-- ``scripts/kernel-doc``
-
-  This is a perl script that hunts for the block comments and can mark them up
-  directly into reStructuredText, DocBook, man, text, and HTML. (No, not
-  texinfo.)
-
-- ``Documentation/DocBook/*.tmpl``
-
-  These are XML template files, which are normal XML files with special
-  place-holders for where the extracted documentation should go.
-
-- ``scripts/docproc.c``
-
-  This is a program for converting XML template files into XML files. When a
-  file is referenced it is searched for symbols exported (EXPORT_SYMBOL), to be
-  able to distinguish between internal and external functions.
-
-  It invokes kernel-doc, giving it the list of functions that are to be
-  documented.
-
-  Additionally it is used to scan the XML template files to locate all the files
-  referenced herein. This is used to generate dependency information as used by
-  make.
-
-- ``Makefile``
-
-  The targets 'xmldocs', 'psdocs', 'pdfdocs', and 'htmldocs' are used to build
-  DocBook XML files, PostScript files, PDF files, and html files in
-  Documentation/DocBook. The older target 'sgmldocs' is equivalent to 'xmldocs'.
-
-- ``Documentation/DocBook/Makefile``
-
-  This is where C files are associated with SGML templates.
-
-How to use kernel-doc comments in DocBook XML template files
-------------------------------------------------------------
-
-DocBook XML template files (\*.tmpl) are like normal XML files, except that they
-can contain escape sequences where extracted documentation should be inserted.
-
-``!E<filename>`` is replaced by the documentation, in ``<filename>``, for
-functions that are exported using ``EXPORT_SYMBOL``: the function list is
-collected from files listed in ``Documentation/DocBook/Makefile``.
-
-``!I<filename>`` is replaced by the documentation for functions that are **not**
-exported using ``EXPORT_SYMBOL``.
-
-``!D<filename>`` is used to name additional files to search for functions
-exported using ``EXPORT_SYMBOL``.
-
-``!F<filename> <function [functions...]>`` is replaced by the documentation, in
-``<filename>``, for the functions listed.
-
-``!P<filename> <section title>`` is replaced by the contents of the ``DOC:``
-section titled ``<section title>`` from ``<filename>``. Spaces are allowed in
-``<section title>``; do not quote the ``<section title>``.
-
-``!C<filename>`` is replaced by nothing, but makes the tools check that all DOC:
-sections and documented functions, symbols, etc. are used. This makes sense to
-use when you use ``!F`` or ``!P`` only and want to verify that all documentation
-is included.
diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst
index 6fff402..a7f95d7 100644
--- a/Documentation/doc-guide/index.rst
+++ b/Documentation/doc-guide/index.rst
@@ -10,7 +10,6 @@
    sphinx.rst
    kernel-doc.rst
    parse-headers.rst
-   docbook.rst
 
 .. only::  subproject and html
 
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index b32e481..b24854b 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -149,6 +149,16 @@
 ``%CONST``
   Name of a constant. (No cross-referencing, just formatting.)
 
+````literal````
+  A literal block that should be handled as-is. The output will use a
+  ``monospaced font``.
+
+  Useful if you need to use special characters that would otherwise be
+  interpreted either by the kernel-doc script or by reStructuredText.
+
+  This is particularly useful if you need to use things like ``%ph`` inside
+  a function description.
+
 ``$ENVVAR``
   Name of an environment variable. (No cross-referencing, just formatting.)
 
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 731334d..84e8e8a 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -15,11 +15,6 @@
 kernel-doc comments have some special structure and formatting, but beyond that
 they are also treated as reStructuredText.
 
-There is also the deprecated DocBook toolchain to generate documentation from
-DocBook XML template files under ``Documentation/DocBook``. The DocBook files
-are to be converted to reStructuredText, and the toolchain is slated to be
-removed.
-
 Finally, there are thousands of plain text documentation files scattered around
 ``Documentation``. Some of these will likely be converted to reStructuredText
 over time, but the bulk of them will remain in plain text.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 77b9222..f64a63b 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -118,7 +118,6 @@
 devlist.h*
 devicetable-offsets.h
 dnotify_test
-docproc
 dslm
 dtc
 elf2ecoff
diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
index cc0aea8..1c2c496 100644
--- a/Documentation/driver-api/firmware/request_firmware.rst
+++ b/Documentation/driver-api/firmware/request_firmware.rst
@@ -44,6 +44,17 @@
 .. kernel-doc:: drivers/base/firmware_class.c
    :functions: request_firmware_nowait
 
+Considerations for suspend and resume
+=====================================
+
+During suspend and resume only the built-in firmware and the firmware cache
+elements of the firmware API can be used. This is managed by fw_pm_notify().
+
+fw_pm_notify
+------------
+.. kernel-doc:: drivers/base/firmware_class.c
+   :functions: fw_pm_notify
+
 request firmware API expected driver use
 ========================================
 
diff --git a/Documentation/driver-api/i2c.rst b/Documentation/driver-api/i2c.rst
index f3939f7..0bf86a4 100644
--- a/Documentation/driver-api/i2c.rst
+++ b/Documentation/driver-api/i2c.rst
@@ -13,8 +13,8 @@
 between masters, as well as to handshake and to synchronize clocks from
 slower clients.
 
-The Linux I2C programming interfaces support only the master side of bus
-interactions, not the slave side. The programming interface is
+The Linux I2C programming interfaces support both the master and the slave
+side of bus interactions. The programming interface is
 structured around two kinds of driver, and two kinds of device. An I2C
 "Adapter Driver" abstracts the controller hardware; it binds to a
 physical device (perhaps a PCI device or platform_device) and exposes a
@@ -22,9 +22,8 @@
 I2C bus segment it manages. On each I2C bus segment will be I2C devices
 represented by a :c:type:`struct i2c_client <i2c_client>`.
 Those devices will be bound to a :c:type:`struct i2c_driver
-<i2c_driver>`, which should follow the standard Linux driver
-model. (At this writing, a legacy model is more widely used.) There are
-functions to perform various I2C protocol operations; at this writing
+<i2c_driver>`, which should follow the standard Linux driver model. There
+are functions to perform various I2C protocol operations; at this writing
 all such functions are usable only from task context.
 
 The System Management Bus (SMBus) is a sibling protocol. Most SMBus
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 8058a87..3cf1ace 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -32,7 +32,13 @@
    i2c
    hsi
    edac
+   scsi
+   libata
+   mtdnand
    miscellaneous
+   w1
+   rapidio
+   s390-drivers
    vme
    80211/index
    uio-howto
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
new file mode 100644
index 0000000..4adc056
--- /dev/null
+++ b/Documentation/driver-api/libata.rst
@@ -0,0 +1,1031 @@
+========================
+libATA Developer's Guide
+========================
+
+:Author: Jeff Garzik
+
+Introduction
+============
+
+libATA is a library used inside the Linux kernel to support ATA host
+controllers and devices. libATA provides an ATA driver API, class
+transports for ATA and ATAPI devices, and SCSI<->ATA translation for ATA
+devices according to the T10 SAT specification.
+
+This Guide documents the libATA driver API, library functions, library
+internals, and a couple of sample ATA low-level drivers.
+
+libata Driver API
+=================
+
+:c:type:`struct ata_port_operations <ata_port_operations>`
+is defined for every low-level libata
+hardware driver, and it controls how the low-level driver interfaces
+with the ATA and SCSI layers.
+
+FIS-based drivers will hook into the system with ``->qc_prep()`` and
+``->qc_issue()`` high-level hooks. Hardware which behaves in a manner
+similar to PCI IDE hardware may utilize several generic helpers,
+defining at a bare minimum the bus I/O addresses of the ATA shadow
+register blocks.
+
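+As a purely illustrative sketch (the driver name below is hypothetical, while
+the ``ata_bmdma_port_ops`` template and the ``.inherits`` mechanism are the
+standard libata ones), such a driver might override only the timing hook and
+inherit everything else from the generic helpers::
+
+    static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
+    {
+            /* program the controller's timing registers for adev->pio_mode */
+    }
+
+    static struct ata_port_operations foo_port_ops = {
+            .inherits       = &ata_bmdma_port_ops,
+            .set_piomode    = foo_set_piomode,
+    };
+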
+:c:type:`struct ata_port_operations <ata_port_operations>`
+----------------------------------------------------------
+
+Disable ATA port
+~~~~~~~~~~~~~~~~
+
+::
+
+    void (*port_disable) (struct ata_port *);
+
+
+Called from :c:func:`ata_bus_probe` error path, as well as when unregistering
+from the SCSI module (rmmod, hot unplug). This function should do
+whatever needs to be done to take the port out of use. In most cases,
+:c:func:`ata_port_disable` can be used as this hook.
+
+Called from :c:func:`ata_bus_probe` on a failed probe. Called from
+:c:func:`ata_scsi_release`.
+
+Post-IDENTIFY device configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*dev_config) (struct ata_port *, struct ata_device *);
+
+
+Called after IDENTIFY [PACKET] DEVICE is issued to each device found.
+Typically used to apply device-specific fixups prior to issue of SET
+FEATURES - XFER MODE, and prior to operation.
+
+This entry may be specified as NULL in ata_port_operations.
+
+Set PIO/DMA mode
+~~~~~~~~~~~~~~~~
+
+::
+
+    void (*set_piomode) (struct ata_port *, struct ata_device *);
+    void (*set_dmamode) (struct ata_port *, struct ata_device *);
+    void (*post_set_mode) (struct ata_port *);
+    unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned int);
+
+
+Hooks called prior to the issue of SET FEATURES - XFER MODE command. The
+optional ``->mode_filter()`` hook is called when libata has built a mask of
+the possible modes. This is passed to the ``->mode_filter()`` function
+which should return a mask of valid modes after filtering those
+unsuitable due to hardware limits. It is not valid to use this interface
+to add modes.
+
+``dev->pio_mode`` and ``dev->dma_mode`` are guaranteed to be valid when
+``->set_piomode()`` and ``->set_dmamode()`` are called. The timings for
+any other drive sharing the cable will also be valid at this point. That
+is, the library records the decisions for the modes of each drive on a
+channel before it attempts to set any of them.
+
+``->post_set_mode()`` is called unconditionally, after the SET FEATURES -
+XFER MODE command completes successfully.
+
+``->set_piomode()`` is always called (if present), but ``->set_dmamode()``
+is only called if DMA is possible.
+
+Taskfile read/write
+~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*sff_tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
+    void (*sff_tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
+
+
+``->tf_load()`` is called to load the given taskfile into hardware
+registers / DMA buffers. ``->tf_read()`` is called to read the hardware
+registers / DMA buffers, to obtain the current set of taskfile register
+values. Most drivers for taskfile-based hardware (PIO or MMIO) use
+:c:func:`ata_sff_tf_load` and :c:func:`ata_sff_tf_read` for these hooks.
+
+PIO data read/write
+~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*sff_data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+
+
+All bmdma-style drivers must implement this hook. This is the low-level
+operation that actually copies the data bytes during a PIO data
+transfer. Typically the driver will choose one of
+:c:func:`ata_sff_data_xfer_noirq`, :c:func:`ata_sff_data_xfer`, or
+:c:func:`ata_sff_data_xfer32`.
+
+ATA command execute
+~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*sff_exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
+
+
+Causes an ATA command, previously loaded with ``->tf_load()``, to be
+initiated in hardware. Most drivers for taskfile-based hardware use
+:c:func:`ata_sff_exec_command` for this hook.
+
+Per-cmd ATAPI DMA capabilities filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    int (*check_atapi_dma) (struct ata_queued_cmd *qc);
+
+
+Allow low-level driver to filter ATA PACKET commands, returning a status
+indicating whether or not it is OK to use DMA for the supplied PACKET
+command.
+
+This hook may be specified as NULL, in which case libata will assume
+that ATAPI DMA can be supported.
+
+Read specific ATA shadow registers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    u8   (*sff_check_status)(struct ata_port *ap);
+    u8   (*sff_check_altstatus)(struct ata_port *ap);
+
+
+Reads the Status/AltStatus ATA shadow register from hardware. On some
+hardware, reading the Status register has the side effect of clearing
+the interrupt condition. Most drivers for taskfile-based hardware use
+:c:func:`ata_sff_check_status` for this hook.
+
+Write specific ATA shadow register
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
+
+
+Write the device control ATA shadow register to the hardware. Most
+drivers don't need to define this.
+
+Select ATA device on bus
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
+
+
+Issues the low-level hardware command(s) that causes one of N hardware
+devices to be considered 'selected' (active and available for use) on
+the ATA bus. This generally has no meaning on FIS-based devices.
+
+Most drivers for taskfile-based hardware use :c:func:`ata_sff_dev_select` for
+this hook.
+
+Private tuning method
+~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*set_mode) (struct ata_port *ap);
+
+
+By default libata performs drive and controller tuning in accordance
+with the ATA timing rules and also applies blacklists and cable limits.
+Some controllers need special handling and have custom tuning rules,
+typically RAID controllers that use ATA commands but do not actually do
+drive timing.
+
+    **Warning**
+
+    This hook should not be used to replace the standard controller
+    tuning logic when a controller has quirks. Replacing the default
+    tuning logic in that case would bypass handling for drive and bridge
+    quirks that may be important to data reliability. If a controller
+    needs to filter the mode selection it should use the mode_filter
+    hook instead.
+
+Control PCI IDE BMDMA engine
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*bmdma_setup) (struct ata_queued_cmd *qc);
+    void (*bmdma_start) (struct ata_queued_cmd *qc);
+    void (*bmdma_stop) (struct ata_port *ap);
+    u8   (*bmdma_status) (struct ata_port *ap);
+
+
+When setting up an IDE BMDMA transaction, these hooks arm
+(``->bmdma_setup``), fire (``->bmdma_start``), and halt (``->bmdma_stop``) the
+hardware's DMA engine. ``->bmdma_status`` is used to read the standard PCI
+IDE DMA Status register.
+
+These hooks are typically either no-ops, or simply not implemented, in
+FIS-based drivers.
+
+Most legacy IDE drivers use :c:func:`ata_bmdma_setup` for the
+:c:func:`bmdma_setup` hook. :c:func:`ata_bmdma_setup` will write the pointer
+to the PRD table to the IDE PRD Table Address register, enable DMA in the DMA
+Command register, and call :c:func:`exec_command` to begin the transfer.
+
+Most legacy IDE drivers use :c:func:`ata_bmdma_start` for the
+:c:func:`bmdma_start` hook. :c:func:`ata_bmdma_start` will write the
+ATA_DMA_START flag to the DMA Command register.
+
+Many legacy IDE drivers use :c:func:`ata_bmdma_stop` for the
+:c:func:`bmdma_stop` hook. :c:func:`ata_bmdma_stop` clears the ATA_DMA_START
+flag in the DMA command register.
+
+Many legacy IDE drivers use :c:func:`ata_bmdma_status` as the
+:c:func:`bmdma_status` hook.
+
+High-level taskfile hooks
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*qc_prep) (struct ata_queued_cmd *qc);
+    int (*qc_issue) (struct ata_queued_cmd *qc);
+
+
+These two higher-level hooks can potentially supersede several of
+the above taskfile/DMA engine hooks. ``->qc_prep`` is called after the
+buffers have been DMA-mapped, and is typically used to populate the
+hardware's DMA scatter-gather table. Most drivers use the standard
+:c:func:`ata_qc_prep` helper function, but more advanced drivers roll their
+own.
+
+``->qc_issue`` is used to make a command active, once the hardware and S/G
+tables have been prepared. IDE BMDMA drivers use the helper function
+:c:func:`ata_qc_issue_prot` for taskfile protocol-based dispatch. More
+advanced drivers implement their own ``->qc_issue``.
+
+:c:func:`ata_qc_issue_prot` calls ``->tf_load()``, ``->bmdma_setup()``, and
+``->bmdma_start()`` as necessary to initiate a transfer.
+
+Exception and probe handling (EH)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    void (*eng_timeout) (struct ata_port *ap);
+    void (*phy_reset) (struct ata_port *ap);
+
+
+Deprecated. Use ``->error_handler()`` instead.
+
+::
+
+    void (*freeze) (struct ata_port *ap);
+    void (*thaw) (struct ata_port *ap);
+
+
+:c:func:`ata_port_freeze` is called when HSM violations or some other
+condition disrupts normal operation of the port. A frozen port is not
+allowed to perform any operation until the port is thawed, which usually
+follows a successful reset.
+
+The optional ``->freeze()`` callback can be used for freezing the port
+hardware-wise (e.g. mask interrupt and stop DMA engine). If a port
+cannot be frozen hardware-wise, the interrupt handler must ack and clear
+interrupts unconditionally while the port is frozen.
+
+The optional ``->thaw()`` callback is called to perform the opposite of
+``->freeze()``: prepare the port for normal operation once again. Unmask
+interrupts, start DMA engine, etc.
+
+::
+
+    void (*error_handler) (struct ata_port *ap);
+
+
+``->error_handler()`` is a driver's hook into probe, hotplug, recovery,
+and other exceptional conditions. The primary responsibility of an
+implementation is to call :c:func:`ata_do_eh` or :c:func:`ata_bmdma_drive_eh`
+with a set of EH hooks as arguments:
+
+'prereset' hook (may be NULL) is called during an EH reset, before any
+other actions are taken.
+
+'postreset' hook (may be NULL) is called after the EH reset is
+performed.
+
+Based on existing conditions, severity of the problem, and hardware
+capabilities, either 'softreset' (may be NULL) or 'hardreset' (may be
+NULL) will be called to perform the low-level EH reset.
+
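+As a hedged, illustrative example (the wrapper below is hypothetical, but
+:c:func:`ata_do_eh` and the SFF reset helpers are the standard libata ones),
+an SFF driver could simply forward the generic reset methods::
+
+    static void foo_error_handler(struct ata_port *ap)
+    {
+            /* let the EH core drive probing/recovery with the SFF resets */
+            ata_do_eh(ap, ata_sff_prereset, ata_sff_softreset, NULL,
+                      ata_sff_postreset);
+    }
+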
+::
+
+    void (*post_internal_cmd) (struct ata_queued_cmd *qc);
+
+
+Perform any hardware-specific actions necessary to finish processing
+after executing a probe-time or EH-time command via
+:c:func:`ata_exec_internal`.
+
+Hardware interrupt handling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
+    void (*irq_clear) (struct ata_port *);
+
+
+``->irq_handler`` is the interrupt handling routine registered with the
+system, by libata. ``->irq_clear`` is called during probe just before the
+interrupt handler is registered, to be sure hardware is quiet.
+
+The second argument, dev_instance, should be cast to a pointer to
+:c:type:`struct ata_host_set <ata_host_set>`.
+
+Most legacy IDE drivers use :c:func:`ata_sff_interrupt` for the irq_handler
+hook, which scans all ports in the host_set, determines which queued
+command was active (if any), and calls ``ata_sff_host_intr(ap, qc)``.
+
+Most legacy IDE drivers use :c:func:`ata_sff_irq_clear` for the
+:c:func:`irq_clear` hook, which simply clears the interrupt and error flags
+in the DMA status register.
+
+SATA phy read/write
+~~~~~~~~~~~~~~~~~~~
+
+::
+
+    int (*scr_read) (struct ata_port *ap, unsigned int sc_reg,
+             u32 *val);
+    int (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
+                       u32 val);
+
+
+Read and write standard SATA phy registers. Currently only used if
+``->phy_reset`` hook called the :c:func:`sata_phy_reset` helper function.
+sc_reg is one of SCR_STATUS, SCR_CONTROL, SCR_ERROR, or SCR_ACTIVE.
+
+Init and shutdown
+~~~~~~~~~~~~~~~~~
+
+::
+
+    int (*port_start) (struct ata_port *ap);
+    void (*port_stop) (struct ata_port *ap);
+    void (*host_stop) (struct ata_host_set *host_set);
+
+
+``->port_start()`` is called just after the data structures for each port
+are initialized. Typically this is used to alloc per-port DMA buffers /
+tables / rings, enable DMA engines, and similar tasks. Some drivers also
+use this entry point as a chance to allocate driver-private memory for
+``ap->private_data``.
+
+Many drivers use :c:func:`ata_port_start` as this hook or call it from their
+own :c:func:`port_start` hooks. :c:func:`ata_port_start` allocates space for
+a legacy IDE PRD table and returns.
+
+``->port_stop()`` is called after ``->host_stop()``. Its sole function is to
+release DMA/memory resources, now that they are no longer actively being
+used. Many drivers also free driver-private data from port at this time.
+
+``->host_stop()`` is called after all ``->port_stop()`` calls have completed.
+The hook must finalize hardware shutdown, release DMA and other
+resources, etc. This hook may be specified as NULL, in which case it is
+not called.
+
+Error handling
+==============
+
+This chapter describes how errors are handled under libata. Readers are
+advised to read SCSI EH (Documentation/scsi/scsi_eh.txt) and ATA
+exceptions doc first.
+
+Origins of commands
+-------------------
+
+In libata, a command is represented with
+:c:type:`struct ata_queued_cmd <ata_queued_cmd>` or qc.
+qc's are preallocated during port initialization and repetitively used
+for command executions. Currently only one qc is allocated per port but
+yet-to-be-merged NCQ branch allocates one for each tag and maps each qc
+to NCQ tag 1-to-1.
+
+libata commands can originate from two sources - libata itself and SCSI
+midlayer. libata internal commands are used for initialization and error
+handling. All normal blk requests and commands for SCSI emulation are
+passed as SCSI commands through queuecommand callback of SCSI host
+template.
+
+How commands are issued
+-----------------------
+
+Internal commands
+    First, qc is allocated and initialized using :c:func:`ata_qc_new_init`.
+    Although :c:func:`ata_qc_new_init` doesn't implement any wait or retry
+    mechanism when qc is not available, internal commands are currently
+    issued only during initialization and error recovery, so no other
+    command is active and allocation is guaranteed to succeed.
+
+    Once allocated, the qc's taskfile is initialized for the command to be
+    executed. qc currently has two mechanisms to notify completion. One
+    is via ``qc->complete_fn()`` callback and the other is completion
+    ``qc->waiting``. ``qc->complete_fn()`` callback is the asynchronous path
+    used by normal SCSI translated commands and ``qc->waiting`` is the
+    synchronous (issuer sleeps in process context) path used by internal
+    commands.
+
+    Once initialization is complete, host_set lock is acquired and the
+    qc is issued.
+
+SCSI commands
+    All libata drivers use :c:func:`ata_scsi_queuecmd` as
+    ``hostt->queuecommand`` callback. scmds can either be simulated or
+    translated. No qc is involved in processing a simulated scmd. The
+    result is computed right away and the scmd is completed.
+
+    For a translated scmd, :c:func:`ata_qc_new_init` is invoked to allocate a
+    qc and the scmd is translated into the qc. SCSI midlayer's
+    completion notification function pointer is stored into
+    ``qc->scsidone``.
+
+    ``qc->complete_fn()`` callback is used for completion notification. ATA
+    commands use :c:func:`ata_scsi_qc_complete` while ATAPI commands use
+    :c:func:`atapi_qc_complete`. Both functions end up calling ``qc->scsidone``
+    to notify upper layer when the qc is finished. After translation is
+    completed, the qc is issued with :c:func:`ata_qc_issue`.
+
+    Note that the SCSI midlayer invokes ``hostt->queuecommand()`` while
+    holding the host_set lock, so all of the above occurs while holding the
+    host_set lock.
+
+How commands are processed
+--------------------------
+
+Depending on which protocol and which controller are used, commands are
+processed differently. For the purpose of discussion, a controller which
+uses the taskfile interface and all standard callbacks is assumed.
+
+Currently 6 ATA command protocols are used. They can be sorted into the
+following four categories according to how they are processed.
+
+ATA NO DATA or DMA
+    ATA_PROT_NODATA and ATA_PROT_DMA fall into this category. These
+    types of commands don't require any software intervention once
+    issued. The device will raise an interrupt on completion.
+
+ATA PIO
+    ATA_PROT_PIO is in this category. libata currently implements PIO
+    with polling. The ATA_NIEN bit is set to turn off interrupts, and
+    pio_task on ata_wq performs polling and IO.
+
+ATAPI NODATA or DMA
+    ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
+    category. packet_task is used to poll BSY bit after issuing PACKET
+    command. Once BSY is turned off by the device, packet_task
+    transfers CDB and hands off processing to interrupt handler.
+
+ATAPI PIO
+    ATA_PROT_ATAPI is in this category. ATA_NIEN bit is set and, as
+    in ATAPI NODATA or DMA, packet_task submits cdb. However, after
+    submitting cdb, further processing (data transfer) is handed off to
+    pio_task.
+
+How commands are completed
+--------------------------
+
+Once issued, all qc's are either completed with :c:func:`ata_qc_complete` or
+time out. For commands which are handled by interrupts,
+:c:func:`ata_host_intr` invokes :c:func:`ata_qc_complete`, and, for PIO tasks,
+pio_task invokes :c:func:`ata_qc_complete`. In error cases, packet_task may
+also complete commands.
+
+:c:func:`ata_qc_complete` does the following.
+
+1. DMA memory is unmapped.
+
+2. ATA_QCFLAG_ACTIVE is cleared from qc->flags.
+
+3. The ``qc->complete_fn()`` callback is invoked. If the return value of
+   the callback is not zero, completion is short-circuited and
+   :c:func:`ata_qc_complete` returns.
+
+4. :c:func:`__ata_qc_complete` is called, which does
+
+   1. ``qc->flags`` is cleared to zero.
+
+   2. ``ap->active_tag`` and ``qc->tag`` are poisoned.
+
+   3. ``qc->waiting`` is cleared & completed (in that order).
+
+   4. qc is deallocated by clearing appropriate bit in ``ap->qactive``.
+
+So, it basically notifies the upper layer and deallocates the qc. One
+exception is the short-circuit path in #3, which is used by
+:c:func:`atapi_qc_complete`.
+
+For all non-ATAPI commands, whether they fail or not, almost the same
+code path is taken and very little error handling takes place. A qc is
+completed with success status if it succeeded, with failed status
+otherwise.
+
+However, failed ATAPI commands require more handling as REQUEST SENSE is
+needed to acquire sense data. If an ATAPI command fails,
+:c:func:`ata_qc_complete` is invoked with error status, which in turn invokes
+:c:func:`atapi_qc_complete` via ``qc->complete_fn()`` callback.
+
+This makes :c:func:`atapi_qc_complete` set ``scmd->result`` to
+SAM_STAT_CHECK_CONDITION, complete the scmd and return 1. As the
+sense data is empty but ``scmd->result`` is CHECK CONDITION, the SCSI
+midlayer will invoke EH for the scmd, and returning 1 makes
+:c:func:`ata_qc_complete` return without deallocating the qc. This leads us
+to :c:func:`ata_scsi_error` with a partially completed qc.
+
+:c:func:`ata_scsi_error`
+------------------------
+
+:c:func:`ata_scsi_error` is the current ``transportt->eh_strategy_handler()``
+for libata. As discussed above, this will be entered in two cases -
+timeout and ATAPI error completion. This function calls low level libata
+driver's :c:func:`eng_timeout` callback, the standard callback for which is
+:c:func:`ata_eng_timeout`. It checks if a qc is active and calls
+:c:func:`ata_qc_timeout` on the qc if so. Actual error handling occurs in
+:c:func:`ata_qc_timeout`.
+
+If EH is invoked for timeout, :c:func:`ata_qc_timeout` stops BMDMA and
+completes the qc. Note that as we're currently in EH, we cannot call
+scsi_done. As described in SCSI EH doc, a recovered scmd should be
+either retried with :c:func:`scsi_queue_insert` or finished with
+:c:func:`scsi_finish_command`. Here, we override ``qc->scsidone`` with
+:c:func:`scsi_finish_command` and call :c:func:`ata_qc_complete`.
+
+If EH is invoked due to a failed ATAPI qc, the qc here is completed but
+not deallocated. The purpose of this half-completion is to use the qc as
+a placeholder to make the EH code reach this place. This is a bit hackish,
+but it works.
+
+Once control reaches here, the qc is deallocated by invoking
+:c:func:`__ata_qc_complete` explicitly. Then, internal qc for REQUEST SENSE
+is issued. Once sense data is acquired, scmd is finished by directly
+invoking :c:func:`scsi_finish_command` on the scmd. Note that as we already
+have completed and deallocated the qc which was associated with the
+scmd, we don't need to/cannot call :c:func:`ata_qc_complete` again.
+
+Problems with the current EH
+----------------------------
+
+-  Error representation is too crude. Currently any and all error
+   conditions are represented with the ATA STATUS and ERROR registers.
+   Errors which aren't ATA device errors are treated as ATA device
+   errors by setting the ATA_ERR bit. A better error descriptor which can
+   properly represent ATA and other errors/exceptions is needed.
+
+-  When handling timeouts, no action is taken to make the device forget
+   about the timed out command and become ready for new commands.
+
+-  EH handling via :c:func:`ata_scsi_error` is not properly protected from
+   usual command processing. On EH entrance, the device is not in a
+   quiescent state. Timed out commands may succeed or fail at any time.
+   pio_task and atapi_task may still be running.
+
+-  Error recovery is too weak. Devices / controllers causing HSM mismatch
+   errors and other errors quite often require a reset to return to a known
+   state. Also, advanced error handling is necessary to support features
+   like NCQ and hotplug.
+
+-  ATA errors are directly handled in the interrupt handler and PIO
+   errors in pio_task. This is problematic for advanced error handling
+   for the following reasons.
+
+   First, advanced error handling often requires context and internal qc
+   execution.
+
+   Second, even a simple failure (say, CRC error) needs information
+   gathering and could trigger complex error handling (say, resetting &
+   reconfiguring). Having multiple code paths to gather information,
+   enter EH and trigger actions makes life painful.
+
+   Third, scattered EH code makes implementing low level drivers
+   difficult. Low level drivers override libata callbacks. If EH is
+   scattered over several places, each affected callback has to perform
+   its part of error handling. This can be error prone and painful.
+
+libata Library
+==============
+
+.. kernel-doc:: drivers/ata/libata-core.c
+   :export:
+
+libata Core Internals
+=====================
+
+.. kernel-doc:: drivers/ata/libata-core.c
+   :internal:
+
+.. kernel-doc:: drivers/ata/libata-eh.c
+
+libata SCSI translation/emulation
+=================================
+
+.. kernel-doc:: drivers/ata/libata-scsi.c
+   :export:
+
+.. kernel-doc:: drivers/ata/libata-scsi.c
+   :internal:
+
+ATA errors and exceptions
+=========================
+
+This chapter tries to identify what error/exception conditions exist for
+ATA/ATAPI devices and describe how they should be handled in an
+implementation-neutral way.
+
+The term 'error' is used to describe conditions where either an explicit
+error condition is reported from the device or a command has timed out.
+
+The term 'exception' is either used to describe exceptional conditions
+which are not errors (say, power or hotplug events), or to describe both
+errors and non-error exceptional conditions. Where explicit distinction
+between error and exception is necessary, the term 'non-error exception'
+is used.
+
+Exception categories
+--------------------
+
+Exceptions are described primarily with respect to the legacy taskfile + bus
+master IDE interface. If a controller provides some other, better mechanism
+for error reporting, mapping it onto the categories described below
+shouldn't be difficult.
+
+In the following sections, two recovery actions - reset and
+reconfiguring transport - are mentioned. These are described further in
+`EH recovery actions <#exrec>`__.
+
+HSM violation
+~~~~~~~~~~~~~
+
+This error is indicated when the STATUS value doesn't match the HSM
+requirement while issuing or executing any ATA/ATAPI command.
+
+-  ATA_STATUS doesn't contain !BSY && DRDY && !DRQ while trying to
+   issue a command.
+
+-  !BSY && !DRQ during PIO data transfer.
+
+-  DRQ on command completion.
+
+-  !BSY && ERR after CDB transfer starts but before the last byte of CDB
+   is transferred. ATA/ATAPI standard states that "The device shall not
+   terminate the PACKET command with an error before the last byte of
+   the command packet has been written" in the error outputs description
+   of PACKET command and the state diagram doesn't include such
+   transitions.
+
+In these cases, HSM is violated and not much information regarding the
+error can be acquired from STATUS or ERROR register. IOW, this error can
+be anything - driver bug, faulty device, controller and/or cable.
+
+As HSM is violated, a reset is necessary to restore a known state.
+Reconfiguring the transport for a lower speed might be helpful too, as
+transmission errors sometimes cause this kind of error.
+
+ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These are errors detected and reported by ATA/ATAPI devices indicating
+device problems. For this type of error, the STATUS and ERROR register
+values are valid and describe the error condition. Note that some ATA bus
+errors are detected by ATA/ATAPI devices and reported using the same
+mechanism as device errors. Those cases are described later in this
+section.
+
+For ATA commands, this type of error is indicated by !BSY && ERR
+during command execution and on completion.
+
+For ATAPI commands,
+
+-  !BSY && ERR && ABRT right after issuing PACKET indicates that PACKET
+   command is not supported and falls in this category.
+
+-  !BSY && ERR(==CHK) && !ABRT after the last byte of CDB is transferred
+   indicates CHECK CONDITION and doesn't fall in this category.
+
+-  !BSY && ERR(==CHK) && ABRT after the last byte of CDB is transferred
+   \*probably\* indicates CHECK CONDITION and doesn't fall in this
+   category.
+
+Of errors detected as above, the following are not ATA/ATAPI device
+errors but ATA bus errors and should be handled according to
+`ATA bus error <#excatATAbusErr>`__.
+
+CRC error during data transfer
+    This is indicated by ICRC bit in the ERROR register and means that
+    corruption occurred during data transfer. Up to ATA/ATAPI-7, the
+    standard specifies that this bit is only applicable to UDMA
+    transfers but ATA/ATAPI-8 draft revision 1f says that the bit may be
+    applicable to multiword DMA and PIO.
+
+ABRT error during data transfer or on completion
+    Up to ATA/ATAPI-7, the standard specifies that ABRT could be set on
+    ICRC errors and on cases where a device is not able to complete a
+    command. Combined with the fact that MWDMA and PIO transfer errors
+    aren't allowed to use ICRC bit up to ATA/ATAPI-7, it seems to imply
+    that ABRT bit alone could indicate transfer errors.
+
+    However, ATA/ATAPI-8 draft revision 1f removes the part that ICRC
+    errors can turn on ABRT. So, this is kind of gray area. Some
+    heuristics are needed here.
+
+ATA/ATAPI device errors can be further categorized as follows.
+
+Media errors
+    This is indicated by the UNC bit in the ERROR register. ATA devices
+    report a UNC error only after a certain number of retries has failed to
+    recover the data, so there's not much else to do other than
+    notifying the upper layer.
+
+    READ and WRITE commands report CHS or LBA of the first failed sector
+    but ATA/ATAPI standard specifies that the amount of transferred data
+    on error completion is indeterminate, so we cannot assume that
+    sectors preceding the failed sector have been transferred and thus
+    cannot complete those sectors successfully as SCSI does.
+
+Media changed / media change requested error
+    <<TODO: fill here>>
+
+Address error
+    This is indicated by IDNF bit in the ERROR register. Report to upper
+    layer.
+
+Other errors
+    This can be an invalid command or parameter indicated by the ABRT ERROR
+    bit, or some other error condition. Note that the ABRT bit can indicate
+    a lot of things including ICRC and Address errors. Heuristics are needed.
+
+Depending on commands, not all STATUS/ERROR bits are applicable. These
+non-applicable bits are marked with "na" in the output descriptions but
+up to ATA/ATAPI-7 no definition of "na" can be found. However,
+ATA/ATAPI-8 draft revision 1f describes "N/A" as follows.
+
+    3.2.3.3a N/A
+        A keyword that indicates a field has no defined value in this
+        standard and should not be checked by the host or device. N/A
+        fields should be cleared to zero.
+
+So, it seems reasonable to assume that "na" bits are cleared to zero by
+devices and thus need no explicit masking.
+
+ATAPI device CHECK CONDITION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An ATAPI device CHECK CONDITION error is indicated by a set CHK bit (ERR
+bit) in the STATUS register after the last byte of CDB is transferred for a
+PACKET command. For this kind of error, sense data should be acquired
+to gather information regarding the error. The REQUEST SENSE packet command
+should be used to acquire sense data.
+
+Once sense data is acquired, this type of errors can be handled
+similarly to other SCSI errors. Note that sense data may indicate ATA
+bus error (e.g. Sense Key 04h HARDWARE ERROR && ASC/ASCQ 47h/00h SCSI
+PARITY ERROR). In such cases, the error should be considered as an ATA
+bus error and handled according to `ATA bus error <#excatATAbusErr>`__.
+
+ATA device error (NCQ)
+~~~~~~~~~~~~~~~~~~~~~~
+
+NCQ command error is indicated by cleared BSY and set ERR bit during NCQ
+command phase (one or more NCQ commands outstanding). Although STATUS
+and ERROR registers will contain valid values describing the error, READ
+LOG EXT is required to clear the error condition, determine which
+command has failed and acquire more information.
+
+READ LOG EXT Log Page 10h reports which tag has failed and taskfile
+register values describing the error. With this information the failed
+command can be handled as a normal ATA command error as in
+`ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION) <#excatDevErr>`__
+and all other in-flight commands must be retried. Note that this retry
+should not be counted - it's likely that commands retried this way would
+have completed normally if it were not for the failed command.
+
+Note that ATA bus errors can be reported as ATA device NCQ errors. This
+should be handled as described in `ATA bus error <#excatATAbusErr>`__.
+
+If READ LOG EXT Log Page 10h fails or reports NQ, we're thoroughly
+screwed. This condition should be treated according to
+`HSM violation <#excatHSMviolation>`__.
+
+ATA bus error
+~~~~~~~~~~~~~
+
+An ATA bus error means that data corruption occurred during transmission
+over the ATA bus (SATA or PATA). This type of error can be indicated by
+
+-  ICRC or ABRT error as described in
+   `ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION) <#excatDevErr>`__.
+
+-  Controller-specific error completion with error information
+   indicating transmission error.
+
+-  On some controllers, command timeout. In this case, there may be a
+   mechanism to determine that the timeout is due to transmission error.
+
+-  Unknown/random errors, timeouts and all sorts of weirdities.
+
+As described above, transmission errors can cause a wide variety of
+symptoms ranging from device ICRC errors to random device lockup, and,
+in many cases, there is no way to tell if an error condition is due to a
+transmission error or not; therefore, it's necessary to employ some kind
+of heuristic when dealing with errors and timeouts. For example,
+encountering repetitive ABRT errors for a known supported command is
+likely to indicate an ATA bus error.
+
+Once it's determined that ATA bus errors have possibly occurred,
+lowering the ATA bus transmission speed is one action which may
+alleviate the problem. See `Reconfigure transport <#exrecReconf>`__ for
+more information.
+
+PCI bus error
+~~~~~~~~~~~~~
+
+Data corruption or other failures during transmission over PCI (or
+another system bus). For standard BMDMA, this is indicated by the Error
+bit in the BMDMA Status register. This type of error must be logged as it
+indicates something is very wrong with the system. Resetting the host
+controller is recommended.
+
+Late completion
+~~~~~~~~~~~~~~~
+
+This occurs when a timeout fires and the timeout handler finds out that
+the timed out command has completed successfully or with an error. This is
+usually caused by lost interrupts. This type of error must be logged.
+Resetting the host controller is recommended.
+
+Unknown error (timeout)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This is when a timeout occurs and the command is still processing or the
+host and device are in an unknown state. When this occurs, the HSM could be
+in any valid or invalid state. To bring the device to a known state and
+make it forget about the timed out command, resetting is necessary. The
+timed out command may be retried.
+
+Timeouts can also be caused by transmission errors. Refer to
+`ATA bus error <#excatATAbusErr>`__ for more details.
+
+Hotplug and power management exceptions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+<<TODO: fill here>>
+
+EH recovery actions
+-------------------
+
+This section discusses several important recovery actions.
+
+Clearing error condition
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Many controllers require their error registers to be cleared by the error
+handler. Different controllers may have different requirements.
+
+For SATA, it's strongly recommended to clear at least the SError register
+during error handling.
+
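+With the SCR access hooks described earlier, that could look roughly like
+the sketch below; whether and how this is done is controller specific::
+
+    /* Read SError and write the value back to clear all set bits. */
+    if (ap->ops->scr_read && ap->ops->scr_write) {
+        u32 serror = ap->ops->scr_read(ap, SCR_ERROR);
+
+        ap->ops->scr_write(ap, SCR_ERROR, serror);
+    }
+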
+Reset
+~~~~~
+
+During EH, resetting is necessary in the following cases.
+
+-  HSM is in unknown or invalid state
+
+-  HBA is in unknown or invalid state
+
+-  EH needs to make HBA/device forget about in-flight commands
+
+-  HBA/device behaves weirdly
+
+Resetting during EH might be a good idea regardless of the error condition
+to improve EH robustness. Whether to reset both or either one of the HBA
+and device depends on the situation, but the following scheme is
+recommended.
+
+-  When it's known that the HBA is in a ready state but the ATA/ATAPI
+   device is in an unknown state, reset only the device.
+
+-  If the HBA is in an unknown state, reset both the HBA and the device.
+
+HBA resetting is implementation specific. For a controller complying with
+taskfile/BMDMA PCI IDE, stopping the active DMA transaction may be
+sufficient iff BMDMA state is the only HBA context. But even mostly
+compliant taskfile/BMDMA PCI IDE controllers may have implementation
+specific requirements and mechanisms to reset themselves. This must be
+addressed by the specific drivers.
+
+OTOH, ATA/ATAPI standard describes in detail ways to reset ATA/ATAPI
+devices.
+
+PATA hardware reset
+    This is a hardware initiated device reset signalled by asserting the
+    PATA RESET- signal. There is no standard way to initiate a hardware
+    reset from software, although some hardware provides registers that
+    allow the driver to directly tweak the RESET- signal.
+
+Software reset
+    This is achieved by turning CONTROL SRST bit on for at least 5us.
+    Both PATA and SATA support it but, in case of SATA, this may require
+    controller-specific support as the second Register FIS to clear SRST
+    should be transmitted while BSY bit is still set. Note that on PATA,
+    this resets both master and slave devices on a channel.
+
+EXECUTE DEVICE DIAGNOSTIC command
+    Although the ATA/ATAPI standard doesn't describe it exactly, EDD
+    implies some level of resetting, possibly a similar level to software
+    reset.
+    Host-side EDD protocol can be handled with normal command processing
+    and most SATA controllers should be able to handle EDD's just like
+    other commands. As in software reset, EDD affects both devices on a
+    PATA bus.
+
+    Although EDD does reset devices, this doesn't suit error handling as
+    EDD cannot be issued while BSY is set and it's unclear how it will
+    act when device is in unknown/weird state.
+
+ATAPI DEVICE RESET command
+    This is very similar to software reset except that reset can be
+    restricted to the selected device without affecting the other device
+    sharing the cable.
+
+SATA phy reset
+    This is the preferred way of resetting a SATA device. In effect,
+    it's identical to PATA hardware reset. Note that this can be done
+    with the standard SCR Control register. As such, it's usually easier
+    to implement than software reset.
+
+One more thing to consider when resetting devices is that resetting
+clears certain configuration parameters and they need to be set to their
+previous or newly adjusted values after reset.
+
+The affected parameters are:
+
+-  CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
+
+-  Parameters set with SET FEATURES including transfer mode setting
+
+-  Block count set with SET MULTIPLE MODE
+
+-  Other parameters (SET MAX, MEDIA LOCK...)
+
+ATA/ATAPI standard specifies that some parameters must be maintained
+across hardware or software reset, but doesn't strictly specify all of
+them. Always reconfiguring needed parameters after reset is required for
+robustness. Note that this also applies when resuming from deep sleep
+(power-off).
+
+Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE / IDENTIFY
+PACKET DEVICE is issued after any configuration parameter is updated or a
+hardware reset, and that the result is used for further operation. The OS
+driver is required to implement a revalidation mechanism to support this.
+
+Reconfigure transport
+~~~~~~~~~~~~~~~~~~~~~
+
+For both PATA and SATA, a lot of corners are cut for cheap connectors,
+cables or controllers, and it's quite common to see a high transmission
+error rate. This can be mitigated by lowering the transmission speed.
+
+The following is a possible scheme Jeff Garzik suggested.
+
+    If more than $N (3?) transmission errors happen in 15 minutes,
+
+    -  if SATA, decrease SATA PHY speed. if speed cannot be decreased,
+
+    -  decrease UDMA xfer speed. if at UDMA0, switch to PIO4,
+
+    -  decrease PIO xfer speed. if at PIO3, complain, but continue
+
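+A sketch of that policy in pseudo-C; the error counter and all of the
+speed-lowering helpers are made-up names, shown only to illustrate the
+ordering of the steps above::
+
+    /* Called whenever a transmission error is attributed to a port. */
+    static void xfer_error_backoff(struct ata_port *ap)
+    {
+        if (xfer_errors_in_last_15_minutes(ap) <= 3)    /* $N */
+            return;
+
+        if (is_sata(ap) && can_lower_sata_phy_speed(ap))
+            lower_sata_phy_speed(ap);
+        else if (using_udma(ap) && !at_udma0(ap))
+            lower_udma_speed(ap);
+        else if (using_udma(ap))
+            switch_to_pio4(ap);
+        else if (!at_pio3(ap))
+            lower_pio_speed(ap);
+        else
+            ata_port_warn(ap, "already at lowest transfer speed\n");
+    }
+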
+ata_piix Internals
+===================
+
+.. kernel-doc:: drivers/ata/ata_piix.c
+   :internal:
+
+sata_sil Internals
+===================
+
+.. kernel-doc:: drivers/ata/sata_sil.c
+   :internal:
+
+Thanks
+======
+
+The bulk of the ATA knowledge comes thanks to long conversations with
+Andre Hedrick (www.linux-ide.org), and long hours pondering the ATA and
+SCSI specifications.
+
+Thanks to Alan Cox for pointing out similarities between SATA and SCSI,
+and in general for motivation to hack on libata.
+
+libata's device detection method, ata_pio_devchk, and in general all
+the early probing was based on extensive study of Hale Landis's
+probe/reset code in his ATADRVR driver (www.ata-atapi.com).
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
new file mode 100644
index 0000000..e9afa58
--- /dev/null
+++ b/Documentation/driver-api/mtdnand.rst
@@ -0,0 +1,1007 @@
+=====================================
+MTD NAND Driver Programming Interface
+=====================================
+
+:Author: Thomas Gleixner
+
+Introduction
+============
+
+The generic NAND driver supports almost all NAND and AG-AND based chips
+and connects them to the Memory Technology Devices (MTD) subsystem of
+the Linux Kernel.
+
+This documentation is provided for developers who want to implement
+board drivers or filesystem drivers suitable for NAND devices.
+
+Known Bugs And Assumptions
+==========================
+
+None.
+
+Documentation hints
+===================
+
+The function and structure docs are autogenerated. Each function and
+struct member has a short description which is marked with an [XXX]
+identifier. The following chapters explain the meaning of those
+identifiers.
+
+Function identifiers [XXX]
+--------------------------
+
+The functions are marked with [XXX] identifiers in the short comment.
+The identifiers explain the usage and scope of the functions. The following
+identifiers are used:
+
+-  [MTD Interface]
+
+   These functions provide the interface to the MTD kernel API. They are
+   not replaceable and provide functionality which is completely hardware
+   independent.
+
+-  [NAND Interface]
+
+   These functions are exported and provide the interface to the NAND
+   kernel API.
+
+-  [GENERIC]
+
+   Generic functions are not replaceable and provide functionality which
+   is completely hardware independent.
+
+-  [DEFAULT]
+
+   Default functions provide hardware related functionality which is
+   suitable for most of the implementations. These functions can be
+   replaced by the board driver if necessary. Those functions are called
+   via pointers in the NAND chip description structure. The board driver
+   can set the functions which should be replaced by board dependent
+   functions before calling nand_scan(). If the function pointer is
+   NULL on entry to nand_scan() then the pointer is set to the default
+   function which is suitable for the detected chip type.
+
+Struct member identifiers [XXX]
+-------------------------------
+
+The struct members are marked with [XXX] identifiers in the comment. The
+identifiers explain the usage and scope of the members. The following
+identifiers are used:
+
+-  [INTERN]
+
+   These members are for NAND driver internal use only and must not be
+   modified. Most of these values are calculated from the chip geometry
+   information which is evaluated during nand_scan().
+
+-  [REPLACEABLE]
+
+   Replaceable members hold hardware related functions which can be
+   provided by the board driver. The board driver can set the functions
+   which should be replaced by board dependent functions before calling
+   nand_scan(). If the function pointer is NULL on entry to
+   nand_scan() then the pointer is set to the default function which is
+   suitable for the detected chip type.
+
+-  [BOARDSPECIFIC]
+
+   Board specific members hold hardware related information which must
+   be provided by the board driver. The board driver must set the
+   function pointers and datafields before calling nand_scan().
+
+-  [OPTIONAL]
+
+   Optional members can hold information relevant for the board driver.
+   The generic NAND driver code does not use this information.
+
+Basic board driver
+==================
+
+For most boards it will be sufficient to provide just the basic
+functions and fill out some really board dependent members in the nand
+chip description structure.
+
+Basic defines
+-------------
+
+At least you have to provide a nand_chip structure and storage for
+the ioremap'ed chip address. You can allocate the nand_chip structure
+using kmalloc or you can allocate it statically. The NAND chip structure
+embeds an mtd structure which will be registered to the MTD subsystem.
+You can extract a pointer to the mtd structure from a nand_chip pointer
+using the nand_to_mtd() helper.
+
+Kmalloc based example
+
+::
+
+    static struct mtd_info *board_mtd;
+    static void __iomem *baseaddr;
+
+
+Static example
+
+::
+
+    static struct nand_chip board_chip;
+    static void __iomem *baseaddr;
+
+
+Partition defines
+-----------------
+
+If you want to divide your device into partitions, then define a
+partitioning scheme suitable to your board.
+
+::
+
+    #define NUM_PARTITIONS 2
+    static struct mtd_partition partition_info[] = {
+        { .name = "Flash partition 1",
+          .offset =  0,
+          .size =    8 * 1024 * 1024 },
+        { .name = "Flash partition 2",
+          .offset =  MTDPART_OFS_NEXT,
+          .size =    MTDPART_SIZ_FULL },
+    };
+
+
+Hardware control function
+-------------------------
+
+The hardware control function provides access to the control pins of the
+NAND chip(s). The access can be done by GPIO pins or by address lines.
+If you use address lines, make sure that the timing requirements are
+met.
+
+*GPIO based example*
+
+::
+
+    static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+    {
+        switch(cmd){
+            case NAND_CTL_SETCLE: /* Set CLE pin high */ break;
+            case NAND_CTL_CLRCLE: /* Set CLE pin low */ break;
+            case NAND_CTL_SETALE: /* Set ALE pin high */ break;
+            case NAND_CTL_CLRALE: /* Set ALE pin low */ break;
+            case NAND_CTL_SETNCE: /* Set nCE pin low */ break;
+            case NAND_CTL_CLRNCE: /* Set nCE pin high */ break;
+        }
+    }
+
+
+*Address lines based example.* It's assumed that the nCE pin is driven
+by a chip select decoder.
+
+::
+
+    static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+    {
+        struct nand_chip *this = mtd_to_nand(mtd);
+        switch(cmd){
+            case NAND_CTL_SETCLE: this->IO_ADDR_W |= CLE_ADRR_BIT;  break;
+            case NAND_CTL_CLRCLE: this->IO_ADDR_W &= ~CLE_ADRR_BIT; break;
+            case NAND_CTL_SETALE: this->IO_ADDR_W |= ALE_ADRR_BIT;  break;
+            case NAND_CTL_CLRALE: this->IO_ADDR_W &= ~ALE_ADRR_BIT; break;
+        }
+    }
+
+
+Device ready function
+---------------------
+
+If the hardware interface has the ready busy pin of the NAND chip
+connected to a GPIO or other accessible I/O pin, this function is used
+to read back the state of the pin. The function gets the mtd info structure
+as its argument and should return 0 if the device is busy (R/B pin is low)
+and 1 if the device is ready (R/B pin is high). If the hardware interface
+does not give access to the ready busy pin, then the function must not be
+defined and the function pointer this->dev_ready is set to NULL.
+
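+A GPIO based sketch; ``BOARD_NAND_RB_GPIO`` is a made-up, board specific
+GPIO number and the exact prototype should be checked against the
+nand_chip structure documentation::
+
+    static int board_dev_ready(struct mtd_info *mtd)
+    {
+        /* R/B pin high means ready, low means busy. */
+        return gpio_get_value(BOARD_NAND_RB_GPIO);
+    }
+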
+Init function
+-------------
+
+The init function allocates memory and sets up all the board specific
+parameters and function pointers. When everything is set up, nand_scan()
+is called. This function tries to detect and identify the chip. If a
+chip is found, all the internal data fields are initialized accordingly.
+The structure(s) have to be zeroed out first and then filled with the
+necessary information about the device.
+
+::
+
+    static int __init board_init (void)
+    {
+        struct nand_chip *this;
+        int err = 0;
+
+        /* Allocate memory for MTD device structure and private data */
+        this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+        if (!this) {
+            printk ("Unable to allocate NAND MTD device structure.\n");
+            err = -ENOMEM;
+            goto out;
+        }
+
+        board_mtd = nand_to_mtd(this);
+
+        /* map physical address */
+        baseaddr = ioremap(CHIP_PHYSICAL_ADDRESS, 1024);
+        if (!baseaddr) {
+            printk("Ioremap to access NAND chip failed\n");
+            err = -EIO;
+            goto out_mtd;
+        }
+
+        /* Set address of NAND IO lines */
+        this->IO_ADDR_R = baseaddr;
+        this->IO_ADDR_W = baseaddr;
+        /* Reference hardware control function */
+        this->hwcontrol = board_hwcontrol;
+        /* Set command delay time, see datasheet for correct value */
+        this->chip_delay = CHIP_DEPENDEND_COMMAND_DELAY;
+        /* Assign the device ready function, if available */
+        this->dev_ready = board_dev_ready;
+        this->eccmode = NAND_ECC_SOFT;
+
+        /* Scan to find existence of the device */
+        if (nand_scan (board_mtd, 1)) {
+            err = -ENXIO;
+            goto out_ior;
+        }
+
+        add_mtd_partitions(board_mtd, partition_info, NUM_PARTITIONS);
+        goto out;
+
+    out_ior:
+        iounmap(baseaddr);
+    out_mtd:
+        kfree (this);
+    out:
+        return err;
+    }
+    module_init(board_init);
+
+
+Exit function
+-------------
+
+The exit function is only necessary if the driver is compiled as a
+module. It releases all resources which are held by the chip driver and
+unregisters the partitions in the MTD layer.
+
+::
+
+    #ifdef MODULE
+    static void __exit board_cleanup (void)
+    {
+        /* Release resources, unregister device */
+        nand_release (board_mtd);
+
+        /* unmap physical address */
+        iounmap(baseaddr);
+
+        /* Free the MTD device structure */
+        kfree (mtd_to_nand(board_mtd));
+    }
+    module_exit(board_cleanup);
+    #endif
+
+
+Advanced board driver functions
+===============================
+
+This chapter describes the advanced functionality of the NAND driver.
+For a list of functions which can be overridden by the board driver see
+the documentation of the nand_chip structure.
+
+Multiple chip control
+---------------------
+
+The nand driver can control chip arrays. Therefore the board driver must
+provide its own select_chip function. This function must (de)select the
+requested chip. The function pointer in the nand_chip structure must be
+set before calling nand_scan(). The maxchip parameter of nand_scan()
+defines the maximum number of chips to scan for. Make sure that the
+select_chip function can handle the requested number of chips.
+
+The nand driver concatenates the chips to one virtual chip and provides
+this virtual chip to the MTD layer.
+
+*Note: The driver can only handle linear chip arrays of equally sized
+chips. There is no support for parallel arrays which extend the
+buswidth.*
+
+*GPIO based example*
+
+::
+
+    static void board_select_chip (struct mtd_info *mtd, int chip)
+    {
+        /* Deselect all chips, set all nCE pins high */
+        GPIO(BOARD_NAND_NCE) |= 0xff;
+        if (chip >= 0)
+            GPIO(BOARD_NAND_NCE) &= ~ (1 << chip);
+    }
+
+
+*Address lines based example.* It's assumed that the nCE pins are
+connected to an address decoder.
+
+::
+
+    static void board_select_chip (struct mtd_info *mtd, int chip)
+    {
+        struct nand_chip *this = mtd_to_nand(mtd);
+
+        /* Deselect all chips */
+        this->IO_ADDR_R &= ~BOARD_NAND_ADDR_MASK;
+        this->IO_ADDR_W &= ~BOARD_NAND_ADDR_MASK;
+        switch (chip) {
+        case 0:
+            this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIP0;
+            this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIP0;
+            break;
+        ....
+        case n:
+            this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIPn;
+            this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIPn;
+            break;
+        }
+    }
+
+
+Hardware ECC support
+--------------------
+
+Functions and constants
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The nand driver supports four different types of hardware ECC.
+
+-  NAND_ECC_HW3_256
+
+   Hardware ECC generator providing 3 bytes ECC per 256 byte.
+
+-  NAND_ECC_HW3_512
+
+   Hardware ECC generator providing 3 bytes ECC per 512 byte.
+
+-  NAND_ECC_HW6_512
+
+   Hardware ECC generator providing 6 bytes ECC per 512 byte.
+
+-  NAND_ECC_HW8_512
+
+   Hardware ECC generator providing 8 bytes ECC per 512 byte.
+
+If your hardware generator has different functionality, add it at the
+appropriate place in nand_base.c.
+
+The board driver must provide the following functions (a rough sketch of
+them follows this list):
+
+-  enable_hwecc
+
+   This function is called before reading / writing to the chip. Reset
+   or initialize the hardware generator in this function. The function
+   is called with an argument which lets you distinguish between read and
+   write operations.
+
+-  calculate_ecc
+
+   This function is called after read / write from / to the chip.
+   Transfer the ECC from the hardware to the buffer. If the option
+   NAND_HWECC_SYNDROME is set then the function is only called on
+   write. See below.
+
+-  correct_data
+
+   In case of an ECC error this function is called for error detection
+   and correction. Return 1 or 2 if the error could be corrected. If the
+   error is not correctable return -1. If your hardware generator matches
+   the default algorithm of the nand_ecc software generator then use the
+   correction function provided by nand_ecc instead of implementing
+   duplicated code.
+
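+A rough sketch of the three callbacks; the bodies are placeholders and the
+exact prototypes should be taken from the nand_chip structure documentation
+for the kernel version in use::
+
+    /* Reset/initialize the hardware ECC generator; mode distinguishes
+     * between read and write operations. */
+    static void board_enable_hwecc(struct mtd_info *mtd, int mode)
+    {
+        /* program the controller's ECC unit here */
+    }
+
+    /* Transfer the ECC calculated by the hardware into ecc_code. */
+    static int board_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+                                   u_char *ecc_code)
+    {
+        /* read the controller's ECC registers into ecc_code here */
+        return 0;
+    }
+
+    /* Detect and correct errors in dat using read_ecc/calc_ecc; return 1
+     * or 2 if the error could be corrected, -1 if it is uncorrectable. */
+    static int board_correct_data(struct mtd_info *mtd, u_char *dat,
+                                  u_char *read_ecc, u_char *calc_ecc)
+    {
+        return 0;
+    }
+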
+Hardware ECC with syndrome calculation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Many hardware ECC implementations provide Reed-Solomon codes and
+calculate an error syndrome on read. The syndrome must be converted to a
+standard Reed-Solomon syndrome before calling the error correction code
+in the generic Reed-Solomon library.
+
+The ECC bytes must be placed immediately after the data bytes in order
+to make the syndrome generator work. This is contrary to the usual
+layout used by software ECC. The separation of data and out of band area
+is no longer possible. The nand driver code handles this layout and the
+remaining free bytes in the oob area are managed by the autoplacement
+code. Provide a matching oob-layout in this case. See rts_from4.c and
+diskonchip.c for implementation reference. In those cases we must also
+use bad block tables on FLASH, because the ECC layout is interfering
+with the bad block marker positions. See bad block table support for
+details.
+
+Bad block table support
+-----------------------
+
+Most NAND chips mark the bad blocks at a defined position in the spare
+area. Those blocks must not be erased under any circumstances as the bad
+block information would be lost. It is possible to check the bad block
+mark each time the blocks are accessed by reading the spare area of
+the first page in the block. This is time consuming, so a bad block table
+is used.
+
+The nand driver supports various types of bad block tables.
+
+-  Per device
+
+   The bad block table contains all bad block information of the device
+   which can consist of multiple chips.
+
+-  Per chip
+
+   A bad block table is used per chip and contains the bad block
+   information for this particular chip.
+
+-  Fixed offset
+
+   The bad block table is located at a fixed offset in the chip
+   (device). This applies to various DiskOnChip devices.
+
+-  Automatically placed
+
+   The bad block table is automatically placed and detected either at
+   the end or at the beginning of a chip (device).
+
+-  Mirrored tables
+
+   The bad block table is mirrored on the chip (device) to allow updates
+   of the bad block table without data loss.
+
+nand_scan() calls the function nand_default_bbt().
+nand_default_bbt() selects appropriate default bad block table
+descriptors depending on the chip information which was retrieved by
+nand_scan().
+
+The standard policy is to scan the device for bad blocks and build a
+RAM based bad block table, which allows faster access than always
+checking the bad block information on the flash chip itself.
+
+Flash based tables
+~~~~~~~~~~~~~~~~~~
+
+It may be desired or necessary to keep a bad block table in FLASH. For
+AG-AND chips this is mandatory, as they have no factory marked bad
+blocks. They have factory marked good blocks. The marker pattern is
+erased when the block is erased to be reused. So in case of power loss
+before the pattern is written back to the chip, the block would be lost
+and added to the bad blocks. Therefore we scan the chip(s) for good blocks
+the first time we detect them and store this information in a bad block
+table before erasing any of the blocks.
+
+The blocks in which the tables are stored are protected against
+accidental access by marking them bad in the memory bad block table. The
+bad block table management functions are allowed to circumvent this
+protection.
+
+The simplest way to activate the FLASH based bad block table support is
+to set the option NAND_BBT_USE_FLASH in the bbt_options field of the
+nand chip structure before calling nand_scan(), as shown in the sketch
+after this list. For AG-AND chips this is done by default. This activates
+the default FLASH based bad block table functionality of the NAND driver.
+The default bad block table options are
+
+-  Store bad block table per chip
+
+-  Use 2 bits per block
+
+-  Automatic placement at the end of the chip
+
+-  Use mirrored tables with version numbers
+
+-  Reserve 4 blocks at the end of the chip
+
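+For the defaults, a single line in the board init function is enough; a
+sketch, assuming the nand_chip pointer is called ``this`` as in the earlier
+examples::
+
+    /* Must be set before calling nand_scan() */
+    this->bbt_options |= NAND_BBT_USE_FLASH;
+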
+User defined tables
+~~~~~~~~~~~~~~~~~~~
+
+User defined tables are created by filling out a nand_bbt_descr
+structure and storing the pointer in the nand_chip structure member
+bbt_td before calling nand_scan(). If a mirror table is necessary a
+second structure must be created and a pointer to this structure must be
+stored in bbt_md inside the nand_chip structure. If the bbt_md member
+is set to NULL then only the main table is used and no scan for the
+mirrored table is performed.
+
+The most important field in the nand_bbt_descr structure is the
+options field. The options define most of the table properties. Use the
+predefined constants from nand.h to define the options.
+
+-  Number of bits per block
+
+   The supported number of bits is 1, 2, 4, 8.
+
+-  Table per chip
+
+   Setting the constant NAND_BBT_PERCHIP selects that a bad block
+   table is managed for each chip in a chip array. If this option is not
+   set then a per device bad block table is used.
+
+-  Table location is absolute
+
+   Use the option constant NAND_BBT_ABSPAGE and define the absolute
+   page number where the bad block table starts in the field pages. If
+   you have selected bad block tables per chip and you have a multi chip
+   array then the start page must be given for each chip in the chip
+   array. Note: no scan for a table ident pattern is performed, so the
+   fields pattern, veroffs, offs and len can be left uninitialized.
+
+-  Table location is automatically detected
+
+   The table can either be located in the first or the last good blocks
+   of the chip (device). Set NAND_BBT_LASTBLOCK to place the bad block
+   table at the end of the chip (device). The bad block tables are
+   marked and identified by a pattern which is stored in the spare area
+   of the first page in the block which holds the bad block table. Store
+   a pointer to the pattern in the pattern field. Furthermore, the length of
+   the pattern has to be stored in len and the offset in the spare area
+   must be given in the offs member of the nand_bbt_descr structure.
+   For mirrored bad block tables different patterns are mandatory.
+
+-  Table creation
+
+   Set the option NAND_BBT_CREATE to enable the table creation if no
+   table can be found during the scan. Usually this is done only once if
+   a new chip is found.
+
+-  Table write support
+
+   Set the option NAND_BBT_WRITE to enable the table write support.
+   This allows the update of the bad block table(s) in case a block has
+   to be marked bad due to wear. The MTD interface function
+   block_markbad calls the update function of the bad block table.
+   If the write support is enabled then the table is updated on FLASH.
+
+   Note: Write support should only be enabled for mirrored tables with
+   version control.
+
+-  Table version control
+
+   Set the option NAND_BBT_VERSION to enable the table version
+   control. It's highly recommended to enable this for mirrored tables
+   with write support. It makes sure that the risk of losing the bad
+   block table information is reduced to the loss of the information
+   about the one worn out block which should be marked bad. The version
+   is stored in 4 consecutive bytes in the spare area of the device. The
+   position of the version number is defined by the member veroffs in
+   the bad block table descriptor.
+
+-  Save block contents on write
+
+   In case the block which holds the bad block table contains other
+   useful information, set the option NAND_BBT_SAVECONTENT. When the
+   bad block table is written, the whole block is read, the bad block
+   table is updated, the block is erased and everything is written
+   back. If this option is not set, only the bad block table is
+   written and everything else in the block is ignored and erased.
+
+-  Number of reserved blocks
+
+   For automatic placement some blocks must be reserved for bad block
+   table storage. The number of reserved blocks is defined in the
+   maxblocks member of the bad block table description structure.
+   Reserving 4 blocks for mirrored tables should be a reasonable number.
+   This also limits the number of blocks which are scanned for the bad
+   block table ident pattern.
+
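+A sketch of a mirrored pair of user defined table descriptors, loosely
+modelled on the driver's built-in defaults; the pattern contents, offsets
+and lengths are examples only::
+
+    static uint8_t bbt_pattern[]    = { 'B', 'b', 't', '0' };
+    static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+    static struct nand_bbt_descr board_bbt_main = {
+        .options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+                     | NAND_BBT_2BIT | NAND_BBT_VERSION,
+        .offs      = 8,     /* pattern offset in the spare area */
+        .len       = 4,     /* pattern length */
+        .veroffs   = 12,    /* version byte offset */
+        .maxblocks = 4,     /* blocks reserved for the table */
+        .pattern   = bbt_pattern,
+    };
+
+    static struct nand_bbt_descr board_bbt_mirror = {
+        .options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+                     | NAND_BBT_2BIT | NAND_BBT_VERSION,
+        .offs      = 8,
+        .len       = 4,
+        .veroffs   = 12,
+        .maxblocks = 4,
+        .pattern   = mirror_pattern,
+    };
+
+    /* In the board init function, before nand_scan(): */
+    this->bbt_td = &board_bbt_main;
+    this->bbt_md = &board_bbt_mirror;
+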
+Spare area (auto)placement
+--------------------------
+
+The nand driver implements different possibilities for the placement of
+filesystem data in the spare area:
+
+-  Placement defined by fs driver
+
+-  Automatic placement
+
+The default placement function is automatic placement. The nand driver
+has built in default placement schemes for the various chip types. If, due
+to hardware ECC functionality, the default placement does not fit then
+the board driver can provide its own placement scheme.
+
+File system drivers can provide their own placement scheme which is used
+instead of the default placement scheme.
+
+Placement schemes are defined by a nand_oobinfo structure
+
+::
+
+    struct nand_oobinfo {
+        int useecc;
+        int eccbytes;
+        int eccpos[24];
+        int oobfree[8][2];
+    };
+
+
+-  useecc
+
+   The useecc member controls the ecc and placement function. The header
+   file include/mtd/mtd-abi.h contains constants to select ecc and
+   placement. MTD_NANDECC_OFF switches off the ecc completely. This is
+   not recommended and is available for testing and diagnosis only.
+   MTD_NANDECC_PLACE selects caller defined placement,
+   MTD_NANDECC_AUTOPLACE selects automatic placement.
+
+-  eccbytes
+
+   The eccbytes member defines the number of ecc bytes per page.
+
+-  eccpos
+
+   The eccpos array holds the byte offsets in the spare area where the
+   ecc codes are placed.
+
+-  oobfree
+
+   The oobfree array defines the areas in the spare area which can be
+   used for automatic placement. The information is given in the format
+   {offset, size}. offset defines the start of the usable area, size the
+   length in bytes. More than one area can be defined. The list is
+   terminated by a {0, 0} entry.
+
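+An example instance; this one mirrors the default 512 byte pagesize scheme
+shown in the tables further below::
+
+    static struct nand_oobinfo board_oobinfo = {
+        .useecc   = MTD_NANDECC_AUTOPLACE,
+        .eccbytes = 6,
+        .eccpos   = { 0, 1, 2, 3, 6, 7 },
+        .oobfree  = { { 8, 8 } },   /* bytes 8 - 15 are free for autoplacement */
+    };
+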
+Placement defined by fs driver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The calling function provides a pointer to a nand_oobinfo structure
+which defines the ecc placement. For writes the caller must provide a
+spare area buffer along with the data buffer. The spare area buffer size
+is (number of pages) \* (size of spare area). For reads the buffer size
+is (number of pages) \* ((size of spare area) + (number of ecc steps per
+page) \* sizeof (int)). The driver stores the result of the ecc check
+for each tuple in the spare buffer. The storage sequence is::
+
+	<spare data page 0><ecc result 0>...<ecc result n>
+
+	...
+
+	<spare data page n><ecc result 0>...<ecc result n>
+
+This is a legacy mode used by YAFFS1.
+
+If the spare area buffer is NULL then only the ECC placement is done
+according to the given scheme in the nand_oobinfo structure.
+
+Automatic placement
+~~~~~~~~~~~~~~~~~~~
+
+Automatic placement uses the built in defaults to place the ecc bytes in
+the spare area. If filesystem data has to be stored in / read from the
+spare area then the calling function must provide a buffer. The buffer
+size per page is determined by the oobfree array in the nand_oobinfo
+structure.
+
+If the spare area buffer is NULL then only the ECC placement is done
+according to the default builtin scheme.
+
+Spare area autoplacement default schemes
+----------------------------------------
+
+256 byte pagesize
+~~~~~~~~~~~~~~~~~
+
+======== ================== ===================================================
+Offset   Content            Comment
+======== ================== ===================================================
+0x00     ECC byte 0         Error correction code byte 0
+0x01     ECC byte 1         Error correction code byte 1
+0x02     ECC byte 2         Error correction code byte 2
+0x03     Autoplace 0
+0x04     Autoplace 1
+0x05     Bad block marker   If any bit in this byte is zero, then this
+			    block is bad. This applies only to the first
+			    page in a block. In the remaining pages this
+			    byte is reserved
+0x06     Autoplace 2
+0x07     Autoplace 3
+======== ================== ===================================================
+
+512 byte pagesize
+~~~~~~~~~~~~~~~~~
+
+
+============= ================== ==============================================
+Offset        Content            Comment
+============= ================== ==============================================
+0x00          ECC byte 0         Error correction code byte 0 of the lower
+				 256 Byte data in this page
+0x01          ECC byte 1         Error correction code byte 1 of the lower
+				 256 Bytes of data in this page
+0x02          ECC byte 2         Error correction code byte 2 of the lower
+				 256 Bytes of data in this page
+0x03          ECC byte 3         Error correction code byte 0 of the upper
+				 256 Bytes of data in this page
+0x04          reserved           reserved
+0x05          Bad block marker   If any bit in this byte is zero, then this
+				 block is bad. This applies only to the first
+				 page in a block. In the remaining pages this
+				 byte is reserved
+0x06          ECC byte 4         Error correction code byte 1 of the upper
+				 256 Bytes of data in this page
+0x07          ECC byte 5         Error correction code byte 2 of the upper
+				 256 Bytes of data in this page
+0x08 - 0x0F   Autoplace 0 - 7
+============= ================== ==============================================
+
+2048 byte pagesize
+~~~~~~~~~~~~~~~~~~
+
+=========== ================== ================================================
+Offset      Content            Comment
+=========== ================== ================================================
+0x00        Bad block marker   If any bit in this byte is zero, then this block
+			       is bad. This applies only to the first page in a
+			       block. In the remaining pages this byte is
+			       reserved
+0x01        Reserved           Reserved
+0x02-0x27   Autoplace 0 - 37
+0x28        ECC byte 0         Error correction code byte 0 of the first
+			       256 Byte data in this page
+0x29        ECC byte 1         Error correction code byte 1 of the first
+			       256 Bytes of data in this page
+0x2A        ECC byte 2         Error correction code byte 2 of the first
+			       256 Bytes data in this page
+0x2B        ECC byte 3         Error correction code byte 0 of the second
+			       256 Bytes of data in this page
+0x2C        ECC byte 4         Error correction code byte 1 of the second
+			       256 Bytes of data in this page
+0x2D        ECC byte 5         Error correction code byte 2 of the second
+			       256 Bytes of data in this page
+0x2E        ECC byte 6         Error correction code byte 0 of the third
+			       256 Bytes of data in this page
+0x2F        ECC byte 7         Error correction code byte 1 of the third
+			       256 Bytes of data in this page
+0x30        ECC byte 8         Error correction code byte 2 of the third
+			       256 Bytes of data in this page
+0x31        ECC byte 9         Error correction code byte 0 of the fourth
+			       256 Bytes of data in this page
+0x32        ECC byte 10        Error correction code byte 1 of the fourth
+			       256 Bytes of data in this page
+0x33        ECC byte 11        Error correction code byte 2 of the fourth
+			       256 Bytes of data in this page
+0x34        ECC byte 12        Error correction code byte 0 of the fifth
+			       256 Bytes of data in this page
+0x35        ECC byte 13        Error correction code byte 1 of the fifth
+			       256 Bytes of data in this page
+0x36        ECC byte 14        Error correction code byte 2 of the fifth
+			       256 Bytes of data in this page
+0x37        ECC byte 15        Error correction code byte 0 of the sixth
+			       256 Bytes of data in this page
+0x38        ECC byte 16        Error correction code byte 1 of the sixth
+			       256 Bytes of data in this page
+0x39        ECC byte 17        Error correction code byte 2 of the sixth
+			       256 Bytes of data in this page
+0x3A        ECC byte 18        Error correction code byte 0 of the seventh
+			       256 Bytes of data in this page
+0x3B        ECC byte 19        Error correction code byte 1 of the seventh
+			       256 Bytes of data in this page
+0x3C        ECC byte 20        Error correction code byte 2 of the seventh
+			       256 Bytes of data in this page
+0x3D        ECC byte 21        Error correction code byte 0 of the eighth
+			       256 Bytes of data in this page
+0x3E        ECC byte 22        Error correction code byte 1 of the eighth
+			       256 Bytes of data in this page
+0x3F        ECC byte 23        Error correction code byte 2 of the eighth
+			       256 Bytes of data in this page
+=========== ================== ================================================
+
+Filesystem support
+==================
+
+The NAND driver provides all necessary functions for a filesystem via
+the MTD interface.
+
+Filesystems must be aware of the NAND peculiarities and restrictions.
+One major restriction of NAND Flash is that you cannot write to a page as
+often as you want. The consecutive writes to a page, before erasing
+it again, are restricted to 1-3 writes, depending on the manufacturer's
+specifications. The same applies to the spare area.
+
+Therefore NAND aware filesystems must either write in page size chunks
+or hold a writebuffer to collect smaller writes until they sum up to
+pagesize. Available NAND aware filesystems: JFFS2, YAFFS.
+
+The spare area usage to store filesystem data is controlled by the spare
+area placement functionality which is described in one of the earlier
+chapters.
+
+Tools
+=====
+
+The MTD project provides a couple of helpful tools to handle NAND Flash.
+
+-  flasherase, flasheraseall: Erase and format FLASH partitions
+
+-  nandwrite: write filesystem images to NAND FLASH
+
+-  nanddump: dump the contents of a NAND FLASH partition
+
+These tools are aware of the NAND restrictions. Please use those tools
+instead of complaining about errors which are caused by non NAND aware
+access methods.
+
+Constants
+=========
+
+This chapter describes the constants which might be relevant for a
+driver developer.
+
+Chip option constants
+---------------------
+
+Constants for chip id table
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These constants are defined in nand.h. They are OR-ed together to
+describe the chip functionality::
+
+    /* Buswidth is 16 bit */
+    #define NAND_BUSWIDTH_16    0x00000002
+    /* Device supports partial programming without padding */
+    #define NAND_NO_PADDING     0x00000004
+    /* Chip has cache program function */
+    #define NAND_CACHEPRG       0x00000008
+    /* Chip has copy back function */
+    #define NAND_COPYBACK       0x00000010
+    /* AND Chip which has 4 banks and a confusing page / block
+     * assignment. See Renesas datasheet for further information */
+    #define NAND_IS_AND     0x00000020
+    /* Chip has an array of 4 pages which can be read without
+     * additional ready /busy waits */
+    #define NAND_4PAGE_ARRAY    0x00000040
+
+
+Constants for runtime options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These constants are defined in nand.h. They are OR-ed together to
+describe the functionality::
+
+    /* The hw ecc generator provides a syndrome instead of an ecc value on read
+     * This can only work if we have the ecc bytes directly behind the
+     * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
+    #define NAND_HWECC_SYNDROME 0x00020000
+
+
+ECC selection constants
+-----------------------
+
+Use these constants to select the ECC algorithm::
+
+    /* No ECC. Usage is not recommended ! */
+    #define NAND_ECC_NONE       0
+    /* Software ECC 3 byte ECC per 256 Byte data */
+    #define NAND_ECC_SOFT       1
+    /* Hardware ECC 3 byte ECC per 256 Byte data */
+    #define NAND_ECC_HW3_256    2
+    /* Hardware ECC 3 byte ECC per 512 Byte data */
+    #define NAND_ECC_HW3_512    3
+    /* Hardware ECC 6 byte ECC per 512 Byte data */
+    #define NAND_ECC_HW6_512    4
+    /* Hardware ECC 8 byte ECC per 512 Byte data */
+    #define NAND_ECC_HW8_512    6
+
+
+Hardware control related constants
+----------------------------------
+
+These constants describe the requested hardware access function when the
+board-specific hardware control function is called::
+
+    /* Select the chip by setting nCE to low */
+    #define NAND_CTL_SETNCE     1
+    /* Deselect the chip by setting nCE to high */
+    #define NAND_CTL_CLRNCE     2
+    /* Select the command latch by setting CLE to high */
+    #define NAND_CTL_SETCLE     3
+    /* Deselect the command latch by setting CLE to low */
+    #define NAND_CTL_CLRCLE     4
+    /* Select the address latch by setting ALE to high */
+    #define NAND_CTL_SETALE     5
+    /* Deselect the address latch by setting ALE to low */
+    #define NAND_CTL_CLRALE     6
+    /* Set write protection by setting WP to high. Not used! */
+    #define NAND_CTL_SETWP      7
+    /* Clear write protection by setting WP to low. Not used! */
+    #define NAND_CTL_CLRWP      8
+
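+A board driver usually implements these requests in a small switch
+statement. The sketch below assumes the legacy ``hwcontrol`` callback
+signature and a made-up ``board_set_gpio()`` accessor for the control
+lines::
+
+    static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+    {
+        switch (cmd) {
+        case NAND_CTL_SETCLE: board_set_gpio(GPIO_NAND_CLE, 1); break;
+        case NAND_CTL_CLRCLE: board_set_gpio(GPIO_NAND_CLE, 0); break;
+        case NAND_CTL_SETALE: board_set_gpio(GPIO_NAND_ALE, 1); break;
+        case NAND_CTL_CLRALE: board_set_gpio(GPIO_NAND_ALE, 0); break;
+        /* nCE is active low: selecting the chip means driving it low */
+        case NAND_CTL_SETNCE: board_set_gpio(GPIO_NAND_NCE, 0); break;
+        case NAND_CTL_CLRNCE: board_set_gpio(GPIO_NAND_NCE, 1); break;
+        }
+    }
+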
+
+Bad block table related constants
+---------------------------------
+
+These constants describe the options used for bad block table
+descriptors::
+
+    /* Options for the bad block table descriptors */
+
+    /* The number of bits used per block in the bbt on the device */
+    #define NAND_BBT_NRBITS_MSK 0x0000000F
+    #define NAND_BBT_1BIT       0x00000001
+    #define NAND_BBT_2BIT       0x00000002
+    #define NAND_BBT_4BIT       0x00000004
+    #define NAND_BBT_8BIT       0x00000008
+    /* The bad block table is in the last good block of the device */
+    #define NAND_BBT_LASTBLOCK  0x00000010
+    /* The bbt is at the given page, else we must scan for the bbt */
+    #define NAND_BBT_ABSPAGE    0x00000020
+    /* bbt is stored per chip on multichip devices */
+    #define NAND_BBT_PERCHIP    0x00000080
+    /* bbt has a version counter at offset veroffs */
+    #define NAND_BBT_VERSION    0x00000100
+    /* Create a bbt if none exists */
+    #define NAND_BBT_CREATE     0x00000200
+    /* Write bbt if necessary */
+    #define NAND_BBT_WRITE      0x00001000
+    /* Read and write back block contents when writing bbt */
+    #define NAND_BBT_SAVECONTENT    0x00002000
+
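+As an illustration, a bad block table descriptor similar to the default
+one used by the bad block table code combines several of these options;
+the offsets and pattern are given here only as an example::
+
+    static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+
+    static struct nand_bbt_descr bbt_main_descr = {
+        .options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+                     | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+        .offs      = 8,
+        .len       = 4,
+        .veroffs   = 12,
+        .maxblocks = 4,
+        .pattern   = bbt_pattern
+    };
+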
+
+Structures
+==========
+
+This chapter contains the autogenerated documentation of the structures
+which are used in the NAND driver and might be relevant for a driver
+developer. Each struct member has a short description which is marked
+with an [XXX] identifier. See the chapter "Documentation hints" for an
+explanation.
+
+.. kernel-doc:: include/linux/mtd/nand.h
+   :internal:
+
+Public Functions Provided
+=========================
+
+This chapter contains the autogenerated documentation of the NAND kernel
+API functions which are exported. Each function has a short description
+which is marked with an [XXX] identifier. See the chapter "Documentation
+hints" for an explanation.
+
+.. kernel-doc:: drivers/mtd/nand/nand_base.c
+   :export:
+
+.. kernel-doc:: drivers/mtd/nand/nand_ecc.c
+   :export:
+
+Internal Functions Provided
+===========================
+
+This chapter contains the autogenerated documentation of the NAND driver
+internal functions. Each function has a short description which is
+marked with an [XXX] identifier. See the chapter "Documentation hints"
+for an explanation. The functions marked with [DEFAULT] might be
+relevant for a board driver developer.
+
+.. kernel-doc:: drivers/mtd/nand/nand_base.c
+   :internal:
+
+.. kernel-doc:: drivers/mtd/nand/nand_bbt.c
+   :internal:
+
+Credits
+=======
+
+The following people have contributed to the NAND driver:
+
+1. Steven J. Hill\ sjhill@realitydiluted.com
+
+2. David Woodhouse\ dwmw2@infradead.org
+
+3. Thomas Gleixner\ tglx@linutronix.de
+
+A lot of users have provided bugfixes, improvements and helping hands
+for testing. Thanks a lot.
+
+The following people have contributed to this document:
+
+1. Thomas Gleixner\ tglx@linutronix.de
diff --git a/Documentation/driver-api/rapidio.rst b/Documentation/driver-api/rapidio.rst
new file mode 100644
index 0000000..71ff658
--- /dev/null
+++ b/Documentation/driver-api/rapidio.rst
@@ -0,0 +1,107 @@
+=======================
+RapidIO Subsystem Guide
+=======================
+
+:Author: Matt Porter
+
+Introduction
+============
+
+RapidIO is a high speed switched fabric interconnect with features aimed
+at the embedded market. RapidIO provides support for memory-mapped I/O
+as well as message-based transactions over the switched fabric network.
+RapidIO has a standardized discovery mechanism not unlike the PCI bus
+standard that allows simple detection of devices in a network.
+
+This documentation is provided for developers intending to support
+RapidIO on new architectures, write new drivers, or to understand the
+subsystem internals.
+
+Known Bugs and Limitations
+==========================
+
+Bugs
+----
+
+None. ;)
+
+Limitations
+-----------
+
+1. Access/management of RapidIO memory regions is not supported
+
+2. Multiple host enumeration is not supported
+
+RapidIO driver interface
+========================
+
+Drivers are provided a set of calls in order to interface with the
+subsystem to gather info on devices, request/map memory region
+resources, and manage mailboxes/doorbells.
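+
+As a minimal, hypothetical illustration, a RapidIO device driver hooks
+into the subsystem by registering a ``struct rio_driver`` with an id
+table and a probe routine; the device and vendor ids below are made up::
+
+    static struct rio_device_id example_id_table[] = {
+        { .did = 0x1234, .vid = 0x00ab },    /* hypothetical ids */
+        { 0, }                               /* terminating entry */
+    };
+
+    static int example_probe(struct rio_dev *rdev,
+                             const struct rio_device_id *id)
+    {
+        dev_info(&rdev->dev, "example RapidIO device found\n");
+        return 0;
+    }
+
+    static struct rio_driver example_driver = {
+        .name     = "rio_example",
+        .id_table = example_id_table,
+        .probe    = example_probe,
+    };
+
+    /* registered from module init with rio_register_driver(&example_driver) */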
+
+Functions
+---------
+
+.. kernel-doc:: include/linux/rio_drv.h
+   :internal:
+
+.. kernel-doc:: drivers/rapidio/rio-driver.c
+   :export:
+
+.. kernel-doc:: drivers/rapidio/rio.c
+   :export:
+
+Internals
+=========
+
+This chapter contains the autogenerated documentation of the RapidIO
+subsystem.
+
+Structures
+----------
+
+.. kernel-doc:: include/linux/rio.h
+   :internal:
+
+Enumeration and Discovery
+-------------------------
+
+.. kernel-doc:: drivers/rapidio/rio-scan.c
+   :internal:
+
+Driver functionality
+--------------------
+
+.. kernel-doc:: drivers/rapidio/rio.c
+   :internal:
+
+.. kernel-doc:: drivers/rapidio/rio-access.c
+   :internal:
+
+Device model support
+--------------------
+
+.. kernel-doc:: drivers/rapidio/rio-driver.c
+   :internal:
+
+PPC32 support
+-------------
+
+.. kernel-doc:: arch/powerpc/sysdev/fsl_rio.c
+   :internal:
+
+Credits
+=======
+
+The following people have contributed to the RapidIO subsystem directly
+or indirectly:
+
+1. Matt Porter\ mporter@kernel.crashing.org
+
+2. Randy Vinson\ rvinson@mvista.com
+
+3. Dan Malek\ dan@embeddedalley.com
+
+The following people have contributed to this document:
+
+1. Matt Porter\ mporter@kernel.crashing.org
diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst
new file mode 100644
index 0000000..7060da1
--- /dev/null
+++ b/Documentation/driver-api/s390-drivers.rst
@@ -0,0 +1,111 @@
+===================================
+Writing s390 channel device drivers
+===================================
+
+:Author: Cornelia Huck
+
+Introduction
+============
+
+This document describes the interfaces available for device drivers that
+drive s390 based channel attached I/O devices. This includes interfaces
+for interaction with the hardware and interfaces for interacting with
+the common driver core. Those interfaces are provided by the s390 common
+I/O layer.
+
+The document assumes a familiarity with the technical terms associated
+with the s390 channel I/O architecture. For a description of this
+architecture, please refer to the "z/Architecture: Principles of
+Operation", IBM publication no. SA22-7832.
+
+While most I/O devices on a s390 system are typically driven through the
+channel I/O mechanism described here, there are various other methods
+(like the diag interface). These are out of the scope of this document.
+
+Some additional information can also be found in the kernel source under
+Documentation/s390/driver-model.txt.
+
+The ccw bus
+===========
+
+The ccw bus typically contains the majority of devices available to a
+s390 system. Named after the channel command word (ccw), the basic
+command structure used to address its devices, the ccw bus contains
+so-called channel attached devices. They are addressed via I/O
+subchannels, visible on the css bus. A device driver for
+channel-attached devices, however, will never interact with the
+subchannel directly, but only via the I/O device on the ccw bus, the ccw
+device.
+
+I/O functions for channel-attached devices
+------------------------------------------
+
+Some hardware structures have been translated into C structures for use
+by the common I/O layer and device drivers. For more information on the
+hardware structures represented here, please consult the Principles of
+Operation.
+
+.. kernel-doc:: arch/s390/include/asm/cio.h
+   :internal:
+
+ccw devices
+-----------
+
+Devices that want to initiate channel I/O need to attach to the ccw bus.
+Interaction with the driver core is done via the common I/O layer, which
+provides the abstractions of ccw devices and ccw device drivers.
+
+The functions that initiate or terminate channel I/O all act upon a ccw
+device structure. Device drivers must not bypass those functions or
+strange side effects may happen.
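+
+A minimal, hypothetical ccw device driver therefore looks roughly like
+the following; the control unit type 0x1234 is made up::
+
+    static struct ccw_device_id example_ids[] = {
+        { CCW_DEVICE(0x1234, 0) },    /* hypothetical control unit type */
+        { /* end of list */ },
+    };
+    MODULE_DEVICE_TABLE(ccw, example_ids);
+
+    static int example_probe(struct ccw_device *cdev)
+    {
+        dev_info(&cdev->dev, "example ccw device probed\n");
+        return 0;
+    }
+
+    static struct ccw_driver example_driver = {
+        .driver = {
+            .name  = "ccw_example",
+            .owner = THIS_MODULE,
+        },
+        .ids   = example_ids,
+        .probe = example_probe,
+    };
+
+    /* registered from module init with ccw_driver_register(&example_driver) */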
+
+.. kernel-doc:: arch/s390/include/asm/ccwdev.h
+   :internal:
+
+.. kernel-doc:: drivers/s390/cio/device.c
+   :export:
+
+.. kernel-doc:: drivers/s390/cio/device_ops.c
+   :export:
+
+The channel-measurement facility
+--------------------------------
+
+The channel-measurement facility provides a means to collect measurement
+data which is made available by the channel subsystem for each channel
+attached device.
+
+.. kernel-doc:: arch/s390/include/asm/cmb.h
+   :internal:
+
+.. kernel-doc:: drivers/s390/cio/cmf.c
+   :export:
+
+The ccwgroup bus
+================
+
+The ccwgroup bus only contains artificial devices, created by the user.
+Many networking devices (e.g. qeth) are in fact composed of several ccw
+devices (like read, write and data channel for qeth). The ccwgroup bus
+provides a mechanism to create a meta-device which contains those ccw
+devices as slave devices and can be associated with the netdevice.
+
+ccw group devices
+-----------------
+
+.. kernel-doc:: arch/s390/include/asm/ccwgroup.h
+   :internal:
+
+.. kernel-doc:: drivers/s390/cio/ccwgroup.c
+   :export:
+
+Generic interfaces
+==================
+
+Some interfaces are available to other drivers that do not necessarily
+have anything to do with the busses described above, but still are
+indirectly using basic infrastructure in the common I/O layer. One
+example is the support for adapter interrupts.
+
+.. kernel-doc:: drivers/s390/cio/airq.c
+   :export:
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
new file mode 100644
index 0000000..859fb67
--- /dev/null
+++ b/Documentation/driver-api/scsi.rst
@@ -0,0 +1,344 @@
+=====================
+SCSI Interfaces Guide
+=====================
+
+:Author: James Bottomley
+:Author: Rob Landley
+
+Introduction
+============
+
+Protocol vs bus
+---------------
+
+Once upon a time, the Small Computer Systems Interface defined both a
+parallel I/O bus and a data protocol to connect a wide variety of
+peripherals (disk drives, tape drives, modems, printers, scanners,
+optical drives, test equipment, and medical devices) to a host computer.
+
+Although the old parallel (fast/wide/ultra) SCSI bus has largely fallen
+out of use, the SCSI command set is more widely used than ever to
+communicate with devices over a number of different busses.
+
+The `SCSI protocol <http://www.t10.org/scsi-3.htm>`__ is a big-endian
+peer-to-peer packet based protocol. SCSI commands are 6, 10, 12, or 16
+bytes long, often followed by an associated data payload.
+
+SCSI commands can be transported over just about any kind of bus, and
+are the default protocol for storage devices attached to USB, SATA, SAS,
+Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also
+commonly exchanged over Infiniband,
+`I2O <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
+(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
+ports <http://cyberelk.net/tim/parport/parscsi.html>`__.
+
+Design of the Linux SCSI subsystem
+----------------------------------
+
+The SCSI subsystem uses a three layer design, with upper, mid, and low
+layers. Every operation involving the SCSI subsystem (such as reading a
+sector from a disk) uses one driver at each of the 3 levels: one upper
+layer driver, one lower layer driver, and the SCSI midlayer.
+
+The SCSI upper layer provides the interface between userspace and the
+kernel, in the form of block and char device nodes for I/O and ioctl().
+The SCSI lower layer contains drivers for specific hardware devices.
+
+In between is the SCSI mid-layer, analogous to a network routing layer
+such as the IPv4 stack. The SCSI mid-layer routes a packet based data
+protocol between the upper layer's /dev nodes and the corresponding
+devices in the lower layer. It manages command queues, provides error
+handling and power management functions, and responds to ioctl()
+requests.
+
+SCSI upper layer
+================
+
+The upper layer supports the user-kernel interface by providing device
+nodes.
+
+sd (SCSI Disk)
+--------------
+
+sd (sd_mod.o)
+
+sr (SCSI CD-ROM)
+----------------
+
+sr (sr_mod.o)
+
+st (SCSI Tape)
+--------------
+
+st (st.o)
+
+sg (SCSI Generic)
+-----------------
+
+sg (sg.o)
+
+ch (SCSI Media Changer)
+-----------------------
+
+ch (ch.c)
+
+SCSI mid layer
+==============
+
+SCSI midlayer implementation
+----------------------------
+
+include/scsi/scsi_device.h
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/scsi/scsi_device.h
+   :internal:
+
+drivers/scsi/scsi.c
+~~~~~~~~~~~~~~~~~~~
+
+Main file for the SCSI midlayer.
+
+.. kernel-doc:: drivers/scsi/scsi.c
+   :export:
+
+drivers/scsi/scsicam.c
+~~~~~~~~~~~~~~~~~~~~~~
+
+`SCSI Common Access
+Method <http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf>`__ support
+functions, for use with HDIO_GETGEO, etc.
+
+.. kernel-doc:: drivers/scsi/scsicam.c
+   :export:
+
+drivers/scsi/scsi_error.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Common SCSI error/timeout handling routines.
+
+.. kernel-doc:: drivers/scsi/scsi_error.c
+   :export:
+
+drivers/scsi/scsi_devinfo.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Manage scsi_dev_info_list, which tracks blacklisted and whitelisted
+devices.
+
+.. kernel-doc:: drivers/scsi/scsi_devinfo.c
+   :internal:
+
+drivers/scsi/scsi_ioctl.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Handle ioctl() calls for SCSI devices.
+
+.. kernel-doc:: drivers/scsi/scsi_ioctl.c
+   :export:
+
+drivers/scsi/scsi_lib.c
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+SCSI queuing library.
+
+.. kernel-doc:: drivers/scsi/scsi_lib.c
+   :export:
+
+drivers/scsi/scsi_lib_dma.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SCSI library functions depending on DMA (map and unmap scatter-gather
+lists).
+
+.. kernel-doc:: drivers/scsi/scsi_lib_dma.c
+   :export:
+
+drivers/scsi/scsi_module.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_module.c contains legacy support for
+old-style host templates. It should never be used by any new driver.
+
+drivers/scsi/scsi_proc.c
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions in this file provide an interface between the PROC file
+system and the SCSI device drivers. It is mainly used for debugging,
+statistics and to pass information directly to the lowlevel driver, i.e.
+plumbing to manage /proc/scsi/\*
+
+.. kernel-doc:: drivers/scsi/scsi_proc.c
+   :internal:
+
+drivers/scsi/scsi_netlink.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Infrastructure to provide async events from transports to userspace via
+netlink, using a single NETLINK_SCSITRANSPORT protocol for all
+transports. See `the original patch
+submission <http://marc.info/?l=linux-scsi&m=115507374832500&w=2>`__ for
+more details.
+
+.. kernel-doc:: drivers/scsi/scsi_netlink.c
+   :internal:
+
+drivers/scsi/scsi_scan.c
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Scan a host to determine which (if any) devices are attached. The
+general scanning/probing algorithm is as follows; exceptions are made to
+it depending on device-specific flags, compilation options, and global
+variable (boot or module load time) settings. A specific LUN is scanned
+via an INQUIRY command; if the LUN has a device attached, a scsi_device
+is allocated and setup for it. For every id of every channel on the
+given host, start by scanning LUN 0. Skip hosts that don't respond at
+all to a scan of LUN 0. Otherwise, if LUN 0 has a device attached,
+allocate and setup a scsi_device for it. If target is SCSI-3 or up,
+issue a REPORT LUN, and scan all of the LUNs returned by the REPORT LUN;
+else, sequentially scan LUNs up until some maximum is reached, or a LUN
+is seen that cannot have a device attached to it.
+
+.. kernel-doc:: drivers/scsi/scsi_scan.c
+   :internal:
+
+drivers/scsi/scsi_sysctl.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up the sysctl entry: "/dev/scsi/logging_level"
+(DEV_SCSI_LOGGING_LEVEL) which sets/returns scsi_logging_level.
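+
+For example, the current level can be inspected and changed through the
+corresponding procfs file (the value is a logging bitmask; 0 disables
+SCSI logging)::
+
+    cat /proc/sys/dev/scsi/logging_level
+    echo 0 > /proc/sys/dev/scsi/logging_level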
+
+drivers/scsi/scsi_sysfs.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SCSI sysfs interface routines.
+
+.. kernel-doc:: drivers/scsi/scsi_sysfs.c
+   :export:
+
+drivers/scsi/hosts.c
+~~~~~~~~~~~~~~~~~~~~
+
+mid to lowlevel SCSI driver interface
+
+.. kernel-doc:: drivers/scsi/hosts.c
+   :export:
+
+drivers/scsi/constants.c
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+mid to lowlevel SCSI driver interface
+
+.. kernel-doc:: drivers/scsi/constants.c
+   :export:
+
+Transport classes
+-----------------
+
+Transport classes are service libraries for drivers in the SCSI lower
+layer, which expose transport attributes in sysfs.
+
+Fibre Channel transport
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_transport_fc.c defines transport attributes
+for Fibre Channel.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_fc.c
+   :export:
+
+iSCSI transport class
+~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_transport_iscsi.c defines transport
+attributes for the iSCSI class, which sends SCSI packets over TCP/IP
+connections.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_iscsi.c
+   :export:
+
+Serial Attached SCSI (SAS) transport class
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_transport_sas.c defines transport
+attributes for Serial Attached SCSI, a variant of SATA aimed at large
+high-end systems.
+
+The SAS transport class contains common code to deal with SAS HBAs, an
+approximated representation of SAS topologies in the driver model, and
+various sysfs attributes to expose these topologies and management
+interfaces to userspace.
+
+In addition to the basic SCSI core objects this transport class
+introduces two additional intermediate objects: The SAS PHY as
+represented by struct sas_phy defines an "outgoing" PHY on a SAS HBA or
+Expander, and the SAS remote PHY represented by struct sas_rphy defines
+an "incoming" PHY on a SAS Expander or end device. Note that this is
+purely a software concept, the underlying hardware for a PHY and a
+remote PHY is the exactly the same.
+
+There is no concept of a SAS port in this code, users can see what PHYs
+form a wide port based on the port_identifier attribute, which is the
+same for all PHYs in a port.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_sas.c
+   :export:
+
+SATA transport class
+~~~~~~~~~~~~~~~~~~~~
+
+The SATA transport is handled by libata, which has its own book of
+documentation in this directory.
+
+Parallel SCSI (SPI) transport class
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_transport_spi.c defines transport
+attributes for traditional (fast/wide/ultra) SCSI busses.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_spi.c
+   :export:
+
+SCSI RDMA (SRP) transport class
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_transport_srp.c defines transport
+attributes for SCSI over Remote Direct Memory Access.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_srp.c
+   :export:
+
+SCSI lower layer
+================
+
+Host Bus Adapter transport types
+--------------------------------
+
+Many modern device controllers use the SCSI command set as a protocol to
+communicate with their devices through many different types of physical
+connections.
+
+In SCSI language a bus capable of carrying SCSI commands is called a
+"transport", and a controller connecting to such a bus is called a "host
+bus adapter" (HBA).
+
+Debug transport
+~~~~~~~~~~~~~~~
+
+The file drivers/scsi/scsi_debug.c simulates a host adapter with a
+variable number of disks (or disk-like devices) attached, sharing a
+common amount of RAM. It does a lot of checking to make sure that we are
+not getting blocks mixed up, and panics the kernel if anything out of
+the ordinary is seen.
+
+To be more realistic, the simulated devices have the transport
+attributes of SAS disks.
+
+For documentation see http://sg.danny.cz/sg/sdebug26.html
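+
+As a quick, illustrative way to exercise the SCSI stack without real
+hardware, the module can be loaded with a few of its parameters (the
+values below are arbitrary)::
+
+    modprobe scsi_debug dev_size_mb=16 num_tgts=2 max_luns=2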
+
+todo
+~~~~
+
+Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
+FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
+netlink...
diff --git a/Documentation/driver-api/usb/dwc3.rst b/Documentation/driver-api/usb/dwc3.rst
new file mode 100644
index 0000000..c3dc84a50
--- /dev/null
+++ b/Documentation/driver-api/usb/dwc3.rst
@@ -0,0 +1,712 @@
+===============================================================
+Synopsys DesignWare Core SuperSpeed USB 3.0 Controller
+===============================================================
+
+:Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+:Date: April 2017
+
+Introduction
+============
+
+The *Synopsys DesignWare Core SuperSpeed USB 3.0 Controller*
+(hereinafter referred to as *DWC3*) is a USB SuperSpeed compliant
+controller which can be configured in one of 4 ways:
+
+	1. Peripheral-only configuration
+	2. Host-only configuration
+	3. Dual-Role configuration
+	4. Hub configuration
+
+Linux currently supports several versions of this controller. In all
+likelihood, the version in your SoC is already supported. At the time
+of this writing, known tested versions range from 2.02a to 3.10a. As a
+rule of thumb, anything above 2.02a should work reliably well.
+
+Currently, we have many known users for this driver. In alphabetical
+order:
+
+	1. Cavium
+	2. Intel Corporation
+	3. Qualcomm
+	4. Rockchip
+	5. ST
+	6. Samsung
+	7. Texas Instruments
+	8. Xilinx
+
+Summary of Features
+======================
+
+For details about features supported by your version of DWC3, consult
+your IP team and/or *Synopsys DesignWare Core SuperSpeed USB 3.0
+Controller Databook*. Following is a list of features supported by the
+driver at the time of this writing:
+
+	1. Up to 16 bidirectional endpoints (including the control
+	   pipe - ep0)
+	2. Flexible endpoint configuration
+	3. Simultaneous IN and OUT transfer support
+	4. Scatter-list support
+	5. Up to 256 TRBs [#trb]_ per endpoint
+	6. Support for all transfer types (*Control*, *Bulk*,
+	   *Interrupt*, and *Isochronous*)
+	7. SuperSpeed Bulk Streams
+	8. Link Power Management
+	9. Trace Events for debugging
+	10. DebugFS [#debugfs]_ interface
+
+These features have all been exercised with many of the **in-tree**
+gadget drivers. We have verified both *ConfigFS* [#configfs]_ and
+legacy gadget drivers.
+
+Driver Design
+==============
+
+The DWC3 driver lives in the *drivers/usb/dwc3/* directory. All files
+related to this driver are in this one directory. This makes it easy
+for newcomers to read the code and understand how it behaves.
+
+Because of DWC3's configuration flexibility, the driver is a little
+complex in some places but it should be rather straightforward to
+understand.
+
+The biggest part of the driver refers to the Gadget API.
+
+Known Limitations
+===================
+
+Like any other HW, DWC3 has its own set of limitations. To avoid
+constant questions about such problems, we decided to document them
+here and have a single location to which we could point users.
+
+OUT Transfer Size Requirements
+---------------------------------
+
+According to the Synopsys Databook, all OUT transfer TRBs [#trb]_ must
+have their *size* field set to a value which is evenly divisible by
+the endpoint's *wMaxPacketSize*. This means that *e.g.* in order to
+receive a Mass Storage *CBW* [#cbw]_, req->length must either be set
+to a value that's divisible by *wMaxPacketSize* (1024 on SuperSpeed,
+512 on HighSpeed, etc), or the DWC3 driver must add a chained TRB
+pointing to a throw-away buffer for the remaining length. Without
+this, OUT transfers will **NOT** start.
+
+Note that as of this writing, this won't be a problem because DWC3 is
+fully capable of appending a chained TRB for the remaining length and
+completely hiding this detail from the gadget driver. It's still worth
+mentioning because this seems to be the largest source of queries
+about DWC3 and *non-working transfers*.
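+
+If a gadget driver wants to guarantee this alignment itself, one common
+(and here purely illustrative) pattern in its OUT request setup path is
+to round the buffer length up to the endpoint's maxpacket size before
+queueing::
+
+  /* sketch only: hand-rolled equivalent of the quirk_ep_out_aligned_size
+   * handling; len is the payload size the function driver actually wants */
+  req->length = roundup(len, ep->maxpacket);
+  req->buf = kmalloc(req->length, GFP_KERNEL);
+  if (!req->buf)
+          return -ENOMEM;
+
+  ret = usb_ep_queue(ep, req, GFP_ATOMIC);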
+
+TRB Ring Size Limitation
+-------------------------
+
+We, currently, have a hard limit of 256 TRBs [#trb]_ per endpoint,
+with the last TRB being a Link TRB [#link_trb]_ pointing back to the
+first. This limit is arbitrary but it has the benefit of adding up to
+exactly 4096 bytes, or 1 Page.
+
+The DWC3 driver will try its best to cope with more than 255 requests and,
+for the most part, it should work normally. However this is not
+something that has been exercised very frequently. If you experience
+any problems, see section **Reporting Bugs** below.
+
+Reporting Bugs
+================
+
+Whenever you encounter a problem with DWC3, first and foremost you
+should make sure that:
+
+	1. You're running latest tag from `Linus' tree`_
+	2. You can reproduce the error without any out-of-tree changes
+	   to DWC3
+	3. You have checked that it's not a fault on the host machine
+
+After all these are verified, then here's how to capture enough
+information so we can be of any help to you.
+
+Required Information
+---------------------
+
+DWC3 relies exclusively on Trace Events for debugging. Everything is
+exposed there, with some extra bits being exposed to DebugFS
+[#debugfs]_.
+
+In order to capture DWC3's Trace Events you should run the following
+commands **before** plugging the USB cable to a host machine:
+
+.. code-block:: sh
+
+		 # mkdir -p /d
+		 # mkdir -p /t
+		 # mount -t debugfs none /d
+		 # mount -t tracefs none /t
+		 # echo 81920 > /t/buffer_size_kb
+		 # echo 1 > /t/events/dwc3/enable
+
+After this is done, you can connect your USB cable and reproduce the
+problem. As soon as the fault is reproduced, make a copy of files
+``trace`` and ``regdump``, like so:
+
+.. code-block:: sh
+
+		# cp /t/trace /root/trace.txt
+		# cat /d/*dwc3*/regdump > /root/regdump.txt
+
+Make sure to compress ``trace.txt`` and ``regdump.txt`` in a tarball
+and email it to `me`_ with `linux-usb`_ in Cc. If you want to be extra
+sure that I'll help you, write your subject line in the following
+format:
+
+	**[BUG REPORT] usb: dwc3: Bug while doing XYZ**
+
+In the email body, make sure to detail what you were doing, which gadget
+driver you were using, how to reproduce the problem, what SoC you're
+using, and which OS (and its version) was running on the host machine.
+
+With all this information, we should be able to understand what's
+going on and be helpful to you.
+
+Debugging
+===========
+
+First and foremost a disclaimer::
+
+  DISCLAIMER: The information available on DebugFS and/or TraceFS can
+  change at any time at any Major Linux Kernel Release. If writing
+  scripts, do **NOT** assume information to be available in the
+  current format.
+
+With that out of the way, let's carry on.
+
+If you're willing to debug your own problem, you deserve a round of
+applause :-)
+
+Anyway, there isn't much to say here other than Trace Events will be
+really helpful in figuring out issues with DWC3. Also, access to
+Synopsys Databook will be **really** valuable in this case.
+
+A USB Sniffer can be helpful at times but it's not entirely required;
+there's a lot that can be understood without looking at the wire.
+
+Feel free to email `me`_ and Cc `linux-usb`_ if you need any help.
+
+``DebugFS``
+-------------
+
+``DebugFS`` is very good for gathering snapshots of what's going on
+with DWC3 and/or any endpoint.
+
+On DWC3's ``DebugFS`` directory, you will find the following files and
+directories:
+
+``ep[0..15]{in,out}/``
+``link_state``
+``regdump``
+``testmode``
+
+``link_state``
+``````````````
+
+When read, ``link_state`` will print out one of ``U0``, ``U1``,
+``U2``, ``U3``, ``SS.Disabled``, ``RX.Detect``, ``SS.Inactive``,
+``Polling``, ``Recovery``, ``Hot Reset``, ``Compliance``,
+``Loopback``, ``Reset``, ``Resume`` or ``UNKNOWN link state``.
+
+This file can also be written to in order to force link to one of the
+states above.
+
+``regdump``
+`````````````
+
+File name is self-explanatory. When read, ``regdump`` will print out a
+register dump of DWC3. Note that this file can be grepped to find the
+information you want.
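+
+For example, assuming register names that follow the Databook (such as
+``GCTL``), and the debugfs mount point from section **Reporting Bugs**:
+
+.. code-block:: sh
+
+		# grep GCTL /d/*dwc3*/regdump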
+
+``testmode``
+``````````````
+
+When read, ``testmode`` will print out a name of one of the specified
+USB 2.0 Testmodes (``test_j``, ``test_k``, ``test_se0_nak``,
+``test_packet``, ``test_force_enable``) or the string ``no test`` in
+case no tests are currently being executed.
+
+In order to start any of these test modes, the same strings can be
+written to the file and DWC3 will enter the requested test mode.
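+
+For example, assuming the debugfs mount point from section **Reporting
+Bugs**, entering the packet test mode and reading the result back could
+be done like this:
+
+.. code-block:: sh
+
+		# echo test_packet > /d/*dwc3*/testmode
+		# cat /d/*dwc3*/testmode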
+
+
+``ep[0..15]{in,out}``
+``````````````````````
+
+For each endpoint we expose one directory following the naming
+convention ``ep$num$dir`` *(ep0in, ep0out, ep1in, ...)*. Inside each
+of these directories you will find the following files:
+
+``descriptor_fetch_queue``
+``event_queue``
+``rx_fifo_queue``
+``rx_info_queue``
+``rx_request_queue``
+``transfer_type``
+``trb_ring``
+``tx_fifo_queue``
+``tx_request_queue``
+
+With access to Synopsys Databook, you can decode the information on
+them.
+
+``transfer_type``
+~~~~~~~~~~~~~~~~~~
+
+When read, ``transfer_type`` will print out one of ``control``,
+``bulk``, ``interrupt`` or ``isochronous`` depending on what the
+endpoint descriptor says. If the endpoint hasn't been enabled yet, it
+will print ``--``.
+
+``trb_ring``
+~~~~~~~~~~~~~
+
+When read, ``trb_ring`` will print out details about all TRBs on the
+ring. It will also tell you where our enqueue and dequeue pointers are
+located in the ring:
+
+.. code-block:: sh
+   
+		buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c75c000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c75c000,481,normal,1,0,1,0,0,0         
+		000000002c784000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c784000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c784000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c784000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c75c000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c75c000,481,normal,1,0,1,0,0,0         
+		000000002c780000,481,normal,1,0,1,0,0,0         
+		000000002c784000,481,normal,1,0,1,0,0,0         
+		000000002c788000,481,normal,1,0,1,0,0,0         
+		000000002c78c000,481,normal,1,0,1,0,0,0         
+		000000002c790000,481,normal,1,0,1,0,0,0         
+		000000002c754000,481,normal,1,0,1,0,0,0         
+		000000002c758000,481,normal,1,0,1,0,0,0         
+		000000002c75c000,512,normal,1,0,1,0,0,1        D
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0       E 
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		0000000000000000,0,UNKNOWN,0,0,0,0,0,0         
+		00000000381ab000,0,link,0,0,0,0,0,1
+
+
+Trace Events
+-------------
+
+DWC3 also provides several trace events which help us gather
+information about the behavior of the driver during runtime.
+
+In order to use these events, you must enable ``CONFIG_FTRACE`` in
+your kernel config.
+
+For details about how to enable DWC3 events, see section **Reporting
+Bugs**.
+
+The following subsections will give details about each Event Class and
+each Event defined by DWC3.
+
+MMIO
+```````
+
+It is sometimes useful to look at every MMIO access when looking for
+bugs. Because of that, DWC3 offers two Trace Events (one for
+dwc3_readl() and one for dwc3_writel()). ``TP_printk`` follows::
+
+  TP_printk("addr %p value %08x", __entry->base + __entry->offset,
+  		__entry->value)
+
+Interrupt Events
+````````````````
+
+Every IRQ event can be logged and decoded into a human readable
+string. Because every event will be different, we don't give an
+example other than the ``TP_printk`` format used::
+
+  TP_printk("event (%08x): %s", __entry->event,
+  		dwc3_decode_event(__entry->event, __entry->ep0state))
+
+Control Request
+`````````````````
+
+Every USB Control Request can be logged to the trace buffer. The
+output format is::
+
+  TP_printk("%s", dwc3_decode_ctrl(__entry->bRequestType,
+  				__entry->bRequest, __entry->wValue,
+  				__entry->wIndex, __entry->wLength)
+  )
+
+Note that Standard Control Requests will be decoded into
+human-readable strings with their respective arguments. Class and
+Vendor requests will be printed out as a sequence of 8 bytes in hex
+format.
+
+Lifetime of a ``struct usb_request``
+```````````````````````````````````````
+
+The entire lifetime of a ``struct usb_request`` can be tracked on the
+trace buffer. We have one event for each of allocation, free,
+queueing, dequeueing, and giveback. Output format is::
+
+  TP_printk("%s: req %p length %u/%u %s%s%s ==> %d",
+  	__get_str(name), __entry->req, __entry->actual, __entry->length,
+  	__entry->zero ? "Z" : "z",
+  	__entry->short_not_ok ? "S" : "s",
+  	__entry->no_interrupt ? "i" : "I",
+  	__entry->status
+  )
+
+Generic Commands
+````````````````````
+
+We can log and decode every Generic Command with its completion
+code. Format is::
+
+  TP_printk("cmd '%s' [%x] param %08x --> status: %s",
+  	dwc3_gadget_generic_cmd_string(__entry->cmd),
+  	__entry->cmd, __entry->param,
+  	dwc3_gadget_generic_cmd_status_string(__entry->status)
+  )
+
+Endpoint Commands
+````````````````````
+
+Endpoints commands can also be logged together with completion
+code. Format is::
+
+  TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x --> status: %s",
+  	__get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
+  	__entry->cmd, __entry->param0,
+  	__entry->param1, __entry->param2,
+  	dwc3_ep_cmd_status_string(__entry->cmd_status)
+  )
+
+Lifetime of a ``TRB``
+``````````````````````
+
+A ``TRB`` Lifetime is simple. We are either preparing a ``TRB`` or
+completing it. With these two events, we can see how a ``TRB`` changes
+over time. Format is::
+
+  TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
+  	__get_str(name), __entry->queued, __entry->allocated,
+  	__entry->trb, __entry->bph, __entry->bpl,
+  	({char *s;
+  	int pcm = ((__entry->size >> 24) & 3) + 1;
+  	switch (__entry->type) {
+  	case USB_ENDPOINT_XFER_INT:
+  	case USB_ENDPOINT_XFER_ISOC:
+  		switch (pcm) {
+  		case 1:
+  			s = "1x ";
+  			break;
+  		case 2:
+  			s = "2x ";
+  			break;
+  		case 3:
+  			s = "3x ";
+  			break;
+  		}
+  	default:
+  		s = "";
+  	} s; }),
+  	DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
+  	__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
+  	__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
+  	__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
+  	__entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
+  	__entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
+  	__entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
+      dwc3_trb_type_string(DWC3_TRBCTL_TYPE(__entry->ctrl))
+  )  
+
+Lifetime of an Endpoint
+```````````````````````
+
+An endpoint's lifetime is summarized with enable and disable
+operations, both of which can be traced. Format is::
+
+  TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
+  	__get_str(name), __entry->maxpacket,
+  	__entry->maxpacket_limit, __entry->max_streams,
+  	__entry->maxburst, __entry->trb_enqueue,
+  	__entry->trb_dequeue,
+  	__entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+  	__entry->flags & DWC3_EP_STALL ? 'S' : 's',
+  	__entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+  	__entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
+  	__entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+  	__entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
+  	__entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
+  	__entry->direction ? '<' : '>'
+  )
+
+
+Structures, Methods and Definitions
+====================================
+
+.. kernel-doc:: drivers/usb/dwc3/core.h
+   :doc: main data structures
+   :internal:
+
+.. kernel-doc:: drivers/usb/dwc3/gadget.h
+   :doc: gadget-only helpers
+   :internal:
+
+.. kernel-doc:: drivers/usb/dwc3/gadget.c
+   :doc: gadget-side implementation
+   :internal:
+
+.. kernel-doc:: drivers/usb/dwc3/core.c
+   :doc: core driver (probe, PM, etc)
+   :internal:
+   
+.. [#trb] Transfer Request Block
+.. [#link_trb] Transfer Request Block pointing to another Transfer
+	       Request Block.
+.. [#debugfs] The Debug File System
+.. [#configfs] The Config File System
+.. [#cbw] Command Block Wrapper
+.. _Linus' tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/
+.. _me: felipe.balbi@linux.intel.com
+.. _linux-usb: linux-usb@vger.kernel.org
diff --git a/Documentation/driver-api/usb/index.rst b/Documentation/driver-api/usb/index.rst
index 1bf64ed..8fe995a 100644
--- a/Documentation/driver-api/usb/index.rst
+++ b/Documentation/driver-api/usb/index.rst
@@ -16,7 +16,10 @@
    persist
    error-codes
    writing_usb_driver
+   dwc3
    writing_musb_glue_layer
+   typec
+   usb3-debug-port
 
 .. only::  subproject and html
 
diff --git a/Documentation/usb/typec.rst b/Documentation/driver-api/usb/typec.rst
similarity index 100%
rename from Documentation/usb/typec.rst
rename to Documentation/driver-api/usb/typec.rst
diff --git a/Documentation/usb/usb3-debug-port.rst b/Documentation/driver-api/usb/usb3-debug-port.rst
similarity index 100%
rename from Documentation/usb/usb3-debug-port.rst
rename to Documentation/driver-api/usb/usb3-debug-port.rst
diff --git a/Documentation/driver-api/w1.rst b/Documentation/driver-api/w1.rst
new file mode 100644
index 0000000..9963cca
--- /dev/null
+++ b/Documentation/driver-api/w1.rst
@@ -0,0 +1,70 @@
+======================
+W1: Dallas' 1-wire bus
+======================
+
+:Author: David Fries
+
+W1 API internal to the kernel
+=============================
+
+W1 API internal to the kernel
+-----------------------------
+
+include/linux/w1.h
+~~~~~~~~~~~~~~~~~~
+
+W1 kernel API functions.
+
+.. kernel-doc:: include/linux/w1.h
+   :internal:
+
+drivers/w1/w1.c
+~~~~~~~~~~~~~~~
+
+W1 core functions.
+
+.. kernel-doc:: drivers/w1/w1.c
+   :internal:
+
+drivers/w1/w1_family.c
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Allows registering device family operations.
+
+.. kernel-doc:: drivers/w1/w1_family.c
+   :export:
+
+drivers/w1/w1_internal.h
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+W1 internal initialization for master devices.
+
+.. kernel-doc:: drivers/w1/w1_internal.h
+   :internal:
+
+drivers/w1/w1_int.c
+~~~~~~~~~~~~~~~~~~~~
+
+W1 internal initialization for master devices.
+
+.. kernel-doc:: drivers/w1/w1_int.c
+   :export:
+
+drivers/w1/w1_netlink.h
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+W1 external netlink API structures and commands.
+
+.. kernel-doc:: drivers/w1/w1_netlink.h
+   :internal:
+
+drivers/w1/w1_io.c
+~~~~~~~~~~~~~~~~~~~
+
+W1 input/output.
+
+.. kernel-doc:: drivers/w1/w1_io.c
+   :export:
+
+.. kernel-doc:: drivers/w1/w1_io.c
+   :internal:
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index e72587f..2d132fc 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -311,6 +311,8 @@
   devm_irq_alloc_desc_at()
   devm_irq_alloc_desc_from()
   devm_irq_alloc_descs_from()
+  devm_irq_alloc_generic_chip()
+  devm_irq_setup_generic_chip()
 
 LED
   devm_led_classdev_register()
@@ -335,7 +337,12 @@
   devm_kzalloc()
 
 MFD
- devm_mfd_add_devices()
+  devm_mfd_add_devices()
+
+MUX
+  devm_mux_chip_alloc()
+  devm_mux_chip_register()
+  devm_mux_control_get()
 
 PER-CPU MEM
   devm_alloc_percpu()
diff --git a/Documentation/fb/api.txt b/Documentation/fb/api.txt
index d4ff7de..d52cf1e 100644
--- a/Documentation/fb/api.txt
+++ b/Documentation/fb/api.txt
@@ -289,12 +289,12 @@
 FOURCC definitions are located in the linux/videodev2.h header. However, and
 despite starting with the V4L2_PIX_FMT_prefix, they are not restricted to V4L2
 and don't require usage of the V4L2 subsystem. FOURCC documentation is
-available in Documentation/DocBook/v4l/pixfmt.xml.
+available in Documentation/media/uapi/v4l/pixfmt.rst.
 
 To select a format, applications set the grayscale field to the desired FOURCC.
 For YUV formats, they should also select the appropriate colorspace by setting
 the colorspace field to one of the colorspaces listed in linux/videodev2.h and
-documented in Documentation/DocBook/v4l/colorspaces.xml.
+documented in Documentation/media/uapi/v4l/colorspaces.rst.
 
 The red, green, blue and transp fields are not used with the FOURCC-based API.
 For forward compatibility reasons applications must zero those fields, and
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt
index f10dd59..8444dc3 100644
--- a/Documentation/filesystems/autofs4.txt
+++ b/Documentation/filesystems/autofs4.txt
@@ -316,7 +316,7 @@
         struct autofs_v5_packet {
                 int proto_version;                /* Protocol version */
                 int type;                        /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 __u32 dev;
                 __u64 ino;
                 __u32 uid;
@@ -341,12 +341,12 @@
 `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at
 most one packet, and any unread portion of a packet will be discarded.
 
-The `wait_queue_token` is a unique number which can identify a
+The `wait_queue_entry_token` is a unique number which can identify a
 particular request to be acknowledged.  When a message is sent over
 the pipe the affected dentry is marked as either "active" or
 "expiring" and other accesses to it block until the message is
 acknowledged using one of the ioctls below and the relevant
-`wait_queue_token`.
+`wait_queue_entry_token`.
 
 Communicating with autofs: root directory ioctls
 ------------------------------------------------
@@ -358,7 +358,7 @@
 The available ioctl commands are:
 
 - **AUTOFS_IOC_READY**: a notification has been handled.  The argument
-    to the ioctl command is the "wait_queue_token" number
+    to the ioctl command is the "wait_queue_entry_token" number
     corresponding to the notification being acknowledged.
 - **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
     the error code `ENOENT`.
@@ -382,14 +382,14 @@
         struct autofs_packet_expire_multi {
                 int proto_version;              /* Protocol version */
                 int type;                       /* Type of packet */
-                autofs_wqt_t wait_queue_token;
+                autofs_wqt_t wait_queue_entry_token;
                 int len;
                 char name[NAME_MAX+1];
         };
 
      is required.  This is filled in with the name of something
      that can be unmounted or removed.  If nothing can be expired,
-     `errno` is set to `EAGAIN`.  Even though a `wait_queue_token`
+     `errno` is set to `EAGAIN`.  Even though a `wait_queue_entry_token`
      is present in the structure, no "wait queue" is established
      and no acknowledgment is needed.
 - **AUTOFS_IOC_EXPIRE_MULTI**:  This is similar to
diff --git a/Documentation/filesystems/conf.py b/Documentation/filesystems/conf.py
new file mode 100644
index 0000000..ea44172
--- /dev/null
+++ b/Documentation/filesystems/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux Filesystems API"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'filesystems.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
new file mode 100644
index 0000000..256e10e
--- /dev/null
+++ b/Documentation/filesystems/index.rst
@@ -0,0 +1,317 @@
+=====================
+Linux Filesystems API
+=====================
+
+The Linux VFS
+=============
+
+The Filesystem types
+--------------------
+
+.. kernel-doc:: include/linux/fs.h
+   :internal:
+
+The Directory Cache
+-------------------
+
+.. kernel-doc:: fs/dcache.c
+   :export:
+
+.. kernel-doc:: include/linux/dcache.h
+   :internal:
+
+Inode Handling
+--------------
+
+.. kernel-doc:: fs/inode.c
+   :export:
+
+.. kernel-doc:: fs/bad_inode.c
+   :export:
+
+Registration and Superblocks
+----------------------------
+
+.. kernel-doc:: fs/super.c
+   :export:
+
+File Locks
+----------
+
+.. kernel-doc:: fs/locks.c
+   :export:
+
+.. kernel-doc:: fs/locks.c
+   :internal:
+
+Other Functions
+---------------
+
+.. kernel-doc:: fs/mpage.c
+   :export:
+
+.. kernel-doc:: fs/namei.c
+   :export:
+
+.. kernel-doc:: fs/buffer.c
+   :export:
+
+.. kernel-doc:: block/bio.c
+   :export:
+
+.. kernel-doc:: fs/seq_file.c
+   :export:
+
+.. kernel-doc:: fs/filesystems.c
+   :export:
+
+.. kernel-doc:: fs/fs-writeback.c
+   :export:
+
+.. kernel-doc:: fs/block_dev.c
+   :export:
+
+The proc filesystem
+===================
+
+sysctl interface
+----------------
+
+.. kernel-doc:: kernel/sysctl.c
+   :export:
+
+proc filesystem interface
+-------------------------
+
+.. kernel-doc:: fs/proc/base.c
+   :internal:
+
+Events based on file descriptors
+================================
+
+.. kernel-doc:: fs/eventfd.c
+   :export:
+
+The Filesystem for Exporting Kernel Objects
+===========================================
+
+.. kernel-doc:: fs/sysfs/file.c
+   :export:
+
+.. kernel-doc:: fs/sysfs/symlink.c
+   :export:
+
+The debugfs filesystem
+======================
+
+debugfs interface
+-----------------
+
+.. kernel-doc:: fs/debugfs/inode.c
+   :export:
+
+.. kernel-doc:: fs/debugfs/file.c
+   :export:
+
+The Linux Journalling API
+=========================
+
+Overview
+--------
+
+Details
+~~~~~~~
+
+The journalling layer is easy to use. You need to first of all create a
+journal_t data structure. There are two calls to do this dependent on
+how you decide to allocate the physical media on which the journal
+resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in
+filesystem inodes, while the :c:func:`jbd2_journal_init_dev` call can be used
+for a journal stored on a raw device (in a contiguous range of blocks). A
+journal_t is a typedef for a struct pointer, so when you are finally
+finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up
+any used kernel memory.
+
+Once you have got your journal_t object you need to 'mount' or load the
+journal file. The journalling layer expects that the space for the journal
+has already been allocated and initialized properly by the userspace tools.
+When loading the journal you must call :c:func:`jbd2_journal_load` to process
+the journal contents. If the client file system detects that the journal
+contents do not need to be processed (or even need not have valid contents), it
+may call :c:func:`jbd2_journal_wipe` to clear the journal contents before
+calling :c:func:`jbd2_journal_load`.
+
+Note that jbd2_journal_wipe(..,0) calls
+:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding
+transactions in the journal and similarly :c:func:`jbd2_journal_load` will
+call :c:func:`jbd2_journal_recover` if necessary. I would advise reading
+:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage.
+
+Now you can go ahead and start modifying the underlying filesystem.
+Almost.
+
+You still need to actually journal your filesystem changes; this is done
+by wrapping them into transactions. Additionally you also need to wrap
+the modification of each of the buffers with calls to the journal layer,
+so it knows what modifications you are actually making. To do
+this use :c:func:`jbd2_journal_start` which returns a transaction handle.
+
+:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`,
+which indicates the end of a transaction, are nestable calls, so you can
+reenter a transaction if necessary, but remember you must call
+:c:func:`jbd2_journal_stop` the same number of times as
+:c:func:`jbd2_journal_start` before the transaction is completed (or more
+accurately leaves the update phase). Ext4/VFS makes use of this feature to
+simplify handling of inode dirtying, quota support, etc.
+
+Inside each transaction you need to wrap the modifications to the
+individual buffers (blocks). Before you start to modify a buffer you
+need to call :c:func:`jbd2_journal_get_create_access()` /
+:c:func:`jbd2_journal_get_write_access()` /
+:c:func:`jbd2_journal_get_undo_access()` as appropriate; this allows the
+journalling layer to copy the unmodified
+data if it needs to. After all, the buffer may be part of a previously
+uncommitted transaction. At this point you are at last ready to modify a
+buffer, and once you have done so you need to call
+:c:func:`jbd2_journal_dirty_metadata`. Or if you've asked for access to a
+buffer you now know is no longer required to be pushed back to the
+device, you can call :c:func:`jbd2_journal_forget` in much the same way as you
+might have used :c:func:`bforget` in the past.
+
+A :c:func:`jbd2_journal_flush` may be called at any time to commit and
+checkpoint all your transactions.
+
+Then at umount time, in your :c:func:`put_super` you can call
+:c:func:`jbd2_journal_destroy` to clean up your in-core journal object.
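+
+As a rough illustration (not taken from any particular filesystem), a
+single metadata update might look like the sketch below; ``my_journal``
+and ``bh`` are assumed to be an already-loaded journal and the
+buffer_head being modified::
+
+        handle_t *handle;
+        int err;
+
+        /* reserve room for one modified block in this transaction */
+        handle = jbd2_journal_start(my_journal, 1);
+        if (IS_ERR(handle))
+                return PTR_ERR(handle);
+
+        /* tell the journal we are about to modify this buffer */
+        err = jbd2_journal_get_write_access(handle, bh);
+        if (!err) {
+                /* ... modify the buffer contents here ... */
+                err = jbd2_journal_dirty_metadata(handle, bh);
+        }
+
+        /* close our nested section of the transaction */
+        jbd2_journal_stop(handle);
+        return err;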
+
+Unfortunately there are a couple of ways the journal layer can cause a
+deadlock. The first thing to note is that each task can only have a
+single outstanding transaction at any one time; remember nothing commits
+until the outermost :c:func:`jbd2_journal_stop`. This means you must complete
+the transaction at the end of each file/inode/address etc. operation you
+perform, so that the journalling system isn't re-entered on another
+journal: transactions can't be nested/batched across differing
+journals, and another filesystem other than yours (say ext4) may be
+modified in a later syscall.
+
+The second case to bear in mind is that :c:func:`jbd2_journal_start` can block
+if there isn't enough space in the journal for your transaction (based
+on the passed nblocks param) - when it blocks it merely(!) needs to wait
+for transactions to complete and be committed from other tasks, so
+essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid
+deadlocks you must treat :c:func:`jbd2_journal_start` /
+:c:func:`jbd2_journal_stop` as if they were semaphores and include them in
+your semaphore ordering rules to prevent
+deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking
+behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as
+easily as on :c:func:`jbd2_journal_start`.
+
+Try to reserve the right number of blocks the first time. ;-). This will
+be the maximum number of blocks you are going to touch in this
+transaction. I advise having a look at at least ext4_jbd.h to see the
+basis on which ext4 makes these decisions.
+
+Another wriggle to watch out for is your on-disk block allocation
+strategy. Why? Because, if you do a delete, you need to ensure you
+haven't reused any of the freed blocks until the transaction freeing
+these blocks commits. If you reuse these blocks and a crash happens,
+there is no way to restore the contents of the reallocated blocks at the
+end of the last fully committed transaction. One simple way of doing
+this is to mark blocks as free in internal in-memory block allocation
+structures only after the transaction freeing them commits. Ext4 uses the
+journal commit callback for this purpose.
+
+With journal commit callbacks you can ask the journalling layer to call
+a callback function when the transaction is finally committed to disk,
+so that you can do some of your own management. You ask the journalling
+layer to call the callback by simply setting the
+``journal->j_commit_callback`` function pointer, and that function is
+called after each transaction commit. You can also use
+``transaction->t_private_list`` for attaching entries to a transaction
+that need processing when the transaction commits.
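+
+A minimal sketch (the callback name and its body are invented) looks
+like this::
+
+        static void my_commit_callback(journal_t *journal, transaction_t *txn)
+        {
+                /* e.g. walk txn->t_private_list and release now-safe blocks */
+        }
+
+        /* once, after jbd2_journal_load() has succeeded: */
+        my_journal->j_commit_callback = my_commit_callback;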
+
+JBD2 also provides a way to block all transaction updates via
+:c:func:`jbd2_journal_lock_updates()` /
+:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a
+window with a clean and stable fs for a moment. E.g.
+
+::
+
+
+        jbd2_journal_lock_updates() //stop new stuff happening..
+        jbd2_journal_flush()        // checkpoint everything.
+        ..do stuff on stable fs
+        jbd2_journal_unlock_updates() // carry on with filesystem use.
+
+The opportunities for abuse and DOS attacks with this should be obvious,
+if you allow unprivileged userspace to trigger codepaths containing
+these calls.
+
+Summary
+~~~~~~~
+
+Using the journal is a matter of wrapping the different context changes,
+namely each mount, each modification (transaction) and each changed
+buffer, to tell the journalling layer about them.
+
+Data Types
+----------
+
+The journalling layer uses typedefs to 'hide' the concrete definitions
+of the structures used. As a client of the JBD2 layer you can just rely
+on using the pointer as a magic cookie of some sort. Obviously the
+hiding is not enforced as this is 'C'.
+
+Structures
+~~~~~~~~~~
+
+.. kernel-doc:: include/linux/jbd2.h
+   :internal:
+
+Functions
+---------
+
+The functions here are split into two groups: those that affect a journal
+as a whole, and those which are used to manage transactions.
+
+Journal Level
+~~~~~~~~~~~~~
+
+.. kernel-doc:: fs/jbd2/journal.c
+   :export:
+
+.. kernel-doc:: fs/jbd2/recovery.c
+   :internal:
+
+Transaction Level
+~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: fs/jbd2/transaction.c
+
+See also
+--------
+
+`Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen
+Tweedie <http://kernel.org/pub/linux/kernel/people/sct/ext3/journal-design.ps.gz>`__
+
+`Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen
+Tweedie <http://olstrans.sourceforge.net/release/OLS2000-ext3/OLS2000-ext3.html>`__
+
+splice API
+==========
+
+splice is a method for moving blocks of data around inside the kernel,
+without continually transferring them between the kernel and user space.
+
+.. kernel-doc:: fs/splice.c
+
+pipes API
+=========
+
+Pipe interfaces are all for in-kernel (builtin image) use. They are not
+exported for use by modules.
+
+.. kernel-doc:: include/linux/pipe_fs_i.h
+   :internal:
+
+.. kernel-doc:: fs/pipe.c
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index fe03d10..b86831a 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -55,7 +55,7 @@
 this case, /some/other/program will handle all uid lookups and
 /usr/sbin/nfs.idmap will handle gid, user, and group lookups.
 
-See <file:Documentation/security/keys-request-key.txt> for more information
+See <file:Documentation/security/keys/request-key.rst> for more information
 about the request-key function.
 
 
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 1bdb735..6162d0e9 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -228,7 +228,7 @@
 task would be to clean up interfaces like moving functions around between
 files to better group them and improving the interfaces like dropping return
 values for functions that never fail. Then write kerneldoc for all exported
-functions and an overview section and integrate it all into the drm DocBook.
+functions and an overview section and integrate it all into the drm book.
 
 See https://dri.freedesktop.org/docs/drm/ for what's there already.
 
diff --git a/Documentation/hwmon/ads1015 b/Documentation/hwmon/ads1015
index 063b80d..02d2a45 100644
--- a/Documentation/hwmon/ads1015
+++ b/Documentation/hwmon/ads1015
@@ -40,7 +40,7 @@
 Platform Data
 -------------
 
-In linux/i2c/ads1015.h platform data is defined, channel_data contains
+In linux/platform_data/ads1015.h platform data is defined, channel_data contains
 configuration data for the used input combinations:
 - pga is the programmable gain amplifier (values are full scale)
   0: +/- 6.144 V
diff --git a/Documentation/hwmon/adt7475 b/Documentation/hwmon/adt7475
index 0502f2b..09d73a1 100644
--- a/Documentation/hwmon/adt7475
+++ b/Documentation/hwmon/adt7475
@@ -109,6 +109,15 @@
 Fan speed may be set to maximum when the temperature sensor associated with
 the PWM control exceeds temp#_max.
 
+At Tmin - hysteresis the PWM output can either be off (0% duty cycle) or at the
+minimum (i.e. auto_point1_pwm). This behaviour can be configured using the
+pwm[1-*]_stall_disable sysfs attribute. A value of 0 means the fans will shut
+off. A value of 1 means the fans will run at auto_point1_pwm.
+
+The responsiveness of the ADT747x to temperature changes can be configured.
+This allows smoothing of the fan speed transition. To set the transition time
+set the value in ms in the temp[1-*]_smoothing sysfs attribute.
+
 Notes
 -----
 
diff --git a/Documentation/hwmon/ir35221 b/Documentation/hwmon/ir35221
new file mode 100644
index 0000000..f7e1127
--- /dev/null
+++ b/Documentation/hwmon/ir35221
@@ -0,0 +1,87 @@
+Kernel driver ir35221
+=====================
+
+Supported chips:
+  * Infineon IR35221
+    Prefix: 'ir35221'
+    Addresses scanned: -
+    Datasheet: Datasheet is not publicly available.
+
+Author: Samuel Mendoza-Jonas <sam@mendozajonas.com>
+
+
+Description
+-----------
+
+IR35221 is a Digital DC-DC Multiphase Converter.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Example: the following commands will load the driver for an IR35221
+at address 0x70 on I2C bus #4:
+
+# modprobe ir35221
+# echo ir35221 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+
+
+Sysfs attributes
+----------------
+
+curr1_label		"iin"
+curr1_input		Measured input current
+curr1_max		Maximum current
+curr1_max_alarm		Current high alarm
+
+curr[2-3]_label		"iout[1-2]"
+curr[2-3]_input		Measured output current
+curr[2-3]_crit		Critical maximum current
+curr[2-3]_crit_alarm	Current critical high alarm
+curr[2-3]_highest	Highest output current
+curr[2-3]_lowest	Lowest output current
+curr[2-3]_max		Maximum current
+curr[2-3]_max_alarm	Current high alarm
+
+in1_label		"vin"
+in1_input		Measured input voltage
+in1_crit		Critical maximum input voltage
+in1_crit_alarm		Input voltage critical high alarm
+in1_highest		Highest input voltage
+in1_lowest		Lowest input voltage
+in1_min			Minimum input voltage
+in1_min_alarm		Input voltage low alarm
+
+in[2-3]_label		"vout[1-2]"
+in[2-3]_input		Measured output voltage
+in[2-3]_lcrit		Critical minimum output voltage
+in[2-3]_lcrit_alarm	Output voltage critical low alarm
+in[2-3]_crit		Critical maximum output voltage
+in[2-3]_crit_alarm	Output voltage critical high alarm
+in[2-3]_highest		Highest output voltage
+in[2-3]_lowest		Lowest output voltage
+in[2-3]_max		Maximum output voltage
+in[2-3]_max_alarm	Output voltage high alarm
+in[2-3]_min		Minimum output voltage
+in[2-3]_min_alarm	Output voltage low alarm
+
+power1_label		"pin"
+power1_input		Measured input power
+power1_alarm		Input power high alarm
+power1_max		Input power limit
+
+power[2-3]_label	"pout[1-2]"
+power[2-3]_input	Measured output power
+power[2-3]_max		Output power limit
+power[2-3]_max_alarm	Output power high alarm
+
+temp[1-2]_input		Measured temperature
+temp[1-2]_crit		Critical high temperature
+temp[1-2]_crit_alarm	Chip temperature critical high alarm
+temp[1-2]_highest	Highest temperature
+temp[1-2]_lowest	Lowest temperature
+temp[1-2]_max		Maximum temperature
+temp[1-2]_max_alarm	Chip temperature high alarm
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index b478b08..4ca7a9d 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -96,7 +96,7 @@
 the sensor reading.
 
 The LTC4245 chip can be configured to sample all GPIO pins with two methods:
-1) platform data -- see include/linux/i2c/ltc4245.h
+1) platform data -- see include/linux/platform_data/ltc4245.h
 2) OF device tree -- add the "ltc4245,use-extra-gpios" property to each chip
 
 The default mode of operation is to sample a single GPIO pin.
diff --git a/Documentation/hwmon/pmbus-core b/Documentation/hwmon/pmbus-core
index 31e4720..8ed10e9 100644
--- a/Documentation/hwmon/pmbus-core
+++ b/Documentation/hwmon/pmbus-core
@@ -253,7 +253,7 @@
 PMBus driver platform data
 ==========================
 
-PMBus platform data is defined in include/linux/i2c/pmbus.h. Platform data
+PMBus platform data is defined in include/linux/pmbus.h. Platform data
 currently only provides a flag field with a single bit used.
 
 #define PMBUS_SKIP_STATUS_CHECK (1 << 0)
diff --git a/Documentation/index.rst b/Documentation/index.rst
index bc67dbf..cb7f1ba 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -3,8 +3,8 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
-Welcome to The Linux Kernel's documentation
-===========================================
+The Linux Kernel documentation
+==============================
 
 This is the top level of the kernel's documentation tree.  Kernel
 documentation, like the kernel itself, is very much a work in progress;
@@ -51,6 +51,7 @@
    process/index
    dev-tools/index
    doc-guide/index
+   kernel-hacking/index
 
 Kernel API documentation
 ------------------------
@@ -67,11 +68,24 @@
    driver-api/index
    core-api/index
    media/index
+   networking/index
    input/index
    gpu/index
    security/index
    sound/index
    crypto/index
+   filesystems/index
+
+Architecture-specific documentation
+-----------------------------------
+
+These books provide programming details about architecture-specific
+implementation.
+
+.. toctree::
+   :maxdepth: 2
+
+   sh/index
 
 Korean translations
 -------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index e18daca..659afd5 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1331,7 +1331,7 @@
 	--- 7.5 mandatory-y
 
 	mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
-	to define the minimun set of headers that must be exported in
+	to define the minimum set of headers that must be exported in
 	include/asm.
 
 	The convention is to list one subdir per line and
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 104740e..c23e2c5 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -17,8 +17,8 @@
 It is documented in this Documentation/kernel-doc-nano-HOWTO.txt file.
 
 This style embeds the documentation within the source files, using
-a few simple conventions.  The scripts/kernel-doc perl script, some
-SGML templates in Documentation/DocBook, and other tools understand
+a few simple conventions.  The scripts/kernel-doc perl script, the
+Documentation/sphinx/kerneldoc.py Sphinx extension and other tools understand
 these conventions, and are used to extract this embedded documentation
 into various documents.
 
@@ -122,15 +122,9 @@
 - scripts/kernel-doc
 
   This is a perl script that hunts for the block comments and can mark
-  them up directly into DocBook, man, text, and HTML. (No, not
+  them up directly into DocBook, ReST, man, text, and HTML. (No, not
   texinfo.)
 
-- Documentation/DocBook/*.tmpl
-
-  These are SGML template files, which are normal SGML files with
-  special place-holders for where the extracted documentation should
-  go.
-
 - scripts/docproc.c
 
   This is a program for converting SGML template files into SGML
@@ -145,25 +139,18 @@
 
 - Makefile
 
-  The targets 'xmldocs', 'psdocs', 'pdfdocs', and 'htmldocs' are used
-  to build XML DocBook files, PostScript files, PDF files, and html files
-  in Documentation/DocBook. The older target 'sgmldocs' is equivalent
-  to 'xmldocs'.
-
-- Documentation/DocBook/Makefile
-
-  This is where C files are associated with SGML templates.
-
+  The targets 'xmldocs', 'latexdocs', 'pdfdocs', 'epubdocs' and 'htmldocs'
+  are used to build XML DocBook files, LaTeX files, PDF files,
+  ePub files and html files in Documentation/.
 
 How to extract the documentation
 --------------------------------
 
 If you just want to read the ready-made books on the various
-subsystems (see Documentation/DocBook/*.tmpl), just type 'make
-psdocs', or 'make pdfdocs', or 'make htmldocs', depending on your
-preference.  If you would rather read a different format, you can type
-'make xmldocs' and then use DocBook tools to convert
-Documentation/DocBook/*.xml to a format of your choice (for example,
+subsystems, just type 'make epubdocs', or 'make pdfdocs', or 'make htmldocs',
+depending on your preference.  If you would rather read a different format,
+you can type 'make xmldocs' and then use DocBook tools to convert
+Documentation/output/*.xml to a format of your choice (for example,
 'db2html ...' if 'make htmldocs' was not defined).
 
 If you want to see man pages instead, you can do this:
@@ -329,37 +316,7 @@
  * hardware, software, or its subject(s).
  */
 
-DOC: sections are used in SGML templates files as indicated below.
-
-
-How to make new SGML template files
------------------------------------
-
-SGML template files (*.tmpl) are like normal SGML files, except that
-they can contain escape sequences where extracted documentation should
-be inserted.
-
-!E<filename> is replaced by the documentation, in <filename>, for
-functions that are exported using EXPORT_SYMBOL: the function list is
-collected from files listed in Documentation/DocBook/Makefile.
-
-!I<filename> is replaced by the documentation for functions that are
-_not_ exported using EXPORT_SYMBOL.
-
-!D<filename> is used to name additional files to search for functions
-exported using EXPORT_SYMBOL.
-
-!F<filename> <function [functions...]> is replaced by the
-documentation, in <filename>, for the functions listed.
-
-!P<filename> <section title> is replaced by the contents of the DOC:
-section titled <section title> from <filename>.
-Spaces are allowed in <section title>; do not quote the <section title>.
-
-!C<filename> is replaced by nothing, but makes the tools check that
-all DOC: sections and documented functions, symbols, etc. are used.
-This makes sense to use when you use !F/!P only and want to verify
-that all documentation is included.
+DOC: sections are used in ReST files.
 
 Tim.
 */ <twaugh@redhat.com>
diff --git a/Documentation/kernel-hacking/conf.py b/Documentation/kernel-hacking/conf.py
new file mode 100644
index 0000000..3d8acf0
--- /dev/null
+++ b/Documentation/kernel-hacking/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Kernel Hacking Guides"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'kernel-hacking.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/kernel-hacking/hacking.rst b/Documentation/kernel-hacking/hacking.rst
new file mode 100644
index 0000000..daf3883
--- /dev/null
+++ b/Documentation/kernel-hacking/hacking.rst
@@ -0,0 +1,811 @@
+============================================
+Unreliable Guide To Hacking The Linux Kernel
+============================================
+
+:Author: Rusty Russell
+
+Introduction
+============
+
+Welcome, gentle reader, to Rusty's Remarkably Unreliable Guide to Linux
+Kernel Hacking. This document describes the common routines and general
+requirements for kernel code: its goal is to serve as a primer for Linux
+kernel development for experienced C programmers. I avoid implementation
+details: that's what the code is for, and I ignore whole tracts of
+useful routines.
+
+Before you read this, please understand that I never wanted to write
+this document, being grossly under-qualified, but I always wanted to
+read it, and this was the only way. I hope it will grow into a
+compendium of best practice, common starting points and random
+information.
+
+The Players
+===========
+
+At any time each of the CPUs in a system can be:
+
+-  not associated with any process, serving a hardware interrupt;
+
+-  not associated with any process, serving a softirq or tasklet;
+
+-  running in kernel space, associated with a process (user context);
+
+-  running a process in user space.
+
+There is an ordering between these. The bottom two can preempt each
+other, but above that is a strict hierarchy: each can only be preempted
+by the ones above it. For example, while a softirq is running on a CPU,
+no other softirq will preempt it, but a hardware interrupt can. However,
+any other CPUs in the system execute independently.
+
+We'll see a number of ways that the user context can block interrupts,
+to become truly non-preemptable.
+
+User Context
+------------
+
+User context is when you are coming in from a system call or other trap:
+like userspace, you can be preempted by more important tasks and by
+interrupts. You can sleep, by calling :c:func:`schedule()`.
+
+.. note::
+
+    You are always in user context on module load and unload, and on
+    operations on the block device layer.
+
+In user context, the ``current`` pointer (indicating the task we are
+currently executing) is valid, and :c:func:`in_interrupt()`
+(``include/linux/preempt.h``) is false.
+
+.. warning::
+
+    Beware that if you have preemption or softirqs disabled (see below),
+    :c:func:`in_interrupt()` will return a false positive.
+
+Hardware Interrupts (Hard IRQs)
+-------------------------------
+
+Timer ticks, network cards and keyboards are examples of real hardware
+which produce interrupts at any time. The kernel runs interrupt
+handlers, which service the hardware. The kernel guarantees that this
+handler is never re-entered: if the same interrupt arrives, it is queued
+(or dropped). Because it disables interrupts, this handler has to be
+fast: frequently it simply acknowledges the interrupt, marks a 'software
+interrupt' for execution and exits.
+
+You can tell you are in a hardware interrupt, because
+:c:func:`in_irq()` returns true.
+
+.. warning::
+
+    Beware that this will return a false positive if interrupts are
+    disabled (see below).
+
+Software Interrupt Context: Softirqs and Tasklets
+-------------------------------------------------
+
+Whenever a system call is about to return to userspace, or a hardware
+interrupt handler exits, any 'software interrupts' which are marked
+pending (usually by hardware interrupts) are run (``kernel/softirq.c``).
+
+Much of the real interrupt handling work is done here. Early in the
+transition to SMP, there were only 'bottom halves' (BHs), which didn't
+take advantage of multiple CPUs. Shortly after we switched from wind-up
+computers made of match-sticks and snot, we abandoned this limitation
+and switched to 'softirqs'.
+
+``include/linux/interrupt.h`` lists the different softirqs. A very
+important softirq is the timer softirq (``include/linux/timer.h``): you
+can register to have it call functions for you in a given length of
+time.
+
+Softirqs are often a pain to deal with, since the same softirq will run
+simultaneously on more than one CPU. For this reason, tasklets
+(``include/linux/interrupt.h``) are more often used: they are
+dynamically-registrable (meaning you can have as many as you want), and
+they also guarantee that any tasklet will only run on one CPU at any
+time, although different tasklets can run simultaneously.
+
+.. warning::
+
+    The name 'tasklet' is misleading: they have nothing to do with
+    'tasks', and probably more to do with some bad vodka Alexey
+    Kuznetsov had at the time.
+
+You can tell you are in a softirq (or tasklet) using the
+:c:func:`in_softirq()` macro (``include/linux/preempt.h``).
+
+.. warning::
+
+    Beware that this will return a false positive if a
+    :ref:`bottom half lock <local_bh_disable>` is held.
+
+Some Basic Rules
+================
+
+No memory protection
+    If you corrupt memory, whether in user context or interrupt context,
+    the whole machine will crash. Are you sure you can't do what you
+    want in userspace?
+
+No floating point or MMX
+    The FPU context is not saved; even in user context the FPU state
+    probably won't correspond with the current process: you would mess
+    with some user process' FPU state. If you really want to do this,
+    you would have to explicitly save/restore the full FPU state (and
+    avoid context switches). It is generally a bad idea; use fixed point
+    arithmetic first.
+
+A rigid stack limit
+    Depending on configuration options the kernel stack is about 3K to
+    6K for most 32-bit architectures: it's about 14K on most 64-bit
+    archs, and often shared with interrupts so you can't use it all.
+    Avoid deep recursion and huge local arrays on the stack (allocate
+    them dynamically instead).
+
+The Linux kernel is portable
+    Let's keep it that way. Your code should be 64-bit clean, and
+    endian-independent. You should also minimize CPU specific stuff,
+    e.g. inline assembly should be cleanly encapsulated and minimized to
+    ease porting. Generally it should be restricted to the
+    architecture-dependent part of the kernel tree.
+
+ioctls: Not writing a new system call
+=====================================
+
+A system call generally looks like this::
+
+    asmlinkage long sys_mycall(int arg)
+    {
+            return 0;
+    }
+
+
+First, in most cases you don't want to create a new system call. You
+create a character device and implement an appropriate ioctl for it.
+This is much more flexible than system calls, doesn't have to be entered
+in every architecture's ``include/asm/unistd.h`` and
+``arch/kernel/entry.S`` file, and is much more likely to be accepted by
+Linus.
+
+If all your routine does is read or write some parameter, consider
+implementing a :c:func:`sysfs()` interface instead.
+
+Inside the ioctl you're in user context to a process. When an error
+occurs you return a negated errno (see
+``include/uapi/asm-generic/errno-base.h``,
+``include/uapi/asm-generic/errno.h`` and ``include/linux/errno.h``),
+otherwise you return 0.
+
+After you slept you should check if a signal occurred: the Unix/Linux
+way of handling signals is to temporarily exit the system call with the
+``-ERESTARTSYS`` error. The system call entry code will switch back to
+user context, process the signal handler and then your system call will
+be restarted (unless the user disabled that). So you should be prepared
+to process the restart, e.g. if you're in the middle of manipulating
+some data structure.
+
+::
+
+    if (signal_pending(current))
+            return -ERESTARTSYS;
+
+
+If you're doing longer computations: first think userspace. If you
+**really** want to do it in kernel you should regularly check if you need
+to give up the CPU (remember there is cooperative multitasking per CPU).
+Idiom::
+
+    cond_resched(); /* Will sleep */
+
+
+A short note on interface design: the UNIX system call motto is "Provide
+mechanism not policy".
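+
+Putting the pieces above together, a (completely invented) ioctl handler
+for a character device might look like this; ``MY_IOC_CRUNCH`` is a
+hypothetical command::
+
+    static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+    {
+            int i;
+
+            if (cmd != MY_IOC_CRUNCH)
+                    return -ENOTTY;         /* unknown command */
+
+            for (i = 0; i < 1000; i++) {
+                    /* bail out; the syscall is restarted after the signal */
+                    if (signal_pending(current))
+                            return -ERESTARTSYS;
+                    /* ... do one chunk of work ... */
+                    cond_resched();         /* may sleep */
+            }
+            return 0;
+    }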
+
+Recipes for Deadlock
+====================
+
+You cannot call any routines which may sleep, unless:
+
+-  You are in user context.
+
+-  You do not own any spinlocks.
+
+-  You have interrupts enabled (actually, Andi Kleen says that the
+   scheduling code will enable them for you, but that's probably not
+   what you wanted).
+
+Note that some functions may sleep implicitly: common ones are the user
+space access functions (\*_user) and memory allocation functions
+without ``GFP_ATOMIC``.
+
+You should always compile your kernel with ``CONFIG_DEBUG_ATOMIC_SLEEP`` on,
+and it will warn you if you break these rules. If you **do** break the
+rules, you will eventually lock up your box.
+
+Really.
+
+Common Routines
+===============
+
+:c:func:`printk()`
+------------------
+
+Defined in ``include/linux/printk.h``
+
+:c:func:`printk()` feeds kernel messages to the console, dmesg, and
+the syslog daemon. It is useful for debugging and reporting errors, and
+can be used inside interrupt context, but use with caution: a machine
+which has its console flooded with printk messages is unusable. It uses
+a format string mostly compatible with ANSI C printf, and C string
+concatenation to give it a first "priority" argument::
+
+    printk(KERN_INFO "i = %u\n", i);
+
+
+See ``include/linux/kern_levels.h`` for other ``KERN_`` values; these are
+interpreted by syslog as the level. Special case: for printing an IP
+address use::
+
+    __be32 ipaddress;
+    printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
+
+
+:c:func:`printk()` internally uses a 1K buffer and does not catch
+overruns. Make sure that will be enough.
+
+.. note::
+
+    You will know when you are a real kernel hacker when you start
+    typoing printf as printk in your user programs :)
+
+.. note::
+
+    Another sidenote: the original Unix Version 6 sources had a comment
+    on top of its printf function: "Printf should not be used for
+    chit-chat". You should follow that advice.
+
+:c:func:`copy_to_user()` / :c:func:`copy_from_user()` / :c:func:`get_user()` / :c:func:`put_user()`
+---------------------------------------------------------------------------------------------------
+
+Defined in ``include/linux/uaccess.h`` / ``asm/uaccess.h``
+
+**[SLEEPS]**
+
+:c:func:`put_user()` and :c:func:`get_user()` are used to get
+and put single values (such as an int, char, or long) from and to
+userspace. A pointer into userspace should never be simply dereferenced:
+data should be copied using these routines. Both return ``-EFAULT`` or
+0.
+
+:c:func:`copy_to_user()` and :c:func:`copy_from_user()` are
+more general: they copy an arbitrary amount of data to and from
+userspace.
+
+.. warning::
+
+    Unlike :c:func:`put_user()` and :c:func:`get_user()`, they
+    return the amount of uncopied data (ie. 0 still means success).
+
+[Yes, this moronic interface makes me cringe. The flamewar comes up
+every year or so. --RR.]
+
+These functions may sleep implicitly. They should never be called outside
+user context (it makes no sense), with interrupts disabled, or with a
+spinlock held.
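+
+For instance, a (made-up) helper handing a small structure to userspace
+might look like this; ``struct my_stats`` and ``fill_my_stats()`` are
+hypothetical::
+
+    static int my_read_stats(struct my_stats __user *uptr)
+    {
+            struct my_stats s;
+
+            fill_my_stats(&s);
+
+            /* copy_to_user() returns the number of bytes NOT copied */
+            if (copy_to_user(uptr, &s, sizeof(s)))
+                    return -EFAULT;
+            return 0;
+    }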
+
+:c:func:`kmalloc()`/:c:func:`kfree()`
+-------------------------------------
+
+Defined in ``include/linux/slab.h``
+
+**[MAY SLEEP: SEE BELOW]**
+
+These routines are used to dynamically request pointer-aligned chunks of
+memory, like malloc and free do in userspace, but
+:c:func:`kmalloc()` takes an extra flag word. Important values:
+
+``GFP_KERNEL``
+    May sleep and swap to free memory. Only allowed in user context, but
+    is the most reliable way to allocate memory.
+
+``GFP_ATOMIC``
+    Don't sleep. Less reliable than ``GFP_KERNEL``, but may be called
+    from interrupt context. You should **really** have a good
+    out-of-memory error-handling strategy.
+
+``GFP_DMA``
+    Allocate ISA DMA lower than 16MB. If you don't know what that is you
+    don't need it. Very unreliable.
+
+If you see a 'sleeping function called from invalid context' warning
+message, then maybe you called a sleeping allocation function from
+interrupt context without ``GFP_ATOMIC``. You should really fix that.
+Run, don't walk.
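+
+A typical allocation in user context looks like this (``struct my_thing``
+is, of course, invented)::
+
+    struct my_thing *thing;
+
+    thing = kmalloc(sizeof(*thing), GFP_KERNEL);    /* may sleep */
+    if (!thing)
+            return -ENOMEM;
+    /* ... use thing ... */
+    kfree(thing);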
+
+If you are allocating at least ``PAGE_SIZE`` (``asm/page.h`` or
+``asm/page_types.h``) bytes, consider using :c:func:`__get_free_pages()`
+(``include/linux/gfp.h``). It takes an order argument (0 for page sized,
+1 for double page, 2 for four pages etc.) and the same memory priority
+flag word as above.
+
+If you are allocating more than a page worth of bytes you can use
+:c:func:`vmalloc()`. It'll allocate virtual memory in the kernel
+map. This block is not contiguous in physical memory, but the MMU makes
+it look like it is for you (so it'll only look contiguous to the CPUs,
+not to external device drivers). If you really need large physically
+contiguous memory for some weird device, you have a problem: it is
+poorly supported in Linux because after some time memory fragmentation
+in a running kernel makes it hard. The best way is to allocate the block
+early in the boot process via the :c:func:`alloc_bootmem()`
+routine.
+
+Before inventing your own cache of often-used objects consider using a
+slab cache in ``include/linux/slab.h``
+
+:c:func:`current()`
+-------------------
+
+Defined in ``include/asm/current.h``
+
+This global variable (really a macro) contains a pointer to the current
+task structure, so is only valid in user context. For example, when a
+process makes a system call, this will point to the task structure of
+the calling process. It is **not NULL** in interrupt context.
+
+:c:func:`mdelay()`/:c:func:`udelay()`
+-------------------------------------
+
+Defined in ``include/asm/delay.h`` / ``include/linux/delay.h``
+
+The :c:func:`udelay()` and :c:func:`ndelay()` functions can be
+used for small pauses. Do not use large values with them as you risk
+overflow - the helper function :c:func:`mdelay()` is useful here, or
+consider :c:func:`msleep()`.
+
+:c:func:`cpu_to_be32()`/:c:func:`be32_to_cpu()`/:c:func:`cpu_to_le32()`/:c:func:`le32_to_cpu()`
+-----------------------------------------------------------------------------------------------
+
+Defined in ``include/asm/byteorder.h``
+
+The :c:func:`cpu_to_be32()` family (where the "32" can be replaced
+by 64 or 16, and the "be" can be replaced by "le") are the general way
+to do endian conversions in the kernel: they return the converted value.
+All variations supply the reverse as well:
+:c:func:`be32_to_cpu()`, etc.
+
+There are two major variations of these functions: the pointer
+variation, such as :c:func:`cpu_to_be32p()`, which take a pointer
+to the given type, and return the converted value. The other variation
+is the "in-situ" family, such as :c:func:`cpu_to_be32s()`, which
+convert the value referred to by the pointer in place, and return void.
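+
+A small sketch (the on-disk layout is invented) of both directions::
+
+    struct my_disk_header {
+            __be32 magic;
+            __be16 version;
+    };
+
+    hdr->magic   = cpu_to_be32(0x4d594653);         /* CPU order -> big-endian */
+    hdr->version = cpu_to_be16(1);
+
+    if (be32_to_cpu(hdr->magic) != 0x4d594653)      /* big-endian -> CPU order */
+            return -EINVAL;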
+
+:c:func:`local_irq_save()`/:c:func:`local_irq_restore()`
+--------------------------------------------------------
+
+Defined in ``include/linux/irqflags.h``
+
+These routines disable hard interrupts on the local CPU, and restore
+them. They are reentrant, saving the previous state in their one
+``unsigned long flags`` argument. If you know that interrupts are
+enabled, you can simply use :c:func:`local_irq_disable()` and
+:c:func:`local_irq_enable()`.
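+
+A minimal sketch of the usual pattern::
+
+    unsigned long flags;
+
+    local_irq_save(flags);          /* interrupts off, previous state saved */
+    /* ... touch data shared with an interrupt handler ... */
+    local_irq_restore(flags);       /* back to whatever it was before */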
+
+.. _local_bh_disable:
+
+:c:func:`local_bh_disable()`/:c:func:`local_bh_enable()`
+--------------------------------------------------------
+
+Defined in ``include/linux/bottom_half.h``
+
+
+These routines disable soft interrupts on the local CPU, and restore
+them. They are reentrant; if soft interrupts were disabled before, they
+will still be disabled after this pair of functions has been called.
+They prevent softirqs and tasklets from running on the current CPU.
+
+:c:func:`smp_processor_id()`
+----------------------------
+
+Defined in ``include/linux/smp.h``
+
+:c:func:`get_cpu()` disables preemption (so you won't suddenly get
+moved to another CPU) and returns the current processor number, between
+0 and ``NR_CPUS``. Note that the CPU numbers are not necessarily
+continuous. You return it again with :c:func:`put_cpu()` when you
+are done.
+
+If you know you cannot be preempted by another task (ie. you are in
+interrupt context, or have preemption disabled) you can use
+smp_processor_id().
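+
+For example (just a sketch)::
+
+    int cpu;
+
+    cpu = get_cpu();                /* preemption disabled from here... */
+    printk(KERN_INFO "running on CPU %d\n", cpu);
+    put_cpu();                      /* ...until here */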
+
+``__init``/``__exit``/``__initdata``
+------------------------------------
+
+Defined in  ``include/linux/init.h``
+
+After boot, the kernel frees up a special section; functions marked with
+``__init`` and data structures marked with ``__initdata`` are dropped
+after boot is complete: similarly modules discard this memory after
+initialization. ``__exit`` is used to declare a function which is only
+required on exit: the function will be dropped if this file is not
+compiled as a module. See the header file for use. Note that it makes no
+sense for a function marked with ``__init`` to be exported to modules
+with :c:func:`EXPORT_SYMBOL()` or :c:func:`EXPORT_SYMBOL_GPL()` - this
+will break.
+
+:c:func:`__initcall()`/:c:func:`module_init()`
+----------------------------------------------
+
+Defined in  ``include/linux/init.h`` / ``include/linux/module.h``
+
+Many parts of the kernel are well served as a module
+(dynamically-loadable parts of the kernel). Using the
+:c:func:`module_init()` and :c:func:`module_exit()` macros it
+is easy to write code without #ifdefs which can operate either as a module
+or built into the kernel.
+
+The :c:func:`module_init()` macro defines which function is to be
+called at module insertion time (if the file is compiled as a module),
+or at boot time: if the file is not compiled as a module the
+:c:func:`module_init()` macro becomes equivalent to
+:c:func:`__initcall()`, which through linker magic ensures that
+the function is called on boot.
+
+The function can return a negative error number to cause module loading
+to fail (unfortunately, this has no effect if the module is compiled
+into the kernel). This function is called in user context with
+interrupts enabled, so it can sleep.
+
+:c:func:`module_exit()`
+-----------------------
+
+
+Defined in  ``include/linux/module.h``
+
+This macro defines the function to be called at module removal time (or
+never, in the case of the file compiled into the kernel). It will only
+be called if the module usage count has reached zero. This function can
+also sleep, but cannot fail: everything must be cleaned up by the time
+it returns.
+
+Note that this macro is optional: if it is not present, your module will
+not be removable (except for 'rmmod -f').
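+
+Tying these macros together, a do-nothing module skeleton might look
+like this (all names invented)::
+
+    static int __init my_module_init(void)
+    {
+            pr_info("my_module loaded\n");
+            return 0;               /* a negative errno here fails the load */
+    }
+
+    static void __exit my_module_exit(void)
+    {
+            pr_info("my_module unloaded\n");
+    }
+
+    module_init(my_module_init);
+    module_exit(my_module_exit);
+    MODULE_LICENSE("GPL");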
+
+:c:func:`try_module_get()`/:c:func:`module_put()`
+-------------------------------------------------
+
+Defined in ``include/linux/module.h``
+
+These manipulate the module usage count, to protect against removal (a
+module also can't be removed if another module uses one of its exported
+symbols: see below). Before calling into module code, you should call
+:c:func:`try_module_get()` on that module: if it fails, then the
+module is being removed and you should act as if it wasn't there.
+Otherwise, you can safely enter the module, and call
+:c:func:`module_put()` when you're finished.
+
+Most registerable structures have an owner field, such as in the
+:c:type:`struct file_operations <file_operations>` structure.
+Set this field to the macro ``THIS_MODULE``.
+
+Wait Queues ``include/linux/wait.h``
+====================================
+
+**[SLEEPS]**
+
+A wait queue is used to wait for someone to wake you up when a certain
+condition is true. They must be used carefully to ensure there is no
+race condition. You declare a :c:type:`wait_queue_head_t`, and then processes
+which want to wait for that condition declare a :c:type:`wait_queue_entry_t`
+referring to themselves, and place that in the queue.
+
+Declaring
+---------
+
+You declare a ``wait_queue_head_t`` using the
+:c:func:`DECLARE_WAIT_QUEUE_HEAD()` macro, or using the
+:c:func:`init_waitqueue_head()` routine in your initialization
+code.
+
+Queuing
+-------
+
+Placing yourself in the waitqueue is fairly complex, because you must
+put yourself in the queue before checking the condition. There is a
+macro to do this: :c:func:`wait_event_interruptible()`
+(``include/linux/wait.h``). The first argument is the wait queue head, and
+the second is an expression which is evaluated; the macro returns 0 when
+this expression is true, or ``-ERESTARTSYS`` if a signal is received. The
+:c:func:`wait_event()` version ignores signals.
+
+Waking Up Queued Tasks
+----------------------
+
+Call :c:func:`wake_up()` (``include/linux/wait.h``), which will wake
+up every process in the queue. The exception is if one has
+``TASK_EXCLUSIVE`` set, in which case the remainder of the queue will
+not be woken. There are other variants of this basic function available
+in the same header.
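+
+Putting the pieces together, a minimal (invented) sketch::
+
+    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
+    static int my_flag;
+
+    /* sleeping side: returns -ERESTARTSYS if interrupted by a signal */
+    if (wait_event_interruptible(my_wq, my_flag != 0))
+            return -ERESTARTSYS;
+
+    /* waking side (for example from an interrupt handler): */
+    my_flag = 1;
+    wake_up(&my_wq);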
+
+Atomic Operations
+=================
+
+Certain operations are guaranteed atomic on all platforms. The first
+class of operations work on :c:type:`atomic_t` (``include/asm/atomic.h``);
+this contains a signed integer (at least 32 bits long), and you must use
+these functions to manipulate or read :c:type:`atomic_t` variables.
+:c:func:`atomic_read()` and :c:func:`atomic_set()` get and set
+the counter, :c:func:`atomic_add()`, :c:func:`atomic_sub()`,
+:c:func:`atomic_inc()`, :c:func:`atomic_dec()`, and
+:c:func:`atomic_dec_and_test()` (returns true if it was
+decremented to zero).
+
+Yes. It returns true (i.e. != 0) if the atomic variable is zero.
+
+Note that these functions are slower than normal arithmetic, and so
+should not be used unnecessarily.
+
+The second class of atomic operations is atomic bit operations on an
+``unsigned long``, defined in ``include/linux/bitops.h``. These
+operations generally take a pointer to the bit pattern, and a bit
+number: 0 is the least significant bit. :c:func:`set_bit()`,
+:c:func:`clear_bit()` and :c:func:`change_bit()` set, clear,
+and flip the given bit. :c:func:`test_and_set_bit()`,
+:c:func:`test_and_clear_bit()` and
+:c:func:`test_and_change_bit()` do the same thing, except return
+true if the bit was previously set; these are particularly useful for
+atomically setting flags.
+
+It is possible to call these operations with bit indices greater than
+``BITS_PER_LONG``. The resulting behavior is strange on big-endian
+platforms, though, so it is a good idea not to do this.
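+
+A short sketch of both classes (names invented; note the bitmap is an
+``unsigned long``, not to be confused with IRQ flags)::
+
+    static atomic_t my_count = ATOMIC_INIT(0);
+    static unsigned long my_bits;
+
+    atomic_inc(&my_count);
+    if (atomic_dec_and_test(&my_count))
+            pr_info("count hit zero\n");
+
+    if (test_and_set_bit(0, &my_bits))
+            pr_info("bit 0 was already set\n");
+    clear_bit(0, &my_bits);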
+
+Symbols
+=======
+
+Within the kernel proper, the normal linking rules apply (ie. unless a
+symbol is declared to be file scope with the ``static`` keyword, it can
+be used anywhere in the kernel). However, for modules, a special
+exported symbol table is kept which limits the entry points to the
+kernel proper. Modules can also export symbols.
+
+:c:func:`EXPORT_SYMBOL()`
+-------------------------
+
+Defined in ``include/linux/export.h``
+
+This is the classic method of exporting a symbol: dynamically loaded
+modules will be able to use the symbol as normal.
+
+:c:func:`EXPORT_SYMBOL_GPL()`
+-----------------------------
+
+Defined in ``include/linux/export.h``
+
+Similar to :c:func:`EXPORT_SYMBOL()` except that the symbols
+exported by :c:func:`EXPORT_SYMBOL_GPL()` can only be seen by
+modules with a :c:func:`MODULE_LICENSE()` that specifies a GPL
+compatible license. It implies that the function is considered an
+internal implementation issue, and not really an interface. Some
+maintainers and developers may however require EXPORT_SYMBOL_GPL()
+when adding any new APIs or functionality.
+
+Routines and Conventions
+========================
+
+Double-linked lists ``include/linux/list.h``
+--------------------------------------------
+
+There used to be three sets of linked-list routines in the kernel
+headers, but this one is the winner. If you don't have some particular
+pressing need for a single list, it's a good choice.
+
+In particular, :c:func:`list_for_each_entry()` is useful.
+
+Return Conventions
+------------------
+
+For code called in user context, it's very common to defy C convention,
+and return 0 for success, and a negative error number (eg. ``-EFAULT``) for
+failure. This can be unintuitive at first, but it's fairly widespread in
+the kernel.
+
+Use :c:func:`ERR_PTR()` (``include/linux/err.h``) to encode a
+negative error number into a pointer, and :c:func:`IS_ERR()` and
+:c:func:`PTR_ERR()` to get it back out again: this avoids a separate
+pointer parameter for the error number. Icky, but in a good way.
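+
+A small (invented) example of both ends of that convention::
+
+    static struct my_thing *my_thing_create(void)
+    {
+            struct my_thing *t = kmalloc(sizeof(*t), GFP_KERNEL);
+
+            if (!t)
+                    return ERR_PTR(-ENOMEM);    /* error hidden in the pointer */
+            return t;
+    }
+
+    /* caller side: */
+    struct my_thing *t = my_thing_create();
+
+    if (IS_ERR(t))
+            return PTR_ERR(t);                  /* back to a plain -ENOMEM */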
+
+Breaking Compilation
+--------------------
+
+Linus and the other developers sometimes change function or structure
+names in development kernels; this is not done just to keep everyone on
+their toes: it reflects a fundamental change (eg. can no longer be
+called with interrupts on, or does extra checks, or doesn't do checks
+which were caught before). Usually this is accompanied by a fairly
+complete note to the linux-kernel mailing list; search the archive.
+Simply doing a global replace on the file usually makes things **worse**.
+
+Initializing structure members
+------------------------------
+
+The preferred method of initializing structures is to use designated
+initialisers, as defined by ISO C99, eg::
+
+    static struct block_device_operations opt_fops = {
+            .open               = opt_open,
+            .release            = opt_release,
+            .ioctl              = opt_ioctl,
+            .check_media_change = opt_media_change,
+    };
+
+
+This makes it easy to grep for, and makes it clear which structure
+fields are set. You should do this because it looks cool.
+
+GNU Extensions
+--------------
+
+GNU Extensions are explicitly allowed in the Linux kernel. Note that
+some of the more complex ones are not very well supported, due to lack
+of general use, but the following are considered standard (see the GCC
+info page section "C Extensions" for more details - Yes, really the info
+page, the man page is only a short summary of the stuff in info).
+
+-  Inline functions
+
+-  Statement expressions (ie. the ({ and }) constructs).
+
+-  Declaring attributes of a function / variable / type
+   (__attribute__)
+
+-  typeof
+
+-  Zero length arrays
+
+-  Macro varargs
+
+-  Arithmetic on void pointers
+
+-  Non-Constant initializers
+
+-  Assembler Instructions (not outside arch/ and include/asm/)
+
+-  Function names as strings (__func__).
+
+-  __builtin_constant_p()
+
+Be wary when using long long in the kernel; the code gcc generates for
+it is horrible and, worse, division and multiplication do not work on
+i386 because the GCC runtime functions for it are missing from the
+kernel environment.
+
+C++
+---
+
+Using C++ in the kernel is usually a bad idea, because the kernel does
+not provide the necessary runtime environment and the include files are
+not tested for it. It is still possible, but not recommended. If you
+really want to do this, forget about exceptions at least.
+
+#if
+---
+
+It is generally considered cleaner to use macros in header files (or at
+the top of .c files) to abstract away functions rather than using \`#if'
+pre-processor statements throughout the source code.
+
+Putting Your Stuff in the Kernel
+================================
+
+In order to get your stuff into shape for official inclusion, or even to
+make a neat patch, there's administrative work to be done:
+
+-  Figure out whose pond you've been pissing in. Look at the top of the
+   source files, inside the ``MAINTAINERS`` file, and last of all in the
+   ``CREDITS`` file. You should coordinate with this person to make sure
+   you're not duplicating effort, or trying something that's already
+   been rejected.
+
+   Make sure you put your name and EMail address at the top of any files
+   you create or mangle significantly. This is the first place people
+   will look when they find a bug, or when **they** want to make a change.
+
+-  Usually you want a configuration option for your kernel hack. Edit
+   ``Kconfig`` in the appropriate directory. The Config language is
+   simple to use by cut and paste, and there's complete documentation in
+   ``Documentation/kbuild/kconfig-language.txt``.
+
+   In your description of the option, make sure you address both the
+   expert user and the user who knows nothing about your feature.
+   Mention incompatibilities and issues here. **Definitely** end your
+   description with “if in doubt, say N” (or, occasionally, \`Y'); this
+   is for people who have no idea what you are talking about.
+
+-  Edit the ``Makefile``: the CONFIG variables are exported here so you
+   can usually just add a "obj-$(CONFIG_xxx) += xxx.o" line. The syntax
+   is documented in ``Documentation/kbuild/makefiles.txt``.
+
+-  Put yourself in ``CREDITS`` if you've done something noteworthy,
+   usually beyond a single file (your name should be at the top of the
+   source files anyway). ``MAINTAINERS`` means you want to be consulted
+   when changes are made to a subsystem, and hear about bugs; it implies
+   a more-than-passing commitment to some part of the code.
+
+-  Finally, don't forget to read
+   ``Documentation/process/submitting-patches.rst`` and possibly
+   ``Documentation/process/submitting-drivers.rst``.
+
+Kernel Cantrips
+===============
+
+Some favorites from browsing the source. Feel free to add to this list.
+
+``arch/x86/include/asm/delay.h``::
+
+    #define ndelay(n) (__builtin_constant_p(n) ? \
+            ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
+            __ndelay(n))
+
+
+``include/linux/fs.h``::
+
+    /*
+     * Kernel pointers have redundant information, so we can use a
+     * scheme where we can return either an error code or a dentry
+     * pointer with the same return value.
+     *
+     * This should be a per-architecture thing, to allow different
+     * error and pointer decisions.
+     */
+     #define ERR_PTR(err)    ((void *)((long)(err)))
+     #define PTR_ERR(ptr)    ((long)(ptr))
+     #define IS_ERR(ptr)     ((unsigned long)(ptr) > (unsigned long)(-1000))
+
+``arch/x86/include/asm/uaccess_32.h:``::
+
+    #define copy_to_user(to,from,n)                         \
+            (__builtin_constant_p(n) ?                      \
+             __constant_copy_to_user((to),(from),(n)) :     \
+             __generic_copy_to_user((to),(from),(n)))
+
+
+``arch/sparc/kernel/head.S:``::
+
+    /*
+     * Sun people can't spell worth damn. "compatability" indeed.
+     * At least we *know* we can't spell, and use a spell-checker.
+     */
+
+    /* Uh, actually Linus it is I who cannot spell. Too much murky
+     * Sparc assembly will do this to ya.
+     */
+    C_LABEL(cputypvar):
+            .asciz "compatibility"
+
+    /* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
+            .align 4
+    C_LABEL(cputypvar_sun4m):
+            .asciz "compatible"
+
+
+``arch/sparc/lib/checksum.S:``::
+
+            /* Sun, you just can't beat me, you just can't.  Stop trying,
+             * give up.  I'm serious, I am going to kick the living shit
+             * out of you, game over, lights out.
+             */
+
+
+Thanks
+======
+
+Thanks to Andi Kleen for the idea, answering my questions, fixing my
+mistakes, filling content, etc. Philipp Rumpf for more spelling and
+clarity fixes, and some excellent non-obvious points. Werner Almesberger
+for giving me a great summary of :c:func:`disable_irq()`, and Jes
+Sorensen and Andrea Arcangeli added caveats. Michael Elizabeth Chastain
+for checking and adding to the Configure section. Telsa Gwynne for
+teaching me DocBook.
diff --git a/Documentation/kernel-hacking/index.rst b/Documentation/kernel-hacking/index.rst
new file mode 100644
index 0000000..fcb0eda
--- /dev/null
+++ b/Documentation/kernel-hacking/index.rst
@@ -0,0 +1,9 @@
+=====================
+Kernel Hacking Guides
+=====================
+
+.. toctree::
+   :maxdepth: 2
+
+   hacking
+   locking
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
new file mode 100644
index 0000000..f937c0f
--- /dev/null
+++ b/Documentation/kernel-hacking/locking.rst
@@ -0,0 +1,1446 @@
+===========================
+Unreliable Guide To Locking
+===========================
+
+:Author: Rusty Russell
+
+Introduction
+============
+
+Welcome to Rusty's Remarkably Unreliable Guide to Kernel Locking
+issues. This document describes the locking systems in the Linux Kernel
+in 2.6.
+
+With the wide availability of HyperThreading, and preemption in the
+Linux Kernel, everyone hacking on the kernel needs to know the
+fundamentals of concurrency and locking for SMP.
+
+The Problem With Concurrency
+============================
+
+(Skip this if you know what a Race Condition is).
+
+In a normal program, you can increment a counter like so:
+
+::
+
+          very_important_count++;
+
+
+This is what you would expect to happen:
+
+
+.. table:: Expected Results
+
+  +------------------------------------+------------------------------------+
+  | Instance 1                         | Instance 2                         |
+  +====================================+====================================+
+  | read very_important_count (5)      |                                    |
+  +------------------------------------+------------------------------------+
+  | add 1 (6)                          |                                    |
+  +------------------------------------+------------------------------------+
+  | write very_important_count (6)     |                                    |
+  +------------------------------------+------------------------------------+
+  |                                    | read very_important_count (6)      |
+  +------------------------------------+------------------------------------+
+  |                                    | add 1 (7)                          |
+  +------------------------------------+------------------------------------+
+  |                                    | write very_important_count (7)     |
+  +------------------------------------+------------------------------------+
+
+This is what might happen:
+
+.. table:: Possible Results
+
+  +------------------------------------+------------------------------------+
+  | Instance 1                         | Instance 2                         |
+  +====================================+====================================+
+  | read very_important_count (5)      |                                    |
+  +------------------------------------+------------------------------------+
+  |                                    | read very_important_count (5)      |
+  +------------------------------------+------------------------------------+
+  | add 1 (6)                          |                                    |
+  +------------------------------------+------------------------------------+
+  |                                    | add 1 (6)                          |
+  +------------------------------------+------------------------------------+
+  | write very_important_count (6)     |                                    |
+  +------------------------------------+------------------------------------+
+  |                                    | write very_important_count (6)     |
+  +------------------------------------+------------------------------------+
+
+
+Race Conditions and Critical Regions
+------------------------------------
+
+This overlap, where the result depends on the relative timing of
+multiple tasks, is called a race condition. The piece of code containing
+the concurrency issue is called a critical region. Especially since
+Linux started running on SMP machines, race conditions became one of
+the major issues in kernel design and implementation.
+
+Preemption can have the same effect, even if there is only one CPU: by
+preempting one task during the critical region, we have exactly the same
+race condition. In this case the thread which preempts might run the
+critical region itself.
+
+The solution is to recognize when these simultaneous accesses occur, and
+use locks to make sure that only one instance can enter the critical
+region at any time. There are many friendly primitives in the Linux
+kernel to help you do this. And then there are the unfriendly
+primitives, but I'll pretend they don't exist.
+
+Locking in the Linux Kernel
+===========================
+
+If I could give you one piece of advice: never sleep with anyone crazier
+than yourself. But if I had to give you advice on locking: **keep it
+simple**.
+
+Be reluctant to introduce new locks.
+
+Strangely enough, this last one is the exact reverse of my advice when
+you **have** slept with someone crazier than yourself. And you should
+think about getting a big dog.
+
+Two Main Types of Kernel Locks: Spinlocks and Mutexes
+-----------------------------------------------------
+
+There are two main types of kernel locks. The fundamental type is the
+spinlock (``include/asm/spinlock.h``), which is a very simple
+single-holder lock: if you can't get the spinlock, you keep trying
+(spinning) until you can. Spinlocks are very small and fast, and can be
+used anywhere.
+
+The second type is a mutex (``include/linux/mutex.h``): it is like a
+spinlock, but you may block holding a mutex. If you can't lock a mutex,
+your task will suspend itself, and be woken up when the mutex is
+released. This means the CPU can do something else while you are
+waiting. There are many cases when you simply can't sleep (see
+`What Functions Are Safe To Call From Interrupts? <#sleeping-things>`__),
+and so have to use a spinlock instead.
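+
+For illustration, here is a minimal sketch contrasting the two (the
+``counter_lock``/``config_lock`` names and the data they protect are made
+up for this example): the spinlock guards a counter that may be touched
+from atomic context, while the mutex guards data only touched from user
+context, where sleeping is allowed::
+
+    #include <linux/spinlock.h>
+    #include <linux/mutex.h>
+
+    static DEFINE_SPINLOCK(counter_lock);  /* may be taken in atomic context */
+    static DEFINE_MUTEX(config_lock);      /* only taken where sleeping is OK */
+    static unsigned long counter;
+    static int config_value;
+
+    void counter_bump(void)
+    {
+            spin_lock(&counter_lock);
+            counter++;
+            spin_unlock(&counter_lock);
+    }
+
+    void config_set(int val)
+    {
+            mutex_lock(&config_lock);
+            config_value = val;     /* sleeping is allowed while held */
+            mutex_unlock(&config_lock);
+    }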
+
+Neither type of lock is recursive: see
+`Deadlock: Simple and Advanced <#deadlock>`__.
+
+Locks and Uniprocessor Kernels
+------------------------------
+
+For kernels compiled without ``CONFIG_SMP``, and without
+``CONFIG_PREEMPT``, spinlocks do not exist at all. This is an excellent
+design decision: when no-one else can run at the same time, there is no
+reason to have a lock.
+
+If the kernel is compiled without ``CONFIG_SMP``, but ``CONFIG_PREEMPT``
+is set, then spinlocks simply disable preemption, which is sufficient to
+prevent any races. For most purposes, we can think of preemption as
+equivalent to SMP, and not worry about it separately.
+
+You should always test your locking code with ``CONFIG_SMP`` and
+``CONFIG_PREEMPT`` enabled, even if you don't have an SMP test box,
+because it will still catch some kinds of locking bugs.
+
+Mutexes still exist, because they are required for synchronization
+between user contexts, as we will see below.
+
+Locking Only In User Context
+----------------------------
+
+If you have a data structure which is only ever accessed from user
+context, then you can use a simple mutex (``include/linux/mutex.h``) to
+protect it. This is the most trivial case: you initialize the mutex.
+Then you can call :c:func:`mutex_lock_interruptible()` to grab the
+mutex, and :c:func:`mutex_unlock()` to release it. There is also a
+:c:func:`mutex_lock()`, which should be avoided, because it will
+not return if a signal is received.
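+
+A minimal sketch of that pattern (the ``registration_lock`` and
+:c:func:`do_registration()` names are invented for illustration)::
+
+    #include <linux/mutex.h>
+    #include <linux/errno.h>
+
+    static DEFINE_MUTEX(registration_lock);
+
+    int do_registration(void)
+    {
+            if (mutex_lock_interruptible(&registration_lock))
+                    return -ERESTARTSYS;    /* a signal arrived while waiting */
+
+            /* ... manipulate the protected data, possibly sleeping ... */
+
+            mutex_unlock(&registration_lock);
+            return 0;
+    }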
+
+Example: ``net/netfilter/nf_sockopt.c`` allows registration of new
+:c:func:`setsockopt()` and :c:func:`getsockopt()` calls, with
+:c:func:`nf_register_sockopt()`. Registration and de-registration
+are only done on module load and unload (and boot time, where there is
+no concurrency), and the list of registrations is only consulted for an
+unknown :c:func:`setsockopt()` or :c:func:`getsockopt()` system
+call. The ``nf_sockopt_mutex`` is perfect to protect this, especially
+since the setsockopt and getsockopt calls may well sleep.
+
+Locking Between User Context and Softirqs
+-----------------------------------------
+
+If a softirq shares data with user context, you have two problems.
+Firstly, the current user context can be interrupted by a softirq, and
+secondly, the critical region could be entered from another CPU. This is
+where :c:func:`spin_lock_bh()` (``include/linux/spinlock.h``) is
+used. It disables softirqs on that CPU, then grabs the lock.
+:c:func:`spin_unlock_bh()` does the reverse. (The '_bh' suffix is
+a historical reference to "Bottom Halves", the old name for software
+interrupts. It should really be called 'spin_lock_softirq()' in a
+perfect world).
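+
+A short sketch of the pattern (the ``queue_lock``/``queue`` names are made
+up): user context uses the ``_bh`` variant, while the softirq side can use
+plain :c:func:`spin_lock()`, since another softirq cannot preempt it on
+the same CPU::
+
+    #include <linux/spinlock.h>
+    #include <linux/list.h>
+
+    static DEFINE_SPINLOCK(queue_lock);
+    static LIST_HEAD(queue);
+
+    /* User context (e.g. a syscall): block softirqs, then take the lock. */
+    void queue_add(struct list_head *item)
+    {
+            spin_lock_bh(&queue_lock);
+            list_add_tail(item, &queue);
+            spin_unlock_bh(&queue_lock);
+    }
+
+    /* Softirq side: already in softirq context, plain spin_lock() suffices. */
+    void queue_drain(void)
+    {
+            spin_lock(&queue_lock);
+            while (!list_empty(&queue))
+                    list_del(queue.next);
+            spin_unlock(&queue_lock);
+    }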
+
+Note that you can also use :c:func:`spin_lock_irq()` or
+:c:func:`spin_lock_irqsave()` here, which stop hardware interrupts
+as well: see `Hard IRQ Context <#hardirq-context>`__.
+
+This works perfectly for UP as well: the spin lock vanishes, and this
+macro simply becomes :c:func:`local_bh_disable()`
+(``include/linux/interrupt.h``), which protects you from the softirq
+being run.
+
+Locking Between User Context and Tasklets
+-----------------------------------------
+
+This is exactly the same as above, because tasklets are actually run
+from a softirq.
+
+Locking Between User Context and Timers
+---------------------------------------
+
+This, too, is exactly the same as above, because timers are actually run
+from a softirq. From a locking point of view, tasklets and timers are
+identical.
+
+Locking Between Tasklets/Timers
+-------------------------------
+
+Sometimes a tasklet or timer might want to share data with another
+tasklet or timer.
+
+The Same Tasklet/Timer
+~~~~~~~~~~~~~~~~~~~~~~
+
+Since a tasklet is never run on two CPUs at once, you don't need to
+worry about your tasklet being reentrant (running twice at once), even
+on SMP.
+
+Different Tasklets/Timers
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If another tasklet/timer wants to share data with your tasklet or
+timer, you will both need to use :c:func:`spin_lock()` and
+:c:func:`spin_unlock()` calls. :c:func:`spin_lock_bh()` is
+unnecessary here, as you are already in a tasklet, and none will be run
+on the same CPU.
+
+Locking Between Softirqs
+------------------------
+
+Often a softirq might want to share data with itself or a tasklet/timer.
+
+The Same Softirq
+~~~~~~~~~~~~~~~~
+
+The same softirq can run on the other CPUs: you can use a per-CPU array
+(see `Per-CPU Data <#per-cpu>`__) for better performance. If you're
+going so far as to use a softirq, you probably care about scalable
+performance enough to justify the extra complexity.
+
+You'll need to use :c:func:`spin_lock()` and
+:c:func:`spin_unlock()` for shared data.
+
+Different Softirqs
+~~~~~~~~~~~~~~~~~~
+
+You'll need to use :c:func:`spin_lock()` and
+:c:func:`spin_unlock()` for shared data, whether it be a timer,
+tasklet, different softirq or the same or another softirq: any of them
+could be running on a different CPU.
+
+Hard IRQ Context
+================
+
+Hardware interrupts usually communicate with a tasklet or softirq.
+Frequently this involves putting work in a queue, which the softirq will
+take out.
+
+Locking Between Hard IRQ and Softirqs/Tasklets
+----------------------------------------------
+
+If a hardware irq handler shares data with a softirq, you have two
+concerns. Firstly, the softirq processing can be interrupted by a
+hardware interrupt, and secondly, the critical region could be entered
+by a hardware interrupt on another CPU. This is where
+:c:func:`spin_lock_irq()` is used. It is defined to disable
+interrupts on that cpu, then grab the lock.
+:c:func:`spin_unlock_irq()` does the reverse.
+
+The irq handler does not need to use :c:func:`spin_lock_irq()`, because
+the softirq cannot run while the irq handler is running: it can use
+:c:func:`spin_lock()`, which is slightly faster. The only exception
+would be if a different hardware irq handler uses the same lock:
+:c:func:`spin_lock_irq()` will stop that from interrupting us.
+
+This works perfectly for UP as well: the spin lock vanishes, and this
+macro simply becomes :c:func:`local_irq_disable()`
+(``include/asm/smp.h``), which protects you from the softirq/tasklet/BH
+being run.
+
+:c:func:`spin_lock_irqsave()` (``include/linux/spinlock.h``) is a
+variant which saves whether interrupts were on or off in a flags word,
+which is passed to :c:func:`spin_unlock_irqrestore()`. This means
+that the same code can be used inside a hard irq handler (where
+interrupts are already off) and in softirqs (where the irq disabling is
+required).
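+
+A brief sketch of the flags idiom (the ``dev_lock`` name is hypothetical);
+the same function is then safe whether or not interrupts were already
+disabled by the caller::
+
+    #include <linux/spinlock.h>
+
+    static DEFINE_SPINLOCK(dev_lock);
+
+    void dev_update(void)
+    {
+            unsigned long flags;
+
+            spin_lock_irqsave(&dev_lock, flags);      /* save + disable irqs */
+            /* ... touch data shared with the hard irq handler ... */
+            spin_unlock_irqrestore(&dev_lock, flags); /* restore saved state */
+    }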
+
+Note that softirqs (and hence tasklets and timers) are run on return
+from hardware interrupts, so :c:func:`spin_lock_irq()` also stops
+these. In that sense, :c:func:`spin_lock_irqsave()` is the most
+general and powerful locking function.
+
+Locking Between Two Hard IRQ Handlers
+-------------------------------------
+
+It is rare to have to share data between two IRQ handlers, but if you
+do, :c:func:`spin_lock_irqsave()` should be used: it is
+architecture-specific whether all interrupts are disabled inside irq
+handlers themselves.
+
+Cheat Sheet For Locking
+=======================
+
+Pete Zaitcev gives the following summary:
+
+-  If you are in a process context (any syscall) and want to lock other
+   processes out, use a mutex. You can take a mutex and sleep
+   (``copy_from_user*()`` or ``kmalloc(x,GFP_KERNEL)``).
+
+-  Otherwise (== data can be touched in an interrupt), use
+   :c:func:`spin_lock_irqsave()` and
+   :c:func:`spin_unlock_irqrestore()`.
+
+-  Avoid holding spinlock for more than 5 lines of code and across any
+   function call (except accessors like :c:func:`readb()`).
+
+Table of Minimum Requirements
+-----------------------------
+
+The following table lists the **minimum** locking requirements between
+various contexts. In some cases, the same context can only be running on
+one CPU at a time, so no locking is required for that context (eg. a
+particular thread can only run on one CPU at a time, but if it needs to
+share data with another thread, locking is required).
+
+Remember the advice above: you can always use
+:c:func:`spin_lock_irqsave()`, which is a superset of all other
+spinlock primitives.
+
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+.              IRQ Handler A IRQ Handler B Softirq A Softirq B Tasklet A Tasklet B Timer A Timer B User Context A User Context B
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+IRQ Handler A  None
+IRQ Handler B  SLIS          None
+Softirq A      SLI           SLI           SL
+Softirq B      SLI           SLI           SL        SL
+Tasklet A      SLI           SLI           SL        SL        None
+Tasklet B      SLI           SLI           SL        SL        SL        None
+Timer A        SLI           SLI           SL        SL        SL        SL        None
+Timer B        SLI           SLI           SL        SL        SL        SL        SL      None
+User Context A SLI           SLI           SLBH      SLBH      SLBH      SLBH      SLBH    SLBH    None
+User Context B SLI           SLI           SLBH      SLBH      SLBH      SLBH      SLBH    SLBH    MLI            None
+============== ============= ============= ========= ========= ========= ========= ======= ======= ============== ==============
+
+Table: Table of Locking Requirements
+
++--------+----------------------------+
+| SLIS   | spin_lock_irqsave          |
++--------+----------------------------+
+| SLI    | spin_lock_irq              |
++--------+----------------------------+
+| SL     | spin_lock                  |
++--------+----------------------------+
+| SLBH   | spin_lock_bh               |
++--------+----------------------------+
+| MLI    | mutex_lock_interruptible   |
++--------+----------------------------+
+
+Table: Legend for Locking Requirements Table
+
+The trylock Functions
+=====================
+
+There are functions that try to acquire a lock only once and immediately
+return a value telling about success or failure to acquire the lock.
+They can be used if you need no access to the data protected with the
+lock when some other thread is holding the lock. You should acquire the
+lock later if you then need access to the data protected with the lock.
+
+:c:func:`spin_trylock()` does not spin but returns non-zero if it
+acquires the spinlock on the first try or 0 if not. This function can be
+used in all contexts like :c:func:`spin_lock()`: you must have
+disabled the contexts that might interrupt you and acquire the spin
+lock.
+
+:c:func:`mutex_trylock()` does not suspend your task but returns
+non-zero if it could lock the mutex on the first try or 0 if not. This
+function cannot be safely used in hardware or software interrupt
+contexts despite not sleeping.
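+
+For example (a sketch with a hypothetical ``stats_lock``), an update can
+simply be skipped when the lock is contended rather than spinning for it::
+
+    #include <linux/spinlock.h>
+
+    static DEFINE_SPINLOCK(stats_lock);
+    static unsigned long stats_dropped;
+
+    void stats_try_update(unsigned long value)
+    {
+            if (!spin_trylock(&stats_lock))
+                    return;         /* contended: give up rather than spin */
+
+            stats_dropped += value;
+            spin_unlock(&stats_lock);
+    }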
+
+Common Examples
+===============
+
+Let's step through a simple example: a cache of number to name mappings.
+The cache keeps a count of how often each of the objects is used, and
+when it gets full, throws out the least used one.
+
+All In User Context
+-------------------
+
+For our first example, we assume that all operations are in user context
+(ie. from system calls), so we can sleep. This means we can use a mutex
+to protect the cache and all the objects within it. Here's the code::
+
+    #include <linux/list.h>
+    #include <linux/slab.h>
+    #include <linux/string.h>
+    #include <linux/mutex.h>
+    #include <asm/errno.h>
+
+    struct object
+    {
+            struct list_head list;
+            int id;
+            char name[32];
+            int popularity;
+    };
+
+    /* Protects the cache, cache_num, and the objects within it */
+    static DEFINE_MUTEX(cache_lock);
+    static LIST_HEAD(cache);
+    static unsigned int cache_num = 0;
+    #define MAX_CACHE_SIZE 10
+
+    /* Must be holding cache_lock */
+    static struct object *__cache_find(int id)
+    {
+            struct object *i;
+
+            list_for_each_entry(i, &cache, list)
+                    if (i->id == id) {
+                            i->popularity++;
+                            return i;
+                    }
+            return NULL;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_delete(struct object *obj)
+    {
+            BUG_ON(!obj);
+            list_del(&obj->list);
+            kfree(obj);
+            cache_num--;
+    }
+
+    /* Must be holding cache_lock */
+    static void __cache_add(struct object *obj)
+    {
+            list_add(&obj->list, &cache);
+            if (++cache_num > MAX_CACHE_SIZE) {
+                    struct object *i, *outcast = NULL;
+                    list_for_each_entry(i, &cache, list) {
+                            if (!outcast || i->popularity < outcast->popularity)
+                                    outcast = i;
+                    }
+                    __cache_delete(outcast);
+            }
+    }
+
+    int cache_add(int id, const char *name)
+    {
+            struct object *obj;
+
+            if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+                    return -ENOMEM;
+
+            strlcpy(obj->name, name, sizeof(obj->name));
+            obj->id = id;
+            obj->popularity = 0;
+
+            mutex_lock(&cache_lock);
+            __cache_add(obj);
+            mutex_unlock(&cache_lock);
+            return 0;
+    }
+
+    void cache_delete(int id)
+    {
+            mutex_lock(&cache_lock);
+            __cache_delete(__cache_find(id));
+            mutex_unlock(&cache_lock);
+    }
+
+    int cache_find(int id, char *name)
+    {
+            struct object *obj;
+            int ret = -ENOENT;
+
+            mutex_lock(&cache_lock);
+            obj = __cache_find(id);
+            if (obj) {
+                    ret = 0;
+                    strcpy(name, obj->name);
+            }
+            mutex_unlock(&cache_lock);
+            return ret;
+    }
+
+Note that we always make sure we have the cache_lock when we add,
+delete, or look up the cache: both the cache infrastructure itself and
+the contents of the objects are protected by the lock. In this case it's
+easy, since we copy the data for the user, and never let them access the
+objects directly.
+
+There is a slight (and common) optimization here: in
+:c:func:`cache_add()` we set up the fields of the object before
+grabbing the lock. This is safe, as no-one else can access it until we
+put it in cache.
+
+Accessing From Interrupt Context
+--------------------------------
+
+Now consider the case where :c:func:`cache_find()` can be called
+from interrupt context: either a hardware interrupt or a softirq. An
+example would be a timer which deletes object from the cache.
+
+The change is shown below, in standard patch format: the ``-`` are lines
+which are taken away, and the ``+`` are lines which are added.
+
+::
+
+    --- cache.c.usercontext 2003-12-09 13:58:54.000000000 +1100
+    +++ cache.c.interrupt   2003-12-09 14:07:49.000000000 +1100
+    @@ -12,7 +12,7 @@
+             int popularity;
+     };
+
+    -static DEFINE_MUTEX(cache_lock);
+    +static DEFINE_SPINLOCK(cache_lock);
+     static LIST_HEAD(cache);
+     static unsigned int cache_num = 0;
+     #define MAX_CACHE_SIZE 10
+    @@ -55,6 +55,7 @@
+     int cache_add(int id, const char *name)
+     {
+             struct object *obj;
+    +        unsigned long flags;
+
+             if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
+                     return -ENOMEM;
+    @@ -63,30 +64,33 @@
+             obj->id = id;
+             obj->popularity = 0;
+
+    -        mutex_lock(&cache_lock);
+    +        spin_lock_irqsave(&cache_lock, flags);
+             __cache_add(obj);
+    -        mutex_unlock(&cache_lock);
+    +        spin_unlock_irqrestore(&cache_lock, flags);
+             return 0;
+     }
+
+     void cache_delete(int id)
+     {
+    -        mutex_lock(&cache_lock);
+    +        unsigned long flags;
+    +
+    +        spin_lock_irqsave(&cache_lock, flags);
+             __cache_delete(__cache_find(id));
+    -        mutex_unlock(&cache_lock);
+    +        spin_unlock_irqrestore(&cache_lock, flags);
+     }
+
+     int cache_find(int id, char *name)
+     {
+             struct object *obj;
+             int ret = -ENOENT;
+    +        unsigned long flags;
+
+    -        mutex_lock(&cache_lock);
+    +        spin_lock_irqsave(&cache_lock, flags);
+             obj = __cache_find(id);
+             if (obj) {
+                     ret = 0;
+                     strcpy(name, obj->name);
+             }
+    -        mutex_unlock(&cache_lock);
+    +        spin_unlock_irqrestore(&cache_lock, flags);
+             return ret;
+     }
+
+Note that :c:func:`spin_lock_irqsave()` will turn off interrupts if
+they are on; otherwise (if we are already in an interrupt handler) it
+does nothing. Hence these functions are safe to call from any
+context.
+
+Unfortunately, :c:func:`cache_add()` calls :c:func:`kmalloc()`
+with the ``GFP_KERNEL`` flag, which is only legal in user context. I
+have assumed that :c:func:`cache_add()` is still only called in
+user context, otherwise this should become a parameter to
+:c:func:`cache_add()`.
+
+Exposing Objects Outside This File
+----------------------------------
+
+If our objects contained more information, it might not be sufficient to
+copy the information in and out: other parts of the code might want to
+keep pointers to these objects, for example, rather than looking up the
+id every time. This produces two problems.
+
+The first problem is that we use the ``cache_lock`` to protect objects:
+we'd need to make this non-static so the rest of the code can use it.
+This makes locking trickier, as it is no longer all in one place.
+
+The second problem is the lifetime problem: if another structure keeps a
+pointer to an object, it presumably expects that pointer to remain
+valid. Unfortunately, this is only guaranteed while you hold the lock,
+otherwise someone might call :c:func:`cache_delete()` and even
+worse, add another object, re-using the same address.
+
+As there is only one lock, you can't hold it forever: no-one else would
+get any work done.
+
+The solution to this problem is to use a reference count: everyone who
+has a pointer to the object increases it when they first get the object,
+and drops the reference count when they're finished with it. Whoever
+drops it to zero knows it is unused, and can actually delete it.
+
+Here is the code::
+
+    --- cache.c.interrupt   2003-12-09 14:25:43.000000000 +1100
+    +++ cache.c.refcnt  2003-12-09 14:33:05.000000000 +1100
+    @@ -7,6 +7,7 @@
+     struct object
+     {
+             struct list_head list;
+    +        unsigned int refcnt;
+             int id;
+             char name[32];
+             int popularity;
+    @@ -17,6 +18,35 @@
+     static unsigned int cache_num = 0;
+     #define MAX_CACHE_SIZE 10
+
+    +static void __object_put(struct object *obj)
+    +{
+    +        if (--obj->refcnt == 0)
+    +                kfree(obj);
+    +}
+    +
+    +static void __object_get(struct object *obj)
+    +{
+    +        obj->refcnt++;
+    +}
+    +
+    +void object_put(struct object *obj)
+    +{
+    +        unsigned long flags;
+    +
+    +        spin_lock_irqsave(&cache_lock, flags);
+    +        __object_put(obj);
+    +        spin_unlock_irqrestore(&cache_lock, flags);
+    +}
+    +
+    +void object_get(struct object *obj)
+    +{
+    +        unsigned long flags;
+    +
+    +        spin_lock_irqsave(&cache_lock, flags);
+    +        __object_get(obj);
+    +        spin_unlock_irqrestore(&cache_lock, flags);
+    +}
+    +
+     /* Must be holding cache_lock */
+     static struct object *__cache_find(int id)
+     {
+    @@ -35,6 +65,7 @@
+     {
+             BUG_ON(!obj);
+             list_del(&obj->list);
+    +        __object_put(obj);
+             cache_num--;
+     }
+
+    @@ -63,6 +94,7 @@
+             strlcpy(obj->name, name, sizeof(obj->name));
+             obj->id = id;
+             obj->popularity = 0;
+    +        obj->refcnt = 1; /* The cache holds a reference */
+
+             spin_lock_irqsave(&cache_lock, flags);
+             __cache_add(obj);
+    @@ -79,18 +111,15 @@
+             spin_unlock_irqrestore(&cache_lock, flags);
+     }
+
+    -int cache_find(int id, char *name)
+    +struct object *cache_find(int id)
+     {
+             struct object *obj;
+    -        int ret = -ENOENT;
+             unsigned long flags;
+
+             spin_lock_irqsave(&cache_lock, flags);
+             obj = __cache_find(id);
+    -        if (obj) {
+    -                ret = 0;
+    -                strcpy(name, obj->name);
+    -        }
+    +        if (obj)
+    +                __object_get(obj);
+             spin_unlock_irqrestore(&cache_lock, flags);
+    -        return ret;
+    +        return obj;
+     }
+
+We encapsulate the reference counting in the standard 'get' and 'put'
+functions. Now we can return the object itself from
+:c:func:`cache_find()` which has the advantage that the user can
+now sleep holding the object (eg. to :c:func:`copy_to_user()` the
+name to userspace).
+
+The other point to note is that I said a reference should be held for
+every pointer to the object: thus the reference count is 1 when first
+inserted into the cache. In some versions the framework does not hold a
+reference count, but they are more complicated.
+
+Using Atomic Operations For The Reference Count
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In practice, :c:type:`atomic_t` would usually be used for refcnt. There are a
+number of atomic operations defined in ``include/asm/atomic.h``: these
+are guaranteed to be seen atomically from all CPUs in the system, so no
+lock is required. In this case, it is simpler than using spinlocks,
+although for anything non-trivial using spinlocks is clearer. The
+:c:func:`atomic_inc()` and :c:func:`atomic_dec_and_test()`
+are used instead of the standard increment and decrement operators, and
+the lock is no longer used to protect the reference count itself.
+
+::
+
+    --- cache.c.refcnt  2003-12-09 15:00:35.000000000 +1100
+    +++ cache.c.refcnt-atomic   2003-12-11 15:49:42.000000000 +1100
+    @@ -7,7 +7,7 @@
+     struct object
+     {
+             struct list_head list;
+    -        unsigned int refcnt;
+    +        atomic_t refcnt;
+             int id;
+             char name[32];
+             int popularity;
+    @@ -18,33 +18,15 @@
+     static unsigned int cache_num = 0;
+     #define MAX_CACHE_SIZE 10
+
+    -static void __object_put(struct object *obj)
+    -{
+    -        if (--obj->refcnt == 0)
+    -                kfree(obj);
+    -}
+    -
+    -static void __object_get(struct object *obj)
+    -{
+    -        obj->refcnt++;
+    -}
+    -
+     void object_put(struct object *obj)
+     {
+    -        unsigned long flags;
+    -
+    -        spin_lock_irqsave(&cache_lock, flags);
+    -        __object_put(obj);
+    -        spin_unlock_irqrestore(&cache_lock, flags);
+    +        if (atomic_dec_and_test(&obj->refcnt))
+    +                kfree(obj);
+     }
+
+     void object_get(struct object *obj)
+     {
+    -        unsigned long flags;
+    -
+    -        spin_lock_irqsave(&cache_lock, flags);
+    -        __object_get(obj);
+    -        spin_unlock_irqrestore(&cache_lock, flags);
+    +        atomic_inc(&obj->refcnt);
+     }
+
+     /* Must be holding cache_lock */
+    @@ -65,7 +47,7 @@
+     {
+             BUG_ON(!obj);
+             list_del(&obj->list);
+    -        __object_put(obj);
+    +        object_put(obj);
+             cache_num--;
+     }
+
+    @@ -94,7 +76,7 @@
+             strlcpy(obj->name, name, sizeof(obj->name));
+             obj->id = id;
+             obj->popularity = 0;
+    -        obj->refcnt = 1; /* The cache holds a reference */
+    +        atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+
+             spin_lock_irqsave(&cache_lock, flags);
+             __cache_add(obj);
+    @@ -119,7 +101,7 @@
+             spin_lock_irqsave(&cache_lock, flags);
+             obj = __cache_find(id);
+             if (obj)
+    -                __object_get(obj);
+    +                object_get(obj);
+             spin_unlock_irqrestore(&cache_lock, flags);
+             return obj;
+     }
+
+Protecting The Objects Themselves
+---------------------------------
+
+In these examples, we assumed that the objects (except the reference
+counts) never changed once they are created. If we wanted to allow the
+name to change, there are three possibilities:
+
+-  You can make ``cache_lock`` non-static, and tell people to grab that
+   lock before changing the name in any object.
+
+-  You can provide a :c:func:`cache_obj_rename()` which grabs this
+   lock and changes the name for the caller, and tell everyone to use
+   that function.
+
+-  You can make the ``cache_lock`` protect only the cache itself, and
+   use another lock to protect the name.
+
+Theoretically, you can make the locks as fine-grained as one lock for
+every field, for every object. In practice, the most common variants
+are:
+
+-  One lock which protects the infrastructure (the ``cache`` list in
+   this example) and all the objects. This is what we have done so far.
+
+-  One lock which protects the infrastructure (including the list
+   pointers inside the objects), and one lock inside the object which
+   protects the rest of that object.
+
+-  Multiple locks to protect the infrastructure (eg. one lock per hash
+   chain), possibly with a separate per-object lock.
+
+Here is the "lock-per-object" implementation:
+
+::
+
+    --- cache.c.refcnt-atomic   2003-12-11 15:50:54.000000000 +1100
+    +++ cache.c.perobjectlock   2003-12-11 17:15:03.000000000 +1100
+    @@ -6,11 +6,17 @@
+
+     struct object
+     {
+    +        /* These two protected by cache_lock. */
+             struct list_head list;
+    +        int popularity;
+    +
+             atomic_t refcnt;
+    +
+    +        /* Doesn't change once created. */
+             int id;
+    +
+    +        spinlock_t lock; /* Protects the name */
+             char name[32];
+    -        int popularity;
+     };
+
+     static DEFINE_SPINLOCK(cache_lock);
+    @@ -77,6 +84,7 @@
+             obj->id = id;
+             obj->popularity = 0;
+             atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
+    +        spin_lock_init(&obj->lock);
+
+             spin_lock_irqsave(&cache_lock, flags);
+             __cache_add(obj);
+
+Note that I decide that the popularity count should be protected by the
+``cache_lock`` rather than the per-object lock: this is because it (like
+the :c:type:`struct list_head <list_head>` inside the object)
+is logically part of the infrastructure. This way, I don't need to grab
+the lock of every object in :c:func:`__cache_add()` when seeking
+the least popular.
+
+I also decided that the id member is unchangeable, so I don't need to
+grab each object lock in :c:func:`__cache_find()` to examine the
+id: the object lock is only used by a caller who wants to read or write
+the name field.
+
+Note also that I added a comment describing what data was protected by
+which locks. This is extremely important, as it describes the runtime
+behavior of the code, and can be hard to gain from just reading. And as
+Alan Cox says, “Lock data, not code”.
+
+Common Problems
+===============
+
+Deadlock: Simple and Advanced
+-----------------------------
+
+There is a coding bug where a piece of code tries to grab a spinlock
+twice: it will spin forever, waiting for the lock to be released
+(spinlocks, rwlocks and mutexes are not recursive in Linux). This is
+trivial to diagnose: not a
+stay-up-five-nights-talk-to-fluffy-code-bunnies kind of problem.
+
+For a slightly more complex case, imagine you have a region shared by a
+softirq and user context. If you use a :c:func:`spin_lock()` call
+to protect it, it is possible that the user context will be interrupted
+by the softirq while it holds the lock, and the softirq will then spin
+forever trying to get the same lock.
+
+Both of these are called deadlock, and as shown above, it can occur even
+with a single CPU (although not on UP compiles, since spinlocks vanish
+on kernel compiles with ``CONFIG_SMP``\ =n. You'll still get data
+corruption in the second example).
+
+This complete lockup is easy to diagnose: on SMP boxes the watchdog
+timer or compiling with ``DEBUG_SPINLOCK`` set
+(``include/linux/spinlock.h``) will show this up immediately when it
+happens.
+
+A more complex problem is the so-called 'deadly embrace', involving two
+or more locks. Say you have a hash table: each entry in the table is a
+spinlock, and a chain of hashed objects. Inside a softirq handler, you
+sometimes want to alter an object from one place in the hash to another:
+you grab the spinlock of the old hash chain and the spinlock of the new
+hash chain, and delete the object from the old one, and insert it in the
+new one.
+
+There are two problems here. First, if your code ever tries to move the
+object to the same chain, it will deadlock with itself as it tries to
+lock it twice. Secondly, if the same softirq on another CPU is trying to
+move another object in the reverse direction, the following could
+happen:
+
++-----------------------+-----------------------+
+| CPU 1                 | CPU 2                 |
++=======================+=======================+
+| Grab lock A -> OK     | Grab lock B -> OK     |
++-----------------------+-----------------------+
+| Grab lock B -> spin   | Grab lock A -> spin   |
++-----------------------+-----------------------+
+
+Table: Consequences
+
+The two CPUs will spin forever, waiting for the other to give up their
+lock. It will look, smell, and feel like a crash.
+
+Preventing Deadlock
+-------------------
+
+Textbooks will tell you that if you always lock in the same order, you
+will never get this kind of deadlock. Practice will tell you that this
+approach doesn't scale: when I create a new lock, I don't understand
+enough of the kernel to figure out where in the 5000 lock hierarchy it
+will fit.
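+
+For completeness, a hedged sketch of the textbook rule for the hash-chain
+case above: when two locks of the same kind must be taken, take them in a
+fixed order (here, by address), so two CPUs moving objects in opposite
+directions cannot deadlock (the :c:func:`double_lock()` helper is made up;
+a real kernel version would also need a lockdep nesting annotation such as
+:c:func:`spin_lock_nested()`)::
+
+    #include <linux/spinlock.h>
+
+    static void double_lock(spinlock_t *a, spinlock_t *b)
+    {
+            if (a == b) {
+                    spin_lock(a);           /* same chain: only one lock */
+            } else if (a < b) {
+                    spin_lock(a);
+                    spin_lock(b);
+            } else {
+                    spin_lock(b);
+                    spin_lock(a);
+            }
+    }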
+
+The best locks are encapsulated: they never get exposed in headers, and
+are never held around calls to non-trivial functions outside the same
+file. You can read through this code and see that it will never
+deadlock, because it never tries to grab another lock while it has that
+one. People using your code don't even need to know you are using a
+lock.
+
+A classic problem here is when you provide callbacks or hooks: if you
+call these with the lock held, you risk simple deadlock, or a deadly
+embrace (who knows what the callback will do?). Remember, the other
+programmers are out to get you, so don't do this.
+
+Overzealous Prevention Of Deadlocks
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Deadlocks are problematic, but not as bad as data corruption. Code which
+grabs a read lock, searches a list, fails to find what it wants, drops
+the read lock, grabs a write lock and inserts the object has a race
+condition.
+
+If you don't see why, please stay the fuck away from my code.
+
+Racing Timers: A Kernel Pastime
+-------------------------------
+
+Timers can produce their own special problems with races. Consider a
+collection of objects (list, hash, etc) where each object has a timer
+which is due to destroy it.
+
+If you want to destroy the entire collection (say on module removal),
+you might do the following::
+
+            /* THIS CODE BAD BAD BAD BAD: IF IT WAS ANY WORSE IT WOULD USE
+               HUNGARIAN NOTATION */
+            spin_lock_bh(&list_lock);
+
+            while (list) {
+                    struct foo *next = list->next;
+                    del_timer(&list->timer);
+                    kfree(list);
+                    list = next;
+            }
+
+            spin_unlock_bh(&list_lock);
+
+
+Sooner or later, this will crash on SMP, because a timer can have just
+gone off before the :c:func:`spin_lock_bh()`, and it will only get
+the lock after we :c:func:`spin_unlock_bh()`, and then try to free
+the element (which has already been freed!).
+
+This can be avoided by checking the result of
+:c:func:`del_timer()`: if it returns 1, the timer has been deleted.
+If 0, it means (in this case) that it is currently running, so we can
+do::
+
+            retry:
+                    spin_lock_bh(&list_lock);
+
+                    while (list) {
+                            struct foo *next = list->next;
+                            if (!del_timer(&list->timer)) {
+                                    /* Give timer a chance to delete this */
+                                    spin_unlock_bh(&list_lock);
+                                    goto retry;
+                            }
+                            kfree(list);
+                            list = next;
+                    }
+
+                    spin_unlock_bh(&list_lock);
+
+
+Another common problem is deleting timers which restart themselves (by
+calling :c:func:`add_timer()` at the end of their timer function).
+Because this is a fairly common case which is prone to races, you should
+use :c:func:`del_timer_sync()` (``include/linux/timer.h``) to
+handle this case. It returns the number of times the timer had to be
+deleted before we finally stopped it from adding itself back in.
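+
+A sketch of shutting down such a self-rearming timer (the ``struct foo``
+layout is hypothetical); note that :c:func:`del_timer_sync()` must not be
+called while holding a lock that the timer handler itself takes, or the
+two will deadlock::
+
+    #include <linux/timer.h>
+
+    struct foo {
+            struct timer_list timer;    /* handler re-adds itself */
+            /* ... */
+    };
+
+    void foo_shutdown(struct foo *f)
+    {
+            /* Waits for a running handler and keeps deleting until the
+             * timer is no longer pending, even if it re-arms itself. */
+            del_timer_sync(&f->timer);
+    }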
+
+Locking Speed
+=============
+
+There are three main things to worry about when considering speed of
+some code which does locking. First is concurrency: how many things are
+going to be waiting while someone else is holding a lock. Second is the
+time taken to actually acquire and release an uncontended lock. Third is
+using fewer, or smarter locks. I'm assuming that the lock is used fairly
+often: otherwise, you wouldn't be concerned about efficiency.
+
+Concurrency depends on how long the lock is usually held: you should
+hold the lock for as long as needed, but no longer. In the cache
+example, we always create the object without the lock held, and then
+grab the lock only when we are ready to insert it in the list.
+
+Acquisition times depend on how much damage the lock operations do to
+the pipeline (pipeline stalls) and how likely it is that this CPU was
+the last one to grab the lock (ie. is the lock cache-hot for this CPU):
+on a machine with more CPUs, this likelihood drops fast. Consider a
+700MHz Intel Pentium III: an instruction takes about 0.7ns, an atomic
+increment takes about 58ns, a lock which is cache-hot on this CPU takes
+160ns, and a cacheline transfer from another CPU takes an additional 170
+to 360ns. (These figures from Paul McKenney's `Linux Journal RCU
+article <http://www.linuxjournal.com/article.php?sid=6993>`__).
+
+These two aims conflict: holding a lock for a short time might be done
+by splitting locks into parts (such as in our final per-object-lock
+example), but this increases the number of lock acquisitions, and the
+results are often slower than having a single lock. This is another
+reason to advocate locking simplicity.
+
+The third concern is addressed below: there are some methods to reduce
+the amount of locking which needs to be done.
+
+Read/Write Lock Variants
+------------------------
+
+Both spinlocks and mutexes have read/write variants: ``rwlock_t`` and
+:c:type:`struct rw_semaphore <rw_semaphore>`. These divide
+users into two classes: the readers and the writers. If you are only
+reading the data, you can get a read lock, but to write to the data you
+need the write lock. Many people can hold a read lock, but a writer must
+be sole holder.
+
+If your code divides neatly along reader/writer lines (as our cache code
+does), and the lock is held by readers for significant lengths of time,
+using these locks can help. They are slightly slower than the normal
+locks though, so in practice ``rwlock_t`` is not usually worthwhile.
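+
+A minimal ``rwlock_t`` sketch (the ``table_lock``/``table_size`` names are
+invented): many readers may hold the lock at once, while a writer excludes
+everyone::
+
+    #include <linux/spinlock.h>
+
+    static DEFINE_RWLOCK(table_lock);
+    static int table_size;
+
+    int table_get_size(void)
+    {
+            int size;
+
+            read_lock(&table_lock);     /* shared with other readers */
+            size = table_size;
+            read_unlock(&table_lock);
+            return size;
+    }
+
+    void table_set_size(int size)
+    {
+            write_lock(&table_lock);    /* exclusive */
+            table_size = size;
+            write_unlock(&table_lock);
+    }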
+
+Avoiding Locks: Read Copy Update
+--------------------------------
+
+There is a special method of read/write locking called Read Copy Update.
+Using RCU, the readers can avoid taking a lock altogether: as we expect
+our cache to be read more often than updated (otherwise the cache is a
+waste of time), it is a candidate for this optimization.
+
+How do we get rid of read locks? Getting rid of read locks means that
+writers may be changing the list underneath the readers. That is
+actually quite simple: we can read a linked list while an element is
+being added if the writer adds the element very carefully. For example,
+adding ``new`` to a single linked list called ``list``::
+
+            new->next = list->next;
+            wmb();
+            list->next = new;
+
+
+The :c:func:`wmb()` is a write memory barrier. It ensures that the
+first operation (setting the new element's ``next`` pointer) is complete
+and will be seen by all CPUs, before the second operation is (putting
+the new element into the list). This is important, since modern
+compilers and modern CPUs can both reorder instructions unless told
+otherwise: we want a reader to either not see the new element at all, or
+see the new element with the ``next`` pointer correctly pointing at the
+rest of the list.
+
+Fortunately, there is a function to do this for standard
+:c:type:`struct list_head <list_head>` lists:
+:c:func:`list_add_rcu()` (``include/linux/list.h``).
+
+Removing an element from the list is even simpler: we replace the
+pointer to the old element with a pointer to its successor, and readers
+will either see it, or skip over it.
+
+::
+
+            list->next = old->next;
+
+
+There is :c:func:`list_del_rcu()` (``include/linux/list.h``) which
+does this (the normal version poisons the old object, which we don't
+want).
+
+The reader must also be careful: some CPUs can look through the ``next``
+pointer to start reading the contents of the next element early, but
+don't realize that the pre-fetched contents is wrong when the ``next``
+pointer changes underneath them. Once again, there is a
+:c:func:`list_for_each_entry_rcu()` (``include/linux/list.h``)
+to help you. Of course, writers can just use
+:c:func:`list_for_each_entry()`, since there cannot be two
+simultaneous writers.
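+
+Putting the reader-side pieces together, a sketch (with a made-up
+``struct thing`` list) looks like this::
+
+    #include <linux/rcupdate.h>
+    #include <linux/list.h>
+    #include <linux/types.h>
+
+    struct thing {
+            struct list_head list;
+            int id;
+    };
+
+    static LIST_HEAD(things);
+
+    bool thing_exists(int id)
+    {
+            struct thing *t;
+            bool found = false;
+
+            rcu_read_lock();        /* readers never block */
+            list_for_each_entry_rcu(t, &things, list) {
+                    if (t->id == id) {
+                            found = true;
+                            break;
+                    }
+            }
+            rcu_read_unlock();
+            return found;
+    }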
+
+Our final dilemma is this: when can we actually destroy the removed
+element? Remember, a reader might be stepping through this element in
+the list right now: if we free this element and the ``next`` pointer
+changes, the reader will jump off into garbage and crash. We need to
+wait until we know that all the readers who were traversing the list
+when we deleted the element are finished. We use
+:c:func:`call_rcu()` to register a callback which will actually
+destroy the object once all pre-existing readers are finished.
+Alternatively, :c:func:`synchronize_rcu()` may be used to block
+until all pre-existing readers are finished.
+
+But how does Read Copy Update know when the readers are finished? The
+method is this: firstly, the readers always traverse the list inside
+:c:func:`rcu_read_lock()`/:c:func:`rcu_read_unlock()` pairs:
+these simply disable preemption so the reader won't go to sleep while
+reading the list.
+
+RCU then waits until every other CPU has slept at least once: since
+readers cannot sleep, we know that any readers which were traversing the
+list during the deletion are finished, and the callback is triggered.
+The real Read Copy Update code is a little more optimized than this, but
+this is the fundamental idea.
+
+::
+
+    --- cache.c.perobjectlock   2003-12-11 17:15:03.000000000 +1100
+    +++ cache.c.rcupdate    2003-12-11 17:55:14.000000000 +1100
+    @@ -1,15 +1,18 @@
+     #include <linux/list.h>
+     #include <linux/slab.h>
+     #include <linux/string.h>
+    +#include <linux/rcupdate.h>
+     #include <linux/mutex.h>
+     #include <asm/errno.h>
+
+     struct object
+     {
+    -        /* These two protected by cache_lock. */
+    +        /* This is protected by RCU */
+             struct list_head list;
+             int popularity;
+
+    +        struct rcu_head rcu;
+    +
+             atomic_t refcnt;
+
+             /* Doesn't change once created. */
+    @@ -40,7 +43,7 @@
+     {
+             struct object *i;
+
+    -        list_for_each_entry(i, &cache, list) {
+    +        list_for_each_entry_rcu(i, &cache, list) {
+                     if (i->id == id) {
+                             i->popularity++;
+                             return i;
+    @@ -49,19 +52,25 @@
+             return NULL;
+     }
+
+    +/* Final discard done once we know no readers are looking. */
+    +static void cache_delete_rcu(void *arg)
+    +{
+    +        object_put(arg);
+    +}
+    +
+     /* Must be holding cache_lock */
+     static void __cache_delete(struct object *obj)
+     {
+             BUG_ON(!obj);
+    -        list_del(&obj->list);
+    -        object_put(obj);
+    +        list_del_rcu(&obj->list);
+             cache_num--;
+    +        call_rcu(&obj->rcu, cache_delete_rcu);
+     }
+
+     /* Must be holding cache_lock */
+     static void __cache_add(struct object *obj)
+     {
+    -        list_add(&obj->list, &cache);
+    +        list_add_rcu(&obj->list, &cache);
+             if (++cache_num > MAX_CACHE_SIZE) {
+                     struct object *i, *outcast = NULL;
+                     list_for_each_entry(i, &cache, list) {
+    @@ -104,12 +114,11 @@
+     struct object *cache_find(int id)
+     {
+             struct object *obj;
+    -        unsigned long flags;
+
+    -        spin_lock_irqsave(&cache_lock, flags);
+    +        rcu_read_lock();
+             obj = __cache_find(id);
+             if (obj)
+                     object_get(obj);
+    -        spin_unlock_irqrestore(&cache_lock, flags);
+    +        rcu_read_unlock();
+             return obj;
+     }
+
+Note that the reader will alter the popularity member in
+:c:func:`__cache_find()`, and now it doesn't hold a lock. One
+solution would be to make it an ``atomic_t``, but for this usage, we
+don't really care about races: an approximate result is good enough, so
+I didn't change it.
+
+The result is that :c:func:`cache_find()` requires no
+synchronization with any other functions, so is almost as fast on SMP as
+it would be on UP.
+
+There is a further optimization possible here: remember our original
+cache code, where there were no reference counts and the caller simply
+held the lock whenever using the object? This is still possible: if you
+hold the lock, no one can delete the object, so you don't need to get
+and put the reference count.
+
+Now, because the 'read lock' in RCU is simply disabling preemption, a
+caller which always has preemption disabled between calling
+:c:func:`cache_find()` and :c:func:`object_put()` does not
+need to actually get and put the reference count: we could expose
+:c:func:`__cache_find()` by making it non-static, and such
+callers could simply call that.
+
+The benefit here is that the reference count is not written to: the
+object is not altered in any way, which is much faster on SMP machines
+due to caching.
+
+Per-CPU Data
+------------
+
+Another technique for avoiding locking which is used fairly widely is to
+duplicate information for each CPU. For example, if you wanted to keep a
+count of a common condition, you could use a spin lock and a single
+counter. Nice and simple.
+
+If that was too slow (it's usually not, but if you've got a really big
+machine to test on and can show that it is), you could instead use a
+counter for each CPU, then none of them need an exclusive lock. See
+:c:func:`DEFINE_PER_CPU()`, :c:func:`get_cpu_var()` and
+:c:func:`put_cpu_var()` (``include/linux/percpu.h``).
+
+Of particular use for simple per-cpu counters is the ``local_t`` type,
+and the :c:func:`cpu_local_inc()` and related functions, which are
+more efficient than simple code on some architectures
+(``include/asm/local.h``).
+
+Note that there is no simple, reliable way of getting an exact value of
+such a counter, without introducing more locks. This is not a problem
+for some uses.
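+
+A sketch of such a per-CPU counter (the ``hit_count`` name is made up),
+using the helpers named above; note the total is only approximate, since
+other CPUs keep counting while we sum::
+
+    #include <linux/percpu.h>
+    #include <linux/cpumask.h>
+
+    static DEFINE_PER_CPU(unsigned long, hit_count);
+
+    void count_hit(void)
+    {
+            /* get_cpu_var() disables preemption, keeping us on this CPU */
+            get_cpu_var(hit_count)++;
+            put_cpu_var(hit_count);
+    }
+
+    unsigned long count_hits_approx(void)
+    {
+            unsigned long total = 0;
+            int cpu;
+
+            for_each_possible_cpu(cpu)
+                    total += per_cpu(hit_count, cpu);
+            return total;
+    }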
+
+Data Which Is Mostly Used By An IRQ Handler
+-------------------------------------------
+
+If data is always accessed from within the same IRQ handler, you don't
+need a lock at all: the kernel already guarantees that the irq handler
+will not run simultaneously on multiple CPUs.
+
+Manfred Spraul points out that you can still do this, even if the data
+is very occasionally accessed in user context or softirqs/tasklets. The
+irq handler doesn't use a lock, and all other accesses are done like so::
+
+        spin_lock(&lock);
+        disable_irq(irq);
+        ...
+        enable_irq(irq);
+        spin_unlock(&lock);
+
+The :c:func:`disable_irq()` prevents the irq handler from running
+(and waits for it to finish if it's currently running on other CPUs).
+The spinlock prevents any other accesses happening at the same time.
+Naturally, this is slower than just a :c:func:`spin_lock_irq()`
+call, so it only makes sense if this type of access happens extremely
+rarely.
+
+What Functions Are Safe To Call From Interrupts?
+================================================
+
+Many functions in the kernel sleep (ie. call schedule()) directly or
+indirectly: you can never call them while holding a spinlock, or with
+preemption disabled. This also means you need to be in user context:
+calling them from an interrupt is illegal.
+
+Some Functions Which Sleep
+--------------------------
+
+The most common ones are listed below, but you usually have to read the
+code to find out if other calls are safe. If everyone else who calls it
+can sleep, you probably need to be able to sleep, too. In particular,
+registration and deregistration functions usually expect to be called
+from user context, and can sleep.
+
+-  Accesses to userspace:
+
+   -  :c:func:`copy_from_user()`
+
+   -  :c:func:`copy_to_user()`
+
+   -  :c:func:`get_user()`
+
+   -  :c:func:`put_user()`
+
+-  :c:func:`kmalloc(GFP_KERNEL) <kmalloc>`
+
+-  :c:func:`mutex_lock_interruptible()` and
+   :c:func:`mutex_lock()`
+
+   There is a :c:func:`mutex_trylock()` which does not sleep.
+   Still, it must not be used inside interrupt context since its
+   implementation is not safe for that. :c:func:`mutex_unlock()`
+   will also never sleep. It cannot be used in interrupt context either
+   since a mutex must be released by the same task that acquired it.
+
+Some Functions Which Don't Sleep
+--------------------------------
+
+Some functions are safe to call from any context, or holding almost any
+lock.
+
+-  :c:func:`printk()`
+
+-  :c:func:`kfree()`
+
+-  :c:func:`add_timer()` and :c:func:`del_timer()`
+
+Mutex API reference
+===================
+
+.. kernel-doc:: include/linux/mutex.h
+   :internal:
+
+.. kernel-doc:: kernel/locking/mutex.c
+   :export:
+
+Futex API reference
+===================
+
+.. kernel-doc:: kernel/futex.c
+   :internal:
+
+Further reading
+===============
+
+-  ``Documentation/locking/spinlocks.txt``: Linus Torvalds' spinlocking
+   tutorial in the kernel sources.
+
+-  Unix Systems for Modern Architectures: Symmetric Multiprocessing and
+   Caching for Kernel Programmers:
+
+   Curt Schimmel's very good introduction to kernel level locking (not
+   written for Linux, but nearly everything applies). The book is
+   expensive, but really worth every penny to understand SMP locking.
+   [ISBN: 0201633388]
+
+Thanks
+======
+
+Thanks to Telsa Gwynne for DocBooking, neatening and adding style.
+
+Thanks to Martin Pool, Philipp Rumpf, Stephen Rothwell, Paul Mackerras,
+Ruedi Aschwanden, Alan Cox, Manfred Spraul, Tim Waugh, Pete Zaitcev,
+James Morris, Robert Love, Paul McKenney, John Ashby for proofreading,
+correcting, flaming, commenting.
+
+Thanks to the cabal for having no influence on this document.
+
+Glossary
+========
+
+preemption
+  Prior to 2.5, or when ``CONFIG_PREEMPT`` is unset, processes in user
+  context inside the kernel would not preempt each other (ie. you had that
+  CPU until you gave it up, except for interrupts). With the addition of
+  ``CONFIG_PREEMPT`` in 2.5.4, this changed: when in user context, higher
+  priority tasks can "cut in": spinlocks were changed to disable
+  preemption, even on UP.
+
+bh
+  Bottom Half: for historical reasons, functions with '_bh' in them often
+  now refer to any software interrupt, e.g. :c:func:`spin_lock_bh()`
+  blocks any software interrupt on the current CPU. Bottom halves are
+  deprecated, and will eventually be replaced by tasklets. Only one bottom
+  half will be running at any time.
+
+Hardware Interrupt / Hardware IRQ
+  Hardware interrupt request. :c:func:`in_irq()` returns true in a
+  hardware interrupt handler.
+
+Interrupt Context
+  Not user context: processing a hardware irq or software irq. Indicated
+  by the :c:func:`in_interrupt()` macro returning true.
+
+SMP
+  Symmetric Multi-Processor: kernels compiled for multiple-CPU machines.
+  (``CONFIG_SMP=y``).
+
+Software Interrupt / softirq
+  Software interrupt handler. :c:func:`in_irq()` returns false;
+  :c:func:`in_softirq()` returns true. Tasklets and softirqs both
+  fall into the category of 'software interrupts'.
+
+  Strictly speaking a softirq is one of up to 32 enumerated software
+  interrupts which can run on multiple CPUs at once. Sometimes used to
+  refer to tasklets as well (ie. all software interrupts).
+
+tasklet
+  A dynamically-registrable software interrupt, which is guaranteed to
+  only run on one CPU at a time.
+
+timer
+  A dynamically-registrable software interrupt, which is run at (or close
+  to) a given time. When running, it is just like a tasklet (in fact, they
+  are called from the ``TIMER_SOFTIRQ``).
+
+UP
+  Uni-Processor: Non-SMP. (``CONFIG_SMP=n``).
+
+User Context
+  The kernel executing on behalf of a particular process (ie. a system
+  call or trap) or kernel thread. You can tell which process with the
+  ``current`` macro. Not to be confused with userspace. Can be
+  interrupted by software or hardware interrupts.
+
+Userspace
+  A process executing its own code outside the kernel.
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index df31e30..2cb7dc5 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -109,13 +109,12 @@
 	on that CPU.  If a thread that expects to run on the de-jittered
 	CPU awakens, the scheduler will send an IPI that can result in
 	a subsequent SCHED_SOFTIRQ.
-2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
-	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
-	to be de-jittered is marked as an adaptive-ticks CPU using the
-	"nohz_full=" boot parameter.  This reduces the number of
-	scheduler-clock interrupts that the de-jittered CPU receives,
-	minimizing its chances of being selected to do the load balancing
-	work that runs in SCHED_SOFTIRQ context.
+2.	CONFIG_NO_HZ_FULL=y and ensure that the CPU to be de-jittered
+	is marked as an adaptive-ticks CPU using the "nohz_full="
+	boot parameter.  This reduces the number of scheduler-clock
+	interrupts that the de-jittered CPU receives, minimizing its
+	chances of being selected to do the load balancing work that
+	runs in SCHED_SOFTIRQ context.
 3.	To the extent possible, keep the CPU out of the kernel when it
 	is non-idle, for example, by avoiding system calls and by
 	forcing both kernel threads and interrupts to execute elsewhere.
@@ -135,11 +134,10 @@
 RCU_SOFTIRQ:  Do at least one of the following:
 1.	Offload callbacks and keep the CPU in either dyntick-idle or
 	adaptive-ticks state by doing all of the following:
-	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
-		CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU
-		to be de-jittered is marked as an adaptive-ticks CPU using
-		the "nohz_full=" boot parameter.  Bind the rcuo kthreads
-		to housekeeping CPUs, which can tolerate OS jitter.
+	a.	CONFIG_NO_HZ_FULL=y and ensure that the CPU to be
+		de-jittered is marked as an adaptive-ticks CPU using the
+		"nohz_full=" boot parameter.  Bind the rcuo kthreads to
+		housekeeping CPUs, which can tolerate OS jitter.
 	b.	To the extent possible, keep the CPU out of the kernel
 		when it is non-idle, for example, by avoiding system
 		calls and by forcing both kernel threads and interrupts
@@ -236,11 +234,10 @@
 	is feasible only if your workload never requires RCU priority
 	boosting, for example, if you ensure frequent idle time on all
 	CPUs that might execute within the kernel.
-3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
-	which offloads all RCU callbacks to kthreads that can be moved
-	off of CPUs susceptible to OS jitter.  This approach prevents the
-	rcuc/%u kthreads from having any work to do, so that they are
-	never awakened.
+3.	Build with CONFIG_RCU_NOCB_CPU=y and boot with the rcu_nocbs=
+	boot parameter offloading RCU callbacks from all CPUs susceptible
+	to OS jitter.  This approach prevents the rcuc/%u kthreads from
+	having any work to do, so that they are never awakened.
 4.	Ensure that the CPU never enters the kernel, and, in particular,
 	avoid initiating any CPU hotplug operations on this CPU.  This is
 	another way of preventing any callbacks from being queued on the
diff --git a/Documentation/lsm.txt b/Documentation/lsm.txt
new file mode 100644
index 0000000..ad4dfd0
--- /dev/null
+++ b/Documentation/lsm.txt
@@ -0,0 +1,201 @@
+========================================================
+Linux Security Modules: General Security Hooks for Linux
+========================================================
+
+:Author: Stephen Smalley
+:Author: Timothy Fraser
+:Author: Chris Vance
+
+.. note::
+
+   The APIs described in this document are outdated.
+
+Introduction
+============
+
+In March 2001, the National Security Agency (NSA) gave a presentation
+about Security-Enhanced Linux (SELinux) at the 2.5 Linux Kernel Summit.
+SELinux is an implementation of flexible and fine-grained
+nondiscretionary access controls in the Linux kernel, originally
+implemented as its own particular kernel patch. Several other security
+projects (e.g. RSBAC, Medusa) have also developed flexible access
+control architectures for the Linux kernel, and various projects have
+developed particular access control models for Linux (e.g. LIDS, DTE,
+SubDomain). Each project has developed and maintained its own kernel
+patch to support its security needs.
+
+In response to the NSA presentation, Linus Torvalds made a set of
+remarks that described a security framework he would be willing to
+consider for inclusion in the mainstream Linux kernel. He described a
+general framework that would provide a set of security hooks to control
+operations on kernel objects and a set of opaque security fields in
+kernel data structures for maintaining security attributes. This
+framework could then be used by loadable kernel modules to implement any
+desired model of security. Linus also suggested the possibility of
+migrating the Linux capabilities code into such a module.
+
+The Linux Security Modules (LSM) project was started by WireX to develop
+such a framework. LSM is a joint development effort by several security
+projects, including Immunix, SELinux, SGI and Janus, and several
+individuals, including Greg Kroah-Hartman and James Morris, to develop a
+Linux kernel patch that implements this framework. The patch is
+currently tracking the 2.4 series and is targeted for integration into
+the 2.5 development series. This technical report provides an overview
+of the framework and the example capabilities security module provided
+by the LSM kernel patch.
+
+LSM Framework
+=============
+
+The LSM kernel patch provides a general kernel framework to support
+security modules. In particular, the LSM framework is primarily focused
+on supporting access control modules, although future development is
+likely to address other security needs such as auditing. By itself, the
+framework does not provide any additional security; it merely provides
+the infrastructure to support security modules. The LSM kernel patch
+also moves most of the capabilities logic into an optional security
+module, with the system defaulting to the traditional superuser logic.
+This capabilities module is discussed further in the
+`LSM Capabilities Module`_ section below.
+
+The LSM kernel patch adds security fields to kernel data structures and
+inserts calls to hook functions at critical points in the kernel code to
+manage the security fields and to perform access control. It also adds
+functions for registering and unregistering security modules, and adds a
+general :c:func:`security()` system call to support new system calls
+for security-aware applications.
+
+The LSM security fields are simply ``void*`` pointers. For process and
+program execution security information, security fields were added to
+:c:type:`struct task_struct <task_struct>` and
+:c:type:`struct linux_binprm <linux_binprm>`. For filesystem
+security information, a security field was added to :c:type:`struct
+super_block <super_block>`. For pipe, file, and socket security
+information, security fields were added to :c:type:`struct inode
+<inode>` and :c:type:`struct file <file>`. For packet and
+network device security information, security fields were added to
+:c:type:`struct sk_buff <sk_buff>` and :c:type:`struct
+net_device <net_device>`. For System V IPC security information,
+security fields were added to :c:type:`struct kern_ipc_perm
+<kern_ipc_perm>` and :c:type:`struct msg_msg
+<msg_msg>`; additionally, the definitions for :c:type:`struct
+msg_msg <msg_msg>`, struct msg_queue, and struct shmid_kernel
+were moved to header files (``include/linux/msg.h`` and
+``include/linux/shm.h`` as appropriate) to allow the security modules to
+use these definitions.
+
+Each LSM hook is a function pointer in a global table, security_ops.
+This table is a :c:type:`struct security_operations
+<security_operations>` structure as defined by
+``include/linux/security.h``. Detailed documentation for each hook is
+included in this header file. At present, this structure consists of a
+collection of substructures that group related hooks based on the kernel
+object (e.g. task, inode, file, sk_buff, etc) as well as some top-level
+hook function pointers for system operations. This structure is likely
+to be flattened in the future for performance. The placement of the hook
+calls in the kernel code is described by the "called:" lines in the
+per-hook documentation in the header file. The hook calls can also be
+easily found in the kernel code by looking for the string
+"security_ops->".
+
+Linus mentioned per-process security hooks in his original remarks as a
+possible alternative to global security hooks. However, if LSM were to
+start from the perspective of per-process hooks, then the base framework
+would have to deal with how to handle operations that involve multiple
+processes (e.g. kill), since each process might have its own hook for
+controlling the operation. This would require a general mechanism for
+composing hooks in the base framework. Additionally, LSM would still
+need global hooks for operations that have no process context (e.g.
+network input operations). Consequently, LSM provides global security
+hooks, but a security module is free to implement per-process hooks
+(where that makes sense) by storing a security_ops table in each
+process' security field and then invoking these per-process hooks from
+the global hooks. The problem of composition is thus deferred to the
+module.
+
+The global security_ops table is initialized to a set of hook functions
+provided by a dummy security module that provides traditional superuser
+logic. A :c:func:`register_security()` function (in
+``security/security.c``) is provided to allow a security module to set
+security_ops to refer to its own hook functions, and an
+:c:func:`unregister_security()` function is provided to revert
+security_ops to the dummy module hooks. This mechanism is used to set
+the primary security module, which is responsible for making the final
+decision for each hook.
+
+LSM also provides a simple mechanism for stacking additional security
+modules with the primary security module. It defines
+:c:func:`register_security()` and
+:c:func:`unregister_security()` hooks in the :c:type:`struct
+security_operations <security_operations>` structure and
+provides :c:func:`mod_reg_security()` and
+:c:func:`mod_unreg_security()` functions that invoke these hooks
+after performing some sanity checking. A security module can call these
+functions in order to stack with other modules. However, the actual
+details of how this stacking is handled are deferred to the module,
+which can implement these hooks in any way it wishes (including always
+returning an error if it does not wish to support stacking). In this
+manner, LSM again defers the problem of composition to the module.
+
+Although the LSM hooks are organized into substructures based on kernel
+object, all of the hooks can be viewed as falling into two major
+categories: hooks that are used to manage the security fields and hooks
+that are used to perform access control. Examples of the first category
+of hooks include the :c:func:`alloc_security()` and
+:c:func:`free_security()` hooks defined for each kernel data
+structure that has a security field. These hooks are used to allocate
+and free security structures for kernel objects. The first category of
+hooks also includes hooks that set information in the security field
+after allocation, such as the :c:func:`post_lookup()` hook in
+:c:type:`struct inode_security_ops <inode_security_ops>`.
+This hook is used to set security information for inodes after
+successful lookup operations. An example of the second category of hooks
+is the :c:func:`permission()` hook in :c:type:`struct
+inode_security_ops <inode_security_ops>`. This hook checks
+permission when accessing an inode.
+
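+As a rough illustration of the mechanism described above (a sketch against
+this now-historical interface, not a complete module), a security module
+fills in a :c:type:`struct security_operations <security_operations>` with
+its hook functions and registers itself as the primary module. The hook
+member name and prototype used here are illustrative; the authoritative
+definitions are in ``include/linux/security.h`` of the corresponding kernel
+version::
+
+    /* Illustrative sketch only. */
+    static int example_inode_permission(struct inode *inode, int mask)
+    {
+            /* A real module would consult its security labels here;
+             * returning 0 means "access permitted". */
+            return 0;
+    }
+
+    static struct security_operations example_ops = {
+            .inode_permission = example_inode_permission, /* member name illustrative */
+    };
+
+    static int __init example_lsm_init(void)
+    {
+            /* Become the primary security module; this fails if another
+             * module has already registered itself. */
+            return register_security(&example_ops);
+    }
+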
+LSM Capabilities Module
+=======================
+
+The LSM kernel patch moves most of the existing POSIX.1e capabilities
+logic into an optional security module stored in the file
+``security/capability.c``. This change allows users who do not want to
+use capabilities to omit this code entirely from their kernel, instead
+using the dummy module for traditional superuser logic or any other
+module that they desire. This change also allows the developers of the
+capabilities logic to maintain and enhance their code more freely,
+without needing to integrate patches back into the base kernel.
+
+In addition to moving the capabilities logic, the LSM kernel patch could
+move the capability-related fields from the kernel data structures into
+the new security fields managed by the security modules. However, at
+present, the LSM kernel patch leaves the capability fields in the kernel
+data structures. In his original remarks, Linus suggested that this
+might be preferable so that other security modules can be easily stacked
+with the capabilities module without needing to chain multiple security
+structures on the security field. It also avoids imposing extra overhead
+on the capabilities module to manage the security fields. However, the
+LSM framework could certainly support such a move if it is determined to
+be desirable, with only a few additional changes described below.
+
+At present, the capabilities logic for computing process capabilities on
+:c:func:`execve()` and :c:func:`set\*uid()`, checking
+capabilities for a particular process, saving and checking capabilities
+for netlink messages, and handling the :c:func:`capget()` and
+:c:func:`capset()` system calls has been moved into the
+capabilities module. There are still a few locations in the base kernel
+where capability-related fields are directly examined or modified, but
+the current version of the LSM patch does allow a security module to
+completely replace the assignment and testing of capabilities. These few
+locations would need to be changed if the capability-related fields were
+moved into the security field. The following is a list of known
+locations that still perform such direct examination or modification of
+capability-related fields:
+
+-  ``fs/open.c``::c:func:`sys_access()`
+
+-  ``fs/lockd/host.c``::c:func:`nlm_bind_host()`
+
+-  ``fs/nfsd/auth.c``::c:func:`nfsd_setuser()`
+
+-  ``fs/proc/array.c``::c:func:`task_cap()`
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index deb1f6f..b80d85c 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -129,8 +129,8 @@
 
 .. _sel-const-adjust:
 
-.. figure::  constraints.*
-    :alt:    constraints.pdf / constraints.svg
+.. kernel-figure::  constraints.svg
+    :alt:    constraints.svg
     :align:  center
 
     Size adjustments with constraint flags.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 732f10e..c239a0c 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -27,7 +27,7 @@
  (2) to provide a guide as to how to use the barriers that are available.
 
 Note that an architecture can provide more than the minimum requirement
-for any particular barrier, but if the architecure provides less than
+for any particular barrier, but if the architecture provides less than
 that, that architecture is incorrect.
 
 Note also that it is possible that a barrier may be a no-op for an
@@ -498,11 +498,11 @@
      This means that ACQUIRE acts as a minimal "acquire" operation and
      RELEASE acts as a minimal "release" operation.
 
-A subset of the atomic operations described in atomic_ops.txt have ACQUIRE
-and RELEASE variants in addition to fully-ordered and relaxed (no barrier
-semantics) definitions.  For compound atomics performing both a load and a
-store, ACQUIRE semantics apply only to the load and RELEASE semantics apply
-only to the store portion of the operation.
+A subset of the atomic operations described in core-api/atomic_ops.rst have
+ACQUIRE and RELEASE variants in addition to fully-ordered and relaxed (no
+barrier semantics) definitions.  For compound atomics performing both a load
+and a store, ACQUIRE semantics apply only to the load and RELEASE semantics
+apply only to the store portion of the operation.
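+
+For instance (an illustrative fragment; "v" is an atomic_t and "old" an int),
+with the atomic_fetch_*() family the _acquire suffix orders only the load
+half and the _release suffix orders only the store half of the compound
+operation:
+
+	old = atomic_fetch_add_acquire(1, &v);	/* ACQUIRE applies to the load of v */
+	atomic_fetch_sub_release(1, &v);	/* RELEASE applies to the store to v */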
 
 Memory barriers are only required where there's a possibility of interaction
 between two CPUs or between a CPU and a device.  If it can be guaranteed that
diff --git a/Documentation/networking/conf.py b/Documentation/networking/conf.py
new file mode 100644
index 0000000..40f69e6
--- /dev/null
+++ b/Documentation/networking/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux Networking Documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'networking.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index d86adcd..eaa8f9a 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -143,7 +143,7 @@
 dns_query() returns a copy of the value attached to the key, or an error if
 that is indicated instead.
 
-See <file:Documentation/security/keys-request-key.txt> for further
+See <file:Documentation/security/keys/request-key.rst> for further
 information about request-key function.
 
 
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 0000000..76e016d
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+	- DPAA Ethernet Overview
+	- DPAA Ethernet Supported SoCs
+	- Configuring DPAA Ethernet in your kernel
+	- DPAA Ethernet Frame Processing
+	- DPAA Ethernet Features
+	- Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Management Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffer pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc; an example command that enables all four traffic classes on an
+interface is shown after the list below. skb priority levels are mapped to
+traffic classes as follows:
+
+	* priorities 0 to 3 - traffic class 0 (low priority)
+	* priorities 4 to 7 - traffic class 1 (medium-low priority)
+	* priorities 8 to 11 - traffic class 2 (medium-high priority)
+	* priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+	 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+	- interrupt count per CPU
+	- Rx packets count per CPU
+	- Tx packets count per CPU
+	- Tx confirmed packets count per CPU
+	- Tx S/G frames count per CPU
+	- Tx error count per CPU
+	- Rx error count per CPU
+	- Rx error count per type
+	- congestion related statistics:
+		- congestion status
+		- time spent in congestion
+		- number of times the device entered congestion
+		- dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+	- the FQ IDs for each FQ type
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+	- the IDs of the buffer pools in use
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
new file mode 100644
index 0000000..b5bd87e
--- /dev/null
+++ b/Documentation/networking/index.rst
@@ -0,0 +1,18 @@
+Linux Networking Documentation
+==============================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   kapi
+   z8530book
+
+.. only::  subproject
+
+   Indices
+   =======
+
+   * :ref:`genindex`
+
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
new file mode 100644
index 0000000..580289f
--- /dev/null
+++ b/Documentation/networking/kapi.rst
@@ -0,0 +1,147 @@
+=========================================
+Linux Networking and Network Devices APIs
+=========================================
+
+Linux Networking
+================
+
+Networking Base Types
+---------------------
+
+.. kernel-doc:: include/linux/net.h
+   :internal:
+
+Socket Buffer Functions
+-----------------------
+
+.. kernel-doc:: include/linux/skbuff.h
+   :internal:
+
+.. kernel-doc:: include/net/sock.h
+   :internal:
+
+.. kernel-doc:: net/socket.c
+   :export:
+
+.. kernel-doc:: net/core/skbuff.c
+   :export:
+
+.. kernel-doc:: net/core/sock.c
+   :export:
+
+.. kernel-doc:: net/core/datagram.c
+   :export:
+
+.. kernel-doc:: net/core/stream.c
+   :export:
+
+Socket Filter
+-------------
+
+.. kernel-doc:: net/core/filter.c
+   :export:
+
+Generic Network Statistics
+--------------------------
+
+.. kernel-doc:: include/uapi/linux/gen_stats.h
+   :internal:
+
+.. kernel-doc:: net/core/gen_stats.c
+   :export:
+
+.. kernel-doc:: net/core/gen_estimator.c
+   :export:
+
+SUN RPC subsystem
+-----------------
+
+.. kernel-doc:: net/sunrpc/xdr.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/svc_xprt.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/xprt.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/sched.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/socklib.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/stats.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/rpc_pipe.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/rpcb_clnt.c
+   :export:
+
+.. kernel-doc:: net/sunrpc/clnt.c
+   :export:
+
+WiMAX
+-----
+
+.. kernel-doc:: net/wimax/op-msg.c
+   :export:
+
+.. kernel-doc:: net/wimax/op-reset.c
+   :export:
+
+.. kernel-doc:: net/wimax/op-rfkill.c
+   :export:
+
+.. kernel-doc:: net/wimax/stack.c
+   :export:
+
+.. kernel-doc:: include/net/wimax.h
+   :internal:
+
+.. kernel-doc:: include/uapi/linux/wimax.h
+   :internal:
+
+Network device support
+======================
+
+Driver Support
+--------------
+
+.. kernel-doc:: net/core/dev.c
+   :export:
+
+.. kernel-doc:: net/ethernet/eth.c
+   :export:
+
+.. kernel-doc:: net/sched/sch_generic.c
+   :export:
+
+.. kernel-doc:: include/linux/etherdevice.h
+   :internal:
+
+.. kernel-doc:: include/linux/netdevice.h
+   :internal:
+
+PHY Support
+-----------
+
+.. kernel-doc:: drivers/net/phy/phy.c
+   :export:
+
+.. kernel-doc:: drivers/net/phy/phy.c
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/phy_device.c
+   :export:
+
+.. kernel-doc:: drivers/net/phy/phy_device.c
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/mdio_bus.c
+   :export:
+
+.. kernel-doc:: drivers/net/phy/mdio_bus.c
+   :internal:
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 59f4db2..f55639d 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -122,7 +122,7 @@
 or will be computed in the stack. Capable hardware can pass the hash in
 the receive descriptor for the packet; this would usually be the same
 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
 packet’s flow.
 
 Each receive hardware queue has an associated list of CPUs to which
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0d..9c7139d 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========
@@ -29,18 +29,19 @@
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
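+
+As a minimal sketch of such a registration, the exported Reno helpers can be
+reused for the three mandatory hooks; the authoritative member names and
+prototypes are those of struct tcp_congestion_ops in include/net/tcp.h:
+
+	static struct tcp_congestion_ops tcp_example __read_mostly = {
+		.name		= "example",
+		.owner		= THIS_MODULE,
+		.ssthresh	= tcp_reno_ssthresh,
+		.cong_avoid	= tcp_reno_cong_avoid,
+		.undo_cwnd	= tcp_reno_undo_cwnd,
+	};
+
+	static int __init tcp_example_register(void)
+	{
+		/* Makes "example" selectable via the
+		 * net.ipv4.tcp_congestion_control sysctl. */
+		return tcp_register_congestion_control(&tcp_example);
+	}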
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space.  This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics.  There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
 
-If you really want a particular default value then you will need
-to set it with the sysctl.  If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
diff --git a/Documentation/networking/z8530book.rst b/Documentation/networking/z8530book.rst
new file mode 100644
index 0000000..fea2c40
--- /dev/null
+++ b/Documentation/networking/z8530book.rst
@@ -0,0 +1,256 @@
+=======================
+Z8530 Programming Guide
+=======================
+
+:Author: Alan Cox
+
+Introduction
+============
+
+The Z85x30 family synchronous/asynchronous controller chips are used on
+a large number of cheap network interface cards. The kernel provides a
+core interface layer that is designed to make it easy to provide WAN
+services using this chip.
+
+The current driver only supports synchronous operation. Merging the
+asynchronous driver support into this code to allow any Z85x30 device to
+be used as both a tty interface and as a synchronous controller is a
+project for Linux post the 2.4 release.
+
+Driver Modes
+============
+
+The Z85230 driver layer can drive Z8530, Z85C30 and Z85230 devices in
+three different modes. Each mode can be applied to an individual channel
+on the chip (each chip has two channels).
+
+The PIO synchronous mode supports the most common Z8530 wiring. Here the
+chip is interfaced to the I/O and interrupt facilities of the host
+machine but not to the DMA subsystem. When running PIO the Z8530 has
+extremely tight timing requirements. Doing high speeds, even with a
+Z85230 will be tricky. Typically you should expect to achieve at best
+9600 baud with a Z85C30 and 64Kbits with a Z85230.
+
+The DMA mode supports the chip when it is configured to use dual DMA
+channels on an ISA bus. The better cards tend to support this mode of
+operation for a single channel. With DMA running the Z85230 tops out
+when it starts to hit ISA DMA constraints at about 512Kbits. It is worth
+noting here that many PC machines hang or crash when the chip is driven
+fast enough to hold the ISA bus solid.
+
+Transmit DMA mode uses a single DMA channel. The DMA channel is used for
+transmission as the transmit FIFO is smaller than the receive FIFO. It
+gives better performance than pure PIO mode but is nowhere near as ideal
+as pure DMA mode.
+
+Using the Z85230 driver
+=======================
+
+The Z85230 driver provides the back end interface to your board. To
+configure a Z8530 interface you need to detect the board and to identify
+its ports and interrupt resources. It is also your problem to verify the
+resources are available.
+
+Having identified the chip you need to fill in a struct z8530_dev,
+which describes each chip. This object must exist until you finally
+shutdown the board. Firstly zero the active field. This ensures nothing
+goes off without you intending it. The irq field should be set to the
+interrupt number of the chip. (Each chip has a single interrupt source
+rather than each channel). You are responsible for allocating the
+interrupt line. The interrupt handler should be set to
+:c:func:`z8530_interrupt()`. The device id should be set to the
+z8530_dev structure pointer. Whether the interrupt can be shared or not
+is board dependent, and up to you to initialise.
+
+The structure holds two channel structures. Initialise chanA.ctrlio and
+chanA.dataio with the address of the control and data ports. You can or
+this with Z8530_PORT_SLEEP to indicate your interface needs the 5uS
+delay for chip settling done in software. The PORT_SLEEP option is
+architecture specific. Other flags may become available on future
+platforms, e.g. for MMIO. Initialise the chanA.irqs to &z8530_nop to
+start the chip up as disabled and discarding interrupt events. This
+ensures that stray interrupts will be mopped up and not hang the bus.
+Set chanA.dev to point to the device structure itself. The private and
+name field you may use as you wish. The private field is unused by the
+Z85230 layer. The name is used for error reporting and it may thus make
+sense to make it match the network name.
+
+Repeat the same operation with the B channel if your chip has both
+channels wired to something useful. This isn't always the case. If it is
+not wired then the I/O values do not matter, but you must initialise
+chanB.dev.
+
+If your board has DMA facilities then initialise the txdma and rxdma
+fields for the relevant channels. You must also allocate the ISA DMA
+channels and do any necessary board level initialisation to configure
+them. The low level driver will do the Z8530 and DMA controller
+programming but not board specific magic.
+
+Having initialised the device you can then call
+:c:func:`z8530_init()`. This will probe the chip and reset it into
+a known state. An identification sequence is then run to identify the
+chip type. If the checks fail to pass the function returns a non zero
+error code. Typically this indicates that the port given is not valid.
+After this call the type field of the z8530_dev structure is
+initialised to either Z8530, Z85C30 or Z85230 according to the chip
+found.
+
+Once you have called z8530_init you can also make use of the utility
+function :c:func:`z8530_describe()`. This provides a consistent
+reporting format for the Z8530 devices, and allows all the drivers to
+provide consistent reporting.
+
+Attaching Network Interfaces
+============================
+
+If you wish to use the network interface facilities of the driver, then
+you need to attach a network device to each channel that is present and
+in use. In addition, to use the generic HDLC you need to follow some
+additional plumbing rules. They may seem complex but a look at the
+example hostess_sv11 driver should reassure you.
+
+The network device used for each channel should be pointed to by the
+netdevice field of each channel. The hdlc->priv field of the network
+device points to your private data - you will need to be able to find
+your private data from this.
+
+The way most drivers approach this particular problem is to create a
+structure holding the Z8530 device definition and put that into the
+private field of the network device. The network device fields of the
+channels then point back to the network devices.
+
+If you wish to use the generic HDLC then you need to register the HDLC
+device.
+
+Before you register your network device you will also need to provide
+suitable handlers for most of the network device callbacks. See the
+network device documentation for more details on this.
+
+Configuring And Activating The Port
+===================================
+
+The Z85230 driver provides helper functions and tables to load the port
+registers on the Z8530 chips. When programming the register settings for
+a channel be aware that the documentation recommends initialisation
+orders. Strange things happen when these are not followed.
+
+:c:func:`z8530_channel_load()` takes an array of pairs of
+initialisation values in an array of u8 type. The first value is the
+Z8530 register number. Add 16 to indicate the alternate register bank on
+the later chips. The array is terminated by a 255.
+
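+As a small sketch, a register table is simply pairs of register number and
+value with a 255 terminator; the register values below are placeholders
+only, and the exact prototype of :c:func:`z8530_channel_load()` is in
+``drivers/net/wan/z85230.c``::
+
+    static u8 example_table[] = {
+            4,  0x20,       /* WR4: placeholder mode bits */
+            10, 0x80,       /* WR10: placeholder framing bits */
+            255             /* terminator */
+    };
+
+    /* dev is the struct z8530_dev initialised earlier via z8530_init() */
+    z8530_channel_load(&dev.chanA, example_table);
+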
+The driver provides a pair of public tables. The z8530_hdlc_kilostream
+table is for the UK 'Kilostream' service and also happens to cover most
+other end host configurations. The z8530_hdlc_kilostream_85230 table
+is the same configuration using the enhancements of the 85230 chip. The
+configuration loaded is standard NRZ encoded synchronous data with HDLC
+bitstuffing. All of the timing is taken from the other end of the link.
+
+When writing your own tables be aware that the driver internally tracks
+register values. It may need to reload values. You should therefore be
+sure to set registers 1-7, 9-11, 14 and 15 in all configurations. Where
+the register settings depend on DMA selection the driver will update the
+bits itself when you open or close. Loading a new table with the
+interface open is not recommended.
+
+There are three standard configurations supported by the core code. In
+PIO mode the interface is programmed up to use interrupt driven PIO.
+This places high demands on the host processor to avoid latency. The
+driver is written to take account of latency issues but it cannot avoid
+latencies caused by other drivers, notably IDE in PIO mode. Because the
+drivers allocate buffers you must also prevent MTU changes while the
+port is open.
+
+Once the port is open it will call the rx_function of each channel
+whenever a completed packet arrives. This is invoked from interrupt
+context and passes you the channel and a network buffer (struct
+sk_buff) holding the data. The data includes the CRC bytes so most
+users will want to trim the last two bytes before processing the data.
+This function is very timing critical. When you simply wish to discard
+data, the support code provides the function
+:c:func:`z8530_null_rx()` for that purpose.
+
+To activate PIO mode for sending and receiving, ``z8530_sync_open`` is
+called. This expects to be passed the network device and the channel.
+Typically this is called from your network device open callback. On a
+failure a non zero error status is returned.
+The :c:func:`z8530_sync_close()` function shuts down a PIO
+channel. This must be done before the channel is opened again and before
+the driver shuts down and unloads.
+
+The ideal mode of operation is dual channel DMA mode. Here the kernel
+driver will configure the board for DMA in both directions. The driver
+also handles ISA DMA issues such as controller programming and the
+memory range limit for you. This mode is activated by calling the
+:c:func:`z8530_sync_dma_open()` function. On failure a non zero
+error value is returned. Once this mode is activated it can be shut down
+by calling the :c:func:`z8530_sync_dma_close()`. You must call
+the close function matching the open mode you used.
+
+The final supported mode uses a single DMA channel to drive the transmit
+side. As the Z85C30 has a larger FIFO on the receive channel this tends
+to increase the maximum speed a little. This is activated by calling the
+``z8530_sync_txdma_open``. This returns a non zero error code on failure. The
+:c:func:`z8530_sync_txdma_close()` function closes down the Z8530
+interface from this mode.
+
+Network Layer Functions
+=======================
+
+The Z8530 layer provides functions to queue packets for transmission.
+The driver internally buffers the frame currently being transmitted and
+one further frame (in order to keep back to back transmission running).
+Any further buffering is up to the caller.
+
+The function :c:func:`z8530_queue_xmit()` takes a network buffer
+in sk_buff format and queues it for transmission. The caller must
+provide the entire packet with the exception of the bitstuffing and CRC.
+This is normally done by the caller via the generic HDLC interface
+layer. It returns 0 if the buffer has been queued and non zero values
+for queue full. If the function accepts the buffer it becomes property
+of the Z8530 layer and the caller should not free it.
+
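+As an illustration (a sketch modelled on the hostess_sv11 driver mentioned
+earlier; the ``example_card`` structure and ``dev_to_card()`` helper are
+hypothetical), a network device transmit callback typically just hands the
+buffer to this function::
+
+    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
+                                          struct net_device *d)
+    {
+            struct example_card *card = dev_to_card(d);
+
+            /* The Z8530 layer owns the skb once it accepts it. */
+            return z8530_queue_xmit(&card->chanA, skb);
+    }
+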
+The function :c:func:`z8530_get_stats()` returns a pointer to an
+internally maintained per interface statistics block. This provides most
+of the interface code needed to implement the network layer get_stats
+callback.
+
+Porting The Z8530 Driver
+========================
+
+The Z8530 driver is written to be portable. In DMA mode it makes
+assumptions about the use of ISA DMA. These are probably warranted in
+most cases as the Z85230 in particular was designed to glue to PC type
+machines. The PIO mode makes no real assumptions.
+
+Should you need to retarget the Z8530 driver to another architecture the
+only code that should need changing are the port I/O functions. At the
+moment these assume PC I/O port accesses. This may not be appropriate
+for all platforms. Replacing :c:func:`z8530_read_port()` and
+``z8530_write_port`` is intended to be all that is required to port
+this driver layer.
+
+Known Bugs And Assumptions
+==========================
+
+Interrupt Locking
+    The locking in the driver is done via the global cli/sti lock. This
+    makes for relatively poor SMP performance. Switching this to use a
+    per device spin lock would probably materially improve performance.
+
+Occasional Failures
+    We have reports of occasional failures when run for very long
+    periods of time and the driver starts to receive junk frames. At the
+    moment the cause of this is not clear.
+
+Public Functions Provided
+=========================
+
+.. kernel-doc:: drivers/net/wan/z85230.c
+   :export:
+
+Internal Functions
+==================
+
+.. kernel-doc:: drivers/net/wan/z85230.c
+   :internal:
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 0c72588..300d378 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -115,28 +115,33 @@
 charge when battery considered full/empty at given conditions (temperature,
 age)". I.e. these attributes represents real thresholds, not design values.
 
+ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
+
 CHARGE_COUNTER - the current charge counter (in µAh).  This could easily
 be negative; there is no empty or full value.  It is only useful for
 relative, time-based measurements.
 
+PRECHARGE_CURRENT - the maximum charge current during precharge phase
+of charge cycle (typically 20% of battery capacity).
+CHARGE_TERM_CURRENT - Charge termination current. The charge cycle
+terminates when battery voltage is above recharge threshold, and charge
+current is below this setting (typically 10% of battery capacity).
+
 CONSTANT_CHARGE_CURRENT - constant charge current programmed by charger.
 CONSTANT_CHARGE_CURRENT_MAX - maximum charge current supported by the
 power supply object.
-INPUT_CURRENT_LIMIT - input current limit programmed by charger. Indicates
-the current drawn from a charging source.
-CHARGE_TERM_CURRENT - Charge termination current used to detect the end of charge
-condition.
-
-CALIBRATE - battery or coulomb counter calibration status
 
 CONSTANT_CHARGE_VOLTAGE - constant charge voltage programmed by charger.
 CONSTANT_CHARGE_VOLTAGE_MAX - maximum charge voltage supported by the
 power supply object.
 
+INPUT_CURRENT_LIMIT - input current limit programmed by charger. Indicates
+the current drawn from a charging source.
+
 CHARGE_CONTROL_LIMIT - current charge control limit setting
 CHARGE_CONTROL_LIMIT_MAX - maximum charge control limit setting
 
-ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
+CALIBRATE - battery or coulomb counter calibration status
 
 CAPACITY - capacity in percents.
 CAPACITY_ALERT_MIN - minimum capacity alert value in percents.
@@ -174,6 +179,18 @@
 external_power_changed callback.
 
 
+Devicetree battery characteristics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Drivers should call power_supply_get_battery_info() to obtain battery
+characteristics from a devicetree battery node, defined in
+Documentation/devicetree/bindings/power/supply/battery.txt. An example
+of its use can be found in drivers/power/supply/bq27xxx_battery.c.
+
+Properties in struct power_supply_battery_info and their counterparts in the
+battery node have names corresponding to elements in enum power_supply_property,
+for naming consistency between sysfs attributes and battery node properties.
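+
+A minimal sketch of how a charger driver might use this (the surrounding
+driver context, error handling and the example_set_term_current() helper
+are hypothetical; the exact prototype and field names are those in
+include/linux/power_supply.h):
+
+	struct power_supply_battery_info info = {};
+
+	if (power_supply_get_battery_info(psy, &info) < 0)
+		dev_warn(dev, "no battery data, using driver defaults\n");
+	else
+		/* e.g. program the charge termination current from the
+		 * devicetree-provided value */
+		example_set_term_current(info.charge_term_current_ua);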
+
+
 QA
 ~~
 Q: Where is POWER_SUPPLY_PROP_XYZ attribute?
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index ee69d753..0fde3dc 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -105,9 +105,9 @@
 
 In particular, if the driver requires remote wakeup capability (i.e. hardware
 mechanism allowing the device to request a change of its power state, such as
-PCI PME) for proper functioning and device_run_wake() returns 'false' for the
+PCI PME) for proper functioning and device_can_wakeup() returns 'false' for the
 device, then ->runtime_suspend() should return -EBUSY.  On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a
+device_can_wakeup() returns 'true' for the device and the device is put into a
 low-power state during the execution of the suspend callback, it is expected
 that remote wakeup will be enabled for the device.  Generally, remote wakeup
 should be enabled for all input devices put into low-power states at run time.
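+
+As a sketch of the rule above (the "foo" driver, its data structure and its
+low-power helpers are hypothetical; device_can_wakeup() is the real PM core
+helper):
+
+	static int foo_runtime_suspend(struct device *dev)
+	{
+		struct foo *foo = dev_get_drvdata(dev);
+
+		/* Remote wakeup is needed to bring the device back; if it
+		 * cannot generate wakeup signals, refuse to suspend. */
+		if (foo->needs_remote_wakeup && !device_can_wakeup(dev))
+			return -EBUSY;
+
+		foo_enable_remote_wakeup(foo);	/* hypothetical helper */
+		foo_enter_low_power(foo);	/* hypothetical helper */
+		return 0;
+	}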
@@ -253,9 +253,6 @@
       being executed for that device and it is not practical to wait for the
       suspend to complete; means "start a resume as soon as you've suspended"
 
-  unsigned int run_wake;
-    - set if the device is capable of generating runtime wake-up events
-
   enum rpm_status runtime_status;
     - the runtime PM status of the device; this field's initial value is
       RPM_SUSPENDED, which means that each device is initially regarded by the
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
deleted file mode 100644
index f7be84f..0000000
--- a/Documentation/prctl/no_new_privs.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-The execve system call can grant a newly-started program privileges that
-its parent did not have.  The most obvious examples are setuid/setgid
-programs and file capabilities.  To prevent the parent program from
-gaining these privileges as well, the kernel and user code must be
-careful to prevent the parent from doing anything that could subvert the
-child.  For example:
-
- - The dynamic loader handles LD_* environment variables differently if
-   a program is setuid.
-
- - chroot is disallowed to unprivileged processes, since it would allow
-   /etc/passwd to be replaced from the point of view of a process that
-   inherited chroot.
-
- - The exec code has special handling for ptrace.
-
-These are all ad-hoc fixes.  The no_new_privs bit (since Linux 3.5) is a
-new, generic mechanism to make it safe for a process to modify its
-execution environment in a manner that persists across execve.  Any task
-can set no_new_privs.  Once the bit is set, it is inherited across fork,
-clone, and execve and cannot be unset.  With no_new_privs set, execve
-promises not to grant the privilege to do anything that could not have
-been done without the execve call.  For example, the setuid and setgid
-bits will no longer change the uid or gid; file capabilities will not
-add to the permitted set, and LSMs will not relax constraints after
-execve.
-
-To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
-
-Be careful, though: LSMs might also not tighten constraints on exec
-in no_new_privs mode.  (This means that setting up a general-purpose
-service launcher to set no_new_privs before execing daemons may
-interfere with LSM-based sandboxing.)
-
-Note that no_new_privs does not prevent privilege changes that do not
-involve execve.  An appropriately privileged task can still call
-setuid(2) and receive SCM_RIGHTS datagrams.
-
-There are two main use cases for no_new_privs so far:
-
- - Filters installed for the seccomp mode 2 sandbox persist across
-   execve and can change the behavior of newly-executed programs.
-   Unprivileged users are therefore only allowed to install such filters
-   if no_new_privs is set.
-
- - By itself, no_new_privs can be used to reduce the attack surface
-   available to an unprivileged user.  If everything running with a
-   given uid has no_new_privs set, then that uid will be unable to
-   escalate its privileges by directly attacking setuid, setgid, and
-   fcap-using binaries; it will need to compromise something without the
-   no_new_privs bit set first.
-
-In the future, other potentially dangerous kernel features could become
-available to unprivileged tasks if no_new_privs is set.  In principle,
-several options to unshare(2) and clone(2) would be safe when
-no_new_privs is set, and no_new_privs + chroot is considerable less
-dangerous than chroot by itself.
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index e25d63f..3aed751 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -116,12 +116,11 @@
 
 Linux documentation for functions is transitioning to inline
 documentation via specially-formatted comments near their
-definitions in the source.  These comments can be combined with the
-SGML templates in the Documentation/DocBook directory to make DocBook
-files, which can then be converted by DocBook stylesheets to PostScript,
-HTML, PDF files, and several other formats.  In order to convert from
-DocBook format to a format of your choice, you'll need to install Jade as
-well as the desired DocBook stylesheets.
+definitions in the source.  These comments can be combined with ReST
+files in the Documentation/ directory to make enriched documentation, which
+can then be converted to PostScript, HTML, LaTeX, ePUB and PDF files.
+In order to convert from ReST format to a format of your choice, you'll need
+Sphinx.
 
 Util-linux
 ----------
@@ -323,12 +322,6 @@
   functionalities required for ``XeLaTex`` to work. For PDF output you'll also
   need ``convert(1)`` from ImageMagick (https://www.imagemagick.org).
 
-Other tools
------------
-
-In order to produce documentation from DocBook, you'll also need ``xmlto``.
-Please notice, however, that we're currently migrating all documents to use
-``Sphinx``.
 
 Getting updated software
 ========================
@@ -409,15 +402,6 @@
 
 - <http://sourceforge.net/projects/linuxquota/>
 
-DocBook Stylesheets
--------------------
-
-- <http://sourceforge.net/projects/docbook/files/docbook-dsssl/>
-
-XMLTO XSLT Frontend
--------------------
-
-- <http://cyberelk.net/tim/xmlto/>
 
 Intel P6 microcode
 ------------------
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index d20d52a..a20b44a 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -980,8 +980,8 @@
 
 When writing a single inline assembly statement containing multiple
 instructions, put each instruction on a separate line in a separate quoted
-string, and end each string except the last with \n\t to properly indent the
-next instruction in the assembly output:
+string, and end each string except the last with ``\n\t`` to properly indent
+the next instruction in the assembly output:
 
 .. code-block:: c
 
diff --git a/Documentation/process/email-clients.rst b/Documentation/process/email-clients.rst
index ac892b3..07faa54 100644
--- a/Documentation/process/email-clients.rst
+++ b/Documentation/process/email-clients.rst
@@ -167,6 +167,11 @@
 
 Run away from it.
 
+IBM Verse (Web GUI)
+*******************
+
+See Lotus Notes.
+
 Mutt (TUI)
 **********
 
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 1260f60..c6875b1 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -180,14 +180,6 @@
 	make latexdocs
 	make epubdocs
 
-Currently, there are some documents written on DocBook that are in
-the process of conversion to ReST. Such documents will be created in the
-Documentation/DocBook/ directory and can be generated also as
-Postscript or man pages by running::
-
-	make psdocs
-	make mandocs
-
 Becoming A Kernel Developer
 ---------------------------
 
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 05a7857..b8cac85 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -40,50 +40,18 @@
 Docs at the Linux Kernel tree
 -----------------------------
 
-The DocBook books should be built with ``make {htmldocs | psdocs | pdfdocs}``.
 The Sphinx books should be built with ``make {htmldocs | pdfdocs | epubdocs}``.
 
     * Name: **linux/Documentation**
 
       :Author: Many.
       :Location: Documentation/
-      :Keywords: text files, Sphinx, DocBook.
+      :Keywords: text files, Sphinx.
       :Description: Documentation that comes with the kernel sources,
         inside the Documentation directory. Some pages from this document
         (including this document itself) have been moved there, and might
         be more up to date than the web version.
 
-    * Title: **The Kernel Hacking HOWTO**
-
-      :Author: Various Talented People, and Rusty.
-      :Location: Documentation/DocBook/kernel-hacking.tmpl
-      :Keywords: HOWTO, kernel contexts, deadlock, locking, modules,
-        symbols, return conventions.
-      :Description: From the Introduction: "Please understand that I
-        never wanted to write this document, being grossly underqualified,
-        but I always wanted to read it, and this was the only way. I
-        simply explain some best practices, and give reading entry-points
-        into the kernel sources. I avoid implementation details: that's
-        what the code is for, and I ignore whole tracts of useful
-        routines. This document assumes familiarity with C, and an
-        understanding of what the kernel is, and how it is used. It was
-        originally written for the 2.3 kernels, but nearly all of it
-        applies to 2.2 too; 2.0 is slightly different".
-
-    * Title: **Linux Kernel Locking HOWTO**
-
-      :Author: Various Talented People, and Rusty.
-      :Location: Documentation/DocBook/kernel-locking.tmpl
-      :Keywords: locks, locking, spinlock, semaphore, atomic, race
-        condition, bottom halves, tasklets, softirqs.
-      :Description: The title says it all: document describing the
-        locking system in the Linux Kernel either in uniprocessor or SMP
-        systems.
-      :Notes: "It was originally written for the later (>2.3.47) 2.3
-        kernels, but most of it applies to 2.2 too; 2.0 is slightly
-        different". Freely redistributable under the conditions of the GNU
-        General Public License.
-
 On-line docs
 ------------
 
diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt
index cbc1b46..e89e36e 100644
--- a/Documentation/scheduler/sched-deadline.txt
+++ b/Documentation/scheduler/sched-deadline.txt
@@ -7,6 +7,8 @@
  0. WARNING
  1. Overview
  2. Scheduling algorithm
+   2.1 Main algorithm
+   2.2 Bandwidth reclaiming
  3. Scheduling Real-Time Tasks
    3.1 Definitions
    3.2 Schedulability Analysis for Uniprocessor Systems
@@ -44,6 +46,9 @@
 2. Scheduling algorithm
 ==================
 
+2.1 Main algorithm
+------------------
+
  SCHED_DEADLINE uses three parameters, named "runtime", "period", and
  "deadline", to schedule tasks. A SCHED_DEADLINE task should receive
  "runtime" microseconds of execution time every "period" microseconds, and
@@ -113,6 +118,160 @@
          remaining runtime = remaining runtime + runtime
 
 
+2.2 Bandwidth reclaiming
+------------------------
+
+ Bandwidth reclaiming for deadline tasks is based on the GRUB (Greedy
+ Reclamation of Unused Bandwidth) algorithm [15, 16, 17] and it is enabled
+ when flag SCHED_FLAG_RECLAIM is set.
+
+ The following diagram illustrates the state names for tasks handled by GRUB:
+
+                             ------------
+                 (d)        |   Active   |
+              ------------->|            |
+              |             | Contending |
+              |              ------------
+              |                A      |
+          ----------           |      |
+         |          |          |      |
+         | Inactive |          |(b)   | (a)
+         |          |          |      |
+          ----------           |      |
+              A                |      V
+              |              ------------
+              |             |   Active   |
+              --------------|     Non    |
+                 (c)        | Contending |
+                             ------------
+
+ A task can be in one of the following states:
+
+  - ActiveContending: if it is ready for execution (or executing);
+
+  - ActiveNonContending: if it just blocked and has not yet surpassed the 0-lag
+    time;
+
+  - Inactive: if it is blocked and has surpassed the 0-lag time.
+
+ State transitions:
+
+  (a) When a task blocks, it does not become immediately inactive since its
+      bandwidth cannot be immediately reclaimed without breaking the
+      real-time guarantees. It therefore enters a transitional state called
+      ActiveNonContending. The scheduler arms the "inactive timer" to fire at
+      the 0-lag time, when the task's bandwidth can be reclaimed without
+      breaking the real-time guarantees.
+
+      The 0-lag time for a task entering the ActiveNonContending state is
+      computed as
+
+                        (runtime * dl_period)
+             deadline - ---------------------
+                             dl_runtime
+
+      where runtime is the remaining runtime, while dl_runtime and dl_period
+      are the reservation parameters.
+
+  (b) If the task wakes up before the inactive timer fires, the task re-enters
+      the ActiveContending state and the "inactive timer" is canceled.
+      In addition, if the task wakes up on a different runqueue, then
+      the task's utilization must be removed from the previous runqueue's active
+      utilization and must be added to the new runqueue's active utilization.
+      In order to avoid races between a task waking up on a runqueue and the
+      "inactive timer" running on a different CPU, the "dl_non_contending"
+      flag is used to indicate that a task is not on a runqueue but is active
+      (so, the flag is set when the task blocks and is cleared when the
+      "inactive timer" fires or when the task wakes up).
+
+  (c) When the "inactive timer" fires, the task enters the Inactive state and
+      its utilization is removed from the runqueue's active utilization.
+
+  (d) When an inactive task wakes up, it enters the ActiveContending state and
+      its utilization is added to the active utilization of the runqueue where
+      it has been enqueued.
+
+ For each runqueue, the algorithm GRUB keeps track of two different bandwidths:
+
+  - Active bandwidth (running_bw): this is the sum of the bandwidths of all
+    tasks in active state (i.e., ActiveContending or ActiveNonContending);
+
+  - Total bandwidth (this_bw): this is the sum of the bandwidths of all tasks
+    "belonging" to the runqueue, including the tasks in Inactive state.
+
+
+ The algorithm reclaims the bandwidth of the tasks in Inactive state.
+ It does so by decrementing the runtime of the executing task Ti at a pace equal
+ to
+
+           dq = -max{ Ui, (1 - Uinact) } dt
+
+ where Uinact is the inactive utilization, computed as (this_bw - running_bw),
+ and Ui is the bandwidth of task Ti.
+
+
+ Let's now see a trivial example of two deadline tasks with runtime equal
+ to 4 and period equal to 8 (i.e., bandwidth equal to 0.5):
+
+     A            Task T1
+     |
+     |                               |
+     |                               |
+     |--------                       |----
+     |       |                       V
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+     A            Task T2
+     |
+     |                               |
+     |                               |
+     |       ------------------------|
+     |       |                       V
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+     A            running_bw
+     |
+   1 -----------------               ------
+     |               |               |
+  0.5-               -----------------
+     |                               |
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+  - Time t = 0:
+
+    Both tasks are ready for execution and therefore in ActiveContending state.
+    Suppose Task T1 is the first task to start execution.
+    Since there are no inactive tasks, its runtime is decreased as dq = -1 dt.
+
+  - Time t = 2:
+
+    Suppose that task T1 blocks.
+    Task T1 therefore enters the ActiveNonContending state. Since its remaining
+    runtime is equal to 2, its 0-lag time is equal to t = 4.
+    Task T2 starts execution, with runtime still decreased as dq = -1 dt since
+    there are no inactive tasks.
+
+  - Time t = 4:
+
+    This is the 0-lag time for Task T1. Since it did not wake up in the
+    meantime, it enters the Inactive state. Its bandwidth is removed from
+    running_bw.
+    Task T2 continues its execution. However, its runtime is now decreased as
+    dq = - 0.5 dt because Uinact = 0.5.
+    Task T2 therefore reclaims the bandwidth unused by Task T1.
+
+  - Time t = 8:
+
+    Task T1 wakes up. It enters the ActiveContending state again, and the
+    running_bw is incremented.
+
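+ The accounting in the example above can be reproduced with a small
+ userspace sketch (plain C, purely illustrative; grub_dq() is a made-up
+ helper, not the kernel's implementation, and the kernel uses fixed-point
+ rather than floating-point arithmetic):
+
+	#include <stdio.h>
+
+	/* dq/dt of the running task, per the GRUB rule of section 2.2. */
+	static double grub_dq(double dt, double Ui, double running_bw,
+			      double this_bw)
+	{
+		double Uinact = this_bw - running_bw;	/* Inactive tasks */
+		double f = Ui > (1.0 - Uinact) ? Ui : (1.0 - Uinact);
+
+		return -f * dt;		/* dq = -max{ Ui, (1 - Uinact) } dt */
+	}
+
+	int main(void)
+	{
+		/* t in [0, 2): T1 runs and no task is Inactive -> -1.0 dt */
+		printf("%.2f\n", grub_dq(1.0, 0.5, 1.0, 1.0));
+		/* t in [4, 8): T2 runs while T1 is Inactive    -> -0.5 dt */
+		printf("%.2f\n", grub_dq(1.0, 0.5, 0.5, 1.0));
+		return 0;
+	}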
+
 3. Scheduling Real-Time Tasks
 =============================
 
@@ -330,6 +489,15 @@
   14 - J. Erickson, U. Devi and S. Baruah. Improved tardiness bounds for
        Global EDF. Proceedings of the 22nd Euromicro Conference on
        Real-Time Systems, 2010.
+  15 - G. Lipari, S. Baruah, Greedy reclamation of unused bandwidth in
+       constant-bandwidth servers, 12th IEEE Euromicro Conference on Real-Time
+       Systems, 2000.
+  16 - L. Abeni, J. Lelli, C. Scordino, L. Palopoli, Greedy CPU reclaiming for
+       SCHED DEADLINE. In Proceedings of the Real-Time Linux Workshop (RTLWS),
+       Dusseldorf, Germany, 2014.
+  17 - L. Abeni, G. Lipari, A. Parri, Y. Sun, Multicore CPU reclaiming: parallel
+       or sequential?. In Proceedings of the 31st Annual ACM Symposium on Applied
+       Computing, 2016.
 
 
 4. Bandwidth management
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
deleted file mode 100644
index 45c82fd..0000000
--- a/Documentation/security/00-INDEX
+++ /dev/null
@@ -1,26 +0,0 @@
-00-INDEX
-	- this file.
-LSM.txt
-	- description of the Linux Security Module framework.
-SELinux.txt
-	- how to get started with the SELinux security enhancement.
-Smack.txt
-	- documentation on the Smack Linux Security Module.
-Yama.txt
-	- documentation on the Yama Linux Security Module.
-apparmor.txt
-	- documentation on the AppArmor security extension.
-credentials.txt
-	- documentation about credentials in Linux.
-keys-ecryptfs.txt
-	- description of the encryption keys for the ecryptfs filesystem.
-keys-request-key.txt
-	- description of the kernel key request service.
-keys-trusted-encrypted.txt
-	- info on the Trusted and Encrypted keys in the kernel key ring service.
-keys.txt
-	- description of the kernel key retention service.
-tomoyo.txt
-	- documentation on the TOMOYO Linux Security Module.
-IMA-templates.txt
-	- documentation on the template management mechanism for IMA.
diff --git a/Documentation/security/IMA-templates.txt b/Documentation/security/IMA-templates.rst
similarity index 72%
rename from Documentation/security/IMA-templates.txt
rename to Documentation/security/IMA-templates.rst
index 839b5da..2cd0e27 100644
--- a/Documentation/security/IMA-templates.txt
+++ b/Documentation/security/IMA-templates.rst
@@ -1,9 +1,12 @@
-                       IMA Template Management Mechanism
+=================================
+IMA Template Management Mechanism
+=================================
 
 
-==== INTRODUCTION ====
+Introduction
+============
 
-The original 'ima' template is fixed length, containing the filedata hash
+The original ``ima`` template is fixed length, containing the filedata hash
 and pathname. The filedata hash is limited to 20 bytes (md5/sha1).
 The pathname is a null terminated string, limited to 255 characters.
 To overcome these limitations and to add additional file metadata, it is
@@ -28,61 +31,64 @@
 two functions, init() and show(), respectively to generate and display
 measurement entries. Defining a new template descriptor requires
 specifying the template format (a string of field identifiers separated
-by the '|' character) through the 'ima_template_fmt' kernel command line
+by the ``|`` character) through the ``ima_template_fmt`` kernel command line
 parameter. At boot time, IMA initializes the chosen template descriptor
 by translating the format into an array of template fields structures taken
 from the set of the supported ones.
 
-After the initialization step, IMA will call ima_alloc_init_template()
+After the initialization step, IMA will call ``ima_alloc_init_template()``
 (new function defined within the patches for the new template management
 mechanism) to generate a new measurement entry by using the template
 descriptor chosen through the kernel configuration or through the newly
-introduced 'ima_template' and 'ima_template_fmt' kernel command line parameters.
+introduced ``ima_template`` and ``ima_template_fmt`` kernel command line parameters.
 It is during this phase that the advantages of the new architecture are
 clearly shown: the latter function will not contain specific code to handle
-a given template but, instead, it simply calls the init() method of the template
+a given template but, instead, it simply calls the ``init()`` method of the template
 fields associated with the chosen template descriptor and stores the result
 (pointer to allocated data and data length) in the measurement entry structure.
 
 The same mechanism is employed to display measurement entries.
-The functions ima[_ascii]_measurements_show() retrieve, for each entry,
+The functions ``ima[_ascii]_measurements_show()`` retrieve, for each entry,
 the template descriptor used to produce that entry and call the show()
 method for each item of the array of template fields structures.
 
 
 
-==== SUPPORTED TEMPLATE FIELDS AND DESCRIPTORS ====
+Supported Template Fields and Descriptors
+=========================================
 
 In the following, there is the list of supported template fields
-('<identifier>': description), that can be used to define new template
+``('<identifier>': description)`` that can be used to define new template
 descriptors by adding their identifier to the format string
 (support for more data types will be added later):
 
  - 'd': the digest of the event (i.e. the digest of a measured file),
-        calculated with the SHA1 or MD5 hash algorithm;
+   calculated with the SHA1 or MD5 hash algorithm;
  - 'n': the name of the event (i.e. the file name), with size up to 255 bytes;
  - 'd-ng': the digest of the event, calculated with an arbitrary hash
-           algorithm (field format: [<hash algo>:]digest, where the digest
-           prefix is shown only if the hash algorithm is not SHA1 or MD5);
+   algorithm (field format: [<hash algo>:]digest, where the digest
+   prefix is shown only if the hash algorithm is not SHA1 or MD5);
  - 'n-ng': the name of the event, without size limitations;
  - 'sig': the file signature.
 
 
 Below, there is the list of defined template descriptors:
- - "ima": its format is 'd|n';
- - "ima-ng" (default): its format is 'd-ng|n-ng';
- - "ima-sig": its format is 'd-ng|n-ng|sig'.
+
+ - "ima": its format is ``d|n``;
+ - "ima-ng" (default): its format is ``d-ng|n-ng``;
+ - "ima-sig": its format is ``d-ng|n-ng|sig``.
 
 
 
-==== USE ====
+Use
+===
 
 To specify the template descriptor to be used to generate measurement entries,
 currently the following methods are supported:
 
  - select a template descriptor among those supported in the kernel
-   configuration ('ima-ng' is the default choice);
+   configuration (``ima-ng`` is the default choice);
  - specify a template descriptor name from the kernel command line through
-   the 'ima_template=' parameter;
+   the ``ima_template=`` parameter;
  - register a new template descriptor with custom format through the kernel
-   command line parameter 'ima_template_fmt='.
+   command line parameter ``ima_template_fmt=``.
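+
+For example, booting with the following parameter (an illustrative value, not
+a recommendation) registers a custom descriptor whose measurement entries
+carry an arbitrary-hash digest, an unlimited-length name and a signature::
+
+   ima_template_fmt=d-ng|n-ng|sig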
diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst
new file mode 100644
index 0000000..d75778b
--- /dev/null
+++ b/Documentation/security/LSM.rst
@@ -0,0 +1,14 @@
+=================================
+Linux Security Module Development
+=================================
+
+Based on https://lkml.org/lkml/2007/10/26/215,
+a new LSM is accepted into the kernel when its intent (a description of
+what it tries to protect against and in what cases one would expect to
+use it) has been appropriately documented in ``Documentation/security/LSM``.
+This allows an LSM's code to be easily compared against its goals, and lets
+end users and distros make a more informed decision about which LSMs suit
+their requirements.
+
+For extensive documentation on the available LSM hook interfaces, please
+see ``include/linux/lsm_hooks.h``.
diff --git a/Documentation/security/conf.py b/Documentation/security/conf.py
deleted file mode 100644
index 472fc9a..0000000
--- a/Documentation/security/conf.py
+++ /dev/null
@@ -1,8 +0,0 @@
-project = "The kernel security subsystem manual"
-
-tags.add("subproject")
-
-latex_documents = [
-    ('index', 'security.tex', project,
-     'The kernel development community', 'manual'),
-]
diff --git a/Documentation/security/credentials.txt b/Documentation/security/credentials.rst
similarity index 72%
rename from Documentation/security/credentials.txt
rename to Documentation/security/credentials.rst
index 8625705..038a7e19 100644
--- a/Documentation/security/credentials.txt
+++ b/Documentation/security/credentials.rst
@@ -1,38 +1,18 @@
-			     ====================
-			     CREDENTIALS IN LINUX
-			     ====================
+====================
+Credentials in Linux
+====================
 
 By: David Howells <dhowells@redhat.com>
 
-Contents:
+.. contents:: :local:
 
- (*) Overview.
-
- (*) Types of credentials.
-
- (*) File markings.
-
- (*) Task credentials.
-
-     - Immutable credentials.
-     - Accessing task credentials.
-     - Accessing another task's credentials.
-     - Altering credentials.
-     - Managing credentials.
-
- (*) Open file credentials.
-
- (*) Overriding the VFS's use of credentials.
-
-
-========
-OVERVIEW
+Overview
 ========
 
 There are several parts to the security check performed by Linux when one
 object acts upon another:
 
- (1) Objects.
+ 1. Objects.
 
      Objects are things in the system that may be acted upon directly by
      userspace programs.  Linux has a variety of actionable objects, including:
@@ -48,7 +28,7 @@
      As a part of the description of all these objects there is a set of
      credentials.  What's in the set depends on the type of object.
 
- (2) Object ownership.
+ 2. Object ownership.
 
      Amongst the credentials of most objects, there will be a subset that
      indicates the ownership of that object.  This is used for resource
@@ -57,7 +37,7 @@
      In a standard UNIX filesystem, for instance, this will be defined by the
      UID marked on the inode.
 
- (3) The objective context.
+ 3. The objective context.
 
      Also amongst the credentials of those objects, there will be a subset that
      indicates the 'objective context' of that object.  This may or may not be
@@ -67,7 +47,7 @@
      The objective context is used as part of the security calculation that is
      carried out when an object is acted upon.
 
- (4) Subjects.
+ 4. Subjects.
 
      A subject is an object that is acting upon another object.
 
@@ -77,10 +57,10 @@
 
      Objects other than tasks may under some circumstances also be subjects.
      For instance an open file may send SIGIO to a task using the UID and EUID
-     given to it by a task that called fcntl(F_SETOWN) upon it.  In this case,
+     given to it by a task that called ``fcntl(F_SETOWN)`` upon it.  In this case,
      the file struct will have a subjective context too.
 
- (5) The subjective context.
+ 5. The subjective context.
 
      A subject has an additional interpretation of its credentials.  A subset
      of its credentials forms the 'subjective context'.  The subjective context
@@ -92,7 +72,7 @@
      from the real UID and GID that normally form the objective context of the
      task.
 
- (6) Actions.
+ 6. Actions.
 
      Linux has a number of actions available that a subject may perform upon an
      object.  The set of actions available depends on the nature of the subject
@@ -101,7 +81,7 @@
      Actions include reading, writing, creating and deleting files; forking or
      signalling and tracing tasks.
 
- (7) Rules, access control lists and security calculations.
+ 7. Rules, access control lists and security calculations.
 
      When a subject acts upon an object, a security calculation is made.  This
      involves taking the subjective context, the objective context and the
@@ -111,7 +91,7 @@
 
      There are two main sources of rules:
 
-     (a) Discretionary access control (DAC):
+     a. Discretionary access control (DAC):
 
 	 Sometimes the object will include sets of rules as part of its
 	 description.  This is an 'Access Control List' or 'ACL'.  A Linux
@@ -127,7 +107,7 @@
 	 A Linux file might also sport a POSIX ACL.  This is a list of rules
 	 that grants various permissions to arbitrary subjects.
 
-     (b) Mandatory access control (MAC):
+     b. Mandatory access control (MAC):
 
 	 The system as a whole may have one or more sets of rules that get
 	 applied to all subjects and objects, regardless of their source.
@@ -139,65 +119,65 @@
 	 that says that this action is either granted or denied.
 
 
-====================
-TYPES OF CREDENTIALS
+Types of Credentials
 ====================
 
 The Linux kernel supports the following types of credentials:
 
- (1) Traditional UNIX credentials.
+ 1. Traditional UNIX credentials.
 
-	Real User ID
-	Real Group ID
+	- Real User ID
+	- Real Group ID
 
      The UID and GID are carried by most, if not all, Linux objects, even if in
      some cases it has to be invented (FAT or CIFS files for example, which are
      derived from Windows).  These (mostly) define the objective context of
      that object, with tasks being slightly different in some cases.
 
-	Effective, Saved and FS User ID
-	Effective, Saved and FS Group ID
-	Supplementary groups
+	- Effective, Saved and FS User ID
+	- Effective, Saved and FS Group ID
+	- Supplementary groups
 
      These are additional credentials used by tasks only.  Usually, an
      EUID/EGID/GROUPS will be used as the subjective context, and real UID/GID
      will be used as the objective.  For tasks, it should be noted that this is
      not always true.
 
- (2) Capabilities.
+ 2. Capabilities.
 
-	Set of permitted capabilities
-	Set of inheritable capabilities
-	Set of effective capabilities
-	Capability bounding set
+	- Set of permitted capabilities
+	- Set of inheritable capabilities
+	- Set of effective capabilities
+	- Capability bounding set
 
      These are only carried by tasks.  They indicate superior capabilities
      granted piecemeal to a task that an ordinary task wouldn't otherwise have.
      These are manipulated implicitly by changes to the traditional UNIX
-     credentials, but can also be manipulated directly by the capset() system
-     call.
+     credentials, but can also be manipulated directly by the ``capset()``
+     system call.
 
      The permitted capabilities are those caps that the process might grant
-     itself to its effective or permitted sets through capset().  This
+     itself to its effective or permitted sets through ``capset()``.  This
      inheritable set might also be so constrained.
 
      The effective capabilities are the ones that a task is actually allowed to
      make use of itself.
 
      The inheritable capabilities are the ones that may get passed across
-     execve().
+     ``execve()``.
 
      The bounding set limits the capabilities that may be inherited across
-     execve(), especially when a binary is executed that will execute as UID 0.
+     ``execve()``, especially when a binary is executed that will execute as
+     UID 0.
 
- (3) Secure management flags (securebits).
+ 3. Secure management flags (securebits).
 
      These are only carried by tasks.  These govern the way the above
      credentials are manipulated and inherited over certain operations such as
      execve().  They aren't used directly as objective or subjective
      credentials.
 
- (4) Keys and keyrings.
+ 4. Keys and keyrings.
 
      These are only carried by tasks.  They carry and cache security tokens
      that don't fit into the other standard UNIX credentials.  They are for
@@ -218,7 +198,7 @@
 
      For more information on using keys, see Documentation/security/keys.txt.
 
- (5) LSM
+ 5. LSM
 
      The Linux Security Module allows extra controls to be placed over the
      operations that a task may do.  Currently Linux supports several LSM
@@ -228,7 +208,7 @@
      rules (policies) that say what operations a task with one label may do to
      an object with another label.
 
- (6) AF_KEY
+ 6. AF_KEY
 
      This is a socket-based approach to credential management for networking
      stacks [RFC 2367].  It isn't discussed by this document as it doesn't
@@ -244,25 +224,19 @@
 to the server, regardless of who is actually doing a read or a write upon it.
 
 
-=============
-FILE MARKINGS
+File Markings
 =============
 
 Files on disk or obtained over the network may have annotations that form the
 objective security context of that file.  Depending on the type of filesystem,
 this may include one or more of the following:
 
- (*) UNIX UID, GID, mode;
-
- (*) Windows user ID;
-
- (*) Access control list;
-
- (*) LSM security label;
-
- (*) UNIX exec privilege escalation bits (SUID/SGID);
-
- (*) File capabilities exec privilege escalation bits.
+ * UNIX UID, GID, mode;
+ * Windows user ID;
+ * Access control list;
+ * LSM security label;
+ * UNIX exec privilege escalation bits (SUID/SGID);
+ * File capabilities exec privilege escalation bits.
 
 These are compared to the task's subjective security context, and certain
 operations allowed or disallowed as a result.  In the case of execve(), the
@@ -270,8 +244,7 @@
 extra privileges, based on the annotations on the executable file.
 
 
-================
-TASK CREDENTIALS
+Task Credentials
 ================
 
 In Linux, all of a task's credentials are held in (uid, gid) or through
@@ -282,20 +255,20 @@
 Once a set of credentials has been prepared and committed, it may not be
 changed, barring the following exceptions:
 
- (1) its reference count may be changed;
+ 1. its reference count may be changed;
 
- (2) the reference count on the group_info struct it points to may be changed;
+ 2. the reference count on the group_info struct it points to may be changed;
 
- (3) the reference count on the security data it points to may be changed;
+ 3. the reference count on the security data it points to may be changed;
 
- (4) the reference count on any keyrings it points to may be changed;
+ 4. the reference count on any keyrings it points to may be changed;
 
- (5) any keyrings it points to may be revoked, expired or have their security
-     attributes changed; and
+ 5. any keyrings it points to may be revoked, expired or have their security
+    attributes changed; and
 
- (6) the contents of any keyrings to which it points may be changed (the whole
-     point of keyrings being a shared set of credentials, modifiable by anyone
-     with appropriate access).
+ 6. the contents of any keyrings to which it points may be changed (the whole
+    point of keyrings being a shared set of credentials, modifiable by anyone
+    with appropriate access).
 
 To alter anything in the cred struct, the copy-and-replace principle must be
 adhered to.  First take a copy, then alter the copy and then use RCU to change
@@ -303,37 +276,37 @@
 with this (see below).
 
 A task may only alter its _own_ credentials; it is no longer permitted for a
-task to alter another's credentials.  This means the capset() system call is no
-longer permitted to take any PID other than the one of the current process.
-Also keyctl_instantiate() and keyctl_negate() functions no longer permit
-attachment to process-specific keyrings in the requesting process as the
-instantiating process may need to create them.
+task to alter another's credentials.  This means the ``capset()`` system call
+is no longer permitted to take any PID other than the one of the current
+process. Also ``keyctl_instantiate()`` and ``keyctl_negate()`` functions no
+longer permit attachment to process-specific keyrings in the requesting
+process as the instantiating process may need to create them.
 
 
-IMMUTABLE CREDENTIALS
+Immutable Credentials
 ---------------------
 
-Once a set of credentials has been made public (by calling commit_creds() for
-example), it must be considered immutable, barring two exceptions:
+Once a set of credentials has been made public (by calling ``commit_creds()``
+for example), it must be considered immutable, barring two exceptions:
 
- (1) The reference count may be altered.
+ 1. The reference count may be altered.
 
- (2) Whilst the keyring subscriptions of a set of credentials may not be
-     changed, the keyrings subscribed to may have their contents altered.
+ 2. Whilst the keyring subscriptions of a set of credentials may not be
+    changed, the keyrings subscribed to may have their contents altered.
 
 To catch accidental credential alteration at compile time, struct task_struct
 has _const_ pointers to its credential sets, as does struct file.  Furthermore,
-certain functions such as get_cred() and put_cred() operate on const pointers,
-thus rendering casts unnecessary, but require to temporarily ditch the const
-qualification to be able to alter the reference count.
+certain functions such as ``get_cred()`` and ``put_cred()`` operate on const
+pointers, thus rendering casts unnecessary, but must temporarily drop the
+const qualification to be able to alter the reference count.
 
 
-ACCESSING TASK CREDENTIALS
+Accessing Task Credentials
 --------------------------
 
 A task being able to alter only its own credentials permits the current process
 to read or replace its own credentials without the need for any form of locking
-- which simplifies things greatly.  It can just call:
+-- which simplifies things greatly.  It can just call::
 
 	const struct cred *current_cred()
 
@@ -341,7 +314,7 @@
 it afterwards.
 
 There are convenience wrappers for retrieving specific aspects of a task's
-credentials (the value is simply returned in each case):
+credentials (the value is simply returned in each case)::
 
 	uid_t current_uid(void)		Current's real UID
 	gid_t current_gid(void)		Current's real GID
@@ -354,7 +327,7 @@
 	struct user_struct *current_user(void)  Current's user account
 
 There are also convenience wrappers for retrieving specific associated pairs of
-a task's credentials:
+a task's credentials::
 
 	void current_uid_gid(uid_t *, gid_t *);
 	void current_euid_egid(uid_t *, gid_t *);
@@ -365,12 +338,12 @@
 
 
 In addition, there is a function for obtaining a reference on the current
-process's current set of credentials:
+process's current set of credentials::
 
 	const struct cred *get_current_cred(void);
 
 and functions for getting references to one of the credentials that don't
-actually live in struct cred:
+actually live in struct cred::
 
 	struct user_struct *get_current_user(void);
 	struct group_info *get_current_groups(void);
@@ -378,22 +351,22 @@
 which get references to the current process's user accounting structure and
 supplementary groups list respectively.
 
-Once a reference has been obtained, it must be released with put_cred(),
-free_uid() or put_group_info() as appropriate.
+Once a reference has been obtained, it must be released with ``put_cred()``,
+``free_uid()`` or ``put_group_info()`` as appropriate.
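+
+A minimal sketch (not part of the original text) of the usual pattern for a
+caller that may sleep and therefore wants its own reference::
+
+	const struct cred *cred = get_current_cred();
+
+	/* ... inspect cred->fsuid, cred->fsgid, etc. ... */
+
+	put_cred(cred);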
 
 
-ACCESSING ANOTHER TASK'S CREDENTIALS
+Accessing Another Task's Credentials
 ------------------------------------
 
 Whilst a task may access its own credentials without the need for locking, the
 same is not true of a task wanting to access another task's credentials.  It
-must use the RCU read lock and rcu_dereference().
+must use the RCU read lock and ``rcu_dereference()``.
 
-The rcu_dereference() is wrapped by:
+The ``rcu_dereference()`` is wrapped by::
 
 	const struct cred *__task_cred(struct task_struct *task);
 
-This should be used inside the RCU read lock, as in the following example:
+This should be used inside the RCU read lock, as in the following example::
 
 	void foo(struct task_struct *t, struct foo_data *f)
 	{
@@ -410,39 +383,40 @@
 
 Should it be necessary to hold another task's credentials for a long period of
 time, and possibly to sleep whilst doing so, then the caller should get a
-reference on them using:
+reference on them using::
 
 	const struct cred *get_task_cred(struct task_struct *task);
 
 This does all the RCU magic inside of it.  The caller must call put_cred() on
 the credentials so obtained when they're finished with.
 
- [*] Note: The result of __task_cred() should not be passed directly to
-     get_cred() as this may race with commit_cred().
+.. note::
+   The result of ``__task_cred()`` should not be passed directly to
+   ``get_cred()`` as this may race with ``commit_creds()``.
 
 There are a couple of convenience functions to access bits of another task's
-credentials, hiding the RCU magic from the caller:
+credentials, hiding the RCU magic from the caller::
 
 	uid_t task_uid(task)		Task's real UID
 	uid_t task_euid(task)		Task's effective UID
 
-If the caller is holding the RCU read lock at the time anyway, then:
+If the caller is holding the RCU read lock at the time anyway, then::
 
 	__task_cred(task)->uid
 	__task_cred(task)->euid
 
 should be used instead.  Similarly, if multiple aspects of a task's credentials
-need to be accessed, RCU read lock should be used, __task_cred() called, the
-result stored in a temporary pointer and then the credential aspects called
+need to be accessed, the RCU read lock should be held, ``__task_cred()`` called,
+the result stored in a temporary pointer and then the credential aspects read
 from that before dropping the lock.  This prevents the potentially expensive
 RCU magic from being invoked multiple times.
 
 Should some other single aspect of another task's credentials need to be
-accessed, then this can be used:
+accessed, then this can be used::
 
 	task_cred_xxx(task, member)
 
-where 'member' is a non-pointer member of the cred struct.  For instance:
+where 'member' is a non-pointer member of the cred struct.  For instance::
 
 	uid_t task_cred_xxx(task, suid);
 
@@ -451,7 +425,7 @@
 disappear the moment the RCU read lock is dropped.
 
 
-ALTERING CREDENTIALS
+Altering Credentials
 --------------------
 
 As previously mentioned, a task may only alter its own credentials, and may not
@@ -459,7 +433,7 @@
 locking to alter its own credentials.
 
 To alter the current process's credentials, a function should first prepare a
-new set of credentials by calling:
+new set of credentials by calling::
 
 	struct cred *prepare_creds(void);
 
@@ -467,9 +441,10 @@
 duplicate of the current process's credentials, returning with the mutex still
 held if successful.  It returns NULL if not successful (out of memory).
 
-The mutex prevents ptrace() from altering the ptrace state of a process whilst
-security checks on credentials construction and changing is taking place as
-the ptrace state may alter the outcome, particularly in the case of execve().
+The mutex prevents ``ptrace()`` from altering the ptrace state of a process
+whilst security checks on credentials construction and changing is taking place
+as the ptrace state may alter the outcome, particularly in the case of
+``execve()``.
 
 The new credentials set should be altered appropriately, and any security
 checks and hooks done.  Both the current and the proposed sets of credentials
@@ -478,36 +453,37 @@
 
 
 When the credential set is ready, it should be committed to the current process
-by calling:
+by calling::
 
 	int commit_creds(struct cred *new);
 
 This will alter various aspects of the credentials and the process, giving the
-LSM a chance to do likewise, then it will use rcu_assign_pointer() to actually
-commit the new credentials to current->cred, it will release
-current->cred_replace_mutex to allow ptrace() to take place, and it will notify
-the scheduler and others of the changes.
+LSM a chance to do likewise, then it will use ``rcu_assign_pointer()`` to
+actually commit the new credentials to ``current->cred``, it will release
+``current->cred_replace_mutex`` to allow ``ptrace()`` to take place, and it
+will notify the scheduler and others of the changes.
 
 This function is guaranteed to return 0, so that it can be tail-called at the
-end of such functions as sys_setresuid().
+end of such functions as ``sys_setresuid()``.
 
 Note that this function consumes the caller's reference to the new credentials.
-The caller should _not_ call put_cred() on the new credentials afterwards.
+The caller should _not_ call ``put_cred()`` on the new credentials afterwards.
 
 Furthermore, once this function has been called on a new set of credentials,
 those credentials may _not_ be changed further.
 
 
-Should the security checks fail or some other error occur after prepare_creds()
-has been called, then the following function should be invoked:
+Should the security checks fail or some other error occur after
+``prepare_creds()`` has been called, then the following function should be
+invoked::
 
 	void abort_creds(struct cred *new);
 
-This releases the lock on current->cred_replace_mutex that prepare_creds() got
-and then releases the new credentials.
+This releases the lock on ``current->cred_replace_mutex`` that
+``prepare_creds()`` got and then releases the new credentials.
 
 
-A typical credentials alteration function would look something like this:
+A typical credentials alteration function would look something like this::
 
 	int alter_suid(uid_t suid)
 	{
@@ -529,53 +505,50 @@
 	}
 
 
-MANAGING CREDENTIALS
+Managing Credentials
 --------------------
 
 There are some functions to help manage credentials:
 
- (*) void put_cred(const struct cred *cred);
+ - ``void put_cred(const struct cred *cred);``
 
      This releases a reference to the given set of credentials.  If the
      reference count reaches zero, the credentials will be scheduled for
      destruction by the RCU system.
 
- (*) const struct cred *get_cred(const struct cred *cred);
+ - ``const struct cred *get_cred(const struct cred *cred);``
 
      This gets a reference on a live set of credentials, returning a pointer to
      that set of credentials.
 
- (*) struct cred *get_new_cred(struct cred *cred);
+ - ``struct cred *get_new_cred(struct cred *cred);``
 
      This gets a reference on a set of credentials that is under construction
      and is thus still mutable, returning a pointer to that set of credentials.
 
 
-=====================
-OPEN FILE CREDENTIALS
+Open File Credentials
 =====================
 
 When a new file is opened, a reference is obtained on the opening task's
-credentials and this is attached to the file struct as 'f_cred' in place of
-'f_uid' and 'f_gid'.  Code that used to access file->f_uid and file->f_gid
-should now access file->f_cred->fsuid and file->f_cred->fsgid.
+credentials and this is attached to the file struct as ``f_cred`` in place of
+``f_uid`` and ``f_gid``.  Code that used to access ``file->f_uid`` and
+``file->f_gid`` should now access ``file->f_cred->fsuid`` and
+``file->f_cred->fsgid``.
 
-It is safe to access f_cred without the use of RCU or locking because the
+It is safe to access ``f_cred`` without the use of RCU or locking because the
 pointer will not change over the lifetime of the file struct, and nor will the
 contents of the cred struct pointed to, barring the exceptions listed above
 (see the Task Credentials section).
 
 
-=======================================
-OVERRIDING THE VFS'S USE OF CREDENTIALS
+Overriding the VFS's Use of Credentials
 =======================================
 
 Under some circumstances it is desirable to override the credentials used by
-the VFS, and that can be done by calling into such as vfs_mkdir() with a
+the VFS, and that can be done by calling into functions such as
+``vfs_mkdir()`` with a
 different set of credentials.  This is done in the following places:
 
- (*) sys_faccessat().
-
- (*) do_coredump().
-
- (*) nfs4recover.c.
+ * ``sys_faccessat()``.
+ * ``do_coredump()``.
+ * nfs4recover.c.
diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst
index 9bae6bb..298a94a 100644
--- a/Documentation/security/index.rst
+++ b/Documentation/security/index.rst
@@ -1,7 +1,13 @@
 ======================
-Security documentation
+Security Documentation
 ======================
 
 .. toctree::
+   :maxdepth: 1
 
+   credentials
+   IMA-templates
+   keys/index
+   LSM
+   self-protection
    tpm/index
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys/core.rst
similarity index 88%
rename from Documentation/security/keys.txt
rename to Documentation/security/keys/core.rst
index cd50199..0d831a7 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys/core.rst
@@ -1,6 +1,6 @@
-			 ============================
-			 KERNEL KEY RETENTION SERVICE
-			 ============================
+============================
+Kernel Key Retention Service
+============================
 
 This service allows cryptographic keys, authentication tokens, cross-domain
 user mappings, and similar to be cached in the kernel for the use of
@@ -29,8 +29,7 @@
 	- Garbage collection
 
 
-============
-KEY OVERVIEW
+Key Overview
 ============
 
 In this context, keys represent units of cryptographic data, authentication
@@ -47,14 +46,14 @@
 	- State.
 
 
- (*) Each key is issued a serial number of type key_serial_t that is unique for
+  *  Each key is issued a serial number of type key_serial_t that is unique for
      the lifetime of that key. All serial numbers are positive non-zero 32-bit
      integers.
 
      Userspace programs can use a key's serial numbers as a way to gain access
      to it, subject to permission checking.
 
- (*) Each key is of a defined "type". Types must be registered inside the
+  *  Each key is of a defined "type". Types must be registered inside the
      kernel by a kernel service (such as a filesystem) before keys of that type
      can be added or used. Userspace programs cannot define new types directly.
 
@@ -64,18 +63,18 @@
      Should a type be removed from the system, all the keys of that type will
      be invalidated.
 
- (*) Each key has a description. This should be a printable string. The key
+  *  Each key has a description. This should be a printable string. The key
      type provides an operation to perform a match between the description on a
      key and a criterion string.
 
- (*) Each key has an owner user ID, a group ID and a permissions mask. These
+  *  Each key has an owner user ID, a group ID and a permissions mask. These
      are used to control what a process may do to a key from userspace, and
      whether a kernel service will be able to find the key.
 
- (*) Each key can be set to expire at a specific time by the key type's
+  *  Each key can be set to expire at a specific time by the key type's
      instantiation function. Keys can also be immortal.
 
- (*) Each key can have a payload. This is a quantity of data that represent the
+  *  Each key can have a payload. This is a quantity of data that represents the
      actual "key". In the case of a keyring, this is a list of keys to which
      the keyring links; in the case of a user-defined key, it's an arbitrary
      blob of data.
@@ -91,39 +90,38 @@
      permitted, another key type operation will be called to convert the key's
      attached payload back into a blob of data.
 
- (*) Each key can be in one of a number of basic states:
+  *  Each key can be in one of a number of basic states:
 
-     (*) Uninstantiated. The key exists, but does not have any data attached.
+      *  Uninstantiated. The key exists, but does not have any data attached.
      	 Keys being requested from userspace will be in this state.
 
-     (*) Instantiated. This is the normal state. The key is fully formed, and
+      *  Instantiated. This is the normal state. The key is fully formed, and
 	 has data attached.
 
-     (*) Negative. This is a relatively short-lived state. The key acts as a
+      *  Negative. This is a relatively short-lived state. The key acts as a
 	 note saying that a previous call out to userspace failed, and acts as
 	 a throttle on key lookups. A negative key can be updated to a normal
 	 state.
 
-     (*) Expired. Keys can have lifetimes set. If their lifetime is exceeded,
+      *  Expired. Keys can have lifetimes set. If their lifetime is exceeded,
 	 they traverse to this state. An expired key can be updated back to a
 	 normal state.
 
-     (*) Revoked. A key is put in this state by userspace action. It can't be
+      *  Revoked. A key is put in this state by userspace action. It can't be
 	 found or operated upon (apart from by unlinking it).
 
-     (*) Dead. The key's type was unregistered, and so the key is now useless.
+      *  Dead. The key's type was unregistered, and so the key is now useless.
 
 Keys in the last three states are subject to garbage collection.  See the
 section on "Garbage collection".
 
 
-====================
-KEY SERVICE OVERVIEW
+Key Service Overview
 ====================
 
 The key service provides a number of features besides keys:
 
- (*) The key service defines three special key types:
+  *  The key service defines three special key types:
 
      (+) "keyring"
 
@@ -149,7 +147,7 @@
 	 be created and updated from userspace, but the payload is only
 	 readable from kernel space.
 
- (*) Each process subscribes to three keyrings: a thread-specific keyring, a
+  *  Each process subscribes to three keyrings: a thread-specific keyring, a
      process-specific keyring, and a session-specific keyring.
 
      The thread-specific keyring is discarded from the child when any sort of
@@ -170,7 +168,7 @@
      The ownership of the thread keyring changes when the real UID and GID of
      the thread changes.
 
- (*) Each user ID resident in the system holds two special keyrings: a user
+  *  Each user ID resident in the system holds two special keyrings: a user
      specific keyring and a default user session keyring. The default session
      keyring is initialised with a link to the user-specific keyring.
 
@@ -180,7 +178,7 @@
      If a process attempts to access its session key when it doesn't have one,
      it will be subscribed to the default for its current UID.
 
- (*) Each user has two quotas against which the keys they own are tracked. One
+  *  Each user has two quotas against which the keys they own are tracked. One
      limits the total number of keys and keyrings, the other limits the total
      amount of description and payload space that can be consumed.
 
@@ -194,54 +192,53 @@
      If a system call that modifies a key or keyring in some way would put the
      user over quota, the operation is refused and error EDQUOT is returned.
 
- (*) There's a system call interface by which userspace programs can create and
+  *  There's a system call interface by which userspace programs can create and
      manipulate keys and keyrings.
 
- (*) There's a kernel interface by which services can register types and search
+  *  There's a kernel interface by which services can register types and search
      for keys.
 
- (*) There's a way for the a search done from the kernel to call back to
+  *  There's a way for a search done from the kernel to call back to
      userspace to request a key that can't be found in a process's keyrings.
 
- (*) An optional filesystem is available through which the key database can be
+  *  An optional filesystem is available through which the key database can be
      viewed and manipulated.
 
 
-======================
-KEY ACCESS PERMISSIONS
+Key Access Permissions
 ======================
 
 Keys have an owner user ID, a group access ID, and a permissions mask. The mask
 has up to eight bits each for possessor, user, group and other access. Only
 six of each set of eight bits are defined. These permissions granted are:
 
- (*) View
+  *  View
 
      This permits a key or keyring's attributes to be viewed - including key
      type and description.
 
- (*) Read
+  *  Read
 
      This permits a key's payload to be viewed or a keyring's list of linked
      keys.
 
- (*) Write
+  *  Write
 
      This permits a key's payload to be instantiated or updated, or it allows a
      link to be added to or removed from a keyring.
 
- (*) Search
+  *  Search
 
      This permits keyrings to be searched and keys to be found. Searches can
      only recurse into nested keyrings that have search permission set.
 
- (*) Link
+  *  Link
 
      This permits a key or keyring to be linked to. To create a link from a
      keyring to a key, a process must have Write permission on the keyring and
      Link permission on the key.
 
- (*) Set Attribute
+  *  Set Attribute
 
      This permits a key's UID, GID and permissions mask to be changed.
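+
+     For example (an illustrative mask, assuming the conventional keyutils
+     bit layout of possessor, user, group and other in descending byte
+     order), the following grants the possessor and the owning user
+     everything except Set Attribute::
+
+	keyctl(KEYCTL_SETPERM, key, 0x1f1f0000);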
 
@@ -249,8 +246,7 @@
 the key or having the sysadmin capability is sufficient.
 
 
-===============
-SELINUX SUPPORT
+SELinux Support
 ===============
 
 The security class "key" has been added to SELinux so that mandatory access
@@ -282,14 +278,13 @@
 similarly.
 
 
-================
-NEW PROCFS FILES
+New ProcFS Files
 ================
 
 Two files have been added to procfs by which an administrator can find out
 about the status of the key service:
 
- (*) /proc/keys
+  *  /proc/keys
 
      This lists the keys that are currently viewable by the task reading the
      file, giving information about their type, description and permissions.
@@ -301,7 +296,7 @@
      security checks are still performed, and may further filter out keys that
      the current process is not authorised to view.
 
-     The contents of the file look like this:
+     The contents of the file look like this::
 
 	SERIAL   FLAGS  USAGE EXPY PERM     UID   GID   TYPE      DESCRIPTION: SUMMARY
 	00000001 I-----    39 perm 1f3f0000     0     0 keyring   _uid_ses.0: 1/4
@@ -314,7 +309,7 @@
 	00000893 I--Q-N     1  35s 1f3f0000     0     0 user      metal:silver: 0
 	00000894 I--Q--     1  10h 003f0000     0     0 user      metal:gold: 0
 
-     The flags are:
+     The flags are::
 
 	I	Instantiated
 	R	Revoked
@@ -324,10 +319,10 @@
 	N	Negative key
 
 
- (*) /proc/key-users
+  *  /proc/key-users
 
      This file lists the tracking data for each user that has at least one key
-     on the system.  Such data includes quota information and statistics:
+     on the system.  Such data includes quota information and statistics::
 
 	[root@andromeda root]# cat /proc/key-users
 	0:     46 45/45 1/100 13/10000
@@ -335,7 +330,8 @@
 	32:     2 2/2 2/100 40/10000
 	38:     2 2/2 2/100 40/10000
 
-     The format of each line is
+     The format of each line is::
+
 	<UID>:			User ID to which this applies
 	<usage>			Structure refcount
 	<inst>/<keys>		Total number of keys and number instantiated
@@ -346,14 +342,14 @@
 Four new sysctl files have been added also for the purpose of controlling the
 quota limits on keys:
 
- (*) /proc/sys/kernel/keys/root_maxkeys
+  *  /proc/sys/kernel/keys/root_maxkeys
      /proc/sys/kernel/keys/root_maxbytes
 
      These files hold the maximum number of keys that root may have and the
      maximum total number of bytes of data that root may have stored in those
      keys.
 
- (*) /proc/sys/kernel/keys/maxkeys
+  *  /proc/sys/kernel/keys/maxkeys
      /proc/sys/kernel/keys/maxbytes
 
      These files hold the maximum number of keys that each non-root user may
@@ -364,8 +360,7 @@
 the appropriate file.
 
 
-===============================
-USERSPACE SYSTEM CALL INTERFACE
+Userspace System Call Interface
 ===============================
 
 Userspace can manipulate keys directly through three new syscalls: add_key,
@@ -375,7 +370,7 @@
 When referring to a key directly, userspace programs should use the key's
 serial number (a positive 32-bit integer). However, there are some special
 values available for referring to special keys and keyrings that relate to the
-process making the call:
+process making the call::
 
 	CONSTANT			VALUE	KEY REFERENCED
 	==============================	======	===========================
@@ -391,8 +386,8 @@
 
 The main syscalls are:
 
- (*) Create a new key of given type, description and payload and add it to the
-     nominated keyring:
+  *  Create a new key of given type, description and payload and add it to the
+     nominated keyring::
 
 	key_serial_t add_key(const char *type, const char *desc,
 			     const void *payload, size_t plen,
@@ -432,8 +427,8 @@
      The ID of the new or updated key is returned if successful.
 
 
- (*) Search the process's keyrings for a key, potentially calling out to
-     userspace to create it.
+  *  Search the process's keyrings for a key, potentially calling out to
+     userspace to create it::
 
 	key_serial_t request_key(const char *type, const char *description,
 				 const char *callout_info,
@@ -453,7 +448,7 @@
 
 The keyctl syscall functions are:
 
- (*) Map a special key ID to a real key ID for this process:
+  *  Map a special key ID to a real key ID for this process::
 
 	key_serial_t keyctl(KEYCTL_GET_KEYRING_ID, key_serial_t id,
 			    int create);
@@ -466,7 +461,7 @@
      non-zero; and the error ENOKEY will be returned if "create" is zero.
 
 
- (*) Replace the session keyring this process subscribes to with a new one:
+  *  Replace the session keyring this process subscribes to with a new one::
 
 	key_serial_t keyctl(KEYCTL_JOIN_SESSION_KEYRING, const char *name);
 
@@ -484,7 +479,7 @@
      The ID of the new session keyring is returned if successful.
 
 
- (*) Update the specified key:
+  *  Update the specified key::
 
 	long keyctl(KEYCTL_UPDATE, key_serial_t key, const void *payload,
 		    size_t plen);
@@ -498,7 +493,7 @@
      add_key().
 
 
- (*) Revoke a key:
+  *  Revoke a key::
 
 	long keyctl(KEYCTL_REVOKE, key_serial_t key);
 
@@ -507,7 +502,7 @@
      be findable.
 
 
- (*) Change the ownership of a key:
+  *  Change the ownership of a key::
 
 	long keyctl(KEYCTL_CHOWN, key_serial_t key, uid_t uid, gid_t gid);
 
@@ -520,7 +515,7 @@
      its group list members.
 
 
- (*) Change the permissions mask on a key:
+  *  Change the permissions mask on a key::
 
 	long keyctl(KEYCTL_SETPERM, key_serial_t key, key_perm_t perm);
 
@@ -531,7 +526,7 @@
      error EINVAL will be returned.
 
 
- (*) Describe a key:
+  *  Describe a key::
 
 	long keyctl(KEYCTL_DESCRIBE, key_serial_t key, char *buffer,
 		    size_t buflen);
@@ -547,7 +542,7 @@
      A process must have view permission on the key for this function to be
      successful.
 
-     If successful, a string is placed in the buffer in the following format:
+     If successful, a string is placed in the buffer in the following format::
 
 	<type>;<uid>;<gid>;<perm>;<description>
 
@@ -555,12 +550,12 @@
      is hexadecimal. A NUL character is included at the end of the string if
      the buffer is sufficiently big.
 
-     This can be parsed with
+     This can be parsed with::
 
 	sscanf(buffer, "%[^;];%d;%d;%o;%s", type, &uid, &gid, &mode, desc);
 
 
- (*) Clear out a keyring:
+  *  Clear out a keyring::
 
 	long keyctl(KEYCTL_CLEAR, key_serial_t keyring);
 
@@ -573,7 +568,7 @@
      DNS resolver cache keyring is an example of this.
 
 
- (*) Link a key into a keyring:
+  *  Link a key into a keyring::
 
 	long keyctl(KEYCTL_LINK, key_serial_t keyring, key_serial_t key);
 
@@ -592,7 +587,7 @@
      added.
 
 
- (*) Unlink a key or keyring from another keyring:
+  *  Unlink a key or keyring from another keyring::
 
 	long keyctl(KEYCTL_UNLINK, key_serial_t keyring, key_serial_t key);
 
@@ -604,7 +599,7 @@
      is not present, error ENOENT will be the result.
 
 
- (*) Search a keyring tree for a key:
+  *  Search a keyring tree for a key::
 
 	key_serial_t keyctl(KEYCTL_SEARCH, key_serial_t keyring,
 			    const char *type, const char *description,
@@ -628,7 +623,7 @@
      fails. On success, the resulting key ID will be returned.
 
 
- (*) Read the payload data from a key:
+  *  Read the payload data from a key::
 
 	long keyctl(KEYCTL_READ, key_serial_t keyring, char *buffer,
 		    size_t buflen);
@@ -650,7 +645,7 @@
      available rather than the amount copied.
 
 
- (*) Instantiate a partially constructed key.
+  *  Instantiate a partially constructed key::
 
 	long keyctl(KEYCTL_INSTANTIATE, key_serial_t key,
 		    const void *payload, size_t plen,
@@ -677,7 +672,7 @@
      array instead of a single buffer.
 
 
- (*) Negatively instantiate a partially constructed key.
+  *  Negatively instantiate a partially constructed key::
 
 	long keyctl(KEYCTL_NEGATE, key_serial_t key,
 		    unsigned timeout, key_serial_t keyring);
@@ -700,12 +695,12 @@
      as rejecting the key with ENOKEY as the error code.
 
 
- (*) Set the default request-key destination keyring.
+  *  Set the default request-key destination keyring::
 
 	long keyctl(KEYCTL_SET_REQKEY_KEYRING, int reqkey_defl);
 
      This sets the default keyring to which implicitly requested keys will be
-     attached for this thread. reqkey_defl should be one of these constants:
+     attached for this thread. reqkey_defl should be one of these constants::
 
 	CONSTANT				VALUE	NEW DEFAULT KEYRING
 	======================================	======	=======================
@@ -731,7 +726,7 @@
      there is one, otherwise the user default session keyring.
 
 
- (*) Set the timeout on a key.
+  *  Set the timeout on a key::
 
 	long keyctl(KEYCTL_SET_TIMEOUT, key_serial_t key, unsigned timeout);
 
@@ -744,7 +739,7 @@
      or expired keys.
 
 
- (*) Assume the authority granted to instantiate a key
+  *  Assume the authority granted to instantiate a key::
 
 	long keyctl(KEYCTL_ASSUME_AUTHORITY, key_serial_t key);
 
@@ -766,7 +761,7 @@
      The assumed authoritative key is inherited across fork and exec.
 
 
- (*) Get the LSM security context attached to a key.
+  *  Get the LSM security context attached to a key::
 
 	long keyctl(KEYCTL_GET_SECURITY, key_serial_t key, char *buffer,
 		    size_t buflen)
@@ -787,7 +782,7 @@
      successful.
 
 
- (*) Install the calling process's session keyring on its parent.
+  *  Install the calling process's session keyring on its parent::
 
 	long keyctl(KEYCTL_SESSION_TO_PARENT);
 
@@ -807,7 +802,7 @@
      kernel and resumes executing userspace.
 
 
- (*) Invalidate a key.
+  *  Invalidate a key::
 
 	long keyctl(KEYCTL_INVALIDATE, key_serial_t key);
 
@@ -823,20 +818,19 @@
      A process must have search permission on the key for this function to be
      successful.
 
- (*) Compute a Diffie-Hellman shared secret or public key
+  *  Compute a Diffie-Hellman shared secret or public key::
 
-       long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-		   char *buffer, size_t buflen,
-		   struct keyctl_kdf_params *kdf);
+	long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
+		    char *buffer, size_t buflen, struct keyctl_kdf_params *kdf);
 
-     The params struct contains serial numbers for three keys:
+     The params struct contains serial numbers for three keys::
 
 	 - The prime, p, known to both parties
 	 - The local private key
 	 - The base integer, which is either a shared generator or the
 	   remote public key
 
-     The value computed is:
+     The value computed is::
 
 	result = base ^ private (mod prime)
 
@@ -858,12 +852,12 @@
      of the KDF is returned to the caller. The KDF is characterized with
      struct keyctl_kdf_params as follows:
 
-	 - char *hashname specifies the NUL terminated string identifying
+	 - ``char *hashname`` specifies the NUL terminated string identifying
 	   the hash used from the kernel crypto API and applied for the KDF
 	   operation. The KDF implementation complies with SP800-56A as well
 	   as with SP800-108 (the counter KDF).
 
-	 - char *otherinfo specifies the OtherInfo data as documented in
+	 - ``char *otherinfo`` specifies the OtherInfo data as documented in
 	   SP800-56A section 5.8.1.2. The length of the buffer is given with
 	   otherinfolen. The format of OtherInfo is defined by the caller.
 	   The otherinfo pointer may be NULL if no OtherInfo shall be used.
@@ -875,10 +869,10 @@
      and either the buffer length or the OtherInfo length exceeds the
      allowed length.
 
- (*) Restrict keyring linkage
+  *  Restrict keyring linkage::
 
-       long keyctl(KEYCTL_RESTRICT_KEYRING, key_serial_t keyring,
-		   const char *type, const char *restriction);
+	long keyctl(KEYCTL_RESTRICT_KEYRING, key_serial_t keyring,
+		    const char *type, const char *restriction);
 
      An existing keyring can restrict linkage of additional keys by evaluating
      the contents of the key according to a restriction scheme.
@@ -900,8 +894,7 @@
      To apply a keyring restriction the process must have Set Attribute
      permission and the keyring must not be previously restricted.
 
-===============
-KERNEL SERVICES
+Kernel Services
 ===============
 
 The kernel services for key management are fairly simple to deal with. They can
@@ -915,29 +908,29 @@
 two different users opening the same file is left to the filesystem author to
 solve.
 
-To access the key manager, the following header must be #included:
+To access the key manager, the following header must be #included::
 
 	<linux/key.h>
 
 Specific key types should have a header file under include/keys/ that should be
-used to access that type.  For keys of type "user", for example, that would be:
+used to access that type.  For keys of type "user", for example, that would be::
 
 	<keys/user-type.h>
 
 Note that there are two different types of pointers to keys that may be
 encountered:
 
- (*) struct key *
+  *  struct key *
 
      This simply points to the key structure itself. Key structures will be at
      least four-byte aligned.
 
- (*) key_ref_t
+  *  key_ref_t
 
-     This is equivalent to a struct key *, but the least significant bit is set
+     This is equivalent to a ``struct key *``, but the least significant bit is set
      if the caller "possesses" the key. By "possession" it is meant that the
      calling process has a searchable link to the key from one of its
-     keyrings. There are three functions for dealing with these:
+     keyrings. There are three functions for dealing with these::
 
 	key_ref_t make_key_ref(const struct key *key, bool possession);
 
@@ -955,7 +948,7 @@
 prevent access vs modification races. See the section "Notes on accessing
 payload contents" for more information.
 
-(*) To search for a key, call:
+ *  To search for a key, call::
 
 	struct key *request_key(const struct key_type *type,
 				const char *description,
@@ -977,7 +970,7 @@
     See also Documentation/security/keys-request-key.txt.
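+
+    As a minimal sketch only (the key description, and the choice of the
+    "user" key type, are illustrative rather than taken from this document),
+    a kernel service might do::
+
+	struct key *key;
+
+	key = request_key(&key_type_user, "example:desc", NULL);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+	/* ... use the key, then drop the reference (see key_put() below) ... */
+	key_put(key);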
 
 
-(*) To search for a key, passing auxiliary data to the upcaller, call:
+ *  To search for a key, passing auxiliary data to the upcaller, call::
 
 	struct key *request_key_with_auxdata(const struct key_type *type,
 					     const char *description,
@@ -990,14 +983,14 @@
     is a blob of length callout_len, if given (the length may be 0).
 
 
-(*) A key can be requested asynchronously by calling one of:
+ *  A key can be requested asynchronously by calling one of::
 
 	struct key *request_key_async(const struct key_type *type,
 				      const char *description,
 				      const void *callout_info,
 				      size_t callout_len);
 
-    or:
+    or::
 
 	struct key *request_key_async_with_auxdata(const struct key_type *type,
 						   const char *description,
@@ -1010,7 +1003,7 @@
 
     These two functions return with the key potentially still under
     construction.  To wait for construction completion, the following should be
-    called:
+    called::
 
 	int wait_for_key_construction(struct key *key, bool intr);
 
@@ -1022,11 +1015,11 @@
     case error ERESTARTSYS will be returned.
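+
+    As a hedged sketch of the asynchronous pattern (the description string is
+    illustrative and no callout data is passed)::
+
+	struct key *key;
+	int ret;
+
+	key = request_key_async(&key_type_user, "example:desc", NULL, 0);
+	if (IS_ERR(key))
+		return PTR_ERR(key);
+
+	ret = wait_for_key_construction(key, true);
+	if (ret < 0) {
+		key_put(key);
+		return ret;
+	}
+	/* the key is now instantiated, or an error was returned above */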
 
 
-(*) When it is no longer required, the key should be released using:
+ *  When it is no longer required, the key should be released using::
 
 	void key_put(struct key *key);
 
-    Or:
+    Or::
 
 	void key_ref_put(key_ref_t key_ref);
 
@@ -1034,8 +1027,8 @@
     the argument will not be parsed.
 
 
-(*) Extra references can be made to a key by calling one of the following
-    functions:
+ *  Extra references can be made to a key by calling one of the following
+    functions::
 
 	struct key *__key_get(struct key *key);
 	struct key *key_get(struct key *key);
@@ -1047,7 +1040,7 @@
     then the key will not be dereferenced and no increment will take place.
 
 
-(*) A key's serial number can be obtained by calling:
+ *  A key's serial number can be obtained by calling::
 
 	key_serial_t key_serial(struct key *key);
 
@@ -1055,7 +1048,7 @@
     latter case without parsing the argument).
 
 
-(*) If a keyring was found in the search, this can be further searched by:
+ *  If a keyring was found in the search, this can be further searched by::
 
 	key_ref_t keyring_search(key_ref_t keyring_ref,
 				 const struct key_type *type,
@@ -1070,7 +1063,7 @@
     reference pointer if successful.
 
 
-(*) A keyring can be created by:
+ *  A keyring can be created by::
 
 	struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
 				  const struct cred *cred,
@@ -1109,7 +1102,7 @@
     -EPERM in this case.
 
 
-(*) To check the validity of a key, this function can be called:
+ *  To check the validity of a key, this function can be called::
 
 	int validate_key(struct key *key);
 
@@ -1119,7 +1112,7 @@
     returned (in the latter case without parsing the argument).
 
 
-(*) To register a key type, the following function should be called:
+ *  To register a key type, the following function should be called::
 
 	int register_key_type(struct key_type *type);
 
@@ -1127,13 +1120,13 @@
     present.
 
 
-(*) To unregister a key type, call:
+ *  To unregister a key type, call::
 
 	void unregister_key_type(struct key_type *type);
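+
+    As a sketch only (``example_key_type`` stands for a hypothetical, fully
+    filled-in key type as described in the "Defining a Key Type" section
+    below), a module would typically pair the two calls::
+
+	static int __init example_key_init(void)
+	{
+		return register_key_type(&example_key_type);
+	}
+
+	static void __exit example_key_exit(void)
+	{
+		unregister_key_type(&example_key_type);
+	}
+
+	module_init(example_key_init);
+	module_exit(example_key_exit);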
 
 
 Under some circumstances, it may be desirable to deal with a bundle of keys.
-The facility provides access to the keyring type for managing such a bundle:
+The facility provides access to the keyring type for managing such a bundle::
 
 	struct key_type key_type_keyring;
 
@@ -1143,8 +1136,7 @@
 search a specific keyring, so using keyrings in this way is of limited utility.
 
 
-===================================
-NOTES ON ACCESSING PAYLOAD CONTENTS
+Notes On Accessing Payload Contents
 ===================================
 
 The simplest payload is just data stored in key->payload directly.  In this
@@ -1154,31 +1146,31 @@
 key->payload.data[] array.  One of the following ways must be selected to
 access the data:
 
- (1) Unmodifiable key type.
+  1) Unmodifiable key type.
 
      If the key type does not have a modify method, then the key's payload can
      be accessed without any form of locking, provided that it's known to be
      instantiated (uninstantiated keys cannot be "found").
 
- (2) The key's semaphore.
+  2) The key's semaphore.
 
      The semaphore could be used to govern access to the payload and to control
      the payload pointer. It must be write-locked for modifications and would
      have to be read-locked for general access. The disadvantage of doing this
      is that the accessor may be required to sleep.
 
- (3) RCU.
+  3) RCU.
 
      RCU must be used when the semaphore isn't already held; if the semaphore
      is held then the contents can't change under you unexpectedly as the
      semaphore must still be used to serialise modifications to the key. The
      key management code takes care of this for the key type.
 
-     However, this means using:
+     However, this means using::
 
 	rcu_read_lock() ... rcu_dereference() ... rcu_read_unlock()
 
-     to read the pointer, and:
+     to read the pointer, and::
 
 	rcu_dereference() ... rcu_assign_pointer() ... call_rcu()
 
@@ -1194,11 +1186,11 @@
      usage.  This is called key->payload.rcu_data0.  The following accessors
      wrap the RCU calls to this element:
 
-     (a) Set or change the first payload pointer:
+     a) Set or change the first payload pointer::
 
 		rcu_assign_keypointer(struct key *key, void *data);
 
-     (b) Read the first payload pointer with the key semaphore held:
+     b) Read the first payload pointer with the key semaphore held::
 
 		[const] void *dereference_key_locked([const] struct key *key);
 
@@ -1206,39 +1198,38 @@
 	 parameter.  Static analysis will give an error if it thinks the lock
 	 isn't held.
 
-     (c) Read the first payload pointer with the RCU read lock held:
+     c) Read the first payload pointer with the RCU read lock held::
 
 		const void *dereference_key_rcu(const struct key *key);
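+
+     As a hedged sketch (``struct example_payload`` and ``example_use()`` are
+     hypothetical), an RCU read-side access could look like::
+
+	const struct example_payload *payload;
+
+	rcu_read_lock();
+	payload = dereference_key_rcu(key);
+	if (payload)
+		example_use(payload);
+	rcu_read_unlock();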
 
 
-===================
-DEFINING A KEY TYPE
+Defining a Key Type
 ===================
 
 A kernel service may want to define its own key type. For instance, an AFS
 filesystem might want to define a Kerberos 5 ticket key type. To do this, its
 author fills in a key_type struct and registers it with the system.
 
-Source files that implement key types should include the following header file:
+Source files that implement key types should include the following header file::
 
 	<linux/key-type.h>
 
 The structure has a number of fields, some of which are mandatory:
 
- (*) const char *name
+  *  ``const char *name``
 
      The name of the key type. This is used to translate a key type name
      supplied by userspace into a pointer to the structure.
 
 
- (*) size_t def_datalen
+  *  ``size_t def_datalen``
 
      This is optional - it supplies the default payload data length as
      contributed to the quota. If the key type's payload is always or almost
      always the same size, then this is a more efficient way to do things.
 
      The data length (and quota) on a particular key can always be changed
-     during instantiation or update by calling:
+     during instantiation or update by calling::
 
 	int key_payload_reserve(struct key *key, size_t datalen);
 
@@ -1246,18 +1237,18 @@
      viable.
 
 
- (*) int (*vet_description)(const char *description);
+  *  ``int (*vet_description)(const char *description);``
 
      This optional method is called to vet a key description.  If the key type
      doesn't approve of the key description, it may return an error, otherwise
      it should return 0.
 
 
- (*) int (*preparse)(struct key_preparsed_payload *prep);
+  *  ``int (*preparse)(struct key_preparsed_payload *prep);``
 
      This optional method permits the key type to attempt to parse payload
      before a key is created (add key) or the key semaphore is taken (update or
-     instantiate key).  The structure pointed to by prep looks like:
+     instantiate key).  The structure pointed to by prep looks like::
 
 	struct key_preparsed_payload {
 		char		*description;
@@ -1285,7 +1276,7 @@
      otherwise.
 
 
- (*) void (*free_preparse)(struct key_preparsed_payload *prep);
+  *  ``void (*free_preparse)(struct key_preparsed_payload *prep);``
 
      This method is only required if the preparse() method is provided,
      otherwise it is unused.  It cleans up anything attached to the description
@@ -1294,7 +1285,7 @@
      successfully, even if instantiate() or update() succeed.
 
 
- (*) int (*instantiate)(struct key *key, struct key_preparsed_payload *prep);
+  *  ``int (*instantiate)(struct key *key, struct key_preparsed_payload *prep);``
 
      This method is called to attach a payload to a key during construction.
      The payload attached need not bear any relation to the data passed to this
@@ -1318,7 +1309,7 @@
      free_preparse method doesn't release the data.
 
 
- (*) int (*update)(struct key *key, const void *data, size_t datalen);
+  *  ``int (*update)(struct key *key, const void *data, size_t datalen);``
 
      If this type of key can be updated, then this method should be provided.
      It is called to update a key's payload from the blob of data provided.
@@ -1343,10 +1334,10 @@
      It is safe to sleep in this method.
 
 
- (*) int (*match_preparse)(struct key_match_data *match_data);
+  *  ``int (*match_preparse)(struct key_match_data *match_data);``
 
      This method is optional.  It is called when a key search is about to be
-     performed.  It is given the following structure:
+     performed.  It is given the following structure::
 
 	struct key_match_data {
 		bool (*cmp)(const struct key *key,
@@ -1357,23 +1348,23 @@
 	};
 
      On entry, raw_data will be pointing to the criteria to be used in matching
-     a key by the caller and should not be modified.  (*cmp)() will be pointing
+     a key by the caller and should not be modified.  ``(*cmp)()`` will be pointing
      to the default matcher function (which does an exact description match
      against raw_data) and lookup_type will be set to indicate a direct lookup.
 
      The following lookup_type values are available:
 
-      [*] KEYRING_SEARCH_LOOKUP_DIRECT - A direct lookup hashes the type and
+       *  KEYRING_SEARCH_LOOKUP_DIRECT - A direct lookup hashes the type and
       	  description to narrow down the search to a small number of keys.
 
-      [*] KEYRING_SEARCH_LOOKUP_ITERATE - An iterative lookup walks all the
+       *  KEYRING_SEARCH_LOOKUP_ITERATE - An iterative lookup walks all the
       	  keys in the keyring until one is matched.  This must be used for any
       	  search that's not doing a simple direct match on the key description.
 
      The method may set cmp to point to a function of its choice that does some
      other form of match, may set lookup_type to KEYRING_SEARCH_LOOKUP_ITERATE
-     and may attach something to the preparsed pointer for use by (*cmp)().
-     (*cmp)() should return true if a key matches and false otherwise.
+     and may attach something to the preparsed pointer for use by ``(*cmp)()``.
+     ``(*cmp)()`` should return true if a key matches and false otherwise.
 
      If preparsed is set, it may be necessary to use the match_free() method to
      clean it up.
@@ -1381,20 +1372,20 @@
      The method should return 0 if successful or a negative error code
      otherwise.
 
-     It is permitted to sleep in this method, but (*cmp)() may not sleep as
+     It is permitted to sleep in this method, but ``(*cmp)()`` may not sleep as
      locks will be held over it.
 
      If match_preparse() is not provided, keys of this type will be matched
      exactly by their description.
 
 
- (*) void (*match_free)(struct key_match_data *match_data);
+  *  ``void (*match_free)(struct key_match_data *match_data);``
 
      This method is optional.  If given, it is called to clean up
      match_data->preparsed after a successful call to match_preparse().
 
 
- (*) void (*revoke)(struct key *key);
+  *  ``void (*revoke)(struct key *key);``
 
      This method is optional.  It is called to discard part of the payload
      data upon a key being revoked.  The caller will have the key semaphore
@@ -1404,7 +1395,7 @@
      a deadlock against the key semaphore.
 
 
- (*) void (*destroy)(struct key *key);
+  *  ``void (*destroy)(struct key *key);``
 
      This method is optional. It is called to discard the payload data on a key
      when it is being destroyed.
@@ -1416,7 +1407,7 @@
      It is not safe to sleep in this method; the caller may hold spinlocks.
 
 
- (*) void (*describe)(const struct key *key, struct seq_file *p);
+  *  ``void (*describe)(const struct key *key, struct seq_file *p);``
 
      This method is optional. It is called during /proc/keys reading to
      summarise a key's description and payload in text form.
@@ -1432,7 +1423,7 @@
      caller.
 
 
- (*) long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+  *  ``long (*read)(const struct key *key, char __user *buffer, size_t buflen);``
 
      This method is optional. It is called by KEYCTL_READ to translate the
      key's payload into a blob of data for userspace to deal with.
@@ -1448,8 +1439,7 @@
      as might happen when the userspace buffer is accessed.
 
 
- (*) int (*request_key)(struct key_construction *cons, const char *op,
-			void *aux);
+  *  ``int (*request_key)(struct key_construction *cons, const char *op, void *aux);``
 
      This method is optional.  If provided, request_key() and friends will
      invoke this function rather than upcalling to /sbin/request-key to operate
@@ -1463,7 +1453,7 @@
      This method is permitted to return before the upcall is complete, but the
      following function must be called under all circumstances to complete the
      instantiation process, whether or not it succeeds, whether or not there's
-     an error:
+     an error::
 
 	void complete_request_key(struct key_construction *cons, int error);
 
@@ -1479,16 +1469,16 @@
      The key under construction and the authorisation key can be found in the
      key_construction struct pointed to by cons:
 
-     (*) struct key *key;
+      *  ``struct key *key;``
 
      	 The key under construction.
 
-     (*) struct key *authkey;
+      *  ``struct key *authkey;``
 
      	 The authorisation key.
 
 
- (*) struct key_restriction *(*lookup_restriction)(const char *params);
+  *  ``struct key_restriction *(*lookup_restriction)(const char *params);``
 
      This optional method is used to enable userspace configuration of keyring
      restrictions. The restriction parameter string (not including the key type
@@ -1497,12 +1487,11 @@
      attempted key link operation. If there is no match, -EINVAL is returned.
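+
+Putting the pieces above together, a minimal key type definition might look
+roughly like the following sketch (the ``example_*`` functions stand for
+hypothetical implementations of the methods described above)::
+
+	static struct key_type key_type_example = {
+		.name		= "example",
+		.preparse	= example_preparse,
+		.free_preparse	= example_free_preparse,
+		.instantiate	= example_instantiate,
+		.revoke		= example_revoke,
+		.destroy	= example_destroy,
+		.describe	= example_describe,
+		.read		= example_read,
+	};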
 
 
-============================
-REQUEST-KEY CALLBACK SERVICE
+Request-Key Callback Service
 ============================
 
 To create a new key, the kernel will attempt to execute the following command
-line:
+line::
 
 	/sbin/request-key create <key> <uid> <gid> \
 		<threadring> <processring> <sessionring> <callout_info>
@@ -1511,10 +1500,10 @@
 keyrings from the process that caused the search to be issued. These are
 included for two reasons:
 
-  (1) There may be an authentication token in one of the keyrings that is
+   1) There may be an authentication token in one of the keyrings that is
       required to obtain the key, eg: a Kerberos Ticket-Granting Ticket.
 
-  (2) The new key should probably be cached in one of these rings.
+   2) The new key should probably be cached in one of these rings.
 
 This program should set its UID and GID to those specified before attempting to
 access any more keys. It may then look around for a user specific process to
@@ -1539,7 +1528,7 @@
 
 
 Similarly, the kernel may attempt to update an expired or a soon to expire key
-by executing:
+by executing::
 
 	/sbin/request-key update <key> <uid> <gid> \
 		<threadring> <processring> <sessionring>
@@ -1548,8 +1537,7 @@
 the rings are provided for reference.
 
 
-==================
-GARBAGE COLLECTION
+Garbage Collection
 ==================
 
 Dead keys (for which the type has been removed) will be automatically unlinked
@@ -1557,6 +1545,6 @@
 background garbage collector.
 
 Similarly, revoked and expired keys will be garbage collected, but only after a
-certain amount of time has passed.  This time is set as a number of seconds in:
+certain amount of time has passed.  This time is set as a number of seconds in::
 
 	/proc/sys/kernel/keys/gc_delay
diff --git a/Documentation/security/keys-ecryptfs.txt b/Documentation/security/keys/ecryptfs.rst
similarity index 91%
rename from Documentation/security/keys-ecryptfs.txt
rename to Documentation/security/keys/ecryptfs.rst
index c3bbeba..4920f3a 100644
--- a/Documentation/security/keys-ecryptfs.txt
+++ b/Documentation/security/keys/ecryptfs.rst
@@ -1,4 +1,6 @@
-		Encrypted keys for the eCryptfs filesystem
+==========================================
+Encrypted keys for the eCryptfs filesystem
+==========================================
 
 ECryptfs is a stacked filesystem which transparently encrypts and decrypts each
 file using a randomly generated File Encryption Key (FEK).
@@ -35,20 +37,23 @@
 threats of malicious software, because it is available in clear form only at
 kernel level.
 
-Usage:
+Usage::
+
    keyctl add encrypted name "new ecryptfs key-type:master-key-name keylen" ring
    keyctl add encrypted name "load hex_blob" ring
    keyctl update keyid "update key-type:master-key-name"
 
-name:= '<16 hexadecimal characters>'
-key-type:= 'trusted' | 'user'
-keylen:= 64
+Where::
+
+	name:= '<16 hexadecimal characters>'
+	key-type:= 'trusted' | 'user'
+	keylen:= 64
 
 
 Example of encrypted key usage with the eCryptfs filesystem:
 
 Create an encrypted key "1000100010001000" of length 64 bytes with format
-'ecryptfs' and save it using a previously loaded user key "test":
+'ecryptfs' and save it using a previously loaded user key "test"::
 
     $ keyctl add encrypted 1000100010001000 "new ecryptfs user:test 64" @u
     19184530
@@ -62,7 +67,7 @@
     $ keyctl pipe 19184530 > ecryptfs.blob
 
 Mount an eCryptfs filesystem using the created encrypted key "1000100010001000"
-into the '/secret' directory:
+into the '/secret' directory::
 
     $ mount -i -t ecryptfs -oecryptfs_sig=1000100010001000,\
       ecryptfs_cipher=aes,ecryptfs_key_bytes=32 /secret /secret
diff --git a/Documentation/security/keys/index.rst b/Documentation/security/keys/index.rst
new file mode 100644
index 0000000..647d58f
--- /dev/null
+++ b/Documentation/security/keys/index.rst
@@ -0,0 +1,11 @@
+===========
+Kernel Keys
+===========
+
+.. toctree::
+   :maxdepth: 1
+
+   core
+   ecryptfs
+   request-key
+   trusted-encrypted
diff --git a/Documentation/security/keys-request-key.txt b/Documentation/security/keys/request-key.rst
similarity index 76%
rename from Documentation/security/keys-request-key.txt
rename to Documentation/security/keys/request-key.rst
index 51987bf..aba3278 100644
--- a/Documentation/security/keys-request-key.txt
+++ b/Documentation/security/keys/request-key.rst
@@ -1,19 +1,19 @@
-			      ===================
-			      KEY REQUEST SERVICE
-			      ===================
+===================
+Key Request Service
+===================
 
 The key request service is part of the key retention service (refer to
 Documentation/security/keys.txt).  This document explains more fully how
 the requesting algorithm works.
 
 The process starts by either the kernel requesting a service by calling
-request_key*():
+``request_key*()``::
 
 	struct key *request_key(const struct key_type *type,
 				const char *description,
 				const char *callout_info);
 
-or:
+or::
 
 	struct key *request_key_with_auxdata(const struct key_type *type,
 					     const char *description,
@@ -21,14 +21,14 @@
 					     size_t callout_len,
 					     void *aux);
 
-or:
+or::
 
 	struct key *request_key_async(const struct key_type *type,
 				      const char *description,
 				      const char *callout_info,
 				      size_t callout_len);
 
-or:
+or::
 
 	struct key *request_key_async_with_auxdata(const struct key_type *type,
 						   const char *description,
@@ -36,7 +36,7 @@
 					     	   size_t callout_len,
 						   void *aux);
 
-Or by userspace invoking the request_key system call:
+Or by userspace invoking the request_key system call::
 
 	key_serial_t request_key(const char *type,
 				 const char *description,
@@ -67,38 +67,37 @@
 forking and execution of /sbin/request-key.
 
 
-===========
-THE PROCESS
+The Process
 ===========
 
 A request proceeds in the following manner:
 
- (1) Process A calls request_key() [the userspace syscall calls the kernel
+  1) Process A calls request_key() [the userspace syscall calls the kernel
      interface].
 
- (2) request_key() searches the process's subscribed keyrings to see if there's
+  2) request_key() searches the process's subscribed keyrings to see if there's
      a suitable key there.  If there is, it returns the key.  If there isn't,
      and callout_info is not set, an error is returned.  Otherwise the process
      proceeds to the next step.
 
- (3) request_key() sees that A doesn't have the desired key yet, so it creates
+  3) request_key() sees that A doesn't have the desired key yet, so it creates
      two things:
 
-     (a) An uninstantiated key U of requested type and description.
+      a) An uninstantiated key U of requested type and description.
 
-     (b) An authorisation key V that refers to key U and notes that process A
+      b) An authorisation key V that refers to key U and notes that process A
      	 is the context in which key U should be instantiated and secured, and
      	 from which associated key requests may be satisfied.
 
- (4) request_key() then forks and executes /sbin/request-key with a new session
+  4) request_key() then forks and executes /sbin/request-key with a new session
      keyring that contains a link to auth key V.
 
- (5) /sbin/request-key assumes the authority associated with key U.
+  5) /sbin/request-key assumes the authority associated with key U.
 
- (6) /sbin/request-key execs an appropriate program to perform the actual
+  6) /sbin/request-key execs an appropriate program to perform the actual
      instantiation.
 
- (7) The program may want to access another key from A's context (say a
+  7) The program may want to access another key from A's context (say a
      Kerberos TGT key).  It just requests the appropriate key, and the keyring
      search notes that the session keyring has auth key V in its bottom level.
 
@@ -106,15 +105,15 @@
      UID, GID, groups and security info of process A as if it was process A,
      and come up with key W.
 
- (8) The program then does what it must to get the data with which to
+  8) The program then does what it must to get the data with which to
      instantiate key U, using key W as a reference (perhaps it contacts a
      Kerberos server using the TGT) and then instantiates key U.
 
- (9) Upon instantiating key U, auth key V is automatically revoked so that it
+  9) Upon instantiating key U, auth key V is automatically revoked so that it
      may not be used again.
 
-(10) The program then exits 0 and request_key() deletes key V and returns key
-     U to the caller.
+  10) The program then exits 0 and request_key() deletes key V and returns key
+      U to the caller.
 
 This also extends further.  If key W (step 7 above) didn't exist, key W would
 be created uninstantiated, another auth key (X) would be created (as per step
@@ -127,8 +126,7 @@
 of them, and (b) it requires the same UID/GID/Groups all the way through.
 
 
-====================================
-NEGATIVE INSTANTIATION AND REJECTION
+Negative Instantiation And Rejection
 ====================================
 
 Rather than instantiating a key, it is possible for the possessor of an
@@ -145,23 +143,22 @@
 instantiated for a short amount of time.
 
 
-====================
-THE SEARCH ALGORITHM
+The Search Algorithm
 ====================
 
 A search of any particular keyring proceeds in the following fashion:
 
- (1) When the key management code searches for a key (keyring_search_aux) it
+  1) When the key management code searches for a key (keyring_search_aux) it
      firstly calls key_permission(SEARCH) on the keyring it's starting with;
      if this denies permission, it doesn't search further.
 
- (2) It considers all the non-keyring keys within that keyring and, if any key
+  2) It considers all the non-keyring keys within that keyring and, if any key
      matches the criteria specified, calls key_permission(SEARCH) on it to see
      if the key is allowed to be found.  If it is, that key is returned; if
      not, the search continues, and the error code is retained if of higher
      priority than the one currently set.
 
- (3) It then considers all the keyring-type keys in the keyring it's currently
+  3) It then considers all the keyring-type keys in the keyring it's currently
      searching.  It calls key_permission(SEARCH) on each keyring, and if this
      grants permission, it recurses, executing steps (2) and (3) on that
      keyring.
@@ -173,20 +170,20 @@
 When search_process_keyrings() is invoked, it performs the following searches
 until one succeeds:
 
- (1) If extant, the process's thread keyring is searched.
+  1) If extant, the process's thread keyring is searched.
 
- (2) If extant, the process's process keyring is searched.
+  2) If extant, the process's process keyring is searched.
 
- (3) The process's session keyring is searched.
+  3) The process's session keyring is searched.
 
- (4) If the process has assumed the authority associated with a request_key()
+  4) If the process has assumed the authority associated with a request_key()
      authorisation key then:
 
-     (a) If extant, the calling process's thread keyring is searched.
+      a) If extant, the calling process's thread keyring is searched.
 
-     (b) If extant, the calling process's process keyring is searched.
+      b) If extant, the calling process's process keyring is searched.
 
-     (c) The calling process's session keyring is searched.
+      c) The calling process's session keyring is searched.
 
 The moment one succeeds, all pending errors are discarded and the found key is
 returned.
@@ -194,7 +191,7 @@
 Only if all these fail does the whole thing fail with the highest priority
 error.  Note that several errors may have come from LSM.
 
-The error priority is:
+The error priority is::
 
 	EKEYREVOKED > EKEYEXPIRED > ENOKEY
 
diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys/trusted-encrypted.rst
similarity index 93%
rename from Documentation/security/keys-trusted-encrypted.txt
rename to Documentation/security/keys/trusted-encrypted.rst
index b20a993..7b50383 100644
--- a/Documentation/security/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys/trusted-encrypted.rst
@@ -1,4 +1,6 @@
-			Trusted and Encrypted Keys
+==========================
+Trusted and Encrypted Keys
+==========================
 
 Trusted and Encrypted Keys are two new key types added to the existing kernel
 key ring service.  Both of these new types are variable length symmetric keys,
@@ -20,7 +22,8 @@
 authorization value (20 zeros).  This can be set at takeownership time with the
 TrouSerS utility: "tpm_takeownership -u -z".
 
-Usage:
+Usage::
+
     keyctl add trusted name "new keylen [options]" ring
     keyctl add trusted name "load hex_blob [pcrlock=pcrnum]" ring
     keyctl update key "update [options]"
@@ -64,19 +67,22 @@
 key or a more complex structure. The format of the more complex structure is
 application specific, which is identified by 'format'.
 
-Usage:
+Usage::
+
     keyctl add encrypted name "new [format] key-type:master-key-name keylen"
         ring
     keyctl add encrypted name "load hex_blob" ring
     keyctl update keyid "update key-type:master-key-name"
 
-format:= 'default | ecryptfs'
-key-type:= 'trusted' | 'user'
+Where::
+
+	format:= 'default' | 'ecryptfs'
+	key-type:= 'trusted' | 'user'
 
 
 Examples of trusted and encrypted key usage:
 
-Create and save a trusted key named "kmk" of length 32 bytes:
+Create and save a trusted key named "kmk" of length 32 bytes::
 
     $ keyctl add trusted kmk "new 32" @u
     440502848
@@ -99,7 +105,7 @@
 
     $ keyctl pipe 440502848 > kmk.blob
 
-Load a trusted key from the saved blob:
+Load a trusted key from the saved blob::
 
     $ keyctl add trusted kmk "load `cat kmk.blob`" @u
     268728824
@@ -114,7 +120,7 @@
     f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
     e4a8aea2b607ec96931e6f4d4fe563ba
 
-Reseal a trusted key under new pcr values:
+Reseal a trusted key under new pcr values::
 
     $ keyctl update 268728824 "update pcrinfo=`cat pcr.blob`"
     $ keyctl print 268728824
@@ -135,11 +141,13 @@
 values, protects against boot and offline attacks.  Create and save an
 encrypted key "evm" using the above trusted key "kmk":
 
-option 1: omitting 'format'
+option 1: omitting 'format'::
+
     $ keyctl add encrypted evm "new trusted:kmk 32" @u
     159771175
 
-option 2: explicitly defining 'format' as 'default'
+option 2: explicitly defining 'format' as 'default'::
+
     $ keyctl add encrypted evm "new default trusted:kmk 32" @u
     159771175
 
@@ -150,7 +158,7 @@
 
     $ keyctl pipe 159771175 > evm.blob
 
-Load an encrypted key "evm" from saved blob:
+Load an encrypted key "evm" from saved blob::
 
     $ keyctl add encrypted evm "load `cat evm.blob`" @u
     831684262
@@ -164,4 +172,4 @@
 are anticipated.  In particular the new format 'ecryptfs' has been defined
 in order to use encrypted keys to mount an eCryptfs filesystem.  More details
 about the usage can be found in the file
-'Documentation/security/keys-ecryptfs.txt'.
+``Documentation/security/keys/ecryptfs.rst``.
diff --git a/Documentation/security/self-protection.txt b/Documentation/security/self-protection.rst
similarity index 82%
rename from Documentation/security/self-protection.txt
rename to Documentation/security/self-protection.rst
index 141acfe..60c8bd8 100644
--- a/Documentation/security/self-protection.txt
+++ b/Documentation/security/self-protection.rst
@@ -1,4 +1,6 @@
-# Kernel Self-Protection
+======================
+Kernel Self-Protection
+======================
 
 Kernel self-protection is the design and implementation of systems and
 structures within the Linux kernel to protect against security flaws in
@@ -26,7 +28,8 @@
 and/or accepted.
 
 
-## Attack Surface Reduction
+Attack Surface Reduction
+========================
 
 The most fundamental defense against security exploits is to reduce the
 areas of the kernel that can be used to redirect execution. This ranges
@@ -34,13 +37,15 @@
 APIs hard to use incorrectly, minimizing the areas of writable kernel
 memory, etc.
 
-### Strict kernel memory permissions
+Strict kernel memory permissions
+--------------------------------
 
 When all of kernel memory is writable, it becomes trivial for attacks
 to redirect execution flow. To reduce the availability of these targets
 the kernel needs to protect its memory with a tight set of permissions.
 
-#### Executable code and read-only data must not be writable
+Executable code and read-only data must not be writable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Any areas of the kernel with executable memory must not be writable.
 While this obviously includes the kernel text itself, we must consider
@@ -51,18 +56,19 @@
 made writable during the update, and then returned to the original
 permissions.)
 
-In support of this are CONFIG_STRICT_KERNEL_RWX and
-CONFIG_STRICT_MODULE_RWX, which seek to make sure that code is not
+In support of this are ``CONFIG_STRICT_KERNEL_RWX`` and
+``CONFIG_STRICT_MODULE_RWX``, which seek to make sure that code is not
 writable, data is not executable, and read-only data is neither writable
 nor executable.
 
 Most architectures have these options on by default and not user selectable.
 For some architectures like arm that wish to have these be selectable,
 the architecture Kconfig can select ARCH_OPTIONAL_KERNEL_RWX to enable
-a Kconfig prompt. CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT determines
+a Kconfig prompt. ``CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT`` determines
 the default setting when ARCH_OPTIONAL_KERNEL_RWX is enabled.
 
-#### Function pointers and sensitive variables must not be writable
+Function pointers and sensitive variables must not be writable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Vast areas of kernel memory contain function pointers that are looked
 up by the kernel and used to continue execution (e.g. descriptor/vector
@@ -74,8 +80,8 @@
 of the kernel, gaining the protection of the kernel's strict memory
 permissions as described above.
 
-For variables that are initialized once at __init time, these can
-be marked with the (new and under development) __ro_after_init
+For variables that are initialized once at ``__init`` time, these can
+be marked with the (new and under development) ``__ro_after_init``
 attribute.
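+
+As a hedged sketch (the names are illustrative, not taken from this document),
+such a variable can be assigned once during init and is read-only afterwards::
+
+	static unsigned int example_setting __ro_after_init;
+
+	static int __init example_setup(void)
+	{
+		example_setting = 42;	/* written exactly once, at init time */
+		return 0;
+	}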
 
 What remains are variables that are updated rarely (e.g. GDT). These
@@ -85,7 +91,8 @@
 CPU thread performing the update would be given uninterruptible write
 access to the memory.)
 
-#### Segregation of kernel memory from userspace memory
+Segregation of kernel memory from userspace memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The kernel must never execute userspace memory. The kernel must also never
 access userspace memory without explicit expectation to do so. These
@@ -95,10 +102,11 @@
 cannot be passed to trivially-controlled userspace memory, forcing
 attacks to operate entirely in kernel memory.
 
-### Reduced access to syscalls
+Reduced access to syscalls
+--------------------------
 
 One trivial way to eliminate many syscalls for 64-bit systems is building
-without CONFIG_COMPAT. However, this is rarely a feasible scenario.
+without ``CONFIG_COMPAT``. However, this is rarely a feasible scenario.
 
 The "seccomp" system provides an opt-in feature made available to
 userspace, which provides a way to reduce the number of kernel entry
@@ -112,7 +120,8 @@
 restricted to the more regular set of syscalls normally available to unprivileged
 userspace.
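+
+As a minimal illustration only (seccomp's BPF filter mode is the more flexible
+and more widely used variant; strict mode shown here is simply the smallest
+example), a process can opt in with::
+
+	#include <linux/seccomp.h>
+	#include <sys/prctl.h>
+
+	/* After this succeeds, only read(), write(), _exit() and sigreturn()
+	 * are permitted; any other syscall terminates the process. */
+	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
+		handle_setup_failure();	/* hypothetical error handler */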
 
-### Restricting access to kernel modules
+Restricting access to kernel modules
+------------------------------------
 
 The kernel should never allow an unprivileged user the ability to
 load specific kernel modules, since that would provide a facility to
@@ -127,11 +136,12 @@
 To protect against even privileged users, systems may need to either
 disable module loading entirely (e.g. monolithic kernel builds or
 modules_disabled sysctl), or provide signed modules (e.g.
-CONFIG_MODULE_SIG_FORCE, or dm-crypt with LoadPin), to keep from having
+``CONFIG_MODULE_SIG_FORCE``, or dm-crypt with LoadPin), to keep from having
 root load arbitrary kernel code via the module loader interface.
 
 
-## Memory integrity
+Memory integrity
+================
 
 There are many memory structures in the kernel that are regularly abused
 to gain execution control during an attack. By far the most commonly
@@ -139,16 +149,18 @@
 address stored on the stack is overwritten. Many other examples of this
 kind of attack exist, and protections exist to defend against them.
 
-### Stack buffer overflow
+Stack buffer overflow
+---------------------
 
 The classic stack buffer overflow involves writing past the expected end
 of a variable stored on the stack, ultimately writing a controlled value
 to the stack frame's stored return address. The most widely used defense
 is the presence of a stack canary between the stack variables and the
-return address (CONFIG_CC_STACKPROTECTOR), which is verified just before
+return address (``CONFIG_CC_STACKPROTECTOR``), which is verified just before
 the function returns. Other defenses include things like shadow stacks.
 
-### Stack depth overflow
+Stack depth overflow
+--------------------
 
 A less well understood attack is using a bug that triggers the
 kernel to consume stack memory with deep function calls or large stack
@@ -158,27 +170,31 @@
 sensitive thread_info structure elsewhere, and adding a faulting memory
 hole at the bottom of the stack to catch these overflows.
 
-### Heap memory integrity
+Heap memory integrity
+---------------------
 
 The structures used to track heap free lists can be sanity-checked during
 allocation and freeing to make sure they aren't being used to manipulate
 other memory areas.
 
-### Counter integrity
+Counter integrity
+-----------------
 
 Many places in the kernel use atomic counters to track object references
 or perform similar lifetime management. When these counters can be made
 to wrap (over or under) this traditionally exposes a use-after-free
 flaw. By trapping atomic wrapping, this class of bug vanishes.
 
-### Size calculation overflow detection
+Size calculation overflow detection
+-----------------------------------
 
 Similar to counter overflow, integer overflows (usually size calculations)
 need to be detected at runtime to kill this class of bug, which
 traditionally leads to being able to write past the end of kernel buffers.
 
 
-## Statistical defenses
+Probabilistic defenses
+======================
 
 While many protections can be considered deterministic (e.g. read-only
 memory cannot be written to), some protections provide only statistical
@@ -186,7 +202,8 @@
 running system to overcome the defense. While not perfect, these do
 provide meaningful defenses.
 
-### Canaries, blinding, and other secrets
+Canaries, blinding, and other secrets
+-------------------------------------
 
 It should be noted that things like the stack canary discussed earlier
 are technically statistical defenses, since they rely on a secret value,
@@ -201,7 +218,8 @@
 different canary per stack) and high entropy (e.g. is the RNG actually
 working?) in order to maximize their success.
 
-### Kernel Address Space Layout Randomization (KASLR)
+Kernel Address Space Layout Randomization (KASLR)
+-------------------------------------------------
 
 Since the location of kernel memory is almost always instrumental in
 mounting a successful attack, making the location non-deterministic
@@ -209,22 +227,25 @@
 the value of information exposures higher, since they may be used to
 discover desired memory locations.)
 
-#### Text and module base
+Text and module base
+~~~~~~~~~~~~~~~~~~~~
 
 By relocating the physical and virtual base address of the kernel at
-boot-time (CONFIG_RANDOMIZE_BASE), attacks needing kernel code will be
+boot-time (``CONFIG_RANDOMIZE_BASE``), attacks needing kernel code will be
 frustrated. Additionally, offsetting the module loading base address
 means that even systems that load the same set of modules in the same
 order every boot will not share a common base address with the rest of
 the kernel text.
 
-#### Stack base
+Stack base
+~~~~~~~~~~
 
 If the base address of the kernel stack is not the same between processes,
 or even not the same between syscalls, targets on or beyond the stack
 become more difficult to locate.
 
-#### Dynamic memory base
+Dynamic memory base
+~~~~~~~~~~~~~~~~~~~
 
 Much of the kernel's dynamic memory (e.g. kmalloc, vmalloc, etc) ends up
 being relatively deterministic in layout due to the order of early-boot
@@ -232,7 +253,8 @@
 between boots, targeting them is frustrated, requiring an information
 exposure specific to the region.
 
-#### Structure layout
+Structure layout
+~~~~~~~~~~~~~~~~
 
 By performing a per-build randomization of the layout of sensitive
 structures, attacks must either be tuned to known kernel builds or expose
@@ -240,26 +262,30 @@
 them.
 
 
-## Preventing Information Exposures
+Preventing Information Exposures
+================================
 
 Since the locations of sensitive structures are the primary target for
 attacks, it is important to defend against exposure of both kernel memory
 addresses and kernel memory contents (since they may contain kernel
 addresses or other sensitive things like canary values).
 
-### Unique identifiers
+Unique identifiers
+------------------
 
 Kernel memory addresses must never be used as identifiers exposed to
 userspace. Instead, use an atomic counter, an idr, or similar unique
 identifier.
 
-### Memory initialization
+Memory initialization
+---------------------
 
 Memory copied to userspace must always be fully initialized. If not
 explicitly memset(), this will require changes to the compiler to make
 sure structure holes are cleared.
 
-### Memory poisoning
+Memory poisoning
+----------------
 
 When releasing memory, it is best to poison the contents (clear stack on
 syscall return, wipe heap memory on a free), to avoid reuse attacks that
@@ -267,9 +293,10 @@
 variable attacks, stack content exposures, heap content exposures, and
 use-after-free attacks.
 
-### Destination tracking
+Destination tracking
+--------------------
 
 To help kill classes of bugs that result in kernel addresses being
 written to userspace, the destination of writes needs to be tracked. If
-the buffer is destined for userspace (e.g. seq_file backed /proc files),
+the buffer is destined for userspace (e.g. seq_file backed ``/proc`` files),
 it should automatically censor sensitive values.
diff --git a/Documentation/serial/n_gsm.txt b/Documentation/serial/n_gsm.txt
index a5d9112..875361b 100644
--- a/Documentation/serial/n_gsm.txt
+++ b/Documentation/serial/n_gsm.txt
@@ -77,6 +77,13 @@
 
 6- first close all virtual ports before closing the physical port.
 
+Note that after closing the physical port the modem is still in multiplexing
+mode. This may prevent a successful re-opening of the port later. To avoid
+this situation, either reset the modem (if your hardware allows it) or send
+a disconnect command frame manually before initializing the multiplexing mode
+for the second time. The byte sequence for the disconnect command frame is:
+0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9.
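+
+For illustration only (this assumes the physical serial port is still open on
+a file descriptor fd, as in the setup steps above; the variable names are just
+an example), the disconnect frame could be sent with:
+
+	static const unsigned char disconnect[] = {
+		0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9 };
+
+	write(fd, disconnect, sizeof(disconnect));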
+
 Additional Documentation
 ------------------------
 More practical details on the protocol and how it's supported by industrial
diff --git a/Documentation/sh/conf.py b/Documentation/sh/conf.py
new file mode 100644
index 0000000..1eb684a
--- /dev/null
+++ b/Documentation/sh/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "SuperH architecture implementation manual"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'sh.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/sh/index.rst b/Documentation/sh/index.rst
new file mode 100644
index 0000000..bc8db7b
--- /dev/null
+++ b/Documentation/sh/index.rst
@@ -0,0 +1,59 @@
+=======================
+SuperH Interfaces Guide
+=======================
+
+:Author: Paul Mundt
+
+Memory Management
+=================
+
+SH-4
+----
+
+Store Queue API
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: arch/sh/kernel/cpu/sh4/sq.c
+   :export:
+
+SH-5
+----
+
+TLB Interfaces
+~~~~~~~~~~~~~~
+
+.. kernel-doc:: arch/sh/mm/tlb-sh5.c
+   :internal:
+
+.. kernel-doc:: arch/sh/include/asm/tlb_64.h
+   :internal:
+
+Machine Specific Interfaces
+===========================
+
+mach-dreamcast
+--------------
+
+.. kernel-doc:: arch/sh/boards/mach-dreamcast/rtc.c
+   :internal:
+
+mach-x3proto
+------------
+
+.. kernel-doc:: arch/sh/boards/mach-x3proto/ilsel.c
+   :export:
+
+Busses
+======
+
+SuperHyway
+----------
+
+.. kernel-doc:: drivers/sh/superhyway/superhyway.c
+   :export:
+
+Maple
+-----
+
+.. kernel-doc:: drivers/sh/maple/maple.c
+   :export:
diff --git a/Documentation/sound/conf.py b/Documentation/sound/conf.py
new file mode 100644
index 0000000..3f1fc5e
--- /dev/null
+++ b/Documentation/sound/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux Sound Subsystem Documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+    ('index', 'sound.tex', project,
+     'The kernel development community', 'manual'),
+]
diff --git a/Documentation/sphinx/convert_template.sed b/Documentation/sphinx/convert_template.sed
deleted file mode 100644
index c1503fc..0000000
--- a/Documentation/sphinx/convert_template.sed
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Pandoc doesn't grok <function> or <structname>, so convert them
-# ahead of time.
-#
-# Use the following escapes to pass through pandoc:
-#	$bq = "`"
-#	$lt = "<"
-#	$gt = ">"
-#
-s%<function>\([^<(]\+\)()</function>%:c:func:$bq\1()$bq%g
-s%<function>\([^<(]\+\)</function>%:c:func:$bq\1()$bq%g
-s%<structname>struct *\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
-s%struct <structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
-s%<structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
-#
-# Wrap docproc directives in para and code blocks.
-#
-s%^\(!.*\)$%<para><code>DOCPROC: \1</code></para>%
diff --git a/Documentation/sphinx/post_convert.sed b/Documentation/sphinx/post_convert.sed
deleted file mode 100644
index 392770b..0000000
--- a/Documentation/sphinx/post_convert.sed
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Unescape.
-#
-s/$bq/`/g
-s/$lt/</g
-s/$gt/>/g
-#
-# pandoc thinks that both "_" needs to be escaped.  Remove the extra
-# backslashes.
-#
-s/\\_/_/g
-#
-# Unwrap docproc directives.
-#
-s/^``DOCPROC: !E\(.*\)``$/.. kernel-doc:: \1\n   :export:/
-s/^``DOCPROC: !I\(.*\)``$/.. kernel-doc:: \1\n   :internal:/
-s/^``DOCPROC: !F\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n   :functions: \2/
-s/^``DOCPROC: !P\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n   :doc: \2/
-s/^``DOCPROC: \(!.*\)``$/.. WARNING: DOCPROC directive not supported: \1/
-#
-# Trim trailing whitespace.
-#
-s/[[:space:]]*$//
diff --git a/Documentation/sphinx/tmplcvt b/Documentation/sphinx/tmplcvt
deleted file mode 100755
index 6848f0a..0000000
--- a/Documentation/sphinx/tmplcvt
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#
-# Convert a template file into something like RST
-#
-# fix <function>
-# feed to pandoc
-# fix \_
-# title line?
-#
-set -eu
-
-if [ "$#" != "2" ]; then
-	echo "$0 <docbook file> <rst file>"
-	exit
-fi
-
-DIR=$(dirname $0)
-
-in=$1
-rst=$2
-tmp=$rst.tmp
-
-cp $in $tmp
-sed --in-place -f $DIR/convert_template.sed $tmp
-pandoc -s -S -f docbook -t rst -o $rst $tmp
-sed --in-place -f $DIR/post_convert.sed $rst
-rm $tmp
-echo "book writen to $rst"
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index d1824b3..1721c1b 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -62,8 +62,8 @@
 (That data line is sometimes called MOMI or SISO.)
 
 Microcontrollers often support both master and slave sides of the SPI
-protocol.  This document (and Linux) currently only supports the master
-side of SPI interactions.
+protocol.  This document (and Linux) supports both the master and slave
+sides of SPI interactions.
 
 
 Who uses it?  On what kinds of systems?
@@ -154,9 +154,8 @@
 or monitor temperature and voltage levels during industrial processing.
 And those might all be sharing the same controller driver.
 
-A "struct spi_device" encapsulates the master-side interface between
-those two types of driver.  At this writing, Linux has no slave side
-programming interface.
+A "struct spi_device" encapsulates the controller-side interface between
+those two types of drivers.
 
 There is a minimal core of SPI programming interfaces, focussing on
 using the driver model to connect controller and protocol drivers using
@@ -177,10 +176,24 @@
    /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices
 
    /sys/class/spi_master/spiB ... symlink (or actual device node) to
-	a logical node which could hold class related state for the
-	controller managing bus "B".  All spiB.* devices share one
+	a logical node which could hold class related state for the SPI
+	master controller managing bus "B".  All spiB.* devices share one
 	physical SPI bus segment, with SCLK, MOSI, and MISO.
 
+   /sys/devices/.../CTLR/slave ... virtual file for (un)registering the
+	slave device for an SPI slave controller.
+	Writing the driver name of an SPI slave handler to this file
+	registers the slave device; writing "(null)" unregisters the slave
+	device.
+	Reading from this file shows the name of the slave device ("(null)"
+	if not registered).
+
+   /sys/class/spi_slave/spiB ... symlink (or actual device node) to
+	a logical node which could hold class related state for the SPI
+	slave controller on bus "B".  When registered, a single spiB.*
+	device is present here, possibly sharing the physical SPI bus
+	segment with other SPI slave devices.
+
 Note that the actual location of the controller's class state depends
 on whether you enabled CONFIG_SYSFS_DEPRECATED or not.  At this time,
 the only class-specific state is the bus number ("B" in "spiB"), so
diff --git a/Documentation/timers/NO_HZ.txt b/Documentation/timers/NO_HZ.txt
index 6eaf576..2dcaf9a 100644
--- a/Documentation/timers/NO_HZ.txt
+++ b/Documentation/timers/NO_HZ.txt
@@ -194,32 +194,9 @@
 
 Another approach is to offload RCU callback processing to "rcuo" kthreads
 using the CONFIG_RCU_NOCB_CPU=y Kconfig option.  The specific CPUs to
-offload may be selected via several methods:
-
-1.	One of three mutually exclusive Kconfig options specify a
-	build-time default for the CPUs to offload:
-
-	a.	The CONFIG_RCU_NOCB_CPU_NONE=y Kconfig option results in
-		no CPUs being offloaded.
-
-	b.	The CONFIG_RCU_NOCB_CPU_ZERO=y Kconfig option causes
-		CPU 0 to be offloaded.
-
-	c.	The CONFIG_RCU_NOCB_CPU_ALL=y Kconfig option causes all
-		CPUs to be offloaded.  Note that the callbacks will be
-		offloaded to "rcuo" kthreads, and that those kthreads
-		will in fact run on some CPU.  However, this approach
-		gives fine-grained control on exactly which CPUs the
-		callbacks run on, along with their scheduling priority
-		(including the default of SCHED_OTHER), and it further
-		allows this control to be varied dynamically at runtime.
-
-2.	The "rcu_nocbs=" kernel boot parameter, which takes a comma-separated
-	list of CPUs and CPU ranges, for example, "1,3-5" selects CPUs 1,
-	3, 4, and 5.  The specified CPUs will be offloaded in addition to
-	any CPUs specified as offloaded by CONFIG_RCU_NOCB_CPU_ZERO=y or
-	CONFIG_RCU_NOCB_CPU_ALL=y.  This means that the "rcu_nocbs=" boot
-	parameter has no effect for kernels built with RCU_NOCB_CPU_ALL=y.
+offload may be selected using the "rcu_nocbs=" kernel boot parameter,
+which takes a comma-separated list of CPUs and CPU ranges, for example,
+"1,3-5" selects CPUs 1, 3, 4, and 5.
 
 The offloaded CPUs will never queue RCU callbacks, and therefore RCU
 never prevents offloaded CPUs from entering either dyntick-idle mode
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
new file mode 100644
index 0000000..b3da1f9
--- /dev/null
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -0,0 +1,175 @@
+		Coresight CPU Debug Module
+		==========================
+
+   Author:   Leo Yan <leo.yan@linaro.org>
+   Date:     April 5th, 2017
+
+Introduction
+------------
+
+The Coresight CPU debug module is defined in the ARMv8-a architecture
+reference manual (ARM DDI 0487A.k), Chapter 'Part H: External debug'. A CPU
+can integrate the debug module, which is mainly used in two modes: self-hosted
+debug and external debug. External debug is the well-known mode in which an
+external debugger connects to the SoC through a JTAG port; a program can
+instead use debugging methods that rely on the self-hosted debug mode, which
+is what this document focuses on.
+
+The debug module provides a sample-based profiling extension, which can be
+used to sample the CPU program counter, secure state, exception level, etc.;
+usually every CPU has one dedicated debug module connected to it. Using the
+self-hosted debug mechanism, the Linux kernel can access these registers
+through an MMIO region when a kernel panic happens. The panic callback
+notifier dumps the related registers for every CPU, which helps in analysing
+the panic afterwards.
+
+
+Implementation
+--------------
+
+- During driver registration, the driver uses EDDEVID and EDDEVID1 - two
+  device ID registers - to decide whether sample-based profiling is
+  implemented. On some platforms this hardware feature is fully or partially
+  implemented; if the feature is not supported, registration will fail.
+
+- At the time this documentation was written, the debug driver mainly relies
+  on information gathered by the kernel panic callback notifier from three
+  sampling registers: EDPCSR, EDVIDSR and EDCIDSR. From EDPCSR we can get the
+  program counter; EDVIDSR has information on secure state, exception level,
+  bit width, etc.; EDCIDSR holds the context ID value, i.e. the sampled value
+  of CONTEXTIDR_EL1.
+
+- The driver supports a CPU running in either AArch64 or AArch32 mode. The
+  register naming convention differs slightly between them: AArch64 uses 'ED'
+  as the register prefix (ARM DDI 0487A.k, chapter H9.1) and AArch32 uses
+  'DBG' (ARM DDI 0487A.k, chapter G5.1). The driver uses the AArch64 naming
+  convention throughout.
+
+- ARMv8-a (ARM DDI 0487A.k) and ARMv7-a (ARM DDI 0406C.b) have different
+  register bit definitions, so the driver reconciles the two differences:
+
+  If PCSROffset=0b0000, on ARMv8-a the EDPCSR feature is not implemented;
+  ARMv7-a, however, defines "PCSR samples are offset by a value that depends
+  on the instruction set state". For ARMv7-a, the driver additionally checks
+  whether the CPU runs the ARM or Thumb instruction set and calibrates the
+  PCSR value accordingly; the detailed description of the offset is in the
+  ARMv7-a ARM (ARM DDI 0406C.b), chapter C11.11.34 "DBGPCSR, Program Counter
+  Sampling Register".
+
+  If PCSROffset=0b0010, ARMv8-a defines "EDPCSR implemented, and samples have
+  no offset applied and do not sample the instruction set state in AArch32
+  state". So on ARMv8 if EDDEVID1.PCSROffset is 0b0010 and the CPU operates
+  in AArch32 state, EDPCSR is not sampled; when the CPU operates in AArch64
+  state EDPCSR is sampled and no offset is applied.
+
+
+Clock and power domain
+----------------------
+
+Before accessing debug registers, we should ensure the clock and power domain
+have been enabled properly. In ARMv8-a ARM (ARM DDI 0487A.k) chapter 'H9.1
+Debug registers', the debug registers are spread into two domains: the debug
+domain and the CPU domain.
+
+                                +---------------+
+                                |               |
+                                |               |
+                     +----------+--+            |
+        dbg_clock -->|          |**|            |<-- cpu_clock
+                     |    Debug |**|   CPU      |
+ dbg_power_domain -->|          |**|            |<-- cpu_power_domain
+                     +----------+--+            |
+                                |               |
+                                |               |
+                                +---------------+
+
+For the debug domain, the user uses the DT bindings "clocks" and
+"power-domains" to specify the corresponding clock source and power supply
+for the debug logic.
+The driver calls the pm_runtime_{put|get} operations as needed to handle the
+debug power domain.
+
+For the CPU domain, different SoC designs have different power management
+schemes, and this heavily impacts the external debug module. So we can divide
+the designs into the cases below:
+
+- On systems with a sane power controller which behaves correctly with
+  respect to the CPU power domain, the CPU power domain can be controlled by
+  the driver through the EDPRCR register. The driver first writes the
+  EDPRCR.COREPURQ bit to power up the CPU, and then writes the
+  EDPRCR.CORENPDRQ bit to emulate CPU power down. As a result, the CPU power
+  domain is guaranteed to be powered on while the debug related registers are
+  accessed;
+
+- Some designs will power down an entire cluster if all CPUs on the cluster
+  are powered down - including the parts of the debug registers that should
+  remain powered in the debug power domain. The bits in EDPRCR are not
+  respected in these cases, so these designs do not support debug over
+  power down in the way that the CoreSight / Debug designers anticipated.
+  This means that even checking EDPRSR has the potential to cause a bus hang
+  if the target register is unpowered.
+
+  In this case, accessing the debug registers while they are not powered is a
+  recipe for disaster; so we need to prevent CPU low power states at boot
+  time or when the user enables the module at run time. Please see the
+  chapter "How to use the module" for detailed usage information.
+
+
+Device Tree Bindings
+--------------------
+
+See Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt for details.
+
+
+How to use the module
+---------------------
+
+If you want to enable the debugging functionality at boot time, you can add
+"coresight_cpu_debug.enable=1" to the kernel command line.
+
+The driver can also be built as a module, in which case debugging can be
+enabled when the module is loaded:
+# insmod coresight_cpu_debug.ko debug=1
+
+If debugging has not been enabled at boot time or at module load time, the
+driver uses the debugfs file system to provide a knob to dynamically enable
+or disable debugging:
+
+To enable it, write a '1' into /sys/kernel/debug/coresight_cpu_debug/enable:
+# echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable
+
+To disable it, write a '0' into /sys/kernel/debug/coresight_cpu_debug/enable:
+# echo 0 > /sys/kernel/debug/coresight_cpu_debug/enable
+
+As explained in the chapter "Clock and power domain", if you are working on a
+platform which has idle states that power off the debug logic and whose power
+controller cannot properly handle the requests from EDPRCR, then you should
+first constrain the CPU idle states before enabling the CPU debugging
+feature; this ensures that accesses to the debug logic are safe.
+
+If you want to limit idle states at boot time, you can use "nohlt" or
+"cpuidle.off=1" in the kernel command line.
+
+At run time you can disable idle states with the methods below:
+
+Set a latency request via /dev/cpu_dma_latency to disable idle states on all
+CPUs (if latency = 0uS then all idle states are disabled):
+# echo "what_ever_latency_you_need_in_uS" > /dev/cpu_dma_latency
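+
+If you prefer a programmatic way, the same constraint can be requested from a
+small helper program. The following is only an illustrative sketch (it is not
+part of the driver); it relies on the documented PM QoS behaviour that the
+latency request stays active only while the file descriptor is kept open:
+
+  /* Illustrative sketch: hold a 0us cpu_dma_latency request while debugging. */
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int32_t latency_us = 0;  /* 0 disables all CPU idle states */
+          int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+          if (write(fd, &latency_us, sizeof(latency_us)) < 0)
+                  return 1;
+
+          /* The request is dropped when the fd is closed, so keep it
+           * open for as long as the debug registers must stay usable. */
+          printf("CPU idle states constrained; press Ctrl-C to release\n");
+          pause();
+          close(fd);
+          return 0;
+  }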
+
+Disable a specific idle state of a specific CPU:
+# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
+
+
+Output format
+-------------
+
+Here is an example of the debugging output format:
+
+ARM external debug module:
+coresight-cpu-debug 850000.debug: CPU[0]:
+coresight-cpu-debug 850000.debug:  EDPRSR:  00000001 (Power:On DLK:Unlock)
+coresight-cpu-debug 850000.debug:  EDPCSR:  [<ffff00000808e9bc>] handle_IPI+0x174/0x1d8
+coresight-cpu-debug 850000.debug:  EDCIDSR: 00000000
+coresight-cpu-debug 850000.debug:  EDVIDSR: 90000000 (State:Non-secure Mode:EL1/0 Width:64bits VMID:0)
+coresight-cpu-debug 852000.debug: CPU[1]:
+coresight-cpu-debug 852000.debug:  EDPRSR:  00000001 (Power:On DLK:Unlock)
+coresight-cpu-debug 852000.debug:  EDPCSR:  [<ffff0000087fab34>] debug_notifier_call+0x23c/0x358
+coresight-cpu-debug 852000.debug:  EDCIDSR: 00000000
+coresight-cpu-debug 852000.debug:  EDVIDSR: 90000000 (State:Non-secure Mode:EL1/0 Width:64bits VMID:0)
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 94a987b..fff8ff6 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1609,7 +1609,7 @@
   <idle>-0       3dN.2   14us : sched_avg_update <-__cpu_load_update
   <idle>-0       3dN.2   14us : _raw_spin_unlock <-cpu_load_update_nohz
   <idle>-0       3dN.2   14us : sub_preempt_count <-_raw_spin_unlock
-  <idle>-0       3dN.1   15us : calc_load_exit_idle <-tick_nohz_idle_exit
+  <idle>-0       3dN.1   15us : calc_load_nohz_stop <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : hrtimer_cancel <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : hrtimer_try_to_cancel <-hrtimer_cancel
diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst
index 4511eed..8d7ed0c 100644
--- a/Documentation/translations/ja_JP/howto.rst
+++ b/Documentation/translations/ja_JP/howto.rst
@@ -197,13 +197,6 @@
         make latexdocs
         make epubdocs
 
-現在、幾つかの DocBook形式で書かれたドキュメントは ReST形式に転換中で
-す。それらのドキュメントはDocumentation/DocBook ディレクトリに生成され、
-Postscript または man ページの形式を生成するには以下のようにします - ::
-
-        make psdocs
-        make mandocs
-
 カーネル開発者になるには
 ------------------------
 
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index 2333697..624654b 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -191,13 +191,6 @@
          make latexdocs
          make epubdocs
 
-현재, ReST 로의 변환이 진행중인, DocBook 으로 쓰인 문서들이 존재한다. 그런
-문서들은 Documentation/DocBook/ 디렉토리 안에 생성될 것이고 다음 커맨드를 통해
-Postscript 나 man page 로도 만들어질 수 있다::
-
-         make psdocs
-         make mandocs
-
 커널 개발자가 되는 것
 ---------------------
 
@@ -270,15 +263,17 @@
     선호되는 방법은  git(커널의 소스 관리 툴, 더 많은 정보들은
     https://git-scm.com/ 에서 참조할 수 있다)를 사용하는 것이지만 순수한
     패치파일의 형식으로 보내는 것도 무관하다.
-  - 2주 후에 -rc1 커널이 배포되며 지금부터는 전체 커널의 안정성에 영향을
-    미칠수 있는 새로운 기능들을 포함하지 않는 패치들만이 추가될 수 있다.
-    완전히 새로운 드라이버(혹은 파일시스템)는 -rc1 이후에만 받아들여진다는
-    것을 기억해라. 왜냐하면 변경이 자체내에서만 발생하고 추가된 코드가
-    드라이버 외부의 다른 부분에는 영향을 주지 않으므로 그런 변경은
+  - 2주 후에 -rc1 커널이 릴리즈되며 여기서부터의 주안점은 새로운 커널을
+    가능한한 안정되게 하는 것이다.  이 시점에서의 대부분의 패치들은
     회귀(역자주: 이전에는 존재하지 않았지만 새로운 기능추가나 변경으로 인해
-    생겨난 버그)를 일으킬 만한 위험을 가지고 있지 않기 때문이다. -rc1이
-    배포된 이후에 git를 사용하여 패치들을 Linus에게 보낼수 있지만 패치들은
-    공식적인 메일링 리스트로 보내서 검토를 받을 필요가 있다.
+    생겨난 버그)를 고쳐야 한다.  이전부터 존재한 버그는 회귀가 아니므로, 그런
+    버그에 대한 수정사항은 중요한 경우에만 보내져야 한다.  완전히 새로운
+    드라이버(혹은 파일시스템)는 -rc1 이후에만 받아들여진다는 것을 기억해라.
+    왜냐하면 변경이 자체내에서만 발생하고 추가된 코드가 드라이버 외부의 다른
+    부분에는 영향을 주지 않으므로 그런 변경은 회귀를 일으킬 만한 위험을 가지고
+    있지 않기 때문이다. -rc1이 배포된 이후에 git를 사용하여 패치들을 Linus에게
+    보낼수 있지만 패치들은 공식적인 메일링 리스트로 보내서 검토를 받을 필요가
+    있다.
   - 새로운 -rc는 Linus가 현재 git tree가 테스트 하기에 충분히 안정된 상태에
     있다고 판단될 때마다 배포된다. 목표는 새로운 -rc 커널을 매주 배포하는
     것이다.
@@ -359,7 +354,7 @@
 버그 보고
 ---------
 
-https://bugzilla.kernel.org는 리눅스 커널 개발자들이 커널의 버그를 추적하는
+https://bugzilla.kernel.org 는 리눅스 커널 개발자들이 커널의 버그를 추적하는
 곳이다. 사용자들은 발견한 모든 버그들을 보고하기 위하여 이 툴을 사용할 것을
 권장한다.  kernel bugzilla를 사용하는 자세한 방법은 다음을 참조하라.
 
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index d05d4c5..c6f4ead 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -786,7 +786,7 @@
 위의 코드를 아래와 같이 바꿔버릴 수 있습니다:
 
 	q = READ_ONCE(a);
-	WRITE_ONCE(b, 1);
+	WRITE_ONCE(b, 2);
 	do_something_else();
 
 이렇게 되면, CPU 는 변수 'a' 로부터의 로드와 변수 'b' 로의 스토어 사이의 순서를
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index fb0cc4d..fbc397d 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -16,10 +16,11 @@
 13. RNDIS function
 14. SERIAL function
 15. SOURCESINK function
-16. UAC1 function
+16. UAC1 function (legacy implementation)
 17. UAC2 function
 18. UVC function
 19. PRINTER function
+20. UAC1 function (new API)
 
 
 1. ACM function
@@ -589,15 +590,16 @@
 host: test-usb (tools/usb/testusb.c)
 
 
-16. UAC1 function
+16. UAC1 function (legacy implementation)
 =================
 
-The function is provided by usb_f_uac1.ko module.
+The function is provided by usb_f_uac1_legacy.ko module.
 
 Function-specific configfs interface
 ------------------------------------
 
-The function name to use when creating the function directory is "uac1".
+The function name to use when creating the function directory
+is "uac1_legacy".
 The uac1 function provides these attributes in its function directory:
 
 	audio_buf_size - audio buffer size
@@ -772,3 +774,46 @@
 
 More advanced testing can be done with the prn_example
 described in Documentation/usb/gadget-printer.txt.
+
+
+20. UAC1 function (virtual ALSA card, using u_audio API)
+=================
+
+The function is provided by usb_f_uac1.ko module.
+It will create a virtual ALSA card and the audio streams are simply
+sunk to and sourced from it.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "uac1".
+The uac1 function provides these attributes in its function directory:
+
+	c_chmask - capture channel mask
+	c_srate - capture sampling rate
+	c_ssize - capture sample size (bytes)
+	p_chmask - playback channel mask
+	p_srate - playback sampling rate
+	p_ssize - playback sample size (bytes)
+	req_number - the number of pre-allocated requests for both capture
+		     and playback
+
+The attributes have sane default values.
+
+Testing the UAC1 function
+-------------------------
+
+device: run the gadget
+host: aplay -l # should list our USB Audio Gadget
+
+This function does not require real hardware support; it just
+sends a stream of audio data to/from the host. In order to
+actually hear something at the device side, a command similar
+to the following must be used on the device:
+
+$ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
+
+e.g.:
+
+$ arecord -f dat -t wav -D hw:CARD=UAC1Gadget,DEV=0 | \
+aplay -D default:CARD=OdroidU3
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index a9d01b4..7b2eb1b 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -16,6 +16,8 @@
 .. toctree::
    :maxdepth: 2
 
+   no_new_privs
+   seccomp_filter
    unshare
 
 .. only::  subproject and html
diff --git a/Documentation/userspace-api/no_new_privs.rst b/Documentation/userspace-api/no_new_privs.rst
new file mode 100644
index 0000000..d060ea2
--- /dev/null
+++ b/Documentation/userspace-api/no_new_privs.rst
@@ -0,0 +1,63 @@
+======================
+No New Privileges Flag
+======================
+
+The execve system call can grant a newly-started program privileges that
+its parent did not have.  The most obvious examples are setuid/setgid
+programs and file capabilities.  To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child.  For example:
+
+ - The dynamic loader handles ``LD_*`` environment variables differently if
+   a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+   ``/etc/passwd`` to be replaced from the point of view of a process that
+   inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes.  The ``no_new_privs`` bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve.  Any task
+can set ``no_new_privs``.  Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset.  With ``no_new_privs`` set, ``execve()``
+promises not to grant the privilege to do anything that could not have
+been done without the execve call.  For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set ``no_new_privs``, use::
+
+    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+
+Be careful, though: LSMs might also not tighten constraints on exec
+in ``no_new_privs`` mode.  (This means that setting up a general-purpose
+service launcher to set ``no_new_privs`` before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that ``no_new_privs`` does not prevent privilege changes that do not
+involve ``execve()``.  An appropriately privileged task can still call
+``setuid(2)`` and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for ``no_new_privs`` so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+   execve and can change the behavior of newly-executed programs.
+   Unprivileged users are therefore only allowed to install such filters
+   if ``no_new_privs`` is set (see the sketch after this list).
+
+ - By itself, ``no_new_privs`` can be used to reduce the attack surface
+   available to an unprivileged user.  If everything running with a
+   given uid has ``no_new_privs`` set, then that uid will be unable to
+   escalate its privileges by directly attacking setuid, setgid, and
+   fcap-using binaries; it will need to compromise something without the
+   ``no_new_privs`` bit set first.
+
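+As an illustration of the first use case, the following is a minimal,
+self-contained sketch (not taken from the kernel sources): it sets
+``no_new_privs`` and then installs a trivial seccomp filter. The filter shown
+allows every system call; a real sandbox would return ``SECCOMP_RET_ERRNO``
+or ``SECCOMP_RET_KILL`` for unwanted calls::
+
+    /* Illustrative sketch, not from the kernel sources. */
+    #include <stdio.h>
+    #include <sys/prctl.h>
+    #include <linux/filter.h>
+    #include <linux/seccomp.h>
+
+    int main(void)
+    {
+        /* Allow everything; a real filter would inspect seccomp_data. */
+        struct sock_filter insns[] = {
+            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
+        };
+        struct sock_fprog prog = {
+            .len = sizeof(insns) / sizeof(insns[0]),
+            .filter = insns,
+        };
+
+        /* Without no_new_privs, installing the filter would require
+         * CAP_SYS_ADMIN in the current user namespace. */
+        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
+            return 1;
+
+        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
+            return 1;
+
+        printf("running with no_new_privs and a seccomp filter\n");
+        return 0;
+    }
+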
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if ``no_new_privs`` is set.  In principle,
+several options to ``unshare(2)`` and ``clone(2)`` would be safe when
+``no_new_privs`` is set, and ``no_new_privs`` + ``chroot`` is considerably less
+dangerous than chroot by itself.
diff --git a/Documentation/prctl/seccomp_filter.txt b/Documentation/userspace-api/seccomp_filter.rst
similarity index 70%
rename from Documentation/prctl/seccomp_filter.txt
rename to Documentation/userspace-api/seccomp_filter.rst
index 1e469ef..f71eb5e 100644
--- a/Documentation/prctl/seccomp_filter.txt
+++ b/Documentation/userspace-api/seccomp_filter.rst
@@ -1,8 +1,9 @@
-		SECure COMPuting with filters
-		=============================
+===========================================
+Seccomp BPF (SECure COMPuting with filters)
+===========================================
 
 Introduction
-------------
+============
 
 A large number of system calls are exposed to every userland process
 with many of them going unused for the entire lifetime of the process.
@@ -27,7 +28,7 @@
 call arguments directly.
 
 What it isn't
--------------
+=============
 
 System call filtering isn't a sandbox.  It provides a clearly defined
 mechanism for minimizing the exposed kernel surface.  It is meant to be
@@ -40,13 +41,13 @@
 construed, incorrectly, as a more complete sandboxing solution.
 
 Usage
------
+=====
 
 An additional seccomp mode is added and is enabled using the same
 prctl(2) call as the strict seccomp.  If the architecture has
-CONFIG_HAVE_ARCH_SECCOMP_FILTER, then filters may be added as below:
+``CONFIG_HAVE_ARCH_SECCOMP_FILTER``, then filters may be added as below:
 
-PR_SET_SECCOMP:
+``PR_SET_SECCOMP``:
 	Now takes an additional argument which specifies a new filter
 	using a BPF program.
 	The BPF program will be executed over struct seccomp_data
@@ -55,24 +56,25 @@
 	acceptable values to inform the kernel which action should be
 	taken.
 
-	Usage:
+	Usage::
+
 		prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, prog);
 
 	The 'prog' argument is a pointer to a struct sock_fprog which
 	will contain the filter program.  If the program is invalid, the
-	call will return -1 and set errno to EINVAL.
+	call will return -1 and set errno to ``EINVAL``.
 
-	If fork/clone and execve are allowed by @prog, any child
+	If ``fork``/``clone`` and ``execve`` are allowed by @prog, any child
 	processes will be constrained to the same filters and system
 	call ABI as the parent.
 
-	Prior to use, the task must call prctl(PR_SET_NO_NEW_PRIVS, 1) or
-	run with CAP_SYS_ADMIN privileges in its namespace.  If these are not
-	true, -EACCES will be returned.  This requirement ensures that filter
+	Prior to use, the task must call ``prctl(PR_SET_NO_NEW_PRIVS, 1)`` or
+	run with ``CAP_SYS_ADMIN`` privileges in its namespace.  If these are not
+	true, ``-EACCES`` will be returned.  This requirement ensures that filter
 	programs cannot be applied to child processes with greater privileges
 	than the task that installed them.
 
-	Additionally, if prctl(2) is allowed by the attached filter,
+	Additionally, if ``prctl(2)`` is allowed by the attached filter,
 	additional filters may be layered on which will increase evaluation
 	time, but allow for further decreasing the attack surface during
 	execution of a process.
@@ -80,51 +82,52 @@
 The above call returns 0 on success and non-zero on error.
 
 Return values
--------------
+=============
+
 A seccomp filter may return any of the following values. If multiple
 filters exist, the return value for the evaluation of a given system
 call will always use the highest precedent value. (For example,
-SECCOMP_RET_KILL will always take precedence.)
+``SECCOMP_RET_KILL`` will always take precedence.)
 
 In precedence order, they are:
 
-SECCOMP_RET_KILL:
+``SECCOMP_RET_KILL``:
 	Results in the task exiting immediately without executing the
-	system call.  The exit status of the task (status & 0x7f) will
-	be SIGSYS, not SIGKILL.
+	system call.  The exit status of the task (``status & 0x7f``) will
+	be ``SIGSYS``, not ``SIGKILL``.
 
-SECCOMP_RET_TRAP:
-	Results in the kernel sending a SIGSYS signal to the triggering
-	task without executing the system call.  siginfo->si_call_addr
+``SECCOMP_RET_TRAP``:
+	Results in the kernel sending a ``SIGSYS`` signal to the triggering
+	task without executing the system call. ``siginfo->si_call_addr``
 	will show the address of the system call instruction, and
-	siginfo->si_syscall and siginfo->si_arch will indicate which
+	``siginfo->si_syscall`` and ``siginfo->si_arch`` will indicate which
 	syscall was attempted.  The program counter will be as though
 	the syscall happened (i.e. it will not point to the syscall
 	instruction).  The return value register will contain an arch-
 	dependent value -- if resuming execution, set it to something
 	sensible.  (The architecture dependency is because replacing
-	it with -ENOSYS could overwrite some useful information.)
+	it with ``-ENOSYS`` could overwrite some useful information.)
 
-	The SECCOMP_RET_DATA portion of the return value will be passed
-	as si_errno.
+	The ``SECCOMP_RET_DATA`` portion of the return value will be passed
+	as ``si_errno``.
 
-	SIGSYS triggered by seccomp will have a si_code of SYS_SECCOMP.
+	``SIGSYS`` triggered by seccomp will have a si_code of ``SYS_SECCOMP``.
 
-SECCOMP_RET_ERRNO:
+``SECCOMP_RET_ERRNO``:
 	Results in the lower 16-bits of the return value being passed
 	to userland as the errno without executing the system call.
 
-SECCOMP_RET_TRACE:
+``SECCOMP_RET_TRACE``:
 	When returned, this value will cause the kernel to attempt to
-	notify a ptrace()-based tracer prior to executing the system
-	call.  If there is no tracer present, -ENOSYS is returned to
+	notify a ``ptrace()``-based tracer prior to executing the system
+	call.  If there is no tracer present, ``-ENOSYS`` is returned to
 	userland and the system call is not executed.
 
-	A tracer will be notified if it requests PTRACE_O_TRACESECCOMP
-	using ptrace(PTRACE_SETOPTIONS).  The tracer will be notified
-	of a PTRACE_EVENT_SECCOMP and the SECCOMP_RET_DATA portion of
+	A tracer will be notified if it requests ``PTRACE_O_TRACESECCOMP``
+	using ``ptrace(PTRACE_SETOPTIONS)``.  The tracer will be notified
+	of a ``PTRACE_EVENT_SECCOMP`` and the ``SECCOMP_RET_DATA`` portion of
 	the BPF program return value will be available to the tracer
-	via PTRACE_GETEVENTMSG.
+	via ``PTRACE_GETEVENTMSG``.
 
 	The tracer can skip the system call by changing the syscall number
 	to -1.  Alternatively, the tracer can change the system call
@@ -138,19 +141,19 @@
 	allow use of ptrace, even of other sandboxed processes, without
 	extreme care; ptracers can use this mechanism to escape.)
 
-SECCOMP_RET_ALLOW:
+``SECCOMP_RET_ALLOW``:
 	Results in the system call being executed.
 
 If multiple filters exist, the return value for the evaluation of a
 given system call will always use the highest precedent value.
 
-Precedence is only determined using the SECCOMP_RET_ACTION mask.  When
+Precedence is only determined using the ``SECCOMP_RET_ACTION`` mask.  When
 multiple filters return values of the same precedence, only the
-SECCOMP_RET_DATA from the most recently installed filter will be
+``SECCOMP_RET_DATA`` from the most recently installed filter will be
 returned.
 
 Pitfalls
---------
+========
 
 The biggest pitfall to avoid during use is filtering on system call
 number without checking the architecture value.  Why?  On any
@@ -160,39 +163,40 @@
 the filters may be abused.  Always check the arch value!
 
 Example
--------
+=======
 
-The samples/seccomp/ directory contains both an x86-specific example
+The ``samples/seccomp/`` directory contains both an x86-specific example
 and a more generic example of a higher level macro interface for BPF
 program generation.
 
 
 
 Adding architecture support
------------------------
+===========================
 
-See arch/Kconfig for the authoritative requirements.  In general, if an
+See ``arch/Kconfig`` for the authoritative requirements.  In general, if an
 architecture supports both ptrace_event and seccomp, it will be able to
-support seccomp filter with minor fixup: SIGSYS support and seccomp return
-value checking.  Then it must just add CONFIG_HAVE_ARCH_SECCOMP_FILTER
+support seccomp filter with minor fixup: ``SIGSYS`` support and seccomp return
+value checking.  Then it must just add ``CONFIG_HAVE_ARCH_SECCOMP_FILTER``
 to its arch-specific Kconfig.
 
 
 
 Caveats
--------
+=======
 
 The vDSO can cause some system calls to run entirely in userspace,
 leading to surprises when you run programs on different machines that
 fall back to real syscalls.  To minimize these surprises on x86, make
 sure you test with
-/sys/devices/system/clocksource/clocksource0/current_clocksource set to
-something like acpi_pm.
+``/sys/devices/system/clocksource/clocksource0/current_clocksource`` set to
+something like ``acpi_pm``.
 
 On x86-64, vsyscall emulation is enabled by default.  (vsyscalls are
-legacy variants on vDSO calls.)  Currently, emulated vsyscalls will honor seccomp, with a few oddities:
+legacy variants on vDSO calls.)  Currently, emulated vsyscalls will
+honor seccomp, with a few oddities:
 
-- A return value of SECCOMP_RET_TRAP will set a si_call_addr pointing to
+- A return value of ``SECCOMP_RET_TRAP`` will set a ``si_call_addr`` pointing to
   the vsyscall entry for the given call and not the address after the
   'syscall' instruction.  Any code which wants to restart the call
   should be aware that (a) a ret instruction has been emulated and (b)
@@ -200,7 +204,7 @@
   emulation security checks, making resuming the syscall mostly
   pointless.
 
-- A return value of SECCOMP_RET_TRACE will signal the tracer as usual,
+- A return value of ``SECCOMP_RET_TRACE`` will signal the tracer as usual,
   but the syscall may not be changed to another system call using the
   orig_rax register. It may only be changed to -1 order to skip the
   currently emulated call. Any other change MAY terminate the process.
@@ -209,14 +213,14 @@
   rip or rsp.  (Do not rely on other changes terminating the process.
   They might work.  For example, on some kernels, choosing a syscall
   that only exists in future kernels will be correctly emulated (by
-  returning -ENOSYS).
+  returning ``-ENOSYS``).
 
-To detect this quirky behavior, check for addr & ~0x0C00 ==
-0xFFFFFFFFFF600000.  (For SECCOMP_RET_TRACE, use rip.  For
-SECCOMP_RET_TRAP, use siginfo->si_call_addr.)  Do not check any other
+To detect this quirky behavior, check for ``addr & ~0x0C00 ==
+0xFFFFFFFFFF600000``.  (For ``SECCOMP_RET_TRACE``, use rip.  For
+``SECCOMP_RET_TRAP``, use ``siginfo->si_call_addr``.)  Do not check any other
 condition: future kernels may improve vsyscall emulation and current
 kernels in vsyscall=native mode will behave differently, but the
-instructions at 0xF...F600{0,4,8,C}00 will not be system calls in these
+instructions at ``0xF...F600{0,4,8,C}00`` will not be system calls in these
 cases.
 
 Note that modern systems are unlikely to use vsyscalls at all -- they
diff --git a/Documentation/userspace-api/unshare.rst b/Documentation/userspace-api/unshare.rst
index 737c192..877e90a 100644
--- a/Documentation/userspace-api/unshare.rst
+++ b/Documentation/userspace-api/unshare.rst
@@ -107,7 +107,7 @@
 
 unshare() reverses sharing that was done using clone(2) system call,
 so unshare() should have a similar interface as clone(2). That is,
-since flags in clone(int flags, void *stack) specifies what should
+since flags in clone(int flags, void \*stack) specifies what should
 be shared, similar flags in unshare(int flags) should specify
 what should be unshared. Unfortunately, this may appear to invert
 the meaning of the flags from the way they are used in clone(2).
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 61b611e..b297c48 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -36,7 +36,8 @@
 		to broadcast MCEs.
    mce=bootlog
 		Enable logging of machine checks left over from booting.
-		Disabled by default on AMD because some BIOS leave bogus ones.
+		Disabled by default on AMD Fam10h and older because some BIOS
+		leave bogus ones.
 		If your BIOS doesn't do that it's a good idea to enable though
 		to make sure you log even machine check events that result
 		in a reboot. On Intel systems it is enabled by default.
diff --git a/MAINTAINERS b/MAINTAINERS
index 053c3bd..61d0ac6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -478,7 +478,7 @@
 S:	Maintained
 F:	Documentation/hwmon/ads1015
 F:	drivers/hwmon/ads1015.c
-F:	include/linux/i2c/ads1015.h
+F:	include/linux/platform_data/ads1015.h
 
 ADT746X FAN DRIVER
 M:	Colin Leroy <colin@colino.net>
@@ -1036,6 +1036,22 @@
 F:	drivers/amba/
 F:	include/linux/amba/bus.h
 
+ARM/ACTIONS SEMI ARCHITECTURE
+M:	Andreas Färber <afaerber@suse.de>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+N:	owl
+F:	arch/arm/mach-actions/
+F:	arch/arm/boot/dts/owl-*
+F:	arch/arm64/boot/dts/actions/
+F:	drivers/clocksource/owl-*
+F:	drivers/soc/actions/
+F:	include/dt-bindings/power/owl-*
+F:	include/linux/soc/actions/
+F:	Documentation/devicetree/bindings/arm/actions.txt
+F:	Documentation/devicetree/bindings/power/actions,owl-sps.txt
+F:	Documentation/devicetree/bindings/timer/actions,owl-timer.txt
+
 ARM/ADS SPHERE MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1172,7 +1188,7 @@
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:	Hartley Sweeten <hsweeten@visionengravers.com>
-M:	Ryan Mallon <rmallon@gmail.com>
+M:	Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-ep93xx/
@@ -1207,7 +1223,9 @@
 S:	Maintained
 F:	drivers/hwtracing/coresight/*
 F:	Documentation/trace/coresight.txt
+F:	Documentation/trace/coresight-cpu-debug.txt
 F:	Documentation/devicetree/bindings/arm/coresight.txt
+F:	Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
 F:	Documentation/ABI/testing/sysfs-bus-coresight-devices-*
 F:	tools/perf/arch/arm/util/pmu.c
 F:	tools/perf/arch/arm/util/auxtrace.c
@@ -1489,13 +1507,15 @@
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/rtc-armada38x.c
 F:	arch/arm/boot/dts/armada*
 F:	arch/arm/boot/dts/kirkwood*
+F:	arch/arm/configs/mvebu_*_defconfig
+F:	arch/arm/mach-mvebu/
 F:	arch/arm64/boot/dts/marvell/armada*
 F:	drivers/cpufreq/mvebu-cpufreq.c
-F:	arch/arm/configs/mvebu_*_defconfig
+F:	drivers/irqchip/irq-armada-370-xp.c
+F:	drivers/irqchip/irq-mvebu-*
+F:	drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:	Jisheng Zhang <jszhang@marvell.com>
@@ -1677,6 +1697,13 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
+ARM/REALTEK ARCHITECTURE
+M:	Andreas Färber <afaerber@suse.de>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm64/boot/dts/realtek/
+F:	Documentation/devicetree/bindings/arm/realtek.txt
+
 ARM/RENESAS ARM64 ARCHITECTURE
 M:	Simon Horman <horms@verge.net.au>
 M:	Magnus Damm <magnus.damm@gmail.com>
@@ -1710,6 +1737,7 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
 S:	Maintained
 F:	arch/arm/boot/dts/rk3*
+F:	arch/arm/boot/dts/rv1108*
 F:	arch/arm/mach-rockchip/
 F:	drivers/clk/rockchip/
 F:	drivers/i2c/busses/i2c-rk3x.c
@@ -1721,7 +1749,6 @@
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>
-R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1856,6 @@
 ARM/STI ARCHITECTURE
 M:	Patrice Chotard <patrice.chotard@st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:	kernel@stlinux.com
 W:	http://www.stlinux.com
 S:	Maintained
 F:	arch/arm/mach-sti/
@@ -1843,8 +1869,8 @@
 F:	drivers/media/rc/st_rc.c
 F:	drivers/media/platform/sti/c8sectpfe/
 F:	drivers/mmc/host/sdhci-st.c
-F:	drivers/phy/phy-miphy28lp.c
-F:	drivers/phy/phy-stih407-usb.c
+F:	drivers/phy/st/phy-miphy28lp.c
+F:	drivers/phy/st/phy-stih407-usb.c
 F:	drivers/pinctrl/pinctrl-st.c
 F:	drivers/remoteproc/st_remoteproc.c
 F:	drivers/remoteproc/st_slim_rproc.c
@@ -2322,6 +2348,15 @@
 F:	drivers/input/touchscreen/atmel_mxt_ts.c
 F:	include/linux/platform_data/atmel_mxt_ts.h
 
+ATOMIC INFRASTRUCTURE
+M:	Will Deacon <will.deacon@arm.com>
+M:	Peter Zijlstra <peterz@infradead.org>
+R:	Boqun Feng <boqun.feng@gmail.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	arch/*/include/asm/atomic*.h
+F:	include/*/atomic*.h
+
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
@@ -2685,7 +2720,6 @@
 F:	arch/arm/mach-bcm/
 
 BROADCOM BCM2835 ARM ARCHITECTURE
-M:	Lee Jones <lee@kernel.org>
 M:	Eric Anholt <eric@anholt.net>
 M:	Stefan Wahren <stefan.wahren@i2se.com>
 L:	linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2964,7 +2998,7 @@
 
 C6X ARCHITECTURE
 M:	Mark Salter <msalter@redhat.com>
-M:	Aurelien Jacquiot <a-jacquiot@ti.com>
+M:	Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
 L:	linux-c6x-dev@linux-c6x.org
 W:	http://www.linux-c6x.org/wiki/index.php/Main_Page
 S:	Maintained
@@ -3586,7 +3620,6 @@
 S:	Maintained
 F:	Documentation/crypto/
 F:	Documentation/devicetree/bindings/crypto/
-F:	Documentation/DocBook/crypto-API.tmpl
 F:	arch/*/crypto/
 F:	crypto/
 F:	drivers/crypto/
@@ -4143,8 +4176,7 @@
 L:	linux-doc@vger.kernel.org
 S:	Maintained
 F:	Documentation/
-F:	scripts/docproc.c
-F:	scripts/kernel-doc*
+F:	scripts/kernel-doc
 X:	Documentation/ABI/
 X:	Documentation/devicetree/
 X:	Documentation/acpi
@@ -5622,7 +5654,7 @@
 
 GENWQE (IBM Generic Workqueue Card)
 M:	Frank Haverkamp <haver@linux.vnet.ibm.com>
-M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:	Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 S:	Supported
 F:	drivers/misc/genwqe/
 
@@ -5667,7 +5699,6 @@
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:	Maintained
@@ -6481,6 +6512,13 @@
 F:	Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
 F:	drivers/iio/adc/envelope-detector.c
 
+IIO MULTIPLEXER
+M:	Peter Rosin <peda@axentia.se>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/iio/multiplexer/iio-mux.txt
+F:	drivers/iio/multiplexer/iio-mux.c
+
 IIO SUBSYSTEM AND DRIVERS
 M:	Jonathan Cameron <jic23@kernel.org>
 R:	Hartmut Knaack <knaack.h@gmx.de>
@@ -7349,7 +7387,7 @@
 M:	David Howells <dhowells@redhat.com>
 L:	keyrings@vger.kernel.org
 S:	Maintained
-F:	Documentation/security/keys.txt
+F:	Documentation/security/keys/core.rst
 F:	include/linux/key.h
 F:	include/linux/key-type.h
 F:	include/linux/keyctl.h
@@ -7363,7 +7401,7 @@
 L:	linux-security-module@vger.kernel.org
 L:	keyrings@vger.kernel.org
 S:	Supported
-F:	Documentation/security/keys-trusted-encrypted.txt
+F:	Documentation/security/keys/trusted-encrypted.rst
 F:	include/keys/trusted-type.h
 F:	security/keys/trusted.c
 F:	security/keys/trusted.h
@@ -7374,7 +7412,7 @@
 L:	linux-security-module@vger.kernel.org
 L:	keyrings@vger.kernel.org
 S:	Supported
-F:	Documentation/security/keys-trusted-encrypted.txt
+F:	Documentation/security/keys/trusted-encrypted.rst
 F:	include/keys/encrypted-type.h
 F:	security/keys/encrypted-keys/
 
@@ -7384,7 +7422,7 @@
 L:	kgdb-bugreport@lists.sourceforge.net
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
 S:	Maintained
-F:	Documentation/DocBook/kgdb.tmpl
+F:	Documentation/dev-tools/kgdb.rst
 F:	drivers/misc/kgdbts.c
 F:	drivers/tty/serial/kgdboc.c
 F:	include/linux/kdb.h
@@ -7556,7 +7594,7 @@
 F:	drivers/ata/sata_promise.*
 
 LIBLOCKDEP
-M:	Sasha Levin <sasha.levin@oracle.com>
+M:	Sasha Levin <alexander.levin@verizon.com>
 S:	Maintained
 F:	tools/lib/lockdep/
 
@@ -7707,7 +7745,7 @@
 
 LIVE PATCHING
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Jiri Kosina <jikos@kernel.org>
 M:	Miroslav Benes <mbenes@suse.cz>
 R:	Petr Mladek <pmladek@suse.com>
@@ -8061,11 +8099,11 @@
 F:	drivers/power/supply/max14577_charger.c
 F:	drivers/power/supply/max77693_charger.c
 
-MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
-M:	Javier Martinez Canillas <javier@osg.samsung.com>
+MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
+M:	Javier Martinez Canillas <javier@dowhile0.org>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	drivers/*/*max77802*.c
+F:	drivers/regulator/max77802-regulator.c
 F:	Documentation/devicetree/bindings/*/*max77802.txt
 F:	include/dt-bindings/*/*max77802.h
 
@@ -8438,7 +8476,7 @@
 S:	Supported
 F:	arch/microblaze/
 
-MICROCHIP / ATMEL AT91 / AT32 SERIAL DRIVER
+MICROCHIP / ATMEL AT91 SERIAL DRIVER
 M:	Richard Genoud <richard.genoud@gmail.com>
 S:	Maintained
 F:	drivers/tty/serial/atmel_serial.c
@@ -8508,7 +8546,7 @@
 F:	drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:	Yishai Hadas <yishaih@mellanox.com>
+M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
@@ -8516,7 +8554,6 @@
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/
-F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:	Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8563,7 @@
 S:	Supported
 F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
+F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8576,6 @@
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
-F:	include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8586,7 @@
 S:	Supported
 F:	drivers/infiniband/hw/mlx5/
 F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:	Crt Mori <cmo@melexis.com>
@@ -8588,7 +8626,7 @@
 F:	drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Rusty Russell <rusty@rustcorp.com.au>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:	Maintained
@@ -8716,6 +8754,15 @@
 F:	drivers/mmc/host/mmc_spi.c
 F:	include/linux/spi/mmc_spi.h
 
+MULTIPLEXER SUBSYSTEM
+M:	Peter Rosin <peda@axentia.se>
+S:	Maintained
+F:	Documentation/ABI/testing/mux/sysfs-class-mux*
+F:	Documentation/devicetree/bindings/mux/
+F:	include/linux/dt-bindings/mux/
+F:	include/linux/mux/
+F:	drivers/mux/
+
 MULTISOUND SOUND DRIVER
 M:	Andrew Veliath <andrewtv@usa.net>
 S:	Maintained
@@ -10155,7 +10202,7 @@
 S:	Maintained
 F:	Documentation/hwmon/pmbus
 F:	drivers/hwmon/pmbus/
-F:	include/linux/i2c/pmbus.h
+F:	include/linux/pmbus.h
 
 PMC SIERRA MaxRAID DRIVER
 L:	linux-scsi@vger.kernel.org
@@ -10450,7 +10497,7 @@
 
 PXA RTC DRIVER
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 S:	Maintained
 
 QAT DRIVER
@@ -10545,6 +10592,7 @@
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/fsl-mc/
+F:	Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
 
 QT1010 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
@@ -10757,7 +10805,7 @@
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:	Maintained
@@ -10832,11 +10880,11 @@
 S:	Supported
 F:	drivers/iio/adc/rcar_gyro_adc.c
 
-RENESAS USB2 PHY DRIVER
+RENESAS USB PHY DRIVER
 M:	Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 L:	linux-renesas-soc@vger.kernel.org
 S:	Maintained
-F:	drivers/phy/phy-rcar-gen3-usb2.c
+F:	drivers/phy/renesas/phy-rcar-gen3-usb*.c
 
 RESET CONTROLLER FRAMEWORK
 M:	Philipp Zabel <p.zabel@pengutronix.de>
@@ -10993,7 +11041,7 @@
 F:	arch/s390/
 F:	drivers/s390/
 F:	Documentation/s390/
-F:	Documentation/DocBook/s390*
+F:	Documentation/driver-api/s390-drivers.rst
 
 S390 COMMON I/O LAYER
 M:	Sebastian Ott <sebott@linux.vnet.ibm.com>
@@ -11238,12 +11286,12 @@
 S:	Supported
 F:	Documentation/devicetree/bindings/phy/samsung-phy.txt
 F:	Documentation/phy/samsung-usb2.txt
-F:	drivers/phy/phy-exynos4210-usb2.c
-F:	drivers/phy/phy-exynos4x12-usb2.c
-F:	drivers/phy/phy-exynos5250-usb2.c
-F:	drivers/phy/phy-s5pv210-usb2.c
-F:	drivers/phy/phy-samsung-usb2.c
-F:	drivers/phy/phy-samsung-usb2.h
+F:	drivers/phy/samsung/phy-exynos4210-usb2.c
+F:	drivers/phy/samsung/phy-exynos4x12-usb2.c
+F:	drivers/phy/samsung/phy-exynos5250-usb2.c
+F:	drivers/phy/samsung/phy-s5pv210-usb2.c
+F:	drivers/phy/samsung/phy-samsung-usb2.c
+F:	drivers/phy/samsung/phy-samsung-usb2.h
 
 SERIAL DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -11268,7 +11316,6 @@
 
 STI CEC DRIVER
 M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:	kernel@stlinux.com
 S:	Maintained
 F:	drivers/staging/media/st-cec/
 F:	Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11328,6 +11375,9 @@
 
 THUNDERBOLT DRIVER
 M:	Andreas Noever <andreas.noever@gmail.com>
+M:	Michael Jamet <michael.jamet@intel.com>
+M:	Mika Westerberg <mika.westerberg@linux.intel.com>
+M:	Yehezkel Bernat <yehezkel.bernat@intel.com>
 S:	Maintained
 F:	drivers/thunderbolt/
 
@@ -11495,6 +11545,7 @@
 F:	include/uapi/linux/seccomp.h
 F:	include/linux/seccomp.h
 F:	tools/testing/selftests/seccomp/*
+F:	Documentation/userspace-api/seccomp_filter.rst
 K:	\bsecure_computing
 K:	\bTIF_SECCOMP\b
 
@@ -11553,6 +11604,7 @@
 F:	include/linux/selinux*
 F:	security/selinux/
 F:	scripts/selinux/
+F:	Documentation/admin-guide/LSM/SELinux.rst
 
 APPARMOR SECURITY MODULE
 M:	John Johansen <john.johansen@canonical.com>
@@ -11561,18 +11613,21 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git
 S:	Supported
 F:	security/apparmor/
+F:	Documentation/admin-guide/LSM/apparmor.rst
 
 LOADPIN SECURITY MODULE
 M:	Kees Cook <keescook@chromium.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git lsm/loadpin
 S:	Supported
 F:	security/loadpin/
+F:	Documentation/admin-guide/LSM/LoadPin.rst
 
 YAMA SECURITY MODULE
 M:	Kees Cook <keescook@chromium.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip
 S:	Supported
 F:	security/yama/
+F:	Documentation/admin-guide/LSM/Yama.rst
 
 SENSABLE PHANTOM
 M:	Jiri Slaby <jirislaby@gmail.com>
@@ -11778,6 +11833,7 @@
 S:	Supported
 F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
+F:	arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:	"Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -11874,7 +11930,7 @@
 W:	http://schaufler-ca.com
 T:	git git://github.com/cschaufler/smack-next
 S:	Maintained
-F:	Documentation/security/Smack.txt
+F:	Documentation/admin-guide/LSM/Smack.rst
 F:	security/smack/
 
 DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
@@ -12633,6 +12689,8 @@
 F:	Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
 F:	include/dt-bindings/genpd/k2g.h
 F:	drivers/soc/ti/ti_sci_pm_domains.c
+F:	Documentation/devicetree/bindings/reset/ti,sci-reset.txt
+F:	drivers/reset/reset-ti-sci.c
 
 THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
@@ -12882,7 +12940,7 @@
 L:	linux-mmc@vger.kernel.org
 S:	Supported
 F:	drivers/mmc/host/tmio_mmc*
-F:	drivers/mmc/host/sh_mobile_sdhi.c
+F:	drivers/mmc/host/renesas_sdhi*
 F:	include/linux/mfd/tmio.h
 
 TMP401 HARDWARE MONITOR DRIVER
@@ -13463,6 +13521,17 @@
 T:	git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:	Maintained
 
+UUID HELPERS
+M:	Christoph Hellwig <hch@lst.de>
+R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:	linux-kernel@vger.kernel.org
+T:	git git://git.infradead.org/users/hch/uuid.git
+F:	lib/uuid.c
+F:	lib/test_uuid.c
+F:	include/linux/uuid.h
+F:	include/uapi/linux/uuid.h
+S:	Maintained
+
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
 L:	linux-fbdev@vger.kernel.org
@@ -13769,6 +13838,7 @@
 S:	Maintained
 F:	Documentation/w1/
 F:	drivers/w1/
+F:	include/linux/w1.h
 
 W83791D HARDWARE MONITORING DRIVER
 M:	Marc Hulsman <m.hulsman@tudelft.nl>
@@ -13861,7 +13931,7 @@
 F:	drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:	patches@opensource.wolfsonmicro.com
+L:	patches@opensource.cirrus.com
 T:	git https://github.com/CirrusLogic/linux-drivers.git
 W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
diff --git a/Makefile b/Makefile
index 470bd4d..d7cb037 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -1312,7 +1312,7 @@
 #
 mrproper: rm-dirs  := $(wildcard $(MRPROPER_DIRS))
 mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
-mrproper-dirs      := $(addprefix _mrproper_,Documentation/DocBook scripts)
+mrproper-dirs      := $(addprefix _mrproper_,scripts)
 
 PHONY += $(mrproper-dirs) mrproper archmrproper
 $(mrproper-dirs):
@@ -1416,9 +1416,7 @@
 	@$(MAKE) $(build)=$(package-dir) help
 	@echo  ''
 	@echo  'Documentation targets:'
-	@$(MAKE) -f $(srctree)/Documentation/Makefile.sphinx dochelp
-	@echo  ''
-	@$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp
+	@$(MAKE) -f $(srctree)/Documentation/Makefile dochelp
 	@echo  ''
 	@echo  'Architecture specific targets ($(SRCARCH)):'
 	@$(if $(archhelp),$(archhelp),\
@@ -1437,7 +1435,7 @@
 	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 	@echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+	@echo  '  make C=1   [targets] Check re-compiled c source with $$CHECK (sparse by default)'
 	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
 	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 	@echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'
@@ -1469,9 +1467,7 @@
 DOC_TARGETS := xmldocs sgmldocs psdocs latexdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs linkcheckdocs
 PHONY += $(DOC_TARGETS)
 $(DOC_TARGETS): scripts_basic FORCE
-	$(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
-	$(Q)$(MAKE) $(build)=Documentation -f $(srctree)/Documentation/Makefile.sphinx $@
-	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
+	$(Q)$(MAKE) $(build)=Documentation $@
 
 else # KBUILD_EXTMOD
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 6c00e5b..f76b214 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -867,4 +867,13 @@
 config ARCH_WANT_RELAX_ORDER
 	bool
 
+config REFCOUNT_FULL
+	bool "Perform full reference count validation at the expense of speed"
+	help
+	  Enabling this switches the refcounting infrastructure from a fast
+	  unchecked atomic_t implementation to a fully state checked
+	  implementation, which can be (slightly) slower but provides protections
+	  against various use-after-free conditions that can be used in
+	  security flaw exploits.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index f30c94a..ff67b837 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -100,6 +100,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define TIOCSERCONFIG	0x5453
 #define TIOCSERGWILD	0x5454
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 7bee4e4..3e74ca5 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -36,7 +36,6 @@
 generic-y += resource.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
 generic-y += stat.h
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 6e1242d..4104a08 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -86,8 +86,6 @@ struct task_struct;
 #define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
 #define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
 
-#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
-
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
 			 unsigned long usp);
 
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index fc8211f..666613f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -470,7 +470,7 @@ void __init setup_arch(char **cmdline_p)
 void __init time_init(void)
 {
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
 
 static int __init customize_machine(void)
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3e25e8d..2e13683 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c1a35f..abd59fa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,6 +25,7 @@
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
 	select GENERIC_ALLOCATOR
+	select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
 	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_CPU_AUTOPROBE
@@ -337,7 +338,7 @@
 	select ARM_HAS_SG_CHAIN
 	select ARM_PATCH_PHYS_VIRT
 	select AUTO_ZRELADDR
-	select CLKSRC_OF
+	select TIMER_OF
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
 	select MIGHT_HAVE_PCI
@@ -351,7 +352,7 @@
 	depends on !MMU
 	select ARM_NVIC
 	select AUTO_ZRELADDR
-	select CLKSRC_OF
+	select TIMER_OF
 	select COMMON_CLK
 	select CPU_V7M
 	select GENERIC_CLOCKEVENTS
@@ -532,7 +533,7 @@
 	select CLKDEV_LOOKUP
 	select CLKSRC_PXA
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 	select CPU_XSCALE if !CPU_XSC3
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
@@ -571,7 +572,7 @@
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
 	select CLKSRC_PXA
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	select CPU_FREQ
 	select CPU_SA1100
 	select GENERIC_CLOCKEVENTS
@@ -711,6 +712,8 @@
 #
 source "arch/arm/mach-mvebu/Kconfig"
 
+source "arch/arm/mach-actions/Kconfig"
+
 source "arch/arm/mach-alpine/Kconfig"
 
 source "arch/arm/mach-artpec/Kconfig"
@@ -1357,7 +1360,7 @@
 
 config HAVE_ARM_TWD
 	bool
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	help
 	  This options enables support for the ARM timer and watchdog unit
 
@@ -1416,6 +1419,7 @@
 	config VMSPLIT_3G
 		bool "3G/1G user/kernel split"
 	config VMSPLIT_3G_OPT
+		depends on !ARM_LPAE
 		bool "3G/1G user/kernel split (for full 1G low memory)"
 	config VMSPLIT_2G
 		bool "2G/2G user/kernel split"
@@ -1460,6 +1464,7 @@
 # selected platforms.
 config ARCH_NR_GPIO
 	int
+	default 2048 if ARCH_SOCFPGA
 	default 1024 if ARCH_BRCMSTB || ARCH_SHMOBILE || ARCH_TEGRA || \
 		ARCH_ZYNQ
 	default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || \
@@ -1637,7 +1642,7 @@
 config HAVE_ARCH_PFN_VALID
 	def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
 
-config HAVE_GENERIC_RCU_GUP
+config HAVE_GENERIC_GUP
 	def_bool y
 	depends on ARM_LPAE
 
@@ -2061,6 +2066,23 @@
 	  is only useful for kernels that may run on systems that have
 	  UEFI firmware.
 
+config DMI
+	bool "Enable support for SMBIOS (DMI) tables"
+	depends on EFI
+	default y
+	help
+	  This enables SMBIOS/DMI feature for systems.
+
+	  This option is only useful on systems that have UEFI firmware.
+	  However, even with this option, the resultant kernel should
+	  continue to boot on existing non-UEFI platforms.
+
+	  NOTE: This does *NOT* enable or encourage the use of DMI quirks,
+	  i.e., the practice of identifying the platform via DMI to
+	  decide whether certain workarounds for buggy hardware and/or
+	  firmware need to be enabled. This would require the DMI subsystem
+	  to be enabled much earlier than we do on ARM, which is non-trivial.
+
 endmenu
 
 menu "CPU Power Management"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 426d271..447629d 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -145,6 +145,15 @@
 		  Say Y here if you want kernel low-level debugging support
 		  on the USART3 port of sama5d4.
 
+	config DEBUG_AT91_SAMV7_USART1
+		bool "Kernel low-level debugging via SAMV7 USART1"
+		select DEBUG_AT91_UART
+		depends on SOC_SAMV7
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the USART1 port on SAMV7 based
+		  machines.
+
 	config DEBUG_BCM2835
 		bool "Kernel low-level debugging on BCM2835 PL011 UART"
 		depends on ARCH_BCM2835 && ARCH_MULTI_V6
@@ -751,6 +760,7 @@
 		  ARCH      DEBUG_UART_PHYS   DEBUG_UART_VIRT
 		  APQ8064   0x16640000        0xf0040000
 		  APQ8084   0xf995e000        0xfa75e000
+		  IPQ4019   0x078af000        0xf78af000
 		  MSM8X60   0x19c40000        0xf0040000
 		  MSM8960   0x16440000        0xf0040000
 		  MSM8974   0xf991e000        0xfa71e000
@@ -776,6 +786,30 @@
 		  their output to the standard serial port on the RealView
 		  PB1176 platform.
 
+	config DEBUG_RV1108_UART0
+		bool "Kernel low-level debugging messages via Rockchip RV1108 UART0"
+		depends on ARCH_ROCKCHIP
+		select DEBUG_UART_8250
+		help
+		  Say Y here if you want kernel low-level debugging support
+                  on Rockchip RV1108 based platforms.
+
+	config DEBUG_RV1108_UART1
+		bool "Kernel low-level debugging messages via Rockchip RV1108 UART1"
+		depends on ARCH_ROCKCHIP
+		select DEBUG_UART_8250
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on Rockchip RV1108 based platforms.
+
+	config DEBUG_RV1108_UART2
+		bool "Kernel low-level debugging messages via Rockchip RV1108 UART2"
+		depends on ARCH_ROCKCHIP
+		select DEBUG_UART_8250
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on Rockchip RV1108 based platforms.
+
 	config DEBUG_RK29_UART0
 		bool "Kernel low-level debugging messages via Rockchip RK29 UART0"
 		depends on ARCH_ROCKCHIP
@@ -1465,6 +1499,9 @@
 	default 0x10126000 if DEBUG_RK3X_UART1
 	default 0x101f1000 if DEBUG_VERSATILE
 	default 0x101fb000 if DEBUG_NOMADIK_UART
+	default 0x10210000 if DEBUG_RV1108_UART2
+	default 0x10220000 if DEBUG_RV1108_UART1
+	default 0x10230000 if DEBUG_RV1108_UART0
 	default 0x11002000 if DEBUG_MT8127_UART0
 	default 0x11006000 if DEBUG_MT6589_UART0
 	default 0x11009000 if DEBUG_MT8135_UART3
@@ -1481,6 +1518,7 @@
 	default 0x3f201000 if DEBUG_BCM2836
 	default 0x3e000000 if DEBUG_BCM_KONA_UART
 	default 0x4000e400 if DEBUG_LL_UART_EFM32
+	default 0x40028000 if DEBUG_AT91_SAMV7_USART1
 	default 0x40081000 if DEBUG_LPC18XX_UART0
 	default 0x40090000 if DEBUG_LPC32XX
 	default 0x40100000 if DEBUG_PXA_UART1
@@ -1563,6 +1601,9 @@
 
 config DEBUG_UART_VIRT
 	hex "Virtual base address of debug UART"
+	default 0xc881f000 if DEBUG_RV1108_UART2
+	default 0xc8821000 if DEBUG_RV1108_UART1
+	default 0xc8912000 if DEBUG_RV1108_UART0
 	default 0xe0000a00 if DEBUG_NETX_UART
 	default 0xe0010fe0 if ARCH_RPC
 	default 0xf0000be0 if ARCH_EBSA110
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 65f4e2a..47d3a1a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -151,6 +151,7 @@
 
 # Machine directory name.  This list is sorted alphanumerically
 # by CONFIG_* macro name.
+machine-$(CONFIG_ARCH_ACTIONS)		+= actions
 machine-$(CONFIG_ARCH_ALPINE)		+= alpine
 machine-$(CONFIG_ARCH_ARTPEC)		+= artpec
 machine-$(CONFIG_ARCH_AT91)		+= at91
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4f..a17ca8d 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,13 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-		mov	r0, r0
+ AR_CLASS(	mov	r0, r0		)
+  M_CLASS(	nop.w			)
 #endif
 		.endm
 
 		.macro	__EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-		b	__efi_start
-
 		.set	start_offset, __efi_start - start
 		.org	start + 0x3c
 		@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba..8a75687 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@
 		.rept	7
 		__nop
 		.endr
-   ARM(		mov	r0, r0		)
-   ARM(		b	1f		)
- THUMB(		badr	r12, 1f		)
- THUMB(		bx	r12		)
+#ifndef CONFIG_THUMB2_KERNEL
+		mov	r0, r0
+#else
+ AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
+  M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
+		.thumb
+#endif
+		W(b)	1f
 
 		.word	_magic_sig	@ Magic numbers to help the loader
 		.word	_magic_start	@ absolute load/run zImage address
 		.word	_magic_end	@ zImage end address
 		.word	0x04030201	@ endianness flag
 
- THUMB(		.thumb			)
-1:		__EFI_HEADER
-
+		__EFI_HEADER
+1:
  ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
  AR_CLASS(	mrs	r9, cpsr	)
 #ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 9c5e1d9..4b17f35 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -72,6 +72,7 @@
 	bcm2835-rpi-b-plus.dtb \
 	bcm2835-rpi-a-plus.dtb \
 	bcm2836-rpi-2-b.dtb \
+	bcm2837-rpi-3-b.dtb \
 	bcm2835-rpi-zero.dtb
 dtb-$(CONFIG_ARCH_BCM_5301X) += \
 	bcm4708-asus-rt-ac56u.dtb \
@@ -204,7 +205,8 @@
 	keystone-k2hk-evm.dtb \
 	keystone-k2l-evm.dtb \
 	keystone-k2e-evm.dtb \
-	keystone-k2g-evm.dtb
+	keystone-k2g-evm.dtb \
+	keystone-k2g-ice.dtb
 dtb-$(CONFIG_MACH_KIRKWOOD) += \
 	kirkwood-b3.dtb \
 	kirkwood-blackarmor-nas220.dtb \
@@ -363,6 +365,7 @@
 	imx6dl-gw551x.dtb \
 	imx6dl-gw552x.dtb \
 	imx6dl-gw553x.dtb \
+	imx6dl-gw560x.dtb \
 	imx6dl-gw5903.dtb \
 	imx6dl-gw5904.dtb \
 	imx6dl-hummingboard.dtb \
@@ -408,6 +411,7 @@
 	imx6q-gw551x.dtb \
 	imx6q-gw552x.dtb \
 	imx6q-gw553x.dtb \
+	imx6q-gw560x.dtb \
 	imx6q-gw5903.dtb \
 	imx6q-gw5904.dtb \
 	imx6q-h100.dtb \
@@ -476,6 +480,7 @@
 	imx7d-cl-som-imx7.dtb \
 	imx7d-colibri-eval-v3.dtb \
 	imx7d-nitrogen7.dtb \
+	imx7d-pico.dtb \
 	imx7d-sbc-imx7.dtb \
 	imx7d-sdb.dtb \
 	imx7d-sdb-sht11.dtb \
@@ -603,6 +608,7 @@
 	am335x-bone.dtb \
 	am335x-boneblack.dtb \
 	am335x-boneblack-wireless.dtb \
+	am335x-boneblue.dtb \
 	am335x-bonegreen.dtb \
 	am335x-bonegreen-wireless.dtb \
 	am335x-chiliboard.dtb \
@@ -663,6 +669,8 @@
 	orion5x-maxtor-shared-storage-2.dtb \
 	orion5x-netgear-wnr854t.dtb \
 	orion5x-rd88f5182-nas.dtb
+dtb-$(CONFIG_ARCH_ACTIONS) += \
+	owl-s500-guitar-bb-rev-b.dtb
 dtb-$(CONFIG_ARCH_PRIMA2) += \
 	prima2-evb.dtb
 dtb-$(CONFIG_ARCH_OXNAS) += \
@@ -701,9 +709,11 @@
 dtb-$(CONFIG_ARCH_RENESAS) += \
 	emev2-kzm9d.dtb \
 	r7s72100-genmai.dtb \
+	r7s72100-gr-peach.dtb \
 	r7s72100-rskrza1.dtb \
 	r8a73a4-ape6evm.dtb \
 	r8a7740-armadillo800eva.dtb \
+	r8a7743-iwg20d-q7.dtb \
 	r8a7743-sk-rzg1m.dtb \
 	r8a7745-sk-rzg1e.dtb \
 	r8a7778-bockw.dtb \
@@ -718,7 +728,7 @@
 	r8a7794-silk.dtb \
 	sh73a0-kzm9g.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += \
-	rk1108-evb.dtb \
+	rv1108-evb.dtb \
 	rk3036-evb.dtb \
 	rk3036-kylin.dtb \
 	rk3066a-bqcurie2.dtb \
@@ -790,9 +800,12 @@
 dtb-$(CONFIG_ARCH_STM32)+= \
 	stm32f429-disco.dtb \
 	stm32f469-disco.dtb \
+	stm32f746-disco.dtb \
+	stm32f769-disco.dtb \
 	stm32429i-eval.dtb \
 	stm32746g-eval.dtb \
-	stm32h743i-eval.dtb
+	stm32h743i-eval.dtb \
+	stm32h743i-disco.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-a1000.dtb \
 	sun4i-a10-ba10-tvbox.dtb \
@@ -895,6 +908,7 @@
 	sun8i-h3-bananapi-m2-plus.dtb \
 	sun8i-h3-beelink-x2.dtb \
 	sun8i-h3-nanopi-m1.dtb	\
+	sun8i-h3-nanopi-m1-plus.dtb \
 	sun8i-h3-nanopi-neo.dtb \
 	sun8i-h3-nanopi-neo-air.dtb \
 	sun8i-h3-orangepi-2.dtb \
@@ -905,7 +919,8 @@
 	sun8i-h3-orangepi-plus.dtb \
 	sun8i-h3-orangepi-plus2e.dtb \
 	sun8i-r16-parrot.dtb \
-	sun8i-v3s-licheepi-zero.dtb
+	sun8i-v3s-licheepi-zero.dtb \
+	sun8i-v3s-licheepi-zero-dock.dtb
 dtb-$(CONFIG_MACH_SUN9I) += \
 	sun9i-a80-optimus.dtb \
 	sun9i-a80-cubieboard4.dtb
@@ -920,8 +935,7 @@
 	tegra20-seaboard.dtb \
 	tegra20-tec.dtb \
 	tegra20-trimslice.dtb \
-	tegra20-ventana.dtb \
-	tegra20-whistler.dtb
+	tegra20-ventana.dtb
 dtb-$(CONFIG_ARCH_TEGRA_3x_SOC) += \
 	tegra30-apalis-eval.dtb \
 	tegra30-beaver.dtb \
@@ -999,6 +1013,7 @@
 	armada-385-db-ap.dtb \
 	armada-385-linksys-caiman.dtb \
 	armada-385-linksys-cobra.dtb \
+	armada-385-linksys-rango.dtb \
 	armada-385-linksys-shelby.dtb \
 	armada-385-synology-ds116.dtb \
 	armada-385-turris-omnia.dtb \
diff --git a/arch/arm/boot/dts/aks-cdu.dts b/arch/arm/boot/dts/aks-cdu.dts
index 5b1bf92..888bfcd 100644
--- a/arch/arm/boot/dts/aks-cdu.dts
+++ b/arch/arm/boot/dts/aks-cdu.dts
@@ -62,32 +62,36 @@
 			status = "okay";
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
-			status = "okay";
+		ebi: ebi@10000000 {
+			nand_controller: nand-controller {
+				nand: nand@3 {
+					partitions {
+						bootstrap@0 {
+							label = "bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			bootstrap@0 {
-				label = "bootstrap";
-				reg = <0x0 0x40000>;
-			};
+						uboot@40000 {
+							label = "uboot";
+							reg = <0x40000 0x80000>;
+						};
 
-			uboot@40000 {
-				label = "uboot";
-				reg = <0x40000 0x80000>;
-			};
-			ubootenv@c0000 {
-				label = "ubootenv";
-				reg = <0xc0000 0x40000>;
-			};
-			kernel@100000 {
-				label = "kernel";
-				reg = <0x100000 0x400000>;
-			};
-			rootfs@500000 {
-				label = "rootfs";
-				reg = <0x500000 0x7b00000>;
+						ubootenv@c0000 {
+							label = "ubootenv";
+							reg = <0xc0000 0x40000>;
+						};
+
+						kernel@100000 {
+							label = "kernel";
+							reg = <0x100000 0x400000>;
+						};
+
+						rootfs@500000 {
+							label = "rootfs";
+							reg = <0x500000 0x7b00000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index d42b98f..ec6052c 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -255,7 +255,7 @@
 	};
 
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		pagesize = <8>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index c2bee45..29782be 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -89,7 +89,7 @@
 
 &i2c0 {
 	eeprom: eeprom@50 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x50>;
 	};
 };
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index bf6b26a..1d15444 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -232,7 +232,7 @@
 	};
 
 	baseboard_eeprom: baseboard_eeprom@50 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x50>;
 
 		#address-cells = <1>;
@@ -251,7 +251,7 @@
 	clock-frequency = <100000>;
 
 	cape_eeprom0: cape_eeprom0@54 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x54>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -261,7 +261,7 @@
 	};
 
 	cape_eeprom1: cape_eeprom1@55 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x55>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -271,7 +271,7 @@
 	};
 
 	cape_eeprom2: cape_eeprom2@56 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x56>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -281,7 +281,7 @@
 	};
 
 	cape_eeprom3: cape_eeprom3@57 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x57>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/am335x-boneblack-wireless.dts b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
index 105bd10..83f49f6 100644
--- a/arch/arm/boot/dts/am335x-boneblack-wireless.dts
+++ b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
@@ -97,6 +97,11 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart3_pins &bt_pins>;
 	status = "okay";
+
+	bluetooth {
+		compatible = "ti,wl1835-st";
+		enable-gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+	};
 };
 
 &gpio3 {
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 935ed17..d154d31 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -22,7 +22,7 @@
 	 * BeagleBone Blacks have PG 2.0 silicon which is guaranteed
 	 * to support 1GHz OPP so enable it for PG 2.0 on this board.
 	 */
-	oppnitro@1000000000 {
+	oppnitro-1000000000 {
 		opp-supported-hw = <0x06 0x0100>;
 	};
 };
diff --git a/arch/arm/boot/dts/am335x-boneblue.dts b/arch/arm/boot/dts/am335x-boneblue.dts
new file mode 100644
index 0000000..cdc1b2b
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-boneblue.dts
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "am33xx.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	model = "TI AM335x BeagleBone Blue";
+	compatible = "ti,am335x-bone-blue", "ti,am33xx";
+
+	cpus {
+		cpu@0 {
+			cpu0-supply = <&dcdc2_reg>;
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x80000000 0x20000000>; /* 512 MB */
+	};
+
+	chosen {
+		stdout-path = &uart0;
+	};
+
+	leds {
+		pinctrl-names = "default";
+		pinctrl-0 = <&user_leds_s0>;
+
+		compatible = "gpio-leds";
+
+		usr_0_led {
+			label = "beaglebone:green:usr0";
+			gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+
+		usr_1_led {
+			label = "beaglebone:green:usr1";
+			gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc0";
+			default-state = "off";
+		};
+
+		usr_2_led {
+			label = "beaglebone:green:usr2";
+			gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "cpu0";
+			default-state = "off";
+		};
+
+		usr_3_led {
+			label = "beaglebone:green:usr3";
+			gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc1";
+			default-state = "off";
+		};
+
+		wifi_led {
+			label = "wifi";
+			gpios = <&gpio0 19 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "phy0assoc";
+		};
+
+		red_led {
+			label = "red";
+			gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		green_led {
+			label = "green";
+			gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		batt_1_led {
+			label = "bat25";
+			gpios = <&gpio0 27 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		batt_2_led {
+			label = "bat50";
+			gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		batt_3_led {
+			label = "bat75";
+			gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		batt_4_led {
+			label = "bat100";
+			gpios = <&gpio0 26 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
+
+	vmmcsd_fixed: fixedregulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmcsd_fixed";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	wlan_en_reg: fixedregulator@2 {
+		compatible = "regulator-fixed";
+		regulator-name = "wlan-en-regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		startup-delay-us = <70000>;
+
+		/* WL_EN */
+		gpio = <&gpio3 9 0>;
+		enable-active-high;
+	};
+};
+
+&am33xx_pinmux {
+	user_leds_s0: user_leds_s0 {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
+			AM33XX_IOPAD(0x858, PIN_OUTPUT | MUX_MODE7) /* (U15) gpmc_a6.gpio1[22] - USR_LED_1 */
+			AM33XX_IOPAD(0x85c, PIN_OUTPUT | MUX_MODE7) /* (T15) gpmc_a7.gpio1[23] - USR_LED_2 */
+			AM33XX_IOPAD(0x860, PIN_OUTPUT | MUX_MODE7) /* (V16) gpmc_a8.gpio1[24] - USR_LED_3 */
+			AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] - WIFI_LED */
+			AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE7) /* (R7) gpmc_advn_ale.gpio2[2] - P8.7, LED_RED, GP1_PIN_5 */
+			AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE7) /* (T7) gpmc_oen_ren.gpio2[3] - P8.8, LED_GREEN, GP1_PIN_6 */
+			AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE7) /* (U12) gpmc_ad11.gpio0[27] - P8.17, BATT_LED_1 */
+			AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE7) /* (T5) lcd_data15.gpio0[11] - P8.32, BATT_LED_2 */
+			AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE7) /* (V6) gpmc_csn0.gpio1[29] - P8.26, BATT_LED_3 */
+			AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE7) /* (T11) gpmc_ad10.gpio0[26] - P8.14, BATT_LED_4 */
+
+		>;
+	};
+
+	i2c0_pins: pinmux_i2c0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0)	/* (C17) I2C0_SDA.I2C0_SDA */
+			AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0)	/* (C16) I2C0_SCL.I2C0_SCL */
+		>;
+	};
+
+	i2c2_pins: pinmux_i2c2_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3)	/* (D18) uart1_ctsn.I2C2_SDA */
+			AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3)	/* (D17) uart1_rtsn.I2C2_SCL */
+		>;
+	};
+
+	uart0_pins: pinmux_uart0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* (E15) uart0_rxd.uart0_rxd */
+			AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* (E16) uart0_txd.uart0_txd */
+		>;
+	};
+
+	mmc1_pins: pinmux_mmc1_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7)		/* (C15) spi0_cs1.gpio0[6] */
+		>;
+	};
+
+	mmc2_pins: pinmux_mmc2_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2)	/* (U9) gpmc_csn1.mmc1_clk */
+			AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2)	/* (V9) gpmc_csn2.mmc1_cmd */
+			AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1)	/* (U7) gpmc_ad0.mmc1_dat0 */
+			AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1)	/* (V7) gpmc_ad1.mmc1_dat1 */
+			AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1)	/* (R8) gpmc_ad2.mmc1_dat2 */
+			AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1)	/* (T8) gpmc_ad3.mmc1_dat3 */
+			AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1)	/* (U8) gpmc_ad4.mmc1_dat4 */
+			AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1)	/* (V8) gpmc_ad5.mmc1_dat5 */
+			AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1)	/* (R9) gpmc_ad6.mmc1_dat6 */
+			AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1)	/* (T9) gpmc_ad7.mmc1_dat7 */
+		>;
+	};
+
+	mmc3_pins: pinmux_mmc3_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE6)	/* (L15) gmii1_rxd1.mmc2_clk */
+			AM33XX_IOPAD(0x914, PIN_INPUT_PULLUP | MUX_MODE6)	/* (J16) gmii1_txen.mmc2_cmd */
+			AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE5)	/* (J17) gmii1_rxdv.mmc2_dat0 */
+			AM33XX_IOPAD(0x91c, PIN_INPUT_PULLUP | MUX_MODE5)	/* (J18) gmii1_txd3.mmc2_dat1 */
+			AM33XX_IOPAD(0x920, PIN_INPUT_PULLUP | MUX_MODE5)	/* (K15) gmii1_txd2.mmc2_dat2 */
+			AM33XX_IOPAD(0x908, PIN_INPUT_PULLUP | MUX_MODE5)	/* (H16) gmii1_col.mmc2_dat3 */
+		>;
+	};
+
+	bt_pins: pinmux_bt_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLUP | MUX_MODE7)	/* (K17) gmii1_txd0.gpio0[28] - BT_EN */
+		>;
+	};
+
+	uart3_pins: pinmux_uart3_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1)	/* (L17) gmii1_rxd3.uart3_rxd */
+			AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* (L16) gmii1_rxd2.uart3_txd */
+			AM33XX_IOPAD(0x948, PIN_INPUT | MUX_MODE3)		/* (M17) mdio_data.uart3_ctsn */
+			AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLDOWN | MUX_MODE3)	/* (M18) mdio_clk.uart3_rtsn */
+		>;
+	};
+
+	wl18xx_pins: pinmux_wl18xx_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE7)	/* (K18) gmii1_txclk.gpio3[9] - WL_EN */
+			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)	/* (H18) rmii1_refclk.gpio0[29] - WL_IRQ */
+			AM33XX_IOPAD(0x930, PIN_OUTPUT_PULLUP | MUX_MODE7)	/* (L18) gmii1_rxclk.gpio3[10] - LS_BUF_EN */
+		>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+};
+
+&usb_ctrl_mod {
+	status = "okay";
+};
+
+&usb0_phy {
+	status = "okay";
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "peripheral";
+	interrupts-extended = <&intc 18 &tps 0>;
+	interrupt-names = "mc", "vbus";
+};
+
+&usb1 {
+	status = "okay";
+	dr_mode = "host";
+};
+
+&cppi41dma  {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+
+	tps: tps@24 {
+		reg = <0x24>;
+	};
+
+	baseboard_eeprom: baseboard_eeprom@50 {
+		compatible = "at,24c256";
+		reg = <0x50>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+		baseboard_data: baseboard_data@0 {
+			reg = <0 0x100>;
+		};
+	};
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+
+	mpu9250@68 {
+		compatible = "invensense,mpu9250";
+		reg = <0x68>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <21 GPIO_ACTIVE_LOW>;
+		i2c-gate {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			ax8975@c {
+				compatible = "ak,ak8975";
+				reg = <0x0c>;
+			};
+		};
+	};
+
+	pressure@76 {
+		compatible = "bosch,bmp280";
+		reg = <0x76>;
+	};
+};
+
+/include/ "tps65217.dtsi"
+
+&tps {
+	interrupts = <7>; /* NMI */
+	interrupt-parent = <&intc>;
+
+	charger {
+		interrupts = <0>, <1>;
+		interrupt-names = "USB", "AC";
+		status = "okay";
+	};
+
+	pwrbutton {
+		interrupts = <2>;
+		status = "okay";
+	};
+
+	regulators {
+		dcdc1_reg: regulator@0 {
+			regulator-name = "vdds_dpr";
+			regulator-always-on;
+		};
+
+		dcdc2_reg: regulator@1 {
+			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <1351500>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		dcdc3_reg: regulator@2 {
+			/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <925000>;
+			regulator-max-microvolt = <1150000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		ldo1_reg: regulator@3 {
+			regulator-name = "vio,vrtc,vdds";
+			regulator-always-on;
+		};
+
+		ldo2_reg: regulator@4 {
+			regulator-name = "vdd_3v3aux";
+			regulator-always-on;
+		};
+
+		ldo3_reg: regulator@5 {
+			regulator-name = "vdd_1v8";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+
+		ldo4_reg: regulator@6 {
+			regulator-name = "vdd_3v3a";
+			regulator-always-on;
+		};
+	};
+};
+
+&mmc1 {
+	status = "okay";
+	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>;
+	cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+};
+
+&mmc2 {
+	status = "okay";
+	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <8>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins>;
+};
+
+&mmc3 {
+	dmas = <&edma_xbar 12 0 1
+		&edma_xbar 13 0 2>;
+	dma-names = "tx", "rx";
+	status = "okay";
+	vmmc-supply = <&wlan_en_reg>;
+	bus-width = <4>;
+	non-removable;
+	cap-power-off-card;
+	ti,needs-special-hs-handling;
+	keep-power-in-suspend;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1835";
+		reg = <2>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <29 IRQ_TYPE_EDGE_RISING>;
+	};
+};
+
+&tscadc {
+	status = "okay";
+	adc {
+		ti,adc-channels = <0 1 2 3 4 5 6 7>;
+	};
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins &bt_pins>;
+	status = "okay";
+
+	bluetooth {
+		compatible = "ti,wl1835-st";
+		enable-gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&aes {
+	status = "okay";
+};
+
+&sham {
+	status = "okay";
+};
+
+&rtc {
+	system-power-controller;
+	clocks = <&clk_32768_ck>, <&clkdiv32k_ick>;
+	clock-names = "ext-clk", "int-clk";
+};
+
+&gpio3 {
+	ls_buf_en {
+		gpio-hog;
+		gpios = <10 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "LS_BUF_EN";
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
index 9d1a0fd..57731f0 100644
--- a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
@@ -97,6 +97,11 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart3_pins &bt_pins>;
 	status = "okay";
+
+	bluetooth {
+		compatible = "ti,wl1835-st";
+		enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+	};
 };
 
 &gpio1 {
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 807494b..946d706 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -224,7 +224,7 @@
 	};
 
 	eeprom@53 {
-		compatible = "microchip,24c02";
+		compatible = "microchip,24c02", "atmel,24c02";
 		reg = <0x53>;
 		pagesize = <8>;
 	};
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 30e2f87..03c7d77 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -67,7 +67,7 @@
 	};
 
 	eeprom: eeprom@50 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x50>;
 	};
 
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 14533ff..428a25e 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -138,7 +138,7 @@
 	};
 
 	i2c_rtc: rtc@68 {
-		compatible = "rv4162";
+		compatible = "microcrystal,rv4162";
 		reg = <0x68>;
 		status = "disabled";
 	};
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index bf8727a..4f6a286 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -188,7 +188,7 @@
 	};
 
 	at24@50 {
-		compatible = "at24,24c32";
+		compatible = "atmel,24c32";
 		pagesize = <32>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index c5d2589..1bcc604 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -220,7 +220,7 @@
 
 	mmc1_pins: pinmux_mmc1_pins {
 		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7)		/* spi0_cs1.gpio0_6 */
+			AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7)		/* uart0_rtsn.gpio1_9 */
 		>;
 	};
 
@@ -280,10 +280,6 @@
 			AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7)	/* nKbdReset - gpmc_ad13.gpio1_13 */
 			AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7)	/* nDispReset - gpmc_ad14.gpio1_14 */
 			AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7)	/* USB1_enPower - gpmc_a1.gpio1_17 */
-			/* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
-			AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
-			AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
-			AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7)	/* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
 			/* PDI Bus - Battery system */
 			AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7)	/* nBattReset  gpmc_a0.gpio1_16 */
 			AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7)	/* BattPDIData gpmc_ad15.gpio1_15 */
@@ -309,7 +305,7 @@
 	};
 
 	eeprom: eeprom@50 {
-		compatible = "at,24c256";
+		compatible = "atmel,24c256";
 		reg = <0x50>;
 	};
 
@@ -384,7 +380,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins>;
 	bus-width = <4>;
-	cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+	cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&vmmcsd_fixed>;
 };
 
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 9e24294..7d7ca05 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -64,64 +64,64 @@
 		 * because they cannot be enabled simultaneously on a
 		 * single SoC.
 		 */
-		opp50@300000000 {
+		opp50-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <950000 931000 969000>;
 			opp-supported-hw = <0x06 0x0010>;
 			opp-suspend;
 		};
 
-		opp100@275000000 {
+		opp100-275000000 {
 			opp-hz = /bits/ 64 <275000000>;
 			opp-microvolt = <1100000 1078000 1122000>;
 			opp-supported-hw = <0x01 0x00FF>;
 			opp-suspend;
 		};
 
-		opp100@300000000 {
+		opp100-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <1100000 1078000 1122000>;
 			opp-supported-hw = <0x06 0x0020>;
 			opp-suspend;
 		};
 
-		opp100@500000000 {
+		opp100-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			opp-microvolt = <1100000 1078000 1122000>;
 			opp-supported-hw = <0x01 0xFFFF>;
 		};
 
-		opp100@600000000 {
+		opp100-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1100000 1078000 1122000>;
 			opp-supported-hw = <0x06 0x0040>;
 		};
 
-		opp120@600000000 {
+		opp120-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1200000 1176000 1224000>;
 			opp-supported-hw = <0x01 0xFFFF>;
 		};
 
-		opp120@720000000 {
+		opp120-720000000 {
 			opp-hz = /bits/ 64 <720000000>;
 			opp-microvolt = <1200000 1176000 1224000>;
 			opp-supported-hw = <0x06 0x0080>;
 		};
 
-		oppturbo@720000000 {
+		oppturbo-720000000 {
 			opp-hz = /bits/ 64 <720000000>;
 			opp-microvolt = <1260000 1234800 1285200>;
 			opp-supported-hw = <0x01 0xFFFF>;
 		};
 
-		oppturbo@800000000 {
+		oppturbo-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1260000 1234800 1285200>;
 			opp-supported-hw = <0x06 0x0100>;
 		};
 
-		oppnitro@1000000000 {
+		oppnitro-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <1325000 1298500 1351500>;
 			opp-supported-hw = <0x04 0x0200>;
@@ -431,7 +431,6 @@
 				&edma_xbar 25 0 0>;
 			dma-names = "tx", "rx";
 			interrupts = <64>;
-			interrupt-parent = <&intc>;
 			reg = <0x48060000 0x1000>;
 			status = "disabled";
 		};
@@ -444,7 +443,6 @@
 				&edma 3 0>;
 			dma-names = "tx", "rx";
 			interrupts = <28>;
-			interrupt-parent = <&intc>;
 			reg = <0x481d8000 0x1000>;
 			status = "disabled";
 		};
@@ -454,7 +452,6 @@
 			ti,hwmods = "mmc3";
 			ti,needs-special-reset;
 			interrupts = <29>;
-			interrupt-parent = <&intc>;
 			reg = <0x47810000 0x1000>;
 			status = "disabled";
 		};
@@ -853,7 +850,6 @@
 			       0x4a101200 0x100>;
 			#address-cells = <1>;
 			#size-cells = <1>;
-			interrupt-parent = <&intc>;
 			/*
 			 * c0_rx_thresh_pend
 			 * c0_rx_pend
@@ -908,7 +904,6 @@
 		lcdc: lcdc@4830e000 {
 			compatible = "ti,am33xx-tilcdc";
 			reg = <0x4830e000 0x1000>;
-			interrupt-parent = <&intc>;
 			interrupts = <36>;
 			ti,hwmods = "lcdc";
 			status = "disabled";
@@ -917,7 +912,6 @@
 		tscadc: tscadc@44e0d000 {
 			compatible = "ti,am3359-tscadc";
 			reg = <0x44e0d000 0x1000>;
-			interrupt-parent = <&intc>;
 			interrupts = <16>;
 			ti,hwmods = "adc_tsc";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 176e09e..e5b0614 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -59,32 +59,32 @@
 		compatible = "operating-points-v2-ti-cpu";
 		syscon = <&scm_conf>;
 
-		opp50@300000000 {
+		opp50-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			opp-microvolt = <950000 931000 969000>;
 			opp-supported-hw = <0xFF 0x01>;
 			opp-suspend;
 		};
 
-		opp100@600000000 {
+		opp100-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1100000 1078000 1122000>;
 			opp-supported-hw = <0xFF 0x04>;
 		};
 
-		opp120@720000000 {
+		opp120-720000000 {
 			opp-hz = /bits/ 64 <720000000>;
 			opp-microvolt = <1200000 1176000 1224000>;
 			opp-supported-hw = <0xFF 0x08>;
 		};
 
-		oppturbo@800000000 {
+		oppturbo-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1260000 1234800 1285200>;
 			opp-supported-hw = <0xFF 0x10>;
 		};
 
-		oppnitro@1000000000 {
+		oppnitro-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <1325000 1298500 1351500>;
 			opp-supported-hw = <0xFF 0x20>;
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 397e98b..29a538e 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -549,8 +549,6 @@
 
 		dcdc3: regulator-dcdc3 {
 			regulator-name = "vdcdc3";
-			regulator-min-microvolt = <1500000>;
-			regulator-max-microvolt = <1500000>;
 			regulator-boot-on;
 			regulator-always-on;
 			regulator-state-mem {
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index c1f7f93..5e36447 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -339,7 +339,7 @@
 	clock-frequency = <400000>;
 
 	at24@50 {
-		compatible = "at24,24c256";
+		compatible = "atmel,24c256";
 		pagesize = <64>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 4dc54be..2c6bf06 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -451,8 +451,6 @@
 
 		dcdc3: regulator-dcdc3 {
 			regulator-name = "vdds_ddr";
-			regulator-min-microvolt = <1500000>;
-			regulator-max-microvolt = <1500000>;
 			regulator-boot-on;
 			regulator-always-on;
 			regulator-state-mem {
@@ -511,7 +509,7 @@
 	};
 
 	at24@50 {
-		compatible = "at24,24c256";
+		compatible = "atmel,24c256";
 		pagesize = <64>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9acd4cc..54f40f3 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -442,8 +442,6 @@
 
 		dcdc3: regulator-dcdc3 {
 			regulator-name = "vdcdc3";
-			regulator-min-microvolt = <1500000>;
-			regulator-max-microvolt = <1500000>;
 			regulator-boot-on;
 			regulator-always-on;
 		};
@@ -477,7 +475,7 @@
 	};
 
 	at24@50 {
-		compatible = "at24,24c256";
+		compatible = "atmel,24c256";
 		pagesize = <64>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index d1d73b72..430be58 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -833,4 +833,40 @@
 		ti,bit-shift = <23>;
 		reg = <0x4100>;
 	};
+
+	clkout2_src_mux_ck: clkout2_src_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
+			 <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
+			 <&dpll_mpu_m2_ck>, <&dpll_extdev_ck>;
+		reg = <0x4108>;
+	};
+
+	clkout2_pre_div_ck: clkout2_pre_div_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout2_src_mux_ck>;
+		ti,bit-shift = <4>;
+		ti,max-div = <8>;
+		reg = <0x4108>;
+	};
+
+	clkout2_post_div_ck: clkout2_post_div_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout2_pre_div_ck>;
+		ti,bit-shift = <8>;
+		ti,max-div = <32>;
+		ti,index-power-of-two;
+		reg = <0x4108>;
+	};
+
+	clkout2_ck: clkout2_ck {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&clkout2_post_div_ck>;
+		ti,bit-shift = <16>;
+		reg = <0x4108>;
+	};
 };
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index ad68d1e..7b20783 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -79,3 +79,20 @@
 	id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
 	vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
 };
+
+&mailbox5 {
+	status = "okay";
+	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+		status = "okay";
+	};
+};
+
+&mailbox6 {
+	status = "okay";
+	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+		status = "okay";
+	};
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index 8350b4b..9da6d83 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -91,3 +91,23 @@
 &pcie1 {
 	gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
 };
+
+&mailbox5 {
+	status = "okay";
+	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+		status = "okay";
+	};
+};
+
+&mailbox6 {
+	status = "okay";
+	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+		status = "okay";
+	};
+};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 585d792..fdfe5b1 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -388,7 +388,7 @@
 	};
 
 	eeprom: eeprom@50 {
-		compatible = "at,24c32";
+		compatible = "atmel,24c32";
 		reg = <0x50>;
 	};
 };
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 9cc372b..26ade8c 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -83,25 +83,44 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			barebox@0 {
-				label = "barebox";
-				reg = <0x0 0x58000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			u_boot_env@58000 {
-				label = "u_boot_env";
-				reg = <0x58000 0x8000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			ubi@60000 {
-				label = "ubi";
-				reg = <0x60000 0x1FA0000>;
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						barebox@0 {
+							label = "barebox";
+							reg = <0x0 0x58000>;
+						};
+
+						u_boot_env@58000 {
+							label = "u_boot_env";
+							reg = <0x58000 0x8000>;
+						};
+
+						ubi@60000 {
+							label = "ubi";
+							reg = <0x60000 0x1FA0000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index cc011c8..f9cf127 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -137,29 +137,38 @@
 			};
 
 			gpio0: gpio@18100 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18100 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18100 0x40>, <0x181c0 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio1: gpio@18140 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18140 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18140 0x40>, <0x181c8 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio2: gpio@18180 {
-				compatible = "marvell,orion-gpio";
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
 				reg = <0x18180 0x40>;
 				ngpios = <2>;
 				gpio-controller;
diff --git a/arch/arm/boot/dts/armada-385-linksys-caiman.dts b/arch/arm/boot/dts/armada-385-linksys-caiman.dts
index f3cee91..ee669ae 100644
--- a/arch/arm/boot/dts/armada-385-linksys-caiman.dts
+++ b/arch/arm/boot/dts/armada-385-linksys-caiman.dts
@@ -44,71 +44,128 @@
 	model = "Linksys WRT1200AC";
 	compatible = "linksys,caiman", "linksys,armada385", "marvell,armada385",
 		     "marvell,armada380";
+};
 
-	soc {
-		internal-regs{
-			i2c@11000 {
-
-				pca9635@68 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-					wan_amber@0 {
-						label = "caiman:amber:wan";
-						reg = <0x0>;
-					};
-
-					wan_white@1 {
-						label = "caiman:white:wan";
-						reg = <0x1>;
-					};
-
-					wlan_2g@2 {
-						label = "caiman:white:wlan_2g";
-						reg = <0x2>;
-					};
-
-					wlan_5g@3 {
-						label = "caiman:white:wlan_5g";
-						reg = <0x3>;
-					};
-
-					usb2@5 {
-						label = "caiman:white:usb2";
-						reg = <0x5>;
-					};
-
-					usb3_1@6 {
-						label = "caiman:white:usb3_1";
-						reg = <0x6>;
-					};
-
-					usb3_2@7 {
-						label = "caiman:white:usb3_2";
-						reg = <0x7>;
-					};
-
-					wps_white@8 {
-						label = "caiman:white:wps";
-						reg = <0x8>;
-					};
-
-					wps_amber@9 {
-						label = "caiman:amber:wps";
-						reg = <0x9>;
-					};
-				};
-			};
-		};
+&expander0 {
+	wan_amber@0 {
+		label = "caiman:amber:wan";
+		reg = <0x0>;
 	};
 
-	gpio-leds {
-		power {
-			label = "caiman:white:power";
-		};
+	wan_white@1 {
+		label = "caiman:white:wan";
+		reg = <0x1>;
+	};
 
-		sata {
-			label = "caiman:white:sata";
-		};
+	wlan_2g@2 {
+		label = "caiman:white:wlan_2g";
+		reg = <0x2>;
+	};
+
+	wlan_5g@3 {
+		label = "caiman:white:wlan_5g";
+		reg = <0x3>;
+	};
+
+	usb2@5 {
+		label = "caiman:white:usb2";
+		reg = <0x5>;
+	};
+
+	usb3_1@6 {
+		label = "caiman:white:usb3_1";
+		reg = <0x6>;
+	};
+
+	usb3_2@7 {
+		label = "caiman:white:usb3_2";
+		reg = <0x7>;
+	};
+
+	wps_white@8 {
+		label = "caiman:white:wps";
+		reg = <0x8>;
+	};
+
+	wps_amber@9 {
+		label = "caiman:amber:wps";
+		reg = <0x9>;
+	};
+};
+
+&gpio_leds {
+	power {
+		label = "caiman:white:power";
+	};
+
+	sata {
+		label = "caiman:white:sata";
+	};
+};
+
+&nand {
+	/* 128MiB */
+
+	partition@0 {
+		label = "u-boot";
+		reg = <0x0000000 0x200000>;  /* 2MiB */
+		read-only;
+	};
+
+	partition@100000 {
+		label = "u_env";
+		reg = <0x200000 0x40000>;    /* 256KiB */
+	};
+
+	partition@140000 {
+		label = "s_env";
+		reg = <0x240000 0x40000>;    /* 256KiB */
+	};
+
+	partition@900000 {
+		label = "devinfo";
+		reg = <0x900000 0x100000>;   /* 1MiB */
+		read-only;
+	};
+
+	/* kernel1 overlaps with rootfs1 by design */
+	partition@a00000 {
+		label = "kernel1";
+		reg = <0xa00000 0x2800000>;  /* 40MiB */
+	};
+
+	partition@1000000 {
+		label = "rootfs1";
+		reg = <0x1000000 0x2200000>;  /* 34MiB */
+	};
+
+	/* kernel2 overlaps with rootfs2 by design */
+	partition@3200000 {
+		label = "kernel2";
+		reg = <0x3200000 0x2800000>; /* 40MiB */
+	};
+
+	partition@3800000 {
+		label = "rootfs2";
+		reg = <0x3800000 0x2200000>; /* 34MiB */
+	};
+
+	/*
+	 * 38MiB, last MiB is for the BBT, not writable
+	 */
+	partition@5a00000 {
+		label = "syscfg";
+		reg = <0x5a00000 0x2600000>;
+	};
+
+	/*
+	 * Unused area between "s_env" and "devinfo".
+	 * Moved here because otherwise the renumbered
+	 * partitions would break the bootloader
+	 * supplied bootargs
+	 */
+	partition@180000 {
+		label = "unused_area";
+		reg = <0x280000 0x680000>;   /* 6.5MiB */
 	};
 };
diff --git a/arch/arm/boot/dts/armada-385-linksys-cobra.dts b/arch/arm/boot/dts/armada-385-linksys-cobra.dts
index 1110718..5169ca8 100644
--- a/arch/arm/boot/dts/armada-385-linksys-cobra.dts
+++ b/arch/arm/boot/dts/armada-385-linksys-cobra.dts
@@ -44,71 +44,128 @@
 	model = "Linksys WRT1900ACv2";
 	compatible = "linksys,cobra", "linksys,armada385", "marvell,armada385",
 		     "marvell,armada380";
+};
 
-	soc {
-		internal-regs{
-			i2c@11000 {
-
-				pca9635@68 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-					wan_amber@0 {
-						label = "cobra:amber:wan";
-						reg = <0x0>;
-					};
-
-					wan_white@1 {
-						label = "cobra:white:wan";
-						reg = <0x1>;
-					};
-
-					wlan_2g@2 {
-						label = "cobra:white:wlan_2g";
-						reg = <0x2>;
-					};
-
-					wlan_5g@3 {
-						label = "cobra:white:wlan_5g";
-						reg = <0x3>;
-					};
-
-					usb2@5 {
-						label = "cobra:white:usb2";
-						reg = <0x5>;
-					};
-
-					usb3_1@6 {
-						label = "cobra:white:usb3_1";
-						reg = <0x6>;
-					};
-
-					usb3_2@7 {
-						label = "cobra:white:usb3_2";
-						reg = <0x7>;
-					};
-
-					wps_white@8 {
-						label = "cobra:white:wps";
-						reg = <0x8>;
-					};
-
-					wps_amber@9 {
-						label = "cobra:amber:wps";
-						reg = <0x9>;
-					};
-				};
-			};
-		};
+&expander0 {
+	wan_amber@0 {
+		label = "cobra:amber:wan";
+		reg = <0x0>;
 	};
 
-	gpio-leds {
-		power {
-			label = "cobra:white:power";
-		};
+	wan_white@1 {
+		label = "cobra:white:wan";
+		reg = <0x1>;
+	};
 
-		sata {
-			label = "cobra:white:sata";
-		};
+	wlan_2g@2 {
+		label = "cobra:white:wlan_2g";
+		reg = <0x2>;
+	};
+
+	wlan_5g@3 {
+		label = "cobra:white:wlan_5g";
+		reg = <0x3>;
+	};
+
+	usb2@5 {
+		label = "cobra:white:usb2";
+		reg = <0x5>;
+	};
+
+	usb3_1@6 {
+		label = "cobra:white:usb3_1";
+		reg = <0x6>;
+	};
+
+	usb3_2@7 {
+		label = "cobra:white:usb3_2";
+		reg = <0x7>;
+	};
+
+	wps_white@8 {
+		label = "cobra:white:wps";
+		reg = <0x8>;
+	};
+
+	wps_amber@9 {
+		label = "cobra:amber:wps";
+		reg = <0x9>;
+	};
+};
+
+&gpio_leds {
+	power {
+		label = "cobra:white:power";
+	};
+
+	sata {
+		label = "cobra:white:sata";
+	};
+};
+
+&nand {
+	/* 128MiB */
+
+	partition@0 {
+		label = "u-boot";
+		reg = <0x0000000 0x200000>;  /* 2MiB */
+		read-only;
+	};
+
+	partition@100000 {
+		label = "u_env";
+		reg = <0x200000 0x40000>;    /* 256KiB */
+	};
+
+	partition@140000 {
+		label = "s_env";
+		reg = <0x240000 0x40000>;    /* 256KiB */
+	};
+
+	partition@900000 {
+		label = "devinfo";
+		reg = <0x900000 0x100000>;   /* 1MiB */
+		read-only;
+	};
+
+	/* kernel1 overlaps with rootfs1 by design */
+	partition@a00000 {
+		label = "kernel1";
+		reg = <0xa00000 0x2800000>;  /* 40MiB */
+	};
+
+	partition@1000000 {
+		label = "rootfs1";
+		reg = <0x1000000 0x2200000>;  /* 34MiB */
+	};
+
+	/* kernel2 overlaps with rootfs2 by design */
+	partition@3200000 {
+		label = "kernel2";
+		reg = <0x3200000 0x2800000>; /* 40MiB */
+	};
+
+	partition@3800000 {
+		label = "rootfs2";
+		reg = <0x3800000 0x2200000>; /* 34MiB */
+	};
+
+	/*
+	 * 38MiB, last MiB is for the BBT, not writable
+	 */
+	partition@5a00000 {
+		label = "syscfg";
+		reg = <0x5a00000 0x2600000>;
+	};
+
+	/*
+	 * Unused area between "s_env" and "devinfo".
+	 * Moved here because otherwise the renumbered
+	 * partitions would break the bootloader
+	 * supplied bootargs
+	 */
+	partition@180000 {
+		label = "unused_area";
+		reg = <0x280000 0x680000>;   /* 6.5MiB */
 	};
 };
diff --git a/arch/arm/boot/dts/armada-385-linksys-rango.dts b/arch/arm/boot/dts/armada-385-linksys-rango.dts
new file mode 100644
index 0000000..da8a0f3
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-linksys-rango.dts
@@ -0,0 +1,203 @@
+/*
+ * Device Tree file for the Linksys WRT3200ACM (Rango)
+ *
+ * Copyright (C) 2016 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is licensed under the terms of the GNU General Public
+ *     License version 2.  This program is licensed "as is" without
+ *     any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "armada-385-linksys.dtsi"
+
+/ {
+	model = "Linksys WRT3200ACM";
+	compatible = "linksys,rango", "linksys,armada385", "marvell,armada385",
+		     "marvell,armada380";
+};
+
+&expander0 {
+	wan_amber@0 {
+		label = "rango:amber:wan";
+		reg = <0x0>;
+	};
+
+	wan_white@1 {
+		label = "rango:white:wan";
+		reg = <0x1>;
+	};
+
+	usb2@5 {
+		label = "rango:white:usb2";
+		reg = <0x5>;
+	};
+
+	usb3_1@6 {
+		label = "rango:white:usb3_1";
+		reg = <0x6>;
+	};
+
+	usb3_2@7 {
+		label = "rango:white:usb3_2";
+		reg = <0x7>;
+	};
+
+	wps_white@8 {
+		label = "rango:white:wps";
+		reg = <0x8>;
+	};
+
+	wps_amber@9 {
+		label = "rango:amber:wps";
+		reg = <0x9>;
+	};
+};
+
+&gpio_leds {
+	power {
+		gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+		label = "rango:white:power";
+	};
+
+	sata {
+		gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+		label = "rango:white:sata";
+	};
+
+	wlan_2g {
+		gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+		label = "rango:white:wlan_2g";
+	};
+
+	wlan_5g {
+		gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+		label = "rango:white:wlan_5g";
+	};
+};
+
+&gpio_leds_pins {
+	marvell,pins = "mpp21", "mpp45", "mpp46", "mpp56";
+};
+
+&nand {
+	/* AMD/Spansion S34ML02G2 256MiB, OEM Layout */
+
+	partition@0 {
+		label = "u-boot";
+		reg = <0x0000000 0x200000>;  /* 2MiB */
+		read-only;
+	};
+
+	partition@200000 {
+		label = "u_env";
+		reg = <0x200000 0x20000>;    /* 128KiB */
+	};
+
+	partition@220000 {
+		label = "s_env";
+		reg = <0x220000 0x40000>;    /* 256KiB */
+	};
+
+	partition@7e0000 {
+		label = "devinfo";
+		reg = <0x7e0000 0x40000>;   /* 256KiB */
+		read-only;
+	};
+
+	partition@820000 {
+		label = "sysdiag";
+		reg = <0x820000 0x1e0000>;   /* 1920KiB */
+		read-only;
+	};
+
+	/* kernel1 overlaps with rootfs1 by design */
+	partition@a00000 {
+		label = "kernel1";
+		reg = <0xa00000 0x5000000>;  /* 80MiB */
+	};
+
+	partition@1000000 {
+		label = "rootfs1";
+		reg = <0x1000000 0x4a00000>;  /* 74MiB */
+	};
+
+	/* kernel2 overlaps with rootfs2 by design */
+	partition@5a00000 {
+		label = "kernel2";
+		reg = <0x5a00000 0x5000000>; /* 80MiB */
+	};
+
+	partition@6000000 {
+		label = "rootfs2";
+		reg = <0x6000000 0x4a00000>; /* 74MiB */
+	};
+
+	/*
+	 * 86MiB, last MiB is for the BBT, not writable
+	 */
+	partition@aa00000 {
+		label = "syscfg";
+		reg = <0xaa00000 0x5600000>;
+	};
+
+	/*
+	 * Unused area between "s_env" and "devinfo".
+	 * Moved here because otherwise the renumbered
+	 * partitions would break the bootloader
+	 * supplied bootargs
+	 */
+	partition@180000 {
+		label = "unused_area";
+		reg = <0x260000 0x5c0000>;   /* 5.75MiB */
+	};
+};
+
+&sdhci {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdhci_pins>;
+	no-1-8-v;
+	non-removable;
+	wp-inverted;
+	bus-width = <8>;
+	status = "okay";
+};
+
+&usb3_1_vbus {
+	gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3_1_vbus_pins {
+	marvell,pins = "mpp44";
+};
diff --git a/arch/arm/boot/dts/armada-385-linksys-shelby.dts b/arch/arm/boot/dts/armada-385-linksys-shelby.dts
index c7a8ddd..94aa35b 100644
--- a/arch/arm/boot/dts/armada-385-linksys-shelby.dts
+++ b/arch/arm/boot/dts/armada-385-linksys-shelby.dts
@@ -44,71 +44,128 @@
 	model = "Linksys WRT1900ACS";
 	compatible = "linksys,shelby", "linksys,armada385", "marvell,armada385",
 		     "marvell,armada380";
+};
 
-	soc {
-		internal-regs{
-			i2c@11000 {
-
-				pca9635@68 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-					wan_amber@0 {
-						label = "shelby:amber:wan";
-						reg = <0x0>;
-					};
-
-					wan_white@1 {
-						label = "shelby:white:wan";
-						reg = <0x1>;
-					};
-
-					wlan_2g@2 {
-						label = "shelby:white:wlan_2g";
-						reg = <0x2>;
-					};
-
-					wlan_5g@3 {
-						label = "shelby:white:wlan_5g";
-						reg = <0x3>;
-					};
-
-					usb2@5 {
-						label = "shelby:white:usb2";
-						reg = <0x5>;
-					};
-
-					usb3_1@6 {
-						label = "shelby:white:usb3_1";
-						reg = <0x6>;
-					};
-
-					usb3_2@7 {
-						label = "shelby:white:usb3_2";
-						reg = <0x7>;
-					};
-
-					wps_white@8 {
-						label = "shelby:white:wps";
-						reg = <0x8>;
-					};
-
-					wps_amber@9 {
-						label = "shelby:amber:wps";
-						reg = <0x9>;
-					};
-				};
-			};
-		};
+&expander0 {
+	wan_amber@0 {
+		label = "shelby:amber:wan";
+		reg = <0x0>;
 	};
 
-	gpio-leds {
-		power {
-			label = "shelby:white:power";
-		};
+	wan_white@1 {
+		label = "shelby:white:wan";
+		reg = <0x1>;
+	};
 
-		sata {
-			label = "shelby:white:sata";
-		};
+	wlan_2g@2 {
+		label = "shelby:white:wlan_2g";
+		reg = <0x2>;
+	};
+
+	wlan_5g@3 {
+		label = "shelby:white:wlan_5g";
+		reg = <0x3>;
+	};
+
+	usb2@5 {
+		label = "shelby:white:usb2";
+		reg = <0x5>;
+	};
+
+	usb3_1@6 {
+		label = "shelby:white:usb3_1";
+		reg = <0x6>;
+	};
+
+	usb3_2@7 {
+		label = "shelby:white:usb3_2";
+		reg = <0x7>;
+	};
+
+	wps_white@8 {
+		label = "shelby:white:wps";
+		reg = <0x8>;
+	};
+
+	wps_amber@9 {
+		label = "shelby:amber:wps";
+		reg = <0x9>;
+	};
+};
+
+&gpio_leds {
+	power {
+		label = "shelby:white:power";
+	};
+
+	sata {
+		label = "shelby:white:sata";
+	};
+};
+
+&nand {
+	/* 128MiB */
+
+	partition@0 {
+		label = "u-boot";
+		reg = <0x0000000 0x200000>;  /* 2MiB */
+		read-only;
+	};
+
+	partition@100000 {
+		label = "u_env";
+		reg = <0x200000 0x40000>;    /* 256KiB */
+	};
+
+	partition@140000 {
+		label = "s_env";
+		reg = <0x240000 0x40000>;    /* 256KiB */
+	};
+
+	partition@900000 {
+		label = "devinfo";
+		reg = <0x900000 0x100000>;   /* 1MiB */
+		read-only;
+	};
+
+	/* kernel1 overlaps with rootfs1 by design */
+	partition@a00000 {
+		label = "kernel1";
+		reg = <0xa00000 0x2800000>;  /* 40MiB */
+	};
+
+	partition@1000000 {
+		label = "rootfs1";
+		reg = <0x1000000 0x2200000>;  /* 34MiB */
+	};
+
+	/* kernel2 overlaps with rootfs2 by design */
+	partition@3200000 {
+		label = "kernel2";
+		reg = <0x3200000 0x2800000>; /* 40MiB */
+	};
+
+	partition@3800000 {
+		label = "rootfs2";
+		reg = <0x3800000 0x2200000>; /* 34MiB */
+	};
+
+	/*
+	 * 38MiB, last MiB is for the BBT, not writable
+	 */
+	partition@5a00000 {
+		label = "syscfg";
+		reg = <0x5a00000 0x2600000>;
+	};
+
+	/*
+	 * Unused area between "s_env" and "devinfo".
+	 * Moved here because otherwise the renumbered
+	 * partitions would break the bootloader
+	 * supplied bootargs
+	 */
+	partition@180000 {
+		label = "unused_area";
+		reg = <0x280000 0x680000>;   /* 6.5MiB */
 	};
 };
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 2306c45..e1f355f 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -52,7 +52,7 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x00000000 0x20000000>; /* 512 MB */
+		reg = <0x00000000 0x20000000>; /* 512 MiB */
 	};
 
 	soc {
@@ -61,255 +61,45 @@
 			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
 			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
-
-		internal-regs {
-			i2c@11000 {
-				pinctrl-names = "default";
-				pinctrl-0 = <&i2c0_pins>;
-				status = "okay";
-
-				tmp421@4c {
-					compatible = "ti,tmp421";
-					reg = <0x4c>;
-				};
-
-				pca9635@68 {
-					#address-cells = <1>;
-					#size-cells = <0>;
-					compatible = "nxp,pca9635";
-					reg = <0x68>;
-				};
-			};
-
-			/* J10: VCC, NC, RX, NC, TX, GND  */
-			serial@12000 {
-				status = "okay";
-			};
-
-			ethernet@70000 {
-				status = "okay";
-				phy-mode = "rgmii-id";
-				buffer-manager = <&bm>;
-				bm,pool-long = <2>;
-				bm,pool-short = <3>;
-				fixed-link {
-					speed = <1000>;
-					full-duplex;
-				};
-			};
-
-			ethernet@34000 {
-				status = "okay";
-				phy-mode = "sgmii";
-				buffer-manager = <&bm>;
-				bm,pool-long = <0>;
-				bm,pool-short = <1>;
-				fixed-link {
-					speed = <1000>;
-					full-duplex;
-				};
-			};
-
-			mdio@72004 {
-				status = "okay";
-
-				switch@0 {
-					compatible = "marvell,mv88e6085";
-					#address-cells = <1>;
-					#size-cells = <0>;
-					reg = <0>;
-
-					ports {
-						#address-cells = <1>;
-						#size-cells = <0>;
-
-						port@0 {
-							reg = <0>;
-							label = "lan4";
-						};
-
-						port@1 {
-							reg = <1>;
-							label = "lan3";
-						};
-
-						port@2 {
-							reg = <2>;
-							label = "lan2";
-						};
-
-						port@3 {
-							reg = <3>;
-							label = "lan1";
-						};
-
-						port@4 {
-							reg = <4>;
-							label = "wan";
-						};
-
-						port@5 {
-							reg = <5>;
-							label = "cpu";
-							ethernet = <&eth2>;
-
-							fixed-link {
-								speed = <1000>;
-								full-duplex;
-							};
-						};
-					};
-				};
-			};
-
-			sata@a8000 {
-				status = "okay";
-			};
-
-			bm@c8000 {
-				status = "okay";
-			};
-
-			/* USB part of the eSATA/USB 2.0 port */
-			usb@58000 {
-				status = "okay";
-			};
-
-			usb3@f8000 {
-				status = "okay";
-				usb-phy = <&usb3_phy>;
-			};
-
-			flash@d0000 {
-				status = "okay";
-				num-cs = <1>;
-				marvell,nand-keep-config;
-				marvell,nand-enable-arbiter;
-				nand-on-flash-bbt;
-
-				partition@0 {
-					label = "u-boot";
-					reg = <0x0000000 0x200000>;  /* 2MB */
-					read-only;
-				};
-
-				partition@100000 {
-					label = "u_env";
-					reg = <0x200000 0x40000>;    /* 256KB */
-				};
-
-				partition@140000 {
-					label = "s_env";
-					reg = <0x240000 0x40000>;    /* 256KB */
-				};
-
-				partition@900000 {
-					label = "devinfo";
-					reg = <0x900000 0x100000>;   /* 1MB */
-					read-only;
-				};
-
-				/* kernel1 overlaps with rootfs1 by design */
-				partition@a00000 {
-					label = "kernel1";
-					reg = <0xa00000 0x2800000>;  /* 40MB */
-				};
-
-				partition@1000000 {
-					label = "rootfs1";
-					reg = <0x1000000 0x2200000>;  /* 34MB */
-				};
-
-				/* kernel2 overlaps with rootfs2 by design */
-				partition@3200000 {
-					label = "kernel2";
-					reg = <0x3200000 0x2800000>; /* 40MB */
-				};
-
-				partition@3800000 {
-					label = "rootfs2";
-					reg = <0x3800000 0x2200000>; /* 34MB */
-				};
-
-				/*
-				 * 38MB, last MB is for the BBT, not writable
-				 */
-				partition@5a00000 {
-					label = "syscfg";
-					reg = <0x5a00000 0x2600000>;
-				};
-
-				/*
-				 * Unused area between "s_env" and "devinfo".
-				 * Moved here because otherwise the renumbered
-				 * partitions would break the bootloader
-				 * supplied bootargs
-				 */
-				partition@180000 {
-					label = "unused_area";
-					reg = <0x280000 0x680000>;   /* 6.5MB */
-				};
-			};
-		};
-
-		bm-bppi {
-			status = "okay";
-		};
-
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				/* Marvell 88W8864, 5GHz-only */
-				status = "okay";
-			};
-
-			pcie@2,0 {
-				/* Marvell 88W8864, 2GHz-only */
-				status = "okay";
-			};
-		};
 	};
 
-	usb3_phy: usb3_phy {
+	usb3_1_phy: usb3_1-phy {
 		compatible = "usb-nop-xceiv";
-		vcc-supply = <&reg_xhci0_vbus>;
+		vcc-supply = <&usb3_1_vbus>;
 	};
 
-	reg_xhci0_vbus: xhci0-vbus {
+	usb3_1_vbus: usb3_1-vbus {
 		compatible = "regulator-fixed";
 		pinctrl-names = "default";
-		pinctrl-0 = <&xhci0_vbus_pins>;
-		regulator-name = "xhci0-vbus";
+		pinctrl-0 = <&usb3_1_vbus_pins>;
+		regulator-name = "usb3_1-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
 		enable-active-high;
 		gpio = <&gpio1 18 GPIO_ACTIVE_HIGH>;
 	};
 
-	gpio_keys {
+	gpio_keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
-		pinctrl-0 = <&keys_pin>;
+		pinctrl-0 = <&gpio_keys_pins>;
 		pinctrl-names = "default";
 
-		button@1 {
+		wps {
 			label = "WPS";
 			linux,code = <KEY_WPS_BUTTON>;
 			gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
 		};
 
-		button@2 {
+		reset {
 			label = "Factory Reset Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
 		};
 	};
 
-	gpio-leds {
+	gpio_leds: gpio-leds {
 		compatible = "gpio-leds";
-		pinctrl-0 = <&power_led_pin &sata_led_pin>;
+		pinctrl-0 = <&gpio_leds_pins>;
 		pinctrl-names = "default";
 
 		power {
@@ -323,21 +113,83 @@
 			linux,default-trigger = "disk-activity";
 		};
 	};
+};
 
-	dsa@0 {
-		status = "disabled";
+&ahci0 {
+	status = "okay";
+};
 
-		compatible = "marvell,dsa";
-		#address-cells = <2>;
+&bm {
+	status = "okay";
+};
+
+&bm_bppi {
+	status = "okay";
+};
+
+&eth0 {
+	status = "okay";
+	phy-mode = "rgmii-id";
+	buffer-manager = <&bm>;
+	bm,pool-long = <0>;
+	bm,pool-short = <1>;
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+};
+
+&eth2 {
+	status = "okay";
+	phy-mode = "sgmii";
+	buffer-manager = <&bm>;
+	bm,pool-long = <2>;
+	bm,pool-short = <3>;
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+	status = "okay";
+
+	tmp421@4c {
+		compatible = "ti,tmp421";
+		reg = <0x4c>;
+	};
+
+	expander0: pca9635@68 {
+		#address-cells = <1>;
 		#size-cells = <0>;
+		compatible = "nxp,pca9635";
+		reg = <0x68>;
+	};
+};
 
-		dsa,ethernet = <&eth2>;
-		dsa,mii-bus = <&mdio>;
+&nand {
+	/* 128MiB or 256MiB */
+	status = "okay";
+	num-cs = <1>;
+	marvell,nand-keep-config;
+	marvell,nand-enable-arbiter;
+	nand-on-flash-bbt;
+};
 
-		switch@0 {
+&mdio {
+	status = "okay";
+
+	switch@0 {
+		compatible = "marvell,mv88e6085";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0>;
+
+		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
-			reg = <0x0 0>;	/* MDIO address 0, switch 0 in tree */
 
 			port@0 {
 				reg = <0>;
@@ -367,28 +219,45 @@
 			port@5 {
 				reg = <5>;
 				label = "cpu";
+				ethernet = <&eth2>;
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+				};
 			};
 		};
 	};
 };
 
+&pciec {
+	status = "okay";
+};
+
+&pcie1 {
+	/* Marvell 88W8864, 5GHz-only */
+	status = "okay";
+};
+
+&pcie2 {
+	/* Marvell 88W8864, 2GHz-only */
+	status = "okay";
+};
+
 &pinctrl {
-	keys_pin: keys-pin {
+	gpio_keys_pins: gpio-keys-pins {
+		/* mpp24: wps, mpp29: reset */
 		marvell,pins = "mpp24", "mpp29";
 		marvell,function = "gpio";
 	};
 
-	power_led_pin: power-led-pin {
-		marvell,pins = "mpp55";
+	gpio_leds_pins: gpio-leds-pins {
+		/* mpp54: sata, mpp55: power */
+		marvell,pins = "mpp54", "mpp55";
 		marvell,function = "gpio";
 	};
 
-	sata_led_pin: sata-led-pin {
-		marvell,pins = "mpp54";
-		marvell,function = "gpio";
-	};
-
-	xhci0_vbus_pins: xhci0-vbus-pins {
+	usb3_1_vbus_pins: usb3_1-vbus-pins {
 		marvell,pins = "mpp50";
 		marvell,function = "gpio";
 	};
@@ -397,3 +266,18 @@
 &spi0 {
 	status = "disabled";
 };
+
+&uart0 {
+	/* J10: VCC, NC, RX, NC, TX, GND  */
+	status = "okay";
+};
+
+&usb0 {
+	/* USB part of the eSATA/USB 2.0 port */
+	status = "okay";
+};
+
+&usb3_1 {
+	status = "okay";
+	usb-phy = <&usb3_1_phy>;
+};
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index 28eede1..be16ce3 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -171,7 +171,7 @@
 			/* leds device (in STM32F0) at address 0x2b */
 
 			eeprom@54 {
-				compatible = "at,24c64";
+				compatible = "atmel,24c64";
 				reg = <0x54>;
 
 				/* The EEPROM contains data for bootloader.
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 8b165c3..af31f5d 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -312,31 +312,39 @@
 			};
 
 			gpio0: gpio@18100 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18100 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18100 0x40>, <0x181c0 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio1: gpio@18140 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18140 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18140 0x40>, <0x181c8 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <28>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&coreclk 0>;
 			};
 
 			systemc: system-controller@18200 {
diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
index 84cc232..be22ec5 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
@@ -311,6 +311,10 @@
 	reg = <0x20a00 0x2d0>, <0x21070 0x58>;
 };
 
+&rtc {
+	status = "disabled";
+};
+
 &timer {
 	compatible = "marvell,armada-xp-timer";
 	clocks = <&coreclk 2>, <&refclk>;
diff --git a/arch/arm/boot/dts/armada-xp-98dx4251.dtsi b/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
index 51de91b..bc9f824 100644
--- a/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
@@ -87,4 +87,5 @@
 
 &pp0 {
 	compatible = "marvell,prestera-98dx4251";
+	interrupts = <33>, <34>, <35>, <36>;
 };
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 9efcf59..6d705f5 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -308,13 +308,11 @@
 		};
 	};
 
-	gpio_fan {
+	pwm_fan {
 		/* SUNON HA4010V4-0000-C99 */
-		compatible = "gpio-fan";
-		gpios = <&gpio0 24 0>;
 
-		gpio-fan,speed-map = <0    0
-				      4500 1>;
+		compatible = "pwm-fan";
+		pwms = <&gpio0 24 4000>;
 	};
 
 	dsa {
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 07c5090..9f25814 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -202,25 +202,33 @@
 
 		internal-regs {
 			gpio0: gpio@18100 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18100 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18100 0x40>, <0x181c0 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio1: gpio@18140 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18140 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18140 0x40>, <0x181c8 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <17>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>;
+				clocks = <&coreclk 0>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 64e936a..2bfe07a 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -285,29 +285,38 @@
 
 		internal-regs {
 			gpio0: gpio@18100 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18100 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18100 0x40>, <0x181c0 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio1: gpio@18140 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18140 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18140 0x40>, <0x181c8 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio2: gpio@18180 {
-				compatible = "marvell,orion-gpio";
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
 				reg = <0x18180 0x40>;
 				ngpios = <3>;
 				gpio-controller;
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index d1383dd..6c33935 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -323,29 +323,38 @@
 
 		internal-regs {
 			gpio0: gpio@18100 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18100 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18100 0x40>, <0x181c0 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio1: gpio@18140 {
-				compatible = "marvell,orion-gpio";
-				reg = <0x18140 0x40>;
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
+				reg = <0x18140 0x40>, <0x181c8 0x08>;
+				reg-names = "gpio", "pwm";
 				ngpios = <32>;
 				gpio-controller;
 				#gpio-cells = <2>;
+				#pwm-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
+				clocks = <&coreclk 0>;
 			};
 
 			gpio2: gpio@18180 {
-				compatible = "marvell,orion-gpio";
+				compatible = "marvell,armada-370-gpio",
+					     "marvell,orion-gpio";
 				reg = <0x18180 0x40>;
 				ngpios = <3>;
 				gpio-controller;
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 8c6bc29..8a04c7e 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -886,13 +886,12 @@
 			};
 
 			timer: timer@1e782000 {
+				/* This timer is a Faraday FTTMR010 derivative */
 				compatible = "aspeed,ast2400-timer";
 				reg = <0x1e782000 0x90>;
-				// The moxart_timer driver registers only one
-				// interrupt and assumes it's for timer 1
-				//interrupts = <16 17 18 35 36 37 38 39>;
-				interrupts = <16>;
+				interrupts = <16 17 18 35 36 37 38 39>;
 				clocks = <&clk_apb>;
+				clock-names = "PCLK";
 			};
 
 			wdt1: wdt@1e785000 {
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index a0bea4a..9cffe34 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -993,13 +993,12 @@
 			};
 
 			timer: timer@1e782000 {
+				/* This timer is a Faraday FTTMR010 derivative */
 				compatible = "aspeed,ast2400-timer";
 				reg = <0x1e782000 0x90>;
-				// The moxart_timer driver registers only one
-				// interrupt and assumes it's for timer 1
-				//interrupts = <16 17 18 35 36 37 38 39>;
-				interrupts = <16>;
+				interrupts = <16 17 18 35 36 37 38 39>;
 				clocks = <&clk_apb>;
+				clock-names = "PCLK";
 			};
 
 
diff --git a/arch/arm/boot/dts/at91-cosino.dtsi b/arch/arm/boot/dts/at91-cosino.dtsi
index 02d8ef4..89cde17 100644
--- a/arch/arm/boot/dts/at91-cosino.dtsi
+++ b/arch/arm/boot/dts/at91-cosino.dtsi
@@ -82,38 +82,61 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;	/* Enable PMECC */
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_addr_nand
+				     &pinctrl_ebi_data_0_7>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_oe_we
+					     &pinctrl_nand_cs
+					     &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			uboot@40000 {
-				label = "u-boot";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			ubootenv@c0000 {
-				label = "U-Boot Env";
-				reg = <0xc0000 0x140000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						uboot@40000 {
+							label = "u-boot";
+							reg = <0x40000 0x80000>;
+						};
+
+						ubootenv@c0000 {
+							label = "U-Boot Env";
+							reg = <0xc0000 0x140000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index b4f147c..1f31df8 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -59,19 +59,39 @@
 			status = "okay";
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			bootstrap@0 {
-				label = "bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			ubi@20000 {
-				label = "ubi";
-				reg = <0x20000 0x7fe0000>;
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						bootstrap@0 {
+							label = "bootstrap";
+							reg = <0x0 0x20000>;
+						};
+
+						ubi@20000 {
+							label = "ubi";
+							reg = <0x20000 0x7fe0000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts
index 50a1456..4372c02 100644
--- a/arch/arm/boot/dts/at91-kizbox2.dts
+++ b/arch/arm/boot/dts/at91-kizbox2.dts
@@ -141,23 +141,40 @@
 			status = "okay";
 		};
 
-		nand0: nand@60000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_nand_addr>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			bootstrap@0 {
-				label = "bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
 
-			ubi@20000 {
-				label = "ubi";
-				reg = <0x20000 0x7fe0000>;
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						bootstrap@0 {
+							label = "bootstrap";
+							reg = <0x0 0x20000>;
+						};
+
+						ubi@20000 {
+							label = "ubi";
+							reg = <0x20000 0x7fe0000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
index 9682d10..33238fc 100644
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ b/arch/arm/boot/dts/at91-kizboxmini.dts
@@ -68,6 +68,49 @@
 			status = "okay";
 		};
 
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_addr_nand
+				     &pinctrl_ebi_data_0_7>;
+			pinctrl-names = "default";
+			status = "okay";
+
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_oe_we
+					     &pinctrl_nand_cs
+					     &pinctrl_nand_rb>;
+				pinctrl-names = "default";
+
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						bootstrap@0 {
+							label = "bootstrap";
+							reg = <0x0 0x20000>;
+						};
+
+						ubi@20000 {
+							label = "ubi";
+							reg = <0x20000 0x7fe0000>;
+						};
+					};
+				};
+			};
+		};
+
 		nand0: nand@40000000 {
 			nand-bus-width = <8>;
 			nand-ecc-mode = "hw";
@@ -77,15 +120,6 @@
 			nand-on-flash-bbt;
 			status = "okay";
 
-			bootstrap@0 {
-				label = "bootstrap";
-				reg = <0x0 0x20000>;
-			};
-
-			ubi@20000 {
-				label = "ubi";
-				reg = <0x20000 0x7fe0000>;
-			};
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-linea.dtsi b/arch/arm/boot/dts/at91-linea.dtsi
index 0721c84..87e5090 100644
--- a/arch/arm/boot/dts/at91-linea.dtsi
+++ b/arch/arm/boot/dts/at91-linea.dtsi
@@ -31,19 +31,30 @@
 	status = "okay";
 
 	eeprom@51 {
-		compatible = "st,24c64";
+		compatible = "st,24c64", "atmel,24c64";
 		reg = <0x51>;
 		pagesize = <32>;
 	};
 };
 
-&nand0 {
+&ebi {
+	pinctrl-0 = <&pinctrl_ebi_nand_addr>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+
+&nand_controller {
 	status = "okay";
 
-	nand-bus-width = <8>;
-	nand-ecc-mode = "hw";
-	atmel,has-pmecc;
-	atmel,pmecc-cap = <4>;
-	atmel,pmecc-sector-size = <512>;
-	nand-on-flash-bbt;
+	nand: nand@3 {
+		reg = <0x3 0x0 0x2>;
+		atmel,rb = <0>;
+		nand-bus-width = <8>;
+		nand-ecc-mode = "hw";
+		nand-ecc-strength = <4>;
+		nand-ecc-step-size = <512>;
+		nand-on-flash-bbt;
+		label = "atmel_nand";
+	};
 };
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index 8f01918..f463527 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -107,50 +107,69 @@
 			status = "okay";
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@a0000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@c0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4c0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7cc0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@a0000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@c0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4c0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7cc0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 0bef9e0..2e2c3d1 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -56,10 +56,6 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
-		reg = <0x20000000 0x80000>;
-	};
-
 	clocks {
 		slow_xtal {
 			clock-frequency = <32768>;
@@ -227,6 +223,10 @@
 				};
 			};
 
+			pwm0: pwm@f802c000 {
+				status = "okay";
+			};
+
 			flx0: flexcom@f8034000 {
 				atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
 				status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
@@ -258,6 +258,12 @@
 				status = "okay";
 			};
 
+			can0: can@f8054000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_can0_default>;
+				status = "okay";
+			};
+
 			uart3: serial@fc008000 {
 				atmel,use-dma-rx;
 				atmel,use-dma-tx;
@@ -322,6 +328,18 @@
 					bias-disable;
 				};
 
+				pinctrl_can0_default: can0_default {
+					pinmux = <PIN_PC10__CANTX0>,
+						 <PIN_PC11__CANRX0>;
+					bias-disable;
+				};
+
+				pinctrl_can1_default: can1_default {
+					pinmux = <PIN_PC26__CANTX1>,
+						 <PIN_PC27__CANRX1>;
+					bias-disable;
+				};
+
 				pinctrl_charger_chglev: charger_chglev {
 					pinmux = <PIN_PA12__GPIO>;
 					bias-disable;
@@ -469,6 +487,12 @@
 				};
 
 			};
+
+			can1: can@fc050000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_can1_default>;
+				status = "okay";
+			};
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 5a53fcf..3af088d 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -235,46 +235,6 @@
 			};
 		};
 
-		nand0: nand@60000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
-			status = "okay";
-
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
-
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
-
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
-
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
-
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
-
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
-			};
-		};
-
 		usb0: gadget@00500000 {
 			atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>;	/* PE9, conflicts with A9 */
 			pinctrl-names = "default";
@@ -294,6 +254,63 @@
 		usb2: ehci@00700000 {
 			status = "okay";
 		};
+
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_nand_addr>;
+			pinctrl-names = "default";
+			status = "okay";
+
+			nand_controller: nand-controller {
+				status = "okay";
+
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
+
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
+
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
+			};
+		};
 	};
 
 	vcc_mmc0_reg: fixedregulator_mmc0 {
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
index b5a5a91..b813fdf 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
@@ -75,7 +75,7 @@
 					reg = <0>;
 					clocks = <&clk20m>;
 					interrupt-parent = <&pioE>;
-					interrupts = <6 GPIO_ACTIVE_LOW>;
+					interrupts = <6 IRQ_TYPE_EDGE_RISING>;
 					spi-max-frequency = <10000000>;
 				};
 
@@ -84,7 +84,7 @@
 					reg = <1>;
 					clocks = <&clk20m>;
 					interrupt-parent = <&pioE>;
-					interrupts = <7 GPIO_ACTIVE_LOW>;
+					interrupts = <7 IRQ_TYPE_EDGE_RISING>;
 					spi-max-frequency = <10000000>;
 				};
 			};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 5ab14ce..cf71244 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -190,41 +190,60 @@
 			status = "okay";
 		};
 
-		nand0: nand@80000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			nand-on-flash-bbt;
-			atmel,has-pmecc;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_cs3 &pinctrl_ebi_nrd_nandoe
+				     &pinctrl_ebi_nwe_nandwe &pinctrl_ebi_nandrdy
+				     &pinctrl_ebi_data_0_7 &pinctrl_ebi_nand_addr>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index f8b96ce..bae5248 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -236,41 +236,60 @@
 			status = "okay";
 		};
 
-		nand0: nand@80000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			nand-on-flash-bbt;
-			atmel,has-pmecc;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_cs3 &pinctrl_ebi_nrd_nandoe
+				     &pinctrl_ebi_nwe_nandwe &pinctrl_ebi_nandrdy
+				     &pinctrl_ebi_data_0_7 &pinctrl_ebi_nand_addr>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
index 498fba3..5f29010 100644
--- a/arch/arm/boot/dts/at91-tse850-3.dts
+++ b/arch/arm/boot/dts/at91-tse850-3.dts
@@ -161,45 +161,51 @@
 	};
 };
 
-&nand0 {
-	at91bootstrap@0 {
-		label = "at91bootstrap";
-		reg = <0x0 0x40000>;
-	};
+&nand {
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
 
-	barebox@40000 {
-		label = "bootloader";
-		reg = <0x40000 0x60000>;
-	};
+		at91bootstrap@0 {
+			label = "at91bootstrap";
+			reg = <0x0 0x40000>;
+		};
 
-	bareboxenv@c0000 {
-		label = "bareboxenv";
-		reg = <0xc0000 0x40000>;
-	};
+		barebox@40000 {
+			label = "bootloader";
+			reg = <0x40000 0x60000>;
+		};
 
-	bareboxenv2@100000 {
-		label = "bareboxenv2";
-		reg = <0x100000 0x40000>;
-	};
+		bareboxenv@c0000 {
+			label = "bareboxenv";
+			reg = <0xc0000 0x40000>;
+		};
 
-	oftree@180000 {
-		label = "oftree";
-		reg = <0x180000 0x20000>;
-	};
+		bareboxenv2@100000 {
+			label = "bareboxenv2";
+			reg = <0x100000 0x40000>;
+		};
 
-	kernel@200000 {
-		label = "kernel";
-		reg = <0x200000 0x500000>;
-	};
+		oftree@180000 {
+			label = "oftree";
+			reg = <0x180000 0x20000>;
+		};
 
-	rootfs@800000 {
-		label = "rootfs";
-		reg = <0x800000 0x0f800000>;
-	};
+		kernel@200000 {
+			label = "kernel";
+			reg = <0x200000 0x500000>;
+		};
 
-	ovlfs@10000000 {
-		label = "ovlfs";
-		reg = <0x10000000 0x10000000>;
+		rootfs@800000 {
+			label = "rootfs";
+			reg = <0x800000 0x0f800000>;
+		};
+
+		ovlfs@10000000 {
+			label = "ovlfs";
+			reg = <0x10000000 0x10000000>;
+		};
 	};
 };
 
@@ -239,7 +245,7 @@
 	};
 
 	eeprom@50 {
-		compatible = "nxp,24c02";
+		compatible = "nxp,24c02", "atmel,24c02";
 		reg = <0x50>;
 		pagesize = <16>;
 	};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 9e035b2..6582f3c 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -99,6 +99,16 @@
 				reg = <0xffffea00 0x200>;
 			};
 
+			smc: smc@ffffec00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffec00 0x200>;
+			};
+
+			matrix: matrix@ffffee00 {
+				compatible = "atmel,at91sam9260-matrix", "syscon";
+				reg = <0xffffee00 0x200>;
+			};
+
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9260-pmc", "syscon";
 				reg = <0xfffffc00 0x100>;
@@ -522,10 +532,14 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOC 13 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PC13 gpio RDY pin pull_up */
-							 AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;	/* PC14 gpio enable pin pull_up */
+							<AT91_PIOC 13 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
+							 <AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -985,24 +999,6 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x40000000 0x10000000
-			       0xffffe800 0x200
-			      >;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioC 13 GPIO_ACTIVE_HIGH
-				 &pioC 14 GPIO_ACTIVE_HIGH
-				 0
-				>;
-			status = "disabled";
-		};
-
 		usb0: ohci@500000 {
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00500000 0x100000>;
@@ -1011,6 +1007,33 @@
 			clock-names = "ohci_clk", "hclk", "uhpck";
 			status = "disabled";
 		};
+
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9260-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x80000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000
+				  0x6 0x0 0x70000000 0x10000000
+				  0x7 0x0 0x80000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9260-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
 	};
 
 	i2c-gpio-0 {
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index 7e80acd..a05353f 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -91,20 +91,31 @@
 			status = "disabled";
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9261-ebi";
+			#address-cells = <2>;
 			#size-cells = <1>;
-			reg = <0x40000000 0x10000000>;
-			atmel,nand-addr-offset = <22>;
-			atmel,nand-cmd-offset = <21>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-
-			gpios = <&pioC 15 GPIO_ACTIVE_HIGH>,
-				<&pioC 14 GPIO_ACTIVE_HIGH>,
-				<0>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x80000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000
+				  0x6 0x0 0x70000000 0x10000000
+				  0x7 0x0 0x80000000 0x10000000>;
+			clocks = <&mck>;
 			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9261-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
 		};
 
 		apb {
@@ -262,6 +273,11 @@
 				reg = <0xffffea00 0x200>;
 			};
 
+			smc: smc@ffffec00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffec00 0x200>;
+			};
+
 			matrix: matrix@ffffee00 {
 				compatible = "atmel,at91sam9261-matrix", "syscon";
 				reg = <0xffffee00 0x200>;
@@ -362,9 +378,13 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOC 15 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>,
+							<AT91_PIOC 15 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
 							<AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 55bd51f..157e149 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -68,40 +68,59 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 15 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index a1888f6..ed4b564 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -361,11 +361,26 @@
 				reg = <0xffffe200 0x200>;
 			};
 
+			smc0: smc@ffffe400 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffe400 0x200>;
+			};
+
 			ramc1: ramc@ffffe800 {
 				compatible = "atmel,at91sam9260-sdramc";
 				reg = <0xffffe800 0x200>;
 			};
 
+			smc1: smc@ffffea00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffea00 0x200>;
+			};
+
+			matrix: matrix@ffffec00 {
+				compatible = "atmel,at91sam9263-matrix", "syscon";
+				reg = <0xffffec00 0x200>;
+			};
+
 			pit: timer@fffffd30 {
 				compatible = "atmel,at91sam9260-pit";
 				reg = <0xfffffd30 0xf>;
@@ -472,10 +487,14 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOA 22 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PA22 gpio RDY pin pull_up*/
-							 AT91_PIOD 15 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;	/* PD15 gpio enable pin pull_up */
+							<AT91_PIOA 22 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
+							 <AT91_PIOD 15 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -991,24 +1010,6 @@
 			status = "disabled";
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x40000000 0x10000000
-			       0xffffe000 0x200
-			      >;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioA 22 GPIO_ACTIVE_HIGH
-				 &pioD 15 GPIO_ACTIVE_HIGH
-				 0
-				>;
-			status = "disabled";
-		};
-
 		usb0: ohci@00a00000 {
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00a00000 0x100000>;
@@ -1017,6 +1018,52 @@
 			clock-names = "ohci_clk", "hclk", "uhpck";
 			status = "disabled";
 		};
+
+		ebi0: ebi@10000000 {
+			compatible = "atmel,at91sam9263-ebi0";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc0>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x80000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller0: nand-controller {
+				compatible = "atmel,at91sam9260-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
+
+		ebi1: ebi@70000000 {
+			compatible = "atmel,at91sam9263-ebi1";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc1>;
+			atmel,matrix = <&matrix>;
+			reg = <0x80000000 0x20000000>;
+			ranges = <0x0 0x0 0x80000000 0x10000000
+				  0x1 0x0 0x90000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller1: nand-controller {
+				compatible = "atmel,at91sam9260-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
 	};
 
 	i2c-gpio-0 {
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 127cc42..10a0925d 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -125,50 +125,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt = <1>;
+		ebi0: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 27847a4..ac9a151 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -123,50 +123,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index e567d5f..a4808c4 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -113,6 +113,16 @@
 				clock-names = "ddrck";
 			};
 
+			smc: smc@ffffe800 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffe800 0x200>;
+			};
+
+			matrix: matrix@ffffea00 {
+				compatible = "atmel,at91sam9g45-matrix", "syscon";
+				reg = <0xffffea00 0x200>;
+			};
+
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9g45-pmc", "syscon";
 				reg = <0xfffffc00 0x100>;
@@ -601,10 +611,14 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOC 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PC8 gpio RDY pin pull_up*/
-							 AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;	/* PC14 gpio enable pin pull_up */
+							<AT91_PIOC 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
+							 <AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -1278,25 +1292,6 @@
 			status = "disabled";
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x40000000 0x10000000
-			       0xffffe200 0x200
-			      >;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioC 8 GPIO_ACTIVE_HIGH
-				 &pioC 14 GPIO_ACTIVE_HIGH
-				 0
-				>;
-			status = "disabled";
-		};
-
 		usb0: ohci@00700000 {
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00700000 0x100000>;
@@ -1314,6 +1309,31 @@
 			clock-names = "usb_clk", "ehci_clk";
 			status = "disabled";
 		};
+
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9g45-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x80000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9g45-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
 	};
 
 	i2c-gpio-0 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 2400c991..2522c33 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -245,25 +245,44 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			boot@0 {
-				label = "bootstrap/uboot/kernel";
-				reg = <0x0 0x400000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			rootfs@400000 {
-				label = "rootfs";
-				reg = <0x400000 0x3C00000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 8 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			data@4000000 {
-				label = "data";
-				reg = <0x4000000 0xC000000>;
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						boot@0 {
+							label = "bootstrap/uboot/kernel";
+							reg = <0x0 0x400000>;
+						};
+
+						rootfs@400000 {
+							label = "rootfs";
+							reg = <0x400000 0x3C00000>;
+						};
+
+						data@4000000 {
+							label = "data";
+							reg = <0x4000000 0xC000000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index f43d769..06516d0 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -89,6 +89,17 @@
 				atmel,external-irqs = <31>;
 			};
 
+			matrix: matrix@ffffde00 {
+				compatible = "atmel,at91sam9n12-matrix", "syscon";
+				reg = <0xffffde00 0x100>;
+			};
+
+			pmecc: ecc-engine@ffffe000 {
+				compatible = "atmel,at91sam9g45-pmecc";
+				reg = <0xffffe000 0x600>,
+				      <0xffffe600 0x200>;
+			};
+
 			ramc0: ramc@ffffe800 {
 				compatible = "atmel,at91sam9g45-ddramc";
 				reg = <0xffffe800 0x200>;
@@ -96,6 +107,11 @@
 				clock-names = "ddrck";
 			};
 
+			smc: smc@ffffea00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffea00 0x200>;
+			};
+
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9n12-pmc", "syscon";
 				reg = <0xfffffc00 0x200>;
@@ -627,10 +643,14 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOD 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PD5 gpio RDY pin pull_up*/
-							 AT91_PIOD 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;	/* PD4 gpio enable pin pull_up */
+							<AT91_PIOD 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
+							 <AT91_PIOD 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -998,28 +1018,6 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = < 0x40000000 0x10000000
-				0xffffe000 0x00000600
-				0xffffe600 0x00000200
-				0x00108000 0x00018000
-			       >;
-			atmel,pmecc-lookup-table-offset = <0x0 0x8000>;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioD 5 GPIO_ACTIVE_HIGH
-				 &pioD 4 GPIO_ACTIVE_HIGH
-				 0
-				>;
-			status = "disabled";
-		};
-
 		usb0: ohci@00500000 {
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00500000 0x00100000>;
@@ -1028,6 +1026,32 @@
 			clock-names = "ohci_clk", "hclk", "uhpck";
 			status = "disabled";
 		};
+
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9x5-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x60000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9g45-nand-controller";
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
 	};
 
 	i2c-gpio-0 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index 626c67d..5bea8c5 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -147,14 +147,26 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <2>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
+
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
+
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-ecc-strength = <2>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+				};
+			};
 		};
 
 		usb0: ohci@00500000 {
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index f4c129a..7768342 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -92,21 +92,29 @@
 			status = "disabled";
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9rl-ebi";
+			#address-cells = <2>;
 			#size-cells = <1>;
-			reg = <0x40000000 0x10000000>,
-			      <0xffffe800 0x200>;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioD 17 GPIO_ACTIVE_HIGH>,
-				<&pioB 6 GPIO_ACTIVE_HIGH>,
-				<0>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x80000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
 			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9g45-nand-controller";
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
 		};
 
 		apb {
@@ -364,6 +372,16 @@
 				reg = <0xffffea00 0x200>;
 			};
 
+			smc: smc@ffffec00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffec00 0x200>;
+			};
+
+			matrix: matrix@ffffee00 {
+				compatible = "atmel,at91sam9rl-matrix", "syscon";
+				reg = <0xffffee00 0x200>;
+			};
+
 			aic: interrupt-controller@fffff000 {
 				#interrupt-cells = <3>;
 				compatible = "atmel,at91rm9200-aic";
@@ -443,6 +461,14 @@
 					};
 				};
 
+				ebi {
+					pinctrl_ebi_addr_nand: ebi-addr-0 {
+						atmel,pins =
+							<AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE>,
+							<AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+				};
+
 				fb {
 					pinctrl_fb: fb-0 {
 						atmel,pins =
@@ -507,28 +533,21 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOD 17 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>,
+							<AT91_PIOD 17 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
 							<AT91_PIOB 6 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 
-					pinctrl_nand0_ale_cle: nand_ale_cle-0 {
-						atmel,pins =
-							<AT91_PIOB 2 AT91_PERIPH_A AT91_PINCTRL_NONE>,
-							<AT91_PIOB 3 AT91_PERIPH_A AT91_PINCTRL_NONE>;
-					};
-
-					pinctrl_nand0_oe_we: nand_oe_we-0 {
+					pinctrl_nand_oe_we: nand-oe-we-0 {
 						atmel,pins =
 							<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE>,
 							<AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
-
-					pinctrl_nand0_cs: nand_cs-0 {
-						atmel,pins =
-							<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE>;
-					};
 				};
 
 				pwm0 {
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 2e567d9..9047c16 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -63,40 +63,63 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt = <1>;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_addr_nand>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_oe_we
+					     &pinctrl_nand_cs
+					     &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 17 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioB 6 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index f66bae9..57f3075 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -97,6 +97,17 @@
 				atmel,external-irqs = <31>;
 			};
 
+			matrix: matrix@ffffde00 {
+				compatible = "atmel,at91sam9x5-matrix", "syscon";
+				reg = <0xffffde00 0x100>;
+			};
+
+			pmecc: ecc-engine@ffffe000 {
+				compatible = "atmel,at91sam9g45-pmecc";
+				reg = <0xffffe000 0x600>,
+				      <0xffffe600 0x200>;
+			};
+
 			ramc0: ramc@ffffe800 {
 				compatible = "atmel,at91sam9g45-ddramc";
 				reg = <0xffffe800 0x200>;
@@ -104,6 +115,11 @@
 				clock-names = "ddrck";
 			};
 
+			smc: smc@ffffea00 {
+				compatible = "atmel,at91sam9260-smc", "syscon";
+				reg = <0xffffea00 0x200>;
+			};
+
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9x5-pmc", "syscon";
 				reg = <0xfffffc00 0x200>;
@@ -465,6 +481,38 @@
 					};
 				};
 
+				ebi {
+					pinctrl_ebi_data_0_7: ebi-data-lsb-0 {
+						atmel,pins =
+							<AT91_PIOD 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 9 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 13 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_data_8_15: ebi-data-msb-0 {
+						atmel,pins =
+							<AT91_PIOD 14 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 15 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 16 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 18 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 19 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 20 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 21 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_addr_nand: ebi-addr-0 {
+						atmel,pins =
+							<AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+				};
+
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
@@ -551,34 +599,20 @@
 				};
 
 				nand {
-					pinctrl_nand: nand-0 {
+					pinctrl_nand_oe_we: nand-oe-we-0 {
 						atmel,pins =
-							<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD0 periph A Read Enable */
-							 AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD1 periph A Write Enable */
-							 AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD2 periph A Address Latch Enable */
-							 AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD3 periph A Command Latch Enable */
-							 AT91_PIOD 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PD4 gpio Chip Enable pin pull_up */
-							 AT91_PIOD 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PD5 gpio RDY/BUSY pin pull_up */
-							 AT91_PIOD 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD6 periph A Data bit 0 */
-							 AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD7 periph A Data bit 1 */
-							 AT91_PIOD 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD8 periph A Data bit 2 */
-							 AT91_PIOD 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD9 periph A Data bit 3 */
-							 AT91_PIOD 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD10 periph A Data bit 4 */
-							 AT91_PIOD 11 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD11 periph A Data bit 5 */
-							 AT91_PIOD 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD12 periph A Data bit 6 */
-							 AT91_PIOD 13 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* PD13 periph A Data bit 7 */
+							<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 
-					pinctrl_nand_16bits: nand_16bits-0 {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOD 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD14 periph A Data bit 8 */
-							 AT91_PIOD 15 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD15 periph A Data bit 9 */
-							 AT91_PIOD 16 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD16 periph A Data bit 10 */
-							 AT91_PIOD 17 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD17 periph A Data bit 11 */
-							 AT91_PIOD 18 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD18 periph A Data bit 12 */
-							 AT91_PIOD 19 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD19 periph A Data bit 13 */
-							 AT91_PIOD 20 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PD20 periph A Data bit 14 */
-							 AT91_PIOD 21 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* PD21 periph A Data bit 15 */
+							<AT91_PIOD 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
+					};
+
+					pinctrl_nand_cs: nand-cs-0 {
+						atmel,pins =
+							<AT91_PIOD 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -1197,28 +1231,6 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x40000000 0x10000000
-			       0xffffe000 0x600		/* PMECC Registers */
-			       0xffffe600 0x200		/* PMECC Error Location Registers */
-			       0x00108000 0x18000	/* PMECC looup table in ROM code  */
-			      >;
-			atmel,pmecc-lookup-table-offset = <0x0 0x8000>;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
-			gpios = <&pioD 5 GPIO_ACTIVE_HIGH
-				 &pioD 4 GPIO_ACTIVE_HIGH
-				 0
-				>;
-			status = "disabled";
-		};
-
 		usb0: ohci@00600000 {
 			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00600000 0x100000>;
@@ -1236,6 +1248,32 @@
 			clock-names = "usb_clk", "ehci_clk";
 			status = "disabled";
 		};
+
+		ebi: ebi@10000000 {
+			compatible = "atmel,at91sam9x5-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&smc>;
+			atmel,matrix = <&matrix>;
+			reg = <0x10000000 0x60000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x20000000 0x10000000
+				  0x2 0x0 0x30000000 0x10000000
+				  0x3 0x0 0x40000000 0x10000000
+				  0x4 0x0 0x50000000 0x10000000
+				  0x5 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,at91sam9g45-nand-controller";
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
 	};
 
 	i2c-gpio-0 {
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index b098ad8..bdeaa0b 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -37,38 +37,61 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;	/* Enable PMECC */
-			atmel,pmecc-cap = <2>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_addr_nand
+				     &pinctrl_ebi_data_0_7>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_oe_we
+					     &pinctrl_nand_cs
+					     &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			uboot@40000 {
-				label = "u-boot";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioD 4 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <2>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			ubootenv@c0000 {
-				label = "U-Boot Env";
-				reg = <0xc0000 0x140000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x1f800000>;
+						uboot@40000 {
+							label = "u-boot";
+							reg = <0x40000 0x80000>;
+						};
+
+						ubootenv@c0000 {
+							label = "U-Boot Env";
+							reg = <0xc0000 0x140000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x1f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/axp209.dtsi b/arch/arm/boot/dts/axp209.dtsi
index 9677dd5..3c8fa26 100644
--- a/arch/arm/boot/dts/axp209.dtsi
+++ b/arch/arm/boot/dts/axp209.dtsi
@@ -64,6 +64,11 @@
 		#gpio-cells = <2>;
 	};
 
+	battery_power_supply: battery-power-supply {
+		compatible = "x-powers,axp209-battery-power-supply";
+		status = "disabled";
+	};
+
 	regulators {
 		/* Default work frequency for buck regulators */
 		x-powers,dcdc-freq = <1500>;
diff --git a/arch/arm/boot/dts/axp22x.dtsi b/arch/arm/boot/dts/axp22x.dtsi
index 67331c5..87fb08e 100644
--- a/arch/arm/boot/dts/axp22x.dtsi
+++ b/arch/arm/boot/dts/axp22x.dtsi
@@ -57,6 +57,11 @@
 		status = "disabled";
 	};
 
+	battery_power_supply: battery-power-supply {
+		compatible = "x-powers,axp221-battery-power-supply";
+		status = "disabled";
+	};
+
 	regulators {
 		/* Default work frequency for buck regulators */
 		x-powers,dcdc-freq = <3000>;
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 9644fdd..bf8c838 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -142,6 +142,55 @@
 			interrupts = <0>;
 		};
 
+		mdio: mdio@18002000 {
+			compatible = "brcm,iproc-mdio";
+			reg = <0x18002000 0x8>;
+			#size-cells = <1>;
+			#address-cells = <0>;
+			status = "disabled";
+
+			gphy0: ethernet-phy@0 {
+				reg = <0>;
+			};
+
+			gphy1: ethernet-phy@1 {
+				reg = <1>;
+			};
+		};
+
+		switch: switch@18007000 {
+			compatible = "brcm,bcm11360-srab", "brcm,cygnus-srab";
+			reg = <0x18007000 0x1000>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					phy-handle = <&gphy0>;
+					phy-mode = "rgmii";
+				};
+
+				port@1 {
+					reg = <1>;
+					phy-handle = <&gphy1>;
+					phy-mode = "rgmii";
+				};
+
+				port@8 {
+					reg = <8>;
+					label = "cpu";
+					ethernet = <&eth0>;
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+			};
+		};
+
 		i2c0: i2c@18008000 {
 			compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
 			reg = <0x18008000 0x100>;
@@ -295,6 +344,15 @@
 			status = "disabled";
 		};
 
+		eth0: ethernet@18042000 {
+			compatible = "brcm,amac";
+			reg = <0x18042000 0x1000>,
+			      <0x18110000 0x1000>;
+			reg-names = "amac_base", "idm_base";
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
 		nand: nand@18046000 {
 			compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
 			reg = <0x18046000 0x600>, <0xf8105408 0x600>,
@@ -386,6 +444,19 @@
 			status = "disabled";
 		};
 
+		v3d: v3d@180a2000 {
+			compatible = "brcm,cygnus-v3d";
+			reg = <0x180a2000 0x1000>;
+			clocks = <&mipipll BCM_CYGNUS_MIPIPLL_CH2_V3D>;
+			clock-names = "v3d_clk";
+			interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		vc4: gpu {
+			compatible = "brcm,cygnus-vc4";
+		};
+
 		adc: adc@180a6000 {
 			compatible = "brcm,iproc-static-adc";
 			#io-channel-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index fe6cba9..7204d1d 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -413,6 +413,12 @@
 			      <0x3f408 0x04>;
 		};
 
+		thermal: thermal@3f2c0 {
+			compatible = "brcm,ns-thermal";
+			reg = <0x3f2c0 0x10>;
+			#thermal-sensor-cells = <0>;
+		};
+
 		sata_phy: sata_phy@40100 {
 			compatible = "brcm,iproc-nsp-sata-phy";
 			reg = <0x40100 0x340>;
@@ -563,4 +569,24 @@
 			brcm,pcie-msi-inten;
 		};
 	};
+
+	thermal-zones {
+		cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			coefficients = <(-556) 418000>;
+			thermal-sensors = <&thermal>;
+
+			trips {
+				cpu-crit {
+					temperature     = <125000>;
+					hysteresis      = <0>;
+					type            = "critical";
+				};
+			};
+
+			cooling-maps {
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
index cc8b832..79a20d5 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
@@ -12,7 +12,7 @@
 /dts-v1/;
 #include "bcm2835.dtsi"
 #include "bcm2835-rpi.dtsi"
-#include "bcm283x-rpi-usb-host.dtsi"
+#include "bcm283x-rpi-usb-otg.dtsi"
 
 / {
 	compatible = "raspberrypi,model-zero", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index a7b5ce1..e55b362 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -65,13 +65,13 @@
 &sdhci {
 	pinctrl-names = "default";
 	pinctrl-0 = <&emmc_gpio48>;
-	status = "okay";
 	bus-width = <4>;
 };
 
 &sdhost {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdhost_gpio48>;
+	status = "okay";
 	bus-width = <4>;
 };
 
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 0890d97..659b6e9 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -24,6 +24,10 @@
 	};
 };
 
+&cpu_thermal {
+	coefficients = <(-538)	407000>;
+};
+
 /* enable thermal sensor with the correct compatible property set */
 &thermal {
 	compatible = "brcm,bcm2835-thermal";
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 519a44f..da3deeb 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -77,6 +77,10 @@
 	interrupts = <8>;
 };
 
+&cpu_thermal {
+	coefficients = <(-538)	407000>;
+};
+
 /* enable thermal sensor with the correct compatible property set */
 &thermal {
 	compatible = "brcm,bcm2836-thermal";
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
new file mode 100644
index 0000000..c72a27d
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -0,0 +1 @@
+#include "arm64/broadcom/bcm2837-rpi-3-b.dts"
diff --git a/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
new file mode 100644
index 0000000..e7d217c
--- /dev/null
+++ b/arch/arm/boot/dts/bcm283x-rpi-usb-otg.dtsi
@@ -0,0 +1,10 @@
+&usb {
+	dr_mode = "otg";
+	g-rx-fifo-size = <256>;
+	g-np-tx-fifo-size = <32>;
+	/*
+	 * According to dwc2 the sum of all device EP
+	 * fifo sizes shouldn't exceed 3776 bytes.
+	 */
+	g-tx-fifo-size = <256 256 512 512 512 768 768>;
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 561f27d..431dcfc 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
@@ -19,6 +24,26 @@
 		bootargs = "earlyprintk console=ttyAMA0";
 	};
 
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+
+			thermal-sensors = <&thermal>;
+
+			trips {
+				cpu-crit {
+					temperature	= <80000>;
+					hysteresis	= <0>;
+					type		= "critical";
+				};
+			};
+
+			cooling-maps {
+			};
+		};
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -430,6 +455,7 @@
 			compatible = "brcm,bcm2835-thermal";
 			reg = <0x7e212000 0x8>;
 			clocks = <&clocks BCM2835_CLOCK_TSENS>;
+			#thermal-sensor-cells = <0>;
 			status = "disabled";
 		};
 
@@ -568,6 +594,8 @@
 			#size-cells = <0>;
 			clocks = <&clk_usb>;
 			clock-names = "otg";
+			phys = <&usbphy>;
+			phy-names = "usb2-phy";
 		};
 
 		v3d: v3d@7ec00000 {
@@ -603,4 +631,8 @@
 			clock-frequency = <480000000>;
 		};
 	};
+
+	usbphy: phy {
+		compatible = "usb-nop-xceiv";
+	};
 };
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index acee36a..98647d2 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -320,6 +320,14 @@
 		};
 	};
 
+	mdio: mdio@18003000 {
+		compatible = "brcm,iproc-mdio";
+		reg = <0x18003000 0x8>;
+		#size-cells = <1>;
+		#address-cells = <0>;
+		status = "disabled";
+	};
+
 	i2c0: i2c@18009000 {
 		compatible = "brcm,iproc-i2c";
 		reg = <0x18009000 0x50>;
@@ -349,6 +357,12 @@
 				     "sata2";
 	};
 
+	thermal: thermal@1800c2c0 {
+		compatible = "brcm,ns-thermal";
+		reg = <0x1800c2c0 0x10>;
+		#thermal-sensor-cells = <0>;
+	};
+
 	srab: srab@18007000 {
 		compatible = "brcm,bcm5301x-srab";
 		reg = <0x18007000 0x1000>;
@@ -412,4 +426,24 @@
 			status = "disabled";
 		};
 	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <0>;
+			polling-delay = <1000>;
+			coefficients = <(-556) 418000>;
+			thermal-sensors = <&thermal>;
+
+			trips {
+				cpu-crit {
+					temperature	= <125000>;
+					hysteresis	= <0>;
+					type		= "critical";
+				};
+			};
+
+			cooling-maps {
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/bcm911360_entphn.dts b/arch/arm/boot/dts/bcm911360_entphn.dts
index 8b3800f..000f5f1 100644
--- a/arch/arm/boot/dts/bcm911360_entphn.dts
+++ b/arch/arm/boot/dts/bcm911360_entphn.dts
@@ -57,6 +57,26 @@
 	};
 };
 
+&eth0 {
+	status = "okay";
+};
+
+&mdio {
+	status = "okay";
+};
+
+&switch {
+	status = "okay";
+};
+
+&v3d {
+	assigned-clocks =
+		<&mipipll BCM_CYGNUS_MIPIPLL>,
+		<&mipipll BCM_CYGNUS_MIPIPLL_CH2_V3D>;
+	assigned-clock-rates = <525000000>, <300000000>;
+	status = "okay";
+};
+
 &uart3 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 8d244cd..a423e8e 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -82,6 +82,8 @@
 			tca6416: gpio@20 {
 				compatible = "ti,tca6416";
 				reg = <0x20>;
+				gpio-controller;
+				#gpio-cells = <2>;
 			};
 		};
 		wdt: wdt@21000 {
diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
index 512604a..45983c0 100644
--- a/arch/arm/boot/dts/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/da850-lego-ev3.dts
@@ -172,6 +172,18 @@
 		gpio = <&gpio 111 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	/*
+	 * The EV3 can use 6-AA batteries or a rechargeable Li-ion battery pack.
+	 */
+	battery {
+		pinctrl-names = "default";
+	 * pinctrl-0 = <&battery_pins>;
+		compatible = "lego,ev3-battery";
+		io-channels = <&adc 4>, <&adc 3>;
+		io-channel-names = "voltage", "current";
+		rechargeable-gpios = <&gpio 136 GPIO_ACTIVE_LOW>;
+	};
 };
 
 &pmx_core {
@@ -228,6 +240,15 @@
 			0x34 0x00000008 0x0000000f
 		>;
 	};
+
+	battery_pins: pinmux_battery_pins {
+		pinctrl-single,bits = <
+			/* GP0[6] */
+			0x04 0x00000080 0x000000f0
+			/* GP8[8] */
+			0x4c 0x00000080 0x000000f0
+		>;
+	};
 };
 
 &pinconf {
@@ -342,6 +363,13 @@
 
 &gpio {
 	status = "okay";
+
+	/* Don't pull down battery voltage adc io channel */
+	batt_volt_en {
+		gpio-hog;
+		gpios = <6 GPIO_ACTIVE_HIGH>;
+		output-low;
+	};
 };
 
 &usb_phy {
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 941d455..af68ef7 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -446,13 +446,38 @@
 		};
 		usb0: usb@200000 {
 			compatible = "ti,da830-musb";
-			reg = <0x200000 0x10000>;
+			reg = <0x200000 0x1000>;
+			ranges;
 			interrupts = <58>;
 			interrupt-names = "mc";
 			dr_mode = "otg";
 			phys = <&usb_phy 0>;
 			phy-names = "usb-phy";
 			status = "disabled";
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+				&cppi41dma 2 0 &cppi41dma 3 0
+				&cppi41dma 0 1 &cppi41dma 1 1
+				&cppi41dma 2 1 &cppi41dma 3 1>;
+			dma-names =
+				"rx1", "rx2", "rx3", "rx4",
+				"tx1", "tx2", "tx3", "tx4";
+
+			cppi41dma: dma-controller@201000 {
+				compatible = "ti,da830-cppi41";
+				reg =  <0x201000 0x1000
+					0x202000 0x1000
+					0x204000 0x4000>;
+				reg-names = "controller",
+					    "scheduler", "queuemgr";
+				interrupts = <58>;
+				#dma-cells = <2>;
+				#dma-channels = <4>;
+				status = "okay";
+			};
 		};
 		sata: sata@218000 {
 			compatible = "ti,da850-ahci";
@@ -503,6 +528,8 @@
 			ti,ngpio = <144>;
 			ti,davinci-gpio-unbanked = <0>;
 			status = "disabled";
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};
 		pinconf: pin-controller@22c00c {
 			compatible = "ti,da850-pupd";
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 31a9e06..f47fc4d 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -529,7 +529,8 @@
 };
 
 &usb1 {
-	dr_mode = "peripheral";
+	dr_mode = "otg";
+	extcon = <&extcon_usb1>;
 };
 
 &usb2 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index e714466..0f0f6f5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -99,14 +99,14 @@
 		compatible = "operating-points-v2-ti-cpu";
 		syscon = <&scm_wkup>;
 
-		opp_nom@1000000000 {
+		opp_nom-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			opp-microvolt = <1060000 850000 1150000>;
 			opp-supported-hw = <0xFF 0x01>;
 			opp-suspend;
 		};
 
-		opp_od@1176000000 {
+		opp_od-1176000000 {
 			opp-hz = /bits/ 64 <1176000000>;
 			opp-microvolt = <1160000 885000 1160000>;
 			opp-supported-hw = <0xFF 0x02>;
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index ad24544..8578054 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -392,7 +392,8 @@
 };
 
 &usb1 {
-	dr_mode = "peripheral";
+	dr_mode = "otg";
+	extcon = <&extcon_usb1>;
 };
 
 &usb2 {
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 3330738e..cf229df 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -338,6 +338,8 @@
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
 		reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
+		assigned-clocks = <&dpll_dsp_ck>;
+		assigned-clock-rates = <600000000>;
 	};
 
 	dpll_dsp_m2_ck: dpll_dsp_m2_ck@244 {
@@ -349,6 +351,8 @@
 		reg = <0x0244>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_dsp_m2_ck>;
+		assigned-clock-rates = <600000000>;
 	};
 
 	iva_dpll_hs_clk_div: iva_dpll_hs_clk_div {
@@ -372,6 +376,8 @@
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+		assigned-clocks = <&dpll_iva_ck>;
+		assigned-clock-rates = <1165000000>;
 	};
 
 	dpll_iva_m2_ck: dpll_iva_m2_ck@1b0 {
@@ -383,6 +389,8 @@
 		reg = <0x01b0>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_iva_m2_ck>;
+		assigned-clock-rates = <388333334>;
 	};
 
 	iva_dclk: iva_dclk {
@@ -406,6 +414,8 @@
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
 		reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
+		assigned-clocks = <&dpll_gpu_ck>;
+		assigned-clock-rates = <1277000000>;
 	};
 
 	dpll_gpu_m2_ck: dpll_gpu_m2_ck@2e8 {
@@ -417,6 +427,8 @@
 		reg = <0x02e8>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_gpu_m2_ck>;
+		assigned-clock-rates = <425666667>;
 	};
 
 	dpll_core_m2_ck: dpll_core_m2_ck@130 {
@@ -659,6 +671,8 @@
 		reg = <0x0248>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_dsp_m3x2_ck>;
+		assigned-clock-rates = <400000000>;
 	};
 
 	dpll_gmac_x2_ck: dpll_gmac_x2_ck {
@@ -791,6 +805,8 @@
 		clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
 		ti,bit-shift = <24>;
 		reg = <0x0520>;
+		assigned-clocks = <&ipu1_gfclk_mux>;
+		assigned-clock-parents = <&dpll_core_h22x2_ck>;
 	};
 
 	mcasp1_ahclkr_mux: mcasp1_ahclkr_mux@550 {
@@ -1748,6 +1764,8 @@
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
 		ti,bit-shift = <24>;
 		reg = <0x1220>;
+		assigned-clocks = <&gpu_core_gclk_mux>;
+		assigned-clock-parents = <&dpll_gpu_m2_ck>;
 	};
 
 	gpu_hyd_gclk_mux: gpu_hyd_gclk_mux@1220 {
@@ -1756,6 +1774,8 @@
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
 		ti,bit-shift = <26>;
 		reg = <0x1220>;
+		assigned-clocks = <&gpu_hyd_gclk_mux>;
+		assigned-clock-parents = <&dpll_gpu_m2_ck>;
 	};
 
 	l3instr_ts_gclk_div: l3instr_ts_gclk_div@e50 {
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index 0124faf..42ea246 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -197,7 +197,7 @@
 		clock-names = "sclk";
 	};
 
-	pfc: pfc@e0140200 {
+	pfc: pin-controller@e0140200 {
 		compatible = "renesas,pfc-emev2";
 		reg = <0xe0140200 0x100>;
 	};
diff --git a/arch/arm/boot/dts/ethernut5.dts b/arch/arm/boot/dts/ethernut5.dts
index 4687229..123b203 100644
--- a/arch/arm/boot/dts/ethernut5.dts
+++ b/arch/arm/boot/dts/ethernut5.dts
@@ -55,25 +55,38 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			gpios = <0
-				 &pioC 14 GPIO_ACTIVE_HIGH
-				 0
-				>;
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs>;
+				pinctrl-names = "default";
 
-			root@0 {
-				label = "root";
-				reg = <0x0 0x08000000>;
-			};
+				nand: nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			data@20000 {
-				label = "data";
-				reg = <0x08000000 0x38000000>;
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						root@0 {
+							label = "root";
+							reg = <0x0 0x08000000>;
+						};
+
+						data@20000 {
+							label = "data";
+							reg = <0x08000000 0x38000000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index cccfe4b..accee81 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -67,7 +67,7 @@
 		max77836: subpmic@25 {
 			compatible = "maxim,max77836";
 			interrupt-parent = <&gpx1>;
-			interrupts = <5 0>;
+			interrupts = <5 IRQ_TYPE_NONE>;
 			reg = <0x25>;
 			wakeup-source;
 
@@ -191,7 +191,7 @@
 	s2mps14_pmic@66 {
 		compatible = "samsung,s2mps14-pmic";
 		interrupt-parent = <&gpx0>;
-		interrupts = <7 0>;
+		interrupts = <7 IRQ_TYPE_NONE>;
 		reg = <0x66>;
 		wakeup-source;
 
@@ -414,7 +414,7 @@
 	fuelgauge@36 {
 		compatible = "maxim,max77836-battery";
 		interrupt-parent = <&gpx1>;
-		interrupts = <2 8>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 		reg = <0x36>;
 	};
 };
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index c9f191c..443e0c9 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -58,7 +58,7 @@
 		max77836: subpmic@25 {
 			compatible = "maxim,max77836";
 			interrupt-parent = <&gpx1>;
-			interrupts = <5 0>;
+			interrupts = <5 IRQ_TYPE_NONE>;
 			reg = <0x25>;
 			wakeup-source;
 
@@ -240,7 +240,7 @@
 		reg = <0>;
 		vdd3-supply = <&ldo16_reg>;
 		vci-supply = <&ldo20_reg>;
-		reset-gpios = <&gpe0 1 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
 		te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
 		power-on-delay= <30>;
 		power-off-delay= <120>;
@@ -295,7 +295,7 @@
 	s2mps14_pmic@66 {
 		compatible = "samsung,s2mps14-pmic";
 		interrupt-parent = <&gpx0>;
-		interrupts = <7 0>;
+		interrupts = <7 IRQ_TYPE_NONE>;
 		reg = <0x66>;
 		wakeup-source;
 
@@ -626,7 +626,7 @@
 	fuelgauge@36 {
 		compatible = "maxim,max77836-battery";
 		interrupt-parent = <&gpx1>;
-		interrupts = <2 8>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 		reg = <0x36>;
 	};
 };
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index 312650e..084fcc5 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -151,7 +151,7 @@
 		compatible = "maxim,max8997-pmic";
 		reg = <0x66>;
 		interrupt-parent = <&gpx0>;
-		interrupts = <4 0>, <3 0>;
+		interrupts = <4 IRQ_TYPE_NONE>, <3 IRQ_TYPE_NONE>;
 
 		max8997,pmic-buck1-dvs-voltage = <1350000>;
 		max8997,pmic-buck2-dvs-voltage = <1100000>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 1743ca8..645feff 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -281,7 +281,7 @@
 		compatible = "melfas,mms114";
 		reg = <0x48>;
 		interrupt-parent = <&gpx0>;
-		interrupts = <4 2>;
+		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
 		x-size = <720>;
 		y-size = <1280>;
 		avdd-supply = <&tsp_reg>;
@@ -302,7 +302,7 @@
 
 		reg = <0x66>;
 		interrupt-parent = <&gpx0>;
-		interrupts = <7 0>;
+		interrupts = <7 IRQ_TYPE_NONE>;
 
 		max8997,pmic-buck1-uses-gpio-dvs;
 		max8997,pmic-buck2-uses-gpio-dvs;
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 0f1ff79..219d587 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -32,7 +32,7 @@
 
 		power_key {
 			interrupt-parent = <&gpx1>;
-			interrupts = <3 0>;
+			interrupts = <3 IRQ_TYPE_NONE>;
 			gpios = <&gpx1 3 GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			label = "power key";
@@ -266,7 +266,7 @@
 	max77686: pmic@09 {
 		compatible = "maxim,max77686";
 		interrupt-parent = <&gpx3>;
-		interrupts = <2 0>;
+		interrupts = <2 IRQ_TYPE_NONE>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&max77686_irq>;
 		reg = <0x09>;
@@ -484,7 +484,7 @@
 		compatible = "maxim,max98090";
 		reg = <0x10>;
 		interrupt-parent = <&gpx0>;
-		interrupts = <0 0>;
+		interrupts = <0 IRQ_TYPE_NONE>;
 		clocks = <&i2s0 CLK_I2S_CDCLK>;
 		clock-names = "mclk";
 		#sound-dai-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index 7504a5a..44a4de0 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -78,6 +78,10 @@
 	regulator-max-microvolt = <3300000>;
 };
 
+&hdmicec {
+	needs-hpd;
+};
+
 /* VDDQ for MSHC (eMMC card) */
 &ldo22_reg {
 	regulator-name = "LDO22_VDDQ_MMC4_2.8V";
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index 46b931e..9788226 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -44,7 +44,7 @@
 
 		home_key {
 			interrupt-parent = <&gpx2>;
-			interrupts = <2 0>;
+			interrupts = <2 IRQ_TYPE_NONE>;
 			gpios = <&gpx2 2 GPIO_ACTIVE_HIGH>;
 			linux,code = <KEY_HOME>;
 			label = "home key";
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 82221a0..35e9b94 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -144,7 +144,7 @@
 		max77693@66 {
 			compatible = "maxim,max77693";
 			interrupt-parent = <&gpx1>;
-			interrupts = <5 2>;
+			interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
 			reg = <0x66>;
 
 			regulators {
@@ -224,7 +224,7 @@
 			compatible = "capella,cm36651";
 			reg = <0x18>;
 			interrupt-parent = <&gpx0>;
-			interrupts = <2 2>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
 			vled-supply = <&ps_als_reg>;
 		};
 	};
@@ -573,7 +573,7 @@
 		compatible = "melfas,mms114";
 		reg = <0x48>;
 		interrupt-parent = <&gpm2>;
-		interrupts = <3 2>;
+		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
 		x-size = <720>;
 		y-size = <1280>;
 		avdd-supply = <&ldo23_reg>;
@@ -611,7 +611,7 @@
 	max77686: max77686_pmic@09 {
 		compatible = "maxim,max77686";
 		interrupt-parent = <&gpx0>;
-		interrupts = <7 0>;
+		interrupts = <7 IRQ_TYPE_NONE>;
 		reg = <0x09>;
 		#clock-cells = <1>;
 
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index b74c537..66d2252 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -55,38 +55,38 @@
 			interrupt-controller;
 			samsung,combiner-nr = <32>;
 			reg = <0x10440000 0x1000>;
-			interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 1 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 2 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 3 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 4 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 5 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 6 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 7 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 8 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 9 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 10 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 11 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 12 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 13 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 14 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 15 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 16 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 17 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 18 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 19 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 20 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 21 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 22 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 23 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 24 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 25 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 26 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 27 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 28 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 29 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 30 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 31 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		gic: interrupt-controller@10481000 {
@@ -109,31 +109,31 @@
 		serial_0: serial@12C00000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C00000 0x100>;
-			interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_1: serial@12C10000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C10000 0x100>;
-			interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_2: serial@12C20000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C20000 0x100>;
-			interrupts = <0 53 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_3: serial@12C30000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C30000 0x100>;
-			interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		i2c_0: i2c@12C60000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C60000 0x100>;
-			interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -143,7 +143,7 @@
 		i2c_1: i2c@12C70000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C70000 0x100>;
-			interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -153,7 +153,7 @@
 		i2c_2: i2c@12C80000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C80000 0x100>;
-			interrupts = <0 58 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -163,7 +163,7 @@
 		i2c_3: i2c@12C90000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C90000 0x100>;
-			interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -180,8 +180,8 @@
 		rtc: rtc@101E0000 {
 			compatible = "samsung,s3c6410-rtc";
 			reg = <0x101E0000 0x100>;
-			interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>,
-				     <0 44 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 6098dacd..6a43246 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -14,7 +14,6 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
 #include "exynos5250.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Insignal Arndale evaluation board based on EXYNOS5250";
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index 2f6ab32..1fd122d 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -589,6 +589,13 @@
 		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
 		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
 	};
+
+	hdmi_cec: hdmi-cec {
+		samsung,pins = "gpx3-6";
+		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+	};
 };
 
 &pinctrl_1 {
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index a97a785..6632f65 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -13,7 +13,6 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include "exynos5250.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "SAMSUNG SMDK5250 board based on EXYNOS5250";
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index 8f3a804..e1d293db 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -272,6 +272,10 @@
 	vdd_pll-supply = <&ldo8_reg>;
 };
 
+&hdmicec {
+	status = "okay";
+};
+
 &i2c_0 {
 	status = "okay";
 	samsung,i2c-sda-delay = <100>;
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 4d7bdb7..95c3bca 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -14,7 +14,6 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
 #include "exynos5250.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Google Spring";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 79c9c88..8dbeb87 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -153,10 +153,10 @@
 
 		timer {
 			compatible = "arm,armv7-timer";
-			interrupts = <1 13 0xf08>,
-				     <1 14 0xf08>,
-				     <1 11 0xf08>,
-				     <1 10 0xf08>;
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 			/*
 			 * Unfortunately we need this since some versions
 			 * of U-Boot on Exynos don't set the CNTFRQ register,
@@ -689,6 +689,19 @@
 			samsung,syscon-phandle = <&pmu_system_controller>;
 		};
 
+		hdmicec: cec@101B0000 {
+			compatible = "samsung,s5p-cec";
+			reg = <0x101B0000 0x200>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clock CLK_HDMI_CEC>;
+			clock-names = "hdmicec";
+			samsung,syscon-phandle = <&pmu_system_controller>;
+			hdmi-phandle = <&hdmi>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hdmi_cec>;
+			status = "disabled";
+		};
+
 		mixer@14450000 {
 			compatible = "samsung,exynos5250-mixer";
 			reg = <0x14450000 0x10000>;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 9cc83c5..ee1bb9b 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -16,7 +16,6 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/clock/samsung,s2mps11.h>
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Insignal Arndale Octa evaluation board based on EXYNOS5420";
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 1f964ec..f9a75bf 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -16,7 +16,6 @@
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5420.dtsi"
 #include "exynos5420-cpus.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Google Peach Pit Rev 6+";
@@ -604,7 +603,7 @@
 	max98090: codec@10 {
 		compatible = "maxim,max98090";
 		reg = <0x10>;
-		interrupts = <2 0>;
+		interrupts = <2 IRQ_TYPE_NONE>;
 		interrupt-parent = <&gpx0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&max98090_irq>;
@@ -945,7 +944,7 @@
 	cros_ec: cros-ec@0 {
 		compatible = "google,cros-ec-spi";
 		interrupt-parent = <&gpx1>;
-		interrupts = <5 0>;
+		interrupts = <5 IRQ_TYPE_NONE>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&ec_spi_cs &ec_irq>;
 		reg = <0>;
diff --git a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
index 3924b4f..65aa0e3 100644
--- a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
@@ -67,6 +67,13 @@
 		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
 		samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
 	};
+
+	hdmi_cec: hdmi-cec {
+		samsung,pins = "gpx3-6";
+		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+		samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
+	};
 };
 
 &pinctrl_1 {
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index aaccd0d..08c8ab17 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -13,7 +13,6 @@
 #include "exynos5420.dtsi"
 #include "exynos5420-cpus.dtsi"
 #include <dt-bindings/gpio/gpio.h>
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Samsung SMDK5420 board based on EXYNOS5420";
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 0db0bcf..02d2f89 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -193,7 +193,7 @@
 		mfc: codec@11000000 {
 			compatible = "samsung,mfc-v7";
 			reg = <0x11000000 0x10000>;
-			interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MFC>;
 			clock-names = "mfc";
 			power-domains = <&mfc_pd>;
@@ -203,7 +203,7 @@
 
 		mmc_0: mmc@12200000 {
 			compatible = "samsung,exynos5420-dw-mshc-smu";
-			interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12200000 0x2000>;
@@ -215,7 +215,7 @@
 
 		mmc_1: mmc@12210000 {
 			compatible = "samsung,exynos5420-dw-mshc-smu";
-			interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12210000 0x2000>;
@@ -227,7 +227,7 @@
 
 		mmc_2: mmc@12220000 {
 			compatible = "samsung,exynos5420-dw-mshc";
-			interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12220000 0x1000>;
@@ -325,37 +325,37 @@
 		pinctrl_0: pinctrl@13400000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x13400000 0x1000>;
-			interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_1: pinctrl@13410000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x13410000 0x1000>;
-			interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_2: pinctrl@14000000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x14000000 0x1000>;
-			interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_3: pinctrl@14010000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x14010000 0x1000>;
-			interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_4: pinctrl@03860000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x03860000 0x1000>;
-			interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		amba {
@@ -368,7 +368,7 @@
 			adma: adma@03880000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x03880000 0x1000>;
-				interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock_audss EXYNOS_ADMA>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -379,7 +379,7 @@
 			pdma0: pdma@121A0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121A0000 0x1000>;
-				interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -390,7 +390,7 @@
 			pdma1: pdma@121B0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121B0000 0x1000>;
-				interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -401,7 +401,7 @@
 			mdma0: mdma@10800000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x10800000 0x1000>;
-				interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -412,7 +412,7 @@
 			mdma1: mdma@11C10000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x11C10000 0x1000>;
-				interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -484,7 +484,7 @@
 		spi_0: spi@12d20000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d20000 0x100>;
-			interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 5
 				&pdma0 4>;
 			dma-names = "tx", "rx";
@@ -500,7 +500,7 @@
 		spi_1: spi@12d30000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d30000 0x100>;
-			interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma1 5
 				&pdma1 4>;
 			dma-names = "tx", "rx";
@@ -516,7 +516,7 @@
 		spi_2: spi@12d40000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d40000 0x100>;
-			interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 7
 				&pdma0 6>;
 			dma-names = "tx", "rx";
@@ -544,7 +544,7 @@
 		dsi@14500000 {
 			compatible = "samsung,exynos5410-mipi-dsi";
 			reg = <0x14500000 0x10000>;
-			interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
 			phys = <&mipi_phy 1>;
 			phy-names = "dsim";
 			clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
@@ -557,7 +557,7 @@
 		adc: adc@12D10000 {
 			compatible = "samsung,exynos-adc-v2";
 			reg = <0x12D10000 0x100>;
-			interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TSADC>;
 			clock-names = "adc";
 			#io-channel-cells = <1>;
@@ -569,7 +569,7 @@
 		hsi2c_8: i2c@12E00000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E00000 0x1000>;
-			interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -582,7 +582,7 @@
 		hsi2c_9: i2c@12E10000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E10000 0x1000>;
-			interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -595,7 +595,7 @@
 		hsi2c_10: i2c@12E20000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E20000 0x1000>;
-			interrupts = <0 203 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -608,7 +608,7 @@
 		hdmi: hdmi@14530000 {
 			compatible = "samsung,exynos5420-hdmi";
 			reg = <0x14530000 0x70000>;
-			interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
 				 <&clock CLK_DOUT_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
 				 <&clock CLK_MOUT_HDMI>;
@@ -624,10 +624,23 @@
 			reg = <0x145D0000 0x20>;
 		};
 
+		hdmicec: cec@101B0000 {
+			compatible = "samsung,s5p-cec";
+			reg = <0x101B0000 0x200>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clock CLK_HDMI_CEC>;
+			clock-names = "hdmicec";
+			samsung,syscon-phandle = <&pmu_system_controller>;
+			hdmi-phandle = <&hdmi>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hdmi_cec>;
+			status = "disabled";
+		};
+
 		mixer: mixer@14450000 {
 			compatible = "samsung,exynos5420-mixer";
 			reg = <0x14450000 0x10000>;
-			interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
 				 <&clock CLK_SCLK_HDMI>;
 			clock-names = "mixer", "hdmi", "sclk_hdmi";
@@ -638,7 +651,7 @@
 		rotator: rotator@11C00000 {
 			compatible = "samsung,exynos5250-rotator";
 			reg = <0x11C00000 0x64>;
-			interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_ROTATOR>;
 			clock-names = "rotator";
 			iommus = <&sysmmu_rotator>;
@@ -647,7 +660,7 @@
 		gsc_0: video-scaler@13e00000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e00000 0x1000>;
-			interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_GSCL0>;
 			clock-names = "gscl";
 			power-domains = <&gsc_pd>;
@@ -657,7 +670,7 @@
 		gsc_1: video-scaler@13e10000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e10000 0x1000>;
-			interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_GSCL1>;
 			clock-names = "gscl";
 			power-domains = <&gsc_pd>;
@@ -667,7 +680,7 @@
 		jpeg_0: jpeg@11F50000 {
 			compatible = "samsung,exynos5420-jpeg";
 			reg = <0x11F50000 0x1000>;
-			interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "jpeg";
 			clocks = <&clock CLK_JPEG>;
 			iommus = <&sysmmu_jpeg0>;
@@ -676,7 +689,7 @@
 		jpeg_1: jpeg@11F60000 {
 			compatible = "samsung,exynos5420-jpeg";
 			reg = <0x11F60000 0x1000>;
-			interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "jpeg";
 			clocks = <&clock CLK_JPEG2>;
 			iommus = <&sysmmu_jpeg1>;
@@ -696,7 +709,7 @@
 		tmu_cpu0: tmu@10060000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10060000 0x100>;
-			interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos5420-tmu-sensor-conf.dtsi"
@@ -705,7 +718,7 @@
 		tmu_cpu1: tmu@10064000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10064000 0x100>;
-			interrupts = <0 183 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos5420-tmu-sensor-conf.dtsi"
@@ -714,7 +727,7 @@
 		tmu_cpu2: tmu@10068000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x10068000 0x100>, <0x1006c000 0x4>;
-			interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos5420-tmu-sensor-conf.dtsi"
@@ -723,7 +736,7 @@
 		tmu_cpu3: tmu@1006c000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x1006c000 0x100>, <0x100a0000 0x4>;
-			interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos5420-tmu-sensor-conf.dtsi"
@@ -732,7 +745,7 @@
 		tmu_gpu: tmu@100a0000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x100a0000 0x100>, <0x10068000 0x4>;
-			interrupts = <0 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos5420-tmu-sensor-conf.dtsi"
@@ -804,7 +817,7 @@
 		sysmmu_scaler1r: sysmmu@0x12890000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x12890000 0x1000>;
-			interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
 			#iommu-cells = <0>;
@@ -813,7 +826,7 @@
 		sysmmu_scaler2r: sysmmu@0x128A0000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x128A0000 0x1000>;
-			interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
 			#iommu-cells = <0>;
@@ -872,7 +885,7 @@
 		sysmmu_jpeg1: sysmmu@0x11F20000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x11F20000 0x1000>;
-			interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_JPEG2>, <&clock CLK_JPEG2>;
 			#iommu-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index 9493923..c0b8598 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -49,7 +49,7 @@
 		compatible = "maxim,max98090";
 		reg = <0x10>;
 		interrupt-parent = <&gpx3>;
-		interrupts = <2 0>;
+		interrupts = <2 IRQ_TYPE_NONE>;
 		clocks = <&i2s0 CLK_I2S_CDCLK>;
 		clock-names = "mclk";
 		#sound-dai-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 05b9afdd..f92f957 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -18,7 +18,6 @@
 #include <dt-bindings/sound/samsung-i2s.h>
 #include "exynos5800.dtsi"
 #include "exynos5422-cpus.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	memory@40000000 {
@@ -265,6 +264,10 @@
 	vdd-supply = <&ldo6_reg>;
 };
 
+&hdmicec {
+	status = "okay";
+};
+
 &hsi2c_4 {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index a4ea018..bc4954e 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -200,7 +200,7 @@
 		compatible = "snps,dwmac-3.70a", "snps,dwmac";
 		reg = <0x00230000 0x8000>;
 		interrupt-parent = <&gic>;
-		interrupts = <GIC_SPI 31 4>;
+		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "macirq";
 		phy-mode = "sgmii";
 		clocks = <&clock CLK_GMAC0>;
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index f9ff7f0..953dc86 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -16,7 +16,6 @@
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5800.dtsi"
 #include "exynos5420-cpus.dtsi"
-#include "exynos-mfc-reserved-memory.dtsi"
 
 / {
 	model = "Google Peach Pi Rev 10+";
@@ -604,7 +603,7 @@
 	max98091: codec@10 {
 		compatible = "maxim,max98091";
 		reg = <0x10>;
-		interrupts = <2 0>;
+		interrupts = <2 IRQ_TYPE_NONE>;
 		interrupt-parent = <&gpx0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&max98091_irq>;
@@ -914,7 +913,7 @@
 	cros_ec: cros-ec@0 {
 		compatible = "google,cros-ec-spi";
 		interrupt-parent = <&gpx1>;
-		interrupts = <5 0>;
+		interrupts = <5 IRQ_TYPE_NONE>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&ec_spi_cs &ec_irq>;
 		reg = <0>;
diff --git a/arch/arm/boot/dts/ge863-pro3.dtsi b/arch/arm/boot/dts/ge863-pro3.dtsi
index 4aee5cc..8613944 100644
--- a/arch/arm/boot/dts/ge863-pro3.dtsi
+++ b/arch/arm/boot/dts/ge863-pro3.dtsi
@@ -23,20 +23,39 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			boot@0 {
-				label = "boot";
-				reg = <0x0 0x7c0000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			root@07c0000 {
-				label = "root";
-				reg = <0x7c0000 0x7840000>;
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
+
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
+
+						boot@0 {
+							label = "boot";
+							reg = <0x0 0x7c0000>;
+						};
+
+						root@07c0000 {
+							label = "root";
+							reg = <0x7c0000 0x7840000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 7668ba5..55f6a4f 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -98,5 +98,15 @@
 				read-only;
 			};
 		};
+
+		sata: sata@46000000 {
+			cortina,gemini-ata-muxmode = <0>;
+			cortina,gemini-enable-sata-bridge;
+			status = "okay";
+		};
+
+		ata@63000000 {
+			status = "okay";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts
index 46309e7..4d200f0 100644
--- a/arch/arm/boot/dts/gemini-sq201.dts
+++ b/arch/arm/boot/dts/gemini-sq201.dts
@@ -93,6 +93,12 @@
 			};
 		};
 
+		sata: sata@46000000 {
+			cortina,gemini-ata-muxmode = <0>;
+			cortina,gemini-enable-sata-bridge;
+			status = "okay";
+		};
+
 		pci@50000000 {
 			status = "okay";
 			interrupt-map-mask = <0xf800 0 0 7>;
@@ -114,5 +120,9 @@
 				<0x6000 0 0 3 &pci_intc 1>,
 				<0x6000 0 0 4 &pci_intc 2>;
 		};
+
+		ata@63000000 {
+			status = "okay";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index b8d011b..141d8d3 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -25,8 +25,11 @@
 		};
 
 		syscon: syscon@40000000 {
-			compatible = "cortina,gemini-syscon", "syscon", "simple-mfd";
+			compatible = "cortina,gemini-syscon",
+				     "syscon", "simple-mfd";
 			reg = <0x40000000 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
 
 			syscon-reboot {
 				compatible = "syscon-reboot";
@@ -42,23 +45,30 @@
 			compatible = "cortina,gemini-watchdog";
 			reg = <0x41000000 0x1000>;
 			interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&syscon 23>;
+			clocks = <&syscon 2>;
 		};
 
 		uart0: serial@42000000 {
 			compatible = "ns16550a";
 			reg = <0x42000000 0x100>;
-			clock-frequency = <48000000>;
+			resets = <&syscon 18>;
+			clocks = <&syscon 6>;
 			interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 		};
 
 		timer@43000000 {
-			compatible = "cortina,gemini-timer";
+			compatible = "faraday,fttmr010";
 			reg = <0x43000000 0x1000>;
 			interrupt-parent = <&intcon>;
 			interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */
 				     <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */
 				     <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */
+			resets = <&syscon 17>;
+			/* APB clock or RTC clock */
+			clocks = <&syscon 2>, <&syscon 0>;
+			clock-names = "PCLK", "EXTCLK";
 			syscon = <&syscon>;
 		};
 
@@ -66,11 +76,28 @@
 			compatible = "cortina,gemini-rtc";
 			reg = <0x45000000 0x100>;
 			interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&syscon 16>;
+			clocks = <&syscon 2>, <&syscon 0>;
+			clock-names = "PCLK", "EXTCLK";
+		};
+
+		sata: sata@46000000 {
+			compatible = "cortina,gemini-sata-bridge";
+			reg = <0x46000000 0x100>;
+			resets = <&syscon 26>,
+				 <&syscon 27>;
+			reset-names = "sata0", "sata1";
+			clocks = <&syscon 10>,
+				 <&syscon 11>;
+			clock-names = "SATA0_PCLK", "SATA1_PCLK";
+			syscon = <&syscon>;
+			status = "disabled";
 		};
 
 		intcon: interrupt-controller@48000000 {
 			compatible = "faraday,ftintc010";
 			reg = <0x48000000 0x1000>;
+			resets = <&syscon 14>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 		};
@@ -85,6 +112,8 @@
 			compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
 			reg = <0x4d000000 0x100>;
 			interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&syscon 20>;
+			clocks = <&syscon 2>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -95,6 +124,8 @@
 			compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
 			reg = <0x4e000000 0x100>;
 			interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&syscon 21>;
+			clocks = <&syscon 2>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -105,6 +136,8 @@
 			compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
 			reg = <0x4f000000 0x100>;
 			interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&syscon 22>;
+			clocks = <&syscon 2>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -118,6 +151,9 @@
 			 * to configure the host bridge.
 			 */
 			reg = <0x50000000 0x100>;
+			resets = <&syscon 7>;
+			clocks = <&syscon 15>, <&syscon 4>;
+			clock-names = "PCLK", "PCICLK";
 			#address-cells = <3>;
 			#size-cells = <2>;
 			#interrupt-cells = <1>;
@@ -152,5 +188,44 @@
 				#interrupt-cells = <1>;
 			};
 		};
+
+		ata@63000000 {
+			compatible = "cortina,gemini-pata", "faraday,ftide010";
+			reg = <0x63000000 0x1000>;
+			interrupts = <4 IRQ_TYPE_EDGE_RISING>;
+			resets = <&syscon 2>;
+			clocks = <&syscon 14>;
+			clock-names = "PCLK";
+			sata = <&sata>;
+			status = "disabled";
+		};
+
+		ata@63400000 {
+			compatible = "cortina,gemini-pata", "faraday,ftide010";
+			reg = <0x63400000 0x1000>;
+			interrupts = <5 IRQ_TYPE_EDGE_RISING>;
+			resets = <&syscon 2>;
+			clocks = <&syscon 14>;
+			clock-names = "PCLK";
+			sata = <&sata>;
+			status = "disabled";
+		};
+
+		dma-controller@67000000 {
+			compatible = "faraday,ftdma020", "arm,pl080", "arm,primecell";
+			/* Faraday Technology FTDMAC020 variant */
+			arm,primecell-periphid = <0x0003b080>;
+			reg = <0x67000000 0x1000>;
+			interrupts = <9 IRQ_TYPE_EDGE_RISING>;
+			resets = <&syscon 10>;
+			clocks = <&syscon 1>;
+			clock-names = "apb_pclk";
+			/* Bus interface AHB1 (AHB0) is totally tilted */
+			lli-bus-interface-ahb2;
+			mem-bus-interface-ahb2;
+			memcpy-burst-size = <256>;
+			memcpy-bus-width = <32>;
+			#dma-cells = <2>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx23-sansa.dts b/arch/arm/boot/dts/imx23-sansa.dts
index 4ec32f4..221fd55 100644
--- a/arch/arm/boot/dts/imx23-sansa.dts
+++ b/arch/arm/boot/dts/imx23-sansa.dts
@@ -42,6 +42,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx23.dtsi"
 
 / {
@@ -149,9 +150,8 @@
 		regulator-name = "vdd-touchpad0";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		gpio = <&gpio0 26 0>;
+		gpio = <&gpio0 26 GPIO_ACTIVE_LOW>;
 		regulator-always-on;
-		enable-active-low;
 	};
 
 	reg_vdd_tuner: regulator-vdd-tuner0 {
@@ -159,9 +159,8 @@
 		regulator-name = "vdd-tuner0";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		gpio = <&gpio0 29 0>;
+		gpio = <&gpio0 29 GPIO_ACTIVE_LOW>;
 		regulator-always-on;
-		enable-active-low;
 	};
 
 	backlight {
diff --git a/arch/arm/boot/dts/imx25-karo-tx25.dts b/arch/arm/boot/dts/imx25-karo-tx25.dts
index 9b31faa..30a62d4 100644
--- a/arch/arm/boot/dts/imx25-karo-tx25.dts
+++ b/arch/arm/boot/dts/imx25-karo-tx25.dts
@@ -97,7 +97,7 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
-	phy-reset-gpios = <&gpio3 7 0>;
+	phy-reset-gpios = <&gpio3 7 GPIO_ACTIVE_LOW>;
 	phy-mode = "rmii";
 	phy-supply = <&reg_fec_phy>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index d921dd2..c526928 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -125,7 +125,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-supply = <&reg_fec_3v3>;
-	phy-reset-gpios = <&gpio4 8 0>;
+	phy-reset-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 0cdf333..dfcc8e0 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -9,6 +9,7 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include "imx25-pinfunc.h"
 
 / {
diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi
index 4f3e0f4..0b8490b 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi
+++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-som.dtsi
@@ -40,7 +40,7 @@
 	status = "okay";
 
 	at24@52 {
-		compatible = "at,24c32";
+		compatible = "atmel,24c32";
 		pagesize = <32>;
 		reg = <0x52>;
 	};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
index 82fec93..c973c5d 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx27-phytec-phycore-som.dtsi
@@ -193,7 +193,7 @@
 	status = "okay";
 
 	at24@52 {
-		compatible = "at,24c32";
+		compatible = "atmel,24c32";
 		pagesize = <32>;
 		reg = <0x52>;
 	};
diff --git a/arch/arm/boot/dts/imx28-cfa10037.dts b/arch/arm/boot/dts/imx28-cfa10037.dts
index e5beaa5..3d13b32 100644
--- a/arch/arm/boot/dts/imx28-cfa10037.dts
+++ b/arch/arm/boot/dts/imx28-cfa10037.dts
@@ -64,7 +64,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a
 				&mac0_pins_cfa10037>;
-			phy-reset-gpios = <&gpio2 21 0>;
+			phy-reset-gpios = <&gpio2 21 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx28-cfa10049.dts b/arch/arm/boot/dts/imx28-cfa10049.dts
index a9c347e..4cd52d5 100644
--- a/arch/arm/boot/dts/imx28-cfa10049.dts
+++ b/arch/arm/boot/dts/imx28-cfa10049.dts
@@ -327,7 +327,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a
 				&mac0_pins_cfa10049>;
-			phy-reset-gpios = <&gpio2 21 0>;
+			phy-reset-gpios = <&gpio2 21 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx28-cfa10057.dts b/arch/arm/boot/dts/imx28-cfa10057.dts
index 7a80bd6..c606791 100644
--- a/arch/arm/boot/dts/imx28-cfa10057.dts
+++ b/arch/arm/boot/dts/imx28-cfa10057.dts
@@ -162,7 +162,7 @@
 			phy-mode = "rmii";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a>;
-			phy-reset-gpios = <&gpio2 21 0>;
+			phy-reset-gpios = <&gpio2 21 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx28-cfa10058.dts b/arch/arm/boot/dts/imx28-cfa10058.dts
index f5c6dce..70c7bb4 100644
--- a/arch/arm/boot/dts/imx28-cfa10058.dts
+++ b/arch/arm/boot/dts/imx28-cfa10058.dts
@@ -129,7 +129,7 @@
 			phy-mode = "rmii";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a>;
-			phy-reset-gpios = <&gpio2 21 0>;
+			phy-reset-gpios = <&gpio2 21 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx28-eukrea-mbmx287lc.dts b/arch/arm/boot/dts/imx28-eukrea-mbmx287lc.dts
index e773144..b61fd61 100644
--- a/arch/arm/boot/dts/imx28-eukrea-mbmx287lc.dts
+++ b/arch/arm/boot/dts/imx28-eukrea-mbmx287lc.dts
@@ -31,7 +31,7 @@
 	phy-mode = "rmii";
 	pinctrl-names = "default";
 	pinctrl-0 = <&mac1_pins_a>;
-	phy-reset-gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio3 27 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index a5ba669..5309bb9 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -203,7 +203,7 @@
 				};
 
 				at24@51 {
-					compatible = "at24,24c32";
+					compatible = "atmel,24c32";
 					pagesize = <32>;
 					reg = <0x51>;
 				};
@@ -262,7 +262,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a>;
 			phy-supply = <&reg_fec_3v3>;
-			phy-reset-gpios = <&gpio4 13 0>;
+			phy-reset-gpios = <&gpio4 13 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx28-m28cu3.dts b/arch/arm/boot/dts/imx28-m28cu3.dts
index bb53294..9d6c8fe 100644
--- a/arch/arm/boot/dts/imx28-m28cu3.dts
+++ b/arch/arm/boot/dts/imx28-m28cu3.dts
@@ -188,7 +188,7 @@
 			phy-mode = "rmii";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mac0_pins_a>;
-			phy-reset-gpios = <&gpio4 13 0>;
+			phy-reset-gpios = <&gpio4 13 GPIO_ACTIVE_LOW>;
 			phy-reset-duration = <100>;
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts
index dba2d95..98b5faa 100644
--- a/arch/arm/boot/dts/imx50-evk.dts
+++ b/arch/arm/boot/dts/imx50-evk.dts
@@ -53,7 +53,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio4 12 0>;
+	phy-reset-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index 2a98afc..3747d80 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -12,6 +12,7 @@
  */
 
 #include "imx50-pinfunc.h"
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/clock/imx5-clock.h>
 
 / {
diff --git a/arch/arm/boot/dts/imx51-apf51.dts b/arch/arm/boot/dts/imx51-apf51.dts
index e88b2a6..c83ac16 100644
--- a/arch/arm/boot/dts/imx51-apf51.dts
+++ b/arch/arm/boot/dts/imx51-apf51.dts
@@ -36,7 +36,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "mii";
-	phy-reset-gpios = <&gpio3 0 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
 	phy-reset-duration = <1>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index 2e44d2a..df705ba 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -195,7 +195,7 @@
 };
 
 &fec {
-	phy-reset-gpios = <&gpio7 6 0>;
+	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 40b3e31..683dcbe 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -344,7 +344,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio7 6 0>;
+	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 472f6f0..33cb64f 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -273,6 +273,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio7 6 0>;
+	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 85972f2..eecdc1c 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -272,7 +272,7 @@
 	};
 
 	eeprom: 24c64@50 {
-		compatible = "at,24c64";
+		compatible = "atmel,24c64";
 		pagesize = <32>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 3a32201..7807c1f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -201,7 +201,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
 	phy-handle = <&phy0>;
 	mac-address = [000000000000]; /* placeholder; will be overwritten by bootloader */
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
index 524192c..df8dafe 100644
--- a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
+++ b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
@@ -137,7 +137,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_fec>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio4 2 0>;
+	phy-reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/imx6dl-gw560x.dts
similarity index 77%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/imx6dl-gw560x.dts
index 3956cff..21bdfaf 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/imx6dl-gw560x.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 Gateworks Corporation
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -6,15 +8,20 @@
  *
  *  a) This file is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
+ *     published by the Free Software Foundation; either version 2 of
+ *     the License, or (at your option) any later version.
  *
  *     This file is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ *     You should have received a copy of the GNU General Public
+ *     License along with this file; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -39,31 +46,10 @@
  */
 
 /dts-v1/;
-
-#include "rk1108.dtsi"
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw560x.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
-	};
-};
-
-&uart0 {
-	status = "okay";
-};
-
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
-	status = "okay";
+	model = "Gateworks Ventana i.MX6 DualLite/Solo GW560X";
+	compatible = "gw,imx6dl-gw560x", "gw,ventana", "fsl,imx6dl";
 };
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 2cb7282..29b45f2 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -94,7 +94,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio3 31 0>;
+	phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
 	interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
 			      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
 	fsl,err006687-workaround-present;
diff --git a/arch/arm/boot/dts/imx6dl-sabrelite.dts b/arch/arm/boot/dts/imx6dl-sabrelite.dts
index 2f90452..3304076 100644
--- a/arch/arm/boot/dts/imx6dl-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6dl-sabrelite.dts
@@ -48,3 +48,8 @@
 	model = "Freescale i.MX6 DualLite SABRE Lite Board";
 	compatible = "fsl,imx6dl-sabrelite", "fsl,imx6dl";
 };
+
+&ipu1_csi1_from_ipu1_csi1_mux {
+	clock-lanes = <0>;
+	data-lanes = <1 2>;
+};
diff --git a/arch/arm/boot/dts/imx6dl-sabresd.dts b/arch/arm/boot/dts/imx6dl-sabresd.dts
index 1e45f2f..9607afe 100644
--- a/arch/arm/boot/dts/imx6dl-sabresd.dts
+++ b/arch/arm/boot/dts/imx6dl-sabresd.dts
@@ -15,3 +15,8 @@
 	model = "Freescale i.MX6 DualLite SABRE Smart Device Board";
 	compatible = "fsl,imx6dl-sabresd", "fsl,imx6dl";
 };
+
+&ipu1_csi1_from_ipu1_csi1_mux {
+	clock-lanes = <0>;
+	data-lanes = <1 2>;
+};
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 7aa120f..8475e6c 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -100,6 +100,11 @@
 		};
 	};
 
+	capture-subsystem {
+		compatible = "fsl,imx-capture-subsystem";
+		ports = <&ipu1_csi0>, <&ipu1_csi1>;
+	};
+
 	display-subsystem {
 		compatible = "fsl,imx-display-subsystem";
 		ports = <&ipu1_di0>, <&ipu1_di1>;
@@ -164,6 +169,116 @@
 		      <&iomuxc 9 207 1>, <&iomuxc 10 206 1>, <&iomuxc 11 133 3>;
 };
 
+&gpr {
+	ipu1_csi0_mux: ipu1_csi0_mux@34 {
+		compatible = "video-mux";
+		mux-controls = <&mux 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
+			ipu1_csi0_mux_from_mipi_vc0: endpoint {
+				remote-endpoint = <&mipi_vc0_to_ipu1_csi0_mux>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			ipu1_csi0_mux_from_mipi_vc1: endpoint {
+				remote-endpoint = <&mipi_vc1_to_ipu1_csi0_mux>;
+			};
+		};
+
+		port@2 {
+			reg = <2>;
+
+			ipu1_csi0_mux_from_mipi_vc2: endpoint {
+				remote-endpoint = <&mipi_vc2_to_ipu1_csi0_mux>;
+			};
+		};
+
+		port@3 {
+			reg = <3>;
+
+			ipu1_csi0_mux_from_mipi_vc3: endpoint {
+				remote-endpoint = <&mipi_vc3_to_ipu1_csi0_mux>;
+			};
+		};
+
+		port@4 {
+			reg = <4>;
+
+			ipu1_csi0_mux_from_parallel_sensor: endpoint {
+			};
+		};
+
+		port@5 {
+			reg = <5>;
+
+			ipu1_csi0_mux_to_ipu1_csi0: endpoint {
+				remote-endpoint = <&ipu1_csi0_from_ipu1_csi0_mux>;
+			};
+		};
+	};
+
+	ipu1_csi1_mux: ipu1_csi1_mux@34 {
+		compatible = "video-mux";
+		mux-controls = <&mux 1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
+			ipu1_csi1_mux_from_mipi_vc0: endpoint {
+				remote-endpoint = <&mipi_vc0_to_ipu1_csi1_mux>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			ipu1_csi1_mux_from_mipi_vc1: endpoint {
+				remote-endpoint = <&mipi_vc1_to_ipu1_csi1_mux>;
+			};
+		};
+
+		port@2 {
+			reg = <2>;
+
+			ipu1_csi1_mux_from_mipi_vc2: endpoint {
+				remote-endpoint = <&mipi_vc2_to_ipu1_csi1_mux>;
+			};
+		};
+
+		port@3 {
+			reg = <3>;
+
+			ipu1_csi1_mux_from_mipi_vc3: endpoint {
+				remote-endpoint = <&mipi_vc3_to_ipu1_csi1_mux>;
+			};
+		};
+
+		port@4 {
+			reg = <4>;
+
+			ipu1_csi1_mux_from_parallel_sensor: endpoint {
+			};
+		};
+
+		port@5 {
+			reg = <5>;
+
+			ipu1_csi1_mux_to_ipu1_csi1: endpoint {
+				remote-endpoint = <&ipu1_csi1_from_ipu1_csi1_mux>;
+			};
+		};
+	};
+};
+
 &gpt {
 	compatible = "fsl,imx6dl-gpt";
 };
@@ -172,6 +287,12 @@
 	compatible = "fsl,imx6dl-hdmi";
 };
 
+&ipu1_csi1 {
+	ipu1_csi1_from_ipu1_csi1_mux: endpoint {
+		remote-endpoint = <&ipu1_csi1_mux_to_ipu1_csi1>;
+	};
+};
+
 &ldb {
 	clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
 		 <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
@@ -181,6 +302,74 @@
 		      "di0", "di1";
 };
 
+&mipi_csi {
+	port@1 {
+		reg = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mipi_vc0_to_ipu1_csi0_mux: endpoint@0 {
+			remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc0>;
+		};
+
+		mipi_vc0_to_ipu1_csi1_mux: endpoint@1 {
+			remote-endpoint = <&ipu1_csi1_mux_from_mipi_vc0>;
+		};
+	};
+
+	port@2 {
+		reg = <2>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mipi_vc1_to_ipu1_csi0_mux: endpoint@0 {
+			remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc1>;
+		};
+
+		mipi_vc1_to_ipu1_csi1_mux: endpoint@1 {
+			remote-endpoint = <&ipu1_csi1_mux_from_mipi_vc1>;
+		};
+	};
+
+	port@3 {
+		reg = <3>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mipi_vc2_to_ipu1_csi0_mux: endpoint@0 {
+			remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc2>;
+		};
+
+		mipi_vc2_to_ipu1_csi1_mux: endpoint@1 {
+			remote-endpoint = <&ipu1_csi1_mux_from_mipi_vc2>;
+		};
+	};
+
+	port@4 {
+		reg = <4>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mipi_vc3_to_ipu1_csi0_mux: endpoint@0 {
+			remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc3>;
+		};
+
+		mipi_vc3_to_ipu1_csi1_mux: endpoint@1 {
+			remote-endpoint = <&ipu1_csi1_mux_from_mipi_vc3>;
+		};
+	};
+};
+
+&mux {
+	mux-reg-masks = <0x34 0x00000007>, /* IPU_CSI0_MUX */
+			<0x34 0x00000038>, /* IPU_CSI1_MUX */
+			<0x0c 0x0000000c>, /* HDMI_MUX_CTL */
+			<0x0c 0x000000c0>, /* LVDS0_MUX_CTL */
+			<0x0c 0x00000300>, /* LVDS1_MUX_CTL */
+			<0x28 0x00000003>, /* DCIC1_MUX_CTL */
+			<0x28 0x0000000c>; /* DCIC2_MUX_CTL */
+};
+
 &vpu {
 	compatible = "fsl,imx6dl-vpu", "cnm,coda960";
 };
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index 14fa6b2..5fcb037 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -321,7 +321,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+	reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
 	fsl,tx-swing-full = <103>;
 	fsl,tx-swing-low = <103>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
index 66cac53..fe6ab0a 100644
--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -64,6 +64,14 @@
 		};
 	};
 
+	awnh387_pwrseq: pwrseq {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pwrseq>;
+		compatible = "mmc-pwrseq-sd8787";
+		powerdown-gpios = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpio6 16 GPIO_ACTIVE_HIGH>;
+	};
+
 	reg_pcie_power_on_gpio: regulator-pcie-power-on-gpio {
 		compatible = "regulator-fixed";
 		regulator-name = "regulator-pcie-power-on-gpio";
@@ -215,7 +223,7 @@
 	clock-frequency = <100000>;
 
 	eeprom@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		reg = <0x50>;
 		pagesize = <16>;
 	};
@@ -304,6 +312,13 @@
 		>;
 	};
 
+	pinctrl_pwrseq: pwrseqgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x1b0b0
+			MX6QDL_PAD_NANDF_CS3__GPIO6_IO16	0x1b0b0
+		>;
+	};
+
 	pinctrl_spdif: spdifgrp {
 		fsl,pins = <
 			MX6QDL_PAD_GPIO_16__SPDIF_IN  0x1b0b0
@@ -330,6 +345,17 @@
 			MX6QDL_PAD_EIM_D22__GPIO3_IO22	0x130b0
 		>;
 	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__SD1_CMD	0x17071
+			MX6QDL_PAD_SD1_CLK__SD1_CLK	0x10071
+			MX6QDL_PAD_SD1_DAT0__SD1_DATA0	0x17071
+			MX6QDL_PAD_SD1_DAT1__SD1_DATA1	0x17071
+			MX6QDL_PAD_SD1_DAT2__SD1_DATA2	0x17071
+			MX6QDL_PAD_SD1_DAT3__SD1_DATA3	0x17071
+		>;
+	};
 };
 
 &pcie {
@@ -382,3 +408,18 @@
 	dr_mode = "otg";
 	status = "okay";
 };
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	mmc-pwrseq = <&awnh387_pwrseq>;
+	non-removable;
+	/*
+	 * If the OS probes the Bluetooth AMP function advertised on this bus
+	 * but the firmware in place does not support it, the WiFi/BT module
+	 * gets unresponsive.
+	 * Users who configured their OS properly can enable this node to gain
+	 * WiFi and/or plain Bluetooth support.
+	 */
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
index f28883b..33eb7f1 100644
--- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
@@ -118,7 +118,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio1 25 0>;
+	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 	phy-supply = <&vgen2_1v2_eth>;
 	status = "okay";
 };
@@ -435,7 +435,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio4 8 0>;
+	reset-gpio = <&gpio4 8 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index fd2220a..1f0f950 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -135,7 +135,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio1 25 0>;
+	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 	interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
 			      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
 	fsl,err006687-workaround-present;
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
index 8e84713..9dbeea0 100644
--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -19,7 +19,6 @@
 
 	/* these are used by bootloader for disabling nodes */
 	aliases {
-		ethernet1 = &eth1;
 		i2c0 = &i2c1;
 		i2c1 = &i2c2;
 		i2c2 = &i2c3;
@@ -154,7 +153,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii-id";
-	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -347,10 +346,6 @@
 &pcie {
 	reset-gpio = <&gpio1 29 GPIO_ACTIVE_LOW>;
 	status = "okay";
-
-	eth1: sky2@8 { /* MAC/PHY on bus 8 */
-		compatible = "marvell,sky2";
-	};
 };
 
 &ssi1 {
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/imx6q-gw560x.dts
similarity index 77%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/imx6q-gw560x.dts
index 3956cff..735f2bb 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/imx6q-gw560x.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 Gateworks Corporation
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -6,15 +8,20 @@
  *
  *  a) This file is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
+ *     published by the Free Software Foundation; either version 2 of
+ *     the License, or (at your option) any later version.
  *
  *     This file is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ *     You should have received a copy of the GNU General Public
+ *     License along with this file; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -39,31 +46,14 @@
  */
 
 /dts-v1/;
-
-#include "rk1108.dtsi"
+#include "imx6q.dtsi"
+#include "imx6qdl-gw560x.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
-	};
+	model = "Gateworks Ventana i.MX6 Dual/Quad GW560X";
+	compatible = "gw,imx6q-gw560x", "gw,ventana", "fsl,imx6q";
 };
 
-&uart0 {
-	status = "okay";
-};
-
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+&sata {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-novena.dts b/arch/arm/boot/dts/imx6q-novena.dts
index 0fa32b2..d83cfb6 100644
--- a/arch/arm/boot/dts/imx6q-novena.dts
+++ b/arch/arm/boot/dts/imx6q-novena.dts
@@ -217,7 +217,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet_novena>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
 	rxc-skew-ps = <3000>;
 	rxdv-skew-ps = <0>;
 	txc-skew-ps = <3000>;
@@ -446,7 +446,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie_novena>;
-	reset-gpio = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+	reset-gpio = <&gpio3 29 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 02a7cdf..dc51262e 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -52,3 +52,8 @@
 &sata {
 	status = "okay";
 };
+
+&ipu1_csi1_from_mipi_vc1 {
+	clock-lanes = <0>;
+	data-lanes = <1 2>;
+};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
index 9cbdfe7..527772b 100644
--- a/arch/arm/boot/dts/imx6q-sabresd.dts
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -23,3 +23,8 @@
 &sata {
 	status = "okay";
 };
+
+&ipu1_csi1_from_mipi_vc1 {
+	clock-lanes = <0>;
+	data-lanes = <1 2>;
+};
diff --git a/arch/arm/boot/dts/imx6q-utilite-pro.dts b/arch/arm/boot/dts/imx6q-utilite-pro.dts
index d900ad6..16d5be1 100644
--- a/arch/arm/boot/dts/imx6q-utilite-pro.dts
+++ b/arch/arm/boot/dts/imx6q-utilite-pro.dts
@@ -130,7 +130,7 @@
 			#size-cells = <0>;
 
 			eeprom@50 {
-				compatible = "at24,24c02";
+				compatible = "atmel,24c02";
 				reg = <0x50>;
 				pagesize = <16>;
 			};
diff --git a/arch/arm/boot/dts/imx6q-zii-rdu2.dts b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
index b2d3466..6be8a1e 100644
--- a/arch/arm/boot/dts/imx6q-zii-rdu2.dts
+++ b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
@@ -41,8 +41,8 @@
 
 /dts-v1/;
 
-#include <imx6q.dtsi>
-#include <imx6qdl-zii-rdu2.dtsi>
+#include "imx6q.dtsi"
+#include "imx6qdl-zii-rdu2.dtsi"
 
 / {
 	model = "ZII RDU2 Board";
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index e9a5d0b..90a7417 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -125,7 +125,7 @@
 			clocks = <&clks IMX6QDL_CLK_OPENVG_AXI>,
 				 <&clks IMX6QDL_CLK_GPU2D_CORE>;
 			clock-names = "bus", "core";
-			power-domains = <&gpc 1>;
+			power-domains = <&pd_pu>;
 		};
 
 		ipu2: ipu@02800000 {
@@ -143,10 +143,18 @@
 
 			ipu2_csi0: port@0 {
 				reg = <0>;
+
+				ipu2_csi0_from_mipi_vc2: endpoint {
+					remote-endpoint = <&mipi_vc2_to_ipu2_csi0>;
+				};
 			};
 
 			ipu2_csi1: port@1 {
 				reg = <1>;
+
+				ipu2_csi1_from_ipu2_csi1_mux: endpoint {
+					remote-endpoint = <&ipu2_csi1_mux_to_ipu2_csi1>;
+				};
 			};
 
 			ipu2_di0: port@2 {
@@ -198,6 +206,11 @@
 		};
 	};
 
+	capture-subsystem {
+		compatible = "fsl,imx-capture-subsystem";
+		ports = <&ipu1_csi0>, <&ipu1_csi1>, <&ipu2_csi0>, <&ipu2_csi1>;
+	};
+
 	display-subsystem {
 		compatible = "fsl,imx-display-subsystem";
 		ports = <&ipu1_di0>, <&ipu1_di1>, <&ipu2_di0>, <&ipu2_di1>;
@@ -246,6 +259,68 @@
 	gpio-ranges = <&iomuxc 0 172 9>, <&iomuxc 9 189 2>, <&iomuxc 11 146 3>;
 };
 
+&gpr {
+	ipu1_csi0_mux {
+		compatible = "video-mux";
+		mux-controls = <&mux 0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
+			ipu1_csi0_mux_from_mipi_vc0: endpoint {
+				remote-endpoint = <&mipi_vc0_to_ipu1_csi0_mux>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			ipu1_csi0_mux_from_parallel_sensor: endpoint {
+			};
+		};
+
+		port@2 {
+			reg = <2>;
+
+			ipu1_csi0_mux_to_ipu1_csi0: endpoint {
+				remote-endpoint = <&ipu1_csi0_from_ipu1_csi0_mux>;
+			};
+		};
+	};
+
+	ipu2_csi1_mux {
+		compatible = "video-mux";
+		mux-controls = <&mux 1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
+			ipu2_csi1_mux_from_mipi_vc3: endpoint {
+				remote-endpoint = <&mipi_vc3_to_ipu2_csi1_mux>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			ipu2_csi1_mux_from_parallel_sensor: endpoint {
+			};
+		};
+
+		port@2 {
+			reg = <2>;
+
+			ipu2_csi1_mux_to_ipu2_csi1: endpoint {
+				remote-endpoint = <&ipu2_csi1_from_ipu2_csi1_mux>;
+			};
+		};
+	};
+};
+
 &hdmi {
 	compatible = "fsl,imx6q-hdmi";
 
@@ -266,6 +341,12 @@
 	};
 };
 
+&ipu1_csi1 {
+	ipu1_csi1_from_mipi_vc1: endpoint {
+		remote-endpoint = <&mipi_vc1_to_ipu1_csi1>;
+	};
+};
+
 &ldb {
 	clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
 		 <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
@@ -312,6 +393,40 @@
 	};
 };
 
+&mipi_csi {
+	port@1 {
+		reg = <1>;
+
+		mipi_vc0_to_ipu1_csi0_mux: endpoint {
+			remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc0>;
+		};
+	};
+
+	port@2 {
+		reg = <2>;
+
+		mipi_vc1_to_ipu1_csi1: endpoint {
+			remote-endpoint = <&ipu1_csi1_from_mipi_vc1>;
+		};
+	};
+
+	port@3 {
+		reg = <3>;
+
+		mipi_vc2_to_ipu2_csi0: endpoint {
+			remote-endpoint = <&ipu2_csi0_from_mipi_vc2>;
+		};
+	};
+
+	port@4 {
+		reg = <4>;
+
+		mipi_vc3_to_ipu2_csi1_mux: endpoint {
+			remote-endpoint = <&ipu2_csi1_mux_from_mipi_vc3>;
+		};
+	};
+};
+
 &mipi_dsi {
 	ports {
 		port@2 {
@@ -332,6 +447,16 @@
 	};
 };
 
+&mux {
+	mux-reg-masks = <0x04 0x00080000>, /* MIPI_IPU1_MUX */
+			<0x04 0x00100000>, /* MIPI_IPU2_MUX */
+			<0x0c 0x0000000c>, /* HDMI_MUX_CTL */
+			<0x0c 0x000000c0>, /* LVDS0_MUX_CTL */
+			<0x0c 0x00000300>, /* LVDS1_MUX_CTL */
+			<0x28 0x00000003>, /* DCIC1_MUX_CTL */
+			<0x28 0x0000000c>; /* DCIC2_MUX_CTL */
+};
+
 &vpu {
 	compatible = "fsl,imx6q-vpu", "cnm,coda960";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
index 550e100..9cd2a74 100644
--- a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
@@ -232,7 +232,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio6 2 GPIO_ACTIVE_HIGH>;
+	reset-gpio = <&gpio6 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index b2debc0..7d64075 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -118,7 +118,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index ec68f1c..1b18728 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -320,7 +320,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio7 18 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio7 18 GPIO_ACTIVE_LOW>;
 	txd0-skew-ps = <0>;
 	txd1-skew-ps = <0>;
 	txd2-skew-ps = <0>;
@@ -335,7 +335,7 @@
 };
 
 &pcie {
-	reset-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+	reset-gpio = <&gpio2 16 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
index e807875..ad84edd 100644
--- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
@@ -254,6 +254,7 @@
 		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
+		lrclk-strength = <3>;
 	};
 
 	/* STMPE811 touch screen controller */
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index a208e7e..5bc6ed1 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -14,7 +14,6 @@
 / {
 	/* these are used by bootloader for disabling nodes */
 	aliases {
-		ethernet1 = &eth1;
 		led0 = &led0;
 		led1 = &led1;
 		led2 = &led2;
@@ -342,10 +341,6 @@
 	pinctrl-0 = <&pinctrl_pcie>;
 	reset-gpio = <&gpio1 29 GPIO_ACTIVE_LOW>;
 	status = "okay";
-
-	eth1: sky2@8 { /* MAC/PHY on bus 8 */
-		compatible = "marvell,sky2";
-	};
 };
 
 &pwm2 {
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 968fda9..66fcf838 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -14,7 +14,6 @@
 / {
 	/* these are used by bootloader for disabling nodes */
 	aliases {
-		ethernet1 = &eth1;
 		led0 = &led0;
 		led1 = &led1;
 		led2 = &led2;
@@ -379,10 +378,6 @@
 	pinctrl-0 = <&pinctrl_pcie>;
 	reset-gpio = <&gpio1 29 GPIO_ACTIVE_LOW>;
 	status = "okay";
-
-	eth1: sky2@8 { /* MAC/PHY on bus 8 */
-		compatible = "marvell,sky2";
-	};
 };
 
 &pwm1 {
diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
new file mode 100644
index 0000000..d894dde
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
@@ -0,0 +1,749 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of
+ *     the License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this file; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	/* these are used by bootloader for disabling nodes */
+	aliases {
+		led0 = &led0;
+		led1 = &led1;
+		led2 = &led2;
+		ssi0 = &ssi1;
+		usb0 = &usbh1;
+		usb1 = &usbotg;
+	};
+
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	backlight-display {
+		compatible = "pwm-backlight";
+		pwms = <&pwm4 0 5000000>;
+		brightness-levels = <
+			0  1  2  3  4  5  6  7  8  9
+			10 11 12 13 14 15 16 17 18 19
+			20 21 22 23 24 25 26 27 28 29
+			30 31 32 33 34 35 36 37 38 39
+			40 41 42 43 44 45 46 47 48 49
+			50 51 52 53 54 55 56 57 58 59
+			60 61 62 63 64 65 66 67 68 69
+			70 71 72 73 74 75 76 77 78 79
+			80 81 82 83 84 85 86 87 88 89
+			90 91 92 93 94 95 96 97 98 99
+			100
+			>;
+		default-brightness-level = <100>;
+	};
+
+	backlight-keypad {
+		compatible = "gpio-backlight";
+		gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;
+		default-on;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_leds>;
+
+		led0: user1 {
+			label = "user1";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led1: user2 {
+			label = "user2";
+			gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+			default-state = "off";
+		};
+
+		led2: user3 {
+			label = "user3";
+			gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+			default-state = "off";
+		};
+	};
+
+	memory@10000000 {
+		reg = <0x10000000 0x40000000>;
+	};
+
+	pps {
+		compatible = "pps-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pps>;
+		gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+	};
+
+	reg_2p5v: regulator-2p5v {
+		compatible = "regulator-fixed";
+		regulator-name = "2P5V";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_5p0v: regulator-5p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "5P0V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	reg_12p0v: regulator-12p0v {
+		compatible = "regulator-fixed";
+		regulator-name = "12P0V";
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+		gpio = <&gpio4 25 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_1p4v: regulator-vddsoc {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_soc";
+		regulator-min-microvolt = <1400000>;
+		regulator-max-microvolt = <1400000>;
+		regulator-always-on;
+	};
+
+	reg_usb_h1_vbus: regulator-usb-h1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_h1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	sound {
+		compatible = "fsl,imx6q-ventana-sgtl5000",
+			     "fsl,imx-audio-sgtl5000";
+		model = "sgtl5000-audio";
+		ssi-controller = <&ssi1>;
+		audio-codec = <&sgtl5000>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <1>;
+		mux-ext-port = <4>;
+	};
+};
+
+&audmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_audmux>;
+	status = "okay";
+};
+
+&ecspi3 {
+	cs-gpios = <&gpio4 24 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi3>;
+	status = "okay";
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii-id";
+	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c3>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	eeprom1: eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+		pagesize = <16>;
+	};
+
+	eeprom2: eeprom@51 {
+		compatible = "atmel,24c02";
+		reg = <0x51>;
+		pagesize = <16>;
+	};
+
+	eeprom3: eeprom@52 {
+		compatible = "atmel,24c02";
+		reg = <0x52>;
+		pagesize = <16>;
+	};
+
+	eeprom4: eeprom@53 {
+		compatible = "atmel,24c02";
+		reg = <0x53>;
+		pagesize = <16>;
+	};
+
+	pca9555: gpio@23 {
+		compatible = "nxp,pca9555";
+		reg = <0x23>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	ds1672: rtc@68 {
+		compatible = "dallas,ds1672";
+		reg = <0x68>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	sgtl5000: codec@a {
+		compatible = "fsl,sgtl5000";
+		reg = <0x0a>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		VDDA-supply = <&reg_1p8v>;
+		VDDIO-supply = <&reg_3p3v>;
+	};
+
+	tca8418: keypad@34 {
+		compatible = "ti,tca8418";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_keypad>;
+		reg = <0x34>;
+		interrupt-parent = <&gpio5>;
+		interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+		linux,keymap = < MATRIX_KEY(0x00, 0x01, BTN_0)
+			         MATRIX_KEY(0x00, 0x00, BTN_1)
+			         MATRIX_KEY(0x01, 0x01, BTN_2)
+			         MATRIX_KEY(0x01, 0x00, BTN_3)
+			         MATRIX_KEY(0x02, 0x00, BTN_4)
+			         MATRIX_KEY(0x00, 0x03, BTN_5)
+			         MATRIX_KEY(0x00, 0x02, BTN_6)
+			         MATRIX_KEY(0x01, 0x03, BTN_7)
+			         MATRIX_KEY(0x01, 0x02, BTN_8)
+			         MATRIX_KEY(0x02, 0x02, BTN_9)
+		>;
+		keypad,num-rows = <4>;
+		keypad,num-columns = <4>;
+	};
+
+	ltc3676: pmic@3c {
+		compatible = "lltc,ltc3676";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pmic>;
+		reg = <0x3c>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+
+		regulators {
+			/* VDD_DDR (1+R1/R2 = 2.105) */
+			reg_vdd_ddr: sw2 {
+				regulator-name = "vddddr";
+				regulator-min-microvolt = <868310>;
+				regulator-max-microvolt = <1684000>;
+				lltc,fb-voltage-divider = <221000 200000>;
+				regulator-ramp-delay = <7000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* VDD_ARM (1+R1/R2 = 1.931) */
+			reg_vdd_arm: sw3 {
+				regulator-name = "vddarm";
+				regulator-min-microvolt = <796551>;
+				regulator-max-microvolt = <1544827>;
+				lltc,fb-voltage-divider = <243000 261000>;
+				regulator-ramp-delay = <7000>;
+				regulator-boot-on;
+				regulator-always-on;
+				linux,phandle = <&reg_vdd_arm>;
+			};
+
+			/* VDD_1P8 (1+R1/R2 = 2.505): GPS/VideoIn/ENET-PHY */
+			reg_1p8v: sw4 {
+				regulator-name = "vdd1p8";
+				regulator-min-microvolt = <1033310>;
+				regulator-max-microvolt = <2004000>;
+				lltc,fb-voltage-divider = <301000 200000>;
+				regulator-ramp-delay = <7000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* VDD_1P0 (1+R1/R2 = 1.39): PCIe/ENET-PHY */
+			reg_1p0v: ldo2 {
+				regulator-name = "vdd1p0";
+				regulator-min-microvolt = <950000>;
+				regulator-max-microvolt = <1050000>;
+				lltc,fb-voltage-divider = <78700 200000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* VDD_AUD_1P8: Audio codec */
+			reg_aud_1p8v: ldo3 {
+				regulator-name = "vdd1p8a";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-boot-on;
+			};
+
+			/* VDD_HIGH (1+R1/R2 = 4.17) */
+			reg_3p0v: ldo4 {
+				regulator-name = "vdd3p0";
+				regulator-min-microvolt = <3023250>;
+				regulator-max-microvolt = <3023250>;
+				lltc,fb-voltage-divider = <634000 200000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	egalax_ts: touchscreen@4 {
+		compatible = "eeti,egalax_ts";
+		reg = <0x04>;
+		interrupt-parent = <&gpio5>;
+		interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+	};
+};
+
+&ldb {
+	fsl,dual-channel;
+	status = "okay";
+
+	lvds-channel@0 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		display-timings {
+			native-mode = <&timing0>;
+			timing0: hsd100pxn1 {
+				clock-frequency = <65000000>;
+				hactive = <1024>;
+				vactive = <768>;
+				hback-porch = <220>;
+				hfront-porch = <40>;
+				vback-porch = <21>;
+				vfront-porch = <7>;
+				hsync-len = <60>;
+				vsync-len = <10>;
+			};
+		};
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio4 31 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+	status = "disabled";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+	status = "disabled";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
+	status = "okay";
+};
+
+&ssi1 {
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	uart-has-rtscts;
+	rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usb_h1_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbh1>;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	bus-width = <8>;
+	vmmc-supply = <&reg_3p3v>;
+	non-removable;
+	status = "okay";
+};
+
+&usdhc3 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	vmmc-supply = <&reg_3p3v>;
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+};
+
+&iomuxc {
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			/* AUD4 */
+			MX6QDL_PAD_DISP0_DAT20__AUD4_TXC	0x130b0
+			MX6QDL_PAD_DISP0_DAT21__AUD4_TXD	0x110b0
+			MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS	0x130b0
+			MX6QDL_PAD_DISP0_DAT23__AUD4_RXD	0x130b0
+			MX6QDL_PAD_GPIO_0__CCM_CLKO1		0x130b0 /* AUD4_MCK */
+			/* AUD6 */
+			MX6QDL_PAD_DI0_PIN2__AUD6_TXD		0x130b0
+			MX6QDL_PAD_DI0_PIN3__AUD6_TXFS		0x130b0
+			MX6QDL_PAD_DI0_PIN4__AUD6_RXD		0x130b0
+			MX6QDL_PAD_DI0_PIN15__AUD6_TXC		0x130b0
+		>;
+	};
+
+	pinctrl_ecspi3: escpi3grp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK	0x100b1
+			MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI	0x100b1
+			MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO	0x100b1
+			MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24	0x100b1
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b030
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x1b030
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b030
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b030
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b030
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b030
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b030
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b030
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b030
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b030
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b030
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b030
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+			MX6QDL_PAD_ENET_TXD0__GPIO1_IO30	0x4001b0b0 /* PHY_RST# */
+		>;
+	};
+
+	pinctrl_flexcan: flexcangrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX	0x1b0b1
+			MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX	0x1b0b1
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x4001b0b0 /* CAN_STBY */
+		>;
+	};
+
+	pinctrl_gpio_leds: gpioledsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__GPIO4_IO06		0x1b0b0
+			MX6QDL_PAD_KEY_ROW0__GPIO4_IO07		0x1b0b0
+			MX6QDL_PAD_KEY_ROW4__GPIO4_IO15		0x1b0b0
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+			MX6QDL_PAD_GPIO_19__GPIO4_IO05		0x4001b0b0 /* DIOI2C_DIS# */
+			MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12	0x0001b0b0 /* LVDS_TOUCH_IRQ# */
+			MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13	0x0001b0b0 /* LVDS_BACKEN */
+		>;
+	};
+
+	pinctrl_keypad: keypadgrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT17__GPIO5_IO11	0x0001b0b0 /* KEYPAD_IRQ# */
+			MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30	0x0001b0b0 /* KEYPAD_LED_EN */
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT10__GPIO4_IO31	0x1b0b0    /* PCI_RST# */
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x4001b0b0 /* PCIESKT_WDIS# */
+		>;
+	};
+
+	pinctrl_pmic: pmicgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_8__GPIO1_IO08		0x0001b0b0 /* PMIC_IRQ# */
+		>;
+	};
+
+	pinctrl_pps: ppsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_RXD1__GPIO1_IO26	0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT2__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__PWM4_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT4__GPIO7_IO01		0x4001b0b1 /* TEN */
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbh1: usbh1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_9__GPIO1_IO09		0x4001b0b0 /* USBHUB_RST# */
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x17059
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x1b0b0 /* PWR_EN */
+			MX6QDL_PAD_KEY_COL4__GPIO4_IO14		0x1b0b0 /* OC */
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x170f9
+			MX6QDL_PAD_SD2_CLK__SD2_CLK		0x100f9
+			MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x170f9
+			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x170f9
+			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x170f9
+			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x170f9
+			MX6QDL_PAD_NANDF_D4__SD2_DATA4		0x170f9
+			MX6QDL_PAD_NANDF_D5__SD2_DATA5		0x170f9
+			MX6QDL_PAD_NANDF_D6__SD2_DATA6		0x170f9
+			MX6QDL_PAD_NANDF_D7__SD2_DATA7		0x170f9
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17059
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x10059
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17059
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100b9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+			MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
+		>;
+	};
+
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT8__WDOG1_B		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
index a9b2077..900e8c7 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
@@ -46,7 +46,7 @@
 	pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
 	phy-mode = "rgmii";
 	phy-reset-duration = <2>;
-	phy-reset-gpios = <&gpio4 15 0>;
+	phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index bad3c9f..b63134e 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -408,7 +408,7 @@
 	};
 
 	rtc: rtc@68 {
-		compatible = "st,rv4162";
+		compatible = "microcrystal,rv4162";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_rv4162>;
 		reg = <0x68>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index 559da17..aeaa5a6 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -326,7 +326,7 @@
 	};
 
 	rtc@68 {
-		compatible = "st,rv4162";
+		compatible = "microcrystal,rv4162";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_rv4162>;
 		reg = <0x68>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 70772eb..f22e587 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -271,7 +271,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio1 27 0>;
+	phy-reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	txen-skew-ps = <0>;
 	txc-skew-ps = <3000>;
 	rxdv-skew-ps = <0>;
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 6e5cb6a..d81b007 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -374,7 +374,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio4 17 0>;
+	reset-gpio = <&gpio4 17 GPIO_ACTIVE_LOW>;
 	status = "disabled";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index a2a714d..6a7594e 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -108,6 +108,76 @@
 		default-brightness-level = <7>;
 		status = "okay";
 	};
+
+	i2cmux {
+		compatible = "i2c-mux-gpio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c3mux>;
+		mux-gpios = <&gpio5 4 0>;
+		i2c-parent = <&i2c3>;
+		idle-state = <0>;
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			adv7180: camera@21 {
+				compatible = "adi,adv7180";
+				reg = <0x21>;
+				powerdown-gpios = <&max7310_b 2 GPIO_ACTIVE_LOW>;
+				interrupt-parent = <&gpio1>;
+				interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+
+				port {
+					adv7180_to_ipu1_csi0_mux: endpoint {
+						remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+						bus-width = <8>;
+					};
+				};
+			};
+
+			max7310_a: gpio@30 {
+				compatible = "maxim,max7310";
+				reg = <0x30>;
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+
+			max7310_b: gpio@32 {
+				compatible = "maxim,max7310";
+				reg = <0x32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_max7310>;
+				reset-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+			};
+
+			max7310_c: gpio@34 {
+				compatible = "maxim,max7310";
+				reg = <0x34>;
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+		};
+	};
+};
+
+&ipu1_csi0_from_ipu1_csi0_mux {
+	bus-width = <8>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+	remote-endpoint = <&adv7180_to_ipu1_csi0_mux>;
+	bus-width = <8>;
+};
+
+&ipu1_csi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ipu1_csi0>;
 };
 
 &clks {
@@ -290,27 +360,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c3>;
 	status = "okay";
-
-	max7310_a: gpio@30 {
-		compatible = "maxim,max7310";
-		reg = <0x30>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
-
-	max7310_b: gpio@32 {
-		compatible = "maxim,max7310";
-		reg = <0x32>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
-
-	max7310_c: gpio@34 {
-		compatible = "maxim,max7310";
-		reg = <0x34>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };
 
 &iomuxc {
@@ -418,12 +467,52 @@
 			>;
 		};
 
+		pinctrl_i2c3mux: i2c3muxgrp {
+			fsl,pins = <
+				MX6QDL_PAD_EIM_A24__GPIO5_IO04 0x0b0b1
+			>;
+		};
+
+		pinctrl_ipu1_csi0: ipu1csi0grp {
+			fsl,pins = <
+				MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18  0x1b0b0
+				MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19  0x1b0b0
+				MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+				MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC    0x1b0b0
+				MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC   0x1b0b0
+			>;
+		};
+
+		pinctrl_max7310: max7310grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_DAT0__GPIO1_IO15 0x1b0b0
+			>;
+		};
+
 		pinctrl_pwm3: pwm1grp {
 			fsl,pins = <
 				MX6QDL_PAD_SD4_DAT1__PWM3_OUT		0x1b0b1
 			>;
 		};
 
+		pinctrl_gpt_input_capture0: gptinputcapture0grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD1_DAT0__GPT_CAPTURE1	0x1b0b0
+			>;
+		};
+
+		pinctrl_gpt_input_capture1: gptinputcapture1grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD1_DAT1__GPT_CAPTURE2	0x1b0b0
+			>;
+		};
+
 		pinctrl_spdif: spdifgrp {
 			fsl,pins = <
 				MX6QDL_PAD_KEY_COL3__SPDIF_IN 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index 8413179..afe7449 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -39,6 +39,8 @@
  *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
+
+#include <dt-bindings/clock/imx6qdl-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 
@@ -94,6 +96,42 @@
 			pinctrl-0 = <&pinctrl_can_xcvr>;
 			gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
 		};
+
+		reg_1p5v: regulator@4 {
+			compatible = "regulator-fixed";
+			reg = <4>;
+			regulator-name = "1P5V";
+			regulator-min-microvolt = <1500000>;
+			regulator-max-microvolt = <1500000>;
+			regulator-always-on;
+		};
+
+		reg_1p8v: regulator@5 {
+			compatible = "regulator-fixed";
+			reg = <5>;
+			regulator-name = "1P8V";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-always-on;
+		};
+
+		reg_2p8v: regulator@6 {
+			compatible = "regulator-fixed";
+			reg = <6>;
+			regulator-name = "2P8V";
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+			regulator-always-on;
+		};
+	};
+
+	mipi_xclk: mipi_xclk {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <22000000>;
+		clock-output-names = "mipi_pwm3";
+		pwms = <&pwm3 0 45>; /* 1 / 45 ns = 22 MHz */
+		status = "okay";
 	};
 
 	gpio-keys {
@@ -220,6 +258,22 @@
 	};
 };
 
+&ipu1_csi0_from_ipu1_csi0_mux {
+	bus-width = <8>;
+	data-shift = <12>; /* Lines 19:12 used */
+	hsync-active = <1>;
+	vsync-active = <1>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+	remote-endpoint = <&ov5642_to_ipu1_csi0_mux>;
+};
+
+&ipu1_csi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ipu1_csi0>;
+};
+
 &audmux {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_audmux>;
@@ -270,9 +324,6 @@
 	txd1-skew-ps = <0>;
 	txd2-skew-ps = <0>;
 	txd3-skew-ps = <0>;
-	interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
-			      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
-	fsl,err006687-workaround-present;
 	status = "okay";
 };
 
@@ -301,6 +352,53 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c2>;
 	status = "okay";
+
+	ov5640: camera@40 {
+		compatible = "ovti,ov5640";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ov5640>;
+		reg = <0x40>;
+		clocks = <&mipi_xclk>;
+		clock-names = "xclk";
+		DOVDD-supply = <&reg_1p8v>;
+		AVDD-supply = <&reg_2p8v>;
+		DVDD-supply = <&reg_1p5v>;
+		reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; /* NANDF_D5 */
+		powerdown-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; /* NANDF_WP_B */
+
+		port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ov5640_to_mipi_csi2: endpoint {
+				remote-endpoint = <&mipi_csi2_in>;
+				clock-lanes = <0>;
+				data-lanes = <1 2>;
+			};
+		};
+	};
+
+	ov5642: camera@42 {
+		compatible = "ovti,ov5642";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ov5642>;
+		clocks = <&clks IMX6QDL_CLK_CKO2>;
+		clock-names = "xclk";
+		reg = <0x42>;
+		reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+		powerdown-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+		gp-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
+		status = "disabled";
+
+		port {
+			ov5642_to_ipu1_csi0_mux: endpoint {
+				remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+				bus-width = <8>;
+				hsync-active = <1>;
+				vsync-active = <1>;
+			};
+		};
+	};
 };
 
 &i2c3 {
@@ -373,7 +471,6 @@
 				MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x1b030
 				/* Phy reset */
 				MX6QDL_PAD_EIM_D23__GPIO3_IO23		0x000b0
-				MX6QDL_PAD_GPIO_6__ENET_IRQ		0x000b1
 			>;
 		};
 
@@ -415,6 +512,23 @@
 			>;
 		};
 
+		pinctrl_ipu1_csi0: ipu1csi0grp {
+			fsl,pins = <
+				MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19    0x1b0b0
+				MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK   0x1b0b0
+				MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC      0x1b0b0
+				MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC     0x1b0b0
+				MX6QDL_PAD_CSI0_DATA_EN__IPU1_CSI0_DATA_EN 0x1b0b0
+			>;
+		};
+
 		pinctrl_j15: j15grp {
 			fsl,pins = <
 				MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
@@ -448,6 +562,22 @@
 			>;
 		};
 
+		pinctrl_ov5640: ov5640grp {
+			fsl,pins = <
+				MX6QDL_PAD_NANDF_D5__GPIO2_IO05   0x000b0
+				MX6QDL_PAD_NANDF_WP_B__GPIO6_IO09 0x0b0b0
+			>;
+		};
+
+		pinctrl_ov5642: ov5642grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x1b0b0
+				MX6QDL_PAD_GPIO_6__GPIO1_IO06   0x1b0b0
+				MX6QDL_PAD_GPIO_8__GPIO1_IO08   0x130b0
+				MX6QDL_PAD_GPIO_3__CCM_CLKO2    0x000b0
+			>;
+		};
+
 		pinctrl_pwm1: pwm1grp {
 			fsl,pins = <
 				MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
@@ -602,3 +732,17 @@
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
+
+&mipi_csi {
+	status = "okay";
+
+	port@0 {
+		reg = <0>;
+
+		mipi_csi2_in: endpoint {
+			remote-endpoint = <&ov5640_to_mipi_csi2>;
+			clock-lanes = <0>;
+			data-lanes = <1 2>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 58055ce..b72b6fa 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -10,6 +10,7 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <dt-bindings/clock/imx6qdl-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 
@@ -146,6 +147,36 @@
 	};
 };
 
+&ipu1_csi0_from_ipu1_csi0_mux {
+	bus-width = <8>;
+	data-shift = <12>; /* Lines 19:12 used */
+	hsync-active = <1>;
+	vsync-active = <1>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+	remote-endpoint = <&ov5642_to_ipu1_csi0_mux>;
+};
+
+&ipu1_csi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ipu1_csi0>;
+};
+
+&mipi_csi {
+	status = "okay";
+
+	port@0 {
+		reg = <0>;
+
+		mipi_csi2_in: endpoint {
+			remote-endpoint = <&ov5640_to_mipi_csi2>;
+			clock-lanes = <0>;
+			data-lanes = <1 2>;
+		};
+	};
+};
+
 &audmux {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_audmux>;
@@ -178,7 +209,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio1 25 0>;
+	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -213,7 +244,32 @@
 			0x8014 /* 4:FN_DMICCDAT */
 			0x0000 /* 5:Default */
 		>;
-       };
+	};
+
+	ov5642: camera@3c {
+		compatible = "ovti,ov5642";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ov5642>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		clock-names = "xclk";
+		reg = <0x3c>;
+		DOVDD-supply = <&vgen4_reg>; /* 1.8v */
+		AVDD-supply = <&vgen3_reg>;  /* 2.8v, rev C board is VGEN3
+						rev B board is VGEN5 */
+		DVDD-supply = <&vgen2_reg>;  /* 1.5v*/
+		powerdown-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+		status = "disabled";
+
+		port {
+			ov5642_to_ipu1_csi0_mux: endpoint {
+				remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+				bus-width = <8>;
+				hsync-active = <1>;
+				vsync-active = <1>;
+			};
+		};
+	};
 };
 
 &i2c2 {
@@ -222,6 +278,32 @@
 	pinctrl-0 = <&pinctrl_i2c2>;
 	status = "okay";
 
+	ov5640: camera@3c {
+		compatible = "ovti,ov5640";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ov5640>;
+		reg = <0x3c>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		clock-names = "xclk";
+		DOVDD-supply = <&vgen4_reg>; /* 1.8v */
+		AVDD-supply = <&vgen3_reg>;  /* 2.8v, rev C board is VGEN3
+						rev B board is VGEN5 */
+		DVDD-supply = <&vgen2_reg>;  /* 1.5v*/
+		powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+
+		port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ov5640_to_mipi_csi2: endpoint {
+				remote-endpoint = <&mipi_csi2_in>;
+				clock-lanes = <0>;
+				data-lanes = <1 2>;
+			};
+		};
+	};
+
 	pmic: pfuze100@08 {
 		compatible = "fsl,pfuze100";
 		reg = <0x08>;
@@ -425,6 +507,36 @@
 			>;
 		};
 
+		pinctrl_ipu1_csi0: ipu1csi0grp {
+			fsl,pins = <
+				MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18    0x1b0b0
+				MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19    0x1b0b0
+				MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK   0x1b0b0
+				MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC      0x1b0b0
+				MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC     0x1b0b0
+			>;
+		};
+
+		pinctrl_ov5640: ov5640grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD1_DAT2__GPIO1_IO19 0x1b0b0
+				MX6QDL_PAD_SD1_CLK__GPIO1_IO20  0x1b0b0
+			>;
+		};
+
+		pinctrl_ov5642: ov5642grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD1_DAT0__GPIO1_IO16 0x1b0b0
+				MX6QDL_PAD_SD1_DAT1__GPIO1_IO17 0x1b0b0
+			>;
+		};
+
 		pinctrl_pcie: pciegrp {
 			fsl,pins = <
 				MX6QDL_PAD_GPIO_17__GPIO7_IO12	0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index 1691714..c6bec97 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -133,8 +133,7 @@
 		regulator-max-microvolt = <3300000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_flexcan_xcvr>;
-		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
-		enable-active-low;
+		gpio = <&gpio4 21 GPIO_ACTIVE_LOW>;
 	};
 
 	reg_lcd0_pwr: regulator-lcd0-pwr {
@@ -249,7 +248,7 @@
 		 <&clks IMX6QDL_CLK_ENET_REF>;
 	clock-names = "ipg", "ahb", "ptp", "enet_out";
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
 	phy-handle = <&etnphy>;
 	phy-supply = <&reg_3v3_etn>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 82dc5744..b4fa7f1 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -88,6 +88,7 @@
 		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
+		lrclk-strength = <3>;
 	};
 };
 
@@ -208,7 +209,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio3 29 0>;
+	phy-reset-gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
 	interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
 			      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
 	fsl,err006687-workaround-present;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e426faa..a9723b9 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -156,7 +156,7 @@
 				 <&clks IMX6QDL_CLK_GPU3D_CORE>,
 				 <&clks IMX6QDL_CLK_GPU3D_SHADER>;
 			clock-names = "bus", "core", "shader";
-			power-domains = <&gpc 1>;
+			power-domains = <&pd_pu>;
 		};
 
 		gpu_2d: gpu@00134000 {
@@ -166,7 +166,7 @@
 			clocks = <&clks IMX6QDL_CLK_GPU2D_AXI>,
 				 <&clks IMX6QDL_CLK_GPU2D_CORE>;
 			clock-names = "bus", "core";
-			power-domains = <&gpc 1>;
+			power-domains = <&pd_pu>;
 		};
 
 		timer@00a00600 {
@@ -434,7 +434,7 @@
 				clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
 					 <&clks IMX6QDL_CLK_MMDC_CH0_AXI>;
 				clock-names = "per", "ahb";
-				power-domains = <&gpc 1>;
+				power-domains = <&pd_pu>;
 				resets = <&src 1>;
 				iram = <&ocram>;
 			};
@@ -644,6 +644,7 @@
 					anatop-min-bit-val = <4>;
 					anatop-min-voltage = <800000>;
 					anatop-max-voltage = <1375000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-3p0 {
@@ -658,6 +659,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2625000>;
 					anatop-max-voltage = <3400000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-2p5 {
@@ -672,6 +674,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2100000>;
 					anatop-max-voltage = <2875000>;
+					anatop-enable-bit = <0>;
 				};
 
 				reg_arm: regulator-vddcore {
@@ -797,19 +800,39 @@
 				interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
 					     <0 90 IRQ_TYPE_LEVEL_HIGH>;
 				interrupt-parent = <&intc>;
-				pu-supply = <&reg_pu>;
-				clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
-					 <&clks IMX6QDL_CLK_GPU3D_SHADER>,
-					 <&clks IMX6QDL_CLK_GPU2D_CORE>,
-					 <&clks IMX6QDL_CLK_GPU2D_AXI>,
-					 <&clks IMX6QDL_CLK_OPENVG_AXI>,
-					 <&clks IMX6QDL_CLK_VPU_AXI>;
-				#power-domain-cells = <1>;
+				clocks = <&clks IMX6QDL_CLK_IPG>;
+				clock-names = "ipg";
+
+				pgc {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					power-domain@0 {
+						reg = <0>;
+						#power-domain-cells = <0>;
+					};
+					pd_pu: power-domain@1 {
+						reg = <1>;
+						#power-domain-cells = <0>;
+						power-supply = <&reg_pu>;
+						clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+						         <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+						         <&clks IMX6QDL_CLK_GPU2D_CORE>,
+						         <&clks IMX6QDL_CLK_GPU2D_AXI>,
+						         <&clks IMX6QDL_CLK_OPENVG_AXI>,
+						         <&clks IMX6QDL_CLK_VPU_AXI>;
+					};
+				};
 			};
 
 			gpr: iomuxc-gpr@020e0000 {
-				compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
+				compatible = "fsl,imx6q-iomuxc-gpr", "syscon", "simple-mfd";
 				reg = <0x020e0000 0x38>;
+
+				mux: mux-controller {
+					compatible = "mmio-mux";
+					#mux-control-cells = <1>;
+				};
 			};
 
 			iomuxc: iomuxc@020e0000 {
@@ -1135,7 +1158,16 @@
 			};
 
 			mipi_csi: mipi@021dc000 {
+				compatible = "fsl,imx6-mipi-csi2";
 				reg = <0x021dc000 0x4000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 100 0x04>, <0 101 0x04>;
+				clocks = <&clks IMX6QDL_CLK_HSI_TX>,
+					 <&clks IMX6QDL_CLK_VIDEO_27M>,
+					 <&clks IMX6QDL_CLK_EIM_PODF>;
+				clock-names = "dphy", "ref", "pix";
+				status = "disabled";
 			};
 
 			mipi_dsi: mipi@021e0000 {
@@ -1237,6 +1269,10 @@
 
 			ipu1_csi0: port@0 {
 				reg = <0>;
+
+				ipu1_csi0_from_ipu1_csi0_mux: endpoint {
+					remote-endpoint = <&ipu1_csi0_mux_to_ipu1_csi0>;
+				};
 			};
 
 			ipu1_csi1: port@1 {
diff --git a/arch/arm/boot/dts/imx6qp-zii-rdu2.dts b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
index 882b3bd..547a766 100644
--- a/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
+++ b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
@@ -41,8 +41,8 @@
 
 /dts-v1/;
 
-#include <imx6qp.dtsi>
-#include <imx6qdl-zii-rdu2.dtsi>
+#include "imx6qp.dtsi"
+#include "imx6qdl-zii-rdu2.dtsi"
 
 / {
 	model = "ZII RDU2+ Board";
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 59453f2..299d863 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -120,6 +120,10 @@
 		     <0 119 IRQ_TYPE_LEVEL_HIGH>;
 };
 
+&gpc {
+	compatible = "fsl,imx6qp-gpc", "fsl,imx6q-gpc";
+};
+
 &ipu1 {
 	compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
 	fsl,prg = <&prg1>;
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index cc9572e..3243af4 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -530,6 +530,7 @@
 					anatop-min-bit-val = <4>;
 					anatop-min-voltage = <800000>;
 					anatop-max-voltage = <1375000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-3p0 {
@@ -544,6 +545,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2625000>;
 					anatop-max-voltage = <3400000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-2p5 {
@@ -558,6 +560,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2100000>;
 					anatop-max-voltage = <2850000>;
+					anatop-enable-bit = <0>;
 				};
 
 				reg_arm: regulator-vddcore {
diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
index 802da45..c5578d1 100644
--- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
+++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
@@ -296,7 +296,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio4 10 GPIO_ACTIVE_HIGH>;
+	reset-gpio = <&gpio4 10 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -374,7 +374,7 @@
 	cap-sdio-irq;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&gpio7>;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index d71da30..c0139d7 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -128,3 +128,11 @@
 		reg = <1>;
 	};
 };
+
+&reg_arm {
+	vin-supply = <&sw1a_reg>;
+};
+
+&reg_soc {
+	vin-supply = <&sw1a_reg>;
+};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 3f1416b..f16b9df 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -587,6 +587,7 @@
 					anatop-min-bit-val = <4>;
 					anatop-min-voltage = <800000>;
 					anatop-max-voltage = <1375000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-3p0 {
@@ -601,6 +602,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2625000>;
 					anatop-max-voltage = <3400000>;
+					anatop-enable-bit = <0>;
 				};
 
 				regulator-2p5 {
@@ -615,6 +617,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2100000>;
 					anatop-max-voltage = <2875000>;
+					anatop-enable-bit = <0>;
 				};
 
 				reg_arm: regulator-vddcore {
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1..d2be8aa 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
 
 		ethphy0: ethernet-phy@2 {
 			reg = <2>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET_REF>;
+			clock-names = "rmii-ref";
 		};
 
 		ethphy1: ethernet-phy@1 {
 			reg = <1>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+			clock-names = "rmii-ref";
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx6ul-opos6ul.dtsi b/arch/arm/boot/dts/imx6ul-opos6ul.dtsi
index 51095df..aec5ccc 100644
--- a/arch/arm/boot/dts/imx6ul-opos6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul-opos6ul.dtsi
@@ -120,7 +120,7 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		compatible = "brcm,bcm4329-fmac";
 		reg = <1>;
 		interrupt-parent = <&gpio2>;
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
index 7c5dd1b..28d055e 100644
--- a/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
+++ b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
@@ -78,7 +78,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet2 &pinctrl_enet2_mdio &pinctrl_etnphy1_rst>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
 	phy-supply = <&reg_3v3_etn>;
 	phy-handle = <&etnphy1>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
index c784a0b..ec745eb 100644
--- a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
@@ -173,8 +173,7 @@
 		regulator-max-microvolt = <3300000>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_flexcan_xcvr>;
-		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
-		enable-active-low;
+		gpio = <&gpio3 5 GPIO_ACTIVE_LOW>;
 	};
 
 	reg_lcd_pwr: regulator-lcdpwr {
@@ -308,7 +307,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1 &pinctrl_enet1_mdio &pinctrl_etnphy0_rst>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio5 6 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio5 6 GPIO_ACTIVE_LOW>;
 	phy-supply = <&reg_3v3_etn>;
 	phy-handle = <&etnphy0>;
 	status = "okay";
@@ -343,7 +342,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet2 &pinctrl_etnphy1_rst>;
 	phy-mode = "rmii";
-	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
 	phy-supply = <&reg_3v3_etn>;
 	phy-handle = <&etnphy1>;
 	status = "disabled";
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index b9d7d2d..6da2b77 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -542,6 +542,7 @@
 					anatop-min-bit-val = <0>;
 					anatop-min-voltage = <2625000>;
 					anatop-max-voltage = <3400000>;
+					anatop-enable-bit = <0>;
 				};
 
 				reg_arm: regulator-vddcore {
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index 2d87489..d7753f7 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -43,7 +43,7 @@
 / {
 	bl: backlight {
 		compatible = "pwm-backlight";
-		pwms = <&pwm1 0 5000000>;
+		pwms = <&pwm1 0 5000000 0>;
 	};
 
 	reg_module_3v3: regulator-module-3v3 {
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index 5d98e2b..e799830 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -67,7 +67,7 @@
 
 	backlight-j20 {
 		compatible = "pwm-backlight";
-		pwms = <&pwm1 0 5000000>;
+		pwms = <&pwm1 0 5000000 0>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
 		default-brightness-level = <6>;
 		status = "okay";
@@ -279,7 +279,7 @@
 	status = "okay";
 
 	rtc@68 {
-		compatible = "rv4162";
+		compatible = "microcrystal,rv4162";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_i2c2_rv4162>;
 		reg = <0x68>;
@@ -738,7 +738,7 @@
 
 	pinctrl_wdog1: wdog1grp {
 		fsl,pins = <
-			MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG_B	0x75
+			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x75
 		>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx7d-pico.dts b/arch/arm/boot/dts/imx7d-pico.dts
new file mode 100644
index 0000000..e78c2c9
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-pico.dts
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx7d.dtsi"
+
+/ {
+	model = "Technexion Pico i.MX7D Board";
+	compatible = "technexion,imx7d-pico", "fsl,imx7d";
+
+	memory {
+		reg = <0x80000000 0x80000000>;
+	};
+
+	reg_2p5v: regulator-2p5v {
+		compatible = "regulator-fixed";
+		regulator-name = "2P5V";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio4 5 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg2_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	reg_vref_1v8: regulator-vref-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "vref-1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "imx7-sgtl5000";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&dailink_master>;
+		simple-audio-card,frame-master = <&dailink_master>;
+		simple-audio-card,cpu {
+			sound-dai = <&sai1>;
+		};
+
+		dailink_master: simple-audio-card,codec {
+			sound-dai = <&codec>;
+			clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+		};
+	};
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	assigned-clocks = <&clks IMX7D_ENET1_TIME_ROOT_SRC>,
+			  <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+	assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+	assigned-clock-rates = <0>, <100000000>;
+	phy-mode = "rgmii";
+	phy-handle = <&ethphy0>;
+	fsl,magic-packet;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy0: ethernet-phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <1>;
+			status = "okay";
+		};
+	};
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	codec: sgtl5000@0a {
+		#sound-dai-cells = <0>;
+		reg = <0x0a>;
+		compatible = "fsl,sgtl5000";
+		clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+		VDDA-supply = <&reg_2p5v>;
+		VDDIO-supply = <&reg_vref_1v8>;
+	};
+};
+
+&i2c4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c4>;
+	status = "okay";
+
+	pmic: pfuze3000@08 {
+		compatible = "fsl,pfuze3000";
+		reg = <0x08>;
+
+		regulators {
+			sw1a_reg: sw1a {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+			/* use sw1c_reg to align with pfuze100/pfuze200 */
+			sw1c_reg: sw1b {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw2_reg: sw2 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sw3a_reg: sw3 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1650000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen1_reg: vldo1 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen2_reg: vldo2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+			};
+
+			vgen3_reg: vccsd {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen4_reg: v33 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen5_reg: vldo3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vldo4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&sai1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sai1>;
+	assigned-clocks = <&clks IMX7D_SAI1_ROOT_SRC>,
+			  <&clks IMX7D_SAI1_ROOT_CLK>;
+	assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+	assigned-clock-rates = <0>, <24576000>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	assigned-clocks = <&clks IMX7D_UART5_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg1_vbus>;
+	status = "okay";
+};
+
+&usbotg2 {
+	vbus-supply = <&reg_usb_otg2_vbus>;
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usdhc3 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+	assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
+	assigned-clock-rates = <400000000>;
+	bus-width = <8>;
+	no-1-8-v;
+	fsl,tuning-step = <2>;
+	non-removable;
+	status = "okay";
+};
+
+&wdog1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x3
+			MX7D_PAD_SD2_WP__ENET1_MDC			0x3
+			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x1
+			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x1
+			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x1
+			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x1
+			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x1
+			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x1
+			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x1
+			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x1
+			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x1
+			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x1
+			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x1
+			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX7D_PAD_UART1_TX_DATA__I2C1_SDA	0x4000007f
+			MX7D_PAD_UART1_RX_DATA__I2C1_SCL	0x4000007f
+		>;
+	};
+
+	pinctrl_i2c4: i2c4grp {
+		fsl,pins = <
+			MX7D_PAD_SAI1_RX_BCLK__I2C4_SDA		0x4000007f
+			MX7D_PAD_SAI1_RX_SYNC__I2C4_SCL		0x4000007f
+		>;
+	};
+
+	pinctrl_sai1: sai1grp {
+		fsl,pins = <
+			MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK	0x1f
+			MX7D_PAD_ENET1_CRS__SAI1_TX_SYNC	0x1f
+			MX7D_PAD_ENET1_COL__SAI1_TX_DATA0	0x30
+			MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0	0x1f
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX7D_PAD_I2C4_SDA__UART5_DCE_TX		0x79
+			MX7D_PAD_I2C4_SCL__UART5_DCE_RX		0x79
+		>;
+	};
+
+	pinctrl_usbotg1_pwr: usbotg_pwr {
+		fsl,pins = <
+			MX7D_PAD_UART3_TX_DATA__GPIO4_IO5	0x14
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX7D_PAD_SD3_CMD__SD3_CMD		0x59
+			MX7D_PAD_SD3_CLK__SD3_CLK		0x19
+			MX7D_PAD_SD3_DATA0__SD3_DATA0		0x59
+			MX7D_PAD_SD3_DATA1__SD3_DATA1		0x59
+			MX7D_PAD_SD3_DATA2__SD3_DATA2		0x59
+			MX7D_PAD_SD3_DATA3__SD3_DATA3		0x59
+			MX7D_PAD_SD3_DATA4__SD3_DATA4		0x59
+			MX7D_PAD_SD3_DATA5__SD3_DATA5		0x59
+			MX7D_PAD_SD3_DATA6__SD3_DATA6		0x59
+			MX7D_PAD_SD3_DATA7__SD3_DATA7		0x59
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+		fsl,pins = <
+			MX7D_PAD_SD3_CMD__SD3_CMD		0x5a
+			MX7D_PAD_SD3_CLK__SD3_CLK		0x1a
+			MX7D_PAD_SD3_DATA0__SD3_DATA0		0x5a
+			MX7D_PAD_SD3_DATA1__SD3_DATA1		0x5a
+			MX7D_PAD_SD3_DATA2__SD3_DATA2		0x5a
+			MX7D_PAD_SD3_DATA3__SD3_DATA3		0x5a
+			MX7D_PAD_SD3_DATA4__SD3_DATA4		0x5a
+			MX7D_PAD_SD3_DATA5__SD3_DATA5		0x5a
+			MX7D_PAD_SD3_DATA6__SD3_DATA6		0x5a
+			MX7D_PAD_SD3_DATA7__SD3_DATA7		0x5a
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+		fsl,pins = <
+			MX7D_PAD_SD3_CMD__SD3_CMD		0x5b
+			MX7D_PAD_SD3_CLK__SD3_CLK		0x1b
+			MX7D_PAD_SD3_DATA0__SD3_DATA0		0x5b
+			MX7D_PAD_SD3_DATA1__SD3_DATA1		0x5b
+			MX7D_PAD_SD3_DATA2__SD3_DATA2		0x5b
+			MX7D_PAD_SD3_DATA3__SD3_DATA3		0x5b
+			MX7D_PAD_SD3_DATA4__SD3_DATA4		0x5b
+			MX7D_PAD_SD3_DATA5__SD3_DATA5		0x5b
+			MX7D_PAD_SD3_DATA6__SD3_DATA6		0x5b
+			MX7D_PAD_SD3_DATA7__SD3_DATA7		0x5b
+		>;
+	};
+};
+
+&iomuxc_lpsr {
+	pinctrl_wdog: wdoggrp {
+		fsl,pins = <
+			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx7d-pinfunc.h b/arch/arm/boot/dts/imx7d-pinfunc.h
index f6f7e78..f2493bc 100644
--- a/arch/arm/boot/dts/imx7d-pinfunc.h
+++ b/arch/arm/boot/dts/imx7d-pinfunc.h
@@ -17,9 +17,9 @@
 
 #define MX7D_PAD_LPSR_GPIO1_IO00__GPIO1_IO0                       0x0000 0x0030 0x0000 0x0 0x0
 #define MX7D_PAD_LPSR_GPIO1_IO00__PWM4_OUT                        0x0000 0x0030 0x0000 0x1 0x0
-#define MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG_ANY                  0x0000 0x0030 0x0000 0x2 0x0
-#define MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG_B                    0x0000 0x0030 0x0000 0x3 0x0
-#define MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG__RST_B_DEB           0x0000 0x0030 0x0000 0x4 0x0
+#define MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_ANY                  0x0000 0x0030 0x0000 0x2 0x0
+#define MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B                    0x0000 0x0030 0x0000 0x3 0x0
+#define MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG__RST_B_DEB           0x0000 0x0030 0x0000 0x4 0x0
 #define MX7D_PAD_LPSR_GPIO1_IO01__GPIO1_IO1                       0x0004 0x0034 0x0000 0x0 0x0
 #define MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT                        0x0004 0x0034 0x0000 0x1 0x0
 #define MX7D_PAD_LPSR_GPIO1_IO01__CCM_ENET_REF_CLK3               0x0004 0x0034 0x0000 0x2 0x0
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 5be01a1..54c4540 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -52,47 +52,70 @@
 		reg = <0x80000000 0x80000000>;
 	};
 
-	regulators {
-		compatible = "simple-bus";
+	spi4 {
+		compatible = "spi-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_spi4>;
+		gpio-sck = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+		gpio-mosi = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+		cs-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+		num-chipselects = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		reg_usb_otg1_vbus: regulator@0 {
-			compatible = "regulator-fixed";
+		extended_io: gpio-expander@0 {
+			compatible = "fairchild,74hc595";
+			gpio-controller;
+			#gpio-cells = <2>;
 			reg = <0>;
-			regulator-name = "usb_otg1_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
+			registers-number = <1>;
+			spi-max-frequency = <100000>;
 		};
+	};
 
-		reg_usb_otg2_vbus: regulator@1 {
-			compatible = "regulator-fixed";
-			reg = <1>;
-			regulator-name = "usb_otg2_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			gpio = <&gpio4 7 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-		};
+	reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 
-		reg_can2_3v3: regulator@2 {
-			compatible = "regulator-fixed";
-			reg = <2>;
-			regulator-name = "can2-3v3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			gpio = <&gpio1 7 GPIO_ACTIVE_LOW>;
-		};
+	reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg2_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 
-		reg_vref_1v8: regulator@3 {
-			compatible = "regulator-fixed";
-			reg = <3>;
-			regulator-name = "vref-1v8";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-		};
+	reg_can2_3v3: regulator-can2-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "can2-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio1 7 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_vref_1v8: regulator-vref-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "vref-1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	reg_brcm: regulator-brcm {
+		compatible = "regulator-fixed";
+		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-name = "brcm_reg";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_brcm_reg>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <200000>;
 	};
 };
 
@@ -328,6 +351,11 @@
 	};
 };
 
+&pcie {
+	reset-gpio = <&extended_io 1 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
 &pwm1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pwm1>;
@@ -342,6 +370,15 @@
 	status = "okay";
 };
 
+&uart6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart6>;
+	assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>;
+	assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+	uart-has-rtscts;
+	status = "okay";
+};
+
 &usbotg1 {
 	vbus-supply = <&reg_usb_otg1_vbus>;
 	status = "okay";
@@ -363,6 +400,19 @@
 	status = "okay";
 };
 
+&usdhc2 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+	wakeup-source;
+	keep-power-in-suspend;
+	non-removable;
+	vmmc-supply = <&reg_brcm>;
+	fsl,tuning-step = <2>;
+	status = "okay";
+};
+
 &usdhc3 {
 	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
@@ -387,6 +437,12 @@
 	pinctrl-0 = <&pinctrl_hog>;
 
 	imx7d-sdb {
+		pinctrl_brcm_reg: brcmreggrp {
+			fsl,pins = <
+				MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21	0x14
+			>;
+		};
+
 		pinctrl_ecspi3: ecspi3grp {
 			fsl,pins = <
 				MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO	0x2
@@ -554,7 +610,6 @@
 				MX7D_PAD_SD2_DATA1__SD2_DATA1		0x59
 				MX7D_PAD_SD2_DATA2__SD2_DATA2		0x59
 				MX7D_PAD_SD2_DATA3__SD2_DATA3		0x59
-				MX7D_PAD_ECSPI2_MOSI__GPIO4_IO21	0x59 /* WL_REG_ON */
 			>;
 		};
 
@@ -634,7 +689,7 @@
 &iomuxc_lpsr {
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
-			MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG_B		0x74
+			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B		0x74
 		>;
 	};
 
@@ -642,5 +697,13 @@
 		fsl,pins = <
 			MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT		0x110b0
 		>;
+
+		pinctrl_spi4: spi4grp {
+			fsl,pins = <
+				MX7D_PAD_GPIO1_IO09__GPIO1_IO9	0x59
+				MX7D_PAD_GPIO1_IO12__GPIO1_IO12	0x59
+				MX7D_PAD_GPIO1_IO13__GPIO1_IO13	0x59
+			>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index f6dee41..f46814a 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -42,6 +42,7 @@
  */
 
 #include "imx7s.dtsi"
+#include <dt-bindings/reset/imx7-reset.h>
 
 / {
 	cpus {
@@ -127,6 +128,42 @@
 		fsl,num-rx-queues=<3>;
 		status = "disabled";
 	};
+
+	pcie: pcie@0x33800000 {
+		compatible = "fsl,imx7d-pcie", "snps,dw-pcie";
+		reg = <0x33800000 0x4000>,
+		      <0x4ff00000 0x80000>;
+		reg-names = "dbi", "config";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		device_type = "pci";
+		ranges = <0x81000000 0 0          0x4ff80000 0 0x00010000   /* downstream I/O */
+			  0x82000000 0 0x40000000 0x40000000 0 0x0ff00000>; /* non-prefetchable memory */
+		num-lanes = <1>;
+		interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "msi";
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0x7>;
+		interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
+			 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
+			 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
+		clock-names = "pcie", "pcie_bus", "pcie_phy";
+		assigned-clocks = <&clks IMX7D_PCIE_CTRL_ROOT_SRC>,
+				  <&clks IMX7D_PCIE_PHY_ROOT_SRC>;
+		assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_250M_CLK>,
+					 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+
+		fsl,max-link-speed = <2>;
+		power-domains = <&pgc_pcie_phy>;
+		resets = <&src IMX7_RESET_PCIEPHY>,
+			 <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
+		reset-names = "pciephy", "apps";
+		status = "disabled";
+	};
 };
 
 &ca_funnel_ports {
diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts
index d5237fd..07b63f8 100644
--- a/arch/arm/boot/dts/imx7s-warp.dts
+++ b/arch/arm/boot/dts/imx7s-warp.dts
@@ -295,6 +295,7 @@
 	assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
 	assigned-clock-rates = <400000000>;
 	bus-width = <8>;
+	no-1-8-v;
 	fsl,tuning-step = <2>;
 	non-removable;
 	status = "okay";
@@ -442,7 +443,7 @@
 &iomuxc_lpsr {
 	pinctrl_wdog: wdoggrp {
 		fsl,pins = <
-			MX7D_PAD_LPSR_GPIO1_IO00__WDOD1_WDOG_B	0x74
+			MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B	0x74
 		>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index c4f12fd..4cf6c458 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -42,6 +42,7 @@
  */
 
 #include <dt-bindings/clock/imx7d-clock.h>
+#include <dt-bindings/power/imx7-power.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -119,7 +120,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "simple-bus";
-		interrupt-parent = <&intc>;
+		interrupt-parent = <&gpc>;
 		ranges;
 
 		funnel@30041000 {
@@ -301,6 +302,7 @@
 			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 			#interrupt-cells = <3>;
 			interrupt-controller;
+			interrupt-parent = <&intc>;
 			reg = <0x31001000 0x1000>,
 			      <0x31002000 0x2000>,
 			      <0x31004000 0x2000>,
@@ -309,6 +311,7 @@
 
 		timer {
 			compatible = "arm,armv7-timer";
+			interrupt-parent = <&intc>;
 			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
 				     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
 				     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -488,7 +491,8 @@
 			};
 
 			gpr: iomuxc-gpr@30340000 {
-				compatible = "fsl,imx7d-iomuxc-gpr", "syscon";
+				compatible = "fsl,imx7d-iomuxc-gpr",
+					"fsl,imx6q-iomuxc-gpr", "syscon";
 				reg = <0x30340000 0x10000>;
 			};
 
@@ -516,6 +520,7 @@
 					anatop-min-bit-val = <8>;
 					anatop-min-voltage = <800000>;
 					anatop-max-voltage = <1200000>;
+					anatop-enable-bit = <0>;
 				};
 			};
 
@@ -563,6 +568,27 @@
 				interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 				#reset-cells = <1>;
 			};
+
+			gpc: gpc@303a0000 {
+				compatible = "fsl,imx7d-gpc";
+				reg = <0x303a0000 0x10000>;
+				interrupt-controller;
+				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+				#interrupt-cells = <3>;
+				interrupt-parent = <&intc>;
+				#power-domain-cells = <1>;
+
+				pgc {
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					pgc_pcie_phy: pgc-power-domain@IMX7_POWER_DOMAIN_PCIE_PHY {
+						#power-domain-cells = <0>;
+						reg = <IMX7_POWER_DOMAIN_PCIE_PHY>;
+						power-supply = <&reg_1p0d>;
+					};
+				};
+			};
 		};
 
 		aips2: aips-bus@30400000 {
@@ -609,7 +635,7 @@
 				clocks = <&clks IMX7D_PWM1_ROOT_CLK>,
 					 <&clks IMX7D_PWM1_ROOT_CLK>;
 				clock-names = "ipg", "per";
-				#pwm-cells = <2>;
+				#pwm-cells = <3>;
 				status = "disabled";
 			};
 
@@ -620,7 +646,7 @@
 				clocks = <&clks IMX7D_PWM2_ROOT_CLK>,
 					 <&clks IMX7D_PWM2_ROOT_CLK>;
 				clock-names = "ipg", "per";
-				#pwm-cells = <2>;
+				#pwm-cells = <3>;
 				status = "disabled";
 			};
 
@@ -631,7 +657,7 @@
 				clocks = <&clks IMX7D_PWM3_ROOT_CLK>,
 					 <&clks IMX7D_PWM3_ROOT_CLK>;
 				clock-names = "ipg", "per";
-				#pwm-cells = <2>;
+				#pwm-cells = <3>;
 				status = "disabled";
 			};
 
@@ -642,7 +668,7 @@
 				clocks = <&clks IMX7D_PWM4_ROOT_CLK>,
 					 <&clks IMX7D_PWM4_ROOT_CLK>;
 				clock-names = "ipg", "per";
-				#pwm-cells = <2>;
+				#pwm-cells = <3>;
 				status = "disabled";
 			};
 
@@ -934,8 +960,8 @@
 				compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
 				reg = <0x30b40000 0x10000>;
 				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&clks IMX7D_CLK_DUMMY>,
-					<&clks IMX7D_CLK_DUMMY>,
+				clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+					<&clks IMX7D_NAND_USDHC_BUS_ROOT_CLK>,
 					<&clks IMX7D_USDHC1_ROOT_CLK>;
 				clock-names = "ipg", "ahb", "per";
 				bus-width = <4>;
@@ -946,8 +972,8 @@
 				compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
 				reg = <0x30b50000 0x10000>;
 				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&clks IMX7D_CLK_DUMMY>,
-					<&clks IMX7D_CLK_DUMMY>,
+				clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+					<&clks IMX7D_NAND_USDHC_BUS_ROOT_CLK>,
 					<&clks IMX7D_USDHC2_ROOT_CLK>;
 				clock-names = "ipg", "ahb", "per";
 				bus-width = <4>;
@@ -958,8 +984,8 @@
 				compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
 				reg = <0x30b60000 0x10000>;
 				interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&clks IMX7D_CLK_DUMMY>,
-					<&clks IMX7D_CLK_DUMMY>,
+				clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+					<&clks IMX7D_NAND_USDHC_BUS_ROOT_CLK>,
 					<&clks IMX7D_USDHC3_ROOT_CLK>;
 				clock-names = "ipg", "ahb", "per";
 				bus-width = <4>;
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index 692fcbb..61883cb 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -20,7 +20,7 @@
 	compatible =  "ti,k2g-evm", "ti,k2g", "ti,keystone";
 	model = "Texas Instruments K2G General Purpose EVM";
 
-	memory {
+	memory@800000000 {
 		device_type = "memory";
 		reg = <0x00000008 0x00000000 0x00000000 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/keystone-k2g-ice.dts b/arch/arm/boot/dts/keystone-k2g-ice.dts
new file mode 100644
index 0000000..d820d37
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2g-ice.dts
@@ -0,0 +1,35 @@
+/*
+ * Device Tree Source for K2G Industrial Communication Engine EVM
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+/dts-v1/;
+
+#include "keystone-k2g.dtsi"
+
+/ {
+	compatible = "ti,k2g-ice", "ti,k2g", "ti,keystone";
+	model = "Texas Instruments K2G Industrial Communication EVM";
+
+	memory@800000000 {
+		device_type = "memory";
+		reg = <0x00000008 0x00000000 0x00000000 0x20000000>;
+	};
+};
+
+&k2g_pinctrl {
+	uart0_pins: pinmux_uart0_pins {
+		pinctrl-single,pins = <
+			K2G_CORE_IOPAD(0x11cc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
+			K2G_CORE_IOPAD(0x11d0) (BUFFER_CLASS_B | PIN_PULLDOWN | MUX_MODE0)	/* uart0_txd.uart0_txd */
+		>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index f59567f..a789f75 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -15,7 +15,6 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/pinctrl/keystone.h>
-#include "skeleton.dtsi"
 
 / {
 	compatible = "ti,k2g","ti,keystone";
@@ -24,6 +23,8 @@
 	#size-cells = <2>;
 	interrupt-parent = <&gic>;
 
+	chosen { };
+
 	aliases {
 		serial0 = &uart0;
 	};
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f2682..66f615a 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@
 	/* NetCP address range */
 	ranges = <0 0x26000000 0x1000000>;
 
-	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-	clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+	clock-names = "pa_clk", "ethss_clk", "cpts";
 	dma-coherent;
 
 	ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7eb..1486504 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
 			};
 		};
 
+		osr: sram@70000000 {
+			compatible = "mmio-sram";
+			reg = <0x70000000 0x10000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&clkosr>;
+		};
+
 		dspgpio0: keystone_dsp_gpio@02620240 {
 			compatible = "ti,keystone-dsp-gpio";
 			gpio-controller;
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index 94e49f3..c75da5f 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -141,7 +141,7 @@
 
 	partition@d4000 {
 		label = "test";
-		reg = <0xd4000 0x24000>;
+		reg = <0xd4000 0x20000>;
 	};
 
 	partition@f4000 {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index efe5399..6d89736 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -7,6 +7,10 @@
 #include <dt-bindings/input/input.h>
 
 / {
+	chosen {
+		stdout-path = &uart1;
+	};
+
 	cpus {
 		cpu@0 {
 			cpu0-supply = <&vcc>;
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 45ea57f..7bb9df2 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -75,7 +75,7 @@
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0xf00>;
-			clocks = <&cluster1_clk>;
+			clocks = <&clockgen 1 0>;
 			#cooling-cells = <2>;
 		};
 
@@ -83,10 +83,17 @@
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0xf01>;
-			clocks = <&cluster1_clk>;
+			clocks = <&clockgen 1 0>;
 		};
 	};
 
+	sysclk: sysclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <100000000>;
+		clock-output-names = "sysclk";
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
@@ -165,7 +172,7 @@
 			      <0x0 0x20220520 0x0 0x4>;
 			reg-names = "ahci", "sata-ecc";
 			interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			dma-coherent;
 			status = "disabled";
 		};
@@ -216,41 +223,10 @@
 		};
 
 		clockgen: clocking@1ee1000 {
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0x0 0x0 0x1ee1000 0x10000>;
-
-			sysclk: sysclk {
-				compatible = "fixed-clock";
-				#clock-cells = <0>;
-				clock-output-names = "sysclk";
-			};
-
-			cga_pll1: pll@800 {
-				compatible = "fsl,qoriq-core-pll-2.0";
-				#clock-cells = <1>;
-				reg = <0x800 0x10>;
-				clocks = <&sysclk>;
-				clock-output-names = "cga-pll1", "cga-pll1-div2",
-						     "cga-pll1-div4";
-			};
-
-			platform_clk: pll@c00 {
-				compatible = "fsl,qoriq-core-pll-2.0";
-				#clock-cells = <1>;
-				reg = <0xc00 0x10>;
-				clocks = <&sysclk>;
-				clock-output-names = "platform-clk", "platform-clk-div2";
-			};
-
-			cluster1_clk: clk0c0@0 {
-				compatible = "fsl,qoriq-core-mux-2.0";
-				#clock-cells = <0>;
-				reg = <0x0 0x10>;
-				clock-names = "pll1cga", "pll1cga-div2", "pll1cga-div4";
-				clocks = <&cga_pll1 0>, <&cga_pll1 1>, <&cga_pll1 2>;
-				clock-output-names = "cluster1-clk";
-			};
+			compatible = "fsl,ls1021a-clockgen";
+			reg = <0x0 0x1ee1000 0x0 0x1000>;
+			#clock-cells = <2>;
+			clocks = <&sysclk>;
 		};
 
 		tmu: tmu@1f00000 {
@@ -338,7 +314,7 @@
 			reg = <0x0 0x2100000 0x0 0x10000>;
 			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "dspi";
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			spi-num-chipselects = <6>;
 			big-endian;
 			status = "disabled";
@@ -351,7 +327,7 @@
 			reg = <0x0 0x2110000 0x0 0x10000>;
 			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "dspi";
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			spi-num-chipselects = <6>;
 			big-endian;
 			status = "disabled";
@@ -364,7 +340,7 @@
 			reg = <0x0 0x2180000 0x0 0x10000>;
 			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "i2c";
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			status = "disabled";
 		};
 
@@ -375,7 +351,7 @@
 			reg = <0x0 0x2190000 0x0 0x10000>;
 			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "i2c";
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			status = "disabled";
 		};
 
@@ -386,7 +362,7 @@
 			reg = <0x0 0x21a0000 0x0 0x10000>;
 			interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "i2c";
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			status = "disabled";
 		};
 
@@ -479,7 +455,7 @@
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x2960000 0x0 0x1000>;
 			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "ipg";
 			status = "disabled";
 		};
@@ -488,7 +464,7 @@
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x2970000 0x0 0x1000>;
 			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "ipg";
 			status = "disabled";
 		};
@@ -497,7 +473,7 @@
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x2980000 0x0 0x1000>;
 			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "ipg";
 			status = "disabled";
 		};
@@ -506,7 +482,7 @@
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x2990000 0x0 0x1000>;
 			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "ipg";
 			status = "disabled";
 		};
@@ -515,7 +491,7 @@
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x29a0000 0x0 0x1000>;
 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "ipg";
 			status = "disabled";
 		};
@@ -524,7 +500,7 @@
 			compatible = "fsl,imx21-wdt";
 			reg = <0x0 0x2ad0000 0x0 0x10000>;
 			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
+			clocks = <&clockgen 4 1>;
 			clock-names = "wdog-en";
 			big-endian;
 		};
@@ -534,8 +510,8 @@
 			compatible = "fsl,vf610-sai";
 			reg = <0x0 0x2b50000 0x0 0x10000>;
 			interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>, <&platform_clk 1>,
-				 <&platform_clk 1>, <&platform_clk 1>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+				 <&clockgen 4 1>, <&clockgen 4 1>;
 			clock-names = "bus", "mclk1", "mclk2", "mclk3";
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 47>,
@@ -548,8 +524,8 @@
 			compatible = "fsl,vf610-sai";
 			reg = <0x0 0x2b60000 0x0 0x10000>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>, <&platform_clk 1>,
-				 <&platform_clk 1>, <&platform_clk 1>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+				 <&clockgen 4 1>, <&clockgen 4 1>;
 			clock-names = "bus", "mclk1", "mclk2", "mclk3";
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 45>,
@@ -569,16 +545,16 @@
 			dma-channels = <32>;
 			big-endian;
 			clock-names = "dmamux0", "dmamux1";
-			clocks = <&platform_clk 1>,
-				 <&platform_clk 1>;
+			clocks = <&clockgen 4 1>,
+				 <&clockgen 4 1>;
 		};
 
 		dcu: dcu@2ce0000 {
 			compatible = "fsl,ls1021a-dcu";
 			reg = <0x0 0x2ce0000 0x0 0x10000>;
 			interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 0>,
-				<&platform_clk 0>;
+			clocks = <&clockgen 4 0>,
+				<&clockgen 4 0>;
 			clock-names = "dcu", "pix";
 			big-endian;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 8c77c87..15204e4 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -45,6 +45,8 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 /include/ "skeleton.dtsi"
 
 / {
@@ -65,99 +67,158 @@
 		#interrupt-cells = <3>;
 	};
 
-	timer@c1109940 {
-		compatible = "amlogic,meson6-timer";
-		reg = <0xc1109940 0x18>;
-		interrupts = <0 10 1>;
-	};
-
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		wdt: watchdog@c1109900 {
-			compatible = "amlogic,meson6-wdt";
-			reg = <0xc1109900 0x8>;
-			interrupts = <0 0 1>;
+		cbus: cbus@c1100000 {
+			compatible = "simple-bus";
+			reg = <0xc1100000 0x200000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0xc1100000 0x200000>;
+
+			hwrng: rng@8100 {
+				compatible = "amlogic,meson-rng";
+				reg = <0x8100 0x8>;
+			};
+
+			uart_A: serial@84c0 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x84c0 0x18>;
+				interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			uart_B: serial@84dc {
+				compatible = "amlogic,meson-uart";
+				reg = <0x84dc 0x18>;
+				interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			i2c_A: i2c@8500 {
+				compatible = "amlogic,meson6-i2c";
+				reg = <0x8500 0x20>;
+				interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			saradc: adc@8680 {
+				compatible = "amlogic,meson-saradc";
+				reg = <0x8680 0x34>;
+				#io-channel-cells = <1>;
+				interrupts = <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			uart_C: serial@8700 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x8700 0x18>;
+				interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			i2c_B: i2c@87c0 {
+				compatible = "amlogic,meson6-i2c";
+				reg = <0x87c0 0x20>;
+				interrupts = <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			usb0_phy: phy@8800 {
+				compatible = "amlogic,meson-mx-usb2-phy";
+				#phy-cells = <0>;
+				reg = <0x8800 0x20>;
+				status = "disabled";
+			};
+
+			usb1_phy: phy@8820 {
+				compatible = "amlogic,meson-mx-usb2-phy";
+				#phy-cells = <0>;
+				reg = <0x8820 0x20>;
+				status = "disabled";
+			};
+
+			spifc: spi@8c80 {
+				compatible = "amlogic,meson6-spifc";
+				reg = <0x8c80 0x80>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			wdt: watchdog@9900 {
+				compatible = "amlogic,meson6-wdt";
+				reg = <0x9900 0x8>;
+				interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>;
+			};
+
+			timer@9940 {
+				compatible = "amlogic,meson6-timer";
+				reg = <0x9940 0x18>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_EDGE_RISING>;
+			};
 		};
 
-		uart_AO: serial@c81004c0 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc81004c0 0x18>;
-			interrupts = <0 90 1>;
-			clocks = <&clk81>;
-			status = "disabled";
+		aobus: aobus@c8100000 {
+			compatible = "simple-bus";
+			reg = <0xc8100000 0x100000>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0xc8100000 0x100000>;
+
+			ir_receiver: ir-receiver@480 {
+				compatible= "amlogic,meson6-ir";
+				reg = <0x480 0x20>;
+				interrupts = <GIC_SPI 15 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			uart_AO: serial@4c0 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x4c0 0x18>;
+				interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			i2c_AO: i2c@500 {
+				compatible = "amlogic,meson6-i2c";
+				reg = <0x500 0x20>;
+				interrupts = <GIC_SPI 92 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
 		};
 
-		uart_A: serial@c11084c0 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc11084c0 0x18>;
-			interrupts = <0 26 1>;
-			clocks = <&clk81>;
-			status = "disabled";
-		};
-
-		uart_B: serial@c11084dc {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc11084dc 0x18>;
-			interrupts = <0 75 1>;
-			clocks = <&clk81>;
-			status = "disabled";
-		};
-
-		uart_C: serial@c1108700 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc1108700 0x18>;
-			interrupts = <0 93 1>;
-			clocks = <&clk81>;
-			status = "disabled";
-		};
-
-		i2c_AO: i2c@c8100500 {
-			compatible = "amlogic,meson6-i2c";
-			reg = <0xc8100500 0x20>;
-			interrupts = <0 92 1>;
-			clocks = <&clk81>;
+		usb0: usb@c9040000 {
+			compatible = "snps,dwc2";
 			#address-cells = <1>;
 			#size-cells = <0>;
+			reg = <0xc9040000 0x40000>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>;
+			phys = <&usb0_phy>;
+			phy-names = "usb2-phy";
+			dr_mode = "host";
 			status = "disabled";
 		};
 
-		i2c_A: i2c@c1108500 {
-			compatible = "amlogic,meson6-i2c";
-			reg = <0xc1108500 0x20>;
-			interrupts = <0 21 1>;
-			clocks = <&clk81>;
+		usb1: usb@c90c0000 {
+			compatible = "snps,dwc2";
 			#address-cells = <1>;
 			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		i2c_B: i2c@c11087c0 {
-			compatible = "amlogic,meson6-i2c";
-			reg = <0xc11087c0 0x20>;
-			interrupts = <0 128 1>;
-			clocks = <&clk81>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		ir_receiver: ir-receiver@c8100480 {
-			compatible= "amlogic,meson6-ir";
-			reg = <0xc8100480 0x20>;
-			interrupts = <0 15 1>;
-			status = "disabled";
-		};
-
-		spifc: spi@c1108c80 {
-			compatible = "amlogic,meson6-spifc";
-			reg = <0xc1108c80 0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			clocks = <&clk81>;
+			reg = <0xc90c0000 0x40000>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>;
+			phys = <&usb1_phy>;
+			phy-names = "usb2-phy";
+			dr_mode = "host";
 			status = "disabled";
 		};
 
@@ -165,10 +226,8 @@
 			compatible = "amlogic,meson6-dwmac", "snps,dwmac";
 			reg = <0xc9410000 0x10000
 			       0xc1108108 0x4>;
-			interrupts = <0 8 1>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
 			interrupt-names = "macirq";
-			clocks = <&clk81>;
-			clock-names = "stmmaceth";
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm/boot/dts/meson6-atv1200.dts b/arch/arm/boot/dts/meson6-atv1200.dts
index 1237faa..9444b0d 100644
--- a/arch/arm/boot/dts/meson6-atv1200.dts
+++ b/arch/arm/boot/dts/meson6-atv1200.dts
@@ -46,7 +46,7 @@
  */
 
 /dts-v1/;
-/include/ "meson6.dtsi"
+#include "meson6.dtsi"
 
 / {
 	model = "Geniatech ATV1200";
diff --git a/arch/arm/boot/dts/meson6.dtsi b/arch/arm/boot/dts/meson6.dtsi
index 8b33be1..8557b61 100644
--- a/arch/arm/boot/dts/meson6.dtsi
+++ b/arch/arm/boot/dts/meson6.dtsi
@@ -45,14 +45,12 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "meson.dtsi"
+#include "meson.dtsi"
 
 / {
 	model = "Amlogic Meson6 SoC";
 	compatible = "amlogic,meson6";
 
-	interrupt-parent = <&gic>;
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index ebc763e..cada358 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -43,15 +43,14 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/clock/meson8b-clkc.h>
 #include <dt-bindings/gpio/meson8-gpio.h>
-/include/ "meson.dtsi"
+#include "meson.dtsi"
 
 / {
 	model = "Amlogic Meson8 SoC";
 	compatible = "amlogic,meson8";
 
-	interrupt-parent = <&gic>;
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -85,67 +84,51 @@
 		};
 	};
 
-	clk81: clk@0 {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <141666666>;
-	};
-
-	pinctrl_cbus: pinctrl@c1109880 {
-		compatible = "amlogic,meson8-cbus-pinctrl";
-		reg = <0xc1109880 0x10>;
+	reserved-memory {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		gpio: banks@c11080b0 {
-			reg = <0xc11080b0 0x28>,
-			      <0xc11080e8 0x18>,
-			      <0xc1108120 0x18>,
-			      <0xc1108030 0x30>;
-			reg-names = "mux", "pull", "pull-enable", "gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio-ranges = <&pinctrl_cbus 0 0 120>;
+		/* 2 MiB reserved for Hardware ROM Firmware? */
+		hwrom@0 {
+			reg = <0x0 0x200000>;
+			no-map;
 		};
 
-		spi_nor_pins: nor {
-			mux {
-				groups = "nor_d", "nor_q", "nor_c", "nor_cs";
-				function = "nor";
-			};
-		};
-
-		ir_recv_pins: remote {
-			mux {
-				groups = "remote_input";
-				function = "remote";
-			};
-		};
-
-		eth_pins: ethernet {
-			mux {
-				groups = "eth_tx_clk_50m", "eth_tx_en",
-					 "eth_txd1", "eth_txd0",
-					 "eth_rx_clk_in", "eth_rx_dv",
-					 "eth_rxd1", "eth_rxd0", "eth_mdio",
-					 "eth_mdc";
-				function = "ethernet";
-			};
+		/*
+		 * 1 MiB reserved for the "ARM Power Firmware": this is ARM
+		 * code which is responsible for system suspend. It loads a
+		 * piece of ARC code ("arc_power" in the vendor u-boot tree)
+		 * into SRAM, executes that and shuts down the (last) ARM core.
+		 * The arc_power firmware then checks various wakeup sources
+		 * (IR remote receiver, HDMI CEC, WIFI and Bluetooth wakeup or
+		 * simply the power key) and re-starts the ARM core once it
+		 * detects a wakeup request.
+		 */
+		power-firmware@4f00000 {
+			reg = <0x4f00000 0x100000>;
+			no-map;
 		};
 	};
 
-	pinctrl_aobus: pinctrl@c8100084 {
+	scu@c4300000 {
+		compatible = "arm,cortex-a9-scu";
+		reg = <0xc4300000 0x100>;
+	};
+}; /* end of / */
+
+&aobus {
+	pinctrl_aobus: pinctrl@84 {
 		compatible = "amlogic,meson8-aobus-pinctrl";
-		reg = <0xc8100084 0xc>;
+		reg = <0x84 0xc>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		gpio_ao: ao-bank@c1108030 {
-			reg = <0xc8100014 0x4>,
-			      <0xc810002c 0x4>,
-			      <0xc8100024 0x8>;
+		gpio_ao: ao-bank@14 {
+			reg = <0x14 0x4>,
+			      <0x2c 0x4>,
+			      <0x24 0x8>;
 			reg-names = "mux", "pull", "gpio";
 			gpio-controller;
 			#gpio-cells = <2>;
@@ -165,5 +148,176 @@
 				function = "i2c_mst_ao";
 			};
 		};
+
+		ir_recv_pins: remote {
+			mux {
+				groups = "remote_input";
+				function = "remote";
+			};
+		};
+
+		pwm_f_ao_pins: pwm-f-ao {
+			mux {
+				groups = "pwm_f_ao";
+				function = "pwm_f_ao";
+			};
+		};
 	};
-}; /* end of / */
+};
+
+&cbus {
+	clkc: clock-controller@4000 {
+		#clock-cells = <1>;
+		compatible = "amlogic,meson8-clkc";
+		reg = <0x8000 0x4>, <0x4000 0x460>;
+	};
+
+	pinctrl_cbus: pinctrl@9880 {
+		compatible = "amlogic,meson8-cbus-pinctrl";
+		reg = <0x9880 0x10>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gpio: banks@80b0 {
+			reg = <0x80b0 0x28>,
+			      <0x80e8 0x18>,
+			      <0x8120 0x18>,
+			      <0x8030 0x30>;
+			reg-names = "mux", "pull", "pull-enable", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_cbus 0 0 120>;
+		};
+
+		sd_a_pins: sd-a {
+			mux {
+				groups = "sd_d0_a", "sd_d1_a", "sd_d2_a",
+					"sd_d3_a", "sd_clk_a", "sd_cmd_a";
+				function = "sd_a";
+			};
+		};
+
+		sd_b_pins: sd-b {
+			mux {
+				groups = "sd_d0_b", "sd_d1_b", "sd_d2_b",
+					"sd_d3_b", "sd_clk_b", "sd_cmd_b";
+				function = "sd_b";
+			};
+		};
+
+		sd_c_pins: sd-c {
+			mux {
+				groups = "sd_d0_c", "sd_d1_c", "sd_d2_c",
+					"sd_d3_c", "sd_clk_c", "sd_cmd_c";
+				function = "sd_c";
+			};
+		};
+
+		spi_nor_pins: nor {
+			mux {
+				groups = "nor_d", "nor_q", "nor_c", "nor_cs";
+				function = "nor";
+			};
+		};
+
+		eth_pins: ethernet {
+			mux {
+				groups = "eth_tx_clk_50m", "eth_tx_en",
+					 "eth_txd1", "eth_txd0",
+					 "eth_rx_clk_in", "eth_rx_dv",
+					 "eth_rxd1", "eth_rxd0", "eth_mdio",
+					 "eth_mdc";
+				function = "ethernet";
+			};
+		};
+
+		pwm_e_pins: pwm-e {
+			mux {
+				groups = "pwm_e";
+				function = "pwm_e";
+			};
+		};
+	};
+};
+
+&ethmac {
+	clocks = <&clkc CLKID_ETH>;
+	clock-names = "stmmaceth";
+};
+
+&hwrng {
+	compatible = "amlogic,meson8-rng", "amlogic,meson-rng";
+	clocks = <&clkc CLKID_RNG0>;
+	clock-names = "core";
+};
+
+&i2c_AO {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&i2c_A {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&i2c_B {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&L2 {
+	arm,data-latency = <3 3 3>;
+	arm,tag-latency = <2 2 2>;
+	arm,filter-ranges = <0x100000 0xc0000000>;
+};
+
+&saradc {
+	compatible = "amlogic,meson8-saradc", "amlogic,meson-saradc";
+	clocks = <&clkc CLKID_XTAL>,
+		<&clkc CLKID_SAR_ADC>,
+		<&clkc CLKID_SANA>;
+	clock-names = "clkin", "core", "sana";
+};
+
+&spifc {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_AO {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_A {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_B {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_C {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&usb0 {
+	compatible = "amlogic,meson8-usb", "snps,dwc2";
+	clocks = <&clkc CLKID_USB0_DDR_BRIDGE>;
+	clock-names = "otg";
+};
+
+&usb1 {
+	compatible = "amlogic,meson8-usb", "snps,dwc2";
+	clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+	clock-names = "otg";
+};
+
+&usb0_phy {
+	compatible = "amlogic,meson8-usb2-phy", "amlogic,meson-mx-usb2-phy";
+	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
+	clock-names = "usb_general", "usb";
+};
+
+&usb1_phy {
+	compatible = "amlogic,meson8-usb2-phy", "amlogic,meson-mx-usb2-phy";
+	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1>;
+	clock-names = "usb_general", "usb";
+};
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 828aa49..72e4f42 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -47,11 +47,9 @@
 #include <dt-bindings/clock/meson8b-clkc.h>
 #include <dt-bindings/gpio/meson8b-gpio.h>
 #include <dt-bindings/reset/amlogic,meson8b-reset.h>
-#include "skeleton.dtsi"
+#include "meson.dtsi"
 
 / {
-	interrupt-parent = <&gic>;
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -85,146 +83,162 @@
 		};
 	};
 
-	soc {
-		compatible = "simple-bus";
+	scu@c4300000 {
+		compatible = "arm,cortex-a5-scu";
+		reg = <0xc4300000 0x100>;
+	};
+}; /* end of / */
+
+&aobus {
+	pinctrl_aobus: pinctrl@84 {
+		compatible = "amlogic,meson8b-aobus-pinctrl";
+		reg = <0x84 0xc>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		L2: l2-cache-controller@c4200000 {
-			compatible = "arm,pl310-cache";
-			reg = <0xc4200000 0x1000>;
-			cache-unified;
-			cache-level = <2>;
+		gpio_ao: ao-bank@14 {
+			reg = <0x14 0x4>,
+				<0x2c 0x4>,
+				<0x24 0x8>;
+			reg-names = "mux", "pull", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_aobus 0 130 16>;
 		};
 
-		gic: interrupt-controller@c4301000 {
-			compatible = "arm,cortex-a9-gic";
-			reg = <0xc4301000 0x1000>,
-			      <0xc4300100 0x0100>;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-		};
-
-		reset: reset-controller@c1104404 {
-			compatible = "amlogic,meson8b-reset";
-			reg = <0xc1104404 0x20>;
-			#reset-cells = <1>;
-		};
-
-		wdt: watchdog@c1109900 {
-			compatible = "amlogic,meson8b-wdt";
-			reg = <0xc1109900 0x8>;
-			interrupts = <0 0 1>;
-		};
-
-		timer@c1109940 {
-			compatible = "amlogic,meson6-timer";
-			reg = <0xc1109940 0x18>;
-			interrupts = <0 10 1>;
-		};
-
-		uart_AO: serial@c81004c0 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc81004c0 0x18>;
-			interrupts = <0 90 1>;
-			clocks = <&clkc CLKID_CLK81>;
-			status = "disabled";
-		};
-
-		uart_A: serial@c11084c0 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc11084c0 0x18>;
-			interrupts = <0 26 1>;
-			clocks = <&clkc CLKID_CLK81>;
-			status = "disabled";
-		};
-
-		uart_B: serial@c11084dc {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc11084dc 0x18>;
-			interrupts = <0 75 1>;
-			clocks = <&clkc CLKID_CLK81>;
-			status = "disabled";
-		};
-
-		uart_C: serial@c1108700 {
-			compatible = "amlogic,meson-uart";
-			reg = <0xc1108700 0x18>;
-			interrupts = <0 93 1>;
-			clocks = <&clkc CLKID_CLK81>;
-			status = "disabled";
-		};
-
-		clkc: clock-controller@c1104000 {
-			#clock-cells = <1>;
-			compatible = "amlogic,meson8b-clkc";
-			reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
-		};
-
-		pwm_ab: pwm@8550 {
-			compatible = "amlogic,meson8b-pwm";
-			reg = <0xc1108550 0x10>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		pwm_cd: pwm@8650 {
-			compatible = "amlogic,meson8b-pwm";
-			reg = <0xc1108650 0x10>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		pwm_ef: pwm@86c0 {
-			compatible = "amlogic,meson8b-pwm";
-			reg = <0xc11086c0 0x10>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		pinctrl_cbus: pinctrl@c1109880 {
-			compatible = "amlogic,meson8b-cbus-pinctrl";
-			reg = <0xc1109880 0x10>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			gpio: banks@c11080b0 {
-				reg = <0xc11080b0 0x28>,
-				      <0xc11080e8 0x18>,
-				      <0xc1108120 0x18>,
-				      <0xc1108030 0x38>;
-				reg-names = "mux", "pull", "pull-enable", "gpio";
-				gpio-controller;
-				#gpio-cells = <2>;
-				gpio-ranges = <&pinctrl_cbus 0 0 130>;
-			};
-		};
-
-		pinctrl_aobus: pinctrl@c8100084 {
-			compatible = "amlogic,meson8b-aobus-pinctrl";
-			reg = <0xc8100084 0xc>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges;
-
-			gpio_ao: ao-bank@c1108030 {
-				reg = <0xc8100014 0x4>,
-				      <0xc810002c 0x4>,
-				      <0xc8100024 0x8>;
-				reg-names = "mux", "pull", "gpio";
-				gpio-controller;
-				#gpio-cells = <2>;
-				gpio-ranges = <&pinctrl_aobus 0 130 16>;
-			};
-
-			uart_ao_a_pins: uart_ao_a {
-				mux {
-					groups = "uart_tx_ao_a", "uart_rx_ao_a";
-					function = "uart_ao";
-				};
+		uart_ao_a_pins: uart_ao_a {
+			mux {
+				groups = "uart_tx_ao_a", "uart_rx_ao_a";
+				function = "uart_ao";
 			};
 		};
 	};
-}; /* end of / */
+};
+
+&cbus {
+	clkc: clock-controller@4000 {
+		#clock-cells = <1>;
+		compatible = "amlogic,meson8b-clkc";
+		reg = <0x8000 0x4>, <0x4000 0x460>;
+	};
+
+	reset: reset-controller@4404 {
+		compatible = "amlogic,meson8b-reset";
+		reg = <0x4404 0x20>;
+		#reset-cells = <1>;
+	};
+
+	pwm_ab: pwm@8550 {
+		compatible = "amlogic,meson8b-pwm";
+		reg = <0x8550 0x10>;
+		#pwm-cells = <3>;
+		status = "disabled";
+	};
+
+	pwm_cd: pwm@8650 {
+		compatible = "amlogic,meson8b-pwm";
+		reg = <0x8650 0x10>;
+		#pwm-cells = <3>;
+		status = "disabled";
+	};
+
+	pwm_ef: pwm@86c0 {
+		compatible = "amlogic,meson8b-pwm";
+		reg = <0x86c0 0x10>;
+		#pwm-cells = <3>;
+		status = "disabled";
+	};
+
+	wdt: watchdog@9900 {
+		compatible = "amlogic,meson8b-wdt";
+		reg = <0x9900 0x8>;
+		interrupts = <0 0 1>;
+	};
+
+	pinctrl_cbus: pinctrl@9880 {
+		compatible = "amlogic,meson8b-cbus-pinctrl";
+		reg = <0x9880 0x10>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gpio: banks@80b0 {
+			reg = <0x80b0 0x28>,
+				<0x80e8 0x18>,
+				<0x8120 0x18>,
+				<0x8030 0x38>;
+			reg-names = "mux", "pull", "pull-enable", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pinctrl_cbus 0 0 130>;
+		};
+	};
+};
+
+&ethmac {
+	clocks = <&clkc CLKID_ETH>;
+	clock-names = "stmmaceth";
+};
+
+&hwrng {
+	compatible = "amlogic,meson8b-rng", "amlogic,meson-rng";
+	clocks = <&clkc CLKID_RNG0>;
+	clock-names = "core";
+};
+
+&L2 {
+	arm,data-latency = <3 3 3>;
+	arm,tag-latency = <2 2 2>;
+	arm,filter-ranges = <0x100000 0xc0000000>;
+};
+
+&saradc {
+	compatible = "amlogic,meson8b-saradc", "amlogic,meson-saradc";
+	clocks = <&clkc CLKID_XTAL>,
+		<&clkc CLKID_SAR_ADC>,
+		<&clkc CLKID_SANA>;
+	clock-names = "clkin", "core", "sana";
+};
+
+&uart_AO {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_A {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_B {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&uart_C {
+	clocks = <&clkc CLKID_CLK81>;
+};
+
+&usb0 {
+	compatible = "amlogic,meson8b-usb", "snps,dwc2";
+	clocks = <&clkc CLKID_USB0_DDR_BRIDGE>;
+	clock-names = "otg";
+};
+
+&usb1 {
+	compatible = "amlogic,meson8b-usb", "snps,dwc2";
+	clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+	clock-names = "otg";
+};
+
+&usb0_phy {
+	compatible = "amlogic,meson8b-usb2-phy", "amlogic,meson-mx-usb2-phy";
+	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
+	clock-names = "usb_general", "usb";
+	resets = <&reset RESET_USB_OTG>;
+};
+
+&usb1_phy {
+	compatible = "amlogic,meson8b-usb2-phy", "amlogic,meson-mx-usb2-phy";
+	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1>;
+	clock-names = "usb_general", "usb";
+	resets = <&reset RESET_USB_OTG>;
+};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index f5aeb39..1eb5da1 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -26,6 +26,22 @@
 			#io-channel-cells = <1>;
 		};
 
+		cpcap_battery: battery {
+			compatible = "motorola,cpcap-battery";
+			interrupts-extended = <
+				&cpcap 6 0 &cpcap 5 0 &cpcap 3 0
+				&cpcap 20 0 &cpcap 54 0
+			>;
+			interrupt-names =
+				"eol", "lowbph", "lowbpl",
+				"chrgcurr1", "battdetb";
+			io-channels = <&cpcap_adc 0 &cpcap_adc 1
+				       &cpcap_adc 5 &cpcap_adc 6>;
+			io-channel-names = "battdetb", "battp",
+					   "chg_isense", "batti";
+			power-supplies = <&cpcap_charger>;
+		};
+
 		cpcap_charger: charger {
 			compatible = "motorola,mapphone-cpcap-charger";
 			interrupts-extended = <
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index e86f8c9..1f4c795 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -58,10 +58,11 @@
 		};
 
 		timer: timer@98400000 {
-			compatible = "moxa,moxart-timer";
+			compatible = "moxa,moxart-timer", "faraday,fttmr010";
 			reg = <0x98400000 0x42>;
 			interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
 			clocks = <&clk_apb>;
+			clock-names = "PCLK";
 		};
 
 		gpio: gpio@98700000 {
diff --git a/arch/arm/boot/dts/mt2701-evb.dts b/arch/arm/boot/dts/mt2701-evb.dts
index a4837985..f484973 100644
--- a/arch/arm/boot/dts/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mt2701-evb.dts
@@ -22,13 +22,95 @@
 	memory {
 		reg = <0 0x80000000 0 0x40000000>;
 	};
+
+	sound:sound {
+		compatible = "mediatek,mt2701-cs42448-machine";
+		mediatek,platform = <&afe>;
+		/* CS42448 Machine name */
+		audio-routing =
+		"Line Out Jack", "AOUT1L",
+		"Line Out Jack", "AOUT1R",
+		"Line Out Jack", "AOUT2L",
+		"Line Out Jack", "AOUT2R",
+		"Line Out Jack", "AOUT3L",
+		"Line Out Jack", "AOUT3R",
+		"Line Out Jack", "AOUT4L",
+		"Line Out Jack", "AOUT4R",
+		"AIN1L", "AMIC",
+		"AIN1R", "AMIC",
+		"AIN2L", "Tuner In",
+		"AIN2R", "Tuner In",
+		"AIN3L", "Satellite Tuner In",
+		"AIN3R", "Satellite Tuner In",
+		"AIN3L", "AUX In",
+		"AIN3R", "AUX In";
+		mediatek,audio-codec = <&cs42448>;
+		mediatek,audio-codec-bt-mrg = <&bt_sco_codec>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&aud_pins_default>;
+		i2s1-in-sel-gpio1 = <&pio 53 0>;
+		i2s1-in-sel-gpio2 = <&pio 54 0>;
+		status = "okay";
+	};
+
+	bt_sco_codec:bt_sco_codec {
+		compatible = "linux,bt-sco";
+	};
 };
 
 &auxadc {
 	status = "okay";
 };
 
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "okay";
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins_a>;
+	status = "okay";
+	cs42448: cs42448@48 {
+		compatible = "cirrus,cs42448";
+		reg = <0x48>;
+		clocks = <&topckgen CLK_TOP_AUD_I2S1_MCLK>;
+		clock-names = "mclk";
+	};
+};
+
 &pio {
+	i2c0_pins_a: i2c0@0 {
+		pins1 {
+			pinmux = <MT2701_PIN_75_SDA0__FUNC_SDA0>,
+				 <MT2701_PIN_76_SCL0__FUNC_SCL0>;
+			bias-disable;
+		};
+	};
+
+	i2c1_pins_a: i2c1@0 {
+		pins1 {
+			pinmux = <MT2701_PIN_57_SDA1__FUNC_SDA1>,
+				 <MT2701_PIN_58_SCL1__FUNC_SCL1>;
+			bias-disable;
+		};
+	};
+
+	i2c2_pins_a: i2c2@0 {
+		pins1 {
+			pinmux = <MT2701_PIN_77_SDA2__FUNC_SDA2>,
+				 <MT2701_PIN_78_SCL2__FUNC_SCL2>;
+			bias-disable;
+		};
+	};
+
 	spi_pins_a: spi0@0 {
 		pins_spi {
 			pinmux = <MT2701_PIN_53_SPI0_CSN__FUNC_SPI0_CS>,
@@ -39,6 +121,31 @@
 		};
 	};
 
+	aud_pins_default: audiodefault {
+		pins_cmd_dat {
+			pinmux = <MT2701_PIN_49_I2S0_DATA__FUNC_I2S0_DATA>,
+				 <MT2701_PIN_72_I2S0_DATA_IN__FUNC_I2S0_DATA_IN>,
+				 <MT2701_PIN_73_I2S0_LRCK__FUNC_I2S0_LRCK>,
+				 <MT2701_PIN_74_I2S0_BCK__FUNC_I2S0_BCK>,
+				 <MT2701_PIN_126_I2S0_MCLK__FUNC_I2S0_MCLK>,
+				 <MT2701_PIN_33_I2S1_DATA__FUNC_I2S1_DATA>,
+				 <MT2701_PIN_34_I2S1_DATA_IN__FUNC_I2S1_DATA_IN>,
+				 <MT2701_PIN_35_I2S1_BCK__FUNC_I2S1_BCK>,
+				 <MT2701_PIN_36_I2S1_LRCK__FUNC_I2S1_LRCK>,
+				 <MT2701_PIN_37_I2S1_MCLK__FUNC_I2S1_MCLK>,
+				 <MT2701_PIN_203_PWM0__FUNC_I2S2_DATA>,
+				 <MT2701_PIN_204_PWM1__FUNC_I2S3_DATA>,
+				 <MT2701_PIN_53_SPI0_CSN__FUNC_GPIO53>,
+				 <MT2701_PIN_54_SPI0_CK__FUNC_GPIO54>,
+				 <MT2701_PIN_18_PCM_CLK__FUNC_MRG_CLK>,
+				 <MT2701_PIN_19_PCM_SYNC__FUNC_MRG_SYNC>,
+				 <MT2701_PIN_20_PCM_RX__FUNC_MRG_TX>,
+				 <MT2701_PIN_21_PCM_TX__FUNC_MRG_RX>;
+			drive-strength = <MTK_DRIVE_12mA>;
+			bias-pull-down;
+		};
+	};
+
 	spi_pins_b: spi1@0 {
 		pins_spi {
 			pinmux = <MT2701_PIN_7_SPI1_CSN__FUNC_SPI1_CS>,
@@ -78,6 +185,31 @@
 	status = "disabled";
 };
 
+&nor_flash {
+	pinctrl-names = "default";
+	pinctrl-0 = <&nor_pins_default>;
+	status = "okay";
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+	};
+};
+
+&pio {
+	nor_pins_default: nor {
+		pins1 {
+			pinmux = <MT2701_PIN_240_EXT_XCS__FUNC_EXT_XCS>,
+				 <MT2701_PIN_241_EXT_SCK__FUNC_EXT_SCK>,
+				 <MT2701_PIN_239_EXT_SDIO0__FUNC_EXT_SDIO0>,
+				 <MT2701_PIN_238_EXT_SDIO1__FUNC_EXT_SDIO1>,
+				 <MT2701_PIN_237_EXT_SDIO2__FUNC_EXT_SDIO2>,
+				 <MT2701_PIN_236_EXT_SDIO3__FUNC_EXT_SDIO3>;
+			drive-strength = <MTK_DRIVE_4mA>;
+			bias-pull-up;
+		};
+	};
+};
+
 &uart0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 8037210..f1efdc6 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -16,13 +16,14 @@
 #include <dt-bindings/power/mt2701-power.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
 #include <dt-bindings/reset/mt2701-resets.h>
 #include "skeleton64.dtsi"
 #include "mt2701-pinfunc.h"
 
 / {
 	compatible = "mediatek,mt2701";
-	interrupt-parent = <&sysirq>;
+	interrupt-parent = <&cirq>;
 
 	cpus {
 		#address-cells = <1>;
@@ -210,6 +211,16 @@
 		reg = <0 0x10200100 0 0x1c>;
 	};
 
+	cirq: interrupt-controller@10204000 {
+		compatible = "mediatek,mt2701-cirq",
+			     "mediatek,mtk-cirq";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&sysirq>;
+		reg = <0 0x10204000 0 0x400>;
+		mediatek,ext-irq-range = <32 200>;
+	};
+
 	iommu: mmsys_iommu@10205000 {
 		compatible = "mediatek,mt2701-m4u";
 		reg = <0 0x10205000 0 0x1000>;
@@ -286,6 +297,48 @@
 		status = "disabled";
 	};
 
+	i2c0: i2c@11007000 {
+		compatible = "mediatek,mt2701-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11007000 0 0x70>,
+		      <0 0x11000200 0 0x80>;
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C0>, <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c1: i2c@11008000 {
+		compatible = "mediatek,mt2701-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11008000 0 0x70>,
+		      <0 0x11000280 0 0x80>;
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C1>, <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c2: i2c@11009000 {
+		compatible = "mediatek,mt2701-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11009000 0 0x70>,
+		      <0 0x11000300 0 0x80>;
+		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C2>, <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	spi0: spi@1100a000 {
 		compatible = "mediatek,mt2701-spi";
 		#address-cells = <1>;
@@ -334,6 +387,18 @@
 		status = "disabled";
 	};
 
+	nor_flash: spi@11014000 {
+		compatible = "mediatek,mt2701-nor",
+			     "mediatek,mt8173-nor";
+		reg = <0 0x11014000 0 0xe0>;
+		clocks = <&pericfg CLK_PERI_FLASH>,
+			 <&topckgen CLK_TOP_FLASH_SEL>;
+		clock-names = "spi", "sf";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	spi1: spi@11016000 {
 		compatible = "mediatek,mt2701-spi";
 		#address-cells = <1>;
@@ -360,6 +425,104 @@
 		status = "disabled";
 	};
 
+	afe: audio-controller@11220000 {
+		compatible = "mediatek,mt2701-audio";
+		reg = <0 0x11220000 0 0x2000>,
+		      <0 0x112a0000 0 0x20000>;
+		interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+		clocks = <&infracfg CLK_INFRA_AUDIO>,
+			 <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+			 <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+			 <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+			 <&topckgen CLK_TOP_AUD_MUX2_DIV>,
+			 <&topckgen CLK_TOP_AUD_48K_TIMING>,
+			 <&topckgen CLK_TOP_AUD_44K_TIMING>,
+			 <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
+			 <&topckgen CLK_TOP_APLL_SEL>,
+			 <&topckgen CLK_TOP_AUD1PLL_98M>,
+			 <&topckgen CLK_TOP_AUD2PLL_90M>,
+			 <&topckgen CLK_TOP_HADDS2PLL_98M>,
+			 <&topckgen CLK_TOP_HADDS2PLL_294M>,
+			 <&topckgen CLK_TOP_AUDPLL>,
+			 <&topckgen CLK_TOP_AUDPLL_D4>,
+			 <&topckgen CLK_TOP_AUDPLL_D8>,
+			 <&topckgen CLK_TOP_AUDPLL_D16>,
+			 <&topckgen CLK_TOP_AUDPLL_D24>,
+			 <&topckgen CLK_TOP_AUDINTBUS_SEL>,
+			 <&clk26m>,
+			 <&topckgen CLK_TOP_SYSPLL1_D4>,
+			 <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
+			 <&topckgen CLK_TOP_ASM_M_SEL>,
+			 <&topckgen CLK_TOP_ASM_H_SEL>,
+			 <&topckgen CLK_TOP_UNIVPLL2_D4>,
+			 <&topckgen CLK_TOP_UNIVPLL2_D2>,
+			 <&topckgen CLK_TOP_SYSPLL_D5>;
+
+		clock-names = "infra_sys_audio_clk",
+			 "top_audio_mux1_sel",
+			 "top_audio_mux2_sel",
+			 "top_audio_mux1_div",
+			 "top_audio_mux2_div",
+			 "top_audio_48k_timing",
+			 "top_audio_44k_timing",
+			 "top_audpll_mux_sel",
+			 "top_apll_sel",
+			 "top_aud1_pll_98M",
+			 "top_aud2_pll_90M",
+			 "top_hadds2_pll_98M",
+			 "top_hadds2_pll_294M",
+			 "top_audpll",
+			 "top_audpll_d4",
+			 "top_audpll_d8",
+			 "top_audpll_d16",
+			 "top_audpll_d24",
+			 "top_audintbus_sel",
+			 "clk_26m",
+			 "top_syspll1_d4",
+			 "top_aud_k1_src_sel",
+			 "top_aud_k2_src_sel",
+			 "top_aud_k3_src_sel",
+			 "top_aud_k4_src_sel",
+			 "top_aud_k5_src_sel",
+			 "top_aud_k6_src_sel",
+			 "top_aud_k1_src_div",
+			 "top_aud_k2_src_div",
+			 "top_aud_k3_src_div",
+			 "top_aud_k4_src_div",
+			 "top_aud_k5_src_div",
+			 "top_aud_k6_src_div",
+			 "top_aud_i2s1_mclk",
+			 "top_aud_i2s2_mclk",
+			 "top_aud_i2s3_mclk",
+			 "top_aud_i2s4_mclk",
+			 "top_aud_i2s5_mclk",
+			 "top_aud_i2s6_mclk",
+			 "top_asm_m_sel",
+			 "top_asm_h_sel",
+			 "top_univpll2_d4",
+			 "top_univpll2_d2",
+			 "top_syspll_d5";
+	};
+
 	mmsys: syscon@14000000 {
 		compatible = "mediatek,mt2701-mmsys", "syscon";
 		reg = <0 0x14000000 0 0x1000>;
@@ -392,6 +555,20 @@
 		power-domains = <&scpsys MT2701_POWER_DOMAIN_ISP>;
 	};
 
+	jpegdec: jpegdec@15004000 {
+		compatible = "mediatek,mt2701-jpgdec";
+		reg = <0 0x15004000 0 0x1000>;
+		interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_LOW>;
+		clocks =  <&imgsys CLK_IMG_JPGDEC_SMI>,
+			  <&imgsys CLK_IMG_JPGDEC>;
+		clock-names = "jpgdec-smi",
+			      "jpgdec";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_ISP>;
+		mediatek,larb = <&larb2>;
+		iommus = <&iommu MT2701_M4U_PORT_JPGDEC_WDMA>,
+			 <&iommu MT2701_M4U_PORT_JPGDEC_BSDMA>;
+	};
+
 	vdecsys: syscon@16000000 {
 		compatible = "mediatek,mt2701-vdecsys", "syscon";
 		reg = <0 0x16000000 0 0x1000>;
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index 3a9e9b6..d81158b2 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -1,6 +1,7 @@
 /*
- * Copyright (c) 2016 MediaTek Inc.
+ * Copyright (c) 2017 MediaTek Inc.
  * Author: John Crispin <john@phrozen.org>
+ *	   Sean Wang <sean.wang@mediatek.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,6 +15,12 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/mt2701-clk.h>
+#include <dt-bindings/pinctrl/mt7623-pinfunc.h>
+#include <dt-bindings/power/mt2701-power.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/reset/mt2701-resets.h>
 #include "skeleton64.dtsi"
 
 / {
@@ -53,16 +60,18 @@
 		#clock-cells = <0>;
 	};
 
-	rtc_clk: dummy32k {
+	rtc32k: oscillator@1 {
 		compatible = "fixed-clock";
-		clock-frequency = <32000>;
 		#clock-cells = <0>;
+		clock-frequency = <32000>;
+		clock-output-names = "rtc32k";
 	};
 
-	uart_clk: dummy26m {
+	clk26m: oscillator@0 {
 		compatible = "fixed-clock";
-		clock-frequency = <26000000>;
 		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
 	};
 
 	timer {
@@ -76,6 +85,65 @@
 		arm,cpu-registers-not-fw-configured;
 	};
 
+	topckgen: syscon@10000000 {
+		compatible = "mediatek,mt7623-topckgen",
+			     "mediatek,mt2701-topckgen",
+			     "syscon";
+		reg = <0 0x10000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	infracfg: syscon@10001000 {
+		compatible = "mediatek,mt7623-infracfg",
+			     "mediatek,mt2701-infracfg",
+			     "syscon";
+		reg = <0 0x10001000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	pericfg: syscon@10003000 {
+		compatible =  "mediatek,mt7623-pericfg",
+			      "mediatek,mt2701-pericfg",
+			      "syscon";
+		reg = <0 0x10003000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	pio: pinctrl@10005000 {
+		compatible = "mediatek,mt7623-pinctrl",
+			     "mediatek,mt2701-pinctrl";
+		reg = <0 0x1000b000 0 0x1000>;
+		mediatek,pctl-regmap = <&syscfg_pctl_a>;
+		pins-are-numbered;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		interrupt-parent = <&gic>;
+		#interrupt-cells = <2>;
+		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	syscfg_pctl_a: syscfg@10005000 {
+		compatible = "mediatek,mt7623-pctl-a-syscfg", "syscon";
+		reg = <0 0x10005000 0 0x1000>;
+	};
+
+	scpsys: scpsys@10006000 {
+		compatible = "mediatek,mt7623-scpsys",
+			     "mediatek,mt2701-scpsys",
+			     "syscon";
+		#power-domain-cells = <1>;
+		reg = <0 0x10006000 0 0x1000>;
+		infracfg = <&infracfg>;
+		clocks = <&topckgen CLK_TOP_MM_SEL>,
+			 <&topckgen CLK_TOP_MFG_SEL>,
+			 <&topckgen CLK_TOP_ETHIF_SEL>;
+		clock-names = "mm", "mfg", "ethif";
+	};
+
 	watchdog: watchdog@10007000 {
 		compatible = "mediatek,mt7623-wdt",
 			     "mediatek,mt6589-wdt";
@@ -87,10 +155,32 @@
 			     "mediatek,mt6577-timer";
 		reg = <0 0x10008000 0 0x80>;
 		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&system_clk>, <&rtc_clk>;
+		clocks = <&system_clk>, <&rtc32k>;
 		clock-names = "system-clk", "rtc-clk";
 	};
 
+	pwrap: pwrap@1000d000 {
+		compatible = "mediatek,mt7623-pwrap",
+			     "mediatek,mt2701-pwrap";
+		reg = <0 0x1000d000 0 0x1000>;
+		reg-names = "pwrap";
+		interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+		resets = <&infracfg MT2701_INFRA_PMIC_WRAP_RST>;
+		reset-names = "pwrap";
+		clocks = <&infracfg CLK_INFRA_PMICSPI>,
+			 <&infracfg CLK_INFRA_PMICWRAP>;
+		clock-names = "spi", "wrap";
+	};
+
+	cir: cir@0x10013000 {
+		compatible = "mediatek,mt7623-cir";
+		reg = <0 0x10013000 0 0x1000>;
+		interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&infracfg CLK_INFRA_IRRX>;
+		clock-names = "clk";
+		status = "disabled";
+	};
+
 	sysirq: interrupt-controller@10200100 {
 		compatible = "mediatek,mt7623-sysirq",
 			     "mediatek,mt6577-sysirq";
@@ -100,6 +190,32 @@
 		reg = <0 0x10200100 0 0x1c>;
 	};
 
+	efuse: efuse@10206000 {
+		compatible = "mediatek,mt7623-efuse",
+			     "mediatek,mt8173-efuse";
+		reg	   = <0 0x10206000 0 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		thermal_calibration_data: calib@424 {
+			reg = <0x424 0xc>;
+		};
+	};
+
+	apmixedsys: syscon@10209000 {
+		compatible = "mediatek,mt7623-apmixedsys",
+			     "mediatek,mt2701-apmixedsys",
+			     "syscon";
+		reg = <0 0x10209000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	rng: rng@1020f000 {
+		compatible = "mediatek,mt7623-rng";
+		reg = <0 0x1020f000 0 0x1000>;
+		clocks = <&infracfg CLK_INFRA_TRNG>;
+		clock-names = "rng";
+	};
+
 	gic: interrupt-controller@10211000 {
 		compatible = "arm,cortex-a7-gic";
 		interrupt-controller;
@@ -111,12 +227,23 @@
 		      <0 0x10216000 0 0x2000>;
 	};
 
+	auxadc: adc@11001000 {
+		compatible = "mediatek,mt7623-auxadc",
+			     "mediatek,mt2701-auxadc";
+		reg = <0 0x11001000 0 0x1000>;
+		clocks = <&pericfg CLK_PERI_AUXADC>;
+		clock-names = "main";
+		#io-channel-cells = <1>;
+	};
+
 	uart0: serial@11002000 {
 		compatible = "mediatek,mt7623-uart",
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11002000 0 0x400>;
 		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART0_SEL>,
+			 <&pericfg CLK_PERI_UART0>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -125,7 +252,9 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11003000 0 0x400>;
 		interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART1_SEL>,
+			 <&pericfg CLK_PERI_UART1>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -134,7 +263,9 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11004000 0 0x400>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART2_SEL>,
+			 <&pericfg CLK_PERI_UART2>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -143,7 +274,402 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11005000 0 0x400>;
 		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART3_SEL>,
+			 <&pericfg CLK_PERI_UART3>;
+		clock-names = "baud", "bus";
+		status = "disabled";
+	};
+
+	pwm: pwm@11006000 {
+		compatible = "mediatek,mt7623-pwm";
+		reg = <0 0x11006000 0 0x1000>;
+		#pwm-cells = <2>;
+		clocks = <&topckgen CLK_TOP_PWM_SEL>,
+			 <&pericfg CLK_PERI_PWM>,
+			 <&pericfg CLK_PERI_PWM1>,
+			 <&pericfg CLK_PERI_PWM2>,
+			 <&pericfg CLK_PERI_PWM3>,
+			 <&pericfg CLK_PERI_PWM4>,
+			 <&pericfg CLK_PERI_PWM5>;
+		clock-names = "top", "main", "pwm1", "pwm2",
+			      "pwm3", "pwm4", "pwm5";
+		status = "disabled";
+	};
+
+	i2c0: i2c@11007000 {
+		compatible = "mediatek,mt7623-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11007000 0 0x70>,
+		      <0 0x11000200 0 0x80>;
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C0>,
+			 <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c1: i2c@11008000 {
+		compatible = "mediatek,mt7623-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11008000 0 0x70>,
+		      <0 0x11000280 0 0x80>;
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C1>,
+			 <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c2: i2c@11009000 {
+		compatible = "mediatek,mt7623-i2c",
+			     "mediatek,mt6577-i2c";
+		reg = <0 0x11009000 0 0x70>,
+		      <0 0x11000300 0 0x80>;
+		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_LOW>;
+		clock-div = <16>;
+		clocks = <&pericfg CLK_PERI_I2C2>,
+			 <&pericfg CLK_PERI_AP_DMA>;
+		clock-names = "main", "dma";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi0: spi@1100a000 {
+		compatible = "mediatek,mt7623-spi",
+			     "mediatek,mt2701-spi";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0 0x1100a000 0 0x100>;
+		interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+			 <&topckgen CLK_TOP_SPI0_SEL>,
+			 <&pericfg CLK_PERI_SPI0>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	thermal: thermal@1100b000 {
+		#thermal-sensor-cells = <1>;
+		compatible = "mediatek,mt7623-thermal",
+			     "mediatek,mt2701-thermal";
+		reg = <0 0x1100b000 0 0x1000>;
+		interrupts = <0 70 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_THERM>, <&pericfg CLK_PERI_AUXADC>;
+		clock-names = "therm", "auxadc";
+		resets = <&pericfg MT2701_PERI_THERM_SW_RST>;
+		reset-names = "therm";
+		mediatek,auxadc = <&auxadc>;
+		mediatek,apmixedsys = <&apmixedsys>;
+		nvmem-cells = <&thermal_calibration_data>;
+		nvmem-cell-names = "calibration-data";
+	};
+
+	spi1: spi@11016000 {
+		compatible = "mediatek,mt7623-spi",
+			     "mediatek,mt2701-spi";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0 0x11016000 0 0x100>;
+		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+			 <&topckgen CLK_TOP_SPI1_SEL>,
+			 <&pericfg CLK_PERI_SPI1>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	spi2: spi@11017000 {
+		compatible = "mediatek,mt7623-spi",
+			     "mediatek,mt2701-spi";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0 0x11017000 0 0x1000>;
+		interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+			 <&topckgen CLK_TOP_SPI2_SEL>,
+			 <&pericfg CLK_PERI_SPI2>;
+		clock-names = "parent-clk", "sel-clk", "spi-clk";
+		status = "disabled";
+	};
+
+	nandc: nfi@1100d000 {
+		compatible = "mediatek,mt7623-nfc",
+			     "mediatek,mt2701-nfc";
+		reg = <0 0x1100d000 0 0x1000>;
+		interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>;
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+		clocks = <&pericfg CLK_PERI_NFI>,
+			 <&pericfg CLK_PERI_NFI_PAD>;
+		clock-names = "nfi_clk", "pad_clk";
+		status = "disabled";
+		ecc-engine = <&bch>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	bch: ecc@1100e000 {
+		compatible = "mediatek,mt7623-ecc",
+			     "mediatek,mt2701-ecc";
+		reg = <0 0x1100e000 0 0x1000>;
+		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_NFI_ECC>;
+		clock-names = "nfiecc_clk";
+		status = "disabled";
+	};
+
+	afe: audio-controller@11220000 {
+		compatible = "mediatek,mt7623-audio",
+			     "mediatek,mt2701-audio";
+		reg = <0 0x11220000 0 0x2000>,
+		      <0 0x112a0000 0 0x20000>;
+		interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+		clocks = <&infracfg CLK_INFRA_AUDIO>,
+			 <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+			 <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+			 <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+			 <&topckgen CLK_TOP_AUD_MUX2_DIV>,
+			 <&topckgen CLK_TOP_AUD_48K_TIMING>,
+			 <&topckgen CLK_TOP_AUD_44K_TIMING>,
+			 <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
+			 <&topckgen CLK_TOP_APLL_SEL>,
+			 <&topckgen CLK_TOP_AUD1PLL_98M>,
+			 <&topckgen CLK_TOP_AUD2PLL_90M>,
+			 <&topckgen CLK_TOP_HADDS2PLL_98M>,
+			 <&topckgen CLK_TOP_HADDS2PLL_294M>,
+			 <&topckgen CLK_TOP_AUDPLL>,
+			 <&topckgen CLK_TOP_AUDPLL_D4>,
+			 <&topckgen CLK_TOP_AUDPLL_D8>,
+			 <&topckgen CLK_TOP_AUDPLL_D16>,
+			 <&topckgen CLK_TOP_AUDPLL_D24>,
+			 <&topckgen CLK_TOP_AUDINTBUS_SEL>,
+			 <&clk26m>,
+			 <&topckgen CLK_TOP_SYSPLL1_D4>,
+			 <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
+			 <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
+			 <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
+			 <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
+			 <&topckgen CLK_TOP_ASM_M_SEL>,
+			 <&topckgen CLK_TOP_ASM_H_SEL>,
+			 <&topckgen CLK_TOP_UNIVPLL2_D4>,
+			 <&topckgen CLK_TOP_UNIVPLL2_D2>,
+			 <&topckgen CLK_TOP_SYSPLL_D5>;
+
+		clock-names = "infra_sys_audio_clk",
+			 "top_audio_mux1_sel",
+			 "top_audio_mux2_sel",
+			 "top_audio_mux1_div",
+			 "top_audio_mux2_div",
+			 "top_audio_48k_timing",
+			 "top_audio_44k_timing",
+			 "top_audpll_mux_sel",
+			 "top_apll_sel",
+			 "top_aud1_pll_98M",
+			 "top_aud2_pll_90M",
+			 "top_hadds2_pll_98M",
+			 "top_hadds2_pll_294M",
+			 "top_audpll",
+			 "top_audpll_d4",
+			 "top_audpll_d8",
+			 "top_audpll_d16",
+			 "top_audpll_d24",
+			 "top_audintbus_sel",
+			 "clk_26m",
+			 "top_syspll1_d4",
+			 "top_aud_k1_src_sel",
+			 "top_aud_k2_src_sel",
+			 "top_aud_k3_src_sel",
+			 "top_aud_k4_src_sel",
+			 "top_aud_k5_src_sel",
+			 "top_aud_k6_src_sel",
+			 "top_aud_k1_src_div",
+			 "top_aud_k2_src_div",
+			 "top_aud_k3_src_div",
+			 "top_aud_k4_src_div",
+			 "top_aud_k5_src_div",
+			 "top_aud_k6_src_div",
+			 "top_aud_i2s1_mclk",
+			 "top_aud_i2s2_mclk",
+			 "top_aud_i2s3_mclk",
+			 "top_aud_i2s4_mclk",
+			 "top_aud_i2s5_mclk",
+			 "top_aud_i2s6_mclk",
+			 "top_asm_m_sel",
+			 "top_asm_h_sel",
+			 "top_univpll2_d4",
+			 "top_univpll2_d2",
+			 "top_syspll_d5";
+	};
+
+	mmc0: mmc@11230000 {
+		compatible = "mediatek,mt7623-mmc",
+			     "mediatek,mt8135-mmc";
+		reg = <0 0x11230000 0 0x1000>;
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_MSDC30_0>,
+			 <&topckgen CLK_TOP_MSDC30_0_SEL>;
+		clock-names = "source", "hclk";
+		status = "disabled";
+	};
+
+	mmc1: mmc@11240000 {
+		compatible = "mediatek,mt7623-mmc",
+			     "mediatek,mt8135-mmc";
+		reg = <0 0x11240000 0 0x1000>;
+		interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&pericfg CLK_PERI_MSDC30_1>,
+			 <&topckgen CLK_TOP_MSDC30_1_SEL>;
+		clock-names = "source", "hclk";
+		status = "disabled";
+	};
+
+	usb1: usb@1a1c0000 {
+		compatible = "mediatek,mt7623-xhci",
+			     "mediatek,mt8173-xhci";
+		reg = <0 0x1a1c0000 0 0x1000>,
+		      <0 0x1a1c4700 0 0x0100>;
+		reg-names = "mac", "ippc";
+		interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&hifsys CLK_HIFSYS_USB0PHY>,
+			 <&topckgen CLK_TOP_ETHIF_SEL>;
+		clock-names = "sys_ck", "free_ck";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
+		phys = <&u2port0 PHY_TYPE_USB2>, <&u3port0 PHY_TYPE_USB3>;
+		status = "disabled";
+	};
+
+	u3phy1: usb-phy@1a1c4000 {
+		compatible = "mediatek,mt7623-u3phy", "mediatek,mt2701-u3phy";
+		reg = <0 0x1a1c4000 0 0x0700>;
+		clocks = <&clk26m>;
+		clock-names = "u3phya_ref";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "disabled";
+
+		u2port0: usb-phy@1a1c4800 {
+			reg = <0 0x1a1c4800 0 0x0100>;
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u3port0: usb-phy@1a1c4900 {
+			reg = <0 0x1a1c4900 0 0x0700>;
+			#phy-cells = <1>;
+			status = "okay";
+		};
+	};
+
+	usb2: usb@1a240000 {
+		compatible = "mediatek,mt7623-xhci",
+			     "mediatek,mt8173-xhci";
+		reg = <0 0x1a240000 0 0x1000>,
+		      <0 0x1a244700 0 0x0100>;
+		reg-names = "mac", "ippc";
+		interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&hifsys CLK_HIFSYS_USB1PHY>,
+			 <&topckgen CLK_TOP_ETHIF_SEL>;
+		clock-names = "sys_ck", "free_ck";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
+		phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>;
+		status = "disabled";
+	};
+
+	u3phy2: usb-phy@1a244000 {
+		compatible = "mediatek,mt7623-u3phy", "mediatek,mt2701-u3phy";
+		reg = <0 0x1a244000 0 0x0700>;
+		clocks = <&clk26m>;
+		clock-names = "u3phya_ref";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		status = "disabled";
+
+		u2port1: usb-phy@1a244800 {
+			reg = <0 0x1a244800 0 0x0100>;
+			#phy-cells = <1>;
+			status = "okay";
+		};
+
+		u3port1: usb-phy@1a244900 {
+			reg = <0 0x1a244900 0 0x0700>;
+			#phy-cells = <1>;
+			status = "okay";
+		};
+	};
+
+	hifsys: syscon@1a000000 {
+		compatible = "mediatek,mt7623-hifsys",
+			     "mediatek,mt2701-hifsys",
+			     "syscon";
+		reg = <0 0x1a000000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	ethsys: syscon@1b000000 {
+		compatible = "mediatek,mt7623-ethsys",
+			     "mediatek,mt2701-ethsys",
+			     "syscon";
+		reg = <0 0x1b000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	eth: ethernet@1b100000 {
+		compatible = "mediatek,mt2701-eth", "syscon";
+		reg = <0 0x1b100000 0 0x20000>;
+		interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 199 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
+			 <&ethsys CLK_ETHSYS_ESW>,
+			 <&ethsys CLK_ETHSYS_GP1>,
+			 <&ethsys CLK_ETHSYS_GP2>,
+			 <&apmixedsys CLK_APMIXED_TRGPLL>;
+		clock-names = "ethif", "esw", "gp1", "gp2", "trgpll";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
+		mediatek,ethsys = <&ethsys>;
+		mediatek,pctl = <&syscfg_pctl_a>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	crypto: crypto@1b240000 {
+		compatible = "mediatek,mt7623-crypto";
+		reg = <0 0x1b240000 0 0x20000>;
+		interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
+			 <&ethsys CLK_ETHSYS_CRYPTO>;
+		clock-names = "ethif", "cryp";
+		power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index 57b9a02..fccd538 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -188,7 +188,7 @@
 	clock-frequency = <400000>;
 
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		pagesize = <16>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
index f330c69..82aa9c4 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -201,7 +201,8 @@
 };
 
 &gpmc {
-	ranges = <0 0 0x30000000 0x1000000>;       /* CS0: 16MB for NAND */
+	ranges = <0 0 0x30000000 0x1000000	/* CS0: 16MB for NAND */
+		  6 0 0x2c000000 0x1000000>;	/* CS6: 16MB for DM9000 */
 
 	nand@0,0 {
 		compatible = "ti,omap2-nand";
@@ -256,12 +257,8 @@
 			reg = <0x680000 0xf980000>;
 		};
 	};
-};
 
-&gpmc {
-	ranges = <6 0 0x2c000000 0x1000000>;       /* CS6: 16MB for DM9000 */
-
-	ethernet@0,0 {
+	ethernet@6,0 {
 		compatible = "davicom,dm9000";
 		reg =  <6 0x000 2
 			6 0x400 2>; /* CS6, offset 0 and 0x400, IO size 2 */
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 4f9a765..c963b31 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -34,7 +34,15 @@
 	>;
 };
 
+&hsusb2_phy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ehci_phy_pins>;
+};
+
 &omap3_pmx_core {
+	pinctrl-names = "default";
+	pinctrl-0 = <&on_board_gpio_61 &hsusb2_pins>;
+
 	dss_dpi_pins1: pinmux_dss_dpi_pins2 {
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x20d4, PIN_OUTPUT | MUX_MODE0)   /* dss_pclk.dss_pclk */
@@ -98,6 +106,37 @@
 		>;
 	};
 
+	/* Devices are routed with gpmc_nbe1.gpio_61 to on-board devices */
+	on_board_gpio_61: pinmux_ehci_port_select_pins {
+		pinctrl-single,pins = <
+		OMAP3_CORE1_IOPAD(0x20c8, PIN_OUTPUT | MUX_MODE4)
+		>;
+	};
+
+	/* Used by OHCI and EHCI. OHCI won't work without external phy */
+	hsusb2_pins: pinmux_hsusb2_pins {
+		pinctrl-single,pins = <
+
+		/* mcspi1_cs3.hsusb2_data2 */
+		OMAP3_CORE1_IOPAD(0x21d4, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* mcspi2_clk.hsusb2_data7 */
+		OMAP3_CORE1_IOPAD(0x21d6, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* mcspi2_simo.hsusb2_data4 */
+		OMAP3_CORE1_IOPAD(0x21d8, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* mcspi2_somi.hsusb2_data5 */
+		OMAP3_CORE1_IOPAD(0x21da, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* mcspi2_cs0.hsusb2_data6 */
+		OMAP3_CORE1_IOPAD(0x21dc, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* mcspi2_cs1.hsusb2_data3 */
+		OMAP3_CORE1_IOPAD(0x21de, PIN_INPUT_PULLDOWN | MUX_MODE3)
+		>;
+	};
+
 	wl12xx_gpio: pinmux_wl12xx_gpio {
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x2180, PIN_OUTPUT | MUX_MODE4)		/* uart1_cts.gpio_150 */
@@ -112,6 +151,46 @@
 	};
 };
 
+&omap3_pmx_core2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&hsusb2_2_pins>;
+
+	ehci_phy_pins: pinmux_ehci_phy_pins {
+		pinctrl-single,pins = <
+
+		/* EHCI PHY reset GPIO etk_d7.gpio_21 */
+		OMAP3630_CORE2_IOPAD(0x25ea, PIN_OUTPUT | MUX_MODE4)
+
+		/* EHCI VBUS etk_d8.gpio_22 */
+		OMAP3630_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4)
+		>;
+	};
+
+	/* Used by OHCI and EHCI. OHCI won't work without external phy */
+	hsusb2_2_pins: pinmux_hsusb2_2_pins {
+		pinctrl-single,pins = <
+
+		/* etk_d10.hsusb2_clk */
+		OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3)
+
+		/* etk_d11.hsusb2_stp */
+		OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3)
+
+		/* etk_d12.hsusb2_dir */
+		OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* etk_d13.hsusb2_nxt */
+		OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* etk_d14.hsusb2_data0 */
+		OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3)
+
+		/* etk_d15.hsusb2_data1 */
+		OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3)
+		>;
+	};
+};
+
 &omap3_pmx_wkup {
 	dss_dpi_pins2: pinmux_dss_dpi_pins1 {
 		pinctrl-single,pins = <
@@ -153,6 +232,29 @@
 	pinctrl-0 = <&uart3_pins>;
 };
 
+/*
+ * GPIO_61 (nUSB2_EN_1V8) must be low to enable on-board EHCI USB2 interface
+ * for bus switch SN74CB3Q3384A, level-shifter SN74AVC16T245DGGR, and 1.8V.
+ */
+&gpio2 {
+	en_usb2_port {
+		gpio-hog;
+		gpios = <29 GPIO_ACTIVE_HIGH>;	/* gpio_61 */
+		output-low;
+		line-name = "enable usb2 port";
+	};
+};
+
+/* T2_GPIO_2 low to route GPIO_61 to on-board devices */
+&twl_gpio {
+	en_on_board_gpio_61 {
+		gpio-hog;
+		gpios = <2 GPIO_ACTIVE_HIGH>;
+		output-low;
+		line-name = "en_hsusb2_clk";
+	};
+};
+
 &gpmc {
 	ranges = <0 0 0x30000000 0x1000000>,	/* CS0: 16MB for NAND */
 		 <5 0 0x2c000000 0x01000000>;
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index 0904750..2b1d697 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -12,6 +12,24 @@
 		};
 	};
 
+	/* HS USB Port 2 Power */
+	hsusb2_power: hsusb2_power_reg {
+		compatible = "regulator-fixed";
+		regulator-name = "hsusb2_vbus";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio1 22 GPIO_ACTIVE_HIGH>; /* gpio_22 */
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
+
+	/* HS USB Host PHY on PORT 2 */
+	hsusb2_phy: hsusb2_phy {
+		compatible = "usb-nop-xceiv";
+		reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */
+		vcc-supply = <&hsusb2_power>;
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		ledb {
@@ -76,7 +94,6 @@
 
 &lcd_3v3 {
 	gpio = <&gpio5 25 GPIO_ACTIVE_LOW>;	/* gpio153 */
-	enable-active-low;
 };
 
 &lcd0 {
@@ -143,6 +160,14 @@
 			>;
 };
 
+&usbhshost {
+	port2-mode = "ehci-phy";
+};
+
+&usbhsehci {
+	phys = <0 &hsusb2_phy>;
+};
+
 &usb_otg_hs {
 	interface-type = <0>;
 	usb-phy = <&usb2_phy>;
@@ -159,3 +184,10 @@
 		reg = <5 0 0xff>;
 	};
 };
+
+&vaux2 {
+	regulator-name = "usb_1v8";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-always-on;
+};
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index 9ec7370..4504908 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -419,7 +419,7 @@
 
 	/* RFID EEPROM */
 	m24lr64@50 {
-		compatible = "at,24c64";
+		compatible = "atmel,24c64";
 		reg = <0x50>;
 	};
 };
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index 401fae8..cd22034 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -74,16 +74,6 @@
 		gpio = <&gpio1 16 GPIO_ACTIVE_HIGH>;		/* gpio_16: WiFi nReset */
 		startup-delay-us = <10000>;
 	};
-
-	/* Regulator to trigger the nReset signal of the Bluetooth module */
-	w3cbw003c_bt_nreset: regulator-w3cbw003c-bt-nreset {
-		compatible = "regulator-fixed";
-		regulator-name = "regulator-w3cbw003c-bt-nreset";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>;		/* gpio_164: BT nReset */
-		startup-delay-us = <10000>;
-	};
 };
 
 &omap3_pmx_core {
@@ -191,7 +181,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
 	vmmc-supply = <&w3cbw003c_npoweron>;
-	vqmmc-supply = <&w3cbw003c_bt_nreset>;
 	vmmc_aux-supply = <&w3cbw003c_wifi_nreset>;
 	bus-width = <4>;
 	cap-sdio-irq;
diff --git a/arch/arm/boot/dts/omap3-sb-t35.dtsi b/arch/arm/boot/dts/omap3-sb-t35.dtsi
index 73643fa..4476fb6 100644
--- a/arch/arm/boot/dts/omap3-sb-t35.dtsi
+++ b/arch/arm/boot/dts/omap3-sb-t35.dtsi
@@ -50,7 +50,6 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&sb_t35_audio_amp>;
 		gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;   /* gpio_61 */
-		enable-active-low;
 		regulator-always-on;
 	};
 };
@@ -90,7 +89,7 @@
 	clock-frequency = <400000>;
 
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		pagesize = <16>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index dc80886b..06ac0f8 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -63,7 +63,6 @@
 		regulator-min-microvolt = <3150000>;
 		regulator-max-microvolt = <3150000>;
 		gpio = <&gpio5 29 GPIO_ACTIVE_LOW>;		/* gpio_157 */
-		enable-active-low;
 		startup-delay-us = <10000>;
 	};
 };
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 89eb607..10ca1c1 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -301,7 +301,7 @@
 	#address-cells = <1>;
 	#size-cells = <0>;
 	wlcore: wlcore@2 {
-		compatible = "ti,wl1283";
+		compatible = "ti,wl1285", "ti,wl1283";
 		reg = <2>;
 		interrupt-parent = <&gpio4>;
 		interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; /* gpio100 */
@@ -348,6 +348,17 @@
 		interrupt-names = "irq", "wakeup";
 		wakeup-source;
 	};
+
+	isl29030@44 {
+		compatible = "isil,isl29030";
+		reg = <0x44>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&als_proximity_pins>;
+
+		interrupt-parent = <&gpio6>;
+		interrupts = <17 IRQ_TYPE_LEVEL_LOW>; /* gpio177 */
+	};
 };
 
 &omap4_pmx_core {
@@ -395,6 +406,12 @@
 		>;
 	};
 
+	als_proximity_pins: pinmux_als_proximity_pins {
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x18c, PIN_INPUT_PULLUP | MUX_MODE3)
+		>;
+	};
+
 	usb_ulpi_pins: pinmux_usb_ulpi_pins {
 		pinctrl-single,pins = <
 		OMAP4_IOPAD(0x196, MUX_MODE7)
@@ -453,6 +470,15 @@
 		OMAP4_IOPAD(0x1c8, PIN_INPUT_PULLUP | MUX_MODE7)
 		>;
 	};
+
+	uart4_pins: pinmux_uart4_pins {
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x15c, PIN_INPUT | MUX_MODE0)		/* uart4_rx */
+		OMAP4_IOPAD(0x15e, PIN_OUTPUT | MUX_MODE0)		/* uart4_tx */
+		OMAP4_IOPAD(0x110, PIN_INPUT_PULLUP | MUX_MODE5)	/* uart4_cts */
+		OMAP4_IOPAD(0x112, PIN_OUTPUT_PULLUP | MUX_MODE5)	/* uart4_rts */
+		>;
+	};
 };
 
 &omap4_pmx_wkup {
@@ -469,6 +495,17 @@
 			       &omap4_pmx_core 0x17c>;
 };
 
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart4_pins>;
+
+	bluetooth {
+		compatible = "ti,wl1285-st";
+		enable-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>; /* gpio 174 */
+		max-speed = <3686400>;
+	};
+};
+
 &usbhsehci {
 	phys = <&hsusb1_phy>;
 };
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index ec0bd97..6e6810c2 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -12,6 +12,10 @@
 	model = "Gumstix Duovero";
 	compatible = "gumstix,omap4-duovero", "ti,omap4430", "ti,omap4";
 
+	chosen {
+		stdout-path = &uart3;
+	};
+
 	memory@80000000 {
 		device_type = "memory";
 		reg = <0x80000000 0x40000000>; /* 1 GB */
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 758b6eb..6500bfc 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -241,7 +241,7 @@
 	};
 
 	eeprom@50 {
-		compatible = "microchip,24c32";
+		compatible = "microchip,24c32", "atmel,24c32";
 		reg = <0x50>;
 	};
 };
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 578c53f..64d00f5 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -900,15 +900,24 @@
 			ctrl-module = <&omap_control_usbotg>;
 		};
 
-		aes: aes@4b501000 {
+		aes1: aes@4b501000 {
 			compatible = "ti,omap4-aes";
-			ti,hwmods = "aes";
+			ti,hwmods = "aes1";
 			reg = <0x4b501000 0xa0>;
 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&sdma 111>, <&sdma 110>;
 			dma-names = "tx", "rx";
 		};
 
+		aes2: aes@4b701000 {
+			compatible = "ti,omap4-aes";
+			ti,hwmods = "aes2";
+			reg = <0x4b701000 0xa0>;
+			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&sdma 114>, <&sdma 113>;
+			dma-names = "tx", "rx";
+		};
+
 		des: des@480a5000 {
 			compatible = "ti,omap4-des";
 			ti,hwmods = "des";
@@ -918,6 +927,15 @@
 			dma-names = "tx", "rx";
 		};
 
+		sham: sham@4b100000 {
+			compatible = "ti,omap4-sham";
+			ti,hwmods = "sham";
+			reg = <0x4b100000 0x300>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&sdma 119>;
+			dma-names = "rx";
+		};
+
 		abb_mpu: regulator-abb-mpu {
 			compatible = "ti,abb-v2";
 			regulator-name = "abb_mpu";
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index 9573b37..05732ed 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -357,6 +357,8 @@
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&iva_hsd_byp_clk_mux_ck>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+		assigned-clocks = <&dpll_iva_ck>;
+		assigned-clock-rates = <931200000>;
 	};
 
 	dpll_iva_x2_ck: dpll_iva_x2_ck {
@@ -374,6 +376,8 @@
 		reg = <0x01b8>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_iva_m4x2_ck>;
+		assigned-clock-rates = <465600000>;
 	};
 
 	dpll_iva_m5x2_ck: dpll_iva_m5x2_ck@1bc {
@@ -385,6 +389,8 @@
 		reg = <0x01bc>;
 		ti,index-starts-at-one;
 		ti,invert-autoidle-bit;
+		assigned-clocks = <&dpll_iva_m5x2_ck>;
+		assigned-clock-rates = <266100000>;
 	};
 
 	dpll_mpu_ck: dpll_mpu_ck@160 {
@@ -969,22 +975,6 @@
 		ti,max-div = <2>;
 	};
 
-	aes1_fck: aes1_fck@15a0 {
-		#clock-cells = <0>;
-		compatible = "ti,gate-clock";
-		clocks = <&l3_div_ck>;
-		ti,bit-shift = <1>;
-		reg = <0x15a0>;
-	};
-
-	aes2_fck: aes2_fck@15a8 {
-		#clock-cells = <0>;
-		compatible = "ti,gate-clock";
-		clocks = <&l3_div_ck>;
-		ti,bit-shift = <1>;
-		reg = <0x15a8>;
-	};
-
 	dss_sys_clk: dss_sys_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index b153f60..78397f6 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -404,7 +404,7 @@
 	clock-frequency = <400000>;
 
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		pagesize = <16>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/omap5-sbc-t54.dts b/arch/arm/boot/dts/omap5-sbc-t54.dts
index 337bbbc..7b8810d 100644
--- a/arch/arm/boot/dts/omap5-sbc-t54.dts
+++ b/arch/arm/boot/dts/omap5-sbc-t54.dts
@@ -44,7 +44,7 @@
 	clock-frequency = <400000>;
 
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c02";
 		pagesize = <16>;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 0d42c46..ec2c8ba 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -137,6 +137,13 @@
 	};
 };
 
+&mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>;
+
+	cd-gpios = <&gpio5 24 GPIO_ACTIVE_LOW>;	/* gpio5_152 */
+};
+
 &omap5_pmx_core {
 	evm_keys_pins: pinmux_evm_keys_gpio_pins {
 		pinctrl-single,pins = <
@@ -150,6 +157,12 @@
 			OMAP5_IOPAD(0x1c8, PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
 		>;
 	};
+
+	mmc1_pins: pinmux_mmc1_pins {
+		pinctrl-single,pins = <
+			OMAP5_IOPAD(0x1d4, PIN_INPUT_PULLUP | MUX_MODE6)	/* gpio5_152 */
+		>;
+	};
 };
 
 &tpd12s015 {
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 4899c23..5291934 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -315,6 +315,8 @@
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+		assigned-clocks = <&dpll_iva_ck>;
+		assigned-clock-rates = <1165000000>;
 	};
 
 	dpll_iva_x2_ck: dpll_iva_x2_ck {
@@ -330,6 +332,8 @@
 		ti,max-div = <63>;
 		reg = <0x01b8>;
 		ti,index-starts-at-one;
+		assigned-clocks = <&dpll_iva_h11x2_ck>;
+		assigned-clock-rates = <465920000>;
 	};
 
 	dpll_iva_h12x2_ck: dpll_iva_h12x2_ck@1bc {
@@ -339,6 +343,8 @@
 		ti,max-div = <63>;
 		reg = <0x01bc>;
 		ti,index-starts-at-one;
+		assigned-clocks = <&dpll_iva_h12x2_ck>;
+		assigned-clock-rates = <388300000>;
 	};
 
 	mpu_dpll_hs_clk_div: mpu_dpll_hs_clk_div {
diff --git a/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts b/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts
new file mode 100644
index 0000000..521463d
--- /dev/null
+++ b/arch/arm/boot/dts/owl-s500-guitar-bb-rev-b.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include "owl-s500-guitar.dtsi"
+
+/ {
+	compatible = "lemaker,guitar-bb-rev-b", "lemaker,guitar", "actions,s500";
+	model = "LeMaker Guitar Base Board rev. B";
+
+	aliases {
+		serial3 = &uart3;
+	};
+
+	chosen {
+		stdout-path = "serial3:115200n8";
+	};
+};
+
+&uart3 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/owl-s500-guitar.dtsi b/arch/arm/boot/dts/owl-s500-guitar.dtsi
new file mode 100644
index 0000000..079b2c0
--- /dev/null
+++ b/arch/arm/boot/dts/owl-s500-guitar.dtsi
@@ -0,0 +1,22 @@
+/*
+ * LeMaker Guitar SoM
+ *
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#include "owl-s500.dtsi"
+
+/ {
+	compatible = "lemaker,guitar", "actions,s500";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x40000000>;
+	};
+};
+
+&timer {
+	clocks = <&hosc>;
+};
diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
new file mode 100644
index 0000000..51a4874
--- /dev/null
+++ b/arch/arm/boot/dts/owl-s500.dtsi
@@ -0,0 +1,186 @@
+/*
+ * Actions Semi S500 SoC
+ *
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "actions,s500";
+	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	aliases {
+	};
+
+	chosen {
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x0>;
+			enable-method = "actions,s500-smp";
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x1>;
+			enable-method = "actions,s500-smp";
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x2>;
+			enable-method = "actions,s500-smp";
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x3>;
+			enable-method = "actions,s500-smp";
+		};
+	};
+
+	arm-pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
+	hosc: hosc {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		#clock-cells = <0>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		scu: scu@b0020000 {
+			compatible = "arm,cortex-a9-scu";
+			reg = <0xb0020000 0x100>;
+		};
+
+		global_timer: timer@b0020200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0xb0020200 0x100>;
+			interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			status = "disabled";
+		};
+
+		twd_timer: timer@b0020600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0xb0020600 0x20>;
+			interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			status = "disabled";
+		};
+
+		twd_wdt: wdt@b0020620 {
+			compatible = "arm,cortex-a9-twd-wdt";
+			reg = <0xb0020620 0xe0>;
+			interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+			status = "disabled";
+		};
+
+		gic: interrupt-controller@b0021000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0xb0021000 0x1000>,
+			      <0xb0020100 0x0100>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+		};
+
+		l2: cache-controller@b0022000 {
+			compatible = "arm,pl310-cache";
+			reg = <0xb0022000 0x1000>;
+			cache-unified;
+			cache-level = <2>;
+			interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+			arm,tag-latency = <3 3 2>;
+			arm,data-latency = <5 3 3>;
+		};
+
+		uart0: serial@b0120000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0120000 0x2000>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart1: serial@b0122000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0122000 0x2000>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart2: serial@b0124000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0124000 0x2000>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart3: serial@b0126000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0126000 0x2000>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart4: serial@b0128000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb0128000 0x2000>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart5: serial@b012a000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb012a000 0x2000>;
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart6: serial@b012c000 {
+			compatible = "actions,s500-uart", "actions,owl-uart";
+			reg = <0xb012c000 0x2000>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		timer: timer@b0168000 {
+			compatible = "actions,s500-timer";
+			reg = <0xb0168000 0x8000>;
+			interrupts = <GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
+			             <GIC_SPI  9 IRQ_TYPE_LEVEL_HIGH>,
+			             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+			             <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "2hz0", "2hz1", "timer0", "timer1";
+		};
+
+		sps: power-controller@b01b0100 {
+			compatible = "actions,s500-sps";
+			reg = <0xb01b0100 0x100>;
+			#power-domain-cells = <1>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/pm9g45.dts b/arch/arm/boot/dts/pm9g45.dts
index 0abd7bf..3139221 100644
--- a/arch/arm/boot/dts/pm9g45.dts
+++ b/arch/arm/boot/dts/pm9g45.dts
@@ -37,12 +37,10 @@
 			};
 
 			pinctrl@fffff200 {
-
-				board {
-					pinctrl_board_nand: nand0-board {
+				nand {
+					pinctrl_nand_rb: nand-rb-0 {
 						atmel,pins =
-							<AT91_PIOD 3 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP	/* PD3 gpio RDY pin pull_up*/
-							 AT91_PIOC 14 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;	/* PC14 gpio enable pin pull_up */
+							<AT91_PIOD 3 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>;
 					};
 				};
 
@@ -71,50 +69,61 @@
 				phy-mode = "rmii";
 				status = "okay";
 			};
-
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
-			pinctrl-0 = <&pinctrl_board_nand>;
-
-			gpios = <&pioD 3 GPIO_ACTIVE_HIGH
-				 &pioC 14 GPIO_ACTIVE_HIGH
-				 0
-				>;
-
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioD 3 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x1A0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			kernel@200000 {
-				label = "bareboxenv2";
-				reg = <0x200000 0x300000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			kernel@500000 {
-				label = "root";
-				reg = <0x500000 0x400000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			data@900000 {
-				label = "data";
-				reg = <0x900000 0x8340000>;
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x1A0000>;
+						};
+
+						kernel@200000 {
+							label = "bareboxenv2";
+							reg = <0x200000 0x300000>;
+						};
+
+						kernel@500000 {
+							label = "root";
+							reg = <0x500000 0x400000>;
+						};
+
+						data@900000 {
+							label = "data";
+							reg = <0x900000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/pxa25x.dtsi b/arch/arm/boot/dts/pxa25x.dtsi
index f9f4726..95d59be 100644
--- a/arch/arm/boot/dts/pxa25x.dtsi
+++ b/arch/arm/boot/dts/pxa25x.dtsi
@@ -93,22 +93,22 @@
 	pxa250_opp_table: opp_table0 {
 		compatible = "operating-points-v2";
 
-		opp@99532800 {
+		opp-99532800 {
 			opp-hz = /bits/ 64 <99532800>;
 			opp-microvolt = <1000000 950000 1650000>;
 			clock-latency-ns = <20>;
 		};
-		opp@199065600 {
+		opp-199065600 {
 			opp-hz = /bits/ 64 <199065600>;
 			opp-microvolt = <1000000 950000 1650000>;
 			clock-latency-ns = <20>;
 		};
-		opp@298598400 {
+		opp-298598400 {
 			opp-hz = /bits/ 64 <298598400>;
 			opp-microvolt = <1100000 1045000 1650000>;
 			clock-latency-ns = <20>;
 		};
-		opp@398131200 {
+		opp-398131200 {
 			opp-hz = /bits/ 64 <398131200>;
 			opp-microvolt = <1300000 1235000 1650000>;
 			clock-latency-ns = <20>;
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index e0fab48..5f1d6da 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -141,37 +141,37 @@
 	pxa270_opp_table: opp_table0 {
 		compatible = "operating-points-v2";
 
-		opp@104000000 {
+		opp-104000000 {
 			opp-hz = /bits/ 64 <104000000>;
 			opp-microvolt = <900000 900000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@156000000 {
+		opp-156000000 {
 			opp-hz = /bits/ 64 <156000000>;
 			opp-microvolt = <1000000 1000000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@208000000 {
+		opp-208000000 {
 			opp-hz = /bits/ 64 <208000000>;
 			opp-microvolt = <1180000 1180000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@312000000 {
+		opp-312000000 {
 			opp-hz = /bits/ 64 <312000000>;
 			opp-microvolt = <1250000 1250000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@416000000 {
+		opp-416000000 {
 			opp-hz = /bits/ 64 <416000000>;
 			opp-microvolt = <1350000 1350000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@520000000 {
+		opp-520000000 {
 			opp-hz = /bits/ 64 <520000000>;
 			opp-microvolt = <1450000 1450000 1705000>;
 			clock-latency-ns = <20>;
 		};
-		opp@624000000 {
+		opp-624000000 {
 			opp-hz = /bits/ 64 <624000000>;
 			opp-microvolt = <1550000 1550000 1705000>;
 			clock-latency-ns = <20>;
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index 2da1413..9d725f9 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -23,6 +23,7 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 #include "qcom-msm8660.dtsi"
 
 / {
@@ -83,6 +84,25 @@
 		};
 	};
 
+	/*
+	 * Capella CM3605 light and proximity sensor mounted directly
+	 * on the sensor board.
+	 */
+	cm3605 {
+		compatible = "capella,cm3605";
+		vdd-supply = <&pm8058_l14>; // 2.85V
+		aset-gpios = <&pm8058_gpio 35 GPIO_ACTIVE_LOW>;
+		capella,aset-resistance-ohms = <100000>;
+		/* GPIO34 has interrupt 225 on the PM8058 */
+		/* Trigger on both edges - getting close or far away */
+		interrupts-extended = <&pm8058 225 IRQ_TYPE_EDGE_BOTH>;
+		/* MPP05 analog input to the XOADC */
+		io-channels = <&xoadc 0x00 0x05>;
+		io-channel-names = "aout";
+		pinctrl-names = "default";
+		pinctrl-0 = <&dragon_cm3605_gpios>, <&dragon_cm3605_mpps>;
+	};
+
 	soc {
 		pinctrl@800000 {
 			/* eMMMC pins, all 8 data lines connected */
@@ -160,6 +180,19 @@
 				};
 			};
 
+			dragon_gsbi8_i2c_pins: gsbi8_i2c {
+				mux {
+					pins = "gpio64", "gpio65";
+					function = "gsbi8";
+				};
+				pinconf {
+					pins = "gpio64", "gpio65";
+					drive-strength = <16>;
+					/* These have external pull-up 2.2kOhm to 1.8V */
+					bias-disable;
+				};
+			};
+
 			dragon_gsbi12_i2c_pins: gsbi12_i2c {
 				mux {
 					pins = "gpio115", "gpio116";
@@ -317,6 +350,24 @@
 							power-source = <PM8058_GPIO_S3>;
 						};
 					};
+					dragon_cm3605_gpios: cm3605-gpios {
+						/* Pin 34 connected to the proxy IRQ */
+						pinconf_gpio34 {
+							pins = "gpio34";
+							function = "normal";
+							input-enable;
+							bias-disable;
+							power-source = <PM8058_GPIO_S3>;
+						};
+						/* Pin 35 connected to ASET */
+						pinconf_gpio35 {
+							pins = "gpio35";
+							function = "normal";
+							output-high;
+							bias-disable;
+							power-source = <PM8058_GPIO_S3>;
+						};
+					};
 					dragon_veth_gpios: veth-gpios {
 						pinconf {
 							pins = "gpio40";
@@ -327,6 +378,47 @@
 					};
 				};
 
+				mpps@50 {
+					dragon_cm3605_mpps: cm3605-mpps {
+						pinconf {
+							pins = "mpp5";
+							function = "analog";
+							input-enable;
+							bias-high-impedance;
+							/* Let's use channel 5 */
+							qcom,amux-route = <PMIC_MPP_AMUX_ROUTE_CH5>;
+							power-source = <PM8058_GPIO_S3>;
+						};
+					};
+				};
+
+				xoadc@197 {
+					/* Reference voltage 2.2 V */
+					xoadc-ref-supply = <&pm8058_l18>;
+
+					/* Board-specific channels */
+					mpp5@05 {
+						/* Connected to AOUT of ALS sensor */
+						reg = <0x00 0x05>;
+					};
+					mpp6@06 {
+						/* Connected to test point TP43 */
+						reg = <0x00 0x06>;
+					};
+					mpp7@07 {
+						/* Connected to battery thermistor */
+						reg = <0x00 0x07>;
+					};
+					mpp8@08 {
+						/* Connected to battery ID detector */
+						reg = <0x00 0x08>;
+					};
+					mpp9@09 {
+						/* Connected to XO thermistor */
+						reg = <0x00 0x09>;
+					};
+				};
+
 				led@48 {
 					/*
 					 * The keypad LED @0x48 is routed to
@@ -340,6 +432,7 @@
 					reg = <0x48>;
 					label = "pm8058:infrared:proximitysensor";
 					default-state = "off";
+					linux,default-trigger = "cm3605";
 				};
 				led@131 {
 					compatible = "qcom,pm8058-led";
@@ -368,6 +461,42 @@
 			};
 		};
 
+		gsbi@19800000 {
+			status = "ok";
+			qcom,mode = <GSBI_PROT_I2C>;
+
+			i2c@19880000 {
+				status = "ok";
+				pinctrl-names = "default";
+				pinctrl-0 = <&dragon_gsbi8_i2c_pins>;
+
+				eeprom@52 {
+					/* A 16KiB Platform ID EEPROM on the CPU carrier board */
+					compatible = "atmel,24c128";
+					reg = <0x52>;
+					vcc-supply = <&pm8058_s3>;
+					pagesize = <64>;
+				};
+				wm8903: wm8903@1a {
+					/* This Wolfson Micro device has an unrouted interrupt line */
+					compatible = "wlf,wm8903";
+					reg = <0x1a>;
+
+					AVDD-supply = <&pm8058_l16>;
+					CPVDD-supply = <&pm8058_l16>;
+					DBVDD-supply = <&pm8058_s3>;
+					DCVDD-supply = <&pm8058_l0>;
+
+					gpio-controller;
+					#gpio-cells = <2>;
+
+					micdet-cfg = <0>;
+					micdet-delay = <100>;
+					gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+				};
+			};
+		};
+
 		gsbi@19c00000 {
 			status = "ok";
 			qcom,mode = <GSBI_PROT_I2C_UART>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
index 8f5de02..f245064 100644
--- a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
@@ -320,43 +320,37 @@
 		};
 
 		/* OTG */
-		phy@12500000 {
-			status		= "okay";
-			dr_mode		= "peripheral";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l4>;
-		};
-
-		phy@12520000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		phy@12530000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		gadget@12500000 {
-			status = "okay";
-		};
-
-		/* OTG */
 		usb@12500000 {
 			status = "okay";
+			dr_mode = "peripheral";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l4>;
+				};
+			};
 		};
 
 		usb@12520000 {
 			status = "okay";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
 		usb@12530000 {
 			status = "okay";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
 		amba {
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index e39440a..3483a66 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -321,21 +321,15 @@
 		};
 
 		/* OTG */
-		phy@12500000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l4>;
-			dr_mode		= "otg";
-		};
-
-		gadget@12500000 {
-			status = "okay";
-		};
-
-		/* OTG */
 		usb@12500000 {
 			status = "okay";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l4>;
+				};
+			};
 		};
 
 		amba {
diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
index 35f1d46..5b151e4 100644
--- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
@@ -150,42 +150,37 @@
 		};
 
 		/* OTG */
-		usb1_phy: phy@12500000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l4>;
-		};
-
-		usb3_phy: phy@12520000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		usb4_phy: phy@12530000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		gadget1: gadget@12500000 {
+		usb@12500000 {
 			status = "ok";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l4>;
+				};
+			};
 		};
 
-		/* OTG */
-		usb1: usb@12500000 {
-			status = "ok";
-		};
-
-		usb3: usb@12520000 {
+		usb@12520000 {
 			status = "okay";
+			dr_mode = "host";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
-		usb4: usb@12530000 {
+		usb@12530000 {
 			status = "okay";
+			dr_mode = "host";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
 		/* on board fixed 3.3v supply */
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index 881ce70..053b5bd 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -244,42 +244,37 @@
 		};
 
 		/* OTG */
-		usb1_phy: phy@12500000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l4>;
-		};
-
-		usb3_phy: phy@12520000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		usb4_phy: phy@12530000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l23>;
-		};
-
-		gadget1: gadget@12500000 {
+		usb@12500000 {
 			status = "okay";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l4>;
+				};
+			};
 		};
 
-		/* OTG */
-		usb1: usb@12500000 {
+		usb@12520000 {
 			status = "okay";
+			dr_mode = "host";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
-		usb3: usb@12520000 {
+		usb@12530000 {
 			status = "okay";
-		};
-
-		usb4: usb@12530000 {
-			status = "okay";
+			dr_mode = "host";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l23>;
+				};
+			};
 		};
 
 		pci@1b500000 {
diff --git a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
index a34ba35..88a9aff 100644
--- a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
@@ -349,15 +349,15 @@
 			};
 		};
 
-		phy@12500000 {
-			status		= "okay";
-			vddcx-supply	= <&pm8921_s3>;
-			v3p3-supply	= <&pm8921_l3>;
-			v1p8-supply	= <&pm8921_l4>;
-		};
-
-		gadget@12500000 {
+		usb@12500000 {
 			status = "okay";
+			dr_mode = "otg";
+			ulpi {
+				phy {
+					v3p3-supply = <&pm8921_l3>;
+					v1p8-supply = <&pm8921_l4>;
+				};
+			};
 		};
 
 		gsbi@1a200000 {
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 14a6f5e..f3db185 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -197,7 +197,7 @@
 			clock-frequency = <27000000>;
 		};
 
-		sleep_clk {
+		sleep_clk: sleep_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
@@ -884,81 +884,97 @@
 			};
 		};
 
-		usb1_phy: phy@12500000 {
-			compatible	= "qcom,usb-otg-ci";
-			reg		= <0x12500000 0x400>;
-			interrupts	= <GIC_SPI 100 IRQ_TYPE_NONE>;
-			status		= "disabled";
-
-			clocks		= <&gcc USB_HS1_XCVR_CLK>,
-					  <&gcc USB_HS1_H_CLK>;
-			clock-names	= "core", "iface";
-
-			resets		= <&gcc USB_HS1_RESET>;
-			reset-names	= "link";
-		};
-
-		usb3_phy: phy@12520000 {
-			compatible	= "qcom,usb-otg-ci";
-			reg		= <0x12520000 0x400>;
-			interrupts	= <GIC_SPI 188 IRQ_TYPE_NONE>;
-			status		= "disabled";
-			dr_mode		= "host";
-
-			clocks		= <&gcc USB_HS3_XCVR_CLK>,
-					  <&gcc USB_HS3_H_CLK>;
-			clock-names	= "core", "iface";
-
-			resets		= <&gcc USB_HS3_RESET>;
-			reset-names	= "link";
-		};
-
-		usb4_phy: phy@12530000 {
-			compatible	= "qcom,usb-otg-ci";
-			reg		= <0x12530000 0x400>;
-			interrupts	= <GIC_SPI 215 IRQ_TYPE_NONE>;
-			status		= "disabled";
-			dr_mode		= "host";
-
-			clocks		= <&gcc USB_HS4_XCVR_CLK>,
-					  <&gcc USB_HS4_H_CLK>;
-			clock-names	= "core", "iface";
-
-			resets		= <&gcc USB_HS4_RESET>;
-			reset-names	= "link";
-		};
-
-		gadget1: gadget@12500000 {
-			compatible	= "qcom,ci-hdrc";
-			reg		= <0x12500000 0x400>;
-			status		= "disabled";
-			dr_mode		= "peripheral";
-			interrupts	= <GIC_SPI 100 IRQ_TYPE_NONE>;
-			usb-phy		= <&usb1_phy>;
-		};
-
 		usb1: usb@12500000 {
-			compatible	= "qcom,ehci-host";
-			reg		= <0x12500000 0x400>;
-			interrupts	= <GIC_SPI 100 IRQ_TYPE_NONE>;
-			status		= "disabled";
-			usb-phy		= <&usb1_phy>;
+			compatible = "qcom,ci-hdrc";
+			reg = <0x12500000 0x200>,
+			      <0x12500200 0x200>;
+			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc USB_HS1_XCVR_CLK>, <&gcc USB_HS1_H_CLK>;
+			clock-names = "core", "iface";
+			assigned-clocks = <&gcc USB_HS1_XCVR_CLK>;
+			assigned-clock-rates = <60000000>;
+			resets = <&gcc USB_HS1_RESET>;
+			reset-names = "core";
+			phy_type = "ulpi";
+			ahb-burst-config = <0>;
+			phys = <&usb_hs1_phy>;
+			phy-names = "usb-phy";
+			status = "disabled";
+			#reset-cells = <1>;
+
+			ulpi {
+				usb_hs1_phy: phy {
+					compatible = "qcom,usb-hs-phy-apq8064",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&sleep_clk>, <&cxo_board>;
+					clock-names = "sleep", "ref";
+					resets = <&usb1 0>;
+					reset-names = "por";
+				};
+			};
 		};
 
 		usb3: usb@12520000 {
-			compatible	= "qcom,ehci-host";
-			reg		= <0x12520000 0x400>;
-			interrupts	= <GIC_SPI 188 IRQ_TYPE_NONE>;
-			status		= "disabled";
-			usb-phy		= <&usb3_phy>;
+			compatible = "qcom,ci-hdrc";
+			reg = <0x12520000 0x200>,
+			      <0x12520200 0x200>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc USB_HS3_XCVR_CLK>, <&gcc USB_HS3_H_CLK>;
+			clock-names = "core", "iface";
+			assigned-clocks = <&gcc USB_HS3_XCVR_CLK>;
+			assigned-clock-rates = <60000000>;
+			resets = <&gcc USB_HS3_RESET>;
+			reset-names = "core";
+			phy_type = "ulpi";
+			ahb-burst-config = <0>;
+			phys = <&usb_hs3_phy>;
+			phy-names = "usb-phy";
+			status = "disabled";
+			#reset-cells = <1>;
+
+			ulpi {
+				usb_hs3_phy: phy {
+					compatible = "qcom,usb-hs-phy-apq8064",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&sleep_clk>, <&cxo_board>;
+					clock-names = "sleep", "ref";
+					resets = <&usb3 0>;
+					reset-names = "por";
+				};
+			};
 		};
 
 		usb4: usb@12530000 {
-			compatible	= "qcom,ehci-host";
-			reg		= <0x12530000 0x400>;
-			interrupts	= <GIC_SPI 215 IRQ_TYPE_NONE>;
-			status		= "disabled";
-			usb-phy		= <&usb4_phy>;
+			compatible = "qcom,ci-hdrc";
+			reg = <0x12530000 0x200>,
+			      <0x12530200 0x200>;
+			interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc USB_HS4_XCVR_CLK>, <&gcc USB_HS4_H_CLK>;
+			clock-names = "core", "iface";
+			assigned-clocks = <&gcc USB_HS4_XCVR_CLK>;
+			assigned-clock-rates = <60000000>;
+			resets = <&gcc USB_HS4_RESET>;
+			reset-names = "core";
+			phy_type = "ulpi";
+			ahb-burst-config = <0>;
+			phys = <&usb_hs4_phy>;
+			phy-names = "usb-phy";
+			status = "disabled";
+			#reset-cells = <1>;
+
+			ulpi {
+				usb_hs4_phy: phy {
+					compatible = "qcom,usb-hs-phy-apq8064",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&sleep_clk>, <&cxo_board>;
+					clock-names = "sleep", "ref";
+					resets = <&usb4 0>;
+					reset-names = "por";
+				};
+			};
 		};
 
 		sata_phy0: phy@1b400000 {
diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
index ad51df2..32f3b81 100644
--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
@@ -44,6 +44,26 @@
 			vqmmc-supply = <&pm8941_l13>;
 		};
 
+		usb@f9a55000 {
+			status = "ok";
+			phys = <&usb_hs2_phy>;
+			phy-select = <&tcsr 0xb000 1>;
+			extcon = <&smbb>, <&usb_id>;
+			vbus-supply = <&chg_otg>;
+			hnp-disable;
+			srp-disable;
+			adp-disable;
+			ulpi {
+				phy@b {
+					status = "ok";
+					v3p3-supply = <&pm8941_l24>;
+					v1p8-supply = <&pm8941_l6>;
+					extcon = <&smbb>;
+					qcom,init-seq = /bits/ 8 <0x1 0x63>;
+				};
+			};
+		};
+
 
 		pinctrl@fd510000 {
 			i2c11_pins: i2c11 {
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index b7a24af..4b7d972 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -154,10 +154,10 @@
 
 		i2c_0: i2c@78b7000 {
 			compatible = "qcom,i2c-qup-v2.2.1";
-			reg = <0x78b7000 0x6000>;
+			reg = <0x78b7000 0x600>;
 			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
-				 <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+				 <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
 			clock-names = "iface", "core";
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 76f4e89..f1fbffe 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -284,6 +284,29 @@
 			};
 		};
 
+		gsbi7: gsbi@16600000 {
+			status = "disabled";
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <7>;
+			reg = <0x16600000 0x100>;
+			clocks = <&gcc GSBI7_H_CLK>;
+			clock-names = "iface";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			syscon-tcsr = <&tcsr>;
+
+			gsbi7_serial: serial@16640000 {
+				compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+				reg = <0x16640000 0x1000>,
+				      <0x16600000 0x1000>;
+				interrupts = <0 158 0x0>;
+				clocks = <&gcc GSBI7_UART_CLK>, <&gcc GSBI7_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
 		sata_phy: sata-phy@1b400000 {
 			compatible = "qcom,ipq806x-sata-phy";
 			reg = <0x1b400000 0x200>;
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 747669a..1b5d31b 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -63,6 +63,22 @@
 		};
 	};
 
+	/*
+	 * These channels from the ADC are simply hardware monitors.
+	 * That is why the ADC is referred to as "HKADC" - HouseKeeping
+	 * ADC.
+	 */
+	iio-hwmon {
+		compatible = "iio-hwmon";
+		io-channels = <&xoadc 0x00 0x01>, /* Battery */
+			    <&xoadc 0x00 0x02>, /* DC in (charger) */
+			    <&xoadc 0x00 0x04>, /* VPH the main system voltage */
+			    <&xoadc 0x00 0x0b>, /* Die temperature */
+			    <&xoadc 0x00 0x0c>, /* Reference voltage 1.25V */
+			    <&xoadc 0x00 0x0d>, /* Reference voltage 0.625V */
+			    <&xoadc 0x00 0x0e>; /* Reference voltage 0.325V */
+	};
+
 	soc: soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -107,6 +123,31 @@
 			reg = <0x900000 0x4000>;
 		};
 
+
+		gsbi8: gsbi@19800000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <12>;
+			reg = <0x19800000 0x100>;
+			clocks = <&gcc GSBI8_H_CLK>;
+			clock-names = "iface";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			syscon-tcsr = <&tcsr>;
+
+			gsbi8_i2c: i2c@19880000 {
+				compatible = "qcom,i2c-qup-v1.1.1";
+				reg = <0x19880000 0x1000>;
+				interrupts = <GIC_SPI 161 IRQ_TYPE_NONE>;
+				clocks = <&gcc GSBI8_QUP_CLK>, <&gcc GSBI8_H_CLK>;
+				clock-names = "core", "iface";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+		};
+
 		gsbi12: gsbi@19c00000 {
 			compatible = "qcom,gsbi-v1.0.0";
 			cell-index = <12>;
@@ -267,6 +308,49 @@
 					row-hold = <91500>;
 				};
 
+				xoadc: xoadc@197 {
+					compatible = "qcom,pm8058-adc";
+					reg = <0x197>;
+					interrupts-extended = <&pm8058 76 IRQ_TYPE_EDGE_RISING>;
+					#address-cells = <2>;
+					#size-cells = <0>;
+					#io-channel-cells = <2>;
+
+					vcoin: adc-channel@00 {
+						reg = <0x00 0x00>;
+					};
+					vbat: adc-channel@01 {
+						reg = <0x00 0x01>;
+					};
+					dcin: adc-channel@02 {
+						reg = <0x00 0x02>;
+					};
+					ichg: adc-channel@03 {
+						reg = <0x00 0x03>;
+					};
+					vph_pwr: adc-channel@04 {
+						reg = <0x00 0x04>;
+					};
+					usb_vbus: adc-channel@0a {
+						reg = <0x00 0x0a>;
+					};
+					die_temp: adc-channel@0b {
+						reg = <0x00 0x0b>;
+					};
+					ref_625mv: adc-channel@0c {
+						reg = <0x00 0x0c>;
+					};
+					ref_1250mv: adc-channel@0d {
+						reg = <0x00 0x0d>;
+					};
+					ref_325mv: adc-channel@0e {
+						reg = <0x00 0x0e>;
+					};
+					ref_muxoff: adc-channel@0f {
+						reg = <0x00 0x0f>;
+					};
+				};
+
 				rtc@1e8 {
 					compatible = "qcom,pm8058-rtc";
 					reg = <0x1e8>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index e7c1577..5096637 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -92,7 +92,6 @@
 					vdd_l9_l10_l17_l22-supply = <&vreg_boost>;
 					vdd_l13_l20_l23_l24-supply = <&vreg_boost>;
 					vdd_l21-supply = <&vreg_boost>;
-					vin_5vs-supply = <&pm8941_5v>;
 
 					s1 {
 						regulator-min-microvolt = <1300000>;
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 307bf6a..c5ee68a 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -3,6 +3,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/reset/qcom,gcc-msm8974.h>
 #include <dt-bindings/gpio/gpio.h>
 #include "skeleton.dtsi"
 
@@ -551,6 +552,11 @@
 			reg = <0xfc400000 0x4000>;
 		};
 
+		tcsr: syscon@fd4a0000 {
+			compatible = "syscon";
+			reg = <0xfd4a0000 0x10000>;
+		};
+
 		tcsr_mutex_block: syscon@fd484000 {
 			compatible = "syscon";
 			reg = <0xfd484000 0x2000>;
@@ -620,6 +626,50 @@
 			status = "disabled";
 		};
 
+		otg: usb@f9a55000 {
+			compatible = "qcom,ci-hdrc";
+			reg = <0xf9a55000 0x200>,
+			      <0xf9a55200 0x200>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_USB_HS_AHB_CLK>,
+				 <&gcc GCC_USB_HS_SYSTEM_CLK>;
+			clock-names = "iface", "core";
+			assigned-clocks = <&gcc GCC_USB_HS_SYSTEM_CLK>;
+			assigned-clock-rates = <75000000>;
+			resets = <&gcc GCC_USB_HS_BCR>;
+			reset-names = "core";
+			phy_type = "ulpi";
+			dr_mode = "otg";
+			ahb-burst-config = <0>;
+			phy-names = "usb-phy";
+			status = "disabled";
+			#reset-cells = <1>;
+
+			ulpi {
+				usb_hs1_phy: phy@a {
+					compatible = "qcom,usb-hs-phy-msm8974",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&xo_board>, <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+					clock-names = "ref", "sleep";
+					resets = <&gcc GCC_USB2A_PHY_BCR>, <&otg 0>;
+					reset-names = "phy", "por";
+					status = "disabled";
+				};
+
+				usb_hs2_phy: phy@b {
+					compatible = "qcom,usb-hs-phy-msm8974",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&xo_board>, <&gcc GCC_USB2B_PHY_SLEEP_CLK>;
+					clock-names = "ref", "sleep";
+					resets = <&gcc GCC_USB2B_PHY_BCR>, <&otg 1>;
+					reset-names = "phy", "por";
+					status = "disabled";
+				};
+			};
+		};
+
 		rng@f9bff000 {
 			compatible = "qcom,prng";
 			reg = <0xf9bff000 0x200>;
@@ -1021,7 +1071,6 @@
 					pm8941_s1: s1 {};
 					pm8941_s2: s2 {};
 					pm8941_s3: s3 {};
-					pm8941_5v: s4 {};
 
 					pm8941_l1: l1 {};
 					pm8941_l2: l2 {};
@@ -1051,9 +1100,6 @@
 					pm8941_lvs1: lvs1 {};
 					pm8941_lvs2: lvs2 {};
 					pm8941_lvs3: lvs3 {};
-
-					pm8941_5vs1: 5vs1 {};
-					pm8941_5vs2: 5vs2 {};
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi
index f8eb5e3..3fc9f34 100644
--- a/arch/arm/boot/dts/qcom-pm8941.dtsi
+++ b/arch/arm/boot/dts/qcom-pm8941.dtsi
@@ -26,7 +26,14 @@
 			bias-pull-up;
 		};
 
-		charger@1000 {
+		usb_id: misc@900 {
+			compatible = "qcom,pm8941-misc";
+			reg = <0x900>;
+			interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "usb_id";
+		};
+
+		smbb: charger@1000 {
 			compatible = "qcom,pm8941-charger";
 			reg = <0x1000>;
 			interrupts = <0x0 0x10 7 IRQ_TYPE_EDGE_BOTH>,
@@ -45,6 +52,10 @@
 					  "chg-gone",
 					  "usb-valid",
 					  "dc-valid";
+
+			usb-otg-in-supply = <&pm8941_5vs1>;
+
+			chg_otg: otg-vbus { };
 		};
 
 		pm8941_gpios: gpios@c000 {
@@ -171,5 +182,28 @@
 
 			status = "disabled";
 		};
+
+		regulators {
+			compatible = "qcom,pm8941-regulators";
+			interrupts = <0x1 0x83 0x2 0>, <0x1 0x84 0x2 0>;
+			interrupt-names = "ocp-5vs1", "ocp-5vs2";
+			vin_5vs-supply = <&pm8941_5v>;
+
+			pm8941_5v: s4 {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5000000>;
+				regulator-enable-ramp-delay = <500>;
+			};
+
+			pm8941_5vs1: 5vs1 {
+				regulator-enable-ramp-delay = <1000>;
+				regulator-pull-down;
+				regulator-over-current-protection;
+				qcom,ocp-max-retries = <10>;
+				qcom,ocp-retry-delay = <30>;
+				qcom,vs-soft-start-strength = <0>;
+				regulator-initial-mode = <1>;
+			};
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/r7s72100-gr-peach.dts b/arch/arm/boot/dts/r7s72100-gr-peach.dts
new file mode 100644
index 0000000..a1b2aef9
--- /dev/null
+++ b/arch/arm/boot/dts/r7s72100-gr-peach.dts
@@ -0,0 +1,66 @@
+/*
+ * Device Tree Source for the GR-Peach board
+ *
+ * Copyright (C) 2017 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ * Copyright (C) 2016 Renesas Electronics
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r7s72100.dtsi"
+
+/ {
+	model = "GR-Peach";
+	compatible = "renesas,gr-peach", "renesas,r7s72100";
+
+	aliases {
+		serial0 = &scif2;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/mtdblock0";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@20000000 {
+		device_type = "memory";
+		reg = <0x20000000 0x00a00000>;
+
+	};
+
+	lbsc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+
+	flash@18000000 {
+		compatible = "mtd-rom";
+		probe-type = "map_rom";
+		reg = <0x18000000 0x00800000>;
+		bank-width = <4>;
+		device-width = <1>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		rootfs@600000 {
+			label = "rootfs";
+			reg = <0x00600000 0x00200000>;
+		};
+	};
+};
+
+&extal_clk {
+	clock-frequency = <13333000>;
+};
+
+&usb_x1_clk {
+	clock-frequency = <48000000>;
+};
+
+&scif2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index 0423996..5cf53e9 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -144,9 +144,9 @@
 			#clock-cells = <1>;
 			compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0xfcfe0430 4>;
-			clocks = <&b_clk>;
-			clock-indices = <R7S72100_CLK_ETHER>;
-			clock-output-names = "ether";
+			clocks = <&b_clk>, <&p1_clk>, <&p1_clk>;
+			clock-indices = <R7S72100_CLK_ETHER R7S72100_CLK_USB0 R7S72100_CLK_USB1>;
+			clock-output-names = "ether", "usb0", "usb1";
 		};
 
 		mstp8_clks: mstp8_clks@fcfe0434 {
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 1f5c9f6..3102226 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -219,7 +219,7 @@
 		power-domains = <&pd_c4>;
 	};
 
-	pfc: pfc@e6050000 {
+	pfc: pin-controller@e6050000 {
 		compatible = "renesas,pfc-r8a73a4";
 		reg = <0 0xe6050000 0 0x9000>;
 		gpio-controller;
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 7885075..1788e18 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -266,7 +266,9 @@
 	lcd0_pins: lcd0 {
 		groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
 		function = "lcd0";
+	};
 
+	lcd0_mux {
 		/* DBGMD/LCDC0/FSIA MUX */
 		gpio-hog;
 		gpios = <176 0>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 34159a8..d37d226 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -299,7 +299,7 @@
 		status = "disabled";
 	};
 
-	pfc: pfc@e6050000 {
+	pfc: pin-controller@e6050000 {
 		compatible = "renesas,pfc-r8a7740";
 		reg = <0xe6050000 0x8000>,
 		      <0xe605800c 0x20>;
diff --git a/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts b/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts
new file mode 100644
index 0000000..9b54783
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7743-iwg20d-q7.dts
@@ -0,0 +1,25 @@
+/*
+ * Device Tree Source for the iWave-RZG1M Qseven carrier board
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7743-iwg20m.dtsi"
+
+/ {
+	model = "iWave Systems RainboW-G20D-Qseven board based on RZ/G1M";
+	compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7743";
+
+	aliases {
+		serial0 = &scif0;
+	};
+};
+
+&scif0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7743-iwg20m.dtsi b/arch/arm/boot/dts/r8a7743-iwg20m.dtsi
new file mode 100644
index 0000000..001ca91
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7743-iwg20m.dtsi
@@ -0,0 +1,29 @@
+/*
+ * Device Tree Source for the iWave-RZG1M-20M Qseven SOM
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "r8a7743.dtsi"
+
+/ {
+	compatible = "iwave,g20m", "renesas,r8a7743";
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x20000000>;
+	};
+
+	memory@200000000 {
+		device_type = "memory";
+		reg = <2 0x00000000 0 0x20000000>;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <20000000>;
+};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 1e93c94..8f3156c 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -142,7 +142,7 @@
 		interrupt-controller;
 	};
 
-	pfc: pfc@fffc0000 {
+	pfc: pin-controller@fffc0000 {
 		compatible = "renesas,pfc-r8a7778";
 		reg = <0xfffc0000 0x118>;
 	};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index ae2d9a9..8ee0b2c 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -286,7 +286,7 @@
 		status = "disabled";
 	};
 
-	pfc: pfc@fffc0000 {
+	pfc: pin-controller@fffc0000 {
 		compatible = "renesas,pfc-r8a7779";
 		reg = <0xfffc0000 0x23c>;
 	};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 99269aa..2805a86 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -614,7 +614,7 @@
 		max-frequency = <97500000>;
 	};
 
-	pfc: pfc@e6060000 {
+	pfc: pin-controller@e6060000 {
 		compatible = "renesas,pfc-r8a7790";
 		reg = <0 0xe6060000 0 0x250>;
 	};
@@ -1602,16 +1602,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
+		usb@1,0 {
 			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
+		usb@2,0 {
 			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
@@ -1658,16 +1656,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
-			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
+		usb@1,0 {
+			reg = <0x20800 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
-			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
+		usb@2,0 {
+			reg = <0x21000 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 4d0c2ce..bd93f69 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -562,7 +562,7 @@
 		status = "disabled";
 	};
 
-	pfc: pfc@e6060000 {
+	pfc: pin-controller@e6060000 {
 		compatible = "renesas,pfc-r8a7791";
 		reg = <0 0xe6060000 0 0x250>;
 	};
@@ -776,6 +776,15 @@
 		status = "disabled";
 	};
 
+	adc: adc@e6e54000 {
+		compatible = "renesas,r8a7791-gyroadc", "renesas,rcar-gyroadc";
+		reg = <0 0xe6e54000 0 64>;
+		clocks = <&mstp9_clks R8A7791_CLK_GYROADC>;
+		clock-names = "fck";
+		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+		status = "disabled";
+	};
+
 	scif2: serial@e6e58000 {
 		compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
 			     "renesas,scif";
@@ -1425,13 +1434,15 @@
 		mstp9_clks: mstp9_clks@e6150994 {
 			compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
-			clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
+			clocks = <&p_clk>,
+				 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
 				 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
 				 <&p_clk>, <&p_clk>, <&cpg_clocks R8A7791_CLK_QSPI>, <&hp_clk>,
 				 <&cp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>,
 				 <&hp_clk>, <&hp_clk>;
 			#clock-cells = <1>;
 			clock-indices = <
+				R8A7791_CLK_GYROADC
 				R8A7791_CLK_GPIO7 R8A7791_CLK_GPIO6 R8A7791_CLK_GPIO5 R8A7791_CLK_GPIO4
 				R8A7791_CLK_GPIO3 R8A7791_CLK_GPIO2 R8A7791_CLK_GPIO1 R8A7791_CLK_GPIO0
 				R8A7791_CLK_RCAN1 R8A7791_CLK_RCAN0 R8A7791_CLK_QSPI_MOD R8A7791_CLK_I2C5
@@ -1439,6 +1450,7 @@
 				R8A7791_CLK_I2C1 R8A7791_CLK_I2C0
 			>;
 			clock-output-names =
+				"gyroadc",
 				"gpio7", "gpio6", "gpio5", "gpio4", "gpio3", "gpio2", "gpio1", "gpio0",
 				"rcan1", "rcan0", "qspi_mod", "i2c5", "i2c6", "i2c4", "i2c3", "i2c2",
 				"i2c1", "i2c0";
@@ -1601,16 +1613,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
+		usb@1,0 {
 			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
+		usb@2,0 {
 			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
@@ -1636,16 +1646,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
-			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
+		usb@1,0 {
+			reg = <0x10800 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
-			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
+		usb@2,0 {
+			reg = <0x11000 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 806c93f..76e3aca 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -253,17 +253,38 @@
 		};
 	};
 
+	hdmi-in {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&adv7612_in>;
+			};
+		};
+	};
+
 	hdmi-out {
 		compatible = "hdmi-connector";
 		type = "a";
 
 		port {
-			hdmi_con: endpoint {
+			hdmi_con_out: endpoint {
 				remote-endpoint = <&adv7511_out>;
 			};
 		};
 	};
 
+	composite-in {
+		compatible = "composite-video-connector";
+
+		port {
+			composite_con_in: endpoint {
+				remote-endpoint = <&adv7180_in>;
+			};
+		};
+	};
+
 	x2_clk: x2-clock {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
@@ -348,16 +369,37 @@
 	sdhi0_pins: sd0 {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
 	};
 
 	sdhi1_pins: sd1 {
 		groups = "sdhi1_data4", "sdhi1_ctrl";
 		function = "sdhi1";
+		power-source = <3300>;
+	};
+
+	sdhi1_pins_uhs: sd1_uhs {
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
+		power-source = <1800>;
 	};
 
 	sdhi2_pins: sd2 {
 		groups = "sdhi2_data4", "sdhi2_ctrl";
 		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
 	};
 
 	qspi_pins: qspi {
@@ -374,6 +416,16 @@
 		groups = "audio_clk_a";
 		function = "audio_clk";
 	};
+
+	vin0_pins: vin0 {
+		groups = "vin0_data24", "vin0_sync", "vin0_clkenb", "vin0_clk";
+		function = "vin0";
+	};
+
+	vin1_pins: vin1 {
+		groups = "vin1_data8", "vin1_clk";
+		function = "vin1";
+	};
 };
 
 &ether {
@@ -416,33 +468,40 @@
 
 &sdhi0 {
 	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio6 6 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 	status = "okay";
 };
 
 &sdhi1 {
 	pinctrl-0 = <&sdhi1_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi1_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi1>;
 	vqmmc-supply = <&vccq_sdhi1>;
 	cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 15 GPIO_ACTIVE_HIGH>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
 &sdhi2 {
 	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi2>;
 	vqmmc-supply = <&vccq_sdhi2>;
 	cd-gpios = <&gpio6 22 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
@@ -498,6 +557,32 @@
 		reg = <0x12>;
 	};
 
+	composite-in@20 {
+		compatible = "adi,adv7180cp";
+		reg = <0x20>;
+		remote = <&vin1>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7180_in: endpoint {
+					remote-endpoint = <&composite_con_in>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				adv7180_out: endpoint {
+					bus-width = <8>;
+					remote-endpoint = <&vin1ep>;
+				};
+			};
+		};
+	};
+
 	hdmi@39 {
 		compatible = "adi,adv7511w";
 		reg = <0x39>;
@@ -524,7 +609,34 @@
 			port@1 {
 				reg = <1>;
 				adv7511_out: endpoint {
-					remote-endpoint = <&hdmi_con>;
+					remote-endpoint = <&hdmi_con_out>;
+				};
+			};
+		};
+	};
+
+	hdmi-in@4c {
+		compatible = "adi,adv7612";
+		reg = <0x4c>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+		default-input = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7612_in: endpoint {
+					remote-endpoint = <&hdmi_con_in>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				adv7612_out: endpoint {
+					remote-endpoint = <&vin0ep2>;
 				};
 			};
 		};
@@ -578,3 +690,42 @@
 &ssi1 {
 	shared-pin;
 };
+
+/* HDMI video input */
+&vin0 {
+	status = "okay";
+	pinctrl-0 = <&vin0_pins>;
+	pinctrl-names = "default";
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		vin0ep2: endpoint {
+			remote-endpoint = <&adv7612_out>;
+			bus-width = <24>;
+			hsync-active = <0>;
+			vsync-active = <0>;
+			pclk-sample = <1>;
+			data-active = <1>;
+		};
+	};
+};
+
+/* composite video input */
+&vin1 {
+	pinctrl-0 = <&vin1_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		vin1ep: endpoint {
+			remote-endpoint = <&adv7180_out>;
+			bus-width = <8>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 4de6041..13b980f 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -529,7 +529,7 @@
 		status = "disabled";
 	};
 
-	pfc: pfc@e6060000 {
+	pfc: pin-controller@e6060000 {
 		compatible = "renesas,pfc-r8a7793";
 		reg = <0 0xe6060000 0 0x250>;
 	};
@@ -542,6 +542,7 @@
 		dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
 		       <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <195000000>;
 		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -554,6 +555,7 @@
 		dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
 		       <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -566,6 +568,7 @@
 		dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
 		       <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index a19b884..7d9a81d 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -820,16 +820,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
+		usb@1,0 {
 			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
+		usb@2,0 {
 			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
 			phys = <&usb0 0>;
 			phy-names = "usb";
 		};
@@ -855,16 +853,14 @@
 				 0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
 				 0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
-		usb@0,1 {
-			reg = <0x800 0 0 0 0>;
-			device_type = "pci";
+		usb@1,0 {
+			reg = <0x10800 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
 
-		usb@0,2 {
-			reg = <0x1000 0 0 0 0>;
-			device_type = "pci";
+		usb@2,0 {
+			reg = <0x11000 0 0 0 0>;
 			phys = <&usb2 0>;
 			phy-names = "usb";
 		};
diff --git a/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi b/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi
index 65cb50f..238d14b 100644
--- a/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi
+++ b/arch/arm/boot/dts/r8a77xx-aa104xd12-panel.dtsi
@@ -10,10 +10,11 @@
 
 / {
 	panel {
-		compatible = "mitsubishi,aa104xd12", "panel-dpi";
+		compatible = "mitsubishi,aa104xd12", "panel-lvds";
 
 		width-mm = <210>;
 		height-mm = <158>;
+		data-mapping = "jeida-18";
 
 		panel-timing {
 			/* 1024x768 @65Hz */
diff --git a/arch/arm/boot/dts/r8a77xx-aa121td01-panel.dtsi b/arch/arm/boot/dts/r8a77xx-aa121td01-panel.dtsi
index a07ebf8..04aafd4 100644
--- a/arch/arm/boot/dts/r8a77xx-aa121td01-panel.dtsi
+++ b/arch/arm/boot/dts/r8a77xx-aa121td01-panel.dtsi
@@ -10,10 +10,11 @@
 
 / {
 	panel {
-		compatible = "mitsubishi,aa121td01", "panel-dpi";
+		compatible = "mitsubishi,aa121td01", "panel-lvds";
 
 		width-mm = <261>;
 		height-mm = <163>;
+		data-mapping = "jeida-18";
 
 		panel-timing {
 			/* 1280x800 @60Hz */
diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts
index 275092a..1b55192 100644
--- a/arch/arm/boot/dts/rk3229-evb.dts
+++ b/arch/arm/boot/dts/rk3229-evb.dts
@@ -58,6 +58,17 @@
 		#clock-cells = <0>;
 	};
 
+	vcc_host: vcc-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio3 RK_PC4 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&host_vbus_drv>;
+		regulator-name = "vcc_host";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
 	vcc_phy: vcc-phy-regulator {
 		compatible = "regulator-fixed";
 		enable-active-high;
@@ -85,6 +96,69 @@
 	status = "okay";
 };
 
+&pinctrl {
+	usb {
+		host_vbus_drv: host-vbus-drv {
+			rockchip,pins = <3 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
 &uart2 {
 	status = "okay";
 };
+
+&u2phy0 {
+	status = "okay";
+
+	u2phy0_otg: otg-port {
+		status = "okay";
+	};
+
+	u2phy0_host: host-port {
+		phy-supply = <&vcc_host>;
+		status = "okay";
+	};
+};
+
+&u2phy1 {
+	status = "okay";
+
+	u2phy1_otg: otg-port {
+		phy-supply = <&vcc_host>;
+		status = "okay";
+	};
+
+	u2phy1_host: host-port {
+		phy-supply = <&vcc_host>;
+		status = "okay";
+	};
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_host0_ohci {
+	status = "okay";
+};
+
+&usb_host1_ehci {
+	status = "okay";
+};
+
+&usb_host1_ohci {
+	status = "okay";
+};
+
+&usb_host2_ehci {
+	status = "okay";
+};
+
+&usb_host2_ohci {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 48a0c1c..f3e4ffd 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -66,10 +66,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0xf00>;
 			resets = <&cru SRST_CORE0>;
-			operating-points = <
-				/* KHz    uV */
-				 816000 1000000
-			>;
+			operating-points-v2 = <&cpu0_opp_table>;
 			#cooling-cells = <2>; /* min followed by max */
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
@@ -80,6 +77,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0xf01>;
 			resets = <&cru SRST_CORE1>;
+			operating-points-v2 = <&cpu0_opp_table>;
 		};
 
 		cpu2: cpu@f02 {
@@ -87,6 +85,7 @@
 			compatible = "arm,cortex-a7";
 			reg = <0xf02>;
 			resets = <&cru SRST_CORE2>;
+			operating-points-v2 = <&cpu0_opp_table>;
 		};
 
 		cpu3: cpu@f03 {
@@ -94,6 +93,35 @@
 			compatible = "arm,cortex-a7";
 			reg = <0xf03>;
 			resets = <&cru SRST_CORE3>;
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+	};
+
+	cpu0_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp-408000000 {
+			opp-hz = /bits/ 64 <408000000>;
+			opp-microvolt = <950000>;
+			clock-latency-ns = <40000>;
+			opp-suspend;
+		};
+		opp-600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <975000>;
+		};
+		opp-816000000 {
+			opp-hz = /bits/ 64 <816000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp-1008000000 {
+			opp-hz = /bits/ 64 <1008000000>;
+			opp-microvolt = <1175000>;
+		};
+		opp-1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1275000>;
 		};
 	};
 
@@ -182,8 +210,61 @@
 	};
 
 	grf: syscon@11000000 {
-		compatible = "syscon";
+		compatible = "syscon", "simple-mfd";
 		reg = <0x11000000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		u2phy0: usb2-phy@760 {
+			compatible = "rockchip,rk3228-usb2phy";
+			reg = <0x0760 0x0c>;
+			clocks = <&cru SCLK_OTGPHY0>;
+			clock-names = "phyclk";
+			clock-output-names = "usb480m_phy0";
+			#clock-cells = <0>;
+			status = "disabled";
+
+			u2phy0_otg: otg-port {
+				interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "otg-bvalid", "otg-id",
+						  "linestate";
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+
+			u2phy0_host: host-port {
+				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "linestate";
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+		};
+
+		u2phy1: usb2-phy@800 {
+			compatible = "rockchip,rk3228-usb2phy";
+			reg = <0x0800 0x0c>;
+			clocks = <&cru SCLK_OTGPHY1>;
+			clock-names = "phyclk";
+			clock-output-names = "usb480m_phy1";
+			#clock-cells = <0>;
+			status = "disabled";
+
+			u2phy1_otg: otg-port {
+				interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "linestate";
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+
+			u2phy1_host: host-port {
+				interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "linestate";
+				#phy-cells = <0>;
+				status = "disabled";
+			};
+		};
 	};
 
 	uart0: serial@11010000 {
@@ -280,6 +361,14 @@
 		status = "disabled";
 	};
 
+	wdt: watchdog@110a0000 {
+		compatible = "snps,dw-wdt";
+		reg = <0x110a0000 0x100>;
+		interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru PCLK_CPU>;
+		status = "disabled";
+	};
+
 	pwm0: pwm@110b0000 {
 		compatible = "rockchip,rk3288-pwm";
 		reg = <0x110b0000 0x10>;
@@ -338,8 +427,18 @@
 		rockchip,grf = <&grf>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
-		assigned-clocks = <&cru PLL_GPLL>;
-		assigned-clock-rates = <594000000>;
+		assigned-clocks =
+			<&cru PLL_GPLL>, <&cru ARMCLK>,
+			<&cru PLL_CPLL>, <&cru ACLK_PERI>,
+			<&cru HCLK_PERI>, <&cru PCLK_PERI>,
+			<&cru ACLK_CPU>, <&cru HCLK_CPU>,
+			<&cru PCLK_CPU>;
+		assigned-clock-rates =
+			<594000000>, <816000000>,
+			<500000000>, <150000000>,
+			<150000000>, <75000000>,
+			<150000000>, <150000000>,
+			<75000000>;
 	};
 
 	thermal-zones {
@@ -388,6 +487,8 @@
 		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
 		clock-names = "tsadc", "apb_pclk";
+		assigned-clocks = <&cru SCLK_TSADC>;
+		assigned-clock-rates = <32768>;
 		resets = <&cru SRST_TSADC>;
 		reset-names = "tsadc-apb";
 		pinctrl-names = "init", "default", "sleep";
@@ -419,6 +520,89 @@
 		status = "disabled";
 	};
 
+	usb_otg: usb@30040000 {
+		compatible = "rockchip,rk3228-usb", "rockchip,rk3066-usb",
+			     "snps,dwc2";
+		reg = <0x30040000 0x40000>;
+		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_OTG>;
+		clock-names = "otg";
+		dr_mode = "otg";
+		g-np-tx-fifo-size = <16>;
+		g-rx-fifo-size = <280>;
+		g-tx-fifo-size = <256 128 128 64 32 16>;
+		g-use-dma;
+		phys = <&u2phy0_otg>;
+		phy-names = "usb2-phy";
+		status = "disabled";
+	};
+
+	usb_host0_ehci: usb@30080000 {
+		compatible = "generic-ehci";
+		reg = <0x30080000 0x20000>;
+		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST0>, <&u2phy0>;
+		clock-names = "usbhost", "utmi";
+		phys = <&u2phy0_host>;
+		phy-names = "usb";
+		status = "disabled";
+	};
+
+	usb_host0_ohci: usb@300a0000 {
+		compatible = "generic-ohci";
+		reg = <0x300a0000 0x20000>;
+		interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST0>, <&u2phy0>;
+		clock-names = "usbhost", "utmi";
+		phys = <&u2phy0_host>;
+		phy-names = "usb";
+		status = "disabled";
+	};
+
+	usb_host1_ehci: usb@300c0000 {
+		compatible = "generic-ehci";
+		reg = <0x300c0000 0x20000>;
+		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST1>, <&u2phy1>;
+		clock-names = "usbhost", "utmi";
+		phys = <&u2phy1_otg>;
+		phy-names = "usb";
+		status = "disabled";
+	};
+
+	usb_host1_ohci: usb@300e0000 {
+		compatible = "generic-ohci";
+		reg = <0x300e0000 0x20000>;
+		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST1>, <&u2phy1>;
+		clock-names = "usbhost", "utmi";
+		phys = <&u2phy1_otg>;
+		phy-names = "usb";
+		status = "disabled";
+	};
+
+	usb_host2_ehci: usb@30100000 {
+		compatible = "generic-ehci";
+		reg = <0x30100000 0x20000>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST2>, <&u2phy1>;
+		phys = <&u2phy1_host>;
+		phy-names = "usb";
+		clock-names = "usbhost", "utmi";
+		status = "disabled";
+	};
+
+	usb_host2_ohci: usb@30120000 {
+		compatible = "generic-ohci";
+		reg = <0x30120000 0x20000>;
+		interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST2>, <&u2phy1>;
+		clock-names = "usbhost", "utmi";
+		phys = <&u2phy1_host>;
+		phy-names = "usb";
+		status = "disabled";
+	};
+
 	gmac: ethernet@30200000 {
 		compatible = "rockchip,rk3228-gmac";
 		reg = <0x30200000 0x10000>;
@@ -621,9 +805,9 @@
 						<0 12 RK_FUNC_1 &pcfg_pull_none>,
 						<0 13 RK_FUNC_1 &pcfg_pull_none>,
 						<0 14 RK_FUNC_1 &pcfg_pull_none>,
-						<1 2 RK_FUNC_1 &pcfg_pull_none>,
-						<1 4 RK_FUNC_1 &pcfg_pull_none>,
-						<1 5 RK_FUNC_1 &pcfg_pull_none>;
+						<1 2 RK_FUNC_2 &pcfg_pull_none>,
+						<1 4 RK_FUNC_2 &pcfg_pull_none>,
+						<1 5 RK_FUNC_2 &pcfg_pull_none>;
 			};
 		};
 
@@ -693,10 +877,15 @@
 
 		uart2 {
 			uart2_xfer: uart2-xfer {
-				rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_none>,
+				rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
 						<1 19 RK_FUNC_2 &pcfg_pull_none>;
 			};
 
+			uart21_xfer: uart21-xfer {
+				rockchip,pins = <1 10 RK_FUNC_2 &pcfg_pull_up>,
+						<1 9 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
 			uart2_cts: uart2-cts {
 				rockchip,pins = <0 25 RK_FUNC_1 &pcfg_pull_none>;
 			};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
index d0b3204a..b11a282 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -48,6 +48,19 @@
 	model = "Firefly-RK3288-reload";
 	compatible = "firefly,firefly-rk3288-reload", "rockchip,rk3288";
 
+	adc-keys {
+		compatible = "adc-keys";
+		io-channels = <&saradc 1>;
+		io-channel-names = "buttons";
+		keyup-threshold-microvolt = <1800000>;
+
+		button-recovery {
+			label = "Recovery";
+			linux,code = <KEY_VENDOR>;
+			press-threshold-microvolt = <0>;
+		};
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 
@@ -246,6 +259,10 @@
 	status = "okay";
 };
 
+&saradc {
+	status = "okay";
+};
+
 &sdmmc {
 	bus-width = <4>;
 	cap-mmc-highspeed;
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 10793ac..32dabae 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -49,6 +49,19 @@
 		reg = <0 0x80000000>;
 	};
 
+	adc-keys {
+		compatible = "adc-keys";
+		io-channels = <&saradc 1>;
+		io-channel-names = "buttons";
+		keyup-threshold-microvolt = <1800000>;
+
+		button-recovery {
+			label = "Recovery";
+			linux,code = <KEY_VENDOR>;
+			press-threshold-microvolt = <0>;
+		};
+	};
+
 	dovdd_1v8: dovdd-1v8-regulator {
 		compatible = "regulator-fixed";
 		regulator-name = "dovdd_1v8";
@@ -219,6 +232,11 @@
 	status = "ok";
 };
 
+&gpu {
+	mali-supply = <&vdd_gpu>;
+	status = "okay";
+};
+
 &hdmi {
 	ddc-i2c-bus = <&i2c5>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index f0778a4..749a9b8 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -113,6 +113,11 @@
 	tx_delay = <0x30>;
 };
 
+&gpu {
+	mali-supply = <&vdd_gpu>;
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index a23a948..8ed25e9 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -125,10 +125,6 @@
 		gpio = <&gpio0 RK_PB6 GPIO_ACTIVE_HIGH>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&host_vbus_drv>;
-		/* Always on as the rockchip usb phy doesn't have a vbus-supply
-		 * property
-		 */
-		regulator-always-on;
 		regulator-name = "vcc_host";
 	};
 
@@ -279,6 +275,10 @@
 	status = "okay";
 };
 
+&usbphy1 {
+	vbus-supply = <&vcc_usb_host>;
+};
+
 &usb_host0_ehci {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 5d1eb0a..d709fa1 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -161,6 +161,11 @@
 	pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
 };
 
+&gpu {
+	mali-supply = <&vdd_gpu>;
+	status = "okay";
+};
+
 &hdmi {
 	ddc-i2c-bus = <&i2c5>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index ad5d602..2484f11 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -43,6 +43,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/pinctrl/rockchip.h>
 #include <dt-bindings/clock/rk3288-cru.h>
+#include <dt-bindings/power/rk3288-power.h>
 #include <dt-bindings/thermal/thermal.h>
 #include <dt-bindings/power/rk3288-power.h>
 #include <dt-bindings/soc/rockchip,boot-mode.h>
@@ -1125,6 +1126,48 @@
 		};
 	};
 
+	gpu: mali@ffa30000 {
+		compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard";
+		reg = <0xffa30000 0x10000>;
+		interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "job", "mmu", "gpu";
+		clocks = <&cru ACLK_GPU>;
+		operating-points-v2 = <&gpu_opp_table>;
+		power-domains = <&power RK3288_PD_GPU>;
+		status = "disabled";
+	};
+
+	gpu_opp_table: gpu-opp-table {
+		compatible = "operating-points-v2";
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			opp-microvolt = <950000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			opp-microvolt = <950000>;
+		};
+		opp@300000000 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <1100000>;
+		};
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			opp-microvolt = <1200000>;
+		};
+		opp@600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <1250000>;
+		};
+	};
+
 	qos_gpu_r: qos@ffaa0000 {
 		compatible = "syscon";
 		reg = <0xffaa0000 0x20>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts
similarity index 94%
rename from arch/arm/boot/dts/rk1108-evb.dts
rename to arch/arm/boot/dts/rv1108-evb.dts
index 3956cff..58cf4ac 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/rv1108-evb.dts
@@ -40,11 +40,11 @@
 
 /dts-v1/;
 
-#include "rk1108.dtsi"
+#include "rv1108.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+	model = "Rockchip RV1108 Evaluation board";
+	compatible = "rockchip,rv1108-evb", "rockchip,rv1108";
 
 	memory@60000000 {
 		device_type = "memory";
diff --git a/arch/arm/boot/dts/rk1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
similarity index 95%
rename from arch/arm/boot/dts/rk1108.dtsi
rename to arch/arm/boot/dts/rv1108.dtsi
index 1297924..437098b 100644
--- a/arch/arm/boot/dts/rk1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -47,7 +47,7 @@
 	#address-cells = <1>;
 	#size-cells = <1>;
 
-	compatible = "rockchip,rk1108";
+	compatible = "rockchip,rv1108";
 
 	interrupt-parent = <&gic>;
 
@@ -113,7 +113,7 @@
 	};
 
 	uart2: serial@10210000 {
-		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		compatible = "rockchip,rv1108-uart", "snps,dw-apb-uart";
 		reg = <0x10210000 0x100>;
 		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
@@ -127,7 +127,7 @@
 	};
 
 	uart1: serial@10220000 {
-		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		compatible = "rockchip,rv1108-uart", "snps,dw-apb-uart";
 		reg = <0x10220000 0x100>;
 		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
@@ -141,7 +141,7 @@
 	};
 
 	uart0: serial@10230000 {
-		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		compatible = "rockchip,rv1108-uart", "snps,dw-apb-uart";
 		reg = <0x10230000 0x100>;
 		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
 		reg-shift = <2>;
@@ -155,17 +155,17 @@
 	};
 
 	grf: syscon@10300000 {
-		compatible = "rockchip,rk1108-grf", "syscon";
+		compatible = "rockchip,rv1108-grf", "syscon";
 		reg = <0x10300000 0x1000>;
 	};
 
 	pmugrf: syscon@20060000 {
-		compatible = "rockchip,rk1108-pmugrf", "syscon";
+		compatible = "rockchip,rv1108-pmugrf", "syscon";
 		reg = <0x20060000 0x1000>;
 	};
 
 	cru: clock-controller@20200000 {
-		compatible = "rockchip,rk1108-cru";
+		compatible = "rockchip,rv1108-cru";
 		reg = <0x20200000 0x1000>;
 		rockchip,grf = <&grf>;
 		#clock-cells = <1>;
@@ -173,7 +173,7 @@
 	};
 
 	emmc: dwmmc@30110000 {
-		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		clock-freq-min-max = <400000 150000000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
@@ -185,7 +185,7 @@
 	};
 
 	sdio: dwmmc@30120000 {
-		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		clock-freq-min-max = <400000 150000000>;
 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
@@ -197,7 +197,7 @@
 	};
 
 	sdmmc: dwmmc@30130000 {
-		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		compatible = "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc";
 		clock-freq-min-max = <400000 100000000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 8067c71..cc06da3 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -135,6 +135,12 @@
 		#size-cells = <1>;
 		ranges;
 
+		nfc_sram: sram@00100000 {
+			compatible = "mmio-sram";
+			no-memory-wc;
+			reg = <0x00100000 0x2400>;
+		};
+
 		usb0: gadget@00300000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -291,6 +297,32 @@
 			cache-level = <2>;
 		};
 
+		ebi: ebi@10000000 {
+			compatible = "atmel,sama5d3-ebi";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			atmel,smc = <&hsmc>;
+			reg = <0x10000000 0x10000000
+			       0x40000000 0x30000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x60000000 0x10000000
+				  0x2 0x0 0x70000000 0x10000000
+				  0x3 0x0 0x80000000 0x10000000>;
+			clocks = <&mck>;
+			status = "disabled";
+
+			nand_controller: nand-controller {
+				compatible = "atmel,sama5d3-nand-controller";
+				atmel,nfc-sram = <&nfc_sram>;
+				atmel,nfc-io = <&nfc_io>;
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
+				#size-cells = <1>;
+				ranges;
+				status = "disabled";
+			};
+		};
+
 		nand0: nand@80000000 {
 			compatible = "atmel,sama5d2-nand";
 			#address-cells = <1>;
@@ -347,6 +379,11 @@
 			status = "disabled";
 		};
 
+		nfc_io: nfc-io@c0000000 {
+			compatible = "atmel,sama5d3-nfc-io", "syscon";
+			reg = <0xc0000000 0x8000000>;
+		};
+
 		apb {
 			compatible = "simple-bus";
 			#address-cells = <1>;
@@ -762,6 +799,18 @@
 						atmel,clk-output-range = <0 83000000>;
 					};
 
+					can0_clk: can0_clk {
+						#clock-cells = <0>;
+						reg = <56>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					can1_clk: can1_clk {
+						#clock-cells = <0>;
+						reg = <57>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
 					classd_clk: classd_clk {
 						#clock-cells = <0>;
 						reg = <59>;
@@ -890,6 +939,18 @@
 						#clock-cells = <0>;
 						reg = <55>;
 					};
+
+					can0_gclk: can0_gclk {
+						#clock-cells = <0>;
+						reg = <56>;
+						atmel,clk-output-range = <0 80000000>;
+					};
+
+					can1_gclk: can1_gclk {
+						#clock-cells = <0>;
+						reg = <57>;
+						atmel,clk-output-range = <0 80000000>;
+					};
 				};
 			};
 
@@ -986,6 +1047,22 @@
 				clock-names = "t0_clk", "slow_clk";
 			};
 
+			hsmc: hsmc@f8014000 {
+				compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+				reg = <0xf8014000 0x1000>;
+				interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+				clocks = <&hsmc_clk>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges;
+
+				pmecc: ecc-engine@ffffc070 {
+					compatible = "atmel,sama5d2-pmecc";
+					reg = <0xffffc070 0x490>,
+					      <0xffffc500 0x100>;
+				};
+			};
+
 			pdmic: pdmic@f8018000 {
 				compatible = "atmel,sama5d2-pdmic";
 				reg = <0xf8018000 0x124>;
@@ -1065,6 +1142,14 @@
 				status = "disabled";
 			};
 
+			pwm0: pwm@f802c000 {
+				compatible = "atmel,sama5d2-pwm";
+				reg = <0xf802c000 0x4000>;
+				interrupts = <38 IRQ_TYPE_LEVEL_HIGH 7>;
+				#pwm-cells = <3>;
+				clocks = <&pwm_clk>;
+			};
+
 			sfr: sfr@f8030000 {
 				compatible = "atmel,sama5d2-sfr", "syscon";
 				reg = <0xf8030000 0x98>;
@@ -1144,6 +1229,22 @@
 				clocks = <&clk32k>;
 			};
 
+			can0: can@f8054000 {
+				compatible = "bosch,m_can";
+				reg = <0xf8054000 0x4000>, <0x210000 0x4000>;
+				reg-names = "m_can", "message_ram";
+				interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>,
+					     <64 IRQ_TYPE_LEVEL_HIGH 7>;
+				interrupt-names = "int0", "int1";
+				clocks = <&can0_clk>, <&can0_gclk>;
+				clock-names = "hclk", "cclk";
+				assigned-clocks = <&can0_gclk>;
+				assigned-clock-parents = <&utmi>;
+				assigned-clock-rates = <40000000>;
+				bosch,mram-cfg = <0x0 0 0 64 0 0 32 32>;
+				status = "disabled";
+			};
+
 			spi1: spi@fc000000 {
 				compatible = "atmel,at91rm9200-spi";
 				reg = <0xfc000000 0x100>;
@@ -1305,6 +1406,22 @@
 				status = "okay";
 			};
 
+			can1: can@fc050000 {
+				compatible = "bosch,m_can";
+				reg = <0xfc050000 0x4000>, <0x210000 0x4000>;
+				reg-names = "m_can", "message_ram";
+				interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>,
+					     <65 IRQ_TYPE_LEVEL_HIGH 7>;
+				interrupt-names = "int0", "int1";
+				clocks = <&can1_clk>, <&can1_gclk>;
+				clock-names = "hclk", "cclk";
+				assigned-clocks = <&can1_gclk>;
+				assigned-clock-parents = <&utmi>;
+				assigned-clock-rates = <40000000>;
+				bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>;
+				status = "disabled";
+			};
+
 			sfrbu: sfr@fc05c000 {
 				compatible = "atmel,sama5d2-sfrbu", "syscon";
 				reg = <0xfc05c000 0x20>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index b06448b..554d0bd 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -429,6 +429,22 @@
 				clocks = <&trng_clk>;
 			};
 
+			hsmc: hsmc@ffffc000 {
+				compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+				reg = <0xffffc000 0x1000>;
+				interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+				clocks = <&hsmc_clk>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges;
+
+				pmecc: ecc-engine@ffffc070 {
+					compatible = "atmel,at91sam9g45-pmecc";
+					reg = <0xffffc070 0x490>,
+					      <0xffffc500 0x100>;
+				};
+			};
+
 			dma0: dma-controller@ffffe600 {
 				compatible = "atmel,at91sam9g45-dma";
 				reg = <0xffffe600 0x200>;
@@ -554,6 +570,66 @@
 					};
 				};
 
+				ebi {
+					pinctrl_ebi_addr: ebi-addr-0 {
+						atmel,pins =
+							<AT91_PIOE 1 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 2 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 3 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 4 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 5 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 7 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 9 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 13 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 14 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 15 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 16 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 18 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 19 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 20 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 21 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 22 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 23 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nand_addr: ebi-addr-1 {
+						atmel,pins =
+							<AT91_PIOE 21 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 22 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs0: ebi-cs0-0 {
+						atmel,pins =
+							<AT91_PIOE 24 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs1: ebi-cs1-0 {
+						atmel,pins =
+							<AT91_PIOE 25 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs2: ebi-cs2-0 {
+						atmel,pins =
+							<AT91_PIOE 26 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nwait: ebi-nwait-0 {
+						atmel,pins =
+							<AT91_PIOE 28 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nwr1_nbs1: ebi-nwr1-nbs1-0 {
+						atmel,pins =
+							<AT91_PIOE 27 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+				};
+
 				i2c0 {
 					pinctrl_i2c0: i2c0-0 {
 						atmel,pins =
@@ -1326,6 +1402,12 @@
 			};
 		};
 
+		nfc_sram: sram@200000 {
+			compatible = "mmio-sram";
+			no-memory-wc;
+			reg = <0x200000 0x2400>;
+		};
+
 		usb0: gadget@00500000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -1461,36 +1543,35 @@
 			status = "disabled";
 		};
 
-		nand0: nand@60000000 {
-			compatible = "atmel,at91rm9200-nand";
-			#address-cells = <1>;
+		ebi: ebi@10000000 {
+			compatible = "atmel,sama5d3-ebi";
+			#address-cells = <2>;
 			#size-cells = <1>;
-			ranges;
-			reg = <	0x60000000 0x01000000	/* EBI CS3 */
-				0xffffc070 0x00000490	/* SMC PMECC regs */
-				0xffffc500 0x00000100	/* SMC PMECC Error Location regs */
-				0x00110000 0x00018000	/* ROM code */
-				>;
-			interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand0_ale_cle>;
-			atmel,pmecc-lookup-table-offset = <0x0 0x8000>;
+			atmel,smc = <&hsmc>;
+			reg = <0x10000000 0x10000000
+			       0x40000000 0x30000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x40000000 0x10000000
+				  0x2 0x0 0x50000000 0x10000000
+				  0x3 0x0 0x60000000 0x10000000>;
+			clocks = <&mck>;
 			status = "disabled";
 
-			nfc@70000000 {
-				compatible = "atmel,sama5d3-nfc";
-				#address-cells = <1>;
+			nand_controller: nand-controller {
+				compatible = "atmel,sama5d3-nand-controller";
+				atmel,nfc-sram = <&nfc_sram>;
+				atmel,nfc-io = <&nfc_io>;
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
 				#size-cells = <1>;
-				reg = <
-					0x70000000 0x08000000	/* NFC Command Registers */
-					0xffffc000 0x00000070	/* NFC HSMC regs */
-					0x00200000 0x00100000	/* NFC SRAM banks */
-					>;
-				clocks = <&hsmc_clk>;
+				ranges;
+				status = "disabled";
 			};
 		};
+
+		nfc_io: nfc-io@70000000 {
+			compatible = "atmel,sama5d3-nfc-io", "syscon";
+			reg = <0x70000000 0x8000000>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index b5e111b..9506daf 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -36,43 +36,82 @@
 			};
 		};
 
-		nand0: nand@60000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_addr &pinctrl_ebi_cs0>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
+			nor: flash@0,0 {
+				compatible = "cfi-flash";
+				linux,mtd-name = "physmap-flash.0";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x0 0x0 0x1000000>;
+				bank-width = <2>;
+				atmel,smc-read-mode = "nrd";
+				atmel,smc-write-mode = "nwe";
+				atmel,smc-bus-width = <16>;
+				atmel,smc-ncs-rd-setup-ns = <0>;
+				atmel,smc-ncs-wr-setup-ns = <0>;
+				atmel,smc-nwe-setup-ns = <8>;
+				atmel,smc-nrd-setup-ns = <16>;
+				atmel,smc-ncs-rd-pulse-ns = <84>;
+				atmel,smc-ncs-wr-pulse-ns = <84>;
+				atmel,smc-nrd-pulse-ns = <76>;
+				atmel,smc-nwe-pulse-ns = <76>;
+				atmel,smc-nrd-cycle-ns = <107>;
+				atmel,smc-nwe-cycle-ns = <84>;
+				atmel,smc-tdf-ns = <16>;
 			};
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
+
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi b/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
index dc7572b..75cbf4d 100644
--- a/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
@@ -148,43 +148,60 @@
 			};
 		};
 
-		nand0: nand@60000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "hw";
-			atmel,has-pmecc;
-			atmel,pmecc-cap = <4>;
-			atmel,pmecc-sector-size = <512>;
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
+			pinctrl-0 = <&pinctrl_ebi_nand_addr>;
+			pinctrl-names = "default";
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x40000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
 
-			bootloader@40000 {
-				label = "bootloader";
-				reg = <0x40000 0x80000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x2>;
+					atmel,rb = <0>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "hw";
+					nand-ecc-strength = <4>;
+					nand-ecc-step-size = <512>;
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bootloaderenv@c0000 {
-				label = "bootloader env";
-				reg = <0xc0000 0xc0000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			dtb@180000 {
-				label = "device tree";
-				reg = <0x180000 0x80000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x40000>;
+						};
 
-			kernel@200000 {
-				label = "kernel";
-				reg = <0x200000 0x600000>;
-			};
+						bootloader@40000 {
+							label = "bootloader";
+							reg = <0x40000 0x80000>;
+						};
 
-			rootfs@800000 {
-				label = "rootfs";
-				reg = <0x800000 0x0f800000>;
+						bootloaderenv@c0000 {
+							label = "bootloader env";
+							reg = <0xc0000 0xc0000>;
+						};
+
+						dtb@180000 {
+							label = "device tree";
+							reg = <0x180000 0x80000>;
+						};
+
+						kernel@200000 {
+							label = "kernel";
+							reg = <0x200000 0x600000>;
+						};
+
+						rootfs@800000 {
+							label = "rootfs";
+							reg = <0x800000 0x0f800000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index d3889c9..2fa36c5 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -124,6 +124,12 @@
 		#size-cells = <1>;
 		ranges;
 
+		nfc_sram: sram@100000 {
+			compatible = "mmio-sram";
+			no-memory-wc;
+			reg = <0x100000 0x2400>;
+		};
+
 		usb0: gadget@00400000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -280,37 +286,37 @@
 			cache-level = <2>;
 		};
 
-		nand0: nand@80000000 {
-			compatible = "atmel,sama5d4-nand", "atmel,at91rm9200-nand";
-			#address-cells = <1>;
+		ebi: ebi@10000000 {
+			compatible = "atmel,sama5d3-ebi";
+			#address-cells = <2>;
 			#size-cells = <1>;
-			ranges;
-			reg = <	0x80000000 0x08000000	/* EBI CS3 */
-				0xfc05c070 0x00000490	/* SMC PMECC regs */
-				0xfc05c500 0x00000100	/* SMC PMECC Error Location regs */
-				>;
-			interrupts = <22 IRQ_TYPE_LEVEL_HIGH 6>;
-			atmel,nand-addr-offset = <21>;
-			atmel,nand-cmd-offset = <22>;
-			atmel,nand-has-dma;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_nand>;
+			atmel,smc = <&hsmc>;
+			reg = <0x10000000 0x10000000
+			       0x60000000 0x28000000>;
+			ranges = <0x0 0x0 0x10000000 0x10000000
+				  0x1 0x0 0x60000000 0x10000000
+				  0x2 0x0 0x70000000 0x10000000
+				  0x3 0x0 0x80000000 0x8000000>;
+			clocks = <&mck>;
 			status = "disabled";
 
-			nfc@90000000 {
-				compatible = "atmel,sama5d3-nfc";
-				#address-cells = <1>;
+			nand_controller: nand-controller {
+				compatible = "atmel,sama5d3-nand-controller";
+				atmel,nfc-sram = <&nfc_sram>;
+				atmel,nfc-io = <&nfc_io>;
+				ecc-engine = <&pmecc>;
+				#address-cells = <2>;
 				#size-cells = <1>;
-				reg = <
-					0x90000000 0x08000000	/* NFC Command Registers */
-					0xfc05c000 0x00000070	/* NFC HSMC regs */
-					0x00100000 0x00100000	/* NFC SRAM banks */
-                                         >;
-				clocks = <&hsmc_clk>;
-				atmel,write-by-sram;
+				ranges;
+				status = "disabled";
 			};
 		};
 
+		nfc_io: nfc-io@90000000 {
+			compatible = "atmel,sama5d3-nfc-io", "syscon";
+			reg = <0x90000000 0x8000000>;
+		};
+
 		apb {
 			compatible = "simple-bus";
 			#address-cells = <1>;
@@ -1287,6 +1293,22 @@
 				status = "okay";
 			};
 
+			hsmc: smc@fc05c000 {
+				compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+				reg = <0xfc05c000 0x1000>;
+				interrupts = <22 IRQ_TYPE_LEVEL_HIGH 6>;
+				clocks = <&hsmc_clk>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges;
+
+				pmecc: ecc-engine@fc05c070 {
+					compatible = "atmel,sama5d4-pmecc";
+					reg = <0xfc05c070 0x490>,
+					      <0xfc05c500 0x100>;
+				};
+			};
+
 			rstc@fc068600 {
 				compatible = "atmel,sama5d3-rstc", "atmel,at91sam9g45-rstc";
 				reg = <0xfc068600 0x10>;
@@ -1447,6 +1469,113 @@
 					};
 				};
 
+				ebi {
+					pinctrl_ebi_addr: ebi-addr-0 {
+						atmel,pins =
+							<AT91_PIOE 0 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 1 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 2 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 3 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 4 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 5 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 7 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 9 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 12 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 13 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 14 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 15 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 16 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 18 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 19 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 20 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 18 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 21 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 22 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOE 23 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nand_addr: ebi-addr-1 {
+						atmel,pins =
+							<AT91_PIOC 17 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 18 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs0: ebi-cs0-0 {
+						atmel,pins =
+							<AT91_PIOE 26 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs1: ebi-cs1-0 {
+						atmel,pins =
+							<AT91_PIOE 27 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs2: ebi-cs2-0 {
+						atmel,pins =
+							<AT91_PIOE 28 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_cs3: ebi-cs3-0 {
+						atmel,pins =
+							<AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_data_0_7: ebi-data-lsb-0 {
+						atmel,pins =
+							<AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE
+							 AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_data_8_15: ebi-data-msb-0 {
+						atmel,pins =
+							<AT91_PIOB 18 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 19 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 20 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 21 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 22 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 23 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 24 AT91_PERIPH_B AT91_PINCTRL_NONE
+							 AT91_PIOB 25 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nandrdy: ebi-nandrdy-0 {
+						atmel,pins =
+							<AT91_PIOC 16 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nrd_nandoe: ebi-nrd-nandoe-0 {
+						atmel,pins =
+							<AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nwait: ebi-nwait-0 {
+						atmel,pins =
+							<AT91_PIOE 30 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nwe_nandwe: ebi-nwe-nandwe-0 {
+						atmel,pins =
+							<AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_ebi_nwr1_nbs1: ebi-nwr1-nbs1-0 {
+						atmel,pins =
+							<AT91_PIOE 29 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+					};
+				};
+
 				i2c0 {
 					pinctrl_i2c0: i2c0-0 {
 						atmel,pins =
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 6b01ab3..4ea5c5a 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -444,7 +444,7 @@
 		status = "disabled";
 	};
 
-	pfc: pfc@e6050000 {
+	pfc: pin-controller@e6050000 {
 		compatible = "renesas,pfc-sh73a0";
 		reg = <0xe6050000 0x8000>,
 		      <0xe605801c 0x1c>;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index b2674bd..7e24dc8 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -557,7 +557,7 @@
 			interrupts = <0 115 4>;
 			interrupt-names = "macirq";
 			mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
-			clocks = <&emac0_clk>;
+			clocks = <&emac_0_clk>;
 			clock-names = "stmmaceth";
 			resets = <&rst EMAC0_RESET>;
 			reset-names = "stmmaceth";
@@ -575,7 +575,7 @@
 			interrupts = <0 120 4>;
 			interrupt-names = "macirq";
 			mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
-			clocks = <&emac1_clk>;
+			clocks = <&emac_1_clk>;
 			clock-names = "stmmaceth";
 			resets = <&rst EMAC1_RESET>;
 			reset-names = "stmmaceth";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
index 94e0884..3a32de9 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
@@ -130,13 +130,13 @@
 };
 
 &i2c1 {
-	speed-mode = <0>;
 	status = "okay";
 
 	/*
 	 * adjust the falling times to decrease the i2c frequency to 50Khz
 	 * because the LCD module does not work at the standard 100Khz
 	 */
+	clock-frequency = <100000>;
 	i2c-sda-falling-time-ns = <6000>;
 	i2c-scl-falling-time-ns = <6000>;
 
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
index 7b49395..b280e64 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
@@ -86,7 +86,7 @@
 
 &i2c0 {
 	status = "okay";
-	speed-mode = <0>;
+	clock-frequency = <100000>;
 
 	adxl345: adxl345@0 {
 		compatible = "adi,adxl345";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
index 21e3972..c2eb88a 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
@@ -58,7 +58,7 @@
 
 &i2c0 {
 	status = "okay";
-	speed-mode = <0>;
+	clock-frequency = <100000>;
 
 	stmpe1: stmpe811@41 {
 		compatible = "st,stmpe811";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index 8931980..655fe87 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -69,34 +69,7 @@
 		 * to be added to the gmac1 device tree blob.
 		 */
 		ethernet0 = &gmac1;
-	};
-
-	leds {
-		compatible = "gpio-leds";
-
-		hps_led0 {
-			label = "hps:green:led0";	/* ALIVE_LED_GR */
-			gpios = <&portb 19 0>;	/* HPS_GPIO48 */
-			linux,default-trigger = "heartbeat";
-		};
-
-		hps_led1 {
-			label = "hps:red:led0";		/* ALIVE_LED_RD */
-			gpios = <&portb 24 0>;	/* HPS_GPIO53 */
-			linux,default-trigger = "none";
-		};
-
-		hps_led2 {
-			label = "hps:green:led1";	/* LINK2HOST_LED_GR */
-			gpios = <&portb 25 0>;	/* HPS_GPIO54 */
-			linux,default-trigger = "heartbeat";
-		};
-
-		hps_led3 {
-			label = "hps:red:led1";		/* LINK2HOST_LED_RD */
-			gpios = <&portc 7 0>;	/* HPS_GPIO65 */
-			linux,default-trigger = "none";
-		};
+		ethernet1 = &gmac0;
 	};
 
 	gpio-keys {
@@ -203,69 +176,39 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
 		i2c@1 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <1>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
 		i2c@2 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <2>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
 		i2c@3 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <3>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
 		i2c@4 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <4>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
 		i2c@5 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <5>;
-			eeprom@51 {
-				compatible = "at,24c01";
-				pagesize = <8>;
-				reg = <0x51>;
-			};
 		};
 
-		i2c@6 {
+		i2c@6 {	/* Backplane EEPROM */
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <6>;
@@ -276,7 +219,7 @@
 			};
 		};
 
-		i2c@7 {
+		i2c@7 {	/* Power board EEPROM */
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <7>;
@@ -300,6 +243,44 @@
 	};
 };
 
+&qspi {
+	status = "okay";
+
+	n25q128@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q128";
+		reg = <0>;		/* chip select */
+		spi-max-frequency = <100000000>;
+		m25p,fast-read;
+
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+	};
+
+	n25q00@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q00";
+		reg = <1>;		/* chip select */
+		spi-max-frequency = <100000000>;
+		m25p,fast-read;
+
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+	};
+};
+
 &usb0 {
 	dr_mode = "host";
 	status = "okay";
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index b633114..dcda0bb 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -48,6 +48,7 @@
 /dts-v1/;
 #include "stm32f429.dtsi"
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
 
 / {
 	model = "STMicroelectronics STM32429i-EVAL board";
@@ -66,6 +67,14 @@
 		serial0 = &usart1;
 	};
 
+	clocks {
+		clk_ext_camera: clk-ext-camera {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <24000000>;
+		};
+	};
+
 	soc {
 		dma-ranges = <0xc0000000 0x0 0x10000000>;
 	};
@@ -124,6 +133,16 @@
 		clocks = <&rcc 0 STM32F4_AHB1_CLOCK(OTGHSULPI)>;
 		clock-names = "main_clk";
 	};
+
+	panel_rgb: panel-rgb {
+		compatible = "ampire,am-480272h3tmqw-t01h";
+		status = "okay";
+		port {
+			panel_in_rgb: endpoint {
+				remote-endpoint = <&ltdc_out_rgb>;
+			};
+		};
+	};
 };
 
 &adc {
@@ -141,10 +160,79 @@
 	clock-frequency = <25000000>;
 };
 
+&crc {
+	status = "okay";
+};
+
+&dcmi {
+	status = "okay";
+
+	port {
+		dcmi_0: endpoint {
+			remote-endpoint = <&ov2640_0>;
+			bus-width = <8>;
+			hsync-active = <0>;
+			vsync-active = <0>;
+			pclk-sample = <1>;
+		};
+	};
+};
+
 &i2c1 {
 	pinctrl-0 = <&i2c1_pins>;
 	pinctrl-names = "default";
 	status = "okay";
+
+	ov2640: camera@30 {
+		compatible = "ovti,ov2640";
+		reg = <0x30>;
+		resetb-gpios = <&stmpegpio 2 GPIO_ACTIVE_HIGH>;
+		pwdn-gpios = <&stmpegpio 0 GPIO_ACTIVE_LOW>;
+		clocks = <&clk_ext_camera>;
+		clock-names = "xvclk";
+		status = "okay";
+
+		port {
+			ov2640_0: endpoint {
+				remote-endpoint = <&dcmi_0>;
+			};
+		};
+	};
+
+	stmpe1600: stmpe1600@42 {
+		compatible = "st,stmpe1600";
+		reg = <0x42>;
+		irq-gpio = <&gpioi 8 0>;
+		irq-trigger = <3>;
+		interrupts = <8 3>;
+		interrupt-parent = <&exti>;
+		interrupt-controller;
+		wakeup-source;
+
+		stmpegpio: stmpe_gpio {
+			compatible = "st,stmpe-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+	};
+};
+
+&iwdg {
+	status = "okay";
+	timeout-sec = <32>;
+};
+
+&ltdc {
+	status = "okay";
+	pinctrl-0 = <&ltdc_pins>;
+	pinctrl-names = "default";
+	dma-ranges;
+
+	port {
+		ltdc_out_rgb: endpoint {
+			remote-endpoint = <&panel_in_rgb>;
+		};
+	};
 };
 
 &mac {
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 191fa50..ae47cde 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -102,6 +102,10 @@
 	clock-frequency = <8000000>;
 };
 
+&crc {
+	status = "okay";
+};
+
 &rtc {
 	assigned-clocks = <&rcc 1 CLK_RTC>;
 	assigned-clock-parents = <&rcc 1 CLK_LSI>;
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index b2a2b5c..a8113dc 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -65,7 +65,7 @@
 			clock-frequency = <32768>;
 		};
 
-		clk-lsi {
+		clk_lsi: clk-lsi {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <32000>;
@@ -307,6 +307,13 @@
 			status = "disabled";
 		};
 
+		iwdg: watchdog@40003000 {
+			compatible = "st,stm32-iwdg";
+			reg = <0x40003000 0x400>;
+			clocks = <&clk_lsi>;
+			status = "disabled";
+		};
+
 		usart2: serial@40004400 {
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40004400 0x400>;
@@ -549,7 +556,17 @@
 			reg = <0x40007000 0x400>;
 		};
 
-		pin-controller {
+		ltdc: display-controller@40016800 {
+			compatible = "st,stm32-ltdc";
+			reg = <0x40016800 0x200>;
+			interrupts = <88>, <89>;
+			resets = <&rcc STM32F4_APB2_RESET(LTDC)>;
+			clocks = <&rcc 1 CLK_LCD>;
+			clock-names = "lcd";
+			status = "disabled";
+		};
+
+		pinctrl: pin-controller {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "st,stm32f429-pinctrl";
@@ -561,6 +578,8 @@
 			gpioa: gpio@40020000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x0 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
 				st,bank-name = "GPIOA";
@@ -569,6 +588,8 @@
 			gpiob: gpio@40020400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x400 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
 				st,bank-name = "GPIOB";
@@ -577,6 +598,8 @@
 			gpioc: gpio@40020800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x800 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
 				st,bank-name = "GPIOC";
@@ -585,6 +608,8 @@
 			gpiod: gpio@40020c00 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0xc00 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
 				st,bank-name = "GPIOD";
@@ -593,6 +618,8 @@
 			gpioe: gpio@40021000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1000 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
 				st,bank-name = "GPIOE";
@@ -601,6 +628,8 @@
 			gpiof: gpio@40021400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1400 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
 				st,bank-name = "GPIOF";
@@ -609,6 +638,8 @@
 			gpiog: gpio@40021800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1800 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
 				st,bank-name = "GPIOG";
@@ -617,6 +648,8 @@
 			gpioh: gpio@40021c00 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1c00 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
 				st,bank-name = "GPIOH";
@@ -625,6 +658,8 @@
 			gpioi: gpio@40022000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2000 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
 				st,bank-name = "GPIOI";
@@ -633,6 +668,8 @@
 			gpioj: gpio@40022400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2400 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
 				st,bank-name = "GPIOJ";
@@ -641,6 +678,8 @@
 			gpiok: gpio@40022800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2800 0x400>;
 				clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
 				st,bank-name = "GPIOK";
@@ -764,6 +803,70 @@
 					slew-rate = <3>;
 				};
 			};
+
+			ltdc_pins: ltdc@0 {
+				pins {
+					pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
+						 <STM32F429_PI13_FUNC_LCD_VSYNC>,
+						 <STM32F429_PI14_FUNC_LCD_CLK>,
+						 <STM32F429_PI15_FUNC_LCD_R0>,
+						 <STM32F429_PJ0_FUNC_LCD_R1>,
+						 <STM32F429_PJ1_FUNC_LCD_R2>,
+						 <STM32F429_PJ2_FUNC_LCD_R3>,
+						 <STM32F429_PJ3_FUNC_LCD_R4>,
+						 <STM32F429_PJ4_FUNC_LCD_R5>,
+						 <STM32F429_PJ5_FUNC_LCD_R6>,
+						 <STM32F429_PJ6_FUNC_LCD_R7>,
+						 <STM32F429_PJ7_FUNC_LCD_G0>,
+						 <STM32F429_PJ8_FUNC_LCD_G1>,
+						 <STM32F429_PJ9_FUNC_LCD_G2>,
+						 <STM32F429_PJ10_FUNC_LCD_G3>,
+						 <STM32F429_PJ11_FUNC_LCD_G4>,
+						 <STM32F429_PJ12_FUNC_LCD_B0>,
+						 <STM32F429_PJ13_FUNC_LCD_B1>,
+						 <STM32F429_PJ14_FUNC_LCD_B2>,
+						 <STM32F429_PJ15_FUNC_LCD_B3>,
+						 <STM32F429_PK0_FUNC_LCD_G5>,
+						 <STM32F429_PK1_FUNC_LCD_G6>,
+						 <STM32F429_PK2_FUNC_LCD_G7>,
+						 <STM32F429_PK3_FUNC_LCD_B4>,
+						 <STM32F429_PK4_FUNC_LCD_B5>,
+						 <STM32F429_PK5_FUNC_LCD_B6>,
+						 <STM32F429_PK6_FUNC_LCD_B7>,
+						 <STM32F429_PK7_FUNC_LCD_DE>;
+					slew-rate = <2>;
+				};
+			};
+
+			dcmi_pins: dcmi@0 {
+				pins {
+					pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
+						 <STM32F429_PB7_FUNC_DCMI_VSYNC>,
+						 <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
+						 <STM32F429_PC6_FUNC_DCMI_D0>,
+						 <STM32F429_PC7_FUNC_DCMI_D1>,
+						 <STM32F429_PC8_FUNC_DCMI_D2>,
+						 <STM32F429_PC9_FUNC_DCMI_D3>,
+						 <STM32F429_PC11_FUNC_DCMI_D4>,
+						 <STM32F429_PD3_FUNC_DCMI_D5>,
+						 <STM32F429_PB8_FUNC_DCMI_D6>,
+						 <STM32F429_PE6_FUNC_DCMI_D7>,
+						 <STM32F429_PC10_FUNC_DCMI_D8>,
+						 <STM32F429_PC12_FUNC_DCMI_D9>,
+						 <STM32F429_PD6_FUNC_DCMI_D10>,
+						 <STM32F429_PD2_FUNC_DCMI_D11>;
+					bias-disable;
+					drive-push-pull;
+					slew-rate = <3>;
+				};
+			};
+		};
+
+		crc: crc@40023000 {
+			compatible = "st,stm32f4-crc";
+			reg = <0x40023000 0x400>;
+			clocks = <&rcc 0 STM32F4_AHB1_CLOCK(CRC)>;
+			status = "disabled";
 		};
 
 		rcc: rcc@40023810 {
@@ -842,6 +945,20 @@
 			status = "disabled";
 		};
 
+		dcmi: dcmi@50050000 {
+			compatible = "st,stm32-dcmi";
+			reg = <0x50050000 0x400>;
+			interrupts = <78>;
+			resets = <&rcc STM32F4_AHB2_RESET(DCMI)>;
+			clocks = <&rcc 0 STM32F4_AHB2_CLOCK(DCMI)>;
+			clock-names = "mclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&dcmi_pins>;
+			dmas = <&dma2 1 1 0x414 0x3>;
+			dma-names = "tx";
+			status = "disabled";
+		};
+
 		rng: rng@50060800 {
 			compatible = "st,stm32-rng";
 			reg = <0x50060800 0x400>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/stm32f746-disco.dts
similarity index 79%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/stm32f746-disco.dts
index 3956cff..18f6560 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/stm32f746-disco.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 - Vikas MANOCHA <vikas.manocha@st.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -14,7 +16,7 @@
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -39,31 +41,34 @@
  */
 
 /dts-v1/;
-
-#include "rk1108.dtsi"
+#include "stm32f746.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
+	model = "STMicroelectronics STM32F746-DISCO board";
+	compatible = "st,stm32f746-disco", "st,stm32f746";
 
 	chosen {
-		stdout-path = "serial2:1500000n8";
+		bootargs = "root=/dev/ram";
+		stdout-path = "serial0:115200n8";
 	};
+
+	memory {
+		reg = <0xC0000000 0x800000>;
+	};
+
+	aliases {
+		serial0 = &usart1;
+	};
+
 };
 
-&uart0 {
-	status = "okay";
+&clk_hse {
+	clock-frequency = <25000000>;
 };
 
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+&usart1 {
+	pinctrl-0 = <&usart1_pins_b>;
+	pinctrl-names = "default";
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index c2765ce..4506eb9 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -229,6 +229,8 @@
 			gpioa: gpio@40020000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x0 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOA)>;
 				st,bank-name = "GPIOA";
@@ -237,6 +239,8 @@
 			gpiob: gpio@40020400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x400 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOB)>;
 				st,bank-name = "GPIOB";
@@ -245,6 +249,8 @@
 			gpioc: gpio@40020800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x800 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOC)>;
 				st,bank-name = "GPIOC";
@@ -253,6 +259,8 @@
 			gpiod: gpio@40020c00 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0xc00 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOD)>;
 				st,bank-name = "GPIOD";
@@ -261,6 +269,8 @@
 			gpioe: gpio@40021000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1000 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOE)>;
 				st,bank-name = "GPIOE";
@@ -269,6 +279,8 @@
 			gpiof: gpio@40021400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1400 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOF)>;
 				st,bank-name = "GPIOF";
@@ -277,6 +289,8 @@
 			gpiog: gpio@40021800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1800 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOG)>;
 				st,bank-name = "GPIOG";
@@ -285,6 +299,8 @@
 			gpioh: gpio@40021c00 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x1c00 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOH)>;
 				st,bank-name = "GPIOH";
@@ -293,6 +309,8 @@
 			gpioi: gpio@40022000 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2000 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOI)>;
 				st,bank-name = "GPIOI";
@@ -301,6 +319,8 @@
 			gpioj: gpio@40022400 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2400 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOJ)>;
 				st,bank-name = "GPIOJ";
@@ -309,6 +329,8 @@
 			gpiok: gpio@40022800 {
 				gpio-controller;
 				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
 				reg = <0x2800 0x400>;
 				clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOK)>;
 				st,bank-name = "GPIOK";
@@ -326,6 +348,19 @@
 					bias-disable;
 				};
 			};
+
+			usart1_pins_b: usart1@1 {
+				pins1 {
+					pinmux = <STM32F746_PA9_FUNC_USART1_TX>;
+					bias-disable;
+					drive-push-pull;
+					slew-rate = <0>;
+				};
+				pins2 {
+					pinmux = <STM32F746_PB7_FUNC_USART1_RX>;
+					bias-disable;
+				};
+			};
 		};
 
 		crc: crc@40023000 {
@@ -336,6 +371,7 @@
 		};
 
 		rcc: rcc@40023800 {
+			#reset-cells = <1>;
 			#clock-cells = <2>;
 			compatible = "st,stm32f746-rcc", "st,stm32-rcc";
 			reg = <0x40023800 0x400>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/stm32f769-disco.dts
similarity index 79%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/stm32f769-disco.dts
index 3956cff..166728a 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/stm32f769-disco.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 - Vikas MANOCHA <vikas.manocha@st.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -14,7 +16,7 @@
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -39,31 +41,34 @@
  */
 
 /dts-v1/;
-
-#include "rk1108.dtsi"
+#include "stm32f746.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
+	model = "STMicroelectronics STM32F769-DISCO board";
+	compatible = "st,stm32f769-disco", "st,stm32f7";
 
 	chosen {
-		stdout-path = "serial2:1500000n8";
+		bootargs = "root=/dev/ram";
+		stdout-path = "serial0:115200n8";
 	};
+
+	memory {
+		reg = <0xC0000000 0x1000000>;
+	};
+
+	aliases {
+		serial0 = &usart1;
+	};
+
 };
 
-&uart0 {
-	status = "okay";
+&clk_hse {
+	clock-frequency = <25000000>;
 };
 
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+&usart1 {
+	pinctrl-0 = <&usart1_pins_a>;
+	pinctrl-names = "default";
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
index fcc1e06..76bbd65 100644
--- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
@@ -151,6 +151,19 @@
 					bias-disable;
 				};
 			};
+
+			usart2_pins: usart2@0 {
+				pins1 {
+					pinmux = <STM32H7_PD5_FUNC_USART2_TX>;
+					bias-disable;
+					drive-push-pull;
+					slew-rate = <0>;
+				};
+				pins2 {
+					pinmux = <STM32H7_PD6_FUNC_USART2_RX>;
+					bias-disable;
+				};
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 4685629..36a99db 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -68,6 +68,14 @@
 
 		};
 
+		usart2: serial@40004400 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40004400 0x400>;
+			interrupts = <38>;
+			status = "disabled";
+			clocks = <&timer_clk>;
+		};
+
 		timer5: timer@40000c00 {
 			compatible = "st,stm32-timer";
 			reg = <0x40000c00 0x400>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/stm32h743i-disco.dts
similarity index 79%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/stm32h743i-disco.dts
index 3956cff..79e841d 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/stm32h743i-disco.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright 2017 - Patrice Chotard <patrice.chotard@st.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -14,7 +16,7 @@
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -39,31 +41,33 @@
  */
 
 /dts-v1/;
-
-#include "rk1108.dtsi"
+#include "stm32h743.dtsi"
+#include "stm32h743-pinctrl.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
+	model = "STMicroelectronics STM32H743i-Discovery board";
+	compatible = "st,stm32h743i-disco", "st,stm32h743";
 
 	chosen {
-		stdout-path = "serial2:1500000n8";
+		bootargs = "root=/dev/ram";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		reg = <0xd0000000 0x2000000>;
+	};
+
+	aliases {
+		serial0 = &usart2;
 	};
 };
 
-&uart0 {
-	status = "okay";
+&clk_hse {
+	clock-frequency = <125000000>;
 };
 
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+&usart2 {
+	pinctrl-0 = <&usart2_pins>;
+	pinctrl-names = "default";
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index f2a01fe..f80d37d 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -171,7 +171,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
index 942d739..6b02de5 100644
--- a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
+++ b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
@@ -109,7 +109,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
index 17f8c5e..a7d6199 100644
--- a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
+++ b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
@@ -128,7 +128,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index d844938..404ce76 100644
--- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -142,7 +142,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -182,6 +182,10 @@
 
 #include "axp209.dtsi"
 
+&ac_power_supply {
+	status = "okay";
+};
+
 &reg_dcdc2 {
 	regulator-always-on;
 	regulator-min-microvolt = <1000000>;
diff --git a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
index aad3bec..e0777ae 100644
--- a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
+++ b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
@@ -163,7 +163,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-gemei-g9.dts b/arch/arm/boot/dts/sun4i-a10-gemei-g9.dts
index 9616cde..d8bfd7b 100644
--- a/arch/arm/boot/dts/sun4i-a10-gemei-g9.dts
+++ b/arch/arm/boot/dts/sun4i-a10-gemei-g9.dts
@@ -146,7 +146,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH01 */
diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
index a1a7282..856cfc9 100644
--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
@@ -107,7 +107,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts b/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
index 85dcf81..6506595 100644
--- a/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
@@ -79,7 +79,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -120,10 +120,6 @@
 	status = "okay";
 };
 
-&usb2_vbus_pin_a {
-	pins = "PH6";
-};
-
 &usb_otg {
 	dr_mode = "otg";
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-inet1.dts b/arch/arm/boot/dts/sun4i-a10-inet1.dts
index b8923b9..d51d8c3 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet1.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet1.dts
@@ -161,7 +161,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts b/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
index a1a2bbb..a8e479f 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
@@ -147,7 +147,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
index 4a27eb9..2acb89a 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
@@ -305,7 +305,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts b/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
index 4e798f0..92e3e03 100644
--- a/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
+++ b/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
@@ -100,7 +100,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index 308dc15..92b2d4a 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -140,7 +140,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-marsboard.dts b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
index 98a5f72..0f927da 100644
--- a/arch/arm/boot/dts/sun4i-a10-marsboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
@@ -141,7 +141,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
index 484c574..a5ed9e4 100644
--- a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
@@ -97,7 +97,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-mk802.dts b/arch/arm/boot/dts/sun4i-a10-mk802.dts
index 2b75745..81db682 100644
--- a/arch/arm/boot/dts/sun4i-a10-mk802.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mk802.dts
@@ -72,7 +72,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-mk802ii.dts b/arch/arm/boot/dts/sun4i-a10-mk802ii.dts
index c861fa7..e74a881 100644
--- a/arch/arm/boot/dts/sun4i-a10-mk802ii.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mk802ii.dts
@@ -83,7 +83,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index 3a2522a..462412e 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -145,7 +145,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-pcduino.dts b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
index 83596fd..84f55e7 100644
--- a/arch/arm/boot/dts/sun4i-a10-pcduino.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
@@ -147,7 +147,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
index a68c7cc..c0f8c88 100644
--- a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
@@ -149,7 +149,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index b63668e..41c2579 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -1030,12 +1030,6 @@
 				bias-pull-up;
 			};
 
-			mmc0_cd_pin_reference_design: mmc0_cd_pin@0 {
-				pins = "PH1";
-				function = "gpio_in";
-				bias-pull-up;
-			};
-
 			ps20_pins_a: ps20@0 {
 				pins = "PI20", "PI21";
 				function = "ps2";
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
index c6f742a..d2dee8d 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
@@ -136,14 +136,6 @@
 	status = "okay";
 };
 
-&usb0_vbus_pin_a {
-	pins = "PG13";
-};
-
-&usb1_vbus_pin_a {
-	pins = "PB10";
-};
-
 &usb_otg {
 	dr_mode = "host";
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
index a27c3fa..16f839d 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
@@ -168,10 +168,6 @@
 	status = "okay";
 };
 
-&usb1_vbus_pin_a {
-	pins = "PG13";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>;
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index 894f874..da95118 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -63,6 +63,17 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&hdmi_out_con>;
+			};
+		};
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		pinctrl-names = "default";
@@ -76,6 +87,10 @@
 	};
 };
 
+&be0 {
+	status = "okay";
+};
+
 &ehci0 {
 	status = "okay";
 };
@@ -91,6 +106,16 @@
 	status = "okay";
 };
 
+&hdmi {
+	status = "okay";
+};
+
+&hdmi_out {
+	hdmi_out_con: endpoint {
+		remote-endpoint = <&hdmi_con_in>;
+	};
+};
+
 &i2c0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c0_pins_a>;
@@ -248,6 +273,10 @@
 	status = "okay";
 };
 
+&tcon0 {
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
@@ -271,10 +300,6 @@
 	status = "okay";
 };
 
-&usb0_vbus_pin_a {
-	pins = "PG11";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>;
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index ea3e565..5482be1 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -216,10 +216,6 @@
 	status = "okay";
 };
 
-&usb1_vbus_pin_a {
-	pins = "PG12";
-};
-
 &usbphy {
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1e38ff8..18f25c5 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -71,7 +71,46 @@
 		};
 	};
 
+	display-engine {
+		compatible = "allwinner,sun5i-a10s-display-engine";
+		allwinner,pipelines = <&fe0>;
+	};
+
 	soc@01c00000 {
+		hdmi: hdmi@01c16000 {
+			compatible = "allwinner,sun5i-a10s-hdmi";
+			reg = <0x01c16000 0x1000>;
+			interrupts = <58>;
+			clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>,
+				 <&ccu 9>,
+				 <&ccu 16>;
+			clock-names = "ahb", "mod", "pll-0", "pll-1";
+			dmas = <&dma SUN4I_DMA_NORMAL 16>,
+			       <&dma SUN4I_DMA_NORMAL 16>,
+			       <&dma SUN4I_DMA_DEDICATED 24>;
+			dma-names = "ddc-tx", "ddc-rx", "audio-tx";
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				hdmi_in: port@0 {
+					reg = <0>;
+
+					hdmi_in_tcon0: endpoint {
+						remote-endpoint = <&tcon0_out_hdmi>;
+					};
+				};
+
+				hdmi_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+				};
+			};
+		};
+
 		pwm: pwm@01c20e00 {
 			compatible = "allwinner,sun5i-a10s-pwm";
 			reg = <0x01c20e00 0xc>;
@@ -128,3 +167,11 @@
 
 &sram_a {
 };
+
+&tcon0_out {
+	tcon0_out_hdmi: endpoint@2 {
+		reg = <2>;
+		remote-endpoint = <&hdmi_in_tcon0>;
+		allwinner,tcon-channel = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
index 34411d2..3dbb0d7 100644
--- a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
@@ -207,10 +207,6 @@
 	status = "okay";
 };
 
-&usb0_vbus_pin_a {
-	pins = "PG12";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index 2489c16..584fa57 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -186,7 +186,6 @@
 };
 
 &reg_usb0_vbus {
-	pinctrl-0 = <&usb0_vbus_pin_a>;
 	gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
 	status = "okay";
 };
@@ -202,10 +201,6 @@
 	status = "okay";
 };
 
-&usb0_vbus_pin_a {
-	pins = "PG12";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 95f591b..38072c7 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -269,10 +269,6 @@
 	status = "okay";
 };
 
-&usb0_vbus_pin_a {
-	pins = "PG12";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index d078560..879a4b0 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -132,6 +132,10 @@
 	status = "okay";
 };
 
+&battery_power_supply {
+	status = "okay";
+};
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c1_pins_a>;
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 5175f9c..98cc003 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -272,6 +272,7 @@
 					tcon0_out_tve0: endpoint@1 {
 						reg = <1>;
 						remote-endpoint = <&tve0_in_tcon0>;
+						allwinner,tcon-channel = <1>;
 					};
 				};
 			};
@@ -355,6 +356,15 @@
 			status = "disabled";
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun5i-a13-crypto",
+				     "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <54>;
+			clocks = <&ccu CLK_AHB_SS>, <&ccu CLK_SS>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index d4f74f4..9ecb5f0 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -253,6 +253,10 @@
 
 #include "axp22x.dtsi"
 
+&ac_power_supply {
+	status = "okay";
+};
+
 &reg_aldo1 {
 	regulator-min-microvolt = <3300000>;
 	regulator-max-microvolt = <3300000>;
@@ -319,7 +323,6 @@
 &tcon0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&lcd0_rgb888_pins>;
-	status = "okay";
 };
 
 &tcon0_out {
@@ -344,11 +347,6 @@
 	status = "okay";
 };
 
-&usb1_vbus_pin_a {
-	/* different pin from sunxi-common-regulators */
-	pins = "PH24";
-};
-
 &usbphy {
 	usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
 	usb0_vbus_det-gpio = <&pio 0 16 GPIO_ACTIVE_HIGH>; /* PA16 */
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 9c999d3..aebc3f9 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -232,7 +232,7 @@
 
 	de: display-engine {
 		compatible = "allwinner,sun6i-a31-display-engine";
-		allwinner,pipelines = <&fe0>;
+		allwinner,pipelines = <&fe0>, <&fe1>;
 		status = "disabled";
 	};
 
@@ -264,7 +264,6 @@
 				      "tcon-ch0",
 				      "tcon-ch1";
 			clock-output-names = "tcon0-pixel-clock";
-			status = "disabled";
 
 			ports {
 				#address-cells = <1>;
@@ -289,6 +288,43 @@
 			};
 		};
 
+		tcon1: lcd-controller@01c0d000 {
+			compatible = "allwinner,sun6i-a31-tcon";
+			reg = <0x01c0d000 0x1000>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&ccu RST_AHB1_LCD1>;
+			reset-names = "lcd";
+			clocks = <&ccu CLK_AHB1_LCD1>,
+				 <&ccu CLK_LCD1_CH0>,
+				 <&ccu CLK_LCD1_CH1>;
+			clock-names = "ahb",
+				      "tcon-ch0",
+				      "tcon-ch1";
+			clock-output-names = "tcon1-pixel-clock";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				tcon1_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					tcon1_in_drc1: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&drc1_out_tcon1>;
+					};
+				};
+
+				tcon1_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+				};
+			};
+		};
+
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun7i-a20-mmc";
 			reg = <0x01c0f000 0x1000>;
@@ -780,7 +816,8 @@
 		};
 
 		crypto: crypto-engine@01c15000 {
-			compatible = "allwinner,sun4i-a10-crypto";
+			compatible = "allwinner,sun6i-a31-crypto",
+				     "allwinner,sun4i-a10-crypto";
 			reg = <0x01c15000 0x1000>;
 			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ccu CLK_AHB1_SS>, <&ccu CLK_SS>;
@@ -896,6 +933,130 @@
 						reg = <0>;
 						remote-endpoint = <&be0_in_fe0>;
 					};
+
+					fe0_out_be1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&be1_in_fe0>;
+					};
+				};
+			};
+		};
+
+		fe1: display-frontend@01e20000 {
+			compatible = "allwinner,sun6i-a31-display-frontend";
+			reg = <0x01e20000 0x20000>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_FE1>, <&ccu CLK_FE1>,
+				 <&ccu CLK_DRAM_FE1>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_FE1>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				fe1_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					fe1_out_be0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be0_in_fe1>;
+					};
+
+					fe1_out_be1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&be1_in_fe1>;
+					};
+				};
+			};
+		};
+
+		be1: display-backend@01e40000 {
+			compatible = "allwinner,sun6i-a31-display-backend";
+			reg = <0x01e40000 0x10000>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_BE1>, <&ccu CLK_BE1>,
+				 <&ccu CLK_DRAM_BE1>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_BE1>;
+
+			assigned-clocks = <&ccu CLK_BE1>;
+			assigned-clock-rates = <300000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				be1_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					be1_in_fe0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&fe0_out_be1>;
+					};
+
+					be1_in_fe1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&fe1_out_be1>;
+					};
+				};
+
+				be1_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					be1_out_drc1: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&drc1_in_be1>;
+					};
+				};
+			};
+		};
+
+		drc1: drc@01e50000 {
+			compatible = "allwinner,sun6i-a31-drc";
+			reg = <0x01e50000 0x10000>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_DRC1>, <&ccu CLK_IEP_DRC1>,
+				 <&ccu CLK_DRAM_DRC1>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_DRC1>;
+
+			assigned-clocks = <&ccu CLK_IEP_DRC1>;
+			assigned-clock-rates = <300000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				drc1_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					drc1_in_be1: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be1_out_drc1>;
+					};
+				};
+
+				drc1_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					drc1_out_tcon1: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&tcon1_in_drc1>;
+					};
 				};
 			};
 		};
@@ -926,6 +1087,11 @@
 						reg = <0>;
 						remote-endpoint = <&fe0_out_be0>;
 					};
+
+					be0_in_fe1: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&fe1_out_be0>;
+					};
 				};
 
 				be0_out: port@1 {
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index f371275..4c10123 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -158,11 +158,16 @@
 		reg = <0x68>;
 		interrupt-parent = <&nmi_intc>;
 		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		x-powers,drive-vbus-en;
 	};
 };
 
 #include "axp22x.dtsi"
 
+&battery_power_supply {
+	status = "okay";
+};
+
 &reg_aldo3 {
 	regulator-always-on;
 	regulator-min-microvolt = <2700000>;
@@ -226,6 +231,11 @@
 	regulator-name = "vddio-csi";
 };
 
+&reg_drivevbus {
+	regulator-name = "usb0-vbus";
+	status = "okay";
+};
+
 &reg_eldo3 {
 	regulator-min-microvolt = <1080000>;
 	regulator-max-microvolt = <1320000>;
@@ -238,12 +248,18 @@
 };
 
 &usb_otg {
-	/* otg support requires support for AXP221 usb-power-supply and GPIO */
-	dr_mode = "host";
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb_power_supply {
 	status = "okay";
 };
 
 &usbphy {
+	usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+	usb0_vbus_power-supply = <&usb_power_supply>;
+	usb0_vbus-supply = <&reg_drivevbus>;
 	usb1_vbus-supply = <&reg_dldo1>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index bdfdce8..51e6f1d 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -138,7 +138,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&r_pio>;
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
index 08e776a..eb55e74 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
@@ -144,6 +144,10 @@
 
 #include "axp209.dtsi"
 
+&ac_power_supply {
+	status = "okay";
+};
+
 &ir0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir0_rx_pins_a>;
@@ -172,7 +176,7 @@
 	wakeup-source;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index ed2f35a..88a1c23 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -177,6 +177,57 @@
 };
 
 &pio {
+	gpio-line-names =
+		/* PA */
+		"ERXD3", "ERXD2", "ERXD1", "ERXD0", "ETXD3",
+			"ETXD2", "ETXD1", "ETXD0",
+		"ERXCK", "ERXERR", "ERXDV", "EMDC", "EMDIO",
+			"ETXEN", "ETXCK", "ECRS",
+		"ECOL", "ETXERR", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PB */
+		"PMU-SCK", "PMU-SDA", "", "", "", "", "", "",
+		"", "USB0-DRV", "", "", "", "", "", "",
+		"", "", "", "", "SCL", "SDA", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PC */
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PD */
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PE */
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PF */
+		"SD0-D1", "SD0-D0", "SD0-CLK", "SD0-CMD", "SD0-D3",
+			"SD0-D2", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PG */
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		"", "", "", "", "", "", "", "",
+		/* PH */
+		"TXD0", "RXD0", "IO-1", "PH3", "USB0-IDDET", "PH5", "", "",
+		"", "", "SD0-DET", "", "", "", "", "",
+		"", "", "", "", "IO-4", "IO-5", "", "EMAC-PWR-EN",
+		"LED1", "", "", "", "", "", "", "",
+		/* PI */
+		"", "", "", "IO-GCLK", "", "", "", "",
+		"", "", "SPI-CE0", "SPI-CLK", "SPI-MOSI",
+			"SPI-MISO", "SPI-CE1", "",
+		"IO-6", "IO-3", "IO-2", "IO-0", "", "", "", "",
+		"", "", "", "", "", "", "", "";
+
 	usb0_id_detect_pin: usb0_id_detect_pin@0 {
 		pins = "PH4";
 		function = "gpio_in";
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
index 83516bc..e7af1b7 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
@@ -172,7 +172,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
index a2eab7a..2a50207 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -137,7 +137,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -174,13 +174,12 @@
 	status = "okay";
 };
 
-&usb_otg {
-	dr_mode = "otg";
+#include "axp209.dtsi"
+
+&ac_power_supply {
 	status = "okay";
 };
 
-#include "axp209.dtsi"
-
 &reg_dcdc2 {
 	regulator-always-on;
 	regulator-min-microvolt = <1000000>;
@@ -220,6 +219,11 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>;
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 102903e..bb51018 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -178,7 +178,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -195,7 +195,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
@@ -336,6 +336,7 @@
 	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
 	usb0_id_det-gpios = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
 	usb0_vbus_det-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
+	usb0_vbus_power-supply = <&usb_power_supply>;
 	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 99c00b9..6e6264c 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -160,7 +160,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v0>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
index 4da4971..5580997 100644
--- a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
@@ -157,7 +157,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -173,7 +173,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts b/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
index 28d3abb..794e761 100644
--- a/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
+++ b/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
@@ -104,7 +104,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 8 5 GPIO_ACTIVE_HIGH>; /* PI5 */
diff --git a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
index d52222c..8a8a6db 100644
--- a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
@@ -121,7 +121,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 96bb0bc..004b6dd 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -85,10 +85,6 @@
 	};
 };
 
-&ahci_pwr_pin_a {
-	pins = "PB3";
-};
-
 &ahci {
 	target-supply = <&reg_ahci_5v>;
 	status = "okay";
@@ -319,10 +315,6 @@
 	status = "okay";
 };
 
-&usb2_vbus_pin_a {
-	pins = "PH12";
-};
-
 &usbphy {
 	pinctrl-names = "default";
 	pinctrl-0 = <&usb0_id_detect_pin>;
diff --git a/arch/arm/boot/dts/sun7i-a20-m3.dts b/arch/arm/boot/dts/sun7i-a20-m3.dts
index 86f6981..43c9478 100644
--- a/arch/arm/boot/dts/sun7i-a20-m3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-m3.dts
@@ -117,7 +117,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-mk808c.dts b/arch/arm/boot/dts/sun7i-a20-mk808c.dts
index c4ee307..f741309 100644
--- a/arch/arm/boot/dts/sun7i-a20-mk808c.dts
+++ b/arch/arm/boot/dts/sun7i-a20-mk808c.dts
@@ -109,7 +109,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v0>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
index 1af5b46..64c8ef9 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
@@ -187,7 +187,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
index dcd0f7a..2ce1a9f 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
@@ -130,7 +130,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
index e7d4542..097bd75 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
@@ -131,7 +131,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index def0ad8..0b7403e 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -198,7 +198,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
index f47a5c4..39bc73d 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
@@ -130,7 +130,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
index 7c96b53..777152a 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
@@ -106,10 +106,6 @@
 	status = "okay";
 };
 
-&ahci_pwr_pin_a {
-	pins = "PH2";
-};
-
 &codec {
 	status = "okay";
 };
@@ -160,7 +156,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
index e19f171..f8d0aaf 100644
--- a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
+++ b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
@@ -151,7 +151,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
diff --git a/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts b/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
index c3078d4..7f8405a 100644
--- a/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
+++ b/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
@@ -120,7 +120,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
@@ -137,7 +137,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 93aa559..96bee77 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -1019,7 +1019,8 @@
 		};
 
 		crypto: crypto-engine@01c15000 {
-			compatible = "allwinner,sun4i-a10-crypto";
+			compatible = "allwinner,sun7i-a20-crypto",
+				     "allwinner,sun4i-a10-crypto";
 			reg = <0x01c15000 0x1000>;
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&ahb_gates 5>, <&ss_clk>;
@@ -1190,12 +1191,6 @@
 				bias-pull-up;
 			};
 
-			mmc0_cd_pin_reference_design: mmc0_cd_pin@0 {
-				pins = "PH1";
-				function = "gpio_in";
-				bias-pull-up;
-			};
-
 			mmc2_pins_a: mmc2@0 {
 				pins = "PC6", "PC7", "PC8",
 				       "PC9", "PC10", "PC11";
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index 9b620cc..433cf2a 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -88,13 +88,13 @@
 };
 
 &cpu0_opp_table {
-	opp@1104000000 {
+	opp-1104000000 {
 		opp-hz = /bits/ 64 <1104000000>;
 		opp-microvolt = <1320000>;
 		clock-latency-ns = <244144>; /* 8 32k periods */
 	};
 
-	opp@1200000000 {
+	opp-1200000000 {
 		opp-hz = /bits/ 64 <1200000000>;
 		opp-microvolt = <1320000>;
 		clock-latency-ns = <244144>; /* 8 32k periods */
@@ -196,6 +196,10 @@
 	status = "okay";
 };
 
+&battery_power_supply {
+	status = "okay";
+};
+
 &reg_aldo1 {
 	regulator-always-on;
 	regulator-min-microvolt = <3000000>;
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 0139782..2266091 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -50,73 +50,73 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@120000000 {
+		opp-120000000 {
 			opp-hz = /bits/ 64 <120000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@240000000 {
+		opp-240000000 {
 			opp-hz = /bits/ 64 <240000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@312000000 {
+		opp-312000000 {
 			opp-hz = /bits/ 64 <312000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@408000000 {
+		opp-408000000 {
 			opp-hz = /bits/ 64 <408000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@480000000 {
+		opp-480000000 {
 			opp-hz = /bits/ 64 <480000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@504000000 {
+		opp-504000000 {
 			opp-hz = /bits/ 64 <504000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@600000000 {
+		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@648000000 {
+		opp-648000000 {
 			opp-hz = /bits/ 64 <648000000>;
 			opp-microvolt = <1040000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@720000000 {
+		opp-720000000 {
 			opp-hz = /bits/ 64 <720000000>;
 			opp-microvolt = <1100000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@816000000 {
+		opp-816000000 {
 			opp-hz = /bits/ 64 <816000000>;
 			opp-microvolt = <1100000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@912000000 {
+		opp-912000000 {
 			opp-hz = /bits/ 64 <912000000>;
 			opp-microvolt = <1200000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
 		};
 
-		opp@1008000000 {
+		opp-1008000000 {
 			opp-hz = /bits/ 64 <1008000000>;
 			opp-microvolt = <1200000>;
 			clock-latency-ns = <244144>; /* 8 32k periods */
@@ -164,15 +164,15 @@
 	mali_opp_table: gpu-opp-table {
 		compatible = "operating-points-v2";
 
-		opp@144000000 {
+		opp-144000000 {
 			opp-hz = /bits/ 64 <144000000>;
 		};
 
-		opp@240000000 {
+		opp-240000000 {
 			opp-hz = /bits/ 64 <240000000>;
 		};
 
-		opp@384000000 {
+		opp-384000000 {
 			opp-hz = /bits/ 64 <384000000>;
 		};
 	};
diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
index 342e1d3..aecdeeb 100644
--- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
@@ -59,6 +59,6 @@
 
 &uart0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins_b>;
+	pinctrl-0 = <&uart0_pb_pins>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index 88b1e09..cff3345 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -45,6 +45,8 @@
 /dts-v1/;
 #include "sun8i-a83t.dtsi"
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	model = "Cubietech Cubietruck Plus";
 	compatible = "cubietech,cubietruck-plus", "allwinner,sun8i-a83t";
@@ -56,10 +58,56 @@
 	chosen {
 		stdout-path = "serial0:115200n8";
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		blue {
+			label = "cubietruck-plus:blue:usr";
+			gpios = <&pio 3 25 GPIO_ACTIVE_HIGH>; /* PD25 */
+		};
+
+		orange {
+			label = "cubietruck-plus:orange:usr";
+			gpios = <&pio 3 26 GPIO_ACTIVE_HIGH>; /* PD26 */
+		};
+
+		white {
+			label = "cubietruck-plus:white:usr";
+			gpios = <&pio 3 27 GPIO_ACTIVE_HIGH>; /* PD27 */
+		};
+
+		green {
+			label = "cubietruck-plus:green:usr";
+			gpios = <&pio 4 4 GPIO_ACTIVE_HIGH>; /* PE4 */
+		};
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "On-board SPDIF";
+
+		simple-audio-card,cpu {
+			sound-dai = <&spdif>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&spdif_out>;
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+	};
+};
+
+&spdif {
+	status = "okay";
 };
 
 &uart0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins_b>;
+	pinctrl-0 = <&uart0_pb_pins>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index 0ec1437..8923ba6 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -40,15 +40,22 @@
  *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  *     OTHER DEALINGS IN THE SOFTWARE.
-
  */
 
-#include "skeleton.dtsi"
-
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
+#include <dt-bindings/clock/sun8i-r-ccu.h>
+
 / {
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	aliases {
+	};
+
+	chosen {
+	};
 
 	cpus {
 		#address-cells = <1>;
@@ -121,6 +128,7 @@
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <24000000>;
+			clock-accuracy = <50000>;
 			clock-output-names = "osc24M";
 		};
 
@@ -146,25 +154,55 @@
 		};
 	};
 
+	memory {
+		reg = <0x40000000 0x80000000>;
+		device_type = "memory";
+	};
+
 	soc {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
 
-		pio: pinctrl@01c20800 {
+		syscon: syscon@1c00000 {
+			compatible = "allwinner,sun8i-a83t-system-controller",
+				"syscon";
+			reg = <0x01c00000 0x1000>;
+		};
+
+		dma: dma-controller@1c02000 {
+			compatible = "allwinner,sun8i-a83t-dma";
+			reg = <0x01c02000 0x1000>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 21>;
+			resets = <&ccu 7>;
+			#dma-cells = <1>;
+		};
+
+		ccu: clock@1c20000 {
+			compatible = "allwinner,sun8i-a83t-ccu";
+			reg = <0x01c20000 0x400>;
+			clocks = <&osc24M>, <&osc16Md512>;
+			clock-names = "hosc", "losc";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		pio: pinctrl@1c20800 {
 			compatible = "allwinner,sun8i-a83t-pinctrl";
 			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 			reg = <0x01c20800 0x400>;
-			clocks = <&osc24M>;
+			clocks = <&ccu 45>, <&osc24M>, <&osc16Md512>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
-			mmc0_pins_a: mmc0@0 {
+			mmc0_pins: mmc0-pins {
 				pins = "PF0", "PF1", "PF2",
 				       "PF3", "PF4", "PF5";
 				function = "mmc0";
@@ -172,18 +210,23 @@
 				bias-pull-up;
 			};
 
-			uart0_pins_a: uart0@0 {
-				pins = "PF2", "PF4";
+			spdif_tx_pin: spdif-tx-pin {
+				pins = "PE18";
+				function = "spdif";
+			};
+
+			uart0_pb_pins: uart0-pb-pins {
+				pins = "PB9", "PB10";
 				function = "uart0";
 			};
 
-			uart0_pins_b: uart0@1 {
-				pins = "PB9", "PB10";
+			uart0_pf_pins: uart0-pf-pins {
+				pins = "PF2", "PF4";
 				function = "uart0";
 			};
 		};
 
-		timer@01c20c00 {
+		timer@1c20c00 {
 			compatible = "allwinner,sun4i-a10-timer";
 			reg = <0x01c20c00 0xa0>;
 			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
@@ -191,24 +234,41 @@
 			clocks = <&osc24M>;
 		};
 
-		watchdog@01c20ca0 {
+		watchdog@1c20ca0 {
 			compatible = "allwinner,sun6i-a31-wdt";
 			reg = <0x01c20ca0 0x20>;
 			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&osc24M>;
 		};
 
+		spdif: spdif@1c21000 {
+			#sound-dai-cells = <0>;
+			compatible = "allwinner,sun8i-a83t-spdif",
+				     "allwinner,sun8i-h3-spdif";
+			reg = <0x01c21000 0x400>;
+			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 44>, <&ccu 76>;
+			resets = <&ccu 32>;
+			clock-names = "apb", "spdif";
+			dmas = <&dma 2>;
+			dma-names = "tx";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spdif_tx_pin>;
+			status = "disabled";
+		};
+
 		uart0: serial@01c28000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x01c28000 0x400>;
 			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&osc24M>;
+			clocks = <&ccu 53>;
+			resets = <&ccu 40>;
 			status = "disabled";
 		};
 
-		gic: interrupt-controller@01c81000 {
+		gic: interrupt-controller@1c81000 {
 			compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
 			reg = <0x01c81000 0x1000>,
 			      <0x01c82000 0x2000>,
@@ -218,5 +278,28 @@
 			#interrupt-cells = <3>;
 			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
+
+		r_ccu: clock@1f01400 {
+			compatible = "allwinner,sun8i-a83t-r-ccu";
+			reg = <0x01f01400 0x400>;
+			clocks = <&osc24M>, <&osc16Md512>, <&osc16M>,
+				 <&ccu 6>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		r_pio: pinctrl@1f02c00 {
+			compatible = "allwinner,sun8i-a83t-r-pinctrl";
+			reg = <0x01f02c00 0x400>;
+			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>,
+				 <&osc16Md512>;
+			clock-names = "apb", "hosc", "losc";
+			gpio-controller;
+			#gpio-cells = <3>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
index 9e8b082..6713d0f 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
@@ -57,6 +57,7 @@
 	aliases {
 		serial0 = &uart0;
 		/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+		ethernet0 = &emac;
 		ethernet1 = &xr819;
 	};
 
@@ -103,6 +104,13 @@
 	status = "okay";
 };
 
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>;
@@ -143,6 +151,19 @@
 	status = "okay";
 };
 
+&spi0 {
+	/* Disable SPI NOR by default: it's optional on Orange Pi Zero boards */
+	status = "disabled";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "mxicy,mx25l1606e", "winbond,w25q128";
+		reg = <0>;
+		spi-max-frequency = <40000000>;
+	};
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
index 52acbe1..d756ff8 100644
--- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
@@ -52,6 +52,7 @@
 	compatible = "sinovoip,bpi-m2-plus", "allwinner,sun8i-h3";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 		serial1 = &uart1;
 	};
@@ -84,6 +85,16 @@
 		};
 	};
 
+	reg_gmac_3v3: gmac-3v3 {
+		      compatible = "regulator-fixed";
+		      regulator-name = "gmac-3v3";
+		      regulator-min-microvolt = <3300000>;
+		      regulator-max-microvolt = <3300000>;
+		      startup-delay-us = <100000>;
+		      enable-active-high;
+		      gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+	};
+
 	wifi_pwrseq: wifi_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		pinctrl-names = "default";
@@ -92,6 +103,10 @@
 	};
 };
 
+&ehci0 {
+	status = "okay";
+};
+
 &ehci1 {
 	status = "okay";
 };
@@ -100,12 +115,30 @@
 	status = "okay";
 };
 
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
 &ir {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir_pins_a>;
 	status = "okay";
 };
 
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
@@ -126,7 +159,7 @@
 	non-removable;
 	status = "okay";
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
@@ -145,6 +178,10 @@
 	status = "okay";
 };
 
+&ohci0 {
+	status = "okay";
+};
+
 &ohci1 {
 	status = "okay";
 };
@@ -170,6 +207,11 @@
 	};
 };
 
+&reg_usb0_vbus {
+	gpio = <&pio 3 11 GPIO_ACTIVE_HIGH>; /* PD11 */
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
@@ -182,7 +224,14 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
-	/* USB VBUS is on as long as VCC-IO is on */
+	usb0_id_det-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	/* USB host VBUS is on as long as VCC-IO is on */
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
similarity index 85%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
index 3956cff..8ddd1b2 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -14,7 +16,7 @@
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -38,32 +40,25 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
-
-#include "rk1108.dtsi"
+#include "sun8i-h3-nanopi.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
-	};
+	model = "FriendlyArm NanoPi M1 Plus";
+	compatible = "friendlyarm,nanopi-m1-plus", "allwinner,sun8i-h3";
 };
 
-&uart0 {
+&ehci1 {
 	status = "okay";
 };
 
-&uart1 {
+&ehci2 {
 	status = "okay";
 };
 
-&uart2 {
+&ohci1 {
+	status = "okay";
+};
+
+&ohci2 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
index 8d2cc6e..78f6c24 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -46,3 +46,10 @@
 	model = "FriendlyARM NanoPi NEO";
 	compatible = "friendlyarm,nanopi-neo", "allwinner,sun8i-h3";
 };
+
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index 5b6d145..17cdeae 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -54,6 +54,7 @@
 	aliases {
 		serial0 = &uart0;
 		/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+		ethernet0 = &emac;
 		ethernet1 = &rtl8189;
 	};
 
@@ -104,10 +105,26 @@
 	};
 };
 
+&codec {
+	allwinner,pa-gpios = <&pio 0 16 GPIO_ACTIVE_HIGH>; /* PA16 */
+	allwinner,audio-routing =
+		"Speaker", "LINEOUT",
+		"MIC1", "Mic",
+		"Mic",  "MBIAS";
+	status = "okay";
+};
+
 &ehci1 {
 	status = "okay";
 };
 
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
 &ir {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir_pins_a>;
@@ -195,10 +212,6 @@
 	status = "disabled";
 };
 
-&usb1_vbus_pin_a {
-	pins = "PG13";
-};
-
 &usbphy {
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 5fea430..6880268 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -52,6 +52,7 @@
 	compatible = "xunlong,orangepi-one", "allwinner,sun8i-h3";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 	};
 
@@ -97,6 +98,13 @@
 	status = "okay";
 };
 
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
index 8b93f5c..a10281b 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -53,6 +53,11 @@
 	};
 };
 
+&emac {
+	/* LEDs changed to active high on the plus */
+	/delete-property/ allwinner,leds-active-low;
+};
+
 &mmc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index f148111..998b60f 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -52,6 +52,7 @@
 	compatible = "xunlong,orangepi-pc", "allwinner,sun8i-h3";
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 	};
 
@@ -97,6 +98,10 @@
 	status = "okay";
 };
 
+&ehci0 {
+	status = "okay";
+};
+
 &ehci1 {
 	status = "okay";
 };
@@ -109,6 +114,13 @@
 	status = "okay";
 };
 
+&emac {
+	phy-handle = <&int_mii_phy>;
+	phy-mode = "mii";
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
 &ir {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir_pins_a>;
@@ -125,6 +137,10 @@
 	status = "okay";
 };
 
+&ohci0 {
+	status = "okay";
+};
+
 &ohci1 {
 	status = "okay";
 };
@@ -156,6 +172,11 @@
 	};
 };
 
+&reg_usb0_vbus {
+	gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
@@ -180,7 +201,14 @@
 	status = "disabled";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
-	/* USB VBUS is always on */
+	usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	/* VBUS on USB host ports is always on */
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index 8c40ab7..331ed68 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -47,6 +47,20 @@
 	model = "Xunlong Orange Pi Plus / Plus 2";
 	compatible = "xunlong,orangepi-plus", "allwinner,sun8i-h3";
 
+	aliases {
+		ethernet0 = &emac;
+	};
+
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+	};
+
 	reg_usb3_vbus: usb3-vbus {
 		compatible = "regulator-fixed";
 		pinctrl-names = "default";
@@ -64,6 +78,24 @@
 	status = "okay";
 };
 
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+
+	allwinner,leds-active-low;
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+};
+
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_8bit_pins>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
index 5851a47..80026f3 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -50,4 +50,30 @@
 / {
 	model = "Xunlong Orange Pi Plus 2E";
 	compatible = "xunlong,orangepi-plus2e", "allwinner,sun8i-h3";
+
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>; /* PD6 */
+	};
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
 };
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
similarity index 68%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
index 3956cff..d131109 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
@@ -14,7 +16,7 @@
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -38,32 +40,57 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
+#include "sun8i-v3s-licheepi-zero.dts"
 
-#include "rk1108.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+	model = "Lichee Pi Zero with Dock";
+	compatible = "licheepi,licheepi-zero-dock", "licheepi,licheepi-zero",
+		     "allwinner,sun8i-v3s";
 
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
+	leds {
+		/* The LEDs use PG0~2 pins, which conflict with MMC1 */
+		status = "disbaled";
 	};
 };
 
-&uart0 {
+&mmc1 {
+	broken-cd;
+	bus-width = <4>;
+	vmmc-supply = <&reg_vcc3v3>;
 	status = "okay";
 };
 
-&uart1 {
+&lradc {
+	vref-supply = <&reg_vcc3v0>;
 	status = "okay";
-};
 
-&uart2 {
-	status = "okay";
+	button@200 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <200000>;
+	};
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+
+	button@600 {
+		label = "Select";
+		linux,code = <KEY_SELECT>;
+		channel = <0>;
+		voltage = <600000>;
+	};
+
+	button@800 {
+		label = "Start";
+		linux,code = <KEY_OK>;
+		channel = <0>;
+		voltage = <800000>;
+	};
 };
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 7107596..3a06dc5 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -41,6 +41,8 @@
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/sun8i-v3s-ccu.h>
+#include <dt-bindings/reset/sun8i-v3s-ccu.h>
 
 / {
 	#address-cells = <1>;
@@ -55,10 +57,16 @@
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0>;
-			clocks = <&ccu 14>;
+			clocks = <&ccu CLK_CPU>;
 		};
 	};
 
+	de: display-engine {
+		compatible = "allwinner,sun8i-v3s-display-engine";
+		allwinner,pipelines = <&mixer0>;
+		status = "disabled";
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -93,18 +101,95 @@
 		#size-cells = <1>;
 		ranges;
 
+		display_clocks: clock@1000000 {
+			compatible = "allwinner,sun8i-v3s-de2-clk";
+			reg = <0x01000000 0x100000>;
+			clocks = <&ccu CLK_DE>,
+				 <&ccu CLK_BUS_DE>;
+			clock-names = "mod",
+				      "bus";
+			resets = <&ccu RST_BUS_DE>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		mixer0: mixer@1100000 {
+			compatible = "allwinner,sun8i-v3s-de2-mixer";
+			reg = <0x01100000 0x100000>;
+			clocks = <&display_clocks 0>,
+				 <&display_clocks 6>;
+			clock-names = "bus",
+				      "mod";
+			resets = <&display_clocks 0>;
+			assigned-clocks = <&display_clocks 6>;
+			assigned-clock-rates = <150000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				mixer0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					mixer0_out_tcon0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&tcon0_in_mixer0>;
+					};
+				};
+			};
+		};
+
+		tcon0: lcd-controller@1c0c000 {
+			compatible = "allwinner,sun8i-v3s-tcon";
+			reg = <0x01c0c000 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_TCON0>,
+				 <&ccu CLK_TCON0>;
+			clock-names = "ahb",
+				      "tcon-ch0";
+			clock-output-names = "tcon-pixel-clock";
+			resets = <&ccu RST_BUS_TCON0>;
+			reset-names = "lcd";
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				tcon0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					tcon0_in_mixer0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&mixer0_out_tcon0>;
+					};
+				};
+
+				tcon0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+				};
+			};
+		};
+
+
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun7i-a20-mmc";
 			reg = <0x01c0f000 0x1000>;
-			clocks = <&ccu 22>,
-				 <&ccu 45>,
-				 <&ccu 47>,
-				 <&ccu 46>;
+			clocks = <&ccu CLK_BUS_MMC0>,
+				 <&ccu CLK_MMC0>,
+				 <&ccu CLK_MMC0_OUTPUT>,
+				 <&ccu CLK_MMC0_SAMPLE>;
 			clock-names = "ahb",
 				      "mmc",
 				      "output",
 				      "sample";
-			resets = <&ccu 7>;
+			resets = <&ccu RST_BUS_MMC0>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
@@ -115,17 +200,19 @@
 		mmc1: mmc@01c10000 {
 			compatible = "allwinner,sun7i-a20-mmc";
 			reg = <0x01c10000 0x1000>;
-			clocks = <&ccu 23>,
-				 <&ccu 48>,
-				 <&ccu 50>,
-				 <&ccu 49>;
+			clocks = <&ccu CLK_BUS_MMC1>,
+				 <&ccu CLK_MMC1>,
+				 <&ccu CLK_MMC1_OUTPUT>,
+				 <&ccu CLK_MMC1_SAMPLE>;
 			clock-names = "ahb",
 				      "mmc",
 				      "output",
 				      "sample";
-			resets = <&ccu 8>;
+			resets = <&ccu RST_BUS_MMC1>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&mmc1_pins>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -134,15 +221,15 @@
 		mmc2: mmc@01c11000 {
 			compatible = "allwinner,sun7i-a20-mmc";
 			reg = <0x01c11000 0x1000>;
-			clocks = <&ccu 24>,
-				 <&ccu 51>,
-				 <&ccu 53>,
-				 <&ccu 52>;
+			clocks = <&ccu CLK_BUS_MMC2>,
+				 <&ccu CLK_MMC2>,
+				 <&ccu CLK_MMC2_OUTPUT>,
+				 <&ccu CLK_MMC2_SAMPLE>;
 			clock-names = "ahb",
 				      "mmc",
 				      "output",
 				      "sample";
-			resets = <&ccu 9>;
+			resets = <&ccu RST_BUS_MMC2>;
 			reset-names = "ahb";
 			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
@@ -153,8 +240,8 @@
 		usb_otg: usb@01c19000 {
 			compatible = "allwinner,sun8i-h3-musb";
 			reg = <0x01c19000 0x0400>;
-			clocks = <&ccu 29>;
-			resets = <&ccu 17>;
+			clocks = <&ccu CLK_BUS_OTG>;
+			resets = <&ccu RST_BUS_OTG>;
 			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "mc";
 			phys = <&usbphy 0>;
@@ -169,9 +256,9 @@
 			      <0x01c1a800 0x4>;
 			reg-names = "phy_ctrl",
 				    "pmu0";
-			clocks = <&ccu 56>;
+			clocks = <&ccu CLK_USB_PHY0>;
 			clock-names = "usb0_phy";
-			resets = <&ccu 0>;
+			resets = <&ccu RST_USB_PHY0>;
 			reset-names = "usb0_reset";
 			status = "disabled";
 			#phy-cells = <1>;
@@ -198,7 +285,7 @@
 			reg = <0x01c20800 0x400>;
 			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 37>, <&osc24M>, <&osc32k>;
+			clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>;
 			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			#gpio-cells = <3>;
@@ -222,6 +309,19 @@
 				drive-strength = <30>;
 				bias-pull-up;
 			};
+
+			mmc1_pins: mmc1 {
+				pins = "PG0", "PG1", "PG2", "PG3",
+				       "PG4", "PG5";
+				function = "mmc1";
+				drive-strength = <30>;
+				bias-pull-up;
+			};
+
+			spi0_pins: spi0 {
+				pins = "PC0", "PC1", "PC2", "PC3";
+				function = "spi0";
+			};
 		};
 
 		timer@01c20c00 {
@@ -238,14 +338,21 @@
 			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		lradc: lradc@1c22800 {
+			compatible = "allwinner,sun4i-a10-lradc-keys";
+			reg = <0x01c22800 0x400>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
 		uart0: serial@01c28000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x01c28000 0x400>;
 			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 40>;
-			resets = <&ccu 49>;
+			clocks = <&ccu CLK_BUS_UART0>;
+			resets = <&ccu RST_BUS_UART0>;
 			status = "disabled";
 		};
 
@@ -255,8 +362,8 @@
 			interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 41>;
-			resets = <&ccu 50>;
+			clocks = <&ccu CLK_BUS_UART1>;
+			resets = <&ccu RST_BUS_UART1>;
 			status = "disabled";
 		};
 
@@ -266,8 +373,8 @@
 			interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 42>;
-			resets = <&ccu 51>;
+			clocks = <&ccu CLK_BUS_UART2>;
+			resets = <&ccu RST_BUS_UART2>;
 			status = "disabled";
 		};
 
@@ -275,8 +382,8 @@
 			compatible = "allwinner,sun6i-a31-i2c";
 			reg = <0x01c2ac00 0x400>;
 			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 38>;
-			resets = <&ccu 46>;
+			clocks = <&ccu CLK_BUS_I2C0>;
+			resets = <&ccu RST_BUS_I2C0>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&i2c0_pins>;
 			status = "disabled";
@@ -288,8 +395,22 @@
 			compatible = "allwinner,sun6i-a31-i2c";
 			reg = <0x01c2b000 0x400>;
 			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 39>;
-			resets = <&ccu 47>;
+			clocks = <&ccu CLK_BUS_I2C1>;
+			resets = <&ccu RST_BUS_I2C1>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi0: spi@1c68000 {
+			compatible = "allwinner,sun8i-h3-spi";
+			reg = <0x01c68000 0x1000>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+			clock-names = "ahb", "mod";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi0_pins>;
+			resets = <&ccu RST_BUS_SPI0>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sunxi-common-regulators.dtsi b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
index ce5c53e..d8e5826f 100644
--- a/arch/arm/boot/dts/sunxi-common-regulators.dtsi
+++ b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
@@ -44,33 +44,9 @@
 
 #include <dt-bindings/gpio/gpio.h>
 
-&pio {
-	ahci_pwr_pin_a: ahci_pwr_pin@0 {
-		pins = "PB8";
-		function = "gpio_out";
-	};
-
-	usb0_vbus_pin_a: usb0_vbus_pin@0 {
-		pins = "PB9";
-		function = "gpio_out";
-	};
-
-	usb1_vbus_pin_a: usb1_vbus_pin@0 {
-		pins = "PH6";
-		function = "gpio_out";
-	};
-
-	usb2_vbus_pin_a: usb2_vbus_pin@0 {
-		pins = "PH3";
-		function = "gpio_out";
-	};
-};
-
 / {
 	reg_ahci_5v: ahci-5v {
 		compatible = "regulator-fixed";
-		pinctrl-names = "default";
-		pinctrl-0 = <&ahci_pwr_pin_a>;
 		regulator-name = "ahci-5v";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
@@ -82,8 +58,6 @@
 
 	reg_usb0_vbus: usb0-vbus {
 		compatible = "regulator-fixed";
-		pinctrl-names = "default";
-		pinctrl-0 = <&usb0_vbus_pin_a>;
 		regulator-name = "usb0-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
@@ -94,8 +68,6 @@
 
 	reg_usb1_vbus: usb1-vbus {
 		compatible = "regulator-fixed";
-		pinctrl-names = "default";
-		pinctrl-0 = <&usb1_vbus_pin_a>;
 		regulator-name = "usb1-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
@@ -107,8 +79,6 @@
 
 	reg_usb2_vbus: usb2-vbus {
 		compatible = "regulator-fixed";
-		pinctrl-names = "default";
-		pinctrl-0 = <&usb2_vbus_pin_a>;
 		regulator-name = "usb2-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 1aeeacb..6f21626 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -41,8 +41,10 @@
  */
 
 #include <dt-bindings/clock/sun8i-h3-ccu.h>
+#include <dt-bindings/clock/sun8i-r-ccu.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/sun8i-h3-ccu.h>
+#include <dt-bindings/reset/sun8i-r-ccu.h>
 
 / {
 	interrupt-parent = <&gic>;
@@ -83,6 +85,12 @@
 		#size-cells = <1>;
 		ranges;
 
+		syscon: syscon@1c00000 {
+			compatible = "allwinner,sun8i-h3-system-controller",
+				"syscon";
+			reg = <0x01c00000 0x1000>;
+		};
+
 		dma: dma-controller@01c02000 {
 			compatible = "allwinner,sun8i-h3-dma";
 			reg = <0x01c02000 0x1000>;
@@ -279,6 +287,14 @@
 			interrupt-controller;
 			#interrupt-cells = <3>;
 
+			emac_rgmii_pins: emac0 {
+				pins = "PD0", "PD1", "PD2", "PD3", "PD4",
+				       "PD5", "PD7", "PD8", "PD9", "PD10",
+				       "PD12", "PD13", "PD15", "PD16", "PD17";
+				function = "emac";
+				drive-strength = <40>;
+			};
+
 			i2c0_pins: i2c0 {
 				pins = "PA11", "PA12";
 				function = "i2c0";
@@ -375,6 +391,32 @@
 			clocks = <&osc24M>;
 		};
 
+		emac: ethernet@1c30000 {
+			compatible = "allwinner,sun8i-h3-emac";
+			syscon = <&syscon>;
+			reg = <0x01c30000 0x104>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "macirq";
+			resets = <&ccu RST_BUS_EMAC>;
+			reset-names = "stmmaceth";
+			clocks = <&ccu CLK_BUS_EMAC>;
+			clock-names = "stmmaceth";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			mdio: mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				int_mii_phy: ethernet-phy@1 {
+					compatible = "ethernet-phy-ieee802.3-c22";
+					reg = <1>;
+					clocks = <&ccu CLK_BUS_EPHY>;
+					resets = <&ccu RST_BUS_EPHY>;
+				};
+			};
+		};
+
 		spi0: spi@01c68000 {
 			compatible = "allwinner,sun8i-h3-spi";
 			reg = <0x01c68000 0x1000>;
@@ -558,10 +600,11 @@
 		};
 
 		r_ccu: clock@1f01400 {
-			compatible = "allwinner,sun50i-a64-r-ccu";
+			compatible = "allwinner,sun8i-h3-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&osc32k>, <&iosc>;
-			clock-names = "hosc", "losc", "iosc";
+			clocks = <&osc24M>, <&osc32k>, <&iosc>,
+				 <&ccu 9>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 		};
@@ -573,9 +616,9 @@
 
 		ir: ir@01f02000 {
 			compatible = "allwinner,sun5i-a13-ir";
-			clocks = <&r_ccu 4>, <&r_ccu 11>;
+			clocks = <&r_ccu CLK_APB0_IR>, <&r_ccu CLK_IR>;
 			clock-names = "apb", "ir";
-			resets = <&r_ccu 0>;
+			resets = <&r_ccu RST_APB0_IR>;
 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
 			reg = <0x01f02000 0x40>;
 			status = "disabled";
@@ -585,7 +628,7 @@
 			compatible = "allwinner,sun8i-h3-r-pinctrl";
 			reg = <0x01f02c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&r_ccu 3>, <&osc24M>, <&osc32k>;
+			clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>;
 			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			#gpio-cells = <3>;
diff --git a/arch/arm/boot/dts/tegra124-apalis-eval.dts b/arch/arm/boot/dts/tegra124-apalis-eval.dts
index 5b860ad..ecffcd1 100644
--- a/arch/arm/boot/dts/tegra124-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra124-apalis-eval.dts
@@ -63,7 +63,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		pci@1,0 {
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/tegra124-apalis.dtsi b/arch/arm/boot/dts/tegra124-apalis.dtsi
index f9e623b..5d9b18e 100644
--- a/arch/arm/boot/dts/tegra124-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis.dtsi
@@ -54,7 +54,7 @@
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		status = "okay";
 
 		avddio-pex-supply = <&vdd_1v05>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 53994f9..7bacb29 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -27,7 +27,7 @@
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		status = "okay";
 
 		avddio-pex-supply = <&vdd_1v05_run>;
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 187a36c..1b10b14 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -14,7 +14,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		compatible = "nvidia,tegra124-pcie";
 		device_type = "pci";
 		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
@@ -54,6 +54,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
 			reg = <0x000800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -67,6 +68,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
 			reg = <0x001000 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index d4fb4d3..4174969 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -566,7 +566,7 @@
 		nvidia,sys-clock-req-active-high;
 	};
 
-	pcie-controller@80003000 {
+	pcie@80003000 {
 		status = "okay";
 
 		avdd-pex-supply = <&pci_vdd_reg>;
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 27d2bbb..7361f4a 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -478,7 +478,7 @@
 		nvidia,sys-clock-req-active-high;
 	};
 
-	pcie-controller@80003000 {
+	pcie@80003000 {
 		avdd-pex-supply = <&pci_vdd_reg>;
 		vdd-pex-supply = <&pci_vdd_reg>;
 		avdd-pex-pll-supply = <&pci_vdd_reg>;
diff --git a/arch/arm/boot/dts/tegra20-tec.dts b/arch/arm/boot/dts/tegra20-tec.dts
index c12d8be..9cb534f 100644
--- a/arch/arm/boot/dts/tegra20-tec.dts
+++ b/arch/arm/boot/dts/tegra20-tec.dts
@@ -32,7 +32,7 @@
 		};
 	};
 
-	pcie-controller@80003000 {
+	pcie@80003000 {
 		status = "okay";
 
 		pci@1,0 {
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 87b07fb..b902ab5 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -321,7 +321,7 @@
 		nvidia,sys-clock-req-active-high;
 	};
 
-	pcie-controller@80003000 {
+	pcie@80003000 {
 		status = "okay";
 
 		avdd-pex-supply = <&pci_vdd_reg>;
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
deleted file mode 100644
index 1e06f85..0000000
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ /dev/null
@@ -1,636 +0,0 @@
-/dts-v1/;
-
-#include <dt-bindings/input/input.h>
-#include "tegra20.dtsi"
-
-/ {
-	model = "NVIDIA Tegra20 Whistler evaluation board";
-	compatible = "nvidia,whistler", "nvidia,tegra20";
-
-	aliases {
-		rtc0 = "/i2c@7000d000/max8907@3c";
-		rtc1 = "/rtc@7000e000";
-		serial0 = &uarta;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
-	memory {
-		reg = <0x00000000 0x20000000>;
-	};
-
-	host1x@50000000 {
-		hdmi@54280000 {
-			status = "okay";
-
-			vdd-supply = <&hdmi_vdd_reg>;
-			pll-supply = <&hdmi_pll_reg>;
-
-			nvidia,ddc-i2c-bus = <&hdmi_ddc>;
-			nvidia,hpd-gpio = <&gpio TEGRA_GPIO(N, 7)
-				GPIO_ACTIVE_HIGH>;
-		};
-	};
-
-	pinmux@70000014 {
-		pinctrl-names = "default";
-		pinctrl-0 = <&state_default>;
-
-		state_default: pinmux {
-			ata {
-				nvidia,pins = "ata", "atb", "ate", "gma", "gmb",
-					"gmc", "gmd", "gpu";
-				nvidia,function = "gmi";
-			};
-			atc {
-				nvidia,pins = "atc", "atd";
-				nvidia,function = "sdio4";
-			};
-			cdev1 {
-				nvidia,pins = "cdev1";
-				nvidia,function = "plla_out";
-			};
-			cdev2 {
-				nvidia,pins = "cdev2";
-				nvidia,function = "osc";
-			};
-			crtp {
-				nvidia,pins = "crtp";
-				nvidia,function = "crt";
-			};
-			csus {
-				nvidia,pins = "csus";
-				nvidia,function = "vi_sensor_clk";
-			};
-			dap1 {
-				nvidia,pins = "dap1";
-				nvidia,function = "dap1";
-			};
-			dap2 {
-				nvidia,pins = "dap2";
-				nvidia,function = "dap2";
-			};
-			dap3 {
-				nvidia,pins = "dap3";
-				nvidia,function = "dap3";
-			};
-			dap4 {
-				nvidia,pins = "dap4";
-				nvidia,function = "dap4";
-			};
-			ddc {
-				nvidia,pins = "ddc";
-				nvidia,function = "i2c2";
-			};
-			dta {
-				nvidia,pins = "dta", "dtb", "dtc", "dtd";
-				nvidia,function = "vi";
-			};
-			dte {
-				nvidia,pins = "dte";
-				nvidia,function = "rsvd1";
-			};
-			dtf {
-				nvidia,pins = "dtf";
-				nvidia,function = "i2c3";
-			};
-			gme {
-				nvidia,pins = "gme";
-				nvidia,function = "dap5";
-			};
-			gpu7 {
-				nvidia,pins = "gpu7";
-				nvidia,function = "rtck";
-			};
-			gpv {
-				nvidia,pins = "gpv";
-				nvidia,function = "pcie";
-			};
-			hdint {
-				nvidia,pins = "hdint", "pta";
-				nvidia,function = "hdmi";
-			};
-			i2cp {
-				nvidia,pins = "i2cp";
-				nvidia,function = "i2cp";
-			};
-			irrx {
-				nvidia,pins = "irrx", "irtx";
-				nvidia,function = "uartb";
-			};
-			kbca {
-				nvidia,pins = "kbca", "kbcc", "kbce", "kbcf";
-				nvidia,function = "kbc";
-			};
-			kbcb {
-				nvidia,pins = "kbcb", "kbcd";
-				nvidia,function = "sdio2";
-			};
-			lcsn {
-				nvidia,pins = "lcsn", "lsck", "lsda", "lsdi",
-					"spia", "spib", "spic";
-				nvidia,function = "spi3";
-			};
-			ld0 {
-				nvidia,pins = "ld0", "ld1", "ld2", "ld3", "ld4",
-					"ld5", "ld6", "ld7", "ld8", "ld9",
-					"ld10", "ld11", "ld12", "ld13", "ld14",
-					"ld15", "ld16", "ld17", "ldc", "ldi",
-					"lhp0", "lhp1", "lhp2", "lhs", "lm0",
-					"lm1", "lpp", "lpw0", "lpw1", "lpw2",
-					"lsc0", "lsc1", "lspi", "lvp0", "lvp1",
-					"lvs";
-				nvidia,function = "displaya";
-			};
-			owc {
-				nvidia,pins = "owc", "uac";
-				nvidia,function = "owr";
-			};
-			pmc {
-				nvidia,pins = "pmc";
-				nvidia,function = "pwr_on";
-			};
-			rm {
-				nvidia,pins = "rm";
-				nvidia,function = "i2c1";
-			};
-			sdb {
-				nvidia,pins = "sdb", "sdc", "sdd", "slxa",
-					"slxc", "slxd", "slxk";
-				nvidia,function = "sdio3";
-			};
-			sdio1 {
-				nvidia,pins = "sdio1";
-				nvidia,function = "sdio1";
-			};
-			spdi {
-				nvidia,pins = "spdi", "spdo";
-				nvidia,function = "rsvd2";
-			};
-			spid {
-				nvidia,pins = "spid", "spie", "spig", "spih";
-				nvidia,function = "spi2_alt";
-			};
-			spif {
-				nvidia,pins = "spif";
-				nvidia,function = "spi2";
-			};
-			uaa {
-				nvidia,pins = "uaa", "uab";
-				nvidia,function = "uarta";
-			};
-			uad {
-				nvidia,pins = "uad";
-				nvidia,function = "irda";
-			};
-			uca {
-				nvidia,pins = "uca", "ucb";
-				nvidia,function = "uartc";
-			};
-			uda {
-				nvidia,pins = "uda";
-				nvidia,function = "spi1";
-			};
-			conf_ata {
-				nvidia,pins = "ata", "atb", "atc", "ddc", "gma",
-					"gmb", "gmc", "gmd", "irrx", "irtx",
-					"kbca", "kbcb", "kbcc", "kbcd", "kbce",
-					"kbcf", "sdc", "sdd", "spie", "spig",
-					"spih", "uaa", "uab", "uad", "uca",
-					"ucb";
-				nvidia,pull = <TEGRA_PIN_PULL_UP>;
-				nvidia,tristate = <TEGRA_PIN_DISABLE>;
-			};
-			conf_atd {
-				nvidia,pins = "atd", "ate", "cdev1", "csus",
-					"dap1", "dap2", "dap3", "dap4", "dte",
-					"dtf", "gpu", "gpu7", "gpv", "i2cp",
-					"rm", "sdio1", "slxa", "slxc", "slxd",
-					"slxk", "spdi", "spdo", "uac", "uda";
-				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
-				nvidia,tristate = <TEGRA_PIN_DISABLE>;
-			};
-			conf_cdev2 {
-				nvidia,pins = "cdev2", "spia", "spib";
-				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
-				nvidia,tristate = <TEGRA_PIN_ENABLE>;
-			};
-			conf_ck32 {
-				nvidia,pins = "ck32", "ddrc", "lc", "pmca",
-					"pmcb", "pmcc", "pmcd", "xm2c",
-					"xm2d";
-				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
-			};
-			conf_crtp {
-				nvidia,pins = "crtp";
-				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
-				nvidia,tristate = <TEGRA_PIN_ENABLE>;
-			};
-			conf_dta {
-				nvidia,pins = "dta", "dtb", "dtc", "dtd",
-					"spid", "spif";
-				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
-				nvidia,tristate = <TEGRA_PIN_DISABLE>;
-			};
-			conf_gme {
-				nvidia,pins = "gme", "owc", "pta", "spic";
-				nvidia,pull = <TEGRA_PIN_PULL_UP>;
-				nvidia,tristate = <TEGRA_PIN_ENABLE>;
-			};
-			conf_ld17_0 {
-				nvidia,pins = "ld17_0", "ld19_18", "ld21_20",
-					"ld23_22";
-				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
-			};
-			conf_ls {
-				nvidia,pins = "ls", "pmce";
-				nvidia,pull = <TEGRA_PIN_PULL_UP>;
-			};
-			drive_dap1 {
-				nvidia,pins = "drive_dap1";
-				nvidia,high-speed-mode = <TEGRA_PIN_DISABLE>;
-				nvidia,schmitt = <TEGRA_PIN_ENABLE>;
-				nvidia,low-power-mode = <TEGRA_PIN_LP_DRIVE_DIV_8>;
-				nvidia,pull-down-strength = <0>;
-				nvidia,pull-up-strength = <0>;
-				nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
-				nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
-			};
-		};
-	};
-
-	i2s@70002800 {
-		status = "okay";
-	};
-
-	serial@70006000 {
-		status = "okay";
-	};
-
-	hdmi_ddc: i2c@7000c400 {
-		status = "okay";
-		clock-frequency = <100000>;
-	};
-
-	i2c@7000d000 {
-		status = "okay";
-		clock-frequency = <100000>;
-
-		codec: codec@1a {
-			compatible = "wlf,wm8753";
-			reg = <0x1a>;
-		};
-
-		tca6416: gpio@20 {
-			compatible = "ti,tca6416";
-			reg = <0x20>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		max8907@3c {
-			compatible = "maxim,max8907";
-			reg = <0x3c>;
-			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-
-			maxim,system-power-controller;
-
-			mbatt-supply = <&usb0_vbus_reg>;
-			in-v1-supply = <&mbatt_reg>;
-			in-v2-supply = <&mbatt_reg>;
-			in-v3-supply = <&mbatt_reg>;
-			in1-supply = <&mbatt_reg>;
-			in2-supply = <&nvvdd_sv3_reg>;
-			in3-supply = <&mbatt_reg>;
-			in4-supply = <&mbatt_reg>;
-			in5-supply = <&mbatt_reg>;
-			in6-supply = <&mbatt_reg>;
-			in7-supply = <&mbatt_reg>;
-			in8-supply = <&mbatt_reg>;
-			in9-supply = <&mbatt_reg>;
-			in10-supply = <&mbatt_reg>;
-			in11-supply = <&mbatt_reg>;
-			in12-supply = <&mbatt_reg>;
-			in13-supply = <&mbatt_reg>;
-			in14-supply = <&mbatt_reg>;
-			in15-supply = <&mbatt_reg>;
-			in16-supply = <&mbatt_reg>;
-			in17-supply = <&nvvdd_sv3_reg>;
-			in18-supply = <&nvvdd_sv3_reg>;
-			in19-supply = <&mbatt_reg>;
-			in20-supply = <&mbatt_reg>;
-
-			regulators {
-				mbatt_reg: mbatt {
-					regulator-name = "vbat_pmu";
-					regulator-always-on;
-				};
-
-				sd1 {
-					regulator-name = "nvvdd_sv1,vdd_cpu_pmu";
-					regulator-min-microvolt = <1000000>;
-					regulator-max-microvolt = <1000000>;
-					regulator-always-on;
-				};
-
-				sd2 {
-					regulator-name = "nvvdd_sv2,vdd_core";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-					regulator-always-on;
-				};
-
-				nvvdd_sv3_reg: sd3 {
-					regulator-name = "nvvdd_sv3";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-				};
-
-				ldo1 {
-					regulator-name = "nvvdd_ldo1,vddio_rx_ddr,vcore_acc";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-always-on;
-				};
-
-				ldo2 {
-					regulator-name = "nvvdd_ldo2,avdd_pll*";
-					regulator-min-microvolt = <1100000>;
-					regulator-max-microvolt = <1100000>;
-					regulator-always-on;
-				};
-
-				ldo3 {
-					regulator-name = "nvvdd_ldo3,vcom_1v8b";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-				};
-
-				ldo4 {
-					regulator-name = "nvvdd_ldo4,avdd_usb*";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-always-on;
-				};
-
-				ldo5 {
-					regulator-name = "nvvdd_ldo5,vcore_mmc,avdd_lcd1,vddio_1wire";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-					regulator-always-on;
-				};
-
-				hdmi_pll_reg: ldo6 {
-					regulator-name = "nvvdd_ldo6,avdd_hdmi_pll";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-
-				ldo7 {
-					regulator-name = "nvvdd_ldo7,avddio_audio";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-					regulator-always-on;
-				};
-
-				ldo8 {
-					regulator-name = "nvvdd_ldo8,vcom_3v0,vcore_cmps";
-					regulator-min-microvolt = <3000000>;
-					regulator-max-microvolt = <3000000>;
-				};
-
-				ldo9 {
-					regulator-name = "nvvdd_ldo9,avdd_cam*";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo10 {
-					regulator-name = "nvvdd_ldo10,avdd_usb_ic_3v0";
-					regulator-min-microvolt = <3000000>;
-					regulator-max-microvolt = <3000000>;
-					regulator-always-on;
-				};
-
-				hdmi_vdd_reg: ldo11 {
-					regulator-name = "nvvdd_ldo11,vddio_pex_clk,vcom_33,avdd_hdmi";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-				};
-
-				ldo12 {
-					regulator-name = "nvvdd_ldo12,vddio_sdio";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-					regulator-always-on;
-				};
-
-				ldo13 {
-					regulator-name = "nvvdd_ldo13,vcore_phtn,vdd_af";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo14 {
-					regulator-name = "nvvdd_ldo14,avdd_vdac";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo15 {
-					regulator-name = "nvvdd_ldo15,vcore_temp,vddio_hdcp";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-				};
-
-				ldo16 {
-					regulator-name = "nvvdd_ldo16,vdd_dbrtr";
-					regulator-min-microvolt = <1300000>;
-					regulator-max-microvolt = <1300000>;
-				};
-
-				ldo17 {
-					regulator-name = "nvvdd_ldo17,vddio_mipi";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-				};
-
-				ldo18 {
-					regulator-name = "nvvdd_ldo18,vddio_vi,vcore_cam*";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-
-				ldo19 {
-					regulator-name = "nvvdd_ldo19,avdd_lcd2,vddio_lx";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo20 {
-					regulator-name = "nvvdd_ldo20,vddio_ddr_1v2,vddio_hsic,vcom_1v2";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-					regulator-always-on;
-				};
-
-				out5v {
-					regulator-name = "usb0_vbus_reg";
-				};
-
-				out33v {
-					regulator-name = "pmu_out3v3";
-				};
-
-				bbat {
-					regulator-name = "pmu_bbat";
-					regulator-min-microvolt = <2400000>;
-					regulator-max-microvolt = <2400000>;
-					regulator-always-on;
-				};
-
-				sdby {
-					regulator-name = "vdd_aon";
-					regulator-always-on;
-				};
-
-				vrtc {
-					regulator-name = "vrtc,pmu_vccadc";
-					regulator-always-on;
-				};
-			};
-		};
-	};
-
-	kbc@7000e200 {
-		status = "okay";
-		nvidia,debounce-delay-ms = <20>;
-		nvidia,repeat-delay-ms = <160>;
-		nvidia,kbc-row-pins = <0 1 2>;
-		nvidia,kbc-col-pins = <16 17>;
-		wakeup-source;
-		linux,keymap = <MATRIX_KEY(0x00, 0x00, KEY_POWER)
-				MATRIX_KEY(0x01, 0x00, KEY_HOME)
-				MATRIX_KEY(0x01, 0x01, KEY_BACK)
-				MATRIX_KEY(0x02, 0x01, KEY_MENU)>;
-	};
-
-	pmc@7000e400 {
-		nvidia,invert-interrupt;
-		nvidia,suspend-mode = <1>;
-		nvidia,cpu-pwr-good-time = <2000>;
-		nvidia,cpu-pwr-off-time = <1000>;
-		nvidia,core-pwr-good-time = <0 3845>;
-		nvidia,core-pwr-off-time = <93727>;
-		nvidia,core-power-req-active-high;
-		nvidia,sys-clock-req-active-high;
-		nvidia,combined-power-req;
-	};
-
-	usb@c5000000 {
-		status = "okay";
-	};
-
-	usb-phy@c5000000 {
-		status = "okay";
-		vbus-supply = <&vbus1_reg>;
-	};
-
-	usb@c5008000 {
-		status = "okay";
-	};
-
-	usb-phy@c5008000 {
-		status = "okay";
-		vbus-supply = <&vbus3_reg>;
-	};
-
-	sdhci@c8000400 {
-		status = "okay";
-		cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
-		wp-gpios = <&gpio TEGRA_GPIO(V, 5) GPIO_ACTIVE_HIGH>;
-		bus-width = <8>;
-	};
-
-	sdhci@c8000600 {
-		status = "okay";
-		bus-width = <8>;
-		non-removable;
-	};
-
-	clocks {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		clk32k_in: clock@0 {
-			compatible = "fixed-clock";
-			reg = <0>;
-			#clock-cells = <0>;
-			clock-frequency = <32768>;
-		};
-	};
-
-	regulators {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		usb0_vbus_reg: regulator@0 {
-			compatible = "regulator-fixed";
-			reg = <0>;
-			regulator-name = "usb0_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			regulator-always-on;
-		};
-
-		vbus1_reg: regulator@2 {
-			compatible = "regulator-fixed";
-			reg = <2>;
-			regulator-name = "vbus1";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			enable-active-high;
-			gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
-			regulator-always-on;
-			regulator-boot-on;
-		};
-
-		vbus3_reg: regulator@3 {
-			compatible = "regulator-fixed";
-			reg = <3>;
-			regulator-name = "vbus3";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			enable-active-high;
-			gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
-			regulator-always-on;
-			regulator-boot-on;
-		};
-	};
-
-	sound {
-		compatible = "nvidia,tegra-audio-wm8753-whistler",
-			     "nvidia,tegra-audio-wm8753";
-		nvidia,model = "NVIDIA Tegra Whistler";
-
-		nvidia,audio-routing =
-			"Headphone Jack", "LOUT1",
-			"Headphone Jack", "ROUT1",
-			"MIC2", "Mic Jack",
-			"MIC2N", "Mic Jack";
-
-		nvidia,i2s-controller = <&tegra_i2s1>;
-		nvidia,audio-codec = <&codec>;
-
-		clocks = <&tegra_car TEGRA20_CLK_PLL_A>,
-			 <&tegra_car TEGRA20_CLK_PLL_A_OUT0>,
-			 <&tegra_car TEGRA20_CLK_CDEV1>;
-		clock-names = "pll_a", "pll_a_out0", "mclk";
-	};
-};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index e880750..7c85f97 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -586,7 +586,7 @@
 		reset-names = "fuse";
 	};
 
-	pcie-controller@80003000 {
+	pcie@80003000 {
 		compatible = "nvidia,tegra20-pcie";
 		device_type = "pci";
 		reg = <0x80003000 0x00000800   /* PADS registers */
@@ -625,6 +625,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x80000000 0 0x1000>;
 			reg = <0x000800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -638,6 +639,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001000 0 0x80001000 0 0x1000>;
 			reg = <0x001000 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index 99a6945..fc530e4 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -21,7 +21,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	pcie-controller@00003000 {
+	pcie@3000 {
 		status = "okay";
 
 		pci@1,0 {
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index f6c7c3e9..7a6a1a0 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -9,7 +9,7 @@
 	model = "Toradex Apalis T30";
 	compatible = "toradex,apalis_t30", "nvidia,tegra30";
 
-	pcie-controller@00003000 {
+	pcie@3000 {
 		avdd-pexa-supply = <&vdd2_reg>;
 		vdd-pexa-supply = <&vdd2_reg>;
 		avdd-pexb-supply = <&vdd2_reg>;
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index 0350002..4f41b18 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -20,7 +20,7 @@
 		reg = <0x80000000 0x7ff00000>;
 	};
 
-	pcie-controller@00003000 {
+	pcie@3000 {
 		status = "okay";
 
 		avdd-pexa-supply = <&ldo1_reg>;
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index f11012b..83dc14a 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -43,7 +43,7 @@
 		reg = <0x80000000 0x40000000>;
 	};
 
-	pcie-controller@00003000 {
+	pcie@3000 {
 		status = "okay";
 
 		/* AVDD_PEXA and VDD_PEXA inputs are grounded on Cardhu. */
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index bbb1c00..13960fd 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -10,7 +10,7 @@
 	compatible = "nvidia,tegra30";
 	interrupt-parent = <&lic>;
 
-	pcie-controller@00003000 {
+	pcie@3000 {
 		compatible = "nvidia,tegra30-pcie";
 		device_type = "pci";
 		reg = <0x00003000 0x00000800   /* PADS registers */
@@ -51,6 +51,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x00000000 0 0x1000>;
 			reg = <0x000800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -64,6 +65,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001000 0 0x00001000 0 0x1000>;
 			reg = <0x001000 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -77,6 +79,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001800 0 0x00004000 0 0x1000>;
 			reg = <0x001800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
diff --git a/arch/arm/boot/dts/tny_a9260_common.dtsi b/arch/arm/boot/dts/tny_a9260_common.dtsi
index f9dc463..5d83df4 100644
--- a/arch/arm/boot/dts/tny_a9260_common.dtsi
+++ b/arch/arm/boot/dts/tny_a9260_common.dtsi
@@ -32,50 +32,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/tny_a9263.dts b/arch/arm/boot/dts/tny_a9263.dts
index 9161cd98..8cf0a9e 100644
--- a/arch/arm/boot/dts/tny_a9263.dts
+++ b/arch/arm/boot/dts/tny_a9263.dts
@@ -42,50 +42,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi0: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index 36ae916..16533b6 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -23,6 +23,8 @@
 		compatible = "ti,twl4030-bci";
 		interrupts = <9>, <2>;
 		bci3v1-supply = <&vusb3v1>;
+		io-channels = <&twl_madc 11>;
+		io-channel-names = "vac";
 	};
 
 	watchdog {
diff --git a/arch/arm/boot/dts/uniphier-ld4-ref.dts b/arch/arm/boot/dts/uniphier-ld4-ref.dts
index e0da4ee..4817ebb2 100644
--- a/arch/arm/boot/dts/uniphier-ld4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld4-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 4f5fe15..fb2fd96 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 / {
@@ -201,7 +165,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
index a397a88..96db4ab 100644
--- a/arch/arm/boot/dts/uniphier-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-ld6b.dtsi b/arch/arm/boot/dts/uniphier-ld6b.dtsi
index 905c77d..8b9a797 100644
--- a/arch/arm/boot/dts/uniphier-ld6b.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld6b.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /*
diff --git a/arch/arm/boot/dts/uniphier-pro4-ace.dts b/arch/arm/boot/dts/uniphier-pro4-ace.dts
index fefc891..11690b5 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ace.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ace.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
@@ -88,7 +52,7 @@
 	status = "okay";
 
 	eeprom@54 {
-		compatible = "st,24c64";
+		compatible = "st,24c64", "atmel,24c64";
 		reg = <0x54>;
 		pagesize = <32>;
 	};
diff --git a/arch/arm/boot/dts/uniphier-pro4-ref.dts b/arch/arm/boot/dts/uniphier-pro4-ref.dts
index 6077e63..4cf5392 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-pro4-sanji.dts b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
index 6c63c8b..2763ceb 100644
--- a/arch/arm/boot/dts/uniphier-pro4-sanji.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
@@ -83,7 +47,7 @@
 	status = "okay";
 
 	eeprom@54 {
-		compatible = "st,24c64";
+		compatible = "st,24c64", "atmel,24c64";
 		reg = <0x54>;
 		pagesize = <32>;
 	};
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index 794a85a..37400be 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 / {
@@ -233,7 +197,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index df07b55..9577769 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -77,67 +77,67 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@100000000 {
+		opp-100000000 {
 			opp-hz = /bits/ 64 <100000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@116667000 {
+		opp-116667000 {
 			opp-hz = /bits/ 64 <116667000>;
 			clock-latency-ns = <300>;
 		};
-		opp@150000000 {
+		opp-150000000 {
 			opp-hz = /bits/ 64 <150000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@175000000 {
+		opp-175000000 {
 			opp-hz = /bits/ 64 <175000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@200000000 {
+		opp-200000000 {
 			opp-hz = /bits/ 64 <200000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@233334000 {
+		opp-233334000 {
 			opp-hz = /bits/ 64 <233334000>;
 			clock-latency-ns = <300>;
 		};
-		opp@300000000 {
+		opp-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@350000000 {
+		opp-350000000 {
 			opp-hz = /bits/ 64 <350000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@400000000 {
+		opp-400000000 {
 			opp-hz = /bits/ 64 <400000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@466667000 {
+		opp-466667000 {
 			opp-hz = /bits/ 64 <466667000>;
 			clock-latency-ns = <300>;
 		};
-		opp@600000000 {
+		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@700000000 {
+		opp-700000000 {
 			opp-hz = /bits/ 64 <700000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@800000000 {
+		opp-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@933334000 {
+		opp-933334000 {
 			opp-hz = /bits/ 64 <933334000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1400000000 {
+		opp-1400000000 {
 			opp-hz = /bits/ 64 <1400000000>;
 			clock-latency-ns = <300>;
 		};
@@ -320,7 +320,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
index cccc866..81560f7 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
@@ -80,7 +44,7 @@
 	status = "okay";
 
 	eeprom@54 {
-		compatible = "st,24c64";
+		compatible = "st,24c64", "atmel,24c64";
 		reg = <0x54>;
 		pagesize = <32>;
 	};
diff --git a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
index 803a39aa3..dc2d057 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 58c3e2f..bace751 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 / {
@@ -97,35 +61,35 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@100000000 {
+		opp-100000000 {
 			opp-hz = /bits/ 64 <100000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@150000000 {
+		opp-150000000 {
 			opp-hz = /bits/ 64 <150000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@200000000 {
+		opp-200000000 {
 			opp-hz = /bits/ 64 <200000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@300000000 {
+		opp-300000000 {
 			opp-hz = /bits/ 64 <300000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@400000000 {
+		opp-400000000 {
 			opp-hz = /bits/ 64 <400000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@600000000 {
+		opp-600000000 {
 			opp-hz = /bits/ 64 <600000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@800000000 {
+		opp-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1200000000 {
+		opp-1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
 			clock-latency-ns = <300>;
 		};
@@ -304,7 +268,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
index c62ae1a..7a1c29b 100644
--- a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
+++ b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2017 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 &i2c0 {
diff --git a/arch/arm/boot/dts/uniphier-sld3-ref.dts b/arch/arm/boot/dts/uniphier-sld3-ref.dts
index eb63dcc..70cda39 100644
--- a/arch/arm/boot/dts/uniphier-sld3-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld3-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-sld3.dtsi b/arch/arm/boot/dts/uniphier-sld3.dtsi
index 01d77ed..4082879 100644
--- a/arch/arm/boot/dts/uniphier-sld3.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld3.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 / {
@@ -216,7 +180,7 @@
 			#size-cells = <1>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-sld8-ref.dts b/arch/arm/boot/dts/uniphier-sld8-ref.dts
index 737d276..4536d5b7 100644
--- a/arch/arm/boot/dts/uniphier-sld8-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld8-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index eb06fdc..9fb9167 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 / {
@@ -201,7 +165,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
diff --git a/arch/arm/boot/dts/uniphier-support-card.dtsi b/arch/arm/boot/dts/uniphier-support-card.dtsi
index f61dfec..6c825f1 100644
--- a/arch/arm/boot/dts/uniphier-support-card.dtsi
+++ b/arch/arm/boot/dts/uniphier-support-card.dtsi
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2017 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 &system_bus {
@@ -53,14 +17,14 @@
 		#size-cells = <1>;
 		ranges = <0x00000000 1 0x01f00000 0x00100000>;
 
-		ethsc: ethernet@00000000 {
+		ethsc: ethernet@0 {
 			compatible = "smsc,lan9118", "smsc,lan9115";
 			reg = <0x00000000 0x1000>;
 			phy-mode = "mii";
 			reg-io-width = <4>;
 		};
 
-		serialsc: uart@000b0000 {
+		serialsc: uart@b0000 {
 			compatible = "ns16550a";
 			reg = <0x000b0000 0x20>;
 			clock-frequency = <12288000>;
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 7514b34..34a4999 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -34,50 +34,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioC 13 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioC 14 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index bfc48a2..482381c 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -62,50 +62,69 @@
 			};
 		};
 
-		nand0: nand@40000000 {
-			nand-bus-width = <8>;
-			nand-ecc-mode = "soft";
-			nand-on-flash-bbt;
+		ebi0: ebi@10000000 {
 			status = "okay";
 
-			at91bootstrap@0 {
-				label = "at91bootstrap";
-				reg = <0x0 0x20000>;
-			};
+			nand_controller: nand-controller {
+				status = "okay";
+				pinctrl-0 = <&pinctrl_nand_cs &pinctrl_nand_rb>;
+				pinctrl-names = "default";
 
-			barebox@20000 {
-				label = "barebox";
-				reg = <0x20000 0x40000>;
-			};
+				nand@3 {
+					reg = <0x3 0x0 0x800000>;
+					rb-gpios = <&pioA 22 GPIO_ACTIVE_HIGH>;
+					cs-gpios = <&pioA 15 GPIO_ACTIVE_HIGH>;
+					nand-bus-width = <8>;
+					nand-ecc-mode = "soft";
+					nand-on-flash-bbt;
+					label = "atmel_nand";
 
-			bareboxenv@60000 {
-				label = "bareboxenv";
-				reg = <0x60000 0x20000>;
-			};
+					partitions {
+						compatible = "fixed-partitions";
+						#address-cells = <1>;
+						#size-cells = <1>;
 
-			bareboxenv2@80000 {
-				label = "bareboxenv2";
-				reg = <0x80000 0x20000>;
-			};
+						at91bootstrap@0 {
+							label = "at91bootstrap";
+							reg = <0x0 0x20000>;
+						};
 
-			oftree@80000 {
-				label = "oftree";
-				reg = <0xa0000 0x20000>;
-			};
+						barebox@20000 {
+							label = "barebox";
+							reg = <0x20000 0x40000>;
+						};
 
-			kernel@a0000 {
-				label = "kernel";
-				reg = <0xc0000 0x400000>;
-			};
+						bareboxenv@60000 {
+							label = "bareboxenv";
+							reg = <0x60000 0x20000>;
+						};
 
-			rootfs@4a0000 {
-				label = "rootfs";
-				reg = <0x4c0000 0x7800000>;
-			};
+						bareboxenv2@80000 {
+							label = "bareboxenv2";
+							reg = <0x80000 0x20000>;
+						};
 
-			data@7ca0000 {
-				label = "data";
-				reg = <0x7cc0000 0x8340000>;
+						oftree@80000 {
+							label = "oftree";
+							reg = <0xa0000 0x20000>;
+						};
+
+						kernel@a0000 {
+							label = "kernel";
+							reg = <0xc0000 0x400000>;
+						};
+
+						rootfs@4a0000 {
+							label = "rootfs";
+							reg = <0x4c0000 0x7800000>;
+						};
+
+						data@7ca0000 {
+							label = "data";
+							reg = <0x7cc0000 0x8340000>;
+						};
+					};
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb2..06e2331 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
 	model = "ARM Versatile PB";
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 37f9542..acdf12a 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -78,6 +78,7 @@
 				interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
+				eeprom-length = <512>;
 
 				ports {
 					#address-cells = <1>;
@@ -163,6 +164,7 @@
 				interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
+				eeprom-length = <512>;
 
 				ports {
 					#address-cells = <1>;
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf06247..2b913f1 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
 	 * on the CPU.
 	 */
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 
 	/* should never get here */
 	BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
 	__mcpm_cpu_down(cpu, cluster);
 
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 	BUG();
 }
 
diff --git a/arch/arm/configs/acs5k_defconfig b/arch/arm/configs/acs5k_defconfig
index 92b0f90..d04ee19 100644
--- a/arch/arm/configs/acs5k_defconfig
+++ b/arch/arm/configs/acs5k_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -30,13 +29,9 @@
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -49,8 +44,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_ARM_KS8695_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PRISM54=m
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -69,7 +62,6 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_SUMMARY=y
diff --git a/arch/arm/configs/acs5k_tiny_defconfig b/arch/arm/configs/acs5k_tiny_defconfig
index 2a27a14..25c593d 100644
--- a/arch/arm/configs/acs5k_tiny_defconfig
+++ b/arch/arm/configs/acs5k_tiny_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -25,13 +24,9 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -40,12 +35,9 @@
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
 # CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_ARM_KS8695_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -62,11 +54,9 @@
 CONFIG_WATCHDOG=y
 CONFIG_KS8695_WATCHDOG=y
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_SUMMARY=y
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 113a5d8..8c9b6ea 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="gum"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -30,12 +29,9 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -43,7 +39,6 @@
 CONFIG_BT_HCIUART_H4=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -88,7 +83,6 @@
 CONFIG_MMC=y
 CONFIG_MMC_PXA=y
 # CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=y
 CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_COMPRESSION_OPTIONS=y
@@ -101,7 +95,6 @@
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
diff --git a/arch/arm/configs/assabet_defconfig b/arch/arm/configs/assabet_defconfig
index ab19ff1..04c86ff 100644
--- a/arch/arm/configs/assabet_defconfig
+++ b/arch/arm/configs/assabet_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
@@ -22,9 +21,7 @@
 CONFIG_IRLAN=m
 CONFIG_SA1100_FIR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index d3260d7..8e17e7e 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -74,7 +74,6 @@
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_NETWORK_PHY_TIMESTAMPING=y
 CONFIG_BRIDGE=y
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 2a604aa..5ae5b52 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_EXPERT=y
 CONFIG_MODULES=y
@@ -23,14 +22,11 @@
 CONFIG_IRDA_ULTRA=y
 CONFIG_SA1100_FIR=y
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIVHCI=m
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
 CONFIG_MTD_DEBUG=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -113,5 +109,4 @@
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 3ba8cd3..3ee9d78 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -24,6 +24,8 @@
 CONFIG_OPROFILE=y
 CONFIG_JUMP_LABEL=y
 CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_MULTI_V6=y
 CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM2835=y
@@ -86,8 +88,14 @@
 CONFIG_SND_SOC=y
 CONFIG_SND_BCM2835_SOC_I2S=y
 CONFIG_USB=y
+CONFIG_USB_OTG=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_DWC2=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_G_SERIAL=m
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/cerfcube_defconfig b/arch/arm/configs/cerfcube_defconfig
index 57a2a18..3f910bb 100644
--- a/arch/arm/configs/cerfcube_defconfig
+++ b/arch/arm/configs/cerfcube_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -29,10 +28,8 @@
 CONFIG_IP_PNP_RARP=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -70,5 +67,4 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index 3b32d5f..fb45b49 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
@@ -39,12 +38,9 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_BNEP=m
 CONFIG_BT_HIDP=m
@@ -53,7 +49,6 @@
 CONFIG_FW_LOADER=m
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -69,7 +64,6 @@
 CONFIG_MTD_NAND_PLATFORM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=m
@@ -82,8 +76,6 @@
 CONFIG_NET_PCI=y
 CONFIG_8139TOO=m
 # CONFIG_8139TOO_PIO is not set
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=m
 CONFIG_PPP_MULTILINK=y
 CONFIG_PPP_FILTER=y
@@ -144,7 +136,6 @@
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
@@ -160,7 +151,6 @@
 CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=m
 # CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
@@ -179,10 +169,7 @@
 # CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig
index 7df040e..c0418e03 100644
--- a/arch/arm/configs/cm_x300_defconfig
+++ b/arch/arm/configs/cm_x300_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-cm-x300"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
@@ -35,12 +34,9 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -51,8 +47,6 @@
 CONFIG_LIB80211=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_PXA3xx=y
@@ -66,8 +60,6 @@
 CONFIG_DM9000=y
 CONFIG_DM9000_DEBUGLEVEL=0
 CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
@@ -129,7 +121,6 @@
 CONFIG_HID_THRUSTMASTER=y
 CONFIG_HID_ZEROPLUS=y
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
@@ -165,7 +156,6 @@
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
index b1ff5cd..63a953d 100644
--- a/arch/arm/configs/cns3420vb_defconfig
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -31,9 +30,7 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
@@ -41,7 +38,6 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=20000
-# CONFIG_MISC_DEVICES is not set
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 # CONFIG_SATA_PMP is not set
@@ -56,20 +52,17 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
-CONFIG_INOTIFY=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_FSCACHE=y
 CONFIG_TMPFS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index 3146ad0..8995695 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -32,10 +31,8 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
 CONFIG_VLAN_8021Q=m
 CONFIG_IRDA=m
 CONFIG_IRLAN=m
@@ -45,8 +42,6 @@
 CONFIG_IRDA_FAST_RR=y
 CONFIG_IRTTY_SIR=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -57,8 +52,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=y
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -87,8 +80,6 @@
 CONFIG_PHYLIB=y
 CONFIG_NET_ETHERNET=y
 CONFIG_DM9000=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_HOSTAP=y
 CONFIG_HOSTAP_FIRMWARE=y
 CONFIG_HOSTAP_FIRMWARE_NVRAM=y
@@ -123,7 +114,6 @@
 CONFIG_LOGO=y
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_SERIAL=m
 CONFIG_USB_GADGET=m
 CONFIG_USB_GADGET_DUMMY_HCD=y
@@ -132,7 +122,6 @@
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_HCTOSYS is not set
 CONFIG_RTC_DRV_PCF8583=m
-CONFIG_INOTIFY=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
@@ -160,9 +149,7 @@
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_KEYS=y
 CONFIG_SECURITY=y
diff --git a/arch/arm/configs/colibri_pxa300_defconfig b/arch/arm/configs/colibri_pxa300_defconfig
index be02fe2..d282e8b 100644
--- a/arch/arm/configs/colibri_pxa300_defconfig
+++ b/arch/arm/configs/colibri_pxa300_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -14,18 +13,14 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_AX88796=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -45,7 +40,6 @@
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_MON=y
@@ -54,16 +48,13 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_PXA=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_ECB=y
 CONFIG_CRYPTO_AES=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index a8f3c59..d398ae5 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -26,8 +25,6 @@
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -91,5 +88,4 @@
 # CONFIG_DETECT_SOFTLOCKUP is not set
 CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 462533b..09e1672 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -33,26 +32,22 @@
 CONFIG_XFRM_USER=m
 CONFIG_INET=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_RAW=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -69,8 +64,6 @@
 CONFIG_IRCOMM=m
 CONFIG_PXA_FICP=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -90,9 +83,7 @@
 CONFIG_BT_HCIVHCI=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ROM=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
@@ -209,7 +200,6 @@
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
 CONFIG_USB_CYTHERM=m
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_GADGET=y
@@ -221,7 +211,6 @@
 CONFIG_MMC=y
 CONFIG_MMC_PXA=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -243,9 +232,7 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_TEST=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 67db829..06e2e2a 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -58,6 +58,7 @@
 # CONFIG_FW_LOADER is not set
 CONFIG_DA8XX_MSTPRI=y
 CONFIG_MTD=m
+CONFIG_MTD_TESTS=m
 CONFIG_MTD_BLOCK=m
 CONFIG_MTD_CFI=m
 CONFIG_MTD_CFI_INTELEXT=m
@@ -83,14 +84,13 @@
 CONFIG_TUN=m
 CONFIG_DM9000=y
 CONFIG_TI_DAVINCI_EMAC=y
+CONFIG_LSI_ET1011C_PHY=y
 CONFIG_LXT_PHY=y
 CONFIG_SMSC_PHY=y
-CONFIG_LSI_ET1011C_PHY=y
 CONFIG_PPP=m
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=m
 CONFIG_INPUT_EVBUG=m
 CONFIG_KEYBOARD_ATKBD=m
@@ -122,6 +122,7 @@
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_GPIO=y
+CONFIG_BATTERY_LEGO_EV3=m
 CONFIG_WATCHDOG=y
 CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_MFD_DM355EVM_MSP=y
@@ -131,6 +132,8 @@
 CONFIG_REGULATOR_TPS6507X=y
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY=m
 CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE=m
@@ -147,6 +150,7 @@
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
+CONFIG_SND_USB_AUDIO=m
 CONFIG_SND_SOC=m
 CONFIG_SND_EDMA_SOC=m
 CONFIG_SND_DA850_SOC_EVM=m
@@ -174,7 +178,7 @@
 CONFIG_USB_STORAGE=m
 CONFIG_USB_MUSB_HDRC=m
 CONFIG_USB_MUSB_DA8XX=m
-CONFIG_MUSB_PIO_ONLY=y
+CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_TEST=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_USB_GADGET=m
@@ -182,6 +186,7 @@
 CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
+CONFIG_USB_G_NCM=m
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_MASS_STORAGE=m
 CONFIG_USB_G_SERIAL=m
@@ -236,7 +241,6 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=m
 CONFIG_DEBUG_FS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_MUTEXES=y
 # CONFIG_ARM_UNWIND is not set
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 701677f..a93cc2f 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
index 14559db..731a22a 100644
--- a/arch/arm/configs/ebsa110_defconfig
+++ b/arch/arm/configs/ebsa110_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
index c0dac0f0f..860d271 100644
--- a/arch/arm/configs/efm32_defconfig
+++ b/arch/arm/configs/efm32_defconfig
@@ -38,7 +38,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index 8e10df7..30a6752 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
@@ -34,12 +33,9 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_BNEP=m
 CONFIG_BT_HIDP=m
@@ -49,7 +45,6 @@
 CONFIG_FW_LOADER=m
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -63,7 +58,6 @@
 CONFIG_MTD_NAND_PLATFORM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
@@ -71,8 +65,6 @@
 CONFIG_NET_ETHERNET=y
 CONFIG_DM9000=y
 CONFIG_DM9000_DEBUGLEVEL=1
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=m
 CONFIG_PPP_MULTILINK=y
 CONFIG_PPP_FILTER=y
@@ -144,7 +136,6 @@
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
@@ -160,7 +151,6 @@
 CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=m
 # CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
@@ -180,10 +170,7 @@
 # CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 158dde8..78cd73d 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -45,7 +45,6 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET6_XFRM_MODE_TUNNEL is not set
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index d68ac67..cd27d65 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -29,7 +28,6 @@
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_IRDA=y
 CONFIG_IRLAN=m
@@ -55,8 +53,6 @@
 # CONFIG_SATA_PMP is not set
 CONFIG_PATA_PCMCIA=m
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_HERMES=m
 CONFIG_PCMCIA_HERMES=m
 CONFIG_NET_PCMCIA=y
@@ -97,11 +93,9 @@
 CONFIG_SND_PXA2XX_SOC_E800=m
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_MMC_TMIO=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=m
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
@@ -111,8 +105,6 @@
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 6dc661c..25325ed 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -49,7 +49,6 @@
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_CFG80211=y
-CONFIG_RFKILL_REGULATOR=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
@@ -74,6 +73,7 @@
 CONFIG_MWIFIEX_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_KEYBOARD_CROS_EC=y
 # CONFIG_MOUSE_PS2 is not set
 CONFIG_MOUSE_CYAPA=y
@@ -83,7 +83,6 @@
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
-CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_SAMSUNG=y
@@ -158,6 +157,7 @@
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
 CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
 CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
 CONFIG_DRM=y
 CONFIG_DRM_EXYNOS=y
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -220,7 +220,6 @@
 CONFIG_PL330_DMA=y
 CONFIG_CROS_EC_CHARDEV=y
 CONFIG_COMMON_CLK_MAX77686=y
-CONFIG_COMMON_CLK_MAX77802=y
 CONFIG_COMMON_CLK_S2MPS11=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
@@ -265,6 +264,12 @@
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_DEV_EXYNOS_RNG=y
 CONFIG_CRYPTO_DEV_S5P=y
 CONFIG_ARM_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
@@ -274,4 +279,3 @@
 CONFIG_CRC_CCITT=y
 CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
-CONFIG_VIDEO_VIVID=m
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index d3f1768..23660f3 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="-ezx200910312315"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
@@ -51,7 +50,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -109,7 +107,6 @@
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -118,7 +115,6 @@
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
@@ -133,7 +129,6 @@
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -144,15 +139,12 @@
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
 CONFIG_BRIDGE=m
 CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=y
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=y
@@ -174,7 +166,6 @@
 CONFIG_FW_LOADER=m
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -189,11 +180,8 @@
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_WLAN is not set
 CONFIG_PPP=m
 CONFIG_PPP_MULTILINK=y
@@ -282,14 +270,12 @@
 # CONFIG_USB_HID is not set
 CONFIG_HID_APPLE=m
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_SDIO_UART=m
 CONFIG_MMC_PXA=y
 CONFIG_MMC_SPI=y
@@ -311,8 +297,6 @@
 CONFIG_REISERFS_FS_POSIX_ACL=y
 CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=m
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
@@ -385,10 +369,8 @@
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 87e020f..3a7938f 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index 37903e3..e90d1df 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -31,12 +30,10 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -53,14 +50,12 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SA1100=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 # CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
@@ -70,7 +65,6 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 CONFIG_CRYPTO=y
 CONFIG_CRYPTO_HMAC=y
diff --git a/arch/arm/configs/hackkit_defconfig b/arch/arm/configs/hackkit_defconfig
index bed8047..742d18cd 100644
--- a/arch/arm/configs/hackkit_defconfig
+++ b/arch/arm/configs/hackkit_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
@@ -22,7 +21,6 @@
 CONFIG_MTD=y
 CONFIG_MTD_DEBUG=y
 CONFIG_MTD_DEBUG_VERBOSE=3
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -44,6 +42,5 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 # CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 7f479cd..f204017 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -43,7 +42,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -101,7 +99,6 @@
 CONFIG_NETFILTER_XT_MATCH_TIME=m
 CONFIG_NETFILTER_XT_MATCH_U32=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -110,7 +107,6 @@
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
@@ -125,7 +121,6 @@
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -136,7 +131,6 @@
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -154,7 +148,6 @@
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_AFS_PARTS=y
 CONFIG_MTD_AR7_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -169,11 +162,8 @@
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_WLAN is not set
 CONFIG_PPP=m
 CONFIG_PPP_MULTILINK=y
@@ -258,14 +248,12 @@
 CONFIG_SND_PXA2XX_SOC=y
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_PXA27X=y
 CONFIG_USB_ETH=m
 # CONFIG_USB_ETH_RNDIS is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_SDIO_UART=m
 CONFIG_MMC_PXA=y
 CONFIG_MMC_SPI=y
@@ -282,8 +270,6 @@
 CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=m
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
@@ -356,10 +342,8 @@
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 5f013c9f..ca0f13ca 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -45,7 +45,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index bb6fa56..e74de69 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -55,6 +55,9 @@
 CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
@@ -204,6 +207,7 @@
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
 CONFIG_CPU_THERMAL=y
 CONFIG_IMX_THERMAL=y
 CONFIG_WATCHDOG=y
@@ -341,6 +345,7 @@
 CONFIG_MXS_DMA=y
 CONFIG_STAGING=y
 CONFIG_IIO=y
+CONFIG_IMX7D_ADC=y
 CONFIG_VF610_ADC=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
@@ -378,6 +383,7 @@
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_PROVE_LOCKING=y
diff --git a/arch/arm/configs/iop13xx_defconfig b/arch/arm/configs/iop13xx_defconfig
index 652b7bd..a73b6a3 100644
--- a/arch/arm/configs/iop13xx_defconfig
+++ b/arch/arm/configs/iop13xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -30,7 +29,6 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET6_XFRM_MODE_TUNNEL is not set
@@ -38,7 +36,6 @@
 # CONFIG_IPV6_SIT is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
@@ -79,7 +76,6 @@
 CONFIG_INTEL_IOP_ADMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/iop32x_defconfig b/arch/arm/configs/iop32x_defconfig
index aa3af0a..f63362b 100644
--- a/arch/arm/configs/iop32x_defconfig
+++ b/arch/arm/configs/iop32x_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -26,7 +25,6 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET6_XFRM_MODE_TUNNEL is not set
@@ -34,11 +32,9 @@
 # CONFIG_IPV6_SIT is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -91,7 +87,6 @@
 CONFIG_NET_DMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_ECRYPT_FS=y
 CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig
index 713faee..d22f832 100644
--- a/arch/arm/configs/iop33x_defconfig
+++ b/arch/arm/configs/iop33x_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -24,7 +23,6 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET6_XFRM_MODE_TUNNEL is not set
@@ -32,11 +30,9 @@
 # CONFIG_IPV6_SIT is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -73,7 +69,6 @@
 CONFIG_NET_DMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_CRAMFS=y
 CONFIG_NFS_FS=y
@@ -84,7 +79,6 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
 CONFIG_DEBUG_LL_UART_8250=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index bb910d9..c8378da 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -50,7 +49,6 @@
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
 CONFIG_IP_VS=m
@@ -63,13 +61,11 @@
 CONFIG_IP_VS_LBLCR=m
 CONFIG_IP_VS_DH=m
 CONFIG_IP_VS_SH=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
@@ -114,9 +110,7 @@
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -170,7 +164,6 @@
 CONFIG_WATCHDOG=y
 CONFIG_IXP4XX_WATCHDOG=y
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=y
@@ -192,7 +185,6 @@
 CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
@@ -201,6 +193,5 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_DEBUG_LL_UART_8250=y
diff --git a/arch/arm/configs/jornada720_defconfig b/arch/arm/configs/jornada720_defconfig
index 9056284..65d37ad 100644
--- a/arch/arm/configs/jornada720_defconfig
+++ b/arch/arm/configs/jornada720_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -19,7 +18,6 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
 CONFIG_IRDA=m
@@ -35,8 +33,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_NET_PCMCIA=y
 CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
 CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
@@ -58,12 +54,10 @@
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SA1100=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
@@ -106,8 +100,6 @@
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/ks8695_defconfig b/arch/arm/configs/ks8695_defconfig
index 47c4883..b8b91d7 100644
--- a/arch/arm/configs/ks8695_defconfig
+++ b/arch/arm/configs/ks8695_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -28,14 +27,11 @@
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -46,8 +42,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PRISM54=m
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
@@ -58,7 +52,6 @@
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_JFFS2_SUMMARY=y
diff --git a/arch/arm/configs/lart_defconfig b/arch/arm/configs/lart_defconfig
index 8fc6fd0..b6ddb98 100644
--- a/arch/arm/configs/lart_defconfig
+++ b/arch/arm/configs/lart_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
@@ -31,8 +30,6 @@
 CONFIG_MTD=y
 CONFIG_MTD_DEBUG=y
 CONFIG_MTD_DEBUG_VERBOSE=1
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_LART=y
 CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 2de1bf0..23df251 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -44,7 +44,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
diff --git a/arch/arm/configs/lpd270_defconfig b/arch/arm/configs/lpd270_defconfig
index a9dd1e9..3a4d0e6 100644
--- a/arch/arm/configs/lpd270_defconfig
+++ b/arch/arm/configs/lpd270_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SLAB=y
@@ -21,9 +20,7 @@
 # CONFIG_INET6_XFRM_MODE_BEET is not set
 # CONFIG_IPV6_SIT is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -50,7 +47,6 @@
 # CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_PXA2XX_AC97=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
@@ -60,5 +56,4 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/lubbock_defconfig b/arch/arm/configs/lubbock_defconfig
index c4ba274..4ce2da2 100644
--- a/arch/arm/configs/lubbock_defconfig
+++ b/arch/arm/configs/lubbock_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
@@ -20,9 +19,7 @@
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -52,5 +49,4 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a5b4920..ec5674c 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -35,7 +34,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_IRDA=m
@@ -48,8 +46,6 @@
 CONFIG_IRTTY_SIR=m
 CONFIG_PXA_FICP=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -60,17 +56,12 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_DEFLATE=m
@@ -111,7 +102,6 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_BACKLIGHT_PWM=y
-CONFIG_DISPLAY_SUPPORT=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
@@ -125,9 +115,7 @@
 # CONFIG_SND_USB is not set
 CONFIG_SND_SOC=m
 CONFIG_SND_PXA2XX_SOC=m
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_MON=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
@@ -152,7 +140,6 @@
 CONFIG_RTC_DEBUG=y
 CONFIG_RTC_DRV_PXA=y
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_TMPFS=y
@@ -172,9 +159,7 @@
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig
index e8d26b8..26499b6 100644
--- a/arch/arm/configs/mainstone_defconfig
+++ b/arch/arm/configs/mainstone_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
@@ -18,9 +17,7 @@
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -50,5 +47,4 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index 0b02e4f4..cf7dcb2 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -1,10 +1,7 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_RELAY=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_MODULES=y
@@ -13,19 +10,22 @@
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
 CONFIG_ARCH_S3C24XX=y
 # CONFIG_CPU_S3C2410 is not set
 CONFIG_CPU_S3C2440=y
+CONFIG_MACH_MINI2440=y
 CONFIG_S3C_ADC=y
 CONFIG_S3C24XX_PWM=y
-CONFIG_MACH_MINI2440=y
 CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
 CONFIG_KEXEC=y
 CONFIG_CPU_IDLE=y
-CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
 CONFIG_APM_EMULATION=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -49,7 +49,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
@@ -58,8 +57,6 @@
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_PKTGEN=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -69,7 +66,6 @@
 CONFIG_BT_HCIBTUSB=m
 CONFIG_BT_HCIBTSDIO=m
 CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_LL=y
 CONFIG_BT_HCIBCM203X=m
@@ -77,7 +73,6 @@
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIVHCI=m
 CONFIG_CFG80211=m
-CONFIG_CFG80211_REG_DEBUG=y
 CONFIG_MAC80211=m
 CONFIG_MAC80211_MESH=y
 CONFIG_MAC80211_LEDS=y
@@ -85,10 +80,7 @@
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_FTL=y
 CONFIG_NFTL=y
@@ -111,6 +103,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_SENSORS_TSL2550=m
+CONFIG_EEPROM_AT24=y
 CONFIG_SCSI=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=m
@@ -118,10 +111,15 @@
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
 CONFIG_DM9000=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
 CONFIG_HOSTAP=m
 CONFIG_HOSTAP_FIRMWARE=y
 CONFIG_HOSTAP_FIRMWARE_NVRAM=y
@@ -129,14 +127,6 @@
 CONFIG_LIBERTAS_SDIO=m
 CONFIG_ZD1211RW=m
 CONFIG_ZD1211RW_DEBUG=y
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
 CONFIG_INPUT_FF_MEMLESS=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_EVBUG=m
@@ -144,10 +134,10 @@
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_SERIO_RAW=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=128
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=128
+CONFIG_SERIAL_DEV_BUS=m
 CONFIG_IPMI_HANDLER=m
 CONFIG_IPMI_DEVICE_INTERFACE=m
 CONFIG_IPMI_SI=m
@@ -158,7 +148,6 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_S3C2410=y
 CONFIG_I2C_SIMTEC=y
-CONFIG_EEPROM_AT24=y
 CONFIG_SPI=y
 CONFIG_SPI_S3C24XX=y
 CONFIG_SPI_SPIDEV=y
@@ -167,7 +156,6 @@
 CONFIG_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
@@ -179,14 +167,9 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_BACKLIGHT_PWM=y
-CONFIG_DISPLAY_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_MINI_4x6=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -205,10 +188,7 @@
 CONFIG_SND_USB_CAIAQ=m
 CONFIG_SND_USB_CAIAQ_INPUT=y
 CONFIG_SND_SOC=y
-CONFIG_SND_S3C24XX_SOC=y
 CONFIG_HIDRAW=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
 CONFIG_HID_GYRATION=y
 CONFIG_HID_NTRIG=y
 CONFIG_HID_PANTHERLORD=y
@@ -217,8 +197,9 @@
 CONFIG_HID_SONY=y
 CONFIG_HID_SUNPLUS=y
 CONFIG_HID_TOPSEED=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_ACM=m
 CONFIG_USB_WDM=m
@@ -230,13 +211,12 @@
 CONFIG_USB_STORAGE_SDDR55=m
 CONFIG_USB_STORAGE_JUMPSHOT=m
 CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_LIBUSUAL=y
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_CP210X=m
 CONFIG_USB_SERIAL_FTDI_SIO=m
 CONFIG_USB_SERIAL_SPCP8X5=m
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_S3C2410=y
+CONFIG_USB_S3C2410=y
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -248,7 +228,6 @@
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_S3C=y
-CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_S3C24XX=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGER_TIMER=y
@@ -259,6 +238,7 @@
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
+CONFIG_S3C24XX_DMAC=y
 CONFIG_EXT2_FS=m
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -266,30 +246,19 @@
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CONFIGFS_FS=m
 CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_BOTH=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_EFI_PARTITION=y
 CONFIG_NLS_DEFAULT="cp437"
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_737=m
@@ -329,24 +298,17 @@
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_INFO=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
-CONFIG_KEYS=y
-CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=y
 CONFIG_CRYPTO_LRW=m
@@ -360,11 +322,9 @@
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_ARC4=y
 CONFIG_CRYPTO_BLOWFISH=m
@@ -379,7 +339,8 @@
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=y
 CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_MINI_4x6=y
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index f1cb95e..1eeee7f 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -23,7 +22,6 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
@@ -34,12 +32,9 @@
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_GENERIC=y
 # CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -60,13 +55,11 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_MAX8925=y
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX8925=y
 CONFIG_MMC=y
 # CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_EXT2_FS=y
@@ -87,12 +80,10 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_DYNAMIC_DEBUG is not set
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
 CONFIG_DEBUG_MMP_UART3=y
 CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_ERRORS=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index b2ddd53..2da0d9e 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -36,7 +36,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
index 19d119f..0bcdec7 100644
--- a/arch/arm/configs/mps2_defconfig
+++ b/arch/arm/configs/mps2_defconfig
@@ -35,7 +35,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2685e03..4d19c1b 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -202,6 +202,8 @@
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_OMAP_BCH=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_NAND_BRCMNAND=y
 CONFIG_MTD_NAND_VF610_NFC=y
@@ -229,6 +231,7 @@
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_AHCI_BRCM=y
+CONFIG_AHCI_DM816=y
 CONFIG_AHCI_ST=y
 CONFIG_AHCI_IMX=y
 CONFIG_AHCI_SUNXI=y
@@ -935,7 +938,13 @@
 CONFIG_KEYSTONE_IRQ=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_ST=y
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_DEV_MARVELL_CESA=m
+CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
 CONFIG_CRYPTO_DEV_S5P=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index a0345e1..752e2e7 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -40,9 +39,7 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -66,7 +63,6 @@
 CONFIG_MII=y
 CONFIG_NET_PCI=y
 CONFIG_MV643XX_ETH=y
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -106,7 +102,6 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_EXT4_FS=m
-CONFIG_INOTIFY=y
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
 CONFIG_UDF_FS=m
@@ -130,10 +125,7 @@
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_ECB=m
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index f1a0e25..6955370 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -135,6 +135,8 @@
 CONFIG_MV_XOR=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_MEMORY=y
+CONFIG_PWM=y
+CONFIG_SENSORS_PWM_FAN=y
 CONFIG_EXT4_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 6e0f751..e5822ab 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -8,6 +8,7 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
 # CONFIG_PID_NS is not set
@@ -38,7 +39,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_CAN=m
@@ -75,6 +75,7 @@
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_MXS_LRADC=y
 CONFIG_TOUCHSCREEN_TSC2007=m
 # CONFIG_SERIO is not set
 CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
@@ -95,6 +96,7 @@
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_STMP3XXX_RTC_WATCHDOG=y
+CONFIG_MFD_MXS_LRADC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
@@ -136,10 +138,9 @@
 CONFIG_RTC_DRV_STMP=y
 CONFIG_DMADEVICES=y
 CONFIG_MXS_DMA=y
-CONFIG_STAGING=y
-CONFIG_MXS_LRADC=y
 CONFIG_IIO=y
 CONFIG_IIO_SYSFS_TRIGGER=y
+CONFIG_MXS_LRADC_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_MXS=y
 CONFIG_NVMEM=y
diff --git a/arch/arm/configs/neponset_defconfig b/arch/arm/configs/neponset_defconfig
index 460dca4..018a109 100644
--- a/arch/arm/configs/neponset_defconfig
+++ b/arch/arm/configs/neponset_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
@@ -25,8 +24,6 @@
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -84,5 +81,4 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/netwinder_defconfig b/arch/arm/configs/netwinder_defconfig
index f1395bb..2e3b20e 100644
--- a/arch/arm/configs/netwinder_defconfig
+++ b/arch/arm/configs/netwinder_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_ARCH_FOOTBRIDGE=y
@@ -21,7 +20,6 @@
 CONFIG_IP_PNP_RARP=y
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
@@ -65,7 +63,6 @@
 CONFIG_SOUND_YM3812=y
 CONFIG_SOUND_WAVEARTIST=y
 CONFIG_EXT2_FS=y
-CONFIG_AUTOFS_FS=y
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_TMPFS=y
diff --git a/arch/arm/configs/netx_defconfig b/arch/arm/configs/netx_defconfig
index 9c0ad79..cc5c5f9 100644
--- a/arch/arm/configs/netx_defconfig
+++ b/arch/arm/configs/netx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -34,12 +33,9 @@
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
 CONFIG_NET_PKTGEN=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
@@ -61,7 +57,6 @@
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_RTC_CLASS=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
@@ -70,7 +65,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 7d2ad30..0ac44ac 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -32,7 +32,6 @@
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=y
 CONFIG_IP_MROUTE=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
 CONFIG_BT_RFCOMM=m
diff --git a/arch/arm/configs/nuc910_defconfig b/arch/arm/configs/nuc910_defconfig
index 10180cf..a726536 100644
--- a/arch/arm/configs/nuc910_defconfig
+++ b/arch/arm/configs/nuc910_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -16,16 +15,12 @@
 CONFIG_FPE_NWFPE=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -42,7 +37,6 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_MON=y
 CONFIG_USB_STORAGE=y
@@ -56,5 +50,4 @@
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/nuc950_defconfig b/arch/arm/configs/nuc950_defconfig
index 27aa873..614a0a2 100644
--- a/arch/arm/configs/nuc950_defconfig
+++ b/arch/arm/configs/nuc950_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -22,16 +21,12 @@
 CONFIG_BINFMT_MISC=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -59,7 +54,6 @@
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_MON=y
 CONFIG_USB_STORAGE=y
@@ -73,4 +67,3 @@
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/nuc960_defconfig b/arch/arm/configs/nuc960_defconfig
index 56fd7ad..b84bbd2 100644
--- a/arch/arm/configs/nuc960_defconfig
+++ b/arch/arm/configs/nuc960_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -22,16 +21,12 @@
 CONFIG_BINFMT_MISC=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -48,7 +43,6 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_MON=y
 CONFIG_USB_STORAGE=y
@@ -62,5 +56,4 @@
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_DEBUG_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_CRC32 is not set
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 6ffc984..72f4bc8 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -72,7 +72,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 CONFIG_IPV6=y
 CONFIG_NETFILTER=y
@@ -252,7 +251,6 @@
 CONFIG_CRYPTO_ECB=y
 CONFIG_CRYPTO_PCBC=y
 CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_ZLIB=y
 CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 27a70a7..e39ee28 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -64,7 +63,6 @@
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
diff --git a/arch/arm/configs/palmz72_defconfig b/arch/arm/configs/palmz72_defconfig
index 83c135e1..e0a6142 100644
--- a/arch/arm/configs/palmz72_defconfig
+++ b/arch/arm/configs/palmz72_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -30,11 +29,9 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
@@ -56,12 +53,10 @@
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
-CONFIG_DISPLAY_SUPPORT=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FONTS=y
 CONFIG_FONT_8x8=y
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
 CONFIG_MMC_DEBUG=y
@@ -80,6 +75,5 @@
 CONFIG_NLS_DEFAULT="utf8"
 CONFIG_NLS_CODEPAGE_866=y
 CONFIG_NLS_UTF8=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
 CONFIG_CRC_T10DIF=y
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index b5624e3..9c88a19 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -34,28 +33,22 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 # CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -74,7 +67,6 @@
 CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_PXA2XX_AC97=y
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index cb08cc5..f0541b0 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -24,11 +23,9 @@
 CONFIG_SYN_COOKIES=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
diff --git a/arch/arm/configs/pxa168_defconfig b/arch/arm/configs/pxa168_defconfig
index 74d7e01..e7c7b91 100644
--- a/arch/arm/configs/pxa168_defconfig
+++ b/arch/arm/configs/pxa168_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -24,18 +23,14 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -46,9 +41,7 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_CRAMFS=y
@@ -62,9 +55,7 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/pxa255-idp_defconfig b/arch/arm/configs/pxa255-idp_defconfig
index 088627a..4a383af 100644
--- a/arch/arm/configs/pxa255-idp_defconfig
+++ b/arch/arm/configs/pxa255-idp_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MODULES=y
@@ -18,8 +17,6 @@
 CONFIG_IP_PNP_DHCP=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -54,5 +51,4 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig
index 5f337d7..bfea687 100644
--- a/arch/arm/configs/pxa3xx_defconfig
+++ b/arch/arm/configs/pxa3xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -25,15 +24,12 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_PXA3xx=y
@@ -43,12 +39,9 @@
 CONFIG_MTD_ONENAND_GENERIC=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
@@ -92,7 +85,6 @@
 CONFIG_FONTS=y
 CONFIG_FONT_6x11=y
 CONFIG_LOGO=y
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
 CONFIG_MMC_PXA=y
@@ -125,7 +117,6 @@
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/pxa910_defconfig b/arch/arm/configs/pxa910_defconfig
index 3bb7771..3aff71e 100644
--- a/arch/arm/configs/pxa910_defconfig
+++ b/arch/arm/configs/pxa910_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
@@ -24,18 +23,14 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_BLK_DEV is not set
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -54,9 +49,7 @@
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_CRAMFS=y
@@ -70,9 +63,7 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_DEBUG_MMP_UART2=y
 CONFIG_EARLY_PRINTK=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 1318f61..64e3a2a 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -156,7 +156,6 @@
 CONFIG_BT_MRVL=m
 CONFIG_BT_MRVL_SDIO=m
 CONFIG_CFG80211=m
-CONFIG_CFG80211_REG_DEBUG=y
 CONFIG_MAC80211=m
 CONFIG_RFKILL=y
 CONFIG_RFKILL_INPUT=y
@@ -592,7 +591,6 @@
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
 CONFIG_USB_CYTHERM=m
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_GPIO_VBUS=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 07666a7..b02039c 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -52,7 +52,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
@@ -113,6 +112,7 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
+CONFIG_PINCTRL_IPQ4019=y
 CONFIG_PINCTRL_IPQ8064=y
 CONFIG_PINCTRL_MSM8660=y
 CONFIG_PINCTRL_MSM8960=y
@@ -173,6 +173,7 @@
 CONFIG_LEDS_PM8058=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RPMSG_QCOM_SMD=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PM8XXX=y
 CONFIG_DMADEVICES=y
@@ -182,6 +183,7 @@
 CONFIG_QCOM_CLK_RPM=y
 CONFIG_QCOM_CLK_SMD_RPM=y
 CONFIG_APQ_MMCC_8084=y
+CONFIG_IPQ_GCC_4019=y
 CONFIG_IPQ_LCC_806X=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_LCC_8960=y
diff --git a/arch/arm/configs/raumfeld_defconfig b/arch/arm/configs/raumfeld_defconfig
index 3d833ae..e3dc80e 100644
--- a/arch/arm/configs/raumfeld_defconfig
+++ b/arch/arm/configs/raumfeld_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_MODULES=y
@@ -24,16 +23,11 @@
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_CFG80211=y
-CONFIG_CFG80211_REG_DEBUG=y
 CONFIG_MAC80211=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_NFTL=y
 CONFIG_NFTL_RW=y
@@ -50,8 +44,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMSC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_LIBERTAS=y
 CONFIG_LIBERTAS_SDIO=m
 CONFIG_USB_USBNET=y
@@ -146,7 +138,6 @@
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XIP=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_FSCACHE=y
 CONFIG_FSCACHE_STATS=y
 CONFIG_CACHEFILES=y
@@ -199,9 +190,7 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index 9e77dc7..2a6d69d 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -35,7 +35,6 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index 8963179..3b82b64 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -51,8 +50,6 @@
 CONFIG_ARM_ETHER1=y
 CONFIG_ARM_ETHER3=y
 CONFIG_ARM_ETHERH=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=m
 CONFIG_PPPOE=m
 CONFIG_INPUT_EVDEV=y
@@ -81,13 +78,11 @@
 CONFIG_SOUND_PRIME=m
 CONFIG_SOUND_OSS=m
 CONFIG_SOUND_VIDC=m
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8583=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
@@ -129,6 +124,5 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_DEBUG_LL_UART_8250=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 1e6c48d..2afb359 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=m
 CONFIG_IKCONFIG_PROC=y
@@ -12,9 +11,6 @@
 CONFIG_BSD_DISKLABEL=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_ARCH_S3C24XX=y
-CONFIG_S3C_BOOT_ERROR_RESET=y
-CONFIG_S3C_ADC=y
-CONFIG_S3C24XX_PWM=y
 CONFIG_CPU_S3C2412=y
 CONFIG_CPU_S3C2416=y
 CONFIG_CPU_S3C2440=y
@@ -43,15 +39,13 @@
 CONFIG_ARCH_S3C2440=y
 CONFIG_MACH_NEO1973_GTA02=y
 CONFIG_MACH_RX1950=y
-CONFIG_SMDK2440_CPU2442=y
 CONFIG_MACH_SMDK2443=y
-# CONFIG_ARM_THUMB is not set
+CONFIG_S3C_ADC=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
 CONFIG_FPE_NWFPE=y
 CONFIG_FPE_NWFPE_XP=y
-CONFIG_BINFMT_AOUT=y
 CONFIG_APM_EMULATION=m
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -67,7 +61,6 @@
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
-# CONFIG_INET_LRO is not set
 CONFIG_TCP_CONG_ADVANCED=y
 CONFIG_TCP_CONG_HSTCP=m
 CONFIG_TCP_CONG_HYBLA=m
@@ -86,9 +79,6 @@
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -102,6 +92,7 @@
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
 CONFIG_NETFILTER_XT_TARGET_NFLOG=m
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
@@ -139,16 +130,13 @@
 CONFIG_NETFILTER_XT_MATCH_U32=m
 CONFIG_IP_VS=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -161,7 +149,6 @@
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -172,7 +159,6 @@
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -185,7 +171,6 @@
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=m
 CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_LL=y
 CONFIG_BT_HCIBCM203X=m
@@ -201,7 +186,6 @@
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -216,9 +200,9 @@
 CONFIG_PARPORT_1284=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_UB=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_ATA_OVER_ETH=m
+CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -279,13 +263,13 @@
 CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=m
 CONFIG_PRINTER=y
 CONFIG_PPDEV=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_S3C2410=y
 CONFIG_I2C_SIMTEC=y
-CONFIG_EEPROM_AT24=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=m
 CONFIG_SPI_S3C24XX=m
@@ -297,6 +281,7 @@
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MFD_SM501=y
+CONFIG_TPS65010=y
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
@@ -336,7 +321,6 @@
 CONFIG_USB_STORAGE_ONETOUCH=m
 CONFIG_USB_STORAGE_KARMA=m
 CONFIG_USB_STORAGE_CYPRESS_ATACB=m
-CONFIG_USB_LIBUSUAL=y
 CONFIG_USB_MDC800=m
 CONFIG_USB_MICROTEK=m
 CONFIG_USB_USS720=m
@@ -353,7 +337,6 @@
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
 CONFIG_USB_CYPRESS_CY7C63=m
 CONFIG_USB_CYTHERM=m
 CONFIG_USB_IDMOUSE=m
@@ -382,14 +365,14 @@
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_S3C=y
+CONFIG_DMADEVICES=y
+CONFIG_S3C24XX_DMAC=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT4_FS=m
-CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=y
@@ -407,7 +390,6 @@
 CONFIG_SQUASHFS=m
 CONFIG_ROMFS_FS=y
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
@@ -452,11 +434,9 @@
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
 CONFIG_NLS_UTF8=m
+CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index e0f6693..507d7ad 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -1,4 +1,4 @@
-CONFIG_EXPERIMENTAL=y
+CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -8,7 +8,6 @@
 CONFIG_ARCH_MULTI_V6=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_S3C64XX=y
-CONFIG_S3C_BOOT_ERROR_RESET=y
 CONFIG_MACH_SMDK6400=y
 CONFIG_MACH_ANW6410=y
 CONFIG_MACH_MINI6410=y
@@ -19,11 +18,9 @@
 CONFIG_MACH_SMARTQ5=y
 CONFIG_MACH_SMARTQ7=y
 CONFIG_MACH_WLF_CRAGG_6410=y
-CONFIG_CPU_32v6K=y
 CONFIG_AEABI=y
 CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x51000000,6M ramdisk_size=6144"
 CONFIG_VFP=y
-CONFIG_PM=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_NAND=y
@@ -48,14 +45,11 @@
 CONFIG_LCD_LTV350QV=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_SOUND=y
 CONFIG_SND=m
 CONFIG_SND_MIXER_OSS=m
 CONFIG_SND_PCM_OSS=m
 CONFIG_SND_SOC=m
-CONFIG_SND_S3C24XX_SOC=m
-CONFIG_SND_SOC_SMDK_WM9713=m
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_OHCI_HCD=y
@@ -68,30 +62,26 @@
 CONFIG_USB_SERIAL_PL2303=m
 CONFIG_MMC=y
 CONFIG_MMC_DEBUG=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_SDIO_UART=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_S3C=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_S3C=y
+CONFIG_PWM=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-CONFIG_INOTIFY=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
+CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index c51f0f0..09b5a73 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -1,16 +1,16 @@
-CONFIG_EXPERIMENTAL=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_ARCH_S5PV210=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
-CONFIG_S3C_DEV_FB=y
-CONFIG_S5PV210_SETUP_FB_24BPP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_VMSPLIT_2G=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
@@ -21,7 +21,6 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -34,36 +33,27 @@
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_HW_RANDOM=y
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_S3C_UART1=y
 CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_S3C_UART=1
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 777c9e9..6529cb4 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -3,6 +3,8 @@
 CONFIG_SYSVIPC=y
 CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +46,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET6_XFRM_MODE_TUNNEL is not set
@@ -52,6 +53,7 @@
 CONFIG_IPV6_SIT_6RD=y
 CONFIG_CAN=y
 CONFIG_CAN_AT91=y
+CONFIG_CAN_M_CAN=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_MAC80211_LEDS=y
diff --git a/arch/arm/configs/shannon_defconfig b/arch/arm/configs/shannon_defconfig
index e523956..de33abd 100644
--- a/arch/arm/configs/shannon_defconfig
+++ b/arch/arm/configs/shannon_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
@@ -17,8 +16,6 @@
 CONFIG_INET=y
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/arm/configs/simpad_defconfig b/arch/arm/configs/simpad_defconfig
index d335815..28d99d8 100644
--- a/arch/arm/configs/simpad_defconfig
+++ b/arch/arm/configs/simpad_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION="oe1"
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -33,18 +32,13 @@
 CONFIG_IRTTY_SIR=m
 CONFIG_SA1100_FIR=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
 CONFIG_BT_BNEP_MC_FILTER=y
 CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -107,5 +101,4 @@
 CONFIG_NLS_ISO8859_15=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
index d271b26..7b36eeb 100644
--- a/arch/arm/configs/spear13xx_defconfig
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -32,7 +31,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
@@ -73,9 +71,7 @@
 CONFIG_GPIO_PL061=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 7ff23a0..2c5e8df 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -17,7 +16,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
@@ -56,9 +54,7 @@
 CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_FB=y
 CONFIG_FB_ARMCLCD=y
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_MMC=y
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index 7822980..124c244 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BLK_DEV_INITRD=y
@@ -14,7 +13,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
 CONFIG_MTD_OF_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_FSMC=y
@@ -49,7 +47,6 @@
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index d8c5293..9ea82c1 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -30,26 +29,22 @@
 CONFIG_UNIX=y
 CONFIG_INET=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_RAW=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_QUEUE=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -66,8 +61,6 @@
 CONFIG_IRCOMM=m
 CONFIG_PXA_FICP=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -87,9 +80,7 @@
 CONFIG_BT_HCIVHCI=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_ROM=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
@@ -203,7 +194,6 @@
 CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
 CONFIG_USB_CYTHERM=m
 CONFIG_USB_IDMOUSE=m
 CONFIG_USB_GADGET=m
@@ -221,7 +211,6 @@
 CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -243,9 +232,7 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_TEST=m
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index a097538..90e5c46 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -52,7 +52,10 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_STM32F4=y
 # CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
 CONFIG_REGULATOR=y
+CONFIG_GPIO_STMPE=y
+CONFIG_MFD_STMPE=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_NEW_LEDS=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 5cd5dd70..0ec1d1e 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -27,7 +27,6 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 CONFIG_CAN=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 7209a2c..d0a9e5d 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -1,13 +1,12 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_BUGVERBOSE is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_SHMEM is not set
 CONFIG_SLOB=y
@@ -26,8 +25,6 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -36,22 +33,17 @@
 CONFIG_MTD_PHYSMAP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=10240
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT_CONSOLE is not set
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_MON=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_S3C2410=y
+CONFIG_USB_S3C2410=y
 CONFIG_USB_ETH=m
 CONFIG_EXT2_FS=y
 # CONFIG_DNOTIFY is not set
@@ -62,8 +54,5 @@
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_DEBUG_LL=y
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 492f7f3..2b5a224 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_BSD_PROCESS_ACCT=y
@@ -38,7 +37,6 @@
 CONFIG_IP_PNP_BOOTP=y
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
 CONFIG_VLAN_8021Q=m
 CONFIG_IRDA=m
 CONFIG_IRLAN=m
@@ -49,8 +47,6 @@
 CONFIG_IRDA_FAST_RR=y
 CONFIG_IRTTY_SIR=m
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -60,11 +56,9 @@
 CONFIG_CFG80211=y
 CONFIG_CONNECTOR=y
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -163,7 +157,6 @@
 CONFIG_SND_USB_AUDIO=m
 # CONFIG_USB_HID is not set
 CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=m
 CONFIG_USB_SERIAL=m
@@ -183,7 +176,6 @@
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-CONFIG_INOTIFY=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index aaa95ab..36d7740 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -49,7 +49,6 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_ARMMMCI=y
 CONFIG_RTC_CLASS=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 37fe607..0fa0ed5 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -46,7 +46,6 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
 CONFIG_NET_9P=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 0d717a5..44d4fa5 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=13
@@ -34,10 +33,8 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -49,7 +46,6 @@
 CONFIG_MTD=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=0
-CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -72,8 +68,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_USBNET=m
 # CONFIG_USB_NET_CDC_SUBSET is not set
@@ -148,7 +142,6 @@
 CONFIG_EXT3_FS=m
 # CONFIG_EXT3_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=m
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
@@ -165,9 +158,7 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
-CONFIG_DEBUG_ERRORS=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_ARC4=m
 CONFIG_CRC_T10DIF=m
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 721832f..2eda246 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOCALVERSION=".xcep-itech"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
@@ -47,14 +46,10 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FW_LOADER is not set
-CONFIG_MTD_CONCAT=y
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PXA2XX=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT_MOUSEDEV is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -71,7 +66,6 @@
 CONFIG_SENSORS_ADM1021=m
 CONFIG_SENSORS_MAX6650=m
 # CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SA1100=m
@@ -90,8 +84,6 @@
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index cd11da8..8d4c0c9 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=13
@@ -29,10 +28,8 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -46,7 +43,6 @@
 CONFIG_MTD=y
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CHAR=m
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
@@ -70,8 +66,6 @@
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_DM9000=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_HERMES=m
 CONFIG_PCMCIA_HERMES=m
 CONFIG_RT2X00=m
@@ -130,7 +124,6 @@
 # CONFIG_SND_PCMCIA is not set
 CONFIG_SND_SOC=m
 CONFIG_SND_PXA2XX_SOC=m
-# CONFIG_HID_SUPPORT is not set
 CONFIG_USB=m
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_ACM=m
@@ -164,7 +157,6 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
 CONFIG_VFAT_FS=m
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
@@ -182,7 +174,5 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_DEBUG_ERRORS=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_T10DIF=m
diff --git a/arch/arm/configs/zx_defconfig b/arch/arm/configs/zx_defconfig
index d6253a4..dfc061d 100644
--- a/arch/arm/configs/zx_defconfig
+++ b/arch/arm/configs/zx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -79,7 +78,6 @@
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_DW=y
 CONFIG_EXT2_FS=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3a36d99..d836050 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -28,7 +28,6 @@
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
-generic-y += siginfo.h
 generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8..3234fe9 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
 	const struct dma_map_ops *dev_dma_ops;
 #endif
-	bool dma_coherent;
+	unsigned int dma_coherent:1;
+	unsigned int dma_ops_setup:1;
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dmi.h b/arch/arm/include/asm/dmi.h
new file mode 100644
index 0000000..df2d2ff
--- /dev/null
+++ b/arch/arm/include/asm/dmi.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define dmi_early_remap(x, l)		memremap(x, l, MEMREMAP_WB)
+#define dmi_early_unmap(x, l)		memunmap(x)
+#define dmi_remap(x, l)			memremap(x, l, MEMREMAP_WB)
+#define dmi_unmap(x)			memunmap(x)
+#define dmi_alloc(l)			kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c..a0d726a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot)	(prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)	(prot)
 
 
 /*
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 607f702..e9b098d 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -4,3 +4,5 @@
 genhdr-y += unistd-common.h
 genhdr-y += unistd-oabi.h
 genhdr-y += unistd-eabi.h
+
+generic-y += siginfo.h
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index be3b3fb..af2a7f1 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
 	 * driven low on this core and there isn't an architected way to
 	 * determine that.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	register_undef_hook(&debug_reg_hook);
 
 	/*
@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
 	 * assume that a halting debugger will leave the world in a nice state
 	 * for us.
 	 */
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
-				dbg_reset_online, NULL);
+	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
+					   "arm/hw_breakpoint:online",
+					   dbg_reset_online, NULL);
 	unregister_undef_hook(&debug_reg_hook);
 	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
 		core_num_wrps = 0;
 		if (ret > 0)
-			cpuhp_remove_state_nocalls(ret);
-		put_online_cpus();
+			cpuhp_remove_state_nocalls_cpuslocked(ret);
+		cpus_read_unlock();
 		return 0;
 	}
 
@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
 			TRAP_HWBKPT, "watchpoint debug exception");
 	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
 			TRAP_HWBKPT, "breakpoint debug exception");
-	put_online_cpus();
+	cpus_read_unlock();
 
 	/* Register PM notifiers. */
 	pm_init();
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 020560b..a1a3472 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
 		.insn = insn,
 	};
 
-	stop_machine(patch_text_stop_machine, &patch, NULL);
+	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
 }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 32e1a95..4e80bf7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
 	if (arch >= CPU_ARCH_ARMv6) {
 		unsigned int cachetype = read_cpuid_cachetype();
 
-		if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
 			cacheid = 0;
 		} else if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 572a8df..c9a0a52 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-	if (system_state == SYSTEM_BOOTING ||
-	    system_state == SYSTEM_RUNNING) {
+	if (system_state <= SYSTEM_RUNNING) {
 		raw_spin_lock(&stop_lock);
 		pr_crit("CPU%u: stopping\n", cpu);
 		dump_stack();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 895ae51..b30eafe 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -403,7 +403,7 @@ static int __init twd_local_timer_of_register(struct device_node *np)
 	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
 	return err;
 }
-CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
-CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
-CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
 #endif
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 97b22fa..629f8e9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -120,6 +120,6 @@ void __init time_init(void)
 #ifdef CONFIG_COMMON_CLK
 		of_clk_init(NULL);
 #endif
-		clocksource_probe();
+		timer_probe();
 	}
 }
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f8a3ab8..bf949a7 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -11,6 +11,7 @@
  * for more details.
  */
 
+#include <linux/arch_topology.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
@@ -44,77 +45,6 @@
  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
  * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-static DEFINE_MUTEX(cpu_scale_mutex);
-
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return per_cpu(cpu_scale, cpu);
-}
-
-static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
-{
-	per_cpu(cpu_scale, cpu) = capacity;
-}
-
-#ifdef CONFIG_PROC_SYSCTL
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-
-	return sprintf(buf, "%lu\n",
-			arch_scale_cpu_capacity(NULL, cpu->dev.id));
-}
-
-static ssize_t cpu_capacity_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf,
-				  size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int this_cpu = cpu->dev.id, i;
-	unsigned long new_capacity;
-	ssize_t ret;
-
-	if (count) {
-		ret = kstrtoul(buf, 0, &new_capacity);
-		if (ret)
-			return ret;
-		if (new_capacity > SCHED_CAPACITY_SCALE)
-			return -EINVAL;
-
-		mutex_lock(&cpu_scale_mutex);
-		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
-			set_capacity_scale(i, new_capacity);
-		mutex_unlock(&cpu_scale_mutex);
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
-
-static int register_cpu_capacity_sysctl(void)
-{
-	int i;
-	struct device *cpu;
-
-	for_each_possible_cpu(i) {
-		cpu = get_cpu_device(i);
-		if (!cpu) {
-			pr_err("%s: too early to get CPU%d device!\n",
-			       __func__, i);
-			continue;
-		}
-		device_create_file(cpu, &dev_attr_cpu_capacity);
-	}
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-#endif
 
 #ifdef CONFIG_OF
 struct cpu_efficiency {
@@ -143,145 +73,6 @@ static unsigned long *__cpu_capacity;
 
 static unsigned long middle_capacity = 1;
 static bool cap_from_dt = true;
-static u32 *raw_capacity;
-static bool cap_parsing_failed;
-static u32 capacity_scale;
-
-static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
-{
-	int ret = 1;
-	u32 cpu_capacity;
-
-	if (cap_parsing_failed)
-		return !ret;
-
-	ret = of_property_read_u32(cpu_node,
-				   "capacity-dmips-mhz",
-				   &cpu_capacity);
-	if (!ret) {
-		if (!raw_capacity) {
-			raw_capacity = kcalloc(num_possible_cpus(),
-					       sizeof(*raw_capacity),
-					       GFP_KERNEL);
-			if (!raw_capacity) {
-				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
-				cap_parsing_failed = true;
-				return !ret;
-			}
-		}
-		capacity_scale = max(cpu_capacity, capacity_scale);
-		raw_capacity[cpu] = cpu_capacity;
-		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
-			cpu_node->full_name, raw_capacity[cpu]);
-	} else {
-		if (raw_capacity) {
-			pr_err("cpu_capacity: missing %s raw capacity\n",
-				cpu_node->full_name);
-			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
-		}
-		cap_parsing_failed = true;
-		kfree(raw_capacity);
-	}
-
-	return !ret;
-}
-
-static void normalize_cpu_capacity(void)
-{
-	u64 capacity;
-	int cpu;
-
-	if (!raw_capacity || cap_parsing_failed)
-		return;
-
-	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
-	mutex_lock(&cpu_scale_mutex);
-	for_each_possible_cpu(cpu) {
-		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
-			/ capacity_scale;
-		set_capacity_scale(cpu, capacity);
-		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			cpu, arch_scale_cpu_capacity(NULL, cpu));
-	}
-	mutex_unlock(&cpu_scale_mutex);
-}
-
-#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-
-static int
-init_cpu_capacity_callback(struct notifier_block *nb,
-			   unsigned long val,
-			   void *data)
-{
-	struct cpufreq_policy *policy = data;
-	int cpu;
-
-	if (cap_parsing_failed || cap_parsing_done)
-		return 0;
-
-	switch (val) {
-	case CPUFREQ_NOTIFY:
-		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
-				cpumask_pr_args(policy->related_cpus),
-				cpumask_pr_args(cpus_to_visit));
-		cpumask_andnot(cpus_to_visit,
-			       cpus_to_visit,
-			       policy->related_cpus);
-		for_each_cpu(cpu, policy->related_cpus) {
-			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
-					    policy->cpuinfo.max_freq / 1000UL;
-			capacity_scale = max(raw_capacity[cpu], capacity_scale);
-		}
-		if (cpumask_empty(cpus_to_visit)) {
-			normalize_cpu_capacity();
-			kfree(raw_capacity);
-			pr_debug("cpu_capacity: parsing done\n");
-			cap_parsing_done = true;
-			schedule_work(&parsing_done_work);
-		}
-	}
-	return 0;
-}
-
-static struct notifier_block init_cpu_capacity_notifier = {
-	.notifier_call = init_cpu_capacity_callback,
-};
-
-static int __init register_cpufreq_notifier(void)
-{
-	if (cap_parsing_failed)
-		return -EINVAL;
-
-	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
-		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
-		return -ENOMEM;
-	}
-	cpumask_copy(cpus_to_visit, cpu_possible_mask);
-
-	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
-}
-core_initcall(register_cpufreq_notifier);
-
-static void parsing_done_workfn(struct work_struct *work)
-{
-	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
-}
-
-#else
-static int __init free_raw_capacity(void)
-{
-	kfree(raw_capacity);
-
-	return 0;
-}
-core_initcall(free_raw_capacity);
-#endif
 
 /*
  * Iterate all CPUs' descriptor in DT and compute the efficiency
@@ -320,7 +111,7 @@ static void __init parse_dt_topology(void)
 			continue;
 		}
 
-		if (parse_cpu_capacity(cn, cpu)) {
+		if (topology_parse_cpu_capacity(cn, cpu)) {
 			of_node_put(cn);
 			continue;
 		}
@@ -368,8 +159,8 @@ static void __init parse_dt_topology(void)
 		middle_capacity = ((max_capacity / 3)
 				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
 
-	if (cap_from_dt && !cap_parsing_failed)
-		normalize_cpu_capacity();
+	if (cap_from_dt)
+		topology_normalize_cpu_scale();
 }
 
 /*
@@ -382,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu)
 	if (!cpu_capacity(cpu) || cap_from_dt)
 		return;
 
-	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, arch_scale_cpu_capacity(NULL, cpu));
+		cpu, topology_get_cpu_scale(NULL, cpu));
 }
 
 #else
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a..5386528 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
diff --git a/arch/arm/mach-actions/Kconfig b/arch/arm/mach-actions/Kconfig
new file mode 100644
index 0000000..ad9c5c8
--- /dev/null
+++ b/arch/arm/mach-actions/Kconfig
@@ -0,0 +1,16 @@
+menuconfig ARCH_ACTIONS
+	bool "Actions Semi SoCs"
+	depends on ARCH_MULTI_V7
+	select ARM_AMBA
+	select ARM_GIC
+	select ARM_GLOBAL_TIMER
+	select CACHE_L2X0
+	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+	select COMMON_CLK
+	select GENERIC_IRQ_CHIP
+	select HAVE_ARM_SCU if SMP
+	select HAVE_ARM_TWD if SMP
+	select OWL_PM_DOMAINS_HELPER
+	select OWL_TIMER
+	help
+	  This enables support for the Actions Semiconductor S500 SoC family.
diff --git a/arch/arm/mach-actions/Makefile b/arch/arm/mach-actions/Makefile
new file mode 100644
index 0000000..c0f1162
--- /dev/null
+++ b/arch/arm/mach-actions/Makefile
@@ -0,0 +1,3 @@
+obj-${CONFIG_SMP} += platsmp.o headsmp.o
+
+AFLAGS_headsmp.o := -Wa,-march=armv7-a
diff --git a/arch/arm/mach-actions/headsmp.S b/arch/arm/mach-actions/headsmp.S
new file mode 100644
index 0000000..65f53bd
--- /dev/null
+++ b/arch/arm/mach-actions/headsmp.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
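+/*
+ * Invalidate the I-cache and the L1 data cache (by set/way) on the freshly
+ * powered-up secondary core before it joins the SMP cluster.
+ */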
+ENTRY(owl_v7_invalidate_l1)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
+	mcr	p15, 2, r0, c0, c0, 0
+	mrc	p15, 1, r0, c0, c0, 0
+
+	ldr	r1, =0x7fff
+	and	r2, r1, r0, lsr #13
+
+	ldr	r1, =0x3ff
+
+	and	r3, r1, r0, lsr #3	@ NumWays - 1
+	add	r2, r2, #1		@ NumSets
+
+	and	r0, r0, #0x7
+	add	r0, r0, #4	@ SetShift
+
+	clz	r1, r3		@ WayShift
+	add	r4, r3, #1	@ NumWays
+1:	sub	r2, r2, #1	@ NumSets--
+	mov	r3, r4		@ Temp = NumWays
+2:	subs	r3, r3, #1	@ Temp--
+	mov	r5, r3, lsl r1
+	mov	r6, r2, lsl r0
+	orr	r5, r5, r6	@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+	mcr	p15, 0, r5, c7, c6, 2
+	bgt	2b
+	cmp	r2, #0
+	bgt	1b
+	dsb
+	isb
+	mov	pc, lr
+ENDPROC(owl_v7_invalidate_l1)
+
+ENTRY(owl_secondary_startup)
+	bl	owl_v7_invalidate_l1
+	b	secondary_startup
diff --git a/arch/arm/mach-actions/platsmp.c b/arch/arm/mach-actions/platsmp.c
new file mode 100644
index 0000000..12a9e33
--- /dev/null
+++ b/arch/arm/mach-actions/platsmp.c
@@ -0,0 +1,171 @@
+/*
+ * Actions Semi Leopard
+ *
+ * This file is based on the ARM RealView SMP platform code.
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+#include <linux/soc/actions/owl-sps.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/smp_scu.h>
+
+#define OWL_CPU1_ADDR	0x50
+#define OWL_CPU1_FLAG	0x5c
+
+#define OWL_CPUx_FLAG_BOOT	0x55aa
+
+#define OWL_SPS_PG_CTL_PWR_CPU2	BIT(5)
+#define OWL_SPS_PG_CTL_PWR_CPU3	BIT(6)
+#define OWL_SPS_PG_CTL_ACK_CPU2	BIT(21)
+#define OWL_SPS_PG_CTL_ACK_CPU3	BIT(22)
+
+static void __iomem *scu_base_addr;
+static void __iomem *sps_base_addr;
+static void __iomem *timer_base_addr;
+static int ncores;
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void owl_secondary_startup(void);
+
+static int s500_wakeup_secondary(unsigned int cpu)
+{
+	int ret;
+
+	if (cpu > 3)
+		return -EINVAL;
+
+	/* The generic PM domain driver is not available this early. */
+	switch (cpu) {
+	case 2:
+		ret = owl_sps_set_pg(sps_base_addr,
+		                     OWL_SPS_PG_CTL_PWR_CPU2,
+				     OWL_SPS_PG_CTL_ACK_CPU2, true);
+		if (ret)
+			return ret;
+		break;
+	case 3:
+		ret = owl_sps_set_pg(sps_base_addr,
+		                     OWL_SPS_PG_CTL_PWR_CPU3,
+				     OWL_SPS_PG_CTL_ACK_CPU3, true);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	/* wait for CPUx to reach its WFE instruction */
+	udelay(200);
+
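+	/* Publish the secondary entry point and the boot flag for CPUx */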
+	writel(virt_to_phys(owl_secondary_startup),
+	       timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
+	writel(OWL_CPUx_FLAG_BOOT,
+	       timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
+
+	dsb_sev();
+	mb();
+
+	return 0;
+}
+
+static int s500_smp_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+	int ret;
+
+	ret = s500_wakeup_secondary(cpu);
+	if (ret)
+		return ret;
+
+	udelay(10);
+
+	spin_lock(&boot_lock);
+
+	smp_send_reschedule(cpu);
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (pen_release == -1)
+			break;
+	}
+
+	writel(0, timer_base_addr + OWL_CPU1_ADDR + (cpu - 1) * 4);
+	writel(0, timer_base_addr + OWL_CPU1_FLAG + (cpu - 1) * 4);
+
+	spin_unlock(&boot_lock);
+
+	return 0;
+}
+
+static void __init s500_smp_prepare_cpus(unsigned int max_cpus)
+{
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, "actions,s500-timer");
+	if (!node) {
+		pr_err("%s: missing timer\n", __func__);
+		return;
+	}
+
+	timer_base_addr = of_iomap(node, 0);
+	if (!timer_base_addr) {
+		pr_err("%s: could not map timer registers\n", __func__);
+		return;
+	}
+
+	node = of_find_compatible_node(NULL, NULL, "actions,s500-sps");
+	if (!node) {
+		pr_err("%s: missing sps\n", __func__);
+		return;
+	}
+
+	sps_base_addr = of_iomap(node, 0);
+	if (!sps_base_addr) {
+		pr_err("%s: could not map sps registers\n", __func__);
+		return;
+	}
+
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
+		node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
+		if (!node) {
+			pr_err("%s: missing scu\n", __func__);
+			return;
+		}
+
+		scu_base_addr = of_iomap(node, 0);
+		if (!scu_base_addr) {
+			pr_err("%s: could not map scu registers\n", __func__);
+			return;
+		}
+
+		/*
+		 * While the number of cpus is gathered from dt, also get the
+		 * number of cores from the scu to verify this value when
+		 * booting the cores.
+		 */
+		ncores = scu_get_core_count(scu_base_addr);
+		pr_debug("%s: ncores %d\n", __func__, ncores);
+
+		scu_enable(scu_base_addr);
+	}
+}
+
+static const struct smp_operations s500_smp_ops __initconst = {
+	.smp_prepare_cpus = s500_smp_prepare_cpus,
+	.smp_boot_secondary = s500_smp_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(s500_smp, "actions,s500-smp", &s500_smp_ops);
diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
index f3f8c5c..2d5570e 100644
--- a/arch/arm/mach-aspeed/Kconfig
+++ b/arch/arm/mach-aspeed/Kconfig
@@ -4,7 +4,7 @@
 	select SRAM
 	select WATCHDOG
 	select ASPEED_WATCHDOG
-	select MOXART_TIMER
+	select FTTMR010_TIMER
 	select MFD_SYSCON
 	select PINCTRL
 	help
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924..d735e5f 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,12 +1,21 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
-	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
+	select ARM_CPU_SUSPEND if PM
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
 	select SOC_BUS
 
 if ARCH_AT91
+config SOC_SAMV7
+	bool "SAM Cortex-M7 family" if ARM_SINGLE_ARMV7M
+	select COMMON_CLK_AT91
+	select PINCTRL_AT91
+	help
+	  Select this if you are using an SoC from Atmel's SAME7, SAMS7 or SAMV7
+	  families.
+
 config SOC_SAMA5D2
 	bool "SAMA5D2 family"
 	depends on ARCH_MULTI_V7
@@ -52,6 +61,7 @@
 	bool "AT91RM9200"
 	depends on ARCH_MULTI_V4T
 	select ATMEL_AIC_IRQ
+	select ATMEL_PM if PM
 	select ATMEL_ST
 	select CPU_ARM920T
 	select HAVE_AT91_USB_CLK
@@ -65,6 +75,7 @@
 	bool "AT91SAM9"
 	depends on ARCH_MULTI_V5
 	select ATMEL_AIC_IRQ
+	select ATMEL_PM if PM
 	select ATMEL_SDRAMC
 	select CPU_ARM926T
 	select HAVE_AT91_SMD
@@ -123,9 +134,13 @@
 config SOC_SAMA5
 	bool
 	select ATMEL_AIC5_IRQ
+	select ATMEL_PM if PM
 	select ATMEL_SDRAMC
 	select MEMORY
 	select SOC_SAM_V7
 	select SRAM if PM
 
+config ATMEL_PM
+	bool
+
 endif
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index cfd8f60..ee34aa3 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -6,10 +6,10 @@
 obj-$(CONFIG_SOC_AT91RM9200)	+= at91rm9200.o
 obj-$(CONFIG_SOC_AT91SAM9)	+= at91sam9.o
 obj-$(CONFIG_SOC_SAMA5)		+= sama5.o
+obj-$(CONFIG_SOC_SAMV7)		+= samv7.o
 
 # Power Management
-obj-$(CONFIG_PM)		+= pm.o
-obj-$(CONFIG_PM)		+= pm_suspend.o
+obj-$(CONFIG_ATMEL_PM)		+= pm.o pm_suspend.o
 
 ifeq ($(CONFIG_CPU_V7),y)
 AFLAGS_pm_suspend.o := -march=armv7-a
diff --git a/arch/arm/mach-at91/Makefile.boot b/arch/arm/mach-at91/Makefile.boot
new file mode 100644
index 0000000..eacfc3f
--- /dev/null
+++ b/arch/arm/mach-at91/Makefile.boot
@@ -0,0 +1,3 @@
+# Empty file waiting for deletion once Makefile.boot isn't needed any more.
+# Patch waits for application at
+# http://www.arm.linux.org.uk/developer/patches/viewpatch.php?id=7889/1 .
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index f1ead0f..e2bd172 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -15,10 +15,12 @@
 extern void __init at91rm9200_pm_init(void);
 extern void __init at91sam9_pm_init(void);
 extern void __init sama5_pm_init(void);
+extern void __init sama5d2_pm_init(void);
 #else
 static inline void __init at91rm9200_pm_init(void) { }
 static inline void __init at91sam9_pm_init(void) { }
 static inline void __init sama5_pm_init(void) { }
+static inline void __init sama5d2_pm_init(void) { }
 #endif
 
 #endif /* _AT91_GENERIC_H */
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 283e79a..667fdda 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -15,6 +15,7 @@
 #include <linux/of_address.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/parser.h>
 #include <linux/suspend.h>
 
 #include <linux/clk/at91_pmc.h>
@@ -22,6 +23,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fncpy.h>
 #include <asm/system_misc.h>
+#include <asm/suspend.h>
 
 #include "generic.h"
 #include "pm.h"
@@ -37,7 +39,17 @@ extern void at91_pinctrl_gpio_suspend(void);
 extern void at91_pinctrl_gpio_resume(void);
 #endif
 
-static struct at91_pm_data pm_data;
+static const match_table_t pm_modes __initconst = {
+	{ 0, "standby" },
+	{ AT91_PM_SLOW_CLOCK, "ulp0" },
+	{ AT91_PM_BACKUP, "backup" },
+	{ -1, NULL },
+};
+
+static struct at91_pm_data pm_data = {
+	.standby_mode = 0,
+	.suspend_mode = AT91_PM_SLOW_CLOCK,
+};
 
 #define at91_ramc_read(id, field) \
 	__raw_readl(pm_data.ramc[id] + field)
@@ -58,15 +70,33 @@ static int at91_pm_valid_state(suspend_state_t state)
 	}
 }
 
+static int canary = 0xA5A5A5A5;
 
-static suspend_state_t target_state;
+static struct at91_pm_bu {
+	int suspended;
+	unsigned long reserved;
+	phys_addr_t canary;
+	phys_addr_t resume;
+} *pm_bu;
 
 /*
  * Called after processes are frozen, but before we shutdown devices.
  */
 static int at91_pm_begin(suspend_state_t state)
 {
-	target_state = state;
+	switch (state) {
+	case PM_SUSPEND_MEM:
+		pm_data.mode = pm_data.suspend_mode;
+		break;
+
+	case PM_SUSPEND_STANDBY:
+		pm_data.mode = pm_data.standby_mode;
+		break;
+
+	default:
+		pm_data.mode = -1;
+	}
+
 	return 0;
 }
 
@@ -115,7 +145,7 @@ static int at91_pm_verify_clocks(void)
  */
 int at91_suspend_entering_slow_clock(void)
 {
-	return (target_state == PM_SUSPEND_MEM);
+	return (pm_data.mode >= AT91_PM_SLOW_CLOCK);
 }
 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
 
@@ -123,50 +153,65 @@ static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
 extern u32 at91_pm_suspend_in_sram_sz;
 
-static void at91_pm_suspend(suspend_state_t state)
+static int at91_suspend_finish(unsigned long val)
 {
-	pm_data.mode = (state == PM_SUSPEND_MEM) ? AT91_PM_SLOW_CLOCK : 0;
-
 	flush_cache_all();
 	outer_disable();
 
 	at91_suspend_sram_fn(&pm_data);
 
+	return 0;
+}
+
+static void at91_pm_suspend(suspend_state_t state)
+{
+	if (pm_data.mode == AT91_PM_BACKUP) {
+		pm_bu->suspended = 1;
+
+		cpu_suspend(0, at91_suspend_finish);
+
+		/* The SRAM is lost between suspend cycles */
+		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
+					     &at91_pm_suspend_in_sram,
+					     at91_pm_suspend_in_sram_sz);
+	} else {
+		at91_suspend_finish(0);
+	}
+
 	outer_resume();
 }
 
+/*
+ * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
+ * event sources; and reduces DRAM power.  But otherwise it's identical to
+ * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
+ *
+ * AT91_PM_SLOW_CLOCK is like STANDBY plus slow-clock mode, so drivers must
+ * suspend more deeply: the master clock switches to clk32k and the main
+ * oscillator is turned off.
+ *
+ * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
+ */
 static int at91_pm_enter(suspend_state_t state)
 {
 #ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_suspend();
 #endif
+
 	switch (state) {
-	/*
-	 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
-	 * drivers must suspend more deeply, the master clock switches
-	 * to the clk32k and turns off the main oscillator
-	 */
 	case PM_SUSPEND_MEM:
+	case PM_SUSPEND_STANDBY:
 		/*
 		 * Ensure that clocks are in a valid state.
 		 */
-		if (!at91_pm_verify_clocks())
+		if ((pm_data.mode >= AT91_PM_SLOW_CLOCK) &&
+		    !at91_pm_verify_clocks())
 			goto error;
 
 		at91_pm_suspend(state);
 
 		break;
 
-	/*
-	 * STANDBY mode has *all* drivers suspended; ignores irqs not
-	 * marked as 'wakeup' event sources; and reduces DRAM power.
-	 * But otherwise it's identical to PM_SUSPEND_ON: cpu idle, and
-	 * nothing fancy done with main or cpu clocks.
-	 */
-	case PM_SUSPEND_STANDBY:
-		at91_pm_suspend(state);
-		break;
-
 	case PM_SUSPEND_ON:
 		cpu_do_idle();
 		break;
@@ -177,8 +222,6 @@ static int at91_pm_enter(suspend_state_t state)
 	}
 
 error:
-	target_state = PM_SUSPEND_ON;
-
 #ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_resume();
 #endif
@@ -190,7 +233,6 @@ static int at91_pm_enter(suspend_state_t state)
  */
 static void at91_pm_end(void)
 {
-	target_state = PM_SUSPEND_ON;
 }
 
 
@@ -436,6 +478,79 @@ static void __init at91_pm_sram_init(void)
 			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
 }
 
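+/*
+ * Map the SHDWC and SFRBU and carve a small buffer out of the securam for the
+ * backup mode resume data.  If any of these resources is missing, fall back
+ * from backup mode to the ulp0 (slow clock) mode.
+ */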
+static void __init at91_pm_backup_init(void)
+{
+	struct gen_pool *sram_pool;
+	struct device_node *np;
+	struct platform_device *pdev = NULL;
+
+	if ((pm_data.standby_mode != AT91_PM_BACKUP) &&
+	    (pm_data.suspend_mode != AT91_PM_BACKUP))
+		return;
+
+	pm_bu = NULL;
+
+	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
+	if (!np) {
+		pr_warn("%s: failed to find shdwc!\n", __func__);
+		return;
+	}
+
+	pm_data.shdwc = of_iomap(np, 0);
+	of_node_put(np);
+
+	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
+	if (!np) {
+		pr_warn("%s: failed to find sfrbu!\n", __func__);
+		goto sfrbu_fail;
+	}
+
+	pm_data.sfrbu = of_iomap(np, 0);
+	of_node_put(np);
+	pm_bu = NULL;
+
+	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
+	if (!np)
+		goto securam_fail;
+
+	pdev = of_find_device_by_node(np);
+	of_node_put(np);
+	if (!pdev) {
+		pr_warn("%s: failed to find securam device!\n", __func__);
+		goto securam_fail;
+	}
+
+	sram_pool = gen_pool_get(&pdev->dev, NULL);
+	if (!sram_pool) {
+		pr_warn("%s: securam pool unavailable!\n", __func__);
+		goto securam_fail;
+	}
+
+	pm_bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
+	if (!pm_bu) {
+		pr_warn("%s: unable to alloc securam!\n", __func__);
+		goto securam_fail;
+	}
+
+	pm_bu->suspended = 0;
+	pm_bu->canary = virt_to_phys(&canary);
+	pm_bu->resume = virt_to_phys(cpu_resume);
+
+	return;
+
+sfrbu_fail:
+	iounmap(pm_data.shdwc);
+	pm_data.shdwc = NULL;
+securam_fail:
+	iounmap(pm_data.sfrbu);
+	pm_data.sfrbu = NULL;
+
+	if (pm_data.standby_mode == AT91_PM_BACKUP)
+		pm_data.standby_mode = AT91_PM_SLOW_CLOCK;
+	if (pm_data.suspend_mode == AT91_PM_BACKUP)
+		pm_data.suspend_mode = AT91_PM_SLOW_CLOCK;
+}
+
 struct pmc_info {
 	unsigned long uhp_udp_mask;
 };
@@ -481,10 +596,14 @@ static void __init at91_pm_init(void (*pm_idle)(void))
 
 	at91_pm_sram_init();
 
-	if (at91_suspend_sram_fn)
+	if (at91_suspend_sram_fn) {
 		suspend_set_ops(&at91_pm_ops);
-	else
+		pr_info("AT91: PM: standby: %s, suspend: %s\n",
+			pm_modes[pm_data.standby_mode].pattern,
+			pm_modes[pm_data.suspend_mode].pattern);
+	} else {
 		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
+	}
 }
 
 void __init at91rm9200_pm_init(void)
@@ -510,3 +629,34 @@ void __init sama5_pm_init(void)
 	at91_dt_ramc();
 	at91_pm_init(NULL);
 }
+
+void __init sama5d2_pm_init(void)
+{
+	at91_pm_backup_init();
+	sama5_pm_init();
+}
+
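+/* Parse the "atmel.pm_modes=<standby>,<suspend>" kernel command line option */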
+static int __init at91_pm_modes_select(char *str)
+{
+	char *s;
+	substring_t args[MAX_OPT_ARGS];
+	int standby, suspend;
+
+	if (!str)
+		return 0;
+
+	s = strsep(&str, ",");
+	standby = match_token(s, pm_modes, args);
+	if (standby < 0)
+		return 0;
+
+	suspend = match_token(str, pm_modes, args);
+	if (suspend < 0)
+		return 0;
+
+	pm_data.standby_mode = standby;
+	pm_data.suspend_mode = suspend;
+
+	return 0;
+}
+early_param("atmel.pm_modes", at91_pm_modes_select);
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index fc0f7d0..f95d314 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -22,6 +22,7 @@
 #define AT91_MEMCTRL_DDRSDR	2
 
 #define	AT91_PM_SLOW_CLOCK	0x01
+#define	AT91_PM_BACKUP		0x02
 
 #ifndef __ASSEMBLY__
 struct at91_pm_data {
@@ -30,6 +31,10 @@ struct at91_pm_data {
 	unsigned long uhp_udp_mask;
 	unsigned int memctrl;
 	unsigned int mode;
+	void __iomem *shdwc;
+	void __iomem *sfrbu;
+	unsigned int standby_mode;
+	unsigned int suspend_mode;
 };
 #endif
 
diff --git a/arch/arm/mach-at91/pm_data-offsets.c b/arch/arm/mach-at91/pm_data-offsets.c
index 30302cb..c0a73e6 100644
--- a/arch/arm/mach-at91/pm_data-offsets.c
+++ b/arch/arm/mach-at91/pm_data-offsets.c
@@ -9,5 +9,8 @@ int main(void)
 	DEFINE(PM_DATA_RAMC1,		offsetof(struct at91_pm_data, ramc[1]));
 	DEFINE(PM_DATA_MEMCTRL,	offsetof(struct at91_pm_data, memctrl));
 	DEFINE(PM_DATA_MODE,		offsetof(struct at91_pm_data, mode));
+	DEFINE(PM_DATA_SHDWC,		offsetof(struct at91_pm_data, shdwc));
+	DEFINE(PM_DATA_SFRBU,		offsetof(struct at91_pm_data, sfrbu));
+
 	return 0;
 }
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index 96781da..daca91f 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -97,15 +97,61 @@
 	str	tmp1, .memtype
 	ldr	tmp1, [r0, #PM_DATA_MODE]
 	str	tmp1, .pm_mode
+	/* The two ldrne below preload the SHDWC and SFRBU mappings into the TLB */
+	ldr	tmp1, [r0, #PM_DATA_SHDWC]
+	str	tmp1, .shdwc
+	cmp	tmp1, #0
+	ldrne	tmp2, [tmp1, #0]
+	ldr	tmp1, [r0, #PM_DATA_SFRBU]
+	str	tmp1, .sfr
+	cmp	tmp1, #0
+	ldrne	tmp2, [tmp1, #0x10]
 
 	/* Activate the self-refresh mode */
 	mov	r0, #SRAMC_SELF_FRESH_ACTIVE
 	bl	at91_sramc_self_refresh
 
 	ldr	r0, .pm_mode
-	tst	r0, #AT91_PM_SLOW_CLOCK
-	beq	skip_disable_main_clock
+	cmp	r0, #AT91_PM_SLOW_CLOCK
+	beq	slow_clock
+	cmp	r0, #AT91_PM_BACKUP
+	beq	backup_mode
 
+	/* Wait for interrupt */
+	ldr	pmc, .pmc_base
+	at91_cpu_idle
+	b	exit_suspend
+
+slow_clock:
+	bl	at91_slowck_mode
+	b	exit_suspend
+backup_mode:
+	bl	at91_backup_mode
+	b	exit_suspend
+
+exit_suspend:
+	/* Exit the self-refresh mode */
+	mov	r0, #SRAMC_SELF_FRESH_EXIT
+	bl	at91_sramc_self_refresh
+
+	/* Restore registers, and return */
+	ldmfd	sp!, {r4 - r12, pc}
+ENDPROC(at91_pm_suspend_in_sram)
+
+ENTRY(at91_backup_mode)
+	/* Enable backup mode: set the BUMEN bit in the SFRBU */
+	ldr	r0, .sfr
+	mov	tmp1, #0x1
+	str	tmp1, [r0, #0x10]
+
+	/* Shutdown: write the 0xA5 key and the SHDW bit to the shutdown controller */
+	ldr	r0, .shdwc
+	mov	tmp1, #0xA5000000
+	add	tmp1, tmp1, #0x1
+	str	tmp1, [r0, #0]
+ENDPROC(at91_backup_mode)
+
+ENTRY(at91_slowck_mode)
 	ldr	pmc, .pmc_base
 
 	/* Save Master clock setting */
@@ -134,18 +180,9 @@
 	orr	tmp1, tmp1, #AT91_PMC_KEY
 	str	tmp1, [pmc, #AT91_CKGR_MOR]
 
-skip_disable_main_clock:
-	ldr	pmc, .pmc_base
-
 	/* Wait for interrupt */
 	at91_cpu_idle
 
-	ldr	r0, .pm_mode
-	tst	r0, #AT91_PM_SLOW_CLOCK
-	beq	skip_enable_main_clock
-
-	ldr	pmc, .pmc_base
-
 	/* Turn on the main oscillator */
 	ldr	tmp1, [pmc, #AT91_CKGR_MOR]
 	orr	tmp1, tmp1, #AT91_PMC_MOSCEN
@@ -174,14 +211,8 @@
 
 	wait_mckrdy
 
-skip_enable_main_clock:
-	/* Exit the self-refresh mode */
-	mov	r0, #SRAMC_SELF_FRESH_EXIT
-	bl	at91_sramc_self_refresh
-
-	/* Restore registers, and return */
-	ldmfd	sp!, {r4 - r12, pc}
-ENDPROC(at91_pm_suspend_in_sram)
+	mov	pc, lr
+ENDPROC(at91_slowck_mode)
 
 /*
  * void at91_sramc_self_refresh(unsigned int is_active)
@@ -314,6 +345,10 @@
 	.word 0
 .sramc1_base:
 	.word 0
+.shdwc:
+	.word 0
+.sfr:
+	.word 0
 .memtype:
 	.word 0
 .pm_mode:
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index 6d157d0..3d0bf95 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -34,7 +34,6 @@ DT_MACHINE_START(sama5_dt, "Atmel SAMA5")
 MACHINE_END
 
 static const char *const sama5_alt_dt_board_compat[] __initconst = {
-	"atmel,sama5d2",
 	"atmel,sama5d4",
 	NULL
 };
@@ -45,3 +44,21 @@ DT_MACHINE_START(sama5_alt_dt, "Atmel SAMA5")
 	.dt_compat	= sama5_alt_dt_board_compat,
 	.l2c_aux_mask	= ~0UL,
 MACHINE_END
+
+static void __init sama5d2_init(void)
+{
+	of_platform_default_populate(NULL, NULL, NULL);
+	sama5d2_pm_init();
+}
+
+static const char *const sama5d2_compat[] __initconst = {
+	"atmel,sama5d2",
+	NULL
+};
+
+DT_MACHINE_START(sama5d2, "Atmel SAMA5")
+	/* Maintainer: Atmel */
+	.init_machine	= sama5d2_init,
+	.dt_compat	= sama5d2_compat,
+	.l2c_aux_mask	= ~0UL,
+MACHINE_END
diff --git a/arch/arm/mach-at91/samv7.c b/arch/arm/mach-at91/samv7.c
new file mode 100644
index 0000000..11386f1
--- /dev/null
+++ b/arch/arm/mach-at91/samv7.c
@@ -0,0 +1,25 @@
+/*
+ *  Setup code for SAMv7x
+ *
+ *  Copyright (C) 2013 Atmel,
+ *                2016 Andras Szemzo <szemzo.andras@gmail.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+#include "generic.h"
+
+static const char *const samv7_dt_board_compat[] __initconst = {
+	"atmel,samv7",
+	NULL
+};
+
+DT_MACHINE_START(samv7_dt, "Atmel SAMV7")
+	.dt_compat	= samv7_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index f9389c5..73be3d5 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -44,6 +44,8 @@
 	select ARM_ERRATA_775420
 	select ARM_ERRATA_764369 if SMP
 	select HAVE_SMP
+	select THERMAL
+	select THERMAL_OF
 	help
 	  Support for Broadcom Northstar Plus SoC.
 	  Broadcom Northstar Plus family of SoCs are used for switching control
@@ -150,7 +152,7 @@
 	select ARM_ERRATA_411920 if ARCH_MULTI_V6
 	select ARM_TIMER_SP804
 	select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
-	select CLKSRC_OF
+	select TIMER_OF
 	select BCM2835_TIMER
 	select PINCTRL
 	select PINCTRL_BCM2835
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index 61284b9..f385b1f 100644
--- a/arch/arm/mach-clps711x/Kconfig
+++ b/arch/arm/mach-clps711x/Kconfig
@@ -2,7 +2,7 @@
 	bool "Cirrus Logic EP721x/EP731x-based"
 	depends on ARCH_MULTI_V4T
 	select AUTO_ZRELADDR
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLPS711X_TIMER
 	select COMMON_CLK
 	select CPU_ARM720T
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index cb17682..055e947 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -641,7 +641,7 @@ static struct vpif_subdev_info vpif_capture_sdev_info[] = {
 	},
 };
 
-static const struct vpif_input dm6467_ch0_inputs[] = {
+static struct vpif_input dm6467_ch0_inputs[] = {
 	{
 		.input = {
 			.index = 0,
@@ -656,7 +656,7 @@ static const struct vpif_input dm6467_ch0_inputs[] = {
 	},
 };
 
-static const struct vpif_input dm6467_ch1_inputs[] = {
+static struct vpif_input dm6467_ch1_inputs[] = {
        {
 		.input = {
 			.index = 0,
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 7cf529f..22440c0 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -789,15 +789,35 @@ int __init da850_register_mmcsd1(struct davinci_mmc_config *config)
 
 static struct resource da8xx_rproc_resources[] = {
 	{ /* DSP boot address */
+		.name		= "host1cfg",
 		.start		= DA8XX_SYSCFG0_BASE + DA8XX_HOST1CFG_REG,
 		.end		= DA8XX_SYSCFG0_BASE + DA8XX_HOST1CFG_REG + 3,
 		.flags		= IORESOURCE_MEM,
 	},
 	{ /* DSP interrupt registers */
+		.name		= "chipsig",
 		.start		= DA8XX_SYSCFG0_BASE + DA8XX_CHIPSIG_REG,
 		.end		= DA8XX_SYSCFG0_BASE + DA8XX_CHIPSIG_REG + 7,
 		.flags		= IORESOURCE_MEM,
 	},
+	{ /* DSP L2 RAM */
+		.name		= "l2sram",
+		.start		= DA8XX_DSP_L2_RAM_BASE,
+		.end		= DA8XX_DSP_L2_RAM_BASE + SZ_256K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	{ /* DSP L1P RAM */
+		.name		= "l1pram",
+		.start		= DA8XX_DSP_L1P_RAM_BASE,
+		.end		= DA8XX_DSP_L1P_RAM_BASE + SZ_32K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	{ /* DSP L1D RAM */
+		.name		= "l1dram",
+		.start		= DA8XX_DSP_L1D_RAM_BASE,
+		.end		= DA8XX_DSP_L1D_RAM_BASE + SZ_32K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
 	{ /* dsp irq */
 		.start		= IRQ_DA8XX_CHIPINT0,
 		.end		= IRQ_DA8XX_CHIPINT0,
@@ -814,6 +834,8 @@ static struct platform_device da8xx_dsp = {
 	.resource	= da8xx_rproc_resources,
 };
 
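+/* Set once CMA memory has been successfully reserved for the DSP remoteproc */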
+static bool rproc_mem_inited __initdata;
+
 #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
 
 static phys_addr_t rproc_base __initdata;
@@ -852,6 +874,8 @@ void __init da8xx_rproc_reserve_cma(void)
 	ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
 	if (ret)
 		pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
+	else
+		rproc_mem_inited = true;
 }
 
 #else
@@ -866,6 +890,12 @@ int __init da8xx_register_rproc(void)
 {
 	int ret;
 
+	if (!rproc_mem_inited) {
+		pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
+			__func__);
+		return -ENOMEM;
+	}
+
 	ret = platform_device_register(&da8xx_dsp);
 	if (ret)
 		pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 7e46422..93ff156 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -75,6 +75,11 @@ extern unsigned int da850_max_speed;
 #define DA8XX_VPIF_BASE		0x01e17000
 #define DA8XX_GPIO_BASE		0x01e26000
 #define DA8XX_PSC1_BASE		0x01e27000
+
+#define DA8XX_DSP_L2_RAM_BASE	0x11800000
+#define DA8XX_DSP_L1P_RAM_BASE	(DA8XX_DSP_L2_RAM_BASE + 0x600000)
+#define DA8XX_DSP_L1D_RAM_BASE	(DA8XX_DSP_L2_RAM_BASE + 0x700000)
+
 #define DA8XX_AEMIF_CS2_BASE	0x60000000
 #define DA8XX_AEMIF_CS3_BASE	0x62000000
 #define DA8XX_AEMIF_CTL_BASE	0x68000000
diff --git a/arch/arm/mach-davinci/pdata-quirks.c b/arch/arm/mach-davinci/pdata-quirks.c
index 329f540..4858b1c 100644
--- a/arch/arm/mach-davinci/pdata-quirks.c
+++ b/arch/arm/mach-davinci/pdata-quirks.c
@@ -33,7 +33,7 @@ static struct tvp514x_platform_data tvp5146_pdata = {
 
 #define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
 
-static const struct vpif_input da850_ch0_inputs[] = {
+static struct vpif_input da850_ch0_inputs[] = {
 	{
 		.input = {
 			.index = 0,
@@ -48,7 +48,7 @@ static const struct vpif_input da850_ch0_inputs[] = {
 	},
 };
 
-static const struct vpif_input da850_ch1_inputs[] = {
+static struct vpif_input da850_ch1_inputs[] = {
 	{
 		.input = {
 			.index = 0,
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb8035..b5cc05d 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
 	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
 	if (!davinci_sram_suspend) {
 		pr_err("PM: cannot allocate SRAM memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto no_sram_mem;
 	}
 
 	davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
 	suspend_set_ops(&davinci_pm_ops);
 
+	return 0;
+
+no_sram_mem:
+	iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
 	iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 936c59d..782699e 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -536,6 +536,7 @@
 	select HAVE_IMX_ANATOP
 	select HAVE_IMX_MMDC
 	select HAVE_IMX_SRC
+	select IMX_GPCV2
 	help
 		This enables support for Freescale i.MX7 Dual processor.
 
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index b3347d3..94906ed 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -131,6 +131,9 @@ struct device * __init imx_soc_device_init(void)
 	case MXC_CPU_IMX6UL:
 		soc_id = "i.MX6UL";
 		break;
+	case MXC_CPU_IMX6ULL:
+		soc_id = "i.MX6ULL";
+		break;
 	case MXC_CPU_IMX7D:
 		soc_id = "i.MX7D";
 		break;
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index 34f2ff6..e00d626 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -39,6 +39,7 @@
 #define MXC_CPU_IMX6SX		0x62
 #define MXC_CPU_IMX6Q		0x63
 #define MXC_CPU_IMX6UL		0x64
+#define MXC_CPU_IMX6ULL		0x65
 #define MXC_CPU_IMX7D		0x72
 
 #define IMX_DDR_TYPE_LPDDR2		1
@@ -73,6 +74,11 @@ static inline bool cpu_is_imx6ul(void)
 	return __mxc_cpu_type == MXC_CPU_IMX6UL;
 }
 
+static inline bool cpu_is_imx6ull(void)
+{
+	return __mxc_cpu_type == MXC_CPU_IMX6ULL;
+}
+
 static inline bool cpu_is_imx6q(void)
 {
 	return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index e61b1d1..ecdf071 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -295,7 +295,8 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
 		val &= ~BM_CLPCR_SBYOS;
 		if (cpu_is_imx6sl())
 			val |= BM_CLPCR_BYPASS_PMIC_READY;
-		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
+		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
+		    cpu_is_imx6ull())
 			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
 		else
 			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -312,7 +313,8 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
 		val |= BM_CLPCR_SBYOS;
 		if (cpu_is_imx6sl() || cpu_is_imx6sx())
 			val |= BM_CLPCR_BYPASS_PMIC_READY;
-		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
+		if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
+		    cpu_is_imx6ull())
 			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
 		else
 			val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index a6e3c98..c3cf215 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -41,7 +41,7 @@ static void __init mediatek_timer_init(void)
 	}
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 };
 
 static const char * const mediatek_board_dt_compat[] = {
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index b6e3acc..ee30511 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -21,6 +21,7 @@
 	bool "Amlogic Meson8 SoCs support"
 	default ARCH_MESON
 	select MESON6_TIMER
+	select COMMON_CLK_MESON8B
 
 config MACH_MESON8B
 	bool "Amlogic Meson8b SoCs support"
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index 70db2ab..a4a91f9 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -4,7 +4,7 @@
 	select CPU_FA526
 	select ARM_DMA_MEM_BUFFERABLE
 	select FARADAY_FTINTC010
-	select MOXART_TIMER
+	select FTTMR010_TIMER
 	select GPIOLIB
 	select PHYLIB if NETDEVICES
 	help
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index c821c1d..52d7eda 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -240,7 +240,6 @@ static void omap1_show_dma_caps(void)
 		w |= 1 << 3;
 		dma_write(w, GSCR, 0);
 	}
-	return;
 }
 
 static unsigned configure_dma_errata(void)
@@ -339,10 +338,8 @@ static int __init omap1_system_dma_init(void)
 		goto exit_iounmap;
 	}
 
-	d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
 	if (!d) {
-		dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n",
-			__func__, pdev->name);
 		ret = -ENOMEM;
 		goto exit_iounmap;
 	}
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 06c5ba7..8fb1ec6 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -3,7 +3,7 @@
  *
  * Contains first level initialization routines which internally
  * generates timer device information and registers with linux
- * device model. It also has low level function to chnage the timer
+ * device model. It also has a low level function to change the timer
  * input clock source.
  *
  * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
@@ -134,8 +134,6 @@ static int __init omap1_dm_timer_init(void)
 
 		pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
 		if (!pdata) {
-			dev_err(&pdev->dev, "%s: Failed to allocate pdata.\n",
-				__func__);
 			ret = -ENOMEM;
 			goto err_free_pdata;
 		}
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index c89757a..779fb1f 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -69,7 +69,6 @@
 
 # OPP table initialization
 ifeq ($(CONFIG_PM_OPP),y)
-obj-y					+= opp.o
 obj-$(CONFIG_ARCH_OMAP3)		+= opp3xxx_data.o
 obj-$(CONFIG_ARCH_OMAP4)		+= opp4xxx_data.o
 endif
@@ -220,9 +219,6 @@
 obj-$(CONFIG_SOC_OMAP5)			+= omap_hwmod_54xx_data.o
 obj-$(CONFIG_SOC_DRA7XX)		+= omap_hwmod_7xx_data.o
 
-# EMU peripherals
-obj-$(CONFIG_HW_PERF_EVENTS)		+= pmu.o
-
 # OMAP2420 MSDI controller integration support ("MMC")
 obj-$(CONFIG_SOC_OMAP2420)		+= msdi.o
 
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 91272db..20f2553 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -53,14 +53,12 @@ static u32 board_caps;
 
 static void board_check_revision(void)
 {
-	if (of_have_populated_dt()) {
-		if (of_machine_is_compatible("nokia,n800"))
-			board_caps = NOKIA_N800;
-		else if (of_machine_is_compatible("nokia,n810"))
-			board_caps = NOKIA_N810;
-		else if (of_machine_is_compatible("nokia,n810-wimax"))
-			board_caps = NOKIA_N810_WIMAX;
-	}
+	if (of_machine_is_compatible("nokia,n800"))
+		board_caps = NOKIA_N800;
+	else if (of_machine_is_compatible("nokia,n810"))
+		board_caps = NOKIA_N810;
+	else if (of_machine_is_compatible("nokia,n810-wimax"))
+		board_caps = NOKIA_N810_WIMAX;
 
 	if (!board_caps)
 		pr_err("Unknown board\n");
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index b79b1ca..5189264 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -1224,6 +1224,14 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	return 0;
 }
 
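+/**
+ * clkdm_xlate_address - translate a clockdomain to its PRCM register address
+ * @clkdm: struct clockdomain *
+ *
+ * Returns the physical base address of @clkdm's control registers, or 0 if
+ * the platform does not implement clkdm_xlate_address.
+ */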
+u32 clkdm_xlate_address(struct clockdomain *clkdm)
+{
+	if (arch_clkdm->clkdm_xlate_address)
+		return arch_clkdm->clkdm_xlate_address(clkdm);
+
+	return 0;
+}
+
 /**
  * clkdm_hwmod_enable - add an enabled downstream hwmod to this clkdm
  * @clkdm: struct clockdomain *
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index 24667a5..827f01e 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -175,6 +175,7 @@ struct clkdm_ops {
 	void	(*clkdm_deny_idle)(struct clockdomain *clkdm);
 	int	(*clkdm_clk_enable)(struct clockdomain *clkdm);
 	int	(*clkdm_clk_disable)(struct clockdomain *clkdm);
+	u32	(*clkdm_xlate_address)(struct clockdomain *clkdm);
 };
 
 int clkdm_register_platform_funcs(struct clkdm_ops *co);
@@ -213,6 +214,7 @@ int clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk);
 int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk);
 int clkdm_hwmod_enable(struct clockdomain *clkdm, struct omap_hwmod *oh);
 int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh);
+u32 clkdm_xlate_address(struct clockdomain *clkdm);
 
 extern void __init omap242x_clockdomains_init(void);
 extern void __init omap243x_clockdomains_init(void);
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index de75cbc..e833984 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -24,8 +24,11 @@
 
 # ifndef __ASSEMBLER__
 #include <linux/clk/ti.h>
-extern void __iomem *cm_base;
-extern void __iomem *cm2_base;
+
+#include "prcm-common.h"
+
+extern struct omap_domain_base cm_base;
+extern struct omap_domain_base cm2_base;
 extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
 # endif
 
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.h b/arch/arm/mach-omap2/cm2xxx_3xxx.h
index 72928a3..aa148cd 100644
--- a/arch/arm/mach-omap2/cm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.h
@@ -52,12 +52,12 @@
 
 static inline u32 omap2_cm_read_mod_reg(s16 module, u16 idx)
 {
-	return readl_relaxed(cm_base + module + idx);
+	return readl_relaxed(cm_base.va + module + idx);
 }
 
 static inline void omap2_cm_write_mod_reg(u32 val, s16 module, u16 idx)
 {
-	writel_relaxed(val, cm_base + module + idx);
+	writel_relaxed(val, cm_base.va + module + idx);
 }
 
 /* Read-modify-write a register in a CM module. Caller must lock */
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index 6f2d0ae..a9e08d8 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -50,13 +50,13 @@
 /* Read a register in a CM instance */
 static inline u32 am33xx_cm_read_reg(u16 inst, u16 idx)
 {
-	return readl_relaxed(cm_base + inst + idx);
+	return readl_relaxed(cm_base.va + inst + idx);
 }
 
 /* Write into a register in a CM */
 static inline void am33xx_cm_write_reg(u32 val, u16 inst, u16 idx)
 {
-	writel_relaxed(val, cm_base + inst + idx);
+	writel_relaxed(val, cm_base.va + inst + idx);
 }
 
 /* Read-modify-write a register in CM */
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index 55b046a..961bc47 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -669,7 +669,8 @@ static struct cm_ll_data omap3xxx_cm_ll_data = {
 
 int __init omap3xxx_cm_init(const struct omap_prcm_init_data *data)
 {
-	omap2_clk_legacy_provider_init(TI_CLKM_CM, cm_base + OMAP3430_IVA2_MOD);
+	omap2_clk_legacy_provider_init(TI_CLKM_CM, cm_base.va +
+				       OMAP3430_IVA2_MOD);
 	return cm_register(&omap3xxx_cm_ll_data);
 }
 
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index bbe41f4..d555791 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -32,10 +32,10 @@ static struct cm_ll_data null_cm_ll_data;
 static struct cm_ll_data *cm_ll_data = &null_cm_ll_data;
 
 /* cm_base: base virtual address of the CM IP block */
-void __iomem *cm_base;
+struct omap_domain_base cm_base;
 
 /* cm2_base: base virtual address of the CM2 IP block (OMAP44xx only) */
-void __iomem *cm2_base;
+struct omap_domain_base cm2_base;
 
 #define CM_NO_CLOCKS		0x1
 #define CM_SINGLE_INSTANCE	0x2
@@ -49,8 +49,8 @@ void __iomem *cm2_base;
  */
 void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
 {
-	cm_base = cm;
-	cm2_base = cm2;
+	cm_base.va = cm;
+	cm2_base.va = cm2;
 }
 
 /**
@@ -315,27 +315,34 @@ int __init omap2_cm_base_init(void)
 	struct device_node *np;
 	const struct of_device_id *match;
 	struct omap_prcm_init_data *data;
-	void __iomem *mem;
+	struct resource res;
+	int ret;
+	struct omap_domain_base *mem = NULL;
 
 	for_each_matching_node_and_match(np, omap_cm_dt_match_table, &match) {
 		data = (struct omap_prcm_init_data *)match->data;
 
-		mem = of_iomap(np, 0);
-		if (!mem)
-			return -ENOMEM;
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret)
+			return ret;
 
 		if (data->index == TI_CLKM_CM)
-			cm_base = mem + data->offset;
+			mem = &cm_base;
 
 		if (data->index == TI_CLKM_CM2)
-			cm2_base = mem + data->offset;
+			mem = &cm2_base;
 
-		data->mem = mem;
+		data->mem = ioremap(res.start, resource_size(&res));
+
+		if (mem) {
+			mem->pa = res.start + data->offset;
+			mem->va = data->mem + data->offset;
+		}
 
 		data->np = np;
 
 		if (data->init && (data->flags & CM_SINGLE_INSTANCE ||
-				   (cm_base && cm2_base)))
+				   (cm_base.va && cm2_base.va)))
 			data->init(data);
 	}
 
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 2ab27ad..8774e98 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -55,7 +55,7 @@
 #define CLKCTRL_IDLEST_INTERFACE_IDLE		0x2
 #define CLKCTRL_IDLEST_DISABLED			0x3
 
-static void __iomem *_cm_bases[OMAP4_MAX_PRCM_PARTITIONS];
+static struct omap_domain_base _cm_bases[OMAP4_MAX_PRCM_PARTITIONS];
 
 /**
  * omap_cm_base_init - Populates the cm partitions
@@ -65,10 +65,11 @@ static void __iomem *_cm_bases[OMAP4_MAX_PRCM_PARTITIONS];
  */
 static void omap_cm_base_init(void)
 {
-	_cm_bases[OMAP4430_PRM_PARTITION] = prm_base;
-	_cm_bases[OMAP4430_CM1_PARTITION] = cm_base;
-	_cm_bases[OMAP4430_CM2_PARTITION] = cm2_base;
-	_cm_bases[OMAP4430_PRCM_MPU_PARTITION] = prcm_mpu_base;
+	memcpy(&_cm_bases[OMAP4430_PRM_PARTITION], &prm_base, sizeof(prm_base));
+	memcpy(&_cm_bases[OMAP4430_CM1_PARTITION], &cm_base, sizeof(cm_base));
+	memcpy(&_cm_bases[OMAP4430_CM2_PARTITION], &cm2_base, sizeof(cm2_base));
+	memcpy(&_cm_bases[OMAP4430_PRCM_MPU_PARTITION], &prcm_mpu_base,
+	       sizeof(prcm_mpu_base));
 }
 
 /* Private functions */
@@ -116,8 +117,8 @@ static u32 omap4_cminst_read_inst_reg(u8 part, u16 inst, u16 idx)
 {
 	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
 	       part == OMAP4430_INVALID_PRCM_PARTITION ||
-	       !_cm_bases[part]);
-	return readl_relaxed(_cm_bases[part] + inst + idx);
+	       !_cm_bases[part].va);
+	return readl_relaxed(_cm_bases[part].va + inst + idx);
 }
 
 /* Write into a register in a CM instance */
@@ -125,8 +126,8 @@ static void omap4_cminst_write_inst_reg(u32 val, u8 part, u16 inst, u16 idx)
 {
 	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
 	       part == OMAP4430_INVALID_PRCM_PARTITION ||
-	       !_cm_bases[part]);
-	writel_relaxed(val, _cm_bases[part] + inst + idx);
+	       !_cm_bases[part].va);
+	writel_relaxed(val, _cm_bases[part].va + inst + idx);
 }
 
 /* Read-modify-write a register in CM1. Caller must lock */
@@ -475,6 +476,14 @@ static int omap4_clkdm_clk_disable(struct clockdomain *clkdm)
 	return 0;
 }
 
+static u32 omap4_clkdm_xlate_address(struct clockdomain *clkdm)
+{
+	u32 addr = _cm_bases[clkdm->prcm_partition].pa + clkdm->cm_inst +
+		clkdm->clkdm_offs;
+
+	return addr;
+}
+
 struct clkdm_ops omap4_clkdm_operations = {
 	.clkdm_add_wkdep	= omap4_clkdm_add_wkup_sleep_dep,
 	.clkdm_del_wkdep	= omap4_clkdm_del_wkup_sleep_dep,
@@ -490,6 +499,7 @@ struct clkdm_ops omap4_clkdm_operations = {
 	.clkdm_deny_idle	= omap4_clkdm_deny_idle,
 	.clkdm_clk_enable	= omap4_clkdm_clk_enable,
 	.clkdm_clk_disable	= omap4_clkdm_clk_disable,
+	.clkdm_xlate_address	= omap4_clkdm_xlate_address,
 };
 
 struct clkdm_ops am43xx_clkdm_operations = {
@@ -499,6 +509,7 @@ struct clkdm_ops am43xx_clkdm_operations = {
 	.clkdm_deny_idle	= omap4_clkdm_deny_idle,
 	.clkdm_clk_enable	= omap4_clkdm_clk_enable,
 	.clkdm_clk_disable	= omap4_clkdm_clk_disable,
+	.clkdm_xlate_address	= omap4_clkdm_xlate_address,
 };
 
 static struct cm_ll_data omap4xxx_cm_ll_data = {
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 4739512..93057fb 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -36,130 +36,6 @@
 #define L3_MODULES_MAX_LEN 12
 #define L3_MODULES 3
 
-static int __init omap3_l3_init(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-	char oh_name[L3_MODULES_MAX_LEN];
-
-	/*
-	 * To avoid code running on other OMAPs in
-	 * multi-omap builds
-	 */
-	if (!(cpu_is_omap34xx()) || of_have_populated_dt())
-		return -ENODEV;
-
-	snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main");
-
-	oh = omap_hwmod_lookup(oh_name);
-
-	if (!oh)
-		pr_err("could not look up %s\n", oh_name);
-
-	pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0);
-
-	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
-
-	return PTR_ERR_OR_ZERO(pdev);
-}
-omap_postcore_initcall(omap3_l3_init);
-
-static inline void omap_init_sti(void) {}
-
-#if IS_ENABLED(CONFIG_SPI_OMAP24XX)
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-static int __init omap_mcspi_init(struct omap_hwmod *oh, void *unused)
-{
-	struct platform_device *pdev;
-	char *name = "omap2_mcspi";
-	struct omap2_mcspi_platform_config *pdata;
-	static int spi_num;
-	struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr;
-
-	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-	if (!pdata) {
-		pr_err("Memory allocation for McSPI device failed\n");
-		return -ENOMEM;
-	}
-
-	pdata->num_cs = mcspi_attrib->num_chipselect;
-	switch (oh->class->rev) {
-	case OMAP2_MCSPI_REV:
-	case OMAP3_MCSPI_REV:
-			pdata->regs_offset = 0;
-			break;
-	case OMAP4_MCSPI_REV:
-			pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET;
-			break;
-	default:
-			pr_err("Invalid McSPI Revision value\n");
-			kfree(pdata);
-			return -EINVAL;
-	}
-
-	spi_num++;
-	pdev = omap_device_build(name, spi_num, oh, pdata, sizeof(*pdata));
-	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s\n",
-				name, oh->name);
-	kfree(pdata);
-	return 0;
-}
-
-static void omap_init_mcspi(void)
-{
-	omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL);
-}
-
-#else
-static inline void omap_init_mcspi(void) {}
-#endif
-
-/**
- * omap_init_rng - bind the RNG hwmod to the RNG omap_device
- *
- * Bind the RNG hwmod to the RNG omap_device.  No return value.
- */
-static void __init omap_init_rng(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("rng");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap_rng", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "Can't build omap_device for omap_rng\n");
-}
-
-static void __init omap_init_sham(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("sham");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap-sham", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "Can't build omap_device for omap-sham\n");
-}
-
-static void __init omap_init_aes(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("aes");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap-aes", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "Can't build omap_device for omap-aes\n");
-}
-
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_VIDEO_OMAP2_VOUT)
@@ -185,54 +61,3 @@ int __init omap_init_vout(void)
 #else
 int __init omap_init_vout(void) { return 0; }
 #endif
-
-/*-------------------------------------------------------------------------*/
-
-static int __init omap2_init_devices(void)
-{
-	/* Enable dummy states for those platforms without pinctrl support */
-	if (!of_have_populated_dt())
-		pinctrl_provide_dummies();
-
-	/* If dtb is there, the devices will be created dynamically */
-	if (!of_have_populated_dt()) {
-		/*
-		 * please keep these calls, and their implementations above,
-		 * in alphabetical order so they're easier to sort through.
-		 */
-		omap_init_mcspi();
-		omap_init_sham();
-		omap_init_aes();
-		omap_init_rng();
-	}
-	omap_init_sti();
-
-	return 0;
-}
-omap_arch_initcall(omap2_init_devices);
-
-static int __init omap_gpmc_init(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-	char *oh_name = "gpmc";
-
-	/*
-	 * if the board boots up with a populated DT, do not
-	 * manually add the device from this initcall
-	 */
-	if (of_have_populated_dt())
-		return -ENODEV;
-
-	oh = omap_hwmod_lookup(oh_name);
-	if (!oh) {
-		pr_err("Could not look up %s\n", oh_name);
-		return -ENODEV;
-	}
-
-	pdev = omap_device_build("omap-gpmc", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
-
-	return PTR_ERR_OR_ZERO(pdev);
-}
-omap_postcore_initcall(omap_gpmc_init);
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e58c13a..0b77a01 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -249,6 +249,24 @@ static const struct dma_slave_map omap24xx_sdma_map[] = {
 	{ "omap_uart.2", "rx", SDMA_FILTER_PARAM(54) },
 	{ "omap_hsmmc.0", "tx", SDMA_FILTER_PARAM(61) },
 	{ "omap_hsmmc.0", "rx", SDMA_FILTER_PARAM(62) },
+
+	/* external DMA requests when tusb6010 is used */
+	{ "musb-tusb", "dmareq0", SDMA_FILTER_PARAM(2) },
+	{ "musb-tusb", "dmareq1", SDMA_FILTER_PARAM(3) },
+	{ "musb-tusb", "dmareq2", SDMA_FILTER_PARAM(14) }, /* OMAP2420 only */
+	{ "musb-tusb", "dmareq3", SDMA_FILTER_PARAM(15) }, /* OMAP2420 only */
+	{ "musb-tusb", "dmareq4", SDMA_FILTER_PARAM(16) }, /* OMAP2420 only */
+	{ "musb-tusb", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */
+};
+
+static const struct dma_slave_map omap24xx_sdma_dt_map[] = {
+	/* external DMA requests when tusb6010 is used */
+	{ "musb-hdrc.1.auto", "dmareq0", SDMA_FILTER_PARAM(2) },
+	{ "musb-hdrc.1.auto", "dmareq1", SDMA_FILTER_PARAM(3) },
+	{ "musb-hdrc.1.auto", "dmareq2", SDMA_FILTER_PARAM(14) }, /* OMAP2420 only */
+	{ "musb-hdrc.1.auto", "dmareq3", SDMA_FILTER_PARAM(15) }, /* OMAP2420 only */
+	{ "musb-hdrc.1.auto", "dmareq4", SDMA_FILTER_PARAM(16) }, /* OMAP2420 only */
+	{ "musb-hdrc.1.auto", "dmareq5", SDMA_FILTER_PARAM(64) }, /* OMAP2420 only */
 };
 
 static const struct dma_slave_map omap3xxx_sdma_map[] = {
@@ -346,6 +364,12 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
 			       __func__);
 			return -ENODEV;
 		}
+	} else {
+		if (soc_is_omap24xx()) {
+			/* DMA slave map for drivers not yet converted to DT */
+			p.slave_map = omap24xx_sdma_dt_map;
+			p.slavecnt = ARRAY_SIZE(omap24xx_sdma_dt_map);
+		}
 	}
 
 	pdev = omap_device_build(name, 0, oh, &p, sizeof(p));
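A minimal sketch of how the slave-map tables above get consumed (not part of this patch; the tusb_request_dma() helper name is hypothetical): the dmaengine core matches the requesting device name and channel name against the registered dma_slave_map and hands back the SDMA request line, so "dmareq0" resolves to SDMA request 2.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical client-side helper, illustrative only. */
static int tusb_request_dma(struct device *dev)
{
	struct dma_chan *chan;

	/* "dmareq0" is routed to SDMA request line 2 by the map above */
	chan = dma_request_chan(dev, "dmareq0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... set up a slave config and issue transfers here ... */

	dma_release_channel(chan);
	return 0;
}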
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index cb754c4..be517b0 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -153,7 +153,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 
 	hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL);
 	if (!hc_name) {
-		pr_err("Cannot allocate memory for controller slot name\n");
 		kfree(hc_name);
 		return -ENOMEM;
 	}
@@ -315,10 +314,8 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
 	int res;
 
 	mmc_data = kzalloc(sizeof(*mmc_data), GFP_KERNEL);
-	if (!mmc_data) {
-		pr_err("Cannot allocate memory for mmc device!\n");
+	if (!mmc_data)
 		return;
-	}
 
 	res = omap_hsmmc_pdata_init(hsmmcinfo, mmc_data);
 	if (res < 0)
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 5aafb84..1d739d1 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -493,67 +493,39 @@ void __init omap3_init_early(void)
 	omap2_set_globals_tap(OMAP343X_CLASS, OMAP2_L4_IO_ADDRESS(0x4830A000));
 	omap2_set_globals_sdrc(OMAP2_L3_IO_ADDRESS(OMAP343X_SDRC_BASE),
 			       OMAP2_L3_IO_ADDRESS(OMAP343X_SMS_BASE));
-	/* XXX: remove these once OMAP3 is DT only */
-	if (!of_have_populated_dt()) {
-		omap2_set_globals_control(
-			OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE));
-		omap2_set_globals_prm(OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE));
-		omap2_set_globals_cm(OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE),
-				     NULL);
-	}
 	omap2_control_base_init();
 	omap3xxx_check_revision();
 	omap3xxx_check_features();
 	omap2_prcm_base_init();
-	/* XXX: remove these once OMAP3 is DT only */
-	if (!of_have_populated_dt()) {
-		omap3xxx_prm_init(NULL);
-		omap3xxx_cm_init(NULL);
-	}
 	omap3xxx_voltagedomains_init();
 	omap3xxx_powerdomains_init();
 	omap3xxx_clockdomains_init();
 	omap3xxx_hwmod_init();
 	omap_hwmod_init_postsetup();
-	if (!of_have_populated_dt()) {
-		omap3_control_legacy_iomap_init();
-		if (soc_is_am35xx())
-			omap_clk_soc_init = am35xx_clk_legacy_init;
-		else if (cpu_is_omap3630())
-			omap_clk_soc_init = omap36xx_clk_legacy_init;
-		else if (omap_rev() == OMAP3430_REV_ES1_0)
-			omap_clk_soc_init = omap3430es1_clk_legacy_init;
-		else
-			omap_clk_soc_init = omap3430_clk_legacy_init;
-	}
 }
 
 void __init omap3430_init_early(void)
 {
 	omap3_init_early();
-	if (of_have_populated_dt())
-		omap_clk_soc_init = omap3430_dt_clk_init;
+	omap_clk_soc_init = omap3430_dt_clk_init;
 }
 
 void __init omap35xx_init_early(void)
 {
 	omap3_init_early();
-	if (of_have_populated_dt())
-		omap_clk_soc_init = omap3430_dt_clk_init;
+	omap_clk_soc_init = omap3430_dt_clk_init;
 }
 
 void __init omap3630_init_early(void)
 {
 	omap3_init_early();
-	if (of_have_populated_dt())
-		omap_clk_soc_init = omap3630_dt_clk_init;
+	omap_clk_soc_init = omap3630_dt_clk_init;
 }
 
 void __init am35xx_init_early(void)
 {
 	omap3_init_early();
-	if (of_have_populated_dt())
-		omap_clk_soc_init = am35xx_dt_clk_init;
+	omap_clk_soc_init = am35xx_dt_clk_init;
 }
 
 void __init omap3_init_late(void)
@@ -628,8 +600,7 @@ void __init ti816x_init_early(void)
 	ti816x_clockdomains_init();
 	dm816x_hwmod_init();
 	omap_hwmod_init_postsetup();
-	if (of_have_populated_dt())
-		omap_clk_soc_init = dm816x_dt_clk_init;
+	omap_clk_soc_init = dm816x_dt_clk_init;
 }
 #endif
 
@@ -785,21 +756,19 @@ int __init omap_clk_init(void)
 
 	omap2_clk_setup_ll_ops();
 
-	if (of_have_populated_dt()) {
-		ret = omap_control_init();
-		if (ret)
-			return ret;
+	ret = omap_control_init();
+	if (ret)
+		return ret;
 
-		ret = omap_prcm_init();
-		if (ret)
-			return ret;
+	ret = omap_prcm_init();
+	if (ret)
+		return ret;
 
-		of_clk_init(NULL);
+	of_clk_init(NULL);
 
-		ti_dt_clk_init_retry_clks();
+	ti_dt_clk_init_retry_clks();
 
-		ti_dt_clockdomains_setup();
-	}
+	ti_dt_clockdomains_setup();
 
 	ret = omap_clk_soc_init();
 
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index fc04be7..4acc0da 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -53,73 +53,3 @@ void __init omap3_mcbsp_init_pdata_callback(
 
 	pdata->force_ick_on = omap3_mcbsp_force_ick_on;
 }
-
-static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
-{
-	int id, count = 1;
-	char *name = "omap-mcbsp";
-	struct omap_hwmod *oh_device[2];
-	struct omap_mcbsp_platform_data *pdata = NULL;
-	struct platform_device *pdev;
-
-	sscanf(oh->name, "mcbsp%d", &id);
-
-	pdata = kzalloc(sizeof(struct omap_mcbsp_platform_data), GFP_KERNEL);
-	if (!pdata) {
-		pr_err("%s: No memory for mcbsp\n", __func__);
-		return -ENOMEM;
-	}
-
-	pdata->reg_step = 4;
-	if (oh->class->rev < MCBSP_CONFIG_TYPE2) {
-		pdata->reg_size = 2;
-	} else {
-		pdata->reg_size = 4;
-		pdata->has_ccr = true;
-	}
-
-	if (oh->class->rev == MCBSP_CONFIG_TYPE2) {
-		/* The FIFO has 128 locations */
-		pdata->buffer_size = 0x80;
-	} else if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
-		if (id == 2)
-			/* The FIFO has 1024 + 256 locations */
-			pdata->buffer_size = 0x500;
-		else
-			/* The FIFO has 128 locations */
-			pdata->buffer_size = 0x80;
-	} else if (oh->class->rev == MCBSP_CONFIG_TYPE4) {
-		/* The FIFO has 128 locations for all instances */
-		pdata->buffer_size = 0x80;
-	}
-
-	if (oh->class->rev >= MCBSP_CONFIG_TYPE3)
-		pdata->has_wakeup = true;
-
-	oh_device[0] = oh;
-
-	if (oh->dev_attr) {
-		oh_device[1] = omap_hwmod_lookup((
-		(struct omap_mcbsp_dev_attr *)(oh->dev_attr))->sidetone);
-		pdata->force_ick_on = omap3_mcbsp_force_ick_on;
-		count++;
-	}
-	pdev = omap_device_build_ss(name, id, oh_device, count, pdata,
-				    sizeof(*pdata));
-	kfree(pdata);
-	if (IS_ERR(pdev))  {
-		pr_err("%s: Can't build omap_device for %s:%s.\n", __func__,
-					name, oh->name);
-		return PTR_ERR(pdev);
-	}
-	return 0;
-}
-
-static int __init omap2_mcbsp_init(void)
-{
-	if (!of_have_populated_dt())
-		omap_hwmod_for_each_by_class("mcbsp", omap_init_mcbsp, NULL);
-
-	return 0;
-}
-omap_arch_initcall(omap2_mcbsp_init);
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 369f95a..33ed5d5 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -58,6 +58,17 @@ static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
 static unsigned int max_irqs = DEFAULT_IRQS;
 static unsigned int omap_secure_apis;
 
+#ifdef CONFIG_CPU_PM
+static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
+#endif
+
+struct omap_wakeupgen_ops {
+	void (*save_context)(void);
+	void (*restore_context)(void);
+};
+
+static struct omap_wakeupgen_ops *wakeupgen_ops;
+
 /*
  * Static helper functions.
  */
@@ -264,6 +275,16 @@ static inline void omap5_irq_save_context(void)
 
 }
 
+static inline void am43xx_irq_save_context(void)
+{
+	u32 i;
+
+	for (i = 0; i < irq_banks; i++) {
+		wakeupgen_context[i] = wakeupgen_readl(i, 0);
+		wakeupgen_writel(0, i, CPU0_ID);
+	}
+}
+
 /*
  * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
  * ROM code. WakeupGen IP is integrated along with GIC to manage the
@@ -280,11 +301,8 @@ static void irq_save_context(void)
 
 	if (!sar_base)
 		sar_base = omap4_get_sar_ram_base();
-
-	if (soc_is_omap54xx())
-		omap5_irq_save_context();
-	else
-		omap4_irq_save_context();
+	if (wakeupgen_ops && wakeupgen_ops->save_context)
+		wakeupgen_ops->save_context();
 }
 
 /*
@@ -306,6 +324,20 @@ static void irq_sar_clear(void)
 	writel_relaxed(val, sar_base + offset);
 }
 
+static void am43xx_irq_restore_context(void)
+{
+	u32 i;
+
+	for (i = 0; i < irq_banks; i++)
+		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
+}
+
+static void irq_restore_context(void)
+{
+	if (wakeupgen_ops && wakeupgen_ops->restore_context)
+		wakeupgen_ops->restore_context();
+}
+
 /*
  * Save GIC and Wakeupgen interrupt context using secure API
  * for HS/EMU devices.
@@ -319,6 +351,26 @@ static void irq_save_secure_context(void)
 	if (ret != API_HAL_RET_VALUE_OK)
 		pr_err("GIC and Wakeupgen context save failed\n");
 }
+
+/* Define ops for context save and restore for each SoC */
+static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
+	.save_context = omap4_irq_save_context,
+	.restore_context = irq_sar_clear,
+};
+
+static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
+	.save_context = omap5_irq_save_context,
+	.restore_context = irq_sar_clear,
+};
+
+static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
+	.save_context = am43xx_irq_save_context,
+	.restore_context = am43xx_irq_restore_context,
+};
+#else
+static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
+static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
+static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -359,7 +411,7 @@ static int irq_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
 		break;
 	case CPU_CLUSTER_PM_EXIT:
 		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
-			irq_sar_clear();
+			irq_restore_context();
 		break;
 	}
 	return NOTIFY_OK;
@@ -494,9 +546,13 @@ static int __init wakeupgen_init(struct device_node *node,
 		irq_banks = OMAP4_NR_BANKS;
 		max_irqs = OMAP4_NR_IRQS;
 		omap_secure_apis = 1;
+		wakeupgen_ops = &omap4_wakeupgen_ops;
+	} else if (soc_is_omap54xx()) {
+		wakeupgen_ops = &omap5_wakeupgen_ops;
 	} else if (soc_is_am43xx()) {
 		irq_banks = AM43XX_NR_REG_BANKS;
 		max_irqs = AM43XX_IRQS;
+		wakeupgen_ops = &am43xx_wakeupgen_ops;
 	}
 
 	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
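The omap_wakeupgen_ops table above decouples the CPU_CLUSTER_PM save/restore path from explicit SoC checks; a minimal sketch of how a further SoC would plug in (the newsoc_* names are hypothetical, not part of this patch):

/*
 * Illustrative only: a new SoC provides its own save/restore pair and
 * selects it in wakeupgen_init(), leaving irq_save_context() and
 * irq_restore_context() untouched.
 */
static void newsoc_irq_save_context(void)
{
	/* save the WakeupGen bank registers to SAR RAM or a static buffer */
}

static void newsoc_irq_restore_context(void)
{
	/* restore the WakeupGen bank registers on CPU_CLUSTER_PM_EXIT */
}

static struct omap_wakeupgen_ops newsoc_wakeupgen_ops = {
	.save_context		= newsoc_irq_save_context,
	.restore_context	= newsoc_irq_restore_context,
};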
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f989145..ef9ffb8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -65,7 +65,7 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias,
 
 	r = clk_get_sys(NULL, clk_name);
 
-	if (IS_ERR(r) && of_have_populated_dt()) {
+	if (IS_ERR(r)) {
 		struct of_phandle_args clkspec;
 
 		clkspec.np = of_find_node_by_name(NULL, clk_name);
@@ -953,9 +953,6 @@ static int __init omap_device_late_init(void)
 {
 	bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
 
-	WARN(!of_have_populated_dt(),
-		"legacy booting deprecated, please update to boot with .dts\n");
-
 	return 0;
 }
 omap_late_initcall_sync(omap_device_late_init);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 8bcea0d..3b47ded 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -141,6 +141,7 @@
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/bootmem.h>
 
 #include <asm/system_misc.h>
 
@@ -182,6 +183,24 @@
 #define MOD_CLK_MAX_NAME_LEN		32
 
 /**
+ * struct clkctrl_provider - clkctrl provider mapping data
+ * @addr: base address for the provider
+ * @offset: base offset for the provider
+ * @clkdm: base clockdomain for provider
+ * @node: device node associated with the provider
+ * @link: list link
+ */
+struct clkctrl_provider {
+	u32			addr;
+	u16			offset;
+	struct clockdomain	*clkdm;
+	struct device_node	*node;
+	struct list_head	link;
+};
+
+static LIST_HEAD(clkctrl_providers);
+
+/**
  * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
  * @enable_module: function to enable a module (via MODULEMODE)
  * @disable_module: function to disable a module (via MODULEMODE)
@@ -204,6 +223,8 @@ struct omap_hwmod_soc_ops {
 	void (*update_context_lost)(struct omap_hwmod *oh);
 	int (*get_context_lost)(struct omap_hwmod *oh);
 	int (*disable_direct_prcm)(struct omap_hwmod *oh);
+	u32 (*xlate_clkctrl)(struct omap_hwmod *oh,
+			     struct clkctrl_provider *provider);
 };
 
 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
@@ -690,6 +711,103 @@ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
 	return clkdm_del_sleepdep(clkdm, init_clkdm);
 }
 
+static const struct of_device_id ti_clkctrl_match_table[] __initconst = {
+	{ .compatible = "ti,clkctrl" },
+	{ }
+};
+
+static int _match_clkdm(struct clockdomain *clkdm, void *user)
+{
+	struct clkctrl_provider *provider = user;
+
+	if (clkdm_xlate_address(clkdm) == provider->addr) {
+		pr_debug("%s: Matched clkdm %s for addr %x (%s)\n", __func__,
+			 clkdm->name, provider->addr,
+			 provider->node->parent->name);
+		provider->clkdm = clkdm;
+
+		return -1;
+	}
+
+	return 0;
+}
+
+static int _setup_clkctrl_provider(struct device_node *np)
+{
+	const __be32 *addrp;
+	struct clkctrl_provider *provider;
+
+	provider = memblock_virt_alloc(sizeof(*provider), 0);
+	if (!provider)
+		return -ENOMEM;
+
+	addrp = of_get_address(np, 0, NULL, NULL);
+	provider->addr = (u32)of_translate_address(np, addrp);
+	provider->offset = provider->addr & 0xff;
+	provider->addr &= ~0xff;
+	provider->node = np;
+
+	clkdm_for_each(_match_clkdm, provider);
+
+	if (!provider->clkdm) {
+		pr_err("%s: nothing matched for node %s (%x)\n",
+		       __func__, np->parent->name, provider->addr);
+		memblock_free_early(__pa(provider), sizeof(*provider));
+		return -EINVAL;
+	}
+
+	list_add(&provider->link, &clkctrl_providers);
+
+	return 0;
+}
+
+static int _init_clkctrl_providers(void)
+{
+	struct device_node *np;
+	int ret = 0;
+
+	for_each_matching_node(np, ti_clkctrl_match_table) {
+		ret = _setup_clkctrl_provider(np);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static u32 _omap4_xlate_clkctrl(struct omap_hwmod *oh,
+				struct clkctrl_provider *provider)
+{
+	return oh->prcm.omap4.clkctrl_offs -
+	       provider->offset - provider->clkdm->clkdm_offs;
+}
+
+static struct clk *_lookup_clkctrl_clk(struct omap_hwmod *oh)
+{
+	struct clkctrl_provider *provider;
+	struct clk *clk;
+
+	if (!soc_ops.xlate_clkctrl)
+		return NULL;
+
+	list_for_each_entry(provider, &clkctrl_providers, link) {
+		if (provider->clkdm == oh->clkdm) {
+			struct of_phandle_args clkspec;
+
+			clkspec.np = provider->node;
+			clkspec.args_count = 2;
+			clkspec.args[0] = soc_ops.xlate_clkctrl(oh, provider);
+			clkspec.args[1] = 0;
+
+			clk = of_clk_get_from_provider(&clkspec);
+
+			return clk;
+		}
+	}
+
+	return NULL;
+}
+
 /**
  * _init_main_clk - get a struct clk * for the the hwmod's main functional clk
  * @oh: struct omap_hwmod *
@@ -701,22 +819,16 @@ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
 static int _init_main_clk(struct omap_hwmod *oh)
 {
 	int ret = 0;
-	char name[MOD_CLK_MAX_NAME_LEN];
-	struct clk *clk;
-	static const char modck[] = "_mod_ck";
+	struct clk *clk = NULL;
 
-	if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
-		pr_warn("%s: warning: cropping name for %s\n", __func__,
-			oh->name);
+	clk = _lookup_clkctrl_clk(oh);
 
-	strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
-	strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
-
-	clk = clk_get(NULL, name);
-	if (!IS_ERR(clk)) {
+	if (!IS_ERR_OR_NULL(clk)) {
+		pr_debug("%s: mapped main_clk %s for %s\n", __func__,
+			 __clk_get_name(clk), oh->name);
+		oh->main_clk = __clk_get_name(clk);
 		oh->_clk = clk;
 		soc_ops.disable_direct_prcm(oh);
-		oh->main_clk = kstrdup(name, GFP_KERNEL);
 	} else {
 		if (!oh->main_clk)
 			return 0;
@@ -1482,13 +1594,13 @@ static int _init_clkdm(struct omap_hwmod *oh)
  * _init_clocks - clk_get() all clocks associated with this hwmod. Retrieve as
  * well the clockdomain.
  * @oh: struct omap_hwmod *
- * @data: not used; pass NULL
+ * @np: device_node mapped to this hwmod
  *
  * Called by omap_hwmod_setup_*() (after omap2_clk_init()).
  * Resolves all clock names embedded in the hwmod.  Returns 0 on
  * success, or a negative error code on failure.
  */
-static int _init_clocks(struct omap_hwmod *oh, void *data)
+static int _init_clocks(struct omap_hwmod *oh, struct device_node *np)
 {
 	int ret = 0;
 
@@ -2334,24 +2446,21 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 {
 	int r, index;
 	struct device_node *np = NULL;
+	struct device_node *bus;
 
 	if (oh->_state != _HWMOD_STATE_REGISTERED)
 		return 0;
 
-	if (of_have_populated_dt()) {
-		struct device_node *bus;
+	bus = of_find_node_by_name(NULL, "ocp");
+	if (!bus)
+		return -ENODEV;
 
-		bus = of_find_node_by_name(NULL, "ocp");
-		if (!bus)
-			return -ENODEV;
-
-		r = of_dev_hwmod_lookup(bus, oh, &index, &np);
-		if (r)
-			pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
-		else if (np && index)
-			pr_warn("omap_hwmod: %s using broken dt data from %s\n",
-				oh->name, np->name);
-	}
+	r = of_dev_hwmod_lookup(bus, oh, &index, &np);
+	if (r)
+		pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
+	else if (np && index)
+		pr_warn("omap_hwmod: %s using broken dt data from %s\n",
+			oh->name, np->name);
 
 	r = _init_mpu_rt_base(oh, NULL, index, np);
 	if (r < 0) {
@@ -2360,7 +2469,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
 		return 0;
 	}
 
-	r = _init_clocks(oh, NULL);
+	r = _init_clocks(oh, np);
 	if (r < 0) {
 		WARN(1, "omap_hwmod: %s: couldn't init clocks\n", oh->name);
 		return -EINVAL;
@@ -3722,6 +3831,7 @@ void __init omap_hwmod_init(void)
 		soc_ops.update_context_lost = _omap4_update_context_lost;
 		soc_ops.get_context_lost = _omap4_get_context_lost;
 		soc_ops.disable_direct_prcm = _omap4_disable_direct_prcm;
+		soc_ops.xlate_clkctrl = _omap4_xlate_clkctrl;
 	} else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() ||
 		   soc_is_am43xx()) {
 		soc_ops.enable_module = _omap4_enable_module;
@@ -3736,6 +3846,8 @@ void __init omap_hwmod_init(void)
 		WARN(1, "omap_hwmod: unknown SoC type\n");
 	}
 
+	_init_clkctrl_providers();
+
 	inited = true;
 }
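The clkctrl provider lookup added above turns a hwmod's clkctrl_offs into a two-cell clock specifier on the matching "ti,clkctrl" node; a minimal sketch of that translation, assuming a register offset of 0x20 (illustrative only, not part of this patch):

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>

/* Illustrative only: resolves the clock <&provider 0x20 0>. */
static struct clk *example_get_clkctrl_clk(struct device_node *provider_np)
{
	struct of_phandle_args clkspec = {
		.np		= provider_np,	/* the ti,clkctrl node */
		.args_count	= 2,
		.args		= { 0x20, 0 },	/* register offset, clock index */
	};

	return of_clk_get_from_provider(&clkspec);
}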
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1c6ca4d..c327643 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3204,8 +3204,7 @@ int __init omap3xxx_hwmod_init(void)
 	 * If DT information is missing, enable them only for GP devices.
 	 */
 
-	if (of_have_populated_dt())
-		bus = of_find_node_by_name(NULL, "ocp");
+	bus = of_find_node_by_name(NULL, "ocp");
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 94f09c7..3e2d792 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -775,6 +775,7 @@ static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = {
 
 static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = {
 	{ .role = "sys_clk", .clk = "dss_sys_clk" },
+	{ .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
 };
 
 static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
@@ -785,7 +786,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
 	 * HDMI audio requires no-idle mode. Hence, set
 	 * the idle mode by software.
 	 */
-	.flags		= HWMOD_SWSUP_SIDLE,
+	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_OPT_CLKS_NEEDED,
 	.mpu_irqs	= omap44xx_dss_hdmi_irqs,
 	.xlate_irq	= omap4_xlate_irq,
 	.sdma_reqs	= omap44xx_dss_hdmi_sdma_reqs,
@@ -858,11 +859,16 @@ static struct omap_hwmod_class omap44xx_venc_hwmod_class = {
 };
 
 /* dss_venc */
+static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
+	{ .role = "tv_clk", .clk = "dss_tv_clk" },
+};
+
 static struct omap_hwmod omap44xx_dss_venc_hwmod = {
 	.name		= "dss_venc",
 	.class		= &omap44xx_venc_hwmod_class,
 	.clkdm_name	= "l3_dss_clkdm",
 	.main_clk	= "dss_tv_clk",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
@@ -870,6 +876,35 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
 		},
 	},
 	.parent_hwmod	= &omap44xx_dss_hwmod,
+	.opt_clks	= dss_venc_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(dss_venc_opt_clks),
+};
+
+/* sha0 HIB2 (the 'P' (public) device) */
+static struct omap_hwmod_class_sysconfig omap44xx_sha0_sysc = {
+	.rev_offs	= 0x100,
+	.sysc_offs	= 0x110,
+	.syss_offs	= 0x114,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class omap44xx_sha0_hwmod_class = {
+	.name		= "sham",
+	.sysc		= &omap44xx_sha0_sysc,
+};
+
+struct omap_hwmod omap44xx_sha0_hwmod = {
+	.name		= "sham",
+	.class		= &omap44xx_sha0_hwmod_class,
+	.clkdm_name	= "l4_secure_clkdm",
+	.main_clk	= "l3_div_ck",
+	.prcm		= {
+		.omap4 = {
+			.clkctrl_offs = OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET,
+			.context_offs = OMAP4_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
 };
 
 /*
@@ -953,6 +988,103 @@ static struct omap_hwmod omap44xx_emif2_hwmod = {
 };
 
 /*
+ * Crypto modules AES0/1 belong to:
+ *	PD_L4_PER power domain
+ *	CD_L4_SEC clock domain
+ * On the L3, the AES modules are mapped to
+ *	L3_CLK2: peripherals and multimedia sub clock domain
+ */
+static struct omap_hwmod_class_sysconfig omap44xx_aes_sysc = {
+	.rev_offs	= 0x80,
+	.sysc_offs	= 0x84,
+	.syss_offs	= 0x88,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class omap44xx_aes_hwmod_class = {
+	.name		= "aes",
+	.sysc		= &omap44xx_aes_sysc,
+};
+
+static struct omap_hwmod omap44xx_aes1_hwmod = {
+	.name		= "aes1",
+	.class		= &omap44xx_aes_hwmod_class,
+	.clkdm_name	= "l4_secure_clkdm",
+	.main_clk	= "l3_div_ck",
+	.prcm		= {
+		.omap4	= {
+			.context_offs	= OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET,
+			.clkctrl_offs	= OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__aes1 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_aes1_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod omap44xx_aes2_hwmod = {
+	.name		= "aes2",
+	.class		= &omap44xx_aes_hwmod_class,
+	.clkdm_name	= "l4_secure_clkdm",
+	.main_clk	= "l3_div_ck",
+	.prcm		= {
+		.omap4	= {
+			.context_offs	= OMAP4_RM_L4SEC_AES2_CONTEXT_OFFSET,
+			.clkctrl_offs	= OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__aes2 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_aes2_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'des' class for DES3DES module
+ */
+static struct omap_hwmod_class_sysconfig omap44xx_des_sysc = {
+	.rev_offs	= 0x30,
+	.sysc_offs	= 0x34,
+	.syss_offs	= 0x38,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class omap44xx_des_hwmod_class = {
+	.name		= "des",
+	.sysc		= &omap44xx_des_sysc,
+};
+
+static struct omap_hwmod omap44xx_des_hwmod = {
+	.name		= "des",
+	.class		= &omap44xx_des_hwmod_class,
+	.clkdm_name	= "l4_secure_clkdm",
+	.main_clk	= "l3_div_ck",
+	.prcm		= {
+		.omap4	= {
+			.context_offs	= OMAP4_RM_L4SEC_DES3DES_CONTEXT_OFFSET,
+			.clkctrl_offs	= OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+struct omap_hwmod_ocp_if omap44xx_l3_main_2__des = {
+	.master		= &omap44xx_l3_main_2_hwmod,
+	.slave		= &omap44xx_des_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
  * 'fdif' class
  * face detection hw accelerator module
  */
@@ -3882,6 +4014,14 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
 	.user		= OCP_USER_MPU,
 };
 
+/* l3_main_2 -> sham */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sha0 = {
+	.master		= &omap44xx_l3_main_2_hwmod,
+	.slave		= &omap44xx_sha0_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per -> elm */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__elm = {
 	.master		= &omap44xx_l4_per_hwmod,
@@ -4793,6 +4933,10 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_abe__wd_timer3_dma,
 	&omap44xx_mpu__emif1,
 	&omap44xx_mpu__emif2,
+	&omap44xx_l3_main_2__aes1,
+	&omap44xx_l3_main_2__aes2,
+	&omap44xx_l3_main_2__des,
+	&omap44xx_l3_main_2__sha0,
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
deleted file mode 100644
index a358a07..0000000
--- a/arch/arm/mach-omap2/opp.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * OMAP SoC specific OPP wrapper function
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated - http://www.ti.com/
- *	Nishanth Menon
- *	Kevin Hilman
- * Copyright (C) 2010 Nokia Corporation.
- *      Eduardo Valentin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/pm_opp.h>
-#include <linux/cpu.h>
-
-#include "omap_device.h"
-
-#include "omap_opp_data.h"
-
-/* Temp variable to allow multiple calls */
-static u8 __initdata omap_table_init;
-
-/**
- * omap_init_opp_table() - Initialize opp table as per the CPU type
- * @opp_def:		opp default list for this silicon
- * @opp_def_size:	number of opp entries for this silicon
- *
- * Register the initial OPP table with the OPP library based on the CPU
- * type. This is meant to be used only by SoC specific registration.
- */
-int __init omap_init_opp_table(struct omap_opp_def *opp_def,
-		u32 opp_def_size)
-{
-	int i, r;
-
-	if (of_have_populated_dt())
-		return -EINVAL;
-
-	if (!opp_def || !opp_def_size) {
-		pr_err("%s: invalid params!\n", __func__);
-		return -EINVAL;
-	}
-
-	/*
-	 * Initialize only if not already initialized, even if the previous
-	 * call failed; there is no reason we'd succeed this time.
-	 */
-	if (omap_table_init)
-		return -EEXIST;
-	omap_table_init = 1;
-
-	/* Lets now register with OPP library */
-	for (i = 0; i < opp_def_size; i++, opp_def++) {
-		struct omap_hwmod *oh;
-		struct device *dev;
-
-		if (!opp_def->hwmod_name) {
-			pr_err("%s: NULL name of omap_hwmod, failing [%d].\n",
-				__func__, i);
-			return -EINVAL;
-		}
-
-		if (!strncmp(opp_def->hwmod_name, "mpu", 3)) {
-			/* 
-			 * All current OMAPs share voltage rail and
-			 * clock source, so CPU0 is used to represent
-			 * the MPU-SS.
-			 */
-			dev = get_cpu_device(0);
-		} else {
-			oh = omap_hwmod_lookup(opp_def->hwmod_name);
-			if (!oh || !oh->od) {
-				pr_debug("%s: no hwmod or odev for %s, [%d] cannot add OPPs.\n",
-					 __func__, opp_def->hwmod_name, i);
-				continue;
-			}
-			dev = &oh->od->pdev->dev;
-		}
-
-		r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
-		if (r) {
-			dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
-				__func__, opp_def->freq,
-				opp_def->hwmod_name, i, r);
-		} else {
-			if (!opp_def->default_available)
-				r = dev_pm_opp_disable(dev, opp_def->freq);
-			if (r)
-				dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
-					__func__, opp_def->freq,
-					opp_def->hwmod_name, i, r);
-		}
-	}
-
-	return 0;
-}
diff --git a/arch/arm/mach-omap2/opp3xxx_data.c b/arch/arm/mach-omap2/opp3xxx_data.c
index fc67add..c2d459f 100644
--- a/arch/arm/mach-omap2/opp3xxx_data.c
+++ b/arch/arm/mach-omap2/opp3xxx_data.c
@@ -83,89 +83,3 @@ struct omap_volt_data omap36xx_vddcore_volt_data[] = {
 	VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD2, 0xf9, 0x16),
 	VOLT_DATA_DEFINE(0, 0, 0, 0),
 };
-
-/* OPP data */
-
-static struct omap_opp_def __initdata omap34xx_opp_def_list[] = {
-	/* MPU OPP1 */
-	OPP_INITIALIZER("mpu", true, 125000000, OMAP3430_VDD_MPU_OPP1_UV),
-	/* MPU OPP2 */
-	OPP_INITIALIZER("mpu", true, 250000000, OMAP3430_VDD_MPU_OPP2_UV),
-	/* MPU OPP3 */
-	OPP_INITIALIZER("mpu", true, 500000000, OMAP3430_VDD_MPU_OPP3_UV),
-	/* MPU OPP4 */
-	OPP_INITIALIZER("mpu", true, 550000000, OMAP3430_VDD_MPU_OPP4_UV),
-	/* MPU OPP5 */
-	OPP_INITIALIZER("mpu", true, 600000000, OMAP3430_VDD_MPU_OPP5_UV),
-
-	/*
-	 * L3 OPP1 - 41.5 MHz is disabled because the voltage for that OPP is
-	 * almost the same as the one at 83 MHz, providing very little gain
-	 * from the power point of view. In terms of energy it would even
-	 * increase consumption, due to the very negative performance impact
-	 * that frequency has on the MPU and the whole system in general.
-	 */
-	OPP_INITIALIZER("l3_main", false, 41500000, OMAP3430_VDD_CORE_OPP1_UV),
-	/* L3 OPP2 */
-	OPP_INITIALIZER("l3_main", true, 83000000, OMAP3430_VDD_CORE_OPP2_UV),
-	/* L3 OPP3 */
-	OPP_INITIALIZER("l3_main", true, 166000000, OMAP3430_VDD_CORE_OPP3_UV),
-
-	/* DSP OPP1 */
-	OPP_INITIALIZER("iva", true, 90000000, OMAP3430_VDD_MPU_OPP1_UV),
-	/* DSP OPP2 */
-	OPP_INITIALIZER("iva", true, 180000000, OMAP3430_VDD_MPU_OPP2_UV),
-	/* DSP OPP3 */
-	OPP_INITIALIZER("iva", true, 360000000, OMAP3430_VDD_MPU_OPP3_UV),
-	/* DSP OPP4 */
-	OPP_INITIALIZER("iva", true, 400000000, OMAP3430_VDD_MPU_OPP4_UV),
-	/* DSP OPP5 */
-	OPP_INITIALIZER("iva", true, 430000000, OMAP3430_VDD_MPU_OPP5_UV),
-};
-
-static struct omap_opp_def __initdata omap36xx_opp_def_list[] = {
-	/* MPU OPP1 - OPP50 */
-	OPP_INITIALIZER("mpu", true,  300000000, OMAP3630_VDD_MPU_OPP50_UV),
-	/* MPU OPP2 - OPP100 */
-	OPP_INITIALIZER("mpu", true,  600000000, OMAP3630_VDD_MPU_OPP100_UV),
-	/* MPU OPP3 - OPP-Turbo */
-	OPP_INITIALIZER("mpu", false, 800000000, OMAP3630_VDD_MPU_OPP120_UV),
-	/* MPU OPP4 - OPP-SB */
-	OPP_INITIALIZER("mpu", false, 1000000000, OMAP3630_VDD_MPU_OPP1G_UV),
-
-	/* L3 OPP1 - OPP50 */
-	OPP_INITIALIZER("l3_main", true, 100000000, OMAP3630_VDD_CORE_OPP50_UV),
-	/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
-	OPP_INITIALIZER("l3_main", true, 200000000, OMAP3630_VDD_CORE_OPP100_UV),
-
-	/* DSP OPP1 - OPP50 */
-	OPP_INITIALIZER("iva", true,  260000000, OMAP3630_VDD_MPU_OPP50_UV),
-	/* DSP OPP2 - OPP100 */
-	OPP_INITIALIZER("iva", true,  520000000, OMAP3630_VDD_MPU_OPP100_UV),
-	/* DSP OPP3 - OPP-Turbo */
-	OPP_INITIALIZER("iva", false, 660000000, OMAP3630_VDD_MPU_OPP120_UV),
-	/* DSP OPP4 - OPP-SB */
-	OPP_INITIALIZER("iva", false, 800000000, OMAP3630_VDD_MPU_OPP1G_UV),
-};
-
-/**
- * omap3_opp_init() - initialize omap3 opp table
- */
-int __init omap3_opp_init(void)
-{
-	int r = -ENODEV;
-
-	if (!cpu_is_omap34xx())
-		return r;
-
-	if (cpu_is_omap3630())
-		r = omap_init_opp_table(omap36xx_opp_def_list,
-			ARRAY_SIZE(omap36xx_opp_def_list));
-	else
-		r = omap_init_opp_table(omap34xx_opp_def_list,
-			ARRAY_SIZE(omap34xx_opp_def_list));
-
-	return r;
-}
-omap_device_initcall(omap3_opp_init);
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 1ef7a3e..adea43e 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -63,29 +63,6 @@ struct omap_volt_data omap443x_vdd_core_volt_data[] = {
 	VOLT_DATA_DEFINE(0, 0, 0, 0),
 };
 
-
-static struct omap_opp_def __initdata omap443x_opp_def_list[] = {
-	/* MPU OPP1 - OPP50 */
-	OPP_INITIALIZER("mpu", true, 300000000, OMAP4430_VDD_MPU_OPP50_UV),
-	/* MPU OPP2 - OPP100 */
-	OPP_INITIALIZER("mpu", true, 600000000, OMAP4430_VDD_MPU_OPP100_UV),
-	/* MPU OPP3 - OPP-Turbo */
-	OPP_INITIALIZER("mpu", true, 800000000, OMAP4430_VDD_MPU_OPPTURBO_UV),
-	/* MPU OPP4 - OPP-SB */
-	OPP_INITIALIZER("mpu", true, 1008000000, OMAP4430_VDD_MPU_OPPNITRO_UV),
-	/* L3 OPP1 - OPP50 */
-	OPP_INITIALIZER("l3_main_1", true, 100000000, OMAP4430_VDD_CORE_OPP50_UV),
-	/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
-	OPP_INITIALIZER("l3_main_1", true, 200000000, OMAP4430_VDD_CORE_OPP100_UV),
-	/* IVA OPP1 - OPP50 */
-	OPP_INITIALIZER("iva", true, 133000000, OMAP4430_VDD_IVA_OPP50_UV),
-	/* IVA OPP2 - OPP100 */
-	OPP_INITIALIZER("iva", true, 266100000, OMAP4430_VDD_IVA_OPP100_UV),
-	/* IVA OPP3 - OPP-Turbo */
-	OPP_INITIALIZER("iva", false, 332000000, OMAP4430_VDD_IVA_OPPTURBO_UV),
-	/* TODO: add DSP, aess, fdif, gpu */
-};
-
 #define OMAP4460_VDD_MPU_OPP50_UV		1025000
 #define OMAP4460_VDD_MPU_OPP100_UV		1200000
 #define OMAP4460_VDD_MPU_OPPTURBO_UV		1313000
@@ -122,59 +99,3 @@ struct omap_volt_data omap446x_vdd_core_volt_data[] = {
 	VOLT_DATA_DEFINE(OMAP4460_VDD_CORE_OPP100_OV_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100OV, 0xf9, 0x16),
 	VOLT_DATA_DEFINE(0, 0, 0, 0),
 };
-
-static struct omap_opp_def __initdata omap446x_opp_def_list[] = {
-	/* MPU OPP1 - OPP50 */
-	OPP_INITIALIZER("mpu", true, 350000000, OMAP4460_VDD_MPU_OPP50_UV),
-	/* MPU OPP2 - OPP100 */
-	OPP_INITIALIZER("mpu", true, 700000000, OMAP4460_VDD_MPU_OPP100_UV),
-	/* MPU OPP3 - OPP-Turbo */
-	OPP_INITIALIZER("mpu", true, 920000000, OMAP4460_VDD_MPU_OPPTURBO_UV),
-	/*
-	 * MPU OPP4 - OPP-Nitro: disabled because the reference schematics
-	 * recommend the TPS623631 - confirm and enable the OPP in the board
-	 * file. XXX: Maybe we should enable these based on MPU capability
-	 * and have exception board files disable them...
-	 */
-	OPP_INITIALIZER("mpu", false, 1200000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
-	/* MPU OPP4 - OPP-Nitro SpeedBin */
-	OPP_INITIALIZER("mpu", false, 1500000000, OMAP4460_VDD_MPU_OPPNITRO_UV),
-	/* L3 OPP1 - OPP50 */
-	OPP_INITIALIZER("l3_main_1", true, 100000000, OMAP4460_VDD_CORE_OPP50_UV),
-	/* L3 OPP2 - OPP100 */
-	OPP_INITIALIZER("l3_main_1", true, 200000000, OMAP4460_VDD_CORE_OPP100_UV),
-	/* IVA OPP1 - OPP50 */
-	OPP_INITIALIZER("iva", true, 133000000, OMAP4460_VDD_IVA_OPP50_UV),
-	/* IVA OPP2 - OPP100 */
-	OPP_INITIALIZER("iva", true, 266100000, OMAP4460_VDD_IVA_OPP100_UV),
-	/*
-	 * IVA OPP3 - OPP-Turbo: disabled because the reference schematics
-	 * recommend Phoenix VCORE2, which can supply only 600 mA - so OPPs
-	 * above this frequency, even though OMAP is capable of them, should
-	 * be enabled by a board file that is sure of the chip power capability
-	 */
-	OPP_INITIALIZER("iva", false, 332000000, OMAP4460_VDD_IVA_OPPTURBO_UV),
-	/* IVA OPP4 - OPP-Nitro */
-	OPP_INITIALIZER("iva", false, 430000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
-	/* IVA OPP5 - OPP-Nitro SpeedBin*/
-	OPP_INITIALIZER("iva", false, 500000000, OMAP4460_VDD_IVA_OPPNITRO_UV),
-
-	/* TODO: add DSP, aess, fdif, gpu */
-};
-
-/**
- * omap4_opp_init() - initialize omap4 opp table
- */
-int __init omap4_opp_init(void)
-{
-	int r = -ENODEV;
-
-	if (cpu_is_omap443x())
-		r = omap_init_opp_table(omap443x_opp_def_list,
-			ARRAY_SIZE(omap443x_opp_def_list));
-	else if (cpu_is_omap446x())
-		r = omap_init_opp_table(omap446x_opp_def_list,
-			ARRAY_SIZE(omap446x_opp_def_list));
-	return r;
-}
-omap_device_initcall(omap4_opp_init);
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 63027e6..366158a 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -71,7 +71,7 @@ void omap_pm_get_oscillator(u32 *tstart, u32 *tshut)
 }
 #endif
 
-int __init omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
+int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
 {
 	clkdm_allow_idle(clkdm);
 	return 0;
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c
deleted file mode 100644
index d2adfeb..0000000
--- a/arch/arm/mach-omap2/pmu.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * OMAP2 ARM Performance Monitoring Unit (PMU) Support
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Contacts:
- * Jon Hunter <jon-hunter@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/of.h>
-
-#include <asm/system_info.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-
-static char *omap2_pmu_oh_names[] = {"mpu"};
-static char *omap3_pmu_oh_names[] = {"mpu", "debugss"};
-static char *omap4430_pmu_oh_names[] = {"l3_main_3", "l3_instr", "debugss"};
-static struct platform_device *omap_pmu_dev;
-
-/**
- * omap2_init_pmu - creates and registers PMU platform device
- * @oh_num:	Number of OMAP HWMODs required to create PMU device
- * @oh_names:	Array of OMAP HWMODS names required to create PMU device
- *
- * Uses OMAP HWMOD framework to create and register an ARM PMU device
- * from a list of HWMOD names passed. Currently supports OMAP2, OMAP3
- * and OMAP4 devices.
- */
-static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])
-{
-	int i;
-	struct omap_hwmod *oh[3];
-	char *dev_name = cpu_architecture() == CPU_ARCH_ARMv6 ?
-			 "armv6-pmu" : "armv7-pmu";
-
-	if ((!oh_num) || (oh_num > 3))
-		return -EINVAL;
-
-	for (i = 0; i < oh_num; i++) {
-		oh[i] = omap_hwmod_lookup(oh_names[i]);
-		if (!oh[i]) {
-			pr_err("Could not look up %s hwmod\n", oh_names[i]);
-			return -ENODEV;
-		}
-	}
-
-	omap_pmu_dev = omap_device_build_ss(dev_name, -1, oh, oh_num, NULL, 0);
-	WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n",
-	     dev_name);
-
-	return PTR_ERR_OR_ZERO(omap_pmu_dev);
-}
-
-static int __init omap_init_pmu(void)
-{
-	unsigned oh_num;
-	char **oh_names;
-
-	/* XXX Remove this check when the CTI driver is available */
-	if (cpu_is_omap443x()) {
-		pr_info("ARM PMU: not yet supported on OMAP4430 due to missing CTI driver\n");
-		return 0;
-	}
-
-	if (of_have_populated_dt())
-		return 0;
-
-	/*
-	 * To create an ARM-PMU device the following HWMODs
-	 * are required for the various OMAP2+ devices.
-	 *
-	 * OMAP24xx:	mpu
-	 * OMAP3xxx:	mpu, debugss
-	 * OMAP4430:	l3_main_3, l3_instr, debugss
-	 * OMAP4460/70:	mpu, debugss
-	 */
-	if (cpu_is_omap443x()) {
-		oh_num = ARRAY_SIZE(omap4430_pmu_oh_names);
-		oh_names = omap4430_pmu_oh_names;
-	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		oh_num = ARRAY_SIZE(omap3_pmu_oh_names);
-		oh_names = omap3_pmu_oh_names;
-	} else {
-		oh_num = ARRAY_SIZE(omap2_pmu_oh_names);
-		oh_names = omap2_pmu_oh_names;
-	}
-
-	return omap2_init_pmu(oh_num, oh_names);
-}
-omap_subsys_initcall(omap_init_pmu);
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index c8f590b..ee7041d 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -526,10 +526,16 @@ struct omap_prcm_irq_setup {
 	.priority = _priority				\
 	}
 
+struct omap_domain_base {
+	u32 pa;
+	void __iomem *va;
+};
+
 /**
  * struct omap_prcm_init_data - PRCM driver init data
  * @index: clock memory mapping index to be used
  * @mem: IO mem pointer for this module
+ * @phys: IO mem physical base address for this module
  * @offset: module base address offset from the IO base
  * @flags: PRCM module init flags
  * @device_inst_offset: device instance offset within the module address space
@@ -539,6 +545,7 @@ struct omap_prcm_irq_setup {
 struct omap_prcm_init_data {
 	int index;
 	void __iomem *mem;
+	u32 phys;
 	s16 offset;
 	u16 flags;
 	s32 device_inst_offset;
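struct omap_domain_base keeps both the physical and virtual base of a PRCM block so that register accessors and DT address matching share one bookkeeping structure; a minimal sketch of the two uses (the example_* names are illustrative, not from this series):

#include <linux/io.h>

/* Illustrative only: MMIO access keeps using the virtual address. */
static u32 example_prm_read(const struct omap_domain_base *base, u16 offset)
{
	return readl_relaxed(base->va + offset);
}

/* Illustrative only: matching against a DT "reg" address uses the physical one. */
static bool example_prm_matches(const struct omap_domain_base *base, u32 pa)
{
	return base->pa == pa;
}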
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.c b/arch/arm/mach-omap2/prcm_mpu44xx.c
index cdbee63..9c782f5 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.c
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.c
@@ -24,7 +24,7 @@
  * prcm_mpu_base: the virtual address of the start of the PRCM_MPU IP
  *   block registers
  */
-void __iomem *prcm_mpu_base;
+struct omap_domain_base prcm_mpu_base;
 
 /* PRCM_MPU low-level functions */
 
@@ -58,5 +58,5 @@ u32 omap4_prcm_mpu_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg)
  */
 void __init omap2_set_globals_prcm_mpu(void __iomem *prcm_mpu)
 {
-	prcm_mpu_base = prcm_mpu;
+	prcm_mpu_base.va = prcm_mpu;
 }
diff --git a/arch/arm/mach-omap2/prcm_mpu_44xx_54xx.h b/arch/arm/mach-omap2/prcm_mpu_44xx_54xx.h
index ca149e7..f565f7f 100644
--- a/arch/arm/mach-omap2/prcm_mpu_44xx_54xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu_44xx_54xx.h
@@ -24,7 +24,9 @@
 #define __ARCH_ARM_MACH_OMAP2_PRCM_MPU_44XX_54XX_H
 
 #ifndef __ASSEMBLER__
-extern void __iomem *prcm_mpu_base;
+#include "prcm-common.h"
+
+extern struct omap_domain_base prcm_mpu_base;
 
 extern u32 omap4_prcm_mpu_read_inst_reg(s16 inst, u16 idx);
 extern void omap4_prcm_mpu_write_inst_reg(u32 val, s16 inst, u16 idx);
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 233bc84..94dc356 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -16,7 +16,7 @@
 #include "prcm-common.h"
 
 # ifndef __ASSEMBLER__
-extern void __iomem *prm_base;
+extern struct omap_domain_base prm_base;
 extern u16 prm_features;
 extern void omap2_set_globals_prm(void __iomem *prm);
 int omap_prcm_init(void);
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index f57e29b..6775e10 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -55,12 +55,12 @@
 /* Power/reset management domain register get/set */
 static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
 {
-	return readl_relaxed(prm_base + module + idx);
+	return readl_relaxed(prm_base.va + module + idx);
 }
 
 static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
 {
-	writel_relaxed(val, prm_base + module + idx);
+	writel_relaxed(val, prm_base.va + module + idx);
 }
 
 /* Read-modify-write a register in a PRM module. Caller must lock */
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
index dcb5001..d2c5bca 100644
--- a/arch/arm/mach-omap2/prm33xx.c
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -30,13 +30,13 @@
 /* Read a register in a PRM instance */
 static u32 am33xx_prm_read_reg(s16 inst, u16 idx)
 {
-	return readl_relaxed(prm_base + inst + idx);
+	return readl_relaxed(prm_base.va + inst + idx);
 }
 
 /* Write into a register in a PRM instance */
 static void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx)
 {
-	writel_relaxed(val, prm_base + inst + idx);
+	writel_relaxed(val, prm_base.va + inst + idx);
 }
 
 /* Read-modify-write a register in PRM. Caller must lock */
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 718981b..382e236 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -676,7 +676,7 @@ static struct prm_ll_data omap3xxx_prm_ll_data = {
 int __init omap3xxx_prm_init(const struct omap_prcm_init_data *data)
 {
 	omap2_clk_legacy_provider_init(TI_CLKM_PRM,
-				       prm_base + OMAP3430_IVA2_MOD);
+				       prm_base.va + OMAP3430_IVA2_MOD);
 	if (omap3_has_io_wakeup())
 		prm_features |= PRM_HAS_IO_WAKEUP;
 
@@ -690,6 +690,8 @@ static const struct of_device_id omap3_prm_dt_match_table[] = {
 
 static int omap3xxx_prm_late_init(void)
 {
+	struct device_node *np;
+	int irq_num;
 	int ret;
 
 	if (!(prm_features & PRM_HAS_IO_WAKEUP))
@@ -702,16 +704,11 @@ static int omap3xxx_prm_late_init(void)
 		omap3_prcm_irq_setup.reconfigure_io_chain =
 			omap3430_pre_es3_1_reconfigure_io_chain;
 
-	if (of_have_populated_dt()) {
-		struct device_node *np;
-		int irq_num;
-
-		np = of_find_matching_node(NULL, omap3_prm_dt_match_table);
-		if (np) {
-			irq_num = of_irq_get(np, 0);
-			if (irq_num >= 0)
-				omap3_prcm_irq_setup.irq = irq_num;
-		}
+	np = of_find_matching_node(NULL, omap3_prm_dt_match_table);
+	if (np) {
+		irq_num = of_irq_get(np, 0);
+		if (irq_num >= 0)
+			omap3_prcm_irq_setup.irq = irq_num;
 	}
 
 	omap3xxx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 3076800..87e86a4 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -91,13 +91,13 @@ static struct prm_reset_src_map omap44xx_prm_reset_src_map[] = {
 /* Read a register in a CM/PRM instance in the PRM module */
 static u32 omap4_prm_read_inst_reg(s16 inst, u16 reg)
 {
-	return readl_relaxed(prm_base + inst + reg);
+	return readl_relaxed(prm_base.va + inst + reg);
 }
 
 /* Write into a register in a CM/PRM instance in the PRM module */
 static void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg)
 {
-	writel_relaxed(val, prm_base + inst + reg);
+	writel_relaxed(val, prm_base.va + inst + reg);
 }
 
 /* Read-modify-write a register in a PRM module. Caller must lock */
@@ -337,27 +337,6 @@ static void omap44xx_prm_reconfigure_io_chain(void)
 }
 
 /**
- * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
- *
- * Activates the I/O wakeup event latches and allows events logged by
- * those latches to signal a wakeup event to the PRCM.  For I/O wakeups
- * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
- * omap44xx_prm_reconfigure_io_chain() must be called.  No return value.
- */
-static void __init omap44xx_prm_enable_io_wakeup(void)
-{
-	s32 inst = omap4_prmst_get_prm_dev_inst();
-
-	if (inst == PRM_INSTANCE_UNKNOWN)
-		return;
-
-	omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK,
-				    OMAP4430_GLOBAL_WUEN_MASK,
-				    inst,
-				    omap4_prcm_irq_setup.pm_ctrl);
-}
-
-/**
  * omap44xx_prm_read_reset_sources - return the last SoC reset source
  *
  * Return a u32 representing the last reset sources of the SoC.  The
@@ -689,8 +668,6 @@ struct pwrdm_ops omap4_pwrdm_operations = {
 	.pwrdm_has_voltdm	= omap4_check_vcvp,
 };
 
-static int omap44xx_prm_late_init(void);
-
 /*
  * XXX document
  */
@@ -698,7 +675,6 @@ static struct prm_ll_data omap44xx_prm_ll_data = {
 	.read_reset_sources = &omap44xx_prm_read_reset_sources,
 	.was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old,
 	.clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old,
-	.late_init = &omap44xx_prm_late_init,
 	.assert_hardreset	= omap4_prminst_assert_hardreset,
 	.deassert_hardreset	= omap4_prminst_deassert_hardreset,
 	.is_hardreset_asserted	= omap4_prminst_is_hardreset_asserted,
@@ -735,41 +711,6 @@ int __init omap44xx_prm_init(const struct omap_prcm_init_data *data)
 	return prm_register(&omap44xx_prm_ll_data);
 }
 
-static int omap44xx_prm_late_init(void)
-{
-	int irq_num;
-
-	if (!(prm_features & PRM_HAS_IO_WAKEUP))
-		return 0;
-
-	/* OMAP4+ is DT only now */
-	if (!of_have_populated_dt())
-		return 0;
-
-	irq_num = of_irq_get(prm_init_data->np, 0);
-	/*
-	 * Already have OMAP4 IRQ num. For all other platforms, we need
-	 * IRQ numbers from DT
-	 */
-	if (irq_num < 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) {
-		if (irq_num == -EPROBE_DEFER)
-			return irq_num;
-
-		/* Have nothing to do */
-		return 0;
-	}
-
-	/* Once OMAP4 DT is filled as well */
-	if (irq_num >= 0) {
-		omap4_prcm_irq_setup.irq = irq_num;
-		omap4_prcm_irq_setup.xlate_irq = NULL;
-	}
-
-	omap44xx_prm_enable_io_wakeup();
-
-	return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
-}
-
 static void __exit omap44xx_prm_exit(void)
 {
 	prm_unregister(&omap44xx_prm_ll_data);
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index dc11841..09180a5 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -66,7 +66,7 @@ static struct irq_chip_generic **prcm_irq_chips;
 static struct omap_prcm_irq_setup *prcm_irq_setup;
 
 /* prm_base: base virtual address of the PRM IP block */
-void __iomem *prm_base;
+struct omap_domain_base prm_base;
 
 u16 prm_features;
 
@@ -267,10 +267,9 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
 {
 	int nr_regs;
 	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
-	int offset, i;
+	int offset, i, irq;
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
-	unsigned int irq;
 
 	if (!irq_setup)
 		return -EINVAL;
@@ -325,7 +324,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
 
 	for (i = 0; i < irq_setup->nr_regs; i++) {
 		gc = irq_alloc_generic_chip("PRCM", 1,
-			irq_setup->base_irq + i * 32, prm_base,
+			irq_setup->base_irq + i * 32, prm_base.va,
 			handle_level_irq);
 
 		if (!gc) {
@@ -344,10 +343,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
 		prcm_irq_chips[i] = gc;
 	}
 
-	if (of_have_populated_dt()) {
-		int irq = omap_prcm_event_to_irq("io");
-		omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);
-	}
+	irq = omap_prcm_event_to_irq("io");
+	omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);
 
 	return 0;
 
@@ -364,7 +361,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
  */
 void __init omap2_set_globals_prm(void __iomem *prm)
 {
-	prm_base = prm;
+	prm_base.va = prm;
 }
 
 /**
@@ -755,19 +752,22 @@ int __init omap2_prm_base_init(void)
 	struct device_node *np;
 	const struct of_device_id *match;
 	struct omap_prcm_init_data *data;
-	void __iomem *mem;
+	struct resource res;
+	int ret;
 
 	for_each_matching_node_and_match(np, omap_prcm_dt_match_table, &match) {
 		data = (struct omap_prcm_init_data *)match->data;
 
-		mem = of_iomap(np, 0);
-		if (!mem)
-			return -ENOMEM;
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret)
+			return ret;
 
-		if (data->index == TI_CLKM_PRM)
-			prm_base = mem + data->offset;
+		data->mem = ioremap(res.start, resource_size(&res));
 
-		data->mem = mem;
+		if (data->index == TI_CLKM_PRM) {
+			prm_base.va = data->mem + data->offset;
+			prm_base.pa = res.start + data->offset;
+		}
 
 		data->np = np;
 
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index d0b15db..48b8127 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -29,7 +29,7 @@
 #include "prcm_mpu44xx.h"
 #include "soc.h"
 
-static void __iomem *_prm_bases[OMAP4_MAX_PRCM_PARTITIONS];
+static struct omap_domain_base _prm_bases[OMAP4_MAX_PRCM_PARTITIONS];
 
 static s32 prm_dev_inst = PRM_INSTANCE_UNKNOWN;
 
@@ -41,8 +41,10 @@ static s32 prm_dev_inst = PRM_INSTANCE_UNKNOWN;
  */
 void omap_prm_base_init(void)
 {
-	_prm_bases[OMAP4430_PRM_PARTITION] = prm_base;
-	_prm_bases[OMAP4430_PRCM_MPU_PARTITION] = prcm_mpu_base;
+	memcpy(&_prm_bases[OMAP4430_PRM_PARTITION], &prm_base,
+	       sizeof(prm_base));
+	memcpy(&_prm_bases[OMAP4430_PRCM_MPU_PARTITION], &prcm_mpu_base,
+	       sizeof(prcm_mpu_base));
 }
 
 s32 omap4_prmst_get_prm_dev_inst(void)
@@ -60,8 +62,8 @@ u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx)
 {
 	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
 	       part == OMAP4430_INVALID_PRCM_PARTITION ||
-	       !_prm_bases[part]);
-	return readl_relaxed(_prm_bases[part] + inst + idx);
+	       !_prm_bases[part].va);
+	return readl_relaxed(_prm_bases[part].va + inst + idx);
 }
 
 /* Write into a register in a PRM instance */
@@ -69,8 +71,8 @@ void omap4_prminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
 {
 	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
 	       part == OMAP4430_INVALID_PRCM_PARTITION ||
-	       !_prm_bases[part]);
-	writel_relaxed(val, _prm_bases[part] + inst + idx);
+	       !_prm_bases[part].va);
+	writel_relaxed(val, _prm_bases[part].va + inst + idx);
 }
 
 /* Read-modify-write a register in PRM. Caller must lock */
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index d7cff26..eef6935 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -44,13 +44,9 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
 	while (volt_data[count].volt_nominal)
 		count++;
 
-	nvalue_table = kzalloc(sizeof(struct omap_sr_nvalue_table)*count,
-			GFP_KERNEL);
-
-	if (!nvalue_table) {
-		pr_err("OMAP: SmartReflex: cannot allocate memory for n-value table\n");
+	nvalue_table = kcalloc(count, sizeof(*nvalue_table), GFP_KERNEL);
+	if (!nvalue_table)
 		return;
-	}
 
 	for (i = 0, j = 0; i < count; i++) {
 		u32 v;
@@ -102,12 +98,9 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
 	char *name = "smartreflex";
 	static int i;
 
-	sr_data = kzalloc(sizeof(struct omap_sr_data), GFP_KERNEL);
-	if (!sr_data) {
-		pr_err("%s: Unable to allocate memory for %s sr_data\n",
-		       __func__, oh->name);
+	sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+	if (!sr_data)
 		return -ENOMEM;
-	}
 
 	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
 	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 07dd692..ece09c9 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -68,6 +68,9 @@
 static struct omap_dm_timer clkev;
 static struct clock_event_device clockevent_gpt;
 
+/* Clockevent hwmod for am335x and am437x suspend */
+static struct omap_hwmod *clockevent_gpt_hwmod;
+
 #ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
 static unsigned long arch_timer_freq;
 
@@ -125,6 +128,23 @@ static int omap2_gp_timer_set_periodic(struct clock_event_device *evt)
 	return 0;
 }
 
+static void omap_clkevt_idle(struct clock_event_device *unused)
+{
+	if (!clockevent_gpt_hwmod)
+		return;
+
+	omap_hwmod_idle(clockevent_gpt_hwmod);
+}
+
+static void omap_clkevt_unidle(struct clock_event_device *unused)
+{
+	if (!clockevent_gpt_hwmod)
+		return;
+
+	omap_hwmod_enable(clockevent_gpt_hwmod);
+	__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
+}
+
 static struct clock_event_device clockevent_gpt = {
 	.features		= CLOCK_EVT_FEAT_PERIODIC |
 				  CLOCK_EVT_FEAT_ONESHOT,
@@ -232,37 +252,29 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
 					 const char **timer_name,
 					 int posted)
 {
-	char name[10]; /* 10 = sizeof("gptXX_Xck0") */
 	const char *oh_name = NULL;
 	struct device_node *np;
 	struct omap_hwmod *oh;
-	struct resource irq, mem;
 	struct clk *src;
 	int r = 0;
 
-	if (of_have_populated_dt()) {
-		np = omap_get_timer_dt(omap_timer_match, property);
-		if (!np)
-			return -ENODEV;
+	np = omap_get_timer_dt(omap_timer_match, property);
+	if (!np)
+		return -ENODEV;
 
-		of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
-		if (!oh_name)
-			return -ENODEV;
+	of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
+	if (!oh_name)
+		return -ENODEV;
 
-		timer->irq = irq_of_parse_and_map(np, 0);
-		if (!timer->irq)
-			return -ENXIO;
+	timer->irq = irq_of_parse_and_map(np, 0);
+	if (!timer->irq)
+		return -ENXIO;
 
-		timer->io_base = of_iomap(np, 0);
+	timer->io_base = of_iomap(np, 0);
 
-		of_node_put(np);
-	} else {
-		if (omap_dm_timer_reserve_systimer(timer->id))
-			return -ENODEV;
+	timer->fclk = of_clk_get_by_name(np, "fck");
 
-		sprintf(name, "timer%d", timer->id);
-		oh_name = name;
-	}
+	of_node_put(np);
 
 	oh = omap_hwmod_lookup(oh_name);
 	if (!oh)
@@ -270,29 +282,14 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
 
 	*timer_name = oh->name;
 
-	if (!of_have_populated_dt()) {
-		r = omap_hwmod_get_resource_byname(oh, IORESOURCE_IRQ, NULL,
-						   &irq);
-		if (r)
-			return -ENXIO;
-		timer->irq = irq.start;
-
-		r = omap_hwmod_get_resource_byname(oh, IORESOURCE_MEM, NULL,
-						   &mem);
-		if (r)
-			return -ENXIO;
-
-		/* Static mapping, never released */
-		timer->io_base = ioremap(mem.start, mem.end - mem.start);
-	}
-
 	if (!timer->io_base)
 		return -ENXIO;
 
 	omap_hwmod_setup_one(oh_name);
 
 	/* After the dmtimer is using hwmod these clocks won't be needed */
-	timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
+	if (IS_ERR_OR_NULL(timer->fclk))
+		timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
 	if (IS_ERR(timer->fclk))
 		return PTR_ERR(timer->fclk);
 
@@ -358,6 +355,14 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
 					3, /* Timer internal resynch latency */
 					0xffffffff);
 
+	if (soc_is_am33xx() || soc_is_am43xx()) {
+		clockevent_gpt.suspend = omap_clkevt_idle;
+		clockevent_gpt.resume = omap_clkevt_unidle;
+
+		clockevent_gpt_hwmod =
+			omap_hwmod_lookup(clockevent_gpt.name);
+	}
+
 	pr_info("OMAP clockevent source: %s at %lu Hz\n", clockevent_gpt.name,
 		clkev.rate);
 }
@@ -405,18 +410,15 @@ static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
 	const char *oh_name = "counter_32k";
 
 	/*
-	 * If device-tree is present, then search the DT blob
-	 * to see if the 32kHz counter is supported.
+	 * See if the 32kHz counter is supported.
 	 */
-	if (of_have_populated_dt()) {
-		np = omap_get_timer_dt(omap_counter_match, NULL);
-		if (!np)
-			return -ENODEV;
+	np = omap_get_timer_dt(omap_counter_match, NULL);
+	if (!np)
+		return -ENODEV;
 
-		of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
-		if (!oh_name)
-			return -ENODEV;
-	}
+	of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
+	if (!oh_name)
+		return -ENODEV;
 
 	/*
 	 * First check hwmod data is available for sync32k counter
@@ -434,18 +436,6 @@ static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
 		return ret;
 	}
 
-	if (!of_have_populated_dt()) {
-		void __iomem *vbase;
-
-		vbase = omap_hwmod_get_mpu_rt_va(oh);
-
-		ret = omap_init_clocksource_32k(vbase);
-		if (ret) {
-			pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
-					__func__, ret);
-			omap_hwmod_idle(oh);
-		}
-	}
 	return ret;
 }
 
@@ -497,7 +487,7 @@ void __init omap_init_time(void)
 	__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
 			2, "timer_sys_ck", NULL, false);
 
-	clocksource_probe();
+	timer_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -506,7 +496,7 @@ void __init omap3_secure_sync32k_timer_init(void)
 	__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
 			2, "timer_sys_ck", NULL, false);
 
-	clocksource_probe();
+	timer_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
@@ -517,7 +507,7 @@ void __init omap3_gptimer_timer_init(void)
 	__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
 			1, "timer_sys_ck", "ti,timer-alwon", true);
 	if (of_have_populated_dt())
-		clocksource_probe();
+		timer_probe();
 }
 #endif
 
@@ -532,7 +522,7 @@ static void __init omap4_sync32k_timer_init(void)
 void __init omap4_local_timer_init(void)
 {
 	omap4_sync32k_timer_init();
-	clocksource_probe();
+	timer_probe();
 }
 #endif
 
@@ -656,101 +646,11 @@ void __init omap5_realtime_timer_init(void)
 	omap4_sync32k_timer_init();
 	realtime_counter_init();
 
-	clocksource_probe();
+	timer_probe();
 }
 #endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
 
 /**
- * omap_timer_init - build and register timer device with an
- * associated timer hwmod
- * @oh:	timer hwmod pointer to be used to build timer device
- * @user:	parameter that can be passed from calling hwmod API
- *
- * Called by omap_hwmod_for_each_by_class to register each of the timer
- * devices present in the system. The number of timer devices is known
- * by parsing through the hwmod database for a given class name. At the
- * end of function call memory is allocated for timer device and it is
- * registered to the framework ready to be probed by the driver.
- */
-static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
-{
-	int id;
-	int ret = 0;
-	char *name = "omap_timer";
-	struct dmtimer_platform_data *pdata;
-	struct platform_device *pdev;
-	struct omap_timer_capability_dev_attr *timer_dev_attr;
-
-	pr_debug("%s: %s\n", __func__, oh->name);
-
-	/* on secure device, do not register secure timer */
-	timer_dev_attr = oh->dev_attr;
-	if (omap_type() != OMAP2_DEVICE_TYPE_GP && timer_dev_attr)
-		if (timer_dev_attr->timer_capability == OMAP_TIMER_SECURE)
-			return ret;
-
-	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-	if (!pdata) {
-		pr_err("%s: No memory for [%s]\n", __func__, oh->name);
-		return -ENOMEM;
-	}
-
-	/*
-	 * Extract the IDs from name field in hwmod database
-	 * and use the same for constructing ids' for the
-	 * timer devices. In a way, we are avoiding usage of
-	 * static variable within the function to do the same.
-	 * CAUTION: We have to be careful and make sure the
-	 * name in hwmod database does not change in which case
-	 * we might either make corresponding change here or
-	 * switch back static variable mechanism.
-	 */
-	sscanf(oh->name, "timer%2d", &id);
-
-	if (timer_dev_attr)
-		pdata->timer_capability = timer_dev_attr->timer_capability;
-
-	pdata->timer_errata = omap_dm_timer_get_errata();
-	pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
-
-	pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata));
-
-	if (IS_ERR(pdev)) {
-		pr_err("%s: Can't build omap_device for %s: %s.\n",
-			__func__, name, oh->name);
-		ret = -EINVAL;
-	}
-
-	kfree(pdata);
-
-	return ret;
-}
-
-/**
- * omap2_dm_timer_init - top level regular device initialization
- *
- * Uses dedicated hwmod api to parse through hwmod database for
- * given class name and then build and register the timer device.
- */
-static int __init omap2_dm_timer_init(void)
-{
-	int ret;
-
-	/* If dtb is there, the devices will be created dynamically */
-	if (of_have_populated_dt())
-		return -ENODEV;
-
-	ret = omap_hwmod_for_each_by_class("timer", omap_timer_init, NULL);
-	if (unlikely(ret)) {
-		pr_err("%s: device registration failed.\n", __func__);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-omap_arch_initcall(omap2_dm_timer_init);
-
-/**
  * omap2_override_clocksource - clocksource override with user configuration
  *
  * Allows user to override default clocksource, using kernel parameter
diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
index ff0a68c..0084b6c 100644
--- a/arch/arm/mach-omap2/wd_timer.c
+++ b/arch/arm/mach-omap2/wd_timer.c
@@ -102,31 +102,3 @@ int omap2_wd_timer_reset(struct omap_hwmod *oh)
 	return (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT :
 		omap2_wd_timer_disable(oh);
 }
-
-static int __init omap_init_wdt(void)
-{
-	int id = -1;
-	struct platform_device *pdev;
-	struct omap_hwmod *oh;
-	char *oh_name = "wd_timer2";
-	char *dev_name = "omap_wdt";
-	struct omap_wd_timer_platform_data pdata;
-
-	if (!cpu_class_is_omap2() || of_have_populated_dt())
-		return 0;
-
-	oh = omap_hwmod_lookup(oh_name);
-	if (!oh) {
-		pr_err("Could not look up wd_timer%d hwmod\n", id);
-		return -EINVAL;
-	}
-
-	pdata.read_reset_sources = prm_read_reset_sources;
-
-	pdev = omap_device_build(dev_name, id, oh, &pdata,
-				 sizeof(struct omap_wd_timer_platform_data));
-	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
-	     dev_name, oh->name);
-	return 0;
-}
-omap_subsys_initcall(omap_init_wdt);
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 85e874a..7426211 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -27,7 +27,6 @@
 	bool "CSR SiRFSoC ATLAS7 ARM Cortex A7 Platform"
 	default y
 	select ARM_GIC
-	select CPU_V7
 	select ATLAS7_TIMER
 	select HAVE_ARM_SCU if SMP
 	select HAVE_SMP
diff --git a/arch/arm/mach-pxa/include/mach/magician.h b/arch/arm/mach-pxa/include/mach/magician.h
index 5f6b850..c48b54d 100644
--- a/arch/arm/mach-pxa/include/mach/magician.h
+++ b/arch/arm/mach-pxa/include/mach/magician.h
@@ -24,6 +24,7 @@
 #define GPIO10_MAGICIAN_GSM_IRQ			10
 #define GPIO11_MAGICIAN_GSM_OUT1		11
 #define GPIO13_MAGICIAN_CPLD_IRQ		13
+#define GPIO14_MAGICIAN_TSC2046_CS		14
 #define GPIO18_MAGICIAN_UNKNOWN			18
 #define GPIO22_MAGICIAN_VIBRA_EN		22
 #define GPIO26_MAGICIAN_GSM_POWER		26
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index b413e36..7f3566c 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -54,6 +54,10 @@
 #include "devices.h"
 #include "generic.h"
 
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/ads7846.h>
+
 static unsigned long magician_pin_config[] __initdata = {
 
 	/* SDRAM and Static Memory I/O Signals */
@@ -85,7 +89,7 @@ static unsigned long magician_pin_config[] __initdata = {
 
 	/* SSP 2 TSC2046 touchscreen */
 	GPIO19_SSP2_SCLK,
-	GPIO14_SSP2_SFRM,
+	MFP_CFG_OUT(GPIO14, AF0, DRIVE_HIGH),	/* frame as GPIO */
 	GPIO89_SSP2_TXD,
 	GPIO88_SSP2_RXD,
 
@@ -675,6 +679,37 @@ static struct platform_device bq24022 = {
 };
 
 /*
+ * fixed regulator for ads7846
+ */
+
+static struct regulator_consumer_supply ads7846_supply =
+	REGULATOR_SUPPLY("vcc", "spi2.0");
+
+static struct regulator_init_data vads7846_regulator = {
+	.constraints	= {
+		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &ads7846_supply,
+};
+
+static struct fixed_voltage_config vads7846 = {
+	.supply_name	= "vads7846",
+	.microvolts	= 3300000, /* probably */
+	.gpio		= -EINVAL,
+	.startup_delay	= 0,
+	.init_data	= &vads7846_regulator,
+};
+
+static struct platform_device vads7846_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &vads7846,
+	},
+};
+
+/*
  * Vcore regulator MAX1587A
  */
 
@@ -852,6 +887,49 @@ static struct i2c_pxa_platform_data magician_i2c_power_info = {
 };
 
 /*
+ * Touchscreen
+ */
+
+static struct ads7846_platform_data ads7846_pdata = {
+	.model		= 7846,
+	.x_plate_ohms	= 317,
+	.y_plate_ohms	= 500,
+	.pressure_max	= 1023,	/* with x plate ohms it will overflow 255 */
+	.debounce_max	= 3,	/* first readout is always bad */
+	.debounce_tol	= 30,
+	.debounce_rep	= 0,
+	.gpio_pendown	= GPIO115_MAGICIAN_nPEN_IRQ,
+	.keep_vref_on	= 1,
+	.wakeup		= true,
+	.vref_delay_usecs		= 100,
+	.penirq_recheck_delay_usecs	= 100,
+};
+
+struct pxa2xx_spi_chip tsc2046_chip_info = {
+	.tx_threshold	= 1,
+	.rx_threshold	= 2,
+	.timeout	= 64,
+	/* NOTE: chip select must be a GPIO; incompatible with the PXA hardware SPI framing */
+	.gpio_cs	= GPIO14_MAGICIAN_TSC2046_CS,
+};
+
+static struct pxa2xx_spi_master magician_spi_info = {
+	.num_chipselect	= 1,
+	.enable_dma	= 1,
+};
+
+static struct spi_board_info ads7846_spi_board_info[] __initdata = {
+	{
+		.modalias		= "ads7846",
+		.bus_num		= 2,
+		.max_speed_hz		= 2500000,
+		.platform_data		= &ads7846_pdata,
+		.controller_data	= &tsc2046_chip_info,
+		.irq = PXA_GPIO_TO_IRQ(GPIO115_MAGICIAN_nPEN_IRQ),
+	},
+};
+
+/*
  * Platform devices
  */
 
@@ -865,6 +943,7 @@ static struct platform_device *devices[] __initdata = {
 	&power_supply,
 	&strataflash,
 	&leds_gpio,
+	&vads7846_device,
 };
 
 static struct gpio magician_global_gpios[] = {
@@ -922,6 +1001,9 @@ static void __init magician_init(void)
 	} else
 		pr_err("LCD detection: CPLD mapping failed\n");
 
+	pxa2xx_set_spi_info(2, &magician_spi_info);
+	spi_register_board_info(ARRAY_AND_SIZE(ads7846_spi_board_info));
+
 	regulator_register_always_on(0, "power", pwm_backlight_supply,
 		ARRAY_SIZE(pwm_backlight_supply), 5000000);
 
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index e7450fb4..f2237f4 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -107,10 +107,8 @@ static int __init pxa_pm_init(void)
 	sleep_save = kmalloc_array(pxa_cpu_pm_fns->save_count,
 				   sizeof(*sleep_save),
 				   GFP_KERNEL);
-	if (!sleep_save) {
-		printk(KERN_ERR "failed to alloc memory for pm save\n");
+	if (!sleep_save)
 		return -ENOMEM;
-	}
 
 	suspend_set_ops(&pxa_pm_ops);
 	return 0;
diff --git a/arch/arm/mach-pxa/pxa3xx-ulpi.c b/arch/arm/mach-pxa/pxa3xx-ulpi.c
index eba595f..60cb59a 100644
--- a/arch/arm/mach-pxa/pxa3xx-ulpi.c
+++ b/arch/arm/mach-pxa/pxa3xx-ulpi.c
@@ -286,11 +286,9 @@ static int pxa3xx_u2d_probe(struct platform_device *pdev)
 	struct resource *r;
 	int err;
 
-	u2d = kzalloc(sizeof(struct pxa3xx_u2d_ulpi), GFP_KERNEL);
-	if (!u2d) {
-		dev_err(&pdev->dev, "failed to allocate memory\n");
+	u2d = kzalloc(sizeof(*u2d), GFP_KERNEL);
+	if (!u2d)
 		return -ENOMEM;
-	}
 
 	u2d->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(u2d->clk)) {
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index ef0500a..e41cabc 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -55,7 +55,7 @@ static void __init rockchip_timer_init(void)
 	}
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
 
 static void __init rockchip_dt_init(void)
@@ -70,6 +70,7 @@ static const char * const rockchip_board_dt_compat[] = {
 	"rockchip,rk3188",
 	"rockchip,rk3228",
 	"rockchip,rk3288",
+	"rockchip,rv1108",
 	NULL,
 };
 
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 6b279d0..bdb5ec1 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -761,19 +761,21 @@ static struct expansion_card *__init ecard_alloc_card(int type, int slot)
 	return ec;
 }
 
-static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t irq_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	return sprintf(buf, "%u\n", ec->irq);
 }
+static DEVICE_ATTR_RO(irq);
 
-static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dma_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	return sprintf(buf, "%u\n", ec->dma);
 }
+static DEVICE_ATTR_RO(dma);
 
-static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	char *str = buf;
@@ -787,35 +789,39 @@ static ssize_t ecard_show_resources(struct device *dev, struct device_attribute
 
 	return str - buf;
 }
+static DEVICE_ATTR_RO(resource);
 
-static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	return sprintf(buf, "%u\n", ec->cid.manufacturer);
 }
+static DEVICE_ATTR_RO(vendor);
 
-static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	return sprintf(buf, "%u\n", ec->cid.product);
 }
+static DEVICE_ATTR_RO(device);
 
-static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct expansion_card *ec = ECARD_DEV(dev);
 	return sprintf(buf, "%s\n", ec->easi ? "EASI" : "IOC");
 }
+static DEVICE_ATTR_RO(type);
 
-static struct device_attribute ecard_dev_attrs[] = {
-	__ATTR(device,   S_IRUGO, ecard_show_device,    NULL),
-	__ATTR(dma,      S_IRUGO, ecard_show_dma,       NULL),
-	__ATTR(irq,      S_IRUGO, ecard_show_irq,       NULL),
-	__ATTR(resource, S_IRUGO, ecard_show_resources, NULL),
-	__ATTR(type,     S_IRUGO, ecard_show_type,      NULL),
-	__ATTR(vendor,   S_IRUGO, ecard_show_vendor,    NULL),
-	__ATTR_NULL,
+static struct attribute *ecard_dev_attrs[] = {
+	&dev_attr_device.attr,
+	&dev_attr_dma.attr,
+	&dev_attr_irq.attr,
+	&dev_attr_resource.attr,
+	&dev_attr_type.attr,
+	&dev_attr_vendor.attr,
+	NULL,
 };
-
+ATTRIBUTE_GROUPS(ecard_dev);
 
 int ecard_request_resources(struct expansion_card *ec)
 {
@@ -1120,7 +1126,7 @@ static int ecard_match(struct device *_dev, struct device_driver *_drv)
 
 struct bus_type ecard_bus_type = {
 	.name		= "ecard",
-	.dev_attrs	= ecard_dev_attrs,
+	.dev_groups	= ecard_dev_groups,
 	.match		= ecard_match,
 	.probe		= ecard_drv_probe,
 	.remove		= ecard_drv_remove,
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 4b1690a..f07da82 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -394,7 +394,7 @@
 
 config MACH_S3C2416_DT
 	bool "Samsung S3C2416 machine using devicetree"
-	select CLKSRC_OF
+	select TIMER_OF
 	select USE_OF
 	select PINCTRL
 	select PINCTRL_S3C24XX
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 459214f..afd1f20 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -40,7 +40,6 @@
 
 config S3C64XX_PL080
 	def_bool DMADEVICES
-	select ARM_AMBA
 	select AMBA_PL08X
 
 config S3C64XX_SETUP_SDHCI
@@ -336,7 +335,7 @@
 
 config MACH_S3C64XX_DT
 	bool "Samsung S3C6400/S3C6410 machine using Device Tree"
-	select CLKSRC_OF
+	select TIMER_OF
 	select CPU_S3C6400
 	select CPU_S3C6410
 	select PINCTRL
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index 45a1955..699429f 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -130,7 +130,7 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 	struct generic_pm_domain *genpd = &rmobile_pd->genpd;
 	struct dev_power_governor *gov = rmobile_pd->gov;
 
-	genpd->flags = GENPD_FLAG_PM_CLK;
+	genpd->flags |= GENPD_FLAG_PM_CLK;
 	genpd->dev_ops.active_wakeup	= rmobile_pd_active_wakeup;
 	genpd->power_off		= rmobile_pd_power_down;
 	genpd->power_on			= rmobile_pd_power_up;
@@ -140,14 +140,6 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
 	pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
 }
 
-static int rmobile_pd_suspend_busy(void)
-{
-	/*
-	 * This domain should not be turned off.
-	 */
-	return -EBUSY;
-}
-
 static int rmobile_pd_suspend_console(void)
 {
 	/*
@@ -260,8 +252,7 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
 		 * only be turned off if the CPU is not in use.
 		 */
 		pr_debug("PM domain %s contains CPU\n", name);
-		pd->gov = &pm_domain_always_on_gov;
-		pd->suspend = rmobile_pd_suspend_busy;
+		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
 		break;
 
 	case PD_CONSOLE:
@@ -277,8 +268,7 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
 		 * is not in use.
 		 */
 		pr_debug("PM domain %s contains Coresight-ETM\n", name);
-		pd->gov = &pm_domain_always_on_gov;
-		pd->suspend = rmobile_pd_suspend_busy;
+		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
 		break;
 
 	case PD_MEMCTL:
@@ -287,8 +277,7 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
 		 * should only be turned off if memory is not in use.
 		 */
 		pr_debug("PM domain %s contains MEMCTL\n", name);
-		pd->gov = &pm_domain_always_on_gov;
-		pd->suspend = rmobile_pd_suspend_busy;
+		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
 		break;
 
 	case PD_NORMAL:
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 52d466b..a6e74f4 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -113,7 +113,7 @@ void __init rcar_gen2_timer_init(void)
 #endif /* CONFIG_ARM_ARCH_TIMER */
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
 
 struct memory_reserve_config {
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index ca2f6a8..31c43ca 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -124,5 +124,5 @@ void __init spear13xx_timer_init(void)
 	clk_put(pclk);
 
 	spear_setup_of_timer();
-	clocksource_probe();
+	timer_probe();
 }
diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig
index 2d1419e..0d1889b 100644
--- a/arch/arm/mach-stm32/Kconfig
+++ b/arch/arm/mach-stm32/Kconfig
@@ -15,6 +15,11 @@
 	depends on ARCH_STM32
 	default y
 
+config MACH_STM32F469
+	bool "STMicrolectronics STM32F469"
+	depends on ARCH_STM32
+	default y
+
 config MACH_STM32F746
 	bool "STMicrolectronics STM32F746"
 	depends on ARCH_STM32
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index f44e3ac..7ab353f 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -42,7 +42,7 @@ static void __init sun6i_timer_init(void)
 	of_clk_init(NULL);
 	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
 		sun6i_reset_init();
-	clocksource_probe();
+	timer_probe();
 }
 
 DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index a4910ea..048f15e 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -407,7 +407,7 @@ static const char * u300_board_compat[] = {
 DT_MACHINE_START(U300_DT, "U300 S335/B335 (Device Tree)")
 	.map_io		= u300_map_io,
 	.init_irq	= u300_init_irq_dt,
-	.init_time	= clocksource_probe,
+	.init_time	= timer_probe,
 	.init_machine	= u300_init_machine_dt,
 	.restart	= u300_restart,
 	.dt_compat      = u300_board_compat,
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index ed11864..6aba9eb 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -150,7 +150,7 @@ static void __init zynq_timer_init(void)
 {
 	zynq_clock_init();
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
 
 static struct map_desc zynq_cortex_a9_scu_map __initdata = {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd..bd83c53 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
+	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-	__arm_iommu_detach_device(dev);
-	set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 	if (!mapping)
 		return;
 
-	__arm_iommu_detach_device(dev);
+	arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		dev->dma_ops = xen_dma_ops;
 	}
 #endif
+	dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+	if (!dev->archdata.dma_ops_setup)
+		return;
+
 	arm_teardown_iommu_dma_ops(dev);
 }
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2239fde..f0701d8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 31af3cb5..e46a6a4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1218,15 +1218,15 @@ void __init adjust_lowmem_bounds(void)
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
 	/*
 	 * Round the memblock limit down to a pmd size.  This
 	 * helps to ensure that we will allocate memory from the
 	 * last full pmd, which should be mapped.
 	 */
-	if (memblock_limit)
-		memblock_limit = round_down(memblock_limit, PMD_SIZE);
-	if (!memblock_limit)
-		memblock_limit = arm_lowmem_limit;
+	memblock_limit = round_down(memblock_limit, PMD_SIZE);
 
 	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
 		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index ad1f4e6..52d1cd1 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -182,7 +182,8 @@ void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
 		.addr = addr,
 		.insn = insn,
 	};
-	stop_machine(__kprobes_remove_breakpoint, &p, cpu_online_mask);
+	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
+				cpu_online_mask);
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec..300146d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -41,6 +41,7 @@
 	select EDAC_SUPPORT
 	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
+	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
@@ -205,7 +206,7 @@
 config ZONE_DMA
 	def_bool y
 
-config HAVE_GENERIC_RCU_GUP
+config HAVE_GENERIC_GUP
 	def_bool y
 
 config ARCH_DMA_ADDR_T_64BIT
@@ -1084,10 +1085,6 @@
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y
-	depends on COMPAT && KEYS
-
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 73272f4..f5f0c81 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -1,5 +1,11 @@
 menu "Platform selection"
 
+config ARCH_ACTIONS
+	bool "Actions Semi Platforms"
+	select OWL_TIMER
+	help
+	  This enables support for the Actions Semiconductor S900 SoC family.
+
 config ARCH_SUNXI
 	bool "Allwinner sunxi 64-bit SoC Family"
 	select ARCH_HAS_RESET_CONTROLLER
@@ -18,7 +24,7 @@
 
 config ARCH_BCM2835
 	bool "Broadcom BCM2835 family"
-	select CLKSRC_OF
+	select TIMER_OF
 	select GPIOLIB
 	select PINCTRL
 	select PINCTRL_BCM2835
@@ -108,11 +114,15 @@
 	select ARMADA_37XX_CLK
 	select GPIOLIB
 	select GPIOLIB_IRQCHIP
+	select MVEBU_GICP
+	select MVEBU_ICU
 	select MVEBU_ODMI
 	select MVEBU_PIC
 	select OF_GPIO
 	select PINCTRL
 	select PINCTRL_ARMADA_37XX
+	select PINCTRL_ARMADA_AP806
+	select PINCTRL_ARMADA_CP110
 	help
 	  This enables support for Marvell EBU family, including:
 	   - Armada 3700 SoC Family
@@ -126,6 +136,12 @@
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
+config ARCH_REALTEK
+	bool "Realtek Platforms"
+	help
+	  This enables support for the ARMv8 based Realtek chipsets,
+	  like the RTD1295.
+
 config ARCH_ROCKCHIP
 	bool "Rockchip Platforms"
 	select ARCH_HAS_RESET_CONTROLLER
@@ -178,7 +194,7 @@
 	select ARCH_HAS_RESET_CONTROLLER
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 	select GENERIC_CLOCKEVENTS
 	select GPIOLIB
 	select PINCTRL
@@ -225,10 +241,7 @@
 	  Express).
 
 config ARCH_VULCAN
-	bool "Broadcom Vulcan SOC Family"
-	select GPIOLIB
-	help
-	  This enables support for Broadcom Vulcan SoC Family
+	def_bool n
 
 config ARCH_XGENE
 	bool "AppliedMicro X-Gene SOC Family"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 080232b..8e19512 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,3 +1,4 @@
+dts-dirs += actions
 dts-dirs += al
 dts-dirs += allwinner
 dts-dirs += altera
@@ -14,6 +15,7 @@
 dts-dirs += mediatek
 dts-dirs += nvidia
 dts-dirs += qcom
+dts-dirs += realtek
 dts-dirs += renesas
 dts-dirs += rockchip
 dts-dirs += socionext
diff --git a/arch/arm64/boot/dts/actions/Makefile b/arch/arm64/boot/dts/actions/Makefile
new file mode 100644
index 0000000..62922d6
--- /dev/null
+++ b/arch/arm64/boot/dts/actions/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_ACTIONS) += s900-bubblegum-96.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/actions/s900-bubblegum-96.dts b/arch/arm64/boot/dts/actions/s900-bubblegum-96.dts
new file mode 100644
index 0000000..a0c3484
--- /dev/null
+++ b/arch/arm64/boot/dts/actions/s900-bubblegum-96.dts
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include "s900.dtsi"
+
+/ {
+	compatible = "ucrobotics,bubblegum-96", "actions,s900";
+	model = "Bubblegum-96";
+
+	aliases {
+		serial5 = &uart5;
+	};
+
+	chosen {
+		stdout-path = "serial5:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+};
+
+&timer {
+	clocks = <&hosc>;
+};
+
+&uart5 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/actions/s900.dtsi b/arch/arm64/boot/dts/actions/s900.dtsi
new file mode 100644
index 0000000..11406f6
--- /dev/null
+++ b/arch/arm64/boot/dts/actions/s900.dtsi
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "actions,s900";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+		};
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		secmon@1f000000 {
+			reg = <0x0 0x1f000000 0x0 0x1000000>;
+			no-map;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	arm-pmu {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		             <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	hosc: hosc {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		#clock-cells = <0>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gic: interrupt-controller@e00f1000 {
+			compatible = "arm,gic-400";
+			reg = <0x0 0xe00f1000 0x0 0x1000>,
+			      <0x0 0xe00f2000 0x0 0x2000>,
+			      <0x0 0xe00f4000 0x0 0x2000>,
+			      <0x0 0xe00f6000 0x0 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+		};
+
+		uart0: serial@e0120000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe0120000 0x0 0x2000>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart1: serial@e0122000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe0122000 0x0 0x2000>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart2: serial@e0124000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe0124000 0x0 0x2000>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart3: serial@e0126000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe0126000 0x0 0x2000>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart4: serial@e0128000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe0128000 0x0 0x2000>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart5: serial@e012a000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe012a000 0x0 0x2000>;
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart6: serial@e012c000 {
+			compatible = "actions,s900-uart", "actions,owl-uart";
+			reg = <0x0 0xe012c000 0x0 0x2000>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		timer: timer@e0228000 {
+			compatible = "actions,s900-timer";
+			reg = <0x0 0xe0228000 0x0 0x8000>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "timer1";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 244e8b7..108f12c 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -1,6 +1,11 @@
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-bananapi-m64.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-orangepi-win.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-prime.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus2.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo2.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 6872135..0d1f026 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -67,6 +67,14 @@
 	};
 };
 
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	phy-mode = "rgmii";
+	phy-handle = <&ext_rgmii_phy>;
+	status = "okay";
+};
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c1_pins>;
@@ -77,6 +85,13 @@
 	bias-pull-up;
 };
 
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
similarity index 67%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index 3956cff..5f8ff401 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -1,20 +1,22 @@
 /*
+ * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
  * whole.
  *
- *  a) This file is free software; you can redistribute it and/or
+ *  a) This library is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
  *     published by the Free Software Foundation; either version 2 of the
  *     License, or (at your option) any later version.
  *
- *     This file is distributed in the hope that it will be useful,
+ *     This library is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -40,30 +42,54 @@
 
 /dts-v1/;
 
-#include "rk1108.dtsi"
+#include "sun50i-a64.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+	model = "OrangePi Win/Win Plus";
+	compatible = "xunlong,orangepi-win", "allwinner,sun50i-a64";
 
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
+	aliases {
+		serial0 = &uart0;
 	};
 
 	chosen {
-		stdout-path = "serial2:1500000n8";
+		stdout-path = "serial0:115200n8";
+	};
+
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
 	};
 };
 
+&ehci1 {
+	status = "okay";
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
 &uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
 	status = "okay";
 };
 
-&uart1 {
+&usbphy {
 	status = "okay";
 };
 
-&uart2 {
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index 790d14d..24f1aac 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -46,5 +46,20 @@
 	model = "Pine64+";
 	compatible = "pine64,pine64-plus", "allwinner,sun50i-a64";
 
-	/* TODO: Camera, Ethernet PHY, touchscreen, etc. */
+	/* TODO: Camera, touchscreen, etc. */
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	phy-mode = "rgmii";
+	phy-handle = <&ext_rgmii_phy>;
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index c680ed3..08cda24 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -52,6 +52,10 @@
 
 	aliases {
 		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+		serial4 = &uart4;
 	};
 
 	chosen {
@@ -66,10 +70,23 @@
 	};
 };
 
+&ehci0 {
+	status = "okay";
+};
+
 &ehci1 {
 	status = "okay";
 };
 
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rmii_pins>;
+	phy-mode = "rmii";
+	phy-handle = <&ext_rmii_phy1>;
+	status = "okay";
+
+};
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c1_pins>;
@@ -80,6 +97,13 @@
 	bias-pull-up;
 };
 
+&mdio {
+	ext_rmii_phy1: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins>;
@@ -91,16 +115,49 @@
 	status = "okay";
 };
 
+&ohci0 {
+	status = "okay";
+};
+
 &ohci1 {
 	status = "okay";
 };
 
+/* On Exp and Euler connectors */
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
 	status = "okay";
 };
 
+/* On Wifi/BT connector, with RTS/CTS */
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+	status = "disabled";
+};
+
+/* On Pi-2 connector */
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+	status = "disabled";
+};
+
+/* On Euler connector */
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins>;
+	status = "disabled";
+};
+
+/* On Euler connector, RTS/CTS optional */
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart4_pins>;
+	status = "disabled";
+};
+
 &usb_otg {
 	dr_mode = "host";
 	status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
new file mode 100644
index 0000000..17eb1cc
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on sun50i-a64-pine64.dts, which is:
+ *   Copyright (c) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "sun50i-a64-sopine.dtsi"
+
+/ {
+	model = "SoPine with baseboard";
+	compatible = "pine64,sopine-baseboard", "pine64,sopine",
+		     "allwinner,sun50i-a64";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	reg_vcc1v8: vcc1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc1v8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	phy-mode = "rgmii";
+	phy-handle = <&ext_rgmii_phy>;
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	vqmmc-supply = <&reg_vcc1v8>;
+	bus-width = <8>;
+	non-removable;
+	cap-mmc-hw-reset;
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
similarity index 73%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index 3956cff..475518b 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -1,20 +1,25 @@
 /*
+ * Copyright (c) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on sun50i-a64-pine64.dts, which is:
+ *   Copyright (c) 2016 ARM Ltd.
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
  * whole.
  *
- *  a) This file is free software; you can redistribute it and/or
+ *  a) This library is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
  *     published by the Free Software Foundation; either version 2 of the
  *     License, or (at your option) any later version.
  *
- *     This file is distributed in the hope that it will be useful,
+ *     This library is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -38,32 +43,23 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
-
-#include "rk1108.dtsi"
+#include "sun50i-a64.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
 	};
 };
 
-&uart0 {
-	status = "okay";
-};
-
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	non-removable;
+	disable-wp;
+	bus-width = <4>;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index c7f669f..9d00622 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -43,6 +43,7 @@
  */
 
 #include <dt-bindings/clock/sun50i-a64-ccu.h>
+#include <dt-bindings/clock/sun8i-r-ccu.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/reset/sun50i-a64-ccu.h>
 
@@ -129,6 +130,12 @@
 		#size-cells = <1>;
 		ranges;
 
+		syscon: syscon@1c00000 {
+			compatible = "allwinner,sun50i-a64-system-controller",
+				"syscon";
+			reg = <0x01c00000 0x1000>;
+		};
+
 		mmc0: mmc@1c0f000 {
 			compatible = "allwinner,sun50i-a64-mmc";
 			reg = <0x01c0f000 0x1000>;
@@ -204,6 +211,28 @@
 			#phy-cells = <1>;
 		};
 
+		ehci0: usb@01c1a000 {
+			compatible = "allwinner,sun50i-a64-ehci", "generic-ehci";
+			reg = <0x01c1a000 0x100>;
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_OHCI0>,
+				 <&ccu CLK_BUS_EHCI0>,
+				 <&ccu CLK_USB_OHCI0>;
+			resets = <&ccu RST_BUS_OHCI0>,
+				 <&ccu RST_BUS_EHCI0>;
+			status = "disabled";
+		};
+
+		ohci0: usb@01c1a400 {
+			compatible = "allwinner,sun50i-a64-ohci", "generic-ohci";
+			reg = <0x01c1a400 0x100>;
+			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_OHCI0>,
+				 <&ccu CLK_USB_OHCI0>;
+			resets = <&ccu RST_BUS_OHCI0>;
+			status = "disabled";
+		};
+
 		ehci1: usb@01c1b000 {
 			compatible = "allwinner,sun50i-a64-ehci", "generic-ehci";
 			reg = <0x01c1b000 0x100>;
@@ -281,6 +310,21 @@
 				bias-pull-up;
 			};
 
+			rmii_pins: rmii_pins {
+				pins = "PD10", "PD11", "PD13", "PD14", "PD17",
+				       "PD18", "PD19", "PD20", "PD22", "PD23";
+				function = "emac";
+				drive-strength = <40>;
+			};
+
+			rgmii_pins: rgmii_pins {
+				pins = "PD8", "PD9", "PD10", "PD11", "PD12",
+				       "PD13", "PD15", "PD16", "PD17", "PD18",
+				       "PD19", "PD20", "PD21", "PD22", "PD23";
+				function = "emac";
+				drive-strength = <40>;
+			};
+
 			uart0_pins_a: uart0@0 {
 				pins = "PB8", "PB9";
 				function = "uart0";
@@ -295,6 +339,26 @@
 				pins = "PG8", "PG9";
 				function = "uart1";
 			};
+
+			uart2_pins: uart2-pins {
+				pins = "PB0", "PB1";
+				function = "uart2";
+			};
+
+			uart3_pins: uart3-pins {
+				pins = "PD0", "PD1";
+				function = "uart3";
+			};
+
+			uart4_pins: uart4-pins {
+				pins = "PD2", "PD3";
+				function = "uart4";
+			};
+
+			uart4_rts_cts_pins: uart4-rts-cts-pins {
+				pins = "PD4", "PD5";
+				function = "uart4";
+			};
 		};
 
 		uart0: serial@1c28000 {
@@ -303,8 +367,8 @@
 			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 67>;
-			resets = <&ccu 46>;
+			clocks = <&ccu CLK_BUS_UART0>;
+			resets = <&ccu RST_BUS_UART0>;
 			status = "disabled";
 		};
 
@@ -314,8 +378,8 @@
 			interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 68>;
-			resets = <&ccu 47>;
+			clocks = <&ccu CLK_BUS_UART1>;
+			resets = <&ccu RST_BUS_UART1>;
 			status = "disabled";
 		};
 
@@ -325,8 +389,8 @@
 			interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 69>;
-			resets = <&ccu 48>;
+			clocks = <&ccu CLK_BUS_UART2>;
+			resets = <&ccu RST_BUS_UART2>;
 			status = "disabled";
 		};
 
@@ -336,8 +400,8 @@
 			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 70>;
-			resets = <&ccu 49>;
+			clocks = <&ccu CLK_BUS_UART3>;
+			resets = <&ccu RST_BUS_UART3>;
 			status = "disabled";
 		};
 
@@ -347,8 +411,8 @@
 			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
 			reg-shift = <2>;
 			reg-io-width = <4>;
-			clocks = <&ccu 71>;
-			resets = <&ccu 50>;
+			clocks = <&ccu CLK_BUS_UART4>;
+			resets = <&ccu RST_BUS_UART4>;
 			status = "disabled";
 		};
 
@@ -356,8 +420,8 @@
 			compatible = "allwinner,sun6i-a31-i2c";
 			reg = <0x01c2ac00 0x400>;
 			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 63>;
-			resets = <&ccu 42>;
+			clocks = <&ccu CLK_BUS_I2C0>;
+			resets = <&ccu RST_BUS_I2C0>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -367,8 +431,8 @@
 			compatible = "allwinner,sun6i-a31-i2c";
 			reg = <0x01c2b000 0x400>;
 			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 64>;
-			resets = <&ccu 43>;
+			clocks = <&ccu CLK_BUS_I2C1>;
+			resets = <&ccu RST_BUS_I2C1>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -378,13 +442,33 @@
 			compatible = "allwinner,sun6i-a31-i2c";
 			reg = <0x01c2b400 0x400>;
 			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu 65>;
-			resets = <&ccu 44>;
+			clocks = <&ccu CLK_BUS_I2C2>;
+			resets = <&ccu RST_BUS_I2C2>;
 			status = "disabled";
 			#address-cells = <1>;
 			#size-cells = <0>;
 		};
 
+		emac: ethernet@1c30000 {
+			compatible = "allwinner,sun50i-a64-emac";
+			syscon = <&syscon>;
+			reg = <0x01c30000 0x100>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "macirq";
+			resets = <&ccu RST_BUS_EMAC>;
+			reset-names = "stmmaceth";
+			clocks = <&ccu CLK_BUS_EMAC>;
+			clock-names = "stmmaceth";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			mdio: mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+		};
+
 		gic: interrupt-controller@1c81000 {
 			compatible = "arm,gic-400";
 			reg = <0x01c81000 0x1000>,
@@ -406,8 +490,9 @@
 		r_ccu: clock@1f01400 {
 			compatible = "allwinner,sun50i-a64-r-ccu";
 			reg = <0x01f01400 0x100>;
-			clocks = <&osc24M>, <&osc32k>, <&iosc>;
-			clock-names = "hosc", "losc", "iosc";
+			clocks = <&osc24M>, <&osc32k>, <&iosc>,
+				 <&ccu 11>;
+			clock-names = "hosc", "losc", "iosc", "pll-periph";
 			#clock-cells = <1>;
 			#reset-cells = <1>;
 		};
@@ -416,12 +501,31 @@
 			compatible = "allwinner,sun50i-a64-r-pinctrl";
 			reg = <0x01f02c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&r_ccu 3>, <&osc24M>, <&osc32k>;
+			clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>;
 			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			#gpio-cells = <3>;
 			interrupt-controller;
 			#interrupt-cells = <3>;
+
+			r_rsb_pins: rsb@0 {
+				pins = "PL0", "PL1";
+				function = "s_rsb";
+			};
+		};
+
+		r_rsb: rsb@1f03400 {
+			compatible = "allwinner,sun8i-a23-rsb";
+			reg = <0x01f03400 0x400>;
+			interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&r_ccu 6>;
+			clock-frequency = <3000000>;
+			resets = <&r_ccu 2>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&r_rsb_pins>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
new file mode 100644
index 0000000..9689087
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun50i-h5.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "FriendlyARM NanoPi NEO 2";
+	compatible = "friendlyarm,nanopi-neo2", "allwinner,sun50i-h5";
+
+	aliases {
+		ethernet0 = &emac;
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			label = "nanopi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+
+		status {
+			label = "nanopi:blue:status";
+			gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+	};
+
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	reg_usb0_vbus: usb0-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb0-vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		enable-active-high;
+		gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+		status = "okay";
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci3 {
+	status = "okay";
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@7 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <7>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci3 {
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usbphy {
+	/* USB Type-A port's VBUS is always on */
+	usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index dfecc17..a8296fe 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -59,6 +59,7 @@
 	};
 
 	aliases {
+		ethernet0 = &emac;
 		serial0 = &uart0;
 	};
 
@@ -91,6 +92,16 @@
 		};
 	};
 
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+	};
+
 	reg_usb0_vbus: usb0-vbus {
 		compatible = "regulator-fixed";
 		regulator-name = "usb0-vbus";
@@ -126,12 +137,28 @@
 	status = "okay";
 };
 
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+	status = "okay";
+};
+
 &ir {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir_pins_a>;
 	status = "okay";
 };
 
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
new file mode 100644
index 0000000..d906b30
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ * Based on sun50i-h5-orangepi-pc2.dts, which is:
+ *   Copyright (C) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun50i-h5.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Xunlong Orange Pi Prime";
+	compatible = "xunlong,orangepi-prime", "allwinner,sun50i-h5";
+
+	aliases {
+		ethernet0 = &emac;
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		pwr {
+			label = "orangepi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+
+		status {
+			label = "orangepi:red:status";
+			gpios = <&pio 0 20 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	r-gpio-keys {
+		compatible = "gpio-keys";
+
+		sw4 {
+			label = "sw4";
+			linux,code = <BTN_0>;
+			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	reg_gmac_3v3: gmac-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "gmac-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		enable-active-high;
+		gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>;
+	};
+
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	reg_usb0_vbus: usb0-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb0-vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		enable-active-high;
+		gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+		status = "okay";
+	};
+
+	wifi_pwrseq: wifi_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&pio 2 14 GPIO_ACTIVE_LOW>; /* PC14 */
+	};
+};
+
+&codec {
+	allwinner,audio-routing =
+		"Line Out", "LINEOUT",
+		"MIC1", "Mic",
+		"Mic",  "MBIAS";
+	status = "okay";
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&ehci3 {
+	status = "okay";
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_rgmii_pins>;
+	phy-supply = <&reg_gmac_3v3>;
+	phy-handle = <&ext_rgmii_phy>;
+	phy-mode = "rgmii";
+	status = "okay";
+};
+
+&ir {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ir_pins_a>;
+	status = "okay";
+};
+
+&mdio {
+	ext_rgmii_phy: ethernet-phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+	status = "okay";
+};
+
+&mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins_a>;
+	vmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&wifi_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&ohci3 {
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	status = "disabled";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+	status = "disabled";
+};
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usbphy {
+	/* USB Type-A ports' VBUS is always on */
+	usb0_id_det-gpios = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
similarity index 65%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index 3956cff..b6b7a56 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -1,20 +1,22 @@
 /*
+ * Copyright (C) 2017 Jagan Teki <jteki@openedev.com>
+ *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
  * whole.
  *
- *  a) This file is free software; you can redistribute it and/or
+ *  a) This library is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
  *     published by the Free Software Foundation; either version 2 of the
  *     License, or (at your option) any later version.
  *
- *     This file is distributed in the hope that it will be useful,
+ *     This library is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -40,30 +42,51 @@
 
 /dts-v1/;
 
-#include "rk1108.dtsi"
+#include "sun50i-h5.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+	model = "OrangePi Zero Plus2";
+	compatible = "xunlong,orangepi-zero-plus2", "allwinner,sun50i-h5";
 
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
+	aliases {
+		serial0 = &uart0;
 	};
 
 	chosen {
-		stdout-path = "serial2:1500000n8";
+		stdout-path = "serial0:115200n8";
+	};
+
+	reg_vcc3v3: vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
 	};
 };
 
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_8bit_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <8>;
+	non-removable;
+	cap-mmc-hw-reset;
+	status = "okay";
+};
+
 &uart0 {
-	status = "okay";
-};
-
-&uart1 {
-	status = "okay";
-};
-
-&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 4d314a2..732e2e0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -40,7 +40,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
 
 / {
 	cpus {
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
deleted file mode 120000
index 036f01d..0000000
--- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index b9ad2db..7a9f48c 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p200.dtb
@@ -7,15 +8,17 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-hub.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-play2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-hwacom-amazetv.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-khadas-vim.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb
-dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-hwacom-amazetv.dtb
-dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
-dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index a84e276..dc478d0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -121,19 +121,42 @@
 	};
 };
 
-/* This UART is brought out to the DB9 connector */
-&uart_AO {
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
+};
+
+&ethmac {
 	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
 	pinctrl-names = "default";
 };
 
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &ir {
 	status = "okay";
 	pinctrl-0 = <&remote_input_ao_pins>;
 	pinctrl-names = "default";
 };
 
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
 /* Wireless SDIO Module */
 &sd_emmc_a {
 	status = "okay";
@@ -154,7 +177,7 @@
 	vmmc-supply = <&vddao_3v3>;
 	vqmmc-supply = <&vddio_boot>;
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 	};
@@ -198,32 +221,9 @@
 	vqmmc-supply = <&vddio_boot>;
 };
 
-&pwm_ef {
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
 	status = "okay";
-	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_FCLK_DIV4>;
-	clock-names = "clkin0";
-};
-
-&ethmac {
-	status = "okay";
-};
-
-&cvbs_vdac_port {
-	cvbs_vdac_out: endpoint {
-		remote-endpoint = <&cvbs_connector_in>;
-	};
-};
-
-&hdmi_tx {
-	status = "okay";
-	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
-	pinctrl-names = "default";
-};
-
-&hdmi_tx_tmds_port {
-	hdmi_tx_tmds_out: endpoint {
-		remote-endpoint = <&hdmi_connector_in>;
-	};
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 436b875..35b8c88 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -200,7 +200,7 @@
 		};
 
 		scpi_sensors: sensors {
-			compatible = "arm,scpi-sensors";
+			compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
 			#thermal-sensor-cells = <1>;
 		};
 	};
@@ -304,6 +304,15 @@
 				status = "disabled";
 			};
 
+			spicc: spi@8d80 {
+				compatible = "amlogic,meson-gx-spicc";
+				reg = <0x0 0x08d80 0x0 0x80>;
+				interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
 			spifc: spi@8c80 {
 				compatible = "amlogic,meson-gx-spifc", "amlogic,meson-gxbb-spifc";
 				reg = <0x0 0x08c80 0x0 0x80>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
new file mode 100644
index 0000000..fa46283
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	compatible = "friendlyarm,nanopi-k2", "amlogic,meson-gxbb";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		stat {
+			label = "nanopi-k2:blue:stat";
+			gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+			panic-indicator;
+		};
+	};
+
+	vdd_5v: regulator-vdd-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "VDD_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	vddio_ao18: regulator-vddio-ao18 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO18";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddio_ao3v3: regulator-vddio-ao3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vddio_tf: regulator-vddio-tf {
+		compatible = "regulator-gpio";
+
+		regulator-name = "VDDIO_TF";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+		gpios-states = <0>;
+
+		states = <3300000 0>,
+		         <1800000 1>;
+	};
+
+	wifi_32k: wifi-32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi_32k>;
+		clock-names = "ext_clock";
+	};
+
+	vcc1v8: regulator-vcc1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vcc3v3: regulator-vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+};
+
+&ethmac {
+	status = "okay";
+	pinctrl-0 = <&eth_rgmii_pins>;
+	pinctrl-names = "default";
+
+	phy-handle = <&eth_phy0>;
+	phy-mode = "rgmii";
+
+	amlogic,tx-delay-ns = <2>;
+
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	mdio {
+		compatible = "snps,dwmac-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		eth_phy0: ethernet-phy@0 {
+			/* Realtek RTL8211F (0x001cc916) */
+			reg = <0>;
+		};
+	};
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
+&saradc {
+	status = "okay";
+	vref-supply = <&vddio_ao18>;
+};
+
+/* SDIO */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <200000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddio_ao3v3>;
+	vqmmc-supply = <&vddio_ao18>;
+
+	brcmf: wifi@1 {
+		compatible = "brcm,bcm4329-fmac";
+		reg = <1>;
+	};
+};
+
+/* SD */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddio_ao3v3>;
+	vqmmc-supply = <&vddio_tf>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "disabled";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	cap-mmc-highspeed;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc3v3>;
+	vqmmc-supply = <&vcc1v8>;
+};
+
+/* DBG_UART */
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+/* Bluetooth on AP6212 */
+&uart_A {
+	status = "disabled";
+	pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+	pinctrl-names = "default";
+};
+
+/* 40-pin CON1 */
+&uart_C {
+	status = "disabled";
+	pinctrl-0 = <&uart_c_pins>;
+	pinctrl-names = "default";
+};
+
+&usb0_phy {
+	status = "okay";
+	phy-supply = <&vdd_5v>;
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 87198ea..a1078b3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -165,10 +165,10 @@
 	};
 };
 
-&uart_AO {
-	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
-	pinctrl-names = "default";
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
 };
 
 &ethmac {
@@ -195,12 +195,32 @@
 	};
 };
 
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &ir {
 	status = "okay";
 	pinctrl-0 = <&remote_input_ao_pins>;
 	pinctrl-names = "default";
 };
 
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
 /* Wireless SDIO Module */
 &sd_emmc_a {
 	status = "okay";
@@ -260,28 +280,8 @@
 	vqmmc-supply = <&vddio_boot>;
 };
 
-&pwm_ef {
+&uart_AO {
 	status = "okay";
-	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_FCLK_DIV4>;
-	clock-names = "clkin0";
-};
-
-&cvbs_vdac_port {
-	cvbs_vdac_out: endpoint {
-		remote-endpoint = <&cvbs_connector_in>;
-	};
-};
-
-&hdmi_tx {
-	status = "okay";
-	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
-	pinctrl-names = "default";
-};
-
-&hdmi_tx_tmds_port {
-	hdmi_tx_tmds_out: endpoint {
-		remote-endpoint = <&hdmi_connector_in>;
-	};
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 54a9c6a..d147c85 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,16 +137,6 @@
 	};
 };
 
-&scpi_clocks {
-	status = "disabled";
-};
-
-&uart_AO {
-	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
-	pinctrl-names = "default";
-};
-
 &ethmac {
 	status = "okay";
 	pinctrl-0 = <&eth_rgmii_pins>;
@@ -172,6 +162,33 @@
 	};
 };
 
+&gpio_ao {
+	/*
+	 * WARNING: The USB Hub on the Odroid-C2 needs a reset signal
+	 * to be turned high in order to be detected by the USB Controller.
+	 * This signal should be handled by a USB specific power sequence
+	 * in order to reset the Hub when the USB bus is powered down.
+	 */
+	usb-hub {
+		gpio-hog;
+		gpios = <GPIOAO_4 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "usb-hub-reset";
+	};
+};
+
+&i2c_A {
+	status = "okay";
+	pinctrl-0 = <&i2c_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
 &pinctrl_aobus {
 	gpio-line-names = "UART TX", "UART RX", "VCCK En", "TF 3V3/1V8 En",
 			  "USB HUB nRESET", "USB OTG Power En",
@@ -223,55 +240,15 @@
 			  "";
 };
 
-&ir {
-	status = "okay";
-	pinctrl-0 = <&remote_input_ao_pins>;
-	pinctrl-names = "default";
-};
-
-&i2c_A {
-	status = "okay";
-	pinctrl-0 = <&i2c_a_pins>;
-	pinctrl-names = "default";
-};
-
-&gpio_ao {
-	/*
-	 * WARNING: The USB Hub on the Odroid-C2 needs a reset signal
-	 * to be turned high in order to be detected by the USB Controller
-	 * This signal should be handled by a USB specific power sequence
-	 * in order to reset the Hub when USB bus is powered down.
-	 */
-	usb-hub {
-		gpio-hog;
-		gpios = <GPIOAO_4 GPIO_ACTIVE_HIGH>;
-		output-high;
-		line-name = "usb-hub-reset";
-	};
-};
-
-&usb0_phy {
-	status = "okay";
-	phy-supply = <&usb_otg_pwr>;
-};
-
-&usb1_phy {
-	status = "okay";
-};
-
-&usb0 {
-	status = "okay";
-};
-
-&usb1 {
-	status = "okay";
-};
-
 &saradc {
 	status = "okay";
 	vref-supply = <&vcc1v8>;
 };
 
+&scpi_clocks {
+	status = "disabled";
+};
+
 /* SD */
 &sd_emmc_b {
 	status = "okay";
@@ -309,3 +286,26 @@
 	vmmc-supply = <&vcc3v3>;
 	vqmmc-supply = <&vcc1v8>;
 };
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&usb0_phy {
+	status = "okay";
+	phy-supply = <&usb_otg_pwr>;
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 3c6c0b7..d904deb 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -126,7 +126,7 @@
 		clock-names = "ext_clock";
 	};
 
-	cvbs-connector {
+	cvbs_connector: cvbs-connector {
 		compatible = "composite-video-connector";
 
 		port {
@@ -148,34 +148,36 @@
 	};
 };
 
-/* This UART is brought out to the DB9 connector */
-&uart_AO {
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
+};
+
+&hdmi_tx {
 	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
 	pinctrl-names = "default";
 };
 
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &ir {
 	status = "okay";
 	pinctrl-0 = <&remote_input_ao_pins>;
 	pinctrl-names = "default";
 };
 
-&usb0_phy {
+&pwm_ef {
 	status = "okay";
-	phy-supply = <&usb_pwr>;
-};
-
-&usb1_phy {
-	status = "okay";
-};
-
-&usb0 {
-	status = "okay";
-};
-
-&usb1 {
-	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
 };
 
 /* Wireless SDIO Module */
@@ -198,7 +200,7 @@
 	vmmc-supply = <&vddao_3v3>;
 	vqmmc-supply = <&vddio_boot>;
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 	};
@@ -242,28 +244,26 @@
 	vqmmc-supply = <&vddio_boot>;
 };
 
-&pwm_ef {
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
 	status = "okay";
-	pinctrl-0 = <&pwm_e_pins>;
-	pinctrl-names = "default";
-	clocks = <&clkc CLKID_FCLK_DIV4>;
-	clock-names = "clkin0";
-};
-
-&cvbs_vdac_port {
-	cvbs_vdac_out: endpoint {
-		remote-endpoint = <&cvbs_connector_in>;
-	};
-};
-
-&hdmi_tx {
-	status = "okay";
-	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
 };
 
-&hdmi_tx_tmds_port {
-	hdmi_tx_tmds_out: endpoint {
-		remote-endpoint = <&hdmi_connector_in>;
-	};
+&usb0_phy {
+	status = "okay";
+	phy-supply = <&usb_pwr>;
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index aefa66d..346753f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -111,18 +111,6 @@
 	};
 };
 
-&uart_AO {
-	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
-	pinctrl-names = "default";
-};
-
-&ir {
-	status = "okay";
-	pinctrl-0 = <&remote_input_ao_pins>;
-	pinctrl-names = "default";
-};
-
 &ethmac {
 	status = "okay";
 	pinctrl-0 = <&eth_rgmii_pins>;
@@ -149,21 +137,18 @@
 	};
 };
 
-&usb0_phy {
+&ir {
 	status = "okay";
-	phy-supply = <&usb_vbus>;
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
 };
 
-&usb1_phy {
+&pwm_ef {
 	status = "okay";
-};
-
-&usb0 {
-	status = "okay";
-};
-
-&usb1 {
-	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
 };
 
 /* Wireless SDIO Module */
@@ -186,7 +171,7 @@
 	vmmc-supply = <&vcc_3v3>;
 	vqmmc-supply = <&vcc_1v8>;
 
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 	};
@@ -229,10 +214,25 @@
 	vmmcq-sumpply = <&vcc_1v8>;
 };
 
-&pwm_ef {
+&uart_AO {
 	status = "okay";
-	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_FCLK_DIV4>;
-	clock-names = "clkin0";
+};
+
+&usb0_phy {
+	status = "okay";
+	phy-supply = <&usb_vbus>;
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
index f057fb4..1878ac2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
@@ -59,10 +59,10 @@
 			panic-indicator;
 		};
 	};
+};
 
-	cvbs-connector {
-		status = "disabled";
-	};
+&cvbs_connector {
+	status = "disabled";
 };
 
 &ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
index 743acb5..e76ac31 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
@@ -85,6 +85,34 @@
 			gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
 		};
 	};
+
+	cvbs-connector {
+		compatible = "composite-video-connector";
+
+		port {
+			cvbs_connector_in: endpoint {
+				remote-endpoint = <&cvbs_vdac_out>;
+			};
+		};
+	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+};
+
+
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
 };
 
 &ethmac {
@@ -113,6 +141,18 @@
 	};
 };
 
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &i2c_A {
 	status = "okay";
 	pinctrl-0 = <&i2c_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 86105a6..17d3efd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -97,13 +97,6 @@
 	};
 };
 
-&ethmac {
-	clocks = <&clkc CLKID_ETH>,
-		 <&clkc CLKID_FCLK_DIV2>,
-		 <&clkc CLKID_MPLL2>;
-	clock-names = "stmmaceth", "clkin0", "clkin1";
-};
-
 &aobus {
 	pinctrl_aobus: pinctrl@14 {
 		compatible = "amlogic,meson-gxbb-aobus-pinctrl";
@@ -249,9 +242,119 @@
 				function = "spdif_out_ao";
 			};
 		};
+
+		ao_cec_pins: ao_cec {
+			mux {
+				groups = "ao_cec";
+				function = "cec_ao";
+			};
+		};
+
+		ee_cec_pins: ee_cec {
+			mux {
+				groups = "ee_cec";
+				function = "cec_ao";
+			};
+		};
 	};
 };
 
+&apb {
+	mali: gpu@c0000 {
+		compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+		reg = <0x0 0xc0000 0x0 0x40000>;
+		interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "gp", "gpmmu", "pp", "pmu",
+			"pp0", "ppmmu0", "pp1", "ppmmu1",
+			"pp2", "ppmmu2";
+		clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+		clock-names = "bus", "core";
+
+		/*
+		 * Mali clocking is provided by two identical clock paths
+		 * MALI_0 and MALI_1 muxed to a single clock by a glitch
+		 * free mux to safely change frequency while running.
+		 */
+		assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+				  <&clkc CLKID_MALI_0>,
+				  <&clkc CLKID_MALI>; /* Glitch free mux */
+		assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+					 <0>, /* Do Nothing */
+					 <&clkc CLKID_MALI_0>;
+		assigned-clock-rates = <0>, /* Do Nothing */
+				       <666666666>,
+				       <0>; /* Do Nothing */
+	};
+};
+
+&cbus {
+	spifc: spi@8c80 {
+		compatible = "amlogic,meson-gxbb-spifc";
+		reg = <0x0 0x08c80 0x0 0x80>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clkc CLKID_SPI>;
+		status = "disabled";
+	};
+};
+
+&ethmac {
+	clocks = <&clkc CLKID_ETH>,
+		 <&clkc CLKID_FCLK_DIV2>,
+		 <&clkc CLKID_MPLL2>;
+	clock-names = "stmmaceth", "clkin0", "clkin1";
+};
+
+&hdmi_tx {
+	compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+	resets = <&reset RESET_HDMITX_CAPB3>,
+		 <&reset RESET_HDMI_SYSTEM_RESET>,
+		 <&reset RESET_HDMI_TX>;
+	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+	clocks = <&clkc CLKID_HDMI_PCLK>,
+		 <&clkc CLKID_CLK81>,
+		 <&clkc CLKID_GCLK_VENCI_INT0>;
+	clock-names = "isfr", "iahb", "venci";
+};
+
+&hiubus {
+	clkc: clock-controller@0 {
+		compatible = "amlogic,gxbb-clkc";
+		#clock-cells = <1>;
+		reg = <0x0 0x0 0x0 0x3db>;
+	};
+};
+
+&hwrng {
+	clocks = <&clkc CLKID_RNG0>;
+	clock-names = "core";
+};
+
+&i2c_A {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_AO {
+	clocks = <&clkc CLKID_AO_I2C>;
+};
+
+&i2c_B {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_C {
+	clocks = <&clkc CLKID_I2C>;
+};
+
 &periphs {
 	pinctrl_periphs: pinctrl@4b0 {
 		compatible = "amlogic,meson-gxbb-periphs-pinctrl";
@@ -262,7 +365,7 @@
 		gpio: bank@4b0 {
 			reg = <0x0 0x004b0 0x0 0x28>,
 			      <0x0 0x004e8 0x0 0x14>,
-			      <0x0 0x00120 0x0 0x14>,
+			      <0x0 0x00520 0x0 0x14>,
 			      <0x0 0x00430 0x0 0x40>;
 			reg-names = "mux", "pull", "pull-enable", "gpio";
 			gpio-controller;
@@ -290,6 +393,22 @@
 			};
 		};
 
+		spi_pins: spi {
+			mux {
+				groups = "spi_miso",
+					"spi_mosi",
+					"spi_sclk";
+				function = "spi";
+			};
+		};
+
+		spi_ss0_pins: spi-ss0 {
+			mux {
+				groups = "spi_ss0";
+				function = "spi";
+			};
+		};
+
 		sdcard_pins: sdcard {
 			mux {
 				groups = "sdcard_d0",
@@ -521,67 +640,6 @@
 	};
 };
 
-&hiubus {
-	clkc: clock-controller@0 {
-		compatible = "amlogic,gxbb-clkc";
-		#clock-cells = <1>;
-		reg = <0x0 0x0 0x0 0x3db>;
-	};
-};
-
-&apb {
-	mali: gpu@c0000 {
-		compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
-		reg = <0x0 0xc0000 0x0 0x40000>;
-		interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-names = "gp", "gpmmu", "pp", "pmu",
-			"pp0", "ppmmu0", "pp1", "ppmmu1",
-			"pp2", "ppmmu2";
-		clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
-		clock-names = "bus", "core";
-
-		/*
-		 * Mali clocking is provided by two identical clock paths
-		 * MALI_0 and MALI_1 muxed to a single clock by a glitch
-		 * free mux to safely change frequency while running.
-		 */
-		assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
-				  <&clkc CLKID_MALI_0>,
-				  <&clkc CLKID_MALI>; /* Glitch free mux */
-		assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
-					 <0>, /* Do Nothing */
-					 <&clkc CLKID_MALI_0>;
-		assigned-clock-rates = <0>, /* Do Nothing */
-				       <666666666>,
-				       <0>; /* Do Nothing */
-	};
-};
-
-&i2c_A {
-	clocks = <&clkc CLKID_I2C>;
-};
-
-&i2c_AO {
-	clocks = <&clkc CLKID_AO_I2C>;
-};
-
-&i2c_B {
-	clocks = <&clkc CLKID_I2C>;
-};
-
-&i2c_C {
-	clocks = <&clkc CLKID_I2C>;
-};
-
 &saradc {
 	compatible = "amlogic,meson-gxbb-saradc", "amlogic,meson-saradc";
 	clocks = <&xtal>,
@@ -613,6 +671,13 @@
 	clock-names = "core", "clkin0", "clkin1";
 };
 
+&spicc {
+	clocks = <&clkc CLKID_SPICC>;
+	clock-names = "core";
+	resets = <&reset RESET_PERIPHS_SPICC>;
+	num-cs = <1>;
+};
+
 &spifc {
 	clocks = <&clkc CLKID_SPI>;
 };
@@ -620,20 +685,3 @@
 &vpu {
 	compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
 };
-
-&hwrng {
-	clocks = <&clkc CLKID_RNG0>;
-	clock-names = "core";
-};
-
-&hdmi_tx {
-	compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
-	resets = <&reset RESET_HDMITX_CAPB3>,
-		 <&reset RESET_HDMI_SYSTEM_RESET>,
-		 <&reset RESET_HDMI_TX>;
-	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
-	clocks = <&clkc CLKID_HDMI_PCLK>,
-		 <&clkc CLKID_CLK81>,
-		 <&clkc CLKID_GCLK_VENCI_INT0>;
-	clock-names = "isfr", "iahb", "venci";
-};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
index f9fbfda..3e0c023d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
@@ -84,6 +84,17 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
 };
 
 /* P230 has exclusive choice between internal or external PHY */
@@ -113,6 +124,19 @@
 	};
 };
 
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &saradc {
 	status = "okay";
 	vref-supply = <&vddio_ao18>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index 3c8b0b5..72c5a9f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -54,6 +54,29 @@
 			linux,default-trigger = "default-on";
 		};
 	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
 };
 
 &i2c_A {
@@ -95,7 +118,7 @@
 };
 
 &sd_emmc_a {
-	brcmf: bcrmf@1 {
+	brcmf: wifi@1 {
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 	};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
new file mode 100644
index 0000000..890821d
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+
+#include "meson-gxl-s905x-p212.dtsi"
+
+/ {
+	compatible = "libretech,cc", "amlogic,s905x", "amlogic,meson-gxl";
+	model = "Libre Technology CC";
+
+	cvbs-connector {
+		compatible = "composite-video-connector";
+
+		port {
+			cvbs_connector_in: endpoint {
+				remote-endpoint = <&cvbs_vdac_out>;
+			};
+		};
+	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		system {
+			label = "librecomputer:system-status";
+			gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+			panic-indicator;
+		};
+
+		blue {
+			label = "librecomputer:blue";
+			gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
+/*
+ * The following devices exist but are exposed on the general
+ * purpose GPIO header. The end user may well decide to use those
+ * pins for another purpose.
+ */
+
+&sd_emmc_a {
+	status = "disabled";
+};
+
+&uart_A {
+	status = "disabled";
+};
+
+&wifi32k {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 8873c05..6633a5d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -140,10 +140,10 @@
 	};
 };
 
-&uart_AO {
-	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
-	pinctrl-names = "default";
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
 };
 
 &ethmac {
@@ -152,12 +152,32 @@
 	phy-handle = <&internal_phy>;
 };
 
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 &ir {
 	status = "okay";
 	pinctrl-0 = <&remote_input_ao_pins>;
 	pinctrl-names = "default";
 };
 
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
 /* Wireless SDIO Module */
 &sd_emmc_a {
 	status = "okay";
@@ -217,28 +237,8 @@
 	vqmmc-supply = <&vddio_boot>;
 };
 
-&pwm_ef {
+&uart_AO {
 	status = "okay";
-	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
-	clocks = <&clkc CLKID_FCLK_DIV4>;
-	clock-names = "clkin0";
-};
-
-&cvbs_vdac_port {
-	cvbs_vdac_out: endpoint {
-		remote-endpoint = <&cvbs_connector_in>;
-	};
-};
-
-&hdmi_tx {
-	status = "okay";
-	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
-	pinctrl-names = "default";
-};
-
-&hdmi_tx_tmds_port {
-	hdmi_tx_tmds_out: endpoint {
-		remote-endpoint = <&hdmi_connector_in>;
-	};
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
index db31e09..6ab17c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
@@ -58,6 +58,17 @@
 			};
 		};
 	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
 };
 
 &cvbs_vdac_port {
@@ -66,6 +77,18 @@
 	};
 };
 
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
 /* This UART is brought out to the DB9 connector */
 &uart_AO {
 	status = "okay";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
index 0f78d83..3314a0b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
@@ -48,7 +48,7 @@
 	compatible = "amlogic,s905x", "amlogic,meson-gxl";
 };
 
-/* S905X Only has access to its internal PHY */
+/* S905X only has access to its internal PHY */
 &ethmac {
 	phy-mode = "rmii";
 	phy-handle = <&internal_phy>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d8e096d..8d4f316 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -190,9 +190,59 @@
 				function = "spdif_out_ao";
 			};
 		};
+
+		ao_cec_pins: ao_cec {
+			mux {
+				groups = "ao_cec";
+				function = "cec_ao";
+			};
+		};
+
+		ee_cec_pins: ee_cec {
+			mux {
+				groups = "ee_cec";
+				function = "cec_ao";
+			};
+		};
 	};
 };
 
+&hdmi_tx {
+	compatible = "amlogic,meson-gxl-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+	resets = <&reset RESET_HDMITX_CAPB3>,
+		 <&reset RESET_HDMI_SYSTEM_RESET>,
+		 <&reset RESET_HDMI_TX>;
+	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+	clocks = <&clkc CLKID_HDMI_PCLK>,
+		 <&clkc CLKID_CLK81>,
+		 <&clkc CLKID_GCLK_VENCI_INT0>;
+	clock-names = "isfr", "iahb", "venci";
+};
+
+&hiubus {
+	clkc: clock-controller@0 {
+		compatible = "amlogic,gxl-clkc", "amlogic,gxbb-clkc";
+		#clock-cells = <1>;
+		reg = <0x0 0x0 0x0 0x3db>;
+	};
+};
+
+&i2c_A {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_AO {
+	clocks = <&clkc CLKID_AO_I2C>;
+};
+
+&i2c_B {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_C {
+	clocks = <&clkc CLKID_I2C>;
+};
+
 &periphs {
 	pinctrl_periphs: pinctrl@4b0 {
 		compatible = "amlogic,meson-gxl-periphs-pinctrl";
@@ -203,12 +253,12 @@
 		gpio: bank@4b0 {
 			reg = <0x0 0x004b0 0x0 0x28>,
 			      <0x0 0x004e8 0x0 0x14>,
-			      <0x0 0x00120 0x0 0x14>,
+			      <0x0 0x00520 0x0 0x14>,
 			      <0x0 0x00430 0x0 0x40>;
 			reg-names = "mux", "pull", "pull-enable", "gpio";
 			gpio-controller;
 			#gpio-cells = <2>;
-			gpio-ranges = <&pinctrl_periphs 0 14 101>;
+			gpio-ranges = <&pinctrl_periphs 0 10 101>;
 		};
 
 		emmc_pins: emmc {
@@ -231,6 +281,22 @@
 			};
 		};
 
+		spi_pins: spi {
+			mux {
+				groups = "spi_miso",
+					"spi_mosi",
+					"spi_sclk";
+				function = "spi";
+			};
+		};
+
+		spi_ss0_pins: spi-ss0 {
+			mux {
+				groups = "spi_ss0";
+				function = "spi";
+			};
+		};
+
 		sdcard_pins: sdcard {
 			mux {
 				groups = "sdcard_d0",
@@ -354,6 +420,20 @@
 			};
 		};
 
+		eth_link_led_pins: eth_link_led {
+			mux {
+				groups = "eth_link_led";
+				function = "eth_led";
+			};
+		};
+
+		eth_act_led_pins: eth_act_led {
+			mux {
+				groups = "eth_act_led";
+				function = "eth_led";
+			};
+		};
+
 		pwm_a_pins: pwm_a {
 			mux {
 				groups = "pwm_a";
@@ -501,30 +581,6 @@
 	};
 };
 
-&hiubus {
-	clkc: clock-controller@0 {
-		compatible = "amlogic,gxl-clkc", "amlogic,gxbb-clkc";
-		#clock-cells = <1>;
-		reg = <0x0 0x0 0x0 0x3db>;
-	};
-};
-
-&i2c_A {
-	clocks = <&clkc CLKID_I2C>;
-};
-
-&i2c_AO {
-	clocks = <&clkc CLKID_AO_I2C>;
-};
-
-&i2c_B {
-	clocks = <&clkc CLKID_I2C>;
-};
-
-&i2c_C {
-	clocks = <&clkc CLKID_I2C>;
-};
-
 &saradc {
 	compatible = "amlogic,meson-gxl-saradc", "amlogic,meson-saradc";
 	clocks = <&xtal>,
@@ -556,6 +612,13 @@
 	clock-names = "core", "clkin0", "clkin1";
 };
 
+&spicc {
+	clocks = <&clkc CLKID_SPICC>;
+	clock-names = "core";
+	resets = <&reset RESET_PERIPHS_SPICC>;
+	num-cs = <1>;
+};
+
 &spifc {
 	clocks = <&clkc CLKID_SPI>;
 };
@@ -563,15 +626,3 @@
 &vpu {
 	compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
 };
-
-&hdmi_tx {
-	compatible = "amlogic,meson-gxl-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
-	resets = <&reset RESET_HDMITX_CAPB3>,
-		 <&reset RESET_HDMI_SYSTEM_RESET>,
-		 <&reset RESET_HDMI_TX>;
-	reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
-	clocks = <&clkc CLKID_HDMI_PCLK>,
-		 <&clkc CLKID_CLK81>,
-		 <&clkc CLKID_GCLK_VENCI_INT0>;
-	clock-names = "isfr", "iahb", "venci";
-};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 11b0bf4..5f626d6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -113,11 +113,49 @@
 	};
 };
 
-/* This UART is brought out to the DB9 connector */
-&uart_AO {
+&cvbs_vdac_port {
+	cvbs_vdac_out: endpoint {
+		remote-endpoint = <&cvbs_connector_in>;
+	};
+};
+
+&ethmac {
 	status = "okay";
-	pinctrl-0 = <&uart_ao_a_pins>;
+
+	pinctrl-0 = <&eth_pins>;
 	pinctrl-names = "default";
+
+	/* Select external PHY by default */
+	phy-handle = <&external_phy>;
+
+	amlogic,tx-delay-ns = <2>;
+
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	/* External PHY is in RGMII */
+	phy-mode = "rgmii";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+		max-speed = <1000>;
+	};
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
 };
 
 &ir {
@@ -164,47 +202,8 @@
 	vqmmc-supply = <&vddio_boot>;
 };
 
-&ethmac {
+&uart_AO {
 	status = "okay";
-
-	pinctrl-0 = <&eth_pins>;
+	pinctrl-0 = <&uart_ao_a_pins>;
 	pinctrl-names = "default";
-
-	/* Select external PHY by default */
-	phy-handle = <&external_phy>;
-
-	amlogic,tx-delay-ns = <2>;
-
-	snps,reset-gpio = <&gpio GPIOZ_14 0>;
-	snps,reset-delays-us = <0 10000 1000000>;
-	snps,reset-active-low;
-
-	/* External PHY is in RGMII */
-	phy-mode = "rgmii";
-};
-
-&external_mdio {
-	external_phy: ethernet-phy@0 {
-		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
-		reg = <0>;
-		max-speed = <1000>;
-	};
-};
-
-&cvbs_vdac_port {
-	cvbs_vdac_out: endpoint {
-		remote-endpoint = <&cvbs_connector_in>;
-	};
-};
-
-&hdmi_tx {
-	status = "okay";
-	pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
-	pinctrl-names = "default";
-};
-
-&hdmi_tx_tmds_port {
-	hdmi_tx_tmds_out: endpoint {
-		remote-endpoint = <&hdmi_connector_in>;
-	};
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
new file mode 100644
index 0000000..08f1dd6
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * Based on nexbox-a1:
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+
+/ {
+	compatible = "kingnovel,r-box-pro", "amlogic,s912", "amlogic,meson-gxm";
+	model = "R-Box Pro";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>; /* 2 GiB or 3 GiB */
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		blue {
+			label = "rbox-pro:blue:on";
+			gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+
+		red {
+			label = "rbox-pro:red:standby";
+			gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			retain-state-suspended;
+			panic-indicator;
+		};
+	};
+
+	vddio_boot: regulator-vddio-boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+};
+
+&ethmac {
+	status = "okay";
+
+	pinctrl-0 = <&eth_pins>;
+	pinctrl-names = "default";
+
+	/* Select external PHY by default */
+	phy-handle = <&external_phy>;
+
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	amlogic,tx-delay-ns = <2>;
+
+	/* External PHY is in RGMII */
+	phy-mode = "rgmii";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+		max-speed = <1000>;
+	};
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+
+	brcmf: brcmf@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index bfe7d68..e8b7413 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -53,7 +53,6 @@
 		#global-interrupts = <1>;
 		dma-coherent;
 		power-domains = <&scpi_devpd 0>;
-		status = "disabled";
 	};
 
 	gic: interrupt-controller@2c010000 {
@@ -202,6 +201,15 @@
 		};
 	};
 
+	cpu_debug0: cpu_debug@22010000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x22010000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm0: etm@22040000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x22040000 0 0x1000>;
@@ -252,6 +260,15 @@
 		};
 	};
 
+	cpu_debug1: cpu_debug@22110000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x22110000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm1: etm@22140000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x22140000 0 0x1000>;
@@ -266,6 +283,15 @@
 		};
 	};
 
+	cpu_debug2: cpu_debug@23010000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x23010000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm2: etm@23040000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x23040000 0 0x1000>;
@@ -330,6 +356,15 @@
 		};
 	};
 
+	cpu_debug3: cpu_debug@23110000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x23110000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm3: etm@23140000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x23140000 0 0x1000>;
@@ -344,6 +379,15 @@
 		};
 	};
 
+	cpu_debug4: cpu_debug@23210000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x23210000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm4: etm@23240000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x23240000 0 0x1000>;
@@ -358,6 +402,15 @@
 		};
 	};
 
+	cpu_debug5: cpu_debug@23310000 {
+		compatible = "arm,coresight-cpu-debug", "arm,primecell";
+		reg = <0x0 0x23310000 0x0 0x1000>;
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		power-domains = <&scpi_devpd 0>;
+	};
+
 	etm5: etm@23340000 {
 		compatible = "arm,coresight-etm4x", "arm,primecell";
 		reg = <0 0x23340000 0 0x1000>;
@@ -546,7 +599,6 @@
 			     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 		#iommu-cells = <1>;
 		#global-interrupts = <1>;
-		status = "disabled";
 	};
 
 	smmu_hdlcd0: iommu@7fb20000 {
@@ -556,7 +608,6 @@
 			     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 		#iommu-cells = <1>;
 		#global-interrupts = <1>;
-		status = "disabled";
 	};
 
 	smmu_usb: iommu@7fb30000 {
@@ -567,7 +618,6 @@
 		#iommu-cells = <1>;
 		#global-interrupts = <1>;
 		dma-coherent;
-		status = "disabled";
 	};
 
 	dma@7ff00000 {
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 0e8943a..aed6389 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -281,3 +281,27 @@
 &stm_out_port {
 	remote-endpoint = <&csys1_funnel_in_port0>;
 };
+
+&cpu_debug0 {
+	cpu = <&A57_0>;
+};
+
+&cpu_debug1 {
+	cpu = <&A57_1>;
+};
+
+&cpu_debug2 {
+	cpu = <&A53_0>;
+};
+
+&cpu_debug3 {
+	cpu = <&A53_1>;
+};
+
+&cpu_debug4 {
+	cpu = <&A53_2>;
+};
+
+&cpu_debug5 {
+	cpu = <&A53_3>;
+};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 405e2fb..b39b6d6 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -281,3 +281,27 @@
 &stm_out_port {
 	remote-endpoint = <&csys1_funnel_in_port0>;
 };
+
+&cpu_debug0 {
+	cpu = <&A72_0>;
+};
+
+&cpu_debug1 {
+	cpu = <&A72_1>;
+};
+
+&cpu_debug2 {
+	cpu = <&A53_0>;
+};
+
+&cpu_debug3 {
+	cpu = <&A53_1>;
+};
+
+&cpu_debug4 {
+	cpu = <&A53_2>;
+};
+
+&cpu_debug5 {
+	cpu = <&A53_3>;
+};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index 0220494..c9236c4 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -268,3 +268,27 @@
 		};
 	};
 };
+
+&cpu_debug0 {
+	cpu = <&A57_0>;
+};
+
+&cpu_debug1 {
+	cpu = <&A57_1>;
+};
+
+&cpu_debug2 {
+	cpu = <&A53_0>;
+};
+
+&cpu_debug3 {
+	cpu = <&A53_1>;
+};
+
+&cpu_debug4 {
+	cpu = <&A53_2>;
+};
+
+&cpu_debug5 {
+	cpu = <&A53_3>;
+};
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index bfa8f8e..f11bdd6 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,6 +1,7 @@
 dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb
 dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb ns2-xmc.dtb
 
+dts-dirs	:= stingray
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
index c309633..972f14d 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -22,3 +22,20 @@
 &uart1 {
 	status = "okay";
 };
+
+/* SDHCI is used to control the SDIO for wireless */
+&sdhci {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_gpio34>;
+	status = "okay";
+	bus-width = <4>;
+	non-removable;
+};
+
+/* SDHOST is used to drive the SD card */
+&sdhost {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdhost_gpio48>;
+	status = "okay";
+	bus-width = <4>;
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
index 19f2fe6..2d5de6f0 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
@@ -75,6 +75,10 @@
 	interrupts = <8>;
 };
 
+&cpu_thermal {
+	coefficients = <(-538)	412000>;
+};
+
 /* enable thermal sensor with the correct compatible property set */
 &thermal {
 	compatible = "brcm,bcm2837-thermal";
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 35a309a..35c8457 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -460,6 +460,20 @@
 			};
 		};
 
+		usbdrd_phy: phy@66000960 {
+			#phy-cells = <0>;
+			compatible = "brcm,ns2-drd-phy";
+			reg = <0x66000960 0x24>,
+			      <0x67012800 0x4>,
+			      <0x6501d148 0x4>,
+			      <0x664d0700 0x4>;
+			reg-names = "icfg", "rst-ctrl",
+				    "crmu-ctrl", "usb2-strap";
+			id-gpios = <&gpio_g 30 0>;
+			vbus-gpios = <&gpio_g 31 0>;
+			status = "disabled";
+		};
+
 		pwm: pwm@66010000 {
 			compatible = "brcm,iproc-pwm";
 			reg = <0x66010000 0x28>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/Makefile b/arch/arm64/boot/dts/broadcom/stingray/Makefile
new file mode 100644
index 0000000..f70028e
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/Makefile
@@ -0,0 +1,6 @@
+dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742k.dtb
+dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742t.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi
new file mode 100644
index 0000000..5dca7d1
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742-base.dtsi
@@ -0,0 +1,131 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2016-2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "stingray.dtsi"
+
+/ {
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	aliases {
+		serial0 = &uart1;
+		serial1 = &uart0;
+		serial2 = &uart2;
+		serial3 = &uart3;
+	};
+
+	sdio0_vddo_ctrl_reg: sdio0_vddo_ctrl {
+		compatible = "regulator-gpio";
+		regulator-name = "sdio0_vddo_ctrl_reg";
+		regulator-type = "voltage";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		gpios = <&pca9505 18 0>;
+		states = <3300000 0x0
+			  1800000 0x1>;
+	};
+
+	sdio1_vddo_ctrl_reg: sdio1_vddo_ctrl {
+		compatible = "regulator-gpio";
+		regulator-name = "sdio1_vddo_ctrl_reg";
+		regulator-type = "voltage";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		gpios = <&pca9505 19 0>;
+		states = <3300000 0x0
+			  1800000 0x1>;
+	};
+};
+
+&memory { /* Default DRAM banks */
+	reg = <0x00000000 0x80000000 0x0 0x80000000>, /* 2G @ 2G */
+	      <0x00000008 0x80000000 0x1 0x80000000>; /* 6G @ 34G */
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&pwm {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	pca9505: pca9505@20 {
+		compatible = "nxp,pca9505";
+		gpio-controller;
+		#gpio-cells = <2>;
+		reg = <0x20>;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+
+	pcf8574: pcf8574@27 {
+		compatible = "nxp,pcf8574a";
+		gpio-controller;
+		#gpio-cells = <2>;
+		reg = <0x27>;
+	};
+};
+
+&nand {
+	status = "okay";
+	nandcs@0 {
+		compatible = "brcm,nandcs";
+		reg = <0>;
+		nand-ecc-mode = "hw";
+		nand-ecc-strength = <8>;
+		nand-ecc-step-size = <512>;
+		nand-bus-width = <16>;
+		brcm,nand-oob-sector-size = <16>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
+
+&sdio0 {
+	vqmmc-supply = <&sdio0_vddo_ctrl_reg>;
+	non-removable;
+	full-pwr-cycle;
+	status = "okay";
+};
+
+&sdio1 {
+	vqmmc-supply = <&sdio1_vddo_ctrl_reg>;
+	full-pwr-cycle;
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
new file mode 100644
index 0000000..5671669
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
@@ -0,0 +1,78 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2016-2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm958742-base.dtsi"
+
+/ {
+	compatible = "brcm,bcm958742k", "brcm,stingray";
+	model = "Stingray Combo SVK (BCM958742K)";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&ssp0 {
+	pinctrl-0 = <&spi0_pins>;
+	pinctrl-names = "default";
+	cs-gpios = <&gpio_hsls 34 0>;
+	status = "okay";
+
+	spi-flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <20000000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
+
+&ssp1 {
+	pinctrl-0 = <&spi1_pins>;
+	pinctrl-names = "default";
+	cs-gpios = <&gpio_hsls 96 0>;
+	status = "okay";
+
+	spi-flash@0 {
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <20000000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
new file mode 100644
index 0000000..6ebe399
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
@@ -0,0 +1,40 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm958742-base.dtsi"
+
+/ {
+	compatible = "brcm,bcm958742t", "brcm,stingray";
+	model = "Stingray SST100 (BCM958742T)";
+};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-clock.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-clock.dtsi
new file mode 100644
index 0000000..cbc4337
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-clock.dtsi
@@ -0,0 +1,170 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2016-2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/clock/bcm-sr.h>
+
+		osc: oscillator {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+
+		crmu_ref25m: crmu_ref25m {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&osc>;
+			clock-div = <2>;
+			clock-mult = <1>;
+		};
+
+		genpll0: genpll0@0001d104 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-genpll0";
+			reg = <0x0001d104 0x32>,
+			      <0x0001c854 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "genpll0", "clk_125", "clk_scr",
+					     "clk_250", "clk_pcie_axi",
+					     "clk_paxc_axi_x2",
+					     "clk_paxc_axi";
+		};
+
+		genpll3: genpll3@0001d1e0 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-genpll3";
+			reg = <0x0001d1e0 0x32>,
+			      <0x0001c854 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "genpll3", "clk_hsls",
+					     "clk_sdio";
+		};
+
+		genpll4: genpll4@0001d214 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-genpll4";
+			reg = <0x0001d214 0x32>,
+			      <0x0001c854 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "genpll4", "clk_ccn",
+					     "clk_tpiu_pll", "noc_clk",
+					     "pll_chclk_fs4",
+					     "clk_bridge_fscpu";
+		};
+
+		genpll5: genpll5@0001d248 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-genpll5";
+			reg = <0x0001d248 0x32>,
+			      <0x0001c870 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "genpll5", "fs4_hf_clk",
+					     "crypto_ae_clk", "raid_ae_clk";
+		};
+
+		lcpll0: lcpll0@0001d0c4 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-lcpll0";
+			reg = <0x0001d0c4 0x3c>,
+			      <0x0001c870 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "lcpll0", "clk_sata_refp",
+					     "clk_sata_refn", "clk_sata_350",
+					     "clk_sata_500";
+		};
+
+		lcpll1: lcpll1@0001d138 {
+			#clock-cells = <1>;
+			compatible = "brcm,sr-lcpll1";
+			reg = <0x0001d138 0x3c>,
+			      <0x0001c870 0x4>;
+			clocks = <&osc>;
+			clock-output-names = "lcpll1", "clk_wanpn",
+					     "clk_usb_ref",
+					     "timesync_evt_clk";
+		};
+
+		hsls_clk: hsls_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&genpll3 1>;
+			clock-div = <1>;
+			clock-mult = <1>;
+		};
+
+		hsls_div2_clk: hsls_div2_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&genpll3 BCM_SR_GENPLL3_HSLS_CLK>;
+			clock-div = <2>;
+			clock-mult = <1>;
+
+		};
+
+		hsls_div4_clk: hsls_div4_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&genpll3 BCM_SR_GENPLL3_HSLS_CLK>;
+			clock-div = <4>;
+			clock-mult = <1>;
+		};
+
+		hsls_25m_clk: hsls_25m_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&crmu_ref25m>;
+			clock-div = <1>;
+			clock-mult = <1>;
+		};
+
+		hsls_25m_div2_clk: hsls_25m_div2_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&hsls_25m_clk>;
+			clock-div = <2>;
+			clock-mult = <1>;
+		};
+
+		sdio0_clk: sdio0_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&genpll3 BCM_SR_GENPLL3_SDIO_CLK>;
+			clock-div = <1>;
+			clock-mult = <1>;
+		};
+
+		sdio1_clk: sdio1_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&genpll3 BCM_SR_GENPLL3_SDIO_CLK>;
+			clock-div = <1>;
+			clock-mult = <1>;
+		};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
new file mode 100644
index 0000000..15214d0
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
@@ -0,0 +1,345 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2016-2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/pinctrl/brcm,pinctrl-stingray.h>
+
+		pinconf: pinconf@00140000 {
+			compatible = "pinconf-single";
+			reg = <0x00140000 0x250>;
+			pinctrl-single,register-width = <32>;
+
+			/* pinconf functions */
+		};
+
+		pinmux: pinmux@0014029c {
+			compatible = "pinctrl-single";
+			reg = <0x0014029c 0x250>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <0xf>;
+			pinctrl-single,gpio-range = <
+				&range 0 154 MODE_GPIO
+				>;
+			range: gpio-range {
+				#pinctrl-single,gpio-range-cells = <3>;
+			};
+
+			/* pinctrl functions */
+			tsio_pins: pinmux_gpio_14 {
+				pinctrl-single,pins = <
+					0x038 MODE_NITRO /* tsio_0 */
+					0x03c MODE_NITRO /* tsio_1 */
+				>;
+			};
+
+			nor_pins: pinmux_pnor_adv_n {
+				pinctrl-single,pins = <
+					0x0ac MODE_PNOR /* nand_ce1_n */
+					0x0b0 MODE_PNOR /* nand_ce0_n */
+					0x0b4 MODE_PNOR /* nand_we_n */
+					0x0b8 MODE_PNOR /* nand_wp_n */
+					0x0bc MODE_PNOR /* nand_re_n */
+					0x0c0 MODE_PNOR /* nand_rdy_bsy_n */
+					0x0c4 MODE_PNOR /* nand_io0_0 */
+					0x0c8 MODE_PNOR /* nand_io1_0 */
+					0x0cc MODE_PNOR /* nand_io2_0 */
+					0x0d0 MODE_PNOR /* nand_io3_0 */
+					0x0d4 MODE_PNOR /* nand_io4_0 */
+					0x0d8 MODE_PNOR /* nand_io5_0 */
+					0x0dc MODE_PNOR /* nand_io6_0 */
+					0x0e0 MODE_PNOR /* nand_io7_0 */
+					0x0e4 MODE_PNOR /* nand_io8_0 */
+					0x0e8 MODE_PNOR /* nand_io9_0 */
+					0x0ec MODE_PNOR /* nand_io10_0 */
+					0x0f0 MODE_PNOR /* nand_io11_0 */
+					0x0f4 MODE_PNOR /* nand_io12_0 */
+					0x0f8 MODE_PNOR /* nand_io13_0 */
+					0x0fc MODE_PNOR /* nand_io14_0 */
+					0x100 MODE_PNOR /* nand_io15_0 */
+					0x104 MODE_PNOR /* nand_ale_0 */
+					0x108 MODE_PNOR /* nand_cle_0 */
+					0x040 MODE_PNOR /* pnor_adv_n */
+					0x044 MODE_PNOR /* pnor_baa_n */
+					0x048 MODE_PNOR /* pnor_bls_0_n */
+					0x04c MODE_PNOR /* pnor_bls_1_n */
+					0x050 MODE_PNOR /* pnor_cre */
+					0x054 MODE_PNOR /* pnor_cs_2_n */
+					0x058 MODE_PNOR /* pnor_cs_1_n */
+					0x05c MODE_PNOR /* pnor_cs_0_n */
+					0x060 MODE_PNOR /* pnor_we_n */
+					0x064 MODE_PNOR /* pnor_oe_n */
+					0x068 MODE_PNOR /* pnor_intr */
+					0x06c MODE_PNOR /* pnor_dat_0 */
+					0x070 MODE_PNOR /* pnor_dat_1 */
+					0x074 MODE_PNOR /* pnor_dat_2 */
+					0x078 MODE_PNOR /* pnor_dat_3 */
+					0x07c MODE_PNOR /* pnor_dat_4 */
+					0x080 MODE_PNOR /* pnor_dat_5 */
+					0x084 MODE_PNOR /* pnor_dat_6 */
+					0x088 MODE_PNOR /* pnor_dat_7 */
+					0x08c MODE_PNOR /* pnor_dat_8 */
+					0x090 MODE_PNOR /* pnor_dat_9 */
+					0x094 MODE_PNOR /* pnor_dat_10 */
+					0x098 MODE_PNOR /* pnor_dat_11 */
+					0x09c MODE_PNOR /* pnor_dat_12 */
+					0x0a0 MODE_PNOR /* pnor_dat_13 */
+					0x0a4 MODE_PNOR /* pnor_dat_14 */
+					0x0a8 MODE_PNOR /* pnor_dat_15 */
+				>;
+			};
+
+			nand_pins: pinmux_nand_ce1_n {
+				pinctrl-single,pins = <
+					0x0ac MODE_NAND /* nand_ce1_n */
+					0x0b0 MODE_NAND /* nand_ce0_n */
+					0x0b4 MODE_NAND /* nand_we_n */
+					0x0b8 MODE_NAND /* nand_wp_n */
+					0x0bc MODE_NAND /* nand_re_n */
+					0x0c0 MODE_NAND /* nand_rdy_bsy_n */
+					0x0c4 MODE_NAND /* nand_io0_0 */
+					0x0c8 MODE_NAND /* nand_io1_0 */
+					0x0cc MODE_NAND /* nand_io2_0 */
+					0x0d0 MODE_NAND /* nand_io3_0 */
+					0x0d4 MODE_NAND /* nand_io4_0 */
+					0x0d8 MODE_NAND /* nand_io5_0 */
+					0x0dc MODE_NAND /* nand_io6_0 */
+					0x0e0 MODE_NAND /* nand_io7_0 */
+					0x0e4 MODE_NAND /* nand_io8_0 */
+					0x0e8 MODE_NAND /* nand_io9_0 */
+					0x0ec MODE_NAND /* nand_io10_0 */
+					0x0f0 MODE_NAND /* nand_io11_0 */
+					0x0f4 MODE_NAND /* nand_io12_0 */
+					0x0f8 MODE_NAND /* nand_io13_0 */
+					0x0fc MODE_NAND /* nand_io14_0 */
+					0x100 MODE_NAND /* nand_io15_0 */
+					0x104 MODE_NAND /* nand_ale_0 */
+					0x108 MODE_NAND /* nand_cle_0 */
+				>;
+			};
+
+			pwm0_pins: pinmux_pwm_0 {
+				pinctrl-single,pins = <
+					0x10c MODE_NITRO
+				>;
+			};
+
+			pwm1_pins: pinmux_pwm_1 {
+				pinctrl-single,pins = <
+					0x110 MODE_NITRO
+				>;
+			};
+
+			pwm2_pins: pinmux_pwm_2 {
+				pinctrl-single,pins = <
+					0x114 MODE_NITRO
+				>;
+			};
+
+			pwm3_pins: pinmux_pwm_3 {
+				pinctrl-single,pins = <
+					0x118 MODE_NITRO
+				>;
+			};
+
+			dbu_rxd_pins: pinmux_uart1_sin_nitro {
+				pinctrl-single,pins = <
+					0x11c MODE_NITRO /* dbu_rxd */
+					0x120 MODE_NITRO /* dbu_txd */
+				>;
+			};
+
+			uart1_pins: pinmux_uart1_sin_nand {
+				pinctrl-single,pins = <
+					0x11c MODE_NAND /* uart1_sin */
+					0x120 MODE_NAND /* uart1_out */
+				>;
+			};
+
+			uart2_pins: pinmux_uart2_sin {
+				pinctrl-single,pins = <
+					0x124 MODE_NITRO /* uart2_sin */
+					0x128 MODE_NITRO /* uart2_out */
+				>;
+			};
+
+			uart3_pins: pinmux_uart3_sin {
+				pinctrl-single,pins = <
+					0x12c MODE_NITRO /* uart3_sin */
+					0x130 MODE_NITRO /* uart3_out */
+				>;
+			};
+
+			i2s_pins: pinmux_i2s_bitclk {
+				pinctrl-single,pins = <
+					0x134 MODE_NITRO /* i2s_bitclk */
+					0x138 MODE_NITRO /* i2s_sdout */
+					0x13c MODE_NITRO /* i2s_sdin */
+					0x140 MODE_NITRO /* i2s_ws */
+					0x144 MODE_NITRO /* i2s_mclk */
+					0x148 MODE_NITRO /* i2s_spdif_out */
+				>;
+			};
+
+			qspi_pins: pinmux_qspi_hold_n {
+				pinctrl-single,pins = <
+					0x14c MODE_NAND /* qspi_hold_n */
+					0x150 MODE_NAND /* qspi_wp_n */
+					0x154 MODE_NAND /* qspi_sck */
+					0x158 MODE_NAND /* qspi_cs_n */
+					0x15c MODE_NAND /* qspi_mosi */
+					0x160 MODE_NAND /* qspi_miso */
+				>;
+			};
+
+			mdio_pins: pinmux_ext_mdio {
+				pinctrl-single,pins = <
+					0x164 MODE_NITRO /* ext_mdio */
+					0x168 MODE_NITRO /* ext_mdc */
+				>;
+			};
+
+			i2c0_pins: pinmux_i2c0_sda {
+				pinctrl-single,pins = <
+					0x16c MODE_NITRO /* i2c0_sda */
+					0x170 MODE_NITRO /* i2c0_scl */
+				>;
+			};
+
+			i2c1_pins: pinmux_i2c1_sda {
+				pinctrl-single,pins = <
+					0x174 MODE_NITRO /* i2c1_sda */
+					0x178 MODE_NITRO /* i2c1_scl */
+				>;
+			};
+
+			sdio0_pins: pinmux_sdio0_cd_l {
+				pinctrl-single,pins = <
+					0x17c MODE_NITRO /* sdio0_cd_l */
+					0x180 MODE_NITRO /* sdio0_clk_sdcard */
+					0x184 MODE_NITRO /* sdio0_data0 */
+					0x188 MODE_NITRO /* sdio0_data1 */
+					0x18c MODE_NITRO /* sdio0_data2 */
+					0x190 MODE_NITRO /* sdio0_data3 */
+					0x194 MODE_NITRO /* sdio0_data4 */
+					0x198 MODE_NITRO /* sdio0_data5 */
+					0x19c MODE_NITRO /* sdio0_data6 */
+					0x1a0 MODE_NITRO /* sdio0_data7 */
+					0x1a4 MODE_NITRO /* sdio0_cmd */
+					0x1a8 MODE_NITRO /* sdio0_emmc_rst_n */
+					0x1ac MODE_NITRO /* sdio0_led_on */
+					0x1b0 MODE_NITRO /* sdio0_wp */
+				>;
+			};
+
+			sdio1_pins: pinmux_sdio1_cd_l {
+				pinctrl-single,pins = <
+					0x1b4 MODE_NITRO /* sdio1_cd_l */
+					0x1b8 MODE_NITRO /* sdio1_clk_sdcard */
+					0x1bc MODE_NITRO /* sdio1_data0 */
+					0x1c0 MODE_NITRO /* sdio1_data1 */
+					0x1c4 MODE_NITRO /* sdio1_data2 */
+					0x1c8 MODE_NITRO /* sdio1_data3 */
+					0x1cc MODE_NITRO /* sdio1_data4 */
+					0x1d0 MODE_NITRO /* sdio1_data5 */
+					0x1d4 MODE_NITRO /* sdio1_data6 */
+					0x1d8 MODE_NITRO /* sdio1_data7 */
+					0x1dc MODE_NITRO /* sdio1_cmd */
+					0x1e0 MODE_NITRO /* sdio1_emmc_rst_n */
+					0x1e4 MODE_NITRO /* sdio1_led_on */
+					0x1e8 MODE_NITRO /* sdio1_wp */
+				>;
+			};
+
+			spi0_pins: pinmux_spi0_sck_nand {
+				pinctrl-single,pins = <
+					0x1ec MODE_NITRO /* spi0_sck */
+					0x1f0 MODE_NITRO /* spi0_rxd */
+					0x1f4 MODE_NITRO /* spi0_fss */
+					0x1f8 MODE_NITRO /* spi0_txd */
+				>;
+			};
+
+			spi1_pins: pinmux_spi1_sck_nand {
+				pinctrl-single,pins = <
+					0x1fc MODE_NITRO /* spi1_sck */
+					0x200 MODE_NITRO /* spi1_rxd */
+					0x204 MODE_NITRO /* spi1_fss */
+					0x208 MODE_NITRO /* spi1_txd */
+				>;
+			};
+
+			nuart_pins: pinmux_uart0_sin_nitro {
+				pinctrl-single,pins = <
+					0x20c MODE_NITRO /* nuart_rxd */
+					0x210 MODE_NITRO /* nuart_txd */
+				>;
+			};
+
+			uart0_pins: pinmux_uart0_sin_nand {
+				pinctrl-single,pins = <
+					0x20c MODE_NAND /* uart0_sin */
+					0x210 MODE_NAND /* uart0_out */
+					0x214 MODE_NAND /* uart0_rts */
+					0x218 MODE_NAND /* uart0_cts */
+					0x21c MODE_NAND /* uart0_dtr */
+					0x220 MODE_NAND /* uart0_dcd */
+					0x224 MODE_NAND /* uart0_dsr */
+					0x228 MODE_NAND /* uart0_ri */
+				>;
+			};
+
+			drdu2_pins: pinmux_drdu2_overcurrent {
+				pinctrl-single,pins = <
+					0x22c MODE_NITRO /* drdu2_overcurrent */
+					0x230 MODE_NITRO /* drdu2_vbus_ppc */
+					0x234 MODE_NITRO /* drdu2_vbus_present */
+					0x238 MODE_NITRO /* drdu2_id */
+				>;
+			};
+
+			drdu3_pins: pinmux_drdu3_overcurrent {
+				pinctrl-single,pins = <
+					0x23c MODE_NITRO /* drdu3_overcurrent */
+					0x240 MODE_NITRO /* drdu3_vbus_ppc */
+					0x244 MODE_NITRO /* drdu3_vbus_present */
+					0x248 MODE_NITRO /* drdu3_id */
+				>;
+			};
+
+			usb3h_pins: pinmux_usb3h_overcurrent {
+				pinctrl-single,pins = <
+					0x24c MODE_NITRO /* usb3h_overcurrent */
+					0x250 MODE_NITRO /* usb3h_vbus_ppc */
+				>;
+			};
+		};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
new file mode 100644
index 0000000..49933cf
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -0,0 +1,460 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2015-2017 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "brcm,stingray";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu@000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+		};
+
+		cpu@001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+		};
+
+		cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+		};
+
+		cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x101>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+		};
+
+		cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER2_L2>;
+		};
+
+		cpu@201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x201>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER2_L2>;
+		};
+
+		cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER3_L2>;
+		};
+
+		cpu@301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x301>;
+			enable-method = "psci";
+			next-level-cache = <&CLUSTER3_L2>;
+		};
+
+		CLUSTER0_L2: l2-cache@000 {
+			compatible = "cache";
+		};
+
+		CLUSTER1_L2: l2-cache@100 {
+			compatible = "cache";
+		};
+
+		CLUSTER2_L2: l2-cache@200 {
+			compatible = "cache";
+		};
+
+		CLUSTER3_L2: l2-cache@300 {
+			compatible = "cache";
+		};
+	};
+
+	memory: memory@80000000 {
+		device_type = "memory";
+		reg = <0x00000000 0x80000000 0 0x40000000>;
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	scr {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x61000000 0x05000000>;
+
+		gic: interrupt-controller@02c00000 {
+			compatible = "arm,gic-v3";
+			#interrupt-cells = <3>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			interrupt-controller;
+			reg = <0x02c00000 0x010000>, /* GICD */
+			      <0x02e00000 0x600000>; /* GICR */
+			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+			gic_its: gic-its@63c20000 {
+				compatible = "arm,gic-v3-its";
+				msi-controller;
+				#msi-cells = <1>;
+				reg = <0x02c20000 0x10000>;
+			};
+		};
+
+		smmu: mmu@03000000 {
+			compatible = "arm,mmu-500";
+			reg = <0x03000000 0x80000>;
+			#global-interrupts = <1>;
+			interrupts = <GIC_SPI 704 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 711 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 712 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 713 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 714 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 715 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 716 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 717 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 718 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 719 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 720 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 721 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 722 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 723 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 724 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 725 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 726 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 727 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 728 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 729 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 730 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 731 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 732 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 733 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 734 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 735 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 736 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 737 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 738 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 739 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 740 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 741 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 742 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 743 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 744 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 745 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 746 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 747 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 748 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 749 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 750 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 751 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 752 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 753 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 754 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 755 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 756 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 757 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 758 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 759 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 760 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 761 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 762 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 763 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 764 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 765 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 766 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 767 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 768 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 769 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 770 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 771 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 772 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 773 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 774 IRQ_TYPE_LEVEL_HIGH>;
+			#iommu-cells = <2>;
+		};
+	};
+
+	crmu: crmu {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x66400000 0x100000>;
+
+		#include "stingray-clock.dtsi"
+
+		gpio_crmu: gpio@00024800 {
+			compatible = "brcm,iproc-gpio";
+			reg = <0x00024800 0x4c>;
+			ngpios = <6>;
+			#gpio-cells = <2>;
+			gpio-controller;
+		};
+	};
+
+	hsls {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x68900000 0x17700000>;
+
+		#include "stingray-pinctrl.dtsi"
+
+		pwm: pwm@00010000 {
+			compatible = "brcm,iproc-pwm";
+			reg = <0x00010000 0x1000>;
+			clocks = <&crmu_ref25m>;
+			#pwm-cells = <3>;
+			status = "disabled";
+		};
+
+		i2c0: i2c@000b0000 {
+			compatible = "brcm,iproc-i2c";
+			reg = <0x000b0000 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+			clock-frequency = <100000>;
+			status = "disabled";
+		};
+
+		wdt0: watchdog@000c0000 {
+			compatible = "arm,sp805", "arm,primecell";
+			reg = <0x000c0000 0x1000>;
+			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&hsls_25m_div2_clk>, <&hsls_div4_clk>;
+			clock-names = "wdogclk", "apb_pclk";
+		};
+
+		gpio_hsls: gpio@000d0000 {
+			compatible = "brcm,iproc-gpio";
+			reg = <0x000d0000 0x864>;
+			ngpios = <151>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-ranges = <&pinmux 0 0 16>,
+					<&pinmux 16 71 2>,
+					<&pinmux 18 131 8>,
+					<&pinmux 26 83 6>,
+					<&pinmux 32 123 4>,
+					<&pinmux 36 43 24>,
+					<&pinmux 60 89 2>,
+					<&pinmux 62 73 4>,
+					<&pinmux 66 95 28>,
+					<&pinmux 94 127 4>,
+					<&pinmux 98 139 10>,
+					<&pinmux 108 16 27>,
+					<&pinmux 135 77 6>,
+					<&pinmux 141 67 4>,
+					<&pinmux 145 149 6>,
+					<&pinmux 151 91 4>;
+		};
+
+		i2c1: i2c@000e0000 {
+			compatible = "brcm,iproc-i2c";
+			reg = <0x000e0000 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+			clock-frequency = <100000>;
+			status = "disabled";
+		};
+
+		uart0: uart@00100000 {
+			device_type = "serial";
+			compatible = "snps,dw-apb-uart";
+			reg = <0x00100000 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <25000000>;
+			interrupt-parent = <&gic>;
+			interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart1: uart@00110000 {
+			device_type = "serial";
+			compatible = "snps,dw-apb-uart";
+			reg = <0x00110000 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <25000000>;
+			interrupt-parent = <&gic>;
+			interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart2: uart@00120000 {
+			device_type = "serial";
+			compatible = "snps,dw-apb-uart";
+			reg = <0x00120000 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <25000000>;
+			interrupt-parent = <&gic>;
+			interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		uart3: uart@00130000 {
+			device_type = "serial";
+			compatible = "snps,dw-apb-uart";
+			reg = <0x00130000 0x1000>;
+			reg-shift = <2>;
+			clock-frequency = <25000000>;
+			interrupt-parent = <&gic>;
+			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		ssp0: ssp@00180000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x00180000 0x1000>;
+			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&hsls_div2_clk>, <&hsls_div2_clk>;
+			clock-names = "spiclk", "apb_pclk";
+			num-cs = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		ssp1: ssp@00190000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x00190000 0x1000>;
+			interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&hsls_div2_clk>, <&hsls_div2_clk>;
+			clock-names = "spiclk", "apb_pclk";
+			num-cs = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		hwrng: hwrng@00220000 {
+			compatible = "brcm,iproc-rng200";
+			reg = <0x00220000 0x28>;
+		};
+
+		dma0: dma@00310000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x00310000 0x1000>;
+			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			#dma-channels = <8>;
+			#dma-requests = <32>;
+			clocks = <&hsls_div2_clk>;
+			clock-names = "apb_pclk";
+			iommus = <&smmu 0x6000 0x0000>;
+		};
+
+		nand: nand@00360000 {
+			compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
+			reg = <0x00360000 0x600>,
+			      <0x0050a408 0x600>,
+			      <0x00360f00 0x20>;
+			reg-names = "nand", "iproc-idm", "iproc-ext";
+			interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			brcm,nand-has-wp;
+			status = "disabled";
+		};
+
+		sdio0: sdhci@003f1000 {
+			compatible = "brcm,sdhci-iproc";
+			reg = <0x003f1000 0x100>;
+			interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
+			bus-width = <8>;
+			clocks = <&sdio0_clk>;
+			iommus = <&smmu 0x6002 0x0000>;
+			status = "disabled";
+		};
+
+		sdio1: sdhci@003f2000 {
+			compatible = "brcm,sdhci-iproc";
+			reg = <0x003f2000 0x100>;
+			interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+			bus-width = <8>;
+			clocks = <&sdio1_clk>;
+			iommus = <&smmu 0x6003 0x0000>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
index 3ff9527..23191eb 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
@@ -60,7 +60,6 @@
 		vci-supply = <&ldo28_reg>;
 		reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
 		enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
-		te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
index 17fae81..7286b1e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree file for Freescale LS1012A Freedom Board.
  *
- * Copyright 2016, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPLv2 or the X11 license, at your option. Note that this dual
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
index e2a93d5..8c013b5 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree file for Freescale LS1012A QDS Board.
  *
- * Copyright 2016, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPLv2 or the X11 license, at your option. Note that this dual
@@ -97,6 +97,14 @@
 	status = "okay";
 };
 
+&esdhc0 {
+	status = "okay";
+};
+
+&esdhc1 {
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
index ed77f6b..c1a119e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree file for Freescale LS1012A RDB Board.
  *
- * Copyright 2016, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPLv2 or the X11 license, at your option. Note that this dual
@@ -54,6 +54,19 @@
 	status = "okay";
 };
 
+&esdhc0 {
+	sd-uhs-sdr104;
+	sd-uhs-sdr50;
+	sd-uhs-sdr25;
+	sd-uhs-sdr12;
+	status = "okay";
+};
+
+&esdhc1 {
+	mmc-hs200-1_8v;
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index b497ac1..b1554cb 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1012A family SoC.
  *
- * Copyright 2016, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPLv2 or the X11 license, at your option. Note that this dual
@@ -76,10 +76,17 @@
 	sysclk: sysclk {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
-		clock-frequency = <100000000>;
+		clock-frequency = <125000000>;
 		clock-output-names = "sysclk";
 	};
 
+	coreclk: coreclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <100000000>;
+		clock-output-names = "coreclk";
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
@@ -117,12 +124,37 @@
 		#size-cells = <2>;
 		ranges;
 
+		esdhc0: esdhc@1560000 {
+			compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
+			reg = <0x0 0x1560000 0x0 0x10000>;
+			interrupts = <0 62 0x4>;
+			clocks = <&clockgen 4 0>;
+			voltage-ranges = <1800 1800 3300 3300>;
+			sdhci,auto-cmd12;
+			big-endian;
+			bus-width = <4>;
+			status = "disabled";
+		};
+
 		scfg: scfg@1570000 {
 			compatible = "fsl,ls1012a-scfg", "syscon";
 			reg = <0x0 0x1570000 0x0 0x10000>;
 			big-endian;
 		};
 
+		esdhc1: esdhc@1580000 {
+			compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
+			reg = <0x0 0x1580000 0x0 0x10000>;
+			interrupts = <0 65 0x4>;
+			clocks = <&clockgen 4 0>;
+			voltage-ranges = <1800 1800 3300 3300>;
+			sdhci,auto-cmd12;
+			big-endian;
+			broken-cd;
+			bus-width = <4>;
+			status = "disabled";
+		};
+
 		crypto: crypto@1700000 {
 			compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
 				     "fsl,sec-v4.0";
@@ -223,7 +255,8 @@
 			compatible = "fsl,ls1012a-clockgen";
 			reg = <0x0 0x1ee1000 0x0 0x1000>;
 			#clock-cells = <2>;
-			clocks = <&sysclk>;
+			clocks = <&sysclk &coreclk>;
+			clock-names = "sysclk", "coreclk";
 		};
 
 		tmu: tmu@1f00000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
new file mode 100644
index 0000000..169e171
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
@@ -0,0 +1,45 @@
+/*
+ * QorIQ FMan v3 device tree nodes for ls1043
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&soc {
+
+/* include used FMan blocks */
+#include "qoriq-fman3-0.dtsi"
+#include "qoriq-fman3-0-1g-0.dtsi"
+#include "qoriq-fman3-0-1g-1.dtsi"
+#include "qoriq-fman3-0-1g-2.dtsi"
+#include "qoriq-fman3-0-1g-3.dtsi"
+#include "qoriq-fman3-0-1g-4.dtsi"
+#include "qoriq-fman3-0-1g-5.dtsi"
+#include "qoriq-fman3-0-10g-0.dtsi"
+
+};
+
+&fman0 {
+	/* these aliases provide the FMan ports mapping */
+	enet0: ethernet@e0000 {
+	};
+
+	enet1: ethernet@e2000 {
+	};
+
+	enet2: ethernet@e4000 {
+	};
+
+	enet3: ethernet@e6000 {
+	};
+
+	enet4: ethernet@e8000 {
+	};
+
+	enet5: ethernet@ea000 {
+	};
+
+	enet6: ethernet@f0000 {
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index 0989d63..6341281 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1043A family SoC.
  *
- * Copyright 2014-2015, Freescale Semiconductor
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
  *
  * Mingkai Hu <Mingkai.hu@freescale.com>
  *
@@ -181,3 +181,5 @@
 		reg = <0>;
 	};
 };
+
+#include "fsl-ls1043-post.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index c37110b..3dc0c8e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1043A family SoC.
  *
- * Copyright 2014-2015, Freescale Semiconductor
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
  *
  * Mingkai Hu <Mingkai.hu@freescale.com>
  *
@@ -75,11 +75,11 @@
 		reg = <0x4c>;
 	};
 	eeprom@52 {
-		compatible = "at24,24c512";
+		compatible = "atmel,24c512";
 		reg = <0x52>;
 	};
 	eeprom@53 {
-		compatible = "at24,24c512";
+		compatible = "atmel,24c512";
 		reg = <0x53>;
 	};
 	rtc@68 {
@@ -139,3 +139,76 @@
 &duart1 {
 	status = "okay";
 };
+
+#include "fsl-ls1043-post.dtsi"
+
+&fman0 {
+	ethernet@e0000 {
+		phy-handle = <&qsgmii_phy1>;
+		phy-connection-type = "qsgmii";
+	};
+
+	ethernet@e2000 {
+		phy-handle = <&qsgmii_phy2>;
+		phy-connection-type = "qsgmii";
+	};
+
+	ethernet@e4000 {
+		phy-handle = <&rgmii_phy1>;
+		phy-connection-type = "rgmii-txid";
+	};
+
+	ethernet@e6000 {
+		phy-handle = <&rgmii_phy2>;
+		phy-connection-type = "rgmii-txid";
+	};
+
+	ethernet@e8000 {
+		phy-handle = <&qsgmii_phy3>;
+		phy-connection-type = "qsgmii";
+	};
+
+	ethernet@ea000 {
+		phy-handle = <&qsgmii_phy4>;
+		phy-connection-type = "qsgmii";
+	};
+
+	ethernet@f0000 { /* 10GEC1 */
+		phy-handle = <&aqr105_phy>;
+		phy-connection-type = "xgmii";
+	};
+
+	mdio@fc000 {
+		rgmii_phy1: ethernet-phy@1 {
+			reg = <0x1>;
+		};
+
+		rgmii_phy2: ethernet-phy@2 {
+			reg = <0x2>;
+		};
+
+		qsgmii_phy1: ethernet-phy@4 {
+			reg = <0x4>;
+		};
+
+		qsgmii_phy2: ethernet-phy@5 {
+			reg = <0x5>;
+		};
+
+		qsgmii_phy3: ethernet-phy@6 {
+			reg = <0x6>;
+		};
+
+		qsgmii_phy4: ethernet-phy@7 {
+			reg = <0x7>;
+		};
+	};
+
+	mdio@fd000 {
+		aqr105_phy: ethernet-phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			interrupts = <0 132 4>;
+			reg = <0x1>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 45cface..31fd77f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1043A family SoC.
  *
- * Copyright 2014-2015, Freescale Semiconductor
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
  *
  * Mingkai Hu <Mingkai.hu@freescale.com>
  *
@@ -45,6 +45,7 @@
  */
 
 #include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
 	compatible = "fsl,ls1043a";
@@ -52,6 +53,17 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		fman0 = &fman0;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		ethernet3 = &enet3;
+		ethernet4 = &enet4;
+		ethernet5 = &enet5;
+		ethernet6 = &enet6;
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -106,6 +118,33 @@
 		      /* DRAM space 1, size: 2GiB DRAM */
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			compatible = "shared-dma-pool";
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+			no-map;
+		};
+
+		qman_fqd: qman-fqd {
+			compatible = "shared-dma-pool";
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+			no-map;
+		};
+
+		qman_pfdr: qman-pfdr {
+			compatible = "shared-dma-pool";
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+			no-map;
+		};
+	};
+
 	sysclk: sysclk {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
@@ -152,7 +191,7 @@
 		interrupts = <1 9 0xf08>;
 	};
 
-	soc {
+	soc: soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -223,6 +262,7 @@
 		ifc: ifc@1530000 {
 			compatible = "fsl,ifc", "simple-bus";
 			reg = <0x0 0x1530000 0x0 0x10000>;
+			big-endian;
 			interrupts = <0 43 0x4>;
 		};
 
@@ -333,6 +373,28 @@
 			};
 		};
 
+		qman: qman@1880000 {
+			compatible = "fsl,qman";
+			reg = <0x0 0x1880000 0x0 0x10000>;
+			interrupts = <0 45 0x4>;
+			memory-region = <&qman_fqd &qman_pfdr>;
+		};
+
+		bman: bman@1890000 {
+			compatible = "fsl,bman";
+			reg = <0x0 0x1890000 0x0 0x10000>;
+			interrupts = <0 45 0x4>;
+			memory-region = <&bman_fbpr>;
+		};
+
+		bportals: bman-portals@508000000 {
+			ranges = <0x0 0x5 0x08000000 0x8000000>;
+		};
+
+		qportals: qman-portals@500000000 {
+			ranges = <0x0 0x5 0x00000000 0x8000000>;
+		};
+
 		dspi0: dspi@2100000 {
 			compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi";
 			#address-cells = <1>;
@@ -688,3 +750,6 @@
 	};
 
 };
+
+#include "qoriq-qman-portals.dtsi"
+#include "qoriq-bman-portals.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
new file mode 100644
index 0000000..f5017db
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
@@ -0,0 +1,48 @@
+/*
+ * QorIQ FMan v3 device tree nodes for ls1046
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&soc {
+
+/* include used FMan blocks */
+#include "qoriq-fman3-0.dtsi"
+#include "qoriq-fman3-0-1g-0.dtsi"
+#include "qoriq-fman3-0-1g-1.dtsi"
+#include "qoriq-fman3-0-1g-2.dtsi"
+#include "qoriq-fman3-0-1g-3.dtsi"
+#include "qoriq-fman3-0-1g-4.dtsi"
+#include "qoriq-fman3-0-1g-5.dtsi"
+#include "qoriq-fman3-0-10g-0.dtsi"
+#include "qoriq-fman3-0-10g-1.dtsi"
+};
+
+&fman0 {
+	/* these aliases provide the FMan ports mapping */
+	enet0: ethernet@e0000 {
+	};
+
+	enet1: ethernet@e2000 {
+	};
+
+	enet2: ethernet@e4000 {
+	};
+
+	enet3: ethernet@e6000 {
+	};
+
+	enet4: ethernet@e8000 {
+	};
+
+	enet5: ethernet@ea000 {
+	};
+
+	enet6: ethernet@f0000 {
+	};
+
+	enet7: ethernet@f2000 {
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
index 290e5b0..434383b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1046A family SoC.
  *
- * Copyright 2016, Freescale Semiconductor, Inc.
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * Shaohui Xie <Shaohui.Xie@nxp.com>
  *
@@ -210,3 +210,5 @@
 		reg = <0>;
 	};
 };
+
+#include "fsl-ls1046-post.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
index d1ccc00..5dc2782 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1046A family SoC.
  *
- * Copyright 2016, Freescale Semiconductor, Inc.
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * Mingkai Hu <mingkai.hu@nxp.com>
  *
@@ -72,6 +72,14 @@
 	status = "okay";
 };
 
+&esdhc {
+	mmc-hs200-1_8v;
+	sd-uhs-sdr104;
+	sd-uhs-sdr50;
+	sd-uhs-sdr25;
+	sd-uhs-sdr12;
+};
+
 &i2c0 {
 	status = "okay";
 
@@ -148,3 +156,63 @@
 		reg = <1>;
 	};
 };
+
+#include "fsl-ls1046-post.dtsi"
+
+&fman0 {
+	ethernet@e4000 {
+		phy-handle = <&rgmii_phy1>;
+		phy-connection-type = "rgmii";
+	};
+
+	ethernet@e6000 {
+		phy-handle = <&rgmii_phy2>;
+		phy-connection-type = "rgmii";
+	};
+
+	ethernet@e8000 {
+		phy-handle = <&sgmii_phy1>;
+		phy-connection-type = "sgmii";
+	};
+
+	ethernet@ea000 {
+		phy-handle = <&sgmii_phy2>;
+		phy-connection-type = "sgmii";
+	};
+
+	ethernet@f0000 { /* 10GEC1 */
+		phy-handle = <&aqr106_phy>;
+		phy-connection-type = "xgmii";
+	};
+
+	ethernet@f2000 { /* 10GEC2 */
+		fixed-link = <0 1 1000 0 0>;
+		phy-connection-type = "xgmii";
+	};
+
+	mdio@fc000 {
+		rgmii_phy1: ethernet-phy@1 {
+			reg = <0x1>;
+		};
+
+		rgmii_phy2: ethernet-phy@2 {
+			reg = <0x2>;
+		};
+
+		sgmii_phy1: ethernet-phy@3 {
+			reg = <0x3>;
+		};
+
+		sgmii_phy2: ethernet-phy@4 {
+			reg = <0x4>;
+		};
+	};
+
+	mdio@fd000 {
+		aqr106_phy: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			interrupts = <0 131 4>;
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index f4b8b7e..dc1640b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-1046A family SoC.
  *
- * Copyright 2016, Freescale Semiconductor, Inc.
+ * Copyright 2016 Freescale Semiconductor, Inc.
  *
  * Mingkai Hu <mingkai.hu@nxp.com>
  *
@@ -55,6 +55,15 @@
 
 	aliases {
 		crypto = &crypto;
+		fman0 = &fman0;
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
+		ethernet3 = &enet3;
+		ethernet4 = &enet4;
+		ethernet5 = &enet5;
+		ethernet6 = &enet6;
+		ethernet7 = &enet7;
 	};
 
 	cpus {
@@ -174,7 +183,7 @@
 					 IRQ_TYPE_LEVEL_LOW)>;
 	};
 
-	soc {
+	soc: soc {
 		compatible = "simple-bus";
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -190,6 +199,7 @@
 		ifc: ifc@1530000 {
 			compatible = "fsl,ifc", "simple-bus";
 			reg = <0x0 0x1530000 0x0 0x10000>;
+			big-endian;
 			interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
@@ -209,10 +219,10 @@
 		};
 
 		esdhc: esdhc@1560000 {
-			compatible = "fsl,esdhc";
+			compatible = "fsl,ls1046a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x1560000 0x0 0x10000>;
 			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-			clock-frequency = <0>;
+			clocks = <&clockgen 2 1>;
 			voltage-ranges = <1800 1800 3300 3300>;
 			sdhci,auto-cmd12;
 			big-endian;
@@ -268,6 +278,30 @@
 			};
 		};
 
+		qman: qman@1880000 {
+			compatible = "fsl,qman";
+			reg = <0x0 0x1880000 0x0 0x10000>;
+			interrupts = <0 45 0x4>;
+			memory-region = <&qman_fqd &qman_pfdr>;
+
+		};
+
+		bman: bman@1890000 {
+			compatible = "fsl,bman";
+			reg = <0x0 0x1890000 0x0 0x10000>;
+			interrupts = <0 45 0x4>;
+			memory-region = <&bman_fbpr>;
+
+		};
+
+		qportals: qman-portals@500000000 {
+			ranges = <0x0 0x5 0x00000000 0x8000000>;
+		};
+
+		bportals: bman-portals@508000000 {
+			ranges = <0x0 0x5 0x08000000 0x8000000>;
+		};
+
 		dcfg: dcfg@1ee0000 {
 			compatible = "fsl,ls1046a-dcfg", "syscon";
 			reg = <0x0 0x1ee0000 0x0 0x10000>;
@@ -567,6 +601,7 @@
 			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 			dr_mode = "host";
 			snps,quirk-frame-length-adjustment = <0x20>;
+			snps,dis_rxdet_inp3_quirk;
 		};
 
 		usb1: usb@3000000 {
@@ -575,6 +610,7 @@
 			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 			dr_mode = "host";
 			snps,quirk-frame-length-adjustment = <0x20>;
+			snps,dis_rxdet_inp3_quirk;
 		};
 
 		usb2: usb@3100000 {
@@ -583,6 +619,7 @@
 			interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 			dr_mode = "host";
 			snps,quirk-frame-length-adjustment = <0x20>;
+			snps,dis_rxdet_inp3_quirk;
 		};
 
 		sata: sata@3200000 {
@@ -594,4 +631,34 @@
 			clocks = <&clockgen 4 1>;
 		};
 	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			compatible = "shared-dma-pool";
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+			no-map;
+		};
+
+		qman_fqd: qman-fqd {
+			compatible = "shared-dma-pool";
+			size = <0 0x800000>;
+			alignment = <0 0x800000>;
+			no-map;
+		};
+
+		qman_pfdr: qman-pfdr {
+			compatible = "shared-dma-pool";
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+			no-map;
+		};
+	};
 };
+
+#include "qoriq-qman-portals.dtsi"
+#include "qoriq-bman-portals.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
index 8c3cae5..3012805 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -110,6 +110,30 @@
 	};
 };
 
+&ifc {
+	ranges = <0 0 0x5 0x80000000 0x08000000
+		  2 0 0x5 0x30000000 0x00010000
+		  3 0 0x5 0x20000000 0x00010000>;
+	status = "okay";
+
+	nor@0,0 {
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x8000000>;
+		bank-width = <2>;
+		device-width = <1>;
+	};
+
+	nand@2,0 {
+		compatible = "fsl,ifc-nand";
+		reg = <0x2 0x0 0x10000>;
+	};
+
+	fpga: board-control@3,0 {
+		compatible = "fsl,ls1088aqds-fpga", "fsl,fpga-qixis";
+		reg = <0x3 0x0 0x0000100>;
+	};
+};
+
 &duart0 {
 	status = "okay";
 };
@@ -118,6 +142,10 @@
 	status = "okay";
 };
 
+&esdhc {
+	status = "okay";
+};
+
 &sata {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index 8a04fbb..213abb7 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -94,6 +94,22 @@
 	};
 };
 
+&ifc {
+	ranges = <0 0 0x5 0x30000000 0x00010000
+		  2 0 0x5 0x20000000 0x00010000>;
+	status = "okay";
+
+	nand@0,0 {
+		compatible = "fsl,ifc-nand";
+		reg = <0x0 0x0 0x10000>;
+	};
+
+	fpga: board-control@2,0 {
+		compatible = "fsl,ls1088ardb-fpga", "fsl,fpga-qixis";
+		reg = <0x2 0x0 0x0000100>;
+	};
+};
+
 &duart0 {
 	status = "okay";
 };
@@ -102,6 +118,10 @@
 	status = "okay";
 };
 
+&esdhc {
+	status = "okay";
+};
+
 &sata {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 2946fd7..c144d06 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -44,6 +44,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	compatible = "fsl,ls1088a";
@@ -61,6 +62,7 @@
 			compatible = "arm,cortex-a53";
 			reg = <0x0>;
 			clocks = <&clockgen 1 0>;
+			#cooling-cells = <2>;
 		};
 
 		cpu1: cpu@1 {
@@ -89,6 +91,7 @@
 			compatible = "arm,cortex-a53";
 			reg = <0x100>;
 			clocks = <&clockgen 1 1>;
+			#cooling-cells = <2>;
 		};
 
 		cpu5: cpu@101 {
@@ -153,6 +156,91 @@
 			clocks = <&sysclk>;
 		};
 
+		tmu: tmu@1f80000 {
+			compatible = "fsl,qoriq-tmu";
+			reg = <0x0 0x1f80000 0x0 0x10000>;
+			interrupts = <0 23 0x4>;
+			fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
+			fsl,tmu-calibration =
+				/* Calibration data group 1 */
+				<0x00000000 0x00000026
+				0x00000001 0x0000002d
+				0x00000002 0x00000032
+				0x00000003 0x00000039
+				0x00000004 0x0000003f
+				0x00000005 0x00000046
+				0x00000006 0x0000004d
+				0x00000007 0x00000054
+				0x00000008 0x0000005a
+				0x00000009 0x00000061
+				0x0000000a 0x0000006a
+				0x0000000b 0x00000071
+				/* Calibration data group 2 */
+				0x00010000 0x00000025
+				0x00010001 0x0000002c
+				0x00010002 0x00000035
+				0x00010003 0x0000003d
+				0x00010004 0x00000045
+				0x00010005 0x0000004e
+				0x00010006 0x00000057
+				0x00010007 0x00000061
+				0x00010008 0x0000006b
+				0x00010009 0x00000076
+				/* Calibration data group 3 */
+				0x00020000 0x00000029
+				0x00020001 0x00000033
+				0x00020002 0x0000003d
+				0x00020003 0x00000049
+				0x00020004 0x00000056
+				0x00020005 0x00000061
+				0x00020006 0x0000006d
+				/* Calibration data group 4 */
+				0x00030000 0x00000021
+				0x00030001 0x0000002a
+				0x00030002 0x0000003c
+				0x00030003 0x0000004e>;
+			little-endian;
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+			cpu_thermal: cpu-thermal {
+				polling-delay-passive = <1000>;
+				polling-delay = <5000>;
+				thermal-sensors = <&tmu 0>;
+
+				trips {
+					cpu_alert: cpu-alert {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+
+					cpu_crit: cpu-crit {
+						temperature = <95000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu0 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+
+					map1 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu4 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
+
 		duart0: serial@21c0500 {
 			compatible = "fsl,ns16550", "ns16550a";
 			reg = <0x0 0x21c0500 0x0 0x100>;
@@ -216,10 +304,6 @@
 			little-endian;
 			#address-cells = <2>;
 			#size-cells = <1>;
-
-			ranges = <0 0 0x5 0x80000000 0x08000000
-				  2 0 0x5 0x30000000 0x00010000
-				  3 0 0x5 0x20000000 0x00010000>;
 			status = "disabled";
 		};
 
@@ -263,11 +347,26 @@
 			status = "disabled";
 		};
 
+		esdhc: esdhc@2140000 {
+			compatible = "fsl,ls1088a-esdhc", "fsl,esdhc";
+			reg = <0x0 0x2140000 0x0 0x10000>;
+			interrupts = <0 28 0x4>; /* Level high type */
+			clock-frequency = <0>;
+			voltage-ranges = <1800 1800 3300 3300>;
+			sdhci,auto-cmd12;
+			little-endian;
+			bus-width = <4>;
+			status = "disabled";
+		};
+
 		sata: sata@3200000 {
-			compatible = "fsl,ls1088a-ahci", "fsl,ls1043a-ahci";
-			reg = <0x0 0x3200000 0x0 0x10000>;
+			compatible = "fsl,ls1088a-ahci";
+			reg = <0x0 0x3200000 0x0 0x10000>,
+				<0x7 0x100520 0x0 0x4>;
+			reg-names = "ahci", "sata-ecc";
 			interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clockgen 4 3>;
+			dma-coherent;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
index c1e76df..ed209cd 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2080a QDS Board.
  *
- * Copyright (C) 2015-17, Freescale Semiconductor
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  * Bhupesh Sharma <bhupesh.sharma@freescale.com>
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
index 18ad195..67ec3f9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2080a RDB Board.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  * Bhupesh Sharma <bhupesh.sharma@freescale.com>
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
index 290604b..3ee718f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
@@ -1,7 +1,7 @@
 /*
  * Device Tree file for Freescale LS2080a software Simulator model
  *
- * Copyright (C) 2014-2015, Freescale Semiconductor
+ * Copyright 2014-2015 Freescale Semiconductor, Inc.
  *
  * Bhupesh Sharma <bhupesh.sharma@freescale.com>
  *
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 46a26c0..d789c68 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -1,7 +1,7 @@
 /*
  * Device Tree Include file for Freescale Layerscape-2080A family SoC.
  *
- * Copyright (C) 2014-2016, Freescale Semiconductor
+ * Copyright 2014-2016 Freescale Semiconductor, Inc.
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  * Bhupesh Sharma <bhupesh.sharma@freescale.com>
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
index ebcd6ee..4a1df5c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2088A QDS Board.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
index 5992dc1..a76d4b4 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2088A RDB Board.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
index 33ce404..5c695c6 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -1,7 +1,8 @@
 /*
  * Device Tree Include file for Freescale Layerscape-2088A family SoC.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
index 8b62048..b237446 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2080A QDS Board.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
@@ -45,6 +46,7 @@
  */
 
 &esdhc {
+	mmc-hs200-1_8v;
 	status = "okay";
 };
 
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
index 3737587..9a1d0d2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Freescale LS2080A RDB Board.
  *
- * Copyright (C) 2016-17, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index abb2fff..94cdd30 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -1,7 +1,8 @@
 /*
  * Device Tree Include file for Freescale Layerscape-2080A family SoC.
  *
- * Copyright (C) 2016-2017, Freescale Semiconductor
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
  *
  * Abhimanyu Saini <abhimanyu.saini@nxp.com>
  *
@@ -471,7 +472,7 @@
 			compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x2140000 0x0 0x10000>;
 			interrupts = <0 28 0x4>; /* Level high type */
-			clock-frequency = <0>;	/* Updated by bootloader */
+			clocks = <&clockgen 4 1>;
 			voltage-ranges = <1800 1800 3300 3300>;
 			sdhci,auto-cmd12;
 			little-endian;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
new file mode 100644
index 0000000..c3c2be4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-bman-portals.dtsi
@@ -0,0 +1,71 @@
+/*
+ * QorIQ BMan Portals device tree
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&bportals {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "simple-bus";
+
+	bman-portal@0 {
+		/*
+		 * bootloader fix-ups are expected to provide the
+		 * "fsl,bman-portal-<hardware revision>" compatible
+		 */
+		compatible = "fsl,bman-portal";
+		reg = <0x0 0x4000>, <0x4000000 0x4000>;
+		interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@10000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x10000 0x4000>, <0x4010000 0x4000>;
+		interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@20000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x20000 0x4000>, <0x4020000 0x4000>;
+		interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@30000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x30000 0x4000>, <0x4030000 0x4000>;
+		interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@40000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x40000 0x4000>, <0x4040000 0x4000>;
+		interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@50000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x50000 0x4000>, <0x4050000 0x4000>;
+		interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@60000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x60000 0x4000>, <0x4060000 0x4000>;
+		interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@70000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x70000 0x4000>, <0x4070000 0x4000>;
+		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	bman-portal@80000 {
+		compatible = "fsl,bman-portal";
+		reg = <0x80000 0x4000>, <0x4080000 0x4000>;
+		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
new file mode 100644
index 0000000..ecdffe7
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-0.dtsi
@@ -0,0 +1,42 @@
+/*
+ * QorIQ FMan v3 10g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x10: port@90000 {
+		cell-index = <0x10>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x90000 0x1000>;
+		fsl,fman-10g-port;
+	};
+
+	fman0_tx_0x30: port@b0000 {
+		cell-index = <0x30>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xb0000 0x1000>;
+		fsl,fman-10g-port;
+	};
+
+	ethernet@f0000 {
+		cell-index = <0x8>;
+		compatible = "fsl,fman-memac";
+		reg = <0xf0000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x10 &fman0_tx_0x30>;
+		pcsphy-handle = <&pcsphy6>;
+	};
+
+	mdio@f1000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xf1000 0x1000>;
+
+		pcsphy6: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
new file mode 100644
index 0000000..a7f6af5
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-10g-1.dtsi
@@ -0,0 +1,42 @@
+/*
+ * QorIQ FMan v3 10g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x11: port@91000 {
+		cell-index = <0x11>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x91000 0x1000>;
+		fsl,fman-10g-port;
+	};
+
+	fman0_tx_0x31: port@b1000 {
+		cell-index = <0x31>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xb1000 0x1000>;
+		fsl,fman-10g-port;
+	};
+
+	ethernet@f2000 {
+		cell-index = <0x9>;
+		compatible = "fsl,fman-memac";
+		reg = <0xf2000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x11 &fman0_tx_0x31>;
+		pcsphy-handle = <&pcsphy7>;
+	};
+
+	mdio@f3000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xf3000 0x1000>;
+
+		pcsphy7: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
new file mode 100644
index 0000000..d600786
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-0.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #0 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x08: port@88000 {
+		cell-index = <0x8>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x88000 0x1000>;
+	};
+
+	fman0_tx_0x28: port@a8000 {
+		cell-index = <0x28>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xa8000 0x1000>;
+	};
+
+	ethernet@e0000 {
+		cell-index = <0>;
+		compatible = "fsl,fman-memac";
+		reg = <0xe0000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x08 &fman0_tx_0x28>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy0>;
+	};
+
+	mdio@e1000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xe1000 0x1000>;
+
+		pcsphy0: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
new file mode 100644
index 0000000..3c0b76d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-1.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #1 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x09: port@89000 {
+		cell-index = <0x9>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x89000 0x1000>;
+	};
+
+	fman0_tx_0x29: port@a9000 {
+		cell-index = <0x29>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xa9000 0x1000>;
+	};
+
+	ethernet@e2000 {
+		cell-index = <1>;
+		compatible = "fsl,fman-memac";
+		reg = <0xe2000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy1>;
+	};
+
+	mdio@e3000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xe3000 0x1000>;
+
+		pcsphy1: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
new file mode 100644
index 0000000..89633af
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-2.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #2 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x0a: port@8a000 {
+		cell-index = <0xa>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x8a000 0x1000>;
+	};
+
+	fman0_tx_0x2a: port@aa000 {
+		cell-index = <0x2a>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xaa000 0x1000>;
+	};
+
+	ethernet@e4000 {
+		cell-index = <2>;
+		compatible = "fsl,fman-memac";
+		reg = <0xe4000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x0a &fman0_tx_0x2a>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy2>;
+	};
+
+	mdio@e5000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xe5000 0x1000>;
+
+		pcsphy2: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
new file mode 100644
index 0000000..87c2b70
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-3.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x0b: port@8b000 {
+		cell-index = <0xb>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x8b000 0x1000>;
+	};
+
+	fman0_tx_0x2b: port@ab000 {
+		cell-index = <0x2b>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xab000 0x1000>;
+	};
+
+	ethernet@e6000 {
+		cell-index = <3>;
+		compatible = "fsl,fman-memac";
+		reg = <0xe6000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x0b &fman0_tx_0x2b>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy3>;
+	};
+
+	mdio@e7000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xe7000 0x1000>;
+
+		pcsphy3: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
new file mode 100644
index 0000000..8f4d74b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-4.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #4 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x0c: port@8c000 {
+		cell-index = <0xc>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x8c000 0x1000>;
+	};
+
+	fman0_tx_0x2c: port@ac000 {
+		cell-index = <0x2c>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xac000 0x1000>;
+	};
+
+	ethernet@e8000 {
+		cell-index = <4>;
+		compatible = "fsl,fman-memac";
+		reg = <0xe8000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy4>;
+	};
+
+	mdio@e9000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xe9000 0x1000>;
+
+		pcsphy4: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
new file mode 100644
index 0000000..d534f77
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0-1g-5.dtsi
@@ -0,0 +1,41 @@
+/*
+ * QorIQ FMan v3 1g port #5 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman@1a00000 {
+	fman0_rx_0x0d: port@8d000 {
+		cell-index = <0xd>;
+		compatible = "fsl,fman-v3-port-rx";
+		reg = <0x8d000 0x1000>;
+	};
+
+	fman0_tx_0x2d: port@ad000 {
+		cell-index = <0x2d>;
+		compatible = "fsl,fman-v3-port-tx";
+		reg = <0xad000 0x1000>;
+	};
+
+	ethernet@ea000 {
+		cell-index = <5>;
+		compatible = "fsl,fman-memac";
+		reg = <0xea000 0x1000>;
+		fsl,fman-ports = <&fman0_rx_0x0d &fman0_tx_0x2d>;
+		ptp-timer = <&ptp_timer0>;
+		pcsphy-handle = <&pcsphy5>;
+	};
+
+	mdio@eb000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xeb000 0x1000>;
+
+		pcsphy5: ethernet-phy@0 {
+			reg = <0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
new file mode 100644
index 0000000..4dd0676
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -0,0 +1,81 @@
+/*
+ * QorIQ FMan v3 device tree
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+fman0: fman@1a00000 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	cell-index = <0>;
+	compatible = "fsl,fman";
+	ranges = <0x0 0x0 0x1a00000 0x100000>;
+	reg = <0x0 0x1a00000 0x0 0x100000>;
+	interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&clockgen 3 0>;
+	clock-names = "fmanclk";
+	fsl,qman-channel-range = <0x800 0x10>;
+
+	muram@0 {
+		compatible = "fsl,fman-muram";
+		reg = <0x0 0x60000>;
+	};
+
+	fman0_oh_0x2: port@82000 {
+		cell-index = <0x2>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x82000 0x1000>;
+	};
+
+	fman0_oh_0x3: port@83000 {
+		cell-index = <0x3>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x83000 0x1000>;
+	};
+
+	fman0_oh_0x4: port@84000 {
+		cell-index = <0x4>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x84000 0x1000>;
+	};
+
+	fman0_oh_0x5: port@85000 {
+		cell-index = <0x5>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x85000 0x1000>;
+	};
+
+	fman0_oh_0x6: port@86000 {
+		cell-index = <0x6>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x86000 0x1000>;
+	};
+
+	fman0_oh_0x7: port@87000 {
+		cell-index = <0x7>;
+		compatible = "fsl,fman-v3-port-oh";
+		reg = <0x87000 0x1000>;
+	};
+
+	mdio0: mdio@fc000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xfc000 0x1000>;
+	};
+
+	xmdio0: mdio@fd000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
+		reg = <0xfd000 0x1000>;
+	};
+
+	ptp_timer0: ptp-timer@fe000 {
+		compatible = "fsl,fman-ptp-timer";
+		reg = <0xfe000 0x1000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
new file mode 100644
index 0000000..2a9aa06
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/qoriq-qman-portals.dtsi
@@ -0,0 +1,80 @@
+/*
+ * QorIQ QMan Portals device tree
+ *
+ * Copyright 2011-2016 Freescale Semiconductor Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+ */
+
+&qportals {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "simple-bus";
+
+	qportal0: qman-portal@0 {
+		/*
+		 * bootloader fix-ups are expected to provide the
+		 * "fsl,bman-portal-<hardware revision>" compatible
+		 */
+		compatible = "fsl,qman-portal";
+		reg = <0x0 0x4000>, <0x4000000 0x4000>;
+		interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <0>;
+	};
+
+	qportal1: qman-portal@10000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x10000 0x4000>, <0x4010000 0x4000>;
+		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <1>;
+	};
+
+	qportal2: qman-portal@20000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x20000 0x4000>, <0x4020000 0x4000>;
+		interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <2>;
+	};
+
+	qportal3: qman-portal@30000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x30000 0x4000>, <0x4030000 0x4000>;
+		interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <3>;
+	};
+
+	qportal4: qman-portal@40000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x40000 0x4000>, <0x4040000 0x4000>;
+		interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <4>;
+	};
+
+	qportal5: qman-portal@50000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x50000 0x4000>, <0x4050000 0x4000>;
+		interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <5>;
+	};
+
+	qportal6: qman-portal@60000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x60000 0x4000>, <0x4060000 0x4000>;
+		interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <6>;
+	};
+
+	qportal7: qman-portal@70000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x70000 0x4000>, <0x4070000 0x4000>;
+		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <7>;
+	};
+
+	qportal8: qman-portal@80000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x80000 0x4000>, <0x4080000 0x4000>;
+		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+		cell-index = <8>;
+	};
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index 186251f..6609b0f 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -9,17 +9,28 @@
 
 #include "hi3660.dtsi"
 #include "hikey960-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	model = "HiKey960";
-	compatible = "hisilicon,hi3660";
+	compatible = "hisilicon,hi3660-hikey960", "hisilicon,hi3660";
 
 	aliases {
-		serial5 = &uart5;       /* console UART */
+		mshc1 = &dwmmc1;
+		mshc2 = &dwmmc2;
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+		serial4 = &uart4;
+		serial5 = &uart5;
+		serial6 = &uart6;
 	};
 
 	chosen {
-		stdout-path = "serial5:115200n8";
+		stdout-path = "serial6:115200n8";
 	};
 
 	memory@0 {
@@ -27,8 +38,206 @@
 		/* rewrite this at bootloader */
 		reg = <0x0 0x0 0x0 0x0>;
 	};
+
+	keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwr_key_pmx_func &pwr_key_cfg_func>;
+
+		power {
+			wakeup-source;
+			gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+			label = "GPIO Power";
+			linux,code = <KEY_POWER>;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		user_led1 {
+			label = "user_led1";
+			/* gpio_150_user_led1 */
+			gpios = <&gpio18 6 0>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		user_led2 {
+			label = "user_led2";
+			/* gpio_151_user_led2 */
+			gpios = <&gpio18 7 0>;
+			linux,default-trigger = "mmc0";
+		};
+
+		user_led3 {
+			label = "user_led3";
+			/* gpio_189_user_led3 */
+			gpios = <&gpio23 5 0>;
+			default-state = "off";
+		};
+
+		user_led4 {
+			label = "user_led4";
+			/* gpio_190_user_led4 */
+			gpios = <&gpio23 6 0>;
+			linux,default-trigger = "cpu0";
+		};
+
+		wlan_active_led {
+			label = "wifi_active";
+			/* gpio_205_wifi_active */
+			gpios = <&gpio25 5 0>;
+			linux,default-trigger = "phy0tx";
+			default-state = "off";
+		};
+
+		bt_active_led {
+			label = "bt_active";
+			gpios = <&gpio25 7 0>;
+			/* gpio_207_user_led1 */
+			linux,default-trigger = "hci0-power";
+			default-state = "off";
+		};
+	};
+
+	pmic: pmic@fff34000 {
+		compatible = "hisilicon,hi6421v530-pmic";
+		reg = <0x0 0xfff34000 0x0 0x1000>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		regulators {
+			ldo3: LDO3 { /* HDMI */
+				regulator-name = "VOUT3_1V85";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo9: LDO9 { /* SDCARD I/O */
+				regulator-name = "VOUT9_1V8_2V95";
+				regulator-min-microvolt = <1750000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			ldo11: LDO11 { /* Low Speed Connector */
+				regulator-name = "VOUT11_1V8_2V95";
+				regulator-min-microvolt = <1750000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			ldo15: LDO15 { /* UFS VCC */
+				regulator-name = "VOUT15_3V0";
+				regulator-min-microvolt = <1750000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo16: LDO16 { /* SD VDD */
+				regulator-name = "VOUT16_2V95";
+				regulator-min-microvolt = <1750000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <360>;
+			};
+		};
+	};
+
+	wlan_en: wlan-en-1-8v {
+		compatible = "regulator-fixed";
+		regulator-name = "wlan-en-regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+
+		/* GPIO_051_WIFI_EN */
+		gpio = <&gpio6 3 0>;
+
+		/* WLAN card specific delay */
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
 };
 
-&uart5 {
+&i2c0 {
+	/* On Low speed expansion */
+	label = "LS-I2C0";
 	status = "okay";
 };
+
+&i2c1 {
+	status = "okay";
+
+	adv7533: adv7533@39 {
+		status = "ok";
+		compatible = "adi,adv7533";
+		reg = <0x39>;
+	};
+};
+
+&i2c7 {
+	/* On Low speed expansion */
+	label = "LS-I2C1";
+	status = "okay";
+};
+
+&uart3 {
+	/* On Low speed expansion */
+	label = "LS-UART0";
+	status = "okay";
+};
+
+&uart4 {
+	status = "okay";
+
+	bluetooth {
+		compatible = "ti,wl1837-st";
+		enable-gpios = <&gpio15 6 GPIO_ACTIVE_HIGH>;
+		max-speed = <921600>;
+	};
+};
+
+&uart6 {
+	/* On Low speed expansion */
+	label = "LS-UART1";
+	status = "okay";
+};
+
+&spi2 {
+	/* On Low speed expansion */
+	label = "LS-SPI0";
+	status = "okay";
+};
+
+&spi3 {
+	/* On High speed expansion */
+	label = "HS-SPI1";
+	status = "okay";
+};
+
+&dwmmc1 {
+	vmmc-supply = <&ldo16>;
+	vqmmc-supply = <&ldo9>;
+	status = "okay";
+};
+
+&dwmmc2 { /* WIFI */
+	broken-cd;
+	/* WL_EN */
+	vmmc-supply = <&wlan_en>;
+	ti,non-removable;
+	non-removable;
+	#address-cells = <0x1>;
+	#size-cells = <0x0>;
+	status = "ok";
+
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1837";
+		reg = <2>;      /* sdio func num */
+		/* WL_IRQ, GPIO_179_WL_WAKEUP_AP */
+		interrupt-parent = <&gpio22>;
+		interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+	};
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 3983086..c6a1961 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -5,6 +5,7 @@
  */
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/hi3660-clock.h>
 
 / {
 	compatible = "hisilicon,hi3660";
@@ -141,19 +142,710 @@
 		#size-cells = <2>;
 		ranges;
 
-		fixed_uart5: fixed_19_2M {
-			compatible = "fixed-clock";
-			#clock-cells = <0>;
-			clock-frequency = <19200000>;
-			clock-output-names = "fixed:uart5";
+		crg_ctrl: crg_ctrl@fff35000 {
+			compatible = "hisilicon,hi3660-crgctrl", "syscon";
+			reg = <0x0 0xfff35000 0x0 0x1000>;
+			#clock-cells = <1>;
 		};
 
-		uart5: uart@fdf05000 {
+		crg_rst: crg_rst_controller {
+			compatible = "hisilicon,hi3660-reset";
+			#reset-cells = <2>;
+			hisi,rst-syscon = <&crg_ctrl>;
+		};
+
+
+		pctrl: pctrl@e8a09000 {
+			compatible = "hisilicon,hi3660-pctrl", "syscon";
+			reg = <0x0 0xe8a09000 0x0 0x2000>;
+			#clock-cells = <1>;
+		};
+
+		pmuctrl: crg_ctrl@fff34000 {
+			compatible = "hisilicon,hi3660-pmuctrl", "syscon";
+			reg = <0x0 0xfff34000 0x0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		sctrl: sctrl@fff0a000 {
+			compatible = "hisilicon,hi3660-sctrl", "syscon";
+			reg = <0x0 0xfff0a000 0x0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		iomcu: iomcu@ffd7e000 {
+			compatible = "hisilicon,hi3660-iomcu", "syscon";
+			reg = <0x0 0xffd7e000 0x0 0x1000>;
+			#clock-cells = <1>;
+
+		};
+
+		iomcu_rst: reset {
+			compatible = "hisilicon,hi3660-reset";
+			hisi,rst-syscon = <&iomcu>;
+			#reset-cells = <2>;
+		};
+
+		dual_timer0: timer@fff14000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x0 0xfff14000 0x0 0x1000>;
+			interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_OSC32K>,
+				 <&crg_ctrl HI3660_OSC32K>,
+				 <&crg_ctrl HI3660_OSC32K>;
+			clock-names = "timer1", "timer2", "apb_pclk";
+		};
+
+		i2c0: i2c@ffd71000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xffd71000 0x0 0x1000>;
+			interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clock-frequency = <400000>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_I2C0>;
+			resets = <&iomcu_rst 0x20 3>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@ffd72000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xffd72000 0x0 0x1000>;
+			interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clock-frequency = <400000>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_I2C1>;
+			resets = <&iomcu_rst 0x20 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c1_pmx_func &i2c1_cfg_func>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@fdf0c000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xfdf0c000 0x0 0x1000>;
+			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clock-frequency = <400000>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_I2C3>;
+			resets = <&crg_rst 0x78 7>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c3_pmx_func &i2c3_cfg_func>;
+			status = "disabled";
+		};
+
+		i2c7: i2c@fdf0b000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xfdf0b000 0x0 0x1000>;
+			interrupts = <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clock-frequency = <400000>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_I2C7>;
+			resets = <&crg_rst 0x60 14>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c7_pmx_func &i2c7_cfg_func>;
+			status = "disabled";
+		};
+
+		uart0: serial@fdf02000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfdf02000 0x0 0x1000>;
+			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_MUX_UART0>,
+				 <&crg_ctrl HI3660_PCLK>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_pmx_func &uart0_cfg_func>;
+			status = "disabled";
+		};
+
+		uart1: serial@fdf00000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfdf00000 0x0 0x1000>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_UART1>,
+				 <&crg_ctrl HI3660_CLK_GATE_UART1>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart1_pmx_func &uart1_cfg_func>;
+			status = "disabled";
+		};
+
+		uart2: serial@fdf03000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfdf03000 0x0 0x1000>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_UART2>,
+				 <&crg_ctrl HI3660_PCLK>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart2_pmx_func &uart2_cfg_func>;
+			status = "disabled";
+		};
+
+		uart3: serial@ffd74000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xffd74000 0x0 0x1000>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_FACTOR_UART3>,
+				 <&crg_ctrl HI3660_PCLK>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart3_pmx_func &uart3_cfg_func>;
+			status = "disabled";
+		};
+
+		uart4: serial@fdf01000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfdf01000 0x0 0x1000>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_UART4>,
+				 <&crg_ctrl HI3660_CLK_GATE_UART4>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart4_pmx_func &uart4_cfg_func>;
+			status = "disabled";
+		};
+
+		uart5: serial@fdf05000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x0 0xfdf05000 0x0 0x1000>;
 			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&fixed_uart5 &fixed_uart5>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_UART5>,
+				 <&crg_ctrl HI3660_CLK_GATE_UART5>;
 			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart5_pmx_func &uart5_cfg_func>;
+			status = "disabled";
+		};
+
+		uart6: serial@fff32000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfff32000 0x0 0x1000>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_UART6>,
+				 <&crg_ctrl HI3660_PCLK>;
+			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart6_pmx_func &uart6_cfg_func>;
+			status = "disabled";
+		};
+
+		rtc0: rtc@fff04000 {
+			compatible = "arm,pl031", "arm,primecell";
+			reg = <0x0 0Xfff04000 0x0 0x1000>;
+			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_PCLK>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio0: gpio@e8a0b000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a0b000 0 0x1000>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 1 0 7>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO0>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio1: gpio@e8a0c000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a0c000 0 0x1000>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 1 7 7>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO1>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio2: gpio@e8a0d000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a0d000 0 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 14 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio3: gpio@e8a0e000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a0e000 0 0x1000>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 22 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO3>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio4: gpio@e8a0f000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a0f000 0 0x1000>;
+			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 30 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO4>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio5: gpio@e8a10000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a10000 0 0x1000>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 38 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO5>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio6: gpio@e8a11000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a11000 0 0x1000>;
+			interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 46 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO6>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio7: gpio@e8a12000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a12000 0 0x1000>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 54 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO7>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio8: gpio@e8a13000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a13000 0 0x1000>;
+			interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 62 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO8>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio9: gpio@e8a14000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a14000 0 0x1000>;
+			interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 70 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO9>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio10: gpio@e8a15000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a15000 0 0x1000>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 78 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO10>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio11: gpio@e8a16000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a16000 0 0x1000>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 86 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO11>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio12: gpio@e8a17000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a17000 0 0x1000>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 94 3 &pmx0 7 101 1>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO12>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio13: gpio@e8a18000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a18000 0 0x1000>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 102 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO13>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio14: gpio@e8a19000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a19000 0 0x1000>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 110 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO14>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio15: gpio@e8a1a000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a1a000 0 0x1000>;
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 118 6>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO15>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio16: gpio@e8a1b000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a1b000 0 0x1000>;
+			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO16>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio17: gpio@e8a1c000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a1c000 0 0x1000>;
+			interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO17>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio18: gpio@ff3b4000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xff3b4000 0 0x1000>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx2 0 0 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO18>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio19: gpio@ff3b5000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xff3b5000 0 0x1000>;
+			interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx2 0 8 4>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO19>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio20: gpio@e8a1f000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a1f000 0 0x1000>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx1 0 0 6>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO20>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio21: gpio@e8a20000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xe8a20000 0 0x1000>;
+			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-ranges = <&pmx3 0 0 6>;
+			clocks = <&crg_ctrl HI3660_PCLK_GPIO21>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio22: gpio@fff0b000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff0b000 0 0x1000>;
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO176 */
+			gpio-ranges = <&pmx4 2 0 6>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO0>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio23: gpio@fff0c000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff0c000 0 0x1000>;
+			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO184 */
+			gpio-ranges = <&pmx4 0 6 7>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO1>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio24: gpio@fff0d000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff0d000 0 0x1000>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO192 */
+			gpio-ranges = <&pmx4 0 13 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio25: gpio@fff0e000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff0e000 0 0x1000>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO200 */
+			gpio-ranges = <&pmx4 0 21 4 &pmx4 5 25 3>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO3>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio26: gpio@fff0f000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff0f000 0 0x1000>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO208 */
+			gpio-ranges = <&pmx4 0 28 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO4>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio27: gpio@fff10000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff10000 0 0x1000>;
+			interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			/* GPIO216 */
+			gpio-ranges = <&pmx4 0 36 6>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO5>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio28: gpio@fff1d000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0 0xfff1d000 0 0x1000>;
+			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&sctrl HI3660_PCLK_AO_GPIO6>;
+			clock-names = "apb_pclk";
+		};
+
+		spi2: spi@ffd68000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x0 0xffd68000 0x0 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_SPI2>;
+			clock-names = "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi2_pmx_func>;
+			num-cs = <1>;
+			cs-gpios = <&gpio27 2 0>;
+			status = "disabled";
+		};
+
+		spi3: spi@ff3b3000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x0 0xff3b3000 0x0 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_SPI3>;
+			clock-names = "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi3_pmx_func>;
+			num-cs = <1>;
+			cs-gpios = <&gpio18 5 0>;
+			status = "disabled";
+		};
+
+		pcie@f4000000 {
+			compatible = "hisilicon,kirin960-pcie";
+			reg = <0x0 0xf4000000 0x0 0x1000>,
+			      <0x0 0xff3fe000 0x0 0x1000>,
+			      <0x0 0xf3f20000 0x0 0x40000>,
+			      <0x0 0xf5000000 0x0 0x2000>;
+			reg-names = "dbi", "apb", "phy", "config";
+			bus-range = <0x0  0x1>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			device_type = "pci";
+			ranges = <0x02000000 0x0 0x00000000
+				  0x0 0xf6000000
+				  0x0 0x02000000>;
+			num-lanes = <1>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0xf800 0 0 7>;
+			interrupt-map = <0x0 0 0 1
+					 &gic GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0 0 0 2
+					 &gic GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0 0 0 3
+					 &gic GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
+					<0x0 0 0 4
+					 &gic GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_PCIEPHY_REF>,
+				 <&crg_ctrl HI3660_CLK_GATE_PCIEAUX>,
+				 <&crg_ctrl HI3660_PCLK_GATE_PCIE_PHY>,
+				 <&crg_ctrl HI3660_PCLK_GATE_PCIE_SYS>,
+				 <&crg_ctrl HI3660_ACLK_GATE_PCIE>;
+			clock-names = "pcie_phy_ref", "pcie_aux",
+				      "pcie_apb_phy", "pcie_apb_sys",
+				      "pcie_aclk";
+			reset-gpios = <&gpio11 1 0>;
+		};
+
+		/* SD */
+		dwmmc1: dwmmc1@ff37f000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			cd-inverted;
+			compatible = "hisilicon,hi3660-dw-mshc";
+			num-slots = <1>;
+			bus-width = <0x4>;
+			disable-wp;
+			cap-sd-highspeed;
+			supports-highspeed;
+			card-detect-delay = <200>;
+			reg = <0x0 0xff37f000 0x0 0x1000>;
+			interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_SD>,
+				<&crg_ctrl HI3660_HCLK_GATE_SD>;
+			clock-names = "ciu", "biu";
+			clock-frequency = <3200000>;
+			resets = <&crg_rst 0x94 18>;
+			cd-gpios = <&gpio25 3 0>;
+			hisilicon,peripheral-syscon = <&sctrl>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&sd_pmx_func
+				     &sd_clk_cfg_func
+				     &sd_cfg_func>;
+			sd-uhs-sdr12;
+			sd-uhs-sdr25;
+			sd-uhs-sdr50;
+			sd-uhs-sdr104;
+			status = "disabled";
+
+			slot@0 {
+				reg = <0x0>;
+				bus-width = <4>;
+				disable-wp;
+			};
+		};
+
+		/* SDIO */
+		dwmmc2: dwmmc2@ff3ff000 {
+			compatible = "hisilicon,hi3660-dw-mshc";
+			reg = <0x0 0xff3ff000 0x0 0x1000>;
+			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+			num-slots = <1>;
+			clocks = <&crg_ctrl HI3660_CLK_GATE_SDIO0>,
+				 <&crg_ctrl HI3660_HCLK_GATE_SDIO0>;
+			clock-names = "ciu", "biu";
+			resets = <&crg_rst 0x94 20>;
+			card-detect-delay = <200>;
+			supports-highspeed;
+			keep-power-in-suspend;
+			pinctrl-names = "default";
+			pinctrl-0 = <&sdio_pmx_func
+				     &sdio_clk_cfg_func
+				     &sdio_cfg_func>;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 49f6a62..afae4de 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -466,6 +466,11 @@
 			method = "smc";
 		};
 	};
+
+	sound_card {
+		compatible = "audio-graph-card";
+		dais = <&i2s0_port0>;
+	};
 };
 
 &uart2 {
@@ -506,10 +511,33 @@
 		interrupts = <1 2>;
 		pd-gpio = <&gpio0 4 0>;
 		adi,dsi-lanes = <4>;
+		#sound-dai-cells = <0>;
 
-		port {
-			adv7533_in: endpoint {
-				remote-endpoint = <&dsi_out0>;
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				adv7533_in: endpoint {
+					remote-endpoint = <&dsi_out0>;
+				};
+			};
+			port@2 {
+				reg = <2>;
+				codec_endpoint: endpoint {
+					remote-endpoint = <&i2s0_cpu_endpoint>;
+				};
+			};
+		};
+	};
+};
+
+&i2s0 {
+
+	ports {
+		i2s0_port0: port@0 {
+			i2s0_cpu_endpoint: endpoint {
+				remote-endpoint = <&codec_endpoint>;
+				dai-format = "i2s";
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 5013e4b..eacbe0d 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -332,6 +332,19 @@
 			status = "disabled";
 		};
 
+		dma0: dma@f7370000 {
+			compatible = "hisilicon,k3-dma-1.0";
+			reg = <0x0 0xf7370000 0x0 0x1000>;
+			#dma-cells = <1>;
+			dma-channels = <15>;
+			dma-requests = <32>;
+			interrupts = <0 84 4>;
+			clocks = <&sys_ctrl HI6220_EDMAC_ACLK>;
+			dma-no-cci;
+			dma-type = "hi6220_dma";
+			status = "ok";
+		};
+
 		dual_timer0: timer@f8008000 {
 			compatible = "arm,sp804", "arm,primecell";
 			reg = <0x0 0xf8008000 0x0 0x1000>;
@@ -805,6 +818,19 @@
 			#thermal-sensor-cells = <1>;
 		};
 
+		i2s0: i2s@f7118000 {
+			compatible = "hisilicon,hi6210-i2s";
+			reg = <0x0 0xf7118000 0x0 0x8000>; /* i2s unit */
+			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; /* 155 "DigACodec_intr"-32 */
+			clocks = <&sys_ctrl HI6220_DACODEC_PCLK>,
+				 <&sys_ctrl HI6220_BBPPLL0_DIV>;
+			clock-names = "dacodec", "i2s-base";
+			dmas = <&dma0 15 &dma0 14>;
+			dma-names = "rx", "tx";
+			hisilicon,sysctrl-syscon = <&sys_ctrl>;
+			#sound-dai-cells = <1>;
+		};
+
 		thermal-zones {
 
 			cls0: cls0 {
@@ -887,5 +913,69 @@
 				};
 			};
 		};
+
+		debug@f6590000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf6590000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu0>;
+		};
+
+		debug@f6592000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf6592000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu1>;
+		};
+
+		debug@f6594000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf6594000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu2>;
+		};
+
+		debug@f6596000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf6596000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu3>;
+		};
+
+		debug@f65d0000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf65d0000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu4>;
+		};
+
+		debug@f65d2000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf65d2000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu5>;
+		};
+
+		debug@f65d4000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf65d4000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu6>;
+		};
+
+		debug@f65d6000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0 0xf65d6000 0 0x1000>;
+			clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&cpu7>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
index 719c4bc..7e542d2 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
@@ -24,6 +24,27 @@
 				&range 0 7 0
 				&range 8 116 0>;
 
+			pmu_pmx_func: pmu_pmx_func {
+				pinctrl-single,pins = <
+					0x008 MUX_M1 /* PMU1_SSI */
+					0x00c MUX_M1 /* PMU2_SSI */
+					0x010 MUX_M1 /* PMU_CLKOUT */
+					0x100 MUX_M1 /* PMU_HKADC_SSI */
+				>;
+			};
+
+			csi0_pwd_n_pmx_func: csi0_pwd_n_pmx_func {
+				pinctrl-single,pins = <
+					0x044 MUX_M0 /* CSI0_PWD_N */
+				>;
+			};
+
+			csi1_pwd_n_pmx_func: csi1_pwd_n_pmx_func {
+				pinctrl-single,pins = <
+					0x04c MUX_M0 /* CSI1_PWD_N */
+				>;
+			};
+
 			isp0_pmx_func: isp0_pmx_func {
 				pinctrl-single,pins = <
 					0x058 MUX_M1 /* ISP_CLK0 */
@@ -40,6 +61,12 @@
 				>;
 			};
 
+			pwr_key_pmx_func: pwr_key_pmx_func {
+				pinctrl-single,pins = <
+					0x080 MUX_M0 /* GPIO_034 */
+				>;
+			};
+
 			i2c3_pmx_func: i2c3_pmx_func {
 				pinctrl-single,pins = <
 					0x02c MUX_M1 /* I2C3_SCL */
@@ -67,21 +94,10 @@
 				>;
 			};
 
-			spi1_pmx_func: spi1_pmx_func {
-				pinctrl-single,pins = <
-					0x034 MUX_M1 /* SPI1_CLK */
-					0x038 MUX_M1 /* SPI1_DI */
-					0x03c MUX_M1 /* SPI1_DO */
-					0x040 MUX_M1 /* SPI1_CS_N */
-				>;
-			};
-
 			uart0_pmx_func: uart0_pmx_func {
 				pinctrl-single,pins = <
 					0x0cc MUX_M2 /* UART0_RXD */
 					0x0d0 MUX_M2 /* UART0_TXD */
-					0x0d4 MUX_M2 /* UART0_RXD_M */
-					0x0d8 MUX_M2 /* UART0_TXD_M */
 				>;
 			};
 
@@ -138,6 +154,18 @@
 					0x0d8 MUX_M1 /* UART6_TXD */
 				>;
 			};
+
+			cam0_rst_pmx_func: cam0_rst_pmx_func {
+				pinctrl-single,pins = <
+					0x0c8 MUX_M0 /* CAM0_RST */
+				>;
+			};
+
+			cam1_rst_pmx_func: cam1_rst_pmx_func {
+				pinctrl-single,pins = <
+					0x124 MUX_M0 /* CAM1_RST */
+				>;
+			};
 		};
 
 		/* [IOMG_MMC0_000, IOMG_MMC0_005] */
@@ -174,6 +202,13 @@
 			/* pin base, nr pins & gpio function */
 			pinctrl-single,gpio-range = <&range 0 12 0>;
 
+			ufs_pmx_func: ufs_pmx_func {
+				pinctrl-single,pins = <
+					0x000 MUX_M1 /* UFS_REF_CLK */
+					0x004 MUX_M1 /* UFS_RST_N */
+				>;
+			};
+
 			spi3_pmx_func: spi3_pmx_func {
 				pinctrl-single,pins = <
 					0x008 MUX_M1 /* SPI3_CLK */
@@ -248,13 +283,6 @@
 				>;
 			};
 
-			i2c2_pmx_func: i2c2_pmx_func {
-				pinctrl-single,pins = <
-					0x024 MUX_M1 /* I2C2_SCL */
-					0x028 MUX_M1 /* I2C2_SDA */
-				>;
-			};
-
 			i2c7_pmx_func: i2c7_pmx_func {
 				pinctrl-single,pins = <
 					0x024 MUX_M3 /* I2C7_SCL */
@@ -262,6 +290,13 @@
 				>;
 			};
 
+			pcie_pmx_func: pcie_pmx_func {
+				pinctrl-single,pins = <
+					0x084 MUX_M1 /* PCIE_CLKREQ_N */
+					0x088 MUX_M1 /* PCIE_WAKE_N */
+				>;
+			};
+
 			spi2_pmx_func: spi2_pmx_func {
 				pinctrl-single,pins = <
 					0x08c MUX_M1 /* SPI2_CLK */
@@ -271,15 +306,6 @@
 				>;
 			};
 
-			spi4_pmx_func: spi4_pmx_func {
-				pinctrl-single,pins = <
-					0x08c MUX_M4 /* SPI4_CLK */
-					0x090 MUX_M4 /* SPI4_DI */
-					0x094 MUX_M4 /* SPI4_DO */
-					0x098 MUX_M4 /* SPI4_CS0_N */
-				>;
-			};
-
 			i2s0_pmx_func: i2s0_pmx_func {
 				pinctrl-single,pins = <
 					0x034 MUX_M1 /* I2S0_DI */
@@ -290,13 +316,433 @@
 			};
 		};
 
-		pmx5: pinmux@ff3fd800 {
+		pmx5: pinmux@e896c800 {
+			compatible = "pinconf-single";
+			reg = <0x0 0xe896c800 0x0 0x200>;
+			#pinctrl-cells = <1>;
+			pinctrl-single,register-width = <0x20>;
+
+			pmu_cfg_func: pmu_cfg_func {
+				pinctrl-single,pins = <
+					0x010 0x0 /* PMU1_SSI */
+					0x014 0x0 /* PMU2_SSI */
+					0x018 0x0 /* PMU_CLKOUT */
+					0x10c 0x0 /* PMU_HKADC_SSI */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_06MA DRIVE6_MASK
+				>;
+			};
+
+			i2c3_cfg_func: i2c3_cfg_func {
+				pinctrl-single,pins = <
+					0x038 0x0 /* I2C3_SCL */
+					0x03c 0x0 /* I2C3_SDA */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			csi0_pwd_n_cfg_func: csi0_pwd_n_cfg_func {
+				pinctrl-single,pins = <
+					0x050 0x0 /* CSI0_PWD_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK
+				>;
+			};
+
+			csi1_pwd_n_cfg_func: csi1_pwd_n_cfg_func {
+				pinctrl-single,pins = <
+					0x058 0x0 /* CSI1_PWD_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK
+				>;
+			};
+
+			isp0_cfg_func: isp0_cfg_func {
+				pinctrl-single,pins = <
+					0x064 0x0 /* ISP_CLK0 */
+					0x070 0x0 /* ISP_SCL0 */
+					0x074 0x0 /* ISP_SDA0 */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK>;
+			};
+
+			isp1_cfg_func: isp1_cfg_func {
+				pinctrl-single,pins = <
+					0x068 0x0 /* ISP_CLK1 */
+					0x078 0x0 /* ISP_SCL1 */
+					0x07c 0x0 /* ISP_SDA1 */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK
+				>;
+			};
+
+			pwr_key_cfg_func: pwr_key_cfg_func {
+				pinctrl-single,pins = <
+					0x08c 0x0 /* GPIO_034 */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart1_cfg_func: uart1_cfg_func {
+				pinctrl-single,pins = <
+					0x0b4 0x0 /* UART1_RXD */
+					0x0b8 0x0 /* UART1_TXD */
+					0x0bc 0x0 /* UART1_CTS_N */
+					0x0c0 0x0 /* UART1_RTS_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart2_cfg_func: uart2_cfg_func {
+				pinctrl-single,pins = <
+					0x0c8 0x0 /* UART2_CTS_N */
+					0x0cc 0x0 /* UART2_RTS_N */
+					0x0d0 0x0 /* UART2_TXD */
+					0x0d4 0x0 /* UART2_RXD */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart5_cfg_func: uart5_cfg_func {
+				pinctrl-single,pins = <
+					0x0c8 0x0 /* UART5_RXD */
+					0x0cc 0x0 /* UART5_TXD */
+					0x0d0 0x0 /* UART5_CTS_N */
+					0x0d4 0x0 /* UART5_RTS_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			cam0_rst_cfg_func: cam0_rst_cfg_func {
+				pinctrl-single,pins = <
+					0x0d4 0x0 /* CAM0_RST */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK
+				>;
+			};
+
+			uart0_cfg_func: uart0_cfg_func {
+				pinctrl-single,pins = <
+					0x0d8 0x0 /* UART0_RXD */
+					0x0dc 0x0 /* UART0_TXD */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart6_cfg_func: uart6_cfg_func {
+				pinctrl-single,pins = <
+					0x0d8 0x0 /* UART6_CTS_N */
+					0x0dc 0x0 /* UART6_RTS_N */
+					0x0e0 0x0 /* UART6_RXD */
+					0x0e4 0x0 /* UART6_TXD */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart3_cfg_func: uart3_cfg_func {
+				pinctrl-single,pins = <
+					0x0e8 0x0 /* UART3_CTS_N */
+					0x0ec 0x0 /* UART3_RTS_N */
+					0x0f0 0x0 /* UART3_RXD */
+					0x0f4 0x0 /* UART3_TXD */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			uart4_cfg_func: uart4_cfg_func {
+				pinctrl-single,pins = <
+					0x0f8 0x0 /* UART4_CTS_N */
+					0x0fc 0x0 /* UART4_RTS_N */
+					0x100 0x0 /* UART4_RXD */
+					0x104 0x0 /* UART4_TXD */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			cam1_rst_cfg_func: cam1_rst_cfg_func {
+				pinctrl-single,pins = <
+					0x130 0x0 /* CAM1_RST */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_04MA DRIVE6_MASK
+				>;
+			};
+		};
+
+		pmx6: pinmux@ff3b6800 {
+			compatible = "pinconf-single";
+			reg = <0x0 0xff3b6800 0x0 0x18>;
+			#pinctrl-cells = <1>;
+			pinctrl-single,register-width = <0x20>;
+
+			ufs_cfg_func: ufs_cfg_func {
+				pinctrl-single,pins = <
+					0x000 0x0 /* UFS_REF_CLK */
+					0x004 0x0 /* UFS_RST_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_08MA DRIVE6_MASK
+				>;
+			};
+
+			spi3_cfg_func: spi3_cfg_func {
+				pinctrl-single,pins = <
+					0x008 0x0 /* SPI3_CLK */
+					0x00c 0x0 /* SPI3_DI */
+					0x010 0x0 /* SPI3_DO */
+					0x014 0x0 /* SPI3_CS0_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_DIS
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+		};
+
+		pmx7: pinmux@ff3fd800 {
 			compatible = "pinconf-single";
 			reg = <0x0 0xff3fd800 0x0 0x18>;
 			#pinctrl-cells = <1>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			pinctrl-single,register-width = <32>;
+			pinctrl-single,register-width = <0x20>;
 
 			sdio_clk_cfg_func: sdio_clk_cfg_func {
 				pinctrl-single,pins = <
@@ -315,8 +761,7 @@
 					PULL_UP
 				>;
 				pinctrl-single,drive-strength = <
-					DRIVE6_32MA
-					DRIVE6_MASK
+					DRIVE6_32MA DRIVE6_MASK
 				>;
 			};
 
@@ -341,19 +786,16 @@
 					PULL_UP
 				>;
 				pinctrl-single,drive-strength = <
-					DRIVE6_19MA
-					DRIVE6_MASK
+					DRIVE6_19MA DRIVE6_MASK
 				>;
 			};
 		};
 
-		pmx6: pinmux@ff37e800 {
+		pmx8: pinmux@ff37e800 {
 			compatible = "pinconf-single";
 			reg = <0x0 0xff37e800 0x0 0x18>;
 			#pinctrl-cells = <1>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			pinctrl-single,register-width = <32>;
+			pinctrl-single,register-width = <0x20>;
 
 			sd_clk_cfg_func: sd_clk_cfg_func {
 				pinctrl-single,pins = <
@@ -403,5 +845,215 @@
 				>;
 			};
 		};
+
+		pmx9: pinmux@fff11800 {
+			compatible = "pinconf-single";
+			reg = <0x0 0xfff11800 0x0 0xbc>;
+			#pinctrl-cells = <1>;
+			pinctrl-single,register-width = <0x20>;
+
+			i2c0_cfg_func: i2c0_cfg_func {
+				pinctrl-single,pins = <
+					0x01c 0x0 /* I2C0_SCL */
+					0x020 0x0 /* I2C0_SDA */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			i2c1_cfg_func: i2c1_cfg_func {
+				pinctrl-single,pins = <
+					0x024 0x0 /* I2C1_SCL */
+					0x028 0x0 /* I2C1_SDA */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			i2c7_cfg_func: i2c7_cfg_func {
+				pinctrl-single,pins = <
+					0x02c 0x0 /* I2C7_SCL */
+					0x030 0x0 /* I2C7_SDA */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			slimbus_cfg_func: slimbus_cfg_func {
+				pinctrl-single,pins = <
+					0x034 0x0 /* SLIMBUS_CLK */
+					0x038 0x0 /* SLIMBUS_DATA */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			i2s0_cfg_func: i2s0_cfg_func {
+				pinctrl-single,pins = <
+					0x040 0x0 /* I2S0_DI */
+					0x044 0x0 /* I2S0_DO */
+					0x048 0x0 /* I2S0_XCLK */
+					0x04c 0x0 /* I2S0_XFS */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			i2s2_cfg_func: i2s2_cfg_func {
+				pinctrl-single,pins = <
+					0x050 0x0 /* I2S2_DI */
+					0x054 0x0 /* I2S2_DO */
+					0x058 0x0 /* I2S2_XCLK */
+					0x05c 0x0 /* I2S2_XFS */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			pcie_cfg_func: pcie_cfg_func {
+				pinctrl-single,pins = <
+					0x094 0x0 /* PCIE_CLKREQ_N */
+					0x098 0x0 /* PCIE_WAKE_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			spi2_cfg_func: spi2_cfg_func {
+				pinctrl-single,pins = <
+					0x09c 0x0 /* SPI2_CLK */
+					0x0a0 0x0 /* SPI2_DI */
+					0x0a4 0x0 /* SPI2_DO */
+					0x0a8 0x0 /* SPI2_CS0_N */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+
+			usb_cfg_func: usb_cfg_func {
+				pinctrl-single,pins = <
+					0x0ac 0x0 /* GPIO_219 */
+				>;
+				pinctrl-single,bias-pulldown = <
+					PULL_DIS
+					PULL_DOWN
+					PULL_DIS
+					PULL_DOWN
+				>;
+				pinctrl-single,bias-pullup = <
+					PULL_UP
+					PULL_UP
+					PULL_DIS
+					PULL_UP
+				>;
+				pinctrl-single,drive-strength = <
+					DRIVE7_02MA DRIVE6_MASK
+				>;
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index a89855f..9df0f06 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -42,6 +42,10 @@
  *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  *     OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * This file is compatible with both version 1.4 and version 2.0 of
+ * the board; however, the CON numbers differ between the two
+ * versions.
  */
 
 /dts-v1/;
@@ -76,6 +80,36 @@
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&exp_usb3_vbus>;
 	};
+
+	vcc_sd_reg1: regulator {
+		compatible = "regulator-gpio";
+		regulator-name = "vcc_sd1";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+
+		gpios = <&gpiosb 23 GPIO_ACTIVE_HIGH>;
+		gpios-states = <0>;
+		states = <1800000 0x1
+			  3300000 0x0>;
+		enable-active-high;
+	};
+};
+
+/* Gigabit module on CON19(V2.0)/CON21(V1.4) */
+&eth0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	phy-mode = "rgmii-id";
+	phy = <&phy0>;
+	status = "okay";
+};
+
+/* Gigabit module on CON18(V2.0)/CON20(V1.4) */
+&eth1 {
+	phy-mode = "sgmii";
+	phy = <&phy1>;
+	status = "okay";
 };
 
 &i2c0 {
@@ -108,11 +142,46 @@
 	};
 };
 
+&mdio {
+	status = "okay";
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
+/* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) : mini-PCIe */
+&pcie0 {
+	status = "okay";
+};
+
 /* CON3 */
 &sata {
 	status = "okay";
 };
 
+&sdhci0 {
+	non-removable;
+	bus-width = <8>;
+	mmc-ddr-1_8v;
+	mmc-hs400-1_8v;
+	marvell,pad-type = "fixed-1-8v";
+	status = "okay";
+};
+
+/* SD slot module on CON14(V2.0)/CON15(V1.4) */
+&sdhci1 {
+	wp-inverted;
+	cd-gpios = <&gpiosb 2 GPIO_ACTIVE_LOW>;
+	bus-width = <4>;
+	marvell,pad-type = "sd";
+	vqmmc-supply = <&vcc_sd_reg1>;
+	status = "okay";
+};
+
 &spi0 {
 	status = "okay";
 	pinctrl-names = "default";
@@ -145,60 +214,23 @@
 	};
 };
 
-/* Exported on the micro USB connector CON32 through an FTDI */
+/*
+ * Exported on the micro USB connector CON30(V2.0)/CON32(V1.4) through
+ * an FTDI
+ */
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1_pins>;
 	status = "okay";
 };
 
-&sdhci0 {
-	non-removable;
-	bus-width = <8>;
-	mmc-ddr-1_8v;
-	mmc-hs400-1_8v;
-	marvell,pad-type = "fixed-1-8v";
-	status = "okay";
-};
-
-/* CON31 */
-&usb3 {
-	status = "okay";
-	usb-phy = <&usb3_phy>;
-};
-
-/* CON17 (PCIe) / CON12 (mini-PCIe) */
-&pcie0 {
-	status = "okay";
-};
-
-/* CON27 */
+/* CON27(V2.0)/CON29(V1.4) */
 &usb2 {
 	status = "okay";
 };
 
-
-&mdio {
+/* CON29(V2.0)/CON31(V1.4) */
+&usb3 {
 	status = "okay";
-	phy0: ethernet-phy@0 {
-		reg = <0>;
-	};
-
-	phy1: ethernet-phy@1 {
-		reg = <1>;
-	};
-};
-
-&eth0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&rgmii_pins>;
-	phy-mode = "rgmii-id";
-	phy = <&phy0>;
-	status = "okay";
-};
-
-&eth1 {
-	phy-mode = "sgmii";
-	phy = <&phy1>;
-	status = "okay";
+	usb-phy = <&usb3_phy>;
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 4d495ec..dbcc3d4 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 14
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 11
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 10
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	soc {
@@ -163,7 +159,7 @@
 
 			pinctrl_nb: pinctrl@13800 {
 				compatible = "marvell,armada3710-nb-pinctrl",
-				"syscon", "simple-mfd";
+					     "syscon", "simple-mfd";
 				reg = <0x13800 0x100>, <0x13C00 0x20>;
 				gpionb: gpio {
 					#gpio-cells = <2>;
@@ -219,7 +215,7 @@
 
 			pinctrl_sb: pinctrl@18800 {
 				compatible = "marvell,armada3710-sb-pinctrl",
-				"syscon", "simple-mfd";
+					     "syscon", "simple-mfd";
 				reg = <0x18800 0x100>, <0x18C00 0x20>;
 				gpiosb: gpio {
 					#gpio-cells = <2>;
@@ -281,8 +277,8 @@
 
 			xor@60900 {
 				compatible = "marvell,armada-3700-xor";
-				reg = <0x60900 0x100
-				       0x60b00 0x100>;
+				reg = <0x60900 0x100>,
+				      <0x60b00 0x100>;
 
 				xor10 {
 					interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
@@ -292,11 +288,22 @@
 				};
 			};
 
+			sdhci1: sdhci@d0000 {
+				compatible = "marvell,armada-3700-sdhci",
+					     "marvell,sdhci-xenon";
+				reg = <0xd0000 0x300>,
+				      <0x1e808 0x4>;
+				interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&nb_periph_clk 0>;
+				clock-names = "core";
+				status = "disabled";
+			};
+
 			sdhci0: sdhci@d8000 {
 				compatible = "marvell,armada-3700-sdhci",
-				"marvell,sdhci-xenon";
-				reg = <0xd8000 0x300
-				       0x17808 0x4>;
+					     "marvell,sdhci-xenon";
+				reg = <0xd8000 0x300>,
+				      <0x17808 0x4>;
 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&nb_periph_clk 0>;
 				clock-names = "core";
diff --git a/arch/arm64/boot/dts/marvell/armada-7020.dtsi b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
index 975e733..4ab0129 100644
--- a/arch/arm64/boot/dts/marvell/armada-7020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
@@ -46,7 +46,7 @@
  */
 
 #include "armada-ap806-dual.dtsi"
-#include "armada-cp110-master.dtsi"
+#include "armada-70x0.dtsi"
 
 / {
 	model = "Marvell Armada 7020";
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 1244232..92c761c 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -162,6 +162,8 @@
 };
 
 &cpm_mdio {
+	status = "okay";
+
 	phy0: ethernet-phy@0 {
 		reg = <0>;
 	};
@@ -185,7 +187,3 @@
 	phy = <&phy1>;
 	phy-mode = "rgmii-id";
 };
-
-&cpm_crypto {
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040.dtsi b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
index 78d995d..cbe460b 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
@@ -46,7 +46,7 @@
  */
 
 #include "armada-ap806-quad.dtsi"
-#include "armada-cp110-master.dtsi"
+#include "armada-70x0.dtsi"
 
 / {
 	model = "Marvell Armada 7040";
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm64/boot/dts/marvell/armada-70x0.dtsi
similarity index 75%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm64/boot/dts/marvell/armada-70x0.dtsi
index 3956cff..860b6ae 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm64/boot/dts/marvell/armada-70x0.dtsi
@@ -1,20 +1,22 @@
 /*
+ * Copyright (C) 2017 Marvell Technology Group Ltd.
+ *
  * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
  * whole.
  *
- *  a) This file is free software; you can redistribute it and/or
+ *  a) This library is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
  *     published by the Free Software Foundation; either version 2 of the
  *     License, or (at your option) any later version.
  *
- *     This file is distributed in the hope that it will be useful,
+ *     This library is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -38,32 +40,29 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
+/*
+ * Device Tree file for the Armada 70x0 SoC
+ */
 
-#include "rk1108.dtsi"
+#include "armada-cp110-master.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
+	aliases {
+		gpio1 = &cpm_gpio1;
+		gpio2 = &cpm_gpio2;
 	};
 };
 
-&uart0 {
+&cpm_gpio1 {
 	status = "okay";
 };
 
-&uart1 {
+&cpm_gpio2 {
 	status = "okay";
 };
 
-&uart2 {
-	status = "okay";
+&cpm_syscon0 {
+	cpm_pinctrl: pinctrl {
+		compatible = "marvell,armada-7k-pinctrl";
+	};
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-8020.dtsi b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
index 7c08f1f..0ba0bc9 100644
--- a/arch/arm64/boot/dts/marvell/armada-8020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
@@ -46,8 +46,7 @@
  */
 
 #include "armada-ap806-dual.dtsi"
-#include "armada-cp110-master.dtsi"
-#include "armada-cp110-slave.dtsi"
+#include "armada-80x0.dtsi"
 
 / {
 	model = "Marvell Armada 8020";
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index dc0d084..1e8f724 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -125,6 +125,8 @@
 };
 
 &cpm_mdio {
+	status = "okay";
+
 	phy1: ethernet-phy@1 {
 		reg = <1>;
 	};
@@ -140,10 +142,6 @@
 	phy-mode = "rgmii-id";
 };
 
-&cpm_crypto {
-	status = "okay";
-};
-
 /* CON5 on CP1 expansion */
 &cps_pcie2 {
 	status = "okay";
@@ -169,6 +167,24 @@
 	status = "okay";
 };
 
+&cps_mdio {
+	status = "okay";
+
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
+&cps_ethernet {
+	status = "okay";
+};
+
+&cps_eth1 {
+	status = "okay";
+	phy = <&phy0>;
+	phy-mode = "rgmii-id";
+};
+
 &ap_sdhci0 {
 	status = "okay";
 	bus-width = <4>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
index f7bb0cc..4968e73 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@ -95,16 +95,47 @@
 	status = "okay";
 };
 
+&ap_sdhci0 {
+	bus-width = <8>;
+	/*
+	 * Not stable in HS modes - phy needs "more calibration", so add
+	 * the "slow-mode" and disable SDR104, SDR50 and DDR50 modes.
+	 */
+	marvell,xenon-phy-slow-mode;
+	no-1-8-v;
+	no-sd;
+	no-sdio;
+	non-removable;
+	status = "okay";
+	vqmmc-supply = <&v_vddo_h>;
+};
+
 &cpm_i2c0 {
 	clock-frequency = <100000>;
 	status = "okay";
 };
 
+&cpm_mdio {
+	status = "okay";
+
+	ge_phy: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
 &cpm_sata0 {
 	/* CPM Lane 0 - U29 */
 	status = "okay";
 };
 
+&cpm_sdhci0 {
+	/* U6 */
+	broken-cd;
+	bus-width = <4>;
+	status = "okay";
+	vqmmc-supply = <&v_3_3>;
+};
+
 &cpm_usb3_0 {
 	/* J38? - USB2.0 only */
 	status = "okay";
@@ -115,6 +146,17 @@
 	status = "okay";
 };
 
+&cps_ethernet {
+	status = "okay";
+};
+
+&cps_eth1 {
+	/* CPS Lane 0 - J5 (Gigabit RJ45) */
+	status = "okay";
+	phy = <&ge_phy>;
+	phy-mode = "sgmii";
+};
+
 &cps_sata0 {
 	/* CPS Lane 1 - U32 */
 	/* CPS Lane 3 - U31 */
diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
index 33813a7..60fe84f 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
@@ -46,8 +46,7 @@
  */
 
 #include "armada-ap806-quad.dtsi"
-#include "armada-cp110-master.dtsi"
-#include "armada-cp110-slave.dtsi"
+#include "armada-80x0.dtsi"
 
 / {
 	model = "Marvell Armada 8040";
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm64/boot/dts/marvell/armada-80x0.dtsi
similarity index 69%
copy from arch/arm/boot/dts/rk1108-evb.dts
copy to arch/arm64/boot/dts/marvell/armada-80x0.dtsi
index 3956cff..666ebe9 100644
--- a/arch/arm/boot/dts/rk1108-evb.dts
+++ b/arch/arm64/boot/dts/marvell/armada-80x0.dtsi
@@ -1,20 +1,22 @@
 /*
+ * Copyright (C) 2017 Marvell Technology Group Ltd.
+ *
  * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
  * licensing only applies to this file, and not this project as a
  * whole.
  *
- *  a) This file is free software; you can redistribute it and/or
+ *  a) This library is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
  *     published by the Free Software Foundation; either version 2 of the
  *     License, or (at your option) any later version.
  *
- *     This file is distributed in the hope that it will be useful,
+ *     This library is distributed in the hope that it will be useful,
  *     but WITHOUT ANY WARRANTY; without even the implied warranty of
  *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *     GNU General Public License for more details.
  *
- *  Or, alternatively,
+ * Or, alternatively,
  *
  *  b) Permission is hereby granted, free of charge, to any person
  *     obtaining a copy of this software and associated documentation
@@ -38,32 +40,37 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
+/*
+ * Device Tree file for the Armada 80x0 SoC family
+ */
 
-#include "rk1108.dtsi"
+#include "armada-cp110-master.dtsi"
+#include "armada-cp110-slave.dtsi"
 
 / {
-	model = "Rockchip RK1108 Evaluation board";
-	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
-
-	memory@60000000 {
-		device_type = "memory";
-		reg = <0x60000000 0x08000000>;
-	};
-
-	chosen {
-		stdout-path = "serial2:1500000n8";
+	aliases {
+		gpio1 = &cps_gpio1;
+		gpio2 = &cpm_gpio2;
 	};
 };
 
-&uart0 {
+/* The 80x0 has two CP blocks, but only one GPIO block from each is used. */
+&cps_gpio1 {
 	status = "okay";
 };
 
-&uart1 {
+&cpm_gpio2 {
 	status = "okay";
 };
 
-&uart2 {
-	status = "okay";
+&cpm_syscon0 {
+	cpm_pinctrl: pinctrl {
+		compatible = "marvell,armada-8k-cpm-pinctrl";
+	};
+};
+
+&cps_syscon0 {
+	cps_pinctrl: pinctrl {
+		compatible = "marvell,armada-8k-cps-pinctrl";
+	};
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index fe41bf9..1eb1f1e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -57,6 +57,7 @@
 	aliases {
 		serial0 = &uart0;
 		serial1 = &uart1;
+		gpio0 = &ap_gpio;
 	};
 
 	psci {
@@ -146,6 +147,13 @@
 				marvell,spi-base = <128>, <136>, <144>, <152>;
 			};
 
+			gicp: gicp@3f0040 {
+				compatible = "marvell,ap806-gicp";
+				reg = <0x3f0040 0x10>;
+				marvell,spi-ranges = <64 64>, <288 64>;
+				msi-controller;
+			};
+
 			pic: interrupt-controller@3f0100 {
 				compatible = "marvell,armada-8k-pic";
 				reg = <0x3f0100 0x10>;
@@ -159,6 +167,7 @@
 				reg = <0x400000 0x1000>,
 				      <0x410000 0x1000>;
 				msi-parent = <&gic_v2m0>;
+				clocks = <&ap_clk 3>;
 				dma-coherent;
 			};
 
@@ -167,6 +176,7 @@
 				reg = <0x420000 0x1000>,
 				      <0x430000 0x1000>;
 				msi-parent = <&gic_v2m0>;
+				clocks = <&ap_clk 3>;
 				dma-coherent;
 			};
 
@@ -175,6 +185,7 @@
 				reg = <0x440000 0x1000>,
 				      <0x450000 0x1000>;
 				msi-parent = <&gic_v2m0>;
+				clocks = <&ap_clk 3>;
 				dma-coherent;
 			};
 
@@ -183,6 +194,7 @@
 				reg = <0x460000 0x1000>,
 				      <0x470000 0x1000>;
 				msi-parent = <&gic_v2m0>;
+				clocks = <&ap_clk 3>;
 				dma-coherent;
 			};
 
@@ -193,7 +205,7 @@
 				#size-cells = <0>;
 				cell-index = <0>;
 				interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&ap_syscon 3>;
+				clocks = <&ap_clk 3>;
 				status = "disabled";
 			};
 
@@ -204,7 +216,7 @@
 				#size-cells = <0>;
 				interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 				timeout-ms = <1000>;
-				clocks = <&ap_syscon 3>;
+				clocks = <&ap_clk 3>;
 				status = "disabled";
 			};
 
@@ -214,7 +226,7 @@
 				reg-shift = <2>;
 				interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 				reg-io-width = <1>;
-				clocks = <&ap_syscon 3>;
+				clocks = <&ap_clk 3>;
 				status = "disabled";
 			};
 
@@ -224,7 +236,7 @@
 				reg-shift = <2>;
 				interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 				reg-io-width = <1>;
-				clocks = <&ap_syscon 3>;
+				clocks = <&ap_clk 3>;
 				status = "disabled";
 
 			};
@@ -234,21 +246,33 @@
 				reg = <0x6e0000 0x300>;
 				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 				clock-names = "core";
-				clocks = <&ap_syscon 4>;
+				clocks = <&ap_clk 4>;
 				dma-coherent;
 				marvell,xenon-phy-slow-mode;
 				status = "disabled";
 			};
 
 			ap_syscon: system-controller@6f4000 {
-				compatible = "marvell,ap806-system-controller",
-					     "syscon";
-				#clock-cells = <1>;
-				clock-output-names = "ap-cpu-cluster-0",
-						     "ap-cpu-cluster-1",
-						     "ap-fixed", "ap-mss",
-						     "ap-emmc";
+				compatible = "syscon", "simple-mfd";
 				reg = <0x6f4000 0x1000>;
+
+				ap_clk: clock {
+					compatible = "marvell,ap806-clock";
+					#clock-cells = <1>;
+				};
+
+				ap_pinctrl: pinctrl {
+					compatible = "marvell,ap806-pinctrl";
+				};
+
+				ap_gpio: gpio {
+					compatible = "marvell,armada-8k-gpio";
+					offset = <0x1040>;
+					ngpios = <19>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&ap_pinctrl 0 0 19>;
+				};
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df52..726528c 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -44,45 +44,46 @@
  * Device Tree file for Marvell Armada CP110 Master.
  */
 
+#define ICU_GRP_NSR 0x0
+
 / {
 	cp110-master {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		compatible = "simple-bus";
-		interrupt-parent = <&gic>;
+		interrupt-parent = <&cpm_icu>;
 		ranges;
 
 		config-space@f2000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
-			interrupt-parent = <&gic>;
 			ranges = <0x0 0x0 0xf2000000 0x2000000>;
 
 			cpm_ethernet: ethernet@0 {
 				compatible = "marvell,armada-7k-pp22";
 				reg = <0x0 0x100000>, <0x129000 0xb000>;
-				clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
+				clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
 				clock-names = "pp_clk", "gop_clk", "mg_clk";
 				status = "disabled";
 				dma-coherent;
 
 				cpm_eth0: eth0 {
-					interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <0>;
 					gop-port-id = <0>;
 					status = "disabled";
 				};
 
 				cpm_eth1: eth1 {
-					interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 40 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <1>;
 					gop-port-id = <2>;
 					status = "disabled";
 				};
 
 				cpm_eth2: eth2 {
-					interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 41 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <2>;
 					gop-port-id = <3>;
 					status = "disabled";
@@ -94,41 +95,70 @@
 				#size-cells = <0>;
 				compatible = "marvell,orion-mdio";
 				reg = <0x12a200 0x10>;
+				clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+				status = "disabled";
+			};
+
+			cpm_xmdio: mdio@12a600 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,xmdio";
+				reg = <0x12a600 0x10>;
+				status = "disabled";
+			};
+
+			cpm_icu: interrupt-controller@1e0000 {
+				compatible = "marvell,cp110-icu";
+				reg = <0x1e0000 0x10>;
+				#interrupt-cells = <3>;
+				interrupt-controller;
+				msi-parent = <&gicp>;
 			};
 
 			cpm_syscon0: system-controller@440000 {
-				compatible = "marvell,cp110-system-controller0",
-					     "syscon";
+				compatible = "syscon", "simple-mfd";
 				reg = <0x440000 0x1000>;
-				#clock-cells = <2>;
-				core-clock-output-names =
-					"cpm-apll", "cpm-ppv2-core", "cpm-eip",
-					"cpm-core", "cpm-nand-core";
-				gate-clock-output-names =
-					"cpm-audio", "cpm-communit", "cpm-nand",
-					"cpm-ppv2", "cpm-sdio", "cpm-mg-domain",
-					"cpm-mg-core", "cpm-xor1", "cpm-xor0",
-					"cpm-gop-dp", "none", "cpm-pcie_x10",
-					"cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor",
-					"cpm-sata", "cpm-sata-usb", "cpm-main",
-					"cpm-sd-mmc-gop", "none", "none",
-					"cpm-slow-io", "cpm-usb3h0", "cpm-usb3h1",
-					"cpm-usb3dev", "cpm-eip150", "cpm-eip197";
+
+				cpm_clk: clock {
+					compatible = "marvell,cp110-clock";
+					#clock-cells = <2>;
+				};
+
+				cpm_gpio1: gpio@100 {
+					compatible = "marvell,armada-8k-gpio";
+					offset = <0x100>;
+					ngpios = <32>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&cpm_pinctrl 0 0 32>;
+					status = "disabled";
+
+				};
+
+				cpm_gpio2: gpio@140 {
+					compatible = "marvell,armada-8k-gpio";
+					offset = <0x140>;
+					ngpios = <31>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&cpm_pinctrl 0 32 31>;
+					status = "disabled";
+				};
 			};
 
 			cpm_rtc: rtc@284000 {
 				compatible = "marvell,armada-8k-rtc";
 				reg = <0x284000 0x20>, <0x284080 0x24>;
 				reg-names = "rtc", "rtc-soc";
-				interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <ICU_GRP_NSR 77 IRQ_TYPE_LEVEL_HIGH>;
 			};
 
 			cpm_sata0: sata@540000 {
 				compatible = "marvell,armada-8k-ahci",
 					     "generic-ahci";
 				reg = <0x540000 0x30000>;
-				interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 15>;
+				interrupts = <ICU_GRP_NSR 107 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 15>;
 				status = "disabled";
 			};
 
@@ -137,8 +167,8 @@
 					     "generic-xhci";
 				reg = <0x500000 0x4000>;
 				dma-coherent;
-				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 22>;
+				interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 22>;
 				status = "disabled";
 			};
 
@@ -147,8 +177,8 @@
 					     "generic-xhci";
 				reg = <0x510000 0x4000>;
 				dma-coherent;
-				interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 23>;
+				interrupts = <ICU_GRP_NSR 105 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 23>;
 				status = "disabled";
 			};
 
@@ -158,7 +188,7 @@
 				      <0x6b0000 0x1000>;
 				dma-coherent;
 				msi-parent = <&gic_v2m0>;
-				clocks = <&cpm_syscon0 1 8>;
+				clocks = <&cpm_clk 1 8>;
 			};
 
 			cpm_xor1: xor@6c0000 {
@@ -167,7 +197,7 @@
 				      <0x6d0000 0x1000>;
 				dma-coherent;
 				msi-parent = <&gic_v2m0>;
-				clocks = <&cpm_syscon0 1 7>;
+				clocks = <&cpm_clk 1 7>;
 			};
 
 			cpm_spi0: spi@700600 {
@@ -176,7 +206,7 @@
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
 				cell-index = <1>;
-				clocks = <&cpm_syscon0 1 21>;
+				clocks = <&cpm_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -186,7 +216,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				cell-index = <2>;
-				clocks = <&cpm_syscon0 1 21>;
+				clocks = <&cpm_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -195,8 +225,8 @@
 				reg = <0x701000 0x20>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 21>;
+				interrupts = <ICU_GRP_NSR 120 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -205,25 +235,25 @@
 				reg = <0x701100 0x20>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 21>;
+				interrupts = <ICU_GRP_NSR 121 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 21>;
 				status = "disabled";
 			};
 
 			cpm_trng: trng@760000 {
 				compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
 				reg = <0x760000 0x7d>;
-				interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cpm_syscon0 1 25>;
+				interrupts = <ICU_GRP_NSR 95 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_clk 1 25>;
 				status = "okay";
 			};
 
 			cpm_sdhci0: sdhci@780000 {
 				compatible = "marvell,armada-cp110-sdhci";
 				reg = <0x780000 0x300>;
-				interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
 				clock-names = "core";
-				clocks = <&cpm_syscon0 1 4>;
+				clocks = <&cpm_clk 1 4>;
 				dma-coherent;
 				status = "disabled";
 			};
@@ -231,17 +261,15 @@
 			cpm_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
-					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <ICU_GRP_NSR 87 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 88 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 89 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 90 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 91 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 92 IRQ_TYPE_LEVEL_HIGH>;
 				interrupt-names = "mem", "ring0", "ring1",
 				"ring2", "ring3", "eip";
-				clocks = <&cpm_syscon0 1 26>;
-				status = "disabled";
+				clocks = <&cpm_clk 1 26>;
 			};
 		};
 
@@ -264,10 +292,10 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xf6000000 0  0xf6000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
 			num-lanes = <1>;
-			clocks = <&cpm_syscon0 1 13>;
+			clocks = <&cpm_clk 1 13>;
 			status = "disabled";
 		};
 
@@ -290,11 +318,11 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xf7000000 0  0xf7000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
 			num-lanes = <1>;
-			clocks = <&cpm_syscon0 1 11>;
+			clocks = <&cpm_clk 1 11>;
 			status = "disabled";
 		};
 
@@ -317,11 +345,11 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xf8000000 0  0xf8000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
 			num-lanes = <1>;
-			clocks = <&cpm_syscon0 1 12>;
+			clocks = <&cpm_clk 1 12>;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75..95f8e5f 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -44,19 +44,20 @@
  * Device Tree file for Marvell Armada CP110 Slave.
  */
 
+#define ICU_GRP_NSR 0x0
+
 / {
 	cp110-slave {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		compatible = "simple-bus";
-		interrupt-parent = <&gic>;
+		interrupt-parent = <&cps_icu>;
 		ranges;
 
 		config-space@f4000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
-			interrupt-parent = <&gic>;
 			ranges = <0x0 0x0 0xf4000000 0x2000000>;
 
 			cps_rtc: rtc@284000 {
@@ -69,27 +70,27 @@
 			cps_ethernet: ethernet@0 {
 				compatible = "marvell,armada-7k-pp22";
 				reg = <0x0 0x100000>, <0x129000 0xb000>;
-				clocks = <&cps_syscon0 1 3>, <&cps_syscon0 1 9>, <&cps_syscon0 1 5>;
+				clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
 				clock-names = "pp_clk", "gop_clk", "mg_clk";
 				status = "disabled";
 				dma-coherent;
 
 				cps_eth0: eth0 {
-					interrupts = <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <0>;
 					gop-port-id = <0>;
 					status = "disabled";
 				};
 
 				cps_eth1: eth1 {
-					interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 40 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <1>;
 					gop-port-id = <2>;
 					status = "disabled";
 				};
 
 				cps_eth2: eth2 {
-					interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <ICU_GRP_NSR 41 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <2>;
 					gop-port-id = <3>;
 					status = "disabled";
@@ -101,34 +102,64 @@
 				#size-cells = <0>;
 				compatible = "marvell,orion-mdio";
 				reg = <0x12a200 0x10>;
+				clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
+				status = "disabled";
+			};
+
+			cps_xmdio: mdio@12a600 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,xmdio";
+				reg = <0x12a600 0x10>;
+				status = "disabled";
+			};
+
+			cps_icu: interrupt-controller@1e0000 {
+				compatible = "marvell,cp110-icu";
+				reg = <0x1e0000 0x10>;
+				#interrupt-cells = <3>;
+				interrupt-controller;
+				msi-parent = <&gicp>;
 			};
 
 			cps_syscon0: system-controller@440000 {
-				compatible = "marvell,cp110-system-controller0",
-					     "syscon";
+				compatible = "syscon", "simple-mfd";
 				reg = <0x440000 0x1000>;
-				#clock-cells = <2>;
-				core-clock-output-names =
-					"cps-apll", "cps-ppv2-core", "cps-eip",
-					"cps-core", "cps-nand-core";
-				gate-clock-output-names =
-					"cps-audio", "cps-communit", "cps-nand",
-					"cps-ppv2", "cps-sdio", "cps-mg-domain",
-					"cps-mg-core", "cps-xor1", "cps-xor0",
-					"cps-gop-dp", "none", "cps-pcie_x10",
-					"cps-pcie_x11", "cps-pcie_x4", "cps-pcie-xor",
-					"cps-sata", "cps-sata-usb", "cps-main",
-					"cps-sd-mmc-gop", "none", "none",
-					"cps-slow-io", "cps-usb3h0", "cps-usb3h1",
-					"cps-usb3dev", "cps-eip150", "cps-eip197";
+
+				cps_clk: clock {
+					compatible = "marvell,cp110-clock";
+					#clock-cells = <2>;
+				};
+
+				cps_gpio1: gpio@100 {
+					compatible = "marvell,armada-8k-gpio";
+					offset = <0x100>;
+					ngpios = <32>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&cps_pinctrl 0 0 32>;
+					status = "disabled";
+
+				};
+
+				cps_gpio2: gpio@140 {
+					compatible = "marvell,armada-8k-gpio";
+					offset = <0x140>;
+					ngpios = <31>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					gpio-ranges = <&cps_pinctrl 0 32 31>;
+					status = "disabled";
+				};
+
 			};
 
 			cps_sata0: sata@540000 {
 				compatible = "marvell,armada-8k-ahci",
 					     "generic-ahci";
 				reg = <0x540000 0x30000>;
-				interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 15>;
+				interrupts = <ICU_GRP_NSR 107 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 15>;
 				status = "disabled";
 			};
 
@@ -137,8 +168,8 @@
 					     "generic-xhci";
 				reg = <0x500000 0x4000>;
 				dma-coherent;
-				interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 22>;
+				interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 22>;
 				status = "disabled";
 			};
 
@@ -147,8 +178,8 @@
 					     "generic-xhci";
 				reg = <0x510000 0x4000>;
 				dma-coherent;
-				interrupts = <GIC_SPI 285 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 23>;
+				interrupts = <ICU_GRP_NSR 105 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 23>;
 				status = "disabled";
 			};
 
@@ -158,7 +189,7 @@
 				      <0x6b0000 0x1000>;
 				dma-coherent;
 				msi-parent = <&gic_v2m0>;
-				clocks = <&cps_syscon0 1 8>;
+				clocks = <&cps_clk 1 8>;
 			};
 
 			cps_xor1: xor@6c0000 {
@@ -167,7 +198,7 @@
 				      <0x6d0000 0x1000>;
 				dma-coherent;
 				msi-parent = <&gic_v2m0>;
-				clocks = <&cps_syscon0 1 7>;
+				clocks = <&cps_clk 1 7>;
 			};
 
 			cps_spi0: spi@700600 {
@@ -176,7 +207,7 @@
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
 				cell-index = <3>;
-				clocks = <&cps_syscon0 1 21>;
+				clocks = <&cps_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -186,7 +217,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				cell-index = <4>;
-				clocks = <&cps_syscon0 1 21>;
+				clocks = <&cps_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -195,8 +226,8 @@
 				reg = <0x701000 0x20>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				interrupts = <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 21>;
+				interrupts = <ICU_GRP_NSR 120 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 21>;
 				status = "disabled";
 			};
 
@@ -205,32 +236,38 @@
 				reg = <0x701100 0x20>;
 				#address-cells = <1>;
 				#size-cells = <0>;
-				interrupts = <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 21>;
+				interrupts = <ICU_GRP_NSR 121 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 21>;
 				status = "disabled";
 			};
 
 			cps_trng: trng@760000 {
 				compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
 				reg = <0x760000 0x7d>;
-				interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&cps_syscon0 1 25>;
+				interrupts = <ICU_GRP_NSR 95 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_clk 1 25>;
 				status = "okay";
 			};
 
 			cps_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
-					     <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
-					     <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <ICU_GRP_NSR 87 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 88 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 89 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 90 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 91 IRQ_TYPE_LEVEL_HIGH>,
+					     <ICU_GRP_NSR 92 IRQ_TYPE_LEVEL_HIGH>;
 				interrupt-names = "mem", "ring0", "ring1",
 						  "ring2", "ring3", "eip";
-				clocks = <&cps_syscon0 1 26>;
+				clocks = <&cps_clk 1 26>;
+				/*
+				 * The cryptographic engine found on the cp110
+				 * master is enabled by default at the SoC
+				 * level. Because it is not possible as of now
+				 * to enable two cryptographic engines in
+				 * parallel, disable this one by default.
+				 */
 				status = "disabled";
 			};
 		};
@@ -254,10 +291,10 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xfa000000 0  0xfa000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
 			num-lanes = <1>;
-			clocks = <&cps_syscon0 1 13>;
+			clocks = <&cps_clk 1 13>;
 			status = "disabled";
 		};
 
@@ -280,11 +317,11 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xfb000000 0  0xfb000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
 			num-lanes = <1>;
-			clocks = <&cps_syscon0 1 11>;
+			clocks = <&cps_clk 1 11>;
 			status = "disabled";
 		};
 
@@ -307,11 +344,11 @@
 				/* non-prefetchable memory */
 				0x82000000 0 0xfc000000 0  0xfc000000 0 0xf00000>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>;
-			interrupts = <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
 			num-lanes = <1>;
-			clocks = <&cps_syscon0 1 12>;
+			clocks = <&cps_clk 1 12>;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 9fbfd32..015eb07 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -1,5 +1,6 @@
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt6755-evb.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/mediatek/mt6797-evb.dts b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts
new file mode 100644
index 0000000..c79109c
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt6797.dtsi"
+
+/ {
+	model = "MediaTek MT6797 Evaluation Board";
+	compatible = "mediatek,mt6797-evb", "mediatek,mt6797";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x1e800000>;
+	};
+
+	chosen {};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
new file mode 100644
index 0000000..31088a9f
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/mt6797-clk.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "mediatek,mt6797";
+	interrupt-parent = <&sysirq>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x000>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x001>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x002>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x003>;
+		};
+
+		cpu4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x100>;
+		};
+
+		cpu5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x101>;
+		};
+
+		cpu6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x102>;
+		};
+
+		cpu7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x103>;
+		};
+
+		cpu8: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			enable-method = "psci";
+			reg = <0x200>;
+		};
+
+		cpu9: cpu@201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			enable-method = "psci";
+			reg = <0x201>;
+		};
+	};
+
+	clk26m: oscillator@0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
+	};
+
+	clk32k: oscillator@1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32000>;
+		clock-output-names = "clk32k";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	topckgen: topckgen@10000000 {
+		compatible = "mediatek,mt6797-topckgen";
+		reg = <0 0x10000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	infrasys: infracfg_ao@10001000 {
+		compatible = "mediatek,mt6797-infracfg", "syscon";
+		reg = <0 0x10001000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	scpsys: scpsys@10006000 {
+		compatible = "mediatek,mt6797-scpsys";
+		#power-domain-cells = <1>;
+		reg = <0 0x10006000 0 0x1000>;
+		clocks = <&topckgen CLK_TOP_MUX_MFG>,
+			 <&topckgen CLK_TOP_MUX_MM>,
+			 <&topckgen CLK_TOP_MUX_VDEC>;
+		clock-names = "mfg", "mm", "vdec";
+		infracfg = <&infrasys>;
+	};
+
+	apmixedsys: apmixed@1000c000 {
+		compatible = "mediatek,mt6797-apmixedsys";
+		reg = <0 0x1000c000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	sysirq: intpol-controller@10200620 {
+		compatible = "mediatek,mt6797-sysirq",
+			     "mediatek,mt6577-sysirq";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		reg = <0 0x10220620 0 0x20>,
+		      <0 0x10220690 0 0x10>;
+	};
+
+	uart0: serial@11002000 {
+		compatible = "mediatek,mt6797-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11002000 0 0x400>;
+		interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&infrasys CLK_INFRA_UART0>,
+			 <&infrasys CLK_INFRA_AP_DMA>;
+		clock-names = "baud", "bus";
+		status = "disabled";
+	};
+
+	uart1: serial@11003000 {
+		compatible = "mediatek,mt6797-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11003000 0 0x400>;
+		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&infrasys CLK_INFRA_UART1>,
+			 <&infrasys CLK_INFRA_AP_DMA>;
+		clock-names = "baud", "bus";
+		status = "disabled";
+	};
+
+	uart2: serial@11004000 {
+		compatible = "mediatek,mt6797-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11004000 0 0x400>;
+		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&infrasys CLK_INFRA_UART2>,
+			 <&infrasys CLK_INFRA_AP_DMA>;
+		clock-names = "baud", "bus";
+		status = "disabled";
+	};
+
+	uart3: serial@11005000 {
+		compatible = "mediatek,mt6797-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11005000 0 0x400>;
+		interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&infrasys CLK_INFRA_UART3>,
+			 <&infrasys CLK_INFRA_AP_DMA>;
+		clock-names = "baud", "bus";
+		status = "disabled";
+	};
+
+	mmsys: mmsys_config@14000000 {
+		compatible = "mediatek,mt6797-mmsys", "syscon";
+		reg = <0 0x14000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	imgsys: imgsys_config@15000000  {
+		compatible = "mediatek,mt6797-imgsys", "syscon";
+		reg = <0 0x15000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	vdecsys: vdec_gcon@16000000 {
+		compatible = "mediatek,mt6797-vdecsys", "syscon";
+		reg = <0 0x16000000 0 0x10000>;
+		#clock-cells = <1>;
+	};
+
+	vencsys: venc_gcon@17000000 {
+		compatible = "mediatek,mt6797-vencsys", "syscon";
+		reg = <0 0x17000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	gic: interrupt-controller@19000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-controller;
+		reg = <0 0x19000000 0 0x10000>,    /* GICD */
+		      <0 0x19200000 0 0x200000>,   /* GICR */
+		      <0 0x10240000 0 0x2000>;     /* GICC */
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 6922252..b99a273 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -731,8 +731,9 @@
 			      <0 0x11280700 0 0x0100>;
 			reg-names = "mac", "ippc";
 			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
-			phys = <&phy_port0 PHY_TYPE_USB3>,
-			       <&phy_port1 PHY_TYPE_USB2>;
+			phys = <&u2port0 PHY_TYPE_USB2>,
+			       <&u3port0 PHY_TYPE_USB3>,
+			       <&u2port1 PHY_TYPE_USB2>;
 			power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
 			clocks = <&topckgen CLK_TOP_USB30_SEL>,
 				 <&clk26m>,
@@ -763,21 +764,31 @@
 		u3phy: usb-phy@11290000 {
 			compatible = "mediatek,mt8173-u3phy";
 			reg = <0 0x11290000 0 0x800>;
-			clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
-			clock-names = "u3phya_ref";
 			#address-cells = <2>;
 			#size-cells = <2>;
 			ranges;
 			status = "okay";
 
-			phy_port0: port@11290800 {
-				reg = <0 0x11290800 0 0x800>;
+			u2port0: usb-phy@11290800 {
+				reg = <0 0x11290800 0 0x100>;
+				clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
+				clock-names = "ref";
 				#phy-cells = <1>;
 				status = "okay";
 			};
 
-			phy_port1: port@11291000 {
-				reg = <0 0x11291000 0 0x800>;
+			u3port0: usb-phy@11290900 {
+				reg = <0 0x11290900 0 0x700>;
+				clocks = <&clk26m>;
+				clock-names = "ref";
+				#phy-cells = <1>;
+				status = "okay";
+			};
+
+			u2port1: usb-phy@11291000 {
+				reg = <0 0x11291000 0 0x100>;
+				clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
+				clock-names = "ref";
 				#phy-cells = <1>;
 				status = "okay";
 			};
@@ -792,80 +803,74 @@
 			#clock-cells = <1>;
 		};
 
-		mdp {
-			compatible = "mediatek,mt8173-mdp";
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges;
+		mdp_rdma0: rdma@14001000 {
+			compatible = "mediatek,mt8173-mdp-rdma",
+				     "mediatek,mt8173-mdp";
+			reg = <0 0x14001000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+				 <&mmsys CLK_MM_MUTEX_32K>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_RDMA0>;
+			mediatek,larb = <&larb0>;
 			mediatek,vpu = <&vpu>;
+		};
 
-			mdp_rdma0: rdma@14001000 {
-				compatible = "mediatek,mt8173-mdp-rdma";
-				reg = <0 0x14001000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_RDMA0>,
-					 <&mmsys CLK_MM_MUTEX_32K>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-				iommus = <&iommu M4U_PORT_MDP_RDMA0>;
-				mediatek,larb = <&larb0>;
-			};
+		mdp_rdma1: rdma@14002000 {
+			compatible = "mediatek,mt8173-mdp-rdma";
+			reg = <0 0x14002000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RDMA1>,
+				 <&mmsys CLK_MM_MUTEX_32K>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_RDMA1>;
+			mediatek,larb = <&larb4>;
+		};
 
-			mdp_rdma1: rdma@14002000 {
-				compatible = "mediatek,mt8173-mdp-rdma";
-				reg = <0 0x14002000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_RDMA1>,
-					 <&mmsys CLK_MM_MUTEX_32K>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-				iommus = <&iommu M4U_PORT_MDP_RDMA1>;
-				mediatek,larb = <&larb4>;
-			};
+		mdp_rsz0: rsz@14003000 {
+			compatible = "mediatek,mt8173-mdp-rsz";
+			reg = <0 0x14003000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RSZ0>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		};
 
-			mdp_rsz0: rsz@14003000 {
-				compatible = "mediatek,mt8173-mdp-rsz";
-				reg = <0 0x14003000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_RSZ0>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-			};
+		mdp_rsz1: rsz@14004000 {
+			compatible = "mediatek,mt8173-mdp-rsz";
+			reg = <0 0x14004000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		};
 
-			mdp_rsz1: rsz@14004000 {
-				compatible = "mediatek,mt8173-mdp-rsz";
-				reg = <0 0x14004000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_RSZ1>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-			};
+		mdp_rsz2: rsz@14005000 {
+			compatible = "mediatek,mt8173-mdp-rsz";
+			reg = <0 0x14005000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_RSZ2>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		};
 
-			mdp_rsz2: rsz@14005000 {
-				compatible = "mediatek,mt8173-mdp-rsz";
-				reg = <0 0x14005000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_RSZ2>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-			};
+		mdp_wdma0: wdma@14006000 {
+			compatible = "mediatek,mt8173-mdp-wdma";
+			reg = <0 0x14006000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_WDMA>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_WDMA>;
+			mediatek,larb = <&larb0>;
+		};
 
-			mdp_wdma0: wdma@14006000 {
-				compatible = "mediatek,mt8173-mdp-wdma";
-				reg = <0 0x14006000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_WDMA>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-				iommus = <&iommu M4U_PORT_MDP_WDMA>;
-				mediatek,larb = <&larb0>;
-			};
+		mdp_wrot0: wrot@14007000 {
+			compatible = "mediatek,mt8173-mdp-wrot";
+			reg = <0 0x14007000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_WROT0>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_WROT0>;
+			mediatek,larb = <&larb0>;
+		};
 
-			mdp_wrot0: wrot@14007000 {
-				compatible = "mediatek,mt8173-mdp-wrot";
-				reg = <0 0x14007000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_WROT0>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-				iommus = <&iommu M4U_PORT_MDP_WROT0>;
-				mediatek,larb = <&larb0>;
-			};
-
-			mdp_wrot1: wrot@14008000 {
-				compatible = "mediatek,mt8173-mdp-wrot";
-				reg = <0 0x14008000 0 0x1000>;
-				clocks = <&mmsys CLK_MM_MDP_WROT1>;
-				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
-				iommus = <&iommu M4U_PORT_MDP_WROT1>;
-				mediatek,larb = <&larb4>;
-			};
+		mdp_wrot1: wrot@14008000 {
+			compatible = "mediatek,mt8173-mdp-wrot";
+			reg = <0 0x14008000 0 0x1000>;
+			clocks = <&mmsys CLK_MM_MDP_WROT1>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			iommus = <&iommu M4U_PORT_MDP_WROT1>;
+			mediatek,larb = <&larb4>;
 		};
 
 		ovl0: ovl@1400c000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 2b17936..c2f0f27 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -12,7 +12,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		compatible = "nvidia,tegra124-pcie";
 		device_type = "pci";
 		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
@@ -55,6 +55,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
 			reg = <0x000800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -68,6 +69,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
 			reg = <0x001000 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 5e62e68..0b0552c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -348,6 +348,13 @@
 		reg-names = "pmc", "wake", "aotag", "scratch";
 	};
 
+	ccplex@e000000 {
+		compatible = "nvidia,tegra186-ccplex-cluster";
+		reg = <0x0 0x0e000000 0x0 0x3fffff>;
+
+		nvidia,bpmp = <&bpmp>;
+	};
+
 	gpu@17000000 {
 		compatible = "nvidia,gp10b";
 		reg = <0x0 0x17000000 0x0 0x1000000>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 4c1ea7a..7cb95e0 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -7,7 +7,7 @@
 	model = "NVIDIA Jetson TX1 Developer Kit";
 	compatible = "nvidia,p2371-2180", "nvidia,tegra210";
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		status = "okay";
 
 		avdd-pll-uerefe-supply = <&avdd_1v05_pll>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 8f26c4d..29f471e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -11,7 +11,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	pcie-controller@01003000 {
+	pcie@1003000 {
 		compatible = "nvidia,tegra210-pcie";
 		device_type = "pci";
 		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
@@ -51,6 +51,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
 			reg = <0x000800 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
@@ -64,6 +65,7 @@
 			device_type = "pci";
 			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
 			reg = <0x001000 0 0 0 0>;
+			bus-range = <0x00 0xff>;
 			status = "disabled";
 
 			#address-cells = <3>;
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index a17f5b9..bd310ac1 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -178,7 +178,7 @@
 			led@5 {
 				label = "apq8016-sbc:yellow:wlan";
 				gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
-				linux,default-trigger = "wlan";
+				linux,default-trigger = "phy0tx";
 				default-state = "off";
 			};
 
@@ -215,22 +215,16 @@
 		usb@78d9000 {
 			extcon = <&usb_id>, <&usb_id>;
 			status = "okay";
-		};
-
-		ehci@78d9000 {
-			status = "okay";
-		};
-
-		phy@78d9000 {
-			v1p8-supply = <&pm8916_l7>;
-			v3p3-supply = <&pm8916_l13>;
-			vddcx-supply = <&pm8916_s1>;
-			extcon = <&usb_id>, <&usb_id>;
-			dr_mode = "otg";
-			status = "okay";
-			switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&usb_sw_sel_pm>;
+			adp-disable;
+			hnp-disable;
+			srp-disable;
+			ulpi {
+				phy {
+					v1p8-supply = <&pm8916_l7>;
+					v3p3-supply = <&pm8916_l13>;
+					extcon = <&usb_id>;
+				};
+			};
 		};
 
 		lpass@07708000 {
@@ -348,6 +342,14 @@
 		pinctrl-0 = <&usb_id_default>;
 	};
 
+	usb-switch {
+		compatible = "toshiba,tc7usb40mu";
+		switch-gpios = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>;
+		extcon = <&usb_id>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb_sw_sel_pm>;
+	};
+
 	hdmi-out {
 		compatible = "hdmi-connector";
 		type = "a";
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index ab30939..039991f 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -546,44 +546,40 @@
 			status = "disabled";
 		};
 
-		usb_dev: usb@78d9000 {
+		otg: usb@78d9000 {
 			compatible = "qcom,ci-hdrc";
-			reg = <0x78d9000 0x400>;
-			dr_mode = "peripheral";
-			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
-			usb-phy = <&usb_otg>;
-			status = "disabled";
-		};
-
-		usb_host: ehci@78d9000 {
-			compatible = "qcom,ehci-host";
-			reg = <0x78d9000 0x400>;
-			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
-			usb-phy = <&usb_otg>;
-			status = "disabled";
-		};
-
-		usb_otg: phy@78d9000 {
-			compatible = "qcom,usb-otg-snps";
-			reg = <0x78d9000 0x400>;
+			reg = <0x78d9000 0x200>,
+			      <0x78d9200 0x200>;
 			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
-
-			qcom,vdd-levels = <500000 1000000 1320000>;
-			qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
-			dr_mode = "peripheral";
-			qcom,otg-control = <2>; // PMIC
-			qcom,manual-pullup;
-
 			clocks = <&gcc GCC_USB_HS_AHB_CLK>,
-				 <&gcc GCC_USB_HS_SYSTEM_CLK>,
-				 <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
-			clock-names = "iface", "core", "sleep";
-
-			resets = <&gcc GCC_USB2A_PHY_BCR>,
-				 <&gcc GCC_USB_HS_BCR>;
-			reset-names = "phy", "link";
+				 <&gcc GCC_USB_HS_SYSTEM_CLK>;
+			clock-names = "iface", "core";
+			assigned-clocks = <&gcc GCC_USB_HS_SYSTEM_CLK>;
+			assigned-clock-rates = <80000000>;
+			resets = <&gcc GCC_USB_HS_BCR>;
+			reset-names = "core";
+			phy_type = "ulpi";
+			dr_mode = "otg";
+			ahb-burst-config = <0>;
+			phy-names = "usb-phy";
+			phys = <&usb_hs_phy>;
 			status = "disabled";
+			#reset-cells = <1>;
+
+			ulpi {
+				usb_hs_phy: phy {
+					compatible = "qcom,usb-hs-phy-msm8916",
+						     "qcom,usb-hs-phy";
+					#phy-cells = <0>;
+					clocks = <&xo_board>, <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+					clock-names = "ref", "sleep";
+					resets = <&gcc GCC_USB2A_PHY_BCR>, <&otg 0>;
+					reset-names = "phy", "por";
+					qcom,init-seq = /bits/ 8 <0x0 0x44
+						0x1 0x6b 0x2 0x24 0x3 0x13>;
+				};
+			};
 		};
 
 		intc: interrupt-controller@b000000 {
@@ -1116,6 +1112,38 @@
 			};
 		};
 
+		debug@850000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0x850000 0x1000>;
+			clocks = <&rpmcc RPM_QDSS_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&CPU0>;
+		};
+
+		debug@852000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0x852000 0x1000>;
+			clocks = <&rpmcc RPM_QDSS_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&CPU1>;
+		};
+
+		debug@854000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0x854000 0x1000>;
+			clocks = <&rpmcc RPM_QDSS_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&CPU2>;
+		};
+
+		debug@856000 {
+			compatible = "arm,coresight-cpu-debug","arm,primecell";
+			reg = <0x856000 0x1000>;
+			clocks = <&rpmcc RPM_QDSS_CLK>;
+			clock-names = "apb_pclk";
+			cpu = <&CPU3>;
+		};
+
 		etm@85c000 {
 			compatible = "arm,coresight-etm4x", "arm,primecell";
 			reg = <0x85c000 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
index 44b2d37..1715787 100644
--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -68,6 +68,30 @@
 		clock-frequency = <32768>;
 	};
 
+	vreg_vph_pwr: vreg-vph-pwr {
+		compatible = "regulator-fixed";
+		status = "okay";
+		regulator-name = "vph-pwr";
+
+		regulator-min-microvolt = <3600000>;
+		regulator-max-microvolt = <3600000>;
+
+		regulator-always-on;
+	};
+
+	sfpb_mutex: hwmutex {
+		compatible = "qcom,sfpb-mutex";
+		syscon = <&sfpb_mutex_regs 0x0 0x100>;
+		#hwlock-cells = <1>;
+	};
+
+	smem {
+		compatible = "qcom,smem";
+		memory-region = <&smem_region>;
+		qcom,rpm-msg-ram = <&rpm_msg_ram>;
+		hwlocks = <&sfpb_mutex 3>;
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -82,6 +106,11 @@
 				<0xf9002000 0x1000>;
 		};
 
+		apcs: syscon@f900d000 {
+			compatible = "syscon";
+			reg = <0xf900d000 0x2000>;
+		};
+
 		timer@f9020000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -172,12 +201,36 @@
 			#power-domain-cells = <1>;
 			reg = <0xfc400000 0x2000>;
 		};
+
+		rpm_msg_ram: memory@fc428000 {
+			compatible = "qcom,rpm-msg-ram";
+			reg = <0xfc428000 0x4000>;
+		};
+
+		sfpb_mutex_regs: syscon@fd484000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "syscon";
+			reg = <0xfd484000 0x400>;
+		};
 	};
 
 	memory {
 		device_type = "memory";
 		reg = <0 0 0 0>; // bootloader will update
 	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		smem_region: smem@6a00000 {
+			reg = <0x0 0x6a00000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 };
 
 
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 9bc9c85..8f08571 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -311,6 +311,12 @@
 			reg = <0x300000 0x90000>;
 		};
 
+		kryocc: clock-controller@6400000 {
+			compatible = "qcom,apcc-msm8996";
+			reg = <0x6400000 0x90000>;
+			#clock-cells = <1>;
+		};
+
 		blsp1_spi0: spi@07575000 {
 			compatible = "qcom,spi-qup-v2.2.1";
 			reg = <0x07575000 0x600>;
diff --git a/arch/arm64/boot/dts/realtek/Makefile b/arch/arm64/boot/dts/realtek/Makefile
new file mode 100644
index 0000000..8521e92
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-zidoo-x9s.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts b/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts
new file mode 100644
index 0000000..6efa809
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+/memreserve/	0x0000000000000000 0x0000000000030000;
+/memreserve/	0x000000000001f000 0x0000000000001000;
+/memreserve/	0x0000000000030000 0x00000000000d0000;
+/memreserve/	0x0000000001b00000 0x00000000004be000;
+/memreserve/	0x0000000001ffe000 0x0000000000004000;
+
+#include "rtd1295.dtsi"
+
+/ {
+	compatible = "zidoo,x9s", "realtek,rtd1295";
+	model = "Zidoo X9S";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x80000000>;
+	};
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1295.dtsi b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
new file mode 100644
index 0000000..d8f8466
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
@@ -0,0 +1,131 @@
+/*
+ * Realtek RTD1295 SoC
+ *
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "realtek,rtd1295";
+	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			next-level-cache = <&l2>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			next-level-cache = <&l2>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			next-level-cache = <&l2>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			next-level-cache = <&l2>;
+		};
+
+		l2: l2-cache {
+			compatible = "cache";
+		};
+	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		tee@10100000 {
+			reg = <0x10100000 0xf00000>;
+			no-map;
+		};
+	};
+
+	arm-pmu {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+			(GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+			(GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+			(GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+			(GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		/* Exclude up to 2 GiB of RAM */
+		ranges = <0x80000000 0x80000000 0x80000000>;
+
+		uart0: serial@98007800 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x98007800 0x400>,
+			      <0x98007000 0x100>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <27000000>;
+			status = "disabled";
+		};
+
+		uart1: serial@9801b200 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x9801b200 0x100>,
+			      <0x9801b00c 0x100>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <432000000>;
+			status = "disabled";
+		};
+
+		uart2: serial@9801b400 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x9801b400 0x100>,
+			      <0x9801b00c 0x100>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <432000000>;
+			status = "disabled";
+		};
+
+		gic: interrupt-controller@ff011000 {
+			compatible = "arm,gic-400";
+			reg = <0xff011000 0x1000>,
+			      <0xff012000 0x2000>,
+			      <0xff014000 0x2000>,
+			      <0xff016000 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 1618e0a..acc4bb3 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,4 +1,6 @@
 dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb
+dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-salvator-x.dtb r8a7795-es1-h3ulcb.dtb
 dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts
new file mode 100644
index 0000000..95fe207
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7795-es1-h3ulcb.dts
@@ -0,0 +1,42 @@
+/*
+ * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
+
+/dts-v1/;
+#include "r8a7795-es1.dtsi"
+#include "ulcb.dtsi"
+
+/ {
+	model = "Renesas H3ULCB board based on r8a7795 ES1.x";
+	compatible = "renesas,h3ulcb", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts
new file mode 100644
index 0000000..b84c156
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7795-es1-salvator-x.dts
@@ -0,0 +1,115 @@
+/*
+ * Device Tree Source for the Salvator-X board
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
+
+/dts-v1/;
+#include "r8a7795-es1.dtsi"
+#include "salvator-x.dtsi"
+
+/ {
+	model = "Renesas Salvator-X board based on r8a7795 ES1.x";
+	compatible = "renesas,salvator-x", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&cpg CPG_MOD 727>,
+		 <&versaclock5 1>,
+		 <&x21_clk>,
+		 <&x22_clk>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&pfc {
+	usb2_pins: usb2 {
+		groups = "usb2";
+		function = "usb2";
+	};
+};
+
+&sata {
+	status = "okay";
+};
+
+&usb2_phy2 {
+	pinctrl-0 = <&usb2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
new file mode 100644
index 0000000..a0ba7bd
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
@@ -0,0 +1,84 @@
+/*
+ * Device Tree Source for the r8a7795 ES1.x SoC
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "r8a7795.dtsi"
+
+&soc {
+	xhci1: usb@ee0400000 {
+		compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
+		reg = <0 0xee040000 0 0xc00>;
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 327>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 327>;
+		status = "disabled";
+	};
+
+	fcpf2: fcp@fe952000 {
+		compatible = "renesas,fcpf";
+		reg = <0 0xfe952000 0 0x200>;
+		clocks = <&cpg CPG_MOD 613>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 613>;
+	};
+
+	vspi2: vsp@fe9c0000 {
+		compatible = "renesas,vsp2";
+		reg = <0 0xfe9c0000 0 0x8000>;
+		interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 629>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 629>;
+
+		renesas,fcp = <&fcpvi2>;
+	};
+
+	fcpvi2: fcp@fe9cf000 {
+		compatible = "renesas,fcpv";
+		reg = <0 0xfe9cf000 0 0x200>;
+		clocks = <&cpg CPG_MOD 609>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 609>;
+	};
+
+	vspd3: vsp@fea38000 {
+		compatible = "renesas,vsp2";
+		reg = <0 0xfea38000 0 0x4000>;
+		interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 620>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 620>;
+
+		renesas,fcp = <&fcpvd3>;
+	};
+
+	fcpvd3: fcp@fea3f000 {
+		compatible = "renesas,fcpv";
+		reg = <0 0xfea3f000 0 0x200>;
+		clocks = <&cpg CPG_MOD 600>;
+		power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+		resets = <&cpg 600>;
+	};
+
+	fdp1@fe948000 {
+		compatible = "renesas,fdp1";
+		reg = <0 0xfe948000 0 0x2400>;
+		interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 117>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		resets = <&cpg 117>;
+		renesas,fcp = <&fcpf2>;
+	};
+};
+
+&du {
+	compatible = "renesas,du-r8a7795";
+	vsps = <&vspd0 &vspd1 &vspd2 &vspd3>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
index ab35215..0426f41 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -9,24 +9,16 @@
  * kind, whether express or implied.
  */
 
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
+
 /dts-v1/;
 #include "r8a7795.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
+#include "ulcb.dtsi"
 
 / {
-	model = "Renesas H3ULCB board based on r8a7795";
+	model = "Renesas H3ULCB board based on r8a7795 ES2.0+";
 	compatible = "renesas,h3ulcb", "renesas,r8a7795";
 
-	aliases {
-		serial0 = &scif2;
-		ethernet0 = &avb;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
 	memory@48000000 {
 		device_type = "memory";
 		/* first 128MB is reserved for secure area. */
@@ -47,330 +39,4 @@
 		device_type = "memory";
 		reg = <0x7 0x00000000 0x0 0x40000000>;
 	};
-
-	leds {
-		compatible = "gpio-leds";
-
-		led5 {
-			gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
-		};
-		led6 {
-			gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>;
-		};
-	};
-
-	keyboard {
-		compatible = "gpio-keys";
-
-		key-1 {
-			linux,code = <KEY_1>;
-			label = "SW3";
-			wakeup-source;
-			debounce-interval = <20>;
-			gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
-		};
-	};
-
-	x12_clk: x12 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <24576000>;
-	};
-
-	reg_1p8v: regulator0 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-1.8V";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	reg_3p3v: regulator1 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-3.3V";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	vcc_sdhi0: regulator-vcc-sdhi0 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI0 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi0: regulator-vccq-sdhi0 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI0 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-
-	audio_clkout: audio-clkout {
-		/*
-		 * This is same as <&rcar_sound 0>
-		 * but needed to avoid cs2000/rcar_sound probe dead-lock
-		 */
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <11289600>;
-	};
-
-	rsnd_ak4613: sound {
-		compatible = "simple-audio-card";
-
-		simple-audio-card,format = "left_j";
-		simple-audio-card,bitclock-master = <&sndcpu>;
-		simple-audio-card,frame-master = <&sndcpu>;
-
-		sndcpu: simple-audio-card,cpu {
-			sound-dai = <&rcar_sound>;
-		};
-
-		sndcodec: simple-audio-card,codec {
-			sound-dai = <&ak4613>;
-		};
-	};
-};
-
-&extal_clk {
-	clock-frequency = <16666666>;
-};
-
-&extalr_clk {
-	clock-frequency = <32768>;
-};
-
-&pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
-	scif2_pins: scif2 {
-		groups = "scif2_data_a";
-		function = "scif2";
-	};
-
-	scif_clk_pins: scif_clk {
-		groups = "scif_clk_a";
-		function = "scif_clk";
-	};
-
-	i2c2_pins: i2c2 {
-		groups = "i2c2_a";
-		function = "i2c2";
-	};
-
-	avb_pins: avb {
-		groups = "avb_mdc";
-		function = "avb";
-	};
-
-	sdhi0_pins: sd0 {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <3300>;
-	};
-
-	sdhi0_pins_uhs: sd0_uhs {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <1800>;
-	};
-
-	sdhi2_pins: sd2 {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <3300>;
-	};
-
-	sdhi2_pins_uhs: sd2_uhs {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <1800>;
-	};
-
-	sound_pins: sound {
-		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
-		function = "ssi";
-	};
-
-	sound_clk_pins: sound-clk {
-		groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
-			 "audio_clkout_a", "audio_clkout3_a";
-		function = "audio_clk";
-	};
-
-	usb1_pins: usb1 {
-		groups = "usb1";
-		function = "usb1";
-	};
-};
-
-&scif2 {
-	pinctrl-0 = <&scif2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&scif_clk {
-	clock-frequency = <14745600>;
-};
-
-&i2c2 {
-	pinctrl-0 = <&i2c2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-
-	clock-frequency = <100000>;
-
-	ak4613: codec@10 {
-		compatible = "asahi-kasei,ak4613";
-		#sound-dai-cells = <0>;
-		reg = <0x10>;
-		clocks = <&rcar_sound 3>;
-
-		asahi-kasei,in1-single-end;
-		asahi-kasei,in2-single-end;
-		asahi-kasei,out1-single-end;
-		asahi-kasei,out2-single-end;
-		asahi-kasei,out3-single-end;
-		asahi-kasei,out4-single-end;
-		asahi-kasei,out5-single-end;
-		asahi-kasei,out6-single-end;
-	};
-
-	cs2000: clk-multiplier@4f {
-		#clock-cells = <0>;
-		compatible = "cirrus,cs2000-cp";
-		reg = <0x4f>;
-		clocks = <&audio_clkout>, <&x12_clk>;
-		clock-names = "clk_in", "ref_clk";
-
-		assigned-clocks = <&cs2000>;
-		assigned-clock-rates = <24576000>; /* 1/1 divide */
-	};
-};
-
-&rcar_sound {
-	pinctrl-0 = <&sound_pins &sound_clk_pins>;
-	pinctrl-names = "default";
-
-	/* Single DAI */
-	#sound-dai-cells = <0>;
-
-	/* audio_clkout0/1/2/3 */
-	#clock-cells = <1>;
-	clock-frequency = <11289600>;
-
-	status = "okay";
-
-	/* update <audio_clk_b> to <cs2000> */
-	clocks = <&cpg CPG_MOD 1005>,
-		 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
-		 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
-		 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
-		 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
-		 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
-		 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
-		 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
-		 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
-		 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
-		 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
-		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-		 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
-		 <&audio_clk_a>, <&cs2000>,
-		 <&audio_clk_c>,
-		 <&cpg CPG_CORE R8A7795_CLK_S0D4>;
-
-	rcar_sound,dai {
-		dai0 {
-			playback = <&ssi0 &src0 &dvc0>;
-			capture  = <&ssi1 &src1 &dvc1>;
-		};
-	};
-};
-
-&sdhi0 {
-	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-1 = <&sdhi0_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi0>;
-	vqmmc-supply = <&vccq_sdhi0>;
-	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&sdhi2 {
-	/* used for on-board 8bit eMMC */
-	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-1 = <&sdhi2_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&reg_3p3v>;
-	vqmmc-supply = <&reg_1p8v>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-};
-
-&ssi1 {
-	shared-pin;
-};
-
-&wdt0 {
-	timeout-sec = <60>;
-	status = "okay";
-};
-
-&audio_clk_a {
-	clock-frequency = <22579200>;
-};
-
-&avb {
-	pinctrl-0 = <&avb_pins>;
-	pinctrl-names = "default";
-	renesas,no-ether-link;
-	phy-handle = <&phy0>;
-	status = "okay";
-
-	phy0: ethernet-phy@0 {
-		rxc-skew-ps = <1500>;
-		reg = <0>;
-		interrupt-parent = <&gpio2>;
-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
-	};
-};
-
-&usb2_phy1 {
-	pinctrl-0 = <&usb1_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&ehci1 {
-	status = "okay";
-};
-
-&ohci1 {
-	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index 639aa08..684fb3b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -8,577 +8,108 @@
  * kind, whether express or implied.
  */
 
-/*
- * SSI-AK4613
- *
- * This command is required when Playback/Capture
- *
- *	amixer set "DVC Out" 100%
- *	amixer set "DVC In" 100%
- *
- * You can use Mute
- *
- *	amixer set "DVC Out Mute" on
- *	amixer set "DVC In Mute" on
- *
- * You can use Volume Ramp
- *
- *	amixer set "DVC Out Ramp Up Rate"   "0.125 dB/64 steps"
- *	amixer set "DVC Out Ramp Down Rate" "0.125 dB/512 steps"
- *	amixer set "DVC Out Ramp" on
- *	aplay xxx.wav &
- *	amixer set "DVC Out"  80%  // Volume Down
- *	amixer set "DVC Out" 100%  // Volume Up
- */
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
 
 /dts-v1/;
 #include "r8a7795.dtsi"
-#include <dt-bindings/gpio/gpio.h>
+#include "salvator-x.dtsi"
 
 / {
-	model = "Renesas Salvator-X board based on r8a7795";
+	model = "Renesas Salvator-X board based on r8a7795 ES2.0+";
 	compatible = "renesas,salvator-x", "renesas,r8a7795";
 
-	aliases {
-		serial0 = &scif2;
-		serial1 = &scif1;
-		ethernet0 = &avb;
-	};
-
-	chosen {
-		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
-		stdout-path = "serial0:115200n8";
-	};
-
 	memory@48000000 {
 		device_type = "memory";
 		/* first 128MB is reserved for secure area. */
 		reg = <0x0 0x48000000 0x0 0x38000000>;
 	};
 
-	x12_clk: x12 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <24576000>;
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
 	};
 
-	reg_1p8v: regulator0 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-1.8V";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-boot-on;
-		regulator-always-on;
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
 	};
 
-	reg_3p3v: regulator1 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-3.3V";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	vcc_sdhi0: regulator-vcc-sdhi0 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI0 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi0: regulator-vccq-sdhi0 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI0 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-
-	vcc_sdhi3: regulator-vcc-sdhi3 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI3 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio3 15 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi3: regulator-vccq-sdhi3 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI3 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-
-	vbus0_usb2: regulator-vbus0-usb2 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "USB20_VBUS0";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-
-		gpio = <&gpio6 16 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	audio_clkout: audio_clkout {
-		/*
-		 * This is same as <&rcar_sound 0>
-		 * but needed to avoid cs2000/rcar_sound probe dead-lock
-		 */
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <11289600>;
-	};
-
-	rsnd_ak4613: sound {
-		compatible = "simple-audio-card";
-
-		simple-audio-card,format = "left_j";
-		simple-audio-card,bitclock-master = <&sndcpu>;
-		simple-audio-card,frame-master = <&sndcpu>;
-
-		sndcpu: simple-audio-card,cpu {
-			sound-dai = <&rcar_sound>;
-		};
-
-		sndcodec: simple-audio-card,codec {
-			sound-dai = <&ak4613>;
-		};
-	};
-
-	vga-encoder {
-		compatible = "adi,adv7123";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-				adv7123_in: endpoint {
-					remote-endpoint = <&du_out_rgb>;
-				};
-			};
-			port@1 {
-				reg = <1>;
-				adv7123_out: endpoint {
-					remote-endpoint = <&vga_in>;
-				};
-			};
-		};
-	};
-
-	vga {
-		compatible = "vga-connector";
-
-		port {
-			vga_in: endpoint {
-				remote-endpoint = <&adv7123_out>;
-			};
-		};
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
 	};
 };
 
 &du {
-	pinctrl-0 = <&du_pins>;
-	pinctrl-names = "default";
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&cpg CPG_MOD 727>,
+		 <&versaclock5 1>,
+		 <&x21_clk>,
+		 <&x22_clk>,
+		 <&versaclock5 2>;
+	clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0",
+		      "dclkin.0", "dclkin.1", "dclkin.2", "dclkin.3";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&hdmi0 {
 	status = "okay";
 
 	ports {
-		port@0 {
-			endpoint {
-				remote-endpoint = <&adv7123_in>;
-			};
-		};
-		port@3 {
-			lvds_connector: endpoint {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
 			};
 		};
 	};
 };
 
-&extal_clk {
-	clock-frequency = <16666666>;
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
 };
 
-&extalr_clk {
-	clock-frequency = <32768>;
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&ohci2 {
+	status = "okay";
 };
 
 &pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
-	scif1_pins: scif1 {
-		groups = "scif1_data_a", "scif1_ctrl";
-		function = "scif1";
-	};
-	scif2_pins: scif2 {
-		groups = "scif2_data_a";
-		function = "scif2";
-	};
-	scif_clk_pins: scif_clk {
-		groups = "scif_clk_a";
-		function = "scif_clk";
-	};
-
-	i2c2_pins: i2c2 {
-		groups = "i2c2_a";
-		function = "i2c2";
-	};
-
-	avb_pins: avb {
-		mux {
-			groups = "avb_link", "avb_phy_int", "avb_mdc",
-				 "avb_mii";
-			function = "avb";
-		};
-
-		pins_mdc {
-			groups = "avb_mdc";
-			drive-strength = <24>;
-		};
-
-		pins_mii_tx {
-			pins = "PIN_AVB_TX_CTL", "PIN_AVB_TXC", "PIN_AVB_TD0",
-			       "PIN_AVB_TD1", "PIN_AVB_TD2", "PIN_AVB_TD3";
-			drive-strength = <12>;
-		};
-	};
-
-	du_pins: du {
-		groups = "du_rgb888", "du_sync", "du_oddf", "du_clk_out_0";
-		function = "du";
-	};
-
-	sdhi0_pins: sd0 {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <3300>;
-	};
-
-	sdhi0_pins_uhs: sd0_uhs {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <1800>;
-	};
-
-	sdhi2_pins: sd2 {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <3300>;
-	};
-
-	sdhi2_pins_uhs: sd2_uhs {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <1800>;
-	};
-
-	sdhi3_pins: sd3 {
-		groups = "sdhi3_data4", "sdhi3_ctrl";
-		function = "sdhi3";
-		power-source = <3300>;
-	};
-
-	sdhi3_pins_uhs: sd3_uhs {
-		groups = "sdhi3_data4", "sdhi3_ctrl";
-		function = "sdhi3";
-		power-source = <1800>;
-	};
-
-	sound_pins: sound {
-		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
-		function = "ssi";
-	};
-
-	sound_clk_pins: sound_clk {
-		groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
-			 "audio_clkout_a", "audio_clkout3_a";
-		function = "audio_clk";
-	};
-
-	usb0_pins: usb0 {
-		groups = "usb0";
-		function = "usb0";
-	};
-
-	usb1_pins: usb1 {
-		mux {
-			groups = "usb1";
-			function = "usb1";
-		};
-
-		ovc {
-			pins = "GP_6_27";
-			bias-pull-up;
-		};
-
-		pwen {
-			pins = "GP_6_26";
-			bias-pull-down;
-		};
-	};
-
 	usb2_pins: usb2 {
 		groups = "usb2";
 		function = "usb2";
 	};
 };
 
-&scif1 {
-	pinctrl-0 = <&scif1_pins>;
-	pinctrl-names = "default";
-
-	uart-has-rtscts;
-	status = "okay";
-};
-
-&scif2 {
-	pinctrl-0 = <&scif2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&scif_clk {
-	clock-frequency = <14745600>;
-};
-
-&i2c2 {
-	pinctrl-0 = <&i2c2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-
-	clock-frequency = <100000>;
-
-	ak4613: codec@10 {
-		compatible = "asahi-kasei,ak4613";
-		#sound-dai-cells = <0>;
-		reg = <0x10>;
-		clocks = <&rcar_sound 3>;
-
-		asahi-kasei,in1-single-end;
-		asahi-kasei,in2-single-end;
-		asahi-kasei,out1-single-end;
-		asahi-kasei,out2-single-end;
-		asahi-kasei,out3-single-end;
-		asahi-kasei,out4-single-end;
-		asahi-kasei,out5-single-end;
-		asahi-kasei,out6-single-end;
-	};
-
-	cs2000: clk_multiplier@4f {
-		#clock-cells = <0>;
-		compatible = "cirrus,cs2000-cp";
-		reg = <0x4f>;
-		clocks = <&audio_clkout>, <&x12_clk>;
-		clock-names = "clk_in", "ref_clk";
-
-		assigned-clocks = <&cs2000>;
-		assigned-clock-rates = <24576000>; /* 1/1 divide */
-	};
-};
-
-&rcar_sound {
-	pinctrl-0 = <&sound_pins &sound_clk_pins>;
-	pinctrl-names = "default";
-
-	/* Single DAI */
-	#sound-dai-cells = <0>;
-
-	/* audio_clkout0/1/2/3 */
-	#clock-cells = <1>;
-	clock-frequency = <11289600>;
-
-	status = "okay";
-
-	/* update <audio_clk_b> to <cs2000> */
-	clocks = <&cpg CPG_MOD 1005>,
-		 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
-		 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
-		 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
-		 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
-		 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
-		 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
-		 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
-		 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
-		 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
-		 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
-		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
-		 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
-		 <&audio_clk_a>, <&cs2000>,
-		 <&audio_clk_c>,
-		 <&cpg CPG_CORE R8A7795_CLK_S0D4>;
-
-	rcar_sound,dai {
-		dai0 {
-			playback = <&ssi0 &src0 &dvc0>;
-			capture  = <&ssi1 &src1 &dvc1>;
-		};
-	};
-};
-
 &sata {
 	status = "okay";
 };
 
-&sdhi0 {
-	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-1 = <&sdhi0_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi0>;
-	vqmmc-supply = <&vccq_sdhi0>;
-	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&sdhi2 {
-	/* used for on-board 8bit eMMC */
-	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-1 = <&sdhi2_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&reg_3p3v>;
-	vqmmc-supply = <&reg_1p8v>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-};
-
-&sdhi3 {
-	pinctrl-0 = <&sdhi3_pins>;
-	pinctrl-1 = <&sdhi3_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi3>;
-	vqmmc-supply = <&vccq_sdhi3>;
-	cd-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&ssi1 {
-	shared-pin;
-};
-
-&wdt0 {
-	timeout-sec = <60>;
-	status = "okay";
-};
-
-&audio_clk_a {
-	clock-frequency = <22579200>;
-};
-
-&i2c_dvfs {
-	status = "okay";
-};
-
-&avb {
-	pinctrl-0 = <&avb_pins>;
-	pinctrl-names = "default";
-	renesas,no-ether-link;
-	phy-handle = <&phy0>;
-	status = "okay";
-
-	phy0: ethernet-phy@0 {
-		rxc-skew-ps = <1500>;
-		reg = <0>;
-		interrupt-parent = <&gpio2>;
-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
-	};
-};
-
-&xhci0 {
-	status = "okay";
-};
-
-&usb2_phy0 {
-	pinctrl-0 = <&usb0_pins>;
-	pinctrl-names = "default";
-
-	vbus-supply = <&vbus0_usb2>;
-	status = "okay";
-};
-
-&usb2_phy1 {
-	pinctrl-0 = <&usb1_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
 &usb2_phy2 {
 	pinctrl-0 = <&usb2_pins>;
 	pinctrl-names = "default";
 
 	status = "okay";
 };
-
-&ehci0 {
-	status = "okay";
-};
-
-&ehci1 {
-	status = "okay";
-};
-
-&ehci2 {
-	status = "okay";
-};
-
-&ohci0 {
-	status = "okay";
-};
-
-&ohci1 {
-	status = "okay";
-};
-
-&ohci2 {
-	status = "okay";
-};
-
-&hsusb {
-	status = "okay";
-};
-
-&pcie_bus_clk {
-	clock-frequency = <100000000>;
-};
-
-&pciec0 {
-	status = "okay";
-};
-
-&pciec1 {
-	status = "okay";
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts
new file mode 100644
index 0000000..de35495
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-xs.dts
@@ -0,0 +1,109 @@
+/*
+ * Device Tree Source for the Salvator-X 2nd version board
+ *
+ * Copyright (C) 2015-2017 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#define CPG_AUDIO_CLK_I		R8A7795_CLK_S0D4
+
+/dts-v1/;
+#include "r8a7795.dtsi"
+#include "salvator-xs.dtsi"
+
+/ {
+	model = "Renesas Salvator-X 2nd version board based on r8a7795 ES2.0+";
+	compatible = "renesas,salvator-xs", "renesas,r8a7795";
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	memory@500000000 {
+		device_type = "memory";
+		reg = <0x5 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
+	};
+
+	memory@700000000 {
+		device_type = "memory";
+		reg = <0x7 0x00000000 0x0 0x40000000>;
+	};
+};
+
+&du {
+	clocks = <&cpg CPG_MOD 724>,
+		 <&cpg CPG_MOD 723>,
+		 <&cpg CPG_MOD 722>,
+		 <&cpg CPG_MOD 721>,
+		 <&cpg CPG_MOD 727>,
+		 <&x21_clk>,
+		 <&x22_clk>;
+	clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0",
+		      "dclkin.1", "dclkin.2";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&hdmi0 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi0_out: endpoint {
+				remote-endpoint = <&hdmi0_con>;
+			};
+		};
+	};
+};
+
+&hdmi0_con {
+	remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
+&hdmi1 {
+	status = "okay";
+
+	ports {
+		port@1 {
+			reg = <1>;
+			rcar_dw_hdmi1_out: endpoint {
+				remote-endpoint = <&hdmi1_con>;
+			};
+		};
+	};
+};
+
+&hdmi1_con {
+	remote-endpoint = <&rcar_dw_hdmi1_out>;
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&pfc {
+	usb2_pins: usb2 {
+		groups = "usb2";
+		function = "usb2";
+	};
+};
+
+&usb2_phy2 {
+	pinctrl-0 = <&usb2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index e99d644..e31c1b6 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -182,7 +182,7 @@
 		clock-frequency = <0>;
 	};
 
-	soc {
+	soc: soc {
 		compatible = "simple-bus";
 		interrupt-parent = <&gic>;
 
@@ -398,7 +398,7 @@
 			#power-domain-cells = <1>;
 		};
 
-		pfc: pfc@e6060000 {
+		pfc: pin-controller@e6060000 {
 			compatible = "renesas,pfc-r8a7795";
 			reg = <0 0xe6060000 0 0x50c>;
 		};
@@ -883,6 +883,8 @@
 			clocks = <&cpg CPG_MOD 926>;
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
 			resets = <&cpg 926>;
+			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+			dma-names = "tx", "rx";
 			status = "disabled";
 		};
 
@@ -1118,6 +1120,16 @@
 				      "dvc.0", "dvc.1",
 				      "clk_a", "clk_b", "clk_c", "clk_i";
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 1005>,
+				 <&cpg 1006>, <&cpg 1007>,
+				 <&cpg 1008>, <&cpg 1009>,
+				 <&cpg 1010>, <&cpg 1011>,
+				 <&cpg 1012>, <&cpg 1013>,
+				 <&cpg 1014>, <&cpg 1015>;
+			reset-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0";
 			status = "disabled";
 
 			rcar_sound,dvc {
@@ -1274,16 +1286,6 @@
 			status = "disabled";
 		};
 
-		xhci1: usb@ee0400000 {
-			compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
-			reg = <0 0xee040000 0 0xc00>;
-			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 327>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 327>;
-			status = "disabled";
-		};
-
 		usb_dmac0: dma-controller@e65a0000 {
 			compatible = "renesas,r8a7795-usb-dmac",
 				     "renesas,usb-dmac";
@@ -1568,14 +1570,6 @@
 			resets = <&cpg 614>;
 		};
 
-		fcpf2: fcp@fe952000 {
-			compatible = "renesas,fcpf";
-			reg = <0 0xfe952000 0 0x200>;
-			clocks = <&cpg CPG_MOD 613>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 613>;
-		};
-
 		vspbd: vsp@fe960000 {
 			compatible = "renesas,vsp2";
 			reg = <0 0xfe960000 0 0x8000>;
@@ -1633,25 +1627,6 @@
 			resets = <&cpg 610>;
 		};
 
-		vspi2: vsp@fe9c0000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfe9c0000 0 0x8000>;
-			interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 629>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 629>;
-
-			renesas,fcp = <&fcpvi2>;
-		};
-
-		fcpvi2: fcp@fe9cf000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfe9cf000 0 0x200>;
-			clocks = <&cpg CPG_MOD 609>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 609>;
-		};
-
 		vspd0: vsp@fea20000 {
 			compatible = "renesas,vsp2";
 			reg = <0 0xfea20000 0 0x4000>;
@@ -1709,25 +1684,6 @@
 			resets = <&cpg 601>;
 		};
 
-		vspd3: vsp@fea38000 {
-			compatible = "renesas,vsp2";
-			reg = <0 0xfea38000 0 0x4000>;
-			interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 620>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 620>;
-
-			renesas,fcp = <&fcpvd3>;
-		};
-
-		fcpvd3: fcp@fea3f000 {
-			compatible = "renesas,fcpv";
-			reg = <0 0xfea3f000 0 0x200>;
-			clocks = <&cpg CPG_MOD 600>;
-			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
-			resets = <&cpg 600>;
-		};
-
 		fdp1@fe940000 {
 			compatible = "renesas,fdp1";
 			reg = <0 0xfe940000 0 0x2400>;
@@ -1748,18 +1704,57 @@
 			renesas,fcp = <&fcpf1>;
 		};
 
-		fdp1@fe948000 {
-			compatible = "renesas,fdp1";
-			reg = <0 0xfe948000 0 0x2400>;
-			interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&cpg CPG_MOD 117>;
-			power-domains = <&sysc R8A7795_PD_A3VP>;
-			resets = <&cpg 117>;
-			renesas,fcp = <&fcpf2>;
+		hdmi0: hdmi0@fead0000 {
+			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
+			reg = <0 0xfead0000 0 0x10000>;
+			interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 729>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
+			clock-names = "iahb", "isfr";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 729>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dw_hdmi0_in: endpoint {
+						remote-endpoint = <&du_out_hdmi0>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+				};
+			};
+		};
+
+		hdmi1: hdmi1@feae0000 {
+			compatible = "renesas,r8a7795-hdmi", "renesas,rcar-gen3-hdmi";
+			reg = <0 0xfeae0000 0 0x10000>;
+			interrupts = <GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 728>, <&cpg CPG_CORE R8A7795_CLK_HDMI>;
+			clock-names = "iahb", "isfr";
+			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+			resets = <&cpg 728>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				port@0 {
+					reg = <0>;
+					dw_hdmi1_in: endpoint {
+						remote-endpoint = <&du_out_hdmi1>;
+					};
+				};
+				port@1 {
+					reg = <1>;
+				};
+			};
 		};
 
 		du: display@feb00000 {
-			compatible = "renesas,du-r8a7795";
 			reg = <0 0xfeb00000 0 0x80000>,
 			      <0 0xfeb90000 0 0x14>;
 			reg-names = "du", "lvds.0";
@@ -1775,8 +1770,6 @@
 			clock-names = "du.0", "du.1", "du.2", "du.3", "lvds.0";
 			status = "disabled";
 
-			vsps = <&vspd0 &vspd1 &vspd2 &vspd3>;
-
 			ports {
 				#address-cells = <1>;
 				#size-cells = <0>;
@@ -1789,11 +1782,13 @@
 				port@1 {
 					reg = <1>;
 					du_out_hdmi0: endpoint {
+						remote-endpoint = <&dw_hdmi0_in>;
 					};
 				};
 				port@2 {
 					reg = <2>;
 					du_out_hdmi1: endpoint {
+						remote-endpoint = <&dw_hdmi1_in>;
 					};
 				};
 				port@3 {
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
index 372b2a9..38b58b7 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
@@ -9,180 +9,24 @@
  * kind, whether express or implied.
  */
 
+#define CPG_AUDIO_CLK_I		R8A7796_CLK_S0D4
+
 /dts-v1/;
 #include "r8a7796.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
+#include "ulcb.dtsi"
 
 / {
 	model = "Renesas M3ULCB board based on r8a7796";
 	compatible = "renesas,m3ulcb", "renesas,r8a7796";
 
-	aliases {
-		serial0 = &scif2;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
 	memory@48000000 {
 		device_type = "memory";
 		/* first 128MB is reserved for secure area. */
 		reg = <0x0 0x48000000 0x0 0x38000000>;
 	};
 
-	leds {
-		compatible = "gpio-leds";
-
-		led5 {
-			gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
-		};
-		led6 {
-			gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>;
-		};
+	memory@600000000 {
+		device_type = "memory";
+		reg = <0x6 0x00000000 0x0 0x40000000>;
 	};
-
-	keyboard {
-		compatible = "gpio-keys";
-
-		key-1 {
-			linux,code = <KEY_1>;
-			label = "SW3";
-			wakeup-source;
-			debounce-interval = <20>;
-			gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
-		};
-	};
-
-	reg_1p8v: regulator0 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-1.8V";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	reg_3p3v: regulator1 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-3.3V";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	vcc_sdhi0: regulator-vcc-sdhi0 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI0 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi0: regulator-vccq-sdhi0 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI0 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-};
-
-&extal_clk {
-	clock-frequency = <16666666>;
-};
-
-&extalr_clk {
-	clock-frequency = <32768>;
-};
-
-&pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
-	scif2_pins: scif2 {
-		groups = "scif2_data_a";
-		function = "scif2";
-	};
-
-	scif_clk_pins: scif_clk {
-		groups = "scif_clk_a";
-		function = "scif_clk";
-	};
-
-	sdhi0_pins: sd0 {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <3300>;
-	};
-
-	sdhi0_pins_uhs: sd0_uhs {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <1800>;
-	};
-
-	sdhi2_pins: sd2 {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <3300>;
-	};
-
-	sdhi2_pins_uhs: sd2_uhs {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <1800>;
-	};
-};
-
-&sdhi0 {
-	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-1 = <&sdhi0_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi0>;
-	vqmmc-supply = <&vccq_sdhi0>;
-	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&sdhi2 {
-	/* used for on-board 8bit eMMC */
-	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-1 = <&sdhi2_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&reg_3p3v>;
-	vqmmc-supply = <&reg_1p8v>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-};
-
-&scif2 {
-	pinctrl-0 = <&scif2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&scif_clk {
-	clock-frequency = <14745600>;
-};
-
-&wdt0 {
-	timeout-sec = <60>;
-	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
index c9f59b6..db4f162 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -8,25 +8,16 @@
  * kind, whether express or implied.
  */
 
+#define CPG_AUDIO_CLK_I		R8A7796_CLK_S0D4
+
 /dts-v1/;
 #include "r8a7796.dtsi"
-#include <dt-bindings/gpio/gpio.h>
+#include "salvator-x.dtsi"
 
 / {
 	model = "Renesas Salvator-X board based on r8a7796";
 	compatible = "renesas,salvator-x", "renesas,r8a7796";
 
-	aliases {
-		serial0 = &scif2;
-		serial1 = &scif1;
-		ethernet0 = &avb;
-	};
-
-	chosen {
-		bootargs = "ignore_loglevel";
-		stdout-path = "serial0:115200n8";
-	};
-
 	memory@48000000 {
 		device_type = "memory";
 		/* first 128MB is reserved for secure area. */
@@ -37,233 +28,4 @@
 		device_type = "memory";
 		reg = <0x6 0x00000000 0x0 0x80000000>;
 	};
-
-	reg_1p8v: regulator0 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-1.8V";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	reg_3p3v: regulator1 {
-		compatible = "regulator-fixed";
-		regulator-name = "fixed-3.3V";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-boot-on;
-		regulator-always-on;
-	};
-
-	vcc_sdhi0: regulator-vcc-sdhi0 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI0 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi0: regulator-vccq-sdhi0 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI0 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-
-	vcc_sdhi3: regulator-vcc-sdhi3 {
-		compatible = "regulator-fixed";
-
-		regulator-name = "SDHI3 Vcc";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpio = <&gpio3 15 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
-	vccq_sdhi3: regulator-vccq-sdhi3 {
-		compatible = "regulator-gpio";
-
-		regulator-name = "SDHI3 VccQ";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-
-		gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
-		gpios-states = <1>;
-		states = <3300000 1
-			  1800000 0>;
-	};
-};
-
-&pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
-	avb_pins: avb {
-		groups = "avb_mdc";
-		function = "avb";
-	};
-
-	scif1_pins: scif1 {
-		groups = "scif1_data_a", "scif1_ctrl";
-		function = "scif1";
-	};
-
-	scif2_pins: scif2 {
-		groups = "scif2_data_a";
-		function = "scif2";
-	};
-	scif_clk_pins: scif_clk {
-		groups = "scif_clk_a";
-		function = "scif_clk";
-	};
-
-	i2c2_pins: i2c2 {
-		groups = "i2c2_a";
-		function = "i2c2";
-	};
-
-	sdhi0_pins: sd0 {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <3300>;
-	};
-
-	sdhi0_pins_uhs: sd0_uhs {
-		groups = "sdhi0_data4", "sdhi0_ctrl";
-		function = "sdhi0";
-		power-source = <1800>;
-	};
-
-	sdhi2_pins: sd2 {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <3300>;
-	};
-
-	sdhi2_pins_uhs: sd2_uhs {
-		groups = "sdhi2_data8", "sdhi2_ctrl";
-		function = "sdhi2";
-		power-source = <1800>;
-	};
-
-	sdhi3_pins: sd3 {
-		groups = "sdhi3_data4", "sdhi3_ctrl";
-		function = "sdhi3";
-		power-source = <3300>;
-	};
-
-	sdhi3_pins_uhs: sd3_uhs {
-		groups = "sdhi3_data4", "sdhi3_ctrl";
-		function = "sdhi3";
-		power-source = <1800>;
-	};
-};
-
-&avb {
-	pinctrl-0 = <&avb_pins>;
-	pinctrl-names = "default";
-	renesas,no-ether-link;
-	phy-handle = <&phy0>;
-	status = "okay";
-
-	phy0: ethernet-phy@0 {
-		rxc-skew-ps = <1500>;
-		reg = <0>;
-		interrupt-parent = <&gpio2>;
-		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
-	};
-};
-
-&extal_clk {
-	clock-frequency = <16666666>;
-};
-
-&extalr_clk {
-	clock-frequency = <32768>;
-};
-
-&sdhi0 {
-	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-1 = <&sdhi0_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi0>;
-	vqmmc-supply = <&vccq_sdhi0>;
-	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&sdhi2 {
-	/* used for on-board 8bit eMMC */
-	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-1 = <&sdhi2_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&reg_3p3v>;
-	vqmmc-supply = <&reg_1p8v>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-};
-
-&sdhi3 {
-	pinctrl-0 = <&sdhi3_pins>;
-	pinctrl-1 = <&sdhi3_pins_uhs>;
-	pinctrl-names = "default", "state_uhs";
-
-	vmmc-supply = <&vcc_sdhi3>;
-	vqmmc-supply = <&vccq_sdhi3>;
-	cd-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
-	bus-width = <4>;
-	sd-uhs-sdr50;
-	status = "okay";
-};
-
-&scif1 {
-	pinctrl-0 = <&scif1_pins>;
-	pinctrl-names = "default";
-
-	uart-has-rtscts;
-	status = "okay";
-};
-
-&scif2 {
-	pinctrl-0 = <&scif2_pins>;
-	pinctrl-names = "default";
-	status = "okay";
-};
-
-&scif_clk {
-	clock-frequency = <14745600>;
-};
-
-&i2c2 {
-	pinctrl-0 = <&i2c2_pins>;
-	pinctrl-names = "default";
-
-	status = "okay";
-};
-
-&wdt0 {
-	timeout-sec = <60>;
-	status = "okay";
-};
-
-&i2c_dvfs {
-	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index 2ec1ed5f..1f67109 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -120,6 +120,29 @@
 		clock-frequency = <0>;
 	};
 
+	/*
+	 * The external audio clocks are configured as 0 Hz fixed frequency
+	 * clocks by default.
+	 * Boards that provide audio clocks should override them.
+	 */
+	audio_clk_a: audio_clk_a {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_b: audio_clk_b {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
+	audio_clk_c: audio_clk_c {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
 	/* External CAN clock - to be overridden by boards that provide it */
 	can_clk: can {
 		compatible = "fixed-clock";
@@ -134,6 +157,13 @@
 		clock-frequency = <0>;
 	};
 
+	/* External PCIe clock - can be overridden by the board */
+	pcie_bus_clk: pcie_bus {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
 	soc {
 		compatible = "simple-bus";
 		interrupt-parent = <&gic>;
@@ -362,6 +392,78 @@
 			clocks = <&cpg CPG_MOD 926>;
 			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
 			resets = <&cpg 926>;
+			dmas = <&dmac0 0x11>, <&dmac0 0x10>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		pwm0: pwm@e6e30000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e30000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm1: pwm@e6e31000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e31000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm2: pwm@e6e32000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e32000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm3: pwm@e6e33000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e33000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm4: pwm@e6e34000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e34000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm5: pwm@e6e35000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e35000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		pwm6: pwm@e6e36000 {
+			compatible = "renesas,pwm-r8a7796", "renesas,pwm-rcar";
+			reg = <0 0xe6e36000 0 8>;
+			#pwm-cells = <2>;
+			clocks = <&cpg CPG_MOD 523>;
+			resets = <&cpg 523>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
 
@@ -931,6 +1033,106 @@
 			dma-channels = <16>;
 		};
 
+		audma0: dma-controller@ec700000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec700000 0 0x10000>;
+			interrupts = <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 502>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 502>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		audma1: dma-controller@ec720000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xec720000 0 0x10000>;
+			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 501>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 501>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		hsusb: usb@e6590000 {
+			/* placeholder */
+		};
+
+		xhci0: usb@ee000000 {
+			/* placeholder */
+		};
+
+		ohci0: usb@ee080000 {
+			/* placeholder */
+		};
+
+		ehci0: usb@ee080100 {
+			/* placeholder */
+		};
+
+		usb2_phy0: usb-phy@ee080200 {
+			/* placeholder */
+		};
+
+		ohci1: usb@ee0a0000 {
+			/* placeholder */
+		};
+
+		ehci1: usb@ee0a0100 {
+			/* placeholder */
+		};
+
+		usb2_phy1: usb-phy@ee0a0200 {
+			/* placeholder */
+		};
+
 		sdhi0: sd@ee100000 {
 			compatible = "renesas,sdhi-r8a7796";
 			reg = <0 0xee100000 0 0x2000>;
@@ -1033,5 +1235,224 @@
 				};
 			};
 		};
+
+		rcar_sound: sound@ec500000 {
+			/*
+			 * #sound-dai-cells is required
+			 *
+			 * Single DAI : #sound-dai-cells = <0>;	<&rcar_sound>;
+			 * Multi  DAI : #sound-dai-cells = <1>;	<&rcar_sound N>;
+			 */
+			/*
+			 * #clock-cells is required for audio_clkout0/1/2/3
+			 *
+			 * clkout	: #clock-cells = <0>;	<&rcar_sound>;
+			 * clkout0/1/2/3: #clock-cells = <1>;	<&rcar_sound N>;
+			 */
+			compatible =  "renesas,rcar_sound-r8a7796", "renesas,rcar_sound-gen3";
+			reg =	<0 0xec500000 0 0x1000>, /* SCU */
+				<0 0xec5a0000 0 0x100>,  /* ADG */
+				<0 0xec540000 0 0x1000>, /* SSIU */
+				<0 0xec541000 0 0x280>,  /* SSI */
+				<0 0xec740000 0 0x200>;  /* Audio DMAC peri peri */
+			reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+			clocks = <&cpg CPG_MOD 1005>,
+				 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+				 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+				 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+				 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+				 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+				 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+				 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+				 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+				 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+				 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+				 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+				 <&audio_clk_a>, <&audio_clk_b>,
+				 <&audio_clk_c>,
+				 <&cpg CPG_CORE R8A7796_CLK_S0D4>;
+			clock-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0",
+				      "src.9", "src.8", "src.7", "src.6",
+				      "src.5", "src.4", "src.3", "src.2",
+				      "src.1", "src.0",
+				      "mix.1", "mix.0",
+				      "ctu.1", "ctu.0",
+				      "dvc.0", "dvc.1",
+				      "clk_a", "clk_b", "clk_c", "clk_i";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			resets = <&cpg 1005>,
+				 <&cpg 1006>, <&cpg 1007>,
+				 <&cpg 1008>, <&cpg 1009>,
+				 <&cpg 1010>, <&cpg 1011>,
+				 <&cpg 1012>, <&cpg 1013>,
+				 <&cpg 1014>, <&cpg 1015>;
+			reset-names = "ssi-all",
+				      "ssi.9", "ssi.8", "ssi.7", "ssi.6",
+				      "ssi.5", "ssi.4", "ssi.3", "ssi.2",
+				      "ssi.1", "ssi.0";
+			status = "disabled";
+
+			rcar_sound,dvc {
+				dvc0: dvc-0 {
+					dmas = <&audma1 0xbc>;
+					dma-names = "tx";
+				};
+				dvc1: dvc-1 {
+					dmas = <&audma1 0xbe>;
+					dma-names = "tx";
+				};
+			};
+
+			rcar_sound,mix {
+				mix0: mix-0 { };
+				mix1: mix-1 { };
+			};
+
+			rcar_sound,ctu {
+				ctu00: ctu-0 { };
+				ctu01: ctu-1 { };
+				ctu02: ctu-2 { };
+				ctu03: ctu-3 { };
+				ctu10: ctu-4 { };
+				ctu11: ctu-5 { };
+				ctu12: ctu-6 { };
+				ctu13: ctu-7 { };
+			};
+
+			rcar_sound,src {
+				src0: src-0 {
+					interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x85>, <&audma1 0x9a>;
+					dma-names = "rx", "tx";
+				};
+				src1: src-1 {
+					interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x87>, <&audma1 0x9c>;
+					dma-names = "rx", "tx";
+				};
+				src2: src-2 {
+					interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x89>, <&audma1 0x9e>;
+					dma-names = "rx", "tx";
+				};
+				src3: src-3 {
+					interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+					dma-names = "rx", "tx";
+				};
+				src4: src-4 {
+					interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+					dma-names = "rx", "tx";
+				};
+				src5: src-5 {
+					interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+					dma-names = "rx", "tx";
+				};
+				src6: src-6 {
+					interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x91>, <&audma1 0xb4>;
+					dma-names = "rx", "tx";
+				};
+				src7: src-7 {
+					interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x93>, <&audma1 0xb6>;
+					dma-names = "rx", "tx";
+				};
+				src8: src-8 {
+					interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x95>, <&audma1 0xb8>;
+					dma-names = "rx", "tx";
+				};
+				src9: src-9 {
+					interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x97>, <&audma1 0xba>;
+					dma-names = "rx", "tx";
+				};
+			};
+
+			rcar_sound,ssi {
+				ssi0: ssi-0 {
+					interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi1: ssi-1 {
+					interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi2: ssi-2 {
+					interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi3: ssi-3 {
+					interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi4: ssi-4 {
+					interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi5: ssi-5 {
+					interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi6: ssi-6 {
+					interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi7: ssi-7 {
+					interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi8: ssi-8 {
+					interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+				ssi9: ssi-9 {
+					interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+					dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+					dma-names = "rx", "tx", "rxu", "txu";
+				};
+			};
+		};
+
+		pciec0: pcie@fe000000 {
+			/* placeholder */
+		};
+
+		pciec1: pcie@ee800000 {
+			/* placeholder */
+		};
+
+		du: display@feb00000 {
+			/* placeholder */
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					du_out_rgb: endpoint {
+					};
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
new file mode 100644
index 0000000..aef35e0
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -0,0 +1,629 @@
+/*
+ * Device Tree Source for common parts of Salvator-X board variants
+ *
+ * Copyright (C) 2015-2016 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/*
+ * SSI-AK4613
+ *
+ * These commands are required for Playback/Capture
+ *
+ *	amixer set "DVC Out" 100%
+ *	amixer set "DVC In" 100%
+ *
+ * You can use Mute
+ *
+ *	amixer set "DVC Out Mute" on
+ *	amixer set "DVC In Mute" on
+ *
+ * You can use Volume Ramp
+ *
+ *	amixer set "DVC Out Ramp Up Rate"   "0.125 dB/64 steps"
+ *	amixer set "DVC Out Ramp Down Rate" "0.125 dB/512 steps"
+ *	amixer set "DVC Out Ramp" on
+ *	aplay xxx.wav &
+ *	amixer set "DVC Out"  80%  // Volume Down
+ *	amixer set "DVC Out" 100%  // Volume Up
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	aliases {
+		serial0 = &scif2;
+		serial1 = &scif1;
+		ethernet0 = &avb;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+		stdout-path = "serial0:115200n8";
+	};
+
+	audio_clkout: audio_clkout {
+		/*
+		 * This is the same as <&rcar_sound 0>,
+		 * but it is needed to avoid a cs2000/rcar_sound probe deadlock
+		 */
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <11289600>;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm1 0 50000>;
+
+		brightness-levels = <256 128 64 16 8 4 0>;
+		default-brightness-level = <6>;
+
+		enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+	};
+
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	rsnd_ak4613: sound {
+		compatible = "simple-audio-card";
+
+		simple-audio-card,format = "left_j";
+		simple-audio-card,bitclock-master = <&sndcpu>;
+		simple-audio-card,frame-master = <&sndcpu>;
+
+		sndcpu: simple-audio-card,cpu {
+			sound-dai = <&rcar_sound>;
+		};
+
+		sndcodec: simple-audio-card,codec {
+			sound-dai = <&ak4613>;
+		};
+	};
+
+	vbus0_usb2: regulator-vbus0-usb2 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "USB20_VBUS0";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+
+		gpio = <&gpio6 16 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vcc_sdhi0: regulator-vcc-sdhi0 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI0 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi0: regulator-vccq-sdhi0 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI0 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	vcc_sdhi3: regulator-vcc-sdhi3 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI3 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio3 15 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi3: regulator-vccq-sdhi3 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI3 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	hdmi0-out {
+		compatible = "hdmi-connector";
+		label = "HDMI0 OUT";
+		type = "a";
+
+		port {
+			hdmi0_con: endpoint {
+			};
+		};
+	};
+
+	hdmi1-out {
+		compatible = "hdmi-connector";
+		label = "HDMI1 OUT";
+		type = "a";
+
+		port {
+			hdmi1_con: endpoint {
+			};
+		};
+	};
+
+	vga {
+		compatible = "vga-connector";
+
+		port {
+			vga_in: endpoint {
+				remote-endpoint = <&adv7123_out>;
+			};
+		};
+	};
+
+	vga-encoder {
+		compatible = "adi,adv7123";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7123_in: endpoint {
+					remote-endpoint = <&du_out_rgb>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				adv7123_out: endpoint {
+					remote-endpoint = <&vga_in>;
+				};
+			};
+		};
+	};
+
+	x12_clk: x12 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+
+	/* External DU dot clocks */
+	x21_clk: x21-clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <33000000>;
+	};
+
+	x22_clk: x22-clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <33000000>;
+	};
+
+	x23_clk: x23-clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <25000000>;
+	};
+};
+
+&audio_clk_a {
+	clock-frequency = <22579200>;
+};
+
+&avb {
+	pinctrl-0 = <&avb_pins>;
+	pinctrl-names = "default";
+	renesas,no-ether-link;
+	phy-handle = <&phy0>;
+	status = "okay";
+
+	phy0: ethernet-phy@0 {
+		rxc-skew-ps = <1500>;
+		reg = <0>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&du {
+	pinctrl-0 = <&du_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	ports {
+		port@0 {
+			endpoint {
+				remote-endpoint = <&adv7123_in>;
+			};
+		};
+		port@3 {
+			lvds_connector: endpoint {
+			};
+		};
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&extalr_clk {
+	clock-frequency = <32768>;
+};
+
+&hsusb {
+	status = "okay";
+};
+
+&i2c2 {
+	pinctrl-0 = <&i2c2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+
+	clock-frequency = <100000>;
+
+	ak4613: codec@10 {
+		compatible = "asahi-kasei,ak4613";
+		#sound-dai-cells = <0>;
+		reg = <0x10>;
+		clocks = <&rcar_sound 3>;
+
+		asahi-kasei,in1-single-end;
+		asahi-kasei,in2-single-end;
+		asahi-kasei,out1-single-end;
+		asahi-kasei,out2-single-end;
+		asahi-kasei,out3-single-end;
+		asahi-kasei,out4-single-end;
+		asahi-kasei,out5-single-end;
+		asahi-kasei,out6-single-end;
+	};
+
+	cs2000: clk_multiplier@4f {
+		#clock-cells = <0>;
+		compatible = "cirrus,cs2000-cp";
+		reg = <0x4f>;
+		clocks = <&audio_clkout>, <&x12_clk>;
+		clock-names = "clk_in", "ref_clk";
+
+		assigned-clocks = <&cs2000>;
+		assigned-clock-rates = <24576000>; /* 1/1 divide */
+	};
+};
+
+&i2c4 {
+	status = "okay";
+
+	csa_vdd: adc@7c {
+		compatible = "maxim,max9611";
+		reg = <0x7c>;
+
+		shunt-resistor-micro-ohms = <5000>;
+	};
+
+	csa_dvfs: adc@7f {
+		compatible = "maxim,max9611";
+		reg = <0x7f>;
+
+		shunt-resistor-micro-ohms = <5000>;
+	};
+};
+
+&i2c_dvfs {
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&pcie_bus_clk {
+	clock-frequency = <100000000>;
+};
+
+&pciec0 {
+	status = "okay";
+};
+
+&pciec1 {
+	status = "okay";
+};
+
+&pfc {
+	pinctrl-0 = <&scif_clk_pins>;
+	pinctrl-names = "default";
+
+	avb_pins: avb {
+		mux {
+			groups = "avb_link", "avb_phy_int", "avb_mdc",
+				 "avb_mii";
+			function = "avb";
+		};
+
+		pins_mdc {
+			groups = "avb_mdc";
+			drive-strength = <24>;
+		};
+
+		pins_mii_tx {
+			pins = "PIN_AVB_TX_CTL", "PIN_AVB_TXC", "PIN_AVB_TD0",
+			       "PIN_AVB_TD1", "PIN_AVB_TD2", "PIN_AVB_TD3";
+			drive-strength = <12>;
+		};
+	};
+
+	du_pins: du {
+		groups = "du_rgb888", "du_sync", "du_oddf", "du_clk_out_0";
+		function = "du";
+	};
+
+	i2c2_pins: i2c2 {
+		groups = "i2c2_a";
+		function = "i2c2";
+	};
+
+	pwm1_pins: pwm1 {
+		groups = "pwm1_a";
+		function = "pwm1";
+	};
+
+	scif1_pins: scif1 {
+		groups = "scif1_data_a", "scif1_ctrl";
+		function = "scif1";
+	};
+
+	scif2_pins: scif2 {
+		groups = "scif2_data_a";
+		function = "scif2";
+	};
+
+	scif_clk_pins: scif_clk {
+		groups = "scif_clk_a";
+		function = "scif_clk";
+	};
+
+	sdhi0_pins: sd0 {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};
+
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
+	};
+
+	sdhi3_pins: sd3 {
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
+		power-source = <3300>;
+	};
+
+	sdhi3_pins_uhs: sd3_uhs {
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
+		power-source = <1800>;
+	};
+
+	sound_pins: sound {
+		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
+		function = "ssi";
+	};
+
+	sound_clk_pins: sound_clk {
+		groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
+			 "audio_clkout_a", "audio_clkout3_a";
+		function = "audio_clk";
+	};
+
+	usb0_pins: usb0 {
+		groups = "usb0";
+		function = "usb0";
+	};
+
+	usb1_pins: usb1 {
+		mux {
+			groups = "usb1";
+			function = "usb1";
+		};
+
+		ovc {
+			pins = "GP_6_27";
+			bias-pull-up;
+		};
+
+		pwen {
+			pins = "GP_6_26";
+			bias-pull-down;
+		};
+	};
+};
+
+&pwm1 {
+	pinctrl-0 = <&pwm1_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&rcar_sound {
+	pinctrl-0 = <&sound_pins &sound_clk_pins>;
+	pinctrl-names = "default";
+
+	/* Single DAI */
+	#sound-dai-cells = <0>;
+
+	/* audio_clkout0/1/2/3 */
+	#clock-cells = <1>;
+	clock-frequency = <11289600 12288000>;
+
+	status = "okay";
+
+	/* update <audio_clk_b> to <cs2000> */
+	clocks = <&cpg CPG_MOD 1005>,
+		 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+		 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+		 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+		 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+		 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+		 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+		 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+		 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+		 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+		 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+		 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+		 <&audio_clk_a>, <&cs2000>,
+		 <&audio_clk_c>,
+		 <&cpg CPG_CORE CPG_AUDIO_CLK_I>;
+
+	rcar_sound,dai {
+		dai0 {
+			playback = <&ssi0 &src0 &dvc0>;
+			capture  = <&ssi1 &src1 &dvc1>;
+		};
+	};
+};
+
+&scif1 {
+	pinctrl-0 = <&scif1_pins>;
+	pinctrl-names = "default";
+
+	uart-has-rtscts;
+	status = "okay";
+};
+
+&scif2 {
+	pinctrl-0 = <&scif2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&scif_clk {
+	clock-frequency = <14745600>;
+};
+
+&sdhi0 {
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi0>;
+	vqmmc-supply = <&vccq_sdhi0>;
+	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	mmc-hs200-1_8v;
+	non-removable;
+	status = "okay";
+};
+
+&sdhi3 {
+	pinctrl-0 = <&sdhi3_pins>;
+	pinctrl-1 = <&sdhi3_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi3>;
+	vqmmc-supply = <&vccq_sdhi3>;
+	cd-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&ssi1 {
+	shared-pin;
+};
+
+&usb2_phy0 {
+	pinctrl-0 = <&usb0_pins>;
+	pinctrl-names = "default";
+
+	vbus-supply = <&vbus0_usb2>;
+	status = "okay";
+};
+
+&usb2_phy1 {
+	pinctrl-0 = <&usb1_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&wdt0 {
+	timeout-sec = <60>;
+	status = "okay";
+};
+
+&xhci0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/salvator-x.dtsi b/arch/arm64/boot/dts/renesas/salvator-x.dtsi
new file mode 100644
index 0000000..468868c
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/salvator-x.dtsi
@@ -0,0 +1,30 @@
+/*
+ * Device Tree Source for the Salvator-X board
+ *
+ * Copyright (C) 2015-2016 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "salvator-common.dtsi"
+
+/ {
+	model = "Renesas Salvator-X board";
+	compatible = "renesas,salvator-x";
+};
+
+&extal_clk {
+	clock-frequency = <16666666>;
+};
+
+&i2c4 {
+	versaclock5: clock-generator@6a {
+		compatible = "idt,5p49v5923";
+		reg = <0x6a>;
+		#clock-cells = <1>;
+		clocks = <&x23_clk>;
+		clock-names = "xin";
+	};
+};
diff --git a/arch/arm64/boot/dts/renesas/salvator-xs.dtsi b/arch/arm64/boot/dts/renesas/salvator-xs.dtsi
new file mode 100644
index 0000000..81227e3
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/salvator-xs.dtsi
@@ -0,0 +1,20 @@
+/*
+ * Device Tree Source for the Salvator-X 2nd version board
+ *
+ * Copyright (C) 2015-2017 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "salvator-common.dtsi"
+
+/ {
+	model = "Renesas Salvator-X 2nd version board";
+	compatible = "renesas,salvator-xs";
+};
+
+&extal_clk {
+	clock-frequency = <16640000>;
+};
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
new file mode 100644
index 0000000..b5c6ee0
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -0,0 +1,367 @@
+/*
+ * Device Tree Source for the R-Car Gen3 ULCB board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Renesas R-Car Gen3 ULCB board";
+
+	aliases {
+		serial0 = &scif2;
+		ethernet0 = &avb;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	audio_clkout: audio-clkout {
+		/*
+	 * This is the same as <&rcar_sound 0>,
+	 * but it is needed to avoid a cs2000/rcar_sound probe deadlock
+		 */
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <11289600>;
+	};
+
+	keyboard {
+		compatible = "gpio-keys";
+
+		key-1 {
+			linux,code = <KEY_1>;
+			label = "SW3";
+			wakeup-source;
+			debounce-interval = <20>;
+			gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		led5 {
+			gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+		};
+		led6 {
+			gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	rsnd_ak4613: sound {
+		compatible = "simple-audio-card";
+
+		simple-audio-card,format = "left_j";
+		simple-audio-card,bitclock-master = <&sndcpu>;
+		simple-audio-card,frame-master = <&sndcpu>;
+
+		sndcpu: simple-audio-card,cpu {
+			sound-dai = <&rcar_sound>;
+		};
+
+		sndcodec: simple-audio-card,codec {
+			sound-dai = <&ak4613>;
+		};
+	};
+
+	vcc_sdhi0: regulator-vcc-sdhi0 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI0 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi0: regulator-vccq-sdhi0 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI0 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	x12_clk: x12 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+};
+
+&audio_clk_a {
+	clock-frequency = <22579200>;
+};
+
+&avb {
+	pinctrl-0 = <&avb_pins>;
+	pinctrl-names = "default";
+	renesas,no-ether-link;
+	phy-handle = <&phy0>;
+	status = "okay";
+
+	phy0: ethernet-phy@0 {
+		rxc-skew-ps = <1500>;
+		reg = <0>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&extal_clk {
+	clock-frequency = <16666666>;
+};
+
+&extalr_clk {
+	clock-frequency = <32768>;
+};
+
+&i2c2 {
+	pinctrl-0 = <&i2c2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+
+	clock-frequency = <100000>;
+
+	ak4613: codec@10 {
+		compatible = "asahi-kasei,ak4613";
+		#sound-dai-cells = <0>;
+		reg = <0x10>;
+		clocks = <&rcar_sound 3>;
+
+		asahi-kasei,in1-single-end;
+		asahi-kasei,in2-single-end;
+		asahi-kasei,out1-single-end;
+		asahi-kasei,out2-single-end;
+		asahi-kasei,out3-single-end;
+		asahi-kasei,out4-single-end;
+		asahi-kasei,out5-single-end;
+		asahi-kasei,out6-single-end;
+	};
+
+	cs2000: clk-multiplier@4f {
+		#clock-cells = <0>;
+		compatible = "cirrus,cs2000-cp";
+		reg = <0x4f>;
+		clocks = <&audio_clkout>, <&x12_clk>;
+		clock-names = "clk_in", "ref_clk";
+
+		assigned-clocks = <&cs2000>;
+		assigned-clock-rates = <24576000>; /* 1/1 divide */
+	};
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&pfc {
+	pinctrl-0 = <&scif_clk_pins>;
+	pinctrl-names = "default";
+
+	avb_pins: avb {
+		mux {
+			groups = "avb_link", "avb_phy_int", "avb_mdc",
+				 "avb_mii";
+			function = "avb";
+		};
+
+		pins_mdc {
+			groups = "avb_mdc";
+			drive-strength = <24>;
+		};
+
+		pins_mii_tx {
+			pins = "PIN_AVB_TX_CTL", "PIN_AVB_TXC", "PIN_AVB_TD0",
+			       "PIN_AVB_TD1", "PIN_AVB_TD2", "PIN_AVB_TD3";
+			drive-strength = <12>;
+		};
+	};
+
+	i2c2_pins: i2c2 {
+		groups = "i2c2_a";
+		function = "i2c2";
+	};
+
+	scif2_pins: scif2 {
+		groups = "scif2_data_a";
+		function = "scif2";
+	};
+
+	scif_clk_pins: scif_clk {
+		groups = "scif_clk_a";
+		function = "scif_clk";
+	};
+
+	sdhi0_pins: sd0 {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};
+
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
+	};
+
+	sound_pins: sound {
+		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
+		function = "ssi";
+	};
+
+	sound_clk_pins: sound-clk {
+		groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
+			 "audio_clkout_a", "audio_clkout3_a";
+		function = "audio_clk";
+	};
+
+	usb1_pins: usb1 {
+		groups = "usb1";
+		function = "usb1";
+	};
+};
+
+&rcar_sound {
+	pinctrl-0 = <&sound_pins &sound_clk_pins>;
+	pinctrl-names = "default";
+
+	/* Single DAI */
+	#sound-dai-cells = <0>;
+
+	/* audio_clkout0/1/2/3 */
+	#clock-cells = <1>;
+	clock-frequency = <11289600 12288000>;
+
+	status = "okay";
+
+	/* update <audio_clk_b> to <cs2000> */
+	clocks = <&cpg CPG_MOD 1005>,
+		 <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>,
+		 <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>,
+		 <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>,
+		 <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>,
+		 <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>,
+		 <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>,
+		 <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>,
+		 <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>,
+		 <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>,
+		 <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>,
+		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+		 <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>,
+		 <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>,
+		 <&audio_clk_a>, <&cs2000>,
+		 <&audio_clk_c>,
+		 <&cpg CPG_CORE CPG_AUDIO_CLK_I>;
+
+	rcar_sound,dai {
+		dai0 {
+			playback = <&ssi0 &src0 &dvc0>;
+			capture  = <&ssi1 &src1 &dvc1>;
+		};
+	};
+};
+
+&scif2 {
+	pinctrl-0 = <&scif2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&scif_clk {
+	clock-frequency = <14745600>;
+};
+
+&sdhi0 {
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi0>;
+	vqmmc-supply = <&vccq_sdhi0>;
+	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	mmc-hs200-1_8v;
+	non-removable;
+	status = "okay";
+};
+
+&ssi1 {
+	shared-pin;
+};
+
+&usb2_phy1 {
+	pinctrl-0 = <&usb1_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&wdt0 {
+	timeout-sec = <60>;
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index b5636bb..bcfa53b 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -5,6 +5,7 @@
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-px5-evb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-firefly.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 7e69f1f..0be96ce 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -372,6 +372,39 @@
 			<32768>;
 	};
 
+	sdmmc: dwmmc@ff500000 {
+		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff500000 0x0 0x4000>;
+		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+		fifo-depth = <0x100>;
+		status = "disabled";
+	};
+
+	sdio: dwmmc@ff510000 {
+		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff510000 0x0 0x4000>;
+		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+		fifo-depth = <0x100>;
+		status = "disabled";
+	};
+
+	emmc: dwmmc@ff520000 {
+		compatible = "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff520000 0x0 0x4000>;
+		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+		fifo-depth = <0x100>;
+		status = "disabled";
+	};
+
 	gmac2io: ethernet@ff540000 {
 		compatible = "rockchip,rk3328-gmac";
 		reg = <0x0 0xff540000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
new file mode 100644
index 0000000..ba1d981
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3399.dtsi"
+
+/ {
+	model = "Firefly-RK3399 Board";
+	compatible = "firefly,firefly-rk3399", "rockchip,rk3399";
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		enable-gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>;
+		pwms = <&pwm0 0 25000 0>;
+		brightness-levels = <
+			  0   1   2   3   4   5   6   7
+			  8   9  10  11  12  13  14  15
+			 16  17  18  19  20  21  22  23
+			 24  25  26  27  28  29  30  31
+			 32  33  34  35  36  37  38  39
+			 40  41  42  43  44  45  46  47
+			 48  49  50  51  52  53  54  55
+			 56  57  58  59  60  61  62  63
+			 64  65  66  67  68  69  70  71
+			 72  73  74  75  76  77  78  79
+			 80  81  82  83  84  85  86  87
+			 88  89  90  91  92  93  94  95
+			 96  97  98  99 100 101 102 103
+			104 105 106 107 108 109 110 111
+			112 113 114 115 116 117 118 119
+			120 121 122 123 124 125 126 127
+			128 129 130 131 132 133 134 135
+			136 137 138 139 140 141 142 143
+			144 145 146 147 148 149 150 151
+			152 153 154 155 156 157 158 159
+			160 161 162 163 164 165 166 167
+			168 169 170 171 172 173 174 175
+			176 177 178 179 180 181 182 183
+			184 185 186 187 188 189 190 191
+			192 193 194 195 196 197 198 199
+			200 201 202 203 204 205 206 207
+			208 209 210 211 212 213 214 215
+			216 217 218 219 220 221 222 223
+			224 225 226 227 228 229 230 231
+			232 233 234 235 236 237 238 239
+			240 241 242 243 244 245 246 247
+			248 249 250 251 252 253 254 255>;
+		default-brightness-level = <200>;
+	};
+
+	clkin_gmac: external-gmac-clock {
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+		clock-output-names = "clkin_gmac";
+		#clock-cells = <0>;
+	};
+
+	dc_12v: dc-12v {
+		compatible = "regulator-fixed";
+		regulator-name = "dc_12v";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+	};
+
+	rt5640-sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "rockchip,rt5640-codec";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,mclk-fs = <256>;
+		simple-audio-card,widgets =
+			"Microphone", "Mic Jack",
+			"Headphone", "Headphone Jack";
+		simple-audio-card,routing =
+			"Mic Jack", "MICBIAS1",
+			"IN1P", "Mic Jack",
+			"Headphone Jack", "HPOL",
+			"Headphone Jack", "HPOR";
+
+		simple-audio-card,cpu {
+			sound-dai = <&i2s1>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&rt5640>;
+		};
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&rk808 1>;
+		clock-names = "ext_clock";
+		pinctrl-names = "default";
+		pinctrl-0 = <&wifi_enable_h>;
+
+		/*
+		 * On the module itself this is one of these (depending
+		 * on the actual card populated):
+		 * - SDIO_RESET_L_WL_REG_ON
+		 * - PDN (power down when low)
+		 */
+		reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+	};
+
+	/* switched by pmic_sleep */
+	vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc1v8_s3";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vcc_1v8>;
+	};
+
+	vcc3v3_pcie: vcc3v3-pcie-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio1 RK_PC1 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pcie_pwr_en>;
+		regulator-name = "vcc3v3_pcie";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&dc_12v>;
+	};
+
+	vcc3v3_sys: vcc3v3-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vcc_sys>;
+	};
+
+	/* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */
+	vcc5v0_host: vcc5v0-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio1 RK_PA0 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&vcc5v0_host_en>;
+		regulator-name = "vcc5v0_host";
+		regulator-always-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vcc_sys: vcc-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&dc_12v>;
+	};
+
+	vdd_log: vdd-log {
+		compatible = "pwm-regulator";
+		pwms = <&pwm2 0 25000 1>;
+		regulator-name = "vdd_log";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <1400000>;
+		vin-supply = <&vcc_sys>;
+	};
+};
+
+&cpu_l0 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+	cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_b0 {
+	cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+	cpu-supply = <&vdd_cpu_b>;
+};
+
+&emmc_phy {
+	status = "okay";
+};
+
+&gmac {
+	assigned-clocks = <&cru SCLK_RMII_SRC>;
+	assigned-clock-parents = <&clkin_gmac>;
+	clock_in_out = "input";
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 50000>;
+	tx_delay = <0x28>;
+	rx_delay = <0x11>;
+	status = "okay";
+};
+
+&i2c0 {
+	clock-frequency = <400000>;
+	i2c-scl-rising-time-ns = <168>;
+	i2c-scl-falling-time-ns = <4>;
+	status = "okay";
+
+	rk808: pmic@1b {
+		compatible = "rockchip,rk808";
+		reg = <0x1b>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+		#clock-cells = <1>;
+		clock-output-names = "xin32k", "rk808-clkout2";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int_l>;
+		rockchip,system-power-controller;
+		wakeup-source;
+
+		vcc1-supply = <&vcc_sys>;
+		vcc2-supply = <&vcc_sys>;
+		vcc3-supply = <&vcc_sys>;
+		vcc4-supply = <&vcc_sys>;
+		vcc6-supply = <&vcc_sys>;
+		vcc7-supply = <&vcc_sys>;
+		vcc8-supply = <&vcc3v3_sys>;
+		vcc9-supply = <&vcc_sys>;
+		vcc10-supply = <&vcc_sys>;
+		vcc11-supply = <&vcc_sys>;
+		vcc12-supply = <&vcc3v3_sys>;
+		vddio-supply = <&vcc1v8_pmu>;
+
+		regulators {
+			vdd_center: DCDC_REG1 {
+				regulator-name = "vdd_center";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vdd_cpu_l: DCDC_REG2 {
+				regulator-name = "vdd_cpu_l";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-name = "vcc_ddr";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+				};
+			};
+
+			vcc_1v8: DCDC_REG4 {
+				regulator-name = "vcc_1v8";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			vcc1v8_dvp: LDO_REG1 {
+				regulator-name = "vcc1v8_dvp";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc2v8_dvp: LDO_REG2 {
+				regulator-name = "vcc2v8_dvp";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc1v8_pmu: LDO_REG3 {
+				regulator-name = "vcc1v8_pmu";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			vcc_sdio: LDO_REG4 {
+				regulator-name = "vcc_sdio";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3300000>;
+				};
+			};
+
+			vcca3v0_codec: LDO_REG5 {
+				regulator-name = "vcca3v0_codec";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_1v5: LDO_REG6 {
+				regulator-name = "vcc_1v5";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1500000>;
+				};
+			};
+
+			vcca1v8_codec: LDO_REG7 {
+				regulator-name = "vcca1v8_codec";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc_3v0: LDO_REG8 {
+				regulator-name = "vcc_3v0";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3000000>;
+				};
+			};
+
+			vcc3v3_s3: vcc_lan: SWITCH_REG1 {
+				regulator-name = "vcc3v3_s3";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc3v3_s0: SWITCH_REG2 {
+				regulator-name = "vcc3v3_s0";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+		};
+	};
+
+	vdd_cpu_b: regulator@40 {
+		compatible = "silergy,syr827";
+		reg = <0x40>;
+		fcs,suspend-voltage-selector = <0>;
+		regulator-name = "vdd_cpu_b";
+		regulator-min-microvolt = <712500>;
+		regulator-max-microvolt = <1500000>;
+		regulator-ramp-delay = <1000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+
+		regulator-state-mem {
+			regulator-off-in-suspend;
+		};
+	};
+
+	vdd_gpu: regulator@41 {
+		compatible = "silergy,syr828";
+		reg = <0x41>;
+		fcs,suspend-voltage-selector = <1>;
+		regulator-name = "vdd_gpu";
+		regulator-min-microvolt = <712500>;
+		regulator-max-microvolt = <1500000>;
+		regulator-ramp-delay = <1000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+
+		regulator-state-mem {
+			regulator-off-in-suspend;
+		};
+	};
+};
+
+&i2c1 {
+	i2c-scl-rising-time-ns = <300>;
+	i2c-scl-falling-time-ns = <15>;
+	status = "okay";
+
+	rt5640: rt5640@1c {
+		compatible = "realtek,rt5640";
+		reg = <0x1c>;
+		clocks = <&cru SCLK_I2S_8CH_OUT>;
+		clock-names = "mclk";
+		realtek,in1-differential;
+		#sound-dai-cells = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&rt5640_hpcon>;
+	};
+};
+
+&i2c3 {
+	i2c-scl-rising-time-ns = <450>;
+	i2c-scl-falling-time-ns = <15>;
+	status = "okay";
+};
+
+&i2c4 {
+	i2c-scl-rising-time-ns = <600>;
+	i2c-scl-falling-time-ns = <20>;
+	status = "okay";
+
+	accelerometer@68 {
+		compatible = "invensense,mpu6500";
+		reg = <0x68>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <RK_PC6 IRQ_TYPE_EDGE_RISING>;
+	};
+};
+
+&i2s0 {
+	rockchip,playback-channels = <8>;
+	rockchip,capture-channels = <8>;
+	#sound-dai-cells = <0>;
+	status = "okay";
+};
+
+&i2s1 {
+	rockchip,playback-channels = <2>;
+	rockchip,capture-channels = <2>;
+	#sound-dai-cells = <0>;
+	status = "okay";
+};
+
+&i2s2 {
+	#sound-dai-cells = <0>;
+	status = "okay";
+};
+
+&io_domains {
+	status = "okay";
+
+	bt656-supply = <&vcc1v8_dvp>;
+	audio-supply = <&vcca1v8_codec>;
+	sdmmc-supply = <&vcc_sdio>;
+	gpio1830-supply = <&vcc_3v0>;
+};
+
+&pcie_phy {
+	status = "okay";
+};
+
+&pcie0 {
+	ep-gpios = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>;
+	num-lanes = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_clkreqn>;
+	status = "okay";
+};
+
+&pmu_io_domains {
+	pmu1830-supply = <&vcc_3v0>;
+	status = "okay";
+};
+
+&pinctrl {
+	buttons {
+		pwrbtn: pwrbtn {
+			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	lcd-panel {
+		lcd_panel_reset: lcd-panel-reset {
+			rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	pcie {
+		pcie_pwr_en: pcie-pwr-en {
+			rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		pcie_3g_drv: pcie-3g-drv {
+			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	pmic {
+		vsel1_gpio: vsel1-gpio {
+			rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		vsel2_gpio: vsel2-gpio {
+			rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+
+	sdio-pwrseq {
+		wifi_enable_h: wifi-enable-h {
+			rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	rt5640 {
+		rt5640_hpcon: rt5640-hpcon {
+			rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		pmic_int_l: pmic-int-l {
+			rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	usb2 {
+		vcc5v0_host_en: vcc5v0-host-en {
+			rockchip,pins = <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&pwm0 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&saradc {
+	vref-supply = <&vcca1v8_s3>;
+	status = "okay";
+};
+
+&sdhci {
+	bus-width = <8>;
+	keep-power-in-suspend;
+	mmc-hs400-1_8v;
+	mmc-hs400-enhanced-strobe;
+	non-removable;
+	status = "okay";
+};
+
+&tsadc {
+	/* tshut mode 0:CRU 1:GPIO */
+	rockchip,hw-tshut-mode = <1>;
+	/* tshut polarity 0:LOW 1:HIGH */
+	rockchip,hw-tshut-polarity = <1>;
+	status = "okay";
+};
+
+&u2phy0 {
+	status = "okay";
+
+	u2phy0_otg: otg-port {
+		status = "okay";
+	};
+
+	u2phy0_host: host-port {
+		phy-supply = <&vcc5v0_host>;
+		status = "okay";
+	};
+};
+
+&u2phy1 {
+	status = "okay";
+
+	u2phy1_otg: otg-port {
+		status = "okay";
+	};
+
+	u2phy1_host: host-port {
+		phy-supply = <&vcc5v0_host>;
+		status = "okay";
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_xfer &uart0_cts>;
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_host0_ohci {
+	status = "okay";
+};
+
+&usb_host1_ehci {
+	status = "okay";
+};
+
+&usb_host1_ohci {
+	status = "okay";
+};
+
+&usbdrd3_0 {
+	status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+	status = "okay";
+	dr_mode = "otg";
+};
+
+&usbdrd3_1 {
+	status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+	status = "okay";
+	dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 0d960b7..eb50593 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -44,7 +44,7 @@
 
 #include <dt-bindings/input/input.h>
 #include "rk3399.dtsi"
-#include "rk3399-opp.dtsi"
+#include "rk3399-op1-opp.dtsi"
 
 / {
 	chosen {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
new file mode 100644
index 0000000..be7fe63
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-op1-opp.dtsi
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+	cluster0_opp: opp-table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <408000000>;
+			opp-microvolt = <800000>;
+			clock-latency-ns = <40000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <825000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <816000000>;
+			opp-microvolt = <850000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1008000000>;
+			opp-microvolt = <900000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <975000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1416000000>;
+			opp-microvolt = <1100000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1512000000>;
+			opp-microvolt = <1150000>;
+		};
+	};
+
+	cluster1_opp: opp-table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <408000000>;
+			opp-microvolt = <800000>;
+			clock-latency-ns = <40000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <800000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <816000000>;
+			opp-microvolt = <825000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <1008000000>;
+			opp-microvolt = <850000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <900000>;
+		};
+		opp05 {
+			opp-hz = /bits/ 64 <1416000000>;
+			opp-microvolt = <975000>;
+		};
+		opp06 {
+			opp-hz = /bits/ 64 <1608000000>;
+			opp-microvolt = <1050000>;
+		};
+		opp07 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-microvolt = <1150000>;
+		};
+		opp08 {
+			opp-hz = /bits/ 64 <2016000000>;
+			opp-microvolt = <1250000>;
+		};
+	};
+};
+
+&cpu_l0 {
+	operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l1 {
+	operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l2 {
+	operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l3 {
+	operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_b0 {
+	operating-points-v2 = <&cluster1_opp>;
+};
+
+&cpu_b1 {
+	operating-points-v2 = <&cluster1_opp>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
index dd82e16..c83460d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
@@ -56,22 +56,18 @@
 		};
 		opp02 {
 			opp-hz = /bits/ 64 <816000000>;
-			opp-microvolt = <800000>;
+			opp-microvolt = <850000>;
 		};
 		opp03 {
 			opp-hz = /bits/ 64 <1008000000>;
-			opp-microvolt = <875000>;
+			opp-microvolt = <925000>;
 		};
 		opp04 {
 			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt = <925000>;
+			opp-microvolt = <1000000>;
 		};
 		opp05 {
 			opp-hz = /bits/ 64 <1416000000>;
-			opp-microvolt = <1050000>;
-		};
-		opp06 {
-			opp-hz = /bits/ 64 <1512000000>;
 			opp-microvolt = <1125000>;
 		};
 	};
@@ -107,15 +103,11 @@
 		};
 		opp06 {
 			opp-hz = /bits/ 64 <1608000000>;
-			opp-microvolt = <1075000>;
+			opp-microvolt = <1100000>;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <1800000000>;
-			opp-microvolt = <1150000>;
-		};
-		opp08 {
-			opp-hz = /bits/ 64 <2016000000>;
-			opp-microvolt = <1250000>;
+			opp-microvolt = <1200000>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index f4f3c96..69c56f7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -56,6 +56,7 @@
 	#size-cells = <2>;
 
 	aliases {
+		ethernet0 = &gmac;
 		i2c0 = &i2c0;
 		i2c1 = &i2c1;
 		i2c2 = &i2c2;
@@ -220,7 +221,7 @@
 		#size-cells = <2>;
 		#interrupt-cells = <1>;
 		aspm-no-l0s;
-		bus-range = <0x0 0x1>;
+		bus-range = <0x0 0x1f>;
 		clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
 			 <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
 		clock-names = "aclk", "aclk-perf",
@@ -239,8 +240,8 @@
 		msi-map = <0x0 &its 0x0 0x1000>;
 		phys = <&pcie_phy>;
 		phy-names = "pcie-phy";
-		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
-			  0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
+		ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x1e00000
+			  0x81000000 0x0 0xfbe00000 0x0 0xfbe00000 0x0 0x100000>;
 		resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
 			 <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
 			 <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
@@ -769,11 +770,6 @@
 		status = "disabled";
 	};
 
-	qos_sd: qos@ffa74000 {
-		compatible = "syscon";
-		reg = <0x0 0xffa74000 0x0 0x20>;
-	};
-
 	qos_emmc: qos@ffa58000 {
 		compatible = "syscon";
 		reg = <0x0 0xffa58000 0x0 0x20>;
@@ -784,6 +780,41 @@
 		reg = <0x0 0xffa5c000 0x0 0x20>;
 	};
 
+	qos_pcie: qos@ffa60080 {
+		compatible = "syscon";
+		reg = <0x0 0xffa60080 0x0 0x20>;
+	};
+
+	qos_usb_host0: qos@ffa60100 {
+		compatible = "syscon";
+		reg = <0x0 0xffa60100 0x0 0x20>;
+	};
+
+	qos_usb_host1: qos@ffa60180 {
+		compatible = "syscon";
+		reg = <0x0 0xffa60180 0x0 0x20>;
+	};
+
+	qos_usb_otg0: qos@ffa70000 {
+		compatible = "syscon";
+		reg = <0x0 0xffa70000 0x0 0x20>;
+	};
+
+	qos_usb_otg1: qos@ffa70080 {
+		compatible = "syscon";
+		reg = <0x0 0xffa70080 0x0 0x20>;
+	};
+
+	qos_sd: qos@ffa74000 {
+		compatible = "syscon";
+		reg = <0x0 0xffa74000 0x0 0x20>;
+	};
+
+	qos_sdioaudio: qos@ffa76000 {
+		compatible = "syscon";
+		reg = <0x0 0xffa76000 0x0 0x20>;
+	};
+
 	qos_hdcp: qos@ffa90000 {
 		compatible = "syscon";
 		reg = <0x0 0xffa90000 0x0 0x20>;
@@ -854,6 +885,11 @@
 		reg = <0x0 0xffad0000 0x0 0x20>;
 	};
 
+	qos_perihp: qos@ffad8080 {
+		compatible = "syscon";
+		reg = <0x0 0xffad8080 0x0 0x20>;
+	};
+
 	qos_gpu: qos@ffae0000 {
 		compatible = "syscon";
 		reg = <0x0 0xffae0000 0x0 0x20>;
@@ -1676,6 +1712,91 @@
 			};
 		};
 
+		sdio0 {
+			sdio0_bus1: sdio0-bus1 {
+				rockchip,pins =
+					<2 RK_PC4 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_bus4: sdio0-bus4 {
+				rockchip,pins =
+					<2 RK_PC4 RK_FUNC_1 &pcfg_pull_up>,
+					<2 RK_PC5 RK_FUNC_1 &pcfg_pull_up>,
+					<2 RK_PC6 RK_FUNC_1 &pcfg_pull_up>,
+					<2 RK_PC7 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_cmd: sdio0-cmd {
+				rockchip,pins =
+					<2 RK_PD0 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_clk: sdio0-clk {
+				rockchip,pins =
+					<2 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			sdio0_cd: sdio0-cd {
+				rockchip,pins =
+					<2 RK_PD2 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_pwr: sdio0-pwr {
+				rockchip,pins =
+					<2 RK_PD3 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_bkpwr: sdio0-bkpwr {
+				rockchip,pins =
+					<2 RK_PD4 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_wp: sdio0-wp {
+				rockchip,pins =
+					<0 RK_PA3 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_int: sdio0-int {
+				rockchip,pins =
+					<0 RK_PA4 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		sdmmc {
+			sdmmc_bus1: sdmmc-bus1 {
+				rockchip,pins =
+					<4 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_bus4: sdmmc-bus4 {
+				rockchip,pins =
+					<4 RK_PB0 RK_FUNC_1 &pcfg_pull_up>,
+					<4 RK_PB1 RK_FUNC_1 &pcfg_pull_up>,
+					<4 RK_PB2 RK_FUNC_1 &pcfg_pull_up>,
+					<4 RK_PB3 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_clk: sdmmc-clk {
+				rockchip,pins =
+					<4 RK_PB4 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			sdmmc_cmd: sdmmc-cmd {
+				rockchip,pins =
+					<4 RK_PB5 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_cd: sdmcc-cd {
+				rockchip,pins =
+					<0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_wp: sdmmc-wp {
+				rockchip,pins =
+					<0 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
 		sleep {
 			ap_pwroff: ap-pwroff {
 				rockchip,pins = <1 5 RK_FUNC_1 &pcfg_pull_none>;
@@ -1691,6 +1812,11 @@
 				rockchip,pins =
 					<4 21 RK_FUNC_1 &pcfg_pull_none>;
 			};
+
+			spdif_bus_1: spdif-bus-1 {
+				rockchip,pins =
+					<3 RK_PC0 RK_FUNC_3 &pcfg_pull_none>;
+			};
 		};
 
 		spi0 {
@@ -1950,6 +2076,19 @@
 			};
 		};
 
+		hdmi {
+			hdmi_i2c_xfer: hdmi-i2c-xfer {
+				rockchip,pins =
+					<4 RK_PC1 RK_FUNC_3 &pcfg_pull_none>,
+					<4 RK_PC0 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			hdmi_cec: hdmi-cec {
+				rockchip,pins =
+					<4 RK_PC7 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
 		pcie {
 			pcie_clkreqn: pci-clkreqn {
 				rockchip,pins =
@@ -1960,6 +2099,16 @@
 				rockchip,pins =
 					<4 24 RK_FUNC_1 &pcfg_pull_none>;
 			};
+
+			pcie_clkreqn_cpm: pci-clkreqn-cpm {
+				rockchip,pins =
+					<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+
+			pcie_clkreqnb_cpm: pci-clkreqnb-cpm {
+				rockchip,pins =
+					<4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
 		};
 
 	};
diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile
index 5538598..4a13a3a 100644
--- a/arch/arm64/boot/dts/socionext/Makefile
+++ b/arch/arm64/boot/dts/socionext/Makefile
@@ -1,5 +1,7 @@
 dtb-$(CONFIG_ARCH_UNIPHIER) += \
+	uniphier-ld11-global.dtb \
 	uniphier-ld11-ref.dtb \
+	uniphier-ld20-global.dtb \
 	uniphier-ld20-ref.dtb
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
new file mode 100644
index 0000000..1153570
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
@@ -0,0 +1,70 @@
+/*
+ * Device Tree Source for UniPhier LD11 Global Board
+ *
+ * Copyright (C) 2016-2017 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *           Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+/include/ "uniphier-ld11.dtsi"
+
+/ {
+	model = "UniPhier LD11 Global Board (REF_LD11_GP)";
+	compatible = "socionext,uniphier-ld11-global",
+		     "socionext,uniphier-ld11";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0 0x80000000 0 0x40000000>;
+	};
+};
+
+&serial0 {
+	status = "okay";
+};
+
+&serial1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	eeprom@50 {
+		compatible = "st,24c64", "atmel,24c64";
+		reg = <0x50>;
+		pagesize = <32>;
+	};
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
index 0173e93..cc8ebe3 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 151c043..bdce5b8 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -4,46 +4,10 @@
  * Copyright (C) 2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
-/memreserve/ 0x80000000 0x00080000;
+/memreserve/ 0x80000000 0x02000000;
 
 / {
 	compatible = "socionext,uniphier-ld11";
@@ -89,31 +53,31 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@245000000 {
+		opp-245000000 {
 			opp-hz = /bits/ 64 <245000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@250000000 {
+		opp-250000000 {
 			opp-hz = /bits/ 64 <250000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@490000000 {
+		opp-490000000 {
 			opp-hz = /bits/ 64 <490000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@653334000 {
+		opp-653334000 {
 			opp-hz = /bits/ 64 <653334000>;
 			clock-latency-ns = <300>;
 		};
-		opp@666667000 {
+		opp-666667000 {
 			opp-hz = /bits/ 64 <666667000>;
 			clock-latency-ns = <300>;
 		};
-		opp@980000000 {
+		opp-980000000 {
 			opp-hz = /bits/ 64 <980000000>;
 			clock-latency-ns = <300>;
 		};
@@ -268,7 +232,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
@@ -310,6 +274,11 @@
 			bus-width = <8>;
 			mmc-ddr-1_8v;
 			mmc-hs200-1_8v;
+			cdns,phy-input-delay-legacy = <4>;
+			cdns,phy-input-delay-mmc-highspeed = <2>;
+			cdns,phy-input-delay-mmc-ddr = <3>;
+			cdns,phy-dll-delay-sdclk = <21>;
+			cdns,phy-dll-delay-sdclk-hsmmc = <21>;
 		};
 
 		usb0: usb@5a800100 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
new file mode 100644
index 0000000..9f620d4
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -0,0 +1,52 @@
+/*
+ * Device Tree Source for UniPhier LD20 Global Board
+ *
+ * Copyright (C) 2015-2017 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *           Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+/include/ "uniphier-ld20.dtsi"
+
+/ {
+	model = "UniPhier LD20 Global Board (REF_LD20_GP)";
+	compatible = "socionext,uniphier-ld20-global",
+		     "socionext,uniphier-ld20";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0 0x80000000 0 0xc0000000>;
+	};
+};
+
+&serial0 {
+	status = "okay";
+};
+
+&serial1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index fca4c47..494166a 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -4,43 +4,7 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
 /dts-v1/;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 6193f11..de1e753 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -4,46 +4,10 @@
  * Copyright (C) 2015-2016 Socionext Inc.
  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
  *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
  */
 
-/memreserve/ 0x80000000 0x00080000;
+/memreserve/ 0x80000000 0x02000000;
 
 / {
 	compatible = "socionext,uniphier-ld20";
@@ -116,35 +80,35 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@250000000 {
+		opp-250000000 {
 			opp-hz = /bits/ 64 <250000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@275000000 {
+		opp-275000000 {
 			opp-hz = /bits/ 64 <275000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@550000000 {
+		opp-550000000 {
 			opp-hz = /bits/ 64 <550000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@666667000 {
+		opp-666667000 {
 			opp-hz = /bits/ 64 <666667000>;
 			clock-latency-ns = <300>;
 		};
-		opp@733334000 {
+		opp-733334000 {
 			opp-hz = /bits/ 64 <733334000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1100000000 {
+		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			clock-latency-ns = <300>;
 		};
@@ -154,35 +118,35 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@250000000 {
+		opp-250000000 {
 			opp-hz = /bits/ 64 <250000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@275000000 {
+		opp-275000000 {
 			opp-hz = /bits/ 64 <275000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@550000000 {
+		opp-550000000 {
 			opp-hz = /bits/ 64 <550000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@666667000 {
+		opp-666667000 {
 			opp-hz = /bits/ 64 <666667000>;
 			clock-latency-ns = <300>;
 		};
-		opp@733334000 {
+		opp-733334000 {
 			opp-hz = /bits/ 64 <733334000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			clock-latency-ns = <300>;
 		};
-		opp@1100000000 {
+		opp-1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
 			clock-latency-ns = <300>;
 		};
@@ -337,7 +301,7 @@
 			pinctrl-0 = <&pinctrl_system_bus>;
 		};
 
-		smpctrl@59800000 {
+		smpctrl@59801000 {
 			compatible = "socionext,uniphier-smpctrl";
 			reg = <0x59801000 0x400>;
 		};
@@ -384,6 +348,11 @@
 			bus-width = <8>;
 			mmc-ddr-1_8v;
 			mmc-hs200-1_8v;
+			cdns,phy-input-delay-legacy = <4>;
+			cdns,phy-input-delay-mmc-highspeed = <2>;
+			cdns,phy-input-delay-mmc-ddr = <3>;
+			cdns,phy-dll-delay-sdclk = <21>;
+			cdns,phy-dll-delay-sdclk-hsmmc = <21>;
 		};
 
 		soc-glue@5f800000 {
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
index 316dc71..d83bf78 100644
--- a/arch/arm64/boot/dts/zte/zx296718.dtsi
+++ b/arch/arm64/boot/dts/zte/zx296718.dtsi
@@ -118,27 +118,27 @@
 		compatible = "operating-points-v2";
 		opp-shared;
 
-		opp@500000000 {
+		opp-500000000 {
 			opp-hz = /bits/ 64 <500000000>;
 			clock-latency-ns = <500000>;
 		};
 
-		opp@648000000 {
+		opp-648000000 {
 			opp-hz = /bits/ 64 <648000000>;
 			clock-latency-ns = <500000>;
 		};
 
-		opp@800000000 {
+		opp-800000000 {
 			opp-hz = /bits/ 64 <800000000>;
 			clock-latency-ns = <500000>;
 		};
 
-		opp@1000000000 {
+		opp-1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
 			clock-latency-ns = <500000>;
 		};
 
-		opp@1188000000 {
+		opp-1188000000 {
 			opp-hz = /bits/ 64 <1188000000>;
 			clock-latency-ns = <500000>;
 		};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 65cdd87..6c7d147 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -36,6 +36,7 @@
 CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_IPROC=y
 CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
 CONFIG_ARCH_EXYNOS=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_LG1K=y
@@ -56,18 +57,21 @@
 CONFIG_ARCH_THUNDER2=y
 CONFIG_ARCH_UNIPHIER=y
 CONFIG_ARCH_VEXPRESS=y
-CONFIG_ARCH_VULCAN=y
 CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PCI_LAYERSCAPE=y
 CONFIG_PCI_HISI=y
 CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -89,6 +93,7 @@
 CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -152,6 +157,7 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
 CONFIG_SRAM=y
 CONFIG_EEPROM_AT25=m
 # CONFIG_SCSI_PROC_FS is not set
@@ -186,11 +192,14 @@
 CONFIG_MVNETA=y
 CONFIG_MVPP2=y
 CONFIG_SKY2=y
+CONFIG_QCOM_EMAC=m
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=m
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_AT803X_PHY=m
+CONFIG_MARVELL_PHY=m
 CONFIG_MESON_GXL_PHY=m
 CONFIG_MICREL_PHY=y
 CONFIG_REALTEK_PHY=m
@@ -208,6 +217,8 @@
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -257,12 +268,14 @@
 CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +305,7 @@
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +314,14 @@
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -320,6 +336,11 @@
 CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
 CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
+CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DEVICES=y
+CONFIG_RC_DECODERS=y
+CONFIG_IR_MESON=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 # CONFIG_DVB_NET is not set
 CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -361,6 +382,7 @@
 CONFIG_SND_SOC_SAMSUNG=y
 CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_AK4613=y
+CONFIG_SND_SIMPLE_CARD=y
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
@@ -415,6 +437,7 @@
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
@@ -473,8 +496,10 @@
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,11 +509,18 @@
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
 CONFIG_ARM_SCPI_PROTOCOL=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0e99978..59cca1d 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH	\
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
-#define BAD_MADT_GICC_ENTRY(entry, end)						\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end)					\
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	(unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef	CONFIG_ACPI
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 29cb2ca..4214c38 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -433,7 +433,6 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset);
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
 
 s32 aarch64_insn_adrp_get_offset(u32 insn);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142c..b4d13d9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 5d17f37..82cd075 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,7 +11,6 @@
  *
  */
 
-#include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/init.h>
 
@@ -117,20 +116,6 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
 				   set_permissions, md);
 }
 
-static int __init arm64_dmi_init(void)
-{
-	/*
-	 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
-	 * be called early because dmi_id_init(), which is an arch_initcall
-	 * itself, depends on dmi_scan_machine() having been called already.
-	 */
-	dmi_scan_machine();
-	if (dmi_available)
-		dmi_set_dump_stack_arch_desc();
-	return 0;
-}
-core_initcall(arm64_dmi_init);
-
 /*
  * UpdateCapsule() depends on the system being shutdown via
  * ResetSystem().
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index b884a92..cd87213 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -255,6 +255,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 	return ret;
 }
 
+static
 int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
 {
 	struct aarch64_insn_patch patch = {
@@ -267,8 +268,8 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
 	if (cnt <= 0)
 		return -EINVAL;
 
-	return stop_machine(aarch64_insn_patch_text_cb, &patch,
-			    cpu_online_mask);
+	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+				       cpu_online_mask);
 }
 
 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4f0e3eb..c7e3e63 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 		return NULL;
 
 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}
 
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6e0e16a..3211198 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -961,8 +961,7 @@ void smp_send_stop(void)
 		cpumask_copy(&mask, cpu_online_mask);
 		cpumask_clear_cpu(smp_processor_id(), &mask);
 
-		if (system_state == SYSTEM_BOOTING ||
-		    system_state == SYSTEM_RUNNING)
+		if (system_state <= SYSTEM_RUNNING)
 			pr_crit("SMP: stopping secondary CPUs\n");
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 5977969..da33c90 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -70,7 +70,7 @@ void __init time_init(void)
 	u32 arch_timer_rate;
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 
 	tick_setup_hrtimer_broadcast();
 
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0824353..79244c7 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,7 +11,7 @@
  * for more details.
  */
 
-#include <linux/acpi.h>
+#include <linux/arch_topology.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
@@ -23,227 +23,11 @@
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/cpufreq.h>
 
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-static DEFINE_MUTEX(cpu_scale_mutex);
-
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return per_cpu(cpu_scale, cpu);
-}
-
-static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
-{
-	per_cpu(cpu_scale, cpu) = capacity;
-}
-
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-
-	return sprintf(buf, "%lu\n",
-			arch_scale_cpu_capacity(NULL, cpu->dev.id));
-}
-
-static ssize_t cpu_capacity_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf,
-				  size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int this_cpu = cpu->dev.id, i;
-	unsigned long new_capacity;
-	ssize_t ret;
-
-	if (count) {
-		ret = kstrtoul(buf, 0, &new_capacity);
-		if (ret)
-			return ret;
-		if (new_capacity > SCHED_CAPACITY_SCALE)
-			return -EINVAL;
-
-		mutex_lock(&cpu_scale_mutex);
-		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
-			set_capacity_scale(i, new_capacity);
-		mutex_unlock(&cpu_scale_mutex);
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
-
-static int register_cpu_capacity_sysctl(void)
-{
-	int i;
-	struct device *cpu;
-
-	for_each_possible_cpu(i) {
-		cpu = get_cpu_device(i);
-		if (!cpu) {
-			pr_err("%s: too early to get CPU%d device!\n",
-			       __func__, i);
-			continue;
-		}
-		device_create_file(cpu, &dev_attr_cpu_capacity);
-	}
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
-static u32 capacity_scale;
-static u32 *raw_capacity;
-static bool cap_parsing_failed;
-
-static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
-{
-	int ret;
-	u32 cpu_capacity;
-
-	if (cap_parsing_failed)
-		return;
-
-	ret = of_property_read_u32(cpu_node,
-				   "capacity-dmips-mhz",
-				   &cpu_capacity);
-	if (!ret) {
-		if (!raw_capacity) {
-			raw_capacity = kcalloc(num_possible_cpus(),
-					       sizeof(*raw_capacity),
-					       GFP_KERNEL);
-			if (!raw_capacity) {
-				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
-				cap_parsing_failed = true;
-				return;
-			}
-		}
-		capacity_scale = max(cpu_capacity, capacity_scale);
-		raw_capacity[cpu] = cpu_capacity;
-		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
-			cpu_node->full_name, raw_capacity[cpu]);
-	} else {
-		if (raw_capacity) {
-			pr_err("cpu_capacity: missing %s raw capacity\n",
-				cpu_node->full_name);
-			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
-		}
-		cap_parsing_failed = true;
-		kfree(raw_capacity);
-	}
-}
-
-static void normalize_cpu_capacity(void)
-{
-	u64 capacity;
-	int cpu;
-
-	if (!raw_capacity || cap_parsing_failed)
-		return;
-
-	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
-	mutex_lock(&cpu_scale_mutex);
-	for_each_possible_cpu(cpu) {
-		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
-			 cpu, raw_capacity[cpu]);
-		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
-			/ capacity_scale;
-		set_capacity_scale(cpu, capacity);
-		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			cpu, arch_scale_cpu_capacity(NULL, cpu));
-	}
-	mutex_unlock(&cpu_scale_mutex);
-}
-
-#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-
-static int
-init_cpu_capacity_callback(struct notifier_block *nb,
-			   unsigned long val,
-			   void *data)
-{
-	struct cpufreq_policy *policy = data;
-	int cpu;
-
-	if (cap_parsing_failed || cap_parsing_done)
-		return 0;
-
-	switch (val) {
-	case CPUFREQ_NOTIFY:
-		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
-				cpumask_pr_args(policy->related_cpus),
-				cpumask_pr_args(cpus_to_visit));
-		cpumask_andnot(cpus_to_visit,
-			       cpus_to_visit,
-			       policy->related_cpus);
-		for_each_cpu(cpu, policy->related_cpus) {
-			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
-					    policy->cpuinfo.max_freq / 1000UL;
-			capacity_scale = max(raw_capacity[cpu], capacity_scale);
-		}
-		if (cpumask_empty(cpus_to_visit)) {
-			normalize_cpu_capacity();
-			kfree(raw_capacity);
-			pr_debug("cpu_capacity: parsing done\n");
-			cap_parsing_done = true;
-			schedule_work(&parsing_done_work);
-		}
-	}
-	return 0;
-}
-
-static struct notifier_block init_cpu_capacity_notifier = {
-	.notifier_call = init_cpu_capacity_callback,
-};
-
-static int __init register_cpufreq_notifier(void)
-{
-	/*
-	 * on ACPI-based systems we need to use the default cpu capacity
-	 * until we have the necessary code to parse the cpu capacity, so
-	 * skip registering cpufreq notifier.
-	 */
-	if (!acpi_disabled || cap_parsing_failed)
-		return -EINVAL;
-
-	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
-		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
-		return -ENOMEM;
-	}
-	cpumask_copy(cpus_to_visit, cpu_possible_mask);
-
-	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
-}
-core_initcall(register_cpufreq_notifier);
-
-static void parsing_done_workfn(struct work_struct *work)
-{
-	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
-}
-
-#else
-static int __init free_raw_capacity(void)
-{
-	kfree(raw_capacity);
-
-	return 0;
-}
-core_initcall(free_raw_capacity);
-#endif
-
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -255,7 +39,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 
 	for_each_possible_cpu(cpu) {
 		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
-			parse_cpu_capacity(cpu_node, cpu);
+			topology_parse_cpu_capacity(cpu_node, cpu);
 			of_node_put(cpu_node);
 			return cpu;
 		}
@@ -400,16 +184,14 @@ static int __init parse_dt_topology(void)
 	 * cluster with restricted subnodes.
 	 */
 	map = of_get_child_by_name(cn, "cpu-map");
-	if (!map) {
-		cap_parsing_failed = true;
+	if (!map)
 		goto out;
-	}
 
 	ret = parse_cluster(map, 0);
 	if (ret != 0)
 		goto out_map;
 
-	normalize_cpu_capacity();
+	topology_normalize_cpu_scale();
 
 	/*
 	 * Check that all cores are in the topology; the SMP code will
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31..7492d90 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -220,11 +220,10 @@ void update_vsyscall(struct timekeeper *tk)
 	if (!use_syscall) {
 		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
-		vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-		vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+		vdso_data->raw_time_sec         = tk->raw_sec;
+		vdso_data->raw_time_nsec        = tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
 		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-		/* tkr_raw.xtime_nsec == 0 */
 		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
 		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
 		/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b467..76320e9 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@
 	seqcnt_check fail=monotonic_raw
 
 	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12
 
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c..3f96155 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e3..6260b69 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				     ICC_BPR1_EL1_SHIFT;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 71f9305..c870d6f 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
 	/* temporary registers for internal BPF JIT */
 	[TMP_REG_1] = A64_R(10),
 	[TMP_REG_2] = A64_R(11),
+	[TMP_REG_3] = A64_R(12),
 	/* tail_call_cnt */
 	[TCALL_CNT] = A64_R(26),
 	/* temporary register for blinding constants */
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-		emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
 		jmp_offset = -3;
 		check_imm19(jmp_offset);
-		emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 		break;
 
 	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 85d4af9..dbdbb8a 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)							\
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index f0eaf047..a3c8d05 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -45,7 +45,6 @@
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += signal.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index b9eb3da..7c87b5b 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -96,11 +96,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define release_segments(mm)		do { } while (0)
 
 /*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
-/*
  * saved kernel SP and DP of a blocked thread.
  */
 #ifdef _BIG_ENDIAN
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 13a97aa..1c44d3b 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += siginfo.h
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index e299d30..a2cdb15 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -69,14 +69,6 @@ void hard_reset_now (void)
 	while(1) /* waiting for RETRIBUTION! */ ;
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
  * it will be un-nested during _resume and _ret_from_sys_call when the
  * new thread is scheduled.
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index c530a8f..fe87b38 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -85,14 +85,6 @@ hard_reset_now(void)
 }
 
 /*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->erp;
-}
-
-/*
  * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
  * It will be unnested during _resume and _ret_from_sys_call when the new thread
  * is scheduled.
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 2890099..acc5781 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -36,7 +36,6 @@
 generic-y += sections.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
 generic-y += statfs.h
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 15b815d..bc2729e 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index ddaeb9c..e4d08d7 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm)		do { } while (0)
 #define forget_segments()		do { } while (0)
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	((tsk)->thread.frame0->pc)
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bdde..139093f 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data	__attribute__((__section__(".data")))
+
 #endif
 
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 5a4c92a..a957b37 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(tsk->thread.pc))
-		return ((unsigned long *)tsk->thread.fp)[2];
-	else
-		return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
 	unsigned long hsr0 = __get_HSR(0);
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index da82c25..46aa289 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 3ae8525..6e3d36f 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,7 +15,7 @@
 	select OF_IRQ
 	select OF_EARLY_FLATTREE
 	select HAVE_MEMBLOCK
-	select CLKSRC_OF
+	select TIMER_OF
 	select H8300_TMR8
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 757cdeb..99c8246 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -54,7 +54,6 @@
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 65132d7..afa5314 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 0f5db5b..d1ddcab 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long fp, pc;
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index c8c25a4..6be15d6 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -246,5 +246,5 @@ void __init calibrate_delay(void)
 void __init time_init(void)
 {
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 6b45ef7..0fc9cb0 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -41,7 +41,6 @@
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 45a8254..ce67940 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -33,9 +33,6 @@
 /*  task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;
 
-/*  this is defined in arch/process.c  */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
 /*
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index de715ba..656050c 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -61,14 +61,6 @@ void arch_cpu_idle(void)
 }
 
 /*
- *  Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return 0;
-}
-
-/*
  * Copy architecture-specific thread state
  */
 int copy_thread(unsigned long clone_flags, unsigned long usp,
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index ec90afd..c599eb1 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
 	long uncleared;
 
 	while (count > PAGE_SIZE) {
-		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-						PAGE_SIZE);
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
 		if (uncleared)
 			return count - (PAGE_SIZE - uncleared);
 		count -= PAGE_SIZE;
 		dest += PAGE_SIZE;
 	}
 	if (count)
-		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
 
 	return count;
 }
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index de8cba1..70d52e9 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -387,19 +387,6 @@ static int activate(struct tty_port *port, struct tty_struct *tty)
 	}
 
 	state->xmit.head = state->xmit.tail = 0;
-
-	/*
-	 * Set up the tty->alt_speed kludge
-	 */
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-		tty->alt_speed = 57600;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-		tty->alt_speed = 115200;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-		tty->alt_speed = 230400;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-		tty->alt_speed = 460800;
-
 errout:
 	local_irq_restore(flags);
 	return retval;
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 5de673a..a2540e2 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -117,7 +117,7 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
  * following the barrier will arrive after all previous writes.  For most
  * ia64 platforms, this is a simple 'mf.a' instruction.
  *
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ * See Documentation/driver-api/device-io.rst for more information.
  */
 static inline void ___ia64_mmiowb(void)
 {
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 26a63d6..ab982f0 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,23 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-	struct unw_frame_info info;
-	unsigned long ip;
-
-	unw_init_from_blocked_task(&info, t);
-	if (unw_unwind(&info) < 0)
-		return 0;
-	unw_get_ip(&info, &ip);
-	return ip;
-}
-
-/*
  * Get the current instruction/program counter value.
  */
 #define current_text_addr() \
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
deleted file mode 100644
index 6f2e2dd..0000000
--- a/arch/ia64/include/asm/siginfo.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Based on <asm-i386/siginfo.h>.
- *
- * Modified 1998-2002
- *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-#ifndef _ASM_IA64_SIGINFO_H
-#define _ASM_IA64_SIGINFO_H
-
-#include <linux/string.h>
-#include <uapi/asm/siginfo.h>
-
-static inline void
-copy_siginfo (siginfo_t *to, siginfo_t *from)
-{
-	if (from->si_code < 0)
-		memcpy(to, from, sizeof(siginfo_t));
-	else
-		/* _sigchld is currently the largest know union member */
-		memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index f72bf01..4694c64 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -11,7 +11,6 @@
 #define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
 
 #define HAVE_ARCH_SIGINFO_T
-#define HAVE_ARCH_COPY_SIGINFO
 #define HAVE_ARCH_COPY_SIGINFO_TO_USER
 
 #include <asm-generic/siginfo.h>
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index c77ebdf..2b22a716 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(sn_io_addr);
 /**
  * __sn_mmiowb - I/O space memory barrier
  *
- * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * See arch/ia64/include/asm/io.h and Documentation/driver-api/device-io.rst
  * for details.
  *
  * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 5767367..657874e 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
 
-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm)  do { } while (0)
 #define release_segments(mm)  do { } while (0)
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index b15bf6b..c94ee54 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y	+= siginfo.h
diff --git a/arch/m32r/include/uapi/asm/siginfo.h b/arch/m32r/include/uapi/asm/siginfo.h
deleted file mode 100644
index 7d9cd9e..0000000
--- a/arch/m32r/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M32R_SIGINFO_H
-#define _M32R_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _M32R_SIGINFO_H */
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index d8ffcfe..8cd7e03 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -39,14 +39,6 @@
 
 #include <linux/err.h>
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 531cb9e..ddff116 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -26,6 +26,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -349,7 +351,6 @@
 CONFIG_SCSI_ZORRO7XX=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -361,6 +362,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -414,6 +416,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -572,6 +575,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -590,6 +595,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index ca91d39..17384dc 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -27,6 +27,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -331,7 +333,6 @@
 CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -343,6 +344,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -388,6 +390,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -531,6 +534,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -549,6 +554,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 23a3d8a..53a641d 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -26,6 +26,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -340,7 +342,6 @@
 CONFIG_ATARI_SCSI=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -352,6 +353,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -399,6 +401,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -552,6 +555,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -570,6 +575,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 95deb95..3925ae3 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -26,6 +26,8 @@
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -330,7 +332,6 @@
 CONFIG_BVME6000_SCSI=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -342,6 +343,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index afae695..f4a134b 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -27,6 +27,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -331,7 +333,6 @@
 CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -343,6 +344,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -389,6 +391,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -533,6 +536,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -551,6 +556,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b010734..9ed0cef 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -26,6 +26,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -340,7 +342,6 @@
 CONFIG_SCSI_MAC_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -352,6 +353,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -408,6 +410,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -555,6 +558,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -573,6 +578,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 0e41454..efed0d48 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -22,6 +22,8 @@
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
@@ -373,7 +375,6 @@
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -385,6 +386,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -454,6 +456,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -635,6 +638,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -653,6 +658,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b2e687a..9040457 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -26,6 +26,8 @@
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
@@ -329,7 +331,6 @@
 CONFIG_MVME147_SCSI=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -341,6 +342,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index cbd8ee2..8b17f00 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -26,6 +26,8 @@
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -330,7 +332,6 @@
 CONFIG_MVME16x_SCSI=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -342,6 +343,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 1e82cc9..5f3718c 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -27,6 +27,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
@@ -336,7 +338,6 @@
 CONFIG_ISCSI_BOOT_SYSFS=m
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -348,6 +349,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -398,6 +400,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -546,6 +549,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -564,6 +569,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index f9e77f5..8c979a6 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -26,6 +26,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
@@ -327,7 +329,6 @@
 CONFIG_SUN3_SCSI=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -339,6 +340,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -385,6 +387,7 @@
 # CONFIG_NET_VENDOR_SUN is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -525,6 +528,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -542,6 +547,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 3c394fc..a1e7953 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -26,6 +26,8 @@
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
 CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
 CONFIG_KEXEC=y
 CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
@@ -327,7 +329,6 @@
 CONFIG_SUN3X_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -339,6 +340,7 @@
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_TARGET_CORE=m
 CONFIG_TCM_IBLOCK=m
 CONFIG_TCM_FILEIO=m
@@ -385,6 +387,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -525,6 +528,8 @@
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
 CONFIG_ATOMIC64_SELFTEST=m
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_HEXDUMP=m
@@ -543,6 +548,7 @@
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 82005d2..5ecf4e4 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -25,7 +25,6 @@
 generic-y += resource.h
 generic-y += sections.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += spinlock.h
 generic-y += statfs.h
 generic-y += termios.h
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 77239e8..94c3603 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 8c8ce5e..3bc64d0 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -62,9 +62,4 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
 
 #endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
 
-#ifndef __uClinux__
-extern void ptrace_signal_deliver(void);
-#define ptrace_signal_deliver ptrace_signal_deliver
-#endif /* __uClinux__ */
-
 #endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 6436807..68b45cc 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -5,6 +5,7 @@
 generic-y += msgbuf.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
+generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
 generic-y += termbits.h
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index e475c94..7df92f8 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -40,20 +40,6 @@
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(sw->retpc))
-		return ((unsigned long *)sw->a6)[1];
-	else
-		return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 6f945bb..e79421f 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -109,22 +109,6 @@ int fixup_exception(struct pt_regs *regs)
 	return 1;
 }
 
-void ptrace_signal_deliver(void)
-{
-	struct pt_regs *regs = signal_pt_regs();
-	if (regs->orig_d0 < 0)
-		return;
-	switch (regs->d0) {
-	case -ERESTARTNOHAND:
-	case -ERESTARTSYS:
-	case -ERESTARTNOINTR:
-		regs->d0 = regs->orig_d0;
-		regs->orig_d0 = -1;
-		regs->pc -= 2;
-		break;
-	}
-}
-
 static inline void push_cache (unsigned long vaddr)
 {
 	/*
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 232a12b..2dbbb7c 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -567,8 +567,7 @@ static void stop_this_cpu(void *data)
 {
 	unsigned int cpu = smp_processor_id();
 
-	if (system_state == SYSTEM_BOOTING ||
-	    system_state == SYSTEM_RUNNING) {
+	if (system_state <= SYSTEM_RUNNING) {
 		spin_lock(&stop_lock);
 		pr_crit("CPU%u: stopping\n", cpu);
 		dump_stack();
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 85885a5..8e47121 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -4,7 +4,7 @@
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLONE_BACKWARDS3
 	select COMMON_CLK
 	select GENERIC_ATOMIC64
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index dc5dd5b..92fd4e95 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,8 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_SYSFS_DEPRECATED=y
@@ -33,10 +31,12 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
+CONFIG_BRIDGE=m
 CONFIG_MTD=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_NETDEVICES=y
@@ -47,9 +47,9 @@
 # CONFIG_VT is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_XILINX_HWICAP=y
 CONFIG_I2C=y
@@ -66,7 +66,6 @@
 CONFIG_FB_XILINX=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
 CONFIG_UIO_PDRV_GENIRQ=y
 CONFIG_UIO_DMEM_GENIRQ=y
 CONFIG_EXT2_FS=y
@@ -77,14 +76,13 @@
 CONFIG_CIFS=y
 CONFIG_CIFS_STATS=y
 CONFIG_CIFS_STATS2=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
 CONFIG_KGDB=y
 CONFIG_KGDB_TESTS=y
 CONFIG_KGDB_KDB=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 4cdaf56..06d69a6 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,9 +1,6 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_IKCONFIG=y
@@ -34,18 +31,15 @@
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
-CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_NETDEVICES=y
@@ -56,9 +50,9 @@
 # CONFIG_VT is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_UARTLITE=y
 CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_XILINX_HWICAP=y
 CONFIG_I2C=y
@@ -74,10 +68,6 @@
 CONFIG_FB=y
 CONFIG_FB_XILINX=y
 # CONFIG_USB_SUPPORT is not set
-CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
-CONFIG_UIO_PDRV_GENIRQ=y
-CONFIG_UIO_DMEM_GENIRQ=y
 CONFIG_EXT2_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_CRAMFS=y
@@ -85,10 +75,10 @@
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NLS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=y
@@ -97,4 +87,3 @@
 CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_ARC4=y
 CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 56830ff..83a4ef3 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,14 +1,57 @@
 
 generic-y += barrier.h
+generic-y += bitops.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
 generic-y += exec.h
 generic-y += extable.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += hardirq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
 generic-y += preempt.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
 generic-y += syscalls.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += ucontext.h
+generic-y += vga.h
 generic-y += word-at-a-time.h
-generic-y += kprobes.h
+generic-y += xor.h
diff --git a/arch/microblaze/include/asm/bitops.h b/arch/microblaze/include/asm/bitops.h
deleted file mode 100644
index a72468f..0000000
--- a/arch/microblaze/include/asm/bitops.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitops.h>
diff --git a/arch/microblaze/include/asm/bug.h b/arch/microblaze/include/asm/bug.h
deleted file mode 100644
index b12fd89..0000000
--- a/arch/microblaze/include/asm/bug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bug.h>
diff --git a/arch/microblaze/include/asm/bugs.h b/arch/microblaze/include/asm/bugs.h
deleted file mode 100644
index 61791e1..0000000
--- a/arch/microblaze/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>
diff --git a/arch/microblaze/include/asm/div64.h b/arch/microblaze/include/asm/div64.h
deleted file mode 100644
index 6cd978c..0000000
--- a/arch/microblaze/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/microblaze/include/asm/emergency-restart.h b/arch/microblaze/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9..0000000
--- a/arch/microblaze/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>
diff --git a/arch/microblaze/include/asm/fb.h b/arch/microblaze/include/asm/fb.h
deleted file mode 100644
index 3a4988e..0000000
--- a/arch/microblaze/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fb.h>
diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h
deleted file mode 100644
index fb3c05a..0000000
--- a/arch/microblaze/include/asm/hardirq.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hardirq.h>
diff --git a/arch/microblaze/include/asm/irq_regs.h b/arch/microblaze/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b..0000000
--- a/arch/microblaze/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/microblaze/include/asm/kdebug.h b/arch/microblaze/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b0..0000000
--- a/arch/microblaze/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/microblaze/include/asm/kmap_types.h b/arch/microblaze/include/asm/kmap_types.h
deleted file mode 100644
index 2597525..0000000
--- a/arch/microblaze/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_MICROBLAZE_KMAP_TYPES_H
-#define _ASM_MICROBLAZE_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */
diff --git a/arch/microblaze/include/asm/linkage.h b/arch/microblaze/include/asm/linkage.h
deleted file mode 100644
index 0540bba..0000000
--- a/arch/microblaze/include/asm/linkage.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/linkage.h>
diff --git a/arch/microblaze/include/asm/local.h b/arch/microblaze/include/asm/local.h
deleted file mode 100644
index c11c530..0000000
--- a/arch/microblaze/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/microblaze/include/asm/local64.h b/arch/microblaze/include/asm/local64.h
deleted file mode 100644
index 36c93b5..0000000
--- a/arch/microblaze/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/microblaze/include/asm/parport.h b/arch/microblaze/include/asm/parport.h
deleted file mode 100644
index cf252af..0000000
--- a/arch/microblaze/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/microblaze/include/asm/percpu.h b/arch/microblaze/include/asm/percpu.h
deleted file mode 100644
index 06a959d..0000000
--- a/arch/microblaze/include/asm/percpu.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/percpu.h>
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 37ef196..330d556 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 # define KSTK_EIP(tsk)	(0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved (kernel) PC of a blocked thread.  */
-#  define thread_saved_pc(tsk)	\
-	((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
diff --git a/arch/microblaze/include/asm/serial.h b/arch/microblaze/include/asm/serial.h
deleted file mode 100644
index a0cb0caf..0000000
--- a/arch/microblaze/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/microblaze/include/asm/shmparam.h b/arch/microblaze/include/asm/shmparam.h
deleted file mode 100644
index 93f30de..0000000
--- a/arch/microblaze/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmparam.h>
diff --git a/arch/microblaze/include/asm/topology.h b/arch/microblaze/include/asm/topology.h
deleted file mode 100644
index 5428f33..0000000
--- a/arch/microblaze/include/asm/topology.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/topology.h>
diff --git a/arch/microblaze/include/asm/ucontext.h b/arch/microblaze/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9..0000000
--- a/arch/microblaze/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 032fed7..9774e1d 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         398
+#define __NR_syscalls         399
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/asm/vga.h b/arch/microblaze/include/asm/vga.h
deleted file mode 100644
index 89d82fd..0000000
--- a/arch/microblaze/include/asm/vga.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/vga.h>
diff --git a/arch/microblaze/include/asm/xor.h b/arch/microblaze/include/asm/xor.h
deleted file mode 100644
index c82eb12..0000000
--- a/arch/microblaze/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 2178c78..cb6784f 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += types.h
+generic-y += siginfo.h
diff --git a/arch/microblaze/include/uapi/asm/bitsperlong.h b/arch/microblaze/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0..0000000
--- a/arch/microblaze/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/microblaze/include/uapi/asm/errno.h b/arch/microblaze/include/uapi/asm/errno.h
deleted file mode 100644
index 4c82b50..0000000
--- a/arch/microblaze/include/uapi/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/microblaze/include/uapi/asm/fcntl.h b/arch/microblaze/include/uapi/asm/fcntl.h
deleted file mode 100644
index 46ab12d..0000000
--- a/arch/microblaze/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/microblaze/include/uapi/asm/ioctl.h b/arch/microblaze/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe0..0000000
--- a/arch/microblaze/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/microblaze/include/uapi/asm/ioctls.h b/arch/microblaze/include/uapi/asm/ioctls.h
deleted file mode 100644
index ec34c76..0000000
--- a/arch/microblaze/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctls.h>
diff --git a/arch/microblaze/include/uapi/asm/ipcbuf.h b/arch/microblaze/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51..0000000
--- a/arch/microblaze/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/kvm_para.h b/arch/microblaze/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f..0000000
--- a/arch/microblaze/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/include/uapi/asm/mman.h b/arch/microblaze/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89..0000000
--- a/arch/microblaze/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/microblaze/include/uapi/asm/msgbuf.h b/arch/microblaze/include/uapi/asm/msgbuf.h
deleted file mode 100644
index 809134c..0000000
--- a/arch/microblaze/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/param.h b/arch/microblaze/include/uapi/asm/param.h
deleted file mode 100644
index 965d454..0000000
--- a/arch/microblaze/include/uapi/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/microblaze/include/uapi/asm/poll.h b/arch/microblaze/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d..0000000
--- a/arch/microblaze/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/microblaze/include/uapi/asm/resource.h b/arch/microblaze/include/uapi/asm/resource.h
deleted file mode 100644
index 04bc4db..0000000
--- a/arch/microblaze/include/uapi/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/microblaze/include/uapi/asm/sembuf.h b/arch/microblaze/include/uapi/asm/sembuf.h
deleted file mode 100644
index 7673b83..0000000
--- a/arch/microblaze/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/microblaze/include/uapi/asm/shmbuf.h b/arch/microblaze/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 83c05fc..0000000
--- a/arch/microblaze/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/siginfo.h b/arch/microblaze/include/uapi/asm/siginfo.h
deleted file mode 100644
index 0815d29..0000000
--- a/arch/microblaze/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/siginfo.h>
diff --git a/arch/microblaze/include/uapi/asm/signal.h b/arch/microblaze/include/uapi/asm/signal.h
deleted file mode 100644
index 7b1573c..0000000
--- a/arch/microblaze/include/uapi/asm/signal.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/signal.h>
diff --git a/arch/microblaze/include/uapi/asm/socket.h b/arch/microblaze/include/uapi/asm/socket.h
deleted file mode 100644
index 6b71384..0000000
--- a/arch/microblaze/include/uapi/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/microblaze/include/uapi/asm/sockios.h b/arch/microblaze/include/uapi/asm/sockios.h
deleted file mode 100644
index def6d47..0000000
--- a/arch/microblaze/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/microblaze/include/uapi/asm/stat.h b/arch/microblaze/include/uapi/asm/stat.h
deleted file mode 100644
index 3dc90fa..0000000
--- a/arch/microblaze/include/uapi/asm/stat.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/stat.h>
diff --git a/arch/microblaze/include/uapi/asm/statfs.h b/arch/microblaze/include/uapi/asm/statfs.h
deleted file mode 100644
index 0b91fe1..0000000
--- a/arch/microblaze/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/statfs.h>
diff --git a/arch/microblaze/include/uapi/asm/swab.h b/arch/microblaze/include/uapi/asm/swab.h
deleted file mode 100644
index 7847e56..0000000
--- a/arch/microblaze/include/uapi/asm/swab.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/swab.h>
diff --git a/arch/microblaze/include/uapi/asm/termbits.h b/arch/microblaze/include/uapi/asm/termbits.h
deleted file mode 100644
index 3935b10..0000000
--- a/arch/microblaze/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/microblaze/include/uapi/asm/termios.h b/arch/microblaze/include/uapi/asm/termios.h
deleted file mode 100644
index 280d78a..0000000
--- a/arch/microblaze/include/uapi/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index d808615..a88b3c1 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -413,5 +413,6 @@
 #define __NR_pkey_mprotect	395
 #define __NR_pkey_alloc		396
 #define __NR_pkey_free		397
+#define __NR_statx		398
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 12e093a..e45ada8 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -65,8 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
 			continue;
 
-		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-							sg->length, direction);
+		__dma_sync(sg_phys(sg), sg->length, direction);
 	}
 
 	return nents;
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index ef54851..4e1b567 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -208,9 +208,7 @@
 	mfs	r11, rmsr;		/* save MSR */			\
 	swi	r11, r1, PT_MSR;
 
-#define RESTORE_REGS \
-	lwi	r11, r1, PT_MSR;					\
-	mts	rmsr , r11;						\
+#define RESTORE_REGS_GP \
 	lwi	r2, r1, PT_R2;	/* restore SDA */		\
 	lwi	r3, r1, PT_R3;					\
 	lwi	r4, r1, PT_R4;					\
@@ -242,6 +240,18 @@
 	lwi	r30, r1, PT_R30;					\
 	lwi	r31, r1, PT_R31;	/* Restore cur task reg */
 
+#define RESTORE_REGS \
+	lwi	r11, r1, PT_MSR;					\
+	mts	rmsr , r11;						\
+	RESTORE_REGS_GP
+
+#define RESTORE_REGS_RTBD \
+	lwi	r11, r1, PT_MSR;					\
+	andni	r11, r11, MSR_EIP;          /* clear EIP */             \
+	ori	r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */        \
+	mts	rmsr , r11;						\
+	RESTORE_REGS_GP
+
 #define SAVE_STATE	\
 	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
 	/* See if already in kernel mode.*/				\
@@ -427,7 +437,7 @@
 	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 	VM_OFF;
 	tophys(r1,r1);
-	RESTORE_REGS;
+	RESTORE_REGS_RTBD;
 	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
 	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
 	bri	6f;
@@ -436,7 +446,7 @@
 2:	set_bip;			/*  Ints masked for state restore */
 	VM_OFF;
 	tophys(r1,r1);
-	RESTORE_REGS;
+	RESTORE_REGS_RTBD;
 	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
 	tovirt(r1,r1);
 6:
@@ -612,7 +622,7 @@
 	VM_OFF;
 	tophys(r1,r1);
 
-	RESTORE_REGS;
+	RESTORE_REGS_RTBD;
 	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
 
 	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
@@ -621,7 +631,7 @@
 2:	set_bip;			/* Ints masked for state restore */
 	VM_OFF;
 	tophys(r1,r1);
-	RESTORE_REGS;
+	RESTORE_REGS_RTBD;
 	addik	r1, r1, PT_SIZE		/* Clean up stack space.  */
 
 	tovirt(r1,r1);
@@ -847,7 +857,7 @@
 	VM_OFF;
 	tophys(r1,r1);
 	/* MS: Restore all regs */
-	RESTORE_REGS
+	RESTORE_REGS_RTBD
 	addik	r1, r1, PT_SIZE	 /* Clean up stack space */
 	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
 DBTRAP_return_user: /* MS: Make global symbol for debugging */
@@ -858,7 +868,7 @@
 2:	VM_OFF;
 	tophys(r1,r1);
 	/* MS: Restore all regs */
-	RESTORE_REGS
+	RESTORE_REGS_RTBD
 	lwi	r14, r1, PT_R14;
 	lwi	r16, r1, PT_PC;
 	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index e92a817..6527ec2 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct cpu_context *ctx =
-		&(((struct thread_info *)(tsk->stack))->cpu_context);
-
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(ctx->r15))
-		return (unsigned long)ctx->r15;
-	else
-		return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f31ebb5..be98ffe 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -192,7 +192,7 @@ void __init time_init(void)
 {
 	of_clk_init(NULL);
 	setup_cpuinfo_clk();
-	clocksource_probe();
+	timer_probe();
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 6841c2d..c48ff4a 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -398,3 +398,4 @@
 	.long sys_pkey_mprotect		/* 395 */
 	.long sys_pkey_alloc
 	.long sys_pkey_free
+	.long sys_statx
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 9990661..ea2d83f 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -178,8 +178,10 @@ static __init int xilinx_clockevent_init(void)
 				clockevent_xilinx_timer.shift);
 	clockevent_xilinx_timer.max_delta_ns =
 		clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer);
+	clockevent_xilinx_timer.max_delta_ticks = (u32)~0;
 	clockevent_xilinx_timer.min_delta_ns =
 		clockevent_delta2ns(1, &clockevent_xilinx_timer);
+	clockevent_xilinx_timer.min_delta_ticks = 1;
 	clockevent_xilinx_timer.cpumask = cpumask_of(0);
 	clockevents_register_device(&clockevent_xilinx_timer);
 
@@ -333,5 +335,5 @@ static int __init xilinx_timer_init(struct device_node *timer)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
+TIMER_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
 		       xilinx_timer_init);
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index 2fcc5a5..ed4454c 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -60,6 +60,7 @@ void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	int type;
+	unsigned int idx;
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
@@ -68,21 +69,18 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	type = kmap_atomic_idx();
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
 #ifdef CONFIG_DEBUG_HIGHMEM
-	{
-		unsigned int idx;
-
-		idx = type + KM_TYPE_NR * smp_processor_id();
-		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-		/*
-		 * force other mappings to Oops if they'll try to access
-		 * this pte without first remap it
-		 */
-		pte_clear(&init_mm, vaddr, kmap_pte-idx);
-		local_flush_tlb_page(NULL, vaddr);
-	}
+	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 #endif
+	/*
+	 * force other mappings to Oops if they'll try to access
+	 * this pte without first remap it
+	 */
+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
+	local_flush_tlb_page(NULL, vaddr);
+
 	kmap_atomic_idx_pop();
 	pagefault_enable();
 	preempt_enable();
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a..145b5ce 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@
 			-DADDR_BITS=$(ADDR_BITS) \
 			-DADDR_CELLS=$(itb_addr_cells)
 
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
 
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
 
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX)  FORCE
 	$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
 
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
 
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 	$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 
 quiet_cmd_itb-image = ITB     $@
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 4af6192..1231b5a 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -161,7 +161,7 @@ void __init plat_time_init(void)
 		}
 	}
 
-	clocksource_probe();
+	timer_probe();
 }
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index d34536e..279b6d1 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
+
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)	((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 291846d..ad1a999 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 
 #define flush_insn_slot(p)						\
 do {									\
-	flush_icache_range((unsigned long)p->addr,			\
+	if (p->addr)							\
+		flush_icache_range((unsigned long)p->addr,		\
 			   (unsigned long)p->addr +			\
 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
 } while (0)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6f94bed..74afe8c 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
 extern int temp_tlb_entry;
 
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 #define VMALLOC_START	  MAP_BASE
 
-#define PKMAP_BASE		(0xfe000000UL)
+#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
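
As an aside on the hunk above: PKMAP_BASE is no longer hard-coded at 0xfe000000 but derived from FIXADDR_START, so the pkmap window sits directly below the fixmap and stays aligned to its own size. A small userspace sketch of the arithmetic (the FIXADDR_START value is invented for illustration; LAST_PKMAP uses the new 512-entry CONFIG_PHYS_ADDR_T_64BIT case from the highmem.h hunk earlier):

#include <stdio.h>

/* Illustration only, not part of the patch; PAGE_SHIFT and
 * FIXADDR_START are assumed values. */
#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define LAST_PKMAP	512
#define FIXADDR_START	0xffff0000UL		/* hypothetical */

#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

int main(void)
{
	/* 512 pkmap pages end up directly below the aligned fixmap. */
	printf("PKMAP_END  = %#lx\n", PKMAP_END);	/* 0xffe00000 */
	printf("PKMAP_BASE = %#lx\n", PKMAP_BASE);	/* 0xffc00000 */
	return 0;
}
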
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index 740219c..68e19b6 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -91,6 +91,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 /* I hope the range from 0x5480 on is free ... */
 #define TIOCSCTTY	0x5480		/* become controlling tty */
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b11facd..f702a45 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 			break;
 		}
 		/* Compact branch: BNEZC || JIALC */
-		if (insn.i_format.rs)
+		if (!insn.i_format.rs) {
+			/* JIALC: set $31/ra */
 			regs->regs[31] = epc + 4;
+		}
 		regs->cp0_epc += 8;
 		break;
 #endif
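
As an aside on the compact-branch fix above: in this encoding the rs field (instruction bits 25:21) distinguishes the two forms, and only JIALC (rs == 0) writes the link register, which is what the inverted test now expresses. A minimal userspace model of the corrected logic follows; the instruction words and register file are invented for the demo:

#include <stdio.h>
#include <stdint.h>

/* Simplified model of the fixed BNEZC/JIALC handling in the hunk above. */
static void handle_bnezc_jialc(uint32_t insn, unsigned long epc,
			       unsigned long regs[32], unsigned long *cp0_epc)
{
	unsigned int rs = (insn >> 21) & 0x1f;	/* i_format.rs */

	if (!rs)				/* JIALC: set $31/ra */
		regs[31] = epc + 4;
	*cp0_epc += 8;
}

int main(void)
{
	unsigned long regs[32] = { 0 }, cp0_epc = 0x1000;

	handle_bnezc_jialc(0xf8000010, 0x1000, regs, &cp0_epc);	/* rs == 0: JIALC */
	printf("ra = %#lx, epc = %#lx\n", regs[31], cp0_epc);	/* ra = 0x1004 */

	handle_bnezc_jialc(0xf8200010, 0x2000, regs, &cp0_epc);	/* rs != 0: BNEZC */
	printf("ra unchanged: %#lx\n", regs[31]);
	return 0;
}
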
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2..38a3029 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 30a3b75..9d9b8fba 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 
 #endif
 
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 	 * If ip is in kernel space, no long call, otherwise, long call is
 	 * needed.
 	 */
-	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
 #ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
 #else
-	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 						INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	 * entries configured through the tracing/set_graph_function interface.
 	 */
 
-	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf05220..d1bb506 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@
 	beq		t0, t1, dtb_found
 #endif
 	li		t1, -2
-	beq		a0, t1, dtb_found
 	move		t2, a1
+	beq		a0, t1, dtb_found
 
 	li		t2, 0
 dtb_found:
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 3e586da..32e3168 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 		insn.word = 0; /* nop */
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
 		insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
 			   (unsigned long)insn_p + sizeof(*insn_p));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 313a88b..f3e301f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		break;
 	case CPU_P5600:
 	case CPU_P6600:
-	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		raw_event.range = P;
 #endif
 		break;
+	case CPU_I6400:
+		/* 8-bit event numbers */
+		base_id = config & 0xff;
+		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		break;
 	case CPU_1004K:
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c3..d994160 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 918d4c7..5351e1f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b58..38dfa27 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336d..7cd9216 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
 	unsigned long flags, old_entryhi;
 
 	local_irq_save(flags);
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c7..caa62f2 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);
 
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4..c91d5e5 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */
 
 	assert(zm & SP_HIDDEN_BIT);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14..e08598c 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 64dd8bd..28adeab 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911..b19a3c5 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 	/*
 	 * Fixed mappings:
 	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * Permanent kmaps:
 	 */
 	vaddr = PKMAP_BASE;
-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 
 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
 	pud = pud_offset(pgd, vaddr);
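
As an aside on the pagetable_init() change above: PMD_MASK is now applied only to the start address handed to fixrange_init(), while the end is still derived from the unmasked vaddr; with the old form the end was computed from the rounded-down start, so the initialised range could stop short of (or miss entirely) the real fixmap and pkmap addresses. A toy calculation with invented constants makes the difference visible:

#include <stdio.h>

/* Hypothetical values, chosen only to make the truncation visible. */
#define PMD_MASK	(~0x3fffffUL)	/* assume each PMD covers 4 MiB */
#define FIXADDR_SIZE	0x4000UL	/* assume a 16 KiB fixmap */

int main(void)
{
	unsigned long vaddr = 0xffff3000UL;	/* hypothetical fixmap start */

	unsigned long old_start = vaddr & PMD_MASK;
	unsigned long old_end   = old_start + FIXADDR_SIZE;	/* old code */
	unsigned long new_start = vaddr & PMD_MASK;
	unsigned long new_end   = vaddr + FIXADDR_SIZE;		/* patched code */

	printf("old range: [%#lx, %#lx)\n", old_start, old_end);	/* ends at 0xffc04000 */
	printf("new range: [%#lx, %#lx)\n", new_start, new_end);	/* ends at 0xffff7000 */
	return 0;
}
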
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 289edcf..cea4ec9 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -265,7 +265,7 @@ void __init plat_time_init(void)
 		       (freq%1000000)*100/1000000);
 #ifdef CONFIG_CLKSRC_MIPS_GIC
 		update_gic_frequency_dt();
-		clocksource_probe();
+		timer_probe();
 #endif
 	}
 #endif
diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c
index 62a0a78..1894e50 100644
--- a/arch/mips/pic32/pic32mzda/time.c
+++ b/arch/mips/pic32/pic32mzda/time.c
@@ -64,5 +64,5 @@ void __init plat_time_init(void)
 	pr_info("CPU Clock: %ldMHz\n", rate / 1000000);
 	mips_hpt_frequency = rate / 2;
 
-	clocksource_probe();
+	timer_probe();
 }
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 1022201..17a0f1d 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -39,7 +39,7 @@ void __init plat_time_init(void)
 	struct clk *clk;
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 
 	np = of_get_cpu_node(0, NULL);
 	if (!np) {
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 9825dee..710b04c 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -4,7 +4,7 @@
 	bool
 	depends on SOC_RT305X || SOC_MT7620
 	default y
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 
 config RALINK_ILL_ACC
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index b8a1376..92f284d 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -152,4 +152,4 @@ static int __init ralink_systick_init(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
+TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index df79588..eb1c619 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -82,5 +82,5 @@ void __init plat_time_init(void)
 	pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
 	mips_hpt_frequency = clk_get_rate(clk) / 2;
 	clk_put(clk);
-	clocksource_probe();
+	timer_probe();
 }
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
index 069771d..b5f07d2 100644
--- a/arch/mips/ralink/timer-gic.c
+++ b/arch/mips/ralink/timer-gic.c
@@ -20,5 +20,5 @@ void __init plat_time_init(void)
 	ralink_of_remap();
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 }
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index cdf1876..b225033 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -169,6 +169,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
 
 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
 }
+static DEVICE_ATTR_RO(modalias);
 
 static ssize_t name_show(struct device *dev,
 			 struct device_attribute *attr, char *buf)
@@ -178,6 +179,7 @@ static ssize_t name_show(struct device *dev,
 	giodev = to_gio_device(dev);
 	return sprintf(buf, "%s", giodev->name);
 }
+static DEVICE_ATTR_RO(name);
 
 static ssize_t id_show(struct device *dev,
 		       struct device_attribute *attr, char *buf)
@@ -187,13 +189,15 @@ static ssize_t id_show(struct device *dev,
 	giodev = to_gio_device(dev);
 	return sprintf(buf, "%x", giodev->id.id);
 }
+static DEVICE_ATTR_RO(id);
 
-static struct device_attribute gio_dev_attrs[] = {
-	__ATTR_RO(modalias),
-	__ATTR_RO(name),
-	__ATTR_RO(id),
-	__ATTR_NULL,
+static struct attribute *gio_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	&dev_attr_name.attr,
+	&dev_attr_id.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(gio_dev);
 
 static int gio_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
@@ -374,7 +378,7 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq)
 
 static struct bus_type gio_bus_type = {
 	.name	   = "gio",
-	.dev_attrs = gio_dev_attrs,
+	.dev_groups = gio_dev_groups,
 	.match	   = gio_bus_match,
 	.probe	   = gio_device_probe,
 	.remove	   = gio_device_remove,
diff --git a/arch/mips/xilfpga/time.c b/arch/mips/xilfpga/time.c
index cbb3fca..36f3f18 100644
--- a/arch/mips/xilfpga/time.c
+++ b/arch/mips/xilfpga/time.c
@@ -22,7 +22,7 @@ void __init plat_time_init(void)
 	struct clk *clk;
 
 	of_clk_init(NULL);
-	clocksource_probe();
+	timer_probe();
 
 	np = of_get_cpu_node(0, NULL);
 	if (!np) {
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 18e17ab..3ae4791 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(task) ((task)->thread.uregs)
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index b15bf6b..c94ee54 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y	+= siginfo.h
diff --git a/arch/mn10300/include/uapi/asm/siginfo.h b/arch/mn10300/include/uapi/asm/siginfo.h
deleted file mode 100644
index 0815d29..0000000
--- a/arch/mn10300/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/siginfo.h>
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index c9fa426..89e8027 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -40,14 +40,6 @@
 #include "internal.h"
 
 /*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((unsigned long *) tsk->thread.sp)[3];
-}
-
-/*
  * power off function, if any
  */
 void (*pm_power_off)(void);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index a72d5f0..c587764 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -1,6 +1,6 @@
 config NIOS2
 	def_bool y
-	select CLKSRC_OF
+	select TIMER_OF
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_DEVICES
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 727dbb3..e1a843d 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -47,7 +47,6 @@
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
-generic-y += siginfo.h
 generic-y += signal.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 3bbbc3d..4944e2e 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk)	((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 374bd12..51eff5b 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += setup.h
+generic-y += siginfo.h
 generic-y += ucontext.h
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 6e2bdc9..645129a 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -350,7 +350,7 @@ void __init time_init(void)
 	if (count < 2)
 		panic("%d timer is found, it needs 2 timers in system\n", count);
 
-	clocksource_probe();
+	timer_probe();
 }
 
-CLOCKSOURCE_OF_DECLARE(nios2_timer, ALTR_TIMER_COMPATIBLE, nios2_time_init);
+TIMER_OF_DECLARE(nios2_timer, ALTR_TIMER_COMPATIBLE, nios2_time_init);
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index fdbcf0b..091585a 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -46,7 +46,6 @@
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += signal.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index a908e6c..396d8f3 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f8da545..f9b7700 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
 	show_registers(regs);
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }
@@ -167,8 +162,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 5404c6a..9a2a895 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern const struct dma_map_ops pcxl_dma_ops;
 extern const struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
-		
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-	
+
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
 
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 1a16f1d..af98254 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -34,10 +34,10 @@ static inline unsigned char gsc_readb(unsigned long addr)
 	unsigned char ret;
 
 	__asm__ __volatile__(
-	"	rsm	2,%0\n"
+	"	rsm	%3,%0\n"
 	"	ldbx	0(%2),%1\n"
 	"	mtsm	%0\n"
-	: "=&r" (flags), "=r" (ret) : "r" (addr) );
+	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );
 
 	return ret;
 }
@@ -48,10 +48,10 @@ static inline unsigned short gsc_readw(unsigned long addr)
 	unsigned short ret;
 
 	__asm__ __volatile__(
-	"	rsm	2,%0\n"
+	"	rsm	%3,%0\n"
 	"	ldhx	0(%2),%1\n"
 	"	mtsm	%0\n"
-	: "=&r" (flags), "=r" (ret) : "r" (addr) );
+	: "=&r" (flags), "=r" (ret) : "r" (addr), "i" (PSW_SM_D) );
 
 	return ret;
 }
@@ -87,20 +87,20 @@ static inline void gsc_writeb(unsigned char val, unsigned long addr)
 {
 	long flags;
 	__asm__ __volatile__(
-	"	rsm	2,%0\n"
+	"	rsm	%3,%0\n"
 	"	stbs	%1,0(%2)\n"
 	"	mtsm	%0\n"
-	: "=&r" (flags) :  "r" (val), "r" (addr) );
+	: "=&r" (flags) :  "r" (val), "r" (addr), "i" (PSW_SM_D) );
 }
 
 static inline void gsc_writew(unsigned short val, unsigned long addr)
 {
 	long flags;
 	__asm__ __volatile__(
-	"	rsm	2,%0\n"
+	"	rsm	%3,%0\n"
 	"	sths	%1,0(%2)\n"
 	"	mtsm	%0\n"
-	: "=&r" (flags) :  "r" (val), "r" (addr) );
+	: "=&r" (flags) :  "r" (val), "r" (addr), "i" (PSW_SM_D) );
 }
 
 static inline void gsc_writel(unsigned int val, unsigned long addr)
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be257..a812262 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
 	mtctl(__space_to_prot(context), 8);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
 {
-
 	if (prev != next) {
 		mtctl(__pa(next->pgd), 25);
 		load_context(next->context);
 	}
 }
 
+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 451906d..7569627 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -6,6 +6,8 @@
 #if !defined(__ASSEMBLY__)
 
 extern int pdc_type;
+extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */
+extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT)	   */
 
 /* Values for pdc_type */
 #define PDC_TYPE_ILLEGAL	-1
@@ -143,6 +145,18 @@ struct pdc_btlb_info {	/* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
 
 #endif /* !CONFIG_PA20 */
 
+struct pdc_mem_retinfo { /* PDC_MEM/PDC_MEM_MEMINFO (return info) */
+	unsigned long pdt_size;
+	unsigned long pdt_entries;
+	unsigned long pdt_status;
+	unsigned long first_dbe_loc;
+	unsigned long good_mem;
+};
+
+struct pdc_mem_read_pdt { /* PDC_MEM/PDC_MEM_READ_PDT (return info) */
+	unsigned long pdt_entries;
+};
+
 #ifdef CONFIG_64BIT
 struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
 	unsigned long entries_returned;
@@ -301,6 +315,10 @@ int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *);
 int pdc_tod_read(struct pdc_tod *tod);
 int pdc_tod_set(unsigned long sec, unsigned long usec);
 
+void pdc_pdt_init(void);	/* in pdt.c */
+int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo);
+int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *rpdt_read,
+		unsigned long *pdt_entries_ptr);
 #ifdef CONFIG_64BIT
 int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
 		struct pdc_memory_table *tbl, unsigned long entries);
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index e1d2890..32e105f 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -147,9 +147,9 @@
 #define PDC_PAT_MEM_CELL_CLEAR  	6L /* Clear PDT For Cell           */
 #define PDC_PAT_MEM_CELL_READ   	7L /* Read PDT entries For Cell    */
 #define PDC_PAT_MEM_CELL_RESET  	8L /* Reset clear bit For Cell     */
-#define PDC_PAT_MEM_SETGM	  	9L /* Set Golden Memory value      */
-#define PDC_PAT_MEM_ADD_PAGE    	10L /* ADDs a page to the cell      */
-#define PDC_PAT_MEM_ADDRESS     	11L /* Get Physical Location From   */
+#define PDC_PAT_MEM_SETGM		9L /* Set Good Memory value        */
+#define PDC_PAT_MEM_ADD_PAGE		10L /* ADDs a page to the cell      */
+#define PDC_PAT_MEM_ADDRESS		11L /* Get Physical Location From   */
                                     		 /* Memory Address               */
 #define PDC_PAT_MEM_GET_TXT_SIZE   	12L /* Get Formatted Text Size   */
 #define PDC_PAT_MEM_GET_PD_TXT     	13L /* Get PD Formatted Text     */
@@ -212,6 +212,23 @@ struct pdc_pat_cpu_num {
 	unsigned long cpu_loc;
 };
 
+struct pdc_pat_mem_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_INFO (return info) */
+	unsigned int ke;	/* bit 0: memory inside good memory? */
+	unsigned int current_pdt_entries:16;
+	unsigned int max_pdt_entries:16;
+	unsigned long Cs_bitmap;
+	unsigned long Ic_bitmap;
+	unsigned long good_mem;
+	unsigned long first_dbe_loc; /* first location of double bit error */
+	unsigned long clear_time; /* last PDT clear time (since Jan 1970) */
+};
+
+struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */
+	unsigned long actual_count_bytes;
+	unsigned long pdt_entries;
+};
+
+
 struct pdc_pat_pd_addr_map_entry {
 	unsigned char entry_type;       /* 1 = Memory Descriptor Entry Type */
 	unsigned char reserve1[5];
@@ -293,15 +310,15 @@ extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned lon
 
 extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
 
-
 extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); 
 extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val); 
 
-
-/* Flag to indicate this is a PAT box...don't use this unless you
-** really have to...it might go away some day.
-*/
-extern int pdc_pat;     /* arch/parisc/kernel/inventory.c */
+extern int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo);
+extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long max_entries);
+extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long count,
+		unsigned long offset);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 3a4ed9f..71ca86cb 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -511,6 +511,9 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
 
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
 #endif /* !__ASSEMBLY__ */
 
 
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index a3661ee..b3b66c3 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -103,6 +103,8 @@ struct cpuinfo_parisc {
 	unsigned long bh_count;     /* number of times bh was invoked */
 	unsigned long fp_rev;
 	unsigned long fp_model;
+	unsigned long cpu_num;      /* CPU number from PAT firmware */
+	unsigned long cpu_loc;      /* CPU location from PAT firmware */
 	unsigned int state;
 	struct parisc_device *dev;
 	unsigned long loops_per_jiffy;
@@ -163,12 +165,7 @@ struct thread_struct {
 	.flags		= 0 \
 	}
 
-/*
- * Return saved PC of a blocked thread.  This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
 /*
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 6b113f3..c3e114f 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -69,17 +69,6 @@ struct exception_table_entry {
 	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
 
 /*
- * The page fault handler stores, in a per-cpu area, the following information
- * if a fixup routine is available.
- */
-struct exception_data {
-	unsigned long fault_ip;
-	unsigned long fault_gp;
-	unsigned long fault_space;
-	unsigned long fault_addr;
-};
-
-/*
  * load_sr2() preloads the space register %%sr2 - based on the value of
  * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
  * is 0), or with the current value of %%sr3 to access user space (USER_DS)
diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h
index b6572f0..674c68a 100644
--- a/arch/parisc/include/uapi/asm/ioctls.h
+++ b/arch/parisc/include/uapi/asm/ioctls.h
@@ -60,6 +60,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
 #define FIOCLEX		0x5451
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 0609ff1..1f30b49 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -131,12 +131,12 @@
 #define PDC_TLB_SETUP		1	/* set up miss handling 	*/
 
 #define PDC_MEM		20		/* Manage memory		*/
-#define PDC_MEM_MEMINFO		0
-#define PDC_MEM_ADD_PAGE	1
-#define PDC_MEM_CLEAR_PDT	2
-#define PDC_MEM_READ_PDT	3
-#define PDC_MEM_RESET_CLEAR	4
-#define PDC_MEM_GOODMEM		5
+#define PDC_MEM_MEMINFO		0	/* Return PDT info		*/
+#define PDC_MEM_ADD_PAGE	1	/* Add page to PDT		*/
+#define PDC_MEM_CLEAR_PDT	2	/* Clear PDT			*/
+#define PDC_MEM_READ_PDT	3	/* Read PDT entry		*/
+#define PDC_MEM_RESET_CLEAR	4	/* Reset PDT clear flag		*/
+#define PDC_MEM_GOODMEM		5	/* Set good_mem value		*/
 #define PDC_MEM_TABLE		128	/* Non contig mem map (sprockets) */
 #define PDC_MEM_RETURN_ADDRESS_TABLE	PDC_MEM_TABLE
 #define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE	131
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 69a1118..c4294df 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y			:= head.o vmlinux.lds
 
-obj-y	     	:= cache.o pacache.o setup.o traps.o time.o irq.o \
+obj-y	     	:= cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
 		   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
 		   ptrace.o hardware.o inventory.o drivers.o \
 		   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 1c4fe61..dfff8a0 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -298,11 +298,6 @@ int main(void)
 	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
 #endif
 	BLANK();
-	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
-	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
-	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
-	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
-	BLANK();
 	DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
 	BLANK();
 	return 0;
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index fa78419..d8f7735 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -575,7 +575,8 @@ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, ch
 {									\
 	struct parisc_device *padev = to_parisc_device(dev);		\
 	return sprintf(buf, format_string, padev->field);		\
-}
+}									\
+static DEVICE_ATTR_RO(name);
 
 #define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
 
@@ -589,22 +590,24 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 {
 	return make_modalias(dev, buf);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute parisc_device_attrs[] = {
-	__ATTR_RO(irq),
-	__ATTR_RO(hw_type),
-	__ATTR_RO(rev),
-	__ATTR_RO(hversion),
-	__ATTR_RO(sversion),
-	__ATTR_RO(modalias),
-	__ATTR_NULL,
+static struct attribute *parisc_device_attrs[] = {
+	&dev_attr_irq.attr,
+	&dev_attr_hw_type.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_hversion.attr,
+	&dev_attr_sversion.attr,
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(parisc_device);
 
 struct bus_type parisc_bus_type = {
 	.name = "parisc",
 	.match = parisc_generic_match,
 	.uevent = parisc_uevent,
-	.dev_attrs = parisc_device_attrs,
+	.dev_groups = parisc_device_groups,
 	.probe = parisc_driver_probe,
 	.remove = parisc_driver_remove,
 };
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 9d797ae..9819025 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -957,6 +957,41 @@ int pdc_tod_read(struct pdc_tod *tod)
 }
 EXPORT_SYMBOL(pdc_tod_read);
 
+int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MEM, PDC_MEM_MEMINFO, __pa(pdc_result), 0);
+	convert_to_wide(pdc_result);
+	memcpy(rinfo, pdc_result, sizeof(*rinfo));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret,
+		unsigned long *pdt_entries_ptr)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_MEM, PDC_MEM_READ_PDT, __pa(pdc_result),
+			__pa(pdc_result2));
+	if (retval == PDC_OK) {
+		convert_to_wide(pdc_result);
+		memcpy(pret, pdc_result, sizeof(*pret));
+		convert_to_wide(pdc_result2);
+		memcpy(pdt_entries_ptr, pdc_result2,
+			pret->pdt_entries * sizeof(*pdt_entries_ptr));
+	}
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
 /**
  * pdc_tod_set - Set the Time-Of-Day clock.
  * @sec: The number of seconds since epoch.
@@ -1383,6 +1418,79 @@ int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
 
 	return retval;
 }
+
+/**
+ * pdc_pat_mem_pdc_info - Retrieve information about page deallocation table
+ * @rinfo: memory pdt information
+ *
+ */
+int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_INFO,
+			__pa(&pdc_result));
+	if (retval == PDC_OK)
+		memcpy(rinfo, &pdc_result, sizeof(*rinfo));
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
+
+/**
+ * pdc_pat_mem_read_cell_pdt - Read PDT entries from (old) PAT firmware
+ * @pret: ptr to hold the returned PDT read status information
+ * @pdt_entries_ptr: array which will receive the PDT entries
+ * @max_entries: maximum number of entries to be read
+ *
+ */
+int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long max_entries)
+{
+	int retval;
+	unsigned long flags, entries;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	/* PDC_PAT_MEM_CELL_READ is available on early PAT machines only */
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_READ,
+			__pa(&pdc_result), parisc_cell_num, __pa(&pdc_result2));
+
+	if (retval == PDC_OK) {
+		/* build up return value as for PDC_PAT_MEM_PD_READ */
+		entries = min(pdc_result[0], max_entries);
+		pret->pdt_entries = entries;
+		pret->actual_count_bytes = entries * sizeof(unsigned long);
+		memcpy(pdt_entries_ptr, &pdc_result2, pret->actual_count_bytes);
+	}
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+	WARN_ON(retval == PDC_OK && pdc_result[0] > max_entries);
+
+	return retval;
+}
+/**
+ * pdc_pat_mem_read_pd_pdt - Read PDT entries from (newer) PAT firmware
+ * @pret: ptr to hold the returned PDT read status information
+ * @pdt_entries_ptr: array which will receive the PDT entries
+ *
+ */
+int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
+		unsigned long *pdt_entries_ptr, unsigned long count,
+		unsigned long offset)
+{
+	int retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdc_lock, flags);
+	retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
+		__pa(&pret), __pa(pdt_entries_ptr),
+		count, offset);
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return retval;
+}
 #endif /* CONFIG_64BIT */
 
 
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 0fbd0a0..e3a8e5e 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -44,6 +44,7 @@
 
 #include <asm/assembly.h>
 #include <asm/pdc.h>
+#include <asm/psw.h>
 
 #include <linux/linkage.h>
 #include <linux/init.h>
@@ -135,7 +136,7 @@
 	 * So turn on the Q bit and turn off the M bit.
 	 */
 
-	ldo     8(%r0),%r4                       /* PSW Q on, PSW M off */
+	ldi     PSW_SM_Q,%r4                   /* PSW Q on, PSW M off */
 	mtctl   %r4,ipsw
 	mtctl   %r0,pcsq
 	mtctl   %r0,pcsq
@@ -257,7 +258,7 @@
 
 	tovirt_r1 %r30      /* make sp virtual */
 
-	rsm 8,%r0           /* Clear Q bit */
+	rsm     PSW_SM_Q,%r0           /* Clear Q bit */
 	ldi     1,%r8       /* Set trap code to "1" for HPMC */
 	load32	PA(intr_save),%r1
 	be      0(%sr7,%r1)
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index c9789d9..b0fe19a 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -40,6 +40,11 @@
 
 int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
 
+/* cell number and location (PAT firmware only) */
+unsigned long parisc_cell_num __read_mostly;
+unsigned long parisc_cell_loc __read_mostly;
+
+
 void __init setup_pdc(void)
 {
 	long status;
@@ -78,6 +83,10 @@ void __init setup_pdc(void)
 	if (status == PDC_OK) {
 		pdc_type = PDC_TYPE_PAT;
 		pr_cont("64 bit PAT.\n");
+		parisc_cell_num = cell_info.cell_num;
+		parisc_cell_loc = cell_info.cell_loc;
+		pr_info("PAT: Running on cell %lu and location %lu.\n",
+			parisc_cell_num, parisc_cell_loc);
 		return;
 	}
 #endif
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
new file mode 100644
index 0000000..f3a797e
--- /dev/null
+++ b/arch/parisc/kernel/pdt.c
@@ -0,0 +1,143 @@
+/*
+ *    Page Deallocation Table (PDT) support
+ *
+ *    The Page Deallocation Table (PDT) holds a table with pointers to bad
+ *    memory (broken RAM modules) which is maintained by firmware.
+ *
+ *    Copyright 2017 by Helge Deller <deller@gmx.de>
+ *
+ *    TODO:
+ *    - check regularly for new bad memory
+ *    - add userspace interface with procfs or sysfs
+ *    - increase number of PDT entries dynamically
+ */
+
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+
+enum pdt_access_type {
+	PDT_NONE,
+	PDT_PDC,
+	PDT_PAT_NEW,
+	PDT_PAT_OLD
+};
+
+static enum pdt_access_type pdt_type;
+
+/* global PDT status information */
+static struct pdc_mem_retinfo pdt_status;
+
+#define MAX_PDT_TABLE_SIZE	PAGE_SIZE
+#define MAX_PDT_ENTRIES		(MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
+static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
+
+
+/* report PDT entries via /proc/meminfo */
+void arch_report_meminfo(struct seq_file *m)
+{
+	if (pdt_type == PDT_NONE)
+		return;
+
+	seq_printf(m, "PDT_max_entries: %7lu\n",
+			pdt_status.pdt_size);
+	seq_printf(m, "PDT_cur_entries: %7lu\n",
+			pdt_status.pdt_entries);
+}
+
+/*
+ * pdc_pdt_init()
+ *
+ * Initialize kernel PDT structures, read initial PDT table from firmware,
+ * report all current PDT entries and mark bad memory with memblock_reserve()
+ * so that the kernel does not use broken memory areas.
+ *
+ */
+void __init pdc_pdt_init(void)
+{
+	int ret, i;
+	unsigned long entries;
+	struct pdc_mem_read_pdt pdt_read_ret;
+
+	if (is_pdc_pat()) {
+		struct pdc_pat_mem_retinfo pat_rinfo;
+
+		pdt_type = PDT_PAT_NEW;
+		ret = pdc_pat_mem_pdt_info(&pat_rinfo);
+		pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
+		pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
+		pdt_status.pdt_status = 0;
+		pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
+		pdt_status.good_mem = pat_rinfo.good_mem;
+	} else {
+		pdt_type = PDT_PDC;
+		ret = pdc_mem_pdt_info(&pdt_status);
+	}
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_NONE;
+		pr_info("PDT: Firmware does not provide any page deallocation"
+			" information.\n");
+		return;
+	}
+
+	entries = pdt_status.pdt_entries;
+	WARN_ON(entries > MAX_PDT_ENTRIES);
+
+	pr_info("PDT: size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
+		" good_mem %lu\n",
+			pdt_status.pdt_size, pdt_status.pdt_entries,
+			pdt_status.pdt_status, pdt_status.first_dbe_loc,
+			pdt_status.good_mem);
+
+	if (entries == 0) {
+		pr_info("PDT: Firmware reports all memory OK.\n");
+		return;
+	}
+
+	if (pdt_status.first_dbe_loc &&
+		pdt_status.first_dbe_loc <= __pa((unsigned long)&_end))
+		pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n");
+
+	pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n",
+		entries);
+
+	if (pdt_type == PDT_PDC)
+		ret = pdc_mem_pdt_read_entries(&pdt_read_ret, pdt_entry);
+	else {
+#ifdef CONFIG_64BIT
+		struct pdc_pat_mem_read_pd_retinfo pat_pret;
+
+		ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
+			MAX_PDT_ENTRIES);
+		if (ret != PDC_OK) {
+			pdt_type = PDT_PAT_OLD;
+			ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
+				MAX_PDT_TABLE_SIZE, 0);
+		}
+#else
+		ret = PDC_BAD_PROC;
+#endif
+	}
+
+	if (ret != PDC_OK) {
+		pdt_type = PDT_NONE;
+		pr_debug("PDT type %d, retval = %d\n", pdt_type, ret);
+		return;
+	}
+
+	for (i = 0; i < pdt_status.pdt_entries; i++) {
+		if (i < 20)
+			pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n",
+				i,
+				pdt_entry[i] & PAGE_MASK,
+				pdt_entry[i] & 1);
+
+		/* mark memory page bad */
+		memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
+	}
+}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4516a5b..b64d7d2 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 85de47f..0ab3277 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -94,7 +94,7 @@ static int processor_probe(struct parisc_device *dev)
 	unsigned long txn_addr;
 	unsigned long cpuid;
 	struct cpuinfo_parisc *p;
-	struct pdc_pat_cpu_num cpu_info __maybe_unused;
+	struct pdc_pat_cpu_num cpu_info = { };
 
 #ifdef CONFIG_SMP
 	if (num_online_cpus() >= nr_cpu_ids) {
@@ -113,6 +113,7 @@ static int processor_probe(struct parisc_device *dev)
 	 */
 	cpuid = boot_cpu_data.cpu_count;
 	txn_addr = dev->hpa.start;	/* for legacy PDC */
+	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;
 
 #ifdef CONFIG_64BIT
 	if (is_pdc_pat()) {
@@ -180,6 +181,8 @@ static int processor_probe(struct parisc_device *dev)
 	p->hpa = dev->hpa.start;	/* save CPU hpa */
 	p->cpuid = cpuid;	/* save CPU id */
 	p->txn_addr = txn_addr;	/* save CPU IRQ address */
+	p->cpu_num = cpu_info.cpu_num;
+	p->cpu_loc = cpu_info.cpu_loc;
 #ifdef CONFIG_SMP
 	/*
 	** FIXME: review if any other initialization is clobbered
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e528863..378a754 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 44aeaa9..6308749 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
 	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)		/* 265 */
-	ENTRY_SAME(keyctl)
+	ENTRY_COMP(keyctl)
 	ENTRY_SAME(ioprio_set)
 	ENTRY_SAME(ioprio_get)
 	ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 89421df..2d956aa 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -243,14 +243,30 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs, so mark
-	 * them unstable and lower rating on SMP systems.
+	 * The cr16 interval timers are not synchronized across CPUs on
+	 * different sockets, so mark them unstable and lower rating on
+	 * multi-socket SMP systems.
 	 */
 	if (num_online_cpus() > 1) {
-		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-		clocksource_cr16.rating = 0;
+		int cpu;
+		unsigned long cpu0_loc;
+		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
+
+		for_each_online_cpu(cpu) {
+			if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+				continue;
+
+			clocksource_cr16.name = "cr16_unstable";
+			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+			clocksource_cr16.rating = 0;
+			break;
+		}
 	}
 
+	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are
+	 *	in sync:
+	 *	(clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */
+
 	/* register at clocksource framework */
 	clocksource_register_hz(&clocksource_cr16,
 		100 * PAGE0->mem_10msec);
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 85c28bb..d4fe198 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -56,12 +56,6 @@
 	mtsp        %r1,%sr1
 	.endm
 
-	.macro fixup_branch lbl
-	ldil	    L%\lbl, %r1
-	ldo	    R%\lbl(%r1), %r1
-	bv          %r0(%r1)
-	.endm
-
 	/*
 	 * unsigned long lclear_user(void *to, unsigned long n)
 	 *
@@ -82,15 +76,15 @@
 $lclu_done:
 	bv          %r0(%r2)
 	copy        %r25,%r28
+
+2:	b           $lclu_done
+	ldo         1(%r25),%r25
+
+	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
+
 	.exit
 ENDPROC_CFI(lclear_user)
 
-	.section .fixup,"ax"
-2:      fixup_branch $lclu_done
-	ldo        1(%r25),%r25
-	.previous
-
-	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
 
 	.procend
 
@@ -122,16 +116,15 @@
 $lslen_nzero:
 	b           $lslen_done
 	ldo         1(%r26),%r26 /* special case for N == 0 */
-ENDPROC_CFI(lstrnlen_user)
 
-	.section .fixup,"ax"
-3:      fixup_branch $lslen_done
+3:      b	    $lslen_done
 	copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
-	.previous
 
 	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
 	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
 
+ENDPROC_CFI(lstrnlen_user)
+
 	.procend
 
 
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 32ec221..5b101f6 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -29,8 +29,6 @@
 #define BITSSET		0x1c0	/* for identifying LDCW */
 
 
-DEFINE_PER_CPU(struct exception_data, exception_data);
-
 int show_unhandled_signals = 1;
 
 /*
@@ -143,13 +141,6 @@ int fixup_exception(struct pt_regs *regs)
 
 	fix = search_exception_tables(regs->iaoq[0]);
 	if (fix) {
-		struct exception_data *d;
-		d = this_cpu_ptr(&exception_data);
-		d->fault_ip = regs->iaoq[0];
-		d->fault_gp = regs->gr[27];
-		d->fault_space = regs->isr;
-		d->fault_addr = regs->ior;
-
 		/*
 		 * Fix up get_user() and put_user().
 		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
@@ -163,6 +154,7 @@ int fixup_exception(struct pt_regs *regs)
 			/* zero target register for get_user() */
 			if (parisc_acctyp(0, regs->iir) == VM_READ) {
 				int treg = regs->iir & 0x1f;
+				BUG_ON(treg == 0);
 				regs->gr[treg] = 0;
 			}
 		}
@@ -367,7 +359,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 		case 15:	/* Data TLB miss fault/Data page fault */
 			/* send SIGSEGV when outside of vma */
 			if (!vma ||
-			    address < vma->vm_start || address > vma->vm_end) {
+			    address < vma->vm_start || address >= vma->vm_end) {
 				si.si_signo = SIGSEGV;
 				si.si_code = SEGV_MAPERR;
 				break;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 66f3a63..1ca9a2b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -381,6 +381,9 @@ static void __init setup_bootmem(void)
 		request_resource(res, &data_resource);
 	}
 	request_resource(&sysram_resources[0], &pdcdata_resource);
+
+	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
+	pdc_pdt_init();
 }
 
 static int __init parisc_text_address(unsigned long vaddr)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7c8f997..6189238 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -184,7 +184,7 @@
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
 	select HAVE_GCC_PLUGINS
-	select HAVE_GENERIC_RCU_GUP
+	select HAVE_GENERIC_GUP
 	select HAVE_HW_BREAKPOINT		if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
@@ -380,22 +380,6 @@
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
@@ -1215,11 +1199,6 @@
 
 source "security/Kconfig"
 
-config KEYS_COMPAT
-	bool
-	depends on COMPAT && KEYS
-	default y
-
 source "crypto/Kconfig"
 
 config PPC_LIB_RHEAP
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b..0c4e470 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a..0151af6 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
 		"1:	"PPC_TLNEI"	%4,0\n"			\
 		_EMIT_BUG_ENTRY					\
 		: : "i" (__FILE__), "i" (__LINE__),		\
-		  "i" (BUGFLAG_TAINT(TAINT_WARN)),		\
+		  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 		  "i" (sizeof(struct bug_entry)),		\
 		  "r" (__ret_warn_on));				\
 	}							\
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c2d5095..d02ad93 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f..8814a72 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f2..1189d04 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64	TASK_SIZE_512TB
+#define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64	TASK_SIZE_64TB
+#define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ?			\
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
@@ -379,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-        ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b..dc4e159 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -43,9 +43,24 @@ extern void __init dump_numa_cpu_topology(void);
 
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+extern int numa_update_cpu_topology(bool cpus_locked);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
@@ -57,6 +72,11 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
 						int nid)
 {
 }
+
+static inline int numa_update_cpu_topology(bool cpus_locked)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 5c0d8a8..41e88d3 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -267,13 +267,7 @@ do {								\
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index c8a822a..c23ff43 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */
 
 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h
index 49a2579..bfd609a 100644
--- a/arch/powerpc/include/uapi/asm/ioctls.h
+++ b/arch/powerpc/include/uapi/asm/ioctls.h
@@ -100,6 +100,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define TIOCSERCONFIG	0x5453
 #define TIOCSERGWILD	0x5454
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index fcc7588..4c7656d 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
 	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
 	{"no-execute", feat_enable, 0},
 	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
 	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
 	{"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 		}
 	}
 
-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
 		if (!feat_try_enable_unknown(f)) {
 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
 				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
 		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();
 
 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b8..b886795 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@
 	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.  r0,r4,DSISR_DABRMATCH@h
-	bne-    handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld      r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.  r0,r4,DSISR_DABRMATCH@h
+	bne-    handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fc43435..01addfb 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index baae104..2ad725e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 3650732..0f0b1b2 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -283,7 +283,7 @@ static void prrn_work_fn(struct work_struct *work)
 	 * the RTAS event.
 	 */
 	pseries_devicetree_update(-prrn_update_scope);
-	arch_update_cpu_topology();
+	numa_update_cpu_topology(false);
 }
 
 static DECLARE_WORK(prrn_work, prrn_work_fn);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71dcda9..857129a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f35ff9d..4640f6d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void)
 #endif
 
 /*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault, causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
+/*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
  * stack for machine checks.
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 	 * Since we use these as temporary stacks during secondary CPU
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
 	 */
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
@@ -661,7 +686,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +697,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index df2a416..1069f74 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -97,7 +97,7 @@ int smp_generic_cpu_bootable(unsigned int nr)
 	/* Special case - we inhibit secondary thread startup
 	 * during boot if the user requests it.
 	 */
-	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 			return 0;
 		if (smt_enabled_at_boot
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a9..c98e90b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
 	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
 	/* Load special regs for save below */
 	mfmsr   r8
@@ -95,18 +99,44 @@
 	bl	ftrace_stub
 	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3		/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
 #endif
 
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * Otherwise, this is a livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
+#endif
+
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
 	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
 	/* Restore possibly modified LR */
 	ld	r0, _LINK(r1)
@@ -119,7 +149,10 @@
 	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+        /*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
 	bne-	livepatch_handler
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4f..773b35d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks.  So we don't let the
+		 * timebase offset be changed on P9 DD1.  (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
@@ -3317,7 +3368,7 @@ void kvmppc_alloc_host_rm_ops(void)
 		return;
 	}
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
 		if (!cpu_online(cpu))
@@ -3339,17 +3390,17 @@ void kvmppc_alloc_host_rm_ops(void)
 	l_ops = (unsigned long) ops;
 
 	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
-		put_online_cpus();
+		cpus_read_unlock();
 		kfree(ops->rm_core);
 		kfree(ops);
 		return;
 	}
 
-	cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
-				  "ppc/kvm_book3s:prepare",
-				  kvmppc_set_host_core,
-				  kvmppc_clear_host_core);
-	put_online_cpus();
+	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
+					     "ppc/kvm_book3s:prepare",
+					     kvmppc_set_host_core,
+					     kvmppc_clear_host_core);
+	cpus_read_unlock();
 }
 
 void kvmppc_free_host_rm_ops(void)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a2..404deb5 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76..4888dd4 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	/*
@@ -295,8 +314,9 @@
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -968,7 +993,8 @@
 
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@
 	 * set by the guest could disrupt the host.
 	 */
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
@@ -1525,6 +1550,7 @@
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1670,12 +1696,22 @@
 
 	/* Restore host values of some registers */
 BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
 
@@ -2366,12 +2402,13 @@
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	mtspr	SPRN_DEC, r4
 67:
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 023a311..4636ca6 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		__x_writeq(0, __x_eoi_page(xd));
+		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		opal_int_eoi(hw_irq);
 	} else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 		 * properly.
 		 */
 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
-			__x_readq(__x_eoi_page(xd));
+			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 		else {
 			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
 
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9a..a12e863 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a73..0ee6be4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2a..a3edf81 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 371792e..b95c584 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1311,8 +1311,10 @@ static int update_lookup_table(void *data)
 /*
  * Update the node maps and sysfs entries for each cpu whose home node
  * has changed. Returns 1 when the topology has changed, and 0 otherwise.
+ *
+ * cpus_locked says whether we already hold cpu_hotplug_lock.
  */
-int arch_update_cpu_topology(void)
+int numa_update_cpu_topology(bool cpus_locked)
 {
 	unsigned int cpu, sibling, changed = 0;
 	struct topology_update_data *updates, *ud;
@@ -1400,15 +1402,23 @@ int arch_update_cpu_topology(void)
 	if (!cpumask_weight(&updated_cpus))
 		goto out;
 
-	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+	if (cpus_locked)
+		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
+					&updated_cpus);
+	else
+		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
 
 	/*
 	 * Update the numa-cpu lookup table with the new mappings, even for
 	 * offline CPUs. It is best to perform this update from the stop-
 	 * machine context.
 	 */
-	stop_machine(update_lookup_table, &updates[0],
+	if (cpus_locked)
+		stop_machine_cpuslocked(update_lookup_table, &updates[0],
 					cpumask_of(raw_smp_processor_id()));
+	else
+		stop_machine(update_lookup_table, &updates[0],
+			     cpumask_of(raw_smp_processor_id()));
 
 	for (ud = &updates[0]; ud; ud = ud->next) {
 		unregister_cpu_under_node(ud->cpu, ud->old_nid);
@@ -1426,6 +1436,12 @@ int arch_update_cpu_topology(void)
 	return changed;
 }
 
+int arch_update_cpu_topology(void)
+{
+	lockdep_assert_cpus_held();
+	return numa_update_cpu_topology(true);
+}
+
 static void topology_work_fn(struct work_struct *work)
 {
 	rebuild_sched_domains();
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fc..45f6740 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fd..09ceea6 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi  = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e9..bb28e1a 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3..4fd64d3 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@
 
 	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure, say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891a..84b7ac9 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 78fa939..b5d960d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 	if (WARN_ON(!gpdev))
 		return NULL;
 
-	if (WARN_ON(!gpdev->dev.of_node))
+	/* Not all PCI devices have device-tree nodes */
+	if (!gpdev->dev.of_node)
 		return NULL;
 
 	/* Get associated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	/* Invalidating the entire process doesn't use a va */
 	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
 	struct npu *npu;
 	struct pnv_phb *nphb;
 	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
 
 	/*
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
 			if (va)
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_va(npu, address, pid);
+					mmio_invalidate_va(npu, address, pid,
+							flush);
 			else
 				mmio_atsd_reg[i].reg =
-					mmio_invalidate_pid(npu, pid);
+					mmio_invalidate_pid(npu, pid, flush);
 
 			/*
 			 * The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	 */
 	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 	 * There should be no more translation requests for this PID, but we
 	 * need to ensure any entries for it are removed from the TLB.
 	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final address == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
 		return ERR_PTR(-EINVAL);
 	}
 
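As an aside on the ATSD launch encoding used in the hunks above: PPC_BITLSHIFT() follows the IBM bit-numbering convention in which bit 0 is the most significant bit of a 64-bit word, so PPC_BITLSHIFT(n) works out to 63 - n. The PID field is therefore shifted left by 63 - 38 = 25 so that its low-order bit lands on IBM bit 38, and the new "no flush" flag occupies the adjacent IBM bit 39 (a shift of 24); passing flush = true leaves that bit clear and requests the flushing form of the ATSD, which is what the final call in pnv_npu2_mn_invalidate_range() relies on.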
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef1..309876d 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -348,7 +348,7 @@ static int set_subcores_per_core(int new_mode)
 		state->master = 0;
 	}
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	/* This cpu will update the globals before exiting stop machine */
 	this_cpu_ptr(&split_state)->master = 1;
@@ -356,9 +356,10 @@ static int set_subcores_per_core(int new_mode)
 	/* Ensure state is consistent before we call the other cpus */
 	mb();
 
-	stop_machine(cpu_update_split_mode, &new_mode, cpu_online_mask);
+	stop_machine_cpuslocked(cpu_update_split_mode, &new_mode,
+				cpu_online_mask);
 
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return 0;
 }
@@ -407,7 +408,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;
 
 	/*
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 2d2e5f8..5cc35d6 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -471,11 +471,13 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
 
 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute ps3_system_bus_dev_attrs[] = {
-	__ATTR_RO(modalias),
-	__ATTR_NULL,
+static struct attribute *ps3_system_bus_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(ps3_system_bus_dev);
 
 struct bus_type ps3_system_bus_type = {
 	.name = "ps3_system_bus",
@@ -484,7 +486,7 @@ struct bus_type ps3_system_bus_type = {
 	.probe = ps3_system_bus_probe,
 	.remove = ps3_system_bus_remove,
 	.shutdown = ps3_system_bus_shutdown,
-	.dev_attrs = ps3_system_bus_dev_attrs,
+	.dev_groups = ps3_system_bus_dev_groups,
 };
 
 static int __init ps3_system_bus_init(void)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index bda18d8..3918769 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -588,7 +588,7 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
 	return sprintf(buf, "%s\n", "memory,cpu");
 }
 
-static CLASS_ATTR(dlpar, S_IWUSR | S_IRUSR, dlpar_show, dlpar_store);
+static CLASS_ATTR_RW(dlpar);
 
 static int __init pseries_dlpar_init(void)
 {
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71..1fb162b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index b363e43..52146b1 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -397,6 +397,7 @@ static ssize_t devspec_show(struct device *dev,
 	ofdev = to_platform_device(dev);
 	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
 }
+static DEVICE_ATTR_RO(devspec);
 
 static ssize_t name_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
@@ -406,19 +407,22 @@ static ssize_t name_show(struct device *dev,
 	ofdev = to_platform_device(dev);
 	return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
 }
+static DEVICE_ATTR_RO(name);
 
 static ssize_t modalias_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	return of_device_modalias(dev, buf, PAGE_SIZE);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute ibmebus_bus_device_attrs[] = {
-	__ATTR_RO(devspec),
-	__ATTR_RO(name),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
+static struct attribute *ibmebus_bus_device_attrs[] = {
+	&dev_attr_devspec.attr,
+	&dev_attr_name.attr,
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(ibmebus_bus_device);
 
 struct bus_type ibmebus_bus_type = {
 	.name      = "ibmebus",
@@ -428,7 +432,7 @@ struct bus_type ibmebus_bus_type = {
 	.probe     = ibmebus_bus_device_probe,
 	.remove    = ibmebus_bus_device_remove,
 	.shutdown  = ibmebus_bus_device_shutdown,
-	.dev_attrs = ibmebus_bus_device_attrs,
+	.dev_groups = ibmebus_bus_device_groups,
 };
 EXPORT_SYMBOL(ibmebus_bus_type);
 
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 5a0c7ba..2da4851 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -349,8 +349,9 @@ void post_mobility_fixup(void)
 	return;
 }
 
-static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
-			     const char *buf, size_t count)
+static ssize_t migration_store(struct class *class,
+			       struct class_attribute *attr, const char *buf,
+			       size_t count)
 {
 	u64 streamid;
 	int rc;
@@ -380,7 +381,7 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
  */
 #define MIGRATION_API_VERSION	1
 
-static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
+static CLASS_ATTR_WO(migration);
 static CLASS_ATTR_STRING(api_version, S_IRUGO, __stringify(MIGRATION_API_VERSION));
 
 static int __init mobility_sysfs_init(void)
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 28b09fd..117beb9 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -948,21 +948,21 @@ static void vio_cmo_bus_init(void)
 /* sysfs device functions and data structures for CMO */
 
 #define viodev_cmo_rd_attr(name)                                        \
-static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
+static ssize_t cmo_##name##_show(struct device *dev,                    \
                                         struct device_attribute *attr,  \
                                          char *buf)                     \
 {                                                                       \
 	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
 }
 
-static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
+static ssize_t cmo_allocs_failed_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
 }
 
-static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
+static ssize_t cmo_allocs_failed_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
@@ -970,7 +970,7 @@ static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
 	return count;
 }
 
-static ssize_t viodev_cmo_desired_set(struct device *dev,
+static ssize_t cmo_desired_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
@@ -993,27 +993,37 @@ static ssize_t name_show(struct device *, struct device_attribute *, char *);
 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf);
-static struct device_attribute vio_cmo_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(devspec),
-	__ATTR_RO(modalias),
-	__ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
-	       viodev_cmo_desired_show, viodev_cmo_desired_set),
-	__ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
-	__ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show,     NULL),
-	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
-	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
-	__ATTR_NULL
+
+static struct device_attribute dev_attr_name;
+static struct device_attribute dev_attr_devspec;
+static struct device_attribute dev_attr_modalias;
+
+static DEVICE_ATTR_RO(cmo_entitled);
+static DEVICE_ATTR_RO(cmo_allocated);
+static DEVICE_ATTR_RW(cmo_desired);
+static DEVICE_ATTR_RW(cmo_allocs_failed);
+
+static struct attribute *vio_cmo_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_devspec.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_cmo_entitled.attr,
+	&dev_attr_cmo_allocated.attr,
+	&dev_attr_cmo_desired.attr,
+	&dev_attr_cmo_allocs_failed.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(vio_cmo_dev);
 
 /* sysfs bus functions and data structures for CMO */
 
 #define viobus_cmo_rd_attr(name)                                        \
-static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
+static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)    \
 {                                                                       \
 	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
 }                                                                       \
-static BUS_ATTR_RO(cmo_##name)
+static struct bus_attribute bus_attr_cmo_bus_##name =			\
+	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)
 
 #define viobus_cmo_pool_rd_attr(name, var)                              \
 static ssize_t                                                          \
@@ -1051,11 +1061,11 @@ static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
 static BUS_ATTR_RW(cmo_high);
 
 static struct attribute *vio_bus_attrs[] = {
-	&bus_attr_cmo_entitled.attr,
-	&bus_attr_cmo_spare.attr,
-	&bus_attr_cmo_min.attr,
-	&bus_attr_cmo_desired.attr,
-	&bus_attr_cmo_curr.attr,
+	&bus_attr_cmo_bus_entitled.attr,
+	&bus_attr_cmo_bus_spare.attr,
+	&bus_attr_cmo_bus_min.attr,
+	&bus_attr_cmo_bus_desired.attr,
+	&bus_attr_cmo_bus_curr.attr,
 	&bus_attr_cmo_high.attr,
 	&bus_attr_cmo_reserve_size.attr,
 	&bus_attr_cmo_excess_size.attr,
@@ -1066,7 +1076,7 @@ ATTRIBUTE_GROUPS(vio_bus);
 
 static void vio_cmo_sysfs_init(void)
 {
-	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
+	vio_bus_type.dev_groups = vio_cmo_dev_groups;
 	vio_bus_type.bus_groups = vio_bus_groups;
 }
 #else /* CONFIG_PPC_SMLPAR */
@@ -1537,6 +1547,7 @@ static ssize_t name_show(struct device *dev,
 {
 	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
 }
+static DEVICE_ATTR_RO(name);
 
 static ssize_t devspec_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -1545,6 +1556,7 @@ static ssize_t devspec_show(struct device *dev,
 
 	return sprintf(buf, "%s\n", of_node_full_name(of_node));
 }
+static DEVICE_ATTR_RO(devspec);
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
@@ -1566,13 +1578,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute vio_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(devspec),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
+static struct attribute *vio_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_devspec.attr,
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(vio_dev);
 
 void vio_unregister_device(struct vio_dev *viodev)
 {
@@ -1608,7 +1622,7 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
 
 struct bus_type vio_bus_type = {
 	.name = "vio",
-	.dev_attrs = vio_dev_attrs,
+	.dev_groups = vio_dev_groups,
 	.uevent = vio_hotplug,
 	.match = vio_bus_match,
 	.probe = vio_bus_probe,
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b4..6afddae2 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 9138250..8f5e303 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI" facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio, 0);
+		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161faf..37abe86 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -64,6 +64,7 @@
 
 config S390
 	def_bool y
+	select ARCH_BINFMT_ELF_STATE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -184,7 +185,7 @@
 
 config PGTABLE_LEVELS
 	int
-	default 4
+	default 5
 
 source "init/Kconfig"
 
@@ -363,9 +364,6 @@
 config SYSVIPC_COMPAT
 	def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
 	def_bool y
 	prompt "Symmetric multi-processing support"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa..2820722 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5..3c6b781 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542..653d72b 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c1..afa46a7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 678d986..ad4bd77 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,7 +6,8 @@
 obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
-obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index 9317b3e..36aefc0 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -12,6 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/random.h>
 #include <linux/static_key.h>
 #include <asm/cpacf.h>
 
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189db..20244a3 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 45092b1..b3c8847 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,10 +1,12 @@
 generic-y += asm-offsets.h
 generic-y += cacheflush.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += dma-contiguous.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += export.h
+generic-y += fb.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kmap_types.h
diff --git a/arch/s390/include/asm/device.h b/arch/s390/include/asm/device.h
deleted file mode 100644
index 5203fc8..0000000
--- a/arch/s390/include/asm/device.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 6702630..144809a 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/blkdev.h>
 
 struct arqb {
 	u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
 	int (*probe) (struct scm_device *scmdev);
 	int (*remove) (struct scm_device *scmdev);
 	void (*notify) (struct scm_device *scmdev, enum scm_event event);
-	void (*handler) (struct scm_device *scmdev, void *data, int error);
+	void (*handler) (struct scm_device *scmdev, void *data,
+			blk_status_t error);
 };
 
 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);
 
 int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);
 
 #endif /* _ASM_S390_EADM_H */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e8f6230..ec024c0 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -117,6 +117,9 @@
 #define ELF_DATA	ELFDATA2MSB
 #define ELF_ARCH	EM_S390
 
+/* s390 specific phdr types */
+#define PT_S390_PGSTE	0x70000000
+
 /*
  * ELF register definitions..
  */
@@ -151,6 +154,35 @@ extern unsigned int vdso_enabled;
 	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
 #define compat_start_thread	start_thread31
 
+struct arch_elf_state {
+	int rc;
+};
+
+#define INIT_ARCH_ELF_STATE { .rc = 0 }
+
+#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
+#ifdef CONFIG_PGSTE
+#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
+({								\
+	struct arch_elf_state *_state = state;			\
+	if ((phdr)->p_type == PT_S390_PGSTE &&			\
+	    !page_table_allocate_pgste &&			\
+	    !test_thread_flag(TIF_PGSTE) &&			\
+	    !current->mm->context.alloc_pgste) {		\
+		set_thread_flag(TIF_PGSTE);			\
+		set_pt_regs_flag(task_pt_regs(current),		\
+				 PIF_SYSCALL_RESTART);		\
+		_state->rc = -EAGAIN;				\
+	}							\
+	_state->rc;						\
+})
+#else
+#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
+({								\
+	(state)->rc;						\
+})
+#endif
+
 /* For SVR4/S390 the function pointer to be registered with `atexit` is
    passed in R14. */
 #define ELF_PLAT_INIT(_r, load_addr) \
diff --git a/arch/s390/include/asm/fb.h b/arch/s390/include/asm/fb.h
deleted file mode 100644
index c7df380..0000000
--- a/arch/s390/include/asm/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
-	return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 437e9af..904e4b3 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -25,8 +25,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
 
-#ifdef CONFIG_PCI
-
 #define ioremap_nocache(addr, size)	ioremap(addr, size)
 #define ioremap_wc			ioremap_nocache
 #define ioremap_wt			ioremap_nocache
@@ -49,6 +47,8 @@ static inline void ioport_unmap(void __iomem *p)
 {
 }
 
+#ifdef CONFIG_PCI
+
 /*
  * s390 needs a private implementation of pci_iomap since ioremap with its
  * offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a..6baae23 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -107,6 +107,20 @@ struct esca_block {
 	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
 } __packed;
 
+/*
+ * This struct is used to store machine check info from the lowcore for
+ * machine checks that happen while the guest is running.
+ * The info in the host's lowcore might be overwritten by a second machine
+ * check that hits the host while it is still handling the first one.
+ * The size is 24 bytes.
+ */
+struct mcck_volatile_info {
+	__u64 mcic;
+	__u64 failing_storage_address;
+	__u32 ext_damage_code;
+	__u32 reserved;
+};
+
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -264,7 +278,8 @@ struct kvm_s390_itdb {
 
 struct sie_page {
 	struct kvm_s390_sie_block sie_block;
-	__u8 reserved200[1024];		/* 0x0200 */
+	struct mcck_volatile_info mcck_info;	/* 0x0200 */
+	__u8 reserved218[1000];		/* 0x0218 */
 	struct kvm_s390_itdb itdb;	/* 0x0600 */
 	__u8 reserved700[2304];		/* 0x0700 */
 } __packed;
@@ -541,7 +556,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8712e11..4541ac4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,9 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
-	mm->context.alloc_pgste = page_table_allocate_pgste;
+	mm->context.alloc_pgste = page_table_allocate_pgste ||
+		test_thread_flag(TIF_PGSTE) ||
+		current->mm->context.alloc_pgste;
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 	mm->context.use_cmma = 0;
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index e3e8895..13623b9 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -14,7 +14,14 @@
 #include <linux/const.h>
 #include <linux/types.h>
 
+#define MCIC_SUBCLASS_MASK	(1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \
+				1ULL<<59 | 1ULL<<58 | 1ULL<<56 | \
+				1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \
+				1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \
+				1ULL<<45 | 1ULL<<44)
 #define MCCK_CODE_SYSTEM_DAMAGE		_BITUL(63)
+#define MCCK_CODE_EXT_DAMAGE		_BITUL(63 - 5)
+#define MCCK_CODE_CP			_BITUL(63 - 9)
 #define MCCK_CODE_CPU_TIMER_VALID	_BITUL(63 - 46)
 #define MCCK_CODE_PSW_MWP_VALID		_BITUL(63 - 20)
 #define MCCK_CODE_PSW_IA_VALID		_BITUL(63 - 23)
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 69b8a41..624deaa 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -74,6 +74,7 @@ typedef struct { unsigned long pgste; } pgste_t;
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long p4d; } p4d_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef pte_t *pgtable_t;
 
@@ -82,12 +83,14 @@ typedef pte_t *pgtable_t;
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
 #define pud_val(x)	((x).pud)
+#define p4d_val(x)	((x).p4d)
 #define pgd_val(x)      ((x).pgd)
 
 #define __pgste(x)	((pgste_t) { (x) } )
 #define __pte(x)        ((pte_t) { (x) } )
 #define __pmd(x)        ((pmd_t) { (x) } )
 #define __pud(x)	((pud_t) { (x) } )
+#define __p4d(x)	((p4d_t) { (x) } )
 #define __pgd(x)        ((pgd_t) { (x) } )
 #define __pgprot(x)     ((pgprot_t) { (x) } )
 
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 4e31866..f36b4b7 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -70,11 +70,10 @@ struct zpci_fmb {
 } __packed __aligned(128);
 
 enum zpci_state {
-	ZPCI_FN_STATE_RESERVED,
-	ZPCI_FN_STATE_STANDBY,
-	ZPCI_FN_STATE_CONFIGURED,
-	ZPCI_FN_STATE_ONLINE,
-	NR_ZPCI_FN_STATES,
+	ZPCI_FN_STATE_STANDBY = 0,
+	ZPCI_FN_STATE_CONFIGURED = 1,
+	ZPCI_FN_STATE_RESERVED = 2,
+	ZPCI_FN_STATE_ONLINE = 3,
 };
 
 struct zpci_bar_struct {
@@ -109,7 +108,7 @@ struct zpci_dev {
 	u64		msi_addr;	/* MSI address */
 	unsigned int	max_msi;	/* maximum number of MSI's */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
-	unsigned int	aisb;		/* number of the summary bit */
+	unsigned long	aisb;		/* number of the summary bit */
 
 	/* DMA stuff */
 	unsigned long	*dma_table;
@@ -159,11 +158,12 @@ extern const struct attribute_group *zpci_attr_groups[];
 ----------------------------------------------------------------------------- */
 /* Base stuff */
 int zpci_create_device(struct zpci_dev *);
+void zpci_remove_device(struct zpci_dev *zdev);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
-void zpci_stop_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
+void zpci_remove_reserved_devices(void);
 
 /* CLP */
 int clp_scan_pci_devices(void);
@@ -172,6 +172,7 @@ int clp_rescan_pci_devices_simple(void);
 int clp_add_pci_device(u32, u32, int);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
+int clp_get_state(u32 fid, enum zpci_state *state);
 
 #ifdef CONFIG_PCI
 /* Error handling and recovery */
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62..34abcf2 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -76,7 +76,7 @@ struct zpci_fib {
 	u32 gd;
 } __packed __aligned(8);
 
-int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 166f703..bb0ff1b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -51,12 +51,24 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 		return _SEGMENT_ENTRY_EMPTY;
 	if (mm->context.asce_limit <= (1UL << 42))
 		return _REGION3_ENTRY_EMPTY;
-	return _REGION2_ENTRY_EMPTY;
+	if (mm->context.asce_limit <= (1UL << 53))
+		return _REGION2_ENTRY_EMPTY;
+	return _REGION1_ENTRY_EMPTY;
 }
 
-int crst_table_upgrade(struct mm_struct *);
+int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
 void crst_table_downgrade(struct mm_struct *);
 
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	unsigned long *table = crst_table_alloc(mm);
+
+	if (table)
+		crst_table_init(table, _REGION2_ENTRY_EMPTY);
+	return (p4d_t *) table;
+}
+#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long *table = crst_table_alloc(mm);
@@ -86,9 +98,14 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	crst_table_free(mm, (unsigned long *) pmd);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
-	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
+	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e6e3b88..57057fb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -24,7 +24,6 @@
  * the S390 page table tree.
  */
 #ifndef __ASSEMBLY__
-#include <asm-generic/5level-fixup.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
@@ -87,12 +86,15 @@ extern unsigned long zero_page_mask;
  */
 #define PMD_SHIFT	20
 #define PUD_SHIFT	31
-#define PGDIR_SHIFT	42
+#define P4D_SHIFT	42
+#define PGDIR_SHIFT	53
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
 #define PUD_SIZE	(1UL << PUD_SHIFT)
 #define PUD_MASK	(~(PUD_SIZE-1))
+#define P4D_SIZE	(1UL << P4D_SHIFT)
+#define P4D_MASK	(~(P4D_SIZE-1))
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
@@ -105,6 +107,7 @@ extern unsigned long zero_page_mask;
 #define PTRS_PER_PTE	256
 #define PTRS_PER_PMD	2048
 #define PTRS_PER_PUD	2048
+#define PTRS_PER_P4D	2048
 #define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0UL
@@ -115,6 +118,8 @@ extern unsigned long zero_page_mask;
 	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
 #define pud_ERROR(e) \
 	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
+#define p4d_ERROR(e) \
+	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
 
@@ -296,8 +301,6 @@ static inline int is_module_addr(void *addr)
 #define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
 
 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
-#define _REGION3_ENTRY_ORIGIN  ~0x7ffUL/* region third table origin	     */
-
 #define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
 #define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
 #define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
@@ -310,8 +313,8 @@ static inline int is_module_addr(void *addr)
 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
 #endif
 
-#define _REGION_ENTRY_BITS	 0xfffffffffffff227UL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
+#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
+#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
@@ -560,18 +563,23 @@ static inline void crdte(unsigned long old, unsigned long new,
 }
 
 /*
- * pgd/pmd/pte query functions
+ * pgd/p4d/pud/pmd/pte query functions
  */
+static inline int pgd_folded(pgd_t pgd)
+{
+	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
+}
+
 static inline int pgd_present(pgd_t pgd)
 {
-	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+	if (pgd_folded(pgd))
 		return 1;
 	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
 }
 
 static inline int pgd_none(pgd_t pgd)
 {
-	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+	if (pgd_folded(pgd))
 		return 0;
 	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
 }
@@ -589,16 +597,48 @@ static inline int pgd_bad(pgd_t pgd)
 	return (pgd_val(pgd) & mask) != 0;
 }
 
+static inline int p4d_folded(p4d_t p4d)
+{
+	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+	if (p4d_folded(p4d))
+		return 1;
+	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+	if (p4d_folded(p4d))
+		return 0;
+	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
+}
+
+static inline unsigned long p4d_pfn(p4d_t p4d)
+{
+	unsigned long origin_mask;
+
+	origin_mask = _REGION_ENTRY_ORIGIN;
+	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
+}
+
+static inline int pud_folded(pud_t pud)
+{
+	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
+}
+
 static inline int pud_present(pud_t pud)
 {
-	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+	if (pud_folded(pud))
 		return 1;
 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
 }
 
 static inline int pud_none(pud_t pud)
 {
-	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+	if (pud_folded(pud))
 		return 0;
 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
 }
@@ -614,7 +654,7 @@ static inline unsigned long pud_pfn(pud_t pud)
 {
 	unsigned long origin_mask;
 
-	origin_mask = _REGION3_ENTRY_ORIGIN;
+	origin_mask = _REGION_ENTRY_ORIGIN;
 	if (pud_large(pud))
 		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
 	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
@@ -641,6 +681,13 @@ static inline int pud_bad(pud_t pud)
 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
 }
 
+static inline int p4d_bad(p4d_t p4d)
+{
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
+		return pud_bad(__pud(p4d_val(p4d)));
+	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
+}
+
 static inline int pmd_present(pmd_t pmd)
 {
 	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
@@ -794,8 +841,14 @@ static inline int pte_unused(pte_t pte)
 
 static inline void pgd_clear(pgd_t *pgd)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
+	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
 }
 
 static inline void pud_clear(pud_t *pud)
@@ -1089,6 +1142,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 }
 
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
@@ -1098,19 +1152,31 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
+#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
-	pud_t *pud = (pud_t *) pgd;
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		pud = (pud_t *) pgd_deref(*pgd);
-	return pud  + pud_index(address);
+	p4d_t *p4d = (p4d_t *) pgd;
+
+	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+		p4d = (p4d_t *) pgd_deref(*pgd);
+	return p4d + p4d_index(address);
+}
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	pud_t *pud = (pud_t *) p4d;
+
+	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		pud = (pud_t *) p4d_deref(*p4d);
+	return pud + pud_index(address);
 }
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 {
 	pmd_t *pmd = (pmd_t *) pud;
+
 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pmd = (pmd_t *) pud_deref(*pud);
 	return pmd + pmd_index(address);
@@ -1122,6 +1188,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 
 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
+#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
 
 /* Find an entry in the lowest level page table.. */
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
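To show where the new p4d level fits into a software table walk, here is a minimal sketch (illustrative only, not part of the patch); it assumes the generic pgd_offset() and pte_offset_map() helpers together with the p4d_offset(), pud_offset() and pmd_offset() accessors defined above, and it ignores large-page entries:

static pte_t *walk_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	/* Folds back onto the pgd entry unless a region-first table is in use */
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	/* Large pmd/pud mappings are not handled in this sketch */
	return pte_offset_map(pmd, addr);
}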
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 60d395f..c25d57e 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -20,6 +20,7 @@
 #define CIF_FPU			4	/* restore FPU registers */
 #define CIF_IGNORE_IRQ		5	/* ignore interrupt (for udelay) */
 #define CIF_ENABLED_WAIT	6	/* in enabled wait state */
+#define CIF_MCCK_GUEST		7	/* machine check happening in guest */
 
 #define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
 #define _CIF_ASCE_PRIMARY	_BITUL(CIF_ASCE_PRIMARY)
@@ -28,6 +29,7 @@
 #define _CIF_FPU		_BITUL(CIF_FPU)
 #define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
 #define _CIF_ENABLED_WAIT	_BITUL(CIF_ENABLED_WAIT)
+#define _CIF_MCCK_GUEST		_BITUL(CIF_MCCK_GUEST)
 
 #ifndef __ASSEMBLY__
 
@@ -92,11 +94,11 @@ extern void execve_tail(void);
  */
 
 #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
-					(1UL << 31) : (1UL << 53))
+					(1UL << 31) : -PAGE_SIZE)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
-#define TASK_SIZE_MAX		(1UL << 53)
+#define TASK_SIZE_MAX		(-PAGE_SIZE)
 
 #define STACK_TOP		(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 31) : (1UL << 42))
@@ -221,11 +223,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
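
With five translation levels the address-space limit is no longer a fixed power of two; (unsigned long)-PAGE_SIZE is the highest page-aligned address and becomes the new TASK_SIZE_MAX. A quick standalone check of that arithmetic (assuming 4K pages and 64-bit longs):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long new_max = -PAGE_SIZE;	/* wraps to 2^64 - 4096 */
	unsigned long old_max = 1UL << 53;	/* previous 4-level limit */

	printf("TASK_SIZE_MAX: old %#lx, new %#lx\n", old_max, new_max);
	return 0;
}
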
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 99bc456..853b012 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -11,9 +11,11 @@
 
 #define PIF_SYSCALL		0	/* inside a system call */
 #define PIF_PER_TRAP		1	/* deliver sigtrap on return to user */
+#define PIF_SYSCALL_RESTART	2	/* restart the current system call */
 
 #define _PIF_SYSCALL		_BITUL(PIF_SYSCALL)
 #define _PIF_PER_TRAP		_BITUL(PIF_PER_TRAP)
+#define _PIF_SYSCALL_RESTART	_BITUL(PIF_SYSCALL_RESTART)
 
 #ifndef __ASSEMBLY__
 
@@ -24,38 +26,38 @@
 			 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
 struct psw_bits {
-	unsigned long	   :  1;
-	unsigned long r	   :  1; /* PER-Mask */
-	unsigned long	   :  3;
-	unsigned long t	   :  1; /* DAT Mode */
-	unsigned long i	   :  1; /* Input/Output Mask */
-	unsigned long e	   :  1; /* External Mask */
-	unsigned long key  :  4; /* PSW Key */
-	unsigned long	   :  1;
-	unsigned long m	   :  1; /* Machine-Check Mask */
-	unsigned long w	   :  1; /* Wait State */
-	unsigned long p	   :  1; /* Problem State */
-	unsigned long as   :  2; /* Address Space Control */
-	unsigned long cc   :  2; /* Condition Code */
-	unsigned long pm   :  4; /* Program Mask */
-	unsigned long ri   :  1; /* Runtime Instrumentation */
-	unsigned long	   :  6;
-	unsigned long eaba :  2; /* Addressing Mode */
-	unsigned long	   : 31;
-	unsigned long ia   : 64; /* Instruction Address */
+	unsigned long	     :	1;
+	unsigned long per    :	1; /* PER-Mask */
+	unsigned long	     :	3;
+	unsigned long dat    :	1; /* DAT Mode */
+	unsigned long io     :	1; /* Input/Output Mask */
+	unsigned long ext    :	1; /* External Mask */
+	unsigned long key    :	4; /* PSW Key */
+	unsigned long	     :	1;
+	unsigned long mcheck :	1; /* Machine-Check Mask */
+	unsigned long wait   :	1; /* Wait State */
+	unsigned long pstate :	1; /* Problem State */
+	unsigned long as     :	2; /* Address Space Control */
+	unsigned long cc     :	2; /* Condition Code */
+	unsigned long pm     :	4; /* Program Mask */
+	unsigned long ri     :	1; /* Runtime Instrumentation */
+	unsigned long	     :	6;
+	unsigned long eaba   :	2; /* Addressing Mode */
+	unsigned long	     : 31;
+	unsigned long ia     : 64; /* Instruction Address */
 };
 
 enum {
-	PSW_AMODE_24BIT = 0,
-	PSW_AMODE_31BIT = 1,
-	PSW_AMODE_64BIT = 3
+	PSW_BITS_AMODE_24BIT = 0,
+	PSW_BITS_AMODE_31BIT = 1,
+	PSW_BITS_AMODE_64BIT = 3
 };
 
 enum {
-	PSW_AS_PRIMARY	 = 0,
-	PSW_AS_ACCREG	 = 1,
-	PSW_AS_SECONDARY = 2,
-	PSW_AS_HOME	 = 3
+	PSW_BITS_AS_PRIMARY	= 0,
+	PSW_BITS_AS_ACCREG	= 1,
+	PSW_BITS_AS_SECONDARY	= 2,
+	PSW_BITS_AS_HOME	= 3
 };
 
 #define psw_bits(__psw) (*({			\
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 72df5f2..020a881 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -59,7 +59,7 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
 	int cc;
 
 	cc = ____pcpu_sigp(addr, order, parm, &_status);
-	if (status && cc == 1)
+	if (status && cc == SIGP_CC_STATUS_STORED)
 		*status = _status;
 	return cc;
 }
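
The magic constant 1 is replaced with the named condition code: SIGP stores a status word only when it completes with condition code 1. An illustrative call-site sketch; the enum values mirror the architected condition codes but are reproduced here only for the example:

#include <stdio.h>

enum {
	SIGP_CC_ORDER_CODE_ACCEPTED = 0,
	SIGP_CC_STATUS_STORED	    = 1,
	SIGP_CC_BUSY		    = 2,
	SIGP_CC_NOT_OPERATIONAL     = 3,
};

static void report(int cc, unsigned int status)
{
	if (cc == SIGP_CC_STATUS_STORED)
		printf("cc=%d, status stored: %#x\n", cc, status);
	else
		printf("cc=%d, no status\n", cc);
}

int main(void)
{
	report(SIGP_CC_STATUS_STORED, 0x40);
	report(SIGP_CC_BUSY, 0);
	return 0;
}
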
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index e784bed..2b498e5 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -109,7 +109,7 @@ struct sysinfo_2_2_2 {
 	unsigned short cpus_shared;
 	char reserved_4[3];
 	unsigned char vsne;
-	uuid_be uuid;
+	uuid_t uuid;
 	char reserved_5[160];
 	char ext_name[256];
 };
@@ -134,7 +134,7 @@ struct sysinfo_3_2_2 {
 		char reserved_1[3];
 		unsigned char evmne;
 		unsigned int reserved_2;
-		uuid_be uuid;
+		uuid_t uuid;
 	} vm[8];
 	char reserved_3[1504];
 	char ext_names[8][256];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 0b3ee08..1aecf43 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -58,6 +58,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define TIF_UPROBE		3	/* breakpointed or single-stepping */
 #define TIF_GUARDED_STORAGE	4	/* load guarded storage control block */
 #define TIF_PATCH_PENDING	5	/* pending live patching update */
+#define TIF_PGSTE		6	/* New mm's will use 4K page tables */
 
 #define TIF_31BIT		16	/* 32bit process */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 853b2a3..7317b31 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -137,6 +137,21 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 }
 
 /*
+ * p4d_free_tlb frees a p4d table and clears the CRSTE for the
+ * region second table entry from the tlb.
+ * If the mm uses a four level page table the single p4d is folded
+ * into the pgd. p4d_free_tlb checks the asce_limit against 8PB
+ * to avoid a double free of the p4d in this case.
+ */
+static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+				unsigned long address)
+{
+	if (tlb->mm->context.asce_limit <= (1UL << 53))
+		return;
+	tlb_remove_table(tlb, p4d);
+}
+
+/*
  * pud_free_tlb frees a pud table and clears the CRSTE for the
  * region third table entry from the tlb.
  * If the mm uses a three level page table the single pud is freed
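
The asce_limit guard in p4d_free_tlb() above prevents a double free: for a four-level (or smaller) address space the p4d is folded into the pgd and must not be released separately. A hedged standalone sketch of that check, where 1UL << 53 is the 8PB boundary the comment refers to:

#include <stdbool.h>
#include <stdio.h>

static bool free_p4d_separately(unsigned long asce_limit)
{
	/* at or below the 4-level limit the p4d is the pgd itself */
	return asce_limit > (1UL << 53);
}

int main(void)
{
	printf("4-level mm: free p4d? %d\n", free_p4d_separately(1UL << 53));
	printf("5-level mm: free p4d? %d\n", free_p4d_separately(-4096UL));
	return 0;
}
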
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6bb2963..b65c414 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -58,6 +58,9 @@ int main(void)
 	OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
 	OFFSET(__SF_GPRS, stack_frame, gprs);
 	OFFSET(__SF_EMPTY, stack_frame, empty1);
+	OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
+	OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
+	OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
 	BLANK();
 	/* timeval/timezone offsets for use by vdso */
 	OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 829e1c5..dab78ba 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -98,8 +98,10 @@ static int show_address(void *data, unsigned long address, int reliable)
 	return 0;
 }
 
-static void show_trace(struct task_struct *task, unsigned long sp)
+void show_stack(struct task_struct *task, unsigned long *stack)
 {
+	unsigned long sp = (unsigned long) stack;
+
 	if (!sp)
 		sp = task ? task->thread.ksp : current_stack_pointer();
 	printk("Call Trace:\n");
@@ -109,29 +111,6 @@ static void show_trace(struct task_struct *task, unsigned long sp)
 	debug_show_held_locks(task);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-	unsigned long *stack;
-	int i;
-
-	stack = sp;
-	if (!stack) {
-		if (!task)
-			stack = (unsigned long *)current_stack_pointer();
-		else
-			stack = (unsigned long *)task->thread.ksp;
-	}
-	printk(KERN_DEFAULT "Stack:\n");
-	for (i = 0; i < 20; i++) {
-		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
-			break;
-		if (i % 4 == 0)
-			printk(KERN_DEFAULT "       ");
-		pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
-	}
-	show_trace(task, (unsigned long)sp);
-}
-
 static void show_last_breaking_event(struct pt_regs *regs)
 {
 	printk("Last Breaking-Event-Address:\n");
@@ -149,8 +128,8 @@ void show_registers(struct pt_regs *regs)
 		pr_cont(" (%pSR)", (void *)regs->psw.addr);
 	pr_cont("\n");
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
-	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
-	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
+	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
+	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
 	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
 	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
 	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
@@ -169,7 +148,7 @@ void show_regs(struct pt_regs *regs)
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!user_mode(regs))
-		show_trace(NULL, regs->gprs[15]);
+		show_stack(NULL, (unsigned long *) regs->gprs[15]);
 	show_last_breaking_event(regs);
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e408d9c..21900e1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -52,7 +52,7 @@
 		   _TIF_SYSCALL_TRACEPOINT)
 _CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
 		   _CIF_ASCE_SECONDARY | _CIF_FPU)
-_PIF_WORK	= (_PIF_PER_TRAP)
+_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 
 #define BASED(name) name-cleanup_critical(%r13)
 
@@ -225,18 +225,24 @@
 	jnz	.Lsie_skip
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
+.Lsie_entry:
 	sie	0(%r14)
 .Lsie_skip:
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in the case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where the ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-	nop	0
+.Lrewind_pad6:
+	nopr	7
+.Lrewind_pad4:
+	nopr	7
+.Lrewind_pad2:
+	nopr	7
 	.globl sie_exit
 sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
@@ -249,7 +255,9 @@
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
 	j	sie_exit
 
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 	EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)
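
The three 2-byte nopr instructions above form a landing pad for every possible PSW rewind after SIE: the instruction length code can be 2, 4 or 6 bytes, and each rewound address must hit an instruction covered by an exception-table entry. The arithmetic as a plain C check (addresses are made up for the example):

#include <stdio.h>

int main(void)
{
	unsigned long sie_exit = 0x1000;	/* label after the pads */
	unsigned long pads[3] = { sie_exit - 6, sie_exit - 4, sie_exit - 2 };
	int ilc;

	for (ilc = 2; ilc <= 6; ilc += 2) {
		unsigned long rewound = sie_exit - ilc;
		int hit = rewound == pads[0] || rewound == pads[1] ||
			  rewound == pads[2];

		printf("ILC %d rewinds onto a pad: %s\n", ilc, hit ? "yes" : "no");
	}
	return 0;
}
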
@@ -327,6 +335,8 @@
 	jo	.Lsysc_mcck_pending
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jo	.Lsysc_reschedule
+	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
+	jo	.Lsysc_syscall_restart
 #ifdef CONFIG_UPROBES
 	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
 	jo	.Lsysc_uprobe_notify
@@ -340,6 +350,8 @@
 	jo	.Lsysc_patch_pending	# handle live patching just before
 					# signals and possible syscall restart
 #endif
+	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
+	jo	.Lsysc_syscall_restart
 	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
 	jo	.Lsysc_sigpending
 	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -441,6 +453,15 @@
 	jg	do_per_trap
 
 #
+# _PIF_SYSCALL_RESTART is set, repeat the current system call
+#
+.Lsysc_syscall_restart:
+	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
+	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
+	lg	%r2,__PT_ORIG_GPR2(%r11)
+	j	.Lsysc_do_svc
+
+#
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
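
The .Lsysc_syscall_restart path clears the restart flag, reloads the original syscall arguments from pt_regs and branches back to the dispatcher. A conceptual C sketch of that flag-driven restart; the struct layout and flag value are illustrative, not the real pt_regs:

#include <stdio.h>

#define PIF_SYSCALL_RESTART (1U << 2)

struct regs { unsigned int flags; long orig_gpr2; long gpr2; };

static void do_svc(struct regs *r)
{
	printf("dispatching syscall, arg=%ld\n", r->gpr2);
}

int main(void)
{
	struct regs r = { .flags = PIF_SYSCALL_RESTART, .orig_gpr2 = 42, .gpr2 = -1 };

	if (r.flags & PIF_SYSCALL_RESTART) {
		r.flags &= ~PIF_SYSCALL_RESTART;	/* clear the flag */
		r.gpr2 = r.orig_gpr2;			/* reload original argument */
		do_svc(&r);				/* repeat the system call */
	}
	return 0;
}
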
@@ -874,9 +895,7 @@
 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
 	br	%r14
 .Lsave_fpu_regs_end:
-#if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL(save_fpu_regs)
-#endif
 
 /*
  * Load floating-point controls and floating-point or vector registers.
@@ -1104,7 +1123,13 @@
 	.quad	.Lsie_done
 
 .Lcleanup_sie:
-	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
+	cghi    %r11,__LC_SAVE_AREA_ASYNC	# Is this a normal interrupt?
+	je      1f
+	slg     %r9,BASED(.Lsie_crit_mcck_start)
+	clg     %r9,BASED(.Lsie_crit_mcck_length)
+	jh      1f
+	oi      __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+1:	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
@@ -1289,6 +1314,10 @@
 	.quad	.Lsie_gmap
 .Lsie_critical_length:
 	.quad	.Lsie_done - .Lsie_gmap
+.Lsie_crit_mcck_start:
+	.quad   .Lsie_entry
+.Lsie_crit_mcck_length:
+	.quad   .Lsie_skip - .Lsie_entry
 #endif
 
 	.section .rodata, "a"
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e545ffe..8e622bb5 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
-	if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-		diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
 		break;
 	case REIPL_METHOD_CCW_DIAG:
 		diag308(DIAG308_SET, reipl_block_ccw);
-		if (MACHINE_IS_LPAR)
-			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-		else
-			diag308(DIAG308_LOAD_CLEAR, NULL);
+		diag308(DIAG308_LOAD_CLEAR, NULL);
 		break;
 	case REIPL_METHOD_FCP_RW_DIAG:
 		diag308(DIAG308_SET, reipl_block_fcp);
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 6aa630a..262506c 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -93,7 +93,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	args.entry = entry;
 	args.type = type;
 
-	stop_machine(__sm_arch_jump_label_transform, &args, NULL);
+	stop_machine_cpuslocked(__sm_arch_jump_label_transform, &args, NULL);
 }
 
 void arch_jump_label_transform_static(struct jump_entry *entry,
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3d6a997..6842e45 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -196,7 +196,7 @@ void arch_arm_kprobe(struct kprobe *p)
 {
 	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
 
-	stop_machine(swap_instruction, &args, NULL);
+	stop_machine_cpuslocked(swap_instruction, &args, NULL);
 }
 NOKPROBE_SYMBOL(arch_arm_kprobe);
 
@@ -204,7 +204,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 {
 	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
 
-	stop_machine(swap_instruction, &args, NULL);
+	stop_machine_cpuslocked(swap_instruction, &args, NULL);
 }
 NOKPROBE_SYMBOL(arch_disarm_kprobe);
 
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 9855895..31d03a8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -25,6 +25,8 @@
 #include <asm/crw.h>
 #include <asm/switch_to.h>
 #include <asm/ctl_reg.h>
+#include <asm/asm-offsets.h>
+#include <linux/kvm_host.h>
 
 struct mcck_struct {
 	unsigned int kill_task : 1;
@@ -274,12 +276,39 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 	return kill_task;
 }
 
+/*
+ * Back up the guest's machine check info to its description block
+ */
+static void notrace s390_backup_mcck_info(struct pt_regs *regs)
+{
+	struct mcck_volatile_info *mcck_backup;
+	struct sie_page *sie_page;
+
+	/* r14 contains the sie block, which was set in sie64a */
+	struct kvm_s390_sie_block *sie_block =
+			(struct kvm_s390_sie_block *) regs->gprs[14];
+
+	if (sie_block == NULL)
+		/* Something's seriously wrong, stop system. */
+		s390_handle_damage();
+
+	sie_page = container_of(sie_block, struct sie_page, sie_block);
+	mcck_backup = &sie_page->mcck_info;
+	mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
+				~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
+	mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
+	mcck_backup->failing_storage_address
+			= S390_lowcore.failing_storage_address;
+}
+
 #define MAX_IPD_COUNT	29
 #define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */
 
 #define ED_STP_ISLAND	6	/* External damage STP island check */
 #define ED_STP_SYNC	7	/* External damage STP sync check */
 
+#define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)
+
 /*
  * machine check handler.
  */
@@ -291,6 +320,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 	struct mcck_struct *mcck;
 	unsigned long long tmp;
 	union mci mci;
+	unsigned long mcck_dam_code;
 
 	nmi_enter();
 	inc_irq_stat(NMI_NMI);
@@ -301,7 +331,13 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 		/* System damage -> stopping machine */
 		s390_handle_damage();
 	}
-	if (mci.pd) {
+
+	/*
+	 * Reinject the instruction processing damages' machine checks
+	 * including Delayed Access Exception into the guest
+	 * instead of damaging the host if they happen in the guest.
+	 */
+	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
 		if (mci.b) {
 			/* Processing backup -> verify if we can survive this */
 			u64 z_mcic, o_mcic, t_mcic;
@@ -345,6 +381,14 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 		mcck->mcck_code = mci.val;
 		set_cpu_flag(CIF_MCCK_PENDING);
 	}
+
+	/*
+	 * Back up the machine check's info if it happens while the guest
+	 * is running.
+	 */
+	if (test_cpu_flag(CIF_MCCK_GUEST))
+		s390_backup_mcck_info(regs);
+
 	if (mci.cd) {
 		/* Timing facility damage */
 		s390_handle_damage();
@@ -358,15 +402,22 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 		if (mcck->stp_queue)
 			set_cpu_flag(CIF_MCCK_PENDING);
 	}
-	if (mci.se)
-		/* Storage error uncorrected */
-		s390_handle_damage();
-	if (mci.ke)
-		/* Storage key-error uncorrected */
-		s390_handle_damage();
-	if (mci.ds && mci.fa)
-		/* Storage degradation */
-		s390_handle_damage();
+
+	/*
+	 * Reinject storage related machine checks into the guest if they
+	 * happen when the guest is running.
+	 */
+	if (!test_cpu_flag(CIF_MCCK_GUEST)) {
+		if (mci.se)
+			/* Storage error uncorrected */
+			s390_handle_damage();
+		if (mci.ke)
+			/* Storage key-error uncorrected */
+			s390_handle_damage();
+		if (mci.ds && mci.fa)
+			/* Storage degradation */
+			s390_handle_damage();
+	}
 	if (mci.cp) {
 		/* Channel report word pending */
 		mcck->channel_report = 1;
@@ -377,6 +428,19 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
 		mcck->warning = 1;
 		set_cpu_flag(CIF_MCCK_PENDING);
 	}
+
+	/*
+	 * If there are only Channel Report Pending and External Damage
+	 * machine checks, they will not be reinjected into the guest
+	 * because they refer to host conditions only.
+	 */
+	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
+	if (test_cpu_flag(CIF_MCCK_GUEST) &&
+	    (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
+		/* Set exit reason code for host's later handling */
+		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
+	}
+	clear_cpu_flag(CIF_MCCK_GUEST);
 	nmi_exit();
 }
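
The final check above forces a guest exit only when the machine check carries conditions beyond channel-report-pending and external damage, since those two refer to host state only. A standalone sketch of that subclass test; the bit values are placeholders, not the real MCIC layout:

#include <stdbool.h>
#include <stdio.h>

#define MCCK_CODE_CP		0x1UL	/* placeholder bit */
#define MCCK_CODE_EXT_DAMAGE	0x2UL	/* placeholder bit */
#define MCCK_CODE_NO_GUEST	(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)

static bool needs_guest_exit(unsigned long mcck_dam_code, bool in_guest)
{
	return in_guest &&
	       (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code;
}

int main(void)
{
	printf("%d\n", needs_guest_exit(MCCK_CODE_CP, true));		/* 0: host-only condition */
	printf("%d\n", needs_guest_exit(MCCK_CODE_CP | 0x8UL, true));	/* 1: guest must exit */
	return 0;
}
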
 
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index ca960d0..0c82f79 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,11 +995,11 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
 	regs.int_parm = CPU_MF_INT_SF_PRA;
 	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
 
-	psw_bits(regs.psw).ia = sfr->basic.ia;
-	psw_bits(regs.psw).t  = sfr->basic.T;
-	psw_bits(regs.psw).w  = sfr->basic.W;
-	psw_bits(regs.psw).p  = sfr->basic.P;
-	psw_bits(regs.psw).as = sfr->basic.AS;
+	psw_bits(regs.psw).ia	= sfr->basic.ia;
+	psw_bits(regs.psw).dat	= sfr->basic.T;
+	psw_bits(regs.psw).wait = sfr->basic.W;
+	psw_bits(regs.psw).per	= sfr->basic.P;
+	psw_bits(regs.psw).as	= sfr->basic.AS;
 
 	/*
 	 * Use the hardware provided configuration level to decide if the
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 955a7b6..93a386f 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -245,6 +245,5 @@ ssize_t cpumf_events_sysfs_show(struct device *dev,
 	struct perf_pmu_events_attr *pmu_attr;
 
 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
-	return sprintf(page, "event=0x%04llx,name=%s\n",
-		       pmu_attr->id, attr->attr.name);
+	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
 }
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 999d715..bb32b86 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -41,31 +41,6 @@
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct stack_frame *sf, *low, *high;
-
-	if (!tsk || !task_stack_page(tsk))
-		return 0;
-	low = task_stack_page(tsk);
-	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) tsk->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
-	sf = (struct stack_frame *) sf->back_chain;
-	if (sf <= low || sf > high)
-		return 0;
-	return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);
 
 /*
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 488c5bb..252ed61 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1160,6 +1160,8 @@ static int s390_gs_cb_get(struct task_struct *target,
 		return -ENODEV;
 	if (!data)
 		return -ENODATA;
+	if (target == current)
+		save_gs_cb(data);
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   data, 0, sizeof(struct gs_cb));
 }
@@ -1170,6 +1172,7 @@ static int s390_gs_cb_set(struct task_struct *target,
 			  const void *kbuf, const void __user *ubuf)
 {
 	struct gs_cb *data = target->thread.gs_cb;
+	int rc;
 
 	if (!MACHINE_HAS_GS)
 		return -ENODEV;
@@ -1177,10 +1180,18 @@ static int s390_gs_cb_set(struct task_struct *target,
 		data = kzalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
 			return -ENOMEM;
+		data->gsd = 25;
 		target->thread.gs_cb = data;
+		if (target == current)
+			__ctl_set_bit(2, 4);
+	} else if (target == current) {
+		save_gs_cb(data);
 	}
-	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  data, 0, sizeof(struct gs_cb));
+	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+				data, 0, sizeof(struct gs_cb));
+	if (target == current)
+		restore_gs_cb(data);
+	return rc;
 }
 
 static int s390_gs_bc_get(struct task_struct *target,
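
The regset changes follow a common pattern: when the target is the current task, the in-memory guarded-storage block is stale until the live state is saved, and a written block only takes effect after a reload. A hedged userspace sketch of that pattern; all names and values here are hypothetical stand-ins:

#include <stdio.h>

struct gs_cb { unsigned long gsd; };

static struct gs_cb live = { .gsd = 0x1000 };	/* stands in for the CPU state */

static void save_gs_cb(struct gs_cb *cb)    { *cb = live; }
static void restore_gs_cb(struct gs_cb *cb) { live = *cb; }

int main(void)
{
	struct gs_cb task_copy = { .gsd = 0 };	/* stale saved copy */

	save_gs_cb(&task_copy);			/* refresh before a read */
	printf("read gsd=%#lx\n", task_copy.gsd);

	task_copy.gsd = 25;			/* ptrace writes a new block */
	restore_gs_cb(&task_copy);		/* make it take effect immediately */
	printf("live gsd=%#lx\n", live.gsd);
	return 0;
}
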
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 363000a..1020a11 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -26,6 +26,7 @@
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
+#include <linux/kmemleak.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irqflags.h>
@@ -207,6 +208,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 				kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
 			if (!mcesa_origin)
 				goto out;
+			/* The pointer is stored with mcesa_bits ORed in */
+			kmemleak_not_leak((void *) mcesa_origin);
 			mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
 		}
 	} else {
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index eefcb54..fb869b10 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -242,7 +242,7 @@ static void print_ext_name(struct seq_file *m, int lvl,
 
 static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
 {
-	if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+	if (uuid_is_null(&info->vm[i].uuid))
 		return;
 	seq_printf(m, "VM%02d UUID:            %pUb\n", i, &info->vm[i].uuid);
 }
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c3a52f9..192efdf 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -636,10 +636,10 @@ static void stp_work_fn(struct work_struct *work)
 		goto out_unlock;
 
 	memset(&stp_sync, 0, sizeof(stp_sync));
-	get_online_cpus();
+	cpus_read_lock();
 	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-	stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
-	put_online_cpus();
+	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
+	cpus_read_unlock();
 
 	if (!check_sync_clock())
 		/*
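
stp_work_fn() now takes the CPU hotplug lock with cpus_read_lock() and calls the _cpuslocked variant, which expects the lock to be held already, instead of letting stop_machine() try to acquire it a second time. A conceptual sketch of that caller-holds-the-lock convention, with a pthread rwlock standing in for the hotplug lock:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* _cpuslocked style: the caller is required to hold hotplug_lock */
static void sync_clock_cpuslocked(void)
{
	printf("synchronizing clocks with the hotplug lock held\n");
}

int main(void)
{
	pthread_rwlock_rdlock(&hotplug_lock);	/* cpus_read_lock() */
	sync_clock_cpuslocked();		/* stop_machine_cpuslocked() */
	pthread_rwlock_unlock(&hotplug_lock);	/* cpus_read_unlock() */
	return 0;
}
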
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f787b9d..442e542 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/cpu.h>
 #include <asm/fpu/api.h>
 #include "entry.h"
 
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 314e0ee..d94baa8 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -27,12 +27,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
-	if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
+	if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
 		return -EINVAL;
-	if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
+	if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
 		return -EINVAL;
 	clear_pt_regs_flag(regs, PIF_PER_TRAP);
-	auprobe->saved_per = psw_bits(regs->psw).r;
+	auprobe->saved_per = psw_bits(regs->psw).per;
 	auprobe->saved_int_code = regs->int_code;
 	regs->int_code = UPROBE_TRAP_NR;
 	regs->psw.addr = current->utask->xol_vaddr;
@@ -81,7 +81,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 	clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
 	update_cr_regs(current);
-	psw_bits(regs->psw).r = auprobe->saved_per;
+	psw_bits(regs->psw).per = auprobe->saved_per;
 	regs->int_code = auprobe->saved_int_code;
 
 	if (fixup & FIXUP_PSW_NORMAL)
@@ -372,8 +372,8 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
-	if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
-	    ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
+	if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
+	    ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
 	     !is_compat_task())) {
 		regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
 		do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 10516ae..b89d19f6 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -50,6 +50,56 @@ static struct page **vdso64_pagelist;
  */
 unsigned int __read_mostly vdso_enabled = 1;
 
+static int vdso_fault(const struct vm_special_mapping *sm,
+		      struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page **vdso_pagelist;
+	unsigned long vdso_pages;
+
+	vdso_pagelist = vdso64_pagelist;
+	vdso_pages = vdso64_pages;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		vdso_pagelist = vdso32_pagelist;
+		vdso_pages = vdso32_pages;
+	}
+#endif
+
+	if (vmf->pgoff >= vdso_pages)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = vdso_pagelist[vmf->pgoff];
+	get_page(vmf->page);
+	return 0;
+}
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+		       struct vm_area_struct *vma)
+{
+	unsigned long vdso_pages;
+
+	vdso_pages = vdso64_pages;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task())
+		vdso_pages = vdso32_pages;
+#endif
+
+	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
+		return -EFAULT;
+
+	current->mm->context.vdso_base = vma->vm_start;
+	return 0;
+}
+
+static const struct vm_special_mapping vdso_mapping = {
+	.name = "[vdso]",
+	.fault = vdso_fault,
+	.mremap = vdso_mremap,
+};
+
 static int __init vdso_setup(char *s)
 {
 	unsigned long val;
@@ -181,7 +231,7 @@ static void vdso_init_cr5(void)
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	struct page **vdso_pagelist;
+	struct vm_area_struct *vma;
 	unsigned long vdso_pages;
 	unsigned long vdso_base;
 	int rc;
@@ -194,13 +244,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!uses_interp)
 		return 0;
 
-	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-	if (is_compat_task()) {
-		vdso_pagelist = vdso32_pagelist;
+	if (is_compat_task())
 		vdso_pages = vdso32_pages;
-	}
 #endif
 	/*
 	 * vDSO has a problem and was disabled, just don't "enable" it for
@@ -209,8 +256,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (vdso_pages == 0)
 		return 0;
 
-	current->mm->context.vdso_base = 0;
-
 	/*
 	 * pick a base address for the vDSO in process space. We try to put
 	 * it at vdso_base which is the "natural" base for it, but we might
@@ -225,13 +270,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	}
 
 	/*
-	 * Put vDSO base into mm struct. We need to do this before calling
-	 * install_special_mapping or the perf counter mmap tracking code
-	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
-	 */
-	current->mm->context.vdso_base = vdso_base;
-
-	/*
 	 * our vma flags don't have VM_WRITE so by default, the process
 	 * isn't allowed to write those pages.
 	 * gdb can break that with ptrace interface, and thus trigger COW
@@ -241,24 +279,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * It's fine to use that for setting breakpoints in the vDSO code
 	 * pages though.
 	 */
-	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
-				     VM_READ|VM_EXEC|
-				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				     vdso_pagelist);
-	if (rc)
-		current->mm->context.vdso_base = 0;
+	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &vdso_mapping);
+	if (IS_ERR(vma)) {
+		rc = PTR_ERR(vma);
+		goto out_up;
+	}
+
+	current->mm->context.vdso_base = vdso_base;
+	rc = 0;
+
 out_up:
 	up_write(&mm->mmap_sem);
 	return rc;
 }
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
-		return "[vdso]";
-	return NULL;
-}
-
 static int __init vdso_init(void)
 {
 	int i;
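
Switching to _install_special_mapping() lets the vDSO resolve faults lazily through vdso_fault() and keep vdso_base up to date across mremap(), instead of relying on arch_vma_name(). A hedged userspace sketch of the fault callback's page-list lookup; page contents and sizes are arbitrary here:

#include <stdio.h>

#define VDSO_PAGES 2

static char page0[4096], page1[4096];
static char *vdso_pagelist[VDSO_PAGES] = { page0, page1 };

static char *vdso_fault_sketch(unsigned long pgoff)
{
	if (pgoff >= VDSO_PAGES)
		return NULL;		/* kernel version returns VM_FAULT_SIGBUS */
	return vdso_pagelist[pgoff];	/* kernel version takes a page reference */
}

int main(void)
{
	printf("pgoff 1 -> %p\n", (void *) vdso_fault_sketch(1));
	printf("pgoff 5 -> %p\n", (void *) vdso_fault_sketch(5));
	return 0;
}
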
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 072d84b..dd7178f 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -110,11 +110,10 @@ static inline u64 scale_vtime(u64 vtime)
 	return vtime;
 }
 
-static void account_system_index_scaled(struct task_struct *p,
-					u64 cputime, u64 scaled,
+static void account_system_index_scaled(struct task_struct *p, u64 cputime,
 					enum cpu_usage_stat index)
 {
-	p->stimescaled += cputime_to_nsecs(scaled);
+	p->stimescaled += cputime_to_nsecs(scale_vtime(cputime));
 	account_system_index_time(p, cputime_to_nsecs(cputime), index);
 }
 
@@ -176,14 +175,11 @@ static int do_account_vtime(struct task_struct *tsk)
 	}
 
 	if (system)
-		account_system_index_scaled(tsk, system, scale_vtime(system),
-					    CPUTIME_SYSTEM);
+		account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
 	if (hardirq)
-		account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq),
-					    CPUTIME_IRQ);
+		account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
 	if (softirq)
-		account_system_index_scaled(tsk, softirq, scale_vtime(softirq),
-					    CPUTIME_SOFTIRQ);
+		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
 	steal = S390_lowcore.steal_timer;
 	if ((s64) steal > 0) {
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d..875f8be 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -551,26 +551,26 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
 	int rc;
 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
 
-	if (!psw.t) {
+	if (!psw.dat) {
 		asce->val = 0;
 		asce->r = 1;
 		return 0;
 	}
 
-	if (mode == GACC_IFETCH)
-		psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
+	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
+		psw.as = PSW_BITS_AS_PRIMARY;
 
 	switch (psw.as) {
-	case PSW_AS_PRIMARY:
+	case PSW_BITS_AS_PRIMARY:
 		asce->val = vcpu->arch.sie_block->gcr[1];
 		return 0;
-	case PSW_AS_SECONDARY:
+	case PSW_BITS_AS_SECONDARY:
 		asce->val = vcpu->arch.sie_block->gcr[7];
 		return 0;
-	case PSW_AS_HOME:
+	case PSW_BITS_AS_HOME:
 		asce->val = vcpu->arch.sie_block->gcr[13];
 		return 0;
-	case PSW_AS_ACCREG:
+	case PSW_BITS_AS_ACCREG:
 		rc = ar_translation(vcpu, asce, ar, mode);
 		if (rc > 0)
 			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
@@ -771,7 +771,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 
 	if (!ctlreg0.lap)
 		return 0;
-	if (psw_bits(*psw).t && asce.p)
+	if (psw_bits(*psw).dat && asce.p)
 		return 0;
 	return 1;
 }
@@ -790,7 +790,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
 					 PROT_TYPE_LA);
 		ga &= PAGE_MASK;
-		if (psw_bits(*psw).t) {
+		if (psw_bits(*psw).dat) {
 			rc = guest_translate(vcpu, ga, pages, asce, mode);
 			if (rc < 0)
 				return rc;
@@ -831,7 +831,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		pages = vmalloc(nr_pages * sizeof(unsigned long));
 	if (!pages)
 		return -ENOMEM;
-	need_ipte_lock = psw_bits(*psw).t && !asce.r;
+	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
 	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
@@ -899,7 +899,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 					 mode, PROT_TYPE_LA);
 	}
 
-	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
+	if (psw_bits(*psw).dat && !asce.r) {	/* Use DAT? */
 		rc = guest_translate(vcpu, gva, gpa, asce, mode);
 		if (rc > 0)
 			return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region1_table_entry rfte;
 
 		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
@@ -1036,8 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region2_table_entry rste;
 
 		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
@@ -1064,8 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region3_table_entry rtte;
 
 		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
@@ -1101,8 +1099,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union segment_table_entry ste;
 
 		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
 			ste.val = ptr;
 			goto shadow_pgt;
 		}
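
When a real-space ASCE is faked as a region-first table, each level's entry is now derived by shifting the matching index field of the guest address (rfx, rsx, rtx, sx) into place, starting from 0 rather than from the ASCE origin. A quick sketch of those shifts, assuming 11-bit region/segment index fields:

#include <stdio.h>

int main(void)
{
	unsigned long vaddr = 0x0123456789abc000UL;
	unsigned long rfx = (vaddr >> 53) & 0x7ff;	/* region-first index */
	unsigned long rsx = (vaddr >> 42) & 0x7ff;	/* region-second index */
	unsigned long rtx = (vaddr >> 31) & 0x7ff;	/* region-third index */
	unsigned long sx  = (vaddr >> 20) & 0x7ff;	/* segment index */

	printf("rfx=%#lx rsx=%#lx rtx=%#lx sx=%#lx\n", rfx, rsx, rtx, sx);
	printf("fake region-second origin: %#lx\n", rfx << 53);
	return 0;
}
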
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 7ce47fd..bec42b8 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -57,9 +57,9 @@ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
 
-	if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
+	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
 		return ga;
-	if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
+	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
 		return ga & ((1UL << 31) - 1);
 	return ga & ((1UL << 24) - 1);
 }
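
kvm_s390_logical_to_effective() wraps guest addresses according to the PSW addressing mode; only the constant names change here. The masking rule itself, as a standalone sketch:

#include <stdio.h>

enum { AMODE_24BIT = 0, AMODE_31BIT = 1, AMODE_64BIT = 3 };

static unsigned long logical_to_effective(unsigned long ga, int amode)
{
	if (amode == AMODE_64BIT)
		return ga;
	if (amode == AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}

int main(void)
{
	printf("%#lx\n", logical_to_effective(~0UL, AMODE_31BIT));	/* 0x7fffffff */
	printf("%#lx\n", logical_to_effective(~0UL, AMODE_24BIT));	/* 0xffffff */
	return 0;
}
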
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 23d9a4e..c2e0ddc 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -613,15 +613,15 @@ int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 		 * instruction. Check primary and home space-switch-event
 		 * controls. (theoretically home -> home produced no event)
 		 */
-		if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
-		     (pssec(vcpu) || hssec(vcpu)))
+		if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
+		    (pssec(vcpu) || hssec(vcpu)))
 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 
 		/*
 		 * PT, PTI, PR, PC instruction operate on primary AS only. Check
 		 * if the primary-space-switch-event control was or got set.
 		 */
-		if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
+		if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
 		    (pssec(vcpu) || old_ssec(vcpu)))
 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 	}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8..2d120fe 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48..b0d7de5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		} else {
 			set_kvm_facility(kvm->arch.model.fac_mask, 72);
 			set_kvm_facility(kvm->arch.model.fac_list, 72);
-			kvm->arch.float_int.ais_enabled = 1;
 			r = 0;
 		}
 		mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -2069,6 +2067,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	if (!vcpu)
 		goto out;
 
+	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
 	if (!sie_page)
 		goto out_free_cpu;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c03106c..e53292a 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -361,7 +361,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 		}
 	}
 	if (m3 & SSKE_MB) {
-		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
+		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
 			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
 		else
 			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
@@ -374,7 +374,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
 {
 	vcpu->stat.instruction_ipte_interlock++;
-	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
+	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
 	kvm_s390_retry_instr(vcpu);
@@ -901,7 +901,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 		/* only support 2G frame size if EDAT2 is available and we are
 		   not in 24-bit addressing mode */
 		if (!test_kvm_facility(vcpu->kvm, 78) ||
-		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
+		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
 		break;
@@ -938,7 +938,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 		start += PAGE_SIZE;
 	}
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
-		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
+		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
 			vcpu->run->s.regs.gprs[reg2] = end;
 		} else {
 			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 1b553d8..049c3c4 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -149,7 +149,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 }
 
 static void walk_pud_level(struct seq_file *m, struct pg_state *st,
-			   pgd_t *pgd, unsigned long addr)
+			   p4d_t *p4d, unsigned long addr)
 {
 	unsigned int prot;
 	pud_t *pud;
@@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 
 	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
 		st->current_address = addr;
-		pud = pud_offset(pgd, addr);
+		pud = pud_offset(p4d, addr);
 		if (!pud_none(*pud))
 			if (pud_large(*pud)) {
 				prot = pud_val(*pud) &
@@ -172,6 +172,23 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
+static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
+			   pgd_t *pgd, unsigned long addr)
+{
+	p4d_t *p4d;
+	int i;
+
+	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
+		st->current_address = addr;
+		p4d = p4d_offset(pgd, addr);
+		if (!p4d_none(*p4d))
+			walk_pud_level(m, st, p4d, addr);
+		else
+			note_page(m, st, _PAGE_INVALID, 2);
+		addr += P4D_SIZE;
+	}
+}
+
 static void walk_pgd_level(struct seq_file *m)
 {
 	unsigned long addr = 0;
@@ -184,7 +201,7 @@ static void walk_pgd_level(struct seq_file *m)
 		st.current_address = addr;
 		pgd = pgd_offset_k(addr);
 		if (!pgd_none(*pgd))
-			walk_pud_level(m, &st, pgd, addr);
+			walk_p4d_level(m, &st, pgd, addr);
 		else
 			note_page(m, &st, _PAGE_INVALID, 1);
 		addr += PGDIR_SIZE;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 5845d30..14f2579 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -130,7 +130,7 @@ static int bad_address(void *p)
 
 static void dump_pagetable(unsigned long asce, unsigned long address)
 {
-	unsigned long *table = __va(asce & PAGE_MASK);
+	unsigned long *table = __va(asce & _ASCE_ORIGIN);
 
 	pr_alert("AS:%016lx ", asce);
 	switch (asce & _ASCE_TYPE_MASK) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 7f6db1e..4fb3d3c 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -125,7 +125,7 @@ static void gmap_radix_tree_free(struct radix_tree_root *root)
 	struct radix_tree_iter iter;
 	unsigned long indices[16];
 	unsigned long index;
-	void **slot;
+	void __rcu **slot;
 	int i, nr;
 
 	/* A radix tree is freed by deleting all of its entries */
@@ -150,7 +150,7 @@ static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
 	struct radix_tree_iter iter;
 	unsigned long indices[16];
 	unsigned long index;
-	void **slot;
+	void __rcu **slot;
 	int i, nr;
 
 	/* A radix tree is freed by deleting all of its entries */
@@ -537,6 +537,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	unsigned long *table;
 	spinlock_t *ptl;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	int rc;
@@ -573,7 +574,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	mm = gmap->mm;
 	pgd = pgd_offset(mm, vmaddr);
 	VM_BUG_ON(pgd_none(*pgd));
-	pud = pud_offset(pgd, vmaddr);
+	p4d = p4d_offset(pgd, vmaddr);
+	VM_BUG_ON(p4d_none(*p4d));
+	pud = pud_offset(p4d, vmaddr);
 	VM_BUG_ON(pud_none(*pud));
 	/* large puds cannot yet be handled */
 	if (pud_large(*pud))
@@ -1008,7 +1011,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
 				    struct gmap_rmap *rmap)
 {
-	void **slot;
+	void __rcu **slot;
 
 	BUG_ON(!gmap_is_shadow(sg));
 	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index b7b779c..8ecc25e 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -166,15 +166,15 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
 	return 1;
 }
 
-static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
+static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	unsigned long next;
 	pud_t *pudp, pud;
 
-	pudp = (pud_t *) pgdp;
-	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		pudp = (pud_t *) pgd_deref(pgd);
+	pudp = (pud_t *) p4dp;
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
+		pudp = (pud_t *) p4d_deref(p4d);
 	pudp += pud_index(addr);
 	do {
 		pud = *pudp;
@@ -194,6 +194,29 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 	return 1;
 }
 
+static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	p4d_t *p4dp, p4d;
+
+	p4dp = (p4d_t *) pgdp;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
+		p4dp = (p4d_t *) pgd_deref(pgd);
+	p4dp += p4d_index(addr);
+	do {
+		p4d = *p4dp;
+		barrier();
+		next = p4d_addr_end(addr, end);
+		if (p4d_none(p4d))
+			return 0;
+		if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
+			return 0;
+	} while (p4dp++, addr = next, addr != end);
+
+	return 1;
+}
+
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
@@ -228,7 +251,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			break;
-		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
+		if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
 			break;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_restore(flags);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 9b4050c..d3a5e39 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -162,16 +162,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp = NULL;
 
 	pgdp = pgd_offset(mm, addr);
-	pudp = pud_alloc(mm, pgdp, addr);
-	if (pudp) {
-		if (sz == PUD_SIZE)
-			return (pte_t *) pudp;
-		else if (sz == PMD_SIZE)
-			pmdp = pmd_alloc(mm, pudp, addr);
+	p4dp = p4d_alloc(mm, pgdp, addr);
+	if (p4dp) {
+		pudp = pud_alloc(mm, p4dp, addr);
+		if (pudp) {
+			if (sz == PUD_SIZE)
+				return (pte_t *) pudp;
+			else if (sz == PMD_SIZE)
+				pmdp = pmd_alloc(mm, pudp, addr);
+		}
 	}
 	return (pte_t *) pmdp;
 }
@@ -179,16 +183,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgdp;
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp = NULL;
 
 	pgdp = pgd_offset(mm, addr);
 	if (pgd_present(*pgdp)) {
-		pudp = pud_offset(pgdp, addr);
-		if (pud_present(*pudp)) {
-			if (pud_large(*pudp))
-				return (pte_t *) pudp;
-			pmdp = pmd_offset(pudp, addr);
+		p4dp = p4d_offset(pgdp, addr);
+		if (p4d_present(*p4dp)) {
+			pudp = pud_offset(p4dp, addr);
+			if (pud_present(*pudp)) {
+				if (pud_large(*pudp))
+					return (pte_t *) pudp;
+				pmdp = pmd_offset(pudp, addr);
+			}
 		}
 	}
 	return (pte_t *) pmdp;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ee6a1d3..3348e60 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -81,6 +81,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	unsigned long pgd_type, asce_bits;
+	psw_t psw;
 
 	init_mm.pgd = swapper_pg_dir;
 	if (VMALLOC_END > (1UL << 42)) {
@@ -100,7 +101,10 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
-	__arch_local_irq_stosm(0x04);
+	psw.mask = __extract_psw();
+	psw_bits(psw).dat = 1;
+	psw_bits(psw).as = PSW_BITS_AS_HOME;
+	__load_psw_mask(psw.mask);
 
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017dae..2e10d2b 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
@@ -120,7 +120,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 check_asce_limit:
 	if (addr + len > current->mm->context.asce_limit) {
-		rc = crst_table_upgrade(mm);
+		rc = crst_table_upgrade(mm, addr + len);
 		if (rc)
 			return (unsigned long) rc;
 	}
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
@@ -184,7 +184,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 check_asce_limit:
 	if (addr + len > current->mm->context.asce_limit) {
-		rc = crst_table_upgrade(mm);
+		rc = crst_table_upgrade(mm, addr + len);
 		if (rc)
 			return (unsigned long) rc;
 	}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 49e721f..1804815 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -229,14 +229,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
 	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
 }
 
-static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
+static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
 			  unsigned long flags)
 {
 	unsigned long next;
 	pud_t *pudp;
 	int rc = 0;
 
-	pudp = pud_offset(pgd, addr);
+	pudp = pud_offset(p4d, addr);
 	do {
 		if (pud_none(*pudp))
 			return -EINVAL;
@@ -259,6 +259,26 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
 	return rc;
 }
 
+static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
+			  unsigned long flags)
+{
+	unsigned long next;
+	p4d_t *p4dp;
+	int rc = 0;
+
+	p4dp = p4d_offset(pgd, addr);
+	do {
+		if (p4d_none(*p4dp))
+			return -EINVAL;
+		next = p4d_addr_end(addr, end);
+		rc = walk_pud_level(p4dp, addr, next, flags);
+		p4dp++;
+		addr = next;
+		cond_resched();
+	} while (addr < end && !rc);
+	return rc;
+}
+
 static DEFINE_MUTEX(cpa_mutex);
 
 static int change_page_attr(unsigned long addr, unsigned long end,
@@ -278,7 +298,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
 		if (pgd_none(*pgdp))
 			break;
 		next = pgd_addr_end(addr, end);
-		rc = walk_pud_level(pgdp, addr, next, flags);
+		rc = walk_p4d_level(pgdp, addr, next, flags);
 		if (rc)
 			break;
 		cond_resched();
@@ -319,6 +339,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 	unsigned long address;
 	int nr, i, j;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -326,7 +347,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 	for (i = 0; i < numpages;) {
 		address = page_to_phys(page + i);
 		pgd = pgd_offset_k(address);
-		pud = pud_offset(pgd, address);
+		p4d = p4d_offset(pgd, address);
+		pud = pud_offset(p4d, address);
 		pmd = pmd_offset(pud, address);
 		pte = pte_offset_kernel(pmd, address);
 		nr = (unsigned long)pte >> ilog2(sizeof(long));
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f502cbe..18918e3 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,29 +76,46 @@ static void __crst_table_upgrade(void *arg)
 	__tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm)
+int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 {
 	unsigned long *table, *pgd;
+	int rc, notify;
 
-	/* upgrade should only happen from 3 to 4 levels */
-	BUG_ON(mm->context.asce_limit != (1UL << 42));
-
-	table = crst_table_alloc(mm);
-	if (!table)
+	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
+	BUG_ON(mm->context.asce_limit < (1UL << 42));
+	if (end >= TASK_SIZE_MAX)
 		return -ENOMEM;
-
-	spin_lock_bh(&mm->page_table_lock);
-	pgd = (unsigned long *) mm->pgd;
-	crst_table_init(table, _REGION2_ENTRY_EMPTY);
-	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-	mm->pgd = (pgd_t *) table;
-	mm->context.asce_limit = 1UL << 53;
-	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
-	spin_unlock_bh(&mm->page_table_lock);
-
-	on_each_cpu(__crst_table_upgrade, mm, 0);
-	return 0;
+	rc = 0;
+	notify = 0;
+	while (mm->context.asce_limit < end) {
+		table = crst_table_alloc(mm);
+		if (!table) {
+			rc = -ENOMEM;
+			break;
+		}
+		spin_lock_bh(&mm->page_table_lock);
+		pgd = (unsigned long *) mm->pgd;
+		if (mm->context.asce_limit == (1UL << 42)) {
+			crst_table_init(table, _REGION2_ENTRY_EMPTY);
+			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
+			mm->pgd = (pgd_t *) table;
+			mm->context.asce_limit = 1UL << 53;
+			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+		} else {
+			crst_table_init(table, _REGION1_ENTRY_EMPTY);
+			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
+			mm->pgd = (pgd_t *) table;
+			mm->context.asce_limit = -PAGE_SIZE;
+			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+		}
+		notify = 1;
+		spin_unlock_bh(&mm->page_table_lock);
+	}
+	if (notify)
+		on_each_cpu(__crst_table_upgrade, mm, 0);
+	return rc;
 }
 
 void crst_table_downgrade(struct mm_struct *mm)
@@ -274,7 +291,7 @@ static void __tlb_remove_table(void *_table)
 	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 
 	switch (mask) {
-	case 0:		/* pmd or pud */
+	case 0:		/* pmd, pud, or p4d */
 		free_pages((unsigned long) table, 2);
 		break;
 	case 1:		/* lower 2K of a 4K page table */
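
crst_table_upgrade() now iterates, adding one translation level per pass until the address-space limit covers the requested end address, so a single call can go straight from three to five levels. A hedged sketch of that loop's arithmetic; the limits shown are the 3-, 4- and 5-level boundaries used above:

#include <stdio.h>

int main(void)
{
	unsigned long asce_limit = 1UL << 42;	/* start with 3 levels */
	unsigned long end = 1UL << 60;		/* request needs 5 levels */
	int levels = 3;

	while (asce_limit < end) {
		asce_limit = (asce_limit == (1UL << 42)) ? (1UL << 53)
							 : -4096UL;
		levels++;
	}
	printf("upgraded to %d levels, asce_limit=%#lx\n", levels, asce_limit);
	return 0;
}
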
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 947b66a..d4d409b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -610,6 +610,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pgste_t pgste;
@@ -618,7 +619,10 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 	bool dirty;
 
 	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return false;
+	pud = pud_alloc(mm, p4d, addr);
 	if (!pud)
 		return false;
 	pmd = pmd_alloc(mm, pud, addr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c33c94b..d839896 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,6 +38,17 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 	return (void *) memblock_alloc(size, size);
 }
 
+static inline p4d_t *vmem_p4d_alloc(void)
+{
+	p4d_t *p4d = NULL;
+
+	p4d = vmem_alloc_pages(2);
+	if (!p4d)
+		return NULL;
+	clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4);
+	return p4d;
+}
+
 static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
@@ -85,6 +96,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
 	unsigned long end = start + size;
 	unsigned long address = start;
 	pgd_t *pg_dir;
+	p4d_t *p4_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
@@ -102,12 +114,19 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
 	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
+			p4_dir = vmem_p4d_alloc();
+			if (!p4_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, p4_dir);
+		}
+		p4_dir = p4d_offset(pg_dir, address);
+		if (p4d_none(*p4_dir)) {
 			pu_dir = vmem_pud_alloc();
 			if (!pu_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pu_dir);
+			p4d_populate(&init_mm, p4_dir, pu_dir);
 		}
-		pu_dir = pud_offset(pg_dir, address);
+		pu_dir = pud_offset(p4_dir, address);
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
 		     !debug_pagealloc_enabled()) {
@@ -161,6 +180,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	unsigned long end = start + size;
 	unsigned long address = start;
 	pgd_t *pg_dir;
+	p4d_t *p4_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
@@ -172,7 +192,12 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 			address += PGDIR_SIZE;
 			continue;
 		}
-		pu_dir = pud_offset(pg_dir, address);
+		p4_dir = p4d_offset(pg_dir, address);
+		if (p4d_none(*p4_dir)) {
+			address += P4D_SIZE;
+			continue;
+		}
+		pu_dir = pud_offset(p4_dir, address);
 		if (pud_none(*pu_dir)) {
 			address += PUD_SIZE;
 			continue;
@@ -213,6 +238,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	unsigned long pgt_prot, sgt_prot;
 	unsigned long address = start;
 	pgd_t *pg_dir;
+	p4d_t *p4_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
@@ -227,13 +253,21 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 	for (address = start; address < end;) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
+			p4_dir = vmem_p4d_alloc();
+			if (!p4_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, p4_dir);
+		}
+
+		p4_dir = p4d_offset(pg_dir, address);
+		if (p4d_none(*p4_dir)) {
 			pu_dir = vmem_pud_alloc();
 			if (!pu_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pu_dir);
+			p4d_populate(&init_mm, p4_dir, pu_dir);
 		}
 
-		pu_dir = pud_offset(pg_dir, address);
+		pu_dir = pud_offset(p4_dir, address);
 		if (pud_none(*pu_dir)) {
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 8051df1..7b30af5 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -86,6 +86,25 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
 	return zdev;
 }
 
+void zpci_remove_reserved_devices(void)
+{
+	struct zpci_dev *tmp, *zdev;
+	enum zpci_state state;
+	LIST_HEAD(remove);
+
+	spin_lock(&zpci_list_lock);
+	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
+		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
+		    !clp_get_state(zdev->fid, &state) &&
+		    state == ZPCI_FN_STATE_RESERVED)
+			list_move_tail(&zdev->entry, &remove);
+	}
+	spin_unlock(&zpci_list_lock);
+
+	list_for_each_entry_safe(zdev, tmp, &remove, entry)
+		zpci_remove_device(zdev);
+}
+
 static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
 {
 	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
@@ -108,6 +127,7 @@ static int zpci_set_airq(struct zpci_dev *zdev)
 {
 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
 	struct zpci_fib fib = {0};
+	u8 status;
 
 	fib.isc = PCI_ISC;
 	fib.sum = 1;		/* enable summary notifications */
@@ -117,60 +137,58 @@ static int zpci_set_airq(struct zpci_dev *zdev)
 	fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
 	fib.aisbo = zdev->aisb & 63;
 
-	return zpci_mod_fc(req, &fib);
+	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
 }
 
-struct mod_pci_args {
-	u64 base;
-	u64 limit;
-	u64 iota;
-	u64 fmb_addr;
-};
-
-static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_clear_airq(struct zpci_dev *zdev)
 {
-	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
 	struct zpci_fib fib = {0};
+	u8 cc, status;
 
-	fib.pba = args->base;
-	fib.pal = args->limit;
-	fib.iota = args->iota;
-	fib.fmb_addr = args->fmb_addr;
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc == 3 || (cc == 1 && status == 24))
+		/* Function already gone or IRQs already deregistered. */
+		cc = 0;
 
-	return zpci_mod_fc(req, &fib);
+	return cc ? -EIO : 0;
 }
 
 /* Modify PCI: Register I/O address translation parameters */
 int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 		       u64 base, u64 limit, u64 iota)
 {
-	struct mod_pci_args args = { base, limit, iota, 0 };
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
+	struct zpci_fib fib = {0};
+	u8 status;
 
 	WARN_ON_ONCE(iota & 0x3fff);
-	args.iota |= ZPCI_IOTA_RTTO_FLAG;
-	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
+	fib.pba = base;
+	fib.pal = limit;
+	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
+	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
 }
 
 /* Modify PCI: Unregister I/O address translation parameters */
 int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 {
-	struct mod_pci_args args = { 0, 0, 0, 0 };
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
+	struct zpci_fib fib = {0};
+	u8 cc, status;
 
-	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
-}
-
-/* Modify PCI: Unregister adapter interruptions */
-static int zpci_clear_airq(struct zpci_dev *zdev)
-{
-	struct mod_pci_args args = { 0, 0, 0, 0 };
-
-	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc == 3) /* Function already gone. */
+		cc = 0;
+	return cc ? -EIO : 0;
 }
 
 /* Modify PCI: Set PCI function measurement parameters */
 int zpci_fmb_enable_device(struct zpci_dev *zdev)
 {
-	struct mod_pci_args args = { 0, 0, 0, 0 };
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+	struct zpci_fib fib = {0};
+	u8 cc, status;
 
 	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
 		return -EINVAL;
@@ -185,25 +203,35 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
 	atomic64_set(&zdev->mapped_pages, 0);
 	atomic64_set(&zdev->unmapped_pages, 0);
 
-	args.fmb_addr = virt_to_phys(zdev->fmb);
-	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+	fib.fmb_addr = virt_to_phys(zdev->fmb);
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc) {
+		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+		zdev->fmb = NULL;
+	}
+	return cc ? -EIO : 0;
 }
 
 /* Modify PCI: Disable PCI function measurement */
 int zpci_fmb_disable_device(struct zpci_dev *zdev)
 {
-	struct mod_pci_args args = { 0, 0, 0, 0 };
-	int rc;
+	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+	struct zpci_fib fib = {0};
+	u8 cc, status;
 
 	if (!zdev->fmb)
 		return -EINVAL;
 
 	/* Function measurement is disabled if fmb address is zero */
-	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
+	cc = zpci_mod_fc(req, &fib, &status);
+	if (cc == 3) /* Function already gone. */
+		cc = 0;
 
-	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
-	zdev->fmb = NULL;
-	return rc;
+	if (!cc) {
+		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+		zdev->fmb = NULL;
+	}
+	return cc ? -EIO : 0;
 }
 
 static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
@@ -372,22 +400,21 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 	struct msi_msg msg;
 	int rc, irq;
 
+	zdev->aisb = -1UL;
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
 
 	/* Allocate adapter summary indicator bit */
-	rc = -EIO;
 	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
 	if (aisb == -1UL)
-		goto out;
+		return -EIO;
 	zdev->aisb = aisb;
 
 	/* Create adapter interrupt vector */
-	rc = -ENOMEM;
 	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
 	if (!zdev->aibv)
-		goto out_si;
+		return -ENOMEM;
 
 	/* Wire up shortcut pointer */
 	zpci_aibv[aisb] = zdev->aibv;
@@ -398,10 +425,10 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 		rc = -EIO;
 		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
 		if (irq < 0)
-			goto out_msi;
+			return -ENOMEM;
 		rc = irq_set_msi_desc(irq, msi);
 		if (rc)
-			goto out_msi;
+			return rc;
 		irq_set_chip_and_handler(irq, &zpci_irq_chip,
 					 handle_simple_irq);
 		msg.data = hwirq;
@@ -415,27 +442,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 	/* Enable adapter interrupts */
 	rc = zpci_set_airq(zdev);
 	if (rc)
-		goto out_msi;
+		return rc;
 
 	return (msi_vecs == nvec) ? 0 : msi_vecs;
-
-out_msi:
-	for_each_pci_msi_entry(msi, pdev) {
-		if (hwirq-- == 0)
-			break;
-		irq_set_msi_desc(msi->irq, NULL);
-		irq_free_desc(msi->irq);
-		msi->msg.address_lo = 0;
-		msi->msg.address_hi = 0;
-		msi->msg.data = 0;
-		msi->irq = 0;
-	}
-	zpci_aibv[aisb] = NULL;
-	airq_iv_release(zdev->aibv);
-out_si:
-	airq_iv_free_bit(zpci_aisb_iv, aisb);
-out:
-	return rc;
 }
 
 void arch_teardown_msi_irqs(struct pci_dev *pdev)
@@ -451,6 +460,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
 	/* Release MSI interrupts */
 	for_each_pci_msi_entry(msi, pdev) {
+		if (!msi->irq)
+			continue;
 		if (msi->msi_attrib.is_msix)
 			__pci_msix_desc_mask_irq(msi, 1);
 		else
@@ -463,9 +474,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 		msi->irq = 0;
 	}
 
-	zpci_aibv[zdev->aisb] = NULL;
-	airq_iv_release(zdev->aibv);
-	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
+	if (zdev->aisb != -1UL) {
+		zpci_aibv[zdev->aisb] = NULL;
+		airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
+		zdev->aisb = -1UL;
+	}
+	if (zdev->aibv) {
+		airq_iv_release(zdev->aibv);
+		zdev->aibv = NULL;
+	}
 }
 
 static void zpci_map_resources(struct pci_dev *pdev)
@@ -719,6 +736,16 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
 {
 	if (zpci_unique_uid) {
 		zdev->domain = (u16) zdev->uid;
+		if (zdev->domain >= ZPCI_NR_DEVICES)
+			return 0;
+
+		spin_lock(&zpci_domain_lock);
+		if (test_bit(zdev->domain, zpci_domain)) {
+			spin_unlock(&zpci_domain_lock);
+			return -EEXIST;
+		}
+		set_bit(zdev->domain, zpci_domain);
+		spin_unlock(&zpci_domain_lock);
 		return 0;
 	}
 
@@ -735,7 +762,7 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
 
 static void zpci_free_domain(struct zpci_dev *zdev)
 {
-	if (zpci_unique_uid)
+	if (zdev->domain >= ZPCI_NR_DEVICES)
 		return;
 
 	spin_lock(&zpci_domain_lock);
@@ -755,6 +782,7 @@ void pcibios_remove_bus(struct pci_bus *bus)
 	list_del(&zdev->entry);
 	spin_unlock(&zpci_list_lock);
 
+	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
 	kfree(zdev);
 }
 
@@ -847,15 +875,14 @@ int zpci_create_device(struct zpci_dev *zdev)
 	return rc;
 }
 
-void zpci_stop_device(struct zpci_dev *zdev)
+void zpci_remove_device(struct zpci_dev *zdev)
 {
-	zpci_dma_exit_device(zdev);
-	/*
-	 * Note: SCLP disables fh via set-pci-fn so don't
-	 * do that here.
-	 */
+	if (!zdev->bus)
+		return;
+
+	pci_stop_root_bus(zdev->bus);
+	pci_remove_root_bus(zdev->bus);
 }
-EXPORT_SYMBOL_GPL(zpci_stop_device);
 
 int zpci_report_error(struct pci_dev *pdev,
 		      struct zpci_report_error_header *report)
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1c3332a..bd534b4 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -193,12 +193,12 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
 int clp_add_pci_device(u32 fid, u32 fh, int configured)
 {
 	struct zpci_dev *zdev;
-	int rc;
+	int rc = -ENOMEM;
 
 	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
 	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
 	if (!zdev)
-		return -ENOMEM;
+		goto error;
 
 	zdev->fh = fh;
 	zdev->fid = fid;
@@ -219,6 +219,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
 	return 0;
 
 error:
+	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
 	kfree(zdev);
 	return rc;
 }
@@ -295,8 +296,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
 	return rc;
 }
 
-static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
-			void (*cb)(struct clp_fh_list_entry *entry))
+static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
+			void (*cb)(struct clp_fh_list_entry *, void *))
 {
 	u64 resume_token = 0;
 	int entries, i, rc;
@@ -327,21 +328,13 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
 
 		resume_token = rrb->response.resume_token;
 		for (i = 0; i < entries; i++)
-			cb(&rrb->response.fh_list[i]);
+			cb(&rrb->response.fh_list[i], data);
 	} while (resume_token);
 out:
 	return rc;
 }
 
-static void __clp_add(struct clp_fh_list_entry *entry)
-{
-	if (!entry->vendor_id)
-		return;
-
-	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
-}
-
-static void __clp_rescan(struct clp_fh_list_entry *entry)
+static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 {
 	struct zpci_dev *zdev;
 
@@ -349,22 +342,11 @@ static void __clp_rescan(struct clp_fh_list_entry *entry)
 		return;
 
 	zdev = get_zdev_by_fid(entry->fid);
-	if (!zdev) {
+	if (!zdev)
 		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
-		return;
-	}
-
-	if (!entry->config_state) {
-		/*
-		 * The handle is already disabled, that means no iota/irq freeing via
-		 * the firmware interfaces anymore. Need to free resources manually
-		 * (DMA memory, debug, sysfs)...
-		 */
-		zpci_stop_device(zdev);
-	}
 }
 
-static void __clp_update(struct clp_fh_list_entry *entry)
+static void __clp_update(struct clp_fh_list_entry *entry, void *data)
 {
 	struct zpci_dev *zdev;
 
@@ -387,7 +369,7 @@ int clp_scan_pci_devices(void)
 	if (!rrb)
 		return -ENOMEM;
 
-	rc = clp_list_pci(rrb, __clp_add);
+	rc = clp_list_pci(rrb, NULL, __clp_add);
 
 	clp_free_block(rrb);
 	return rc;
@@ -398,11 +380,13 @@ int clp_rescan_pci_devices(void)
 	struct clp_req_rsp_list_pci *rrb;
 	int rc;
 
+	zpci_remove_reserved_devices();
+
 	rrb = clp_alloc_block(GFP_KERNEL);
 	if (!rrb)
 		return -ENOMEM;
 
-	rc = clp_list_pci(rrb, __clp_rescan);
+	rc = clp_list_pci(rrb, NULL, __clp_add);
 
 	clp_free_block(rrb);
 	return rc;
@@ -417,7 +401,40 @@ int clp_rescan_pci_devices_simple(void)
 	if (!rrb)
 		return -ENOMEM;
 
-	rc = clp_list_pci(rrb, __clp_update);
+	rc = clp_list_pci(rrb, NULL, __clp_update);
+
+	clp_free_block(rrb);
+	return rc;
+}
+
+struct clp_state_data {
+	u32 fid;
+	enum zpci_state state;
+};
+
+static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
+{
+	struct clp_state_data *sd = data;
+
+	if (entry->fid != sd->fid)
+		return;
+
+	sd->state = entry->config_state;
+}
+
+int clp_get_state(u32 fid, enum zpci_state *state)
+{
+	struct clp_req_rsp_list_pci *rrb;
+	struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+	int rc;
+
+	rrb = clp_alloc_block(GFP_KERNEL);
+	if (!rrb)
+		return -ENOMEM;
+
+	rc = clp_list_pci(rrb, &sd, __clp_get_state);
+	if (!rc)
+		*state = sd.state;
 
 	clp_free_block(rrb);
 	return rc;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 9081a57..8eb1cc3 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -601,7 +601,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
 	 */
 	WARN_ON(zdev->s390_domain);
 
-	zpci_unregister_ioat(zdev, 0);
+	if (zpci_unregister_ioat(zdev, 0))
+		return;
+
 	dma_cleanup_tables(zdev->dma_table);
 	zdev->dma_table = NULL;
 	vfree(zdev->iommu_bitmap);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index c2b27ad..0bbc04a 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -74,6 +74,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
 	struct pci_dev *pdev = NULL;
+	enum zpci_state state;
 	int ret;
 
 	if (zdev)
@@ -108,6 +109,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 			clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
 		break;
 	case 0x0303: /* Deconfiguration requested */
+		if (!zdev)
+			break;
 		if (pdev)
 			pci_stop_and_remove_bus_device_locked(pdev);
 
@@ -121,7 +124,9 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 			zdev->state = ZPCI_FN_STATE_STANDBY;
 
 		break;
-	case 0x0304: /* Configured -> Standby */
+	case 0x0304: /* Configured -> Standby|Reserved */
+		if (!zdev)
+			break;
 		if (pdev) {
 			/* Give the driver a hint that the function is
 			 * already unusable. */
@@ -132,6 +137,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		zdev->fh = ccdf->fh;
 		zpci_disable_device(zdev);
 		zdev->state = ZPCI_FN_STATE_STANDBY;
+		if (!clp_get_state(ccdf->fid, &state) &&
+		    state == ZPCI_FN_STATE_RESERVED) {
+			zpci_remove_device(zdev);
+		}
 		break;
 	case 0x0306: /* 0x308 or 0x302 for multiple devices */
 		clp_rescan_pci_devices();
@@ -139,8 +148,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	case 0x0308: /* Standby -> Reserved */
 		if (!zdev)
 			break;
-		pci_stop_root_bus(zdev->bus);
-		pci_remove_root_bus(zdev->bus);
+		zpci_remove_device(zdev);
 		break;
 	default:
 		break;
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index fa8d7d4..ea34086 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -40,20 +40,20 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
 	return cc;
 }
 
-int zpci_mod_fc(u64 req, struct zpci_fib *fib)
+u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
 {
-	u8 cc, status;
+	u8 cc;
 
 	do {
-		cc = __mpcifc(req, fib, &status);
+		cc = __mpcifc(req, fib, status);
 		if (cc == 2)
 			msleep(ZPCI_INSN_BUSY_DELAY);
 	} while (cc == 2);
 
 	if (cc)
-		zpci_err_insn(cc, status, req, 0);
+		zpci_err_insn(cc, *status, req, 0);
 
-	return (cc) ? -EIO : 0;
+	return cc;
 }
 
 /* Refresh PCI Translations */
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index be63fbd..025ea20 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -34,8 +34,6 @@ static struct facility_def facility_defs[] = {
 			18, /* long displacement facility */
 #endif
 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-			7,  /* stfle */
-			17, /* message security assist */
 			21, /* extended-immediate facility */
 			25, /* store clock fast */
 #endif
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index d9a922d..2992745 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -13,7 +13,6 @@ struct task_struct;
  */
 extern void (*cpu_wait)(void);
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
 			unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index b15bf6b..c94ee54 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y	+= siginfo.h
diff --git a/arch/score/include/uapi/asm/siginfo.h b/arch/score/include/uapi/asm/siginfo.h
deleted file mode 100644
index 87ca356..0000000
--- a/arch/score/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_SIGINFO_H
-#define _ASM_SCORE_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _ASM_SCORE_SIGINFO_H */
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index eb64d7a..6e20241 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 	return 1;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
 	if (!task || task == current || task->state == TASK_RUNNING)
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 4e21949..3554fca 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -10,7 +10,7 @@
 	bool "Board Described by Device Tree"
 	select OF
 	select OF_EARLY_FLATTREE
-	select CLKSRC_OF
+	select TIMER_OF
 	select COMMON_CLK
 	select GENERIC_CALIBRATE_DELAY
 	help
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 1fb6d57..4feb7c8 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -119,7 +119,7 @@ static void __init sh_of_mem_reserve(void)
 static void __init sh_of_time_init(void)
 {
 	pr_info("SH generic board support: scanning for clocksource devices\n");
-	clocksource_probe();
+	timer_probe();
 }
 
 static void __init sh_of_setup(char **cmdline_p)
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index cf2a750..590c91a 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -29,7 +29,6 @@
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
-generic-y += siginfo.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += statfs.h
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index b15bf6b..b55fc2a 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,2 +1,4 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += siginfo.h
diff --git a/arch/sh/include/uapi/asm/ioctls.h b/arch/sh/include/uapi/asm/ioctls.h
index c9903e5..eec7901 100644
--- a/arch/sh/include/uapi/asm/ioctls.h
+++ b/arch/sh/include/uapi/asm/ioctls.h
@@ -93,6 +93,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define TIOCSERCONFIG	_IO('T', 83) /* 0x5453 */
 #define TIOCSERGWILD	_IOR('T', 84,  int) /* 0x5454 */
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 08e7af0..6a1a129 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0..5639c9f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@
 	depends on SPARC64 && SMP
 
 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
@@ -573,9 +577,6 @@
 	depends on COMPAT && SYSVIPC
 	default y
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 endmenu
 
 source "net/Kconfig"
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0db..83b36a5 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)	\
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6..2cddcda 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 26693703..522b43d 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159..b395e56 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
 	.current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 				    unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee90..f04dc5a 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/siginfo.h b/arch/sparc/include/asm/siginfo.h
deleted file mode 100644
index 48c34c1..0000000
--- a/arch/sparc/include/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __SPARC_SIGINFO_H
-#define __SPARC_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-#ifdef CONFIG_COMPAT
-
-struct compat_siginfo;
-
-#endif /* CONFIG_COMPAT */
-
-#endif /* !(__SPARC_SIGINFO_H) */
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6c..9dca7a8 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;
 
 	u64			dev_no;
+	u64			id;
 
 	unsigned long		channel_id;
 
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h
index 06b3f6c..6d27398 100644
--- a/arch/sparc/include/uapi/asm/ioctls.h
+++ b/arch/sparc/include/uapi/asm/ioctls.h
@@ -27,7 +27,7 @@
 #define TIOCGRS485	_IOR('T', 0x41, struct serial_rs485)
 #define TIOCSRS485	_IOWR('T', 0x42, struct serial_rs485)
 
-/* Note that all the ioctls that are not available in Linux have a 
+/* Note that all the ioctls that are not available in Linux have a
  * double underscore on the front to: a) avoid some programs to
  * think we support some ioctls under Linux (autoconfiguration stuff)
  */
@@ -88,6 +88,7 @@
 #define TIOCGPTN	_IOR('t', 134, unsigned int) /* Get Pty Number */
 #define TIOCSPTLCK	_IOW('t', 135, int) /* Lock/unlock PTY */
 #define TIOCSIG		_IOW('t', 136, int) /* Generate signal on Pty slave */
+#define TIOCGPTPEER	_IOR('t', 137, int) /* Safely open the slave */
 
 /* Little f */
 #define FIOCLEX		_IO('f', 1)
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7..f87265a 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 		pbuf.req.handle = cp->handle;
 		pbuf.req.major = 1;
 		pbuf.req.minor = 0;
-		strcpy(pbuf.req.svc_id, cp->service_id);
+		strcpy(pbuf.id_buf, cp->service_id);
 
 		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248a..99dd133 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure the mondo block is 64-byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 07933b9..93adde1 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
 		val = 0x01000000;
 	}
 
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	*insn = val;
 	flushi(insn);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 #endif
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551..6ae1e77 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b6dac8e..9245f93 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -177,14 +177,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 }
 
 /*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_thread_info(tsk)->kpc;
-}
-
-/*
  * Free current thread data structures etc..
  */
 void exit_thread(struct task_struct *tsk)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1badc49..b96104d 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
 
 #endif
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long ret = 0xdeadbeefUL;
-	
-	if (ti && ti->ksp) {
-		unsigned long *sp;
-		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-		    sp[14]) {
-			unsigned long *fp;
-			fp = (unsigned long *)(sp[14] + STACK_BIAS);
-			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-				ret = fp[15];
-		}
-	}
-	return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac..fdf3104 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520e..043544d 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cf..07c0df9 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@
 	.type	copy_tsb,#function
 copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 * %o4=page_size_shift
 			 */
 	sethi		%uhi(TSB_PASS_BITS), %g7
 	srlx		%o3, 4, %o3
-	add		%o0, %o1, %g1	/* end of old tsb */
+	add		%o0, %o1, %o1	/* end of old tsb */
 	sllx		%g7, 32, %g7
 	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
 
+	mov		%o4, %g1	/* page_size_shift */
+
 661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word		661b
@@ -486,9 +489,9 @@
 	/* This can definitely be computed faster... */
 	srlx		%o0, 4, %o5	/* Build index */
 	and		%o5, 511, %o5	/* Mask index */
-	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx		%o5, %g1, %o5	/* Put into vaddr position */
 	or		%o4, %o5, %o4	/* Full VADDR. */
-	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx		%o4, %g1, %o4	/* Shift down to create index */
 	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -496,7 +499,7 @@
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
 
 80:	add		%o0, 16, %o0
-	cmp		%o0, %g1
+	cmp		%o0, %o1
 	bne,pt		%xcc, 90b
 	 nop
 
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f65..efe93ab 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:       BTRAP(0x44)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857..7dd9b57 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -105,6 +105,7 @@ static ssize_t devspec_show(struct device *dev,
 
 	return sprintf(buf, "%s\n", str);
 }
+static DEVICE_ATTR_RO(devspec);
 
 static ssize_t type_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -112,6 +113,7 @@ static ssize_t type_show(struct device *dev,
 	struct vio_dev *vdev = to_vio_dev(dev);
 	return sprintf(buf, "%s\n", vdev->type);
 }
+static DEVICE_ATTR_RO(type);
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
@@ -120,17 +122,19 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute vio_dev_attrs[] = {
-	__ATTR_RO(devspec),
-	__ATTR_RO(type),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
-};
+static struct attribute *vio_dev_attrs[] = {
+	&dev_attr_devspec.attr,
+	&dev_attr_type.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+ };
+ATTRIBUTE_GROUPS(vio_dev);
 
 static struct bus_type vio_bus_type = {
 	.name		= "vio",
-	.dev_attrs	= vio_dev_attrs,
+	.dev_groups	= vio_dev_groups,
 	.uevent         = vio_hotplug,
 	.match		= vio_bus_match,
 	.probe		= vio_device_probe,
@@ -302,13 +306,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}
 
 	vdev->dev.parent = parent;
@@ -351,27 +358,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);
 
-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;
 
-	return 0;
+	return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;
 
-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
 				vio_md_node_match);
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
 
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2..07c03e7 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 0000000..d6b6c97
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.align	4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+	mov	%o1, %g1
+	srl	%o3, 0, %g4
+	mulx	%g4, %g1, %o1
+	srlx	%g1, 0x20, %g3
+	mulx	%g3, %g4, %g5
+	sllx	%g5, 0x20, %o5
+	srl	%g1, 0, %g4
+	sub	%o1, %o5, %o5
+	srlx	%o5, 0x20, %o5
+	addcc	%g5, %o5, %g5
+	srlx	%o3, 0x20, %o5
+	mulx	%g4, %o5, %g4
+	mulx	%g3, %o5, %o5
+	sethi	%hi(0x80000000), %g3
+	addcc	%g5, %g4, %g5
+	srlx	%g5, 0x20, %g5
+	add	%g3, %g3, %g3
+	movcc	%xcc, %g0, %g3
+	addcc	%o5, %g5, %o5
+	sllx	%g4, 0x20, %g4
+	add	%o1, %g4, %o1
+	add	%o5, %g3, %g2
+	mulx	%g1, %o2, %g1
+	add	%g1, %g2, %g1
+	mulx	%o0, %o3, %o0
+	retl
+	 add	%g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38..88855e3 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653..3c40ebd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
 	}
 
 	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-		pr_warn("hugepagesz=%llu not supported by MMU.\n",
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
 			hugepage_size);
 		goto out;
 	}
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that is added into per_cpu_secondary_mm
+	 * is going to go through the get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Update the version to current on those CPUs that had valid
+	 * secondary contexts.
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will go into get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b..0d4b998 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			tsb_index == MM_TSB_BASE ?
+			PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6c..fcf4d27 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@
 	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl		xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl		xcall_kgdb_capture
 xcall_kgdb_capture:
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0bc9968..f71e520 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t)   ((t)->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
index 07802d5..93931a4 100644
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
 void arch_jump_label_transform(struct jump_entry *e,
 				enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 
 	__jump_label_transform(e, type);
 	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
 
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c..94709ab 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
  * has an opportunity to return -EFAULT to the user if needed.
  * The 64-bit routines just return a "long long" with the value,
  * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
  *
  * Note that the caller is advised to issue a suitable L1 or L2
  * prefetch on the address being manipulated to avoid extra stalls.
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153..03e5cc4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 8541027..b55fe9b 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -534,7 +534,7 @@ static void ubd_handler(void)
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
 			blk_end_request(
 				(*irq_req_buffer)[count]->req,
-				0,
+				BLK_STS_OK,
 				(*irq_req_buffer)[count]->length
 			);
 			kfree((*irq_req_buffer)[count]);
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 2d1e0dd..f6d1a3f 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 static inline void mm_copy_segments(struct mm_struct *from_mm,
 				    struct mm_struct *new_mm)
 {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 64a1fd0..7b56401 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
 	__attribute__((__section__(".data..init_irqstack"))) =
 		{ INIT_THREAD_INFO(init_task) };
 
-unsigned long thread_saved_pc(struct task_struct *task)
-{
-	/* FIXME: Need to look up userspace_pid by cpu */
-	return os_process_pc(userspace_pid[0]);
-}
-
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];
 
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index e9ad511..7a53a55 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -44,7 +44,6 @@
 generic-y += setup.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
-generic-y += siginfo.h
 generic-y += signal.h
 generic-y += sizes.h
 generic-y += socket.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 13a97aa..1c44d3b 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@
 include include/uapi/asm-generic/Kbuild.asm
 
 generic-y += kvm_para.h
+generic-y += siginfo.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4ccfacc..e767ed2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -69,7 +69,7 @@
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select BUILDTIME_EXTABLE_SORT
@@ -87,6 +87,8 @@
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_IOMAP
+	select GENERIC_IRQ_EFFECTIVE_AFF_MASK	if SMP
+	select GENERIC_IRQ_MIGRATION		if SMP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PENDING_IRQ		if SMP
@@ -166,6 +168,7 @@
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
 	select IRQ_FORCED_THREADING
+	select PCI_LOCKLESS_CONFIG
 	select PERF_EVENTS
 	select RTC_LIB
 	select RTC_MC146818_LIB
@@ -1082,7 +1085,7 @@
 	def_bool y
 
 config X86_MCE_INJECT
-	depends on X86_MCE && X86_LOCAL_APIC && X86_MCELOG_LEGACY
+	depends on X86_MCE && X86_LOCAL_APIC && DEBUG_FS
 	tristate "Machine check injector support"
 	---help---
 	  Provide support for injecting machine checks for testing purposes.
@@ -2776,10 +2779,6 @@
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on SYSVIPC
-
-config KEYS_COMPAT
-	def_bool y
-	depends on KEYS
 endif
 
 endmenu
@@ -2797,6 +2796,9 @@
 	bool
 	depends on STA2X11
 
+config HAVE_GENERIC_GUP
+	def_bool y
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index bf240b9..ad2db82 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -257,8 +257,6 @@
 
 drivers-$(CONFIG_FB) += arch/x86/video/
 
-drivers-$(CONFIG_RAS) += arch/x86/ras/
-
 ####
 # boot loader support. Several targets are kept for legacy purposes
 
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index 73ccf63..9dc1ce6 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -13,7 +13,7 @@ static inline char rdfs8(addr_t addr)
 	return *((char *)(fs + addr));
 }
 #include "../cmdline.c"
-static unsigned long get_cmd_line_ptr(void)
+unsigned long get_cmd_line_ptr(void)
 {
 	unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
 
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cbf4b87..c3e869e 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1046,9 +1046,31 @@ struct boot_params *efi_main(struct efi_config *c,
 	memset((char *)gdt->address, 0x0, gdt->size);
 	desc = (struct desc_struct *)gdt->address;
 
-	/* The first GDT is a dummy and the second is unused. */
-	desc += 2;
+	/* The first GDT is a dummy. */
+	desc++;
 
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/* __KERNEL32_CS */
+		desc->limit0 = 0xffff;
+		desc->base0 = 0x0000;
+		desc->base1 = 0x0000;
+		desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+		desc->s = DESC_TYPE_CODE_DATA;
+		desc->dpl = 0;
+		desc->p = 1;
+		desc->limit = 0xf;
+		desc->avl = 0;
+		desc->l = 0;
+		desc->d = SEG_OP_SIZE_32BIT;
+		desc->g = SEG_GRANULARITY_4KB;
+		desc->base2 = 0x00;
+		desc++;
+	} else {
+		/* Second entry is unused on 32-bit */
+		desc++;
+	}
+
+	/* __KERNEL_CS */
 	desc->limit0 = 0xffff;
 	desc->base0 = 0x0000;
 	desc->base1 = 0x0000;
@@ -1058,12 +1080,18 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->p = 1;
 	desc->limit = 0xf;
 	desc->avl = 0;
-	desc->l = 0;
-	desc->d = SEG_OP_SIZE_32BIT;
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		desc->l = 1;
+		desc->d = 0;
+	} else {
+		desc->l = 0;
+		desc->d = SEG_OP_SIZE_32BIT;
+	}
 	desc->g = SEG_GRANULARITY_4KB;
 	desc->base2 = 0x00;
-
 	desc++;
+
+	/* __KERNEL_DS */
 	desc->limit0 = 0xffff;
 	desc->base0 = 0x0000;
 	desc->base1 = 0x0000;
@@ -1077,24 +1105,25 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->d = SEG_OP_SIZE_32BIT;
 	desc->g = SEG_GRANULARITY_4KB;
 	desc->base2 = 0x00;
-
-#ifdef CONFIG_X86_64
-	/* Task segment value */
 	desc++;
-	desc->limit0 = 0x0000;
-	desc->base0 = 0x0000;
-	desc->base1 = 0x0000;
-	desc->type = SEG_TYPE_TSS;
-	desc->s = 0;
-	desc->dpl = 0;
-	desc->p = 1;
-	desc->limit = 0x0;
-	desc->avl = 0;
-	desc->l = 0;
-	desc->d = 0;
-	desc->g = SEG_GRANULARITY_4KB;
-	desc->base2 = 0x00;
-#endif /* CONFIG_X86_64 */
+
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/* Task segment value */
+		desc->limit0 = 0x0000;
+		desc->base0 = 0x0000;
+		desc->base1 = 0x0000;
+		desc->type = SEG_TYPE_TSS;
+		desc->s = 0;
+		desc->dpl = 0;
+		desc->p = 1;
+		desc->limit = 0x0;
+		desc->avl = 0;
+		desc->l = 0;
+		desc->d = 0;
+		desc->g = SEG_GRANULARITY_4KB;
+		desc->base2 = 0x00;
+		desc++;
+	}
 
 	asm volatile("cli");
 	asm volatile ("lgdt %0" : : "m" (*gdt));
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d2ae1f8..fbf4c32 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -346,6 +346,48 @@
 	/* Set up the stack */
 	leaq	boot_stack_end(%rbx), %rsp
 
+#ifdef CONFIG_X86_5LEVEL
+	/* Check if 5-level paging has already been enabled */
+	movq	%cr4, %rax
+	testl	$X86_CR4_LA57, %eax
+	jnz	lvl5
+
+	/*
+	 * At this point we are in long mode with 4-level paging enabled,
+	 * but we want to enable 5-level paging.
+	 *
+	 * The problem is that we cannot do it directly. Setting LA57 in
+	 * long mode would trigger #GP. So we need to switch off long mode
+	 * first.
+	 *
+	 * NOTE: This is not going to work if the bootloader put us above
+	 * the 4G limit.
+	 *
+	 * The first step is to go into compatibility mode.
+	 */
+
+	/* Clear additional page table */
+	leaq	lvl5_pgtable(%rbx), %rdi
+	xorq	%rax, %rax
+	movq	$(PAGE_SIZE/8), %rcx
+	rep	stosq
+
+	/*
+	 * Set up the current CR3 value as the first and only entry in a new
+	 * top-level page table.
+	 */
+	movq	%cr3, %rdi
+	leaq	0x7 (%rdi), %rax
+	movq	%rax, lvl5_pgtable(%rbx)
+
+	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+	pushq	$__KERNEL32_CS
+	leaq	compatible_mode(%rip), %rax
+	pushq	%rax
+	lretq
+lvl5:
+#endif
+
 	/* Zero EFLAGS */
 	pushq	$0
 	popfq
@@ -429,6 +471,44 @@
 	jmp	*%rax
 
 	.code32
+#ifdef CONFIG_X86_5LEVEL
+compatible_mode:
+	/* Setup data and stack segments */
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %ss
+
+	/* Disable paging */
+	movl	%cr0, %eax
+	btrl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/* Point CR3 to 5-level paging */
+	leal	lvl5_pgtable(%ebx), %eax
+	movl	%eax, %cr3
+
+	/* Enable PAE and LA57 mode */
+	movl	%cr4, %eax
+	orl	$(X86_CR4_PAE | X86_CR4_LA57), %eax
+	movl	%eax, %cr4
+
+	/* Calculate address we are running at */
+	call	1f
+1:	popl	%edi
+	subl	$1b, %edi
+
+	/* Prepare stack for far return to Long Mode */
+	pushl	$__KERNEL_CS
+	leal	lvl5(%edi), %eax
+	push	%eax
+
+	/* Enable paging back */
+	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
+	movl	%eax, %cr0
+
+	lret
+#endif
+
 no_longmode:
 	/* This isn't an x86-64 CPU so hang */
 1:
@@ -442,7 +522,7 @@
 	.word	gdt_end - gdt
 	.long	gdt
 	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
 	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
 	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
 	.quad	0x0080890000000000	/* TS descriptor */
@@ -486,3 +566,7 @@
 	.balign 4096
 pgtable:
 	.fill BOOT_PGT_SIZE, 1, 0
+#ifdef CONFIG_X86_5LEVEL
+lvl5_pgtable:
+	.fill PAGE_SIZE, 1, 0
+#endif
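For reference, a C-level sketch of the CR4.LA57 test that the new assembly performs before deciding whether to take the compatibility-mode detour; this is not part of the patch and the helper name is made up. X86_CR4_LA57 (bit 12 of CR4) is the same flag the assembly tests:

/* Illustrative only: in-kernel code would use X86_CR4_LA57 from processor-flags.h. */
static inline int la57_already_enabled(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	return !!(cr4 & (1UL << 12));	/* X86_CR4_LA57 */
}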
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 54c24f0..91f27ab 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -9,16 +9,42 @@
  * contain the entire properly aligned running kernel image.
  *
  */
+
+/*
+ * isspace() in linux/ctype.h is expected by next_args() to filter
+ * out "space/lf/tab". While boot/ctype.h conflicts with linux/ctype.h,
+ * since isdigit() is implemented in both of them. Hence disable it
+ * here.
+ */
+#define BOOT_CTYPE_H
+
+/*
+ * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
+ * However, both lib/ctype.c and lib/cmdline.c bring in EXPORT_SYMBOL,
+ * which is meaningless here and causes compile errors in some cases.
+ * So do not include linux/export.h and define EXPORT_SYMBOL(sym)
+ * as empty.
+ */
+#define _LINUX_EXPORT_H
+#define EXPORT_SYMBOL(sym)
+
 #include "misc.h"
 #include "error.h"
-#include "../boot.h"
+#include "../string.h"
 
 #include <generated/compile.h>
 #include <linux/module.h>
 #include <linux/uts.h>
 #include <linux/utsname.h>
+#include <linux/ctype.h>
 #include <generated/utsrelease.h>
 
+/* Macros used by the included decompressor code below. */
+#define STATIC
+#include <linux/decompress/mm.h>
+
+extern unsigned long get_cmd_line_ptr(void);
+
 /* Simplified build-specific string for starting entropy. */
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
 		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
@@ -62,6 +88,11 @@ struct mem_vector {
 
 static bool memmap_too_large;
 
+
+/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
+unsigned long long mem_limit = ULLONG_MAX;
+
+
 enum mem_avoid_index {
 	MEM_AVOID_ZO_RANGE = 0,
 	MEM_AVOID_INITRD,
@@ -85,49 +116,14 @@ static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
 	return true;
 }
 
-/**
- *	_memparse - Parse a string with mem suffixes into a number
- *	@ptr: Where parse begins
- *	@retptr: (output) Optional pointer to next char after parse completes
- *
- *	Parses a string into a number.  The number stored at @ptr is
- *	potentially suffixed with K, M, G, T, P, E.
- */
-static unsigned long long _memparse(const char *ptr, char **retptr)
+char *skip_spaces(const char *str)
 {
-	char *endptr;	/* Local pointer to end of parsed string */
-
-	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
-
-	switch (*endptr) {
-	case 'E':
-	case 'e':
-		ret <<= 10;
-	case 'P':
-	case 'p':
-		ret <<= 10;
-	case 'T':
-	case 't':
-		ret <<= 10;
-	case 'G':
-	case 'g':
-		ret <<= 10;
-	case 'M':
-	case 'm':
-		ret <<= 10;
-	case 'K':
-	case 'k':
-		ret <<= 10;
-		endptr++;
-	default:
-		break;
-	}
-
-	if (retptr)
-		*retptr = endptr;
-
-	return ret;
+	while (isspace(*str))
+		++str;
+	return (char *)str;
 }
+#include "../../../../lib/ctype.c"
+#include "../../../../lib/cmdline.c"
 
 static int
 parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
@@ -142,40 +138,41 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
 		return -EINVAL;
 
 	oldp = p;
-	*size = _memparse(p, &p);
+	*size = memparse(p, &p);
 	if (p == oldp)
 		return -EINVAL;
 
 	switch (*p) {
-	case '@':
-		/* Skip this region, usable */
-		*start = 0;
-		*size = 0;
-		return 0;
 	case '#':
 	case '$':
 	case '!':
-		*start = _memparse(p + 1, &p);
+		*start = memparse(p + 1, &p);
+		return 0;
+	case '@':
+		/* memmap=nn@ss specifies a usable region, which should be skipped */
+		*size = 0;
+		/* Fall through */
+	default:
+		/*
+		 * If only a size and no offset is specified, memmap=nn[KMG]
+		 * behaves the same as mem=nn[KMG]. It limits the maximum address
+		 * the system can use. Regions above the limit should be avoided.
+		 */
+		*start = 0;
 		return 0;
 	}
 
 	return -EINVAL;
 }
 
-static void mem_avoid_memmap(void)
+static void mem_avoid_memmap(char *str)
 {
-	char arg[128];
+	static int i;
 	int rc;
-	int i;
-	char *str;
 
-	/* See if we have any memmap areas */
-	rc = cmdline_find_option("memmap", arg, sizeof(arg));
-	if (rc <= 0)
+	if (i >= MAX_MEMMAP_REGIONS)
 		return;
 
-	i = 0;
-	str = arg;
 	while (str && (i < MAX_MEMMAP_REGIONS)) {
 		int rc;
 		unsigned long long start, size;
@@ -188,9 +185,14 @@ static void mem_avoid_memmap(void)
 		if (rc < 0)
 			break;
 		str = k;
-		/* A usable region that should not be skipped */
-		if (size == 0)
+
+		if (start == 0) {
+			/* Store the specified memory limit if size > 0 */
+			if (size > 0)
+				mem_limit = size;
+
 			continue;
+		}
 
 		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
 		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
@@ -202,6 +204,57 @@ static void mem_avoid_memmap(void)
 		memmap_too_large = true;
 }
 
+static int handle_mem_memmap(void)
+{
+	char *args = (char *)get_cmd_line_ptr();
+	size_t len = strlen((char *)args);
+	char *tmp_cmdline;
+	char *param, *val;
+	u64 mem_size;
+
+	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+		return 0;
+
+	tmp_cmdline = malloc(len + 1);
+	if (!tmp_cmdline)
+		error("Failed to allocate space for tmp_cmdline");
+
+	memcpy(tmp_cmdline, args, len);
+	tmp_cmdline[len] = 0;
+	args = tmp_cmdline;
+
+	/* Chew leading spaces */
+	args = skip_spaces(args);
+
+	while (*args) {
+		args = next_arg(args, &param, &val);
+		/* Stop at -- */
+		if (!val && strcmp(param, "--") == 0) {
+			warn("Only '--' specified in cmdline");
+			free(tmp_cmdline);
+			return -1;
+		}
+
+		if (!strcmp(param, "memmap")) {
+			mem_avoid_memmap(val);
+		} else if (!strcmp(param, "mem")) {
+			char *p = val;
+
+			if (!strcmp(p, "nopentium"))
+				continue;
+			mem_size = memparse(p, &p);
+			if (mem_size == 0) {
+				free(tmp_cmdline);
+				return -EINVAL;
+			}
+			mem_limit = mem_size;
+		}
+	}
+
+	free(tmp_cmdline);
+	return 0;
+}
+
 /*
  * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
  * The mem_avoid array is used to store the ranges that need to be avoided
@@ -323,7 +376,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 	/* We don't need to set a mapping for setup_data. */
 
 	/* Mark the memmap regions we need to avoid */
-	mem_avoid_memmap();
+	handle_mem_memmap();
 
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
 	/* Make sure video RAM can be used. */
@@ -432,7 +485,8 @@ static void process_e820_entry(struct boot_e820_entry *entry,
 {
 	struct mem_vector region, overlap;
 	struct slot_area slot_area;
-	unsigned long start_orig;
+	unsigned long start_orig, end;
+	struct boot_e820_entry cur_entry;
 
 	/* Skip non-RAM entries. */
 	if (entry->type != E820_TYPE_RAM)
@@ -446,8 +500,15 @@ static void process_e820_entry(struct boot_e820_entry *entry,
 	if (entry->addr + entry->size < minimum)
 		return;
 
-	region.start = entry->addr;
-	region.size = entry->size;
+	/* Ignore entries above memory limit */
+	end = min(entry->size + entry->addr, mem_limit);
+	if (entry->addr >= end)
+		return;
+	cur_entry.addr = entry->addr;
+	cur_entry.size = end - entry->addr;
+
+	region.start = cur_entry.addr;
+	region.size = cur_entry.size;
 
 	/* Give up if slot area array is full. */
 	while (slot_area_index < MAX_SLOT_AREA) {
@@ -461,7 +522,7 @@ static void process_e820_entry(struct boot_e820_entry *entry,
 		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
 
 		/* Did we raise the address above this e820 region? */
-		if (region.start > entry->addr + entry->size)
+		if (region.start > cur_entry.addr + cur_entry.size)
 			return;
 
 		/* Reduce size by any delta from the original address. */
@@ -564,9 +625,6 @@ void choose_random_location(unsigned long input,
 {
 	unsigned long random_addr, min_addr;
 
-	/* By default, keep output position unchanged. */
-	*virt_addr = *output;
-
 	if (cmdline_find_option_bool("nokaslr")) {
 		warn("KASLR disabled: 'nokaslr' on cmdline.");
 		return;
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b3c5a5f0..00241c8 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 				  unsigned long output_len)
 {
 	const unsigned long kernel_total_size = VO__end - VO__text;
-	unsigned long virt_addr = (unsigned long)output;
+	unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
 
 	/* Retain x86 boot parameters pointer passed from startup_32/64. */
 	boot_params = rmode;
@@ -390,6 +390,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 #ifdef CONFIG_X86_64
 	if (heap > 0x3fffffffffffUL)
 		error("Destination address too large");
+	if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
+		error("Destination virtual address is beyond the kernel mapping area");
 #else
 	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
@@ -397,7 +399,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 #ifndef CONFIG_RELOCATABLE
 	if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
 		error("Destination address does not match LOAD_PHYSICAL_ADDR");
-	if ((unsigned long)output != virt_addr)
+	if (virt_addr != LOAD_PHYSICAL_ADDR)
 		error("Destination virtual address changed when not relocatable");
 #endif
 
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 1c8355e..766a521 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
 					  unsigned long output_size,
 					  unsigned long *virt_addr)
 {
-	/* No change from existing output location. */
-	*virt_addr = *output;
 }
 #endif
 
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 1d78f17..28029be 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -63,7 +63,7 @@ static void *alloc_pgt_page(void *context)
 static struct alloc_pgt_data pgt_data;
 
 /* The top level page table entry pointer. */
-static unsigned long level4p;
+static unsigned long top_level_pgt;
 
 /*
  * Mapping information structure passed to kernel_ident_mapping_init().
@@ -91,9 +91,15 @@ void initialize_identity_maps(void)
 	 * If we came here via startup_32(), cr3 will be _pgtable already
 	 * and we must append to the existing area instead of entirely
 	 * overwriting it.
+	 *
+	 * With 5-level paging, we use '_pgtable' to allocate the p4d page
+	 * table; the top-level page table is allocated separately.
+	 *
+	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
+	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
 	 */
-	level4p = read_cr3();
-	if (level4p == (unsigned long)_pgtable) {
+	top_level_pgt = read_cr3_pa();
+	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
 		debug_putstr("booted via startup_32()\n");
 		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
@@ -103,7 +109,7 @@ void initialize_identity_maps(void)
 		pgt_data.pgt_buf = _pgtable;
 		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
 		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
-		level4p = (unsigned long)alloc_pgt_page(&pgt_data);
+		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
 	}
 }
 
@@ -123,7 +129,7 @@ void add_identity_map(unsigned long start, unsigned long size)
 		return;
 
 	/* Build the mapping. */
-	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
+	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
 				  start, end);
 }
 
@@ -134,5 +140,5 @@ void add_identity_map(unsigned long start, unsigned long size)
  */
 void finalize_identity_maps(void)
 {
-	write_cr3(level4p);
+	write_cr3(top_level_pgt);
 }
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 1eb7d29..15d9f74 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -65,23 +65,3 @@
 	popw	%es
 	retl
 ENDPROC(copy_to_fs)
-
-#if 0 /* Not currently used, but can be enabled as needed */
-GLOBAL(copy_from_gs)
-	pushw	%ds
-	pushw	%gs
-	popw	%ds
-	calll	memcpy
-	popw	%ds
-	retl
-ENDPROC(copy_from_gs)
-
-GLOBAL(copy_to_gs)
-	pushw	%es
-	pushw	%gs
-	popw	%es
-	calll	memcpy
-	popw	%es
-	retl
-ENDPROC(copy_to_gs)
-#endif
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 5457b02..630e366 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -122,6 +122,14 @@ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int bas
 	return result;
 }
 
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+	if (*cp == '-')
+		return -simple_strtoull(cp + 1, endp, base);
+
+	return simple_strtoull(cp, endp, base);
+}
+
 /**
  * strlen - Find the length of a string
  * @s: The string to be sized
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 113588d..f274a50 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -22,6 +22,7 @@ extern int strcmp(const char *str1, const char *str2);
 extern int strncmp(const char *cs, const char *ct, size_t count);
 extern size_t strlen(const char *s);
 extern char *strstr(const char *s1, const char *s2);
+extern char *strchr(const char *s, int c);
 extern size_t strnlen(const char *s, size_t maxlen);
 extern unsigned int atou(const char *s);
 extern unsigned long long simple_strtoull(const char *cp, char **endp,
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 34b3fa2..9e32d40 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 				$(comma)4)$(comma)%ymm2,yes,no)
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f87563..2e14acc 100644
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
index 41089e7..45b4fca 100644
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -2,6 +2,8 @@
 # Arch-specific CryptoAPI modules.
 #
 
+OBJECT_FILES_NON_STANDARD := y
+
 avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
                                 $(comma)4)$(comma)%ymm2,yes,no)
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4a4c083..a9a8027 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -265,7 +265,8 @@
 	 * If width of "canonical tail" ever becomes variable, this will need
 	 * to be updated to remain correct on both old and new CPUs.
 	 *
-	 * Change top 16 bits to be the sign-extension of 47th bit
+	 * Change top bits to match most significant bit (47th or 56th bit
+	 * depending on paging mode) in the address.
 	 */
 	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
 	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f..ff1ea2f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1750,6 +1750,8 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
 	return ret;
 }
 
+static struct attribute_group x86_pmu_attr_group;
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1813,6 +1815,14 @@ static int __init init_hw_perf_events(void)
 			x86_pmu_events_group.attrs = tmp;
 	}
 
+	if (x86_pmu.attrs) {
+		struct attribute **tmp;
+
+		tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
+		if (!WARN_ON(!tmp))
+			x86_pmu_attr_group.attrs = tmp;
+	}
+
 	pr_info("... version:                %d\n",     x86_pmu.version);
 	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
 	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
@@ -2101,8 +2111,7 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->active_mm)
-		load_mm_cr4(current->active_mm);
+	load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2224,7 +2233,6 @@ void perf_check_microcode(void)
 	if (x86_pmu.check_microcode)
 		x86_pmu.check_microcode();
 }
-EXPORT_SYMBOL_GPL(perf_check_microcode);
 
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
@@ -2255,7 +2263,7 @@ static struct pmu pmu = {
 void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
-	struct cyc2ns_data *data;
+	struct cyc2ns_data data;
 	u64 offset;
 
 	userpg->cap_user_time = 0;
@@ -2267,17 +2275,17 @@ void arch_perf_update_userpage(struct perf_event *event,
 	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
-	data = cyc2ns_read_begin();
+	cyc2ns_read_begin(&data);
 
-	offset = data->cyc2ns_offset + __sched_clock_offset;
+	offset = data.cyc2ns_offset + __sched_clock_offset;
 
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
 	 */
 	userpg->cap_user_time = 1;
-	userpg->time_mult = data->cyc2ns_mul;
-	userpg->time_shift = data->cyc2ns_shift;
+	userpg->time_mult = data.cyc2ns_mul;
+	userpg->time_shift = data.cyc2ns_shift;
 	userpg->time_offset = offset - now;
 
 	/*
@@ -2289,7 +2297,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_zero = offset;
 	}
 
-	cyc2ns_read_end(data);
+	cyc2ns_read_end();
 }
 
 void
@@ -2334,7 +2342,7 @@ static unsigned long get_segment_base(unsigned int segment)
 
 		/* IRQs are off, so this synchronizes with smp_store_release */
 		ldt = lockless_dereference(current->active_mm->context.ldt);
-		if (!ldt || idx > ldt->size)
+		if (!ldt || idx > ldt->nr_entries)
 			return 0;
 
 		desc = &ldt->entries[idx];
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4..aa62437 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
  [ C(DTLB) ] = {
 	[ C(OP_READ) ] = {
 		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_WRITE) ] = {
 		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
 	},
 	[ C(OP_PREFETCH) ] = {
 		[ C(RESULT_ACCESS) ] = 0x0,
@@ -3160,6 +3160,19 @@ static int intel_pmu_cpu_prepare(int cpu)
 	return -ENOMEM;
 }
 
+static void flip_smm_bit(void *data)
+{
+	unsigned long set = *(unsigned long *)data;
+
+	if (set > 0) {
+		msr_set_bit(MSR_IA32_DEBUGCTLMSR,
+			    DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
+	} else {
+		msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
+			      DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
+	}
+}
+
 static void intel_pmu_cpu_starting(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -3174,6 +3187,8 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	cpuc->lbr_sel = NULL;
 
+	flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+
 	if (!cpuc->shared_regs)
 		return;
 
@@ -3410,12 +3425,10 @@ static void intel_snb_check_microcode(void)
 	int pebs_broken = 0;
 	int cpu;
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
 			break;
 	}
-	put_online_cpus();
 
 	if (pebs_broken == x86_pmu.pebs_broken)
 		return;
@@ -3488,7 +3501,9 @@ static bool check_msr(unsigned long msr, u64 mask)
 static __init void intel_sandybridge_quirk(void)
 {
 	x86_pmu.check_microcode = intel_snb_check_microcode;
+	cpus_read_lock();
 	intel_snb_check_microcode();
+	cpus_read_unlock();
 }
 
 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -3595,6 +3610,52 @@ static struct attribute *hsw_events_attrs[] = {
 	NULL
 };
 
+static ssize_t freeze_on_smi_show(struct device *cdev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
+}
+
+static DEFINE_MUTEX(freeze_on_smi_mutex);
+
+static ssize_t freeze_on_smi_store(struct device *cdev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	unsigned long val;
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if (val > 1)
+		return -EINVAL;
+
+	mutex_lock(&freeze_on_smi_mutex);
+
+	if (x86_pmu.attr_freeze_on_smi == val)
+		goto done;
+
+	x86_pmu.attr_freeze_on_smi = val;
+
+	get_online_cpus();
+	on_each_cpu(flip_smm_bit, &val, 1);
+	put_online_cpus();
+done:
+	mutex_unlock(&freeze_on_smi_mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(freeze_on_smi);
+
+static struct attribute *intel_pmu_attrs[] = {
+	&dev_attr_freeze_on_smi.attr,
+	NULL,
+};
+
 __init int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -3641,6 +3702,8 @@ __init int intel_pmu_init(void)
 
 	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
 
+
+	x86_pmu.attrs			= intel_pmu_attrs;
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events, when not running in a hypervisor:
@@ -4112,13 +4175,12 @@ static __init int fixup_ht_bug(void)
 
 	lockup_detector_resume();
 
-	get_online_cpus();
+	cpus_read_lock();
 
-	for_each_online_cpu(c) {
+	for_each_online_cpu(c)
 		free_excl_cntrs(c);
-	}
 
-	put_online_cpus();
+	cpus_read_unlock();
 	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
 	return 0;
 }
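The freeze_on_smi attribute added above is a plain sysfs file on the core PMU device. A hedged userspace sketch that enables it; the path is an assumption based on where the attribute group is registered, so check the running system before relying on it:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the core PMU's freeze_on_smi attribute. */
	FILE *f = fopen("/sys/devices/cpu/freeze_on_smi", "w");

	if (!f) {
		perror("freeze_on_smi");
		return 1;
	}
	fputs("1\n", f);	/* sets DEBUGCTLMSR_FREEZE_IN_SMM on all CPUs */
	fclose(f);
	return 0;
}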
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8c00dc0..2521f77 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1682,7 +1682,7 @@ static int __init intel_cqm_init(void)
 	 *
 	 * Also, check that the scales match on all cpus.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
 
@@ -1746,14 +1746,14 @@ static int __init intel_cqm_init(void)
 	 * Setup the hot cpu notifier once we are sure cqm
 	 * is enabled to avoid notifier leak.
 	 */
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
-			  "perf/x86/cqm:starting",
-			  intel_cqm_cpu_starting, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
-			  NULL, intel_cqm_cpu_exit);
-
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_STARTING,
+				     "perf/x86/cqm:starting",
+				     intel_cqm_cpu_starting, NULL);
+	cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_ONLINE,
+				     "perf/x86/cqm:online",
+				     NULL, intel_cqm_cpu_exit);
 out:
-	put_online_cpus();
+	cpus_read_unlock();
 
 	if (ret) {
 		kfree(str);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index f924629..eb26165 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -18,7 +18,7 @@ enum {
 	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
 };
 
-static enum {
+static const enum {
 	LBR_EIP_FLAGS		= 1,
 	LBR_TSX			= 2,
 } lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
@@ -287,7 +287,7 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
 /*
  * If quirk is needed, ensure sign extension is 61 bits:
  */
-u64 lbr_from_signext_quirk_rd(u64 val)
+static u64 lbr_from_signext_quirk_rd(u64 val)
 {
 	if (static_branch_unlikely(&lbr_from_quirk_key)) {
 		/*
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 758c1aa..44ec523 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1170,7 +1170,7 @@ static int uncore_event_cpu_online(unsigned int cpu)
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
 			box = pmu->boxes[pkg];
-			if (!box && atomic_inc_return(&box->refcnt) == 1)
+			if (box && atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
 	}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index be3d362..53728ee 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -562,6 +562,9 @@ struct x86_pmu {
 	ssize_t		(*events_sysfs_show)(char *page, u64 config);
 	struct attribute **cpu_events;
 
+	unsigned long	attr_freeze_on_smi;
+	struct attribute **attrs;
+
 	/*
 	 * CPU Hotplug hooks
 	 */
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 00c88a0..da181ad 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,6 +3,7 @@
 
 #include <linux/ioport.h>
 #include <linux/pci.h>
+#include <linux/refcount.h>
 
 struct amd_nb_bus_dev_range {
 	u8 bus;
@@ -55,7 +56,7 @@ struct threshold_bank {
 	struct threshold_block	*blocks;
 
 	/* initialized to the number of CPUs on the node sharing this bank */
-	atomic_t		cpus;
+	refcount_t		cpus;
 };
 
 struct amd_northbridge {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bdffcd9..5f01671 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -252,6 +252,8 @@ static inline int x2apic_enabled(void) { return 0; }
 #define	x2apic_supported()	(0)
 #endif /* !CONFIG_X86_X2APIC */
 
+struct irq_data;
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -296,9 +298,9 @@ struct apic {
 	/* Can't be NULL on 64-bit */
 	unsigned long (*set_apic_id)(unsigned int id);
 
-	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-				      const struct cpumask *andmask,
-				      unsigned int *apicid);
+	int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
+				  struct irq_data *irqdata,
+				  unsigned int *apicid);
 
 	/* ipi */
 	void (*send_IPI)(int cpu, int vector);
@@ -540,28 +542,12 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 
 #endif
 
-static inline int
-flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			    const struct cpumask *andmask,
-			    unsigned int *apicid)
-{
-	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
-				 cpumask_bits(andmask)[0] &
-				 cpumask_bits(cpu_online_mask)[0] &
-				 APIC_ALL_CPUS;
-
-	if (likely(cpu_mask)) {
-		*apicid = (unsigned int)cpu_mask;
-		return 0;
-	} else {
-		return -EINVAL;
-	}
-}
-
-extern int
-default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask,
-			       unsigned int *apicid);
+extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				   struct irq_data *irqdata,
+				   unsigned int *apicid);
+extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
+				      struct irq_data *irqdata,
+				      unsigned int *apicid);
 
 static inline void
 flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index caa5798..33380b8 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
-	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-	return *v;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 2f77bce..d2ff779 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -74,7 +74,7 @@ struct efi_scratch {
 	__kernel_fpu_begin();						\
 									\
 	if (efi_scratch.use_pgd) {					\
-		efi_scratch.prev_cr3 = read_cr3();			\
+		efi_scratch.prev_cr3 = __read_cr3();			\
 		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
 		__flush_tlb_all();					\
 	}								\
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index b8ad261..c66d19e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,6 +29,7 @@ struct pt_regs;
 	} while (0)
 
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
 
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 59405a2..9b76cd3 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -22,8 +22,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 16d3fa2..668cca5 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -29,7 +29,6 @@ struct irq_desc;
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index a210eba..023b4a9 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -55,7 +55,8 @@ extern struct irq_domain *
 irq_remapping_get_irq_domain(struct irq_alloc_info *info);
 
 /* Create PCI MSI/MSIx irqdomain, use @parent as the parent irqdomain. */
-extern struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent);
+extern struct irq_domain *
+arch_create_remap_msi_irq_domain(struct irq_domain *par, const char *n, int id);
 
 /* Get parent irqdomain for interrupt remapping irqdomain */
 static inline struct irq_domain *arch_get_ir_parent_domain(void)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0559626..722d0e5 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
+	bool tf;	/* TF value before instruction (after for syscall/sysret) */
 
 	bool have_exception;
 	struct x86_exception exception;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3f9a3d2..1812649 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -285,10 +285,6 @@ int mce_notify_irq(void);
 
 DECLARE_PER_CPU(struct mce, injectm);
 
-extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
-				    const char __user *ubuf,
-				    size_t usize, loff_t *off));
-
 /* Disable CMCI/polling for MCA bank claimed by firmware */
 extern void mce_disable_bank(int bank);
 
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index f9813b6..79b647a7 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -37,12 +37,6 @@ typedef struct {
 #endif
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 68b329d..ecfcb66 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -47,7 +47,7 @@ struct ldt_struct {
 	 * allocations, but it's not worth trying to optimize.
 	 */
 	struct desc_struct *entries;
-	unsigned int size;
+	unsigned int nr_entries;
 };
 
 /*
@@ -87,22 +87,46 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 	 */
 
 	if (unlikely(ldt))
-		set_ldt(ldt->entries, ldt->size);
+		set_ldt(ldt->entries, ldt->nr_entries);
 	else
 		clear_LDT();
 #else
 	clear_LDT();
 #endif
+}
+
+static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+	/*
+	 * Load the LDT if either the old or new mm had an LDT.
+	 *
+	 * An mm will never go from having an LDT to not having an LDT.  Two
+	 * mms never share an LDT, so we don't gain anything by checking to
+	 * see whether the LDT changed.  There's also no guarantee that
+	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
+	 * then prev->context.ldt will also be non-NULL.
+	 *
+	 * If we really cared, we could optimize the case where prev == next
+	 * and we're exiting lazy mode.  Most of the time, if this happens,
+	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
+	 * used by legacy code and emulators where we don't need this level of
+	 * performance.
+	 *
+	 * This uses | instead of || because it generates better code.
+	 */
+	if (unlikely((unsigned long)prev->context.ldt |
+		     (unsigned long)next->context.ldt))
+		load_mm_ldt(next);
+#endif
 
 	DEBUG_LOCKS_WARN_ON(preemptible());
 }
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 static inline int init_new_context(struct task_struct *tsk,
@@ -220,18 +244,6 @@ static inline int vma_pkey(struct vm_area_struct *vma)
 }
 #endif
 
-static inline bool __pkru_allows_pkey(u16 pkey, bool write)
-{
-	u32 pkru = read_pkru();
-
-	if (!__pkru_allows_read(pkru, pkey))
-		return false;
-	if (write && !__pkru_allows_write(pkru, pkey))
-		return false;
-
-	return true;
-}
-
 /*
  * We only want to enforce protection keys on the current process
  * because we effectively have no access to PKRU for other
@@ -268,4 +280,23 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+
+/*
+ * This can be used from process context to figure out what the value of
+ * CR3 is without needing to do a (slow) __read_cr3().
+ *
+ * It's intended to be used for code like KVM that sneakily changes CR3
+ * and needs to restore it.  It needs to be used very carefully.
+ */
+static inline unsigned long __get_current_cr3_fast(void)
+{
+	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
+
+	/* For now, be very restrictive about when this can be called. */
+	VM_WARN_ON(in_nmi() || !in_atomic());
+
+	VM_BUG_ON(cr3 != __read_cr3());
+	return cr3;
+}
+
 #endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fba1007..2b58c8c 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>
 
 /*
@@ -137,7 +136,6 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
 	}
 }
 
-#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick)
 #define hv_init_timer(timer, tick) wrmsrl(timer, tick)
 #define hv_init_timer_config(config, val) wrmsrl(config, val)
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 673f9ac..d406894 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -137,6 +137,8 @@
 #define DEBUGCTLMSR_BTS_OFF_OS		(1UL <<  9)
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
+#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT	14
+#define DEBUGCTLMSR_FREEZE_IN_SMM	(1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
 
 #define MSR_PEBS_FRONTEND		0x000003f7
 
@@ -249,9 +251,13 @@
 #define HWP_MIN_PERF(x) 		(x & 0xff)
 #define HWP_MAX_PERF(x) 		((x & 0xff) << 8)
 #define HWP_DESIRED_PERF(x)		((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x)	((x & 0xff) << 24)
-#define HWP_ACTIVITY_WINDOW(x)		((x & 0xff3) << 32)
-#define HWP_PACKAGE_CONTROL(x)		((x & 0x1) << 42)
+#define HWP_ENERGY_PERF_PREFERENCE(x)	(((unsigned long long) x & 0xff) << 24)
+#define HWP_EPP_PERFORMANCE		0x00
+#define HWP_EPP_BALANCE_PERFORMANCE	0x80
+#define HWP_EPP_BALANCE_POWERSAVE	0xC0
+#define HWP_EPP_POWERSAVE		0xFF
+#define HWP_ACTIVITY_WINDOW(x)		((unsigned long long)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)		((unsigned long long)(x & 0x1) << 42)
 
 /* IA32_HWP_STATUS */
 #define HWP_GUARANTEED_CHANGE(x)	(x & 0x1)
@@ -474,9 +480,11 @@
 #define MSR_MISC_PWR_MGMT		0x000001aa
 
 #define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
-#define ENERGY_PERF_BIAS_PERFORMANCE	0
-#define ENERGY_PERF_BIAS_NORMAL		6
-#define ENERGY_PERF_BIAS_POWERSAVE	15
+#define ENERGY_PERF_BIAS_PERFORMANCE		0
+#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE	4
+#define ENERGY_PERF_BIAS_NORMAL			6
+#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE	8
+#define ENERGY_PERF_BIAS_POWERSAVE		15
 
 #define MSR_IA32_PACKAGE_THERM_STATUS		0x000001b1
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 55fa56f..9ccac19 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -61,7 +61,7 @@ static inline void write_cr2(unsigned long x)
 	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
 }
 
-static inline unsigned long read_cr3(void)
+static inline unsigned long __read_cr3(void)
 {
 	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
 }
@@ -118,7 +118,7 @@ static inline u64 paravirt_read_msr(unsigned msr)
 static inline void paravirt_write_msr(unsigned msr,
 				      unsigned low, unsigned high)
 {
-	return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
 }
 
 static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
@@ -312,11 +312,9 @@ static inline void __flush_tlb_single(unsigned long addr)
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    struct mm_struct *mm,
-				    unsigned long start,
-				    unsigned long end)
+				    const struct flush_tlb_info *info)
 {
-	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7465d6f..cb976ba 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -51,6 +51,7 @@ struct mm_struct;
 struct desc_struct;
 struct task_struct;
 struct cpumask;
+struct flush_tlb_info;
 
 /*
  * Wrapper type for pointers to code which uses the non-standard
@@ -223,9 +224,7 @@ struct pv_mmu_ops {
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
-				 struct mm_struct *mm,
-				 unsigned long start,
-				 unsigned long end);
+				 const struct flush_tlb_info *info);
 
 	/* Hooks for allocating and freeing a pagetable top-level */
 	int  (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index f513cc2..473a729 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -77,14 +77,8 @@ static inline bool is_vmd(struct pci_bus *bus)
 
 extern unsigned int pcibios_assign_all_busses(void);
 extern int pci_legacy_init(void);
-# ifdef CONFIG_ACPI
-#  define x86_default_pci_init pci_acpi_init
-# else
-#  define x86_default_pci_init pci_legacy_init
-# endif
 #else
-# define pcibios_assign_all_busses()	0
-# define x86_default_pci_init		NULL
+static inline int pcibios_assign_all_busses(void) { return 0; }
 #endif
 
 extern unsigned long pci_mem_start;
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 50d35e3..c8821ba 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -212,4 +212,51 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
 #define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
 #define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
 
+#define gup_get_pte gup_get_pte
+/*
+ * WARNING: only to be used in the get_user_pages_fast() implementation.
+ *
+ * With get_user_pages_fast(), we walk down the pagetables without taking
+ * any locks.  For this we would like to load the pointers atomically,
+ * but that is not possible (without expensive cmpxchg8b) on PAE.  What
+ * we do have is the guarantee that a PTE will only either go from not
+ * present to present, or present to not present or both -- it will not
+ * switch to a completely different present page without a TLB flush in
+ * between; something that we are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ *   ptep->pte_high = h;
+ *   smp_wmb();
+ *   ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ *   ptep->pte_low = 0;
+ *   smp_wmb();
+ *   ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' iff pte_high
+ * sees 'h'. We load pte_high *after* loading pte_low, which ensures we
+ * don't see an older value of pte_high.  *Then* we recheck pte_low,
+ * which ensures that we haven't picked up a changed pte high. We might
+ * have gotten rubbish values from pte_low and pte_high, but we are
+ * guaranteed that pte_low will not have the present bit set *unless*
+ * it is 'l'. Because get_user_pages_fast() only operates on present ptes
+ * we're safe.
+ */
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+	pte_t pte;
+
+	do {
+		pte.pte_low = ptep->pte_low;
+		smp_rmb();
+		pte.pte_high = ptep->pte_high;
+		smp_rmb();
+	} while (unlikely(pte.pte_low != ptep->pte_low));
+
+	return pte;
+}
+
 #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f5af95a..77037b6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -244,6 +244,11 @@ static inline int pud_devmap(pud_t pud)
 	return 0;
 }
 #endif
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+	return 0;
+}
 #endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -917,7 +922,7 @@ extern pgd_t trampoline_pgd_entry;
 static inline void __meminit init_trampoline_default(void)
 {
 	/* Default trampoline pgd value */
-	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
 }
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
@@ -1185,6 +1190,54 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags)
 #endif
 }
 
+static inline bool __pkru_allows_pkey(u16 pkey, bool write)
+{
+	u32 pkru = read_pkru();
+
+	if (!__pkru_allows_read(pkru, pkey))
+		return false;
+	if (write && !__pkru_allows_write(pkru, pkey))
+		return false;
+
+	return true;
+}
+
+/*
+ * 'pteval' can come from a PTE, PMD or PUD.  We only check
+ * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
+ * same value on all 3 types.
+ */
+static inline bool __pte_access_permitted(unsigned long pteval, bool write)
+{
+	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
+
+	if (write)
+		need_pte_bits |= _PAGE_RW;
+
+	if ((pteval & need_pte_bits) != need_pte_bits)
+		return 0;
+
+	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
+}
+
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+	return __pte_access_permitted(pte_val(pte), write);
+}
+
+#define pmd_access_permitted pmd_access_permitted
+static inline bool pmd_access_permitted(pmd_t pmd, bool write)
+{
+	return __pte_access_permitted(pmd_val(pmd), write);
+}
+
+#define pud_access_permitted pud_access_permitted
+static inline bool pud_access_permitted(pud_t pud, bool write)
+{
+	return __pte_access_permitted(pud_val(pud), write);
+}
+
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 9991224..2160c1f 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,15 +14,17 @@
 #include <linux/bitops.h>
 #include <linux/threads.h>
 
+extern p4d_t level4_kernel_pgt[512];
+extern p4d_t level4_ident_pgt[512];
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
 extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pgd_t init_top_pgt[];
 
-#define swapper_pg_dir init_level4_pgt
+#define swapper_pg_dir init_top_pgt
 
 extern void paging_init(void);
 
@@ -227,6 +229,20 @@ extern void cleanup_highmap(void);
 extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
 extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
 
-#endif /* !__ASSEMBLY__ */
+#define gup_fast_permitted gup_fast_permitted
+static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
+		int write)
+{
+	unsigned long len, end;
 
+	len = (unsigned long)nr_pages << PAGE_SHIFT;
+	end = start + len;
+	if (end < start)
+		return false;
+	if (end >> __VIRTUAL_MASK_SHIFT)
+		return false;
+	return true;
+}
+
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 39fb618..79aa2f9 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -8,4 +8,40 @@
 #else
 #define X86_VM_MASK	0 /* No VM86 support */
 #endif
+
+/*
+ * CR3's layout varies depending on several things.
+ *
+ * If CR4.PCIDE is set (64-bit only), then CR3[11:0] is the address space ID.
+ * If PAE is enabled, then CR3[11:5] is part of the PDPT address
+ * (i.e. it's 32-byte aligned, not page-aligned) and CR3[4:0] is ignored.
+ * Otherwise (non-PAE, non-PCID), CR3[3] is PWT, CR3[4] is PCD, and
+ * CR3[2:0] and CR3[11:5] are ignored.
+ *
+ * In all cases, Linux puts zeros in the low ignored bits and in PWT and PCD.
+ *
+ * CR3[63] is always read as zero.  If CR4.PCIDE is set, then CR3[63] may be
+ * written as 1 to prevent the write to CR3 from flushing the TLB.
+ *
+ * On systems with SME, one bit (in a variable position!) is stolen to indicate
+ * that the top-level paging structure is encrypted.
+ *
+ * All of the remaining bits indicate the physical address of the top-level
+ * paging structure.
+ *
+ * CR3_ADDR_MASK is the mask used by read_cr3_pa().
+ */
+#ifdef CONFIG_X86_64
+/* Mask off the address space ID bits. */
+#define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull
+#define CR3_PCID_MASK 0xFFFull
+#else
+/*
+ * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
+ * a tiny bit of code size by setting all the bits.
+ */
+#define CR3_ADDR_MASK 0xFFFFFFFFull
+#define CR3_PCID_MASK 0ull
+#endif
+
 #endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3cada99..6a79547 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -231,6 +231,14 @@ native_cpuid_reg(ebx)
 native_cpuid_reg(ecx)
 native_cpuid_reg(edx)
 
+/*
+ * Friendlier CR3 helpers.
+ */
+static inline unsigned long read_cr3_pa(void)
+{
+	return __read_cr3() & CR3_ADDR_MASK;
+}
+
 static inline void load_cr3(pgd_t *pgdir)
 {
 	write_cr3(__pa(pgdir));
@@ -860,8 +868,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 					       unsigned long new_sp);
 
@@ -901,8 +907,13 @@ static inline int mpx_disable_management(void)
 }
 #endif /* CONFIG_X86_INTEL_MPX */
 
+#ifdef CONFIG_CPU_SUP_AMD
 extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
+#else
+static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
+static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
+#endif
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
 {
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ac1d5da..e4585a3 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -44,7 +44,6 @@ extern unsigned long saved_video_mode;
 
 extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
-extern void setup_default_timer_irq(void);
 
 #ifdef CONFIG_X86_INTEL_MID
 extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 12af3e3..9efaabf 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -39,7 +39,7 @@ static inline void native_write_cr2(unsigned long val)
 	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
 }
 
-static inline unsigned long native_read_cr3(void)
+static inline unsigned long __native_read_cr3(void)
 {
 	unsigned long val;
 	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
@@ -159,9 +159,13 @@ static inline void write_cr2(unsigned long x)
 	native_write_cr2(x);
 }
 
-static inline unsigned long read_cr3(void)
+/*
+ * Careful!  CR3 contains more than just an address.  You probably want
+ * read_cr3_pa() instead.
+ */
+static inline unsigned long __read_cr3(void)
 {
-	return native_read_cr3();
+	return __native_read_cr3();
 }
 
 static inline void write_cr3(unsigned long x)
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 6136a18..2bd96b4 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -42,8 +42,7 @@ struct saved_context {
 	set_debugreg((thread)->debugreg##register, register)
 
 /* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern char core_restore_code;
-extern char restore_registers;
+extern char core_restore_code[];
+extern char restore_registers[];
 
 #endif /* _ASM_X86_SUSPEND_64_H */
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 27e9f9d..2016962 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -29,11 +29,9 @@ struct cyc2ns_data {
 	u32 cyc2ns_mul;
 	u32 cyc2ns_shift;
 	u64 cyc2ns_offset;
-	u32 __count;
-	/* u32 hole */
-}; /* 24 bytes -- do not grow */
+}; /* 16 bytes */
 
-extern struct cyc2ns_data *cyc2ns_read_begin(void);
-extern void cyc2ns_read_end(struct cyc2ns_data *);
+extern void cyc2ns_read_begin(struct cyc2ns_data *);
+extern void cyc2ns_read_end(void);
 
 #endif /* _ASM_X86_TIMER_H */
diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h
new file mode 100644
index 0000000..f4a6ff3
--- /dev/null
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -0,0 +1,14 @@
+#ifndef _ARCH_X86_TLBBATCH_H
+#define _ARCH_X86_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed.
+	 */
+	struct cpumask cpumask;
+};
+
+#endif /* _ARCH_X86_TLBBATCH_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6ed9ea4..50ea348 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -7,6 +7,7 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -65,10 +66,14 @@ static inline void invpcid_flush_all_nonglobals(void)
 #endif
 
 struct tlb_state {
-#ifdef CONFIG_SMP
-	struct mm_struct *active_mm;
+	/*
+	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
+	 * are on.  This means that it may not match current->active_mm,
+	 * which will contain the previous user mm when we're in lazy TLB
+	 * mode even if we've already switched back to swapper_pg_dir.
+	 */
+	struct mm_struct *loaded_mm;
 	int state;
-#endif
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -151,7 +156,7 @@ static inline void __native_flush_tlb(void)
 	 * back:
 	 */
 	preempt_disable();
-	native_write_cr3(native_read_cr3());
+	native_write_cr3(__native_read_cr3());
 	preempt_enable();
 }
 
@@ -220,84 +225,16 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
-
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions.  *Not* intended to be called
- * directly.  All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb_all();
-}
-
-static inline void local_flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-	   unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   struct mm_struct *mm,
-					   unsigned long start,
-					   unsigned long end)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#else  /* SMP */
-
-#include <asm/smp.h>
+struct flush_tlb_info {
+	struct mm_struct *mm;
+	unsigned long start;
+	unsigned long end;
+};
 
 #define local_flush_tlb() __flush_tlb()
 
@@ -307,29 +244,32 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
 
 extern void flush_tlb_all(void);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+}
+
 void native_flush_tlb_others(const struct cpumask *cpumask,
-				struct mm_struct *mm,
-				unsigned long start, unsigned long end);
+			     const struct flush_tlb_info *info);
 
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-static inline void reset_lazy_tlbstate(void)
+static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
+					struct mm_struct *mm)
 {
-	this_cpu_write(cpu_tlbstate.state, 0);
-	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
 
-#endif	/* SMP */
+extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end)	\
-	native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, info)	\
+	native_flush_tlb_others(mask, info)
 #endif
 
 #endif /* _ASM_X86_TLBFLUSH_H */
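
The tlbflush.h rework above funnels the old (mm, start, end) arguments through
a single struct flush_tlb_info and adds a two-step batched interface:
arch_tlbbatch_add_mm() accumulates the CPUs that may hold stale entries for an
mm, and arch_tlbbatch_flush() later performs one shootdown for all of them. A
hedged sketch of both call patterns against the declarations above; the mm,
mm_a, mm_b, start and end variables are illustrative:

	/* One-shot remote flush of a single range: */
	struct flush_tlb_info info = {
		.mm	= mm,
		.start	= start,
		.end	= end,
	};
	flush_tlb_others(mm_cpumask(mm), &info);

	/* Deferred flush: accumulate several mms, then one flush for all: */
	struct arch_tlbflush_unmap_batch batch = { };

	arch_tlbbatch_add_mm(&batch, mm_a);	/* ORs mm_a's CPU mask into the batch */
	arch_tlbbatch_add_mm(&batch, mm_b);
	arch_tlbbatch_flush(&batch);
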
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6686820..b5a3223 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_UV_UV_H
 #define _ASM_X86_UV_UV_H
 
+#include <asm/tlbflush.h>
+
 enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 
 struct cpumask;
@@ -15,10 +17,7 @@ extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-						 struct mm_struct *mm,
-						 unsigned long start,
-						 unsigned long end,
-						 unsigned int cpu);
+						 const struct flush_tlb_info *info);
 
 #else	/* X86_UV */
 
@@ -28,8 +27,8 @@ static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
 static inline void uv_system_init(void)	{ }
 static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
-		    unsigned long start, unsigned long end, unsigned int cpu)
+uv_flush_tlb_others(const struct cpumask *cpumask,
+		    const struct flush_tlb_info *info)
 { return cpumask; }
 
 #endif	/* X86_UV */
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 432df4b..f4fef5a 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -34,16 +34,10 @@
 #define HV_X64_MSR_REFERENCE_TSC		0x40000021
 
 /*
- * There is a single feature flag that signifies the presence of the MSR
- * that can be used to retrieve both the local APIC Timer frequency as
- * well as the TSC frequency.
+ * There is a single feature flag that signifies if the partition has access
+ * to MSRs with local APIC and TSC frequencies.
  */
-
-/* Local APIC timer frequency MSR (HV_X64_MSR_APIC_FREQUENCY) is available */
-#define HV_X64_MSR_APIC_FREQUENCY_AVAILABLE (1 << 11)
-
-/* TSC frequency MSR (HV_X64_MSR_TSC_FREQUENCY) is available */
-#define HV_X64_MSR_TSC_FREQUENCY_AVAILABLE (1 << 11)
+#define HV_X64_ACCESS_FREQUENCY_MSRS		(1 << 11)
 
 /*
  * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
@@ -73,6 +67,9 @@
   */
 #define HV_X64_MSR_STAT_PAGES_AVAILABLE		(1 << 8)
 
+/* Frequency MSRs available */
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE	(1 << 8)
+
 /* Crash MSR available */
 #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
 
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 567de50..185f3d1 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -104,6 +104,8 @@
 #define X86_CR4_OSFXSR		_BITUL(X86_CR4_OSFXSR_BIT)
 #define X86_CR4_OSXMMEXCPT_BIT	10 /* enable unmasked SSE exceptions */
 #define X86_CR4_OSXMMEXCPT	_BITUL(X86_CR4_OSXMMEXCPT_BIT)
+#define X86_CR4_LA57_BIT	12 /* enable 5-level page tables */
+#define X86_CR4_LA57		_BITUL(X86_CR4_LA57_BIT)
 #define X86_CR4_VMXE_BIT	13 /* enable VMX virtualization */
 #define X86_CR4_VMXE		_BITUL(X86_CR4_VMXE_BIT)
 #define X86_CR4_SMXE_BIT	14 /* enable safer mode (TXT) */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4b99423..a01892b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -18,6 +18,7 @@
 CFLAGS_REMOVE_kvmclock.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_head64.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o				:= n
@@ -29,6 +30,7 @@
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
 OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o		:= y
 OBJECT_FILES_NON_STANDARD_test_nx.o			:= y
+OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o	:= y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
 # Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 26b78d8..85a9e17 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
+
 obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
 obj-$(CONFIG_ACPI_APEI)		+= apei.o
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8233a63..dde437f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -167,7 +167,8 @@ static int __init ffh_cstate_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (c->x86_vendor != X86_VENDOR_INTEL)
+	if (c->x86_vendor != X86_VENDOR_INTEL &&
+	    c->x86_vendor != X86_VENDOR_AMD)
 		return -1;
 
 	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2d75faf..98b3dd8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -54,6 +54,8 @@
 #include <asm/mce.h>
 #include <asm/tsc.h>
 #include <asm/hypervisor.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 
 unsigned int num_processors;
 
@@ -545,6 +547,81 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
+#define DEADLINE_MODEL_MATCH_FUNC(model, func)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
+
+#define DEADLINE_MODEL_MATCH_REV(model, rev)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
+
+static u32 hsx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x02: return 0x3a; /* EP */
+	case 0x04: return 0x0f; /* EX */
+	}
+
+	return ~0U;
+}
+
+static u32 bdx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x02: return 0x00000011;
+	case 0x03: return 0x0700000e;
+	case 0x04: return 0x0f00000c;
+	case 0x05: return 0x0e000003;
+	}
+
+	return ~0U;
+}
+
+static const struct x86_cpu_id deadline_match[] = {
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,	hsx_deadline_rev),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,	0x0b000020),
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D,	bdx_deadline_rev),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,	0x02000014),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,	0x22),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,	0x20),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E,	0x17),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE,	0x25),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E,	0x17),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE,	0xb2),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP,	0xb2),
+
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE,	0x52),
+	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP,	0x52),
+
+	{},
+};
+
+static void apic_check_deadline_errata(void)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
+	u32 rev;
+
+	if (!m)
+		return;
+
+	/*
+	 * Function pointers will have the MSB set due to address layout;
+	 * immediate revisions will not.
+	 */
+	if ((long)m->driver_data < 0)
+		rev = ((u32 (*)(void))(m->driver_data))();
+	else
+		rev = (u32)m->driver_data;
+
+	if (boot_cpu_data.microcode >= rev)
+		return;
+
+	setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+	pr_err(FW_BUG "TSC_DEADLINE disabled due to erratum; "
+	       "please update microcode to version: 0x%x (or later)\n", rev);
+}
+
 /*
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
@@ -563,6 +640,7 @@ static void setup_APIC_timer(void)
 	levt->cpumask = cpumask_of(smp_processor_id());
 
 	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
+		levt->name = "lapic-deadline";
 		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
 				    CLOCK_EVT_FEAT_DUMMY);
 		levt->set_next_event = lapic_next_deadline;
@@ -1779,6 +1857,8 @@ void __init init_apic_mappings(void)
 {
 	unsigned int new_apicid;
 
+	apic_check_deadline_errata();
+
 	if (x2apic_mode) {
 		boot_cpu_physical_apicid = read_apic_id();
 		return;
@@ -2201,23 +2281,32 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
-int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				   const struct cpumask *andmask,
-				   unsigned int *apicid)
+int default_cpu_mask_to_apicid(const struct cpumask *mask,
+			       struct irq_data *irqdata,
+			       unsigned int *apicid)
 {
-	unsigned int cpu;
+	unsigned int cpu = cpumask_first(mask);
 
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+	*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+	irq_data_update_effective_affinity(irqdata, cpumask_of(cpu));
+	return 0;
+}
 
-	if (likely(cpu < nr_cpu_ids)) {
-		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
-		return 0;
-	}
+int flat_cpu_mask_to_apicid(const struct cpumask *mask,
+			    struct irq_data *irqdata,
+			    unsigned int *apicid)
 
-	return -EINVAL;
+{
+	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
+	unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
+
+	if (!cpu_mask)
+		return -EINVAL;
+	*apicid = (unsigned int)cpu_mask;
+	cpumask_bits(effmsk)[0] = cpu_mask;
+	return 0;
 }
 
 /*
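
The deadline_match[] table added above overloads the driver_data field of
struct x86_cpu_id: most entries carry the minimum good microcode revision
directly, while the Haswell-X and Broadwell-Xeon-D entries carry a function
pointer that picks the revision by stepping. Kernel addresses have the top
bit set, which is why apic_check_deadline_errata() can tell the two apart
with a signedness test. Spelled out, the two macros expand to entries like
these (using the Skylake-X and Haswell-X rows as examples):

	/* DEADLINE_MODEL_MATCH_REV(INTEL_FAM6_SKYLAKE_X, 0x02000014): */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, X86_FEATURE_ANY,
	  (unsigned long)0x02000014 },

	/* DEADLINE_MODEL_MATCH_FUNC(INTEL_FAM6_HASWELL_X, hsx_deadline_rev): */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_HASWELL_X, X86_FEATURE_ANY,
	  (unsigned long)&hsx_deadline_rev },
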
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index a4d7ff2..dedd5a4 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -172,7 +172,7 @@ static struct apic apic_flat __ro_after_init = {
 	.get_apic_id			= flat_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= flat_send_IPI_mask,
@@ -268,7 +268,7 @@ static struct apic apic_physflat __ro_after_init = {
 	.get_apic_id			= flat_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single_phys,
 	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 2262eb6..6599f43 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -141,7 +141,7 @@ struct apic apic_noop __ro_after_init = {
 	.get_apic_id			= noop_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= noop_send_IPI,
 	.send_IPI_mask			= noop_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index e08fe2c..2fda912 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -267,7 +267,7 @@ static const struct apic apic_numachip1 __refconst = {
 	.get_apic_id			= numachip1_get_apic_id,
 	.set_apic_id			= numachip1_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
@@ -318,7 +318,7 @@ static const struct apic apic_numachip2 __refconst = {
 	.get_apic_id			= numachip2_get_apic_id,
 	.set_apic_id			= numachip2_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 5601201..456e45e 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -172,7 +172,7 @@ static struct apic apic_bigsmp __ro_after_init = {
 	.get_apic_id			= bigsmp_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single_phys,
 	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
index ae50d34..56ccf93 100644
--- a/arch/x86/kernel/apic/htirq.c
+++ b/arch/x86/kernel/apic/htirq.c
@@ -150,16 +150,27 @@ static const struct irq_domain_ops htirq_domain_ops = {
 	.deactivate	= htirq_domain_deactivate,
 };
 
-void arch_init_htirq_domain(struct irq_domain *parent)
+void __init arch_init_htirq_domain(struct irq_domain *parent)
 {
+	struct fwnode_handle *fn;
+
 	if (disable_apic)
 		return;
 
-	htirq_domain = irq_domain_add_tree(NULL, &htirq_domain_ops, NULL);
+	fn = irq_domain_alloc_named_fwnode("PCI-HT");
+	if (!fn)
+		goto warn;
+
+	htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL);
+	irq_domain_free_fwnode(fn);
 	if (!htirq_domain)
-		pr_warn("failed to initialize irqdomain for HTIRQ.\n");
-	else
-		htirq_domain->parent = parent;
+		goto warn;
+
+	htirq_domain->parent = parent;
+	return;
+
+warn:
+	pr_warn("Failed to initialize irqdomain for HTIRQ.\n");
 }
 
 int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
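
This HT-IRQ hunk, and the MSI, DMAR, HPET and vector-domain hunks below, all
move from irq_domain_add_*(NULL, ...) to the same three-step pattern: allocate
a named fwnode handle so the domain gets a proper identity, create the domain
against that handle, then drop the handle again once creation is done. A
condensed sketch of the pattern; the domain name and ops table are
illustrative:

	struct fwnode_handle *fn;
	struct irq_domain *domain;

	fn = irq_domain_alloc_named_fwnode("MY-DOMAIN");
	if (!fn)
		return -ENOMEM;

	domain = irq_domain_create_tree(fn, &my_domain_ops, NULL);
	irq_domain_free_fwnode(fn);	/* freed right after creation, as in these hunks */
	if (!domain)
		return -ENOMEM;
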
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 347bb9f..b4f5f73 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1200,28 +1200,6 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
 static struct irq_chip ioapic_chip, ioapic_ir_chip;
 
-#ifdef CONFIG_X86_32
-static inline int IO_APIC_irq_trigger(int irq)
-{
-	int apic, idx, pin;
-
-	for_each_ioapic_pin(apic, pin) {
-		idx = find_irq_entry(apic, pin, mp_INT);
-		if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin, 0)))
-			return irq_trigger(idx);
-	}
-	/*
-         * nonexistent IRQs are edge default
-         */
-	return 0;
-}
-#else
-static inline int IO_APIC_irq_trigger(int irq)
-{
-	return 1;
-}
-#endif
-
 static void __init setup_IO_APIC_irqs(void)
 {
 	unsigned int ioapic, pin;
@@ -2223,6 +2201,8 @@ static int mp_irqdomain_create(int ioapic)
 	struct ioapic *ip = &ioapics[ioapic];
 	struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
 	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
+	struct fwnode_handle *fn;
+	char *name = "IO-APIC";
 
 	if (cfg->type == IOAPIC_DOMAIN_INVALID)
 		return 0;
@@ -2233,9 +2213,25 @@ static int mp_irqdomain_create(int ioapic)
 	parent = irq_remapping_get_ir_irq_domain(&info);
 	if (!parent)
 		parent = x86_vector_domain;
+	else
+		name = "IO-APIC-IR";
 
-	ip->irqdomain = irq_domain_add_linear(cfg->dev, hwirqs, cfg->ops,
-					      (void *)(long)ioapic);
+	/* Handle device tree enumerated APICs proper */
+	if (cfg->dev) {
+		fn = of_node_to_fwnode(cfg->dev);
+	} else {
+		fn = irq_domain_alloc_named_id_fwnode(name, ioapic);
+		if (!fn)
+			return -ENOMEM;
+	}
+
+	ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
+						 (void *)(long)ioapic);
+
+	/* Release fw handle if it was allocated above */
+	if (!cfg->dev)
+		irq_domain_free_fwnode(fn);
+
 	if (!ip->irqdomain)
 		return -ENOMEM;
 
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index c61aec7..9b18be7 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -136,13 +136,20 @@ static struct msi_domain_info pci_msi_domain_info = {
 	.handler_name	= "edge",
 };
 
-void arch_init_msi_domain(struct irq_domain *parent)
+void __init arch_init_msi_domain(struct irq_domain *parent)
 {
+	struct fwnode_handle *fn;
+
 	if (disable_apic)
 		return;
 
-	msi_default_domain = pci_msi_create_irq_domain(NULL,
-					&pci_msi_domain_info, parent);
+	fn = irq_domain_alloc_named_fwnode("PCI-MSI");
+	if (fn) {
+		msi_default_domain =
+			pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
+						  parent);
+		irq_domain_free_fwnode(fn);
+	}
 	if (!msi_default_domain)
 		pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
 }
@@ -167,9 +174,18 @@ static struct msi_domain_info pci_msi_ir_domain_info = {
 	.handler_name	= "edge",
 };
 
-struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent)
+struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
+						    const char *name, int id)
 {
-	return pci_msi_create_irq_domain(NULL, &pci_msi_ir_domain_info, parent);
+	struct fwnode_handle *fn;
+	struct irq_domain *d;
+
+	fn = irq_domain_alloc_named_id_fwnode(name, id);
+	if (!fn)
+		return NULL;
+	d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
+	irq_domain_free_fwnode(fn);
+	return d;
 }
 #endif
 
@@ -221,13 +237,20 @@ static struct irq_domain *dmar_get_irq_domain(void)
 {
 	static struct irq_domain *dmar_domain;
 	static DEFINE_MUTEX(dmar_lock);
+	struct fwnode_handle *fn;
 
 	mutex_lock(&dmar_lock);
-	if (dmar_domain == NULL)
-		dmar_domain = msi_create_irq_domain(NULL, &dmar_msi_domain_info,
-						    x86_vector_domain);
-	mutex_unlock(&dmar_lock);
+	if (dmar_domain)
+		goto out;
 
+	fn = irq_domain_alloc_named_fwnode("DMAR-MSI");
+	if (fn) {
+		dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
+						    x86_vector_domain);
+		irq_domain_free_fwnode(fn);
+	}
+out:
+	mutex_unlock(&dmar_lock);
 	return dmar_domain;
 }
 
@@ -317,9 +340,10 @@ static struct msi_domain_info hpet_msi_domain_info = {
 
 struct irq_domain *hpet_create_irq_domain(int hpet_id)
 {
-	struct irq_domain *parent;
-	struct irq_alloc_info info;
 	struct msi_domain_info *domain_info;
+	struct irq_domain *parent, *d;
+	struct irq_alloc_info info;
+	struct fwnode_handle *fn;
 
 	if (x86_vector_domain == NULL)
 		return NULL;
@@ -340,7 +364,16 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
 	else
 		hpet_msi_controller.name = "IR-HPET-MSI";
 
-	return msi_create_irq_domain(NULL, domain_info, parent);
+	fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
+					      hpet_id);
+	if (!fn) {
+		kfree(domain_info);
+		return NULL;
+	}
+
+	d = msi_create_irq_domain(fn, domain_info, parent);
+	irq_domain_free_fwnode(fn);
+	return d;
 }
 
 int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 2e8f7f0..6328765 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -102,7 +102,7 @@ static struct apic apic_default __ro_after_init = {
 	.get_apic_id			= default_get_apic_id,
 	.set_apic_id			= NULL,
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= default_send_IPI_mask_logical,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f3557a1..b3af457 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -103,7 +103,8 @@ static void free_apic_chip_data(struct apic_chip_data *data)
 }
 
 static int __assign_irq_vector(int irq, struct apic_chip_data *d,
-			       const struct cpumask *mask)
+			       const struct cpumask *mask,
+			       struct irq_data *irqdata)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -141,7 +142,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 		/*
 		 * Clear the offline cpus from @vector_cpumask for searching
 		 * and verify whether the result overlaps with @mask. If true,
-		 * then the call to apic->cpu_mask_to_apicid_and() will
+		 * then the call to apic->cpu_mask_to_apicid() will
 		 * succeed as well. If not, no point in trying to find a
 		 * vector in this mask.
 		 */
@@ -221,34 +222,40 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
 	 * as we already established, that mask & d->domain & cpu_online_mask
 	 * is not empty.
+	 *
+	 * vector_searchmask is a subset of d->domain and has the offline
+	 * cpus masked out.
 	 */
-	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
-					    &d->cfg.dest_apicid));
+	cpumask_and(vector_searchmask, vector_searchmask, mask);
+	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqdata,
+					&d->cfg.dest_apicid));
 	return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
-			     const struct cpumask *mask)
+			     const struct cpumask *mask,
+			     struct irq_data *irqdata)
 {
 	int err;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, data, mask);
+	err = __assign_irq_vector(irq, data, mask, irqdata);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
 static int assign_irq_vector_policy(int irq, int node,
 				    struct apic_chip_data *data,
-				    struct irq_alloc_info *info)
+				    struct irq_alloc_info *info,
+				    struct irq_data *irqdata)
 {
 	if (info && info->mask)
-		return assign_irq_vector(irq, data, info->mask);
+		return assign_irq_vector(irq, data, info->mask, irqdata);
 	if (node != NUMA_NO_NODE &&
-	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
+	    assign_irq_vector(irq, data, cpumask_of_node(node), irqdata) == 0)
 		return 0;
-	return assign_irq_vector(irq, data, apic->target_cpus());
+	return assign_irq_vector(irq, data, apic->target_cpus(), irqdata);
 }
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
@@ -360,9 +367,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, node, data, info);
+		err = assign_irq_vector_policy(virq + i, node, data, info,
+					       irq_data);
 		if (err)
 			goto error;
+		/*
+		 * If the apic destination mode is physical, then the
+		 * effective affinity is restricted to a single target
+		 * CPU. Mark the interrupt accordingly.
+		 */
+		if (!apic->irq_dest_mode)
+			irqd_set_single_target(irq_data);
 	}
 
 	return 0;
@@ -405,7 +420,7 @@ int __init arch_probe_nr_irqs(void)
 }
 
 #ifdef	CONFIG_X86_IO_APIC
-static void init_legacy_irqs(void)
+static void __init init_legacy_irqs(void)
 {
 	int i, node = cpu_to_node(0);
 	struct apic_chip_data *data;
@@ -424,16 +439,21 @@ static void init_legacy_irqs(void)
 	}
 }
 #else
-static void init_legacy_irqs(void) { }
+static inline void init_legacy_irqs(void) { }
 #endif
 
 int __init arch_early_irq_init(void)
 {
+	struct fwnode_handle *fn;
+
 	init_legacy_irqs();
 
-	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
-						NULL);
+	fn = irq_domain_alloc_named_fwnode("VECTOR");
+	BUG_ON(!fn);
+	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
+						   NULL);
 	BUG_ON(x86_vector_domain == NULL);
+	irq_domain_free_fwnode(fn);
 	irq_set_default_host(x86_vector_domain);
 
 	arch_init_msi_domain(x86_vector_domain);
@@ -529,11 +549,12 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	if (!cpumask_intersects(dest, cpu_online_mask))
 		return -EINVAL;
 
-	err = assign_irq_vector(irq, data, dest);
+	err = assign_irq_vector(irq, data, dest, irq_data);
 	return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {
+	.name			= "APIC",
 	.irq_ack		= apic_ack_edge,
 	.irq_set_affinity	= apic_set_affinity,
 	.irq_retrigger		= apic_retrigger_irq,
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 5a35f20..481237c 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/dmar.h>
+#include <linux/irq.h>
 #include <linux/cpu.h>
 
 #include <asm/smp.h>
@@ -104,35 +105,30 @@ static void x2apic_send_IPI_all(int vector)
 }
 
 static int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask,
-			      unsigned int *apicid)
+x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
+			  unsigned int *apicid)
 {
+	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
+	unsigned int cpu;
 	u32 dest = 0;
 	u16 cluster;
-	int i;
 
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
-			continue;
-		dest = per_cpu(x86_cpu_to_logical_apicid, i);
-		cluster = x2apic_cluster(i);
-		break;
-	}
-
-	if (!dest)
+	cpu = cpumask_first(mask);
+	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	for_each_cpu_and(i, cpumask, andmask) {
-		if (!cpumask_test_cpu(i, cpu_online_mask))
+	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+	cluster = x2apic_cluster(cpu);
+
+	cpumask_clear(effmsk);
+	for_each_cpu(cpu, mask) {
+		if (cluster != x2apic_cluster(cpu))
 			continue;
-		if (cluster != x2apic_cluster(i))
-			continue;
-		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
+		cpumask_set_cpu(cpu, effmsk);
 	}
 
 	*apicid = dest;
-
 	return 0;
 }
 
@@ -256,7 +252,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= x2apic_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
 
 	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
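
The rewritten x2apic_cpu_mask_to_apicid() trusts the mask it is handed (the
vector-domain code above now passes an online subset), takes the cluster of
the first CPU in the mask, ORs together the logical APIC IDs of every mask CPU
in that same cluster, and records exactly those CPUs as the effective
affinity. For example, if the mask holds CPUs 0-3 and CPUs 0-1 sit in one
cluster while CPUs 2-3 sit in another, the destination becomes the OR of
CPU 0's and CPU 1's logical IDs and the effective affinity ends up as just
CPUs 0-1.
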
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index ff111f0..3baf0c3 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -127,7 +127,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= x2apic_set_apic_id,
 
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
 
 	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b487b3a..0d57bb9 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -526,27 +526,15 @@ static void uv_init_apic_ldr(void)
 }
 
 static int
-uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask,
-			  unsigned int *apicid)
+uv_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
+		      unsigned int *apicid)
 {
-	int unsigned cpu;
+	int ret = default_cpu_mask_to_apicid(mask, irqdata, apicid);
 
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
+	if (!ret)
+		*apicid |= uv_apicid_hibits;
 
-	if (likely(cpu < nr_cpu_ids)) {
-		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
-		return 0;
-	}
-
-	return -EINVAL;
+	return ret;
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -614,7 +602,7 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
 	.get_apic_id			= x2apic_get_apic_id,
 	.set_apic_id			= set_apic_id,
 
-	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
 
 	.send_IPI			= uv_send_IPI_one,
 	.send_IPI_mask			= uv_send_IPI_mask,
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 5200001..cdf8249 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -21,6 +21,7 @@
 obj-y			+= rdrand.o
 obj-y			+= match.o
 obj-y			+= bugs.o
+obj-$(CONFIG_CPU_FREQ)	+= aperfmperf.o
 
 obj-$(CONFIG_PROC_FS)	+= proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
new file mode 100644
index 0000000..d869c86
--- /dev/null
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -0,0 +1,79 @@
+/*
+ * x86 APERF/MPERF KHz calculation for
+ * /sys/.../cpufreq/scaling_cur_freq
+ *
+ * Copyright (C) 2017 Intel Corp.
+ * Author: Len Brown <len.brown@intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+struct aperfmperf_sample {
+	unsigned int	khz;
+	unsigned long	jiffies;
+	u64	aperf;
+	u64	mperf;
+};
+
+static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+
+/*
+ * aperfmperf_snapshot_khz()
+ * On the current CPU, snapshot APERF, MPERF, and jiffies
+ * unless we already did it within 10ms; then
+ * calculate kHz and save the snapshot.
+ */
+static void aperfmperf_snapshot_khz(void *dummy)
+{
+	u64 aperf, aperf_delta;
+	u64 mperf, mperf_delta;
+	struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+
+	/* Don't bother re-computing within 10 ms */
+	if (time_before(jiffies, s->jiffies + HZ/100))
+		return;
+
+	rdmsrl(MSR_IA32_APERF, aperf);
+	rdmsrl(MSR_IA32_MPERF, mperf);
+
+	aperf_delta = aperf - s->aperf;
+	mperf_delta = mperf - s->mperf;
+
+	/*
+	 * There is no architectural guarantee that MPERF
+	 * increments faster than we can read it.
+	 */
+	if (mperf_delta == 0)
+		return;
+
+	/*
+	 * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
+	 *	khz = (cpu_khz * aperf_delta) / mperf_delta
+	 */
+	if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
+		s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
+	else	/* khz = aperf_delta / (mperf_delta / cpu_khz) */
+		s->khz = div64_u64(aperf_delta,
+			div64_u64(mperf_delta, cpu_khz));
+	s->jiffies = jiffies;
+	s->aperf = aperf;
+	s->mperf = mperf;
+}
+
+unsigned int arch_freq_get_on_cpu(int cpu)
+{
+	if (!cpu_khz)
+		return 0;
+
+	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+		return 0;
+
+	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+
+	return per_cpu(samples.khz, cpu);
+}
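
In short, arch_freq_get_on_cpu() IPIs the target CPU, re-samples APERF/MPERF
there at most once per 10 ms, and reports khz = cpu_khz * delta_aperf /
delta_mperf; the two div64_u64() branches only exist so the multiplication
cannot overflow 64 bits. As a worked example with illustrative numbers: with
cpu_khz = 2,400,000 and deltas of aperf = 3,000,000 and mperf = 2,000,000 over
the sampling window, the reported value is 3,600,000 kHz, i.e. the core
averaged 1.5x its base clock during that window.
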
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61..6f07744 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 		break;
 
 	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+	case 11: /* GX1 with inverted Device ID */
 #ifdef CONFIG_PCI
 	{
 		u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index f5af0cc..9257bd9 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -856,11 +856,13 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	dentry = kernfs_mount(fs_type, flags, rdt_root,
 			      RDTGROUP_SUPER_MAGIC, NULL);
 	if (IS_ERR(dentry))
-		goto out_cdp;
+		goto out_destroy;
 
 	static_branch_enable(&rdt_enable_key);
 	goto out;
 
+out_destroy:
+	kernfs_remove(kn_info);
 out_cdp:
 	cdp_disable();
 out:
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 9c632cb8..10cec43 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -17,6 +17,8 @@
 
 #include "mce-internal.h"
 
+static BLOCKING_NOTIFIER_HEAD(mce_injector_chain);
+
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
 static char mce_helper[128];
@@ -345,24 +347,49 @@ static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
 	}
 }
 
-static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
-			    size_t usize, loff_t *off);
-
-void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
-			     const char __user *ubuf,
-			     size_t usize, loff_t *off))
+void mce_register_injector_chain(struct notifier_block *nb)
 {
-	mce_write = fn;
+	blocking_notifier_chain_register(&mce_injector_chain, nb);
 }
-EXPORT_SYMBOL_GPL(register_mce_write_callback);
+EXPORT_SYMBOL_GPL(mce_register_injector_chain);
+
+void mce_unregister_injector_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&mce_injector_chain, nb);
+}
+EXPORT_SYMBOL_GPL(mce_unregister_injector_chain);
 
 static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
 				size_t usize, loff_t *off)
 {
-	if (mce_write)
-		return mce_write(filp, ubuf, usize, off);
-	else
+	struct mce m;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	/*
+	 * There are some cases where real MSR reads could slip
+	 * through.
+	 */
+	if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
+		return -EIO;
+
+	if ((unsigned long)usize > sizeof(struct mce))
+		usize = sizeof(struct mce);
+	if (copy_from_user(&m, ubuf, usize))
+		return -EFAULT;
+
+	if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
 		return -EINVAL;
+
+	/*
+	 * Need to give user space some time to set everything up,
+	 * so do it a jiffy or two later everywhere.
+	 */
+	schedule_timeout(2);
+
+	blocking_notifier_call_chain(&mce_injector_chain, 0, &m);
+
+	return usize;
 }
 
 static const struct file_operations mce_chrdev_ops = {
@@ -388,9 +415,15 @@ static __init int dev_mcelog_init_device(void)
 	/* register character device /dev/mcelog */
 	err = misc_register(&mce_chrdev_device);
 	if (err) {
-		pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
+		if (err == -EBUSY)
+			/* Xen dom0 might have registered the device already. */
+			pr_info("Unable to init device /dev/mcelog, already registered");
+		else
+			pr_err("Unable to init device /dev/mcelog (rc: %d)\n", err);
+
 		return err;
 	}
+
 	mce_register_decode_chain(&dev_mcelog_nb);
 	return 0;
 }
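
With this change the /dev/mcelog write path no longer dispatches through a
single registered function pointer: mce_chrdev_write() validates and copies
the user-supplied struct mce itself and then walks a blocking notifier chain,
so any injector simply registers a notifier. A sketch of what a consumer looks
like (the callback and notifier names are illustrative; mce-inject.c below
registers the real one):

	static int my_mce_injector(struct notifier_block *nb, unsigned long val,
				   void *data)
	{
		struct mce *m = data;	/* the record written to /dev/mcelog */

		/* raise or decode the fake error here */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_mce_injector_nb = {
		.notifier_call	= my_mce_injector,
	};

	/* on load / unload of the injector: */
	mce_register_injector_chain(&my_mce_injector_nb);
	mce_unregister_injector_chain(&my_mce_injector_nb);
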
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 99165b2..231ad23 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -10,23 +10,105 @@
  * Authors:
  * Andi Kleen
  * Ying Huang
+ *
+ * The AMD part (from mce_amd_inj.c): a simple MCE injection facility
+ * for testing different aspects of the RAS code. This driver should be
+ * built as module so that it can be loaded on production kernels for
+ * testing purposes.
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2010-17:  Borislav Petkov <bp@alien8.de>
+ *			   Advanced Micro Devices Inc.
  */
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/preempt.h>
-#include <linux/smp.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
+
 #include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <asm/mce.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
+#include <asm/amd_nb.h>
 #include <asm/apic.h>
+#include <asm/irq_vectors.h>
+#include <asm/mce.h>
 #include <asm/nmi.h>
+#include <asm/smp.h>
+
+#include "mce-internal.h"
+
+/*
+ * Collect all the MCi_XXX settings
+ */
+static struct mce i_mce;
+static struct dentry *dfs_inj;
+
+static u8 n_banks;
+
+#define MAX_FLAG_OPT_SIZE	3
+#define NBCFG			0x44
+
+enum injection_type {
+	SW_INJ = 0,	/* SW injection, simply decode the error */
+	HW_INJ,		/* Trigger a #MC */
+	DFR_INT_INJ,    /* Trigger Deferred error interrupt */
+	THR_INT_INJ,    /* Trigger threshold interrupt */
+	N_INJ_TYPES,
+};
+
+static const char * const flags_options[] = {
+	[SW_INJ] = "sw",
+	[HW_INJ] = "hw",
+	[DFR_INT_INJ] = "df",
+	[THR_INT_INJ] = "th",
+	NULL
+};
+
+/* Set default injection to SW_INJ */
+static enum injection_type inj_type = SW_INJ;
+
+#define MCE_INJECT_SET(reg)						\
+static int inj_##reg##_set(void *data, u64 val)				\
+{									\
+	struct mce *m = (struct mce *)data;				\
+									\
+	m->reg = val;							\
+	return 0;							\
+}
+
+MCE_INJECT_SET(status);
+MCE_INJECT_SET(misc);
+MCE_INJECT_SET(addr);
+MCE_INJECT_SET(synd);
+
+#define MCE_INJECT_GET(reg)						\
+static int inj_##reg##_get(void *data, u64 *val)			\
+{									\
+	struct mce *m = (struct mce *)data;				\
+									\
+	*val = m->reg;							\
+	return 0;							\
+}
+
+MCE_INJECT_GET(status);
+MCE_INJECT_GET(misc);
+MCE_INJECT_GET(addr);
+MCE_INJECT_GET(synd);
+
+DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
+
+static void setup_inj_struct(struct mce *m)
+{
+	memset(m, 0, sizeof(struct mce));
+
+	m->cpuvendor = boot_cpu_data.x86_vendor;
+}
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -143,7 +225,7 @@ static int raise_local(void)
 	return ret;
 }
 
-static void raise_mce(struct mce *m)
+static void __maybe_unused raise_mce(struct mce *m)
 {
 	int context = MCJ_CTX(m->inject_flags);
 
@@ -198,55 +280,454 @@ static void raise_mce(struct mce *m)
 	}
 }
 
-/* Error injection interface */
-static ssize_t mce_write(struct file *filp, const char __user *ubuf,
-			 size_t usize, loff_t *off)
+static int mce_inject_raise(struct notifier_block *nb, unsigned long val,
+			    void *data)
 {
-	struct mce m;
+	struct mce *m = (struct mce *)data;
 
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	/*
-	 * There are some cases where real MSR reads could slip
-	 * through.
-	 */
-	if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
-		return -EIO;
-
-	if ((unsigned long)usize > sizeof(struct mce))
-		usize = sizeof(struct mce);
-	if (copy_from_user(&m, ubuf, usize))
-		return -EFAULT;
-
-	if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
-		return -EINVAL;
-
-	/*
-	 * Need to give user space some time to set everything up,
-	 * so do it a jiffie or two later everywhere.
-	 */
-	schedule_timeout(2);
+	if (!m)
+		return NOTIFY_DONE;
 
 	mutex_lock(&mce_inject_mutex);
-	raise_mce(&m);
+	raise_mce(m);
 	mutex_unlock(&mce_inject_mutex);
-	return usize;
+
+	return NOTIFY_DONE;
 }
 
-static int inject_init(void)
+static struct notifier_block inject_nb = {
+	.notifier_call  = mce_inject_raise,
+};
+
+/*
+ * Caller needs to make sure this CPU doesn't disappear
+ * from under us, i.e.: get_cpu/put_cpu.
+ */
+static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
 {
-	if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
-		return -ENOMEM;
-	pr_info("Machine check injector initialized\n");
-	register_mce_write_callback(mce_write);
-	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
-				"mce_notify");
+	u32 l, h;
+	int err;
+
+	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
+	if (err) {
+		pr_err("%s: error reading HWCR\n", __func__);
+		return err;
+	}
+
+	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
+
+	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
+	if (err)
+		pr_err("%s: error writing HWCR\n", __func__);
+
+	return err;
+}
+
+static int __set_inj(const char *buf)
+{
+	int i;
+
+	for (i = 0; i < N_INJ_TYPES; i++) {
+		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
+			inj_type = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static ssize_t flags_read(struct file *filp, char __user *ubuf,
+			  size_t cnt, loff_t *ppos)
+{
+	char buf[MAX_FLAG_OPT_SIZE];
+	int n;
+
+	n = sprintf(buf, "%s\n", flags_options[inj_type]);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
+}
+
+static ssize_t flags_write(struct file *filp, const char __user *ubuf,
+			   size_t cnt, loff_t *ppos)
+{
+	char buf[MAX_FLAG_OPT_SIZE], *__buf;
+	int err;
+
+	if (cnt > MAX_FLAG_OPT_SIZE)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt - 1] = 0;
+
+	/* strip whitespace */
+	__buf = strstrip(buf);
+
+	err = __set_inj(__buf);
+	if (err) {
+		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
+		return err;
+	}
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
+static const struct file_operations flags_fops = {
+	.read           = flags_read,
+	.write          = flags_write,
+	.llseek         = generic_file_llseek,
+};
+
+/*
+ * On which CPU to inject?
+ */
+MCE_INJECT_GET(extcpu);
+
+static int inj_extcpu_set(void *data, u64 val)
+{
+	struct mce *m = (struct mce *)data;
+
+	if (val >= nr_cpu_ids || !cpu_online(val)) {
+		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
+		return -EINVAL;
+	}
+	m->extcpu = val;
 	return 0;
 }
 
-module_init(inject_init);
+DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
+
+static void trigger_mce(void *info)
+{
+	asm volatile("int $18");
+}
+
+static void trigger_dfr_int(void *info)
+{
+	asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
+}
+
+static void trigger_thr_int(void *info)
+{
+	asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
+}
+
+static u32 get_nbc_for_node(int node_id)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u32 cores_per_node;
+
+	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
+
+	return cores_per_node * node_id;
+}
+
+static void toggle_nb_mca_mst_cpu(u16 nid)
+{
+	struct amd_northbridge *nb;
+	struct pci_dev *F3;
+	u32 val;
+	int err;
+
+	nb = node_to_amd_nb(nid);
+	if (!nb)
+		return;
+
+	F3 = nb->misc;
+	if (!F3)
+		return;
+
+	err = pci_read_config_dword(F3, NBCFG, &val);
+	if (err) {
+		pr_err("%s: Error reading F%dx%03x.\n",
+		       __func__, PCI_FUNC(F3->devfn), NBCFG);
+		return;
+	}
+
+	if (val & BIT(27))
+		return;
+
+	pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
+	       __func__);
+
+	val |= BIT(27);
+	err = pci_write_config_dword(F3, NBCFG, val);
+	if (err)
+		pr_err("%s: Error writing F%dx%03x.\n",
+		       __func__, PCI_FUNC(F3->devfn), NBCFG);
+}
+
+static void prepare_msrs(void *info)
+{
+	struct mce m = *(struct mce *)info;
+	u8 b = m.bank;
+
+	wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+
+	if (boot_cpu_has(X86_FEATURE_SMCA)) {
+		if (m.inject_flags == DFR_INT_INJ) {
+			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
+			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
+		} else {
+			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
+			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
+		}
+
+		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
+		wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
+	} else {
+		wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
+		wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
+		wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
+	}
+}
+
+static void do_inject(void)
+{
+	u64 mcg_status = 0;
+	unsigned int cpu = i_mce.extcpu;
+	u8 b = i_mce.bank;
+
+	rdtscll(i_mce.tsc);
+
+	if (i_mce.misc)
+		i_mce.status |= MCI_STATUS_MISCV;
+
+	if (i_mce.synd)
+		i_mce.status |= MCI_STATUS_SYNDV;
+
+	if (inj_type == SW_INJ) {
+		mce_inject_log(&i_mce);
+		return;
+	}
+
+	/* prep MCE global settings for the injection */
+	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
+
+	if (!(i_mce.status & MCI_STATUS_PCC))
+		mcg_status |= MCG_STATUS_RIPV;
+
+	/*
+	 * Ensure necessary status bits for deferred errors:
+	 * - MCx_STATUS[Deferred]: make sure it is a deferred error
+	 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
+	 */
+	if (inj_type == DFR_INT_INJ) {
+		i_mce.status |= MCI_STATUS_DEFERRED;
+		i_mce.status &= ~MCI_STATUS_UC;
+	}
+
+	/*
+	 * For multi node CPUs, logging and reporting of bank 4 errors happens
+	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
+	 * Fam10h and later BKDGs.
+	 */
+	if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
+	    b == 4 &&
+	    boot_cpu_data.x86 < 0x17) {
+		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
+		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
+	}
+
+	get_online_cpus();
+	if (!cpu_online(cpu))
+		goto err;
+
+	toggle_hw_mce_inject(cpu, true);
+
+	i_mce.mcgstatus = mcg_status;
+	i_mce.inject_flags = inj_type;
+	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
+
+	toggle_hw_mce_inject(cpu, false);
+
+	switch (inj_type) {
+	case DFR_INT_INJ:
+		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
+		break;
+	case THR_INT_INJ:
+		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
+		break;
+	default:
+		smp_call_function_single(cpu, trigger_mce, NULL, 0);
+	}
+
+err:
+	put_online_cpus();
+}
+
 /*
- * Cannot tolerate unloading currently because we cannot
- * guarantee all openers of mce_chrdev will get a reference to us.
+ * This denotes into which bank we're injecting and triggers
+ * the injection at the same time.
  */
+static int inj_bank_set(void *data, u64 val)
+{
+	struct mce *m = (struct mce *)data;
+
+	if (val >= n_banks) {
+		pr_err("Non-existent MCE bank: %llu\n", val);
+		return -EINVAL;
+	}
+
+	m->bank = val;
+	do_inject();
+
+	return 0;
+}
+
+MCE_INJECT_GET(bank);
+
+DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
+
+static const char readme_msg[] =
+"Description of the files and their usages:\n"
+"\n"
+"Note1: i refers to the bank number below.\n"
+"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
+"as they mirror the hardware registers.\n"
+"\n"
+"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
+"\t attributes of the error which caused the MCE.\n"
+"\n"
+"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
+"\t used for error thresholding purposes and its validity is indicated by\n"
+"\t MCi_STATUS[MiscV].\n"
+"\n"
+"synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n"
+"\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n"
+"\n"
+"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
+"\t associated with the error.\n"
+"\n"
+"cpu:\t The CPU to inject the error on.\n"
+"\n"
+"bank:\t Specify the bank you want to inject the error into: the number of\n"
+"\t banks in a processor varies and is family/model-specific, therefore, the\n"
+"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
+"\t injection.\n"
+"\n"
+"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
+"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
+"\t for AMD processors.\n"
+"\n"
+"\t Allowed error injection types:\n"
+"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
+"\t    format only. Safe to use.\n"
+"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
+"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
+"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
+"\t    before injecting.\n"
+"\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
+"\t    error APIC interrupt handler to handle the error if the feature is \n"
+"\t    is present in hardware. \n"
+"\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
+"\t    APIC interrupt handler to handle the error. \n"
+"\n";
+
+static ssize_t
+inj_readme_read(struct file *filp, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	return simple_read_from_buffer(ubuf, cnt, ppos,
+					readme_msg, strlen(readme_msg));
+}
+
+static const struct file_operations readme_fops = {
+	.read		= inj_readme_read,
+};
+
+static struct dfs_node {
+	char *name;
+	struct dentry *d;
+	const struct file_operations *fops;
+	umode_t perm;
+} dfs_fls[] = {
+	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
+	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "synd",	.fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
+	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
+	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
+};
+
+static int __init debugfs_init(void)
+{
+	unsigned int i;
+	u64 cap;
+
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+	n_banks = cap & MCG_BANKCNT_MASK;
+
+	dfs_inj = debugfs_create_dir("mce-inject", NULL);
+	if (!dfs_inj)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
+		dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
+						    dfs_fls[i].perm,
+						    dfs_inj,
+						    &i_mce,
+						    dfs_fls[i].fops);
+
+		if (!dfs_fls[i].d)
+			goto err_dfs_add;
+	}
+
+	return 0;
+
+err_dfs_add:
+	while (i-- > 0)
+		debugfs_remove(dfs_fls[i].d);
+
+	debugfs_remove(dfs_inj);
+	dfs_inj = NULL;
+
+	return -ENODEV;
+}
+
+static int __init inject_init(void)
+{
+	int err;
+
+	if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = debugfs_init();
+	if (err) {
+		free_cpumask_var(mce_inject_cpumask);
+		return err;
+	}
+
+	register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify");
+	mce_register_injector_chain(&inject_nb);
+
+	setup_inj_struct(&i_mce);
+
+	pr_info("Machine check injector initialized\n");
+
+	return 0;
+}
+
+static void __exit inject_exit(void)
+{
+	mce_unregister_injector_chain(&inject_nb);
+	unregister_nmi_handler(NMI_LOCAL, "mce_notify");
+
+	debugfs_remove_recursive(dfs_inj);
+	dfs_inj = NULL;
+
+	memset(&dfs_fls, 0, sizeof(dfs_fls));
+
+	free_cpumask_var(mce_inject_cpumask);
+}
+
+module_init(inject_init);
+module_exit(inject_exit);
 MODULE_LICENSE("GPL");
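
The MCE_INJECT_SET()/MCE_INJECT_GET() macros above stamp out one u64
setter/getter pair per struct mce field, which DEFINE_SIMPLE_ATTRIBUTE() then
ties to a debugfs file backed by i_mce. For the status field the expansion is
simply:

	static int inj_status_set(void *data, u64 val)
	{
		struct mce *m = (struct mce *)data;

		m->status = val;
		return 0;
	}

	static int inj_status_get(void *data, u64 *val)
	{
		struct mce *m = (struct mce *)data;

		*val = m->status;
		return 0;
	}

At run time (assuming debugfs is mounted at /sys/kernel/debug) the injector is
driven through the files under /sys/kernel/debug/mce-inject/: fill in status,
addr, misc, synd and cpu, pick the injection type via flags (the default is
"sw"), and finally write the bank number, which is what ends up calling
do_inject().
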
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 654ad06..098530a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -100,7 +100,11 @@ static inline bool mce_cmp(struct mce *m1, struct mce *m2)
 extern struct device_attribute dev_attr_trigger;
 
 #ifdef CONFIG_X86_MCELOG_LEGACY
-extern void mce_work_trigger(void);
+void mce_work_trigger(void);
+void mce_register_injector_chain(struct notifier_block *nb);
+void mce_unregister_injector_chain(struct notifier_block *nb);
 #else
 static inline void mce_work_trigger(void)	{ }
+static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
+static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
 #endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5cfbaeb..6dde049 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -673,7 +673,6 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	bool error_seen = false;
 	struct mce m;
-	int severity;
 	int i;
 
 	this_cpu_inc(mce_poll_count);
@@ -710,11 +709,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 
 		mce_read_aux(&m, i);
 
-		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
-
-		if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
-			if (m.status & MCI_STATUS_ADDRV)
-				m.severity = severity;
+		m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 
 		/*
 		 * Don't get the IP here because it's unlikely to
@@ -1550,7 +1545,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 			 */
 			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
 		}
-		if (c->x86 < 17 && cfg->bootlog < 0) {
+		if (c->x86 < 0x11 && cfg->bootlog < 0) {
 			/*
 			 * Lots of broken BIOS around that don't clear them
 			 * by default and leave crap in there. Don't log:
@@ -1832,7 +1827,8 @@ void mce_disable_bank(int bank)
  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
  *	monarchtimeout is how long to wait for other CPUs on machine
  *	check, or 0 to not wait
- * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
+ * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
+ *	and older.
  * mce=nobootlog Don't log MCEs from before booting.
  * mce=bios_cmci_threshold Don't program the CMCI threshold
  * mce=recovery force enable memcpy_mcsafe()
@@ -1912,12 +1908,13 @@ static void mce_disable_error_reporting(void)
 static void vendor_disable_error_reporting(void)
 {
 	/*
-	 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
+	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
 	 * Disabling them for just a single offlined CPU is bad, since it will
 	 * inhibit reporting for all shared resources on the socket like the
 	 * last level cache (LLC), the integrated memory controller (iMC), etc.
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return;
 
 	mce_disable_error_reporting();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 6e4a047..9e314bc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -164,17 +164,48 @@ static void default_deferred_error_interrupt(void)
 }
 void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
 
-static void get_smca_bank_info(unsigned int bank)
+static void smca_configure(unsigned int bank, unsigned int cpu)
 {
-	unsigned int i, hwid_mcatype, cpu = smp_processor_id();
+	unsigned int i, hwid_mcatype;
 	struct smca_hwid *s_hwid;
-	u32 high, instance_id;
+	u32 high, low;
+	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);
+
+	/* Set appropriate bits in MCA_CONFIG */
+	if (!rdmsr_safe(smca_config, &low, &high)) {
+		/*
+		 * OS is required to set the MCAX bit to acknowledge that it is
+		 * now using the new MSR ranges and new registers under each
+		 * bank. It also means that the OS will configure deferred
+		 * errors in the new MCx_CONFIG register. If the bit is not set,
+		 * uncorrectable errors will cause a system panic.
+		 *
+		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
+		 */
+		high |= BIT(0);
+
+		/*
+		 * SMCA sets the Deferred Error Interrupt type per bank.
+		 *
+		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
+		 * if the DeferredIntType bit field is available.
+		 *
+		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
+		 * high portion of the MSR). OS should set this to 0x1 to enable
+		 * APIC based interrupt. First, check that no interrupt has been
+		 * set.
+		 */
+		if ((low & BIT(5)) && !((high >> 5) & 0x3))
+			high |= BIT(5);
+
+		wrmsr(smca_config, low, high);
+	}
 
 	/* Collect bank_info using CPU 0 for now. */
 	if (cpu)
 		return;
 
-	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
+	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
 		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
 		return;
 	}
@@ -191,7 +222,7 @@ static void get_smca_bank_info(unsigned int bank)
 			     smca_get_name(s_hwid->bank_type));
 
 			smca_banks[bank].hwid = s_hwid;
-			smca_banks[bank].id = instance_id;
+			smca_banks[bank].id = low;
 			smca_banks[bank].sysfs_id = s_hwid->count++;
 			break;
 		}
@@ -433,7 +464,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
 			int offset, u32 misc_high)
 {
 	unsigned int cpu = smp_processor_id();
-	u32 smca_low, smca_high, smca_addr;
+	u32 smca_low, smca_high;
 	struct threshold_block b;
 	int new;
 
@@ -457,51 +488,6 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
 		goto set_offset;
 	}
 
-	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);
-
-	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
-		/*
-		 * OS is required to set the MCAX bit to acknowledge that it is
-		 * now using the new MSR ranges and new registers under each
-		 * bank. It also means that the OS will configure deferred
-		 * errors in the new MCx_CONFIG register. If the bit is not set,
-		 * uncorrectable errors will cause a system panic.
-		 *
-		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
-		 */
-		smca_high |= BIT(0);
-
-		/*
-		 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
-		 * registers with the option of additionally logging to
-		 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
-		 *
-		 * This bit is usually set by BIOS to retain the old behavior
-		 * for OSes that don't use the new registers. Linux supports the
-		 * new registers so let's disable that additional logging here.
-		 *
-		 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
-		 * portion of the MSR).
-		 */
-		smca_high &= ~BIT(2);
-
-		/*
-		 * SMCA sets the Deferred Error Interrupt type per bank.
-		 *
-		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
-		 * if the DeferredIntType bit field is available.
-		 *
-		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
-		 * high portion of the MSR). OS should set this to 0x1 to enable
-		 * APIC based interrupt. First, check that no interrupt has been
-		 * set.
-		 */
-		if ((smca_low & BIT(5)) && !((smca_high >> 5) & 0x3))
-			smca_high |= BIT(5);
-
-		wrmsr(smca_addr, smca_low, smca_high);
-	}
-
 	/* Gather LVT offset for thresholding: */
 	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
 		goto out;
@@ -530,7 +516,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 
 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (mce_flags.smca)
-			get_smca_bank_info(bank);
+			smca_configure(bank, cpu);
 
 		for (block = 0; block < NR_BLOCKS; ++block) {
 			address = get_block_address(cpu, address, low, high, bank, block);
@@ -755,37 +741,19 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
 }
 EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
 
-static void
-__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
+static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
 {
-	u32 msr_status = msr_ops.status(bank);
-	u32 msr_addr = msr_ops.addr(bank);
 	struct mce m;
-	u64 status;
-
-	WARN_ON_ONCE(deferred_err && threshold_err);
-
-	if (deferred_err && mce_flags.smca) {
-		msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
-		msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
-	}
-
-	rdmsrl(msr_status, status);
-
-	if (!(status & MCI_STATUS_VAL))
-		return;
 
 	mce_setup(&m);
 
 	m.status = status;
+	m.misc   = misc;
 	m.bank   = bank;
 	m.tsc	 = rdtsc();
 
-	if (threshold_err)
-		m.misc = misc;
-
 	if (m.status & MCI_STATUS_ADDRV) {
-		rdmsrl(msr_addr, m.addr);
+		m.addr = addr;
 
 		/*
 		 * Extract [55:<lsb>] where lsb is the least significant
@@ -806,8 +774,6 @@ __log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
 	}
 
 	mce_log(&m);
-
-	wrmsrl(msr_status, 0);
 }
 
 static inline void __smp_deferred_error_interrupt(void)
@@ -832,87 +798,126 @@ asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
 	exiting_ack_irq();
 }
 
+/*
+ * Returns true if the logged error is deferred. False, otherwise.
+ */
+static inline bool
+_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
+{
+	u64 status, addr = 0;
+
+	rdmsrl(msr_stat, status);
+	if (!(status & MCI_STATUS_VAL))
+		return false;
+
+	if (status & MCI_STATUS_ADDRV)
+		rdmsrl(msr_addr, addr);
+
+	__log_error(bank, status, addr, misc);
+
+	wrmsrl(msr_stat, 0);
+
+	return status & MCI_STATUS_DEFERRED;
+}
+
+/*
+ * We have three scenarios for checking for Deferred errors:
+ *
+ * 1) Non-SMCA systems check MCA_STATUS and log error if found.
+ * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
+ *    clear MCA_DESTAT.
+ * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
+ *    log it.
+ */
+static void log_error_deferred(unsigned int bank)
+{
+	bool defrd;
+
+	defrd = _log_error_bank(bank, msr_ops.status(bank),
+					msr_ops.addr(bank), 0);
+
+	if (!mce_flags.smca)
+		return;
+
+	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
+	if (defrd) {
+		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
+		return;
+	}
+
+	/*
+	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
+	 * for a valid error.
+	 */
+	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
+			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
+}
+
 /* APIC interrupt handler for deferred errors */
 static void amd_deferred_error_interrupt(void)
 {
 	unsigned int bank;
-	u32 msr_status;
-	u64 status;
 
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
-		msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
-					      : msr_ops.status(bank);
-
-		rdmsrl(msr_status, status);
-
-		if (!(status & MCI_STATUS_VAL) ||
-		    !(status & MCI_STATUS_DEFERRED))
-			continue;
-
-		__log_error(bank, true, false, 0);
-		break;
-	}
+	for (bank = 0; bank < mca_cfg.banks; ++bank)
+		log_error_deferred(bank);
 }
 
-/*
- * APIC Interrupt Handler
- */
-
-/*
- * threshold interrupt handler will service THRESHOLD_APIC_VECTOR.
- * the interrupt goes off when error_count reaches threshold_limit.
- * the handler will simply log mcelog w/ software defined bank number.
- */
-
-static void amd_threshold_interrupt(void)
+static void log_error_thresholding(unsigned int bank, u64 misc)
 {
-	u32 low = 0, high = 0, address = 0;
-	unsigned int bank, block, cpu = smp_processor_id();
+	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
+}
+
+static void log_and_reset_block(struct threshold_block *block)
+{
 	struct thresh_restart tr;
+	u32 low = 0, high = 0;
 
-	/* assume first bank caused it */
-	for (bank = 0; bank < mca_cfg.banks; ++bank) {
-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
-			continue;
-		for (block = 0; block < NR_BLOCKS; ++block) {
-			address = get_block_address(cpu, address, low, high, bank, block);
-			if (!address)
-				break;
+	if (!block)
+		return;
 
-			if (rdmsr_safe(address, &low, &high))
-				break;
+	if (rdmsr_safe(block->address, &low, &high))
+		return;
 
-			if (!(high & MASK_VALID_HI)) {
-				if (block)
-					continue;
-				else
-					break;
-			}
+	if (!(high & MASK_OVERFLOW_HI))
+		return;
 
-			if (!(high & MASK_CNTP_HI)  ||
-			     (high & MASK_LOCKED_HI))
-				continue;
-
-			/*
-			 * Log the machine check that caused the threshold
-			 * event.
-			 */
-			if (high & MASK_OVERFLOW_HI)
-				goto log;
-		}
-	}
-	return;
-
-log:
-	__log_error(bank, false, true, ((u64)high << 32) | low);
+	/* Log the MCE which caused the threshold event. */
+	log_error_thresholding(block->bank, ((u64)high << 32) | low);
 
 	/* Reset threshold block after logging error. */
 	memset(&tr, 0, sizeof(tr));
-	tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
+	tr.b = block;
 	threshold_restart_bank(&tr);
 }
 
 /*
+ * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
+ * goes off when error_count reaches threshold_limit.
+ */
+static void amd_threshold_interrupt(void)
+{
+	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
+	unsigned int bank, cpu = smp_processor_id();
+
+	for (bank = 0; bank < mca_cfg.banks; ++bank) {
+		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+			continue;
+
+		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
+		if (!first_block)
+			continue;
+
+		/*
+		 * The first block is also the head of the list. Check it first
+		 * before iterating over the rest.
+		 */
+		log_and_reset_block(first_block);
+		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
+			log_and_reset_block(block);
+	}
+}
+
+/*
  * Sysfs Interface
  */
 
@@ -1202,7 +1207,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 				goto out;
 
 			per_cpu(threshold_banks, cpu)[bank] = b;
-			atomic_inc(&b->cpus);
+			refcount_inc(&b->cpus);
 
 			err = __threshold_add_blocks(b);
 
@@ -1225,7 +1230,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
 	if (is_shared_bank(bank)) {
-		atomic_set(&b->cpus, 1);
+		refcount_set(&b->cpus, 1);
 
 		/* nb is already initialized, see above */
 		if (nb) {
@@ -1289,7 +1294,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 		goto free_out;
 
 	if (is_shared_bank(bank)) {
-		if (!atomic_dec_and_test(&b->cpus)) {
+		if (!refcount_dec_and_test(&b->cpus)) {
 			__threshold_remove_blocks(b);
 			per_cpu(threshold_banks, cpu)[bank] = NULL;
 			return;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 45db4d2..21b1857 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -251,7 +251,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 	struct ucode_cpu_info *uci;
 	struct cpio_data cp;
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
 
 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 	if (!desc.mc)
 		return -EINVAL;
 
-	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-				 desc.data, desc.size);
+	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
 	if (ret != UCODE_OK)
 		return -EINVAL;
 
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }
 
 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
 	enum ucode_state ret;
 
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 
 #ifdef CONFIG_X86_32
 	/* save BSP's matching patch for early load */
-	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-		struct ucode_patch *p = find_patch(cpu);
+	if (save) {
+		struct ucode_patch *p = find_patch(0);
 		if (p) {
 			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
 			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 	enum ucode_state ret = UCODE_NFOUND;
 	const struct firmware *fw;
 
 	/* reload ucode container only on the boot cpu */
-	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+	if (!refresh_fw || !bsp)
 		return UCODE_OK;
 
 	if (c->x86 >= 0x15)
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 		goto fw_release;
 	}
 
-	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
 
  fw_release:
 	release_firmware(fw);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index e53d3c9..9cb98ee 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -290,6 +290,17 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 			return (struct cpio_data){ NULL, 0, "" };
 		if (initrd_start)
 			start = initrd_start;
+	} else {
+		/*
+		 * The picture with physical addresses is a bit different: we
+		 * need to get the *physical* address to which the ramdisk was
+		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
+		 * since we're running from physical addresses, we need to access
+		 * relocated_ramdisk through its *physical* address too.
+		 */
+		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
+		if (*rr)
+			start = *rr;
 	}
 
 	return find_cpio_data(path, (void *)start, size, NULL);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index afdfd23..59edbe9 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -42,7 +42,7 @@
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
 /* Current microcode patch used in early patching on the APs. */
-struct microcode_intel *intel_ucode_patch;
+static struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
 					unsigned int s2, unsigned int p2)
@@ -166,7 +166,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
 static void save_microcode_patch(void *data, unsigned int size)
 {
 	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
-	struct ucode_patch *iter, *tmp, *p;
+	struct ucode_patch *iter, *tmp, *p = NULL;
 	bool prev_found = false;
 	unsigned int sig, pf;
 
@@ -202,6 +202,18 @@ static void save_microcode_patch(void *data, unsigned int size)
 		else
 			list_add_tail(&p->plist, &microcode_cache);
 	}
+
+	/*
+	 * Save for early loading. On 32-bit, that needs to be a physical
+	 * address as the APs are running from physical addresses, before
+	 * paging has been enabled.
+	 */
+	if (p) {
+		if (IS_ENABLED(CONFIG_X86_32))
+			intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+		else
+			intel_ucode_patch = p->data;
+	}
 }
 
 static int microcode_sanity_check(void *mc, int print_err)
@@ -607,6 +619,14 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
+	/*
+	 * initrd is going away, clear patch ptr. We will scan the microcode one
+	 * last time before jettisoning and save a patch, if found. Then we will
+	 * update that pointer too, with a stable patch address to use when
+	 * resuming the cores.
+	 */
+	intel_ucode_patch = NULL;
+
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 04cb8d3..70e717f 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -161,6 +161,15 @@ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
 }
 #endif
 
+static unsigned long hv_get_tsc_khz(void)
+{
+	unsigned long freq;
+
+	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+
+	return freq / 1000;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
 	int hv_host_info_eax;
@@ -193,8 +202,15 @@ static void __init ms_hyperv_init_platform(void)
 			hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
 	}
 
+	if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
+		x86_platform.calibrate_tsc = hv_get_tsc_khz;
+		x86_platform.calibrate_cpu = hv_get_tsc_khz;
+	}
+
 #ifdef CONFIG_X86_LOCAL_APIC
-	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
+	if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
 		/*
 		 * Get the APIC frequency.
 		 */
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 2bce84d..c5bb63b 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -807,10 +807,8 @@ void mtrr_save_state(void)
 	if (!mtrr_enabled())
 		return;
 
-	get_online_cpus();
 	first_cpu = cpumask_first(cpu_online_mask);
 	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
-	put_online_cpus();
 }
 
 void set_mtrr_aps_delayed_init(void)
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 6df621a..218f798 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -2,7 +2,6 @@
 #include <linux/timex.h>
 #include <linux/string.h>
 #include <linux/seq_file.h>
-#include <linux/cpufreq.h>
 
 /*
  *	Get CPU information for use by the procfs.
@@ -76,14 +75,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (c->microcode)
 		seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
 
-	if (cpu_has(c, X86_FEATURE_TSC)) {
-		unsigned int freq = cpufreq_quick_get(cpu);
-
-		if (!freq)
-			freq = cpu_khz;
+	if (cpu_has(c, X86_FEATURE_TSC))
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-			   freq / 1000, (freq % 1000));
-	}
+			   cpu_khz / 1000, (cpu_khz % 1000));
 
 	/* Cache size */
 	if (c->x86_cache_size >= 0)
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 8e598a1..6b91e2e 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -125,7 +125,7 @@ void __init init_espfix_bsp(void)
 	p4d_t *p4d;
 
 	/* Install the espfix pud into the kernel page directory */
-	pgd = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
 	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
 	p4d_populate(&init_mm, p4d, espfix_pud_page);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 43b7002..46c3c73 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -33,17 +33,120 @@
 /*
  * Manage page tables very early on.
  */
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
-static unsigned int __initdata next_early_pgt = 2;
+static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
+#define __head	__section(.head.text)
+
+static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
+{
+	return ptr - (void *)_text + (void *)physaddr;
+}
+
+void __head __startup_64(unsigned long physaddr)
+{
+	unsigned long load_delta, *p;
+	pgdval_t *pgd;
+	p4dval_t *p4d;
+	pudval_t *pud;
+	pmdval_t *pmd, pmd_entry;
+	int i;
+
+	/* Is the address too large? */
+	if (physaddr >> MAX_PHYSMEM_BITS)
+		for (;;);
+
+	/*
+	 * Compute the delta between the address I am compiled to run at
+	 * and the address I am actually running at.
+	 */
+	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+
+	/* Is the address not 2M aligned? */
+	if (load_delta & ~PMD_PAGE_MASK)
+		for (;;);
+
+	/* Fixup the physical addresses in the page table */
+
+	pgd = fixup_pointer(&early_top_pgt, physaddr);
+	pgd[pgd_index(__START_KERNEL_map)] += load_delta;
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
+		p4d[511] += load_delta;
+	}
+
+	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
+	pud[510] += load_delta;
+	pud[511] += load_delta;
+
+	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
+	pmd[506] += load_delta;
+
+	/*
+	 * Set up the identity mapping for the switchover.  These
+	 * entries should *NOT* have the global bit set!  This also
+	 * creates a bunch of nonsense entries but that is fine --
+	 * it avoids problems around wraparound.
+	 */
+
+	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+
+		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+		pgd[i + 0] = (pgdval_t)p4d + _KERNPG_TABLE;
+		pgd[i + 1] = (pgdval_t)p4d + _KERNPG_TABLE;
+
+		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
+		p4d[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
+		p4d[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+	} else {
+		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+		pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
+		pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+	}
+
+	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
+	pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
+	pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
+
+	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+	pmd_entry +=  physaddr;
+
+	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
+		pmd[idx] = pmd_entry + i * PMD_SIZE;
+	}
+
+	/*
+	 * Fixup the kernel text+data virtual addresses. Note that
+	 * we might write invalid pmds, when the kernel is relocated
+	 * cleanup_highmap() fixes this up along with the mappings
+	 * beyond _end.
+	 */
+
+	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (pmd[i] & _PAGE_PRESENT)
+			pmd[i] += load_delta;
+	}
+
+	/* Fixup phys_base */
+	p = fixup_pointer(&phys_base, physaddr);
+	*p += load_delta;
+}
+
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
-	memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
+	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
 	next_early_pgt = 0;
-	write_cr3(__pa_nodebug(early_level4_pgt));
+	write_cr3(__pa_nodebug(early_top_pgt));
 }
 
 /* Create a new PMD entry */
@@ -51,15 +154,16 @@ int __init early_make_pgtable(unsigned long address)
 {
 	unsigned long physaddr = address - __PAGE_OFFSET;
 	pgdval_t pgd, *pgd_p;
+	p4dval_t p4d, *p4d_p;
 	pudval_t pud, *pud_p;
 	pmdval_t pmd, *pmd_p;
 
 	/* Invalid address or early pgt is done ?  */
-	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
+	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
 		return -1;
 
 again:
-	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
+	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
 	pgd = *pgd_p;
 
 	/*
@@ -67,8 +171,25 @@ int __init early_make_pgtable(unsigned long address)
 	 * critical -- __PAGE_OFFSET would point us back into the dynamic
 	 * range and we might end up looping forever...
 	 */
-	if (pgd)
-		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		p4d_p = pgd_p;
+	else if (pgd)
+		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+	else {
+		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+			reset_early_page_tables();
+			goto again;
+		}
+
+		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
+		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+	}
+	p4d_p += p4d_index(address);
+	p4d = *p4d_p;
+
+	if (p4d)
+		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
 	else {
 		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
 			reset_early_page_tables();
@@ -77,7 +198,7 @@ int __init early_make_pgtable(unsigned long address)
 
 		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
 		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
 	}
 	pud_p += pud_index(address);
 	pud = *pud_p;
@@ -156,7 +277,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 
 	clear_bss();
 
-	clear_page(init_level4_pgt);
+	clear_page(init_top_pgt);
 
 	kasan_early_init();
 
@@ -171,8 +292,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	 */
 	load_ucode_bsp();
 
-	/* set init_level4_pgt kernel high mapping*/
-	init_level4_pgt[511] = early_level4_pgt[511];
+	/* set init_top_pgt kernel high mapping*/
+	init_top_pgt[511] = early_top_pgt[511];
 
 	x86_64_start_reservations(real_mode_data);
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ac9d327..6225550 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -37,10 +37,11 @@
  *
  */
 
+#define p4d_index(x)	(((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
 #define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 
-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
-L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
+PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
 	.text
@@ -72,101 +73,12 @@
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
-	/*
-	 * Compute the delta between the address I am compiled to run at and the
-	 * address I am actually running at.
-	 */
-	leaq	_text(%rip), %rbp
-	subq	$_text - __START_KERNEL_map, %rbp
-
-	/* Is the address not 2M aligned? */
-	testl	$~PMD_PAGE_MASK, %ebp
-	jnz	bad_address
-
-	/*
-	 * Is the address too large?
-	 */
-	leaq	_text(%rip), %rax
-	shrq	$MAX_PHYSMEM_BITS, %rax
-	jnz	bad_address
-
-	/*
-	 * Fixup the physical addresses in the page table
-	 */
-	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
-
-	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
-	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)
-
-	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
-
-	/*
-	 * Set up the identity mapping for the switchover.  These
-	 * entries should *NOT* have the global bit set!  This also
-	 * creates a bunch of nonsense entries but that is fine --
-	 * it avoids problems around wraparound.
-	 */
 	leaq	_text(%rip), %rdi
-	leaq	early_level4_pgt(%rip), %rbx
+	pushq	%rsi
+	call	__startup_64
+	popq	%rsi
 
-	movq	%rdi, %rax
-	shrq	$PGDIR_SHIFT, %rax
-
-	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
-	movq	%rdx, 0(%rbx,%rax,8)
-	movq	%rdx, 8(%rbx,%rax,8)
-
-	addq	$PAGE_SIZE, %rdx
-	movq	%rdi, %rax
-	shrq	$PUD_SHIFT, %rax
-	andl	$(PTRS_PER_PUD-1), %eax
-	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)
-	incl	%eax
-	andl	$(PTRS_PER_PUD-1), %eax
-	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)
-
-	addq	$PAGE_SIZE * 2, %rbx
-	movq	%rdi, %rax
-	shrq	$PMD_SHIFT, %rdi
-	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
-	leaq	(_end - 1)(%rip), %rcx
-	shrq	$PMD_SHIFT, %rcx
-	subq	%rdi, %rcx
-	incl	%ecx
-
-1:
-	andq	$(PTRS_PER_PMD - 1), %rdi
-	movq	%rax, (%rbx,%rdi,8)
-	incq	%rdi
-	addq	$PMD_SIZE, %rax
-	decl	%ecx
-	jnz	1b
-
-	test %rbp, %rbp
-	jz .Lskip_fixup
-
-	/*
-	 * Fixup the kernel text+data virtual addresses. Note that
-	 * we might write invalid pmds, when the kernel is relocated
-	 * cleanup_highmap() fixes this up along with the mappings
-	 * beyond _end.
-	 */
-	leaq	level2_kernel_pgt(%rip), %rdi
-	leaq	PAGE_SIZE(%rdi), %r8
-	/* See if it is a valid page table entry */
-1:	testb	$_PAGE_PRESENT, 0(%rdi)
-	jz	2f
-	addq	%rbp, 0(%rdi)
-	/* Go to the next page */
-2:	addq	$8, %rdi
-	cmp	%r8, %rdi
-	jne	1b
-
-	/* Fixup phys_base */
-	addq	%rbp, phys_base(%rip)
-
-.Lskip_fixup:
-	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(early_top_pgt - __START_KERNEL_map), %rax
 	jmp 1f
 ENTRY(secondary_startup_64)
 	/*
@@ -186,14 +98,17 @@
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(init_top_pgt - __START_KERNEL_map), %rax
 1:
 
-	/* Enable PAE mode and PGE */
+	/* Enable PAE mode, PGE and LA57 */
 	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
+#ifdef CONFIG_X86_5LEVEL
+	orl	$X86_CR4_LA57, %ecx
+#endif
 	movq	%rcx, %cr4
 
-	/* Setup early boot stage 4 level pagetables. */
+	/* Setup early boot stage 4-/5-level pagetables. */
 	addq	phys_base(%rip), %rax
 	movq	%rax, %cr3
 
@@ -417,9 +332,13 @@
 	.endr
 
 	__INITDATA
-NEXT_PAGE(early_level4_pgt)
+NEXT_PAGE(early_top_pgt)
 	.fill	511,8,0
+#ifdef CONFIG_X86_5LEVEL
+	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#else
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#endif
 
 NEXT_PAGE(early_dynamic_pgts)
 	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -427,14 +346,14 @@
 	.data
 
 #ifndef CONFIG_XEN
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
 	.fill	512,8,0
 #else
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
+	.org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.org    init_level4_pgt + L4_START_KERNEL*8, 0
+	.org    init_top_pgt + PGD_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
@@ -448,6 +367,12 @@
 	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 #endif
 
+#ifdef CONFIG_X86_5LEVEL
+NEXT_PAGE(level4_kernel_pgt)
+	.fill	511,8,0
+	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#endif
+
 NEXT_PAGE(level3_kernel_pgt)
 	.fill	L3_START_KERNEL,8,0
 	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 89ff7af..16f82a3 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -285,7 +285,7 @@ static void hpet_legacy_clockevent_register(void)
 	 * Start hpet with the boot cpu mask and make it
 	 * global after the IO_APIC has been initialized.
 	 */
-	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
+	hpet_clockevent.cpumask = cpumask_of(boot_cpu_data.cpu_index);
 	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
 					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
 	global_clock_event = &hpet_clockevent;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f34fe74..4aa03c5 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -432,84 +432,12 @@ int check_irq_vectors_for_cpu_disable(void)
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
 {
-	unsigned int irq, vector;
-	static int warned;
+	unsigned int irr, vector;
 	struct irq_desc *desc;
 	struct irq_data *data;
 	struct irq_chip *chip;
-	int ret;
 
-	for_each_irq_desc(irq, desc) {
-		int break_affinity = 0;
-		int set_affinity = 1;
-		const struct cpumask *affinity;
-
-		if (!desc)
-			continue;
-		if (irq == 2)
-			continue;
-
-		/* interrupt's are disabled at this point */
-		raw_spin_lock(&desc->lock);
-
-		data = irq_desc_get_irq_data(desc);
-		affinity = irq_data_get_affinity_mask(data);
-		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
-		    cpumask_subset(affinity, cpu_online_mask)) {
-			raw_spin_unlock(&desc->lock);
-			continue;
-		}
-
-		/*
-		 * Complete the irq move. This cpu is going down and for
-		 * non intr-remapping case, we can't wait till this interrupt
-		 * arrives at this cpu before completing the irq move.
-		 */
-		irq_force_complete_move(desc);
-
-		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-			break_affinity = 1;
-			affinity = cpu_online_mask;
-		}
-
-		chip = irq_data_get_irq_chip(data);
-		/*
-		 * The interrupt descriptor might have been cleaned up
-		 * already, but it is not yet removed from the radix tree
-		 */
-		if (!chip) {
-			raw_spin_unlock(&desc->lock);
-			continue;
-		}
-
-		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
-			chip->irq_mask(data);
-
-		if (chip->irq_set_affinity) {
-			ret = chip->irq_set_affinity(data, affinity, true);
-			if (ret == -ENOSPC)
-				pr_crit("IRQ %d set affinity failed because there are no available vectors.  The device assigned to this IRQ is unstable.\n", irq);
-		} else {
-			if (!(warned++))
-				set_affinity = 0;
-		}
-
-		/*
-		 * We unmask if the irq was not marked masked by the
-		 * core code. That respects the lazy irq disable
-		 * behaviour.
-		 */
-		if (!irqd_can_move_in_process_context(data) &&
-		    !irqd_irq_masked(data) && chip->irq_unmask)
-			chip->irq_unmask(data);
-
-		raw_spin_unlock(&desc->lock);
-
-		if (break_affinity && set_affinity)
-			pr_notice("Broke affinity for irq %i\n", irq);
-		else if (!set_affinity)
-			pr_notice("Cannot set affinity for irq %i\n", irq);
-	}
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * We can remove mdelay() and then send spuriuous interrupts to
@@ -528,8 +456,6 @@ void fixup_irqs(void)
 	 * nothing else will touch it.
 	 */
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		unsigned int irr;
-
 		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
 			continue;
 
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index c37bd0f..ab4f491 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	__jump_label_transform(entry, type, NULL, 0);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 static enum {
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 901c640..69ea0bc 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -28,6 +28,7 @@
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
+#include <linux/frame.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -94,6 +95,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 }
 
 asm (
+			"optprobe_template_func:\n"
 			".global optprobe_template_entry\n"
 			"optprobe_template_entry:\n"
 #ifdef CONFIG_X86_64
@@ -131,7 +133,12 @@ asm (
 			"	popf\n"
 #endif
 			".global optprobe_template_end\n"
-			"optprobe_template_end:\n");
+			"optprobe_template_end:\n"
+			".type optprobe_template_func, @function\n"
+			".size optprobe_template_func, .-optprobe_template_func\n");
+
+void optprobe_template_func(void);
+STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
 #define TMPL_MOVE_IDX \
 	((long)&optprobe_template_val - (long)&optprobe_template_entry)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c097..43e10d6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index d4a1583..a870910 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -22,24 +22,25 @@
 #include <asm/syscalls.h>
 
 /* context.lock is held for us, so we don't need any locking. */
-static void flush_ldt(void *current_mm)
+static void flush_ldt(void *__mm)
 {
+	struct mm_struct *mm = __mm;
 	mm_context_t *pc;
 
-	if (current->active_mm != current_mm)
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
 		return;
 
-	pc = &current->active_mm->context;
-	set_ldt(pc->ldt->entries, pc->ldt->size);
+	pc = &mm->context;
+	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
 }
 
 /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
-static struct ldt_struct *alloc_ldt_struct(unsigned int size)
+static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 {
 	struct ldt_struct *new_ldt;
 	unsigned int alloc_size;
 
-	if (size > LDT_ENTRIES)
+	if (num_entries > LDT_ENTRIES)
 		return NULL;
 
 	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
@@ -47,7 +48,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
 		return NULL;
 
 	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
-	alloc_size = size * LDT_ENTRY_SIZE;
+	alloc_size = num_entries * LDT_ENTRY_SIZE;
 
 	/*
 	 * Xen is very picky: it requires a page-aligned LDT that has no
@@ -65,14 +66,14 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
 		return NULL;
 	}
 
-	new_ldt->size = size;
+	new_ldt->nr_entries = num_entries;
 	return new_ldt;
 }
 
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
-	paravirt_alloc_ldt(ldt->entries, ldt->size);
+	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
 /* context.lock is held */
@@ -91,8 +92,8 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 	if (likely(!ldt))
 		return;
 
-	paravirt_free_ldt(ldt->entries, ldt->size);
-	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+	paravirt_free_ldt(ldt->entries, ldt->nr_entries);
+	if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
 		vfree_atomic(ldt->entries);
 	else
 		free_page((unsigned long)ldt->entries);
@@ -122,14 +123,14 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 		goto out_unlock;
 	}
 
-	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
 	if (!new_ldt) {
 		retval = -ENOMEM;
 		goto out_unlock;
 	}
 
 	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
-	       new_ldt->size * LDT_ENTRY_SIZE);
+	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
 	finalize_ldt_struct(new_ldt);
 
 	mm->context.ldt = new_ldt;
@@ -152,9 +153,9 @@ void destroy_context_ldt(struct mm_struct *mm)
 
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
-	int retval;
-	unsigned long size;
 	struct mm_struct *mm = current->mm;
+	unsigned long entries_size;
+	int retval;
 
 	mutex_lock(&mm->context.lock);
 
@@ -166,18 +167,18 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
 
-	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
-	if (size > bytecount)
-		size = bytecount;
+	entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
+	if (entries_size > bytecount)
+		entries_size = bytecount;
 
-	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+	if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
 		retval = -EFAULT;
 		goto out_unlock;
 	}
 
-	if (size != bytecount) {
+	if (entries_size != bytecount) {
 		/* Zero-fill the rest and pretend we read bytecount bytes. */
-		if (clear_user(ptr + size, bytecount - size)) {
+		if (clear_user(ptr + entries_size, bytecount - entries_size)) {
 			retval = -EFAULT;
 			goto out_unlock;
 		}
@@ -208,7 +209,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 {
 	struct mm_struct *mm = current->mm;
 	struct ldt_struct *new_ldt, *old_ldt;
-	unsigned int oldsize, newsize;
+	unsigned int old_nr_entries, new_nr_entries;
 	struct user_desc ldt_info;
 	struct desc_struct ldt;
 	int error;
@@ -247,17 +248,18 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 
 	mutex_lock(&mm->context.lock);
 
-	old_ldt = mm->context.ldt;
-	oldsize = old_ldt ? old_ldt->size : 0;
-	newsize = max(ldt_info.entry_number + 1, oldsize);
+	old_ldt       = mm->context.ldt;
+	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
+	new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
 
 	error = -ENOMEM;
-	new_ldt = alloc_ldt_struct(newsize);
+	new_ldt = alloc_ldt_struct(new_nr_entries);
 	if (!new_ldt)
 		goto out_unlock;
 
 	if (old_ldt)
-		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+		memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
+
 	new_ldt->entries[ldt_info.entry_number] = ldt;
 	finalize_ldt_struct(new_ldt);
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 6f5ca4e..cb0a304 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -347,7 +347,7 @@ void machine_kexec(struct kimage *image)
 void arch_crash_save_vmcoreinfo(void)
 {
 	VMCOREINFO_NUMBER(phys_base);
-	VMCOREINFO_SYMBOL(init_level4_pgt);
+	VMCOREINFO_SYMBOL(init_top_pgt);
 
 #ifdef CONFIG_NUMA
 	VMCOREINFO_SYMBOL(node_data);
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 6d9582e..d27f8d8 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -78,7 +78,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 
 	/* Don't wait longer than a second */
 	timeout = USEC_PER_SEC;
-	while (!cpumask_empty(mask) && timeout--)
+	while (!cpumask_empty(mask) && --timeout)
 	        udelay(1);
 
 	/* What happens if we timeout, do we still unregister?? */
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 3586996..bc0a849 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -391,7 +391,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 
 	.read_cr2 = native_read_cr2,
 	.write_cr2 = native_write_cr2,
-	.read_cr3 = native_read_cr3,
+	.read_cr3 = __native_read_cr3,
 	.write_cr3 = native_write_cr3,
 
 	.flush_tlb_user = native_flush_tlb,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0bb8842..3ca1980 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,17 +545,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct inactive_task_frame *frame =
-		(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
-	return READ_ONCE_NOCHECK(frame->ret_addr);
-}
-
-/*
  * Called from fs/proc with a reference on @p to find the function
  * which called into schedule(). This needs to be done carefully
  * because the task might wake up and we might look at a stack
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ff40e74..c6d6dc5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
 	printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-		smp_processor_id());
+		raw_smp_processor_id());
 
 	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->ax, regs->bx, regs->cx, regs->dx);
@@ -92,7 +92,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
-	cr3 = read_cr3();
+	cr3 = __read_cr3();
 	cr4 = __read_cr4();
 	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 			cr0, cr2, cr3, cr4);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b6840bf..c3169be 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -104,7 +104,7 @@ void __show_regs(struct pt_regs *regs, int all)
 
 	cr0 = read_cr0();
 	cr2 = read_cr2();
-	cr3 = read_cr3();
+	cr3 = __read_cr3();
 	cr4 = __read_cr4();
 
 	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
@@ -142,7 +142,7 @@ void release_thread(struct task_struct *dead_task)
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
 				dead_task->mm->context.ldt->entries,
-				dead_task->mm->context.ldt->size);
+				dead_task->mm->context.ldt->nr_entries);
 			BUG();
 		}
 #endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2544700..67393fc 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/tboot.h>
 #include <linux/delay.h>
+#include <linux/frame.h>
 #include <acpi/reboot.h>
 #include <asm/io.h>
 #include <asm/apic.h>
@@ -123,6 +124,7 @@ void __noreturn machine_real_restart(unsigned int type)
 #ifdef CONFIG_APM_MODULE
 EXPORT_SYMBOL(machine_real_restart);
 #endif
+STACK_FRAME_NON_STANDARD(machine_real_restart);
 
 /*
  * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f818236..65622f0 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -503,7 +503,7 @@ static int __init reserve_crashkernel_low(void)
 			return 0;
 	}
 
-	low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
+	low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
 	if (!low_base) {
 		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
 		       (unsigned long)(low_size >> 20));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f04479a..b474c8d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -863,7 +863,7 @@ static void announce_cpu(int cpu, int apicid)
 	if (cpu == 1)
 		printk(KERN_INFO "x86: Booting SMP configuration:\n");
 
-	if (system_state == SYSTEM_BOOTING) {
+	if (system_state < SYSTEM_RUNNING) {
 		if (node != current_node) {
 			if (current_node > (-1))
 				pr_cont("\n");
@@ -1589,7 +1589,6 @@ void native_cpu_die(unsigned int cpu)
 void play_dead_common(void)
 {
 	idle_task_exit();
-	reset_lazy_tlbstate();
 
 	/* Ack it */
 	(void)cpu_report_death();
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index f07f83b..5f25cfb 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -34,7 +34,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
 
 		mutex_lock(&child->mm->context.lock);
 		if (unlikely(!child->mm->context.ldt ||
-			     seg >= child->mm->context.ldt->size))
+			     seg >= child->mm->context.ldt->nr_entries))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
 			desc = &child->mm->context.ldt->entries[seg];
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 207b8f2..213ddf3 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 4b17240..a4eb279 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,7 +514,7 @@ int tboot_force_iommu(void)
 	if (!tboot_enabled())
 		return 0;
 
-	if (!intel_iommu_tboot_noforce)
+	if (intel_iommu_tboot_noforce)
 		return 1;
 
 	if (no_iommu || swiotlb || dmar_disabled)
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index d39c091..e0754cd 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -66,7 +66,7 @@ static struct irqaction irq0  = {
 	.name = "timer"
 };
 
-void __init setup_default_timer_irq(void)
+static void __init setup_default_timer_irq(void)
 {
 	if (!nr_legacy_irqs())
 		return;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3995d3a..bf54309 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
 	return ud == INSN_UD0 || ud == INSN_UD2;
 }
 
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
 {
 	if (trapnr != X86_TRAP_UD)
 		return 0;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 714dfba..796d96b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -51,115 +51,34 @@ static u32 art_to_tsc_denominator;
 static u64 art_to_tsc_offset;
 struct clocksource *art_related_clocksource;
 
-/*
- * Use a ring-buffer like data structure, where a writer advances the head by
- * writing a new data entry and a reader advances the tail when it observes a
- * new entry.
- *
- * Writers are made to wait on readers until there's space to write a new
- * entry.
- *
- * This means that we can always use an {offset, mul} pair to compute a ns
- * value that is 'roughly' in the right direction, even if we're writing a new
- * {offset, mul} pair during the clock read.
- *
- * The down-side is that we can no longer guarantee strict monotonicity anymore
- * (assuming the TSC was that to begin with), because while we compute the
- * intersection point of the two clock slopes and make sure the time is
- * continuous at the point of switching; we can no longer guarantee a reader is
- * strictly before or after the switch point.
- *
- * It does mean a reader no longer needs to disable IRQs in order to avoid
- * CPU-Freq updates messing with his times, and similarly an NMI reader will
- * no longer run the risk of hitting half-written state.
- */
-
 struct cyc2ns {
-	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
-	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
-	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
-}; /* exactly fits one cacheline */
+	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
+	seqcount_t	   seq;		/* 32 + 4    = 36 */
+
+}; /* fits one cacheline */
 
 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
 
-struct cyc2ns_data *cyc2ns_read_begin(void)
+void cyc2ns_read_begin(struct cyc2ns_data *data)
 {
-	struct cyc2ns_data *head;
+	int seq, idx;
 
-	preempt_disable();
+	preempt_disable_notrace();
 
-	head = this_cpu_read(cyc2ns.head);
-	/*
-	 * Ensure we observe the entry when we observe the pointer to it.
-	 * matches the wmb from cyc2ns_write_end().
-	 */
-	smp_read_barrier_depends();
-	head->__count++;
-	barrier();
+	do {
+		seq = this_cpu_read(cyc2ns.seq.sequence);
+		idx = seq & 1;
 
-	return head;
+		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
+		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
+		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
+
+	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }
 
-void cyc2ns_read_end(struct cyc2ns_data *head)
+void cyc2ns_read_end(void)
 {
-	barrier();
-	/*
-	 * If we're the outer most nested read; update the tail pointer
-	 * when we're done. This notifies possible pending writers
-	 * that we've observed the head pointer and that the other
-	 * entry is now free.
-	 */
-	if (!--head->__count) {
-		/*
-		 * x86-TSO does not reorder writes with older reads;
-		 * therefore once this write becomes visible to another
-		 * cpu, we must be finished reading the cyc2ns_data.
-		 *
-		 * matches with cyc2ns_write_begin().
-		 */
-		this_cpu_write(cyc2ns.tail, head);
-	}
-	preempt_enable();
-}
-
-/*
- * Begin writing a new @data entry for @cpu.
- *
- * Assumes some sort of write side lock; currently 'provided' by the assumption
- * that cpufreq will call its notifiers sequentially.
- */
-static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
-{
-	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-	struct cyc2ns_data *data = c2n->data;
-
-	if (data == c2n->head)
-		data++;
-
-	/* XXX send an IPI to @cpu in order to guarantee a read? */
-
-	/*
-	 * When we observe the tail write from cyc2ns_read_end(),
-	 * the cpu must be done with that entry and its safe
-	 * to start writing to it.
-	 */
-	while (c2n->tail == data)
-		cpu_relax();
-
-	return data;
-}
-
-static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
-{
-	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
-	/*
-	 * Ensure the @data writes are visible before we publish the
-	 * entry. Matches the data-depencency in cyc2ns_read_begin().
-	 */
-	smp_wmb();
-
-	ACCESS_ONCE(c2n->head) = data;
+	preempt_enable_notrace();
 }
 
 /*
@@ -191,7 +110,6 @@ static void cyc2ns_data_init(struct cyc2ns_data *data)
 	data->cyc2ns_mul = 0;
 	data->cyc2ns_shift = 0;
 	data->cyc2ns_offset = 0;
-	data->__count = 0;
 }
 
 static void cyc2ns_init(int cpu)
@@ -201,51 +119,29 @@ static void cyc2ns_init(int cpu)
 	cyc2ns_data_init(&c2n->data[0]);
 	cyc2ns_data_init(&c2n->data[1]);
 
-	c2n->head = c2n->data;
-	c2n->tail = c2n->data;
+	seqcount_init(&c2n->seq);
 }
 
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-	struct cyc2ns_data *data, *tail;
+	struct cyc2ns_data data;
 	unsigned long long ns;
 
-	/*
-	 * See cyc2ns_read_*() for details; replicated in order to avoid
-	 * an extra few instructions that came with the abstraction.
-	 * Notable, it allows us to only do the __count and tail update
-	 * dance when its actually needed.
-	 */
+	cyc2ns_read_begin(&data);
 
-	preempt_disable_notrace();
-	data = this_cpu_read(cyc2ns.head);
-	tail = this_cpu_read(cyc2ns.tail);
+	ns = data.cyc2ns_offset;
+	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
 
-	if (likely(data == tail)) {
-		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-	} else {
-		data->__count++;
-
-		barrier();
-
-		ns = data->cyc2ns_offset;
-		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-
-		barrier();
-
-		if (!--data->__count)
-			this_cpu_write(cyc2ns.tail, data);
-	}
-	preempt_enable_notrace();
+	cyc2ns_read_end();
 
 	return ns;
 }
 
-static void set_cyc2ns_scale(unsigned long khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 {
-	unsigned long long tsc_now, ns_now;
-	struct cyc2ns_data *data;
+	unsigned long long ns_now;
+	struct cyc2ns_data data;
+	struct cyc2ns *c2n;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -254,9 +150,6 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	if (!khz)
 		goto done;
 
-	data = cyc2ns_write_begin(cpu);
-
-	tsc_now = rdtsc();
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@@ -264,7 +157,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	 * time function is continuous; see the comment near struct
 	 * cyc2ns_data.
 	 */
-	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
+	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
 			       NSEC_PER_MSEC, 0);
 
 	/*
@@ -273,20 +166,26 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
 	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
 	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
 	 */
-	if (data->cyc2ns_shift == 32) {
-		data->cyc2ns_shift = 31;
-		data->cyc2ns_mul >>= 1;
+	if (data.cyc2ns_shift == 32) {
+		data.cyc2ns_shift = 31;
+		data.cyc2ns_mul >>= 1;
 	}
 
-	data->cyc2ns_offset = ns_now -
-		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
+	data.cyc2ns_offset = ns_now -
+		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
 
-	cyc2ns_write_end(cpu, data);
+	c2n = per_cpu_ptr(&cyc2ns, cpu);
+
+	raw_write_seqcount_latch(&c2n->seq);
+	c2n->data[0] = data;
+	raw_write_seqcount_latch(&c2n->seq);
+	c2n->data[1] = data;
 
 done:
-	sched_clock_idle_wakeup_event(0);
+	sched_clock_idle_wakeup_event();
 	local_irq_restore(flags);
 }
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -374,6 +273,8 @@ static int __init tsc_setup(char *str)
 		tsc_clocksource_reliable = 1;
 	if (!strncmp(str, "noirqtime", 9))
 		no_sched_irq_time = 1;
+	if (!strcmp(str, "unstable"))
+		mark_tsc_unstable("boot parameter");
 	return 1;
 }
 
@@ -986,7 +887,6 @@ void tsc_restore_sched_clock_state(void)
 }
 
 #ifdef CONFIG_CPU_FREQ
-
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
  * changes.
  *
@@ -1027,7 +927,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
 
-		set_cyc2ns_scale(tsc_khz, freq->cpu);
+		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
 	}
 
 	return 0;
@@ -1127,6 +1027,15 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
 }
 
+static void tsc_cs_tick_stable(struct clocksource *cs)
+{
+	if (tsc_unstable)
+		return;
+
+	if (using_native_sched_clock())
+		sched_clock_tick_stable();
+}
+
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */
@@ -1140,6 +1049,7 @@ static struct clocksource clocksource_tsc = {
 	.archdata               = { .vclock_mode = VCLOCK_TSC },
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
+	.tick_stable		= tsc_cs_tick_stable,
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1255,6 +1165,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	static int hpet;
 	u64 tsc_stop, ref_stop, delta;
 	unsigned long freq;
+	int cpu;
 
 	/* Don't bother refining TSC on unstable systems */
 	if (check_tsc_unstable())
@@ -1305,6 +1216,10 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	/* Inform the TSC deadline clockevent devices about the recalibration */
 	lapic_update_tsc_freq();
 
+	/* Update the sched_clock() rate to match the clocksource one */
+	for_each_possible_cpu(cpu)
+		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
+
 out:
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
@@ -1350,7 +1265,7 @@ device_initcall(init_tsc_clocksource);
 
 void __init tsc_init(void)
 {
-	u64 lpj;
+	u64 lpj, cyc;
 	int cpu;
 
 	if (!boot_cpu_has(X86_FEATURE_TSC)) {
@@ -1390,9 +1305,10 @@ void __init tsc_init(void)
 	 * speed as the bootup CPU. (cpufreq notifiers will fix this
 	 * up if their speed diverges)
 	 */
+	cyc = rdtsc();
 	for_each_possible_cpu(cpu) {
 		cyc2ns_init(cpu);
-		set_cyc2ns_scale(tsc_khz, cpu);
+		set_cyc2ns_scale(tsc_khz, cpu, cyc);
 	}
 
 	if (tsc_disabled > 0)
@@ -1412,11 +1328,11 @@ void __init tsc_init(void)
 
 	use_tsc_delay();
 
+	check_system_tsc_reliable();
+
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
 
-	check_system_tsc_reliable();
-
 	detect_art();
 }
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 728f753..7842371 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -71,13 +71,8 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 	 * non zero. We don't do that on non boot cpus because physical
 	 * hotplug should have set the ADJUST register to a value > 0 so
 	 * the TSC is in sync with the already running cpus.
-	 *
-	 * But we always force positive ADJUST values. Otherwise the TSC
-	 * deadline timer creates an interrupt storm. We also have to
-	 * prevent values > 0x7FFFFFFF as those wreckage the timer as well.
 	 */
-	if ((bootcpu && bootval != 0) || (!bootcpu && bootval < 0) ||
-	    (bootval > 0x7FFFFFFF)) {
+	if (bootcpu && bootval != 0) {
 		pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
 			bootval);
 		wrmsrl(MSR_IA32_TSC_ADJUST, 0);
@@ -451,20 +446,6 @@ void check_tsc_sync_target(void)
 	 */
 	cur->adjusted += cur_max_warp;
 
-	/*
-	 * TSC deadline timer stops working or creates an interrupt storm
-	 * with adjust values < 0 and > x07ffffff.
-	 *
-	 * To allow adjust values > 0x7FFFFFFF we need to disable the
-	 * deadline timer and use the local APIC timer, but that requires
-	 * more intrusive changes and we do not have any useful information
-	 * from Intel about the underlying HW wreckage yet.
-	 */
-	if (cur->adjusted < 0)
-		cur->adjusted = 0;
-	if (cur->adjusted > 0x7FFFFFFF)
-		cur->adjusted = 0x7FFFFFFF;
-
 	pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
 		cpu, cur_max_warp, cur->adjusted);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae7..59ca2ee 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
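
The loop rewrite above replaces an unbounded for(;;) search with a do/while that is guaranteed to terminate: the scan wraps modulo nent and, in the worst case, lands back on entry i itself, which trivially matches its own function number. In isolation the wrap-around search looks like the sketch below (stand-alone; the helper name is made up):

	/* Return the index of the next entry sharing entries[i].function,
	 * wrapping around the array; returns i when no other entry matches. */
	static int next_stateful_entry(const struct kvm_cpuid_entry2 *entries,
				       int nent, int i)
	{
		int j = i;

		do {
			j = (j + 1) % nent;
		} while (entries[j].function != entries[i].function);

		return j;
	}
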
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2..80890de 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
 	}
 
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
 	return X86EMUL_CONTINUE;
 }
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c329d28..d24c874 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+	preempt_disable();
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
+	preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
-	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+	if (kvm_vcpu_is_reset_bsp(vcpu) &&
+	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
 		kvm_lapic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d3376f..cb82259 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2797580..330bf3a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 183ddb2..33460fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/hashtable.h>
+#include <linux/frame.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -1807,7 +1808,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
 	 * for cross vendor migration purposes by "not present"
 	 */
-	var->unusable = !var->present || (var->type == 0);
+	var->unusable = !var->present;
 
 	switch (seg) {
 	case VCPU_SREG_TR:
@@ -1840,6 +1841,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		 */
 		if (var->unusable)
 			var->db = 0;
+		/* This is symmetric with svm_set_segment() */
 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
 		break;
 	}
@@ -1980,18 +1982,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	s->base = var->base;
 	s->limit = var->limit;
 	s->selector = var->selector;
-	if (var->unusable)
-		s->attrib = 0;
-	else {
-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-	}
+	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
 	/*
 	 * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1998,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	 * would entail passing the CPL to userspace and back.
 	 */
 	if (seg == VCPU_SREG_SS)
-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+		/* This is symmetric with svm_get_segment() */
+		svm->vmcb->save.cpl = (var->dpl & 3);
 
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
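
With this change an "unusable" segment no longer zeroes the whole VMCB attribute word; the state is folded into the present bit instead, so svm_set_segment() and svm_get_segment() become symmetric and the remaining attribute bits survive a save/restore (which matters for cross-vendor migration of null selectors). A minimal sketch of the intended round trip, reusing SVM_SELECTOR_P_SHIFT from the hunk above (the helper names are hypothetical):

	/* Pack: the present bit is encoded as "present && !unusable". */
	static u16 pack_present_bit(u16 attrib, bool present, bool unusable)
	{
		attrib &= ~(1 << SVM_SELECTOR_P_SHIFT);
		attrib |= (u16)(present && !unusable) << SVM_SELECTOR_P_SHIFT;
		return attrib;
	}

	/* Unpack: a clear present bit is reported as an unusable segment. */
	static bool unpack_unusable(u16 attrib)
	{
		return !((attrib >> SVM_SELECTOR_P_SHIFT) & 1);
	}
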
@@ -4908,6 +4907,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	mark_all_clean(svm->vmcb);
 }
+STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 72f7839..6dcc487 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
+#include <linux/frame.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -48,6 +49,7 @@
 #include <asm/kexec.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/mmu_context.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -596,6 +598,7 @@ struct vcpu_vmx {
 		int           gs_ldt_reload_needed;
 		int           fs_reload_needed;
 		u64           msr_host_bndcfgs;
+		unsigned long vmcs_host_cr3;	/* May not match real cr3 */
 		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
 	} host_state;
 	struct {
@@ -2425,7 +2428,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
 	if (!(vmcs12->exception_bitmap & (1u << nr)))
 		return 0;
 
-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
 			  vmcs_read32(VM_EXIT_INTR_INFO),
 			  vmcs_readl(EXIT_QUALIFICATION));
 	return 1;
@@ -5012,12 +5015,19 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
-	unsigned long cr0, cr4;
+	unsigned long cr0, cr3, cr4;
 
 	cr0 = read_cr0();
 	WARN_ON(cr0 & X86_CR0_TS);
 	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
-	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+
+	/*
+	 * Save the most likely value for this task's CR3 in the VMCS.
+	 * We can't use __get_current_cr3_fast() because we're not atomic.
+	 */
+	cr3 = __read_cr3();
+	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
+	vmx->host_state.vmcs_host_cr3 = cr3;
 
 	/* Save the most likely value for this task's CR4 in the VMCS. */
 	cr4 = cr4_read_shadow();
@@ -6914,97 +6924,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }
 
@@ -7066,6 +7000,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7031,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
- 
+
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
@@ -7213,9 +7177,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
@@ -7545,9 +7519,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;
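
After this refactor, nested_vmx_get_vmptr() only fetches the guest-supplied pointer; each of VMXON/VMCLEAR/VMPTRLD then applies its own validity check and reports its own VM-instruction error. The shared part of those checks is small enough to read at a glance (sketch only; the helper below is hypothetical, and the real handlers open-code it as shown in the hunks above):

	/* A vmpointer is plausible if it is 4KB aligned and sets no bits
	 * beyond the guest's physical address width (Intel SDM 30.3). */
	static bool nested_vmptr_is_plausible(struct kvm_vcpu *vcpu, gpa_t vmptr)
	{
		return PAGE_ALIGNED(vmptr) &&
		       !(vmptr >> cpuid_maxphyaddr(vcpu));
	}
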
@@ -7913,11 +7897,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
-	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_readl(vcpu, reg);
+	int reg;
+	unsigned long val;
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		reg = (exit_qualification >> 8) & 15;
+		val = kvm_register_readl(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7958,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
 		if (vmcs12->cr0_guest_host_mask & 0xe &
 		    (val ^ vmcs12->cr0_read_shadow))
 			return true;
@@ -8675,6 +8662,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 			);
 	}
 }
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
 static bool vmx_has_high_real_mode_segbase(void)
 {
@@ -8843,7 +8831,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long debugctlmsr, cr4;
+	unsigned long debugctlmsr, cr3, cr4;
 
 	/* Don't enter VMX if guest state is invalid, let the exit handler
 	   start emulation until we arrive back to a valid state */
@@ -8865,6 +8853,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr3 = __get_current_cr3_fast();
+	if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+		vmcs_writel(HOST_CR3, cr3);
+		vmx->host_state.vmcs_host_cr3 = cr3;
+	}
+
 	cr4 = cr4_read_shadow();
 	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
 		vmcs_writel(HOST_CR4, cr4);
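
The new vmcs_host_cr3 field mirrors what was already done for CR4: keep a shadow copy of the last value written to the VMCS and only issue a VMWRITE when the current task's CR3 differs, since writing the field on every entry would be pure overhead in the common case. The pattern, expressed as a generic helper (hypothetical; the run loop above open-codes it per field):

	/* Write a VMCS field only when the value changed since the last run. */
	static inline void vmcs_writel_cached(unsigned long field,
					      unsigned long val,
					      unsigned long *shadow)
	{
		if (unlikely(val != *shadow)) {
			vmcs_writel(field, val);
			*shadow = val;
		}
	}

Called as vmcs_writel_cached(HOST_CR3, __get_current_cr3_fast(), &vmx->host_state.vmcs_host_cr3), this would be equivalent to the open-coded check in vmx_vcpu_run() above.
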
@@ -9051,6 +9045,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 }
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02363e3..0e846f0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
 	ctxt->eflags = kvm_get_rflags(vcpu);
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
 	ctxt->eip = kvm_rip_read(vcpu);
 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
 	struct kvm_run *kvm_run = vcpu->run;
 
-	/*
-	 * rflags is the old, "raw" value of the flags.  The new value has
-	 * not been saved yet.
-	 *
-	 * This is correct even for TF set by the guest, because "the
-	 * processor will not generate this exception after the instruction
-	 * that sets the TF flag".
-	 */
-	if (unlikely(rflags & X86_EFLAGS_TF)) {
-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-						  DR6_RTM;
-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-			kvm_run->debug.arch.exception = DB_VECTOR;
-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
-			*r = EMULATE_USER_EXIT;
-		} else {
-			/*
-			 * "Certain debug exceptions may clear bit 0-3.  The
-			 * remaining contents of the DR6 register are never
-			 * cleared by the processor".
-			 */
-			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
-		}
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+		kvm_run->debug.arch.exception = DB_VECTOR;
+		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		*r = EMULATE_USER_EXIT;
+	} else {
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~15;
+		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+		kvm_queue_exception(vcpu, DB_VECTOR);
 	}
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	int r = EMULATE_DONE;
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+	/*
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
+	 *
+	 * This is correct even for TF set by the guest, because "the
+	 * processor will not generate this exception after the instruction
+	 * that sets the TF flag".
+	 */
+	if (unlikely(rflags & X86_EFLAGS_TF))
+		kvm_vcpu_do_singlestep(vcpu, &r);
 	return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		if (r == EMULATE_DONE &&
+		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
 			__kvm_set_rflags(vcpu, ctxt->eflags);
@@ -8394,10 +8396,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.pv.pv_unhalted)
 		return true;
 
-	if (atomic_read(&vcpu->arch.nmi_queued))
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+	    (vcpu->arch.nmi_pending &&
+	     kvm_x86_ops->nmi_allowed(vcpu)))
 		return true;
 
-	if (kvm_test_request(KVM_REQ_SMI, vcpu))
+	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -8604,8 +8609,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index c595957..020f75c 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -37,7 +37,7 @@
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_copy_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -58,7 +58,8 @@
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-17:	movl %edx,%ecx
+.L_copy_short_string:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
 	jz 20f
@@ -174,6 +175,8 @@
  */
 ENTRY(copy_user_enhanced_fast_string)
 	ASM_STAC
+	cmpl $64,%edx
+	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
 	movl %edx,%ecx
 1:	rep
 	movsb
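
The new cmpl/jb pair short-circuits 'rep movsb' for copies under 64 bytes, where the instruction's fixed startup cost outweighs its throughput advantage, and reuses the existing unrolled tail at .L_copy_short_string instead. In C terms the dispatch amounts to the following (illustrative only; both helpers are hypothetical stand-ins for the assembly paths in this file):

	static inline void copy_user_dispatch(void *dst, const void *src,
					      size_t len)
	{
		if (len < 64)
			copy_user_short(dst, src, len);	    /* unrolled word-at-a-time path */
		else
			copy_user_rep_movsb(dst, src, len); /* ERMS fast-string path */
	}
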
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index c815564..10ffa7e 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -13,14 +13,14 @@
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
 	pushq %rbx
-	pushq %rbp
+	pushq %r12
 	movq	%rdi, %r10	/* Save pointer */
 	xorl	%r11d, %r11d	/* Return value */
 	movl    (%rdi), %eax
 	movl    4(%rdi), %ecx
 	movl    8(%rdi), %edx
 	movl    12(%rdi), %ebx
-	movl    20(%rdi), %ebp
+	movl    20(%rdi), %r12d
 	movl    24(%rdi), %esi
 	movl    28(%rdi), %edi
 1:	\op
@@ -29,10 +29,10 @@
 	movl    %ecx, 4(%r10)
 	movl    %edx, 8(%r10)
 	movl    %ebx, 12(%r10)
-	movl    %ebp, 20(%r10)
+	movl    %r12d, 20(%r10)
 	movl    %esi, 24(%r10)
 	movl    %edi, 28(%r10)
-	popq %rbp
+	popq %r12
 	popq %rbx
 	ret
 3:
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 767be7c..12e3771 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1009,7 +1009,7 @@
 1: fxstor | RDGSBASE Ry (F3),(11B)
 2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE
+4: XSAVE | ptwrite Ey (F3),(11B)
 5: XRSTOR | lfence (11B)
 6: XSAVEOPT | clwb (66) | mfence (11B)
 7: clflush | clflushopt (66) | sfence (11B)
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 5e044d5..a179254 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -27,7 +27,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	seg >>= 3;
 	mutex_lock(&current->mm->context.lock);
-	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+	if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
 		ret = current->mm->context.ldt->entries[seg];
 	mutex_unlock(&current->mm->context.lock);
 #endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 96d2b84..0fbdcb6 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -2,7 +2,7 @@
 KCOV_INSTRUMENT_tlb.o	:= n
 
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
+	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index bce6990..0470826 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -431,7 +431,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 				       bool checkwx)
 {
 #ifdef CONFIG_X86_64
-	pgd_t *start = (pgd_t *) &init_level4_pgt;
+	pgd_t *start = (pgd_t *) &init_top_pgt;
 #else
 	pgd_t *start = swapper_pg_dir;
 #endif
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 35ea061..0ea8afc 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 	if (fixup_exception(regs, trapnr))
 		return;
 
+	if (fixup_bug(regs, trapnr))
+		return;
+
 fail:
 	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
 		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8ad91a0..2a1fa10c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -346,7 +346,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * Do _not_ use "current" here. We might be inside
 	 * an interrupt in the middle of a task switch..
 	 */
-	pgd_paddr = read_cr3();
+	pgd_paddr = read_cr3_pa();
 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 	if (!pmd_k)
 		return -1;
@@ -388,7 +388,7 @@ static bool low_pfn(unsigned long pfn)
 
 static void dump_pagetable(unsigned long address)
 {
-	pgd_t *base = __va(read_cr3());
+	pgd_t *base = __va(read_cr3_pa());
 	pgd_t *pgd = &base[pgd_index(address)];
 	p4d_t *p4d;
 	pud_t *pud;
@@ -451,7 +451,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * happen within a race in page table update. In the later
 	 * case just flush:
 	 */
-	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
+	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
@@ -555,7 +555,7 @@ static int bad_address(void *p)
 
 static void dump_pagetable(unsigned long address)
 {
-	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+	pgd_t *base = __va(read_cr3_pa());
 	pgd_t *pgd = base + pgd_index(address);
 	p4d_t *p4d;
 	pud_t *pud;
@@ -700,7 +700,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		pgd_t *pgd;
 		pte_t *pte;
 
-		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+		pgd = __va(read_cr3_pa());
 		pgd += pgd_index(address);
 
 		pte = lookup_address_in_pgd(pgd, address, &level);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
deleted file mode 100644
index 456dfdf..0000000
--- a/arch/x86/mm/gup.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Lockless get_user_pages_fast for x86
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/memremap.h>
-
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#ifndef CONFIG_X86_PAE
-	return READ_ONCE(*ptep);
-#else
-	/*
-	 * With get_user_pages_fast, we walk down the pagetables without taking
-	 * any locks.  For this we would like to load the pointers atomically,
-	 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
-	 * we do have is the guarantee that a pte will only either go from not
-	 * present to present, or present to not present or both -- it will not
-	 * switch to a completely different present page without a TLB flush in
-	 * between; something that we are blocking by holding interrupts off.
-	 *
-	 * Setting ptes from not present to present goes:
-	 * ptep->pte_high = h;
-	 * smp_wmb();
-	 * ptep->pte_low = l;
-	 *
-	 * And present to not present goes:
-	 * ptep->pte_low = 0;
-	 * smp_wmb();
-	 * ptep->pte_high = 0;
-	 *
-	 * We must ensure here that the load of pte_low sees l iff pte_high
-	 * sees h. We load pte_high *after* loading pte_low, which ensures we
-	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
-	 * which ensures that we haven't picked up a changed pte high. We might
-	 * have got rubbish values from pte_low and pte_high, but we are
-	 * guaranteed that pte_low will not have the present bit set *unless*
-	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
-	 * we're safe.
-	 *
-	 * gup_get_pte should not be used or copied outside gup.c without being
-	 * very careful -- it does not atomically load the pte or anything that
-	 * is likely to be useful for you.
-	 */
-	pte_t pte;
-
-retry:
-	pte.pte_low = ptep->pte_low;
-	smp_rmb();
-	pte.pte_high = ptep->pte_high;
-	smp_rmb();
-	if (unlikely(pte.pte_low != ptep->pte_low))
-		goto retry;
-
-	return pte;
-#endif
-}
-
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
-{
-	while ((*nr) - nr_start) {
-		struct page *page = pages[--(*nr)];
-
-		ClearPageReferenced(page);
-		put_page(page);
-	}
-}
-
-/*
- * 'pteval' can come from a pte, pmd, pud or p4d.  We only check
- * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
- * same value on all 4 types.
- */
-static inline int pte_allows_gup(unsigned long pteval, int write)
-{
-	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
-
-	if (write)
-		need_pte_bits |= _PAGE_RW;
-
-	if ((pteval & need_pte_bits) != need_pte_bits)
-		return 0;
-
-	/* Check memory protection keys permissions. */
-	if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write))
-		return 0;
-
-	return 1;
-}
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct dev_pagemap *pgmap = NULL;
-	int nr_start = *nr, ret = 0;
-	pte_t *ptep, *ptem;
-
-	/*
-	 * Keep the original mapped PTE value (ptem) around since we
-	 * might increment ptep off the end of the page when finishing
-	 * our loop iteration.
-	 */
-	ptem = ptep = pte_offset_map(&pmd, addr);
-	do {
-		pte_t pte = gup_get_pte(ptep);
-		struct page *page;
-
-		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_protnone(pte))
-			break;
-
-		if (!pte_allows_gup(pte_val(pte), write))
-			break;
-
-		if (pte_devmap(pte)) {
-			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
-			if (unlikely(!pgmap)) {
-				undo_dev_pagemap(nr, nr_start, pages);
-				break;
-			}
-		} else if (pte_special(pte))
-			break;
-
-		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-		page = pte_page(pte);
-		get_page(page);
-		put_dev_pagemap(pgmap);
-		SetPageReferenced(page);
-		pages[*nr] = page;
-		(*nr)++;
-
-	} while (ptep++, addr += PAGE_SIZE, addr != end);
-	if (addr == end)
-		ret = 1;
-	pte_unmap(ptem);
-
-	return ret;
-}
-
-static inline void get_head_page_multiple(struct page *page, int nr)
-{
-	VM_BUG_ON_PAGE(page != compound_head(page), page);
-	VM_BUG_ON_PAGE(page_count(page) == 0, page);
-	page_ref_add(page, nr);
-	SetPageReferenced(page);
-}
-
-static int __gup_device_huge(unsigned long pfn, unsigned long addr,
-		unsigned long end, struct page **pages, int *nr)
-{
-	int nr_start = *nr;
-	struct dev_pagemap *pgmap = NULL;
-
-	do {
-		struct page *page = pfn_to_page(pfn);
-
-		pgmap = get_dev_pagemap(pfn, pgmap);
-		if (unlikely(!pgmap)) {
-			undo_dev_pagemap(nr, nr_start, pages);
-			return 0;
-		}
-		SetPageReferenced(page);
-		pages[*nr] = page;
-		get_page(page);
-		put_dev_pagemap(pgmap);
-		(*nr)++;
-		pfn++;
-	} while (addr += PAGE_SIZE, addr != end);
-	return 1;
-}
-
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
-		unsigned long end, struct page **pages, int *nr)
-{
-	unsigned long fault_pfn;
-
-	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
-}
-
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
-		unsigned long end, struct page **pages, int *nr)
-{
-	unsigned long fault_pfn;
-
-	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
-}
-
-static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct page *head, *page;
-	int refs;
-
-	if (!pte_allows_gup(pmd_val(pmd), write))
-		return 0;
-
-	VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
-	if (pmd_devmap(pmd))
-		return __gup_device_huge_pmd(pmd, addr, end, pages, nr);
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);
-
-	refs = 0;
-	head = pmd_page(pmd);
-	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	do {
-		VM_BUG_ON_PAGE(compound_head(page) != head, page);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-	get_head_page_multiple(head, refs);
-
-	return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
-		int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	pmd_t *pmdp;
-
-	pmdp = pmd_offset(&pud, addr);
-	do {
-		pmd_t pmd = *pmdp;
-
-		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
-			return 0;
-		if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
-			/*
-			 * NUMA hinting faults need to be handled in the GUP
-			 * slowpath for accounting purposes and so that they
-			 * can be serialised against THP migration.
-			 */
-			if (pmd_protnone(pmd))
-				return 0;
-			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
-				return 0;
-		} else {
-			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
-				return 0;
-		}
-	} while (pmdp++, addr = next, addr != end);
-
-	return 1;
-}
-
-static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct page *head, *page;
-	int refs;
-
-	if (!pte_allows_gup(pud_val(pud), write))
-		return 0;
-
-	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
-	if (pud_devmap(pud))
-		return __gup_device_huge_pud(pud, addr, end, pages, nr);
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
-
-	refs = 0;
-	head = pud_page(pud);
-	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	do {
-		VM_BUG_ON_PAGE(compound_head(page) != head, page);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-	get_head_page_multiple(head, refs);
-
-	return 1;
-}
-
-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
-			int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	pud_t *pudp;
-
-	pudp = pud_offset(&p4d, addr);
-	do {
-		pud_t pud = *pudp;
-
-		next = pud_addr_end(addr, end);
-		if (pud_none(pud))
-			return 0;
-		if (unlikely(pud_large(pud))) {
-			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
-				return 0;
-		} else {
-			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
-				return 0;
-		}
-	} while (pudp++, addr = next, addr != end);
-
-	return 1;
-}
-
-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
-			int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	p4d_t *p4dp;
-
-	p4dp = p4d_offset(&pgd, addr);
-	do {
-		p4d_t p4d = *p4dp;
-
-		next = p4d_addr_end(addr, end);
-		if (p4d_none(p4d))
-			return 0;
-		BUILD_BUG_ON(p4d_large(p4d));
-		if (!gup_pud_range(p4d, addr, next, write, pages, nr))
-			return 0;
-	} while (p4dp++, addr = next, addr != end);
-
-	return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			  struct page **pages)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr, len, end;
-	unsigned long next;
-	unsigned long flags;
-	pgd_t *pgdp;
-	int nr = 0;
-
-	start &= PAGE_MASK;
-	addr = start;
-	len = (unsigned long) nr_pages << PAGE_SHIFT;
-	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					(void __user *)start, len)))
-		return 0;
-
-	/*
-	 * XXX: batch / limit 'nr', to avoid large irq off latency
-	 * needs some instrumenting to determine the common sizes used by
-	 * important workloads (eg. DB2), and whether limiting the batch size
-	 * will decrease performance.
-	 *
-	 * It seems like we're in the clear for the moment. Direct-IO is
-	 * the main guy that batches up lots of get_user_pages, and even
-	 * they are limited to 64-at-a-time which is not so many.
-	 */
-	/*
-	 * This doesn't prevent pagetable teardown, but does prevent
-	 * the pagetables and pages from being freed on x86.
-	 *
-	 * So long as we atomically load page table pointers versus teardown
-	 * (which we do on x86, with the above PAE exception), we can follow the
-	 * address down to the the page and take a ref on it.
-	 */
-	local_irq_save(flags);
-	pgdp = pgd_offset(mm, addr);
-	do {
-		pgd_t pgd = *pgdp;
-
-		next = pgd_addr_end(addr, end);
-		if (pgd_none(pgd))
-			break;
-		if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
-			break;
-	} while (pgdp++, addr = next, addr != end);
-	local_irq_restore(flags);
-
-	return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
- * @pages:	array that receives pointers to the pages pinned.
- * 		Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr, len, end;
-	unsigned long next;
-	pgd_t *pgdp;
-	int nr = 0;
-
-	start &= PAGE_MASK;
-	addr = start;
-	len = (unsigned long) nr_pages << PAGE_SHIFT;
-
-	end = start + len;
-	if (end < start)
-		goto slow_irqon;
-
-#ifdef CONFIG_X86_64
-	if (end >> __VIRTUAL_MASK_SHIFT)
-		goto slow_irqon;
-#endif
-
-	/*
-	 * XXX: batch / limit 'nr', to avoid large irq off latency
-	 * needs some instrumenting to determine the common sizes used by
-	 * important workloads (eg. DB2), and whether limiting the batch size
-	 * will decrease performance.
-	 *
-	 * It seems like we're in the clear for the moment. Direct-IO is
-	 * the main guy that batches up lots of get_user_pages, and even
-	 * they are limited to 64-at-a-time which is not so many.
-	 */
-	/*
-	 * This doesn't prevent pagetable teardown, but does prevent
-	 * the pagetables and pages from being freed on x86.
-	 *
-	 * So long as we atomically load page table pointers versus teardown
-	 * (which we do on x86, with the above PAE exception), we can follow the
-	 * address down to the the page and take a ref on it.
-	 */
-	local_irq_disable();
-	pgdp = pgd_offset(mm, addr);
-	do {
-		pgd_t pgd = *pgdp;
-
-		next = pgd_addr_end(addr, end);
-		if (pgd_none(pgd))
-			goto slow;
-		if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
-			goto slow;
-	} while (pgdp++, addr = next, addr != end);
-	local_irq_enable();
-
-	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
-	return nr;
-
-	{
-		int ret;
-
-slow:
-		local_irq_enable();
-slow_irqon:
-		/* Try to get the remaining pages with get_user_pages */
-		start += nr << PAGE_SHIFT;
-		pages += nr;
-
-		ret = get_user_pages_unlocked(start,
-					      (end - start) >> PAGE_SHIFT,
-					      pages, write ? FOLL_WRITE : 0);
-
-		/* Have to be a bit careful with return values */
-		if (nr > 0) {
-			if (ret < 0)
-				ret = nr;
-			else
-				ret += nr;
-		}
-
-		return ret;
-	}
-}
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43f..adad702 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea..673541e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
 	 * use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
 		page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+	else
+		direct_gbpages = 0;
 
 	/* Enable PSE if available */
 	if (boot_cpu_has(X86_FEATURE_PSE))
@@ -811,10 +811,8 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
-	.active_mm = &init_mm,
+	.loaded_mm = &init_mm,
 	.state = 0,
-#endif
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
 };
 EXPORT_SYMBOL_GPL(cpu_tlbstate);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 95651dc..dae6a5e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -92,6 +92,44 @@ __setup("noexec32=", nonx32_setup);
  * When memory was added make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
+#ifdef CONFIG_X86_5LEVEL
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		const pgd_t *pgd_ref = pgd_offset_k(addr);
+		struct page *page;
+
+		/* Check for overflow */
+		if (addr < start)
+			break;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			spinlock_t *pgt_lock;
+
+			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+			/* the pgt_lock only for Xen */
+			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+			spin_lock(pgt_lock);
+
+			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+
+			spin_unlock(pgt_lock);
+		}
+		spin_unlock(&pgd_lock);
+	}
+}
+#else
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
@@ -135,6 +173,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		spin_unlock(&pgd_lock);
 	}
 }
+#endif
 
 /*
  * NOTE: This function is marked __ref because it calls __init function
@@ -585,6 +624,57 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	return paddr_last;
 }
 
+static unsigned long __meminit
+phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+	      unsigned long page_size_mask)
+{
+	unsigned long paddr_next, paddr_last = paddr_end;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	int i = p4d_index(vaddr);
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+
+	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d;
+		pud_t *pud;
+
+		vaddr = (unsigned long)__va(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		if (paddr >= paddr_end) {
+			if (!after_bootmem &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					     E820_TYPE_RAM) &&
+			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+					     E820_TYPE_RESERVED_KERN))
+				set_p4d(p4d, __p4d(0));
+			continue;
+		}
+
+		if (!p4d_none(*p4d)) {
+			pud = pud_offset(p4d, 0);
+			paddr_last = phys_pud_init(pud, paddr,
+					paddr_end,
+					page_size_mask);
+			__flush_tlb_all();
+			continue;
+		}
+
+		pud = alloc_low_page();
+		paddr_last = phys_pud_init(pud, paddr, paddr_end,
+					   page_size_mask);
+
+		spin_lock(&init_mm.page_table_lock);
+		p4d_populate(&init_mm, p4d, pud);
+		spin_unlock(&init_mm.page_table_lock);
+	}
+	__flush_tlb_all();
+
+	return paddr_last;
+}
+
 /*
  * Create page table mapping for the physical memory for specific physical
  * addresses. The virtual and physical addresses have to be aligned on PMD level
@@ -606,26 +696,26 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		pgd_t *pgd = pgd_offset_k(vaddr);
 		p4d_t *p4d;
-		pud_t *pud;
 
 		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
-		BUILD_BUG_ON(pgd_none(*pgd));
-		p4d = p4d_offset(pgd, vaddr);
-		if (p4d_val(*p4d)) {
-			pud = (pud_t *)p4d_page_vaddr(*p4d);
-			paddr_last = phys_pud_init(pud, __pa(vaddr),
+		if (pgd_val(*pgd)) {
+			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask);
 			continue;
 		}
 
-		pud = alloc_low_page();
-		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+		p4d = alloc_low_page();
+		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
 					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		p4d_populate(&init_mm, p4d, pud);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			pgd_populate(&init_mm, pgd, p4d);
+		else
+			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
 		spin_unlock(&init_mm.page_table_lock);
 		pgd_changed = true;
 	}
@@ -990,7 +1080,13 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 
 		pud_base = pud_offset(p4d, 0);
 		remove_pud_table(pud_base, addr, next, direct);
-		free_pud_table(pud_base, p4d);
+		/*
+		 * For 4-level page tables we do not want to free PUDs, but in the
+		 * 5-level case we should free them. This code will have to change
+		 * to adapt for boot-time switching between 4 and 5 level page tables.
+		 */
+		if (CONFIG_PGTABLE_LEVELS == 5)
+			free_pud_table(pud_base, p4d);
 	}
 
 	if (direct)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bbc558b..4c1b5fd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -424,7 +424,7 @@ static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
 	/* Don't assume we're using swapper_pg_dir at this point */
-	pgd_t *base = __va(read_cr3());
+	pgd_t *base = __va(read_cr3_pa());
 	pgd_t *pgd = &base[pgd_index(addr)];
 	p4d_t *p4d = p4d_offset(pgd, addr);
 	pud_t *pud = pud_offset(p4d, addr);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0c7d812..88215ac 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,7 +12,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static int __init map_range(struct range *range)
@@ -109,8 +109,8 @@ void __init kasan_early_init(void)
 	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
-	kasan_map_early_shadow(early_level4_pgt);
-	kasan_map_early_shadow(init_level4_pgt);
+	kasan_map_early_shadow(early_top_pgt);
+	kasan_map_early_shadow(init_top_pgt);
 }
 
 void __init kasan_init(void)
@@ -121,8 +121,8 @@ void __init kasan_init(void)
 	register_die_notifier(&kasan_die_notifier);
 #endif
 
-	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
-	load_cr3(early_level4_pgt);
+	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+	load_cr3(early_top_pgt);
 	__flush_tlb_all();
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -148,7 +148,7 @@ void __init kasan_init(void)
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
-	load_cr3(init_level4_pgt);
+	load_cr3(init_top_pgt);
 	__flush_tlb_all();
 
 	/*
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index aed2064..af59916 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -6,12 +6,12 @@
  *
  * Entropy is generated using the KASLR early boot functions now shared in
  * the lib directory (originally written by Kees Cook). Randomization is
- * done on PGD & PUD page table levels to increase possible addresses. The
- * physical memory mapping code was adapted to support PUD level virtual
- * addresses. This implementation on the best configuration provides 30,000
- * possible virtual addresses in average for each memory region. An additional
- * low memory page is used to ensure each CPU can start with a PGD aligned
- * virtual address (for realmode).
+ * done on PGD & P4D/PUD page table levels to increase possible addresses.
+ * The physical memory mapping code was adapted to support P4D/PUD level
+ * virtual addresses. This implementation on the best configuration provides
+ * 30,000 possible virtual addresses on average for each memory region.
+ * An additional low memory page is used to ensure each CPU can start with
+ * a PGD aligned virtual address (for realmode).
  *
  * The order of each memory region is not changed. The feature looks at
  * the available space for the regions based on different configuration
@@ -70,7 +70,7 @@ static __initdata struct kaslr_memory_region {
 	unsigned long *base;
 	unsigned long size_tb;
 } kaslr_regions[] = {
-	{ &page_offset_base, 64/* Maximum */ },
+	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
 	{ &vmalloc_base, VMALLOC_SIZE_TB },
 	{ &vmemmap_base, 1 },
 };
@@ -142,7 +142,10 @@ void __init kernel_randomize_memory(void)
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		entropy = (rand % (entropy + 1)) & PUD_MASK;
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			entropy = (rand % (entropy + 1)) & P4D_MASK;
+		else
+			entropy = (rand % (entropy + 1)) & PUD_MASK;
 		vaddr += entropy;
 		*kaslr_regions[i].base = vaddr;
 
@@ -151,27 +154,21 @@ void __init kernel_randomize_memory(void)
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		vaddr = round_up(vaddr + 1, PUD_SIZE);
+		if (IS_ENABLED(CONFIG_X86_5LEVEL))
+			vaddr = round_up(vaddr + 1, P4D_SIZE);
+		else
+			vaddr = round_up(vaddr + 1, PUD_SIZE);
 		remain_entropy -= entropy;
 	}
 }
 
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
+static void __meminit init_trampoline_pud(void)
 {
 	unsigned long paddr, paddr_next;
 	pgd_t *pgd;
 	pud_t *pud_page, *pud_page_tramp;
 	int i;
 
-	if (!kaslr_memory_enabled()) {
-		init_trampoline_default();
-		return;
-	}
-
 	pud_page_tramp = alloc_low_page();
 
 	paddr = 0;
@@ -192,3 +189,49 @@ void __meminit init_trampoline(void)
 	set_pgd(&trampoline_pgd_entry,
 		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
 }
+
+static void __meminit init_trampoline_p4d(void)
+{
+	unsigned long paddr, paddr_next;
+	pgd_t *pgd;
+	p4d_t *p4d_page, *p4d_page_tramp;
+	int i;
+
+	p4d_page_tramp = alloc_low_page();
+
+	paddr = 0;
+	pgd = pgd_offset_k((unsigned long)__va(paddr));
+	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
+
+	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+		p4d_t *p4d, *p4d_tramp;
+		unsigned long vaddr = (unsigned long)__va(paddr);
+
+		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
+		p4d = p4d_page + p4d_index(vaddr);
+		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+		*p4d_tramp = *p4d;
+	}
+
+	set_pgd(&trampoline_pgd_entry,
+		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+
+	if (!kaslr_memory_enabled()) {
+		init_trampoline_default();
+		return;
+	}
+
+	if (IS_ENABLED(CONFIG_X86_5LEVEL))
+		init_trampoline_p4d();
+	else
+		init_trampoline_pud();
+}
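
Both the region randomization and the trampoline now key off CONFIG_X86_5LEVEL: with 5-level paging the randomized bases must stay P4D-aligned rather than PUD-aligned, so that each region still starts on the upper-level page-table boundary the direct-mapping code expects. The alignment step reduces to masking the random offset with the relevant page-table mask (sketch; the function name is made up, the expression matches kernel_randomize_memory() above):

	static unsigned long kaslr_align_entropy(unsigned long rand,
						 unsigned long entropy)
	{
		unsigned long mask = IS_ENABLED(CONFIG_X86_5LEVEL) ?
					P4D_MASK : PUD_MASK;

		return (rand % (entropy + 1)) & mask;
	}
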
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 19ad095..797295e 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -74,9 +74,6 @@ static int mmap_is_legacy(void)
 	if (current->personality & ADDR_COMPAT_LAYOUT)
 		return 1;
 
-	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
-		return 1;
-
 	return sysctl_legacy_va_layout;
 }
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 83a59a6..9b78685 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -65,11 +65,9 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
-static bool __read_mostly __pat_initialized = false;
-
 bool pat_enabled(void)
 {
-	return __pat_initialized;
+	return !!__pat_enabled;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -227,14 +225,13 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
-	__pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!this_cpu_has(X86_FEATURE_PAT)) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.
@@ -309,7 +306,7 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!__pat_enabled) {
+	if (!pat_enabled()) {
 		init_cache_modes();
 		return;
 	}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6e7bedf..014d07a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,7 +15,7 @@
 #include <linux/debugfs.h>
 
 /*
- *	Smarter SMP flushing macros.
+ *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
  *
  *	These mean you can really definitely utterly forget about
@@ -28,39 +28,28 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
-#ifdef CONFIG_SMP
-
-struct flush_tlb_info {
-	struct mm_struct *flush_mm;
-	unsigned long flush_start;
-	unsigned long flush_end;
-};
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
 void leave_mm(int cpu)
 {
-	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
+	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+
+	/*
+	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
+	 * If so, our callers still expect us to flush the TLB, but there
+	 * aren't any user TLB entries in init_mm to worry about.
+	 *
+	 * This needs to happen before any other sanity checks due to
+	 * intel_idle's shenanigans.
+	 */
+	if (loaded_mm == &init_mm)
+		return;
+
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
-		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
-		load_cr3(swapper_pg_dir);
-		/*
-		 * This gets called in the idle path where RCU
-		 * functions differently.  Tracing normally
-		 * uses RCU, so we have to call the tracepoint
-		 * specially here.
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-	}
+
+	switch_mm(NULL, &init_mm, NULL);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
-#endif /* CONFIG_SMP */
-
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
@@ -75,216 +64,167 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
+	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
 
-	if (likely(prev != next)) {
-		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-			/*
-			 * If our current stack is in vmalloc space and isn't
-			 * mapped in the new pgd, we'll double-fault.  Forcibly
-			 * map it.
-			 */
-			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
+	/*
+	 * NB: The scheduler will call us with prev == next when
+	 * switching from lazy TLB mode to normal mode if active_mm
+	 * isn't changing.  When this happens, there is no guarantee
+	 * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
+	 *
+	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
+	 */
 
-			pgd_t *pgd = next->pgd + stack_pgd_index;
+	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 
-			if (unlikely(pgd_none(*pgd)))
-				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
-		}
-
-#ifdef CONFIG_SMP
-		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
-
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
+	if (real_prev == next) {
 		/*
-		 * Re-load page tables.
-		 *
-		 * This logic has an ordering constraint:
-		 *
-		 *  CPU 0: Write to a PTE for 'next'
-		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
-		 *  CPU 1: set bit 1 in next's mm_cpumask
-		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
-		 *
-		 * We need to prevent an outcome in which CPU 1 observes
-		 * the new PTE value and CPU 0 observes bit 1 clear in
-		 * mm_cpumask.  (If that occurs, then the IPI will never
-		 * be sent, and CPU 0's TLB will contain a stale entry.)
-		 *
-		 * The bad outcome can occur if either CPU's load is
-		 * reordered before that CPU's store, so both CPUs must
-		 * execute full barriers to prevent this from happening.
-		 *
-		 * Thus, switch_mm needs a full barrier between the
-		 * store to mm_cpumask and any operation that could load
-		 * from next->pgd.  TLB fills are special and can happen
-		 * due to instruction fetches or for no reason at all,
-		 * and neither LOCK nor MFENCE orders them.
-		 * Fortunately, load_cr3() is serializing and gives the
-		 * ordering guarantee we need.
-		 *
+		 * There's nothing to do: we always keep the per-mm control
+		 * regs in sync with cpu_tlbstate.loaded_mm.  Just
+		 * sanity-check mm_cpumask.
 		 */
-		load_cr3(next->pgd);
-
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-
-		/* Stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-
-		/* Load per-mm CR4 state */
-		load_mm_cr4(next);
-
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-		/*
-		 * Load the LDT, if the LDT is different.
-		 *
-		 * It's possible that prev->context.ldt doesn't match
-		 * the LDT register.  This can happen if leave_mm(prev)
-		 * was called and then modify_ldt changed
-		 * prev->context.ldt but suppressed an IPI to this CPU.
-		 * In this case, prev->context.ldt != NULL, because we
-		 * never set context.ldt to NULL while the mm still
-		 * exists.  That means that next->context.ldt !=
-		 * prev->context.ldt, because mms never share an LDT.
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_mm_ldt(next);
-#endif
-	}
-#ifdef CONFIG_SMP
-	  else {
-		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-
-		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
-			/*
-			 * On established mms, the mm_cpumask is only changed
-			 * from irq context, from ptep_clear_flush() while in
-			 * lazy tlb mode, and here. Irqs are blocked during
-			 * schedule, protecting us from simultaneous changes.
-			 */
+		if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
-
-			/*
-			 * We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload CR3
-			 * to make sure to use no freed page tables.
-			 *
-			 * As above, load_cr3() is serializing and orders TLB
-			 * fills with respect to the mm_cpumask write.
-			 */
-			load_cr3(next->pgd);
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-			load_mm_cr4(next);
-			load_mm_ldt(next);
-		}
+		return;
 	}
-#endif
+
+	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+		/*
+		 * If our current stack is in vmalloc space and isn't
+		 * mapped in the new pgd, we'll double-fault.  Forcibly
+		 * map it.
+		 */
+		unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
+
+		pgd_t *pgd = next->pgd + stack_pgd_index;
+
+		if (unlikely(pgd_none(*pgd)))
+			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+	}
+
+	this_cpu_write(cpu_tlbstate.loaded_mm, next);
+
+	WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Re-load page tables.
+	 *
+	 * This logic has an ordering constraint:
+	 *
+	 *  CPU 0: Write to a PTE for 'next'
+	 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+	 *  CPU 1: set bit 1 in next's mm_cpumask
+	 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+	 *
+	 * We need to prevent an outcome in which CPU 1 observes
+	 * the new PTE value and CPU 0 observes bit 1 clear in
+	 * mm_cpumask.  (If that occurs, then the IPI will never
+	 * be sent, and CPU 0's TLB will contain a stale entry.)
+	 *
+	 * The bad outcome can occur if either CPU's load is
+	 * reordered before that CPU's store, so both CPUs must
+	 * execute full barriers to prevent this from happening.
+	 *
+	 * Thus, switch_mm needs a full barrier between the
+	 * store to mm_cpumask and any operation that could load
+	 * from next->pgd.  TLB fills are special and can happen
+	 * due to instruction fetches or for no reason at all,
+	 * and neither LOCK nor MFENCE orders them.
+	 * Fortunately, load_cr3() is serializing and gives the
+	 * ordering guarantee we need.
+	 */
+	load_cr3(next->pgd);
+
+	/*
+	 * This gets called via leave_mm() in the idle path where RCU
+	 * functions differently.  Tracing normally uses RCU, so we have to
+	 * call the tracepoint specially here.
+	 */
+	trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+	/* Stop flush ipis for the previous mm */
+	WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+		     real_prev != &init_mm);
+	cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+
+	/* Load per-mm CR4 and LDTR state */
+	load_mm_cr4(next);
+	switch_ldt(real_prev, next);
 }
 
-#ifdef CONFIG_SMP
-
-/*
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) set cpu_tlbstate to TLBSTATE_OK
- *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
- *	if cpu0 was in lazy tlb mode.
- * 1a2) update cpu active_mm
- *	Now cpu0 accepts tlb flushes for the new mm.
- * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
- *	Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *	Stop ipi delivery for the old mm. This is not synchronized with
- *	the other cpus, but flush_tlb_func ignore flush ipis for the wrong
- *	mm, and in the worst case we perform a superfluous tlb flush.
- * 1b) thread switch without mm change
- *	cpu active_mm is correct, cpu0 already handles flush ipis.
- * 1b1) set cpu_tlbstate to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- *	Atomically set the bit [other cpus will start sending flush ipis],
- *	and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu_tlbstate is local to each cpu, no
- * write/read ordering problems.
- */
-
-/*
- * TLB flush funcation:
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-static void flush_tlb_func(void *info)
+static void flush_tlb_func_common(const struct flush_tlb_info *f,
+				  bool local, enum tlb_flush_reason reason)
 {
-	struct flush_tlb_info *f = info;
+	/* This code cannot presently handle being reentered. */
+	VM_WARN_ON(!irqs_disabled());
+
+	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
+		leave_mm(smp_processor_id());
+		return;
+	}
+
+	if (f->end == TLB_FLUSH_ALL) {
+		local_flush_tlb();
+		if (local)
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		trace_tlb_flush(reason, TLB_FLUSH_ALL);
+	} else {
+		unsigned long addr;
+		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+		addr = f->start;
+		while (addr < f->end) {
+			__flush_tlb_single(addr);
+			addr += PAGE_SIZE;
+		}
+		if (local)
+			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
+		trace_tlb_flush(reason, nr_pages);
+	}
+}
+
+static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+{
+	const struct flush_tlb_info *f = info;
+
+	flush_tlb_func_common(f, true, reason);
+}
+
+static void flush_tlb_func_remote(void *info)
+{
+	const struct flush_tlb_info *f = info;
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-		if (f->flush_end == TLB_FLUSH_ALL) {
-			local_flush_tlb();
-			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
-		} else {
-			unsigned long addr;
-			unsigned long nr_pages =
-				(f->flush_end - f->flush_start) / PAGE_SIZE;
-			addr = f->flush_start;
-			while (addr < f->flush_end) {
-				__flush_tlb_single(addr);
-				addr += PAGE_SIZE;
-			}
-			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
-		}
-	} else
-		leave_mm(smp_processor_id());
-
+	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
+			     const struct flush_tlb_info *info)
 {
-	struct flush_tlb_info info;
-
-	info.flush_mm = mm;
-	info.flush_start = start;
-	info.flush_end = end;
-
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	if (end == TLB_FLUSH_ALL)
+	if (info->end == TLB_FLUSH_ALL)
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
 	else
 		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-				(end - start) >> PAGE_SHIFT);
+				(info->end - info->start) >> PAGE_SHIFT);
 
 	if (is_uv_system()) {
 		unsigned int cpu;
 
 		cpu = smp_processor_id();
-		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+		cpumask = uv_flush_tlb_others(cpumask, info);
 		if (cpumask)
-			smp_call_function_many(cpumask, flush_tlb_func,
-								&info, 1);
+			smp_call_function_many(cpumask, flush_tlb_func_remote,
+					       (void *)info, 1);
 		return;
 	}
-	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+	smp_call_function_many(cpumask, flush_tlb_func_remote,
+			       (void *)info, 1);
 }
 
 /*
@@ -302,84 +242,40 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
-	unsigned long addr;
-	/* do a global flush by default */
-	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+	int cpu;
 
-	preempt_disable();
+	struct flush_tlb_info info = {
+		.mm = mm,
+	};
 
-	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
-		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
-	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
-		base_pages_to_flush = TLB_FLUSH_ALL;
+	cpu = get_cpu();
 
-	if (current->active_mm != mm) {
-		/* Synchronize with switch_mm. */
-		smp_mb();
+	/* Synchronize with switch_mm. */
+	smp_mb();
 
-		goto out;
-	}
-
-	if (!current->mm) {
-		leave_mm(smp_processor_id());
-
-		/* Synchronize with switch_mm. */
-		smp_mb();
-
-		goto out;
-	}
-
-	/*
-	 * Both branches below are implicit full barriers (MOV to CR or
-	 * INVLPG) that synchronize with switch_mm.
-	 */
-	if (base_pages_to_flush == TLB_FLUSH_ALL) {
-		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-		local_flush_tlb();
+	/* Should we flush just the requested range? */
+	if ((end != TLB_FLUSH_ALL) &&
+	    !(vmflag & VM_HUGETLB) &&
+	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+		info.start = start;
+		info.end = end;
 	} else {
-		/* flush range by one by one 'invlpg' */
-		for (addr = start; addr < end;	addr += PAGE_SIZE) {
-			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-			__flush_tlb_single(addr);
-		}
+		info.start = 0UL;
+		info.end = TLB_FLUSH_ALL;
 	}
-	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
-out:
-	if (base_pages_to_flush == TLB_FLUSH_ALL) {
-		start = 0UL;
-		end = TLB_FLUSH_ALL;
+
+	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+		VM_WARN_ON(irqs_disabled());
+		local_irq_disable();
+		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+		local_irq_enable();
 	}
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, end);
-	preempt_enable();
+
+	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), &info);
+	put_cpu();
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			/*
-			 * Implicit full barrier (INVLPG) that synchronizes
-			 * with switch_mm.
-			 */
-			__flush_tlb_one(start);
-		} else {
-			leave_mm(smp_processor_id());
-
-			/* Synchronize with switch_mm. */
-			smp_mb();
-		}
-	}
-
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
-
-	preempt_enable();
-}
 
 static void do_flush_tlb_all(void *info)
 {
@@ -401,7 +297,7 @@ static void do_kernel_range_flush(void *info)
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
 		__flush_tlb_single(addr);
 }
 
@@ -410,16 +306,40 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 	/* Balance as user space task's flush, a bit conservative */
 	if (end == TLB_FLUSH_ALL ||
-	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
 		on_each_cpu(do_flush_tlb_all, NULL, 1);
 	} else {
 		struct flush_tlb_info info;
-		info.flush_start = start;
-		info.flush_end = end;
+		info.start = start;
+		info.end = end;
 		on_each_cpu(do_kernel_range_flush, &info, 1);
 	}
 }
 
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+	struct flush_tlb_info info = {
+		.mm = NULL,
+		.start = 0UL,
+		.end = TLB_FLUSH_ALL,
+	};
+
+	int cpu = get_cpu();
+
+	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
+		VM_WARN_ON(irqs_disabled());
+		local_irq_disable();
+		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+		local_irq_enable();
+	}
+
+	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&batch->cpumask, &info);
+	cpumask_clear(&batch->cpumask);
+
+	put_cpu();
+}
+
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
 {
@@ -465,5 +385,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif /* CONFIG_SMP */
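
For reference, a minimal sketch of how a caller drives the reworked interface,
mirroring flush_tlb_mm_range() above; the wrapper name example_flush_range() is
invented, and flush_tlb_func_local() remains static to tlb.c:

static void example_flush_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* Describe the flush once; the local and the remote paths consume
	 * the same flush_tlb_info. */
	struct flush_tlb_info info = {
		.mm	= mm,
		.start	= start,
		.end	= end,
	};
	int cpu = get_cpu();

	/* Flush locally only if this CPU is running the target mm. */
	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	/* Any other CPU in mm_cpumask() gets the same info via IPI. */
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}
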
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 90568c3..fefb4b6 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -1,4 +1,6 @@
 #
 # Arch-specific network modules
 #
+OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
+
 obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index b914e20..3353b76d 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -65,6 +65,9 @@ struct sim_reg_op {
 { PCI_DEVFN(device, func), offset, init_op, read_op, write_op,\
 	{0, SIZE_TO_MASK(size)} },
 
+/*
+ * All read/write functions are called with pci_config_lock held.
+ */
 static void reg_init(struct sim_dev_reg *reg)
 {
 	pci_direct_conf1.read(0, 1, reg->dev_func, reg->reg, 4,
@@ -73,21 +76,13 @@ static void reg_init(struct sim_dev_reg *reg)
 
 static void reg_read(struct sim_dev_reg *reg, u32 *value)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pci_config_lock, flags);
 	*value = reg->sim_reg.value;
-	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
 }
 
 static void reg_write(struct sim_dev_reg *reg, u32 value)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pci_config_lock, flags);
 	reg->sim_reg.value = (value & reg->sim_reg.mask) |
 		(reg->sim_reg.value & ~reg->sim_reg.mask);
-	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
 }
 
 static void sata_reg_init(struct sim_dev_reg *reg)
@@ -117,12 +112,8 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
 
 static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pci_config_lock, flags);
 	/* force interrupt pin value to 0 */
 	*value = reg->sim_reg.value & 0xfff00ff;
-	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
 }
 
 static struct sim_dev_reg bus1_fixups[] = {
@@ -265,24 +256,33 @@ int bridge_read(unsigned int devfn, int reg, int len, u32 *value)
 	return retval;
 }
 
+static int ce4100_bus1_read(unsigned int devfn, int reg, int len, u32 *value)
+{
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
+		if (bus1_fixups[i].dev_func == devfn &&
+		    bus1_fixups[i].reg == (reg & ~3) &&
+		    bus1_fixups[i].read) {
+
+			raw_spin_lock_irqsave(&pci_config_lock, flags);
+			bus1_fixups[i].read(&(bus1_fixups[i]), value);
+			raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+			extract_bytes(value, reg, len);
+			return 0;
+		}
+	}
+	return -1;
+}
+
 static int ce4100_conf_read(unsigned int seg, unsigned int bus,
 			    unsigned int devfn, int reg, int len, u32 *value)
 {
-	int i;
-
 	WARN_ON(seg);
-	if (bus == 1) {
-		for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
-			if (bus1_fixups[i].dev_func == devfn &&
-			    bus1_fixups[i].reg == (reg & ~3) &&
-			    bus1_fixups[i].read) {
-				bus1_fixups[i].read(&(bus1_fixups[i]),
-						    value);
-				extract_bytes(value, reg, len);
-				return 0;
-			}
-		}
-	}
+
+	if (bus == 1 && !ce4100_bus1_read(devfn, reg, len, value))
+		return 0;
 
 	if (bus == 0 && (PCI_DEVFN(1, 0) == devfn) &&
 	    !bridge_read(devfn, reg, len, value))
@@ -291,23 +291,32 @@ static int ce4100_conf_read(unsigned int seg, unsigned int bus,
 	return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
 }
 
+static int ce4100_bus1_write(unsigned int devfn, int reg, int len, u32 value)
+{
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
+		if (bus1_fixups[i].dev_func == devfn &&
+		    bus1_fixups[i].reg == (reg & ~3) &&
+		    bus1_fixups[i].write) {
+
+			raw_spin_lock_irqsave(&pci_config_lock, flags);
+			bus1_fixups[i].write(&(bus1_fixups[i]), value);
+			raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+			return 0;
+		}
+	}
+	return -1;
+}
+
 static int ce4100_conf_write(unsigned int seg, unsigned int bus,
 			     unsigned int devfn, int reg, int len, u32 value)
 {
-	int i;
-
 	WARN_ON(seg);
-	if (bus == 1) {
-		for (i = 0; i < ARRAY_SIZE(bus1_fixups); i++) {
-			if (bus1_fixups[i].dev_func == devfn &&
-			    bus1_fixups[i].reg == (reg & ~3) &&
-			    bus1_fixups[i].write) {
-				bus1_fixups[i].write(&(bus1_fixups[i]),
-						     value);
-				return 0;
-			}
-		}
-	}
+
+	if (bus == 1 && !ce4100_bus1_write(devfn, reg, len, value))
+		return 0;
 
 	/* Discard writes to A/V bridge BAR. */
 	if (bus == 0 && PCI_DEVFN(1, 0) == devfn &&
@@ -318,8 +327,8 @@ static int ce4100_conf_write(unsigned int seg, unsigned int bus,
 }
 
 static const struct pci_raw_ops ce4100_pci_conf = {
-	.read =	ce4100_conf_read,
-	.write = ce4100_conf_write,
+	.read	= ce4100_conf_read,
+	.write	= ce4100_conf_write,
 };
 
 int __init ce4100_pci_init(void)
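
With the locking moved into the bus 1 helpers, a simulated-register callback no
longer touches pci_config_lock itself; a new read op would follow this shape
(the name example_reg_read() is invented, the body mirrors reg_read() above):

static void example_reg_read(struct sim_dev_reg *reg, u32 *value)
{
	/* pci_config_lock is already held by ce4100_bus1_read() */
	*value = reg->sim_reg.value;
}
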
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 190e718..cfd1a89 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -75,8 +75,8 @@ struct pci_ops pci_root_ops = {
 };
 
 /*
- * This interrupt-safe spinlock protects all accesses to PCI
- * configuration space.
+ * This interrupt-safe spinlock protects all accesses to PCI configuration
+ * space, except for the mmconfig (ECAM) based operations.
  */
 DEFINE_RAW_SPINLOCK(pci_config_lock);
 
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index ea6f380..1cb01ab 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -24,12 +24,10 @@ static void pcibios_fixup_peer_bridges(void)
 
 int __init pci_legacy_init(void)
 {
-	if (!raw_pci_ops) {
-		printk("PCI: System does not support PCI\n");
-		return 0;
-	}
+	if (!raw_pci_ops)
+		return 1;
 
-	printk("PCI: Probing PCI hardware\n");
+	pr_info("PCI: Probing PCI hardware\n");
 	pcibios_scan_root(0);
 	return 0;
 }
@@ -46,7 +44,7 @@ void pcibios_scan_specific_bus(int busn)
 		if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) &&
 		    l != 0x0000 && l != 0xffff) {
 			DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l);
-			printk(KERN_INFO "PCI: Discovered peer bus %02x\n", busn);
+			pr_info("PCI: Discovered peer bus %02x\n", busn);
 			pcibios_scan_root(busn);
 			return;
 		}
@@ -60,8 +58,12 @@ static int __init pci_subsys_init(void)
 	 * The init function returns an non zero value when
 	 * pci_legacy_init should be invoked.
 	 */
-	if (x86_init.pci.init())
-		pci_legacy_init();
+	if (x86_init.pci.init()) {
+		if (pci_legacy_init()) {
+			pr_info("PCI: System does not support PCI\n");
+			return -ENODEV;
+		}
+	}
 
 	pcibios_fixup_peer_bridges();
 	x86_init.pci.init_irq();
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index f1d83b3..2f56e1e 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,4 +1,5 @@
 OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
 
 obj-$(CONFIG_EFI) 		+= quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_EARLY_PRINTK_EFI)	+= early_printk.o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 7e76a4d..f084d87 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
 
 	/*
 	 * We don't do virtual mode, since we don't do runtime services, on
-	 * non-native EFI
+	 * non-native EFI. With efi=old_map, we don't do runtime services in
+	 * the kexec kernel because in the initial boot something else might
+	 * have been mapped at these virtual addresses.
 	 */
-	if (!efi_is_native()) {
+	if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
 		efi_memmap_unmap();
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return;
@@ -1012,7 +1014,6 @@ static void __init __efi_enter_virtual_mode(void)
 	 * necessary relocation fixups for the new virtual addresses.
 	 */
 	efi_runtime_update_mappings();
-	efi_dump_pagetable();
 
 	/* clean DUMMY object */
 	efi_delete_dummy_variable();
@@ -1027,6 +1028,8 @@ void __init efi_enter_virtual_mode(void)
 		kexec_enter_virtual_mode();
 	else
 		__efi_enter_virtual_mode();
+
+	efi_dump_pagetable();
 }
 
 /*
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 3481268..52f7faa 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -44,7 +44,14 @@ int __init efi_alloc_page_tables(void)
 }
 
 void efi_sync_low_kernel_mappings(void) {}
-void __init efi_dump_pagetable(void) {}
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+	ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+#endif
+}
+
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
 	return 0;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c488625..9bf72f5 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -71,14 +71,16 @@ static void __init early_code_mapping_set_exec(int executable)
 
 pgd_t * __init efi_call_phys_prolog(void)
 {
-	unsigned long vaddress;
-	pgd_t *save_pgd;
+	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+	pgd_t *save_pgd, *pgd_k, *pgd_efi;
+	p4d_t *p4d, *p4d_k, *p4d_efi;
+	pud_t *pud;
 
 	int pgd;
-	int n_pgds;
+	int n_pgds, i, j;
 
 	if (!efi_enabled(EFI_OLD_MEMMAP)) {
-		save_pgd = (pgd_t *)read_cr3();
+		save_pgd = (pgd_t *)__read_cr3();
 		write_cr3((unsigned long)efi_scratch.efi_pgt);
 		goto out;
 	}
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
 	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
 	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
 
+	/*
+	 * Build 1:1 identity mapping for efi=old_map usage. Note that
+	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+	 * it is only PUD_SIZE aligned with KASLR enabled. So for a given physical
+	 * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+	 * PUD entry of __va(X) to fill in the PUD entry of X and build the 1:1
+	 * mapping. This means we can only reuse the PMD tables of the direct mapping.
+	 */
 	for (pgd = 0; pgd < n_pgds; pgd++) {
-		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
-		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
-		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+		pgd_efi = pgd_offset_k(addr_pgd);
+		save_pgd[pgd] = *pgd_efi;
+
+		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+		if (!p4d) {
+			pr_err("Failed to allocate p4d table!\n");
+			goto out;
+		}
+
+		for (i = 0; i < PTRS_PER_P4D; i++) {
+			addr_p4d = addr_pgd + i * P4D_SIZE;
+			p4d_efi = p4d + p4d_index(addr_p4d);
+
+			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+			if (!pud) {
+				pr_err("Failed to allocate pud table!\n");
+				goto out;
+			}
+
+			for (j = 0; j < PTRS_PER_PUD; j++) {
+				addr_pud = addr_p4d + j * PUD_SIZE;
+
+				if (addr_pud > (max_pfn << PAGE_SHIFT))
+					break;
+
+				vaddr = (unsigned long)__va(addr_pud);
+
+				pgd_k = pgd_offset_k(vaddr);
+				p4d_k = p4d_offset(pgd_k, vaddr);
+				pud[j] = *pud_offset(p4d_k, vaddr);
+			}
+		}
 	}
 out:
 	__flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 	/*
 	 * After the lock is released, the original page table is restored.
 	 */
-	int pgd_idx;
+	int pgd_idx, i;
 	int nr_pgds;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
 
 	if (!efi_enabled(EFI_OLD_MEMMAP)) {
 		write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
 
 	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
-	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
 		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
+		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+			continue;
+
+		for (i = 0; i < PTRS_PER_P4D; i++) {
+			p4d = p4d_offset(pgd,
+					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+				continue;
+
+			pud = (pud_t *)p4d_page_vaddr(*p4d);
+			pud_free(&init_mm, pud);
+		}
+
+		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+		p4d_free(&init_mm, p4d);
+	}
+
 	kfree(save_pgd);
 
 	__flush_tlb_all();
@@ -526,7 +589,10 @@ void __init efi_runtime_update_mappings(void)
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-	ptdump_walk_pgd_level(NULL, efi_pgd);
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+	else
+		ptdump_walk_pgd_level(NULL, efi_pgd);
 #endif
 }
 
@@ -583,7 +649,7 @@ efi_status_t efi_thunk_set_virtual_address_map(
 	efi_sync_low_kernel_mappings();
 	local_irq_save(flags);
 
-	efi_scratch.prev_cr3 = read_cr3();
+	efi_scratch.prev_cr3 = __read_cr3();
 	write_cr3((unsigned long)efi_scratch.efi_pgt);
 	__flush_tlb_all();
 
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 2661599..8a99a2e 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -15,12 +15,66 @@
 #include <asm/e820/api.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
+#include <asm/cpu_device_id.h>
 
 #define EFI_MIN_RESERVE 5120
 
 #define EFI_DUMMY_GUID \
 	EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
 
+#define QUARK_CSH_SIGNATURE		0x5f435348	/* _CSH */
+#define QUARK_SECURITY_HEADER_SIZE	0x400
+
+/*
+ * Header prepended to the standard EFI capsule on Quark systems that are based
+ * on Intel firmware BSP.
+ * @csh_signature:	Unique identifier to sanity check signed module
+ * 			presence ("_CSH").
+ * @version:		Current version of CSH used. Should be one for Quark A0.
+ * @modulesize:		Size of the entire module including the module header
+ * 			and payload.
+ * @security_version_number_index: Index of SVN to use for validation of signed
+ * 			module.
+ * @security_version_number: Used to prevent rollback of modules.
+ * @rsvd_module_id:	Currently unused for Clanton (Quark).
+ * @rsvd_module_vendor:	Vendor Identifier. For Intel products value is
+ * 			0x00008086.
+ * @rsvd_date:		BCD representation of build date as yyyymmdd, where
+ * 			yyyy=4 digit year, mm=1-12, dd=1-31.
+ * @headersize:		Total length of the header including any
+ * 			padding optionally added by the signing tool.
+ * @hash_algo:		What Hash is used in the module signing.
+ * @cryp_algo:		What Crypto is used in the module signing.
+ * @keysize:		Total length of the key data including any
+ * 			padding optionally added by the signing tool.
+ * @signaturesize:	Total length of the signature including any
+ * 			padding optionally added by the signing tool.
+ * @rsvd_next_header:	32-bit pointer to the next Secure Boot Module in the
+ * 			chain, if there is a next header.
+ * @rsvd:		Reserved, padding structure to required size.
+ *
+ * See also QuartSecurityHeader_t in
+ * Quark_EDKII_v1.2.1.1/QuarkPlatformPkg/Include/QuarkBootRom.h
+ * from https://downloadcenter.intel.com/download/23197/Intel-Quark-SoC-X1000-Board-Support-Package-BSP
+ */
+struct quark_security_header {
+	u32 csh_signature;
+	u32 version;
+	u32 modulesize;
+	u32 security_version_number_index;
+	u32 security_version_number;
+	u32 rsvd_module_id;
+	u32 rsvd_module_vendor;
+	u32 rsvd_date;
+	u32 headersize;
+	u32 hash_algo;
+	u32 cryp_algo;
+	u32 keysize;
+	u32 signaturesize;
+	u32 rsvd_next_header;
+	u32 rsvd[2];
+};
+
 static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
 
 static bool efi_no_storage_paranoia;
@@ -360,6 +414,9 @@ void __init efi_free_boot_services(void)
 		free_bootmem_late(start, size);
 	}
 
+	if (!num_entries)
+		return;
+
 	new_size = efi.memmap.desc_size * num_entries;
 	new_phys = efi_memmap_alloc(num_entries);
 	if (!new_phys) {
@@ -501,3 +558,86 @@ bool efi_poweroff_required(void)
 {
 	return acpi_gbl_reduced_hardware || acpi_no_s5;
 }
+
+#ifdef CONFIG_EFI_CAPSULE_QUIRK_QUARK_CSH
+
+static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
+				  size_t hdr_bytes)
+{
+	struct quark_security_header *csh = *pkbuff;
+
+	/* Only process data block that is larger than the security header */
+	if (hdr_bytes < sizeof(struct quark_security_header))
+		return 0;
+
+	if (csh->csh_signature != QUARK_CSH_SIGNATURE ||
+	    csh->headersize != QUARK_SECURITY_HEADER_SIZE)
+		return 1;
+
+	/* Only process data block if EFI header is included */
+	if (hdr_bytes < QUARK_SECURITY_HEADER_SIZE +
+			sizeof(efi_capsule_header_t))
+		return 0;
+
+	pr_debug("Quark security header detected\n");
+
+	if (csh->rsvd_next_header != 0) {
+		pr_err("multiple Quark security headers not supported\n");
+		return -EINVAL;
+	}
+
+	*pkbuff += csh->headersize;
+	cap_info->total_size = csh->headersize;
+
+	/*
+	 * Update the first page pointer to skip over the CSH header.
+	 */
+	cap_info->pages[0] += csh->headersize;
+
+	return 1;
+}
+
+#define ICPU(family, model, quirk_handler) \
+	{ X86_VENDOR_INTEL, family, model, X86_FEATURE_ANY, \
+	  (unsigned long)&quirk_handler }
+
+static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
+	ICPU(5, 9, qrk_capsule_setup_info),	/* Intel Quark X1000 */
+	{ }
+};
+
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+			   size_t hdr_bytes)
+{
+	int (*quirk_handler)(struct capsule_info *, void **, size_t);
+	const struct x86_cpu_id *id;
+	int ret;
+
+	if (hdr_bytes < sizeof(efi_capsule_header_t))
+		return 0;
+
+	cap_info->total_size = 0;
+
+	id = x86_match_cpu(efi_capsule_quirk_ids);
+	if (id) {
+		/*
+		 * The quirk handler is supposed to return
+		 *  - a value > 0 if the setup should continue, after advancing
+		 *    kbuff as needed
+		 *  - 0 if not enough hdr_bytes are available yet
+		 *  - a negative error code otherwise
+		 */
+		quirk_handler = (typeof(quirk_handler))id->driver_data;
+		ret = quirk_handler(cap_info, &kbuff, hdr_bytes);
+		if (ret <= 0)
+			return ret;
+	}
+
+	memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+
+	cap_info->total_size += cap_info->header.imagesize;
+
+	return __efi_capsule_setup_info(cap_info);
+}
+
+#endif
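
As a sketch of how another CPU-specific capsule quirk would plug into the table
above; the handler and the model number are invented for illustration:

static int example_capsule_setup_info(struct capsule_info *cap_info,
				      void **pkbuff, size_t hdr_bytes)
{
	/* No vendor header to strip: return > 0 without advancing *pkbuff,
	 * so the common code parses the standard EFI capsule header. */
	return 1;
}

static const struct x86_cpu_id example_capsule_quirk_ids[] = {
	ICPU(6, 0x5c, example_capsule_setup_info),	/* hypothetical model */
	{ }
};
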
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index c5350fd..0668aaf 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -77,7 +77,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
 
 asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
 {
-	void *pgd_addr = __va(read_cr3());
+	void *pgd_addr = __va(read_cr3_pa());
 
 	/* Program wakeup mask (using dword access to CS5536_PM1_EN) */
 	outl(wakeup_mask << 16, acpi_base + CS5536_PM1_STS);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 42e65fe..2983faa 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -456,12 +456,13 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
  */
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-	struct cyc2ns_data *data = cyc2ns_read_begin();
+	struct cyc2ns_data data;
 	unsigned long long ns;
 
-	ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+	cyc2ns_read_begin(&data);
+	ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+	cyc2ns_read_end();
 
-	cyc2ns_read_end(data);
 	return ns;
 }
 
@@ -470,12 +471,13 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
  */
 static inline unsigned long long ns_2_cycles(unsigned long long ns)
 {
-	struct cyc2ns_data *data = cyc2ns_read_begin();
+	struct cyc2ns_data data;
 	unsigned long long cyc;
 
-	cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+	cyc2ns_read_begin(&data);
+	cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
+	cyc2ns_read_end();
 
-	cyc2ns_read_end(data);
 	return cyc;
 }
 
@@ -1121,11 +1123,9 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done.  The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-						struct mm_struct *mm,
-						unsigned long start,
-						unsigned long end,
-						unsigned int cpu)
+					  const struct flush_tlb_info *info)
 {
+	unsigned int cpu = smp_processor_id();
 	int locals = 0, remotes = 0, hubs = 0;
 	struct bau_desc *bau_desc;
 	struct cpumask *flush_mask;
@@ -1179,8 +1179,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	if (!end || (end - start) <= PAGE_SIZE)
-		address = start;
+	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
+		address = info->start;
 	else
 		address = TLB_FLUSH_ALL;
 
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 776c659..03fc397 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -160,13 +160,21 @@ static struct irq_domain *uv_get_irq_domain(void)
 {
 	static struct irq_domain *uv_domain;
 	static DEFINE_MUTEX(uv_lock);
+	struct fwnode_handle *fn;
 
 	mutex_lock(&uv_lock);
-	if (uv_domain == NULL) {
-		uv_domain = irq_domain_add_tree(NULL, &uv_domain_ops, NULL);
-		if (uv_domain)
-			uv_domain->parent = x86_vector_domain;
-	}
+	if (uv_domain)
+		goto out;
+
+	fn = irq_domain_alloc_named_fwnode("UV-CORE");
+	if (!fn)
+		goto out;
+
+	uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
+	irq_domain_free_fwnode(fn);
+	if (uv_domain)
+		uv_domain->parent = x86_vector_domain;
+out:
 	mutex_unlock(&uv_lock);
 
 	return uv_domain;
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c..0504187 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
+
 # __restore_processor_state() restores %gs after S3 resume and so should not
 # itself be stack-protected
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 6b05a92..78459a6 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -129,7 +129,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 	 */
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
-	ctxt->cr3 = read_cr3();
+	ctxt->cr3 = __read_cr3();
 	ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
 	ctxt->cr8 = read_cr8();
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a6e21fe..f2598d8 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -147,10 +147,11 @@ static int relocate_restore_code(void)
 	if (!relocated_restore_code)
 		return -ENOMEM;
 
-	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
 
 	/* Make the page containing the relocated code executable */
-	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+	pgd = (pgd_t *)__va(read_cr3_pa()) +
+		pgd_index(relocated_restore_code);
 	p4d = p4d_offset(pgd, relocated_restore_code);
 	if (p4d_large(*p4d)) {
 		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
@@ -292,8 +293,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
 	if (max_size < sizeof(struct restore_data_record))
 		return -EOVERFLOW;
-	rdr->jump_address = (unsigned long)&restore_registers;
-	rdr->jump_address_phys = __pa_symbol(&restore_registers);
+	rdr->jump_address = (unsigned long)restore_registers;
+	rdr->jump_address_phys = __pa_symbol(restore_registers);
 	rdr->cr3 = restore_cr3;
 	rdr->magic = RESTORE_MAGIC;
 
diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig
index 2a2d89d..bb02669 100644
--- a/arch/x86/ras/Kconfig
+++ b/arch/x86/ras/Kconfig
@@ -1,13 +1,3 @@
-config MCE_AMD_INJ
-	tristate "Simple MCE injection interface for AMD processors"
-	depends on RAS && X86_MCE && DEBUG_FS && AMD_NB
-	default n
-	help
-	  This is a simple debugfs interface to inject MCEs and test different
-	  aspects of the MCE handling code.
-
-	  WARNING: Do not even assume this interface is staying stable!
-
 config RAS_CEC
 	bool "Correctable Errors Collector"
 	depends on X86_MCE && MEMORY_FAILURE && DEBUG_FS
@@ -20,4 +10,3 @@
 
 	  Bear in mind that this is absolutely useless if your platform doesn't
 	  have ECC DIMMs and doesn't have DRAM ECC checking enabled in the BIOS.
-
diff --git a/arch/x86/ras/Makefile b/arch/x86/ras/Makefile
deleted file mode 100644
index 5f94546..0000000
--- a/arch/x86/ras/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_MCE_AMD_INJ)		+= mce_amd_inj.o
-
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
deleted file mode 100644
index 8730c28..0000000
--- a/arch/x86/ras/mce_amd_inj.c
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * A simple MCE injection facility for testing different aspects of the RAS
- * code. This driver should be built as module so that it can be loaded
- * on production kernels for testing purposes.
- *
- * This file may be distributed under the terms of the GNU General Public
- * License version 2.
- *
- * Copyright (c) 2010-15:  Borislav Petkov <bp@alien8.de>
- *			Advanced Micro Devices Inc.
- */
-
-#include <linux/kobject.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/pci.h>
-
-#include <asm/mce.h>
-#include <asm/smp.h>
-#include <asm/amd_nb.h>
-#include <asm/irq_vectors.h>
-
-#include "../kernel/cpu/mcheck/mce-internal.h"
-
-/*
- * Collect all the MCi_XXX settings
- */
-static struct mce i_mce;
-static struct dentry *dfs_inj;
-
-static u8 n_banks;
-
-#define MAX_FLAG_OPT_SIZE	3
-#define NBCFG			0x44
-
-enum injection_type {
-	SW_INJ = 0,	/* SW injection, simply decode the error */
-	HW_INJ,		/* Trigger a #MC */
-	DFR_INT_INJ,    /* Trigger Deferred error interrupt */
-	THR_INT_INJ,    /* Trigger threshold interrupt */
-	N_INJ_TYPES,
-};
-
-static const char * const flags_options[] = {
-	[SW_INJ] = "sw",
-	[HW_INJ] = "hw",
-	[DFR_INT_INJ] = "df",
-	[THR_INT_INJ] = "th",
-	NULL
-};
-
-/* Set default injection to SW_INJ */
-static enum injection_type inj_type = SW_INJ;
-
-#define MCE_INJECT_SET(reg)						\
-static int inj_##reg##_set(void *data, u64 val)				\
-{									\
-	struct mce *m = (struct mce *)data;				\
-									\
-	m->reg = val;							\
-	return 0;							\
-}
-
-MCE_INJECT_SET(status);
-MCE_INJECT_SET(misc);
-MCE_INJECT_SET(addr);
-MCE_INJECT_SET(synd);
-
-#define MCE_INJECT_GET(reg)						\
-static int inj_##reg##_get(void *data, u64 *val)			\
-{									\
-	struct mce *m = (struct mce *)data;				\
-									\
-	*val = m->reg;							\
-	return 0;							\
-}
-
-MCE_INJECT_GET(status);
-MCE_INJECT_GET(misc);
-MCE_INJECT_GET(addr);
-MCE_INJECT_GET(synd);
-
-DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
-
-/*
- * Caller needs to be make sure this cpu doesn't disappear
- * from under us, i.e.: get_cpu/put_cpu.
- */
-static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
-{
-	u32 l, h;
-	int err;
-
-	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
-	if (err) {
-		pr_err("%s: error reading HWCR\n", __func__);
-		return err;
-	}
-
-	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
-
-	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
-	if (err)
-		pr_err("%s: error writing HWCR\n", __func__);
-
-	return err;
-}
-
-static int __set_inj(const char *buf)
-{
-	int i;
-
-	for (i = 0; i < N_INJ_TYPES; i++) {
-		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
-			inj_type = i;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-static ssize_t flags_read(struct file *filp, char __user *ubuf,
-			  size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_FLAG_OPT_SIZE];
-	int n;
-
-	n = sprintf(buf, "%s\n", flags_options[inj_type]);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
-}
-
-static ssize_t flags_write(struct file *filp, const char __user *ubuf,
-			   size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_FLAG_OPT_SIZE], *__buf;
-	int err;
-
-	if (cnt > MAX_FLAG_OPT_SIZE)
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt - 1] = 0;
-
-	/* strip whitespace */
-	__buf = strstrip(buf);
-
-	err = __set_inj(__buf);
-	if (err) {
-		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
-		return err;
-	}
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
-static const struct file_operations flags_fops = {
-	.read           = flags_read,
-	.write          = flags_write,
-	.llseek         = generic_file_llseek,
-};
-
-/*
- * On which CPU to inject?
- */
-MCE_INJECT_GET(extcpu);
-
-static int inj_extcpu_set(void *data, u64 val)
-{
-	struct mce *m = (struct mce *)data;
-
-	if (val >= nr_cpu_ids || !cpu_online(val)) {
-		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
-		return -EINVAL;
-	}
-	m->extcpu = val;
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
-
-static void trigger_mce(void *info)
-{
-	asm volatile("int $18");
-}
-
-static void trigger_dfr_int(void *info)
-{
-	asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR));
-}
-
-static void trigger_thr_int(void *info)
-{
-	asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR));
-}
-
-static u32 get_nbc_for_node(int node_id)
-{
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-	u32 cores_per_node;
-
-	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
-
-	return cores_per_node * node_id;
-}
-
-static void toggle_nb_mca_mst_cpu(u16 nid)
-{
-	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
-	u32 val;
-	int err;
-
-	if (!F3)
-		return;
-
-	err = pci_read_config_dword(F3, NBCFG, &val);
-	if (err) {
-		pr_err("%s: Error reading F%dx%03x.\n",
-		       __func__, PCI_FUNC(F3->devfn), NBCFG);
-		return;
-	}
-
-	if (val & BIT(27))
-		return;
-
-	pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n",
-	       __func__);
-
-	val |= BIT(27);
-	err = pci_write_config_dword(F3, NBCFG, val);
-	if (err)
-		pr_err("%s: Error writing F%dx%03x.\n",
-		       __func__, PCI_FUNC(F3->devfn), NBCFG);
-}
-
-static void prepare_msrs(void *info)
-{
-	struct mce m = *(struct mce *)info;
-	u8 b = m.bank;
-
-	wrmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
-
-	if (boot_cpu_has(X86_FEATURE_SMCA)) {
-		if (m.inject_flags == DFR_INT_INJ) {
-			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);
-			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);
-		} else {
-			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);
-			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);
-		}
-
-		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);
-		wrmsrl(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);
-	} else {
-		wrmsrl(MSR_IA32_MCx_STATUS(b), m.status);
-		wrmsrl(MSR_IA32_MCx_ADDR(b), m.addr);
-		wrmsrl(MSR_IA32_MCx_MISC(b), m.misc);
-	}
-}
-
-static void do_inject(void)
-{
-	u64 mcg_status = 0;
-	unsigned int cpu = i_mce.extcpu;
-	u8 b = i_mce.bank;
-
-	rdtscll(i_mce.tsc);
-
-	if (i_mce.misc)
-		i_mce.status |= MCI_STATUS_MISCV;
-
-	if (i_mce.synd)
-		i_mce.status |= MCI_STATUS_SYNDV;
-
-	if (inj_type == SW_INJ) {
-		mce_inject_log(&i_mce);
-		return;
-	}
-
-	/* prep MCE global settings for the injection */
-	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
-
-	if (!(i_mce.status & MCI_STATUS_PCC))
-		mcg_status |= MCG_STATUS_RIPV;
-
-	/*
-	 * Ensure necessary status bits for deferred errors:
-	 * - MCx_STATUS[Deferred]: make sure it is a deferred error
-	 * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC
-	 */
-	if (inj_type == DFR_INT_INJ) {
-		i_mce.status |= MCI_STATUS_DEFERRED;
-		i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
-	}
-
-	/*
-	 * For multi node CPUs, logging and reporting of bank 4 errors happens
-	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
-	 * Fam10h and later BKDGs.
-	 */
-	if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
-	    b == 4 &&
-	    boot_cpu_data.x86 < 0x17) {
-		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
-		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
-	}
-
-	get_online_cpus();
-	if (!cpu_online(cpu))
-		goto err;
-
-	toggle_hw_mce_inject(cpu, true);
-
-	i_mce.mcgstatus = mcg_status;
-	i_mce.inject_flags = inj_type;
-	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
-
-	toggle_hw_mce_inject(cpu, false);
-
-	switch (inj_type) {
-	case DFR_INT_INJ:
-		smp_call_function_single(cpu, trigger_dfr_int, NULL, 0);
-		break;
-	case THR_INT_INJ:
-		smp_call_function_single(cpu, trigger_thr_int, NULL, 0);
-		break;
-	default:
-		smp_call_function_single(cpu, trigger_mce, NULL, 0);
-	}
-
-err:
-	put_online_cpus();
-
-}
-
-/*
- * This denotes into which bank we're injecting and triggers
- * the injection, at the same time.
- */
-static int inj_bank_set(void *data, u64 val)
-{
-	struct mce *m = (struct mce *)data;
-
-	if (val >= n_banks) {
-		pr_err("Non-existent MCE bank: %llu\n", val);
-		return -EINVAL;
-	}
-
-	m->bank = val;
-	do_inject();
-
-	return 0;
-}
-
-MCE_INJECT_GET(bank);
-
-DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
-
-static const char readme_msg[] =
-"Description of the files and their usages:\n"
-"\n"
-"Note1: i refers to the bank number below.\n"
-"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
-"as they mirror the hardware registers.\n"
-"\n"
-"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
-"\t attributes of the error which caused the MCE.\n"
-"\n"
-"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
-"\t used for error thresholding purposes and its validity is indicated by\n"
-"\t MCi_STATUS[MiscV].\n"
-"\n"
-"synd:\t Set MCi_SYND: provide syndrome info about the error. Only valid on\n"
-"\t Scalable MCA systems, and its validity is indicated by MCi_STATUS[SyndV].\n"
-"\n"
-"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
-"\t associated with the error.\n"
-"\n"
-"cpu:\t The CPU to inject the error on.\n"
-"\n"
-"bank:\t Specify the bank you want to inject the error into: the number of\n"
-"\t banks in a processor varies and is family/model-specific, therefore, the\n"
-"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
-"\t injection.\n"
-"\n"
-"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
-"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
-"\t for AMD processors.\n"
-"\n"
-"\t Allowed error injection types:\n"
-"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
-"\t    format only. Safe to use.\n"
-"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
-"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
-"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
-"\t    before injecting.\n"
-"\t  - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n"
-"\t    error APIC interrupt handler to handle the error if the feature is \n"
-"\t    is present in hardware. \n"
-"\t  - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
-"\t    APIC interrupt handler to handle the error. \n"
-"\n";
-
-static ssize_t
-inj_readme_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
-{
-	return simple_read_from_buffer(ubuf, cnt, ppos,
-					readme_msg, strlen(readme_msg));
-}
-
-static const struct file_operations readme_fops = {
-	.read		= inj_readme_read,
-};
-
-static struct dfs_node {
-	char *name;
-	struct dentry *d;
-	const struct file_operations *fops;
-	umode_t perm;
-} dfs_fls[] = {
-	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
-	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "synd",	.fops = &synd_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
-	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
-	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
-};
-
-static int __init init_mce_inject(void)
-{
-	unsigned int i;
-	u64 cap;
-
-	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	n_banks = cap & MCG_BANKCNT_MASK;
-
-	dfs_inj = debugfs_create_dir("mce-inject", NULL);
-	if (!dfs_inj)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
-		dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
-						    dfs_fls[i].perm,
-						    dfs_inj,
-						    &i_mce,
-						    dfs_fls[i].fops);
-
-		if (!dfs_fls[i].d)
-			goto err_dfs_add;
-	}
-
-	return 0;
-
-err_dfs_add:
-	while (i-- > 0)
-		debugfs_remove(dfs_fls[i].d);
-
-	debugfs_remove(dfs_inj);
-	dfs_inj = NULL;
-
-	return -ENODEV;
-}
-
-static void __exit exit_mce_inject(void)
-{
-
-	debugfs_remove_recursive(dfs_inj);
-	dfs_inj = NULL;
-
-	memset(&dfs_fls, 0, sizeof(dfs_fls));
-}
-module_init(init_mce_inject);
-module_exit(exit_mce_inject);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
-MODULE_AUTHOR("AMD Inc.");
-MODULE_DESCRIPTION("MCE injection facility for RAS testing");
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index a163a90..cd4be19 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -102,7 +102,7 @@ static void __init setup_real_mode(void)
 
 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-	trampoline_pgd[511] = init_level4_pgt[511].pgd;
+	trampoline_pgd[511] = init_top_pgt[511].pgd;
 #endif
 }
 
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index fffb0a1..bced7a3 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,3 +1,6 @@
+OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index bcea81f..b5e48da 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -178,7 +178,7 @@ static struct apic xen_pv_apic = {
 	.get_apic_id 			= xen_get_apic_id,
 	.set_apic_id 			= xen_set_apic_id, /* Can be NULL on 32-bit. */
 
-	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid		= flat_cpu_mask_to_apicid,
 
 #ifdef CONFIG_SMP
 	.send_IPI_mask 			= xen_send_IPI_mask,
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 30bb2e8..a18703b 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -54,38 +54,6 @@ static efi_system_table_t efi_systab_xen __initdata = {
 	.tables		= EFI_INVALID_TABLE_ADDR  /* Initialized later. */
 };
 
-static const struct efi efi_xen __initconst = {
-	.systab                   = NULL, /* Initialized later. */
-	.runtime_version	  = 0,    /* Initialized later. */
-	.mps                      = EFI_INVALID_TABLE_ADDR,
-	.acpi                     = EFI_INVALID_TABLE_ADDR,
-	.acpi20                   = EFI_INVALID_TABLE_ADDR,
-	.smbios                   = EFI_INVALID_TABLE_ADDR,
-	.smbios3                  = EFI_INVALID_TABLE_ADDR,
-	.sal_systab               = EFI_INVALID_TABLE_ADDR,
-	.boot_info                = EFI_INVALID_TABLE_ADDR,
-	.hcdp                     = EFI_INVALID_TABLE_ADDR,
-	.uga                      = EFI_INVALID_TABLE_ADDR,
-	.uv_systab                = EFI_INVALID_TABLE_ADDR,
-	.fw_vendor                = EFI_INVALID_TABLE_ADDR,
-	.runtime                  = EFI_INVALID_TABLE_ADDR,
-	.config_table             = EFI_INVALID_TABLE_ADDR,
-	.get_time                 = xen_efi_get_time,
-	.set_time                 = xen_efi_set_time,
-	.get_wakeup_time          = xen_efi_get_wakeup_time,
-	.set_wakeup_time          = xen_efi_set_wakeup_time,
-	.get_variable             = xen_efi_get_variable,
-	.get_next_variable        = xen_efi_get_next_variable,
-	.set_variable             = xen_efi_set_variable,
-	.query_variable_info      = xen_efi_query_variable_info,
-	.update_capsule           = xen_efi_update_capsule,
-	.query_capsule_caps       = xen_efi_query_capsule_caps,
-	.get_next_high_mono_count = xen_efi_get_next_high_mono_count,
-	.reset_system             = xen_efi_reset_system,
-	.set_virtual_address_map  = NULL, /* Not used under Xen. */
-	.flags			  = 0     /* Initialized later. */
-};
-
 static efi_system_table_t __init *xen_efi_probe(void)
 {
 	struct xen_platform_op op = {
@@ -102,7 +70,18 @@ static efi_system_table_t __init *xen_efi_probe(void)
 
 	/* Here we know that Xen runs on EFI platform. */
 
-	efi = efi_xen;
+	efi.get_time                 = xen_efi_get_time;
+	efi.set_time                 = xen_efi_set_time;
+	efi.get_wakeup_time          = xen_efi_get_wakeup_time;
+	efi.set_wakeup_time          = xen_efi_set_wakeup_time;
+	efi.get_variable             = xen_efi_get_variable;
+	efi.get_next_variable        = xen_efi_get_next_variable;
+	efi.set_variable             = xen_efi_set_variable;
+	efi.query_variable_info      = xen_efi_query_variable_info;
+	efi.update_capsule           = xen_efi_update_capsule;
+	efi.query_capsule_caps       = xen_efi_query_capsule_caps;
+	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+	efi.reset_system             = xen_efi_reset_system;
 
 	efi_systab_xen.tables = info->cfg.addr;
 	efi_systab_xen.nr_tables = info->cfg.nent;
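
A short illustrative aside, not part of the patch: the efi.c hunk above stops
carrying a private efi_xen copy and instead overrides only the runtime-service
callbacks on the shared efi object at probe time. A minimal standalone C sketch
of that pattern, using hypothetical names, might look like:

/*
 * Standalone sketch (hypothetical names, not kernel code): keep one shared
 * ops object and let a backend patch only the callbacks it provides,
 * instead of swapping in a complete private copy of the structure.
 */
#include <stdio.h>

struct clock_ops {
	int (*get_time)(void);
	int (*set_time)(int t);
};

static int default_get_time(void)  { return 0; }
static int default_set_time(int t) { (void)t; return -1; }
static int backend_get_time(void)  { return 42; }

static struct clock_ops clock_ops = {	/* plays the role of the global object */
	.get_time = default_get_time,
	.set_time = default_set_time,
};

static void backend_probe(void)
{
	/* Override only what this backend implements. */
	clock_ops.get_time = backend_get_time;
}

int main(void)
{
	backend_probe();
	printf("time=%d\n", clock_ops.get_time());
	return 0;
}
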
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 1f386d7..1d7a721 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -975,37 +975,32 @@ static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
-
-#ifdef CONFIG_SMP
-/* Another cpu may still have their %cr3 pointing at the pagetable, so
-   we need to repoint it somewhere else before we can unpin it. */
-static void drop_other_mm_ref(void *info)
+static void drop_mm_ref_this_cpu(void *info)
 {
 	struct mm_struct *mm = info;
-	struct mm_struct *active_mm;
 
-	active_mm = this_cpu_read(cpu_tlbstate.active_mm);
-
-	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
 		leave_mm(smp_processor_id());
 
-	/* If this cpu still has a stale cr3 reference, then make sure
-	   it has been flushed. */
+	/*
+	 * If this cpu still has a stale cr3 reference, then make sure
+	 * it has been flushed.
+	 */
 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
-		load_cr3(swapper_pg_dir);
+		xen_mc_flush();
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Another cpu may still have their %cr3 pointing at the pagetable, so
+ * we need to repoint it somewhere else before we can unpin it.
+ */
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
 	cpumask_var_t mask;
 	unsigned cpu;
 
-	if (current->active_mm == mm) {
-		if (current->mm == mm)
-			load_cr3(swapper_pg_dir);
-		else
-			leave_mm(smp_processor_id());
-	}
+	drop_mm_ref_this_cpu(mm);
 
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
@@ -1013,31 +1008,31 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
-			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
 		}
 		return;
 	}
 	cpumask_copy(mask, mm_cpumask(mm));
 
-	/* It's possible that a vcpu may have a stale reference to our
-	   cr3, because its in lazy mode, and it hasn't yet flushed
-	   its set of pending hypercalls yet.  In this case, we can
-	   look at its actual current cr3 value, and force it to flush
-	   if needed. */
+	/*
+	 * It's possible that a vcpu may have a stale reference to our
+	 * cr3, because it's in lazy mode, and it hasn't yet flushed
+	 * its set of pending hypercalls.  In this case, we can
+	 * look at its actual current cr3 value, and force it to flush
+	 * if needed.
+	 */
 	for_each_online_cpu(cpu) {
 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 			cpumask_set_cpu(cpu, mask);
 	}
 
-	if (!cpumask_empty(mask))
-		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
 	free_cpumask_var(mask);
 }
 #else
 static void xen_drop_mm_ref(struct mm_struct *mm)
 {
-	if (current->active_mm == mm)
-		load_cr3(swapper_pg_dir);
+	drop_mm_ref_this_cpu(mm);
 }
 #endif
 
@@ -1366,8 +1361,7 @@ static void xen_flush_tlb_single(unsigned long addr)
 }
 
 static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long start,
-				 unsigned long end)
+				 const struct flush_tlb_info *info)
 {
 	struct {
 		struct mmuext_op op;
@@ -1379,7 +1373,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
 
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1393,9 +1387,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
 
 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+	if (info->end != TLB_FLUSH_ALL &&
+	    (info->end - info->start) <= PAGE_SIZE) {
 		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = start;
+		args->op.arg1.linear_addr = info->start;
 	}
 
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
@@ -1470,8 +1465,8 @@ static void xen_write_cr3(unsigned long cr3)
  * At the start of the day - when Xen launches a guest, it has already
  * built pagetables for the guest. We diligently look over them
  * in xen_setup_kernel_pagetable and graft as appropriate them in the
- * init_level4_pgt and its friends. Then when we are happy we load
- * the new init_level4_pgt - and continue on.
+ * init_top_pgt and its friends. Then when we are happy we load
+ * the new init_top_pgt - and continue on.
  *
  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
  * up the rest of the pagetables. When it has completed it loads the cr3.
@@ -1914,12 +1909,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	pt_end = pt_base + xen_start_info->nr_pt_frames;
 
 	/* Zap identity mapping */
-	init_level4_pgt[0] = __pgd(0);
+	init_top_pgt[0] = __pgd(0);
 
 	/* Pre-constructed entries are in pfn, so convert to mfn */
 	/* L4[272] -> level3_ident_pgt  */
 	/* L4[511] -> level3_kernel_pgt */
-	convert_pfn_mfn(init_level4_pgt);
+	convert_pfn_mfn(init_top_pgt);
 
 	/* L3_i[0] -> level2_ident_pgt */
 	convert_pfn_mfn(level3_ident_pgt);
@@ -1950,10 +1945,10 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Copy the initial P->M table mappings if necessary. */
 	i = pgd_index(xen_start_info->mfn_list);
 	if (i && i < pgd_index(__START_KERNEL_map))
-		init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
 
 	/* Make pagetable pieces RO */
-	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
@@ -1964,7 +1959,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 
 	/* Pin down new L4 */
 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
+			  PFN_DOWN(__pa_symbol(init_top_pgt)));
 
 	/* Unpin Xen-provided one */
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1974,7 +1969,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	 * attach it to, so make sure we just set kernel pgd.
 	 */
 	xen_mc_batch();
-	__xen_write_cr3(true, __pa(init_level4_pgt));
+	__xen_write_cr3(true, __pa(init_top_pgt));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
@@ -2022,7 +2017,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 	pmd_t pmd;
 	pte_t pte;
 
-	pa = read_cr3();
+	pa = read_cr3_pa();
 	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
 						       sizeof(pgd)));
 	if (!pgd_present(pgd))
@@ -2102,7 +2097,7 @@ void __init xen_relocate_p2m(void)
 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
 
-	pgd = __va(read_cr3());
+	pgd = __va(read_cr3_pa());
 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
 	idx_p4d = 0;
 	save_pud = n_pud;
@@ -2209,7 +2204,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
 {
 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
 
-	BUG_ON(read_cr3() != __pa(initial_page_table));
+	BUG_ON(read_cr3_pa() != __pa(initial_page_table));
 	BUG_ON(cr3 != __pa(swapper_pg_dir));
 
 	/*
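
As an illustrative aside, not part of the patch: the xen_flush_tlb_others()
hunk above replaces the (mm, start, end) argument list with a single
const struct flush_tlb_info pointer. A standalone sketch of that style of
refactor, with hypothetical names, could be:

/*
 * Standalone sketch (hypothetical names, not kernel code): bundle the flush
 * parameters in one descriptor and pass it by const pointer, rather than
 * threading mm/start/end through every helper.
 */
#include <stdio.h>

#define PAGE_SZ 4096UL

struct flush_info {
	unsigned long start;
	unsigned long end;
};

static void flush_others(const struct flush_info *info)
{
	if (info->end - info->start <= PAGE_SZ)
		printf("invlpg %#lx\n", info->start);
	else
		printf("flush all, %#lx-%#lx\n", info->start, info->end);
}

int main(void)
{
	struct flush_info info = { .start = 0x1000, .end = 0x2000 };

	flush_others(&info);
	return 0;
}
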
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index 5e24671..e1a5fbe 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -87,7 +87,7 @@
 	wrmsr
 
 	/* Enable pre-constructed page tables. */
-	mov $_pa(init_level4_pgt), %eax
+	mov $_pa(init_top_pgt), %eax
 	mov %eax, %cr3
 	mov $(X86_CR0_PG | X86_CR0_PE), %eax
 	mov %eax, %cr0
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index cc23e9e..30f62901 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -25,7 +25,6 @@
 generic-y += resource.h
 generic-y += rwsem.h
 generic-y += sections.h
-generic-y += siginfo.h
 generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
 # define PLATFORM_NR_IRQS 0
 #endif
 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
 
 #if VARIANT_NR_IRQS == 0
 static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 003eeee..30ee8c6 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -213,8 +213,6 @@ struct mm_struct;
 #define release_segments(mm)	do { } while(0)
 #define forget_segments()	do { } while (0)
 
-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index b15bf6b..4cb0d2f 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,2 +1,3 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
+generic-y += siginfo.h
diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
index 518954e..98b004e 100644
--- a/arch/xtensa/include/uapi/asm/ioctls.h
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
@@ -105,6 +105,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define TIOCSERCONFIG	_IO('T', 83)
 #define TIOCSERGWILD	_IOR('T', 84,  int)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd..9934102 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
 	int irq = irq_find_mapping(NULL, hwirq);
 
-	if (hwirq >= NR_IRQS) {
-		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
-				__func__, hwirq);
-	}
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
 	{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08..33bfa52 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
 		      (ccount_freq/10000) % 100,
 		      loops_per_jiffy/(500000/HZ),
 		      (loops_per_jiffy/(5000/HZ)) % 100);
-
-	seq_printf(f,"flags\t\t: "
+	seq_puts(f, "flags\t\t: "
 #if XCHAL_HAVE_NMI
 		     "nmi "
 #endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 0693792..74afbf0 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 668c105..fd524a5 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -187,7 +187,7 @@ void __init time_init(void)
 	local_timer_setup(0);
 	setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
 	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
-	clocksource_probe();
+	timer_probe();
 }
 
 /*
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc2..162c77e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@
   SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
   SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
   SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
-  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+  SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
   SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
 #endif
 
@@ -306,13 +306,13 @@
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  DOUBLEEXC_VECTOR_VADDR - 48,
+		  DOUBLEEXC_VECTOR_VADDR - 20,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
-		  48,
+		  20,
 		  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb..c45b90b 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
 	if (simdisk_count > MAX_SIMDISK_COUNT)
 		simdisk_count = MAX_SIMDISK_COUNT;
 
-	sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
-			GFP_KERNEL);
+	sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
 	if (sddev == NULL)
 		goto out_unregister;
 
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
 
 /* Interrupt configuration. */
 
-#define PLATFORM_NR_IRQS	10
+#define PLATFORM_NR_IRQS	0
 
 /* Default assignment of LX60 devices to external interrupts. */
 
 #ifdef CONFIG_XTENSA_MX
 #define DUART16552_INTNUM	XCHAL_EXTINT3_NUM
 #define OETH_IRQ		XCHAL_EXTINT4_NUM
+#define C67X00_IRQ		XCHAL_EXTINT8_NUM
 #else
 #define DUART16552_INTNUM	XCHAL_EXTINT0_NUM
 #define OETH_IRQ		XCHAL_EXTINT1_NUM
+#define C67X00_IRQ		XCHAL_EXTINT5_NUM
 #endif
 
 /*
@@ -63,5 +65,5 @@
 
 #define C67X00_PADDR		(XCHAL_KIO_PADDR + 0x0D0D0000)
 #define C67X00_SIZE		0x10
-#define C67X00_IRQ		5
+
 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be72..42285f3 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[2] = { /* IRQ number */
-		.start = OETH_IRQ,
-		.end   = OETH_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = { /* IRQ number */
-		.start = C67X00_IRQ,
-		.end   = C67X00_IRQ,
+		.start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+		.end   = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
 static struct plat_serial8250_port serial_platform_data[] = {
 	[0] = {
 		.mapbase	= DUART16552_PADDR,
-		.irq		= DUART16552_INTNUM,
+		.irq		= XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
 				  UPF_IOREMAP,
 		.iotype		= XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/badblocks.c b/block/badblocks.c
index 6ebcef2..43c7116 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -533,6 +533,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
 	case 3:
 		if (newline != '\n')
 			return -EINVAL;
+		/* fall through */
 	case 2:
 		if (length <= 0)
 			return -EINVAL;
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c8a32fb..78b2e0d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 	bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 						 struct bfq_group *curr_bfqg)
 {
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 	bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
 	unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-	return blkg_get(bfqg_to_blkg(bfqg));
+	bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-	return blkg_put(bfqg_to_blkg(bfqg));
+	bfqg->ref--;
+
+	if (bfqg->ref == 0)
+		kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+	bfqg_get(bfqg);
+
+	blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+	bfqg_put(bfqg);
+
+	blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
 	if (bfqq) {
 		bfqq->ioprio = bfqq->new_ioprio;
 		bfqq->ioprio_class = bfqq->new_ioprio_class;
-		bfqg_get(bfqg);
+		/*
+		 * Make sure that bfqg and its associated blkg do not
+		 * disappear before entity.
+		 */
+		bfqg_and_blkg_get(bfqg);
 	}
 	entity->parent = bfqg->my_entity; /* NULL for root group */
 	entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
 		return NULL;
 	}
 
+	/* see comments in bfq_bic_update_cgroup for why refcounting */
+	bfqg_get(bfqg);
 	return &bfqg->pd;
 }
 
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
 	struct bfq_group *bfqg = pd_to_bfqg(pd);
 
 	bfqg_stats_exit(&bfqg->stats);
-	return kfree(bfqg);
+	bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one.  Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		   struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
 	else if (entity->on_st)
 		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-	bfqg_put(bfqq_group(bfqq));
+	bfqg_and_blkg_put(bfqq_group(bfqq));
 
-	/*
-	 * Here we use a reference to bfqg.  We don't need a refcounter
-	 * as the cgroup reference will not be dropped, so that its
-	 * destroy() callback will not be invoked.
-	 */
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
-	bfqg_get(bfqg);
+	/* pin down bfqg and its associated blkg  */
+	bfqg_and_blkg_get(bfqg);
 
 	if (bfq_bfqq_busy(bfqq)) {
 		bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held, which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue).
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 		goto out;
 
 	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+	/*
+	 * Update blkg_path for bfq_log_* functions. We cache this
+	 * path, and update it here, for the following
+	 * reasons. Operations on blkg objects in blk-cgroup are
+	 * protected with the request_queue lock, and not with the
+	 * lock that protects the instances of this scheduler
+	 * (bfqd->lock). This exposes BFQ to the following sort of
+	 * race.
+	 *
+	 * The blkg_lookup performed in bfq_get_queue, protected
+	 * through rcu, may happen to return the address of a copy of
+	 * the original blkg. If this is the case, then the
+	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+	 * the blkg, is useless: it does not prevent blk-cgroup code
+	 * from destroying both the original blkg and all objects
+	 * directly or indirectly referred by the copy of the
+	 * blkg.
+	 *
+	 * On the bright side, destroy operations on a blkg invoke, as
+	 * a first step, hooks of the scheduler associated with the
+	 * blkg. And these hooks are executed with bfqd->lock held for
+	 * BFQ. As a consequence, for any blkg associated with the
+	 * request queue this instance of the scheduler is attached
+	 * to, we are guaranteed that such a blkg is not destroyed, and
+	 * that all the pointers it contains are consistent, while we
+	 * are holding bfqd->lock. A blkg_lookup performed with
+	 * bfqd->lock held then returns a fully consistent blkg, which
+	 * remains consistent as long as this lock is held.
+	 *
+	 * Thanks to the last fact, and to the fact that: (1) bfqg has
+	 * been obtained through a blkg_lookup in the above
+	 * assignment, and (2) bfqd->lock is being held, here we can
+	 * safely use the policy data for the involved blkg (i.e., the
+	 * field bfqg->pd) to get to the blkg associated with bfqg,
+	 * and then we can safely use any field of blkg. After we
+	 * release bfqd->lock, even just getting blkg through this
+	 * bfqg may cause dangling references to be traversed, as
+	 * bfqg->pd may not exist any more.
+	 *
+	 * In view of the above facts, here we cache, in the bfqg, any
+	 * blkg data we may need for this bic, and for its associated
+	 * bfq_queue. As of now, we need to cache only the path of the
+	 * blkg, which is used in the bfq_log_* functions.
+	 *
+	 * Finally, note that bfqg itself needs to be protected from
+	 * destruction on the blkg_free of the original blkg (which
+	 * invokes bfq_pd_free). We use an additional private
+	 * refcounter for bfqg, to let it disappear only after no
+	 * bfq_queue refers to it any longer.
+	 */
+	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
 	bic->blkcg_serial_nr = serial_nr;
 out:
 	rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
 					 struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
 		/*
 		 * The idle tree may still contain bfq_queues belonging
 		 * to exited task because they never migrated to a different
-		 * cgroup from the one being destroyed now.  No one else
-		 * can access them so it's safe to act without any lock.
+		 * cgroup from the one being destroyed now.
 		 */
 		bfq_flush_idle_tree(st);
 
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 08ce450..12bbc6b 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -725,8 +725,12 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
 }
 
 static void
-bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
+		      struct bfq_io_cq *bic, bool bfq_already_existing)
 {
+	unsigned int old_wr_coeff = bfqq->wr_coeff;
+	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
+
 	if (bic->saved_idle_window)
 		bfq_mark_bfqq_idle_window(bfqq);
 	else
@@ -754,6 +758,14 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 
 	/* make sure weight will be updated, however we got here */
 	bfqq->entity.prio_changed = 1;
+
+	if (likely(!busy))
+		return;
+
+	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
+		bfqd->wr_busy_queues++;
+	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
+		bfqd->wr_busy_queues--;
 }
 
 static int bfqq_process_refs(struct bfq_queue *bfqq)
@@ -3665,7 +3677,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
 	kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-	bfqg_put(bfqg);
+	bfqg_and_blkg_put(bfqg);
 #endif
 }
 
@@ -4290,10 +4302,16 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }
 
-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+static void bfq_finish_request(struct request *rq)
 {
-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
-	struct bfq_data *bfqd = bfqq->bfqd;
+	struct bfq_queue *bfqq;
+	struct bfq_data *bfqd;
+
+	if (!rq->elv.icq)
+		return;
+
+	bfqq = RQ_BFQQ(rq);
+	bfqd = bfqq->bfqd;
 
 	if (rq->rq_flags & RQF_STARTED)
 		bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4324,7 +4342,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
 		 */
 
 		if (!RB_EMPTY_NODE(&rq->rb_node))
-			bfq_remove_request(q, rq);
+			bfq_remove_request(rq->q, rq);
 		bfq_put_rq_priv_body(bfqq);
 	}
 
@@ -4394,21 +4412,22 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-			      struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
-	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
 	bool new_queue = false;
-	bool split = false;
+	bool bfqq_already_existing = false, split = false;
+
+	if (!rq->elv.icq)
+		return;
+	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
 
-	if (!bic)
-		goto queue_fail;
-
 	bfq_check_ioprio_change(bic, bio);
 
 	bfq_bic_update_cgroup(bic, bio);
@@ -4432,6 +4451,8 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
 								 true, is_sync,
 								 NULL);
+			else
+				bfqq_already_existing = true;
 		}
 	}
 
@@ -4457,7 +4478,8 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 			 * queue: restore the idle window and the
 			 * possible weight raising period.
 			 */
-			bfq_bfqq_resume_state(bfqq, bic);
+			bfq_bfqq_resume_state(bfqq, bfqd, bic,
+					      bfqq_already_existing);
 		}
 	}
 
@@ -4465,13 +4487,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 		bfq_handle_burst(bfqd, bfqq);
 
 	spin_unlock_irq(&bfqd->lock);
-
-	return 0;
-
-queue_fail:
-	spin_unlock_irq(&bfqd->lock);
-
-	return 1;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4950,8 +4965,8 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
-		.get_rq_priv		= bfq_get_rq_private,
-		.put_rq_priv		= bfq_put_rq_private,
+		.prepare_request	= bfq_prepare_request,
+		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
 		.dispatch_request	= bfq_dispatch_request,
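
A brief aside, not part of the patch: the bfq-cgroup.c hunks above give bfqg a
private reference counter and pair it with the blkg reference through
bfqg_and_blkg_get()/bfqg_and_blkg_put(), so the group can outlive the blkg as
long as queues still point at it. A standalone C sketch of that dual-refcount
pairing, with hypothetical names, might be:

/*
 * Standalone sketch (hypothetical names, not kernel code): the group object
 * carries a private counter and is freed only when that counter drops to
 * zero, while the paired helpers additionally pin/unpin the enclosing
 * container object.
 */
#include <stdlib.h>

struct container { int refs; };				/* stands in for the blkg */
struct group     { int refs; struct container *parent; };

static void container_get(struct container *c) { c->refs++; }
static void container_put(struct container *c)
{
	if (--c->refs == 0)
		free(c);
}

static void group_get(struct group *g) { g->refs++; }
static void group_put(struct group *g)
{
	if (--g->refs == 0)
		free(g);
}

/* Paired helpers: pin both the group and its container together. */
static void group_and_container_get(struct group *g)
{
	group_get(g);
	container_get(g->parent);
}

static void group_and_container_put(struct group *g)
{
	struct container *parent = g->parent;	/* g may be freed by group_put() */

	group_put(g);
	container_put(parent);
}

int main(void)
{
	struct container *c = calloc(1, sizeof(*c));
	struct group *g = calloc(1, sizeof(*g));

	g->parent = c;
	container_get(c);		/* container pinned by its creator */
	group_get(g);			/* group pinned at allocation time */

	group_and_container_get(g);	/* a queue takes a reference */
	group_and_container_put(g);	/* ...and drops it */

	group_put(g);			/* drop the allocation-time reference */
	container_put(c);		/* drop the creator's reference */
	return 0;
}
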
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ae783c0..5c3bf98 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -759,6 +759,12 @@ struct bfq_group {
 	/* must be the first member */
 	struct blkg_policy_data pd;
 
+	/* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+	char blkg_path[128];
+
+	/* reference counter (see comments in bfq_bic_update_cgroup) */
+	int ref;
+
 	struct bfq_entity entity;
 	struct bfq_sched_data sched_data;
 
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...)	do {			\
-	char __pbuf[128];						\
-									\
-	blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+	blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
 			bfq_bfqq_sync((bfqq)) ? 'S' : 'A',		\
-			  __pbuf, ##args);				\
+			bfqq_group(bfqq)->blkg_path, ##args);		\
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	do {			\
-	char __pbuf[128];						\
-									\
-	blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));		\
-	blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);	\
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...)	\
+	blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713..b8a3a65 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return false;
 
+	if (!bio_sectors(bio))
+		return false;
+
 	/* Already protected? */
 	if (bio_integrity(bio))
 		return false;
@@ -221,7 +224,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:	bio to generate/verify integrity metadata for
  * @proc_fn:	Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
 				 integrity_processing_fn *proc_fn)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -229,7 +232,7 @@ static int bio_integrity_process(struct bio *bio,
 	struct bvec_iter bviter;
 	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int ret = 0;
+	blk_status_t ret = BLK_STS_OK;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
 
@@ -366,7 +369,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+	bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
@@ -395,7 +398,7 @@ void bio_integrity_endio(struct bio *bio)
 	 * integrity metadata.  Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio->bi_end_io = bip->bip_end_io;
 		bio_endio(bio);
 
diff --git a/block/bio.c b/block/bio.c
index 888e780..1cfcd0d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -240,20 +240,21 @@ struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 	return bvl;
 }
 
-static void __bio_free(struct bio *bio)
+void bio_uninit(struct bio *bio)
 {
 	bio_disassociate_task(bio);
 
 	if (bio_integrity(bio))
 		bio_integrity_free(bio);
 }
+EXPORT_SYMBOL(bio_uninit);
 
 static void bio_free(struct bio *bio)
 {
 	struct bio_set *bs = bio->bi_pool;
 	void *p;
 
-	__bio_free(bio);
+	bio_uninit(bio);
 
 	if (bs) {
 		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
@@ -271,6 +272,11 @@ static void bio_free(struct bio *bio)
 	}
 }
 
+/*
+ * Users of this function have their own bio allocation. Subsequently,
+ * they must remember to pair any call to bio_init() with bio_uninit()
+ * when IO has completed, or when the bio is released.
+ */
 void bio_init(struct bio *bio, struct bio_vec *table,
 	      unsigned short max_vecs)
 {
@@ -297,7 +303,7 @@ void bio_reset(struct bio *bio)
 {
 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 
-	__bio_free(bio);
+	bio_uninit(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
 	bio->bi_flags = flags;
@@ -309,8 +315,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
 	struct bio *parent = bio->bi_private;
 
-	if (!parent->bi_error)
-		parent->bi_error = bio->bi_error;
+	if (!parent->bi_status)
+		parent->bi_status = bio->bi_status;
 	bio_put(bio);
 	return parent;
 }
@@ -363,6 +369,8 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	struct bio_list punt, nopunt;
 	struct bio *bio;
 
+	if (WARN_ON_ONCE(!bs->rescue_workqueue))
+		return;
 	/*
 	 * In order to guarantee forward progress we must punt only bios that
 	 * were allocated from this bio_set; otherwise, if there was a bio on
@@ -474,7 +482,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 
 		if (current->bio_list &&
 		    (!bio_list_empty(&current->bio_list[0]) ||
-		     !bio_list_empty(&current->bio_list[1])))
+		     !bio_list_empty(&current->bio_list[1])) &&
+		    bs->rescue_workqueue)
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -544,7 +553,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
@@ -593,6 +602,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_opf = bio_src->bi_opf;
+	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
 
@@ -676,6 +686,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 		return NULL;
 	bio->bi_bdev		= bio_src->bi_bdev;
 	bio->bi_opf		= bio_src->bi_opf;
+	bio->bi_write_hint	= bio_src->bi_write_hint;
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 
@@ -918,7 +929,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
 
-	ret->error = bio->bi_error;
+	ret->error = blk_status_to_errno(bio->bi_status);
 	complete(&ret->event);
 }
 
@@ -1817,8 +1828,8 @@ void bio_endio(struct bio *bio)
 	}
 
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-					 bio, bio->bi_error);
+		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+					 blk_status_to_errno(bio->bi_status));
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
 
@@ -1921,9 +1932,29 @@ void bioset_free(struct bio_set *bs)
 }
 EXPORT_SYMBOL(bioset_free);
 
-static struct bio_set *__bioset_create(unsigned int pool_size,
-				       unsigned int front_pad,
-				       bool create_bvec_pool)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size:	Number of bio and bio_vecs to cache in the mempool
+ * @front_pad:	Number of bytes to allocate in front of the returned bio
+ * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
+ *              and %BIOSET_NEED_RESCUER
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
+ *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
+ *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
+ *    dispatch queued requests when the mempool runs out of space.
+ *
+ */
+struct bio_set *bioset_create(unsigned int pool_size,
+			      unsigned int front_pad,
+			      int flags)
 {
 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
 	struct bio_set *bs;
@@ -1948,12 +1979,15 @@ static struct bio_set *__bioset_create(unsigned int pool_size,
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (create_bvec_pool) {
+	if (flags & BIOSET_NEED_BVECS) {
 		bs->bvec_pool = biovec_create_pool(pool_size);
 		if (!bs->bvec_pool)
 			goto bad;
 	}
 
+	if (!(flags & BIOSET_NEED_RESCUER))
+		return bs;
+
 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
 	if (!bs->rescue_workqueue)
 		goto bad;
@@ -1963,41 +1997,8 @@ static struct bio_set *__bioset_create(unsigned int pool_size,
 	bioset_free(bs);
 	return NULL;
 }
-
-/**
- * bioset_create  - Create a bio_set
- * @pool_size:	Number of bio and bio_vecs to cache in the mempool
- * @front_pad:	Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
- *    to ask for a number of bytes to be allocated in front of the bio.
- *    Front pad allocation is useful for embedding the bio inside
- *    another structure, to avoid allocating extra data to go with the bio.
- *    Note that the bio must be embedded at the END of that structure always,
- *    or things will break badly.
- */
-struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
-{
-	return __bioset_create(pool_size, front_pad, true);
-}
 EXPORT_SYMBOL(bioset_create);
 
-/**
- * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
- * @pool_size:	Number of bio to cache in the mempool
- * @front_pad:	Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Same functionality as bioset_create() except that mempool is not
- *    created for bio_vecs. Saving some memory for bio_clone_fast() users.
- */
-struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
-{
-	return __bioset_create(pool_size, front_pad, false);
-}
-EXPORT_SYMBOL(bioset_create_nobvec);
-
 #ifdef CONFIG_BLK_CGROUP
 
 /**
@@ -2112,7 +2113,7 @@ static int __init init_bio(void)
 	bio_integrity_init();
 	biovec_init_slabs();
 
-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");
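
As an illustrative aside, not part of the patch: the bio.c hunks above fold
bioset_create() and bioset_create_nobvec() into one constructor driven by a
flags bitmask (BIOSET_NEED_BVECS, BIOSET_NEED_RESCUER). A standalone sketch of
collapsing constructor variants into flags, with hypothetical names, could be:

/*
 * Standalone sketch (hypothetical names, not kernel code): replace two
 * near-identical constructors with one that takes a flags argument.
 */
#include <stdio.h>
#include <stdlib.h>

#define POOL_NEED_VECS    (1 << 0)
#define POOL_NEED_WORKER  (1 << 1)

struct pool {
	unsigned int size;
	int has_vecs;
	int has_worker;
};

static struct pool *pool_create(unsigned int size, int flags)
{
	struct pool *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->size = size;
	p->has_vecs = !!(flags & POOL_NEED_VECS);
	p->has_worker = !!(flags & POOL_NEED_WORKER);
	return p;
}

int main(void)
{
	struct pool *p = pool_create(64, POOL_NEED_VECS);

	if (!p)
		return 1;
	printf("size=%u vecs=%d worker=%d\n", p->size, p->has_vecs, p->has_worker);
	free(p);
	return 0;
}
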
 
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7c29471..0480892 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);
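
A short aside, not part of the patch: the blk-core.c hunks below introduce
blk_status_t along with a descriptor table that maps each status to an errno
and a printable name, plus lookups in both directions that fall back to a
generic I/O error. A standalone sketch of that table-driven mapping, with
hypothetical names, might be:

/*
 * Standalone sketch (hypothetical names, not kernel code): a table indexed
 * by status code holds the errno and a printable name; the reverse lookup
 * scans the table and falls back to -EIO when nothing matches.
 */
#include <errno.h>
#include <stdio.h>

enum status { STS_OK, STS_NOTSUPP, STS_TIMEOUT, STS_IOERR, STS_MAX };

static const struct {
	int errnum;
	const char *name;
} status_table[STS_MAX] = {
	[STS_OK]      = { 0,           ""                        },
	[STS_NOTSUPP] = { -EOPNOTSUPP, "operation not supported" },
	[STS_TIMEOUT] = { -ETIMEDOUT,  "timeout"                 },
	[STS_IOERR]   = { -EIO,        "I/O"                     },
};

static enum status errno_to_status(int errnum)
{
	int i;

	for (i = 0; i < STS_MAX; i++)
		if (status_table[i].errnum == errnum)
			return (enum status)i;
	return STS_IOERR;		/* everything else maps to an I/O error */
}

static int status_to_errno(enum status s)
{
	if ((unsigned int)s >= STS_MAX)
		return -EIO;
	return status_table[s].errnum;
}

int main(void)
{
	enum status s = errno_to_status(-ETIMEDOUT);

	printf("%s -> %d\n", status_table[s].name, status_to_errno(s));
	return 0;
}
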
diff --git a/block/blk-core.c b/block/blk-core.c
index c706852..af393d5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -129,11 +129,70 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+	int		errno;
+	const char	*name;
+} blk_errors[] = {
+	[BLK_STS_OK]		= { 0,		"" },
+	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
+	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
+	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
+	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
+	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
+	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
+	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
+	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
+	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
+
+	/* device mapper special case, should not leak out: */
+	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
+
+	/* everything else not covered above: */
+	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+		if (blk_errors[i].errno == errno)
+			return (__force blk_status_t)i;
+	}
+
+	return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return -EIO;
+	return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return;
+
+	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+			   __func__, blk_errors[idx].name, req->rq_disk ?
+			   req->rq_disk->disk_name : "?",
+			   (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-			  unsigned int nbytes, int error)
+			  unsigned int nbytes, blk_status_t error)
 {
 	if (error)
-		bio->bi_error = error;
+		bio->bi_status = error;
 
 	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
@@ -177,10 +236,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (likely(!blk_queue_dead(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work,
 				   msecs_to_jiffies(msecs));
@@ -198,6 +260,9 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	blk_run_queue_async(q);
 }
@@ -210,11 +275,13 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
 	WARN_ON(!irqs_disabled());
+	WARN_ON_ONCE(q->mq_ops);
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	__blk_run_queue(q);
@@ -233,10 +300,13 @@ EXPORT_SYMBOL(blk_start_queue);
  *   or if it simply chooses not to queue more I/O at one point, it can
  *   call this function to prevent the request_fn from being called until
  *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ *   blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -289,6 +359,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (unlikely(blk_queue_dead(q)))
 		return;
 
@@ -310,11 +383,13 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q:	The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
@@ -328,10 +403,18 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
@@ -349,6 +432,8 @@ void blk_run_queue(struct request_queue *q)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irqsave(q->queue_lock, flags);
 	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -377,6 +462,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	int i;
 
 	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
 
 	while (true) {
 		bool drain = false;
@@ -455,6 +541,8 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irq(q->queue_lock);
 	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
@@ -481,6 +569,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  * @q: queue of interest
  *
  * Leave bypass mode and restore the normal queueing behavior.
+ *
+ * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
+ * this function is called for both blk-sq and blk-mq queues.
  */
 void blk_queue_bypass_end(struct request_queue *q)
 {
@@ -648,13 +739,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
@@ -726,7 +823,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (q->id < 0)
 		goto fail_q;
 
-	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (!q->bio_split)
 		goto fail_id;
 
@@ -872,6 +969,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
+	WARN_ON_ONCE(q->mq_ops);
+
 	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 	if (!q->fq)
 		return -ENOMEM;
@@ -1009,6 +1108,8 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 	struct request_list *rl;
 	int on_thresh, off_thresh;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	spin_lock_irq(q->queue_lock);
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
@@ -1071,6 +1172,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	int may_queue;
 	req_flags_t rq_flags = RQF_ALLOCED;
 
+	lockdep_assert_held(q->queue_lock);
+
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
@@ -1244,12 +1347,20 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	struct request_list *rl;
 	struct request *rq;
 
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
 	rq = __get_request(rl, op, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
+	if (op & REQ_NOWAIT) {
+		blk_put_rl(rl);
+		return ERR_PTR(-EAGAIN);
+	}
+
 	if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return rq;
@@ -1277,16 +1388,18 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-		gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		return rq;
@@ -1299,14 +1412,24 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+				gfp_t gfp_mask)
 {
-	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw,
+	struct request *req;
+
+	if (q->mq_ops) {
+		req = blk_mq_alloc_request(q, op,
 			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
 				0 : BLK_MQ_REQ_NOWAIT);
-	else
-		return blk_old_get_request(q, rw, gfp_mask);
+		if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+			q->mq_ops->initialize_rq_fn(req);
+	} else {
+		req = blk_old_get_request(q, op, gfp_mask);
+		if (!IS_ERR(req) && q->initialize_rq_fn)
+			q->initialize_rq_fn(req);
+	}
+
+	return req;
 }
 EXPORT_SYMBOL(blk_get_request);
 
@@ -1322,6 +1445,9 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
 	trace_block_rq_requeue(q, rq);
@@ -1396,9 +1522,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
 	req_flags_t rq_flags = req->rq_flags;
@@ -1411,6 +1534,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		return;
 	}
 
+	lockdep_assert_held(q->queue_lock);
+
 	blk_pm_put_request(req);
 
 	elv_completed_request(q, req);
@@ -1640,6 +1765,7 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
 		req->ioprio = ioc->ioprio;
 	else
 		req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+	req->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(req->q, req, bio);
 }
 EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -1659,10 +1785,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
@@ -1720,7 +1846,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		__wbt_done(q->rq_wb, wb_acct);
-		bio->bi_error = PTR_ERR(req);
+		if (PTR_ERR(req) == -ENOMEM)
+			bio->bi_status = BLK_STS_RESOURCE;
+		else
+			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		goto out_unlock;
 	}
@@ -1875,7 +2004,7 @@ generic_make_request_checks(struct bio *bio)
 {
 	struct request_queue *q;
 	int nr_sectors = bio_sectors(bio);
-	int err = -EIO;
+	blk_status_t status = BLK_STS_IOERR;
 	char b[BDEVNAME_SIZE];
 	struct hd_struct *part;
 
@@ -1894,6 +2023,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	/*
+	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+	 * if the queue is not a request-based queue.
+	 */
+
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+		goto not_supported;
+
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
@@ -1918,7 +2055,7 @@ generic_make_request_checks(struct bio *bio)
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
-			err = 0;
+			status = BLK_STS_OK;
 			goto end_io;
 		}
 	}
@@ -1970,9 +2107,9 @@ generic_make_request_checks(struct bio *bio)
 	return true;
 
 not_supported:
-	err = -EOPNOTSUPP;
+	status = BLK_STS_NOTSUPP;
 end_io:
-	bio->bi_error = err;
+	bio->bi_status = status;
 	bio_endio(bio);
 	return false;
 }
@@ -2051,7 +2188,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-		if (likely(blk_queue_enter(q, false) == 0)) {
+		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -2076,7 +2213,11 @@ blk_qc_t generic_make_request(struct bio *bio)
 			bio_list_merge(&bio_list_on_stack[0], &same);
 			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
-			bio_io_error(bio);
+			if (unlikely(!blk_queue_dying(q) &&
+					(bio->bi_opf & REQ_NOWAIT)))
+				bio_wouldblock_error(bio);
+			else
+				bio_io_error(bio);
 		}
 		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
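An illustrative sketch (not part of this series; the example_* names are made
up): a submitter opting into the REQ_NOWAIT path added above. If the queue
cannot be entered without blocking, the bio is completed via
bio_wouldblock_error() instead of sleeping, which the endio callback is assumed
to observe as BLK_STS_AGAIN in bi_status.

static void example_end_io(struct bio *bio)
{
	/* completion path taken when submission would have blocked */
	if (bio->bi_status == BLK_STS_AGAIN)
		pr_debug("nowait submission failed, retry later\n");
	bio_put(bio);
}

static void example_submit_nowait(struct bio *bio)
{
	bio->bi_opf |= REQ_NOWAIT;	/* never sleep waiting to enter the queue */
	bio->bi_end_io = example_end_io;
	submit_bio(bio);
}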
@@ -2177,29 +2318,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
 	unsigned long flags;
 	int where = ELEVATOR_INSERT_BACK;
 
 	if (blk_cloned_rq_check_limits(q, rq))
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (rq->rq_disk &&
 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (q->mq_ops) {
 		if (blk_queue_io_stat(q))
 			blk_account_io_start(rq, true);
 		blk_mq_sched_insert_request(rq, false, true, false, false);
-		return 0;
+		return BLK_STS_OK;
 	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		return -ENODEV;
+		return BLK_STS_IOERR;
 	}
 
 	/*
@@ -2216,7 +2357,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	return 0;
+	return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
@@ -2232,9 +2373,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2374,15 +2512,15 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
 	struct request *rq;
 	int ret;
 
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	while ((rq = __elv_next_request(q)) != NULL) {
 
 		rq = blk_pm_peek_request(q, rq);
@@ -2450,15 +2588,14 @@ struct request *blk_peek_request(struct request_queue *q)
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
 			rq->rq_flags |= RQF_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
 			 */
 			blk_start_request(rq);
-			__blk_end_request_all(rq, err);
+			__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+					BLK_STS_TARGET : BLK_STS_IOERR);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
@@ -2499,12 +2636,12 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+	lockdep_assert_held(req->q->queue_lock);
+	WARN_ON_ONCE(req->q->mq_ops);
+
 	blk_dequeue_request(req);
 
 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2529,14 +2666,14 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
 	struct request *rq;
 
+	lockdep_assert_held(q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	rq = blk_peek_request(q);
 	if (rq)
 		blk_start_request(rq);
@@ -2547,7 +2684,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2566,49 +2703,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+		unsigned int nr_bytes)
 {
 	int total_bytes;
 
-	trace_block_rq_complete(req, error, nr_bytes);
+	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
 	if (!req->bio)
 		return false;
 
-	if (error && !blk_rq_is_passthrough(req) &&
-	    !(req->rq_flags & RQF_QUIET)) {
-		char *error_type;
-
-		switch (error) {
-		case -ENOLINK:
-			error_type = "recoverable transport";
-			break;
-		case -EREMOTEIO:
-			error_type = "critical target";
-			break;
-		case -EBADE:
-			error_type = "critical nexus";
-			break;
-		case -ETIMEDOUT:
-			error_type = "timeout";
-			break;
-		case -ENOSPC:
-			error_type = "critical space allocation";
-			break;
-		case -ENODATA:
-			error_type = "critical medium";
-			break;
-		case -EIO:
-		default:
-			error_type = "I/O";
-			break;
-		}
-		printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-				   __func__, error_type, req->rq_disk ?
-				   req->rq_disk->disk_name : "?",
-				   (unsigned long long)blk_rq_pos(req));
-
-	}
+	if (unlikely(error && !blk_rq_is_passthrough(req) &&
+		     !(req->rq_flags & RQF_QUIET)))
+		print_req_error(req, error);
 
 	blk_account_io_completion(req, nr_bytes);
 
@@ -2674,7 +2781,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
 				    unsigned int nr_bytes,
 				    unsigned int bidi_bytes)
 {
@@ -2712,13 +2819,13 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
 	struct request_queue *q = req->q;
 
+	lockdep_assert_held(req->q->queue_lock);
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (req->rq_flags & RQF_STATS)
 		blk_stat_add(req);
 
@@ -2752,7 +2859,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2766,12 +2873,14 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
 				 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 		return true;
 
@@ -2785,7 +2894,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2797,9 +2906,12 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
 				   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
+
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
 		return true;
 
@@ -2811,7 +2923,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2822,8 +2934,10 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+		unsigned int nr_bytes)
 {
+	WARN_ON_ONCE(rq->q->mq_ops);
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(blk_end_request);
@@ -2831,12 +2945,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
 	bool pending;
 	unsigned int bidi_bytes = 0;
@@ -2852,7 +2966,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2862,8 +2976,12 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+		unsigned int nr_bytes)
 {
+	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
+
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
@@ -2871,16 +2989,19 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
 	bool pending;
 	unsigned int bidi_bytes = 0;
 
+	lockdep_assert_held(rq->q->queue_lock);
+	WARN_ON_ONCE(rq->q->mq_ops);
+
 	if (unlikely(blk_bidi_rq(rq)))
 		bidi_bytes = blk_rq_bytes(rq->next_rq);
 
@@ -2892,7 +3013,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2902,7 +3023,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3145,6 +3266,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 			    bool from_schedule)
 	__releases(q->queue_lock)
 {
+	lockdep_assert_held(q->queue_lock);
+
 	trace_block_unplug(q, depth, !from_schedule);
 
 	if (from_schedule)
@@ -3243,7 +3366,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		 * Short-circuit if @q is dead
 		 */
 		if (unlikely(blk_queue_dying(q))) {
-			__blk_end_request_all(rq, -ENODEV);
+			__blk_end_request_all(rq, BLK_STS_IOERR);
 			continue;
 		}
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index a9451e3..5c0f3dc 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -16,7 +16,7 @@
  * @rq: request to complete
  * @error: end I/O status of the request
  */
-static void blk_end_sync_rq(struct request *rq, int error)
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
 	struct completion *waiting = rq->end_io_data;
 
@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
-		__blk_end_request_all(rq, -ENXIO);
+		__blk_end_request_all(rq, BLK_STS_IOERR);
 		spin_unlock_irq(q->queue_lock);
 		return;
 	}
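A brief illustrative sketch (not from this series; example_rq_end_io is a
hypothetical driver callback): with this conversion, rq_end_io_fn and the
blk_end_request*() helpers take a blk_status_t instead of a negative errno,
which can be translated back with blk_status_to_errno() where an errno is
still needed.

static void example_rq_end_io(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	if (error)
		pr_warn("request failed: %d\n", blk_status_to_errno(error));
	complete(waiting);
}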
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c4e0880..ed5fe32 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  */
 static bool blk_flush_complete_seq(struct request *rq,
 				   struct blk_flush_queue *fq,
-				   unsigned int seq, int error)
+				   unsigned int seq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq,
 	return kicked | queued;
 }
 
-static void flush_end_io(struct request *flush_rq, int error)
+static void flush_end_io(struct request *flush_rq, blk_status_t error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -341,11 +341,13 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	return blk_flush_queue_rq(flush_rq, false);
 }
 
-static void flush_data_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	lockdep_assert_held(q->queue_lock);
+
 	/*
 	 * Updating q->in_flight[] here for making this tag usable
 	 * early. Because in blk_queue_start_tag(),
@@ -382,7 +384,7 @@ static void flush_data_end_io(struct request *rq, int error)
 		blk_run_queue_async(q);
 }
 
-static void mq_flush_data_end_io(struct request *rq, int error)
+static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
@@ -411,9 +413,6 @@ static void mq_flush_data_end_io(struct request *rq, int error)
  * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -422,6 +421,9 @@ void blk_insert_flush(struct request *rq)
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+	if (!q->mq_ops)
+		lockdep_assert_held(q->queue_lock);
+
 	/*
 	 * @policy now records what operations need to be done.  Adjust
 	 * REQ_PREFLUSH and FUA for the driver.
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 0f891a9..feb3057 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
 	.sysfs_ops	= &integrity_ops,
 };
 
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	return 0;
+	return BLK_STS_OK;
 }
 
 static const struct blk_integrity_profile nop_profile = {
diff --git a/block/blk-map.c b/block/blk-map.c
index 3b5cb86..2547016 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
  */
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
+	blk_queue_bounce(rq->q, &bio);
+
 	if (!rq->bio) {
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
@@ -72,15 +74,13 @@ static int __blk_rq_map_user_iov(struct request *rq,
 		map_data->offset += bio->bi_iter.bi_size;
 
 	orig_bio = bio;
-	blk_queue_bounce(q, &bio);
 
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
-	bio_get(bio);
-
 	ret = blk_rq_append_bio(rq, bio);
+	bio_get(bio);
 	if (ret) {
 		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
@@ -249,7 +249,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return ret;
 	}
 
-	blk_queue_bounce(q, &rq->bio);
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3990ae4..99038830 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -108,31 +108,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
-	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
-		 * With arbitrary bio size, the incoming bio may be very
-		 * big. We have to split the bio into small bios so that
-		 * each holds at most BIO_MAX_PAGES bvecs because
-		 * bio_clone() can fail to allocate big bvecs.
-		 *
-		 * It should have been better to apply the limit per
-		 * request queue in which bio_clone() is involved,
-		 * instead of globally. The biggest blocker is the
-		 * bio_clone() in bio bounce.
-		 *
-		 * If bio is splitted by this reason, we should have
-		 * allowed to continue bios merging, but don't do
-		 * that now for making the change simple.
-		 *
-		 * TODO: deal with bio bounce's bio_clone() gracefully
-		 * and convert the global limit into per-queue limit.
-		 */
-		if (bvecs++ >= BIO_MAX_PAGES)
-			goto split;
-
-		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
@@ -202,8 +180,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	return do_split ? new : NULL;
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio,
-		     struct bio_set *bs)
+void blk_queue_split(struct request_queue *q, struct bio **bio)
 {
 	struct bio *split, *res;
 	unsigned nsegs;
@@ -211,13 +188,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
+		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = blk_bio_write_zeroes_split(q, *bio, bs, &nsegs);
+		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
+		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
 		break;
 	default:
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
@@ -671,6 +648,9 @@ static void blk_account_io_merge(struct request *req)
 static struct request *attempt_merge(struct request_queue *q,
 				     struct request *req, struct request *next)
 {
+	if (!q->mq_ops)
+		lockdep_assert_held(q->queue_lock);
+
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return NULL;
 
@@ -693,6 +673,13 @@ static struct request *attempt_merge(struct request_queue *q,
 		return NULL;
 
 	/*
+	 * Don't allow merge of different write hints, or for a hint with
+	 * non-hint IO.
+	 */
+	if (req->write_hint != next->write_hint)
+		return NULL;
+
+	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
 	 * will have updated segment counts, update sector
@@ -811,6 +798,13 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	/*
+	 * Don't allow merge of different write hints, or for a hint with
+	 * non-hint IO.
+	 */
+	if (rq->write_hint != bio->bi_write_hint)
+		return false;
+
 	return true;
 }
 
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 8e61e86..4891f04 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -14,10 +14,14 @@
 #include "blk.h"
 #include "blk-mq.h"
 
-static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
-			      const int cpu)
+static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
 {
-	return cpu * nr_queues / nr_cpus;
+	/*
+	 * Non-online CPUs will be mapped to queue index 0.
+	 */
+	if (!cpu_online(cpu))
+		return 0;
+	return cpu % nr_queues;
 }
 
 static int get_first_sibling(unsigned int cpu)
@@ -35,56 +39,26 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 {
 	unsigned int *map = set->mq_map;
 	unsigned int nr_queues = set->nr_hw_queues;
-	const struct cpumask *online_mask = cpu_online_mask;
-	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
-	cpumask_var_t cpus;
+	unsigned int cpu, first_sibling;
 
-	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		return -ENOMEM;
-
-	cpumask_clear(cpus);
-	nr_cpus = nr_uniq_cpus = 0;
-	for_each_cpu(i, online_mask) {
-		nr_cpus++;
-		first_sibling = get_first_sibling(i);
-		if (!cpumask_test_cpu(first_sibling, cpus))
-			nr_uniq_cpus++;
-		cpumask_set_cpu(i, cpus);
+	for_each_possible_cpu(cpu) {
+		/*
+		 * First do sequential mapping between CPUs and queues.
+		 * In case we still have CPUs to map, and we have some number of
+		 * threads per core, then map sibling threads to the same queue
+		 * for performance optimization.
+		 */
+		if (cpu < nr_queues) {
+			map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+		} else {
+			first_sibling = get_first_sibling(cpu);
+			if (first_sibling == cpu)
+				map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+			else
+				map[cpu] = map[first_sibling];
+		}
 	}
 
-	queue = 0;
-	for_each_possible_cpu(i) {
-		if (!cpumask_test_cpu(i, online_mask)) {
-			map[i] = 0;
-			continue;
-		}
-
-		/*
-		 * Easy case - we have equal or more hardware queues. Or
-		 * there are no thread siblings to take into account. Do
-		 * 1:1 if enough, or sequential mapping if less.
-		 */
-		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
-			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
-			queue++;
-			continue;
-		}
-
-		/*
-		 * Less then nr_cpus queues, and we have some number of
-		 * threads per cores. Map sibling threads to the same
-		 * queue.
-		 */
-		first_sibling = get_first_sibling(i);
-		if (first_sibling == i) {
-			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
-							queue);
-			queue++;
-		} else
-			map[i] = map[first_sibling];
-	}
-
-	free_cpumask_var(cpus);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
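For illustration (not part of the patch): with 8 possible CPUs, 2 hardware
queues and hyperthread sibling pairs (0,4), (1,5), (2,6) and (3,7), CPUs 0 and 1
map sequentially to queues 0 and 1; CPUs 2 and 3 are their own first siblings
and fall back to cpu % nr_queues (queues 0 and 1); CPUs 4-7 inherit their first
sibling's queue (0, 1, 0, 1). cpu_to_queue_index() additionally sends CPUs that
are offline at map time to queue 0.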
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 803aed4..9ebc294 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -114,10 +114,12 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
 		blk_mq_run_hw_queues(q, true);
 	} else if (strcmp(op, "start") == 0) {
 		blk_mq_start_stopped_hw_queues(q, true);
+	} else if (strcmp(op, "kick") == 0) {
+		blk_mq_kick_requeue_list(q);
 	} else {
 		pr_err("%s: unsupported operation '%s'\n", __func__, op);
 inval:
-		pr_err("%s: use either 'run' or 'start'\n", __func__);
+		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
 		return -EINVAL;
 	}
 	return count;
@@ -133,6 +135,29 @@ static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
 	}
 }
 
+static int queue_write_hint_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	int i;
+
+	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
+		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
+
+	return 0;
+}
+
+static ssize_t queue_write_hint_store(void *data, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct request_queue *q = data;
+	int i;
+
+	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
+		q->write_hints[i] = 0;
+
+	return count;
+}
+
 static int queue_poll_stat_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
@@ -267,6 +292,14 @@ static const char *const rqf_name[] = {
 };
 #undef RQF_NAME
 
+#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
+static const char *const rqaf_name[] = {
+	RQAF_NAME(COMPLETE),
+	RQAF_NAME(STARTED),
+	RQAF_NAME(POLL_SLEPT),
+};
+#undef RQAF_NAME
+
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 {
 	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -283,6 +316,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 	seq_puts(m, ", .rq_flags=");
 	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
 		       ARRAY_SIZE(rqf_name));
+	seq_puts(m, ", .atomic_flags=");
+	blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
 	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
 		   rq->internal_tag);
 	if (mq_ops->show_rq)
@@ -298,6 +333,37 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 }
 EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
 
+static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
+	__acquires(&q->requeue_lock)
+{
+	struct request_queue *q = m->private;
+
+	spin_lock_irq(&q->requeue_lock);
+	return seq_list_start(&q->requeue_list, *pos);
+}
+
+static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct request_queue *q = m->private;
+
+	return seq_list_next(v, &q->requeue_list, pos);
+}
+
+static void queue_requeue_list_stop(struct seq_file *m, void *v)
+	__releases(&q->requeue_lock)
+{
+	struct request_queue *q = m->private;
+
+	spin_unlock_irq(&q->requeue_lock);
+}
+
+static const struct seq_operations queue_requeue_list_seq_ops = {
+	.start	= queue_requeue_list_start,
+	.next	= queue_requeue_list_next,
+	.stop	= queue_requeue_list_stop,
+	.show	= blk_mq_debugfs_rq_show,
+};
+
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
 	__acquires(&hctx->lock)
 {
@@ -329,6 +395,36 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
 	.show	= blk_mq_debugfs_rq_show,
 };
 
+struct show_busy_params {
+	struct seq_file		*m;
+	struct blk_mq_hw_ctx	*hctx;
+};
+
+/*
+ * Note: the state of a request may change while this function is in progress,
+ * e.g. due to a concurrent blk_mq_finish_request() call.
+ */
+static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+{
+	const struct show_busy_params *params = data;
+
+	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
+	    test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		__blk_mq_debugfs_rq_show(params->m,
+					 list_entry_rq(&rq->queuelist));
+}
+
+static int hctx_busy_show(void *data, struct seq_file *m)
+{
+	struct blk_mq_hw_ctx *hctx = data;
+	struct show_busy_params params = { .m = m, .hctx = hctx };
+
+	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
+				&params);
+
+	return 0;
+}
+
 static int hctx_ctx_map_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
@@ -655,7 +751,9 @@ const struct file_operations blk_mq_debugfs_fops = {
 
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{"poll_stat", 0400, queue_poll_stat_show},
+	{"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
 	{"state", 0600, queue_state_show, queue_state_write},
+	{"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
 	{},
 };
 
@@ -663,6 +761,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
 	{"state", 0400, hctx_state_show},
 	{"flags", 0400, hctx_flags_show},
 	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+	{"busy", 0400, hctx_busy_show},
 	{"ctx_map", 0400, hctx_ctx_map_show},
 	{"tags", 0400, hctx_tags_show},
 	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1f5b692..7f0dc48 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -31,11 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-				      struct request *rq,
-				      struct bio *bio,
-				      struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -47,90 +46,47 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 		if (!icq)
 			return;
 	}
-
+	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
-	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
-		rq->rq_flags |= RQF_ELVPRIV;
-		get_io_context(icq->ioc);
+}
+
+/*
+ * Mark a hardware queue as needing a restart. For shared queues, maintain
+ * a count of how many hardware queues are marked for restart.
+ */
+static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
 		return;
-	}
 
-	rq->elv.icq = NULL;
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_inc(&q->shared_hctx_restart);
+	} else
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-				    struct request *rq, struct bio *bio)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
-	struct io_context *ioc;
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		return false;
 
-	ioc = rq_ioc(bio);
-	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
-}
+	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+		struct request_queue *q = hctx->queue;
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-					 struct bio *bio,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
-{
-	struct elevator_queue *e = q->elevator;
-	struct request *rq;
+		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+			atomic_dec(&q->shared_hctx_restart);
+	} else
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 
-	blk_queue_enter_live(q);
-	data->q = q;
-	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
-		/*
-		 * Flush requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-		} else
-			rq = __blk_mq_alloc_request(data, op);
-	} else {
-		rq = __blk_mq_alloc_request(data, op);
+	if (blk_mq_hctx_has_pending(hctx)) {
+		blk_mq_run_hw_queue(hctx, true);
+		return true;
 	}
 
-	if (rq) {
-		if (!op_is_flush(op)) {
-			rq->elv.icq = NULL;
-			if (e && e->type->icq_cache)
-				blk_mq_sched_assign_ioc(q, rq, bio);
-		}
-		data->hctx->queued++;
-		return rq;
-	}
-
-	blk_queue_exit(q);
-	return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		blk_mq_sched_put_rq_priv(rq->q, rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-		e->type->ops.mq.put_request(rq);
-	else
-		blk_mq_finish_request(rq);
+	return false;
 }
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
@@ -141,7 +97,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	bool did_work = false;
 	LIST_HEAD(rq_list);
 
-	if (unlikely(blk_mq_hctx_stopped(hctx)))
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
 		return;
 
 	hctx->run++;
@@ -221,19 +178,73 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	struct request *rq;
+	int checked = 8;
+
+	lockdep_assert_held(&ctx->lock);
+
+	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+		bool merged = false;
+
+		if (!checked--)
+			break;
+
+		if (!blk_rq_merge_ok(rq, bio))
+			continue;
+
+		switch (blk_try_merge(rq, bio)) {
+		case ELEVATOR_BACK_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_back_merge(q, rq, bio);
+			break;
+		case ELEVATOR_FRONT_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_front_merge(q, rq, bio);
+			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
+		default:
+			continue;
+		}
+
+		if (merged)
+			ctx->rq_merged++;
+		return merged;
+	}
+
+	return false;
+}
+
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	bool ret = false;
 
-	if (e->type->ops.mq.bio_merge) {
-		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
+	if (e && e->type->ops.mq.bio_merge) {
 		blk_mq_put_ctx(ctx);
 		return e->type->ops.mq.bio_merge(hctx, bio);
 	}
 
-	return false;
+	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+		/* default per sw-queue merge */
+		spin_lock(&ctx->lock);
+		ret = blk_mq_attempt_merge(q, ctx, bio);
+		spin_unlock(&ctx->lock);
+	}
+
+	blk_mq_put_ctx(ctx);
+	return ret;
 }
 
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
@@ -266,18 +277,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
-		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (blk_mq_hctx_has_pending(hctx)) {
-			blk_mq_run_hw_queue(hctx, true);
-			return true;
-		}
-	}
-	return false;
-}
-
 /**
  * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
  * @pos:    loop cursor.
@@ -309,6 +308,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
 	unsigned int i, j;
 
 	if (set->flags & BLK_MQ_F_TAG_SHARED) {
+		/*
+		 * If this is 0, then we know that no hardware queues
+		 * have RESTART marked. We're done.
+		 */
+		if (!atomic_read(&queue->shared_hctx_restart))
+			return;
+
 		rcu_read_lock();
 		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
 					   tag_set_list) {
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index edafb53..9267d0b7 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,8 +7,7 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
 				 void (*exit)(struct blk_mq_hw_ctx *));
 
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
-void blk_mq_sched_put_request(struct request *rq);
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
@@ -38,35 +37,12 @@ int blk_mq_sched_init(struct request_queue *q);
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
-
-	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
+	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 		return false;
 
 	return __blk_mq_sched_bio_merge(q, bio);
 }
 
-static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-					   struct request *rq,
-					   struct bio *bio)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.get_rq_priv)
-		return e->type->ops.mq.get_rq_priv(q, rq, bio);
-
-	return 0;
-}
-
-static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
-					    struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.mq.put_rq_priv)
-		e->type->ops.mq.put_rq_priv(q, rq);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
@@ -115,15 +91,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-/*
- * Mark a hardware queue as needing a restart.
- */
-static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
-{
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2224ffd..6cef42f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -37,12 +37,8 @@
 #include "blk-wbt.h"
 #include "blk-mq-sched.h"
 
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
-
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
-static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
@@ -154,13 +150,28 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+/*
+ * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
+ * mpt3sas driver such that this function can be removed.
+ */
+void blk_mq_quiesce_queue_nowait(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
+
 /**
- * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
  * @q: request queue.
  *
  * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Additionally, it is not prevented that
- * new queue_rq() calls occur unless the queue has been stopped first.
+ * callback function is invoked. Once this function has returned, no
+ * dispatch can happen until the queue is unquiesced via
+ * blk_mq_unquiesce_queue().
  */
 void blk_mq_quiesce_queue(struct request_queue *q)
 {
@@ -168,11 +179,11 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	unsigned int i;
 	bool rcu = false;
 
-	__blk_mq_stop_hw_queues(q, true);
+	blk_mq_quiesce_queue_nowait(q);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->flags & BLK_MQ_F_BLOCKING)
-			synchronize_srcu(&hctx->queue_rq_srcu);
+			synchronize_srcu(hctx->queue_rq_srcu);
 		else
 			rcu = true;
 	}
@@ -181,6 +192,26 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
+/*
+ * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
+ * @q: request queue.
+ *
+ * This function restores the queue to the state it was in before it was
+ * quiesced by blk_mq_quiesce_queue().
+ */
+void blk_mq_unquiesce_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* dispatch requests which are inserted during quiescing */
+	blk_mq_run_hw_queues(q, true);
+}
+EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
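An illustrative sketch (not part of this series; example_reconfigure is a
made-up driver helper) of the intended pairing of the new quiesce/unquiesce
API around driver state that .queue_rq() depends on:

static void example_reconfigure(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* wait for in-flight dispatches to finish */

	/* ... update state that .queue_rq() reads, with no dispatch racing ... */

	blk_mq_unquiesce_queue(q);	/* allow dispatch again and rerun queues */
}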
@@ -204,15 +235,33 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			struct request *rq, unsigned int op)
+static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+		unsigned int tag, unsigned int op)
 {
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct request *rq = tags->static_rqs[tag];
+
+	rq->rq_flags = 0;
+
+	if (data->flags & BLK_MQ_REQ_INTERNAL) {
+		rq->tag = -1;
+		rq->internal_tag = tag;
+	} else {
+		if (blk_mq_tag_busy(data->hctx)) {
+			rq->rq_flags = RQF_MQ_INFLIGHT;
+			atomic_inc(&data->hctx->nr_active);
+		}
+		rq->tag = tag;
+		rq->internal_tag = -1;
+		data->hctx->tags->rqs[rq->tag] = rq;
+	}
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
-	rq->q = q;
-	rq->mq_ctx = ctx;
+	rq->q = data->q;
+	rq->mq_ctx = data->ctx;
 	rq->cmd_flags = op;
-	if (blk_queue_io_stat(q))
+	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
@@ -241,44 +290,60 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[op_is_sync(op)]++;
+	data->ctx->rq_dispatched[op_is_sync(op)]++;
+	return rq;
 }
-EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
 
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-				       unsigned int op)
+static struct request *blk_mq_get_request(struct request_queue *q,
+		struct bio *bio, unsigned int op,
+		struct blk_mq_alloc_data *data)
 {
+	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
 
-	tag = blk_mq_get_tag(data);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	blk_queue_enter_live(q);
+	data->q = q;
+	if (likely(!data->ctx))
+		data->ctx = blk_mq_get_ctx(q);
+	if (likely(!data->hctx))
+		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
+	if (op & REQ_NOWAIT)
+		data->flags |= BLK_MQ_REQ_NOWAIT;
 
-		rq = tags->static_rqs[tag];
+	if (e) {
+		data->flags |= BLK_MQ_REQ_INTERNAL;
 
-		if (data->flags & BLK_MQ_REQ_INTERNAL) {
-			rq->tag = -1;
-			rq->internal_tag = tag;
-		} else {
-			if (blk_mq_tag_busy(data->hctx)) {
-				rq->rq_flags = RQF_MQ_INFLIGHT;
-				atomic_inc(&data->hctx->nr_active);
-			}
-			rq->tag = tag;
-			rq->internal_tag = -1;
-			data->hctx->tags->rqs[rq->tag] = rq;
-		}
-
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
-		return rq;
+		/*
+		 * Flush requests are special and go directly to the
+		 * dispatch list.
+		 */
+		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
+			e->type->ops.mq.limit_depth(op, data);
 	}
 
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
+	tag = blk_mq_get_tag(data);
+	if (tag == BLK_MQ_TAG_FAIL) {
+		blk_queue_exit(q);
+		return NULL;
+	}
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+	rq = blk_mq_rq_ctx_init(data, tag, op);
+	if (!op_is_flush(op)) {
+		rq->elv.icq = NULL;
+		if (e && e->type->ops.mq.prepare_request) {
+			if (e->type->icq_cache && rq_ioc(bio))
+				blk_mq_sched_assign_ioc(rq, bio);
+
+			e->type->ops.mq.prepare_request(rq, bio);
+			rq->rq_flags |= RQF_ELVPRIV;
+		}
+	}
+	data->hctx->queued++;
+	return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		unsigned int flags)
 {
 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
@@ -289,7 +354,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
 	blk_mq_put_ctx(alloc_data.ctx);
 	blk_queue_exit(q);
@@ -304,8 +369,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
-		unsigned int flags, unsigned int hctx_idx)
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+		unsigned int op, unsigned int flags, unsigned int hctx_idx)
 {
 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
 	struct request *rq;
@@ -340,7 +405,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	cpu = cpumask_first(alloc_data.hctx->cpumask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
 	blk_queue_exit(q);
 
@@ -351,17 +416,28 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			     struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-	const int sched_tag = rq->internal_tag;
 	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	const int sched_tag = rq->internal_tag;
 
+	if (rq->rq_flags & RQF_ELVPRIV) {
+		if (e && e->type->ops.mq.finish_request)
+			e->type->ops.mq.finish_request(rq);
+		if (rq->elv.icq) {
+			put_io_context(rq->elv.icq->ioc);
+			rq->elv.icq = NULL;
+		}
+	}
+
+	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
 
 	wbt_done(q->rq_wb, &rq->issue_stat);
-	rq->rq_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
@@ -372,29 +448,9 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-				     struct request *rq)
-{
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-	ctx->rq_completed[rq_is_sync(rq)]++;
-	__blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-	blk_mq_sched_put_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
-inline void __blk_mq_end_request(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	blk_account_io_done(rq);
 
@@ -409,7 +465,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_mq_end_request);
 
-void blk_mq_end_request(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
 		BUG();
@@ -753,50 +809,6 @@ static void blk_mq_timeout_work(struct work_struct *work)
 	blk_queue_exit(q);
 }
 
-/*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
- */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-				 struct blk_mq_ctx *ctx, struct bio *bio)
-{
-	struct request *rq;
-	int checked = 8;
-
-	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-		bool merged = false;
-
-		if (!checked--)
-			break;
-
-		if (!blk_rq_merge_ok(rq, bio))
-			continue;
-
-		switch (blk_try_merge(rq, bio)) {
-		case ELEVATOR_BACK_MERGE:
-			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_back_merge(q, rq, bio);
-			break;
-		case ELEVATOR_FRONT_MERGE:
-			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_front_merge(q, rq, bio);
-			break;
-		case ELEVATOR_DISCARD_MERGE:
-			merged = bio_attempt_discard_merge(q, rq, bio);
-			break;
-		default:
-			continue;
-		}
-
-		if (merged)
-			ctx->rq_merged++;
-		return merged;
-	}
-
-	return false;
-}
-
 struct flush_busy_ctx_data {
 	struct blk_mq_hw_ctx *hctx;
 	struct list_head *list;
@@ -926,14 +938,14 @@ static bool reorder_tags_to_front(struct list_head *list)
 	return first != NULL;
 }
 
-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 				void *key)
 {
 	struct blk_mq_hw_ctx *hctx;
 
 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
-	list_del(&wait->task_list);
+	list_del(&wait->entry);
 	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
 	blk_mq_run_hw_queue(hctx, true);
 	return 1;
@@ -968,7 +980,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
-	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+	int errors, queued;
 
 	if (list_empty(list))
 		return false;
@@ -979,6 +991,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 	errors = queued = 0;
 	do {
 		struct blk_mq_queue_data bd;
+		blk_status_t ret;
 
 		rq = list_first_entry(list, struct request, queuelist);
 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -1019,25 +1032,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
-		switch (ret) {
-		case BLK_MQ_RQ_QUEUE_OK:
-			queued++;
-			break;
-		case BLK_MQ_RQ_QUEUE_BUSY:
+		if (ret == BLK_STS_RESOURCE) {
 			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
 			break;
-		default:
-			pr_err("blk-mq: bad return on queue: %d\n", ret);
-		case BLK_MQ_RQ_QUEUE_ERROR:
-			errors++;
-			blk_mq_end_request(rq, -EIO);
-			break;
 		}
 
-		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-			break;
+		if (unlikely(ret != BLK_STS_OK)) {
+			errors++;
+			blk_mq_end_request(rq, BLK_STS_IOERR);
+			continue;
+		}
+
+		queued++;
 	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
@@ -1075,7 +1083,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
 		 *   been stopped before rerunning a queue.
 		 * - Some but not all block drivers stop a queue before
-		 *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
 		 *   and dm-rq.
 		 */
 		if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1100,9 +1108,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	} else {
 		might_sleep();
 
-		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
 		blk_mq_sched_dispatch_requests(hctx);
-		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
 	}
 }
 
@@ -1134,8 +1142,10 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 					unsigned long msecs)
 {
-	if (unlikely(blk_mq_hctx_stopped(hctx) ||
-		     !blk_mq_hw_queue_mapped(hctx)))
+	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
+		return;
+
+	if (unlikely(blk_mq_hctx_stopped(hctx)))
 		return;
 
 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -1201,34 +1211,39 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
+/*
+ * This function is often used by a driver to pause .queue_rq() when it
+ * runs out of resources or some other condition isn't satisfied, in which
+ * case BLK_MQ_RQ_QUEUE_BUSY is usually returned.
+ *
+ * We do not guarantee that dispatch can be drained or blocked
+ * after blk_mq_stop_hw_queue() returns. Please use
+ * blk_mq_quiesce_queue() for that requirement.
+ */
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	if (sync)
-		cancel_delayed_work_sync(&hctx->run_work);
-	else
-		cancel_delayed_work(&hctx->run_work);
+	cancel_delayed_work(&hctx->run_work);
 
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
-
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
-{
-	__blk_mq_stop_hw_queue(hctx, false);
-}
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 
-static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
+/*
+ * This function is often used by a driver to pause .queue_rq() when it
+ * runs out of resources or some other condition isn't satisfied, in which
+ * case BLK_MQ_RQ_QUEUE_BUSY is usually returned.
+ *
+ * We do not guarantee that dispatch can be drained or blocked
+ * after blk_mq_stop_hw_queues() returns. Please use
+ * blk_mq_quiesce_queue() for that requirement.
+ */
+void blk_mq_stop_hw_queues(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i)
-		__blk_mq_stop_hw_queue(hctx, sync);
-}
-
-void blk_mq_stop_hw_queues(struct request_queue *q)
-{
-	__blk_mq_stop_hw_queues(q, false);
+		blk_mq_stop_hw_queue(hctx);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
 
@@ -1295,7 +1310,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
-	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
+	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
 		return;
 
 	/*
@@ -1317,6 +1332,8 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	lockdep_assert_held(&ctx->lock);
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
@@ -1330,6 +1347,8 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	lockdep_assert_held(&ctx->lock);
+
 	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
@@ -1427,30 +1446,13 @@ static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
 		!blk_queue_nomerges(hctx->queue);
 }
 
-static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
-					 struct blk_mq_ctx *ctx,
-					 struct request *rq, struct bio *bio)
+static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
+				   struct blk_mq_ctx *ctx,
+				   struct request *rq)
 {
-	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
-		blk_mq_bio_to_request(rq, bio);
-		spin_lock(&ctx->lock);
-insert_rq:
-		__blk_mq_insert_request(hctx, rq, false);
-		spin_unlock(&ctx->lock);
-		return false;
-	} else {
-		struct request_queue *q = hctx->queue;
-
-		spin_lock(&ctx->lock);
-		if (!blk_mq_attempt_merge(q, ctx, bio)) {
-			blk_mq_bio_to_request(rq, bio);
-			goto insert_rq;
-		}
-
-		spin_unlock(&ctx->lock);
-		__blk_mq_finish_request(hctx, ctx, rq);
-		return true;
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, false);
+	spin_unlock(&ctx->lock);
 }
 
 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1461,22 +1463,29 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-				      bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+					struct request *rq,
+					blk_qc_t *cookie, bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
 		.rq = rq,
 		.last = true,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
-	int ret;
+	blk_status_t ret;
+	bool run_queue = true;
+
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+		run_queue = false;
+		goto insert;
+	}
 
 	if (q->elevator)
 		goto insert;
 
-	if (!blk_mq_get_driver_tag(rq, &hctx, false))
+	if (!blk_mq_get_driver_tag(rq, NULL, false))
 		goto insert;
 
 	new_cookie = request_to_qc_t(hctx, rq);
@@ -1487,20 +1496,21 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+	switch (ret) {
+	case BLK_STS_OK:
 		*cookie = new_cookie;
 		return;
-	}
-
-	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+	case BLK_STS_RESOURCE:
+		__blk_mq_requeue_request(rq);
+		goto insert;
+	default:
 		*cookie = BLK_QC_T_NONE;
-		blk_mq_end_request(rq, -EIO);
+		blk_mq_end_request(rq, ret);
 		return;
 	}
 
-	__blk_mq_requeue_request(rq);
 insert:
-	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+	blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,16 +1518,16 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
 		rcu_read_lock();
-		__blk_mq_try_issue_directly(rq, cookie, false);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, false);
 		rcu_read_unlock();
 	} else {
 		unsigned int srcu_idx;
 
 		might_sleep();
 
-		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-		__blk_mq_try_issue_directly(rq, cookie, true);
-		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
+		__blk_mq_try_issue_directly(hctx, rq, cookie, true);
+		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
 	}
 }
 
@@ -1535,7 +1545,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_bounce(q, &bio);
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
@@ -1553,9 +1563,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_block_getrq(q, bio, bio->bi_opf);
 
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
+		if (bio->bi_opf & REQ_NOWAIT)
+			bio_wouldblock_error(bio);
 		return BLK_QC_T_NONE;
 	}
 
@@ -1619,9 +1631,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_mq_put_ctx(data.ctx);
 
-		if (same_queue_rq)
+		if (same_queue_rq) {
+			data.hctx = blk_mq_map_queue(q,
+					same_queue_rq->mq_ctx->cpu);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
+		}
 	} else if (q->nr_hw_queues > 1 && is_sync) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
@@ -1630,11 +1645,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true, true, true);
-	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+	} else {
 		blk_mq_put_ctx(data.ctx);
+		blk_mq_bio_to_request(rq, bio);
+		blk_mq_queue_io(data.hctx, data.ctx, rq);
 		blk_mq_run_hw_queue(data.hctx, true);
-	} else
-		blk_mq_put_ctx(data.ctx);
+	}
 
 	return cookie;
 }
@@ -1857,7 +1873,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		set->ops->exit_hctx(hctx, hctx_idx);
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		cleanup_srcu_struct(&hctx->queue_rq_srcu);
+		cleanup_srcu_struct(hctx->queue_rq_srcu);
 
 	blk_mq_remove_cpuhp(hctx);
 	blk_free_flush_queue(hctx->fq);
@@ -1891,7 +1907,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);
 	hctx->queue = q;
-	hctx->queue_num = hctx_idx;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
@@ -1930,7 +1945,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		goto free_fq;
 
 	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		init_srcu_struct(&hctx->queue_rq_srcu);
+		init_srcu_struct(hctx->queue_rq_srcu);
 
 	blk_mq_debugfs_register_hctx(q, hctx);
 
@@ -1966,8 +1981,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
+		/* If the cpu isn't present, the cpu is mapped to first hctx */
+		if (!cpu_present(i))
 			continue;
 
 		hctx = blk_mq_map_queue(q, i);
@@ -2010,8 +2025,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 	}
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q,
-			       const struct cpumask *online_mask)
+static void blk_mq_map_swqueue(struct request_queue *q)
 {
 	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
@@ -2029,13 +2043,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 
 	/*
-	 * Map software to hardware queues
+	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
-	for_each_possible_cpu(i) {
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
+	for_each_present_cpu(i) {
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
@@ -2094,20 +2106,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 }
 
+/*
+ * Caller needs to ensure that we're either frozen/quiesced, or that
+ * the queue isn't live yet.
+ */
 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		if (shared)
+		if (shared) {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_inc(&q->shared_hctx_restart);
 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
-		else
+		} else {
+			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+				atomic_dec(&q->shared_hctx_restart);
 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+		}
 	}
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
+					bool shared)
 {
 	struct request_queue *q;
 
@@ -2205,6 +2227,20 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+			   __alignof__(struct blk_mq_hw_ctx)) !=
+		     sizeof(struct blk_mq_hw_ctx));
+
+	if (tag_set->flags & BLK_MQ_F_BLOCKING)
+		hw_ctx_size += sizeof(struct srcu_struct);
+
+	return hw_ctx_size;
+}
+
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 						struct request_queue *q)
 {
@@ -2219,7 +2255,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 			continue;
 
 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
-		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
 					GFP_KERNEL, node);
 		if (!hctxs[i])
 			break;
@@ -2321,16 +2357,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		blk_queue_softirq_done(q, set->ops->complete);
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
-
-	get_online_cpus();
-	mutex_lock(&all_q_mutex);
-
-	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
-	blk_mq_map_swqueue(q, cpu_online_mask);
-
-	mutex_unlock(&all_q_mutex);
-	put_online_cpus();
+	blk_mq_map_swqueue(q);
 
 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
 		int ret;
@@ -2356,18 +2384,12 @@ void blk_mq_free_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set	*set = q->tag_set;
 
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
-
 	blk_mq_del_queue_tag_set(q);
-
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q,
-				const struct cpumask *online_mask)
+static void blk_mq_queue_reinit(struct request_queue *q)
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
@@ -2380,76 +2402,12 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	 * involves freeing and re-allocating memory, worth doing?)
 	 */
 
-	blk_mq_map_swqueue(q, online_mask);
+	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
 	blk_mq_debugfs_register_hctxs(q);
 }
 
-/*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
-static struct cpumask cpuhp_online_new;
-
-static void blk_mq_queue_reinit_work(void)
-{
-	struct request_queue *q;
-
-	mutex_lock(&all_q_mutex);
-	/*
-	 * We need to freeze and reinit all existing queues.  Freezing
-	 * involves synchronous wait for an RCU grace period and doing it
-	 * one by one may take a long time.  Start freezing all queues in
-	 * one swoop and then wait for the completions so that freezing can
-	 * take place in parallel.
-	 */
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_wait(q);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &cpuhp_online_new);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_unfreeze_queue(q);
-
-	mutex_unlock(&all_q_mutex);
-}
-
-static int blk_mq_queue_reinit_dead(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
-/*
- * Before hotadded cpu starts handling requests, new mappings must be
- * established.  Otherwise, these requests in hw queue might never be
- * dispatched.
- *
- * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
- * for CPU0, and ctx1 for CPU1).
- *
- * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
- * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
- *
- * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
- * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
- * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
- * ignored.
- */
-static int blk_mq_queue_reinit_prepare(unsigned int cpu)
-{
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	cpumask_set_cpu(cpu, &cpuhp_online_new);
-	blk_mq_queue_reinit_work();
-	return 0;
-}
-
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;
@@ -2641,7 +2599,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+							int nr_hw_queues)
 {
 	struct request_queue *q;
 
@@ -2659,12 +2618,19 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
-		blk_mq_queue_reinit(q, cpu_online_mask);
+		blk_mq_queue_reinit(q);
 	}
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
@@ -2868,24 +2834,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 }
 EXPORT_SYMBOL_GPL(blk_mq_poll);
 
-void blk_mq_disable_hotplug(void)
-{
-	mutex_lock(&all_q_mutex);
-}
-
-void blk_mq_enable_hotplug(void)
-{
-	mutex_unlock(&all_q_mutex);
-}
-
 static int __init blk_mq_init(void)
 {
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
-
-	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
-				  blk_mq_queue_reinit_prepare,
-				  blk_mq_queue_reinit_dead);
 	return 0;
 }
 subsys_initcall(blk_mq_init);
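
The blk_mq_hw_ctx_size() helper added above sizes the per-hctx allocation so
that the SRCU state needed by BLK_MQ_F_BLOCKING drivers lives in a tail member
that is only paid for when it is used, and the BUILD_BUG_ON() proves the tail
member really is last. A stand-alone user-space sketch of the same sizing
trick follows; hctx_like, srcu_like and hctx_size() are illustrative names,
not blk-mq symbols.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct srcu_like {			/* stand-in for struct srcu_struct */
	long state[4];
};

struct hctx_like {
	unsigned int queue_num;
	unsigned long flags;
	struct srcu_like srcu[];	/* must stay the last member */
};

static size_t hctx_size(int blocking)
{
	/* same idea as the BUILD_BUG_ON(): nothing may follow the tail member */
	assert(ALIGN_UP(offsetof(struct hctx_like, srcu),
			__alignof__(struct hctx_like)) == sizeof(struct hctx_like));

	return sizeof(struct hctx_like) +
	       (blocking ? sizeof(struct srcu_like) : 0);
}

int main(void)
{
	struct hctx_like *hctx = calloc(1, hctx_size(1));

	printf("plain hctx: %zu bytes, blocking hctx: %zu bytes\n",
	       hctx_size(0), hctx_size(1));
	free(hctx);
	return 0;
}
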
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cc67b48..60b01c0 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
-/*
- * CPU hotplug helpers
- */
-void blk_mq_enable_hotplug(void);
-void blk_mq_disable_hotplug(void);
 
 /*
  * CPU -> queue mappings
@@ -128,17 +123,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 	return data->hctx->tags;
 }
 
-/*
- * Internal helpers for request allocation/init/free
- */
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			struct request *rq, unsigned int op);
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-				struct request *rq);
-void blk_mq_finish_request(struct request *rq);
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-					unsigned int op);
-
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4fa81ed3..be1f115 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -172,11 +172,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->nr_batching = BLK_BATCH_REQ;
 
 	blk_set_default_limits(&q->limits);
-
-	/*
-	 * by default assume old behaviour and bounce for any highmem page
-	 */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
 EXPORT_SYMBOL(blk_queue_make_request);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 712b018..27aceab 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 }
 
 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj:    the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task is to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);
 
 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
 		blk_stat_remove_callback(q, q->poll_cb);
@@ -809,7 +810,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
 
+static void blk_release_queue(struct kobject *kobj)
+{
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
+
+	INIT_WORK(&q->release_work, __blk_release_queue);
+	schedule_work(&q->release_work);
+}
+
 static const struct sysfs_ops queue_sysfs_ops = {
 	.show	= queue_attr_show,
 	.store	= queue_attr_store,
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 07cc329..2290f65 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -258,15 +258,14 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 	unsigned tag = rq->tag; /* negative tags invalid */
 
+	lockdep_assert_held(q->queue_lock);
+
 	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
@@ -307,9 +306,6 @@ EXPORT_SYMBOL(blk_queue_end_tag);
  *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
  *    it if it should need to be restarted for some reason.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
@@ -317,6 +313,8 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	unsigned max_depth;
 	int tag;
 
+	lockdep_assert_held(q->queue_lock);
+
 	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
@@ -389,14 +387,13 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  *   Hardware conditions may dictate a need to stop all pending requests.
  *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
 
+	lockdep_assert_held(q->queue_lock);
+
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
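
The blk-tag.c hunks above turn the old "queue lock must be held" notes into
lockdep_assert_held() calls, so the locking rule is checked at runtime on
lockdep-enabled kernels instead of only being documented. A rough user-space
analogue of the same idea is sketched below; tagged_queue, q_assert_held() and
friends are made-up names, and this is explicit owner tracking, not lockdep.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct tagged_queue {
	pthread_mutex_t lock;
	pthread_t owner;		/* valid only while locked != 0 */
	int locked;
};

static void q_lock(struct tagged_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self();
	q->locked = 1;
}

static void q_unlock(struct tagged_queue *q)
{
	q->locked = 0;
	pthread_mutex_unlock(&q->lock);
}

/* analogue of lockdep_assert_held(q->queue_lock) */
static void q_assert_held(struct tagged_queue *q)
{
	assert(q->locked && pthread_equal(q->owner, pthread_self()));
}

static void queue_end_tag(struct tagged_queue *q, int tag)
{
	q_assert_held(q);	/* previously only a comment, now enforced */
	printf("freeing tag %d\n", tag);
}

int main(void)
{
	static struct tagged_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	q_lock(&q);
	queue_end_tag(&q, 3);
	q_unlock(&q);
	return 0;
}
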
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fc13dd0..a7285bf 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latencies come from sequential IO. Such samples say
+ * nothing about whether IO is being impacted by others, so we ignore them.
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
 	struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
 	struct latency_bucket __percpu *latency_buckets;
 	unsigned long last_calculate_time;
+	unsigned long filtered_latency;
 
 	bool track_bio_latency;
 };
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 					  unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
 		throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
 			bio_op(bio), lat);
 
-	if (tg->latency_target) {
+	if (tg->latency_target && lat >= tg->td->filtered_latency) {
 		int bucket;
 		unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
 	struct throtl_data *td;
+	int i;
 
 	td = q->td;
 	BUG_ON(!td);
 
-	if (blk_queue_nonrot(q))
+	if (blk_queue_nonrot(q)) {
 		td->throtl_slice = DFL_THROTL_SLICE_SSD;
-	else
+		td->filtered_latency = LATENCY_FILTERED_SSD;
+	} else {
 		td->throtl_slice = DFL_THROTL_SLICE_HD;
+		td->filtered_latency = LATENCY_FILTERED_HD;
+		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+			td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+	}
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
 	/* if no low limit, use previous default */
 	td->throtl_slice = DFL_THROTL_SLICE_HD;
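
The filtered_latency logic above makes blk-throttle ignore latency samples
below a per-device floor (0 for SSDs, 1 ms for rotational disks) when deciding
whether a group's latency target is being violated, since very small latencies
on an HD usually just indicate sequential IO. A minimal sketch of that gating
with made-up sample values (the numbers and names here are illustrative only):

#include <stdio.h>

#define LATENCY_FILTERED_HD	1000L	/* 1 ms, in microseconds */

int main(void)
{
	long filtered_latency = LATENCY_FILTERED_HD;	/* rotational device */
	long latency_target = 4000;			/* group target: 4 ms */
	long samples[] = { 120, 250, 900, 5200, 7800 };	/* usec */
	int n = sizeof(samples) / sizeof(samples[0]);
	int i, considered = 0, violations = 0;

	for (i = 0; i < n; i++) {
		if (samples[i] < filtered_latency)
			continue;	/* sequential-IO noise on HD, skip */
		considered++;
		if (samples[i] > latency_target)
			violations++;
	}
	printf("considered %d samples, %d above target\n",
	       considered, violations);
	return 0;
}
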
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index cbff183..17ec83b 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -189,13 +189,15 @@ unsigned long blk_rq_timeout(unsigned long timeout)
  * Notes:
  *    Each request has its own timer, and as it is added to the queue, we
  *    set up the timer. When the request completes, we cancel the timer.
- *    Queue lock must be held for the non-mq case, mq case doesn't care.
  */
 void blk_add_timer(struct request *req)
 {
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
+	if (!q->mq_ops)
+		lockdep_assert_held(q->queue_lock);
+
 	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
 	if (!q->mq_ops && !q->rq_timed_out_fn)
 		return;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 17676f4..6a9a0f0 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 }
 
 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-			     wait_queue_t *wait, unsigned long rw)
+			     wait_queue_entry_t *wait, unsigned long rw)
 {
 	/*
 	 * inc it here even if disabled, since we'll dec it at completion.
@@ -520,7 +520,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
 	 * in line to be woken up, wait for our turn.
 	 */
 	if (waitqueue_active(&rqw->wait) &&
-	    rqw->wait.task_list.next != &wait->task_list)
+	    rqw->wait.head.next != &wait->entry)
 		return false;
 
 	return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
diff --git a/block/blk.h b/block/blk.h
index 2ed7022..01ebb81 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
@@ -143,6 +143,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 	struct request *rq;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	WARN_ON_ONCE(q->mq_ops);
+
 	while (1) {
 		if (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
@@ -334,4 +336,17 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
+#ifdef CONFIG_BOUNCE
+extern int init_emergency_isa_pool(void);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
+#else
+static inline int init_emergency_isa_pool(void)
+{
+	return 0;
+}
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+{
+}
+#endif /* CONFIG_BOUNCE */
+
 #endif /* BLK_INTERNAL_H */
diff --git a/block/bounce.c b/block/bounce.c
index 1cb5dd3..5793c2d 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -22,10 +22,12 @@
 #include <asm/tlbflush.h>
 
 #include <trace/events/block.h>
+#include "blk.h"
 
 #define POOL_SIZE	64
 #define ISA_POOL_SIZE	16
 
+static struct bio_set *bounce_bio_set, *bounce_bio_split;
 static mempool_t *page_pool, *isa_page_pool;
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
@@ -40,6 +42,14 @@ static __init int init_emergency_pool(void)
 	BUG_ON(!page_pool);
 	pr_info("pool size: %d pages\n", POOL_SIZE);
 
+	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+	BUG_ON(!bounce_bio_set);
+	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
+		BUG_ON(1);
+
+	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
+	BUG_ON(!bounce_bio_split);
+
 	return 0;
 }
 
@@ -143,7 +153,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		mempool_free(bvec->bv_page, pool);
 	}
 
-	bio_orig->bi_error = bio->bi_error;
+	bio_orig->bi_status = bio->bi_status;
 	bio_endio(bio_orig);
 	bio_put(bio);
 }
@@ -163,7 +173,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 
 	bounce_end_io(bio, pool);
@@ -186,20 +196,31 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, from;
 	struct bvec_iter iter;
-	unsigned i;
+	unsigned i = 0;
+	bool bounce = false;
+	int sectors = 0;
 
-	bio_for_each_segment(from, *bio_orig, iter)
-		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
-			goto bounce;
+	bio_for_each_segment(from, *bio_orig, iter) {
+		if (i++ < BIO_MAX_PAGES)
+			sectors += from.bv_len >> 9;
+		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
+			bounce = true;
+	}
+	if (!bounce)
+		return;
 
-	return;
-bounce:
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+	if (sectors < bio_sectors(*bio_orig)) {
+		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
+		bio_chain(bio, *bio_orig);
+		generic_make_request(*bio_orig);
+		*bio_orig = bio;
+	}
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q))
+		if (page_to_pfn(page) <= q->limits.bounce_pfn)
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -251,7 +272,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn)
+		if (q->limits.bounce_pfn >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -264,5 +285,3 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 */
 	__blk_queue_bounce(q, bio_orig, pool);
 }
-
-EXPORT_SYMBOL(blk_queue_bounce);
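
The reworked __blk_queue_bounce() above walks the bio once, summing the sectors
covered by the first BIO_MAX_PAGES segments and noting whether any page sits
above the bounce limit; if the bio is bigger than one clone can carry, it is
split and the remainder resubmitted via generic_make_request(). A stand-alone
sketch of that sizing pass follows; MAX_SEGS, BOUNCE_PFN and struct seg are
illustrative stand-ins, not the bio API.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SEGS	4	/* stands in for BIO_MAX_PAGES */
#define BOUNCE_PFN	100	/* pages above this "pfn" must be bounced */

struct seg {
	unsigned long pfn;	/* page frame the segment lives in */
	unsigned int len;	/* bytes */
};

int main(void)
{
	struct seg segs[] = {
		{ 10, 4096 }, { 120, 4096 }, { 30, 4096 },
		{ 40, 4096 }, { 50, 4096 }, { 60, 4096 },
	};
	int nsegs = sizeof(segs) / sizeof(segs[0]);
	int i, sectors = 0, total_sectors = 0;
	bool bounce = false;

	for (i = 0; i < nsegs; i++) {
		if (i < MAX_SEGS)
			sectors += segs[i].len >> 9;	/* 512-byte sectors */
		total_sectors += segs[i].len >> 9;
		if (segs[i].pfn > BOUNCE_PFN)
			bounce = true;
	}

	if (!bounce)
		printf("no bouncing needed\n");
	else if (sectors < total_sectors)
		printf("split at %d sectors, resubmit the rest\n", sectors);
	else
		printf("bounce the whole bio (%d sectors)\n", total_sectors);
	return 0;
}
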
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 0a23dbb..c4513b2 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -37,7 +37,7 @@ static void bsg_destroy_job(struct kref *kref)
 	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
 	struct request *rq = job->req;
 
-	blk_end_request_all(rq, scsi_req(rq)->result);
+	blk_end_request_all(rq, BLK_STS_OK);
 
 	put_device(job->dev);	/* release reference for the request */
 
@@ -202,7 +202,7 @@ static void bsg_request_fn(struct request_queue *q)
 		ret = bsg_create_job(dev, req);
 		if (ret) {
 			scsi_req(req)->result = ret;
-			blk_end_request_all(req, ret);
+			blk_end_request_all(req, BLK_STS_OK);
 			spin_lock_irq(q->queue_lock);
 			continue;
 		}
@@ -246,6 +246,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
diff --git a/block/bsg.c b/block/bsg.c
index 6fd0854..37663b6 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -236,7 +236,6 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 	rq = blk_get_request(q, op, GFP_KERNEL);
 	if (IS_ERR(rq))
 		return rq;
-	scsi_req_init(rq);
 
 	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
 	if (ret)
@@ -294,14 +293,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
  * async completion call-back from the block layer, when scsi/ide/whatever
  * calls end_that_request_last() on a request
  */
-static void bsg_rq_end_io(struct request *rq, int uptodate)
+static void bsg_rq_end_io(struct request *rq, blk_status_t status)
 {
 	struct bsg_command *bc = rq->end_io_data;
 	struct bsg_device *bd = bc->bd;
 	unsigned long flags;
 
-	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
-		bd->name, rq, bc, bc->bio, uptodate);
+	dprintk("%s: finished rq %p bc %p, bio %p\n",
+		bd->name, rq, bc, bc->bio);
 
 	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -750,6 +749,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 #ifdef BSG_DEBUG
 	unsigned char buf[32];
 #endif
+
+	if (!blk_queue_scsi_passthrough(rq)) {
+		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da69b07..3d5c289 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -978,15 +982,6 @@ static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 	return min_vdisktime;
 }
 
-static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
-{
-	s64 delta = (s64)(vdisktime - min_vdisktime);
-	if (delta < 0)
-		min_vdisktime = vdisktime;
-
-	return min_vdisktime;
-}
-
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
 	struct cfq_group *cfqg;
@@ -1362,6 +1357,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1384,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);
diff --git a/block/elevator.c b/block/elevator.c
index dac99fb..4bb2f0c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -681,6 +681,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 */
 		if (elv_attempt_insert_merge(q, rq))
 			break;
+		/* fall through */
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(blk_rq_is_passthrough(rq));
 		rq->rq_flags |= RQF_SORTED;
diff --git a/block/genhd.c b/block/genhd.c
index d252d29..7f520fa 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -36,7 +36,7 @@ struct kobject *block_depr;
 static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
-static struct device_type disk_type;
+static const struct device_type disk_type;
 
 static void disk_check_events(struct disk_events *ev,
 			      unsigned int *clearing_ptr);
@@ -1183,7 +1183,7 @@ static char *block_devnode(struct device *dev, umode_t *mode,
 	return NULL;
 }
 
-static struct device_type disk_type = {
+static const struct device_type disk_type = {
 	.name		= "disk",
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
diff --git a/block/ioprio.c b/block/ioprio.c
index 4b120c9..6f5d0b6 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -75,7 +75,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 		case IOPRIO_CLASS_RT:
 			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
-			/* fall through, rt has prio field too */
+			/* fall through */
+			/* rt has prio field too */
 		case IOPRIO_CLASS_BE:
 			if (data >= IOPRIO_BE_NR || data < 0)
 				return -EINVAL;
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b9faabc..f58cab8 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -99,7 +99,7 @@ struct kyber_hctx_data {
 	struct list_head rqs[KYBER_NUM_DOMAINS];
 	unsigned int cur_domain;
 	unsigned int batching;
-	wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
@@ -385,7 +385,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		INIT_LIST_HEAD(&khd->rqs[i]);
-		INIT_LIST_HEAD(&khd->domain_wait[i].task_list);
+		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
 		atomic_set(&khd->wait_index[i], 0);
 	}
 
@@ -426,33 +426,29 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
 	}
 }
 
-static struct request *kyber_get_request(struct request_queue *q,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
+static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 {
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
-	struct request *rq;
-
 	/*
 	 * We use the scheduler tags as per-hardware queue queueing tokens.
 	 * Async requests can be limited at this stage.
 	 */
-	if (!op_is_sync(op))
-		data->shallow_depth = kqd->async_depth;
+	if (!op_is_sync(op)) {
+		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
 
-	rq = __blk_mq_alloc_request(data, op);
-	if (rq)
-		rq_set_domain_token(rq, -1);
-	return rq;
+		data->shallow_depth = kqd->async_depth;
+	}
 }
 
-static void kyber_put_request(struct request *rq)
+static void kyber_prepare_request(struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = rq->q;
-	struct kyber_queue_data *kqd = q->elevator->elevator_data;
+	rq_set_domain_token(rq, -1);
+}
+
+static void kyber_finish_request(struct request *rq)
+{
+	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
 
 	rq_clear_domain_token(kqd, rq);
-	blk_mq_finish_request(rq);
 }
 
 static void kyber_completed_request(struct request *rq)
@@ -507,12 +503,12 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
 	}
 }
 
-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 			     void *key)
 {
 	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
 
-	list_del_init(&wait->task_list);
+	list_del_init(&wait->entry);
 	blk_mq_run_hw_queue(hctx, true);
 	return 1;
 }
@@ -523,7 +519,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
 	unsigned int sched_domain = khd->cur_domain;
 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-	wait_queue_t *wait = &khd->domain_wait[sched_domain];
+	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
 	struct sbq_wait_state *ws;
 	int nr;
 
@@ -536,7 +532,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	 * run when one becomes available. Note that this is serialized on
 	 * khd->lock, but we still need to be careful about the waker.
 	 */
-	if (list_empty_careful(&wait->task_list)) {
+	if (list_empty_careful(&wait->entry)) {
 		init_waitqueue_func_entry(wait, kyber_domain_wake);
 		wait->private = hctx;
 		ws = sbq_wait_ptr(domain_tokens,
@@ -734,9 +730,9 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
 {									\
 	struct blk_mq_hw_ctx *hctx = data;				\
 	struct kyber_hctx_data *khd = hctx->sched_data;			\
-	wait_queue_t *wait = &khd->domain_wait[domain];			\
+	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
 									\
-	seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list));	\
+	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
 	return 0;							\
 }
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
@@ -815,8 +811,9 @@ static struct elevator_type kyber_sched = {
 		.exit_sched = kyber_exit_sched,
 		.init_hctx = kyber_init_hctx,
 		.exit_hctx = kyber_exit_hctx,
-		.get_request = kyber_get_request,
-		.put_request = kyber_put_request,
+		.limit_depth = kyber_limit_depth,
+		.prepare_request = kyber_prepare_request,
+		.finish_request = kyber_finish_request,
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index edcea70..2a365c7 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -115,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
 		ldm_error("PRIVHEAD disk size doesn't match real disk size");
 		return false;
 	}
-	if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
+	if (uuid_parse(data + 0x0030, &ph->disk_id)) {
 		ldm_error("PRIVHEAD contains an invalid GUID.");
 		return false;
 	}
@@ -234,7 +234,7 @@ static bool ldm_compare_privheads (const struct privhead *ph1,
 		(ph1->logical_disk_size  == ph2->logical_disk_size)	&&
 		(ph1->config_start       == ph2->config_start)		&&
 		(ph1->config_size        == ph2->config_size)		&&
-		!memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+		uuid_equal(&ph1->disk_id, &ph2->disk_id));
 }
 
 /**
@@ -557,7 +557,7 @@ static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
 
 	list_for_each (item, &ldb->v_disk) {
 		struct vblk *v = list_entry (item, struct vblk, list);
-		if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+		if (uuid_equal(&v->vblk.disk.disk_id, &ldb->ph.disk_id))
 			return v;
 	}
 
@@ -892,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
 	disk = &vb->vblk.disk;
 	ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
 		sizeof (disk->alt_name));
-	if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
+	if (uuid_parse(buffer + 0x19 + r_name, &disk->disk_id))
 		return false;
 
 	return true;
@@ -927,7 +927,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
 		return false;
 
 	disk = &vb->vblk.disk;
-	memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+	uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name));
 	return true;
 }
 
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
index 374242c..f4c6055 100644
--- a/block/partitions/ldm.h
+++ b/block/partitions/ldm.h
@@ -112,8 +112,6 @@ struct frag {				/* VBLK Fragment handling */
 
 /* In memory LDM database structures. */
 
-#define GUID_SIZE		16
-
 struct privhead {			/* Offsets and sizes are in sectors. */
 	u16	ver_major;
 	u16	ver_minor;
@@ -121,7 +119,7 @@ struct privhead {			/* Offsets and sizes are in sectors. */
 	u64	logical_disk_size;
 	u64	config_start;
 	u64	config_size;
-	u8	disk_id[GUID_SIZE];
+	uuid_t	disk_id;
 };
 
 struct tocblock {			/* We have exactly two bitmaps. */
@@ -154,7 +152,7 @@ struct vblk_dgrp {			/* VBLK Disk Group */
 };
 
 struct vblk_disk {			/* VBLK Disk */
-	u8	disk_id[GUID_SIZE];
+	uuid_t	disk_id;
 	u8	alt_name[128];
 };
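
The LDM changes above replace raw 16-byte GUID buffers and memcmp() with
uuid_t, uuid_parse() and uuid_equal(). As background, here is a stand-alone
sketch of what such helpers do (parsing the canonical text form into 16 bytes
and comparing); my_uuid_t, my_uuid_parse() and my_uuid_equal() are made-up
names, not the kernel's lib/uuid.c.

#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[16]; } my_uuid_t;

static int my_uuid_parse(const char *s, my_uuid_t *u)
{
	int i, si = 0;

	if (strlen(s) != 36 ||
	    s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-')
		return -1;

	for (i = 0; i < 16; i++) {
		unsigned int byte;

		if (s[si] == '-')	/* step over the group separators */
			si++;
		if (sscanf(s + si, "%2x", &byte) != 1)
			return -1;
		u->b[i] = (unsigned char)byte;
		si += 2;
	}
	return 0;
}

static int my_uuid_equal(const my_uuid_t *a, const my_uuid_t *b)
{
	return memcmp(a->b, b->b, 16) == 0;
}

int main(void)
{
	my_uuid_t a, b;

	if (my_uuid_parse("12345678-9abc-def0-1234-56789abcdef0", &a) ||
	    my_uuid_parse("12345678-9abc-def0-1234-56789abcdef0", &b))
		return 1;
	printf("equal: %d\n", my_uuid_equal(&a, &b));
	return 0;
}
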
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 4a294a5..7440de4 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -326,7 +326,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	req = scsi_req(rq);
-	scsi_req_init(rq);
 
 	if (hdr->cmd_len > BLK_MAX_CDB) {
 		req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
@@ -456,7 +455,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		goto error_free_buffer;
 	}
 	req = scsi_req(rq);
-	scsi_req_init(rq);
 
 	cmdlen = COMMAND_SIZE(opcode);
 
@@ -542,7 +540,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	scsi_req_init(rq);
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	scsi_req(rq)->cmd[0] = cmd;
 	scsi_req(rq)->cmd[4] = data;
@@ -744,10 +741,14 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
 }
 EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
 
-void scsi_req_init(struct request *rq)
+/**
+ * scsi_req_init - initialize certain fields of a scsi_request structure
+ * @req: Pointer to a scsi_request structure.
+ * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
+ * of struct scsi_request.
+ */
+void scsi_req_init(struct scsi_request *req)
 {
-	struct scsi_request *req = scsi_req(rq);
-
 	memset(req->__cmd, 0, sizeof(req->__cmd));
 	req->cmd = req->__cmd;
 	req->cmd_len = BLK_MAX_CDB;
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 680c6d6..3416dad 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-			   unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
 
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}
 
-	return 0;
+	return BLK_STS_OK;
 }
 
-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-				unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
 
@@ -91,7 +91,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 				       "(rcvd %u)\n", iter->disk_name,
 				       (unsigned long long)
 				       iter->seed, be32_to_cpu(pi->ref_tag));
-				return -EILSEQ;
+				return BLK_STS_PROTECTION;
 			}
 			break;
 		case 3:
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 			       "(rcvd %04x, want %04x)\n", iter->disk_name,
 			       (unsigned long long)iter->seed,
 			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		}
 
 next:
@@ -117,45 +117,45 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}
 
-	return 0;
+	return BLK_STS_OK;
 }
 
-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }
 
-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
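
The t10-pi.c hunk above only changes the generate/verify helpers' return type
to blk_status_t. For context, the guard tag those helpers compute in the CRC
variant is, to my understanding, a CRC-16 over the sector data with polynomial
0x8BB7; the kernel uses the table-driven crc_t10dif() for this. A bit-at-a-time
sketch of that checksum (crc16_t10dif() here is a local illustration, not the
kernel function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t crc16_t10dif(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t sector[512];

	/* pretend sector payload; the guard tag goes into the PI tuple */
	memset(sector, 0xa5, sizeof(sector));
	printf("guard tag: 0x%04x\n", crc16_t10dif(0, sector, sizeof(sector)));
	return 0;
}
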
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d3a989e..3cd6e12 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
 	 * signature and returns that to us.
 	 */
 	ret = crypto_akcipher_verify(req);
-	if (ret == -EINPROGRESS) {
+	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
 		wait_for_completion(&compl.completion);
 		ret = compl.err;
 	}
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 672a94c..d178650 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -381,7 +381,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
 	}
 
 error:
-	kfree(desc);
+	kzfree(desc);
 error_no_desc:
 	crypto_free_shash(tfm);
 	kleave(" = %d", ret);
@@ -450,6 +450,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen,
 	ret = pefile_digest_pe(pebuf, pelen, &ctx);
 
 error:
-	kfree(ctx.digest);
+	kzfree(ctx.digest);
 	return ret;
 }
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b..dd03fea 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
 		}
 	}
 
+	ret = -ENOMEM;
 	cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
 	if (!cert->pub->key)
 		goto error_decode;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fa749f4..cdb27ac 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&drbg->ctr_completion);
-			if (!ret && !drbg->ctr_async_err) {
+			wait_for_completion(&drbg->ctr_completion);
+			if (!drbg->ctr_async_err) {
 				reinit_completion(&drbg->ctr_completion);
 				break;
 			}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b7ad808..3841b5e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 
 	err = crypto_skcipher_encrypt(&data->req);
 	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
+		wait_for_completion(&data->result.completion);
+		err = data->result.err;
 	}
 
 	if (err)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index ba2901e..505c676 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -206,4 +206,6 @@
 
 source "drivers/tee/Kconfig"
 
+source "drivers/mux/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index cfabd14..dfdcda0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -181,3 +181,4 @@
 obj-$(CONFIG_FPGA)		+= fpga/
 obj-$(CONFIG_FSI)		+= fsi/
 obj-$(CONFIG_TEE)		+= tee/
+obj-$(CONFIG_MULTIPLEXER)	+= mux/
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 146a77fb..853bc7f 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -15,11 +15,15 @@
 #include <linux/configfs.h>
 #include <linux/acpi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/actables.h"
+
 static struct config_group *acpi_table_group;
 
 struct acpi_table {
 	struct config_item cfg;
 	struct acpi_table_header *header;
+	u32 index;
 };
 
 static ssize_t acpi_table_aml_write(struct config_item *cfg,
@@ -52,7 +56,11 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
 	if (!table->header)
 		return -ENOMEM;
 
-	ret = acpi_load_table(table->header);
+	ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
+	ret = acpi_tb_install_and_load_table(
+			ACPI_PTR_TO_PHYSADDR(table->header),
+			ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, FALSE,
+			&table->index);
 	if (ret) {
 		kfree(table->header);
 		table->header = NULL;
@@ -215,8 +223,18 @@ static struct config_item *acpi_table_make_item(struct config_group *group,
 	return &table->cfg;
 }
 
+static void acpi_table_drop_item(struct config_group *group,
+				 struct config_item *cfg)
+{
+	struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
+
+	ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
+	acpi_tb_unload_table(table->index);
+}
+
 struct configfs_group_operations acpi_table_group_ops = {
 	.make_item = acpi_table_make_item,
+	.drop_item = acpi_table_drop_item,
 };
 
 static struct config_item_type acpi_tables_type = {
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index dee8692..3ec05aa 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -10,7 +10,7 @@
  */
 
 /* #define DEBUG */
-#define pr_fmt(fmt) "ACPI : AML: " fmt
+#define pr_fmt(fmt) "ACPI: AML: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 502ea4dc..560fdae 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -141,9 +141,9 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	int	cpu = mce->extcpu;
 	struct acpi_hest_generic_status *estatus, *tmp;
 	struct acpi_hest_generic_data *gdata;
-	const uuid_le *fru_id = &NULL_UUID_LE;
+	const guid_t *fru_id = &guid_null;
 	char *fru_text = "";
-	uuid_le *sec_type;
+	guid_t *sec_type;
 	static u32 err_seq;
 
 	estatus = extlog_elog_entry_check(cpu, bank);
@@ -165,11 +165,11 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 	err_seq++;
 	gdata = (struct acpi_hest_generic_data *)(tmp + 1);
 	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-		fru_id = (uuid_le *)gdata->fru_id;
+		fru_id = (guid_t *)gdata->fru_id;
 	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
 		fru_text = gdata->fru_text;
-	sec_type = (uuid_le *)gdata->section_type;
-	if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+	sec_type = (guid_t *)gdata->section_type;
+	if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
 		struct cper_sec_mem_err *mem = (void *)(gdata + 1);
 		if (gdata->error_data_length >= sizeof(*mem))
 			trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
@@ -182,17 +182,17 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 
 static bool __init extlog_get_l1addr(void)
 {
-	u8 uuid[16];
+	guid_t guid;
 	acpi_handle handle;
 	union acpi_object *obj;
 
-	acpi_str_to_uuid(extlog_dsm_uuid, uuid);
-
+	if (guid_parse(extlog_dsm_uuid, &guid))
+		return false;
 	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
 		return false;
-	if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
+	if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
 		return false;
-	obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
+	obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
 				      EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
 	if (!obj) {
 		return false;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index dea6530..b125bdd 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -172,6 +172,7 @@
 	utosi.o		\
 	utownerid.o	\
 	utpredef.o	\
+	utresdecode.o	\
 	utresrc.o	\
 	utstate.o	\
 	utstring.o	\
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index b65f273..bb6a84b 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -158,8 +158,8 @@ acpi_dm_finish_namespace_load(union acpi_parse_object *parse_tree_root,
 			      acpi_owner_id owner_id);
 
 void
-acpi_dm_convert_resource_indexes(union acpi_parse_object *parse_tree_root,
-				 struct acpi_namespace_node *namespace_root);
+acpi_dm_convert_parse_objects(union acpi_parse_object *parse_tree_root,
+			      struct acpi_namespace_node *namespace_root);
 
 /*
  * adfile
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index abe8c31..95eed44 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -315,6 +315,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_emit_external_opcodes, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_do_disassembler_optimizations, TRUE);
+ACPI_INIT_GLOBAL(ACPI_PARSE_OBJECT_LIST, *acpi_gbl_temp_list_head, NULL);
 
 ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
 ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
@@ -368,6 +369,8 @@ ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
 ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
 ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
 
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
+
 #endif
 
 /*
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index f9b3f7f..8ddd3b2 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -859,7 +859,7 @@ ACPI_PARSE_COMMON};
  * and bytelists.
  */
 struct acpi_parse_obj_named {
-	ACPI_PARSE_COMMON u8 *path;
+	ACPI_PARSE_COMMON char *path;
 	u8 *data;		/* AML body or bytelist data */
 	u32 length;		/* AML length */
 	u32 name;		/* 4-byte name or zero if no name */
@@ -1142,8 +1142,13 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
 #define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
 #define ACPI_RESOURCE_NAME_GPIO                 0x8C
+#define ACPI_RESOURCE_NAME_PIN_FUNCTION         0x8D
 #define ACPI_RESOURCE_NAME_SERIAL_BUS           0x8E
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8E
+#define ACPI_RESOURCE_NAME_PIN_CONFIG           0x8F
+#define ACPI_RESOURCE_NAME_PIN_GROUP            0x90
+#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION   0x91
+#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG     0x92
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x92
 
 /*****************************************************************************
  *
@@ -1176,12 +1181,18 @@ struct acpi_external_list {
 #define ACPI_EXT_INTERNAL_PATH_ALLOCATED    0x04	/* Deallocate internal path on completion */
 #define ACPI_EXT_EXTERNAL_EMITTED           0x08	/* External() statement has been emitted */
 #define ACPI_EXT_ORIGIN_FROM_OPCODE         0x10	/* External came from a External() opcode */
+#define ACPI_EXT_CONFLICTING_DECLARATION    0x20	/* External has a conflicting declaration within AML */
 
 struct acpi_external_file {
 	char *path;
 	struct acpi_external_file *next;
 };
 
+struct acpi_parse_object_list {
+	union acpi_parse_object *op;
+	struct acpi_parse_object_list *next;
+};
+
 /*****************************************************************************
  *
  * Debugger
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index a5d9af7..cbd59a3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -112,7 +112,7 @@
 #define ARGP_DWORD_OP                   ARGP_LIST1 (ARGP_DWORDDATA)
 #define ARGP_ELSE_OP                    ARGP_LIST2 (ARGP_PKGLENGTH,  ARGP_TERMLIST)
 #define ARGP_EVENT_OP                   ARGP_LIST1 (ARGP_NAME)
-#define ARGP_EXTERNAL_OP                ARGP_LIST3 (ARGP_NAMESTRING, ARGP_BYTEDATA,      ARGP_BYTEDATA)
+#define ARGP_EXTERNAL_OP                ARGP_LIST3 (ARGP_NAME,       ARGP_BYTEDATA,      ARGP_BYTEDATA)
 #define ARGP_FATAL_OP                   ARGP_LIST3 (ARGP_BYTEDATA,   ARGP_DWORDDATA,     ARGP_TERMARG)
 #define ARGP_FIELD_OP                   ARGP_LIST4 (ARGP_PKGLENGTH,  ARGP_NAMESTRING,    ARGP_BYTEDATA,  ARGP_FIELDLIST)
 #define ARGP_FIND_SET_LEFT_BIT_OP       ARGP_LIST2 (ARGP_TERMARG,    ARGP_TARGET)
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index dcfc05d..cdfcad8 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -581,6 +581,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
 	{{"_HID", METHOD_0ARGS,
 	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
 
+	{{"_HMA", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
 	{{"_HOT", METHOD_0ARGS,
 	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
@@ -626,6 +629,19 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
 		     ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER | ACPI_RTYPE_STRING,
 		     10, 0),
 
+	{{"_LSI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
+	{{"_LSR", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 1,
+		     ACPI_RTYPE_BUFFER, 1, 0),
+
+	{{"_LSW",
+	  METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_BUFFER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
 	{{"_MAT", METHOD_0ARGS,
 	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index b4d22f6..438f309 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -148,7 +148,10 @@ typedef enum {
 	ACPI_RSD_UINT16,
 	ACPI_RSD_UINT32,
 	ACPI_RSD_UINT64,
-	ACPI_RSD_WORDLIST
+	ACPI_RSD_WORDLIST,
+	ACPI_RSD_LABEL,
+	ACPI_RSD_SOURCE_LABEL,
+
 } ACPI_RSDUMP_OPCODES;
 
 /* restore default alignment */
@@ -329,6 +332,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
 extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
 extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
 extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_pin_function[];
+extern struct acpi_rsconvert_info acpi_rs_convert_pin_config[];
+extern struct acpi_rsconvert_info acpi_rs_convert_pin_group[];
+extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[];
+extern struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[];
 
 /* These resources require separate get/set tables */
 
@@ -372,12 +380,17 @@ extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
 extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
 extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
 extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_pin_function[];
 extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
 extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_general_flags[];
+extern struct acpi_rsdump_info acpi_rs_dump_pin_config[];
+extern struct acpi_rsdump_info acpi_rs_dump_pin_group[];
+extern struct acpi_rsdump_info acpi_rs_dump_pin_group_function[];
+extern struct acpi_rsdump_info acpi_rs_dump_pin_group_config[];
 #endif
 
 #endif				/* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 6f28cfa..2a3cc429 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -85,6 +85,7 @@ extern const char *acpi_gbl_bpb_decode[];
 extern const char *acpi_gbl_sb_decode[];
 extern const char *acpi_gbl_fc_decode[];
 extern const char *acpi_gbl_pt_decode[];
+extern const char *acpi_gbl_ptyp_decode[];
 #endif
 
 /*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 176f7e9..f54dc5a 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -313,6 +313,11 @@
  *      #A is the number of required arguments
  *      #T is the number of target operands
  *      #R indicates whether there is a return value
+ *
+ * These types are used for the top-level dispatch of the AML
+ * opcode. They group similar operators that can share common
+ * front-end code before dispatch to the final code that implements
+ * the operator.
  */
 
 /*
@@ -353,42 +358,42 @@
  * The opcode Type is used in a dispatch table, do not change
  * or add anything new without updating the table.
  */
-#define AML_TYPE_EXEC_0A_0T_1R      0x00
-#define AML_TYPE_EXEC_1A_0T_0R      0x01	/* Monadic1  */
-#define AML_TYPE_EXEC_1A_0T_1R      0x02	/* Monadic2  */
-#define AML_TYPE_EXEC_1A_1T_0R      0x03
-#define AML_TYPE_EXEC_1A_1T_1R      0x04	/* monadic2_r */
-#define AML_TYPE_EXEC_2A_0T_0R      0x05	/* Dyadic1   */
-#define AML_TYPE_EXEC_2A_0T_1R      0x06	/* Dyadic2   */
-#define AML_TYPE_EXEC_2A_1T_1R      0x07	/* dyadic2_r  */
-#define AML_TYPE_EXEC_2A_2T_1R      0x08
-#define AML_TYPE_EXEC_3A_0T_0R      0x09
-#define AML_TYPE_EXEC_3A_1T_1R      0x0A
-#define AML_TYPE_EXEC_6A_0T_1R      0x0B
+#define AML_TYPE_EXEC_0A_0T_1R      0x00	/* 0 Args, 0 Target, 1 ret_val */
+#define AML_TYPE_EXEC_1A_0T_0R      0x01	/* 1 Args, 0 Target, 0 ret_val */
+#define AML_TYPE_EXEC_1A_0T_1R      0x02	/* 1 Args, 0 Target, 1 ret_val */
+#define AML_TYPE_EXEC_1A_1T_0R      0x03	/* 1 Args, 1 Target, 0 ret_val */
+#define AML_TYPE_EXEC_1A_1T_1R      0x04	/* 1 Args, 1 Target, 1 ret_val */
+#define AML_TYPE_EXEC_2A_0T_0R      0x05	/* 2 Args, 0 Target, 0 ret_val */
+#define AML_TYPE_EXEC_2A_0T_1R      0x06	/* 2 Args, 0 Target, 1 ret_val */
+#define AML_TYPE_EXEC_2A_1T_1R      0x07	/* 2 Args, 1 Target, 1 ret_val */
+#define AML_TYPE_EXEC_2A_2T_1R      0x08	/* 2 Args, 2 Target, 1 ret_val */
+#define AML_TYPE_EXEC_3A_0T_0R      0x09	/* 3 Args, 0 Target, 0 ret_val */
+#define AML_TYPE_EXEC_3A_1T_1R      0x0A	/* 3 Args, 1 Target, 1 ret_val */
+#define AML_TYPE_EXEC_6A_0T_1R      0x0B	/* 6 Args, 0 Target, 1 ret_val */
 /* End of types used in dispatch table */
 
-#define AML_TYPE_LITERAL            0x0B
-#define AML_TYPE_CONSTANT           0x0C
-#define AML_TYPE_METHOD_ARGUMENT    0x0D
-#define AML_TYPE_LOCAL_VARIABLE     0x0E
-#define AML_TYPE_DATA_TERM          0x0F
+#define AML_TYPE_LITERAL            0x0C
+#define AML_TYPE_CONSTANT           0x0D
+#define AML_TYPE_METHOD_ARGUMENT    0x0E
+#define AML_TYPE_LOCAL_VARIABLE     0x0F
+#define AML_TYPE_DATA_TERM          0x10
 
 /* Generic for an op that returns a value */
 
-#define AML_TYPE_METHOD_CALL        0x10
+#define AML_TYPE_METHOD_CALL        0x11
 
 /* Miscellaneous types */
 
-#define AML_TYPE_CREATE_FIELD       0x11
-#define AML_TYPE_CREATE_OBJECT      0x12
-#define AML_TYPE_CONTROL            0x13
-#define AML_TYPE_NAMED_NO_OBJ       0x14
-#define AML_TYPE_NAMED_FIELD        0x15
-#define AML_TYPE_NAMED_SIMPLE       0x16
-#define AML_TYPE_NAMED_COMPLEX      0x17
-#define AML_TYPE_RETURN             0x18
-#define AML_TYPE_UNDEFINED          0x19
-#define AML_TYPE_BOGUS              0x1A
+#define AML_TYPE_CREATE_FIELD       0x12
+#define AML_TYPE_CREATE_OBJECT      0x13
+#define AML_TYPE_CONTROL            0x14
+#define AML_TYPE_NAMED_NO_OBJ       0x15
+#define AML_TYPE_NAMED_FIELD        0x16
+#define AML_TYPE_NAMED_SIMPLE       0x17
+#define AML_TYPE_NAMED_COMPLEX      0x18
+#define AML_TYPE_RETURN             0x19
+#define AML_TYPE_UNDEFINED          0x1A
+#define AML_TYPE_BOGUS              0x1B
 
 /* AML Package Length encodings */
 
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 653a3d1..1236e9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -65,6 +65,7 @@
 #define ACPI_RESTAG_DRIVESTRENGTH               "_DRS"
 #define ACPI_RESTAG_ENDIANNESS                  "_END"
 #define ACPI_RESTAG_FLOWCONTROL                 "_FLC"
+#define ACPI_RESTAG_FUNCTION                    "_FUN"
 #define ACPI_RESTAG_GRANULARITY                 "_GRA"
 #define ACPI_RESTAG_INTERRUPT                   "_INT"
 #define ACPI_RESTAG_INTERRUPTLEVEL              "_LL_"	/* active_lo(1), active_hi(0) */
@@ -84,6 +85,8 @@
 #define ACPI_RESTAG_PHASE                       "_PHA"
 #define ACPI_RESTAG_PIN                         "_PIN"
 #define ACPI_RESTAG_PINCONFIG                   "_PPI"
+#define ACPI_RESTAG_PINCONFIG_TYPE              "_TYP"
+#define ACPI_RESTAG_PINCONFIG_VALUE             "_VAL"
 #define ACPI_RESTAG_POLARITY                    "_POL"
 #define ACPI_RESTAG_REGISTERBITOFFSET           "_RBO"
 #define ACPI_RESTAG_REGISTERBITWIDTH            "_RBW"
@@ -404,6 +407,102 @@ struct aml_resource_uart_serialbus {
 #define AML_RESOURCE_UART_TYPE_REVISION         1	/* ACPI 5.0 */
 #define AML_RESOURCE_UART_MIN_DATA_LEN          10
 
+struct aml_resource_pin_function {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u16 flags;
+	u8 pin_config;
+	u16 function_number;
+	u16 pin_table_offset;
+	u8 res_source_index;
+	u16 res_source_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) PIN list (Words)
+	 * 2) Resource Source String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_PIN_FUNCTION_REVISION      1	/* ACPI 6.2 */
+
+struct aml_resource_pin_config {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u16 flags;
+	u8 pin_config_type;
+	u32 pin_config_value;
+	u16 pin_table_offset;
+	u8 res_source_index;
+	u16 res_source_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) PIN list (Words)
+	 * 2) Resource Source String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_PIN_CONFIG_REVISION      1	/* ACPI 6.2 */
+
+struct aml_resource_pin_group {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u16 flags;
+	u16 pin_table_offset;
+	u16 label_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) PIN list (Words)
+	 * 2) Resource Label String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_PIN_GROUP_REVISION      1	/* ACPI 6.2 */
+
+struct aml_resource_pin_group_function {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u16 flags;
+	u16 function_number;
+	u8 res_source_index;
+	u16 res_source_offset;
+	u16 res_source_label_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) Resource Source String
+	 * 2) Resource Source Label String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_PIN_GROUP_FUNCTION_REVISION    1	/* ACPI 6.2 */
+
+struct aml_resource_pin_group_config {
+	AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+	u16 flags;
+	u8 pin_config_type;
+	u32 pin_config_value;
+	u8 res_source_index;
+	u16 res_source_offset;
+	u16 res_source_label_offset;
+	u16 vendor_offset;
+	u16 vendor_length;
+	/*
+	 * Optional fields follow immediately:
+	 * 1) Resource Source String
+	 * 2) Resource Source Label String
+	 * 3) Vendor Data bytes
+	 */
+};
+
+#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION    1	/* ACPI 6.2 */
+
 /* restore default alignment */
 
 #pragma pack()
@@ -446,6 +545,11 @@ union aml_resource {
 	struct aml_resource_spi_serialbus spi_serial_bus;
 	struct aml_resource_uart_serialbus uart_serial_bus;
 	struct aml_resource_common_serialbus common_serial_bus;
+	struct aml_resource_pin_function pin_function;
+	struct aml_resource_pin_config pin_config;
+	struct aml_resource_pin_group pin_group;
+	struct aml_resource_pin_group_function pin_group_function;
+	struct aml_resource_pin_group_config pin_group_config;
 
 	/* Utility overlays */
 
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index b611cd9..3b30319 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -181,6 +181,18 @@ acpi_db_execute_method(struct acpi_db_method_info *info,
 	acpi_gbl_method_executing = FALSE;
 
 	if (ACPI_FAILURE(status)) {
+		if ((status == AE_ABORT_METHOD) || acpi_gbl_abort_method) {
+
+			/* Clear the abort and fall back to the debugger prompt */
+
+			ACPI_EXCEPTION((AE_INFO, status,
+					"Aborting top-level method"));
+
+			acpi_gbl_abort_method = FALSE;
+			status = AE_OK;
+			goto cleanup;
+		}
+
 		ACPI_EXCEPTION((AE_INFO, status,
 				"while executing %s from debugger",
 				info->pathname));
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index f2252b1..e7b415c 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -448,7 +448,7 @@ void acpi_db_decode_locals(struct acpi_walk_state *walk_state)
 
 	if (display_locals) {
 		acpi_os_printf
-		    ("\nInitialized Local Variables for method [%4.4s]:\n",
+		    ("\nInitialized Local Variables for Method [%4.4s]:\n",
 		     acpi_ut_get_node_name(node));
 
 		for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) {
@@ -461,7 +461,7 @@ void acpi_db_decode_locals(struct acpi_walk_state *walk_state)
 		}
 	} else {
 		acpi_os_printf
-		    ("No Local Variables are initialized for method [%4.4s]\n",
+		    ("No Local Variables are initialized for Method [%4.4s]\n",
 		     acpi_ut_get_node_name(node));
 	}
 }
@@ -515,7 +515,7 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state)
 		acpi_os_printf("Initialized Arguments for Method [%4.4s]:  "
 			       "(%X arguments defined for method invocation)\n",
 			       acpi_ut_get_node_name(node),
-			       obj_desc->method.param_count);
+			       node->object->method.param_count);
 
 		for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) {
 			obj_desc = walk_state->arguments[i].object;
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 8f665d9..b698532 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -244,7 +244,7 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
 		if ((acpi_gbl_db_output_to_file) ||
 		    (acpi_dbg_level & ACPI_LV_PARSE)) {
 			acpi_os_printf
-			    ("\n[AmlDebug] Next AML Opcode to execute:\n");
+			    ("\nAML Debug: Next AML Opcode to execute:\n");
 		}
 
 		/*
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 287b3fd..2873455 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -82,7 +82,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
 	union acpi_parse_object *op;
 	struct acpi_walk_state *walk_state;
 
-	ACPI_FUNCTION_TRACE(ds_execute_arguments);
+	ACPI_FUNCTION_TRACE_PTR(ds_execute_arguments, aml_start);
 
 	/* Allocate a new parser op to be the root of the parsed tree */
 
@@ -338,7 +338,8 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
 		return_ACPI_STATUS(AE_AML_INTERNAL);
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Argument Init, AML Ptr: %p\n",
+			  obj_desc->package.aml_start));
 
 	/* Execute the AML code for the term_arg arguments */
 
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index 4d885eb..d1f457e 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -196,6 +196,7 @@ acpi_ds_dump_method_stack(acpi_status status,
 				op->common.next = NULL;
 
 #ifdef ACPI_DISASSEMBLER
+				acpi_os_printf("Failed at ");
 				acpi_dm_disassemble(next_walk_state, op,
 						    ACPI_UINT32_MAX);
 #endif
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 31c9c7a..d7fc369 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -212,6 +212,7 @@ acpi_status
 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 {
 	u32 aml_offset;
+	acpi_name name = 0;
 
 	ACPI_FUNCTION_ENTRY();
 
@@ -237,10 +238,13 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 						walk_state->parser_state.
 						aml_start);
 
-		status = acpi_gbl_exception_handler(status,
-						    walk_state->method_node ?
-						    walk_state->method_node->
-						    name.integer : 0,
+		if (walk_state->method_node) {
+			name = walk_state->method_node->name.integer;
+		} else if (walk_state->deferred_node) {
+			name = walk_state->deferred_node->name.integer;
+		}
+
+		status = acpi_gbl_exception_handler(status, name,
 						    walk_state->opcode,
 						    aml_offset, NULL);
 		acpi_ex_enter_interpreter();
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 9a8f8a9..dfc3c25 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -227,13 +227,12 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
 
 	/* Entire field must fit within the current length of the buffer */
 
-	if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
+	if ((bit_offset + bit_count) > (8 * (u32)buffer_desc->buffer.length)) {
 		ACPI_ERROR((AE_INFO,
-			    "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
-			    acpi_ut_get_node_name(result_desc),
-			    bit_offset + bit_count,
-			    acpi_ut_get_node_name(buffer_desc->buffer.node),
-			    8 * (u32) buffer_desc->buffer.length));
+			    "Field [%4.4s] at bit offset/length %u/%u "
+			    "exceeds size of target Buffer (%u bits)",
+			    acpi_ut_get_node_name(result_desc), bit_offset,
+			    bit_count, 8 * (u32)buffer_desc->buffer.length));
 		status = AE_AML_BUFFER_LIMIT;
 		goto cleanup;
 	}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 406edec..0dabd9b 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -633,15 +633,6 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
 
 		if ((op_info->flags & AML_HAS_RETVAL) ||
 		    (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-					  "Argument previously created, already stacked\n"));
-
-			acpi_db_display_argument_object(walk_state->
-							operands[walk_state->
-								 num_operands -
-								 1],
-							walk_state);
-
 			/*
 			 * Use value that was already previously returned
 			 * by the evaluation of this argument
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a2ff8ad..20d7744 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -576,8 +576,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 		case AML_TYPE_CREATE_OBJECT:
 
 			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-					  "Executing CreateObject (Buffer/Package) Op=%p\n",
-					  op));
+					  "Executing CreateObject (Buffer/Package) Op=%p AMLPtr=%p\n",
+					  op, op->named.data));
 
 			switch (op->common.parent->common.aml_opcode) {
 			case AML_NAME_OP:
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index cafb3ab5..eaa859a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -397,7 +397,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
 	/* Initialize the op */
 
 #if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
-	op->named.path = ACPI_CAST_PTR(u8, path);
+	op->named.path = path;
 #endif
 
 	if (node) {
@@ -434,6 +434,10 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 	acpi_object_type object_type;
 	acpi_status status = AE_OK;
 
+#ifdef ACPI_ASL_COMPILER
+	u8 param_count;
+#endif
+
 	ACPI_FUNCTION_TRACE(ds_load1_end_op);
 
 	op = walk_state->op;
@@ -514,6 +518,38 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 			}
 		}
 	}
+#ifdef ACPI_ASL_COMPILER
+	/*
+	 * For external opcode, get the object type from the argument and
+	 * get the parameter count from the argument's next.
+	 */
+	if (acpi_gbl_disasm_flag &&
+	    op->common.node && op->common.aml_opcode == AML_EXTERNAL_OP) {
+		/*
+		 * Note: if this external is not a method,
+		 * Op->Common.Value.Arg->Common.Next->Common.Value.Integer == 0,
+		 * and therefore param_count will be 0.
+		 */
+		param_count =
+		    (u8)op->common.value.arg->common.next->common.value.integer;
+		object_type = (u8)op->common.value.arg->common.value.integer;
+		op->common.node->flags |= ANOBJ_IS_EXTERNAL;
+		op->common.node->type = (u8)object_type;
+
+		acpi_dm_create_subobject_for_external((u8)object_type,
+						      &op->common.node,
+						      param_count);
+
+		/*
+		 * Add the external to the external list because we may be
+		 * emitting code based off of the items within the external list.
+		 */
+		acpi_dm_add_op_to_external_list(op, op->named.path,
+						(u8)object_type, param_count,
+						ACPI_EXT_ORIGIN_FROM_OPCODE |
+						ACPI_EXT_RESOLVED_REFERENCE);
+	}
+#endif
 
 	/*
 	 * If we are executing a method, do not create any namespace objects
@@ -563,7 +599,9 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 
 	/* Pop the scope stack (only if loading a table) */
 
-	if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
+	if (!walk_state->method_node &&
+	    op->common.aml_opcode != AML_EXTERNAL_OP &&
+	    acpi_ns_opens_scope(object_type)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 				  "(%s): Popping scope for Op %p\n",
 				  acpi_ut_get_type_name(object_type), op));
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 8d510c7..aad83ef 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -310,6 +310,22 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 				flags |= ACPI_NS_TEMPORARY;
 			}
 		}
+#ifdef ACPI_ASL_COMPILER
+
+		/*
+		 * Do not open a scope for AML_EXTERNAL_OP.
+		 * acpi_ns_lookup can open a new scope based on the object
+		 * type of this op, but AML_EXTERNAL_OP is a declaration
+		 * rather than a definition. Even when the external refers
+		 * to a method object (ACPI_TYPE_METHOD), no new scope
+		 * should be created for it. The ACPI_NS_DONT_OPEN_SCOPE
+		 * flag is therefore set here to avoid opening a scope for
+		 * any AML_EXTERNAL_OP.
+		 */
+		if (walk_state->opcode == AML_EXTERNAL_OP) {
+			flags |= ACPI_NS_DONT_OPEN_SCOPE;
+		}
+#endif
 
 		/* Add new entry or lookup existing entry */
 
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 82e8971..c773ac4 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
 
 	ACPI_FUNCTION_TRACE(acpi_enable_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
 
 	ACPI_FUNCTION_TRACE(acpi_disable_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
 
 	ACPI_FUNCTION_TRACE(acpi_clear_event);
 
+	/* If Hardware Reduced flag is set, there are no fixed events */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Decode the Fixed Event */
 
 	if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index ec614f5..a8191d2 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -117,10 +117,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 			timer = ((u32)acpi_os_get_timer() / 10);
 			timer &= 0x03FFFFFF;
 
-			acpi_os_printf("[ACPI Debug T=0x%8.8X] %*s", timer,
+			acpi_os_printf("ACPI Debug: T=0x%8.8X %*s", timer,
 				       level, " ");
 		} else {
-			acpi_os_printf("[ACPI Debug] %*s", level, " ");
+			acpi_os_printf("ACPI Debug: %*s", level, " ");
 		}
 	}
 
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 970dc6c..44092f7 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -645,10 +645,12 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
 	/* obj_desc is a valid object */
 
 	if (depth > 0) {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p ",
-				  depth, " ", depth, obj_desc));
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p Refs=%u ",
+				  depth, " ", depth, obj_desc,
+				  obj_desc->common.reference_count));
 	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p ", obj_desc));
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Refs=%u ",
+				  obj_desc, obj_desc->common.reference_count));
 	}
 
 	/* Decode object type */
@@ -690,8 +692,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
 
 		case ACPI_REFCLASS_NAME:
 
-			acpi_os_printf("- [%4.4s]\n",
-				       obj_desc->reference.node->name.ascii);
+			acpi_ut_repair_name(obj_desc->reference.node->name.
+					    ascii);
+			acpi_os_printf("- [%4.4s] (Node %p)\n",
+				       obj_desc->reference.node->name.ascii,
+				       obj_desc->reference.node);
 			break;
 
 		case ACPI_REFCLASS_ARG:
@@ -999,9 +1004,15 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
 		status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
 						    &ret_buf, TRUE);
 		if (ACPI_FAILURE(status)) {
-			acpi_os_printf(" Could not convert name to pathname\n");
+			acpi_os_printf
+			    (" Could not convert name to pathname: %s\n",
+			     acpi_format_exception(status));
 		} else {
-			acpi_os_printf("%s\n", (char *)ret_buf.pointer);
+			acpi_os_printf("%s: %s\n",
+				       acpi_ut_get_type_name(obj_desc->
+							     reference.node->
+							     type),
+				       (char *)ret_buf.pointer);
 			ACPI_FREE(ret_buf.pointer);
 		}
 	} else if (obj_desc->reference.object) {
@@ -1111,9 +1122,8 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
 
 	case ACPI_TYPE_LOCAL_REFERENCE:
 
-		acpi_os_printf("[Object Reference] Type [%s] %2.2X",
-			       acpi_ut_get_reference_name(obj_desc),
-			       obj_desc->reference.class);
+		acpi_os_printf("[Object Reference] Class [%s]",
+			       acpi_ut_get_reference_name(obj_desc));
 		acpi_ex_dump_reference_obj(obj_desc);
 		break;
 
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index e327349..f787651 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -921,13 +921,26 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
 			 * This is a deref_of (object_reference)
 			 * Get the actual object from the Node (This is the dereference).
 			 * This case may only happen when a local_x or arg_x is
-			 * dereferenced above.
+			 * dereferenced above, or for references to device and
+			 * thermal objects.
 			 */
-			return_desc = acpi_ns_get_attached_object((struct
-								   acpi_namespace_node
-								   *)
-								  operand[0]);
-			acpi_ut_add_reference(return_desc);
+			switch (((struct acpi_namespace_node *)operand[0])->
+				type) {
+			case ACPI_TYPE_DEVICE:
+			case ACPI_TYPE_THERMAL:
+
+				/* These types have no node subobject, return the NS node */
+
+				return_desc = operand[0];
+				break;
+
+			default:
+				/* For most types, get the object attached to the node */
+
+				return_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)operand[0]);
+				acpi_ut_add_reference(return_desc);
+				break;
+			}
 		} else {
 			/*
 			 * This must be a reference object produced by either the
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index aa8c6fd..5e1854ea 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -368,11 +368,24 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
 								*)obj_desc);
 		}
 
-		if (!obj_desc) {
-			ACPI_ERROR((AE_INFO,
-				    "[%4.4s] Node is unresolved or uninitialized",
-				    acpi_ut_get_node_name(node)));
-			return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
+		switch (type) {
+		case ACPI_TYPE_DEVICE:
+		case ACPI_TYPE_THERMAL:
+
+			/* These types have no attached subobject */
+			break;
+
+		default:
+
+			/* All other types require a subobject */
+
+			if (!obj_desc) {
+				ACPI_ERROR((AE_INFO,
+					    "[%4.4s] Node is unresolved or uninitialized",
+					    acpi_ut_get_node_name(node)));
+				return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
+			}
+			break;
 		}
 		break;
 
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 5733b11..7ef1393 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -70,11 +70,15 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
 
 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
-	 acpi_hw_extended_sleep},
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
-	 acpi_hw_extended_wake_prep},
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
+	{ACPI_STRUCT_INIT(legacy_function,
+			  ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
+	 ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) },
+	{ACPI_STRUCT_INIT(legacy_function,
+			  ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
+	 ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) },
+	{ACPI_STRUCT_INIT(legacy_function,
+			  ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
+	 ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) }
 };
 
 /*
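The sleep dispatch table above now uses ACPI_STRUCT_INIT so that each handler is bound to a named member of struct acpi_sleep_functions rather than relying on positional order. As a rough sketch only (the guard macro below is hypothetical; ACPICA keys the real definition off the host compiler), such a wrapper typically expands to a C99 designated initializer where supported and degrades to a plain positional value elsewhere:

	/*
	 * Sketch of a designated-initializer wrapper in the spirit of
	 * ACPI_STRUCT_INIT. HAVE_DESIGNATED_INITIALIZERS is a hypothetical
	 * guard; the real ACPICA headers select this per compiler.
	 */
	#ifdef HAVE_DESIGNATED_INITIALIZERS
	#define ACPI_STRUCT_INIT(field, value)  .field = (value)
	#else
	#define ACPI_STRUCT_INIT(field, value)  (value)
	#endif

With the designated form, reordering the members of struct acpi_sleep_functions can no longer silently swap the legacy and extended handlers.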
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index fb265b57..e5f4fa4 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -47,6 +47,10 @@
 #include "acnamesp.h"
 #include "acdispat.h"
 
+#ifdef ACPI_ASL_COMPILER
+#include "acdisasm.h"
+#endif
+
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsaccess")
 
@@ -580,6 +584,29 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
 						  (char *)&current_node->name,
 						  current_node));
 			}
+#ifdef ACPI_ASL_COMPILER
+			/*
+			 * If this ACPI name already exists within the namespace as an
+			 * external declaration, then mark the external as a conflicting
+			 * declaration and proceed to process the current node as if it did
+			 * not exist in the namespace. If this node is not processed as
+			 * normal, then it could cause improper namespace resolution
+			 * by failing to open a new scope.
+			 */
+			if (acpi_gbl_disasm_flag &&
+			    (status == AE_ALREADY_EXISTS) &&
+			    ((this_node->flags & ANOBJ_IS_EXTERNAL) ||
+			     (walk_state
+			      && walk_state->opcode == AML_EXTERNAL_OP))) {
+				this_node->flags &= ~ANOBJ_IS_EXTERNAL;
+				this_node->type = (u8)this_search_type;
+				if (walk_state->opcode != AML_EXTERNAL_OP) {
+					acpi_dm_mark_external_conflict
+					    (this_node);
+				}
+				break;
+			}
+#endif
 
 			*return_node = this_node;
 			return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 3db9ca2..aa16aea 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -190,9 +190,6 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
 
 	(void)acpi_ns_build_normalized_path(node, buffer->pointer,
 					    required_size, no_trailing);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
 			  (char *)buffer->pointer, (u32) required_size));
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 2fe87d0..b43fe5fc 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -89,7 +89,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
 			acpi_os_printf("%s ", message);
 		}
 
-		acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
+		acpi_os_printf("%s", (char *)buffer.pointer);
 		ACPI_FREE(buffer.pointer);
 	}
 }
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index c944ff5..538c616 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -85,6 +85,8 @@ acpi_evaluate_object_typed(acpi_handle handle,
 {
 	acpi_status status;
 	u8 free_buffer_on_error = FALSE;
+	acpi_handle target_handle;
+	char *full_pathname;
 
 	ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
 
@@ -98,38 +100,51 @@ acpi_evaluate_object_typed(acpi_handle handle,
 		free_buffer_on_error = TRUE;
 	}
 
-	/* Evaluate the object */
-
-	status = acpi_evaluate_object(handle, pathname,
-				      external_params, return_buffer);
+	status = acpi_get_handle(handle, pathname, &target_handle);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
 
-	/* Type ANY means "don't care" */
+	full_pathname = acpi_ns_get_external_pathname(target_handle);
+	if (!full_pathname) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Evaluate the object */
+
+	status = acpi_evaluate_object(target_handle, NULL, external_params,
+				      return_buffer);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	/* Type ANY means "don't care about return value type" */
 
 	if (return_type == ACPI_TYPE_ANY) {
-		return_ACPI_STATUS(AE_OK);
+		goto exit;
 	}
 
 	if (return_buffer->length == 0) {
 
 		/* Error because caller specifically asked for a return value */
 
-		ACPI_ERROR((AE_INFO, "No return value"));
-		return_ACPI_STATUS(AE_NULL_OBJECT);
+		ACPI_ERROR((AE_INFO, "%s did not return any object",
+			    full_pathname));
+		status = AE_NULL_OBJECT;
+		goto exit;
 	}
 
 	/* Examine the object type returned from evaluate_object */
 
 	if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
-		return_ACPI_STATUS(AE_OK);
+		goto exit;
 	}
 
 	/* Return object type does not match requested type */
 
 	ACPI_ERROR((AE_INFO,
-		    "Incorrect return type [%s] requested [%s]",
+		    "Incorrect return type from %s - received [%s], requested [%s]",
+		    full_pathname,
 		    acpi_ut_get_type_name(((union acpi_object *)return_buffer->
 					   pointer)->type),
 		    acpi_ut_get_type_name(return_type)));
@@ -147,7 +162,11 @@ acpi_evaluate_object_typed(acpi_handle handle,
 	}
 
 	return_buffer->length = 0;
-	return_ACPI_STATUS(AE_TYPE);
+	status = AE_TYPE;
+
+exit:
+	ACPI_FREE(full_pathname);
+	return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
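acpi_evaluate_object_typed() now resolves the target handle up front and builds the full pathname, so its error messages name the object that misbehaved. A minimal caller sketch, assuming a hypothetical acpi_handle obtained elsewhere, showing the typed-return contract (the call only succeeds if the returned object matches the requested type):

	#include <linux/acpi.h>
	#include <linux/slab.h>

	static void show_mat(acpi_handle dev_handle)
	{
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		/* Evaluate _MAT and require that a Buffer object is returned */
		status = acpi_evaluate_object_typed(dev_handle, "_MAT", NULL,
						    &buf, ACPI_TYPE_BUFFER);
		if (ACPI_FAILURE(status))
			return;	/* failure already logged with the full pathname */

		obj = buf.pointer;	/* guaranteed to be ACPI_TYPE_BUFFER here */
		pr_info("_MAT returned %u bytes\n", obj->buffer.length);
		kfree(buf.pointer);
	}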
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 5bcb618..ef6384e 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -122,6 +122,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
 			     (u32)(aml_offset +
 				   sizeof(struct acpi_table_header)));
 
+			ACPI_ERROR((AE_INFO,
+				    "Aborting disassembly, AML byte code is corrupt"));
+
 			/* Dump the context surrounding the invalid opcode */
 
 			acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -130,6 +133,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
 					     sizeof(struct acpi_table_header) -
 					     16));
 			acpi_os_printf(" */\n");
+
+			/*
+			 * Just abort the disassembly; we cannot continue
+			 * because the parser is essentially lost. Otherwise,
+			 * the disassembler may fail unpredictably on the
+			 * resulting ill-constructed parse tree.
+			 */
+			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
 #endif
 		}
 
@@ -331,6 +342,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
 	if (status == AE_CTRL_PARSE_CONTINUE) {
 		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
 	}
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
 
 	/* Create Op structure and append to parent's argument list */
 
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index c343a0d..a402ad7 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -650,9 +650,11 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
 
 /* ACPI 6.0 opcodes */
 
-	/* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP,
-			 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */
-			 AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R),
+/* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP,
+			 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+			 AML_TYPE_NAMED_SIMPLE,
+			 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+			 AML_NSNODE | AML_NAMED),
 /* 82 */ ACPI_OP("Comment", ARGP_COMMENT_OP, ARGI_COMMENT_OP,
 			 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
 			 AML_TYPE_LITERAL, AML_CONSTANT)
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8116a67..ac88319 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -56,6 +56,7 @@
 #include "acdispat.h"
 #include "amlcode.h"
 #include "acinterp.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psparse")
@@ -538,9 +539,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 			/* Either the method parse or actual execution failed */
 
 			acpi_ex_exit_interpreter();
-			ACPI_ERROR_METHOD("Method parse/execution failed",
-					  walk_state->method_node, NULL,
-					  status);
+			if (status == AE_ABORT_METHOD) {
+				acpi_ns_print_node_pathname(walk_state->
+							    method_node,
+							    "Method aborted:");
+				acpi_os_printf("\n");
+			} else {
+				ACPI_ERROR_METHOD
+				    ("Method parse/execution failed",
+				     walk_state->method_node, NULL, status);
+			}
 			acpi_ex_enter_interpreter();
 
 			/* Check for possible multi-thread reentrancy problem */
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 74e47f8..659fb71 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -340,6 +340,22 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
 
 			break;
 
+		case ACPI_RESOURCE_TYPE_PIN_FUNCTION:
+
+			total_size = (acpi_rs_length)(total_size +
+						      (resource->data.
+						       pin_function.
+						       pin_table_length * 2) +
+						      resource->data.
+						      pin_function.
+						      resource_source.
+						      string_length +
+						      resource->data.
+						      pin_function.
+						      vendor_length);
+
+			break;
+
 		case ACPI_RESOURCE_TYPE_SERIAL_BUS:
 
 			total_size =
@@ -359,6 +375,67 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
 
 			break;
 
+		case ACPI_RESOURCE_TYPE_PIN_CONFIG:
+
+			total_size = (acpi_rs_length)(total_size +
+						      (resource->data.
+						       pin_config.
+						       pin_table_length * 2) +
+						      resource->data.pin_config.
+						      resource_source.
+						      string_length +
+						      resource->data.pin_config.
+						      vendor_length);
+
+			break;
+
+		case ACPI_RESOURCE_TYPE_PIN_GROUP:
+
+			total_size = (acpi_rs_length)(total_size +
+						      (resource->data.pin_group.
+						       pin_table_length * 2) +
+						      resource->data.pin_group.
+						      resource_label.
+						      string_length +
+						      resource->data.pin_group.
+						      vendor_length);
+
+			break;
+
+		case ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION:
+
+			total_size = (acpi_rs_length)(total_size +
+						      resource->data.
+						      pin_group_function.
+						      resource_source.
+						      string_length +
+						      resource->data.
+						      pin_group_function.
+						      resource_source_label.
+						      string_length +
+						      resource->data.
+						      pin_group_function.
+						      vendor_length);
+
+			break;
+
+		case ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG:
+
+			total_size = (acpi_rs_length)(total_size +
+						      resource->data.
+						      pin_group_config.
+						      resource_source.
+						      string_length +
+						      resource->data.
+						      pin_group_config.
+						      resource_source_label.
+						      string_length +
+						      resource->data.
+						      pin_group_config.
+						      vendor_length);
+
+			break;
+
 		default:
 
 			break;
@@ -537,6 +614,24 @@ acpi_rs_get_list_length(u8 *aml_buffer,
 			}
 			break;
 
+		case ACPI_RESOURCE_NAME_PIN_FUNCTION:
+
+			/* Vendor data is optional */
+
+			if (aml_resource->pin_function.vendor_length) {
+				extra_struct_bytes +=
+				    aml_resource->pin_function.vendor_offset -
+				    aml_resource->pin_function.
+				    pin_table_offset +
+				    aml_resource->pin_function.vendor_length;
+			} else {
+				extra_struct_bytes +=
+				    aml_resource->large_header.resource_length +
+				    sizeof(struct aml_resource_large_header) -
+				    aml_resource->pin_function.pin_table_offset;
+			}
+			break;
+
 		case ACPI_RESOURCE_NAME_SERIAL_BUS:
 
 			minimum_aml_resource_length =
@@ -547,6 +642,50 @@ acpi_rs_get_list_length(u8 *aml_buffer,
 			    minimum_aml_resource_length;
 			break;
 
+		case ACPI_RESOURCE_NAME_PIN_CONFIG:
+
+			/* Vendor data is optional */
+
+			if (aml_resource->pin_config.vendor_length) {
+				extra_struct_bytes +=
+				    aml_resource->pin_config.vendor_offset -
+				    aml_resource->pin_config.pin_table_offset +
+				    aml_resource->pin_config.vendor_length;
+			} else {
+				extra_struct_bytes +=
+				    aml_resource->large_header.resource_length +
+				    sizeof(struct aml_resource_large_header) -
+				    aml_resource->pin_config.pin_table_offset;
+			}
+			break;
+
+		case ACPI_RESOURCE_NAME_PIN_GROUP:
+
+			extra_struct_bytes +=
+			    aml_resource->pin_group.vendor_offset -
+			    aml_resource->pin_group.pin_table_offset +
+			    aml_resource->pin_group.vendor_length;
+
+			break;
+
+		case ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION:
+
+			extra_struct_bytes +=
+			    aml_resource->pin_group_function.vendor_offset -
+			    aml_resource->pin_group_function.res_source_offset +
+			    aml_resource->pin_group_function.vendor_length;
+
+			break;
+
+		case ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG:
+
+			extra_struct_bytes +=
+			    aml_resource->pin_group_config.vendor_offset -
+			    aml_resource->pin_group_config.res_source_offset +
+			    aml_resource->pin_group_config.vendor_length;
+
+			break;
+
 		default:
 
 			break;
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f4cdf8d..55fd188 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -75,6 +75,10 @@ static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
 static void
 acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
 
+static void
+acpi_rs_dump_resource_label(char *title,
+			    struct acpi_resource_label *resource_label);
+
 static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
 
 static void
@@ -371,6 +375,26 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
 								   target));
 			break;
 
+		case ACPI_RSD_LABEL:
+			/*
+			 * resource_label
+			 */
+			acpi_rs_dump_resource_label("Resource Label",
+						    ACPI_CAST_PTR(struct
+								  acpi_resource_label,
+								  target));
+			break;
+
+		case ACPI_RSD_SOURCE_LABEL:
+			/*
+			 * resource_source_label
+			 */
+			acpi_rs_dump_resource_label("Resource Source Label",
+						    ACPI_CAST_PTR(struct
+								  acpi_resource_label,
+								  target));
+			break;
+
 		default:
 
 			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
@@ -414,6 +438,30 @@ acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_rs_dump_resource_label
+ *
+ * PARAMETERS:  title              - Title of the dumped resource field
+ *              resource_label     - Pointer to a Resource Label struct
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Common routine for dumping the resource_label
+ *
+ ******************************************************************************/
+
+static void
+acpi_rs_dump_resource_label(char *title,
+			    struct acpi_resource_label *resource_label)
+{
+	ACPI_FUNCTION_ENTRY();
+
+	acpi_rs_out_string(title,
+			   resource_label->string_ptr ?
+			   resource_label->string_ptr : "[Not Specified]");
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_rs_dump_address_common
  *
  * PARAMETERS:  resource        - Pointer to an internal resource descriptor
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 8aacd28..da150e1 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -314,6 +314,120 @@ struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
 	 NULL},
 };
 
+struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_function),
+	 "PinFunction", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_function.revision_id),
+	 "RevisionId", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_function.pin_config), "PinConfig",
+	 acpi_gbl_ppc_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.sharable), "Sharing",
+	 acpi_gbl_shr_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.function_number),
+	 "FunctionNumber", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(pin_function.resource_source),
+	 "ResourceSource", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.pin_table_length),
+	 "PinTableLength", NULL},
+	{ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(pin_function.pin_table), "PinTable",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.vendor_length),
+	 "VendorLength", NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(pin_function.vendor_data),
+	 "VendorData", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_config),
+	 "PinConfig", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_config.revision_id), "RevisionId",
+	 NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.producer_consumer),
+	 "ProducerConsumer", acpi_gbl_consume_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.sharable), "Sharing",
+	 acpi_gbl_shr_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_config.pin_config_type),
+	 "PinConfigType", NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(pin_config.pin_config_value),
+	 "PinConfigValue", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(pin_config.resource_source),
+	 "ResourceSource", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_config.pin_table_length),
+	 "PinTableLength", NULL},
+	{ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(pin_config.pin_table), "PinTable",
+	 NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_config.vendor_length),
+	 "VendorLength", NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(pin_config.vendor_data),
+	 "VendorData", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_pin_group[8] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_group),
+	 "PinGroup", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group.revision_id), "RevisionId",
+	 NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group.producer_consumer),
+	 "ProducerConsumer", acpi_gbl_consume_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group.pin_table_length),
+	 "PinTableLength", NULL},
+	{ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(pin_group.pin_table), "PinTable",
+	 NULL},
+	{ACPI_RSD_LABEL, ACPI_RSD_OFFSET(pin_group.resource_label),
+	 "ResourceLabel", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group.vendor_length),
+	 "VendorLength", NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(pin_group.vendor_data),
+	 "VendorData", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_pin_group_function[9] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_group_function),
+	 "PinGroupFunction", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_function.revision_id),
+	 "RevisionId", NULL},
+	{ACPI_RSD_1BITFLAG,
+	 ACPI_RSD_OFFSET(pin_group_function.producer_consumer),
+	 "ProducerConsumer", acpi_gbl_consume_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.sharable),
+	 "Sharing", acpi_gbl_shr_decode},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_function.function_number),
+	 "FunctionNumber", NULL},
+	{ACPI_RSD_SOURCE_LABEL,
+	 ACPI_RSD_OFFSET(pin_group_function.resource_source_label),
+	 "ResourceSourceLabel", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(pin_group_function.resource_source),
+	 "ResourceSource", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_function.vendor_length),
+	 "VendorLength", NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(pin_group_function.vendor_data),
+	 "VendorData", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_pin_group_config[10] = {
+	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_pin_group_config),
+	 "PinGroupConfig", NULL},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_config.revision_id),
+	 "RevisionId", NULL},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.producer_consumer),
+	 "ProducerConsumer", acpi_gbl_consume_decode},
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.sharable),
+	 "Sharing", acpi_gbl_shr_decode},
+	{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_config.pin_config_type),
+	 "PinConfigType", NULL},
+	{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(pin_group_config.pin_config_value),
+	 "PinConfigValue", NULL},
+	{ACPI_RSD_SOURCE_LABEL,
+	 ACPI_RSD_OFFSET(pin_group_config.resource_source_label),
+	 "ResourceSourceLabel", NULL},
+	{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(pin_group_config.resource_source),
+	 "ResourceSource", NULL},
+	{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_config.vendor_length),
+	 "VendorLength", NULL},
+	{ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(pin_group_config.vendor_data),
+	 "VendorData", NULL},
+};
+
 struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
 	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
 	 "FixedDma", NULL},
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 475da9d..b0e5051 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -80,6 +80,11 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
 	acpi_rs_convert_gpio,	/* 0x11, ACPI_RESOURCE_TYPE_GPIO */
 	acpi_rs_convert_fixed_dma,	/* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
 	NULL,			/* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
+	acpi_rs_convert_pin_function,	/* 0x14, ACPI_RESOURCE_TYPE_PIN_FUNCTION */
+	acpi_rs_convert_pin_config,	/* 0x15, ACPI_RESOURCE_TYPE_PIN_CONFIG */
+	acpi_rs_convert_pin_group,	/* 0x16, ACPI_RESOURCE_TYPE_PIN_GROUP */
+	acpi_rs_convert_pin_group_function,	/* 0x17, ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
+	acpi_rs_convert_pin_group_config,	/* 0x18, ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
 };
 
 /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -119,8 +124,12 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
 	acpi_rs_convert_address64,	/* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
 	acpi_rs_convert_ext_address64,	/* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
 	acpi_rs_convert_gpio,	/* 0x0C, ACPI_RESOURCE_NAME_GPIO */
-	NULL,			/* 0x0D, Reserved */
+	acpi_rs_convert_pin_function,	/* 0x0D, ACPI_RESOURCE_NAME_PIN_FUNCTION */
 	NULL,			/* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+	acpi_rs_convert_pin_config,	/* 0x0F, ACPI_RESOURCE_NAME_PIN_CONFIG */
+	acpi_rs_convert_pin_group,	/* 0x10, ACPI_RESOURCE_NAME_PIN_GROUP */
+	acpi_rs_convert_pin_group_function,	/* 0x11, ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION */
+	acpi_rs_convert_pin_group_config,	/* 0x12, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG */
 };
 
 /* Subtype table for serial_bus -- I2C, SPI, and UART */
@@ -157,6 +166,11 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
 	acpi_rs_dump_gpio,	/* ACPI_RESOURCE_TYPE_GPIO */
 	acpi_rs_dump_fixed_dma,	/* ACPI_RESOURCE_TYPE_FIXED_DMA */
 	NULL,			/* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+	acpi_rs_dump_pin_function,	/* ACPI_RESOURCE_TYPE_PIN_FUNCTION */
+	acpi_rs_dump_pin_config,	/* ACPI_RESOURCE_TYPE_PIN_CONFIG */
+	acpi_rs_dump_pin_group,	/* ACPI_RESOURCE_TYPE_PIN_GROUP */
+	acpi_rs_dump_pin_group_function,	/* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
+	acpi_rs_dump_pin_group_config,	/* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
 };
 
 struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
@@ -193,6 +207,11 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
 	sizeof(struct aml_resource_gpio),	/* ACPI_RESOURCE_TYPE_GPIO */
 	sizeof(struct aml_resource_fixed_dma),	/* ACPI_RESOURCE_TYPE_FIXED_DMA */
 	sizeof(struct aml_resource_common_serialbus),	/* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+	sizeof(struct aml_resource_pin_function),	/* ACPI_RESOURCE_TYPE_PIN_FUNCTION */
+	sizeof(struct aml_resource_pin_config),	/* ACPI_RESOURCE_TYPE_PIN_CONFIG */
+	sizeof(struct aml_resource_pin_group),	/* ACPI_RESOURCE_TYPE_PIN_GROUP */
+	sizeof(struct aml_resource_pin_group_function),	/* ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION */
+	sizeof(struct aml_resource_pin_group_config),	/* ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG */
 };
 
 const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -230,7 +249,12 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
 	ACPI_RS_SIZE(struct acpi_resource_address64),
 	ACPI_RS_SIZE(struct acpi_resource_extended_address64),
 	ACPI_RS_SIZE(struct acpi_resource_gpio),
-	ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+	ACPI_RS_SIZE(struct acpi_resource_pin_function),
+	ACPI_RS_SIZE(struct acpi_resource_common_serialbus),
+	ACPI_RS_SIZE(struct acpi_resource_pin_config),
+	ACPI_RS_SIZE(struct acpi_resource_pin_group),
+	ACPI_RS_SIZE(struct acpi_resource_pin_group_function),
+	ACPI_RS_SIZE(struct acpi_resource_pin_group_config),
 };
 
 const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 2ae79613..cc4b548 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -596,9 +596,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 
 			/* Set vendor offset only if there is vendor data */
 
-			if (resource->data.gpio.vendor_length) {
-				ACPI_SET16(target, aml_length);
-			}
+			ACPI_SET16(target, aml_length);
 
 			acpi_rs_set_resource_length(aml_length, aml);
 			break;
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index c20e6d0..14d12d6 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -147,6 +147,82 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
 
 /*******************************************************************************
  *
+ * acpi_rs_convert_pinfunction
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_FUNCTION,
+	 ACPI_RS_SIZE(struct acpi_resource_pin_function),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_function)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_FUNCTION,
+	 sizeof(struct aml_resource_pin_function),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_function.revision_id),
+	 AML_OFFSET(pin_function.revision_id),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.sharable),
+	 AML_OFFSET(pin_function.flags),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_function.pin_config),
+	 AML_OFFSET(pin_function.pin_config),
+	 1},
+
+	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.pin_function.function_number),
+	 AML_OFFSET(pin_function.function_number),
+	 2},
+
+	/* Pin Table */
+
+	/*
+	 * It is OK to use GPIO operations here because none of them refer to
+	 * GPIO structures directly but instead use the offsets given here.
+	 */
+
+	{ACPI_RSC_COUNT_GPIO_PIN,
+	 ACPI_RS_OFFSET(data.pin_function.pin_table_length),
+	 AML_OFFSET(pin_function.pin_table_offset),
+	 AML_OFFSET(pin_function.res_source_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_function.pin_table),
+	 AML_OFFSET(pin_function.pin_table_offset),
+	 0},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.pin_function.resource_source.index),
+	 AML_OFFSET(pin_function.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_function.resource_source.string_length),
+	 AML_OFFSET(pin_function.res_source_offset),
+	 AML_OFFSET(pin_function.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_function.resource_source.string_ptr),
+	 AML_OFFSET(pin_function.res_source_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN,
+	 ACPI_RS_OFFSET(data.pin_function.vendor_length),
+	 AML_OFFSET(pin_function.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_function.vendor_data),
+	 AML_OFFSET(pin_function.vendor_offset),
+	 0},
+};
+
+/*******************************************************************************
+ *
  * acpi_rs_convert_i2c_serial_bus
  *
  ******************************************************************************/
@@ -458,3 +534,300 @@ struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[23] = {
 	 AML_OFFSET(uart_serial_bus.default_baud_rate),
 	 1},
 };
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_pin_config
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_pin_config[14] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_CONFIG,
+	 ACPI_RS_SIZE(struct acpi_resource_pin_config),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_config)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_CONFIG,
+	 sizeof(struct aml_resource_pin_config),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.revision_id),
+	 AML_OFFSET(pin_config.revision_id),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.sharable),
+	 AML_OFFSET(pin_config.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.producer_consumer),
+	 AML_OFFSET(pin_config.flags),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.pin_config_type),
+	 AML_OFFSET(pin_config.pin_config_type),
+	 1},
+
+	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.pin_config.pin_config_value),
+	 AML_OFFSET(pin_config.pin_config_value),
+	 1},
+
+	/* Pin Table */
+
+	/*
+	 * It is OK to use GPIO operations here because none of them refer to
+	 * GPIO structures directly but instead use the offsets given here.
+	 */
+
+	{ACPI_RSC_COUNT_GPIO_PIN,
+	 ACPI_RS_OFFSET(data.pin_config.pin_table_length),
+	 AML_OFFSET(pin_config.pin_table_offset),
+	 AML_OFFSET(pin_config.res_source_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_config.pin_table),
+	 AML_OFFSET(pin_config.pin_table_offset),
+	 0},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.resource_source.index),
+	 AML_OFFSET(pin_config.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_config.resource_source.string_length),
+	 AML_OFFSET(pin_config.res_source_offset),
+	 AML_OFFSET(pin_config.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_config.resource_source.string_ptr),
+	 AML_OFFSET(pin_config.res_source_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_config.vendor_length),
+	 AML_OFFSET(pin_config.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_config.vendor_data),
+	 AML_OFFSET(pin_config.vendor_offset),
+	 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_pin_group
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_pin_group[10] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP,
+	 ACPI_RS_SIZE(struct acpi_resource_pin_group),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP,
+	 sizeof(struct aml_resource_pin_group),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group.revision_id),
+	 AML_OFFSET(pin_group.revision_id),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group.producer_consumer),
+	 AML_OFFSET(pin_group.flags),
+	 0},
+
+	/* Pin Table */
+
+	/*
+	 * It is OK to use GPIO operations here because none of them refer to
+	 * GPIO structures directly but instead use offsets given here.
+	 */
+
+	{ACPI_RSC_COUNT_GPIO_PIN,
+	 ACPI_RS_OFFSET(data.pin_group.pin_table_length),
+	 AML_OFFSET(pin_group.pin_table_offset),
+	 AML_OFFSET(pin_group.label_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_group.pin_table),
+	 AML_OFFSET(pin_group.pin_table_offset),
+	 0},
+
+	/* Resource Label */
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group.resource_label.string_length),
+	 AML_OFFSET(pin_group.label_offset),
+	 AML_OFFSET(pin_group.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group.resource_label.string_ptr),
+	 AML_OFFSET(pin_group.label_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_group.vendor_length),
+	 AML_OFFSET(pin_group.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group.vendor_data),
+	 AML_OFFSET(pin_group.vendor_offset),
+	 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_pin_group_function
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[13] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION,
+	 ACPI_RS_SIZE(struct acpi_resource_pin_group_function),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group_function)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION,
+	 sizeof(struct aml_resource_pin_group_function),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_function.revision_id),
+	 AML_OFFSET(pin_group_function.revision_id),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.sharable),
+	 AML_OFFSET(pin_group_function.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.pin_group_function.producer_consumer),
+	 AML_OFFSET(pin_group_function.flags),
+	 1},
+
+	{ACPI_RSC_MOVE16,
+	 ACPI_RS_OFFSET(data.pin_group_function.function_number),
+	 AML_OFFSET(pin_group_function.function_number),
+	 1},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.pin_group_function.resource_source.index),
+	 AML_OFFSET(pin_group_function.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_function.resource_source.string_length),
+	 AML_OFFSET(pin_group_function.res_source_offset),
+	 AML_OFFSET(pin_group_function.res_source_label_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_function.resource_source.string_ptr),
+	 AML_OFFSET(pin_group_function.res_source_offset),
+	 0},
+
+	/* Resource Source Label */
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_function.resource_source_label.
+			string_length),
+	 AML_OFFSET(pin_group_function.res_source_label_offset),
+	 AML_OFFSET(pin_group_function.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_function.resource_source_label.
+			string_ptr),
+	 AML_OFFSET(pin_group_function.res_source_label_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN,
+	 ACPI_RS_OFFSET(data.pin_group_function.vendor_length),
+	 AML_OFFSET(pin_group_function.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_function.vendor_data),
+	 AML_OFFSET(pin_group_function.vendor_offset),
+	 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_pin_group_config
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[14] = {
+	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG,
+	 ACPI_RS_SIZE(struct acpi_resource_pin_group_config),
+	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group_config)},
+
+	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG,
+	 sizeof(struct aml_resource_pin_group_config),
+	 0},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_config.revision_id),
+	 AML_OFFSET(pin_group_config.revision_id),
+	 1},
+
+	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.sharable),
+	 AML_OFFSET(pin_group_config.flags),
+	 0},
+
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.pin_group_config.producer_consumer),
+	 AML_OFFSET(pin_group_config.flags),
+	 1},
+
+	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_config.pin_config_type),
+	 AML_OFFSET(pin_group_config.pin_config_type),
+	 1},
+
+	{ACPI_RSC_MOVE32,
+	 ACPI_RS_OFFSET(data.pin_group_config.pin_config_value),
+	 AML_OFFSET(pin_group_config.pin_config_value),
+	 1},
+
+	/* Resource Source */
+
+	{ACPI_RSC_MOVE8,
+	 ACPI_RS_OFFSET(data.pin_group_config.resource_source.index),
+	 AML_OFFSET(pin_group_config.res_source_index),
+	 1},
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_config.resource_source.string_length),
+	 AML_OFFSET(pin_group_config.res_source_offset),
+	 AML_OFFSET(pin_group_config.res_source_label_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_config.resource_source.string_ptr),
+	 AML_OFFSET(pin_group_config.res_source_offset),
+	 0},
+
+	/* Resource Source Label */
+
+	{ACPI_RSC_COUNT_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_config.resource_source_label.
+			string_length),
+	 AML_OFFSET(pin_group_config.res_source_label_offset),
+	 AML_OFFSET(pin_group_config.vendor_offset)},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_config.resource_source_label.string_ptr),
+	 AML_OFFSET(pin_group_config.res_source_label_offset),
+	 0},
+
+	/* Vendor Data */
+
+	{ACPI_RSC_COUNT_GPIO_VEN,
+	 ACPI_RS_OFFSET(data.pin_group_config.vendor_length),
+	 AML_OFFSET(pin_group_config.vendor_length),
+	 1},
+
+	{ACPI_RSC_MOVE_GPIO_RES,
+	 ACPI_RS_OFFSET(data.pin_group_config.vendor_data),
+	 AML_OFFSET(pin_group_config.vendor_offset),
+	 0},
+};
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 27c5c27..c9d6fa6 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -867,6 +867,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 	return_ACPI_STATUS(status);
 }
 
+ACPI_EXPORT_SYMBOL(acpi_tb_install_and_load_table)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_unload_table
@@ -914,3 +916,5 @@ acpi_status acpi_tb_unload_table(u32 table_index)
 	acpi_tb_set_table_loaded_flag(table_index, FALSE);
 	return_ACPI_STATUS(status);
 }
+
+ACPI_EXPORT_SYMBOL(acpi_tb_unload_table)
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 51860bf..5f051d8 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -449,8 +449,8 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
  * The 64-bit X fields are optional extensions to the original 32-bit FADT
  * V1.0 fields. Even if they are present in the FADT, they are optional and
  * are unused if the BIOS sets them to zero. Therefore, we must copy/expand
- * 32-bit V1.0 fields to the 64-bit X fields if the the 64-bit X field is
- * originally zero.
+ * 32-bit V1.0 fields to the 64-bit X fields if the 64-bit X field is originally
+ * zero.
  *
  * For ACPI 1.0 FADTs (that contain no 64-bit addresses), all 32-bit address
  * fields are expanded to the corresponding 64-bit X fields in the internal
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 5a968a7..0c6768d 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -141,9 +141,9 @@ void acpi_tb_check_dsdt_header(void)
  *
  * FUNCTION:    acpi_tb_copy_dsdt
  *
- * PARAMETERS:  table_desc          - Installed table to copy
+ * PARAMETERS:  table_index         - Index of installed table to copy
  *
- * RETURN:      None
+ * RETURN:      The copied DSDT
  *
  * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
  *              Some very bad BIOSs are known to either corrupt the DSDT or
@@ -239,7 +239,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
  *
  * FUNCTION:    acpi_tb_parse_root_table
  *
- * PARAMETERS:  rsdp                    - Pointer to the RSDP
+ * PARAMETERS:  rsdp_address        - Pointer to the RSDP
  *
  * RETURN:      Status
  *
@@ -416,13 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 		}
 	}
 
-	table_desc->validation_count++;
-	if (table_desc->validation_count == 0) {
-		ACPI_ERROR((AE_INFO,
-			    "Table %p, Validation count is zero after increment\n",
-			    table_desc));
-		table_desc->validation_count--;
-		return_ACPI_STATUS(AE_LIMIT);
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count++;
+
+		/*
+		 * Detect validation_count overflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count overflows\n",
+				      table_desc));
+		}
 	}
 
 	*out_table = table_desc->pointer;
@@ -449,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
 
 	ACPI_FUNCTION_TRACE(acpi_tb_put_table);
 
-	if (table_desc->validation_count == 0) {
-		ACPI_WARNING((AE_INFO,
-			      "Table %p, Validation count is zero before decrement\n",
-			      table_desc));
-		return_VOID;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count--;
+
+		/*
+		 * Detect validation_count underflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count underflows\n",
+				      table_desc));
+			return_VOID;
+		}
 	}
-	table_desc->validation_count--;
 
 	if (table_desc->validation_count == 0) {
 
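
For reference, the tbutils.c hunk above replaces the old hard failure on counter wrap-around with a saturating validation counter that warns once and then stops counting. A simplified userspace sketch of that saturate-and-warn-once pattern (the limit, struct and function names below are stand-ins, not the real ACPICA definitions):

#include <stdio.h>
#include <stdint.h>

#define MAX_VALIDATIONS 0xFF	/* stand-in for ACPI_MAX_TABLE_VALIDATIONS */

struct table_desc {
	uint16_t validation_count;
};

/* Take a reference; saturate at the limit and warn exactly once. */
static void table_get(struct table_desc *t)
{
	if (t->validation_count < MAX_VALIDATIONS) {
		t->validation_count++;
		if (t->validation_count >= MAX_VALIDATIONS)
			fprintf(stderr, "table %p: validation count saturated\n",
				(void *)t);
	}
}

/* Drop a reference; once saturated, the count is never decremented again. */
static void table_put(struct table_desc *t)
{
	if (t->validation_count > 0 && t->validation_count < MAX_VALIDATIONS)
		t->validation_count--;
}

int main(void)
{
	struct table_desc t = { 0 };

	table_get(&t);
	table_put(&t);
	printf("count = %u\n", (unsigned)t.validation_count);
	return 0;
}

This is only an illustration of the idea; the kernel code additionally warns on underflow rather than silently refusing to decrement.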
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 6086830..02cd2c2 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -460,9 +460,11 @@ static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = {
 	/* 09 */ "Device PLD Check",
 	/* 0A */ "Reserved",
 	/* 0B */ "System Locality Update",
-					/* 0C */ "Shutdown Request",
-					/* Reserved in ACPI 6.0 */
-	/* 0D */ "System Resource Affinity Update"
+								/* 0C */ "Reserved (was previously Shutdown Request)",
+								/* Reserved in ACPI 6.0 */
+	/* 0D */ "System Resource Affinity Update",
+								/* 0E */ "Heterogeneous Memory Attributes Update"
+								/* ACPI 6.2 */
 };
 
 static const char *acpi_gbl_device_notify[5] = {
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index c82399f..1b3ee74 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -104,13 +104,19 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
 				break;
 			}
 
-			if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+			/*
+			 * Note: the u32 cast ensures that 1 is stored as an unsigned
+			 * integer. Omitting the cast may result in 1 being stored as an
+			 * int. Some compilers or runtime error detection may flag this as
+			 * an error.
+			 */
+			if (!(acpi_gbl_owner_id_mask[j] & ((u32)1 << k))) {
 				/*
 				 * Found a free ID. The actual ID is the bit index plus one,
 				 * making zero an invalid Owner ID. Save this as the last ID
 				 * allocated and update the global ID mask.
 				 */
-				acpi_gbl_owner_id_mask[j] |= (1 << k);
+				acpi_gbl_owner_id_mask[j] |= ((u32)1 << k);
 
 				acpi_gbl_last_owner_id_index = (u8)j;
 				acpi_gbl_next_owner_id_offset = (u8)(k + 1);
@@ -201,7 +207,7 @@ void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
 	/* Decode ID to index/offset pair */
 
 	index = ACPI_DIV_32(owner_id);
-	bit = 1 << ACPI_MOD_32(owner_id);
+	bit = (u32)1 << ACPI_MOD_32(owner_id);
 
 	/* Free the owner ID only if it is valid */
 
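
The (u32) casts above matter because a bare 1 is a signed int, and shifting it into the sign bit (1 << 31) is undefined behaviour that UBSan and some compilers flag. A small standalone sketch of the same bitmap-based ID allocation with an explicitly unsigned bit (names and sizes here are illustrative, not the ACPICA ones):

#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS 4	/* 4 x 32 = 128 possible IDs, illustrative only */

static uint32_t id_mask[MASK_WORDS];

/* Allocate the first free ID (1-based); 0 means none available. */
static unsigned int alloc_id(void)
{
	for (unsigned int j = 0; j < MASK_WORDS; j++) {
		for (unsigned int k = 0; k < 32; k++) {
			/* (uint32_t)1 keeps the shift unsigned even for k == 31 */
			if (!(id_mask[j] & ((uint32_t)1 << k))) {
				id_mask[j] |= (uint32_t)1 << k;
				return j * 32 + k + 1;
			}
		}
	}
	return 0;
}

static void release_id(unsigned int id)
{
	if (!id)
		return;
	id--;
	id_mask[id / 32] &= ~((uint32_t)1 << (id % 32));
}

int main(void)
{
	unsigned int id = alloc_id();

	printf("allocated id %u\n", id);
	release_id(id);
	return 0;
}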
diff --git a/drivers/acpi/acpica/utresdecode.c b/drivers/acpi/acpica/utresdecode.c
new file mode 100644
index 0000000..e15a2538
--- /dev/null
+++ b/drivers/acpi/acpica/utresdecode.c
@@ -0,0 +1,315 @@
+/*******************************************************************************
+ *
+ * Module Name: utresdecode - Resource descriptor keyword strings
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utresdecode")
+
+#if defined (ACPI_DEBUG_OUTPUT) || \
+	defined (ACPI_DISASSEMBLER) || \
+	defined (ACPI_DEBUGGER)
+/*
+ * Strings used to decode resource descriptors.
+ * Used by both the disassembler and the debugger resource dump routines
+ */
+const char *acpi_gbl_bm_decode[] = {
+	"NotBusMaster",
+	"BusMaster"
+};
+
+const char *acpi_gbl_config_decode[] = {
+	"0 - Good Configuration",
+	"1 - Acceptable Configuration",
+	"2 - Suboptimal Configuration",
+	"3 - ***Invalid Configuration***",
+};
+
+const char *acpi_gbl_consume_decode[] = {
+	"ResourceProducer",
+	"ResourceConsumer"
+};
+
+const char *acpi_gbl_dec_decode[] = {
+	"PosDecode",
+	"SubDecode"
+};
+
+const char *acpi_gbl_he_decode[] = {
+	"Level",
+	"Edge"
+};
+
+const char *acpi_gbl_io_decode[] = {
+	"Decode10",
+	"Decode16"
+};
+
+const char *acpi_gbl_ll_decode[] = {
+	"ActiveHigh",
+	"ActiveLow",
+	"ActiveBoth",
+	"Reserved"
+};
+
+const char *acpi_gbl_max_decode[] = {
+	"MaxNotFixed",
+	"MaxFixed"
+};
+
+const char *acpi_gbl_mem_decode[] = {
+	"NonCacheable",
+	"Cacheable",
+	"WriteCombining",
+	"Prefetchable"
+};
+
+const char *acpi_gbl_min_decode[] = {
+	"MinNotFixed",
+	"MinFixed"
+};
+
+const char *acpi_gbl_mtp_decode[] = {
+	"AddressRangeMemory",
+	"AddressRangeReserved",
+	"AddressRangeACPI",
+	"AddressRangeNVS"
+};
+
+const char *acpi_gbl_rng_decode[] = {
+	"InvalidRanges",
+	"NonISAOnlyRanges",
+	"ISAOnlyRanges",
+	"EntireRange"
+};
+
+const char *acpi_gbl_rw_decode[] = {
+	"ReadOnly",
+	"ReadWrite"
+};
+
+const char *acpi_gbl_shr_decode[] = {
+	"Exclusive",
+	"Shared",
+	"ExclusiveAndWake",	/* ACPI 5.0 */
+	"SharedAndWake"		/* ACPI 5.0 */
+};
+
+const char *acpi_gbl_siz_decode[] = {
+	"Transfer8",
+	"Transfer8_16",
+	"Transfer16",
+	"InvalidSize"
+};
+
+const char *acpi_gbl_trs_decode[] = {
+	"DenseTranslation",
+	"SparseTranslation"
+};
+
+const char *acpi_gbl_ttp_decode[] = {
+	"TypeStatic",
+	"TypeTranslation"
+};
+
+const char *acpi_gbl_typ_decode[] = {
+	"Compatibility",
+	"TypeA",
+	"TypeB",
+	"TypeF"
+};
+
+const char *acpi_gbl_ppc_decode[] = {
+	"PullDefault",
+	"PullUp",
+	"PullDown",
+	"PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+	"IoRestrictionNone",
+	"IoRestrictionInputOnly",
+	"IoRestrictionOutputOnly",
+	"IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+	"Width8bit",
+	"Width16bit",
+	"Width32bit",
+	"Width64bit",
+	"Width128bit",
+	"Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+	"Interrupt",
+	"I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+	"/* UNKNOWN serial bus type */",
+	"I2C",
+	"SPI",
+	"UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+	"AddressingMode7Bit",
+	"AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+	"ControllerInitiated",
+	"DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+	"FourWireMode",
+	"ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+	"ClockPhaseFirst",
+	"ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+	"ClockPolarityLow",
+	"ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+	"PolarityLow",
+	"PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+	"LittleEndian",
+	"BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+	"DataBitsFive",
+	"DataBitsSix",
+	"DataBitsSeven",
+	"DataBitsEight",
+	"DataBitsNine",
+	"/* UNKNOWN Bits per byte */",
+	"/* UNKNOWN Bits per byte */",
+	"/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+	"StopBitsZero",
+	"StopBitsOne",
+	"StopBitsOnePlusHalf",
+	"StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+	"FlowControlNone",
+	"FlowControlHardware",
+	"FlowControlXON",
+	"/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+	"ParityTypeNone",
+	"ParityTypeEven",
+	"ParityTypeOdd",
+	"ParityTypeMark",
+	"ParityTypeSpace",
+	"/* UNKNOWN parity keyword */",
+	"/* UNKNOWN parity keyword */",
+	"/* UNKNOWN parity keyword */"
+};
+
+/* pin_config type */
+
+const char *acpi_gbl_ptyp_decode[] = {
+	"Default",
+	"Bias Pull-up",
+	"Bias Pull-down",
+	"Bias Default",
+	"Bias Disable",
+	"Bias High Impedance",
+	"Bias Bus Hold",
+	"Drive Open Drain",
+	"Drive Open Source",
+	"Drive Push Pull",
+	"Drive Strength",
+	"Slew Rate",
+	"Input Debounce",
+	"Input Schmitt Trigger",
+};
+
+#endif
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e0587c8..70f78a4 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -48,251 +48,6 @@
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utresrc")
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
-/*
- * Strings used to decode resource descriptors.
- * Used by both the disassembler and the debugger resource dump routines
- */
-const char *acpi_gbl_bm_decode[] = {
-	"NotBusMaster",
-	"BusMaster"
-};
-
-const char *acpi_gbl_config_decode[] = {
-	"0 - Good Configuration",
-	"1 - Acceptable Configuration",
-	"2 - Suboptimal Configuration",
-	"3 - ***Invalid Configuration***",
-};
-
-const char *acpi_gbl_consume_decode[] = {
-	"ResourceProducer",
-	"ResourceConsumer"
-};
-
-const char *acpi_gbl_dec_decode[] = {
-	"PosDecode",
-	"SubDecode"
-};
-
-const char *acpi_gbl_he_decode[] = {
-	"Level",
-	"Edge"
-};
-
-const char *acpi_gbl_io_decode[] = {
-	"Decode10",
-	"Decode16"
-};
-
-const char *acpi_gbl_ll_decode[] = {
-	"ActiveHigh",
-	"ActiveLow",
-	"ActiveBoth",
-	"Reserved"
-};
-
-const char *acpi_gbl_max_decode[] = {
-	"MaxNotFixed",
-	"MaxFixed"
-};
-
-const char *acpi_gbl_mem_decode[] = {
-	"NonCacheable",
-	"Cacheable",
-	"WriteCombining",
-	"Prefetchable"
-};
-
-const char *acpi_gbl_min_decode[] = {
-	"MinNotFixed",
-	"MinFixed"
-};
-
-const char *acpi_gbl_mtp_decode[] = {
-	"AddressRangeMemory",
-	"AddressRangeReserved",
-	"AddressRangeACPI",
-	"AddressRangeNVS"
-};
-
-const char *acpi_gbl_rng_decode[] = {
-	"InvalidRanges",
-	"NonISAOnlyRanges",
-	"ISAOnlyRanges",
-	"EntireRange"
-};
-
-const char *acpi_gbl_rw_decode[] = {
-	"ReadOnly",
-	"ReadWrite"
-};
-
-const char *acpi_gbl_shr_decode[] = {
-	"Exclusive",
-	"Shared",
-	"ExclusiveAndWake",	/* ACPI 5.0 */
-	"SharedAndWake"		/* ACPI 5.0 */
-};
-
-const char *acpi_gbl_siz_decode[] = {
-	"Transfer8",
-	"Transfer8_16",
-	"Transfer16",
-	"InvalidSize"
-};
-
-const char *acpi_gbl_trs_decode[] = {
-	"DenseTranslation",
-	"SparseTranslation"
-};
-
-const char *acpi_gbl_ttp_decode[] = {
-	"TypeStatic",
-	"TypeTranslation"
-};
-
-const char *acpi_gbl_typ_decode[] = {
-	"Compatibility",
-	"TypeA",
-	"TypeB",
-	"TypeF"
-};
-
-const char *acpi_gbl_ppc_decode[] = {
-	"PullDefault",
-	"PullUp",
-	"PullDown",
-	"PullNone"
-};
-
-const char *acpi_gbl_ior_decode[] = {
-	"IoRestrictionNone",
-	"IoRestrictionInputOnly",
-	"IoRestrictionOutputOnly",
-	"IoRestrictionNoneAndPreserve"
-};
-
-const char *acpi_gbl_dts_decode[] = {
-	"Width8bit",
-	"Width16bit",
-	"Width32bit",
-	"Width64bit",
-	"Width128bit",
-	"Width256bit",
-};
-
-/* GPIO connection type */
-
-const char *acpi_gbl_ct_decode[] = {
-	"Interrupt",
-	"I/O"
-};
-
-/* Serial bus type */
-
-const char *acpi_gbl_sbt_decode[] = {
-	"/* UNKNOWN serial bus type */",
-	"I2C",
-	"SPI",
-	"UART"
-};
-
-/* I2C serial bus access mode */
-
-const char *acpi_gbl_am_decode[] = {
-	"AddressingMode7Bit",
-	"AddressingMode10Bit"
-};
-
-/* I2C serial bus slave mode */
-
-const char *acpi_gbl_sm_decode[] = {
-	"ControllerInitiated",
-	"DeviceInitiated"
-};
-
-/* SPI serial bus wire mode */
-
-const char *acpi_gbl_wm_decode[] = {
-	"FourWireMode",
-	"ThreeWireMode"
-};
-
-/* SPI serial clock phase */
-
-const char *acpi_gbl_cph_decode[] = {
-	"ClockPhaseFirst",
-	"ClockPhaseSecond"
-};
-
-/* SPI serial bus clock polarity */
-
-const char *acpi_gbl_cpo_decode[] = {
-	"ClockPolarityLow",
-	"ClockPolarityHigh"
-};
-
-/* SPI serial bus device polarity */
-
-const char *acpi_gbl_dp_decode[] = {
-	"PolarityLow",
-	"PolarityHigh"
-};
-
-/* UART serial bus endian */
-
-const char *acpi_gbl_ed_decode[] = {
-	"LittleEndian",
-	"BigEndian"
-};
-
-/* UART serial bus bits per byte */
-
-const char *acpi_gbl_bpb_decode[] = {
-	"DataBitsFive",
-	"DataBitsSix",
-	"DataBitsSeven",
-	"DataBitsEight",
-	"DataBitsNine",
-	"/* UNKNOWN Bits per byte */",
-	"/* UNKNOWN Bits per byte */",
-	"/* UNKNOWN Bits per byte */"
-};
-
-/* UART serial bus stop bits */
-
-const char *acpi_gbl_sb_decode[] = {
-	"StopBitsZero",
-	"StopBitsOne",
-	"StopBitsOnePlusHalf",
-	"StopBitsTwo"
-};
-
-/* UART serial bus flow control */
-
-const char *acpi_gbl_fc_decode[] = {
-	"FlowControlNone",
-	"FlowControlHardware",
-	"FlowControlXON",
-	"/* UNKNOWN flow control keyword */"
-};
-
-/* UART serial bus parity type */
-
-const char *acpi_gbl_pt_decode[] = {
-	"ParityTypeNone",
-	"ParityTypeEven",
-	"ParityTypeOdd",
-	"ParityTypeMark",
-	"ParityTypeSpace",
-	"/* UNKNOWN parity keyword */",
-	"/* UNKNOWN parity keyword */",
-	"/* UNKNOWN parity keyword */"
-};
-
-#endif
-
 /*
  * Base sizes of the raw AML resource descriptors, indexed by resource type.
  * Zero indicates a reserved (and therefore invalid) resource type.
@@ -332,8 +87,12 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
 	ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
 	ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
 	ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
-	0,
+	ACPI_AML_SIZE_LARGE(struct aml_resource_pin_function),
 	ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_pin_config),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_function),
+	ACPI_AML_SIZE_LARGE(struct aml_resource_pin_group_config),
 };
 
 const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
@@ -384,8 +143,12 @@ static const u8 acpi_gbl_resource_types[] = {
 	ACPI_VARIABLE_LENGTH,	/* 0A Qword* address */
 	ACPI_FIXED_LENGTH,	/* 0B Extended* address */
 	ACPI_VARIABLE_LENGTH,	/* 0C Gpio* */
-	0,
-	ACPI_VARIABLE_LENGTH	/* 0E *serial_bus */
+	ACPI_VARIABLE_LENGTH,	/* 0D pin_function */
+	ACPI_VARIABLE_LENGTH,	/* 0E *serial_bus */
+	ACPI_VARIABLE_LENGTH,	/* 0F pin_config */
+	ACPI_VARIABLE_LENGTH,	/* 10 pin_group */
+	ACPI_VARIABLE_LENGTH,	/* 11 pin_group_function */
+	ACPI_VARIABLE_LENGTH,	/* 12 pin_group_config */
 };
 
 /*******************************************************************************
@@ -474,15 +237,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 				return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 			}
 
-			/*
-			 * The end_tag opcode must be followed by a zero byte.
-			 * Although this byte is technically defined to be a checksum,
-			 * in practice, all ASL compilers set this byte to zero.
-			 */
-			if (*(aml + 1) != 0) {
-				return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-			}
-
 			/* Return the pointer to the end_tag if requested */
 
 			if (!user_function) {
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index c016211..0b85f11 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -151,6 +151,8 @@ acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
 	return (status);
 }
 
+ACPI_EXPORT_SYMBOL(acpi_acquire_mutex)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_release_mutex
@@ -167,7 +169,6 @@ acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
  *              not both.
  *
  ******************************************************************************/
-
 acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
 {
 	acpi_status status;
@@ -185,3 +186,5 @@ acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
 	acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
 	return (AE_OK);
 }
+
+ACPI_EXPORT_SYMBOL(acpi_release_mutex)
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d0855c0..0968816 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -89,14 +89,14 @@ bool ghes_disable;
 module_param_named(disable, ghes_disable, bool, 0);
 
 /*
- * All error sources notified with SCI shares one notifier function,
- * so they need to be linked and checked one by one.  This is applied
- * to NMI too.
+ * All error sources notified with HED (Hardware Error Device) share a
+ * single notifier callback, so they need to be linked and checked one
+ * by one. This holds true for NMI too.
  *
  * RCU is used for these lists, so ghes_list_mutex is only used for
  * list changing, not for traversing.
  */
-static LIST_HEAD(ghes_sci);
+static LIST_HEAD(ghes_hed);
 static DEFINE_MUTEX(ghes_list_mutex);
 
 /*
@@ -431,12 +431,13 @@ static void ghes_do_proc(struct ghes *ghes,
 {
 	int sev, sec_sev;
 	struct acpi_hest_generic_data *gdata;
+	guid_t *sec_type;
 
 	sev = ghes_severity(estatus->error_severity);
 	apei_estatus_for_each_section(estatus, gdata) {
+		sec_type = (guid_t *)gdata->section_type;
 		sec_sev = ghes_severity(gdata->error_severity);
-		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-				 CPER_SEC_PLATFORM_MEM)) {
+		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
 			struct cper_sec_mem_err *mem_err;
 			mem_err = (struct cper_sec_mem_err *)(gdata+1);
 			ghes_edac_report_mem_error(ghes, sev, mem_err);
@@ -445,8 +446,7 @@ static void ghes_do_proc(struct ghes *ghes,
 			ghes_handle_memory_failure(gdata, sev);
 		}
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-				      CPER_SEC_PCIE)) {
+		else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
 			struct cper_sec_pcie *pcie_err;
 			pcie_err = (struct cper_sec_pcie *)(gdata+1);
 			if (sev == GHES_SEV_RECOVERABLE &&
@@ -702,14 +702,14 @@ static irqreturn_t ghes_irq_func(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int ghes_notify_sci(struct notifier_block *this,
-				  unsigned long event, void *data)
+static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
+			   void *data)
 {
 	struct ghes *ghes;
 	int ret = NOTIFY_DONE;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
 		if (!ghes_proc(ghes))
 			ret = NOTIFY_OK;
 	}
@@ -718,8 +718,8 @@ static int ghes_notify_sci(struct notifier_block *this,
 	return ret;
 }
 
-static struct notifier_block ghes_notifier_sci = {
-	.notifier_call = ghes_notify_sci,
+static struct notifier_block ghes_notifier_hed = {
+	.notifier_call = ghes_notify_hed,
 };
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
@@ -966,7 +966,10 @@ static int ghes_probe(struct platform_device *ghes_dev)
 	case ACPI_HEST_NOTIFY_POLLED:
 	case ACPI_HEST_NOTIFY_EXTERNAL:
 	case ACPI_HEST_NOTIFY_SCI:
+	case ACPI_HEST_NOTIFY_GSIV:
+	case ACPI_HEST_NOTIFY_GPIO:
 		break;
+
 	case ACPI_HEST_NOTIFY_NMI:
 		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
 			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
@@ -1024,13 +1027,17 @@ static int ghes_probe(struct platform_device *ghes_dev)
 			goto err_edac_unreg;
 		}
 		break;
+
 	case ACPI_HEST_NOTIFY_SCI:
+	case ACPI_HEST_NOTIFY_GSIV:
+	case ACPI_HEST_NOTIFY_GPIO:
 		mutex_lock(&ghes_list_mutex);
-		if (list_empty(&ghes_sci))
-			register_acpi_hed_notifier(&ghes_notifier_sci);
-		list_add_rcu(&ghes->list, &ghes_sci);
+		if (list_empty(&ghes_hed))
+			register_acpi_hed_notifier(&ghes_notifier_hed);
+		list_add_rcu(&ghes->list, &ghes_hed);
 		mutex_unlock(&ghes_list_mutex);
 		break;
+
 	case ACPI_HEST_NOTIFY_NMI:
 		ghes_nmi_add(ghes);
 		break;
@@ -1066,14 +1073,18 @@ static int ghes_remove(struct platform_device *ghes_dev)
 	case ACPI_HEST_NOTIFY_EXTERNAL:
 		free_irq(ghes->irq, ghes);
 		break;
+
 	case ACPI_HEST_NOTIFY_SCI:
+	case ACPI_HEST_NOTIFY_GSIV:
+	case ACPI_HEST_NOTIFY_GPIO:
 		mutex_lock(&ghes_list_mutex);
 		list_del_rcu(&ghes->list);
-		if (list_empty(&ghes_sci))
-			unregister_acpi_hed_notifier(&ghes_notifier_sci);
+		if (list_empty(&ghes_hed))
+			unregister_acpi_hed_notifier(&ghes_notifier_hed);
 		mutex_unlock(&ghes_list_mutex);
 		synchronize_rcu();
 		break;
+
 	case ACPI_HEST_NOTIFY_NMI:
 		ghes_nmi_remove(ghes);
 		break;
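
The GHES rework above lets several notification types (SCI, GSIV, GPIO) feed the same HED notifier, which simply walks one shared list of error sources. A plain-C sketch of that fan-out pattern, stripped of the RCU and notifier-chain machinery (the struct and function names are made up for the example):

#include <stdio.h>

struct ghes_like {
	const char *name;
	struct ghes_like *next;
	int (*process)(struct ghes_like *g);
};

static struct ghes_like *source_list;	/* all sources share one list */

static int process_source(struct ghes_like *g)
{
	printf("processing error source %s\n", g->name);
	return 0;
}

/* One callback services every registered source, whatever notified us. */
static int notify_all(void)
{
	int handled = 0;

	for (struct ghes_like *g = source_list; g; g = g->next)
		if (!g->process(g))
			handled = 1;

	return handled;
}

int main(void)
{
	struct ghes_like a = { "sci-source", NULL, process_source };
	struct ghes_like b = { "gsiv-source", &a, process_source };

	source_list = &b;
	return notify_all() ? 0 : 1;
}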
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf9..797b28d 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
 	int ret = -ENODEV;
 	struct fwnode_handle *iort_fwnode;
 
-	/*
-	 * If we already translated the fwspec there
-	 * is nothing left to do, return the iommu_ops.
-	 */
-	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
-	if (ops)
-		return ops;
-
 	if (node) {
 		iort_fwnode = iort_get_fwnode(node);
 		if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 	u32 streamid = 0;
 	int err;
 
+	/*
+	 * If we already translated the fwspec there
+	 * is nothing left to do, return the iommu_ops.
+	 */
+	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+	if (ops)
+		return ops;
+
 	if (dev_is_pci(dev)) {
 		struct pci_bus *bus = to_pci_dev(dev)->bus;
 		u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 	if (err)
 		ops = ERR_PTR(err);
 
+	/* Ignore all other errors apart from EPROBE_DEFER */
+	if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+		dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+		ops = NULL;
+	}
+
 	return ops;
 }
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3..1cbb88d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
 	if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
 	    (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-		pm_wakeup_hard_event(&battery->device->dev);
+		acpi_pm_wakeup_event(&battery->device->dev);
 
 	return result;
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 784bda6..5a6fbe0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -196,42 +196,19 @@ static void acpi_print_osc_error(acpi_handle handle,
 	pr_debug("\n");
 }
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
-{
-	int i;
-	static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
-		24, 26, 28, 30, 32, 34};
-
-	if (strlen(str) != 36)
-		return AE_BAD_PARAMETER;
-	for (i = 0; i < 36; i++) {
-		if (i == 8 || i == 13 || i == 18 || i == 23) {
-			if (str[i] != '-')
-				return AE_BAD_PARAMETER;
-		} else if (!isxdigit(str[i]))
-			return AE_BAD_PARAMETER;
-	}
-	for (i = 0; i < 16; i++) {
-		uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
-		uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
-	}
-	return AE_OK;
-}
-EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
-
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 {
 	acpi_status status;
 	struct acpi_object_list input;
 	union acpi_object in_params[4];
 	union acpi_object *out_obj;
-	u8 uuid[16];
+	guid_t guid;
 	u32 errors;
 	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 
 	if (!context)
 		return AE_ERROR;
-	if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
+	if (guid_parse(context->uuid_str, &guid))
 		return AE_ERROR;
 	context->ret.length = ACPI_ALLOCATE_BUFFER;
 	context->ret.pointer = NULL;
@@ -241,7 +218,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 	input.pointer = in_params;
 	in_params[0].type 		= ACPI_TYPE_BUFFER;
 	in_params[0].buffer.length 	= 16;
-	in_params[0].buffer.pointer	= uuid;
+	in_params[0].buffer.pointer	= (u8 *)&guid;
 	in_params[1].type 		= ACPI_TYPE_INTEGER;
 	in_params[1].integer.value 	= context->rev;
 	in_params[2].type 		= ACPI_TYPE_INTEGER;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 25aba9b..ef1856b 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -19,7 +19,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#define pr_fmt(fmt) "ACPI : button: " fmt
+#define pr_fmt(fmt) "ACPI: button: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -113,7 +113,7 @@ struct acpi_button {
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
 	}
 
 	if (state)
-		pm_wakeup_hard_event(&device->dev);
+		acpi_pm_wakeup_event(&device->dev);
 
 	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
 	if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 		} else {
 			int keycode;
 
-			pm_wakeup_hard_event(&device->dev);
+			acpi_pm_wakeup_event(&device->dev);
 			if (button->suspended)
 				break;
 
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d500..28938b5 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -386,6 +386,12 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
 #ifdef CONFIG_PM
 static DEFINE_MUTEX(acpi_pm_notifier_lock);
 
+void acpi_pm_wakeup_event(struct device *dev)
+{
+	pm_wakeup_dev_event(dev, 0, acpi_s2idle_wakeup());
+}
+EXPORT_SYMBOL_GPL(acpi_pm_wakeup_event);
+
 static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
 {
 	struct acpi_device *adev;
@@ -400,9 +406,9 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
 	mutex_lock(&acpi_pm_notifier_lock);
 
 	if (adev->wakeup.flags.notifier_present) {
-		pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
-		if (adev->wakeup.context.work.func)
-			queue_pm_work(&adev->wakeup.context.work);
+		pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
+		if (adev->wakeup.context.func)
+			adev->wakeup.context.func(&adev->wakeup.context);
 	}
 
 	mutex_unlock(&acpi_pm_notifier_lock);
@@ -414,7 +420,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
  * acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
  * @adev: ACPI device to add the notify handler for.
  * @dev: Device to generate a wakeup event for while handling the notification.
- * @work_func: Work function to execute when handling the notification.
+ * @func: Work function to execute when handling the notification.
  *
  * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
  * PM wakeup events.  For example, wakeup events may be generated for bridges
@@ -422,11 +428,11 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
  * bridge itself doesn't have a wakeup GPE associated with it.
  */
 acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
-				 void (*work_func)(struct work_struct *work))
+			void (*func)(struct acpi_device_wakeup_context *context))
 {
 	acpi_status status = AE_ALREADY_EXISTS;
 
-	if (!dev && !work_func)
+	if (!dev && !func)
 		return AE_BAD_PARAMETER;
 
 	mutex_lock(&acpi_pm_notifier_lock);
@@ -436,8 +442,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
 
 	adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
 	adev->wakeup.context.dev = dev;
-	if (work_func)
-		INIT_WORK(&adev->wakeup.context.work, work_func);
+	adev->wakeup.context.func = func;
 
 	status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
 					     acpi_pm_notify_handler, NULL);
@@ -470,10 +475,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
 	if (ACPI_FAILURE(status))
 		goto out;
 
-	if (adev->wakeup.context.work.func) {
-		cancel_work_sync(&adev->wakeup.context.work);
-		adev->wakeup.context.work.func = NULL;
-	}
+	adev->wakeup.context.func = NULL;
 	adev->wakeup.context.dev = NULL;
 	wakeup_source_unregister(adev->wakeup.ws);
 
@@ -494,6 +496,13 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
 }
 EXPORT_SYMBOL(acpi_bus_can_wakeup);
 
+bool acpi_pm_device_can_wakeup(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+
+	return adev ? acpi_device_can_wakeup(adev) : false;
+}
+
 /**
  * acpi_dev_pm_get_state - Get preferred power state of ACPI device.
  * @dev: Device whose preferred target power state to return.
@@ -659,16 +668,15 @@ EXPORT_SYMBOL(acpi_pm_device_sleep_state);
 
 /**
  * acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
- * @work: Work item to handle.
+ * @context: Device wakeup context.
  */
-static void acpi_pm_notify_work_func(struct work_struct *work)
+static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
 {
-	struct device *dev;
+	struct device *dev = context->dev;
 
-	dev = container_of(work, struct acpi_device_wakeup_context, work)->dev;
 	if (dev) {
 		pm_wakeup_event(dev, 0);
-		pm_runtime_resume(dev);
+		pm_request_resume(dev);
 	}
 }
 
@@ -694,80 +702,53 @@ static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
 		acpi_status res;
 		int error;
 
+		if (adev->wakeup.flags.enabled)
+			return 0;
+
 		error = acpi_enable_wakeup_device_power(adev, target_state);
 		if (error)
 			return error;
 
-		if (adev->wakeup.flags.enabled)
-			return 0;
-
 		res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
-		if (ACPI_SUCCESS(res)) {
-			adev->wakeup.flags.enabled = 1;
-		} else {
+		if (ACPI_FAILURE(res)) {
 			acpi_disable_wakeup_device_power(adev);
 			return -EIO;
 		}
-	} else {
-		if (adev->wakeup.flags.enabled) {
-			acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
-			adev->wakeup.flags.enabled = 0;
-		}
+		adev->wakeup.flags.enabled = 1;
+	} else if (adev->wakeup.flags.enabled) {
+		acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
 		acpi_disable_wakeup_device_power(adev);
+		adev->wakeup.flags.enabled = 0;
 	}
 	return 0;
 }
 
 /**
- * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable the platform to wake up.
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device for which to enable/disable wakeup event generation.
  * @enable: Whether to enable or disable the wakeup functionality.
  */
-int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
-{
-	struct acpi_device *adev;
-
-	if (!device_run_wake(phys_dev))
-		return -EINVAL;
-
-	adev = ACPI_COMPANION(phys_dev);
-	if (!adev) {
-		dev_dbg(phys_dev, "ACPI companion missing in %s!\n", __func__);
-		return -ENODEV;
-	}
-
-	return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
-}
-EXPORT_SYMBOL(acpi_pm_device_run_wake);
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
- * @dev: Device to enable/desible to wake up the system from sleep states.
- * @enable: Whether to enable or disable @dev to wake up the system.
- */
-int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
 {
 	struct acpi_device *adev;
 	int error;
 
-	if (!device_can_wakeup(dev))
-		return -EINVAL;
-
 	adev = ACPI_COMPANION(dev);
 	if (!adev) {
 		dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
 		return -ENODEV;
 	}
 
+	if (!acpi_device_can_wakeup(adev))
+		return -EINVAL;
+
 	error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
 	if (!error)
-		dev_info(dev, "System wakeup %s by ACPI\n",
-				enable ? "enabled" : "disabled");
+		dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled");
 
 	return error;
 }
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SYMBOL(acpi_pm_set_device_wakeup);
 
 /**
  * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
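
The device_pm.c changes above drop the per-device work item and store a direct callback in the wakeup context, so the notify handler can invoke it without going through a workqueue. A minimal sketch of trading the queued-work indirection for a stored callback (the struct and function names are invented for illustration):

#include <stdio.h>

struct wakeup_context;
typedef void (*wakeup_func)(struct wakeup_context *ctx);

struct wakeup_context {
	const char *dev_name;
	wakeup_func func;	/* replaces the old embedded work item */
};

static void notify_work_func(struct wakeup_context *ctx)
{
	printf("requesting resume of %s after wakeup notification\n",
	       ctx->dev_name);
}

/* Notification handler: call the stored callback directly, no work queue. */
static void pm_notify_handler(struct wakeup_context *ctx)
{
	if (ctx->func)
		ctx->func(ctx);
}

int main(void)
{
	struct wakeup_context ctx = { "dev0", notify_work_func };

	pm_notify_handler(&ctx);
	return 0;
}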
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index c24235d..854d428 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -27,7 +27,7 @@
 
 /* Uncomment next line to get verbose printout */
 /* #define DEBUG */
-#define pr_fmt(fmt) "ACPI : EC: " fmt
+#define pr_fmt(fmt) "ACPI: EC: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -190,6 +190,7 @@ static struct workqueue_struct *ec_query_wq;
 
 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
 static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
+static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
 
 /* --------------------------------------------------------------------------
  *                           Logging/Debugging
@@ -316,7 +317,7 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 	ec->timestamp = jiffies;
 }
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 static const char *acpi_ec_cmd_string(u8 cmd)
 {
 	switch (cmd) {
@@ -459,8 +460,10 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
 
 static void acpi_ec_submit_query(struct acpi_ec *ec)
 {
-	if (acpi_ec_event_enabled(ec) &&
-	    !test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+	acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+	if (!acpi_ec_event_enabled(ec))
+		return;
+	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
 		ec_dbg_evt("Command(%s) submitted/blocked",
 			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
 		ec->nr_pending_queries++;
@@ -470,11 +473,10 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
 
 static void acpi_ec_complete_query(struct acpi_ec *ec)
 {
-	if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+	if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
 		ec_dbg_evt("Command(%s) unblocked",
 			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
-	}
+	acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
 }
 
 static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -1362,13 +1364,23 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
 				     ec_parse_io_ports, ec);
 	if (ACPI_FAILURE(status))
 		return status;
+	if (ec->data_addr == 0 || ec->command_addr == 0)
+		return AE_OK;
 
-	/* Get GPE bit assignment (EC events). */
-	/* TODO: Add support for _GPE returning a package */
-	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
-	ec->gpe = tmp;
+	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
+		/*
+		 * Always inherit the GPE number setting from the ECDT
+		 * EC.
+		 */
+		ec->gpe = boot_ec->gpe;
+	} else {
+		/* Get GPE bit assignment (EC events). */
+		/* TODO: Add support for _GPE returning a package */
+		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+		if (ACPI_FAILURE(status))
+			return status;
+		ec->gpe = tmp;
+	}
 	/* Use the global lock for all EC transactions? */
 	tmp = 0;
 	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
@@ -1665,12 +1677,26 @@ static const struct acpi_device_id ec_device_ids[] = {
 	{"", 0},
 };
 
+/*
+ * This function is not Windows-compatible as Windows never enumerates the
+ * namespace EC before the main ACPI device enumeration process. It is
+ * retained for historical reasons and will be deprecated in the future.
+ */
 int __init acpi_ec_dsdt_probe(void)
 {
 	acpi_status status;
 	struct acpi_ec *ec;
 	int ret;
 
+	/*
+	 * If a platform has an ECDT, there is no need to proceed, as the
+	 * following probe is not a part of the ACPI device enumeration;
+	 * executing _STA is not safe, and thus this probe risks picking
+	 * up an invalid EC device.
+	 */
+	if (boot_ec)
+		return -ENODEV;
+
 	ec = acpi_ec_alloc();
 	if (!ec)
 		return -ENOMEM;
@@ -1753,11 +1779,43 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * Some DSDTs contain a wrong GPE setting.
+ * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
+ * https://bugzilla.kernel.org/show_bug.cgi?id=195651
+ */
+static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing ignore DSDT GPE setting.\n");
+	EC_FLAGS_IGNORE_DSDT_GPE = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_correct_ecdt, "MSI MS-171F", {
 	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
 	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUS FX502VD", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUS FX502VE", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUS GL702VMK", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUS X550VXK", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUS X580VD", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
 	{},
 };
 
@@ -1835,7 +1893,7 @@ static int acpi_ec_suspend(struct device *dev)
 	struct acpi_ec *ec =
 		acpi_driver_data(to_acpi_device(dev));
 
-	if (ec_freeze_events)
+	if (acpi_sleep_no_ec_events() && ec_freeze_events)
 		acpi_ec_disable_event(ec);
 	return 0;
 }
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 66229ff..be79f7d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -198,8 +198,12 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
                                   Suspend/Resume
   -------------------------------------------------------------------------- */
 #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
+extern bool acpi_s2idle_wakeup(void);
+extern bool acpi_sleep_no_ec_events(void);
 extern int acpi_sleep_init(void);
 #else
+static inline bool acpi_s2idle_wakeup(void) { return false; }
+static inline bool acpi_sleep_no_ec_events(void) { return true; }
 static inline int acpi_sleep_init(void) { return -ENXIO; }
 #endif
 
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 7e4fbf9..3595aa9 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -21,7 +21,7 @@
  * registered when we parsed the ACPI MADT.
  */
 
-#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
+#define pr_fmt(fmt) "ACPI: IOAPIC: " fmt
 
 #include <linux/slab.h>
 #include <linux/acpi.h>
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 656acb5..097eff0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -74,11 +74,11 @@ struct nfit_table_prev {
 	struct list_head flushes;
 };
 
-static u8 nfit_uuid[NFIT_UUID_MAX][16];
+static guid_t nfit_uuid[NFIT_UUID_MAX];
 
-const u8 *to_nfit_uuid(enum nfit_uuids id)
+const guid_t *to_nfit_uuid(enum nfit_uuids id)
 {
-	return nfit_uuid[id];
+	return &nfit_uuid[id];
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
@@ -222,7 +222,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	u32 offset, fw_status = 0;
 	acpi_handle handle;
 	unsigned int func;
-	const u8 *uuid;
+	const guid_t *guid;
 	int rc, i;
 
 	func = cmd;
@@ -245,7 +245,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
 		dsm_mask = nfit_mem->dsm_mask;
 		desc = nd_cmd_dimm_desc(cmd);
-		uuid = to_nfit_uuid(nfit_mem->family);
+		guid = to_nfit_uuid(nfit_mem->family);
 		handle = adev->handle;
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
@@ -254,7 +254,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		cmd_mask = nd_desc->cmd_mask;
 		dsm_mask = cmd_mask;
 		desc = nd_cmd_bus_desc(cmd);
-		uuid = to_nfit_uuid(NFIT_DEV_BUS);
+		guid = to_nfit_uuid(NFIT_DEV_BUS);
 		handle = adev->handle;
 		dimm_name = "bus";
 	}
@@ -289,7 +289,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 			in_buf.buffer.pointer,
 			min_t(u32, 256, in_buf.buffer.length), true);
 
-	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
+	out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
 	if (!out_obj) {
 		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
 				cmd_name);
@@ -409,7 +409,7 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
 	int i;
 
 	for (i = 0; i < NFIT_UUID_MAX; i++)
-		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
+		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
 			return i;
 	return -1;
 }
@@ -1415,7 +1415,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
 	unsigned long dsm_mask;
-	const u8 *uuid;
+	const guid_t *guid;
 	int i;
 	int family = -1;
 
@@ -1444,7 +1444,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 	/*
 	 * Until standardization materializes we need to consider 4
 	 * different command sets.  Note, that checking for function0 (bit0)
-	 * tells us if any commands are reachable through this uuid.
+	 * tells us if any commands are reachable through this GUID.
 	 */
 	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
 		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1474,9 +1474,9 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		return 0;
 	}
 
-	uuid = to_nfit_uuid(nfit_mem->family);
+	guid = to_nfit_uuid(nfit_mem->family);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
-		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
+		if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
 			set_bit(i, &nfit_mem->dsm_mask);
 
 	return 0;
@@ -1611,7 +1611,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
-	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
+	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
 	struct acpi_device *adev;
 	int i;
 
@@ -1621,7 +1621,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 		return;
 
 	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
-		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
+		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
 			set_bit(i, &nd_desc->cmd_mask);
 }
 
@@ -3051,19 +3051,19 @@ static __init int nfit_init(void)
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
 
-	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
-	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
-	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
-	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
-	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
-	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
-	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
-	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
-	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
-	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
-	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
-	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
-	acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
+	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
+	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
+	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
+	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
+	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
+	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
+	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
+	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
+	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
+	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
+	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
 
 	nfit_wq = create_singlethread_workqueue("nfit");
 	if (!nfit_wq)
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 58fb7d6..29bdd95 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -18,7 +18,6 @@
 #include <linux/libnvdimm.h>
 #include <linux/ndctl.h>
 #include <linux/types.h>
-#include <linux/uuid.h>
 #include <linux/acpi.h>
 #include <acpi/acuuid.h>
 
@@ -237,7 +236,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
 	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
 }
 
-const u8 *to_nfit_uuid(enum nfit_uuids id);
+const guid_t *to_nfit_uuid(enum nfit_uuids id);
 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
 void acpi_nfit_shutdown(void *data);
 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
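
A note on the conversion above (illustrative only, not part of the patch): with the raw 16-byte UUID arrays replaced by guid_t, string GUIDs are parsed once with guid_parse() and compared with guid_equal() instead of memcmp(). A minimal sketch, using a made-up GUID string and helper names:

	#include <linux/uuid.h>

	#define EXAMPLE_GUID_STR "4f3dca1c-fe4b-11e8-b467-0ed5f89f718b"	/* hypothetical */

	static guid_t example_guid;

	static int __init example_guid_init(void)
	{
		/* parse the string form once, at init time */
		return guid_parse(EXAMPLE_GUID_STR, &example_guid);
	}

	/* compare against a GUID embedded in a firmware table */
	static bool example_guid_matches(const u8 raw[16])
	{
		return guid_equal(&example_guid, (const guid_t *)raw);
	}
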
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 919be0a..9eec309 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -523,7 +523,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	struct acpi_pci_root *root;
 	acpi_handle handle = device->handle;
 	int no_aspm = 0;
-	bool hotadd = system_state != SYSTEM_BOOTING;
+	bool hotadd = system_state == SYSTEM_RUNNING;
 
 	root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
 	if (!root)
@@ -608,8 +608,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 		pcie_no_aspm();
 
 	pci_acpi_add_bus_pm_notifier(device);
-	if (device->wakeup.flags.run_wake)
-		device_set_run_wake(root->bus->bridge, true);
+	device_set_wakeup_capable(root->bus->bridge, device->wakeup.flags.valid);
 
 	if (hotadd) {
 		pcibios_resource_survey_bus(root->bus);
@@ -649,7 +648,7 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 	pci_stop_root_bus(root->bus);
 
 	pci_ioapic_remove(root);
-	device_set_run_wake(root->bus->bridge, false);
+	device_set_wakeup_capable(root->bus->bridge, false);
 	pci_acpi_remove_bus_pm_notifier(device);
 
 	pci_remove_root_bus(root->bus);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 1a76c78..3b7d5be 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -21,6 +21,11 @@
 #include "intel_pmic.h"
 
 #define XPOWER_GPADC_LOW	0x5b
+#define XPOWER_GPI1_CTRL	0x92
+
+#define GPI1_LDO_MASK		GENMASK(2, 0)
+#define GPI1_LDO_ON		(3 << 0)
+#define GPI1_LDO_OFF		(4 << 0)
 
 static struct pmic_table power_table[] = {
 	{
@@ -118,6 +123,10 @@ static struct pmic_table power_table[] = {
 		.reg = 0x10,
 		.bit = 0x00
 	}, /* BUC6 */
+	{
+		.address = 0x4c,
+		.reg = 0x92,
+	}, /* GPI1 */
 };
 
 /* TMP0 - TMP5 are the same, all from GPADC */
@@ -156,7 +165,12 @@ static int intel_xpower_pmic_get_power(struct regmap *regmap, int reg,
 	if (regmap_read(regmap, reg, &data))
 		return -EIO;
 
-	*value = (data & BIT(bit)) ? 1 : 0;
+	/* GPIO1 LDO regulator needs special handling */
+	if (reg == XPOWER_GPI1_CTRL)
+		*value = ((data & GPI1_LDO_MASK) == GPI1_LDO_ON);
+	else
+		*value = (data & BIT(bit)) ? 1 : 0;
+
 	return 0;
 }
 
@@ -165,6 +179,11 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
 {
 	int data;
 
+	/* GPIO1 LDO regulator needs special handling */
+	if (reg == XPOWER_GPI1_CTRL)
+		return regmap_update_bits(regmap, reg, GPI1_LDO_MASK,
+					  on ? GPI1_LDO_ON : GPI1_LDO_OFF);
+
 	if (regmap_read(regmap, reg, &data))
 		return -EIO;
 
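
As an aside on the GPI1 handling above (values illustrative, not part of the patch): the LDO state is a 3-bit field rather than a single enable bit, so a raw register read decodes like this:

	/* e.g. a raw read of 0x0b: 0x0b & GENMASK(2, 0) = 0x03 == GPI1_LDO_ON */
	bool gpi1_on = (data & GPI1_LDO_MASK) == GPI1_LDO_ON;

	/* regmap_update_bits() then rewrites only those three bits on power on/off */
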
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index a34669c..85ac848 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -42,7 +42,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 
 		if (!dev->physical_node_count) {
 			seq_printf(seq, "%c%-8s\n",
-				dev->wakeup.flags.run_wake ? '*' : ' ',
+				dev->wakeup.flags.valid ? '*' : ' ',
 				device_may_wakeup(&dev->dev) ?
 					"enabled" : "disabled");
 		} else {
@@ -58,7 +58,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 					seq_printf(seq, "\t\t");
 
 				seq_printf(seq, "%c%-8s  %s:%s\n",
-					dev->wakeup.flags.run_wake ? '*' : ' ',
+					dev->wakeup.flags.valid ? '*' : ' ',
 					(device_may_wakeup(&dev->dev) ||
 					device_may_wakeup(ldev)) ?
 					"enabled" : "disabled",
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 8697a82..591d1dd 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -268,9 +268,9 @@ static int acpi_processor_start(struct device *dev)
 		return -ENODEV;
 
 	/* Protect against concurrent CPU hotplug operations */
-	get_online_cpus();
+	cpu_hotplug_disable();
 	ret = __acpi_processor_start(device);
-	put_online_cpus();
+	cpu_hotplug_enable();
 	return ret;
 }
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 3de34633..7f9aff4 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -909,6 +909,13 @@ static long __acpi_processor_get_throttling(void *data)
 	return pr->throttling.acpi_processor_get_throttling(pr);
 }
 
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+	if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
+		return fn(arg);
+	return work_on_cpu(cpu, fn, arg);
+}
+
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	if (!pr)
@@ -926,7 +933,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	if (!cpu_online(pr->id))
 		return -ENODEV;
 
-	return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
+	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1076,13 +1083,6 @@ static long acpi_processor_throttling_fn(void *data)
 			arg->target_state, arg->force);
 }
 
-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
-{
-	if (direct)
-		return fn(arg);
-	return work_on_cpu(cpu, fn, arg);
-}
-
 static int __acpi_processor_set_throttling(struct acpi_processor *pr,
 					   int state, bool force, bool direct)
 {
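
The call_on_cpu() move above lets the throttling read run synchronously when the caller is already a per-CPU kthread pinned to the target CPU. A hedged sketch of the same pattern, with an invented callback name:

	static long example_read_state(void *arg)
	{
		struct acpi_processor *pr = arg;

		/* runs on pr->id, either directly or via work_on_cpu() */
		return pr->throttling.state;
	}

	/* direct == false: run inline only when we are already a per-CPU
	 * thread pinned to pr->id, otherwise bounce through work_on_cpu()
	 */
	static long example_get_state(struct acpi_processor *pr)
	{
		return call_on_cpu(pr->id, example_read_state, pr, false);
	}
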
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b..09f65f5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -835,7 +835,7 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
 	return err;
 }
 
-static void acpi_wakeup_gpe_init(struct acpi_device *device)
+static bool acpi_wakeup_gpe_init(struct acpi_device *device)
 {
 	static const struct acpi_device_id button_device_ids[] = {
 		{"PNP0C0C", 0},
@@ -845,13 +845,11 @@ static void acpi_wakeup_gpe_init(struct acpi_device *device)
 	};
 	struct acpi_device_wakeup *wakeup = &device->wakeup;
 	acpi_status status;
-	acpi_event_status event_status;
 
 	wakeup->flags.notifier_present = 0;
 
 	/* Power button, Lid switch always enable wakeup */
 	if (!acpi_match_device_ids(device, button_device_ids)) {
-		wakeup->flags.run_wake = 1;
 		if (!acpi_match_device_ids(device, &button_device_ids[1])) {
 			/* Do not use Lid/sleep button for S5 wakeup */
 			if (wakeup->sleep_state == ACPI_STATE_S5)
@@ -859,17 +857,12 @@ static void acpi_wakeup_gpe_init(struct acpi_device *device)
 		}
 		acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
 		device_set_wakeup_capable(&device->dev, true);
-		return;
+		return true;
 	}
 
-	acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
-				wakeup->gpe_number);
-	status = acpi_get_gpe_status(wakeup->gpe_device, wakeup->gpe_number,
-				     &event_status);
-	if (ACPI_FAILURE(status))
-		return;
-
-	wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HAS_HANDLER);
+	status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
+					 wakeup->gpe_number);
+	return ACPI_SUCCESS(status);
 }
 
 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -887,10 +880,10 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 		return;
 	}
 
-	device->wakeup.flags.valid = 1;
+	device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
 	device->wakeup.prepare_count = 0;
-	acpi_wakeup_gpe_init(device);
-	/* Call _PSW/_DSW object to disable its ability to wake the sleeping
+	/*
+	 * Call _PSW/_DSW object to disable its ability to wake the sleeping
 	 * system for the ACPI device with the _PRW object.
 	 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
 	 * So it is necessary to call _DSW object first. Only when it is not
@@ -1371,8 +1364,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 	iort_set_dma_mask(dev);
 
 	iommu = iort_iommu_configure(dev);
-	if (IS_ERR(iommu))
-		return PTR_ERR(iommu);
+	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
 
 	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
 	/*
@@ -1428,6 +1421,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
 	adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+	bool *is_spi_i2c_slave_p = data;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+		return 1;
+
+	/*
+	 * devices that are connected to UART still need to be enumerated to
+	 * platform bus
+	 */
+	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+		*is_spi_i2c_slave_p = true;
+
+	 /* no need to do more checking */
+	return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+	struct list_head resource_list;
+	bool is_spi_i2c_slave = false;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+			       &is_spi_i2c_slave);
+	acpi_dev_free_resource_list(&resource_list);
+
+	return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta)
 {
@@ -1443,6 +1467,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	acpi_bus_get_flags(device);
 	device->flags.match_driver = false;
 	device->flags.initialized = true;
+	device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
 	acpi_device_clear_enumerated(device);
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1752,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
 	return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-	bool *is_spi_i2c_slave_p = data;
-
-	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-		return 1;
-
-	/*
-	 * devices that are connected to UART still need to be enumerated to
-	 * platform bus
-	 */
-	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-		*is_spi_i2c_slave_p = true;
-
-	 /* no need to do more checking */
-	return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-	struct list_head resource_list;
-	bool is_spi_i2c_slave = false;
-
 	/*
 	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
 	 * respective parents.
 	 */
-	INIT_LIST_HEAD(&resource_list);
-	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-			       &is_spi_i2c_slave);
-	acpi_dev_free_resource_list(&resource_list);
-	if (!is_spi_i2c_slave) {
+	if (!device->flags.spi_i2c_slave) {
 		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 	} else {
@@ -1854,7 +1854,7 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (ret > 0) {
+	if (ret > 0 && !device->flags.spi_i2c_slave) {
 		acpi_device_set_enumerated(device);
 		goto ok;
 	}
@@ -1863,10 +1863,10 @@ static void acpi_bus_attach(struct acpi_device *device)
 	if (ret < 0)
 		return;
 
-	if (device->pnp.type.platform_id)
-		acpi_default_enumeration(device);
-	else
+	if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
 		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
 
  ok:
 	list_for_each_entry(child, &device->children, node)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d6..be17664 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -650,17 +650,108 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
 	.recover = acpi_pm_finish,
 };
 
+static bool s2idle_in_progress;
+static bool s2idle_wakeup;
+
+/*
+ * On platforms supporting the Low Power S0 Idle interface there is an ACPI
+ * device object with the PNP0D80 compatible device ID (System Power Management
+ * Controller) and a specific _DSM method under it.  That method, if present,
+ * can be used to indicate to the platform that the OS is transitioning into a
+ * low-power state in which certain types of activity are not desirable or that
+ * it is leaving such a state, which allows the platform to adjust its operation
+ * mode accordingly.
+ */
+static const struct acpi_device_id lps0_device_ids[] = {
+	{"PNP0D80", },
+	{"", },
+};
+
+#define ACPI_LPS0_DSM_UUID	"c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
+#define ACPI_LPS0_SCREEN_OFF	3
+#define ACPI_LPS0_SCREEN_ON	4
+#define ACPI_LPS0_ENTRY		5
+#define ACPI_LPS0_EXIT		6
+
+#define ACPI_S2IDLE_FUNC_MASK	((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+
+static acpi_handle lps0_device_handle;
+static guid_t lps0_dsm_guid;
+static char lps0_dsm_func_mask;
+
+static void acpi_sleep_run_lps0_dsm(unsigned int func)
+{
+	union acpi_object *out_obj;
+
+	if (!(lps0_dsm_func_mask & (1 << func)))
+		return;
+
+	out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);
+	ACPI_FREE(out_obj);
+
+	acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+			  func, out_obj ? "successful" : "failed");
+}
+
+static int lps0_device_attach(struct acpi_device *adev,
+			      const struct acpi_device_id *not_used)
+{
+	union acpi_object *out_obj;
+
+	if (lps0_device_handle)
+		return 0;
+
+	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+		return 0;
+
+	guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+	/* Check if the _DSM is present and as expected. */
+	out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+	if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+		char bitmask = *(char *)out_obj->buffer.pointer;
+
+		if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+			lps0_dsm_func_mask = bitmask;
+			lps0_device_handle = adev->handle;
+		}
+
+		acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+				  bitmask);
+	} else {
+		acpi_handle_debug(adev->handle,
+				  "_DSM function 0 evaluation failed\n");
+	}
+	ACPI_FREE(out_obj);
+	return 0;
+}
+
+static struct acpi_scan_handler lps0_handler = {
+	.ids = lps0_device_ids,
+	.attach = lps0_device_attach,
+};
+
 static int acpi_freeze_begin(void)
 {
 	acpi_scan_lock_acquire();
+	s2idle_in_progress = true;
 	return 0;
 }
 
 static int acpi_freeze_prepare(void)
 {
-	acpi_enable_wakeup_devices(ACPI_STATE_S0);
-	acpi_enable_all_wakeup_gpes();
-	acpi_os_wait_events_complete();
+	if (lps0_device_handle) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+	} else {
+		/*
+		 * The configuration of GPEs is changed here to avoid spurious
+		 * wakeups, but that should not be necessary if this is a
+		 * "low-power S0" platform and the low-power S0 _DSM is present.
+		 */
+		acpi_enable_all_wakeup_gpes();
+		acpi_os_wait_events_complete();
+	}
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);
 
@@ -675,8 +766,10 @@ static void acpi_freeze_wake(void)
 	 * case it has not been a wakeup event (the GPEs will be checked later).
 	 */
 	if (acpi_sci_irq_valid() &&
-	    !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+	    !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
 		pm_system_cancel_wakeup();
+		s2idle_wakeup = true;
+	}
 }
 
 static void acpi_freeze_sync(void)
@@ -689,19 +782,25 @@ static void acpi_freeze_sync(void)
 	 */
 	acpi_os_wait_events_complete();
 	flush_scheduled_work();
+	s2idle_wakeup = false;
 }
 
 static void acpi_freeze_restore(void)
 {
-	acpi_disable_wakeup_devices(ACPI_STATE_S0);
 	if (acpi_sci_irq_valid())
 		disable_irq_wake(acpi_sci_irq);
 
-	acpi_enable_all_runtime_gpes();
+	if (lps0_device_handle) {
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+	} else {
+		acpi_enable_all_runtime_gpes();
+	}
 }
 
 static void acpi_freeze_end(void)
 {
+	s2idle_in_progress = false;
 	acpi_scan_lock_release();
 }
 
@@ -724,13 +823,28 @@ static void acpi_sleep_suspend_setup(void)
 
 	suspend_set_ops(old_suspend_ordering ?
 		&acpi_suspend_ops_old : &acpi_suspend_ops);
+
+	acpi_scan_add_handler(&lps0_handler);
 	freeze_set_ops(&acpi_freeze_ops);
 }
 
 #else /* !CONFIG_SUSPEND */
+#define s2idle_in_progress	(false)
+#define s2idle_wakeup		(false)
+#define lps0_device_handle	(NULL)
 static inline void acpi_sleep_suspend_setup(void) {}
 #endif /* !CONFIG_SUSPEND */
 
+bool acpi_s2idle_wakeup(void)
+{
+	return s2idle_wakeup;
+}
+
+bool acpi_sleep_no_ec_events(void)
+{
+	return !s2idle_in_progress || !lps0_device_handle;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 1b5ee1e..e414fab 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
 	    container_of(bin_attr, struct acpi_table_attr, attr);
 	struct acpi_table_header *table_header = NULL;
 	acpi_status status;
+	ssize_t rc;
 
 	status = acpi_get_table(table_attr->name, table_attr->instance,
 				&table_header);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
 
-	return memory_read_from_buffer(buf, count, &offset,
-				       table_header, table_header->length);
+	rc = memory_read_from_buffer(buf, count, &offset, table_header,
+			table_header->length);
+	acpi_put_table(table_header);
+	return rc;
 }
 
 static int acpi_table_attr_init(struct kobject *tables_obj,
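
The acpi_put_table() addition above balances the reference taken by acpi_get_table(); the general pattern (a generic sketch, function name invented) is to drop the table on every exit path once the data has been copied out:

	static ssize_t example_read_table(char *buf, size_t count, loff_t *off)
	{
		struct acpi_table_header *table;
		acpi_status status;
		ssize_t ret;

		status = acpi_get_table(ACPI_SIG_DSDT, 1, &table);	/* takes a ref */
		if (ACPI_FAILURE(status))
			return -ENODEV;

		ret = memory_read_from_buffer(buf, count, off, table, table->length);

		acpi_put_table(table);		/* drop the ref on every path */
		return ret;
	}
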
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 27d0dcf..b9d956c 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -613,19 +613,19 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
 /**
  * acpi_evaluate_dsm - evaluate device's _DSM method
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes
+ * @guid: GUID of requested functions, should be 16 bytes
  * @rev: revision number of requested function
  * @func: requested function number
  * @argv4: the function specific parameter
  *
- * Evaluate device's _DSM method with specified UUID, revision id and
+ * Evaluate device's _DSM method with specified GUID, revision id and
  * function number. Caller needs to free the returned object.
  *
  * Though ACPI defines the fourth parameter for _DSM should be a package,
  * some old BIOSes do expect a buffer or an integer etc.
  */
 union acpi_object *
-acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
+acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
 		  union acpi_object *argv4)
 {
 	acpi_status ret;
@@ -638,7 +638,7 @@ acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
 
 	params[0].type = ACPI_TYPE_BUFFER;
 	params[0].buffer.length = 16;
-	params[0].buffer.pointer = (char *)uuid;
+	params[0].buffer.pointer = (u8 *)guid;
 	params[1].type = ACPI_TYPE_INTEGER;
 	params[1].integer.value = rev;
 	params[2].type = ACPI_TYPE_INTEGER;
@@ -666,7 +666,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
 /**
  * acpi_check_dsm - check if _DSM method supports requested functions.
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes at least
+ * @guid: GUID of requested functions, should be 16 bytes at least
  * @rev: revision number of requested functions
  * @funcs: bitmap of requested functions
  *
@@ -674,7 +674,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
  * functions. Currently only support 64 functions at maximum, should be
  * enough for now.
  */
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
 {
 	int i;
 	u64 mask = 0;
@@ -683,7 +683,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 	if (funcs == 0)
 		return false;
 
-	obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
+	obj = acpi_evaluate_dsm(handle, guid, rev, 0, NULL);
 	if (!obj)
 		return false;
 
@@ -697,7 +697,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 
 	/*
 	 * Bit 0 indicates whether there's support for any functions other than
-	 * function 0 for the specified UUID and revision.
+	 * function 0 for the specified GUID and revision.
 	 */
 	if ((mask & 0x1) && (mask & funcs) == funcs)
 		return true;
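
With the prototype change above, callers now pass a guid_t instead of a raw byte array. A hedged usage sketch from a driver's point of view (the GUID, the GUID_INIT initializer from <linux/uuid.h>, and the function numbers are assumptions for illustration):

	#include <linux/acpi.h>

	static const guid_t my_dsm_guid =
		GUID_INIT(0x4f3dca1c, 0xfe4b, 0x11e8,
			  0xb4, 0x67, 0x0e, 0xd5, 0xf8, 0x9f, 0x71, 0x8b);

	static void example_run_dsm(acpi_handle handle)
	{
		union acpi_object *obj;

		/* does revision 1 support function 2? */
		if (!acpi_check_dsm(handle, &my_dsm_guid, 1, BIT(2)))
			return;

		obj = acpi_evaluate_dsm(handle, &my_dsm_guid, 1, 2, NULL);
		if (obj) {
			/* ... consume obj ... */
			ACPI_FREE(obj);
		}
	}
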
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 7f48156..d179e8d 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,6 +305,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
 		},
 	},
+	{
+	 .callback = video_detect_force_native,
+	 .ident = "Dell Precision 7510",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
+		},
+	},
 	{ },
 };
 
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index a56fa2a..e0f74dd 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -105,6 +105,7 @@ static ssize_t driver_override_store(struct device *_dev,
 
 	return count;
 }
+static DEVICE_ATTR_RW(driver_override);
 
 #define amba_attr_func(name,fmt,arg...)					\
 static ssize_t name##_show(struct device *_dev,				\
@@ -112,25 +113,23 @@ static ssize_t name##_show(struct device *_dev,				\
 {									\
 	struct amba_device *dev = to_amba_device(_dev);			\
 	return sprintf(buf, fmt, arg);					\
-}
-
-#define amba_attr(name,fmt,arg...)	\
-amba_attr_func(name,fmt,arg)		\
-static DEVICE_ATTR(name, S_IRUGO, name##_show, NULL)
+}									\
+static DEVICE_ATTR_RO(name)
 
 amba_attr_func(id, "%08x\n", dev->periphid);
-amba_attr(irq0, "%u\n", dev->irq[0]);
-amba_attr(irq1, "%u\n", dev->irq[1]);
+amba_attr_func(irq0, "%u\n", dev->irq[0]);
+amba_attr_func(irq1, "%u\n", dev->irq[1]);
 amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n",
 	 (unsigned long long)dev->res.start, (unsigned long long)dev->res.end,
 	 dev->res.flags);
 
-static struct device_attribute amba_dev_attrs[] = {
-	__ATTR_RO(id),
-	__ATTR_RO(resource),
-	__ATTR_RW(driver_override),
-	__ATTR_NULL,
+static struct attribute *amba_dev_attrs[] = {
+	&dev_attr_id.attr,
+	&dev_attr_resource.attr,
+	&dev_attr_driver_override.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(amba_dev);
 
 #ifdef CONFIG_PM
 /*
@@ -192,7 +191,7 @@ static const struct dev_pm_ops amba_pm = {
  */
 struct bus_type amba_bustype = {
 	.name		= "amba",
-	.dev_attrs	= amba_dev_attrs,
+	.dev_groups	= amba_dev_groups,
 	.match		= amba_match,
 	.uevent		= amba_uevent,
 	.pm		= &amba_pm,
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index ed6a30c..940ddbc 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -25,7 +25,7 @@
  *
  *
  * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
+ * as Documentation/driver-api/libata.rst
  *
  * AHCI hardware documentation:
  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2fc5240..1e1c355 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -24,7 +24,7 @@
  *
  *
  * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
+ * as Documentation/driver-api/libata.rst
  *
  * AHCI hardware documentation:
  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+				    struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "Acer Switch Alpha 12",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+			},
+		},
+		{ }
+	};
+
+	if (dmi_check_system(sysids)) {
+		dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+		if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+			hpriv->port_map = 0x7;
+			hpriv->cap = 0xC734FF02;
+		}
+	}
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 "online status unreliable, applying workaround\n");
 	}
 
+
+	/* Acer SA5-271 workaround modifies private_data */
+	acer_sa5_271_workaround(hpriv, pdev);
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 5db6ab2..30f67a1 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -24,7 +24,7 @@
  *
  *
  * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
+ * as Documentation/driver-api/libata.rst
  *
  * AHCI hardware documentation:
  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ffbe625..8401c3b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -33,7 +33,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available at http://developer.intel.com/
  *
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3159f9e..6154f0e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -24,7 +24,7 @@
  *
  *
  * libata documentation is available via 'make {ps|pdf}docs',
- * as Documentation/DocBook/libata.*
+ * as Documentation/driver-api/libata.rst
  *
  * AHCI hardware documentation:
  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b..cd2eab6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
-		dev_err(dev, "no irq\n");
-		return -EINVAL;
+		if (irq != -EPROBE_DEFER)
+			dev_err(dev, "no irq\n");
+		return irq;
 	}
 
 	hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d83b8c..b82d6bb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available from http://www.t13.org/ and
  *  http://www.sata-io.org/
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
 	}
 
 	force_ent->port = simple_strtoul(id, &endp, 10);
-	if (p == endp || *endp != '\0') {
+	if (id == endp || *endp != '\0') {
 		*reason = "invalid port/link";
 		return -EINVAL;
 	}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ef68232..7e33e20 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available from http://www.t13.org/ and
  *  http://www.sata-io.org/
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 49ba983..b0866f040 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available from
  *  - http://www.t10.org/
@@ -3398,9 +3398,10 @@ static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
  *
  * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
  * an SCT Write Same command.
- * Based on WRITE SAME has the UNMAP flag
- *   When set translate to DSM TRIM
- *   When clear translate to SCT Write Same
+ * Based on WRITE SAME has the UNMAP flag:
+ *
+ *   - When set translate to DSM TRIM
+ *   - When clear translate to SCT Write Same
  */
 static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
 {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 274d6d7..0529213 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available from http://www.t13.org/ and
  *  http://www.sata-io.org/
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index f3a65a3..8a01d09 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -174,8 +174,7 @@ void zpodd_enable_run_wake(struct ata_device *dev)
 	sdev_disable_disk_events(dev->sdev);
 
 	zpodd->powered_off = true;
-	device_set_run_wake(&dev->tdev, true);
-	acpi_pm_device_run_wake(&dev->tdev, true);
+	acpi_pm_set_device_wakeup(&dev->tdev, true);
 }
 
 /* Disable runtime wake capability if it is enabled */
@@ -183,10 +182,8 @@ void zpodd_disable_run_wake(struct ata_device *dev)
 {
 	struct zpodd *zpodd = dev->zpodd;
 
-	if (zpodd->powered_off) {
-		acpi_pm_device_run_wake(&dev->tdev, false);
-		device_set_run_wake(&dev->tdev, false);
-	}
+	if (zpodd->powered_off)
+		acpi_pm_set_device_wakeup(&dev->tdev, false);
 }
 
 /*
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 120fce0..5afe35b 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -21,7 +21,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  */
 
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index d9ef9e27..82bfd51 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -17,7 +17,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware information only available under NDA.
  *
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 64d682c..f1e873a 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -21,7 +21,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *
  *  Supports ATA disks in single-packet ADMA mode.
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b66bcda..3b2246d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
 	struct ata_host *host;
 	struct mv_host_priv *hpriv;
 	struct resource *res;
-	void __iomem *mmio;
 	int n_ports = 0, irq = 0;
 	int rc;
 	int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
 	 * Get the register base first
 	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mmio = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(mmio))
-		return PTR_ERR(mmio);
+	if (res == NULL)
+		return -EINVAL;
 
 	/* allocate host */
 	if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
 	hpriv->board_idx = chip_soc;
 
 	host->iomap = NULL;
-	hpriv->base = mmio - SATAHC0_REG_BASE;
+	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+
+	hpriv->base -= SATAHC0_REG_BASE;
 
 	hpriv->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(hpriv->clk))
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 734f563..8c683dd 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -21,7 +21,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  No hardware documentation available outside of NVIDIA.
  *  This driver programs the NVIDIA SATA controller in a similar
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 0fa211e..d032bf6 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware information only available under NDA.
  *
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
index 00d6000..61633ef 100644
--- a/drivers/ata/sata_promise.h
+++ b/drivers/ata/sata_promise.h
@@ -20,7 +20,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  */
 
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index af987a4..1fe9416 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -23,7 +23,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  */
 
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 5d38245..b7939a2 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to get access to sata clock\n");
 		return PTR_ERR(priv->clk);
 	}
-	clk_prepare_enable(priv->clk);
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	host = ata_host_alloc(&pdev->dev, 1);
 	if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct sata_rcar_priv *priv = host->private_data;
 	void __iomem *base = priv->base;
+	int ret;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	/* ack and mask */
 	iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);
 	struct sata_rcar_priv *priv = host->private_data;
+	int ret;
 
-	clk_prepare_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
 
 	sata_rcar_setup_port(host);
 
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 29bcff0..ed76f07 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Documentation for SiI 3112:
  *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index d1637ac..30f4f35 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -24,7 +24,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available under NDA.
  *
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index ff614be..0fd6ac7 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -30,7 +30,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available under NDA.
  *
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 48301cb..405e606 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -24,7 +24,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available under NDA.
  *
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 08f98c3..4f6e8d8 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -18,7 +18,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available under NDA.
  *
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index f3f538e..22e96fc 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -25,7 +25,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available under NDA.
  *
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 183eb52..9648127 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -26,7 +26,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Vitesse hardware documentation presumably available under NDA.
  *  Intel 31244 (same hardware interface) documentation presumably
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e0c014c..7a8b8fb 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1345,14 +1345,11 @@ static inline void input_state_falling(struct logical_input *input)
 
 static void panel_process_inputs(void)
 {
-	struct list_head *item;
 	struct logical_input *input;
 
 	keypressed = 0;
 	inputs_stable = 1;
-	list_for_each(item, &logical_inputs) {
-		input = list_entry(item, struct logical_input, list);
-
+	list_for_each_entry(input, &logical_inputs, list) {
 		switch (input->state) {
 		case INPUT_ST_LOW:
 			if ((phys_curr & input->mask) != input->value)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d718ae4..f046d21 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -339,4 +339,12 @@
 
 endif
 
+config GENERIC_ARCH_TOPOLOGY
+	bool
+	help
+	  Enable support for architectures' common topology code: e.g., parsing
+	  CPU capacity information from DT, usage of such information for
+	  appropriate scaling, sysfs interface for changing capacity values at
+	  runtime.
+
 endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index f2816f6..397e5c3 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_PINCTRL) += pinctrl.o
 obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
 obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
 
 obj-y			+= test/
 
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
new file mode 100644
index 0000000..d1c33a8
--- /dev/null
+++ b/drivers/base/arch_topology.c
@@ -0,0 +1,243 @@
+/*
+ * Arch specific cpu topology information
+ *
+ * Copyright (C) 2016, ARM Ltd.
+ * Written by: Juri Lelli, ARM Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/acpi.h>
+#include <linux/arch_topology.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sched/topology.h>
+
+static DEFINE_MUTEX(cpu_scale_mutex);
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sprintf(buf, "%lu\n",
+			topology_get_cpu_scale(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int this_cpu = cpu->dev.id;
+	int i;
+	unsigned long new_capacity;
+	ssize_t ret;
+
+	if (!count)
+		return 0;
+
+	ret = kstrtoul(buf, 0, &new_capacity);
+	if (ret)
+		return ret;
+	if (new_capacity > SCHED_CAPACITY_SCALE)
+		return -EINVAL;
+
+	mutex_lock(&cpu_scale_mutex);
+	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+		topology_set_cpu_scale(i, new_capacity);
+	mutex_unlock(&cpu_scale_mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+	int i;
+	struct device *cpu;
+
+	for_each_possible_cpu(i) {
+		cpu = get_cpu_device(i);
+		if (!cpu) {
+			pr_err("%s: too early to get CPU%d device!\n",
+			       __func__, i);
+			continue;
+		}
+		device_create_file(cpu, &dev_attr_cpu_capacity);
+	}
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+void topology_normalize_cpu_scale(void)
+{
+	u64 capacity;
+	int cpu;
+
+	if (!raw_capacity || cap_parsing_failed)
+		return;
+
+	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	mutex_lock(&cpu_scale_mutex);
+	for_each_possible_cpu(cpu) {
+		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+			 cpu, raw_capacity[cpu]);
+		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+			/ capacity_scale;
+		topology_set_cpu_scale(cpu, capacity);
+		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+			cpu, topology_get_cpu_scale(NULL, cpu));
+	}
+	mutex_unlock(&cpu_scale_mutex);
+}
+
+int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+	int ret = 1;
+	u32 cpu_capacity;
+
+	if (cap_parsing_failed)
+		return !ret;
+
+	ret = of_property_read_u32(cpu_node,
+				   "capacity-dmips-mhz",
+				   &cpu_capacity);
+	if (!ret) {
+		if (!raw_capacity) {
+			raw_capacity = kcalloc(num_possible_cpus(),
+					       sizeof(*raw_capacity),
+					       GFP_KERNEL);
+			if (!raw_capacity) {
+				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+				cap_parsing_failed = true;
+				return 0;
+			}
+		}
+		capacity_scale = max(cpu_capacity, capacity_scale);
+		raw_capacity[cpu] = cpu_capacity;
+		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+			cpu_node->full_name, raw_capacity[cpu]);
+	} else {
+		if (raw_capacity) {
+			pr_err("cpu_capacity: missing %s raw capacity\n",
+				cpu_node->full_name);
+			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+		}
+		cap_parsing_failed = true;
+		kfree(raw_capacity);
+	}
+
+	return !ret;
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+			   unsigned long val,
+			   void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu;
+
+	if (cap_parsing_failed || cap_parsing_done)
+		return 0;
+
+	switch (val) {
+	case CPUFREQ_NOTIFY:
+		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+				cpumask_pr_args(policy->related_cpus),
+				cpumask_pr_args(cpus_to_visit));
+		cpumask_andnot(cpus_to_visit,
+			       cpus_to_visit,
+			       policy->related_cpus);
+		for_each_cpu(cpu, policy->related_cpus) {
+			raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+					    policy->cpuinfo.max_freq / 1000UL;
+			capacity_scale = max(raw_capacity[cpu], capacity_scale);
+		}
+		if (cpumask_empty(cpus_to_visit)) {
+			topology_normalize_cpu_scale();
+			kfree(raw_capacity);
+			pr_debug("cpu_capacity: parsing done\n");
+			cap_parsing_done = true;
+			schedule_work(&parsing_done_work);
+		}
+	}
+	return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+	.notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	/*
+	 * on ACPI-based systems we need to use the default cpu capacity
+	 * until we have the necessary code to parse the cpu capacity, so
+	 * skip registering cpufreq notifier.
+	 */
+	if (!acpi_disabled || !raw_capacity)
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+		return -ENOMEM;
+	}
+
+	cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+
+#else
+static int __init free_raw_capacity(void)
+{
+	kfree(raw_capacity);
+
+	return 0;
+}
+core_initcall(free_raw_capacity);
+#endif
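
To make the normalization in the new file concrete, a worked example with invented numbers (assuming the DT pass already stored 485 and 1024 as the per-cluster capacities and cpufreq later reports maximum frequencies of 1400 MHz and 1800 MHz):

	/* the cpufreq notifier rescales each capacity by max_freq / 1000:
	 *   raw_little = 485  * 1400 =  679000
	 *   raw_big    = 1024 * 1800 = 1843200   -> capacity_scale
	 *
	 * topology_normalize_cpu_scale() then maps back to SCHED_CAPACITY_SCALE:
	 *   little = (679000  << SCHED_CAPACITY_SHIFT) / 1843200 ~=  377
	 *   big    = (1843200 << SCHED_CAPACITY_SHIFT) / 1843200  = 1024
	 */
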
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 6470eb8..e162c9a 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -466,35 +466,6 @@ int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
 }
 EXPORT_SYMBOL_GPL(bus_for_each_drv);
 
-static int device_add_attrs(struct bus_type *bus, struct device *dev)
-{
-	int error = 0;
-	int i;
-
-	if (!bus->dev_attrs)
-		return 0;
-
-	for (i = 0; bus->dev_attrs[i].attr.name; i++) {
-		error = device_create_file(dev, &bus->dev_attrs[i]);
-		if (error) {
-			while (--i >= 0)
-				device_remove_file(dev, &bus->dev_attrs[i]);
-			break;
-		}
-	}
-	return error;
-}
-
-static void device_remove_attrs(struct bus_type *bus, struct device *dev)
-{
-	int i;
-
-	if (bus->dev_attrs) {
-		for (i = 0; bus->dev_attrs[i].attr.name; i++)
-			device_remove_file(dev, &bus->dev_attrs[i]);
-	}
-}
-
 /**
  * bus_add_device - add device to bus
  * @dev: device being added
@@ -510,12 +481,9 @@ int bus_add_device(struct device *dev)
 
 	if (bus) {
 		pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
-		error = device_add_attrs(bus, dev);
-		if (error)
-			goto out_put;
 		error = device_add_groups(dev, bus->dev_groups);
 		if (error)
-			goto out_id;
+			goto out_put;
 		error = sysfs_create_link(&bus->p->devices_kset->kobj,
 						&dev->kobj, dev_name(dev));
 		if (error)
@@ -532,8 +500,6 @@ int bus_add_device(struct device *dev)
 	sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
 out_groups:
 	device_remove_groups(dev, bus->dev_groups);
-out_id:
-	device_remove_attrs(bus, dev);
 out_put:
 	bus_put(dev->bus);
 	return error;
@@ -590,7 +556,6 @@ void bus_remove_device(struct device *dev)
 	sysfs_remove_link(&dev->kobj, "subsystem");
 	sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
 			  dev_name(dev));
-	device_remove_attrs(dev->bus, dev);
 	device_remove_groups(dev, dev->bus->dev_groups);
 	if (klist_node_attached(&dev->p->knode_bus))
 		klist_del(&dev->p->knode_bus);
@@ -648,10 +613,7 @@ static void remove_probe_files(struct bus_type *bus)
 static ssize_t uevent_store(struct device_driver *drv, const char *buf,
 			    size_t count)
 {
-	enum kobject_action action;
-
-	if (kobject_action_type(buf, count, &action) == 0)
-		kobject_uevent(&drv->p->kobj, action);
+	kobject_synth_uevent(&drv->p->kobj, buf, count);
 	return count;
 }
 static DRIVER_ATTR_WO(uevent);
@@ -868,10 +830,7 @@ static void klist_devices_put(struct klist_node *n)
 static ssize_t bus_uevent_store(struct bus_type *bus,
 				const char *buf, size_t count)
 {
-	enum kobject_action action;
-
-	if (kobject_action_type(buf, count, &action) == 0)
-		kobject_uevent(&bus->p->subsys.kobj, action);
+	kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
 	return count;
 }
 static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index a2b2896..52eb8e6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -119,36 +119,6 @@ static void class_put(struct class *cls)
 		kset_put(&cls->p->subsys);
 }
 
-static int add_class_attrs(struct class *cls)
-{
-	int i;
-	int error = 0;
-
-	if (cls->class_attrs) {
-		for (i = 0; cls->class_attrs[i].attr.name; i++) {
-			error = class_create_file(cls, &cls->class_attrs[i]);
-			if (error)
-				goto error;
-		}
-	}
-done:
-	return error;
-error:
-	while (--i >= 0)
-		class_remove_file(cls, &cls->class_attrs[i]);
-	goto done;
-}
-
-static void remove_class_attrs(struct class *cls)
-{
-	int i;
-
-	if (cls->class_attrs) {
-		for (i = 0; cls->class_attrs[i].attr.name; i++)
-			class_remove_file(cls, &cls->class_attrs[i]);
-	}
-}
-
 static void klist_class_dev_get(struct klist_node *n)
 {
 	struct device *dev = container_of(n, struct device, knode_class);
@@ -217,8 +187,6 @@ int __class_register(struct class *cls, struct lock_class_key *key)
 	}
 	error = class_add_groups(class_get(cls), cls->class_groups);
 	class_put(cls);
-	error = add_class_attrs(class_get(cls));
-	class_put(cls);
 	return error;
 }
 EXPORT_SYMBOL_GPL(__class_register);
@@ -226,7 +194,6 @@ EXPORT_SYMBOL_GPL(__class_register);
 void class_unregister(struct class *cls)
 {
 	pr_debug("device class '%s': unregistering\n", cls->name);
-	remove_class_attrs(cls);
 	class_remove_groups(cls, cls->class_groups);
 	kset_unregister(&cls->p->subsys);
 }
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bbecaf9..8dde934 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -981,12 +981,9 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	enum kobject_action action;
+	if (kobject_synth_uevent(&dev->kobj, buf, count))
+		dev_err(dev, "uevent: failed to send synthetic uevent\n");
 
-	if (kobject_action_type(buf, count, &action) == 0)
-		kobject_uevent(&dev->kobj, action);
-	else
-		dev_err(dev, "uevent: unknown action-string\n");
 	return count;
 }
 static DEVICE_ATTR_RW(uevent);
@@ -2884,3 +2881,19 @@ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
 	else
 		dev->fwnode = fwnode;
 }
+
+/**
+ * device_set_of_node_from_dev - reuse device-tree node of another device
+ * @dev: device whose device-tree node is being set
+ * @dev2: device whose device-tree node is being reused
+ *
+ * Takes another reference to the new device-tree node after first dropping
+ * any reference held to the old node.
+ */
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
+{
+	of_node_put(dev->of_node);
+	dev->of_node = of_node_get(dev2->of_node);
+	dev->of_node_reused = true;
+}
+EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
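
device_set_of_node_from_dev() above targets the case where one physical device is represented by more than one struct device. A hedged sketch of a parent driver handing its DT node to a child it creates (driver and device names invented):

	static int example_parent_probe(struct platform_device *pdev)
	{
		struct platform_device *child;
		int ret;

		child = platform_device_alloc("example-child", PLATFORM_DEVID_AUTO);
		if (!child)
			return -ENOMEM;

		/* reuse the parent's of_node; old ref dropped, new one taken */
		device_set_of_node_from_dev(&child->dev, &pdev->dev);

		ret = platform_device_add(child);
		if (ret)
			platform_device_put(child);
		return ret;
	}
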
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index f3deb6a..9dbef4d 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -275,6 +275,24 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 EXPORT_SYMBOL(dma_common_mmap);
 
 #ifdef CONFIG_MMU
+static struct vm_struct *__dma_common_pages_remap(struct page **pages,
+			size_t size, unsigned long vm_flags, pgprot_t prot,
+			const void *caller)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area_caller(size, vm_flags, caller);
+	if (!area)
+		return NULL;
+
+	if (map_vm_area(area, prot, pages)) {
+		vunmap(area->addr);
+		return NULL;
+	}
+
+	return area;
+}
+
 /*
  * remaps an array of PAGE_SIZE pages into another vm_area
  * Cannot be used in non-sleeping contexts
@@ -285,17 +303,12 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 {
 	struct vm_struct *area;
 
-	area = get_vm_area_caller(size, vm_flags, caller);
+	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
 	if (!area)
 		return NULL;
 
 	area->pages = pages;
 
-	if (map_vm_area(area, prot, pages)) {
-		vunmap(area->addr);
-		return NULL;
-	}
-
 	return area->addr;
 }
 
@@ -310,7 +323,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 {
 	int i;
 	struct page **pages;
-	void *ptr;
+	struct vm_struct *area;
 
 	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
 	if (!pages)
@@ -319,11 +332,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		pages[i] = nth_page(page, i);
 
-	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
 
 	kfree(pages);
 
-	return ptr;
+	if (!area)
+		return NULL;
+	return area->addr;
 }
 
 /*
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index ac350c5..b9f907e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -260,6 +260,38 @@ static int fw_cache_piggyback_on_request(const char *name);
  * guarding for corner cases a global lock should be OK */
 static DEFINE_MUTEX(fw_lock);
 
+static bool __enable_firmware = false;
+
+static void enable_firmware(void)
+{
+	mutex_lock(&fw_lock);
+	__enable_firmware = true;
+	mutex_unlock(&fw_lock);
+}
+
+static void disable_firmware(void)
+{
+	mutex_lock(&fw_lock);
+	__enable_firmware = false;
+	mutex_unlock(&fw_lock);
+}
+
+/*
+ * When disabled, only the built-in firmware and the firmware cache will be
+ * used to look for firmware.
+ */
+static bool firmware_enabled(void)
+{
+	bool enabled = false;
+
+	mutex_lock(&fw_lock);
+	if (__enable_firmware)
+		enabled = true;
+	mutex_unlock(&fw_lock);
+
+	return enabled;
+}
+
 static struct firmware_cache fw_cache;
 
 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
@@ -523,6 +555,44 @@ static int fw_add_devm_name(struct device *dev, const char *name)
 }
 #endif
 
+static int assign_firmware_buf(struct firmware *fw, struct device *device,
+			       unsigned int opt_flags)
+{
+	struct firmware_buf *buf = fw->priv;
+
+	mutex_lock(&fw_lock);
+	if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
+		mutex_unlock(&fw_lock);
+		return -ENOENT;
+	}
+
+	/*
+	 * add firmware name into devres list so that we can auto cache
+	 * and uncache firmware for device.
+	 *
+	 * device may have been deleted already, but the problem
+	 * should be fixed in devres or driver core.
+	 */
+	/* don't cache firmware handled without uevent */
+	if (device && (opt_flags & FW_OPT_UEVENT) &&
+	    !(opt_flags & FW_OPT_NOCACHE))
+		fw_add_devm_name(device, buf->fw_id);
+
+	/*
+	 * After caching firmware image is started, let it piggyback
+	 * on request firmware.
+	 */
+	if (!(opt_flags & FW_OPT_NOCACHE) &&
+	    buf->fwc->state == FW_LOADER_START_CACHE) {
+		if (fw_cache_piggyback_on_request(buf->fw_id))
+			kref_get(&buf->ref);
+	}
+
+	/* pass the pages buffer to driver at the last minute */
+	fw_set_page_data(buf, fw);
+	mutex_unlock(&fw_lock);
+	return 0;
+}
 
 /*
  * user-mode helper code
@@ -562,22 +632,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 
 static LIST_HEAD(pending_fw_head);
 
-/* reboot notifier for avoid deadlock with usermode_lock */
-static int fw_shutdown_notify(struct notifier_block *unused1,
-			      unsigned long unused2, void *unused3)
+static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
 {
-	mutex_lock(&fw_lock);
-	while (!list_empty(&pending_fw_head))
-		__fw_load_abort(list_first_entry(&pending_fw_head,
-					       struct firmware_buf,
-					       pending_list));
-	mutex_unlock(&fw_lock);
-	return NOTIFY_DONE;
-}
+	struct firmware_buf *buf;
+	struct firmware_buf *next;
 
-static struct notifier_block fw_shutdown_nb = {
-	.notifier_call = fw_shutdown_notify,
-};
+	mutex_lock(&fw_lock);
+	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
+		if (!buf->need_uevent || !only_kill_custom)
+			 __fw_load_abort(buf);
+	}
+	mutex_unlock(&fw_lock);
+}
 
 static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
 			    char *buf)
@@ -1036,46 +1102,56 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
 
 static int fw_load_from_user_helper(struct firmware *firmware,
 				    const char *name, struct device *device,
-				    unsigned int opt_flags, long timeout)
+				    unsigned int opt_flags)
 {
 	struct firmware_priv *fw_priv;
+	long timeout;
+	int ret;
+
+	timeout = firmware_loading_timeout();
+	if (opt_flags & FW_OPT_NOWAIT) {
+		timeout = usermodehelper_read_lock_wait(timeout);
+		if (!timeout) {
+			dev_dbg(device, "firmware: %s loading timed out\n",
+				name);
+			return -EBUSY;
+		}
+	} else {
+		ret = usermodehelper_read_trylock();
+		if (WARN_ON(ret)) {
+			dev_err(device, "firmware: %s will not be loaded\n",
+				name);
+			return ret;
+		}
+	}
 
 	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
-	if (IS_ERR(fw_priv))
-		return PTR_ERR(fw_priv);
+	if (IS_ERR(fw_priv)) {
+		ret = PTR_ERR(fw_priv);
+		goto out_unlock;
+	}
 
 	fw_priv->buf = firmware->priv;
-	return _request_firmware_load(fw_priv, opt_flags, timeout);
-}
+	ret = _request_firmware_load(fw_priv, opt_flags, timeout);
 
-#ifdef CONFIG_PM_SLEEP
-/* kill pending requests without uevent to avoid blocking suspend */
-static void kill_requests_without_uevent(void)
-{
-	struct firmware_buf *buf;
-	struct firmware_buf *next;
+	if (!ret)
+		ret = assign_firmware_buf(firmware, device, opt_flags);
 
-	mutex_lock(&fw_lock);
-	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
-		if (!buf->need_uevent)
-			 __fw_load_abort(buf);
-	}
-	mutex_unlock(&fw_lock);
+out_unlock:
+	usermodehelper_read_unlock();
+
+	return ret;
 }
-#endif
 
 #else /* CONFIG_FW_LOADER_USER_HELPER */
 static inline int
 fw_load_from_user_helper(struct firmware *firmware, const char *name,
-			 struct device *device, unsigned int opt_flags,
-			 long timeout)
+			 struct device *device, unsigned int opt_flags)
 {
 	return -ENOENT;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static inline void kill_requests_without_uevent(void) { }
-#endif
+static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
 
 #endif /* CONFIG_FW_LOADER_USER_HELPER */
 
@@ -1124,45 +1200,6 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 	return 1; /* need to load */
 }
 
-static int assign_firmware_buf(struct firmware *fw, struct device *device,
-			       unsigned int opt_flags)
-{
-	struct firmware_buf *buf = fw->priv;
-
-	mutex_lock(&fw_lock);
-	if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
-		mutex_unlock(&fw_lock);
-		return -ENOENT;
-	}
-
-	/*
-	 * add firmware name into devres list so that we can auto cache
-	 * and uncache firmware for device.
-	 *
-	 * device may has been deleted already, but the problem
-	 * should be fixed in devres or driver core.
-	 */
-	/* don't cache firmware handled without uevent */
-	if (device && (opt_flags & FW_OPT_UEVENT) &&
-	    !(opt_flags & FW_OPT_NOCACHE))
-		fw_add_devm_name(device, buf->fw_id);
-
-	/*
-	 * After caching firmware image is started, let it piggyback
-	 * on request firmware.
-	 */
-	if (!(opt_flags & FW_OPT_NOCACHE) &&
-	    buf->fwc->state == FW_LOADER_START_CACHE) {
-		if (fw_cache_piggyback_on_request(buf->fw_id))
-			kref_get(&buf->ref);
-	}
-
-	/* pass the pages buffer to driver at the last minute */
-	fw_set_page_data(buf, fw);
-	mutex_unlock(&fw_lock);
-	return 0;
-}
-
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1170,7 +1207,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		  unsigned int opt_flags)
 {
 	struct firmware *fw = NULL;
-	long timeout;
 	int ret;
 
 	if (!firmware_p)
@@ -1185,23 +1221,10 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
-	ret = 0;
-	timeout = firmware_loading_timeout();
-	if (opt_flags & FW_OPT_NOWAIT) {
-		timeout = usermodehelper_read_lock_wait(timeout);
-		if (!timeout) {
-			dev_dbg(device, "firmware: %s loading timed out\n",
-				name);
-			ret = -EBUSY;
-			goto out;
-		}
-	} else {
-		ret = usermodehelper_read_trylock();
-		if (WARN_ON(ret)) {
-			dev_err(device, "firmware: %s will not be loaded\n",
-				name);
-			goto out;
-		}
+	if (!firmware_enabled()) {
+		WARN(1, "firmware request while host is not available\n");
+		ret = -EHOSTDOWN;
+		goto out;
 	}
 
 	ret = fw_get_filesystem_firmware(device, fw->priv);
@@ -1213,15 +1236,11 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		if (opt_flags & FW_OPT_USERHELPER) {
 			dev_warn(device, "Falling back to user helper\n");
 			ret = fw_load_from_user_helper(fw, name, device,
-						       opt_flags, timeout);
+						       opt_flags);
 		}
-	}
-
-	if (!ret)
+	} else
 		ret = assign_firmware_buf(fw, device, opt_flags);
 
-	usermodehelper_read_unlock();
-
  out:
 	if (ret < 0) {
 		release_firmware(fw);
@@ -1717,6 +1736,62 @@ static void device_uncache_fw_images_delay(unsigned long delay)
 			   msecs_to_jiffies(delay));
 }
 
+/**
+ * fw_pm_notify - notifier for suspend/resume
+ * @notify_block: unused
+ * @mode: mode we are switching to
+ * @unused: unused
+ *
+ * Used to modify the firmware_class state as we move between system states.
+ * The firmware_class implements a firmware cache to enable device drivers
+ * to fetch firmware upon resume before the root filesystem is ready. We
+ * disable API calls which do not use the built-in firmware or the firmware
+ * cache when we know these calls will not work.
+ *
+ * The inner logic behind all this is a bit complex, so it is worth summarizing
+ * the kernel's own suspend/resume process with context and a focus on how this
+ * can impact the firmware API.
+ *
+ * First a review on how we go to suspend::
+ *
+ *	pm_suspend() --> enter_state() -->
+ *	sys_sync()
+ *	suspend_prepare() -->
+ *		__pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
+ *		suspend_freeze_processes() -->
+ *			freeze_processes() -->
+ *				__usermodehelper_set_disable_depth(UMH_DISABLED);
+ *				freeze all tasks ...
+ *			freeze_kernel_threads()
+ *	suspend_devices_and_enter() -->
+ *		dpm_suspend_start() -->
+ *				dpm_prepare()
+ *				dpm_suspend()
+ *		suspend_enter()  -->
+ *			platform_suspend_prepare()
+ *			dpm_suspend_late()
+ *			freeze_enter()
+ *			syscore_suspend()
+ *
+ * When we resume, we bail out of a loop in suspend_devices_and_enter() and
+ * unwind back out to the caller, enter_state(), where we were before, as follows::
+ *
+ * 	enter_state() -->
+ *	suspend_devices_and_enter() --> (bail from loop)
+ *		dpm_resume_end() -->
+ *			dpm_resume()
+ *			dpm_complete()
+ *	suspend_finish() -->
+ *		suspend_thaw_processes() -->
+ *			thaw_processes() -->
+ *				__usermodehelper_set_disable_depth(UMH_FREEZING);
+ *				thaw_workqueues();
+ *				thaw all processes ...
+ *				usermodehelper_enable();
+ *		pm_notifier_call_chain(PM_POST_SUSPEND);
+ *
+ * fw_pm_notify() works through pm_notifier_call_chain().
+ */
 static int fw_pm_notify(struct notifier_block *notify_block,
 			unsigned long mode, void *unused)
 {
@@ -1724,8 +1799,13 @@ static int fw_pm_notify(struct notifier_block *notify_block,
 	case PM_HIBERNATION_PREPARE:
 	case PM_SUSPEND_PREPARE:
 	case PM_RESTORE_PREPARE:
-		kill_requests_without_uevent();
+		/*
+		 * Kill pending fallback requests that use the custom fallback
+		 * mechanism, to avoid stalling suspend.
+		 */
+		kill_pending_fw_fallback_reqs(true);
 		device_cache_fw_images();
+		disable_firmware();
 		break;
 
 	case PM_POST_SUSPEND:
@@ -1738,6 +1818,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
 		mutex_lock(&fw_lock);
 		fw_cache.state = FW_LOADER_NO_CACHE;
 		mutex_unlock(&fw_lock);
+		enable_firmware();
 
 		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
 		break;
@@ -1783,11 +1864,29 @@ static void __init fw_cache_init(void)
 #endif
 }
 
+static int fw_shutdown_notify(struct notifier_block *unused1,
+			      unsigned long unused2, void *unused3)
+{
+	disable_firmware();
+	/*
+	 * Kill all pending fallback requests to avoid stalling shutdown and
+	 * to avoid a deadlock with the usermode_lock.
+	 */
+	kill_pending_fw_fallback_reqs(false);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+	.notifier_call = fw_shutdown_notify,
+};
+
 static int __init firmware_class_init(void)
 {
+	enable_firmware();
 	fw_cache_init();
-#ifdef CONFIG_FW_LOADER_USER_HELPER
 	register_reboot_notifier(&fw_shutdown_nb);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
 	return class_register(&firmware_class);
 #else
 	return 0;
@@ -1796,12 +1895,13 @@ static int __init firmware_class_init(void)
 
 static void __exit firmware_class_exit(void)
 {
+	disable_firmware();
 #ifdef CONFIG_PM_SLEEP
 	unregister_syscore_ops(&fw_syscore_ops);
 	unregister_pm_notifier(&fw_cache.pm_notify);
 #endif
-#ifdef CONFIG_FW_LOADER_USER_HELPER
 	unregister_reboot_notifier(&fw_shutdown_nb);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
 	class_unregister(&firmware_class);
 #endif
 }
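
With the firmware_class changes above, request_firmware() is gated behind
firmware_enabled(): once the suspend or shutdown notifiers have called
disable_firmware(), a late request fails with -EHOSTDOWN (plus a WARN) instead
of stalling on the user-mode helper. A minimal, purely illustrative caller is
sketched here; the driver function and firmware name are hypothetical, while
request_firmware()/release_firmware() are the real API.

/*
 * Illustrative sketch only: a hypothetical driver path that tolerates the
 * firmware API being disabled around suspend/shutdown.
 */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_ucode(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example/ucode.bin", dev);
	if (ret) {
		/* -EHOSTDOWN: raced with suspend/shutdown, nothing to do */
		dev_warn(dev, "firmware load failed: %d\n", ret);
		return ret;
	}

	/* ... program fw->data (fw->size bytes) into the device ... */

	release_firmware(fw);
	return 0;
}
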
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5548f96..0440d95 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -377,7 +377,7 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
 	if (!pfn_valid_within(pfn))
 		return -1;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-	if (system_state == SYSTEM_BOOTING)
+	if (system_state < SYSTEM_RUNNING)
 		return early_pfn_to_nid(pfn);
 #endif
 	page = pfn_to_page(pfn);
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 5917b4b..eb929dd 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -23,6 +23,9 @@ int pinctrl_bind_pins(struct device *dev)
 {
 	int ret;
 
+	if (dev->of_node_reused)
+		return 0;
+
 	dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
 	if (!dev->pins)
 		return -ENOMEM;
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index d35e9a2..e547352 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -195,7 +195,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
 
 	domain = msi_create_irq_domain(fwnode, info, parent);
 	if (domain)
-		domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
 
 	return domain;
 }
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a102152..97332d0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -866,7 +866,7 @@ static ssize_t driver_override_store(struct device *dev,
 				     const char *buf, size_t count)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	char *driver_override, *old = pdev->driver_override, *cp;
+	char *driver_override, *old, *cp;
 
 	if (count > PATH_MAX)
 		return -EINVAL;
@@ -879,12 +879,15 @@ static ssize_t driver_override_store(struct device *dev,
 	if (cp)
 		*cp = '\0';
 
+	device_lock(dev);
+	old = pdev->driver_override;
 	if (strlen(driver_override)) {
 		pdev->driver_override = driver_override;
 	} else {
 		kfree(driver_override);
 		pdev->driver_override = NULL;
 	}
+	device_unlock(dev);
 
 	kfree(old);
 
@@ -895,8 +898,12 @@ static ssize_t driver_override_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct platform_device *pdev = to_platform_device(dev);
+	ssize_t len;
 
-	return sprintf(buf, "%s\n", pdev->driver_override);
+	device_lock(dev);
+	len = sprintf(buf, "%s\n", pdev->driver_override);
+	device_unlock(dev);
+	return len;
 }
 static DEVICE_ATTR_RW(driver_override);
 
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index da49a83..9649dce 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -126,7 +126,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 
 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
-		struct generic_pm_domain *genpd)
+		const struct generic_pm_domain *genpd)
 {
 	bool ret;
 
@@ -181,12 +181,14 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_stop_dev(const struct generic_pm_domain *genpd,
+			  struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 }
 
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_start_dev(const struct generic_pm_domain *genpd,
+			   struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
 }
@@ -443,7 +445,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 
 		pdd = dev->power.subsys_data ?
 				dev->power.subsys_data->domain_data : NULL;
-		if (pdd && pdd->dev) {
+		if (pdd) {
 			to_gpd_data(pdd)->td.constraint_changed = true;
 			genpd = dev_to_genpd(dev);
 		} else {
@@ -738,7 +740,7 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd)
 
 #ifdef CONFIG_PM_SLEEP
 
-static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
 				    struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
@@ -840,7 +842,8 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
  * signal remote wakeup from the system's working state as needed by runtime PM.
  * Return 'true' in either of the above cases.
  */
-static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+static bool resume_needed(struct device *dev,
+			  const struct generic_pm_domain *genpd)
 {
 	bool active_wakeup;
 
@@ -899,19 +902,19 @@ static int pm_genpd_prepare(struct device *dev)
 }
 
 /**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * genpd_finish_suspend - Completion of suspend or hibernation of device in an
+ *   I/O PM domain.
  * @dev: Device to suspend.
+ * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
  *
  * Stop the device and remove power from the domain if all devices in it have
  * been stopped.
  */
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int genpd_finish_suspend(struct device *dev, bool poweroff)
 {
 	struct generic_pm_domain *genpd;
 	int ret;
 
-	dev_dbg(dev, "%s()\n", __func__);
-
 	genpd = dev_to_genpd(dev);
 	if (IS_ERR(genpd))
 		return -EINVAL;
@@ -919,6 +922,13 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
+	if (poweroff)
+		ret = pm_generic_poweroff_noirq(dev);
+	else
+		ret = pm_generic_suspend_noirq(dev);
+	if (ret)
+		return ret;
+
 	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
 		ret = pm_runtime_force_suspend(dev);
 		if (ret)
@@ -934,6 +944,20 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 }
 
 /**
+ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+	dev_dbg(dev, "%s()\n", __func__);
+
+	return genpd_finish_suspend(dev, false);
+}
+
+/**
  * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
  * @dev: Device to resume.
  *
@@ -961,6 +985,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_resume(dev);
 
+	ret = pm_generic_resume_noirq(dev);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 
@@ -975,7 +1003,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
  */
 static int pm_genpd_freeze_noirq(struct device *dev)
 {
-	struct generic_pm_domain *genpd;
+	const struct generic_pm_domain *genpd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -984,6 +1012,10 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	ret = pm_generic_freeze_noirq(dev);
+	if (ret)
+		return ret;
+
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_suspend(dev);
 
@@ -999,7 +1031,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
  */
 static int pm_genpd_thaw_noirq(struct device *dev)
 {
-	struct generic_pm_domain *genpd;
+	const struct generic_pm_domain *genpd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1008,10 +1040,28 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
 		ret = pm_runtime_force_resume(dev);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
+ *   I/O PM domain.
+ * @dev: Device to poweroff.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_poweroff_noirq(struct device *dev)
+{
+	dev_dbg(dev, "%s()\n", __func__);
+
+	return genpd_finish_suspend(dev, true);
 }
 
 /**
@@ -1048,10 +1098,13 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	genpd_sync_power_on(genpd, true, 0);
 	genpd_unlock(genpd);
 
-	if (genpd->dev_ops.stop && genpd->dev_ops.start)
+	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
 		ret = pm_runtime_force_resume(dev);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	return pm_generic_restore_noirq(dev);
 }
 
 /**
@@ -1095,8 +1148,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 {
 	struct generic_pm_domain *genpd;
 
-	genpd = dev_to_genpd(dev);
-	if (!pm_genpd_present(genpd))
+	genpd = genpd_lookup_dev(dev);
+	if (!genpd)
 		return;
 
 	if (suspend) {
@@ -1393,7 +1446,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			      struct generic_pm_domain *subdomain)
 {
-	struct gpd_link *link;
+	struct gpd_link *l, *link;
 	int ret = -EINVAL;
 
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
@@ -1409,7 +1462,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		goto out;
 	}
 
-	list_for_each_entry(link, &genpd->master_links, master_node) {
+	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
 			continue;
 
@@ -1493,7 +1546,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
 	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
 	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
-	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
+	genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
 	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
 	genpd->domain.ops.complete = pm_genpd_complete;
 
@@ -1584,9 +1637,6 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove);
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
 
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
-						   void *data);
-
 /*
  * Device Tree based PM domain providers.
  *
@@ -1742,6 +1792,9 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 
 	mutex_lock(&gpd_list_lock);
 
+	if (!data->xlate)
+		data->xlate = genpd_xlate_onecell;
+
 	for (i = 0; i < data->num_domains; i++) {
 		if (!data->domains[i])
 			continue;
@@ -1752,7 +1805,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 		data->domains[i]->has_provider = true;
 	}
 
-	ret = genpd_add_provider(np, genpd_xlate_onecell, data);
+	ret = genpd_add_provider(np, data->xlate, data);
 	if (ret < 0)
 		goto error;
 
@@ -1780,12 +1833,12 @@ EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
  */
 void of_genpd_del_provider(struct device_node *np)
 {
-	struct of_genpd_provider *cp;
+	struct of_genpd_provider *cp, *tmp;
 	struct generic_pm_domain *gpd;
 
 	mutex_lock(&gpd_list_lock);
 	mutex_lock(&of_genpd_mutex);
-	list_for_each_entry(cp, &of_genpd_providers, link) {
+	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
 		if (cp->node == np) {
 			/*
 			 * For each PM domain associated with the
@@ -1925,14 +1978,14 @@ EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
  */
 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
 {
-	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
+	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
 	int ret;
 
 	if (IS_ERR_OR_NULL(np))
 		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&gpd_list_lock);
-	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
 		if (gpd->provider == &np->fwnode) {
 			ret = genpd_remove(gpd);
 			genpd = ret ? ERR_PTR(ret) : gpd;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 2e0fce7..281f949 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -92,12 +92,6 @@ static bool default_suspend_ok(struct device *dev)
 	return td->cached_suspend_ok;
 }
 
-/**
- * default_power_down_ok - Default generic PM domain power off governor routine.
- * @pd: PM domain to check.
- *
- * This routine must be executed under the PM domain's lock.
- */
 static bool __default_power_down_ok(struct dev_pm_domain *pd,
 				     unsigned int state)
 {
@@ -187,6 +181,12 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
 	return true;
 }
 
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
 static bool default_power_down_ok(struct dev_pm_domain *pd)
 {
 	struct generic_pm_domain *genpd = pd_to_genpd(pd);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f..c99f873 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -62,7 +62,7 @@ static pm_message_t pm_transition;
 
 static int async_error;
 
-static char *pm_verb(int event)
+static const char *pm_verb(int event)
 {
 	switch (event) {
 	case PM_EVENT_SUSPEND:
@@ -208,7 +208,8 @@ static ktime_t initcall_debug_start(struct device *dev)
 }
 
 static void initcall_debug_report(struct device *dev, ktime_t calltime,
-				  int error, pm_message_t state, char *info)
+				  int error, pm_message_t state,
+				  const char *info)
 {
 	ktime_t rettime;
 	s64 nsecs;
@@ -403,21 +404,23 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
 	return NULL;
 }
 
-static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
+static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 {
 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 		", may wakeup" : "");
 }
 
-static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
+static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 			int error)
 {
 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 		dev_name(dev), pm_verb(state.event), info, error);
 }
 
-static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+#ifdef CONFIG_PM_DEBUG
+static void dpm_show_time(ktime_t starttime, pm_message_t state,
+			  const char *info)
 {
 	ktime_t calltime;
 	u64 usecs64;
@@ -433,9 +436,13 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 		info ?: "", info ? " " : "", pm_verb(state.event),
 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 }
+#else
+static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
+				 const char *info) {}
+#endif /* CONFIG_PM_DEBUG */
 
 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
-			    pm_message_t state, char *info)
+			    pm_message_t state, const char *info)
 {
 	ktime_t calltime;
 	int error;
@@ -535,7 +542,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -665,7 +672,7 @@ void dpm_resume_noirq(pm_message_t state)
 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -793,7 +800,7 @@ EXPORT_SYMBOL_GPL(dpm_resume_start);
 static int device_resume(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
@@ -955,7 +962,7 @@ void dpm_resume(pm_message_t state)
 static void device_complete(struct device *dev, pm_message_t state)
 {
 	void (*callback)(struct device *) = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 
 	if (dev->power.syscore)
 		return;
@@ -1080,7 +1087,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -1220,7 +1227,7 @@ int dpm_suspend_noirq(pm_message_t state)
 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 
 	TRACE_DEVICE(dev);
@@ -1379,7 +1386,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
  */
 static int legacy_suspend(struct device *dev, pm_message_t state,
 			  int (*cb)(struct device *dev, pm_message_t state),
-			  char *info)
+			  const char *info)
 {
 	int error;
 	ktime_t calltime;
@@ -1421,7 +1428,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev)
 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
-	char *info = NULL;
+	const char *info = NULL;
 	int error = 0;
 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index dae6172..a8cc14f 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -180,7 +180,7 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *opp;
-	struct regulator *reg, **regulators;
+	struct regulator *reg;
 	unsigned long latency_ns = 0;
 	int ret, i, count;
 	struct {
@@ -198,15 +198,9 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	if (!count)
 		goto put_opp_table;
 
-	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
-	if (!regulators)
-		goto put_opp_table;
-
 	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
 	if (!uV)
-		goto free_regulators;
-
-	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
+		goto put_opp_table;
 
 	mutex_lock(&opp_table->lock);
 
@@ -232,15 +226,13 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * isn't freed, while we are executing this routine.
 	 */
 	for (i = 0; i < count; i++) {
-		reg = regulators[i];
+		reg = opp_table->regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
 	}
 
 	kfree(uV);
-free_regulators:
-	kfree(regulators);
 put_opp_table:
 	dev_pm_opp_put_opp_table(opp_table);
 
@@ -543,17 +535,18 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
 	return ret;
 }
 
-static int _generic_set_opp(struct dev_pm_set_opp_data *data)
+static int _generic_set_opp_regulator(const struct opp_table *opp_table,
+				      struct device *dev,
+				      unsigned long old_freq,
+				      unsigned long freq,
+				      struct dev_pm_opp_supply *old_supply,
+				      struct dev_pm_opp_supply *new_supply)
 {
-	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
-	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
-	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
-	struct regulator *reg = data->regulators[0];
-	struct device *dev= data->dev;
+	struct regulator *reg = opp_table->regulators[0];
 	int ret;
 
 	/* This function only supports single regulator per device */
-	if (WARN_ON(data->regulator_count > 1)) {
+	if (WARN_ON(opp_table->regulator_count > 1)) {
 		dev_err(dev, "multiple regulators are not supported\n");
 		return -EINVAL;
 	}
@@ -566,7 +559,7 @@ static int _generic_set_opp(struct dev_pm_set_opp_data *data)
 	}
 
 	/* Change frequency */
-	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
+	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
 	if (ret)
 		goto restore_voltage;
 
@@ -580,12 +573,12 @@ static int _generic_set_opp(struct dev_pm_set_opp_data *data)
 	return 0;
 
 restore_freq:
-	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
+	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 			__func__, old_freq);
 restore_voltage:
 	/* This shouldn't harm even if the voltages weren't updated earlier */
-	if (old_supply->u_volt)
+	if (old_supply)
 		_set_opp_voltage(dev, reg, old_supply);
 
 	return ret;
@@ -603,10 +596,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
 	struct opp_table *opp_table;
 	unsigned long freq, old_freq;
-	int (*set_opp)(struct dev_pm_set_opp_data *data);
 	struct dev_pm_opp *old_opp, *opp;
-	struct regulator **regulators;
-	struct dev_pm_set_opp_data *data;
 	struct clk *clk;
 	int ret, size;
 
@@ -661,38 +651,35 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
 		old_freq, freq);
 
-	regulators = opp_table->regulators;
-
 	/* Only frequency scaling */
-	if (!regulators) {
+	if (!opp_table->regulators) {
 		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
-		goto put_opps;
+	} else if (!opp_table->set_opp) {
+		ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
+						 IS_ERR(old_opp) ? NULL : old_opp->supplies,
+						 opp->supplies);
+	} else {
+		struct dev_pm_set_opp_data *data;
+
+		data = opp_table->set_opp_data;
+		data->regulators = opp_table->regulators;
+		data->regulator_count = opp_table->regulator_count;
+		data->clk = clk;
+		data->dev = dev;
+
+		data->old_opp.rate = old_freq;
+		size = sizeof(*opp->supplies) * opp_table->regulator_count;
+		if (IS_ERR(old_opp))
+			memset(data->old_opp.supplies, 0, size);
+		else
+			memcpy(data->old_opp.supplies, old_opp->supplies, size);
+
+		data->new_opp.rate = freq;
+		memcpy(data->new_opp.supplies, opp->supplies, size);
+
+		ret = opp_table->set_opp(data);
 	}
 
-	if (opp_table->set_opp)
-		set_opp = opp_table->set_opp;
-	else
-		set_opp = _generic_set_opp;
-
-	data = opp_table->set_opp_data;
-	data->regulators = regulators;
-	data->regulator_count = opp_table->regulator_count;
-	data->clk = clk;
-	data->dev = dev;
-
-	data->old_opp.rate = old_freq;
-	size = sizeof(*opp->supplies) * opp_table->regulator_count;
-	if (IS_ERR(old_opp))
-		memset(data->old_opp.supplies, 0, size);
-	else
-		memcpy(data->old_opp.supplies, old_opp->supplies, size);
-
-	data->new_opp.rate = freq;
-	memcpy(data->new_opp.supplies, opp->supplies, size);
-
-	ret = set_opp(data);
-
-put_opps:
 	dev_pm_opp_put(opp);
 put_old_opp:
 	if (!IS_ERR(old_opp))
@@ -1376,6 +1363,73 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
 
 /**
+ * dev_pm_opp_set_clkname() - Set clk name for the device
+ * @dev: Device for which clk name is being set.
+ * @name: Clk name.
+ *
+ * In order to support OPP switching, the OPP layer needs to get a pointer to
+ * the clock for the device. Simple cases work fine without using this routine
+ * (i.e. by passing the connection-id as NULL), but for a device with multiple
+ * clocks available, the OPP core needs to know the exact name of the clk to use.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ */
+struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+	struct opp_table *opp_table;
+	int ret;
+
+	opp_table = dev_pm_opp_get_opp_table(dev);
+	if (!opp_table)
+		return ERR_PTR(-ENOMEM);
+
+	/* This should be called before OPPs are initialized */
+	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	/* Already have default clk set, free it */
+	if (!IS_ERR(opp_table->clk))
+		clk_put(opp_table->clk);
+
+	/* Find clk for the device */
+	opp_table->clk = clk_get(dev, name);
+	if (IS_ERR(opp_table->clk)) {
+		ret = PTR_ERR(opp_table->clk);
+		if (ret != -EPROBE_DEFER) {
+			dev_err(dev, "%s: Couldn't find clock: %d\n", __func__,
+				ret);
+		}
+		goto err;
+	}
+
+	return opp_table;
+
+err:
+	dev_pm_opp_put_opp_table(opp_table);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_clkname);
+
+/**
+ * dev_pm_opp_put_clkname() - Releases resources blocked for clk.
+ * @opp_table: OPP table returned from dev_pm_opp_set_clkname().
+ */
+void dev_pm_opp_put_clkname(struct opp_table *opp_table)
+{
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	clk_put(opp_table->clk);
+	opp_table->clk = ERR_PTR(-EINVAL);
+
+	dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_clkname);
+
+/**
  * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
  * @dev: Device for which the helper is getting registered.
  * @set_opp: Custom set OPP helper.
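
The new dev_pm_opp_set_clkname()/dev_pm_opp_put_clkname() helpers above let a
device with several clocks tell the OPP core which one to scale. A brief usage
sketch follows; the consumer functions and the "core" connection id are made-up
examples, and it assumes the usual flow of adding OPPs only after the clk name
has been set.

#include <linux/err.h>
#include <linux/pm_opp.h>

static struct opp_table *example_opp_setup(struct device *dev)
{
	struct opp_table *opp_table;

	/* Must run before any OPPs are added for this device. */
	opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(opp_table))
		return opp_table;

	/* ... dev_pm_opp_of_add_table(dev), dev_pm_opp_set_rate(dev, freq) ... */

	return opp_table;
}

static void example_opp_teardown(struct opp_table *opp_table)
{
	dev_pm_opp_put_clkname(opp_table);
}
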
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index 95f433d..81cf120 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -40,11 +40,10 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
 				      struct dentry *pdentry)
 {
 	struct dentry *d;
-	int i = 0;
+	int i;
 	char *name;
 
-	/* Always create at least supply-0 directory */
-	do {
+	for (i = 0; i < opp_table->regulator_count; i++) {
 		name = kasprintf(GFP_KERNEL, "supply-%d", i);
 
 		/* Create per-opp directory */
@@ -70,7 +69,7 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
 		if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
 					  &opp->supplies[i].u_amp))
 			return false;
-	} while (++i < opp_table->regulator_count);
+	}
 
 	return true;
 }
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 7794286..57eec1c 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -131,8 +131,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 		prop = of_find_property(opp->np, name, NULL);
 
 		/* Missing property isn't a problem, but an invalid entry is */
-		if (!prop)
-			return 0;
+		if (!prop) {
+			if (!opp_table->regulator_count)
+				return 0;
+
+			dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
+				__func__);
+			return -EINVAL;
+		}
 	}
 
 	vcount = of_property_count_u32_elems(opp->np, name);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 33b4b90..185a525 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -607,7 +607,7 @@ static struct attribute *power_attrs[] = {
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
 	NULL,
 };
-static struct attribute_group pm_attr_group = {
+static const struct attribute_group pm_attr_group = {
 	.name	= power_group_name,
 	.attrs	= power_attrs,
 };
@@ -629,7 +629,7 @@ static struct attribute *wakeup_attrs[] = {
 #endif
 	NULL,
 };
-static struct attribute_group pm_wakeup_attr_group = {
+static const struct attribute_group pm_wakeup_attr_group = {
 	.name	= power_group_name,
 	.attrs	= wakeup_attrs,
 };
@@ -644,7 +644,7 @@ static struct attribute *runtime_attrs[] = {
 	&dev_attr_autosuspend_delay_ms.attr,
 	NULL,
 };
-static struct attribute_group pm_runtime_attr_group = {
+static const struct attribute_group pm_runtime_attr_group = {
 	.name	= power_group_name,
 	.attrs	= runtime_attrs,
 };
@@ -653,7 +653,7 @@ static struct attribute *pm_qos_resume_latency_attrs[] = {
 	&dev_attr_pm_qos_resume_latency_us.attr,
 	NULL,
 };
-static struct attribute_group pm_qos_resume_latency_attr_group = {
+static const struct attribute_group pm_qos_resume_latency_attr_group = {
 	.name	= power_group_name,
 	.attrs	= pm_qos_resume_latency_attrs,
 };
@@ -662,7 +662,7 @@ static struct attribute *pm_qos_latency_tolerance_attrs[] = {
 	&dev_attr_pm_qos_latency_tolerance_us.attr,
 	NULL,
 };
-static struct attribute_group pm_qos_latency_tolerance_attr_group = {
+static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
 	.name	= power_group_name,
 	.attrs	= pm_qos_latency_tolerance_attrs,
 };
@@ -672,7 +672,7 @@ static struct attribute *pm_qos_flags_attrs[] = {
 	&dev_attr_pm_qos_remote_wakeup.attr,
 	NULL,
 };
-static struct attribute_group pm_qos_flags_attr_group = {
+static const struct attribute_group pm_qos_flags_attr_group = {
 	.name	= power_group_name,
 	.attrs	= pm_qos_flags_attrs,
 };
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 9c36b27..144e6d8 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources);
 
 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
 
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
 static struct wakeup_source deleted_ws = {
 	.name = "deleted",
 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
 	spin_lock_irqsave(&events_lock, flags);
 	list_del_rcu(&ws->entry);
 	spin_unlock_irqrestore(&events_lock, flags);
-	synchronize_rcu();
+	synchronize_srcu(&wakeup_srcu);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
 
@@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev)
 void device_wakeup_arm_wake_irqs(void)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		dev_pm_arm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void)
 void device_wakeup_disarm_wake_irqs(void)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		dev_pm_disarm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -804,10 +806,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
-	int active = 0;
+	int srcuidx, active = 0;
 	struct wakeup_source *last_activity_ws = NULL;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		if (ws->active) {
 			pr_debug("active wakeup source: %s\n", ws->name);
@@ -823,7 +825,7 @@ void pm_print_active_wakeup_sources(void)
 	if (!active && last_activity_ws)
 		pr_debug("last active wakeup source: %s\n",
 			last_activity_ws->name);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 
@@ -956,8 +958,9 @@ void pm_wakep_autosleep_enabled(bool set)
 {
 	struct wakeup_source *ws;
 	ktime_t now = ktime_get();
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		spin_lock_irq(&ws->lock);
 		if (ws->autosleep_enabled != set) {
@@ -971,7 +974,7 @@ void pm_wakep_autosleep_enabled(bool set)
 		}
 		spin_unlock_irq(&ws->lock);
 	}
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 #endif /* CONFIG_PM_AUTOSLEEP */
 
@@ -1032,15 +1035,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
 	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
 		"last_change\tprevent_suspend_time\n");
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		print_wakeup_source_stats(m, ws);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 
 	print_wakeup_source_stats(m, &deleted_ws);
 
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index db9d00c3..073c0b7 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,10 +3,13 @@
 # subsystems should select the appropriate symbols.
 
 config REGMAP
-	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
+	select IRQ_DOMAIN if REGMAP_IRQ
+	bool
+
+config REGCACHE_COMPRESSED
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
-	select IRQ_DOMAIN if REGMAP_IRQ
 	bool
 
 config REGMAP_AC97
@@ -24,6 +27,10 @@
 	tristate
 	depends on SPMI
 
+config REGMAP_W1
+	tristate
+	depends on W1
+
 config REGMAP_MMIO
 	tristate
 
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 609e4c8..0cf4abc 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -2,7 +2,8 @@
 CFLAGS_regmap.o := -I$(src)
 
 obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o
+obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o
 obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
@@ -10,3 +11,4 @@
 obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
 obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
 obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b0a0dcf..7735603 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -21,7 +21,9 @@
 
 static const struct regcache_ops *cache_types[] = {
 	&regcache_rbtree_ops,
+#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
 	&regcache_lzo_ops,
+#endif
 	&regcache_flat_ops,
 };
 
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index cd54189..429ca8e 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -60,6 +60,16 @@ static void regmap_irq_lock(struct irq_data *data)
 	mutex_lock(&d->lock);
 }
 
+static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
+				  unsigned int reg, unsigned int mask,
+				  unsigned int val)
+{
+	if (d->chip->mask_writeonly)
+		return regmap_write_bits(d->map, reg, mask, val);
+	else
+		return regmap_update_bits(d->map, reg, mask, val);
+}
+
 static void regmap_irq_sync_unlock(struct irq_data *data)
 {
 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
@@ -84,11 +94,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 		reg = d->chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (d->chip->mask_invert) {
-			ret = regmap_update_bits(d->map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 					 d->mask_buf_def[i], ~d->mask_buf[i]);
 		} else if (d->chip->unmask_base) {
 			/* set mask with mask_base register */
-			ret = regmap_update_bits(d->map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 					d->mask_buf_def[i], ~d->mask_buf[i]);
 			if (ret < 0)
 				dev_err(d->map->dev,
@@ -97,12 +107,12 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 			unmask_offset = d->chip->unmask_base -
 							d->chip->mask_base;
 			/* clear mask with unmask_base register */
-			ret = regmap_update_bits(d->map,
+			ret = regmap_irq_update_bits(d,
 					reg + unmask_offset,
 					d->mask_buf_def[i],
 					d->mask_buf[i]);
 		} else {
-			ret = regmap_update_bits(d->map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 					 d->mask_buf_def[i], d->mask_buf[i]);
 		}
 		if (ret != 0)
@@ -113,11 +123,11 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (d->wake_buf) {
 			if (d->chip->wake_invert)
-				ret = regmap_update_bits(d->map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 							 d->mask_buf_def[i],
 							 ~d->wake_buf[i]);
 			else
-				ret = regmap_update_bits(d->map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 							 d->mask_buf_def[i],
 							 d->wake_buf[i]);
 			if (ret != 0)
@@ -153,10 +163,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 		reg = d->chip->type_base +
 			(i * map->reg_stride * d->type_reg_stride);
 		if (d->chip->type_invert)
-			ret = regmap_update_bits(d->map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 				d->type_buf_def[i], ~d->type_buf[i]);
 		else
-			ret = regmap_update_bits(d->map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 				d->type_buf_def[i], d->type_buf[i]);
 		if (ret != 0)
 			dev_err(d->map->dev, "Failed to sync type in %x\n",
@@ -394,7 +404,7 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
 
 static const struct irq_domain_ops regmap_domain_ops = {
 	.map	= regmap_irq_map,
-	.xlate	= irq_domain_xlate_twocell,
+	.xlate	= irq_domain_xlate_onetwocell,
 };
 
 /**
@@ -519,17 +529,17 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 		reg = chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (chip->mask_invert)
-			ret = regmap_update_bits(map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 					 d->mask_buf[i], ~d->mask_buf[i]);
 		else if (d->chip->unmask_base) {
 			unmask_offset = d->chip->unmask_base -
 					d->chip->mask_base;
-			ret = regmap_update_bits(d->map,
+			ret = regmap_irq_update_bits(d,
 					reg + unmask_offset,
 					d->mask_buf[i],
 					d->mask_buf[i]);
 		} else
-			ret = regmap_update_bits(map, reg,
+			ret = regmap_irq_update_bits(d, reg,
 					 d->mask_buf[i], d->mask_buf[i]);
 		if (ret != 0) {
 			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
@@ -575,11 +585,11 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 				(i * map->reg_stride * d->irq_reg_stride);
 
 			if (chip->wake_invert)
-				ret = regmap_update_bits(map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 							 d->mask_buf_def[i],
 							 0);
 			else
-				ret = regmap_update_bits(map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 							 d->mask_buf_def[i],
 							 d->wake_buf[i]);
 			if (ret != 0) {
@@ -603,10 +613,10 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 			reg = chip->type_base +
 				(i * map->reg_stride * d->type_reg_stride);
 			if (chip->type_invert)
-				ret = regmap_update_bits(map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 					d->type_buf_def[i], 0xFF);
 			else
-				ret = regmap_update_bits(map, reg,
+				ret = regmap_irq_update_bits(d, reg,
 					d->type_buf_def[i], 0x0);
 			if (ret != 0) {
 				dev_err(map->dev,
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
new file mode 100644
index 0000000..5f04e7b
--- /dev/null
+++ b/drivers/base/regmap/regmap-w1.c
@@ -0,0 +1,245 @@
+/*
+ * Register map access API - W1 (1-Wire) support
+ *
+ * Copyright (C) 2017 OAO Radioavionica
+ * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include "../../w1/w1.h"
+
+#include "internal.h"
+
+#define W1_CMD_READ_DATA	0x69
+#define W1_CMD_WRITE_DATA	0x6C
+
+/*
+ * 1-Wire slave registers with 8-bit address and 8-bit data
+ */
+
+static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg);
+		*val = w1_read_8(sl->master);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg);
+		w1_write_8(sl->master, val);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * 1-Wire slave registers with 8-bit address and 16-bit data
+ */
+
+static int w1_reg_a8_v16_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg);
+		*val = w1_read_8(sl->master);
+		*val |= w1_read_8(sl->master)<<8;
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a8_v16_write(void *context, unsigned int reg,
+				unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 255)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg);
+		w1_write_8(sl->master, val & 0x00FF);
+		w1_write_8(sl->master, val>>8 & 0x00FF);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * 1-Wire slave registers with 16-bit address and 16-bit data
+ */
+
+static int w1_reg_a16_v16_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 65535)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_READ_DATA);
+		w1_write_8(sl->master, reg & 0x00FF);
+		w1_write_8(sl->master, reg>>8 & 0x00FF);
+		*val = w1_read_8(sl->master);
+		*val |= w1_read_8(sl->master)<<8;
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+static int w1_reg_a16_v16_write(void *context, unsigned int reg,
+				unsigned int val)
+{
+	struct device *dev = context;
+	struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+	int ret = 0;
+
+	if (reg > 65535)
+		return -EINVAL;
+
+	mutex_lock(&sl->master->bus_mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+		w1_write_8(sl->master, reg & 0x00FF);
+		w1_write_8(sl->master, reg>>8 & 0x00FF);
+		w1_write_8(sl->master, val & 0x00FF);
+		w1_write_8(sl->master, val>>8 & 0x00FF);
+	} else {
+		ret = -ENODEV;
+	}
+	mutex_unlock(&sl->master->bus_mutex);
+
+	return ret;
+}
+
+/*
+ * Various types of supported bus addressing
+ */
+
+static struct regmap_bus regmap_w1_bus_a8_v8 = {
+	.reg_read = w1_reg_a8_v8_read,
+	.reg_write = w1_reg_a8_v8_write,
+};
+
+static struct regmap_bus regmap_w1_bus_a8_v16 = {
+	.reg_read = w1_reg_a8_v16_read,
+	.reg_write = w1_reg_a8_v16_write,
+};
+
+static struct regmap_bus regmap_w1_bus_a16_v16 = {
+	.reg_read = w1_reg_a16_v16_read,
+	.reg_write = w1_reg_a16_v16_write,
+};
+
+static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev,
+					const struct regmap_config *config)
+{
+	if (config->reg_bits == 8 && config->val_bits == 8)
+		return &regmap_w1_bus_a8_v8;
+
+	if (config->reg_bits == 8 && config->val_bits == 16)
+		return &regmap_w1_bus_a8_v16;
+
+	if (config->reg_bits == 16 && config->val_bits == 16)
+		return &regmap_w1_bus_a16_v16;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+
+	const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __regmap_init(w1_dev, bus, w1_dev, config,
+			 lock_key, lock_name);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__regmap_init_w1);
+
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name)
+{
+
+	const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return __devm_regmap_init(w1_dev, bus, w1_dev, config,
+				 lock_key, lock_name);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
+
+MODULE_LICENSE("GPL");
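
The regmap-w1 bus above selects an accessor purely from reg_bits/val_bits
(8/8, 8/16 or 16/16 are supported). A short usage sketch from a hypothetical
1-Wire slave driver follows; it assumes the devm_regmap_init_w1() wrapper that
accompanies __devm_regmap_init_w1() in <linux/regmap.h>, and the register
offsets are invented.

#include <linux/err.h>
#include <linux/regmap.h>

static const struct regmap_config example_w1_regmap_config = {
	.reg_bits = 8,		/* routes to regmap_w1_bus_a8_v16 */
	.val_bits = 16,
};

static int example_w1_attach_regmap(struct device *w1_dev)
{
	struct regmap *map;
	unsigned int val;

	map = devm_regmap_init_w1(w1_dev, &example_w1_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Hypothetical registers: read 0x02, write 0x04. */
	if (regmap_read(map, 0x02, &val))
		return -EIO;

	return regmap_write(map, 0x04, 0x1234);
}
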
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 26a51be..245a879 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
 						 bool SuccessfulIO)
 {
 	struct request *Request = Command->Request;
-	int Error = SuccessfulIO ? 0 : -EIO;
+	blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
 
 	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
 		Command->SegmentCount, Command->DmaDirection);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a328f67..49908c7 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1378,7 +1378,7 @@ static void redo_fd_request(void)
 	struct amiga_floppy_struct *floppy;
 	char *data;
 	unsigned long flags;
-	int err;
+	blk_status_t err;
 
 next_req:
 	rq = set_next_request();
@@ -1392,7 +1392,7 @@ static void redo_fd_request(void)
 
 next_segment:
 	/* Here someone could investigate to be more efficient */
-	for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
+	for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
 #ifdef DEBUG
 		printk("fd: sector %ld + %d requested for %s\n",
 		       blk_rq_pos(rq), cnt,
@@ -1400,7 +1400,7 @@ static void redo_fd_request(void)
 #endif
 		block = blk_rq_pos(rq) + cnt;
 		if ((int)block > floppy->blocks) {
-			err = -EIO;
+			err = BLK_STS_IOERR;
 			break;
 		}
 
@@ -1413,7 +1413,7 @@ static void redo_fd_request(void)
 #endif
 
 		if (get_track(drive, track) == -1) {
-			err = -EIO;
+			err = BLK_STS_IOERR;
 			break;
 		}
 
@@ -1424,7 +1424,7 @@ static void redo_fd_request(void)
 
 			/* keep the drive spinning while writes are scheduled */
 			if (!fd_motor_on(drive)) {
-				err = -EIO;
+				err = BLK_STS_IOERR;
 				break;
 			}
 			/*
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 027b876..6797e6c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -388,6 +388,7 @@ aoeblk_gdalloc(void *vp)
 			d->aoemajor, d->aoeminor);
 		goto err_mempool;
 	}
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 
 	spin_lock_irqsave(&d->lock, flags);
 	WARN_ON(!(d->flags & DEVFL_GD_NOW));
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3c606c0..dc43254 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1070,8 +1070,8 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 		d->ip.rq = NULL;
 	do {
 		bio = rq->bio;
-		bok = !fastfail && !bio->bi_error;
-	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
+		bok = !fastfail && !bio->bi_status;
+	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
 			ahout->cmdstat, ahin->cmdstat,
 			d->aoemajor, d->aoeminor);
 noskb:		if (buf)
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
 
@@ -1144,7 +1144,7 @@ noskb:		if (buf)
 				"aoe: runt data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 			       skb->len, n);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb:		if (buf)
 				"aoe: too-large data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				n, f->iter.bi_size);
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 			break;
 		}
 		bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 	if (buf == NULL)
 		return;
 	buf->iter.bi_size = 0;
-	buf->bio->bi_error = -EIO;
+	buf->bio->bi_status = BLK_STS_IOERR;
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
 }
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index ffd1947..b28fefb 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
 	if (rq == NULL)
 		return;
 	while ((bio = d->ip.nxbio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		d->ip.nxbio = bio->bi_next;
 		n = (unsigned long) rq->special;
 		rq->special = (void *) --n;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index fa69ecd..92da886 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
 static DEFINE_TIMER(fd_timer, check_change, 0, 0);
 	
-static void fd_end_request_cur(int err)
+static void fd_end_request_cur(blk_status_t err)
 {
 	if (!__blk_end_request_cur(fd_request, err))
 		fd_request = NULL;
@@ -620,7 +620,7 @@ static void fd_error( void )
 	fd_request->error_count++;
 	if (fd_request->error_count >= MAX_ERRORS) {
 		printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
-		fd_end_request_cur(-EIO);
+		fd_end_request_cur(BLK_STS_IOERR);
 	}
 	else if (fd_request->error_count == RECALIBRATE_ERRORS) {
 		printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
@@ -739,7 +739,7 @@ static void do_fd_action( int drive )
 		    }
 		    else {
 			/* all sectors finished */
-			fd_end_request_cur(0);
+			fd_end_request_cur(BLK_STS_OK);
 			redo_fd_request();
 			return;
 		    }
@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status)
 	}
 	else {
 		/* all sectors finished */
-		fd_end_request_cur(0);
+		fd_end_request_cur(BLK_STS_OK);
 		redo_fd_request();
 	}
 	return;
@@ -1445,7 +1445,7 @@ static void redo_fd_request(void)
 	if (!UD.connected) {
 		/* drive not connected */
 		printk(KERN_ERR "Unknown Device: fd%d\n", drive );
-		fd_end_request_cur(-EIO);
+		fd_end_request_cur(BLK_STS_IOERR);
 		goto repeat;
 	}
 		
@@ -1461,12 +1461,12 @@ static void redo_fd_request(void)
 		/* user supplied disk type */
 		if (--type >= NUM_DISK_MINORS) {
 			printk(KERN_WARNING "fd%d: invalid disk format", drive );
-			fd_end_request_cur(-EIO);
+			fd_end_request_cur(BLK_STS_IOERR);
 			goto repeat;
 		}
 		if (minor2disktype[type].drive_types > DriveType)  {
 			printk(KERN_WARNING "fd%d: unsupported disk format", drive );
-			fd_end_request_cur(-EIO);
+			fd_end_request_cur(BLK_STS_IOERR);
 			goto repeat;
 		}
 		type = minor2disktype[type].index;
@@ -1476,7 +1476,7 @@ static void redo_fd_request(void)
 	}
 	
 	if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
-		fd_end_request_cur(-EIO);
+		fd_end_request_cur(BLK_STS_IOERR);
 		goto repeat;
 	}
 
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 57b574f..6112e99 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -418,7 +418,6 @@ static struct brd_device *brd_alloc(int i)
 
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
-	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 	/* This is so fdisk will align partitions on 4k, because of
 	 * direct_access API needing 4k alignment, returning a PFN
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cd37550..02a6119 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq)
 	/* set the residual count for pc requests */
 	if (blk_rq_is_passthrough(rq))
 		scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
-	blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0);
+	blk_end_request_all(rq, scsi_req(rq)->result ?
+			BLK_STS_IOERR : BLK_STS_OK);
 
 	spin_lock_irqsave(&h->lock, flags);
 	cmd_free(h, c);
@@ -1956,6 +1957,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	disk->queue->cmd_size = sizeof(struct scsi_request);
 	disk->queue->request_fn = do_cciss_request;
 	disk->queue->queue_lock = &h->lock;
+	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
 	if (blk_init_allocated_queue(disk->queue) < 0)
 		goto cleanup_queue;
 
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 8d7bcfa..e02c45c 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 	else
 		submit_bio(bio);
 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		err = device->md_io.error;
 
  out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index a804a41..809fd24 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* ctx error will hold the completed-last non-zero error code,
 		 * in case error codes differ. */
-		ctx->error = bio->bi_error;
+		ctx->error = blk_status_to_errno(bio->bi_status);
 		bm_set_page_io_err(b->bm_pages[idx]);
 		/* Not identical to on disk version of it.
 		 * Is BM_PAGE_IO_ERROR enough? */
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-					bio->bi_error, idx);
+					bio->bi_status, idx);
 	} else {
 		bm_clear_page_io_err(b->bm_pages[idx]);
 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d5da45b..d17b6e6 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1441,6 +1441,9 @@ extern struct bio_set *drbd_md_io_bio_set;
 /* to allocate from that set */
 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 
+/* And a bio_set for cloning */
+extern struct bio_set *drbd_io_bio_set;
+
 extern struct mutex resources_mutex;
 
 extern int conn_lowest_minor(struct drbd_connection *connection);
@@ -1627,7 +1630,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
 	__release(local);
 	if (!bio->bi_bdev) {
 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-		bio->bi_error = -ENODEV;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
 	}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 84455c3..5fb99e0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -128,6 +128,7 @@ mempool_t *drbd_request_mempool;
 mempool_t *drbd_ee_mempool;
 mempool_t *drbd_md_io_page_pool;
 struct bio_set *drbd_md_io_bio_set;
+struct bio_set *drbd_io_bio_set;
 
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
@@ -2098,6 +2099,8 @@ static void drbd_destroy_mempools(void)
 
 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
+	if (drbd_io_bio_set)
+		bioset_free(drbd_io_bio_set);
 	if (drbd_md_io_bio_set)
 		bioset_free(drbd_md_io_bio_set);
 	if (drbd_md_io_page_pool)
@@ -2115,6 +2118,7 @@ static void drbd_destroy_mempools(void)
 	if (drbd_al_ext_cache)
 		kmem_cache_destroy(drbd_al_ext_cache);
 
+	drbd_io_bio_set      = NULL;
 	drbd_md_io_bio_set   = NULL;
 	drbd_md_io_page_pool = NULL;
 	drbd_ee_mempool      = NULL;
@@ -2142,6 +2146,7 @@ static int drbd_create_mempools(void)
 	drbd_pp_pool         = NULL;
 	drbd_md_io_page_pool = NULL;
 	drbd_md_io_bio_set   = NULL;
+	drbd_io_bio_set      = NULL;
 
 	/* caches */
 	drbd_request_cache = kmem_cache_create(
@@ -2165,7 +2170,13 @@ static int drbd_create_mempools(void)
 		goto Enomem;
 
 	/* mempools */
-	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
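+	/* drbd_io_bio_set backs the bio_clone_fast() calls in drbd_req_make_private_bio() */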
+	drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+	if (drbd_io_bio_set == NULL)
+		goto Enomem;
+
+	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
+					   BIOSET_NEED_BVECS |
+					   BIOSET_NEED_RESCUER);
 	if (drbd_md_io_bio_set == NULL)
 		goto Enomem;
 
@@ -2839,7 +2850,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	q->queue_lock = &resource->req_lock;
 
 	device->md_io.page = alloc_page(GFP_KERNEL);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 02255a0..ad0fcb4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2294,7 +2294,7 @@ _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_
 static enum drbd_ret_code
 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
 {
-	static enum drbd_ret_code rv;
+	enum drbd_ret_code rv;
 	struct drbd_peer_device *peer_device;
 	int i;
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1b0a2be..c7e95e6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
 	struct drbd_device *device = octx->device;
 	struct issue_flush_context *ctx = octx->ctx;
 
-	if (bio->bi_error) {
-		ctx->error = bio->bi_error;
-		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+	if (bio->bi_status) {
+		ctx->error = blk_status_to_errno(bio->bi_status);
+		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
 	}
 	kfree(octx);
 	bio_put(bio);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 6566243..f6e865b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m)
 {
-	m->bio->bi_error = m->error;
+	m->bio->bi_status = errno_to_blk_status(m->error);
 	bio_endio(m->bio);
 	dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
 
 	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
 			GFP_NOIO, 0))
-		req->private_bio->bi_error = -EIO;
+		req->private_bio->bi_status = BLK_STS_IOERR;
 	bio_endio(req->private_bio);
 }
 
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 		/* only pass the error to the upper layers.
 		 * if user cannot handle io errors, that's not our business. */
 		drbd_err(device, "could not kmalloc() req\n");
-		bio->bi_error = -ENOMEM;
+		bio->bi_status = BLK_STS_RESOURCE;
 		bio_endio(bio);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1560,7 +1560,7 @@ blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
 	unsigned long start_jif;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	start_jif = jiffies;
 
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index eb49e7f..9e1866a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -263,7 +263,7 @@ enum drbd_req_state_bits {
 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
 {
 	struct bio *bio;
-	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
+	bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
 
 	req->private_bio = bio;
 
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1afcb4e..1d8726a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
 	struct drbd_device *device;
 
 	device = bio->bi_private;
-	device->md_io.error = bio->bi_error;
+	device->md_io.error = blk_status_to_errno(bio->bi_status);
 
 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
 	 * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
 			  bio_op(bio) == REQ_OP_DISCARD;
 
-	if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+	if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
 		drbd_warn(device, "%s: error=%d s=%llus\n",
 				is_write ? (is_discard ? "discard" : "write")
-					: "read", bio->bi_error,
+					: "read", bio->bi_status,
 				(unsigned long long)peer_req->i.sector);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
 
-		if (!bio->bi_error)
+		if (!bio->bi_status)
 			drbd_panic_after_delayed_completion_of_aborted_request(device);
 	}
 
 	/* to avoid recursion in __req_mod */
-	if (unlikely(bio->bi_error)) {
+	if (unlikely(bio->bi_status)) {
 		switch (bio_op(bio)) {
 		case REQ_OP_WRITE_ZEROES:
 		case REQ_OP_DISCARD:
-			if (bio->bi_error == -EOPNOTSUPP)
+			if (bio->bi_status == BLK_STS_NOTSUPP)
 				what = DISCARD_COMPLETED_NOTSUPP;
 			else
 				what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
 	}
 
 	bio_put(req->private_bio);
-	req->private_bio = ERR_PTR(bio->bi_error);
+	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 60d4c76..ce82364 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
  * =============================
  */
 
-static void floppy_end_request(struct request *req, int error)
+static void floppy_end_request(struct request *req, blk_status_t error)
 {
 	unsigned int nr_sectors = current_count_sectors;
 	unsigned int drive = (unsigned long)req->rq_disk->private_data;
@@ -2263,7 +2263,7 @@ static void request_done(int uptodate)
 			DRWE->last_error_generation = DRS->generation;
 		}
 		spin_lock_irqsave(q->queue_lock, flags);
-		floppy_end_request(req, -EIO);
+		floppy_end_request(req, BLK_STS_IOERR);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
 	int drive = cbdata->drive;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		pr_info("floppy: error %d while reading block 0\n",
-			bio->bi_error);
+			bio->bi_status);
 		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 	}
 	complete(&cbdata->complete);
@@ -4203,6 +4203,7 @@ static int __init do_floppy_init(void)
 			goto out_put_disk;
 		}
 
+		blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
 		blk_queue_max_hw_sectors(disks[drive]->queue, 64);
 		disks[drive]->major = FLOPPY_MAJOR;
 		disks[drive]->first_minor = TOMINOR(drive);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d9329..0de1144 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,7 +221,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
+		 loff_t logical_blocksize)
 {
 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
@@ -233,6 +234,12 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 		lo->lo_offset = offset;
 	if (lo->lo_sizelimit != sizelimit)
 		lo->lo_sizelimit = sizelimit;
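+	/* with LO_FLAGS_BLOCKSIZE, expose the user-requested logical block size on the queue */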
+	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
+		lo->lo_logical_blocksize = logical_blocksize;
+		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
+		blk_queue_logical_block_size(lo->lo_queue,
+					     lo->lo_logical_blocksize);
+	}
 	set_capacity(lo->lo_disk, x);
 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 	/* let user-space know about the new size */
@@ -457,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
 		zero_fill_bio(bio);
 	}
 
-	blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0);
+	blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -608,6 +615,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+	/* loop not yet configured, no running thread, nothing to flush */
+	if (lo->lo_state != Lo_bound)
+		return 0;
 	return loop_switch(lo, NULL);
 }
 
@@ -810,6 +820,7 @@ static void loop_config_discard(struct loop_device *lo)
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
+	int lo_bits = 9;
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
@@ -829,8 +840,11 @@ static void loop_config_discard(struct loop_device *lo)
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
+		lo_bits = blksize_bits(lo->lo_logical_blocksize);
+
+	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
+	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -840,10 +854,16 @@ static void loop_unprepare_queue(struct loop_device *lo)
 	kthread_stop(lo->worker_task);
 }
 
+static int loop_kthread_worker_fn(void *worker_ptr)
+{
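+	/*
+	 * The worker does writeback on behalf of the backing file, so mark it
+	 * PF_LESS_THROTTLE: dirty-page throttling must not stall the thread
+	 * whose progress is needed to clean those pages.
+	 */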
+	current->flags |= PF_LESS_THROTTLE;
+	return kthread_worker_fn(worker_ptr);
+}
+
 static int loop_prepare_queue(struct loop_device *lo)
 {
 	kthread_init_worker(&lo->worker);
-	lo->worker_task = kthread_run(kthread_worker_fn,
+	lo->worker_task = kthread_run(loop_kthread_worker_fn,
 			&lo->worker, "loop%d", lo->lo_number);
 	if (IS_ERR(lo->worker_task))
 		return -ENOMEM;
@@ -918,6 +938,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	lo->use_dio = false;
 	lo->lo_blocksize = lo_blocksize;
+	lo->lo_logical_blocksize = 512;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -1083,6 +1104,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
+	int lo_flags = lo->lo_flags;
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
@@ -1115,12 +1137,30 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (err)
 		goto exit;
 
+	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
+		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
+			lo->lo_logical_blocksize = 512;
+		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
+		if (LO_INFO_BLOCKSIZE(info) != 512 &&
+		    LO_INFO_BLOCKSIZE(info) != 1024 &&
+		    LO_INFO_BLOCKSIZE(info) != 2048 &&
+		    LO_INFO_BLOCKSIZE(info) != 4096)
+			return -EINVAL;
+		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
+			return -EINVAL;
+	}
+
 	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit)
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+	    lo->lo_sizelimit != info->lo_sizelimit ||
+	    lo->lo_flags != lo_flags ||
+	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
+	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
+				     LO_INFO_BLOCKSIZE(info))) {
 			err = -EFBIG;
 			goto exit;
 		}
+	}
 
 	loop_config_discard(lo);
 
@@ -1303,12 +1343,13 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
 	return err;
 }
 
-static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+static int loop_set_capacity(struct loop_device *lo)
 {
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
+				lo->lo_logical_blocksize);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1366,7 +1407,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 	case LOOP_SET_CAPACITY:
 		err = -EPERM;
 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_capacity(lo, bdev);
+			err = loop_set_capacity(lo);
 		break;
 	case LOOP_SET_DIRECT_IO:
 		err = -EPERM;
@@ -1642,7 +1683,7 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1651,7 +1692,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	if (lo->lo_state != Lo_bound)
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	switch (req_op(cmd->rq)) {
 	case REQ_OP_FLUSH:
@@ -1666,7 +1707,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	kthread_queue_work(&lo->worker, &cmd->work);
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index fecd3f9..2c096b9 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,6 +49,7 @@ struct loop_device {
 	struct file *	lo_backing_file;
 	struct block_device *lo_device;
 	unsigned	lo_blocksize;
+	unsigned	lo_logical_blocksize;
 	void		*key_data; 
 
 	gfp_t		old_gfp_mask;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3a779a4..61b046f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
 						struct smart_attr *attrib);
 
-static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 
@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
 		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-		mtip_complete_command(cmd, -EIO);
+		mtip_complete_command(cmd, BLK_STS_IOERR);
 		return;
 	}
 
@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
 					tag,
 					fail_reason != NULL ?
 						fail_reason : "unknown");
-					mtip_complete_command(cmd, -ENODATA);
+					mtip_complete_command(cmd, BLK_STS_MEDIUM);
 					continue;
 				}
 			}
@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
 			dev_warn(&port->dd->pdev->dev,
 				"retiring tag %d\n", tag);
 
-			mtip_complete_command(cmd, -EIO);
+			mtip_complete_command(cmd, BLK_STS_IOERR);
 		}
 	}
 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -1063,23 +1063,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 	/* insert request and run queue */
 	blk_execute_rq(rq->q, NULL, rq, true);
 
-	rv = int_cmd->status;
-	if (rv < 0) {
-		if (rv == -ERESTARTSYS) { /* interrupted */
-			dev_err(&dd->pdev->dev,
-				"Internal command [%02X] was interrupted after %u ms\n",
-				fis->command,
-				jiffies_to_msecs(jiffies - start));
-			rv = -EINTR;
-			goto exec_ic_exit;
-		} else if (rv == 0) /* timeout */
-			dev_err(&dd->pdev->dev,
-				"Internal command did not complete [%02X] within timeout of  %lu ms\n",
-				fis->command, timeout);
-		else
-			dev_err(&dd->pdev->dev,
-				"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
-				fis->command, rv, timeout);
+	if (int_cmd->status) {
+		dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
+				fis->command, int_cmd->status);
+		rv = -EIO;
 
 		if (mtip_check_surprise_removal(dd->pdev) ||
 			test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
@@ -2753,7 +2740,7 @@ static void mtip_abort_cmd(struct request *req, void *data,
 	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
 
 	clear_bit(req->tag, dd->port->cmds_to_issue);
-	cmd->status = -EIO;
+	cmd->status = BLK_STS_IOERR;
 	mtip_softirq_done_fn(req);
 }
 
@@ -3597,7 +3584,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 		int err;
 
 		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
-		blk_mq_end_request(rq, err);
+		blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
 		return 0;
 	}
 
@@ -3633,8 +3620,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	return false;
 }
 
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
-				   struct request *rq)
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+		struct request *rq)
 {
 	struct driver_data *dd = hctx->queue->queuedata;
 	struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3629,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 	struct mtip_cmd_sg *command_sg;
 
 	if (mtip_commands_active(dd->port))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	/* Populate the SG list */
 	cmd->command_header->opts =
@@ -3666,10 +3653,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(rq);
 	mtip_issue_non_ncq_command(dd->port, rq->tag);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
@@ -3681,15 +3668,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return mtip_issue_reserved_cmd(hctx, rq);
 
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	blk_mq_start_request(rq);
 
 	ret = mtip_submit_request(hctx, rq);
 	if (likely(!ret))
-		return BLK_MQ_RQ_QUEUE_OK;
-
-	return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_OK;
+	return BLK_STS_IOERR;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3730,7 +3716,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
 	if (reserved) {
 		struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 
-		cmd->status = -ETIME;
+		cmd->status = BLK_STS_TIMEOUT;
 		return BLK_EH_HANDLED;
 	}
 
@@ -3961,7 +3947,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	cmd->status = -ENODEV;
+	cmd->status = BLK_STS_IOERR;
 	blk_mq_complete_request(rq);
 }
 
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 37b8e3e..e8286af 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -342,7 +342,7 @@ struct mtip_cmd {
 	int retries; /* The number of retries left for this command. */
 
 	int direction; /* Data transfer direction */
-	int status;
+	blk_status_t status;
 };
 
 /* Structure used to describe a port. */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c..977ec96 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -116,7 +116,7 @@ struct nbd_cmd {
 	int index;
 	int cookie;
 	struct completion send_complete;
-	int status;
+	blk_status_t status;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	struct nbd_config *config;
 
 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
-		cmd->status = -EIO;
+		cmd->status = BLK_STS_TIMEOUT;
 		return BLK_EH_HANDLED;
 	}
 
@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 				    "Connection timed out\n");
 	}
 	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
-	cmd->status = -EIO;
+	cmd->status = BLK_STS_IOERR;
 	sock_shutdown(nbd);
 	nbd_config_put(nbd);
 
@@ -400,6 +400,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
 	u32 type;
+	u32 nbd_cmd_flags = 0;
 	u32 tag = blk_mq_unique_tag(req);
 	int sent = nsock->sent, skip = 0;
 
@@ -429,6 +430,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 		return -EIO;
 	}
 
+	if (req->cmd_flags & REQ_FUA)
+		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+
 	/* We did a partial send previously, and we at least sent the whole
 	 * request struct, so just go and send the rest of the pages in the
 	 * request.
@@ -442,7 +446,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	}
 	cmd->index = index;
 	cmd->cookie = nsock->cookie;
-	request.type = htonl(type);
+	request.type = htonl(type | nbd_cmd_flags);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
 		request.len = htonl(size);
@@ -465,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 				nsock->pending = req;
 				nsock->sent = sent;
 			}
-			return BLK_MQ_RQ_QUEUE_BUSY;
+			return BLK_STS_RESOURCE;
 		}
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
@@ -506,7 +510,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 					 */
 					nsock->pending = req;
 					nsock->sent = sent;
-					return BLK_MQ_RQ_QUEUE_BUSY;
+					return BLK_STS_RESOURCE;
 				}
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -574,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 	if (ntohl(reply.error)) {
 		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
 			ntohl(reply.error));
-		cmd->status = -EIO;
+		cmd->status = BLK_STS_IOERR;
 		return cmd;
 	}
 
@@ -599,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 				 */
 				if (nbd_disconnected(config) ||
 				    config->num_connections <= 1) {
-					cmd->status = -EIO;
+					cmd->status = BLK_STS_IOERR;
 					return cmd;
 				}
 				return ERR_PTR(-EIO);
@@ -651,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
 	if (!blk_mq_request_started(req))
 		return;
 	cmd = blk_mq_rq_to_pdu(req);
-	cmd->status = -EIO;
+	cmd->status = BLK_STS_IOERR;
 	blk_mq_complete_request(req);
 }
 
@@ -740,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 		nbd_config_put(nbd);
 		return -EINVAL;
 	}
-	cmd->status = 0;
+	cmd->status = BLK_STS_OK;
 again:
 	nsock = config->socks[index];
 	mutex_lock(&nsock->tx_lock);
@@ -794,7 +798,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	return ret;
 }
 
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -818,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
-	if (ret < 0)
-		ret = BLK_MQ_RQ_QUEUE_ERROR;
-	if (!ret)
-		ret = BLK_MQ_RQ_QUEUE_OK;
 	complete(&cmd->send_complete);
 
-	return ret;
+	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -910,6 +910,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 			continue;
 		}
 		sk_set_memalloc(sock->sk);
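+		/* don't let socket sends block past the configured request timeout */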
+		sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
 		old = nsock->sock;
@@ -937,14 +938,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-	nbd->config = NULL;
-	nbd->tag_set.timeout = 0;
-	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
@@ -965,8 +958,12 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-	if (config->flags & NBD_FLAG_SEND_FLUSH)
-		blk_queue_write_cache(nbd->disk->queue, true, false);
+	if (config->flags & NBD_FLAG_SEND_FLUSH) {
+		if (config->flags & NBD_FLAG_SEND_FUA)
+			blk_queue_write_cache(nbd->disk->queue, true, true);
+		else
+			blk_queue_write_cache(nbd->disk->queue, true, false);
+	}
 	else
 		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
@@ -1029,7 +1026,11 @@ static void nbd_config_put(struct nbd_device *nbd)
 			}
 			kfree(config->socks);
 		}
-		nbd_reset(nbd);
+		kfree(nbd->config);
+		nbd->config = NULL;
+
+		nbd->tag_set.timeout = 0;
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1075,6 +1076,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 			return -ENOMEM;
 		}
 		sk_set_memalloc(config->socks[i]->sock->sk);
+		config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
 		INIT_WORK(&args->work, recv_work);
@@ -1309,6 +1311,8 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
 		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
 	if (flags & NBD_FLAG_SEND_FLUSH)
 		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+	if (flags & NBD_FLAG_SEND_FUA)
+		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
 	if (flags & NBD_FLAG_SEND_TRIM)
 		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
 
@@ -1483,7 +1487,6 @@ static int nbd_dev_add(int index)
 	disk->fops = &nbd_fops;
 	disk->private_data = nbd;
 	sprintf(disk->disk_name, "nbd%d", index);
-	nbd_reset(nbd);
 	add_disk(disk);
 	nbd_total_devices++;
 	return index;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index d946e1e..71f4422 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -35,7 +35,8 @@ struct nullb {
 	struct request_queue *q;
 	struct gendisk *disk;
 	struct nvm_dev *ndev;
-	struct blk_mq_tag_set tag_set;
+	struct blk_mq_tag_set *tag_set;
+	struct blk_mq_tag_set __tag_set;
 	struct hrtimer timer;
 	unsigned int queue_depth;
 	spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
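+/* global tag set, shared by all devices when the "shared_tags" parameter is set */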
+static struct blk_mq_tag_set tag_set;
 
 enum {
 	NULL_IRQ_NONE		= 0,
@@ -109,7 +111,7 @@ static int bs = 512;
 module_param(bs, int, S_IRUGO);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 2;
+static int nr_devices = 1;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
@@ -121,6 +123,10 @@ static bool blocking;
 module_param(blocking, bool, S_IRUGO);
 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
 
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -229,11 +235,11 @@ static void end_cmd(struct nullb_cmd *cmd)
 
 	switch (queue_mode)  {
 	case NULL_Q_MQ:
-		blk_mq_end_request(cmd->rq, 0);
+		blk_mq_end_request(cmd->rq, BLK_STS_OK);
 		return;
 	case NULL_Q_RQ:
 		INIT_LIST_HEAD(&cmd->rq->queuelist);
-		blk_end_request_all(cmd->rq, 0);
+		blk_end_request_all(cmd->rq, BLK_STS_OK);
 		break;
 	case NULL_Q_BIO:
 		bio_endio(cmd->bio);
@@ -356,7 +362,7 @@ static void null_request_fn(struct request_queue *q)
 	}
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,34 +379,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	null_handle_cmd(cmd);
-	return BLK_MQ_RQ_QUEUE_OK;
-}
-
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
-	BUG_ON(!nullb);
-	BUG_ON(!nq);
-
-	init_waitqueue_head(&nq->wait);
-	nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			  unsigned int index)
-{
-	struct nullb *nullb = data;
-	struct nullb_queue *nq = &nullb->queues[index];
-
-	hctx->driver_data = nq;
-	null_init_queue(nullb, nq);
-	nullb->nr_queues++;
-
-	return 0;
+	return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq       = null_queue_rq,
-	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
 };
 
@@ -422,11 +405,12 @@ static void cleanup_queues(struct nullb *nullb)
 
 #ifdef CONFIG_NVM
 
-static void null_lnvm_end_io(struct request *rq, int error)
+static void null_lnvm_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
 
-	rqd->error = error;
+	/* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
+	rqd->error = status ? -EIO : 0;
 	nvm_end_io(rqd);
 
 	blk_put_request(rq);
@@ -591,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
 	else
 		del_gendisk(nullb->disk);
 	blk_cleanup_queue(nullb->q);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 	if (!use_lightnvm)
 		put_disk(nullb->disk);
 	cleanup_queues(nullb);
@@ -614,6 +598,32 @@ static const struct block_device_operations null_fops = {
 	.release =	null_release,
 };
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+	BUG_ON(!nullb);
+	BUG_ON(!nq);
+
+	init_waitqueue_head(&nq->wait);
+	nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+	struct request_queue *q = nullb->q;
+	struct blk_mq_hw_ctx *hctx;
+	struct nullb_queue *nq;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx->nr_ctx || !hctx->tags)
+			continue;
+		nq = &nullb->queues[i];
+		hctx->driver_data = nq;
+		null_init_queue(nullb, nq);
+		nullb->nr_queues++;
+	}
+}
+
 static int setup_commands(struct nullb_queue *nq)
 {
 	struct nullb_cmd *cmd;
@@ -694,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
 	return 0;
 }
 
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+	set->ops = &null_mq_ops;
+	set->nr_hw_queues = submit_queues;
+	set->queue_depth = hw_queue_depth;
+	set->numa_node = home_node;
+	set->cmd_size	= sizeof(struct nullb_cmd);
+	set->flags = BLK_MQ_F_SHOULD_MERGE;
+	set->driver_data = NULL;
+
+	if (blocking)
+		set->flags |= BLK_MQ_F_BLOCKING;
+
+	return blk_mq_alloc_tag_set(set);
+}
+
 static int null_add_dev(void)
 {
 	struct nullb *nullb;
@@ -715,26 +741,23 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		nullb->tag_set.ops = &null_mq_ops;
-		nullb->tag_set.nr_hw_queues = submit_queues;
-		nullb->tag_set.queue_depth = hw_queue_depth;
-		nullb->tag_set.numa_node = home_node;
-		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
-		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-		nullb->tag_set.driver_data = nullb;
+		if (shared_tags) {
+			nullb->tag_set = &tag_set;
+			rv = 0;
+		} else {
+			nullb->tag_set = &nullb->__tag_set;
+			rv = null_init_tag_set(nullb->tag_set);
+		}
 
-		if (blocking)
-			nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
-
-		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
 		if (rv)
 			goto out_cleanup_queues;
 
-		nullb->q = blk_mq_init_queue(&nullb->tag_set);
+		nullb->q = blk_mq_init_queue(nullb->tag_set);
 		if (IS_ERR(nullb->q)) {
 			rv = -ENOMEM;
 			goto out_cleanup_tags;
 		}
+		null_init_queues(nullb);
 	} else if (queue_mode == NULL_Q_BIO) {
 		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
 		if (!nullb->q) {
@@ -787,8 +810,8 @@ static int null_add_dev(void)
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_tag_set(&nullb->tag_set);
+	if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+		blk_mq_free_tag_set(nullb->tag_set);
 out_cleanup_queues:
 	cleanup_queues(nullb);
 out_free_nullb:
@@ -821,6 +844,9 @@ static int __init null_init(void)
 		queue_mode = NULL_Q_MQ;
 	}
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		null_init_tag_set(&tag_set);
+
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
 		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
@@ -881,6 +907,9 @@ static void __exit null_exit(void)
 	}
 	mutex_unlock(&lock);
 
+	if (queue_mode == NULL_Q_MQ && shared_tags)
+		blk_mq_free_tag_set(&tag_set);
+
 	kmem_cache_destroy(ppa_cache);
 }
 
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index b1267ef..7b8c636 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -305,6 +305,7 @@ static void pcd_init_units(void)
 			put_disk(disk);
 			continue;
 		}
+		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
 		cd->disk = disk;
 		cd->pi = &cd->pia;
 		cd->present = 0;
@@ -783,7 +784,7 @@ static void pcd_request(void)
 			ps_set_intr(do_pcd_read, NULL, 0, nice);
 			return;
 		} else {
-			__blk_end_request_all(pcd_req, -EIO);
+			__blk_end_request_all(pcd_req, BLK_STS_IOERR);
 			pcd_req = NULL;
 		}
 	}
@@ -794,7 +795,7 @@ static void do_pcd_request(struct request_queue *q)
 	pcd_request();
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
 	unsigned long saved_flags;
 
@@ -837,7 +838,7 @@ static void pcd_start(void)
 
 	if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
 		pcd_bufblk = -1;
-		next_request(-EIO);
+		next_request(BLK_STS_IOERR);
 		return;
 	}
 
@@ -871,7 +872,7 @@ static void do_pcd_read_drq(void)
 			return;
 		}
 		pcd_bufblk = -1;
-		next_request(-EIO);
+		next_request(BLK_STS_IOERR);
 		return;
 	}
 
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 7d2402f..27a44b9 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -438,7 +438,7 @@ static void run_fsm(void)
 				phase = NULL;
 				spin_lock_irqsave(&pd_lock, saved_flags);
 				if (!__blk_end_request_cur(pd_req,
-						res == Ok ? 0 : -EIO)) {
+						res == Ok ? BLK_STS_OK : BLK_STS_IOERR)) {
 					if (!set_next_request())
 						stop = 1;
 				}
@@ -863,6 +863,7 @@ static void pd_probe_drive(struct pd_unit *disk)
 		return;
 	}
 	blk_queue_max_hw_sectors(p->queue, cluster);
+	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
 
 	if (disk->drive == -1) {
 		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f24ca73..eef7a91 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -293,6 +293,7 @@ static void __init pf_init_units(void)
 			return;
 		}
 		blk_queue_max_segments(disk->queue, cluster);
+		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
 		pf->disk = disk;
 		pf->pi = &pf->pia;
 		pf->media_status = PF_NM;
@@ -801,7 +802,7 @@ static int set_next_request(void)
 	return pf_req != NULL;
 }
 
-static void pf_end_request(int err)
+static void pf_end_request(blk_status_t err)
 {
 	if (pf_req && !__blk_end_request_cur(pf_req, err))
 		pf_req = NULL;
@@ -821,7 +822,7 @@ static void pf_request(void)
 	pf_count = blk_rq_cur_sectors(pf_req);
 
 	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
-		pf_end_request(-EIO);
+		pf_end_request(BLK_STS_IOERR);
 		goto repeat;
 	}
 
@@ -836,7 +837,7 @@ static void pf_request(void)
 		pi_do_claimed(pf_current->pi, do_pf_write);
 	else {
 		pf_busy = 0;
-		pf_end_request(-EIO);
+		pf_end_request(BLK_STS_IOERR);
 		goto repeat;
 	}
 }
@@ -868,7 +869,7 @@ static int pf_next_buf(void)
 	return 0;
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
 	unsigned long saved_flags;
 
@@ -896,7 +897,7 @@ static void do_pf_read_start(void)
 			pi_do_claimed(pf_current->pi, do_pf_read_start);
 			return;
 		}
-		next_request(-EIO);
+		next_request(BLK_STS_IOERR);
 		return;
 	}
 	pf_mask = STAT_DRQ;
@@ -915,7 +916,7 @@ static void do_pf_read_drq(void)
 				pi_do_claimed(pf_current->pi, do_pf_read_start);
 				return;
 			}
-			next_request(-EIO);
+			next_request(BLK_STS_IOERR);
 			return;
 		}
 		pi_read_block(pf_current->pi, pf_buf, 512);
@@ -942,7 +943,7 @@ static void do_pf_write_start(void)
 			pi_do_claimed(pf_current->pi, do_pf_write_start);
 			return;
 		}
-		next_request(-EIO);
+		next_request(BLK_STS_IOERR);
 		return;
 	}
 
@@ -955,7 +956,7 @@ static void do_pf_write_start(void)
 				pi_do_claimed(pf_current->pi, do_pf_write_start);
 				return;
 			}
-			next_request(-EIO);
+			next_request(BLK_STS_IOERR);
 			return;
 		}
 		pi_write_block(pf_current->pi, pf_buf, 512);
@@ -975,7 +976,7 @@ static void do_pf_write_done(void)
 			pi_do_claimed(pf_current->pi, do_pf_write_start);
 			return;
 		}
-		next_request(-EIO);
+		next_request(BLK_STS_IOERR);
 		return;
 	}
 	pi_disconnect(pf_current->pi);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 205b865..6b8b097 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -98,6 +98,7 @@ static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
 static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
 static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
 static mempool_t *psd_pool;
+static struct bio_set *pkt_bio_set;
 
 static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
 static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
@@ -348,9 +349,9 @@ static void class_pktcdvd_release(struct class *cls)
 {
 	kfree(cls);
 }
-static ssize_t class_pktcdvd_show_map(struct class *c,
-					struct class_attribute *attr,
-					char *data)
+
+static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
+			       char *data)
 {
 	int n = 0;
 	int idx;
@@ -368,11 +369,10 @@ static ssize_t class_pktcdvd_show_map(struct class *c,
 	mutex_unlock(&ctl_mutex);
 	return n;
 }
+static CLASS_ATTR_RO(device_map);
 
-static ssize_t class_pktcdvd_store_add(struct class *c,
-					struct class_attribute *attr,
-					const char *buf,
-					size_t count)
+static ssize_t add_store(struct class *c, struct class_attribute *attr,
+			 const char *buf, size_t count)
 {
 	unsigned int major, minor;
 
@@ -390,11 +390,10 @@ static ssize_t class_pktcdvd_store_add(struct class *c,
 
 	return -EINVAL;
 }
+static CLASS_ATTR_WO(add);
 
-static ssize_t class_pktcdvd_store_remove(struct class *c,
-					  struct class_attribute *attr,
-					  const char *buf,
-					size_t count)
+static ssize_t remove_store(struct class *c, struct class_attribute *attr,
+			    const char *buf, size_t count)
 {
 	unsigned int major, minor;
 	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
@@ -403,14 +402,15 @@ static ssize_t class_pktcdvd_store_remove(struct class *c,
 	}
 	return -EINVAL;
 }
+static CLASS_ATTR_WO(remove);
 
-static struct class_attribute class_pktcdvd_attrs[] = {
- __ATTR(add,            0200, NULL, class_pktcdvd_store_add),
- __ATTR(remove,         0200, NULL, class_pktcdvd_store_remove),
- __ATTR(device_map,     0444, class_pktcdvd_show_map, NULL),
- __ATTR_NULL
+static struct attribute *class_pktcdvd_attrs[] = {
+	&class_attr_add.attr,
+	&class_attr_remove.attr,
+	&class_attr_device_map.attr,
+	NULL,
 };
-
+ATTRIBUTE_GROUPS(class_pktcdvd);
 
 static int pkt_sysfs_init(void)
 {
@@ -426,7 +426,7 @@ static int pkt_sysfs_init(void)
 	class_pktcdvd->name = DRIVER_NAME;
 	class_pktcdvd->owner = THIS_MODULE;
 	class_pktcdvd->class_release = class_pktcdvd_release;
-	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
+	class_pktcdvd->class_groups = class_pktcdvd_groups;
 	ret = class_register(class_pktcdvd);
 	if (ret) {
 		kfree(class_pktcdvd);
@@ -707,7 +707,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	scsi_req_init(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -952,9 +951,9 @@ static void pkt_end_io_read(struct bio *bio)
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		atomic_inc(&pkt->io_errors);
 	if (atomic_dec_and_test(&pkt->io_wait)) {
 		atomic_inc(&pkt->run_sm);
@@ -969,7 +968,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
 
 	pd->stats.pkt_ended++;
 
@@ -1305,16 +1304,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt_queue_bio(pd, pkt->w_bio);
 }
 
-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
 {
 	struct bio *bio;
 
-	if (error)
+	if (status)
 		pkt->cache_valid = 0;
 
 	/* Finish all bios corresponding to this packet */
 	while ((bio = bio_list_pop(&pkt->orig_bios))) {
-		bio->bi_error = error;
+		bio->bi_status = status;
 		bio_endio(bio);
 	}
 }
@@ -1349,7 +1348,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			if (atomic_read(&pkt->io_wait) > 0)
 				return;
 
-			if (!pkt->w_bio->bi_error) {
+			if (!pkt->w_bio->bi_status) {
 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			} else {
 				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1365,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			break;
 
 		case PACKET_FINISHED_STATE:
-			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+			pkt_finish_packet(pkt, pkt->w_bio->bi_status);
 			return;
 
 		default:
@@ -2301,7 +2300,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 	struct packet_stacked_data *psd = bio->bi_private;
 	struct pktcdvd_device *pd = psd->pd;
 
-	psd->bio->bi_error = bio->bi_error;
+	psd->bio->bi_status = bio->bi_status;
 	bio_put(bio);
 	bio_endio(psd->bio);
 	mempool_free(psd, psd_pool);
@@ -2310,7 +2309,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 
 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+	struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, pkt_bio_set);
 	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
 
 	psd->pd = pd;
@@ -2412,9 +2411,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 	char b[BDEVNAME_SIZE];
 	struct bio *split;
 
-	blk_queue_bounce(q, &bio);
-
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	pd = q->queuedata;
 	if (!pd) {
@@ -2455,7 +2452,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 
 			split = bio_split(bio, last_zone -
 					  bio->bi_iter.bi_sector,
-					  GFP_NOIO, fs_bio_set);
+					  GFP_NOIO, pkt_bio_set);
 			bio_chain(split, bio);
 		} else {
 			split = bio;
@@ -2583,6 +2580,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	bdev = bdget(dev);
 	if (!bdev)
 		return -ENOMEM;
+	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+		bdput(bdev);
+		return -EINVAL;
+	}
 	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
 	if (ret)
 		return ret;
@@ -2919,6 +2921,11 @@ static int __init pkt_init(void)
 					sizeof(struct packet_stacked_data));
 	if (!psd_pool)
 		return -ENOMEM;
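+	/* pkt_bio_set backs bio_clone_fast() and bio_split() in the make_request path */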
+	pkt_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
+	if (!pkt_bio_set) {
+		mempool_destroy(psd_pool);
+		return -ENOMEM;
+	}
 
 	ret = register_blkdev(pktdev_major, DRIVER_NAME);
 	if (ret < 0) {
@@ -2951,6 +2958,7 @@ static int __init pkt_init(void)
 	unregister_blkdev(pktdev_major, DRIVER_NAME);
 out2:
 	mempool_destroy(psd_pool);
+	bioset_free(pkt_bio_set);
 	return ret;
 }
 
@@ -2964,6 +2972,7 @@ static void __exit pkt_exit(void)
 
 	unregister_blkdev(pktdev_major, DRIVER_NAME);
 	mempool_destroy(psd_pool);
+	bioset_free(pkt_bio_set);
 }
 
 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index a809e3e..075662f 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 	if (res) {
 		dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
 			__LINE__, op, res);
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 		return 0;
 	}
 
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
 	if (res) {
 		dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
 			__func__, __LINE__, res);
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 		return 0;
 	}
 
@@ -208,7 +208,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 			break;
 		default:
 			blk_dump_rq_flags(req, DEVICE_NAME " bad request");
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 		}
 	}
 }
@@ -231,7 +231,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 	struct ps3_storage_device *dev = data;
 	struct ps3disk_private *priv;
 	struct request *req;
-	int res, read, error;
+	int res, read;
+	blk_status_t error;
 	u64 tag, status;
 	const char *op;
 
@@ -269,7 +270,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 	if (status) {
 		dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
 			__LINE__, op, status);
-		error = -EIO;
+		error = BLK_STS_IOERR;
 	} else {
 		dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
 			__LINE__, op);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 456b4fe..e0e81ca 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
 	kfree(priv->cache.tags);
 }
 
-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 			size_t len, size_t *retlen, u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 		(unsigned int)from, len);
 
 	if (from >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (len > priv->size - from)
 		len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
 	return 0;
 }
 
-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
 			 size_t len, size_t *retlen, const u_char *buf)
 {
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	unsigned int cached, count;
 
 	if (to >= priv->size)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (len > priv->size - to)
 		len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
 	loff_t offset = bio->bi_iter.bi_sector << 9;
-	int error = 0;
+	blk_status_t error = 0;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
 		if (retlen != len) {
 			dev_err(&dev->core, "Short %s\n", op);
-			error = -EIO;
+			error = BLK_STS_IOERR;
 			goto out;
 		}
 
@@ -593,7 +593,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	next = bio_list_peek(&priv->list);
 	spin_unlock_irq(&priv->lock);
 
-	bio->bi_error = error;
+	bio->bi_status = error;
 	bio_endio(bio);
 	return next;
 }
@@ -606,7 +606,7 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
 
 	dev_dbg(&dev->core, "%s\n", __func__);
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	spin_lock_irq(&priv->lock);
 	busy = !bio_list_empty(&priv->list);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 454bf9c..b008b6a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -442,6 +442,8 @@ static DEFINE_SPINLOCK(rbd_client_list_lock);
 static struct kmem_cache	*rbd_img_request_cache;
 static struct kmem_cache	*rbd_obj_request_cache;
 
+static struct bio_set		*rbd_bio_clone;
+
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
@@ -1363,7 +1365,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 {
 	struct bio *bio;
 
-	bio = bio_clone(bio_src, gfpmask);
+	bio = bio_clone_fast(bio_src, gfpmask, rbd_bio_clone);
 	if (!bio)
 		return NULL;	/* ENOMEM */
 
@@ -2293,11 +2295,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
 		rbd_assert(img_request->obj_request != NULL);
 		more = obj_request->which < img_request->obj_request_count - 1;
 	} else {
+		blk_status_t status = errno_to_blk_status(result);
+
 		rbd_assert(img_request->rq != NULL);
 
-		more = blk_update_request(img_request->rq, result, xferred);
+		more = blk_update_request(img_request->rq, status, xferred);
 		if (!more)
-			__blk_mq_end_request(img_request->rq, result);
+			__blk_mq_end_request(img_request->rq, status);
 	}
 
 	return more;
@@ -4023,6 +4027,7 @@ static void rbd_queue_workfn(struct work_struct *work)
 
 	switch (req_op(rq)) {
 	case REQ_OP_DISCARD:
+	case REQ_OP_WRITE_ZEROES:
 		op_type = OBJ_OP_DISCARD;
 		break;
 	case REQ_OP_WRITE:
@@ -4149,17 +4154,17 @@ static void rbd_queue_workfn(struct work_struct *work)
 			 obj_op_name(op_type), length, offset, result);
 	ceph_put_snap_context(snapc);
 err:
-	blk_mq_end_request(rq, result);
+	blk_mq_end_request(rq, errno_to_blk_status(result));
 }
 
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
 	queue_work(rbd_wq, work);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void rbd_free_disk(struct rbd_device *rbd_dev)
@@ -4420,6 +4425,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	q->limits.discard_granularity = segment_size;
 	q->limits.discard_alignment = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
 		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -6412,8 +6418,16 @@ static int rbd_slab_init(void)
 	if (!rbd_obj_request_cache)
 		goto out_err;
 
+	rbd_assert(!rbd_bio_clone);
+	rbd_bio_clone = bioset_create(BIO_POOL_SIZE, 0, 0);
+	if (!rbd_bio_clone)
+		goto out_err_clone;
+
 	return 0;
 
+out_err_clone:
+	kmem_cache_destroy(rbd_obj_request_cache);
+	rbd_obj_request_cache = NULL;
 out_err:
 	kmem_cache_destroy(rbd_img_request_cache);
 	rbd_img_request_cache = NULL;
@@ -6429,6 +6443,10 @@ static void rbd_slab_exit(void)
 	rbd_assert(rbd_img_request_cache);
 	kmem_cache_destroy(rbd_img_request_cache);
 	rbd_img_request_cache = NULL;
+
+	rbd_assert(rbd_bio_clone);
+	bioset_free(rbd_bio_clone);
+	rbd_bio_clone = NULL;
 }
 
 static int __init rbd_init(void)
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 9c56636..7f4aceb 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -149,9 +149,9 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct rsxx_cardinfo *card = q->queuedata;
 	struct rsxx_bio_meta *bio_meta;
-	int st = -EINVAL;
+	blk_status_t st = BLK_STS_IOERR;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	might_sleep();
 
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;
 
-	if (unlikely(card->halt)) {
-		st = -EFAULT;
+	if (unlikely(card->halt))
 		goto req_err;
-	}
 
-	if (unlikely(card->dma_fault)) {
-		st = (-EFAULT);
+	if (unlikely(card->dma_fault))
 		goto req_err;
-	}
 
 	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 
 	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
 	if (!bio_meta) {
-		st = -ENOMEM;
+		st = BLK_STS_RESOURCE;
 		goto req_err;
 	}
 
@@ -205,7 +201,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 	kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
 	if (st)
-		bio->bi_error = st;
+		bio->bi_status = st;
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
 }
@@ -288,7 +284,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	}
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
-	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 5a20385f8..6a1b217 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
 	mutex_unlock(&ctrl->work_lock);
 }
 
-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
 				  struct list_head *q,
 				  unsigned int laddr,
 				  rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 
 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	dma->cmd          = HW_CMD_BLK_DISCARD;
 	dma->laddr        = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
 			      struct list_head *q,
 			      int dir,
 			      unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 
 	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr        = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	unsigned int dma_len;
 	int dma_cnt[RSXX_MAX_TARGETS];
 	int tgt;
-	int st;
+	blk_status_t st;
 	int i;
 
 	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	for (i = 0; i < card->n_targets; i++)
 		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
 					FREE_DMA);
-
 	return st;
 }
 
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 6bbc64d..277f27e6 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			   struct bio *bio,
 			   atomic_t *n_dmas,
 			   rsxx_dma_cb cb,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 27833e4..d036868 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -451,8 +451,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
 				    struct skd_special_context *skspcl);
 static void skd_request_fn(struct request_queue *rq);
 static void skd_end_request(struct skd_device *skdev,
-			    struct skd_request_context *skreq, int error);
-static int skd_preop_sg_list(struct skd_device *skdev,
+		struct skd_request_context *skreq, blk_status_t status);
+static bool skd_preop_sg_list(struct skd_device *skdev,
 			     struct skd_request_context *skreq);
 static void skd_postop_sg_list(struct skd_device *skdev,
 			       struct skd_request_context *skreq);
@@ -491,7 +491,7 @@ static void skd_fail_all_pending(struct skd_device *skdev)
 		if (req == NULL)
 			break;
 		blk_start_request(req);
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 	}
 }
 
@@ -545,7 +545,6 @@ static void skd_request_fn(struct request_queue *q)
 	struct request *req = NULL;
 	struct skd_scsi_request *scsi_req;
 	unsigned long io_flags;
-	int error;
 	u32 lba;
 	u32 count;
 	int data_dir;
@@ -716,9 +715,7 @@ static void skd_request_fn(struct request_queue *q)
 		if (!req->bio)
 			goto skip_sg;
 
-		error = skd_preop_sg_list(skdev, skreq);
-
-		if (error != 0) {
+		if (!skd_preop_sg_list(skdev, skreq)) {
 			/*
 			 * Complete the native request with error.
 			 * Note that the request context is still at the
@@ -730,7 +727,7 @@ static void skd_request_fn(struct request_queue *q)
 			 */
 			pr_debug("%s:%s:%d error Out\n",
 				 skdev->name, __func__, __LINE__);
-			skd_end_request(skdev, skreq, error);
+			skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
 			continue;
 		}
 
@@ -805,7 +802,7 @@ static void skd_request_fn(struct request_queue *q)
 }
 
 static void skd_end_request(struct skd_device *skdev,
-			    struct skd_request_context *skreq, int error)
+		struct skd_request_context *skreq, blk_status_t error)
 {
 	if (unlikely(error)) {
 		struct request *req = skreq->req;
@@ -822,7 +819,7 @@ static void skd_end_request(struct skd_device *skdev,
 	__blk_end_request_all(skreq->req, error);
 }
 
-static int skd_preop_sg_list(struct skd_device *skdev,
+static bool skd_preop_sg_list(struct skd_device *skdev,
 			     struct skd_request_context *skreq)
 {
 	struct request *req = skreq->req;
@@ -839,7 +836,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 
 	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
 	if (n_sg <= 0)
-		return -EINVAL;
+		return false;
 
 	/*
 	 * Map scatterlist to PCI bus addresses.
@@ -847,7 +844,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 	 */
 	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
 	if (n_sg <= 0)
-		return -EINVAL;
+		return false;
 
 	SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 
@@ -882,7 +879,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 		}
 	}
 
-	return 0;
+	return true;
 }
 
 static void skd_postop_sg_list(struct skd_device *skdev,
@@ -2333,7 +2330,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
 	case SKD_CHECK_STATUS_REPORT_GOOD:
 	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
-		skd_end_request(skdev, skreq, 0);
+		skd_end_request(skdev, skreq, BLK_STS_OK);
 		break;
 
 	case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -2355,7 +2352,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 
 	case SKD_CHECK_STATUS_REPORT_ERROR:
 	default:
-		skd_end_request(skdev, skreq, -EIO);
+		skd_end_request(skdev, skreq, BLK_STS_IOERR);
 		break;
 	}
 }
@@ -2748,7 +2745,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 			 * native request.
 			 */
 			if (likely(cmp_status == SAM_STAT_GOOD))
-				skd_end_request(skdev, skreq, 0);
+				skd_end_request(skdev, skreq, BLK_STS_OK);
 			else
 				skd_resolve_req_exception(skdev, skreq);
 		}
@@ -3190,7 +3187,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
 			    SKD_MAX_RETRIES)
 				blk_requeue_request(skdev->queue, skreq->req);
 			else
-				skd_end_request(skdev, skreq, -EIO);
+				skd_end_request(skdev, skreq, BLK_STS_IOERR);
 
 			skreq->req = NULL;
 
@@ -4276,6 +4273,7 @@ static int skd_cons_disk(struct skd_device *skdev)
 		rc = -ENOMEM;
 		goto err_out;
 	}
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 
 	skdev->queue = q;
 	disk->queue = q;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 3f3a3ab..6b16ead 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -316,7 +316,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
 	rqe->req = NULL;
 
-	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
+	__blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
 
 	vdc_blk_queue_start(port);
 }
@@ -1023,7 +1023,7 @@ static void vdc_queue_drain(struct vdc_port *port)
 	struct request *req;
 
 	while ((req = blk_fetch_request(port->disk->queue)) != NULL)
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 }
 
 static void vdc_ldc_reset_timer(unsigned long _arg)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3064be6..84434d3 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -493,7 +493,7 @@ static inline int swim_read_sector(struct floppy_state *fs,
 	return ret;
 }
 
-static int floppy_read_sectors(struct floppy_state *fs,
+static blk_status_t floppy_read_sectors(struct floppy_state *fs,
 			       int req_sector, int sectors_nb,
 			       unsigned char *buffer)
 {
@@ -516,7 +516,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
 			ret = swim_read_sector(fs, side, track, sector,
 						buffer);
 			if (try-- == 0)
-				return -EIO;
+				return BLK_STS_IOERR;
 		} while (ret != 512);
 
 		buffer += ret;
@@ -553,7 +553,7 @@ static void do_fd_request(struct request_queue *q)
 
 	req = swim_next_request(swd);
 	while (req) {
-		int err = -EIO;
+		blk_status_t err = BLK_STS_IOERR;
 
 		fs = req->rq_disk->private_data;
 		if (blk_rq_pos(req) >= fs->total_secs)
@@ -864,6 +864,8 @@ static int swim_floppy_init(struct swim_priv *swd)
 			put_disk(swd->unit[drive].disk);
 			goto exit_put_disks;
 		}
+		blk_queue_bounce_limit(swd->unit[drive].disk->queue,
+				BLK_BOUNCE_HIGH);
 		swd->unit[drive].disk->queue->queuedata = swd;
 		swd->unit[drive].swd = swd;
 	}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ba4809c..9f931f8 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -257,7 +257,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
 {
 	struct request *req = fs->cur_req;
 	int rc;
@@ -334,7 +334,7 @@ static void start_request(struct floppy_state *fs)
 		if (fs->mdev->media_bay &&
 		    check_media_bay(fs->mdev->media_bay) != MB_FD) {
 			swim3_dbg("%s", "  media bay absent, dropping req\n");
-			swim3_end_request(fs, -ENODEV, 0);
+			swim3_end_request(fs, BLK_STS_IOERR, 0);
 			continue;
 		}
 
@@ -350,12 +350,12 @@ static void start_request(struct floppy_state *fs)
 		if (blk_rq_pos(req) >= fs->total_secs) {
 			swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
 				  (long)blk_rq_pos(req), (long)fs->total_secs);
-			swim3_end_request(fs, -EIO, 0);
+			swim3_end_request(fs, BLK_STS_IOERR, 0);
 			continue;
 		}
 		if (fs->ejected) {
 			swim3_dbg("%s", "  disk ejected\n");
-			swim3_end_request(fs, -EIO, 0);
+			swim3_end_request(fs, BLK_STS_IOERR, 0);
 			continue;
 		}
 
@@ -364,7 +364,7 @@ static void start_request(struct floppy_state *fs)
 				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
 			if (fs->write_prot) {
 				swim3_dbg("%s", "  try to write, disk write protected\n");
-				swim3_end_request(fs, -EIO, 0);
+				swim3_end_request(fs, BLK_STS_IOERR, 0);
 				continue;
 			}
 		}
@@ -548,7 +548,7 @@ static void act(struct floppy_state *fs)
 				if (fs->retries > 5) {
 					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
 						  fs->req_cyl, fs->cur_cyl);
-					swim3_end_request(fs, -EIO, 0);
+					swim3_end_request(fs, BLK_STS_IOERR, 0);
 					fs->state = idle;
 					return;
 				}
@@ -584,7 +584,7 @@ static void scan_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	fs->cur_cyl = -1;
 	if (fs->retries > 5) {
-		swim3_end_request(fs, -EIO, 0);
+		swim3_end_request(fs, BLK_STS_IOERR, 0);
 		fs->state = idle;
 		start_request(fs);
 	} else {
@@ -608,7 +608,7 @@ static void seek_timeout(unsigned long data)
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
 	swim3_err("%s", "Seek timeout\n");
-	swim3_end_request(fs, -EIO, 0);
+	swim3_end_request(fs, BLK_STS_IOERR, 0);
 	fs->state = idle;
 	start_request(fs);
 	spin_unlock_irqrestore(&swim3_lock, flags);
@@ -637,7 +637,7 @@ static void settle_timeout(unsigned long data)
 		goto unlock;
 	}
 	swim3_err("%s", "Seek settle timeout\n");
-	swim3_end_request(fs, -EIO, 0);
+	swim3_end_request(fs, BLK_STS_IOERR, 0);
 	fs->state = idle;
 	start_request(fs);
  unlock:
@@ -666,7 +666,7 @@ static void xfer_timeout(unsigned long data)
 	swim3_err("Timeout %sing sector %ld\n",
 	       (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
 	       (long)blk_rq_pos(fs->cur_req));
-	swim3_end_request(fs, -EIO, 0);
+	swim3_end_request(fs, BLK_STS_IOERR, 0);
 	fs->state = idle;
 	start_request(fs);
 	spin_unlock_irqrestore(&swim3_lock, flags);
@@ -703,7 +703,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				swim3_err("%s", "Seen sector but cyl=ff?\n");
 				fs->cur_cyl = -1;
 				if (fs->retries > 5) {
-					swim3_end_request(fs, -EIO, 0);
+					swim3_end_request(fs, BLK_STS_IOERR, 0);
 					fs->state = idle;
 					start_request(fs);
 				} else {
@@ -786,7 +786,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				swim3_err("Error %sing block %ld (err=%x)\n",
 				       rq_data_dir(req) == WRITE? "writ": "read",
 				       (long)blk_rq_pos(req), err);
-				swim3_end_request(fs, -EIO, 0);
+				swim3_end_request(fs, BLK_STS_IOERR, 0);
 				fs->state = idle;
 			}
 		} else {
@@ -795,7 +795,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
 				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
 					  fs->state, rq_data_dir(req), intr, err);
-				swim3_end_request(fs, -EIO, 0);
+				swim3_end_request(fs, BLK_STS_IOERR, 0);
 				fs->state = idle;
 				start_request(fs);
 				break;
@@ -1223,6 +1223,7 @@ static int swim3_attach(struct macio_dev *mdev,
 		put_disk(disk);
 		return -ENOMEM;
 	}
+	blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
 	disk->queue->queuedata = &floppy_states[index];
 
 	if (index == 0) {
@@ -1245,7 +1246,7 @@ static int swim3_attach(struct macio_dev *mdev,
 	return 0;
 }
 
-static struct of_device_id swim3_match[] =
+static const struct of_device_id swim3_match[] =
 {
 	{
 	.name		= "swim3",
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index c8e072c..08586dc 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -745,7 +745,7 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
 
 static inline void carm_end_request_queued(struct carm_host *host,
 					   struct carm_request *crq,
-					   int error)
+					   blk_status_t error)
 {
 	struct request *req = crq->rq;
 	int rc;
@@ -791,7 +791,7 @@ static inline void carm_round_robin(struct carm_host *host)
 }
 
 static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
-			       int error)
+			       blk_status_t error)
 {
 	carm_end_request_queued(host, crq, error);
 	if (max_queue == 1)
@@ -869,14 +869,14 @@ static void carm_rq_fn(struct request_queue *q)
 	sg = &crq->sg[0];
 	n_elem = blk_rq_map_sg(q, rq, sg);
 	if (n_elem <= 0) {
-		carm_end_rq(host, crq, -EIO);
+		carm_end_rq(host, crq, BLK_STS_IOERR);
 		return;		/* request with no s/g entries? */
 	}
 
 	/* map scatterlist to PCI bus addresses */
 	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
 	if (n_elem <= 0) {
-		carm_end_rq(host, crq, -EIO);
+		carm_end_rq(host, crq, BLK_STS_IOERR);
 		return;		/* request with no s/g entries? */
 	}
 	crq->n_elem = n_elem;
@@ -937,7 +937,7 @@ static void carm_rq_fn(struct request_queue *q)
 
 static void carm_handle_array_info(struct carm_host *host,
 				   struct carm_request *crq, u8 *mem,
-				   int error)
+				   blk_status_t error)
 {
 	struct carm_port *port;
 	u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -997,7 +997,7 @@ static void carm_handle_array_info(struct carm_host *host,
 
 static void carm_handle_scan_chan(struct carm_host *host,
 				  struct carm_request *crq, u8 *mem,
-				  int error)
+				  blk_status_t error)
 {
 	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
 	unsigned int i, dev_count = 0;
@@ -1029,7 +1029,7 @@ static void carm_handle_scan_chan(struct carm_host *host,
 }
 
 static void carm_handle_generic(struct carm_host *host,
-				struct carm_request *crq, int error,
+				struct carm_request *crq, blk_status_t error,
 				int cur_state, int next_state)
 {
 	DPRINTK("ENTER\n");
@@ -1045,7 +1045,7 @@ static void carm_handle_generic(struct carm_host *host,
 }
 
 static inline void carm_handle_rw(struct carm_host *host,
-				  struct carm_request *crq, int error)
+				  struct carm_request *crq, blk_status_t error)
 {
 	int pci_dir;
 
@@ -1067,7 +1067,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 	u32 handle = le32_to_cpu(ret_handle_le);
 	unsigned int msg_idx;
 	struct carm_request *crq;
-	int error = (status == RMSG_OK) ? 0 : -EIO;
+	blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
 	u8 *mem;
 
 	VPRINTK("ENTER, handle == 0x%x\n", handle);
@@ -1155,7 +1155,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 err_out:
 	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
 	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
-	carm_end_rq(host, crq, -EIO);
+	carm_end_rq(host, crq, BLK_STS_IOERR);
 }
 
 static inline void carm_handle_responses(struct carm_host *host)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index c141cc3..0677d25 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -454,7 +454,7 @@ static void process_page(unsigned long data)
 				PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
 			/* error */
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			dev_printk(KERN_WARNING, &card->dev->dev,
 				"I/O error on sector %d/%d\n",
 				le32_to_cpu(desc->local_addr)>>9,
@@ -529,7 +529,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 		 (unsigned long long)bio->bi_iter.bi_sector,
 		 bio->bi_iter.bi_size);
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 553cc4c..0297ad7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -64,15 +64,15 @@ struct virtblk_req {
 	struct scatterlist sg[];
 };
 
-static inline int virtblk_result(struct virtblk_req *vbr)
+static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
 {
 	switch (vbr->status) {
 	case VIRTIO_BLK_S_OK:
-		return 0;
+		return BLK_STS_OK;
 	case VIRTIO_BLK_S_UNSUPP:
-		return -ENOTTY;
+		return BLK_STS_NOTSUPP;
 	default:
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 			   const struct blk_mq_queue_data *bd)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
-			return BLK_MQ_RQ_QUEUE_BUSY;
-		return BLK_MQ_RQ_QUEUE_ERROR;
+			return BLK_STS_RESOURCE;
+		return BLK_STS_IOERR;
 	}
 
 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (notify)
 		virtqueue_notify(vblk->vqs[qid].vq);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 		goto out;
 
 	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
-	err = virtblk_result(blk_mq_rq_to_pdu(req));
+	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
 out:
 	blk_put_request(req);
 	return err;
@@ -720,9 +720,6 @@ static int virtblk_probe(struct virtio_device *vdev)
 	/* We can handle whatever the host told us to handle. */
 	blk_queue_max_segments(q, vblk->sg_elems-2);
 
-	/* No need to bounce any requests */
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-
 	/* No real sector limit. */
 	blk_queue_max_hw_sectors(q, -1U);
 
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e..fe7cd58 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
 	unsigned long timeout;
 	int ret;
 
-	xen_blkif_get(blkif);
-
 	set_freezable();
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
@@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg)
 		print_stats(ring);
 
 	ring->xenblkd = NULL;
-	xen_blkif_put(blkif);
 
 	return 0;
 }
@@ -1069,20 +1066,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring)
 	atomic_set(&blkif->drain, 0);
 }
 
-/*
- * Completion callback on the bio's. Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(struct pending_req *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req,
+		blk_status_t error)
 {
 	/* An error fails the entire request. */
-	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-	    (error == -EOPNOTSUPP)) {
+	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
+	    error == BLK_STS_NOTSUPP) {
 		pr_debug("flush diskcache op failed, not supported\n");
 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-		    (error == -EOPNOTSUPP)) {
+	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
+		   error == BLK_STS_NOTSUPP) {
 		pr_debug("write barrier op failed, not supported\n");
 		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
@@ -1106,7 +1100,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
  */
 static void end_block_io_op(struct bio *bio)
 {
-	__end_block_io_op(bio->bi_private, bio->bi_error);
+	__end_block_io_op(bio->bi_private, bio->bi_status);
 	bio_put(bio);
 }
 
@@ -1423,7 +1417,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	for (i = 0; i < nbio; i++)
 		bio_put(biolist[i]);
 	atomic_set(&pending_req->pendcnt, 1);
-	__end_block_io_op(pending_req, -EINVAL);
+	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
 	msleep(1); /* back off a bit */
 	return -EIO;
 }
@@ -1436,34 +1430,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
 			  unsigned short op, int st)
 {
-	struct blkif_response  resp;
+	struct blkif_response *resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings;
 	int notify;
 
-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
 	blk_rings = &ring->blk_rings;
 	/* Place on the response ring for the relevant domain. */
 	switch (ring->blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+					 blk_rings->x86_32.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
 		break;
 	default:
 		BUG();
 	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6..ecb35fe 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
 	char dummy;
 };
-struct blkif_common_response {
-	char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
 	} u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-#pragma pack(pop)
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
 	} u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_???       */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);
 
 union blkif_back_rings {
 	struct blkif_back_ring        native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 
 	wait_queue_head_t	wq;
 	atomic_t		inflight;
+	bool			active;
 	/* One thread per blkif ring. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 1f3dfaa..792da68 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 		init_waitqueue_head(&ring->shutdown_wq);
 		ring->blkif = blkif;
 		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
 	}
 
 	return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		struct xen_blkif_ring *ring = &blkif->rings[r];
 		unsigned int i = 0;
 
+		if (!ring->active)
+			continue;
+
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
 			wake_up(&ring->shutdown_wq);
-			ring->xenblkd = NULL;
 		}
 
 		/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		BUG_ON(ring->free_pages_num != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
 	}
 	blkif->nr_ring_pages = 0;
 	/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-
-	xen_blkif_disconnect(blkif);
+	WARN_ON(xen_blkif_disconnect(blkif));
 	xen_vbd_free(&blkif->vbd);
+	kfree(blkif->be->mode);
+	kfree(blkif->be);
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		xen_blkif_put(be->blkif);
 	}
 
-	kfree(be->mode);
-	kfree(be);
 	return 0;
 }
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3945963..c852ed3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -110,11 +110,6 @@ struct blk_shadow {
 	unsigned long associated_id;
 };
 
-struct split_bio {
-	struct bio *bio;
-	atomic_t pending;
-};
-
 struct blkif_req {
 	int	error;
 };
@@ -881,7 +876,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 		 !info->feature_fua));
 }
 
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
@@ -904,16 +899,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	flush_requests(rinfo);
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_err:
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-	return BLK_MQ_RQ_QUEUE_ERROR;
+	return BLK_STS_IOERR;
 
 out_busy:
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	blk_mq_stop_hw_queue(hctx);
-	return BLK_MQ_RQ_QUEUE_BUSY;
+	return BLK_STS_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
@@ -958,9 +953,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
 }
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -1601,14 +1593,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			continue;
 		}
 
-		blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		if (bret->status == BLKIF_RSP_OKAY)
+			blkif_req(req)->error = BLK_STS_OK;
+		else
+			blkif_req(req)->error = BLK_STS_IOERR;
+
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 					   info->gd->disk_name, op_name(bret->operation));
-				blkif_req(req)->error = -EOPNOTSUPP;
+				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
@@ -1626,11 +1622,11 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				blkif_req(req)->error = -EOPNOTSUPP;
+				blkif_req(req)->error = BLK_STS_NOTSUPP;
 			}
 			if (unlikely(blkif_req(req)->error)) {
-				if (blkif_req(req)->error == -EOPNOTSUPP)
-					blkif_req(req)->error = 0;
+				if (blkif_req(req)->error == BLK_STS_NOTSUPP)
+					blkif_req(req)->error = BLK_STS_OK;
 				info->feature_fua = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
@@ -1996,28 +1992,13 @@ static int blkfront_probe(struct xenbus_device *dev,
 	return 0;
 }
 
-static void split_bio_end(struct bio *bio)
-{
-	struct split_bio *split_bio = bio->bi_private;
-
-	if (atomic_dec_and_test(&split_bio->pending)) {
-		split_bio->bio->bi_phys_segments = 0;
-		split_bio->bio->bi_error = bio->bi_error;
-		bio_endio(split_bio->bio);
-		kfree(split_bio);
-	}
-	bio_put(bio);
-}
-
 static int blkif_recover(struct blkfront_info *info)
 {
-	unsigned int i, r_index;
+	unsigned int r_index;
 	struct request *req, *n;
 	int rc;
-	struct bio *bio, *cloned_bio;
-	unsigned int segs, offset;
-	int pending, size;
-	struct split_bio *split_bio;
+	struct bio *bio;
+	unsigned int segs;
 
 	blkfront_gather_backend_features(info);
 	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2056,34 +2037,6 @@ static int blkif_recover(struct blkfront_info *info)
 
 	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
 		/* Traverse the list of pending bios and re-queue them */
-		if (bio_segments(bio) > segs) {
-			/*
-			 * This bio has more segments than what we can
-			 * handle, we have to split it.
-			 */
-			pending = (bio_segments(bio) + segs - 1) / segs;
-			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
-			BUG_ON(split_bio == NULL);
-			atomic_set(&split_bio->pending, pending);
-			split_bio->bio = bio;
-			for (i = 0; i < pending; i++) {
-				offset = (i * segs * XEN_PAGE_SIZE) >> 9;
-				size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
-					   (unsigned int)bio_sectors(bio) - offset);
-				cloned_bio = bio_clone(bio, GFP_NOIO);
-				BUG_ON(cloned_bio == NULL);
-				bio_trim(cloned_bio, offset, size);
-				cloned_bio->bi_private = split_bio;
-				cloned_bio->bi_end_io = split_bio_end;
-				submit_bio(cloned_bio);
-			}
-			/*
-			 * Now we have to wait for all those smaller bios to
-			 * end, so we can also end the "parent" bio.
-			 */
-			continue;
-		}
-		/* We don't need to split this bio */
 		submit_bio(bio);
 	}
 
@@ -2137,7 +2090,7 @@ static int blkfront_resume(struct xenbus_device *dev)
 			merge_bio.tail = shadow[j].request->biotail;
 			bio_list_merge(&info->bio_list, &merge_bio);
 			shadow[j].request->bio = NULL;
-			blk_mq_end_request(shadow[j].request, 0);
+			blk_mq_end_request(shadow[j].request, BLK_STS_OK);
 		}
 	}
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 757dce2..14459d6 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -471,7 +471,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
 		if (!blk_rq_is_passthrough(req))
 			break;
 		blk_start_request(req);
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 	}
 	return req;
 }
@@ -499,11 +499,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
 		/* Drop all in-flight and pending requests */
 		if (ace->req) {
-			__blk_end_request_all(ace->req, -EIO);
+			__blk_end_request_all(ace->req, BLK_STS_IOERR);
 			ace->req = NULL;
 		}
 		while ((req = blk_fetch_request(ace->queue)) != NULL)
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 
 		/* Drop back to IDLE state and notify waiters */
 		ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -728,7 +728,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		}
 
 		/* bio finished; is there another one? */
-		if (__blk_end_request_cur(ace->req, 0)) {
+		if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
 			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
 			 *      blk_rq_sectors(ace->req),
 			 *      blk_rq_cur_sectors(ace->req));
@@ -993,6 +993,7 @@ static int ace_setup(struct ace_device *ace)
 	if (ace->queue == NULL)
 		goto err_blk_initq;
 	blk_queue_logical_block_size(ace->queue, 512);
+	blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
 
 	/*
 	 * Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 968f9e5..41c95c9 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -74,14 +74,14 @@ static void do_z2_request(struct request_queue *q)
 	while (req) {
 		unsigned long start = blk_rq_pos(req) << 9;
 		unsigned long len  = blk_rq_cur_bytes(req);
-		int err = 0;
+		blk_status_t err = BLK_STS_OK;
 
 		if (start + len > z2ram_size) {
 			pr_err(DEVICE_NAME ": bad access: block=%llu, "
 			       "count=%u\n",
 			       (unsigned long long)blk_rq_pos(req),
 			       blk_rq_cur_sectors(req));
-			err = -EIO;
+			err = BLK_STS_IOERR;
 			goto done;
 		}
 		while (len) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index debee95..558e245 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1272,6 +1272,13 @@ static int zram_remove(struct zram *zram)
 }
 
 /* zram-control sysfs attributes */
+
+/*
+ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
+ * sense that reading from this file does alter the state of your system -- it
+ * creates a new un-initialized zram device and returns back this device's
+ * device_id (or an error code if it fails to create a new device).
+ */
 static ssize_t hot_add_show(struct class *class,
 			struct class_attribute *attr,
 			char *buf)
@@ -1286,6 +1293,7 @@ static ssize_t hot_add_show(struct class *class,
 		return ret;
 	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
 }
+static CLASS_ATTR_RO(hot_add);
 
 static ssize_t hot_remove_store(struct class *class,
 			struct class_attribute *attr,
@@ -1316,23 +1324,19 @@ static ssize_t hot_remove_store(struct class *class,
 	mutex_unlock(&zram_index_mutex);
 	return ret ? ret : count;
 }
+static CLASS_ATTR_WO(hot_remove);
 
-/*
- * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
- * sense that reading from this file does alter the state of your system -- it
- * creates a new un-initialized zram device and returns back this device's
- * device_id (or an error code if it fails to create a new device).
- */
-static struct class_attribute zram_control_class_attrs[] = {
-	__ATTR(hot_add, 0400, hot_add_show, NULL),
-	__ATTR_WO(hot_remove),
-	__ATTR_NULL,
+static struct attribute *zram_control_class_attrs[] = {
+	&class_attr_hot_add.attr,
+	&class_attr_hot_remove.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(zram_control_class);
 
 static struct class zram_control_class = {
 	.name		= "zram-control",
 	.owner		= THIS_MODULE,
-	.class_attrs	= zram_control_class_attrs,
+	.class_groups	= zram_control_class_groups,
 };
 
 static int zram_remove_cb(int id, void *ptr, void *data)
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index c38cb5b..fe850f0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data)
 	struct btmrvl_thread *thread = data;
 	struct btmrvl_private *priv = thread->priv;
 	struct btmrvl_adapter *adapter = priv->adapter;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct sk_buff *skb;
 	ulong flags;
 
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0a52da4..d2a5f11 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -57,7 +57,7 @@
 
 config BRCMSTB_GISB_ARB
 	bool "Broadcom STB GISB bus arbiter"
-	depends on ARM || MIPS
+	depends on ARM || ARM64 || MIPS
 	default ARCH_BRCMSTB || BMIPS_GENERIC
 	help
 	  Driver for the Broadcom Set Top Box System-on-a-chip internal bus
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 4d6a2b7..e8c6946 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1520,10 +1520,10 @@ static int arm_ccn_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
-		GFP_KERNEL);
-	ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_xps,
-		GFP_KERNEL);
+	ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node),
+				 GFP_KERNEL);
+	ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node),
+			       GFP_KERNEL);
 	if (!ccn->node || !ccn->xp)
 		return -ENOMEM;
 
@@ -1544,9 +1544,11 @@ static int arm_ccn_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id arm_ccn_match[] = {
+	{ .compatible = "arm,ccn-502", },
 	{ .compatible = "arm,ccn-504", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, arm_ccn_match);
 
 static struct platform_driver arm_ccn_driver = {
 	.driver = {
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 72fe0a5..68ac3e9 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -24,11 +24,9 @@
 #include <linux/of.h>
 #include <linux/bitops.h>
 #include <linux/pm.h>
-
-#ifdef CONFIG_ARM
-#include <asm/bug.h>
-#include <asm/signal.h>
-#endif
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
 
 #ifdef CONFIG_MIPS
 #include <asm/traps.h>
@@ -37,8 +35,6 @@
 #define  ARB_ERR_CAP_CLEAR		(1 << 0)
 #define  ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
 #define  ARB_ERR_CAP_STATUS_TEA		(1 << 11)
-#define  ARB_ERR_CAP_STATUS_BS_SHIFT	(1 << 2)
-#define  ARB_ERR_CAP_STATUS_BS_MASK	0x3c
 #define  ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
 #define  ARB_ERR_CAP_STATUS_VALID	(1 << 0)
 
@@ -47,7 +43,6 @@ enum {
 	ARB_ERR_CAP_CLR,
 	ARB_ERR_CAP_HI_ADDR,
 	ARB_ERR_CAP_ADDR,
-	ARB_ERR_CAP_DATA,
 	ARB_ERR_CAP_STATUS,
 	ARB_ERR_CAP_MASTER,
 };
@@ -57,17 +52,24 @@ static const int gisb_offsets_bcm7038[] = {
 	[ARB_ERR_CAP_CLR]	= 0x0c4,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x0c8,
-	[ARB_ERR_CAP_DATA]	= 0x0cc,
 	[ARB_ERR_CAP_STATUS]	= 0x0d0,
 	[ARB_ERR_CAP_MASTER]	= -1,
 };
 
+static const int gisb_offsets_bcm7278[] = {
+	[ARB_TIMER]		= 0x008,
+	[ARB_ERR_CAP_CLR]	= 0x7f8,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x7e0,
+	[ARB_ERR_CAP_STATUS]	= 0x7f0,
+	[ARB_ERR_CAP_MASTER]	= 0x7f4,
+};
+
 static const int gisb_offsets_bcm7400[] = {
 	[ARB_TIMER]		= 0x00c,
 	[ARB_ERR_CAP_CLR]	= 0x0c8,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x0cc,
-	[ARB_ERR_CAP_DATA]	= 0x0d0,
 	[ARB_ERR_CAP_STATUS]	= 0x0d4,
 	[ARB_ERR_CAP_MASTER]	= 0x0d8,
 };
@@ -77,7 +79,6 @@ static const int gisb_offsets_bcm7435[] = {
 	[ARB_ERR_CAP_CLR]	= 0x168,
 	[ARB_ERR_CAP_HI_ADDR]	= -1,
 	[ARB_ERR_CAP_ADDR]	= 0x16c,
-	[ARB_ERR_CAP_DATA]	= 0x170,
 	[ARB_ERR_CAP_STATUS]	= 0x174,
 	[ARB_ERR_CAP_MASTER]	= 0x178,
 };
@@ -87,7 +88,6 @@ static const int gisb_offsets_bcm7445[] = {
 	[ARB_ERR_CAP_CLR]	= 0x7e4,
 	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
 	[ARB_ERR_CAP_ADDR]	= 0x7ec,
-	[ARB_ERR_CAP_DATA]	= 0x7f0,
 	[ARB_ERR_CAP_STATUS]	= 0x7f4,
 	[ARB_ERR_CAP_MASTER]	= 0x7f8,
 };
@@ -109,9 +109,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
 {
 	int offset = gdev->gisb_offsets[reg];
 
-	/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
-	if (offset == -1)
-		return 1;
+	if (offset < 0) {
+		/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+		if (reg == ARB_ERR_CAP_MASTER)
+			return 1;
+		else
+			return 0;
+	}
 
 	if (gdev->big_endian)
 		return ioread32be(gdev->base + offset);
@@ -119,6 +123,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
 		return ioread32(gdev->base + offset);
 }
 
+static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
+{
+	u64 value;
+
+	value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
+	value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+
+	return value;
+}
+
 static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
 {
 	int offset = gdev->gisb_offsets[reg];
@@ -127,9 +141,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
 		return;
 
 	if (gdev->big_endian)
-		iowrite32be(val, gdev->base + reg);
+		iowrite32be(val, gdev->base + offset);
 	else
-		iowrite32(val, gdev->base + reg);
+		iowrite32(val, gdev->base + offset);
 }
 
 static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -185,7 +199,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 					const char *reason)
 {
 	u32 cap_status;
-	unsigned long arb_addr;
+	u64 arb_addr;
 	u32 master;
 	const char *m_name;
 	char m_fmt[11];
@@ -197,10 +211,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 		return 1;
 
 	/* Read the address and master */
-	arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
-	arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
-#endif
+	arb_addr = gisb_read_address(gdev);
 	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
 
 	m_name = brcmstb_gisb_master_to_str(gdev, master);
@@ -209,7 +220,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 		m_name = m_fmt;
 	}
 
-	pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
+	pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
 		__func__, reason, arb_addr,
 		cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
 		cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
@@ -221,27 +232,6 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
 	return 0;
 }
 
-#ifdef CONFIG_ARM
-static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
-				     struct pt_regs *regs)
-{
-	int ret = 0;
-	struct brcmstb_gisb_arb_device *gdev;
-
-	/* iterate over each GISB arb registered handlers */
-	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
-		ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
-	/*
-	 * If it was an imprecise abort, then we need to correct the
-	 * return address to be _after_ the instruction.
-	*/
-	if (fsr & (1 << 10))
-		regs->ARM_pc += 4;
-
-	return ret;
-}
-#endif
-
 #ifdef CONFIG_MIPS
 static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
 {
@@ -279,6 +269,36 @@ static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+/*
+ * Dump out gisb errors on die or panic.
+ */
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+			   void *p);
+
+static struct notifier_block gisb_die_notifier = {
+	.notifier_call = dump_gisb_error,
+};
+
+static struct notifier_block gisb_panic_notifier = {
+	.notifier_call = dump_gisb_error,
+};
+
+static int dump_gisb_error(struct notifier_block *self, unsigned long v,
+			   void *p)
+{
+	struct brcmstb_gisb_arb_device *gdev;
+	const char *reason = "panic";
+
+	if (self == &gisb_die_notifier)
+		reason = "die";
+
+	/* iterate over each GISB arb registered handlers */
+	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+		brcmstb_gisb_arb_decode_addr(gdev, reason);
+
+	return NOTIFY_DONE;
+}
+
 static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO,
 		gisb_arb_get_timeout, gisb_arb_set_timeout);
 
@@ -296,6 +316,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
 	{ .compatible = "brcm,bcm7445-gisb-arb", .data = gisb_offsets_bcm7445 },
 	{ .compatible = "brcm,bcm7435-gisb-arb", .data = gisb_offsets_bcm7435 },
 	{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
+	{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
 	{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
 	{ },
 };
@@ -378,14 +399,16 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 
 	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
 
-#ifdef CONFIG_ARM
-	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
-			"imprecise external abort");
-#endif
 #ifdef CONFIG_MIPS
 	board_be_handler = brcmstb_bus_error_handler;
 #endif
 
+	if (list_is_singular(&brcmstb_gisb_arb_device_list)) {
+		register_die_notifier(&gisb_die_notifier);
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &gisb_panic_notifier);
+	}
+
 	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
 			gdev->base, timeout_irq, tea_irq);
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 76c952f..e36d160 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2178,6 +2178,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	if (!q)
 		return -ENXIO;
 
+	if (!blk_queue_scsi_passthrough(q)) {
+		WARN_ONCE(true,
+			  "Attempt read CDDA info through a non-SCSI queue\n");
+		return -EINVAL;
+	}
+
 	cdi->last_sense = 0;
 
 	while (nframes) {
@@ -2195,7 +2201,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			break;
 		}
 		req = scsi_req(rq);
-		scsi_req_init(rq);
 
 		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 		if (ret) {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1372763..6495b03f 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -583,7 +583,8 @@ static int gdrom_set_interrupt_handlers(void)
  */
 static void gdrom_readdisk_dma(struct work_struct *work)
 {
-	int err, block, block_cnt;
+	int block, block_cnt;
+	blk_status_t err;
 	struct packet_command *read_command;
 	struct list_head *elem, *next;
 	struct request *req;
@@ -641,7 +642,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
 		__raw_writeb(1, GDROM_DMA_STATUS_REG);
 		wait_event_interruptible_timeout(request_queue,
 			gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-		err = gd.transfer ? -EIO : 0;
+		err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
 		gd.transfer = 0;
 		gd.pending = 0;
 		/* now seek to take the request spinlock
@@ -670,11 +671,11 @@ static void gdrom_request(struct request_queue *rq)
 			break;
 		case REQ_OP_WRITE:
 			pr_notice("Read only device - write request ignored\n");
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 			break;
 		default:
 			printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 			break;
 		}
 	}
@@ -812,6 +813,7 @@ static int probe_gdrom(struct platform_device *devptr)
 		err = -ENOMEM;
 		goto probe_fail_requestq;
 	}
+	blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
 
 	err = probe_gdrom_setupqueue();
 	if (err)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 31adbeb..2af7001 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -539,15 +539,6 @@
 	  out to lunch past a certain margin.  It can reboot the system
 	  or merely print a warning.
 
-config MMTIMER
-	tristate "MMTIMER Memory mapped RTC for SGI Altix"
-	depends on IA64_GENERIC || IA64_SGI_SN2
-	depends on POSIX_TIMERS
-	default y
-	help
-	  The mmtimer device allows direct userspace access to the
-	  Altix system timer.
-
 config UV_MMTIMER
 	tristate "UV_MMTIMER Memory mapped RTC for SGI UV"
 	depends on X86_UV
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e6c244..53e3372 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -10,7 +10,6 @@
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MSPEC)		+= mspec.o
-obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
 obj-$(CONFIG_IBM_BSR)		+= bsr.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index d165af8..a5c6cfe 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file,
 			 loff_t      *ppos)
 {
 	int          rv = 0;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (count <= 0)
 		return 0;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6e0cbe0..593a881 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
 	/* It's illegal to wrap around the end of the physical address space. */
-	if (offset + (phys_addr_t)size < offset)
+	if (offset + (phys_addr_t)size - 1 < offset)
 		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
deleted file mode 100644
index 0e7fcb0..0000000
--- a/drivers/char/mmtimer.c
+++ /dev/null
@@ -1,858 +0,0 @@
-/*
- * Timer device implementation for SGI SN platforms.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
- *
- * This driver exports an API that should be supportable by any HPET or IA-PC
- * multimedia timer.  The code below is currently specific to the SGI Altix
- * SHub RTC, however.
- *
- * 11/01/01 - jbarnes - initial revision
- * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion
- * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE
- * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt
- *		support via the posix timer interface
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioctl.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/mmtimer.h>
-#include <linux/miscdevice.h>
-#include <linux/posix-timers.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/math64.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <linux/uaccess.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/shub_mmr.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/shubio.h>
-
-MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
-MODULE_DESCRIPTION("SGI Altix RTC Timer");
-MODULE_LICENSE("GPL");
-
-/* name of the device, usually in /dev */
-#define MMTIMER_NAME "mmtimer"
-#define MMTIMER_DESC "SGI Altix RTC Timer"
-#define MMTIMER_VERSION "2.1"
-
-#define RTC_BITS 55 /* 55 bits for this implementation */
-
-static struct k_clock sgi_clock;
-
-extern unsigned long sn_rtc_cycles_per_second;
-
-#define RTC_COUNTER_ADDR        ((long *)LOCAL_MMR_ADDR(SH_RTC))
-
-#define rtc_time()              (*RTC_COUNTER_ADDR)
-
-static DEFINE_MUTEX(mmtimer_mutex);
-static long mmtimer_ioctl(struct file *file, unsigned int cmd,
-						unsigned long arg);
-static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
-
-/*
- * Period in femtoseconds (10^-15 s)
- */
-static unsigned long mmtimer_femtoperiod = 0;
-
-static const struct file_operations mmtimer_fops = {
-	.owner = THIS_MODULE,
-	.mmap =	mmtimer_mmap,
-	.unlocked_ioctl = mmtimer_ioctl,
-	.llseek = noop_llseek,
-};
-
-/*
- * We only have comparison registers RTC1-4 currently available per
- * node.  RTC0 is used by SAL.
- */
-/* Check for an RTC interrupt pending */
-static int mmtimer_int_pending(int comparator)
-{
-	if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
-			SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
-		return 1;
-	else
-		return 0;
-}
-
-/* Clear the RTC interrupt pending bit */
-static void mmtimer_clr_int_pending(int comparator)
-{
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
-		SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
-}
-
-/* Setup timer on comparator RTC1 */
-static void mmtimer_setup_int_0(int cpu, u64 expires)
-{
-	u64 val;
-
-	/* Disable interrupt */
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);
-
-	/* Initialize comparator value */
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);
-
-	/* Clear pending bit */
-	mmtimer_clr_int_pending(0);
-
-	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
-		((u64)cpu_physical_id(cpu) <<
-			SH_RTC1_INT_CONFIG_PID_SHFT);
-
-	/* Set configuration */
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);
-
-	/* Enable RTC interrupts */
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);
-
-	/* Initialize comparator value */
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);
-
-
-}
-
-/* Setup timer on comparator RTC2 */
-static void mmtimer_setup_int_1(int cpu, u64 expires)
-{
-	u64 val;
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);
-
-	mmtimer_clr_int_pending(1);
-
-	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
-		((u64)cpu_physical_id(cpu) <<
-			SH_RTC2_INT_CONFIG_PID_SHFT);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
-}
-
-/* Setup timer on comparator RTC3 */
-static void mmtimer_setup_int_2(int cpu, u64 expires)
-{
-	u64 val;
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);
-
-	mmtimer_clr_int_pending(2);
-
-	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
-		((u64)cpu_physical_id(cpu) <<
-			SH_RTC3_INT_CONFIG_PID_SHFT);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);
-
-	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
-}
-
-/*
- * This function must be called with interrupts disabled and preemption off
- * in order to insure that the setup succeeds in a deterministic time frame.
- * It will check if the interrupt setup succeeded.
- */
-static int mmtimer_setup(int cpu, int comparator, unsigned long expires,
-	u64 *set_completion_time)
-{
-	switch (comparator) {
-	case 0:
-		mmtimer_setup_int_0(cpu, expires);
-		break;
-	case 1:
-		mmtimer_setup_int_1(cpu, expires);
-		break;
-	case 2:
-		mmtimer_setup_int_2(cpu, expires);
-		break;
-	}
-	/* We might've missed our expiration time */
-	*set_completion_time = rtc_time();
-	if (*set_completion_time <= expires)
-		return 1;
-
-	/*
-	 * If an interrupt is already pending then its okay
-	 * if not then we failed
-	 */
-	return mmtimer_int_pending(comparator);
-}
-
-static int mmtimer_disable_int(long nasid, int comparator)
-{
-	switch (comparator) {
-	case 0:
-		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE),
-			0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL);
-		break;
-	case 1:
-		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE),
-			0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL);
-		break;
-	case 2:
-		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE),
-			0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL);
-		break;
-	default:
-		return -EFAULT;
-	}
-	return 0;
-}
-
-#define COMPARATOR	1		/* The comparator to use */
-
-#define TIMER_OFF	0xbadcabLL	/* Timer is not setup */
-#define TIMER_SET	0		/* Comparator is set for this timer */
-
-#define MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT 40
-
-/* There is one of these for each timer */
-struct mmtimer {
-	struct rb_node list;
-	struct k_itimer *timer;
-	int cpu;
-};
-
-struct mmtimer_node {
-	spinlock_t lock ____cacheline_aligned;
-	struct rb_root timer_head;
-	struct rb_node *next;
-	struct tasklet_struct tasklet;
-};
-static struct mmtimer_node *timers;
-
-static unsigned mmtimer_interval_retry_increment =
-	MMTIMER_INTERVAL_RETRY_INCREMENT_DEFAULT;
-module_param(mmtimer_interval_retry_increment, uint, 0644);
-MODULE_PARM_DESC(mmtimer_interval_retry_increment,
-	"RTC ticks to add to expiration on interval retry (default 40)");
-
-/*
- * Add a new mmtimer struct to the node's mmtimer list.
- * This function assumes the struct mmtimer_node is locked.
- */
-static void mmtimer_add_list(struct mmtimer *n)
-{
-	int nodeid = n->timer->it.mmtimer.node;
-	unsigned long expires = n->timer->it.mmtimer.expires;
-	struct rb_node **link = &timers[nodeid].timer_head.rb_node;
-	struct rb_node *parent = NULL;
-	struct mmtimer *x;
-
-	/*
-	 * Find the right place in the rbtree:
-	 */
-	while (*link) {
-		parent = *link;
-		x = rb_entry(parent, struct mmtimer, list);
-
-		if (expires < x->timer->it.mmtimer.expires)
-			link = &(*link)->rb_left;
-		else
-			link = &(*link)->rb_right;
-	}
-
-	/*
-	 * Insert the timer to the rbtree and check whether it
-	 * replaces the first pending timer
-	 */
-	rb_link_node(&n->list, parent, link);
-	rb_insert_color(&n->list, &timers[nodeid].timer_head);
-
-	if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
-			struct mmtimer, list)->timer->it.mmtimer.expires)
-		timers[nodeid].next = &n->list;
-}
-
-/*
- * Set the comparator for the next timer.
- * This function assumes the struct mmtimer_node is locked.
- */
-static void mmtimer_set_next_timer(int nodeid)
-{
-	struct mmtimer_node *n = &timers[nodeid];
-	struct mmtimer *x;
-	struct k_itimer *t;
-	u64 expires, exp, set_completion_time;
-	int i;
-
-restart:
-	if (n->next == NULL)
-		return;
-
-	x = rb_entry(n->next, struct mmtimer, list);
-	t = x->timer;
-	if (!t->it.mmtimer.incr) {
-		/* Not an interval timer */
-		if (!mmtimer_setup(x->cpu, COMPARATOR,
-					t->it.mmtimer.expires,
-					&set_completion_time)) {
-			/* Late setup, fire now */
-			tasklet_schedule(&n->tasklet);
-		}
-		return;
-	}
-
-	/* Interval timer */
-	i = 0;
-	expires = exp = t->it.mmtimer.expires;
-	while (!mmtimer_setup(x->cpu, COMPARATOR, expires,
-				&set_completion_time)) {
-		int to;
-
-		i++;
-		expires = set_completion_time +
-				mmtimer_interval_retry_increment + (1 << i);
-		/* Calculate overruns as we go. */
-		to = ((u64)(expires - exp) / t->it.mmtimer.incr);
-		if (to) {
-			t->it_overrun += to;
-			t->it.mmtimer.expires += t->it.mmtimer.incr * to;
-			exp = t->it.mmtimer.expires;
-		}
-		if (i > 20) {
-			printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
-			t->it.mmtimer.clock = TIMER_OFF;
-			n->next = rb_next(&x->list);
-			rb_erase(&x->list, &n->timer_head);
-			kfree(x);
-			goto restart;
-		}
-	}
-}
-
-/**
- * mmtimer_ioctl - ioctl interface for /dev/mmtimer
- * @file: file structure for the device
- * @cmd: command to execute
- * @arg: optional argument to command
- *
- * Executes the command specified by @cmd.  Returns 0 for success, < 0 for
- * failure.
- *
- * Valid commands:
- *
- * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
- * of the page where the registers are mapped) for the counter in question.
- *
- * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
- * seconds
- *
- * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
- * specified by @arg
- *
- * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
- *
- * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace
- *
- * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
- * in the address specified by @arg.
- */
-static long mmtimer_ioctl(struct file *file, unsigned int cmd,
-						unsigned long arg)
-{
-	int ret = 0;
-
-	mutex_lock(&mmtimer_mutex);
-
-	switch (cmd) {
-	case MMTIMER_GETOFFSET:	/* offset of the counter */
-		/*
-		 * SN RTC registers are on their own 64k page
-		 */
-		if(PAGE_SIZE <= (1 << 16))
-			ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
-		else
-			ret = -ENOSYS;
-		break;
-
-	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
-		if(copy_to_user((unsigned long __user *)arg,
-				&mmtimer_femtoperiod, sizeof(unsigned long)))
-			ret = -EFAULT;
-		break;
-
-	case MMTIMER_GETFREQ: /* frequency in Hz */
-		if(copy_to_user((unsigned long __user *)arg,
-				&sn_rtc_cycles_per_second,
-				sizeof(unsigned long)))
-			ret = -EFAULT;
-		break;
-
-	case MMTIMER_GETBITS: /* number of bits in the clock */
-		ret = RTC_BITS;
-		break;
-
-	case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
-		ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
-		break;
-
-	case MMTIMER_GETCOUNTER:
-		if(copy_to_user((unsigned long __user *)arg,
-				RTC_COUNTER_ADDR, sizeof(unsigned long)))
-			ret = -EFAULT;
-		break;
-	default:
-		ret = -ENOTTY;
-		break;
-	}
-	mutex_unlock(&mmtimer_mutex);
-	return ret;
-}
-
-/**
- * mmtimer_mmap - maps the clock's registers into userspace
- * @file: file structure for the device
- * @vma: VMA to map the registers into
- *
- * Calls remap_pfn_range() to map the clock's registers into
- * the calling process' address space.
- */
-static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	unsigned long mmtimer_addr;
-
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-		return -EINVAL;
-
-	if (vma->vm_flags & VM_WRITE)
-		return -EPERM;
-
-	if (PAGE_SIZE > (1 << 16))
-		return -ENOSYS;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
-	mmtimer_addr &= ~(PAGE_SIZE - 1);
-	mmtimer_addr &= 0xfffffffffffffffUL;
-
-	if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
-					PAGE_SIZE, vma->vm_page_prot)) {
-		printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-static struct miscdevice mmtimer_miscdev = {
-	.minor = SGI_MMTIMER,
-	.name = MMTIMER_NAME,
-	.fops = &mmtimer_fops
-};
-
-static struct timespec sgi_clock_offset;
-static int sgi_clock_period;
-
-/*
- * Posix Timer Interface
- */
-
-static struct timespec sgi_clock_offset;
-static int sgi_clock_period;
-
-static int sgi_clock_get(clockid_t clockid, struct timespec64 *tp)
-{
-	u64 nsec;
-
-	nsec = rtc_time() * sgi_clock_period
-			+ sgi_clock_offset.tv_nsec;
-	*tp = ns_to_timespec64(nsec);
-	tp->tv_sec += sgi_clock_offset.tv_sec;
-	return 0;
-};
-
-static int sgi_clock_set(const clockid_t clockid, const struct timespec64 *tp)
-{
-
-	u64 nsec;
-	u32 rem;
-
-	nsec = rtc_time() * sgi_clock_period;
-
-	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
-
-	if (rem <= tp->tv_nsec)
-		sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
-	else {
-		sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem;
-		sgi_clock_offset.tv_sec--;
-	}
-	return 0;
-}
-
-/**
- * mmtimer_interrupt - timer interrupt handler
- * @irq: irq received
- * @dev_id: device the irq came from
- *
- * Called when one of the comarators matches the counter, This
- * routine will send signals to processes that have requested
- * them.
- *
- * This interrupt is run in an interrupt context
- * by the SHUB. It is therefore safe to locally access SHub
- * registers.
- */
-static irqreturn_t
-mmtimer_interrupt(int irq, void *dev_id)
-{
-	unsigned long expires = 0;
-	int result = IRQ_NONE;
-	unsigned indx = cpu_to_node(smp_processor_id());
-	struct mmtimer *base;
-
-	spin_lock(&timers[indx].lock);
-	base = rb_entry(timers[indx].next, struct mmtimer, list);
-	if (base == NULL) {
-		spin_unlock(&timers[indx].lock);
-		return result;
-	}
-
-	if (base->cpu == smp_processor_id()) {
-		if (base->timer)
-			expires = base->timer->it.mmtimer.expires;
-		/* expires test won't work with shared irqs */
-		if ((mmtimer_int_pending(COMPARATOR) > 0) ||
-			(expires && (expires <= rtc_time()))) {
-			mmtimer_clr_int_pending(COMPARATOR);
-			tasklet_schedule(&timers[indx].tasklet);
-			result = IRQ_HANDLED;
-		}
-	}
-	spin_unlock(&timers[indx].lock);
-	return result;
-}
-
-static void mmtimer_tasklet(unsigned long data)
-{
-	int nodeid = data;
-	struct mmtimer_node *mn = &timers[nodeid];
-	struct mmtimer *x;
-	struct k_itimer *t;
-	unsigned long flags;
-
-	/* Send signal and deal with periodic signals */
-	spin_lock_irqsave(&mn->lock, flags);
-	if (!mn->next)
-		goto out;
-
-	x = rb_entry(mn->next, struct mmtimer, list);
-	t = x->timer;
-
-	if (t->it.mmtimer.clock == TIMER_OFF)
-		goto out;
-
-	t->it_overrun = 0;
-
-	mn->next = rb_next(&x->list);
-	rb_erase(&x->list, &mn->timer_head);
-
-	if (posix_timer_event(t, 0) != 0)
-		t->it_overrun++;
-
-	if(t->it.mmtimer.incr) {
-		t->it.mmtimer.expires += t->it.mmtimer.incr;
-		mmtimer_add_list(x);
-	} else {
-		/* Ensure we don't false trigger in mmtimer_interrupt */
-		t->it.mmtimer.clock = TIMER_OFF;
-		t->it.mmtimer.expires = 0;
-		kfree(x);
-	}
-	/* Set comparator for next timer, if there is one */
-	mmtimer_set_next_timer(nodeid);
-
-	t->it_overrun_last = t->it_overrun;
-out:
-	spin_unlock_irqrestore(&mn->lock, flags);
-}
-
-static int sgi_timer_create(struct k_itimer *timer)
-{
-	/* Insure that a newly created timer is off */
-	timer->it.mmtimer.clock = TIMER_OFF;
-	return 0;
-}
-
-/* This does not really delete a timer. It just insures
- * that the timer is not active
- *
- * Assumption: it_lock is already held with irq's disabled
- */
-static int sgi_timer_del(struct k_itimer *timr)
-{
-	cnodeid_t nodeid = timr->it.mmtimer.node;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
-	if (timr->it.mmtimer.clock != TIMER_OFF) {
-		unsigned long expires = timr->it.mmtimer.expires;
-		struct rb_node *n = timers[nodeid].timer_head.rb_node;
-		struct mmtimer *uninitialized_var(t);
-		int r = 0;
-
-		timr->it.mmtimer.clock = TIMER_OFF;
-		timr->it.mmtimer.expires = 0;
-
-		while (n) {
-			t = rb_entry(n, struct mmtimer, list);
-			if (t->timer == timr)
-				break;
-
-			if (expires < t->timer->it.mmtimer.expires)
-				n = n->rb_left;
-			else
-				n = n->rb_right;
-		}
-
-		if (!n) {
-			spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
-			return 0;
-		}
-
-		if (timers[nodeid].next == n) {
-			timers[nodeid].next = rb_next(n);
-			r = 1;
-		}
-
-		rb_erase(n, &timers[nodeid].timer_head);
-		kfree(t);
-
-		if (r) {
-			mmtimer_disable_int(cnodeid_to_nasid(nodeid),
-				COMPARATOR);
-			mmtimer_set_next_timer(nodeid);
-		}
-	}
-	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
-	return 0;
-}
-
-/* Assumption: it_lock is already held with irq's disabled */
-static void sgi_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
-{
-
-	if (timr->it.mmtimer.clock == TIMER_OFF) {
-		cur_setting->it_interval.tv_nsec = 0;
-		cur_setting->it_interval.tv_sec = 0;
-		cur_setting->it_value.tv_nsec = 0;
-		cur_setting->it_value.tv_sec =0;
-		return;
-	}
-
-	cur_setting->it_interval = ns_to_timespec64(timr->it.mmtimer.incr * sgi_clock_period);
-	cur_setting->it_value = ns_to_timespec64((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
-}
-
-
-static int sgi_timer_set(struct k_itimer *timr, int flags,
-	struct itimerspec64 *new_setting,
-	struct itimerspec64 *old_setting)
-{
-	unsigned long when, period, irqflags;
-	int err = 0;
-	cnodeid_t nodeid;
-	struct mmtimer *base;
-	struct rb_node *n;
-
-	if (old_setting)
-		sgi_timer_get(timr, old_setting);
-
-	sgi_timer_del(timr);
-	when = timespec64_to_ns(&new_setting->it_value);
-	period = timespec64_to_ns(&new_setting->it_interval);
-
-	if (when == 0)
-		/* Clear timer */
-		return 0;
-
-	base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
-	if (base == NULL)
-		return -ENOMEM;
-
-	if (flags & TIMER_ABSTIME) {
-		struct timespec64 n;
-		unsigned long now;
-
-		getnstimeofday64(&n);
-		now = timespec64_to_ns(&n);
-		if (when > now)
-			when -= now;
-		else
-			/* Fire the timer immediately */
-			when = 0;
-	}
-
-	/*
-	 * Convert to sgi clock period. Need to keep rtc_time() as near as possible
-	 * to getnstimeofday() in order to be as faithful as possible to the time
-	 * specified.
-	 */
-	when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
-	period = (period + sgi_clock_period - 1)  / sgi_clock_period;
-
-	/*
-	 * We are allocating a local SHub comparator. If we would be moved to another
-	 * cpu then another SHub may be local to us. Prohibit that by switching off
-	 * preemption.
-	 */
-	preempt_disable();
-
-	nodeid =  cpu_to_node(smp_processor_id());
-
-	/* Lock the node timer structure */
-	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
-
-	base->timer = timr;
-	base->cpu = smp_processor_id();
-
-	timr->it.mmtimer.clock = TIMER_SET;
-	timr->it.mmtimer.node = nodeid;
-	timr->it.mmtimer.incr = period;
-	timr->it.mmtimer.expires = when;
-
-	n = timers[nodeid].next;
-
-	/* Add the new struct mmtimer to node's timer list */
-	mmtimer_add_list(base);
-
-	if (timers[nodeid].next == n) {
-		/* No need to reprogram comparator for now */
-		spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
-		preempt_enable();
-		return err;
-	}
-
-	/* We need to reprogram the comparator */
-	if (n)
-		mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);
-
-	mmtimer_set_next_timer(nodeid);
-
-	/* Unlock the node timer structure */
-	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
-
-	preempt_enable();
-
-	return err;
-}
-
-static int sgi_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
-{
-	tp->tv_sec = 0;
-	tp->tv_nsec = sgi_clock_period;
-	return 0;
-}
-
-static struct k_clock sgi_clock = {
-	.clock_set	= sgi_clock_set,
-	.clock_get	= sgi_clock_get,
-	.clock_getres	= sgi_clock_getres,
-	.timer_create	= sgi_timer_create,
-	.timer_set	= sgi_timer_set,
-	.timer_del	= sgi_timer_del,
-	.timer_get	= sgi_timer_get
-};
-
-/**
- * mmtimer_init - device initialization routine
- *
- * Does initial setup for the mmtimer device.
- */
-static int __init mmtimer_init(void)
-{
-	cnodeid_t node, maxn = -1;
-
-	if (!ia64_platform_is("sn2"))
-		return 0;
-
-	/*
-	 * Sanity check the cycles/sec variable
-	 */
-	if (sn_rtc_cycles_per_second < 100000) {
-		printk(KERN_ERR "%s: unable to determine clock frequency\n",
-		       MMTIMER_NAME);
-		goto out1;
-	}
-
-	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
-			       2) / sn_rtc_cycles_per_second;
-
-	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
-		printk(KERN_WARNING "%s: unable to allocate interrupt.",
-			MMTIMER_NAME);
-		goto out1;
-	}
-
-	if (misc_register(&mmtimer_miscdev)) {
-		printk(KERN_ERR "%s: failed to register device\n",
-		       MMTIMER_NAME);
-		goto out2;
-	}
-
-	/* Get max numbered node, calculate slots needed */
-	for_each_online_node(node) {
-		maxn = node;
-	}
-	maxn++;
-
-	/* Allocate list of node ptrs to mmtimer_t's */
-	timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
-	if (!timers) {
-		printk(KERN_ERR "%s: failed to allocate memory for device\n",
-				MMTIMER_NAME);
-		goto out3;
-	}
-
-	/* Initialize struct mmtimer's for each online node */
-	for_each_online_node(node) {
-		spin_lock_init(&timers[node].lock);
-		tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
-			(unsigned long) node);
-	}
-
-	sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
-	posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
-
-	printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
-	       sn_rtc_cycles_per_second/(unsigned long)1E6);
-
-	return 0;
-
-out3:
-	misc_deregister(&mmtimer_miscdev);
-out2:
-	free_irq(SGI_MMTIMER_VECTOR, NULL);
-out1:
-	return -1;
-}
-
-module_init(mmtimer_init);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index d4dbd8d..382c864 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 
 	rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 	for (i = 0; i < bytes_to_write; i++) {
 		rc = wait_for_bulk_out_ready(dev);
 		if (rc <= 0) {
-			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
 			       rc);
 			DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 			if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
 	rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab0249..01a260f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -798,12 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
 		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--;
 	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	return 1;
 }
 
@@ -835,13 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -1097,12 +1104,16 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	return *(ptr + f->reg_idx++);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
+	return *ptr;
 }
 
 void add_interrupt_randomness(int irq, int irq_flags)
@@ -2019,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2029,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2041,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2055,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 1b10e38..be5d1ab 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -127,7 +127,7 @@ static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client)
 	struct device *dev = &client->dev;
 	int ret;
 
-	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_st33zp24_gpios);
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
 	if (ret)
 		return ret;
 
@@ -285,7 +285,6 @@ static int st33zp24_i2c_remove(struct i2c_client *client)
 	if (ret)
 		return ret;
 
-	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
 	return 0;
 }
 
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index c69d151..0fc4f20 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -246,7 +246,7 @@ static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev)
 	struct device *dev = &spi_dev->dev;
 	int ret;
 
-	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), acpi_st33zp24_gpios);
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_st33zp24_gpios);
 	if (ret)
 		return ret;
 
@@ -402,7 +402,6 @@ static int st33zp24_spi_remove(struct spi_device *dev)
 	if (ret)
 		return ret;
 
-	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&dev->dev));
 	return 0;
 }
 
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 158c1db..d2b4df6 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -416,7 +416,8 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
 	/* Store the decision as chip->locality will be changed. */
 	need_locality = chip->locality == -1;
 
-	if (need_locality && chip->ops->request_locality)  {
+	if (!(flags & TPM_TRANSMIT_RAW) &&
+	    need_locality && chip->ops->request_locality)  {
 		rc = chip->ops->request_locality(chip, 0);
 		if (rc < 0)
 			goto out_no_locality;
@@ -429,8 +430,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
 
 	rc = chip->ops->send(chip, (u8 *) buf, count);
 	if (rc < 0) {
-		dev_err(&chip->dev,
-			"tpm_transmit: tpm_send: error %d\n", rc);
+		if (rc != -EPIPE)
+			dev_err(&chip->dev,
+				"%s: tpm_send: error %d\n", __func__, rc);
 		goto out;
 	}
 
@@ -536,59 +538,62 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
 
 #define TPM_DIGEST_SIZE 20
 #define TPM_RET_CODE_IDX 6
 #define TPM_INTERNAL_RESULT_SIZE 200
-#define TPM_ORD_GET_CAP cpu_to_be32(101)
-#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
+#define TPM_ORD_GET_CAP 101
+#define TPM_ORD_GET_RANDOM 70
 
 static const struct tpm_input_header tpm_getcap_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(22),
-	.ordinal = TPM_ORD_GET_CAP
+	.ordinal = cpu_to_be32(TPM_ORD_GET_CAP)
 };
 
 ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
 		   const char *desc, size_t min_cap_length)
 {
-	struct tpm_cmd_t tpm_cmd;
+	struct tpm_buf buf;
 	int rc;
 
-	tpm_cmd.header.in = tpm_getcap_header;
+	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_GET_CAP);
+	if (rc)
+		return rc;
+
 	if (subcap_id == TPM_CAP_VERSION_1_1 ||
 	    subcap_id == TPM_CAP_VERSION_1_2) {
-		tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
-		/*subcap field not necessary */
-		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
-		tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
+		tpm_buf_append_u32(&buf, subcap_id);
+		tpm_buf_append_u32(&buf, 0);
 	} else {
 		if (subcap_id == TPM_CAP_FLAG_PERM ||
 		    subcap_id == TPM_CAP_FLAG_VOL)
-			tpm_cmd.params.getcap_in.cap =
-				cpu_to_be32(TPM_CAP_FLAG);
+			tpm_buf_append_u32(&buf, TPM_CAP_FLAG);
 		else
-			tpm_cmd.params.getcap_in.cap =
-				cpu_to_be32(TPM_CAP_PROP);
-		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
-		tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
+			tpm_buf_append_u32(&buf, TPM_CAP_PROP);
+
+		tpm_buf_append_u32(&buf, 4);
+		tpm_buf_append_u32(&buf, subcap_id);
 	}
-	rc = tpm_transmit_cmd(chip, NULL, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
 			      min_cap_length, 0, desc);
 	if (!rc)
-		*cap = tpm_cmd.params.getcap_out.cap;
+		*cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4];
+
+	tpm_buf_destroy(&buf);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(tpm_getcap);
 
-#define TPM_ORD_STARTUP cpu_to_be32(153)
+#define TPM_ORD_STARTUP 153
 #define TPM_ST_CLEAR cpu_to_be16(1)
 #define TPM_ST_STATE cpu_to_be16(2)
 #define TPM_ST_DEACTIVATED cpu_to_be16(3)
 static const struct tpm_input_header tpm_startup_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(12),
-	.ordinal = TPM_ORD_STARTUP
+	.ordinal = cpu_to_be32(TPM_ORD_STARTUP)
 };
 
 static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
@@ -737,7 +742,7 @@ EXPORT_SYMBOL_GPL(tpm_get_timeouts);
 #define CONTINUE_SELFTEST_RESULT_SIZE 10
 
 static const struct tpm_input_header continue_selftest_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(10),
 	.ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
 };
@@ -760,13 +765,13 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
 	return rc;
 }
 
-#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
+#define TPM_ORDINAL_PCRREAD 21
 #define READ_PCR_RESULT_SIZE 30
 #define READ_PCR_RESULT_BODY_SIZE 20
 static const struct tpm_input_header pcrread_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(14),
-	.ordinal = TPM_ORDINAL_PCRREAD
+	.ordinal = cpu_to_be32(TPM_ORDINAL_PCRREAD)
 };
 
 int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
@@ -838,15 +843,34 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
 }
 EXPORT_SYMBOL_GPL(tpm_pcr_read);
 
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define TPM_ORD_PCR_EXTEND 20
 #define EXTEND_PCR_RESULT_SIZE 34
 #define EXTEND_PCR_RESULT_BODY_SIZE 20
 static const struct tpm_input_header pcrextend_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(34),
-	.ordinal = TPM_ORD_PCR_EXTEND
+	.ordinal = cpu_to_be32(TPM_ORD_PCR_EXTEND)
 };
 
+static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
+			   char *log_msg)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_PCR_EXTEND);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, pcr_idx);
+	tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE);
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, EXTEND_PCR_RESULT_SIZE,
+			      EXTEND_PCR_RESULT_BODY_SIZE, 0, log_msg);
+	tpm_buf_destroy(&buf);
+	return rc;
+}
+
 /**
  * tpm_pcr_extend - extend pcr value with hash
  * @chip_num:	tpm idx # or AN&
@@ -859,7 +883,6 @@ static const struct tpm_input_header pcrextend_header = {
  */
 int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
 {
-	struct tpm_cmd_t cmd;
 	int rc;
 	struct tpm_chip *chip;
 	struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
@@ -885,13 +908,8 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
 		return rc;
 	}
 
-	cmd.header.in = pcrextend_header;
-	cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
-	memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
-	rc = tpm_transmit_cmd(chip, NULL, &cmd, EXTEND_PCR_RESULT_SIZE,
-			      EXTEND_PCR_RESULT_BODY_SIZE, 0,
-			      "attempting extend a PCR value");
-
+	rc = tpm1_pcr_extend(chip, pcr_idx, hash,
+			     "attempting extend a PCR value");
 	tpm_put_ops(chip);
 	return rc;
 }
@@ -1060,13 +1078,13 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 }
 EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
 
-#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define TPM_ORD_SAVESTATE 152
 #define SAVESTATE_RESULT_SIZE 10
 
 static const struct tpm_input_header savestate_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(10),
-	.ordinal = TPM_ORD_SAVESTATE
+	.ordinal = cpu_to_be32(TPM_ORD_SAVESTATE)
 };
 
 /*
@@ -1090,15 +1108,9 @@ int tpm_pm_suspend(struct device *dev)
 	}
 
 	/* for buggy tpm, flush pcrs with extend to selected dummy */
-	if (tpm_suspend_pcr) {
-		cmd.header.in = pcrextend_header;
-		cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
-		memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
-		       TPM_DIGEST_SIZE);
-		rc = tpm_transmit_cmd(chip, NULL, &cmd, EXTEND_PCR_RESULT_SIZE,
-				      EXTEND_PCR_RESULT_BODY_SIZE, 0,
-				      "extending dummy pcr before suspend");
-	}
+	if (tpm_suspend_pcr)
+		rc = tpm1_pcr_extend(chip, tpm_suspend_pcr, dummy_hash,
+				     "extending dummy pcr before suspend");
 
 	/* now do the actual savestate */
 	for (try = 0; try < TPM_RETRY; try++) {
@@ -1149,9 +1161,9 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
 
 #define TPM_GETRANDOM_RESULT_SIZE	18
 static const struct tpm_input_header tpm_getrandom_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(14),
-	.ordinal = TPM_ORD_GET_RANDOM
+	.ordinal = cpu_to_be32(TPM_ORD_GET_RANDOM)
 };
 
 /**
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 55405db..4bd0997c 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -22,11 +22,11 @@
 
 #define READ_PUBEK_RESULT_SIZE 314
 #define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
-#define TPM_ORD_READPUBEK cpu_to_be32(124)
+#define TPM_ORD_READPUBEK 124
 static const struct tpm_input_header tpm_readpubek_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
+	.tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
 	.length = cpu_to_be32(30),
-	.ordinal = TPM_ORD_READPUBEK
+	.ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
 };
 static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 			  char *buf)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4b4c8de..1df0521 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -247,7 +247,7 @@ struct tpm_output_header {
 	__be32	return_code;
 } __packed;
 
-#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
+#define TPM_TAG_RQU_COMMAND 193
 
 struct	stclear_flags_t {
 	__be16	tag;
@@ -339,17 +339,6 @@ enum tpm_sub_capabilities {
 	TPM_CAP_PROP_TIS_DURATION = 0x120,
 };
 
-struct	tpm_getcap_params_in {
-	__be32	cap;
-	__be32	subcap_size;
-	__be32	subcap;
-} __packed;
-
-struct	tpm_getcap_params_out {
-	__be32	cap_size;
-	cap_t	cap;
-} __packed;
-
 struct	tpm_readpubek_params_out {
 	u8	algorithm[4];
 	u8	encscheme[2];
@@ -374,11 +363,6 @@ struct tpm_pcrread_in {
 	__be32	pcr_idx;
 } __packed;
 
-struct tpm_pcrextend_in {
-	__be32	pcr_idx;
-	u8	hash[TPM_DIGEST_SIZE];
-} __packed;
-
 /* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
  * bytes, but 128 is still a relatively large number of random bytes and
  * anything much bigger causes users of struct tpm_cmd_t to start getting
@@ -399,13 +383,10 @@ struct tpm_startup_in {
 } __packed;
 
 typedef union {
-	struct	tpm_getcap_params_out getcap_out;
 	struct	tpm_readpubek_params_out readpubek_out;
 	u8	readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
-	struct	tpm_getcap_params_in getcap_in;
 	struct	tpm_pcrread_in	pcrread_in;
 	struct	tpm_pcrread_out	pcrread_out;
-	struct	tpm_pcrextend_in pcrextend_in;
 	struct	tpm_getrandom_in getrandom_in;
 	struct	tpm_getrandom_out getrandom_out;
 	struct tpm_startup_in startup_in;
@@ -525,6 +506,7 @@ extern struct idr dev_nums_idr;
 
 enum tpm_transmit_flags {
 	TPM_TRANSMIT_UNLOCKED	= BIT(0),
+	TPM_TRANSMIT_RAW	= BIT(1),
 };
 
 ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 3ee6883..3a99643 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -840,7 +840,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
 	/* In places where shutdown command is sent there's not much we can do
 	 * except print the error code on a system failure.
 	 */
-	if (rc < 0)
+	if (rc < 0 && rc != -EPIPE)
 		dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
 			 rc);
 }
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 0d322ab..66a1452 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -144,13 +144,11 @@ static void atml_plat_remove(void)
 	struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
 	struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
 
-	if (chip) {
-		tpm_chip_unregister(chip);
-		if (priv->have_region)
-			atmel_release_region(priv->base, priv->region_size);
-		atmel_put_base_addr(priv->iobase);
-		platform_device_unregister(pdev);
-	}
+	tpm_chip_unregister(chip);
+	if (priv->have_region)
+		atmel_release_region(priv->base, priv->region_size);
+	atmel_put_base_addr(priv->iobase);
+	platform_device_unregister(pdev);
 }
 
 static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index b917b9d..fe42c4a 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -27,10 +27,9 @@
 
 #define ACPI_SIG_TPM2 "TPM2"
 
-static const u8 CRB_ACPI_START_UUID[] = {
-	/* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47,
-	/* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4
-};
+static const guid_t crb_acpi_start_guid =
+	GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+		  0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
 
 enum crb_defaults {
 	CRB_ACPI_START_REVISION_ID = 1,
@@ -266,7 +265,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
 	int rc;
 
 	obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
-				CRB_ACPI_START_UUID,
+				&crb_acpi_start_guid,
 				CRB_ACPI_START_REVISION_ID,
 				CRB_ACPI_START_INDEX,
 				NULL);
@@ -564,12 +563,12 @@ static int crb_acpi_add(struct acpi_device *device)
 	    sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
 		priv->flags |= CRB_FL_ACPI_START;
 
-	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_SMC) {
+	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
 		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
 			dev_err(dev,
 				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
 				buf->header.length,
-				ACPI_TPM2_COMMAND_BUFFER_WITH_SMC);
+				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
 			return -EINVAL;
 		}
 		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index dc47fa2..79d6bbb 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -70,6 +70,7 @@ struct tpm_inf_dev {
 	u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
 	struct tpm_chip *chip;
 	enum i2c_chip_type chip_type;
+	unsigned int adapterlimit;
 };
 
 static struct tpm_inf_dev tpm_dev;
@@ -111,6 +112,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
 
 	int rc = 0;
 	int count;
+	unsigned int msglen = len;
 
 	/* Lock the adapter for the duration of the whole sequence. */
 	if (!tpm_dev.client->adapter->algo->master_xfer)
@@ -131,27 +133,61 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
 			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
 		}
 	} else {
-		/* slb9635 protocol should work in all cases */
-		for (count = 0; count < MAX_COUNT; count++) {
-			rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
-			if (rc > 0)
-				break;	/* break here to skip sleep */
-
-			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
-		}
-
-		if (rc <= 0)
-			goto out;
-
-		/* After the TPM has successfully received the register address
-		 * it needs some time, thus we're sleeping here again, before
-		 * retrieving the data
+		/* Expect to send one command message and one data message, but
+		 * support looping over each or both if necessary.
 		 */
-		for (count = 0; count < MAX_COUNT; count++) {
-			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
-			rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1);
-			if (rc > 0)
-				break;
+		while (len > 0) {
+			/* slb9635 protocol should work in all cases */
+			for (count = 0; count < MAX_COUNT; count++) {
+				rc = __i2c_transfer(tpm_dev.client->adapter,
+						    &msg1, 1);
+				if (rc > 0)
+					break;	/* break here to skip sleep */
+
+				usleep_range(SLEEP_DURATION_LOW,
+					     SLEEP_DURATION_HI);
+			}
+
+			if (rc <= 0)
+				goto out;
+
+			/* After the TPM has successfully received the register
+			 * address it needs some time, thus we're sleeping here
+			 * again, before retrieving the data
+			 */
+			for (count = 0; count < MAX_COUNT; count++) {
+				if (tpm_dev.adapterlimit) {
+					msglen = min_t(unsigned int,
+						       tpm_dev.adapterlimit,
+						       len);
+					msg2.len = msglen;
+				}
+				usleep_range(SLEEP_DURATION_LOW,
+					     SLEEP_DURATION_HI);
+				rc = __i2c_transfer(tpm_dev.client->adapter,
+						    &msg2, 1);
+				if (rc > 0) {
+					/* Since len is unsigned, make doubly
+					 * sure we do not underflow it.
+					 */
+					if (msglen > len)
+						len = 0;
+					else
+						len -= msglen;
+					msg2.buf += msglen;
+					break;
+				}
+				/* If the I2C adapter rejected the request (e.g.
+				 * when the quirk read_max_len < len), fall back
+				 * to a sane minimum value and try again.
+				 */
+				if (rc == -EOPNOTSUPP)
+					tpm_dev.adapterlimit =
+							I2C_SMBUS_BLOCK_MAX;
+			}
+
+			if (rc <= 0)
+				goto out;
 		}
 	}
 
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index e3cf9f3..3b1b9f9 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -397,7 +397,7 @@ static int tpm_inf_pnp_probe(struct pnp_dev *dev,
 	int vendorid[2];
 	int version[2];
 	int productid[2];
-	char chipname[20];
+	const char *chipname;
 	struct tpm_chip *chip;
 
 	/* read IO-ports through PnP */
@@ -488,13 +488,13 @@ static int tpm_inf_pnp_probe(struct pnp_dev *dev,
 
 	switch ((productid[0] << 8) | productid[1]) {
 	case 6:
-		snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)");
+		chipname = " (SLD 9630 TT 1.1)";
 		break;
 	case 11:
-		snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)");
+		chipname = " (SLB 9635 TT 1.2)";
 		break;
 	default:
-		snprintf(chipname, sizeof(chipname), " (unknown chip)");
+		chipname = " (unknown chip)";
 		break;
 	}
 
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 692a2c6..86dd852 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -32,20 +32,16 @@
 #define PPI_VS_REQ_START	128
 #define PPI_VS_REQ_END		255
 
-static const u8 tpm_ppi_uuid[] = {
-	0xA6, 0xFA, 0xDD, 0x3D,
-	0x1B, 0x36,
-	0xB4, 0x4E,
-	0xA4, 0x24,
-	0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
-};
+static const guid_t tpm_ppi_guid =
+	GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+		  0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
 
 static inline union acpi_object *
 tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
 	     union acpi_object *argv4)
 {
 	BUG_ON(!ppi_handle);
-	return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
+	return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
 				       TPM_PPI_REVISION_ID,
 				       func, argv4, type);
 }
@@ -107,7 +103,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
 	 * is updated with function index from SUBREQ to SUBREQ2 since PPI
 	 * version 1.1
 	 */
-	if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+	if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
 			   TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
 		func = TPM_PPI_FN_SUBREQ2;
 
@@ -268,7 +264,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
 		"User not required",
 	};
 
-	if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+	if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
 			    1 << TPM_PPI_FN_GETOPR))
 		return -EPERM;
 
@@ -341,12 +337,12 @@ void tpm_add_ppi(struct tpm_chip *chip)
 	if (!chip->acpi_dev_handle)
 		return;
 
-	if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+	if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
 			    TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
 		return;
 
 	/* Cache PPI version string. */
-	obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
+	obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
 				      TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
 				      NULL, ACPI_TYPE_STRING);
 	if (obj) {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c7e1384..b14d4aa 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -80,6 +80,8 @@ static int has_hid(struct acpi_device *dev, const char *hid)
 
 static inline int is_itpm(struct acpi_device *dev)
 {
+	if (!dev)
+		return 0;
 	return has_hid(dev, "INTC0102");
 }
 #else
@@ -89,6 +91,47 @@ static inline int is_itpm(struct acpi_device *dev)
 }
 #endif
 
+#if defined(CONFIG_ACPI)
+#define DEVICE_IS_TPM2 1
+
+static const struct acpi_device_id tpm_acpi_tbl[] = {
+	{"MSFT0101", DEVICE_IS_TPM2},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
+
+static int check_acpi_tpm2(struct device *dev)
+{
+	const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+	struct acpi_table_tpm2 *tbl;
+	acpi_status st;
+
+	if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+		return 0;
+
+	/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+	 * table is mandatory
+	 */
+	st =
+	    acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+	if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+		return -EINVAL;
+	}
+
+	/* The tpm2_crb driver handles this device */
+	if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+		return -ENODEV;
+
+	return 0;
+}
+#else
+static int check_acpi_tpm2(struct device *dev)
+{
+	return 0;
+}
+#endif
+
 static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
 			      u8 *result)
 {
@@ -141,11 +184,15 @@ static const struct tpm_tis_phy_ops tpm_tcg = {
 	.write32 = tpm_tcg_write32,
 };
 
-static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
-			acpi_handle acpi_dev_handle)
+static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
 {
 	struct tpm_tis_tcg_phy *phy;
 	int irq = -1;
+	int rc;
+
+	rc = check_acpi_tpm2(dev);
+	if (rc)
+		return rc;
 
 	phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL);
 	if (phy == NULL)
@@ -158,11 +205,11 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
 	if (interrupts)
 		irq = tpm_info->irq;
 
-	if (itpm)
+	if (itpm || is_itpm(ACPI_COMPANION(dev)))
 		phy->priv.flags |= TPM_TIS_ITPM_WORKAROUND;
 
 	return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
-				 acpi_dev_handle);
+				 ACPI_HANDLE(dev));
 }
 
 static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
@@ -171,7 +218,6 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 			    const struct pnp_device_id *pnp_id)
 {
 	struct tpm_info tpm_info = {};
-	acpi_handle acpi_dev_handle = NULL;
 	struct resource *res;
 
 	res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
@@ -184,14 +230,7 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
 	else
 		tpm_info.irq = -1;
 
-	if (pnp_acpi_device(pnp_dev)) {
-		if (is_itpm(pnp_acpi_device(pnp_dev)))
-			itpm = true;
-
-		acpi_dev_handle = ACPI_HANDLE(&pnp_dev->dev);
-	}
-
-	return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
+	return tpm_tis_init(&pnp_dev->dev, &tpm_info);
 }
 
 static struct pnp_device_id tpm_pnp_tbl[] = {
@@ -231,93 +270,6 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
 		    sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
 
-#ifdef CONFIG_ACPI
-static int tpm_check_resource(struct acpi_resource *ares, void *data)
-{
-	struct tpm_info *tpm_info = (struct tpm_info *) data;
-	struct resource res;
-
-	if (acpi_dev_resource_interrupt(ares, 0, &res))
-		tpm_info->irq = res.start;
-	else if (acpi_dev_resource_memory(ares, &res)) {
-		tpm_info->res = res;
-		tpm_info->res.name = NULL;
-	}
-
-	return 1;
-}
-
-static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
-{
-	struct acpi_table_tpm2 *tbl;
-	acpi_status st;
-	struct list_head resources;
-	struct tpm_info tpm_info = {};
-	int ret;
-
-	st = acpi_get_table(ACPI_SIG_TPM2, 1,
-			    (struct acpi_table_header **) &tbl);
-	if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
-		dev_err(&acpi_dev->dev,
-			FW_BUG "failed to get TPM2 ACPI table\n");
-		return -EINVAL;
-	}
-
-	if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
-		return -ENODEV;
-
-	INIT_LIST_HEAD(&resources);
-	tpm_info.irq = -1;
-	ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
-				     &tpm_info);
-	if (ret < 0)
-		return ret;
-
-	acpi_dev_free_resource_list(&resources);
-
-	if (resource_type(&tpm_info.res) != IORESOURCE_MEM) {
-		dev_err(&acpi_dev->dev,
-			FW_BUG "TPM2 ACPI table does not define a memory resource\n");
-		return -EINVAL;
-	}
-
-	if (is_itpm(acpi_dev))
-		itpm = true;
-
-	return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
-}
-
-static int tpm_tis_acpi_remove(struct acpi_device *dev)
-{
-	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
-
-	tpm_chip_unregister(chip);
-	tpm_tis_remove(chip);
-
-	return 0;
-}
-
-static struct acpi_device_id tpm_acpi_tbl[] = {
-	{"MSFT0101", 0},	/* TPM 2.0 */
-	/* Add new here */
-	{"", 0},		/* User Specified */
-	{"", 0}			/* Terminator */
-};
-MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
-
-static struct acpi_driver tis_acpi_driver = {
-	.name = "tpm_tis",
-	.ids = tpm_acpi_tbl,
-	.ops = {
-		.add = tpm_tis_acpi_init,
-		.remove = tpm_tis_acpi_remove,
-	},
-	.drv = {
-		.pm = &tpm_tis_pm,
-	},
-};
-#endif
-
 static struct platform_device *force_pdev;
 
 static int tpm_tis_plat_probe(struct platform_device *pdev)
@@ -332,18 +284,16 @@ static int tpm_tis_plat_probe(struct platform_device *pdev)
 	}
 	tpm_info.res = *res;
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res) {
-		tpm_info.irq = res->start;
-	} else {
-		if (pdev == force_pdev)
+	tpm_info.irq = platform_get_irq(pdev, 0);
+	if (tpm_info.irq <= 0) {
+		if (pdev != force_pdev)
 			tpm_info.irq = -1;
 		else
 			/* When forcing auto probe the IRQ */
 			tpm_info.irq = 0;
 	}
 
-	return tpm_tis_init(&pdev->dev, &tpm_info, NULL);
+	return tpm_tis_init(&pdev->dev, &tpm_info);
 }
 
 static int tpm_tis_plat_remove(struct platform_device *pdev)
@@ -371,6 +321,7 @@ static struct platform_driver tis_drv = {
 		.name		= "tpm_tis",
 		.pm		= &tpm_tis_pm,
 		.of_match_table = of_match_ptr(tis_of_platform_match),
+		.acpi_match_table = ACPI_PTR(tpm_acpi_tbl),
 	},
 };
 
@@ -413,11 +364,6 @@ static int __init init_tis(void)
 	if (rc)
 		goto err_platform;
 
-#ifdef CONFIG_ACPI
-	rc = acpi_bus_register_driver(&tis_acpi_driver);
-	if (rc)
-		goto err_acpi;
-#endif
 
 	if (IS_ENABLED(CONFIG_PNP)) {
 		rc = pnp_register_driver(&tis_pnp_driver);
@@ -428,10 +374,6 @@ static int __init init_tis(void)
 	return 0;
 
 err_pnp:
-#ifdef CONFIG_ACPI
-	acpi_bus_unregister_driver(&tis_acpi_driver);
-err_acpi:
-#endif
 	platform_driver_unregister(&tis_drv);
 err_platform:
 	if (force_pdev)
@@ -443,9 +385,6 @@ static int __init init_tis(void)
 static void __exit cleanup_tis(void)
 {
 	pnp_unregister_driver(&tis_pnp_driver);
-#ifdef CONFIG_ACPI
-	acpi_bus_unregister_driver(&tis_acpi_driver);
-#endif
 	platform_driver_unregister(&tis_drv);
 
 	if (force_pdev)
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 751059d..1d877cc 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -43,6 +43,7 @@ struct proxy_dev {
 #define STATE_OPENED_FLAG        BIT(0)
 #define STATE_WAIT_RESPONSE_FLAG BIT(1)  /* waiting for emulator response */
 #define STATE_REGISTERED_FLAG	 BIT(2)
+#define STATE_DRIVER_COMMAND     BIT(3)  /* sending a driver specific command */
 
 	size_t req_len;              /* length of queued TPM request */
 	size_t resp_len;             /* length of queued TPM response */
@@ -299,6 +300,28 @@ static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return len;
 }
 
+static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
+					u8 *buf, size_t count)
+{
+	struct tpm_input_header *hdr = (struct tpm_input_header *)buf;
+
+	if (count < sizeof(struct tpm_input_header))
+		return 0;
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		switch (be32_to_cpu(hdr->ordinal)) {
+		case TPM2_CC_SET_LOCALITY:
+			return 1;
+		}
+	} else {
+		switch (be32_to_cpu(hdr->ordinal)) {
+		case TPM_ORD_SET_LOCALITY:
+			return 1;
+		}
+	}
+	return 0;
+}
+
 /*
  * Called when core TPM driver forwards TPM requests to 'server side'.
  *
@@ -321,6 +344,10 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
 		return -EIO;
 	}
 
+	if (!(proxy_dev->state & STATE_DRIVER_COMMAND) &&
+	    vtpm_proxy_is_driver_command(chip, buf, count))
+		return -EFAULT;
+
 	mutex_lock(&proxy_dev->buf_lock);
 
 	if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
@@ -371,6 +398,47 @@ static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip  *chip, u8 status)
 	return ret;
 }
 
+static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality)
+{
+	struct tpm_buf buf;
+	int rc;
+	const struct tpm_output_header *header;
+	struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS,
+				  TPM2_CC_SET_LOCALITY);
+	else
+		rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND,
+				  TPM_ORD_SET_LOCALITY);
+	if (rc)
+		return rc;
+	tpm_buf_append_u8(&buf, locality);
+
+	proxy_dev->state |= STATE_DRIVER_COMMAND;
+
+	rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0,
+			      TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW,
+			      "attempting to set locality");
+
+	proxy_dev->state &= ~STATE_DRIVER_COMMAND;
+
+	if (rc < 0) {
+		locality = rc;
+		goto out;
+	}
+
+	header = (const struct tpm_output_header *)buf.data;
+	rc = be32_to_cpu(header->return_code);
+	if (rc)
+		locality = -1;
+
+out:
+	tpm_buf_destroy(&buf);
+
+	return locality;
+}
+
 static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
 	.flags = TPM_OPS_AUTO_STARTUP,
 	.recv = vtpm_proxy_tpm_op_recv,
@@ -380,6 +448,7 @@ static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
 	.req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
 	.req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
 	.req_canceled = vtpm_proxy_tpm_req_canceled,
+	.request_locality = vtpm_proxy_request_locality,
 };
 
 /*
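(The new .request_locality hook above sends a "set locality" command to the userspace emulator through the normal transmit path, with STATE_DRIVER_COMMAND set so that vtpm_proxy_tpm_op_send() accepts it, while the same ordinal arriving from the character device is rejected with -EFAULT. As a minimal, purely illustrative userspace sketch of the wire format that tpm_buf_init() plus tpm_buf_append_u8() produce for this command -- a 10-byte big-endian header followed by one locality byte -- with the ordinal value below being a placeholder rather than the kernel's actual TPM2_CC_SET_LOCALITY constant:)

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htons/htonl for the big-endian header fields */

/* Layout: 2-byte tag, 4-byte total length, 4-byte ordinal, 1 locality byte. */
static size_t build_set_locality(uint8_t *buf, uint16_t tag,
				 uint32_t ordinal, uint8_t locality)
{
	uint16_t be_tag = htons(tag);
	uint32_t be_len = htonl(10 + 1);	/* header + one payload byte */
	uint32_t be_ord = htonl(ordinal);

	memcpy(buf + 0, &be_tag, sizeof(be_tag));
	memcpy(buf + 2, &be_len, sizeof(be_len));
	memcpy(buf + 6, &be_ord, sizeof(be_ord));
	buf[10] = locality;
	return 11;
}

int main(void)
{
	uint8_t cmd[16];
	/* 0x8002 is TPM2_ST_SESSIONS; the ordinal is a dummy value for
	 * illustration only. */
	size_t n = build_set_locality(cmd, 0x8002, 0x20000000, 0);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}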
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index c636e7f..1a0e97a 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -45,7 +45,7 @@ static int tpmrm_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-ssize_t tpmrm_write(struct file *file, const char __user *buf,
+static ssize_t tpmrm_write(struct file *file, const char __user *buf,
 		   size_t size, loff_t *off)
 {
 	struct file_priv *fpriv = file->private_data;
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 19480bc..2f29ee1 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -14,6 +14,7 @@
 config COMMON_CLK_GXBB
 	bool
 	depends on COMMON_CLK_AMLOGIC
+	select RESET_CONTROLLER
 	help
 	  Support for the clock controller on AmLogic S905 devices, aka gxbb.
 	  Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 93b8f07..4d04f4a 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -171,7 +171,7 @@
  * to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h
  */
 #define CLKID_SYS_PLL		  0
-/* CLKID_CPUCLK */
+#define CLKID_CPUCLK		  1
 /* CLKID_HDMI_PLL */
 #define CLKID_FIXED_PLL		  3
 /* CLKID_FCLK_DIV2 */
@@ -191,12 +191,12 @@
 #define CLKID_ISA		  18
 #define CLKID_PL301		  19
 #define CLKID_PERIPHS		  20
-#define CLKID_SPICC		  21
+/* CLKID_SPICC */
 /* CLKID_I2C */
 /* #define CLKID_SAR_ADC */
 #define CLKID_SMART_CARD	  24
 /* CLKID_RNG0 */
-#define CLKID_UART0		  26
+/* CLKID_UART0 */
 #define CLKID_SDHC		  27
 #define CLKID_STREAM		  28
 #define CLKID_ASYNC_FIFO	  29
@@ -209,7 +209,7 @@
 /* CLKID_ETH */
 #define CLKID_DEMUX		  37
 /* CLKID_AIU_GLUE */
-#define CLKID_IEC958		  39
+/* CLKID_IEC958 */
 /* CLKID_I2S_OUT */
 #define CLKID_AMCLK		  41
 #define CLKID_AIFIFO2		  42
@@ -218,7 +218,7 @@
 #define CLKID_ADC		  45
 #define CLKID_BLKMV		  46
 /* CLKID_AIU */
-#define CLKID_UART1		  48
+/* CLKID_UART1 */
 #define CLKID_G2D		  49
 /* CLKID_USB0 */
 /* CLKID_USB1 */
@@ -238,7 +238,7 @@
 /* CLKID_USB0_DDR_BRIDGE */
 #define CLKID_MMC_PCLK		  66
 #define CLKID_DVIN		  67
-#define CLKID_UART2		  68
+/* CLKID_UART2 */
 /* #define CLKID_SANA */
 #define CLKID_VPU_INTR		  70
 #define CLKID_SEC_AHB_AHB3_BRIDGE 71
@@ -251,7 +251,7 @@
 #define CLKID_GCLK_VENCI_INT	  78
 #define CLKID_DAC_CLK		  79
 /* CLKID_AOCLK_GATE */
-#define CLKID_IEC958_GATE	  81
+/* CLKID_IEC958_GATE */
 #define CLKID_ENC480P		  82
 #define CLKID_RNG1		  83
 #define CLKID_GCLK_VENCI_INT1	  84
@@ -277,13 +277,13 @@
 #define CLKID_MALI_1_DIV	 104
 /* CLKID_MALI_1	*/
 /* CLKID_MALI	*/
-#define CLKID_CTS_AMCLK		  107
+/* CLKID_CTS_AMCLK */
 #define CLKID_CTS_AMCLK_SEL	  108
 #define CLKID_CTS_AMCLK_DIV	  109
-#define CLKID_CTS_MCLK_I958	  110
+/* CLKID_CTS_MCLK_I958 */
 #define CLKID_CTS_MCLK_I958_SEL	  111
 #define CLKID_CTS_MCLK_I958_DIV	  112
-#define CLKID_CTS_I958		  113
+/* CLKID_CTS_I958 */
 
 #define NR_CLKS			  114
 
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 3881def..a687e02 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -87,20 +87,20 @@
 #define CLKID_PERIPHS		20
 #define CLKID_SPICC		21
 #define CLKID_I2C		22
-#define CLKID_SAR_ADC		23
+/* #define CLKID_SAR_ADC */
 #define CLKID_SMART_CARD	24
-#define CLKID_RNG0		25
+/* #define CLKID_RNG0 */
 #define CLKID_UART0		26
 #define CLKID_SDHC		27
 #define CLKID_STREAM		28
 #define CLKID_ASYNC_FIFO	29
-#define CLKID_SDIO		30
+/* #define CLKID_SDIO */
 #define CLKID_ABUF		31
 #define CLKID_HIU_IFACE		32
 #define CLKID_ASSIST_MISC	33
 #define CLKID_SPI		34
 #define CLKID_I2S_SPDIF		35
-#define CLKID_ETH		36
+/* #define CLKID_ETH */
 #define CLKID_DEMUX		37
 #define CLKID_AIU_GLUE		38
 #define CLKID_IEC958		39
@@ -114,12 +114,12 @@
 #define CLKID_AIU		47
 #define CLKID_UART1		48
 #define CLKID_G2D		49
-#define CLKID_USB0		50
-#define CLKID_USB1		51
+/* #define CLKID_USB0 */
+/* #define CLKID_USB1 */
 #define CLKID_RESET		52
 #define CLKID_NAND		53
 #define CLKID_DOS_PARSER	54
-#define CLKID_USB		55
+/* #define CLKID_USB */
 #define CLKID_VDIN1		56
 #define CLKID_AHB_ARB0		57
 #define CLKID_EFUSE		58
@@ -128,12 +128,12 @@
 #define CLKID_AHB_CTRL_BUS	61
 #define CLKID_HDMI_INTR_SYNC	62
 #define CLKID_HDMI_PCLK		63
-#define CLKID_USB1_DDR_BRIDGE	64
-#define CLKID_USB0_DDR_BRIDGE	65
+/* CLKID_USB1_DDR_BRIDGE */
+/* CLKID_USB0_DDR_BRIDGE */
 #define CLKID_MMC_PCLK		66
 #define CLKID_DVIN		67
 #define CLKID_UART2		68
-#define CLKID_SANA		69
+/* #define CLKID_SANA */
 #define CLKID_VPU_INTR		70
 #define CLKID_SEC_AHB_AHB3_BRIDGE	71
 #define CLKID_CLK81_A9		72
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index b0d551a..eb89c78 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -156,6 +156,7 @@
 	bool "Support for Allwinner SoCs' PRCM CCUs"
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_GATE
+	select SUNXI_CCU_MP
 	default MACH_SUN8I || (ARCH_SUNXI && ARM64)
 
 endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index 9b3cd24..061b6fb 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -31,7 +31,9 @@
 #define CLK_PLL_VIDEO0_2X		8
 #define CLK_PLL_VE			9
 #define CLK_PLL_DDR0			10
-#define CLK_PLL_PERIPH0			11
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X		12
 #define CLK_PLL_PERIPH1			13
 #define CLK_PLL_PERIPH1_2X		14
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5c476f9..5372bf8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk,	"ahb-ss",	"ahb",
 static SUNXI_CCU_GATE(ahb_dma_clk,	"ahb-dma",	"ahb",
 		      0x060, BIT(6), 0);
 static SUNXI_CCU_GATE(ahb_bist_clk,	"ahb-bist",	"ahb",
-		      0x060, BIT(6), 0);
+		      0x060, BIT(7), 0);
 static SUNXI_CCU_GATE(ahb_mmc0_clk,	"ahb-mmc0",	"ahb",
 		      0x060, BIT(8), 0);
 static SUNXI_CCU_GATE(ahb_mmc1_clk,	"ahb-mmc1",	"ahb",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 89e68d2..df97e25 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
 				 0x12c, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-				 0x12c, 0, 4, 24, 3, BIT(31),
+				 0x130, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index 85973d1..1b4baea 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -29,7 +29,9 @@
 #define CLK_PLL_VIDEO		6
 #define CLK_PLL_VE		7
 #define CLK_PLL_DDR		8
-#define CLK_PLL_PERIPH0		9
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	10
 #define CLK_PLL_GPU		11
 #define CLK_PLL_PERIPH1		12
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index e58706b..6297add 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
 	[RST_BUS_EMAC]		=  { 0x2c0, BIT(17) },
 	[RST_BUS_HSTIMER]	=  { 0x2c0, BIT(19) },
 	[RST_BUS_SPI0]		=  { 0x2c0, BIT(20) },
-	[RST_BUS_OTG]		=  { 0x2c0, BIT(23) },
+	[RST_BUS_OTG]		=  { 0x2c0, BIT(24) },
 	[RST_BUS_EHCI0]		=  { 0x2c0, BIT(26) },
 	[RST_BUS_OHCI0]		=  { 0x2c0, BIT(29) },
 
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 545d541..fcae5ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,22 +1,16 @@
 menu "Clock Source drivers"
 	depends on !ARCH_USES_GETTIMEOFFSET
 
-config CLKSRC_OF
+config TIMER_OF
 	bool
-	select CLKSRC_PROBE
+	depends on GENERIC_CLOCKEVENTS
+	select TIMER_PROBE
 
-config CLKEVT_OF
+config TIMER_ACPI
 	bool
-	select CLKEVT_PROBE
+	select TIMER_PROBE
 
-config CLKSRC_ACPI
-	bool
-	select CLKSRC_PROBE
-
-config CLKSRC_PROBE
-	bool
-
-config CLKEVT_PROBE
+config TIMER_PROBE
 	bool
 
 config CLKSRC_I8253
@@ -65,14 +59,14 @@
 config DW_APB_TIMER_OF
 	bool
 	select DW_APB_TIMER
-	select CLKSRC_OF
+	select TIMER_OF
 
 config FTTMR010_TIMER
 	bool "Faraday Technology timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAS_IOMEM
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 	select MFD_SYSCON
 	help
 	  Enables support for the Faraday Technology timer block
@@ -81,7 +75,7 @@
 config ROCKCHIP_TIMER
 	bool "Rockchip timer driver" if COMPILE_TEST
 	depends on ARM || ARM64
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  Enables the support for the rockchip timer driver.
@@ -89,7 +83,7 @@
 config ARMADA_370_XP_TIMER
 	bool "Armada 370 and XP timer driver" if COMPILE_TEST
 	depends on ARM
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  Enables the support for the Armada 370 and XP timer driver.
@@ -104,16 +98,24 @@
 config ORION_TIMER
 	bool "Orion timer driver" if COMPILE_TEST
 	depends on ARM
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  Enables the support for the Orion timer driver
 
+config OWL_TIMER
+	bool "Owl timer driver" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_MMIO
+	help
+	  Enables the support for the Actions Semi Owl timer driver.
+
 config SUN4I_TIMER
 	bool "Sun4i timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAS_IOMEM
 	select CLKSRC_MMIO
+	select TIMER_OF
 	help
 	  Enables support for the Sun4i timer.
 
@@ -148,7 +150,7 @@
 	bool "ASM9260 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  Enables support for the ASM9260 timer.
 
@@ -188,13 +190,6 @@
 	help
 	  Enables support for the Atlas7 timer.
 
-config MOXART_TIMER
-	bool "Moxart timer driver" if COMPILE_TEST
-	depends on GENERIC_CLOCKEVENTS
-	select CLKSRC_MMIO
-	help
-	  Enables support for the Moxart timer.
-
 config MXS_TIMER
 	bool "Mxs timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
@@ -261,21 +256,21 @@
 	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
 	depends on ARM
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  Support for the LPC32XX clocksource.
 
 config CLKSRC_PISTACHIO
 	bool "Clocksource for Pistachio SoC" if COMPILE_TEST
 	depends on HAS_IOMEM
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  Enables the clocksource for the Pistachio SoC.
 
 config CLKSRC_TI_32K
 	bool "Texas Instruments 32.768 Hz Clocksource" if COMPILE_TEST
 	depends on GENERIC_SCHED_CLOCK
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	help
 	  This option enables support for Texas Instruments 32.768 Hz clocksource
 	  available on many OMAP-like platforms.
@@ -284,7 +279,7 @@
 	bool "NPS400 clocksource driver" if COMPILE_TEST
 	depends on !PHYS_ADDR_T_64BIT
 	select CLKSRC_MMIO
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	help
 	  NPS400 clocksource support.
 	  Got 64 bit counter with update rate up to 1000MHz.
@@ -299,12 +294,12 @@
 	bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
 	depends on GENERIC_SCHED_CLOCK
 	select CLKSRC_MMIO
-	select CLKSRC_OF
+	select TIMER_OF
 
 config ARC_TIMERS
 	bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
 	  (ARC700 as well as ARC HS38).
@@ -314,7 +309,7 @@
 	bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
 	depends on ARC_TIMERS
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
 	  RTC is implemented inside the core, while GFRC sits outside the core in
@@ -323,8 +318,8 @@
 
 config ARM_ARCH_TIMER
 	bool
-	select CLKSRC_OF if OF
-	select CLKSRC_ACPI if ACPI
+	select TIMER_OF if OF
+	select TIMER_ACPI if ACPI
 
 config ARM_ARCH_TIMER_EVTSTREAM
 	bool "Enable ARM architected timer event stream generation by default"
@@ -381,7 +376,7 @@
 
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	depends on ARM
 	help
 	  This options enables support for the ARM global timer unit
@@ -390,7 +385,7 @@
 	bool "Support for Dual Timer SP804 module"
 	depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
 	select CLKSRC_MMIO
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 
 config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 	bool
@@ -401,19 +396,19 @@
 
 config ARMV7M_SYSTICK
 	bool "Support for the ARMv7M system time" if COMPILE_TEST
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	select CLKSRC_MMIO
 	help
 	  This options enables support for the ARMv7M system timer unit
 
 config ATMEL_PIT
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	def_bool SOC_AT91SAM9 || SOC_SAMA5
 
 config ATMEL_ST
 	bool "Atmel ST timer support" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
-	select CLKSRC_OF
+	select TIMER_OF
 	select MFD_SYSCON
 	help
 	  Support for the Atmel ST timer.
@@ -456,7 +451,7 @@
 config OXNAS_RPS_TIMER
 	bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  This enables support for the Oxford Semiconductor OXNAS RPS timers.
@@ -467,7 +462,7 @@
 config MTK_TIMER
 	bool "Mediatek timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  Support for Mediatek timer driver.
@@ -540,7 +535,7 @@
 config CLKSRC_QCOM
 	bool "Qualcomm MSM timer" if COMPILE_TEST
 	depends on ARM
-	select CLKSRC_OF
+	select TIMER_OF
 	help
 	  This enables the clocksource and the per CPU clockevent driver for the
 	  Qualcomm SoCs.
@@ -548,7 +543,7 @@
 config CLKSRC_VERSATILE
 	bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
 	depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
-	select CLKSRC_OF
+	select TIMER_OF
 	default y if MFD_VEXPRESS_SYSREG
 	help
 	  This option enables clock source based on free running
@@ -559,12 +554,12 @@
 config CLKSRC_MIPS_GIC
 	bool
 	depends on MIPS_GIC
-	select CLKSRC_OF
+	select TIMER_OF
 
 config CLKSRC_TANGO_XTAL
 	bool "Clocksource for Tango SoC" if COMPILE_TEST
 	depends on ARM
-	select CLKSRC_OF
+	select TIMER_OF
 	select CLKSRC_MMIO
 	help
 	  This enables the clocksource for Tango SoC
@@ -605,7 +600,7 @@
 
 config CLKSRC_ST_LPC
 	bool "Low power clocksource found in the LPC" if COMPILE_TEST
-	select CLKSRC_OF if OF
+	select TIMER_OF if OF
 	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	help
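(The remaining clocksource hunks in this series are mechanical applications of the rename above: a driver that used to "select CLKSRC_OF" now selects TIMER_OF, and its device-tree registration switches from CLOCKSOURCE_OF_DECLARE to TIMER_OF_DECLARE with the same three arguments. A minimal sketch of the resulting pattern -- the driver name and compatible string are invented for illustration and are not part of the patch:)

/* drivers/clocksource/foo-timer.c -- illustrative skeleton only */
#include <linux/clocksource.h>
#include <linux/of.h>

static int __init foo_timer_init(struct device_node *np)
{
	/*
	 * Map registers, request the interrupt, then register the
	 * clocksource/clockevent, as the existing drivers in this
	 * directory do.
	 */
	return 0;
}
TIMER_OF_DECLARE(foo_timer, "vendor,foo-timer", foo_timer_init);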
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 2b5b56a..6df9494 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,5 +1,5 @@
-obj-$(CONFIG_CLKSRC_PROBE)	+= clksrc-probe.o
-obj-$(CONFIG_CLKEVT_PROBE)	+= clkevt-probe.o
+obj-$(CONFIG_TIMER_OF)		+= timer-of.o
+obj-$(CONFIG_TIMER_PROBE)	+= timer-probe.o
 obj-$(CONFIG_ATMEL_PIT)		+= timer-atmel-pit.o
 obj-$(CONFIG_ATMEL_ST)		+= timer-atmel-st.o
 obj-$(CONFIG_ATMEL_TCB_CLKSRC)	+= tcb_clksrc.o
@@ -26,7 +26,6 @@
 obj-$(CONFIG_BCM2835_TIMER)	+= bcm2835_timer.o
 obj-$(CONFIG_CLPS711X_TIMER)	+= clps711x-timer.o
 obj-$(CONFIG_ATLAS7_TIMER)	+= timer-atlas7.o
-obj-$(CONFIG_MOXART_TIMER)	+= moxart_timer.o
 obj-$(CONFIG_MXS_TIMER)		+= mxs_timer.o
 obj-$(CONFIG_CLKSRC_PXA)	+= pxa_timer.o
 obj-$(CONFIG_PRIMA2_TIMER)	+= timer-prima2.o
@@ -53,6 +52,7 @@
 obj-$(CONFIG_CLKSRC_TI_32K)	+= timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)	+= timer-nps.o
 obj-$(CONFIG_OXNAS_RPS_TIMER)	+= timer-oxnas-rps.o
+obj-$(CONFIG_OWL_TIMER)		+= owl-timer.o
 
 obj-$(CONFIG_ARC_TIMERS)		+= arc_timer.o
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 2164973..4927355 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -99,7 +99,7 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
 
 	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
 }
-CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
 
 #define AUX_RTC_CTRL	0x103
 #define AUX_RTC_LOW	0x104
@@ -158,7 +158,7 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
 
 	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
-CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
 
 #endif
 
@@ -333,4 +333,4 @@ static int __init arc_of_timer_init(struct device_node *np)
 
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
+TIMER_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4bed671e..aae87c4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1194,8 +1194,8 @@ static int __init arch_timer_of_init(struct device_node *np)
 
 	return arch_timer_common_init();
 }
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
-CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
+TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
+TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
 
 static u32 __init
 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
 		return 0;
 	}
 
-	rate = readl_relaxed(frame + CNTFRQ);
+	rate = readl_relaxed(base + CNTFRQ);
 
-	iounmap(frame);
+	iounmap(base);
 
 	return rate;
 }
@@ -1382,7 +1382,7 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
 	kfree(timer_mem);
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
+TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
 		       arch_timer_mem_of_init);
 
 #ifdef CONFIG_ACPI_GTDT
@@ -1516,5 +1516,5 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
 
 	return arch_timer_common_init();
 }
-CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
+TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
 #endif
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 123ed20..095bb96 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -339,5 +339,5 @@ static int __init global_timer_of_register(struct device_node *np)
 }
 
 /* Only tested on r2p2 and r3p0  */
-CLOCKSOURCE_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer",
+TIMER_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer",
 			global_timer_of_register);
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
index a315491..ac046d6 100644
--- a/drivers/clocksource/armv7m_systick.c
+++ b/drivers/clocksource/armv7m_systick.c
@@ -82,5 +82,5 @@ static int __init system_timer_of_register(struct device_node *np)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
+TIMER_OF_DECLARE(arm_systick, "arm,armv7m-systick",
 			system_timer_of_register);
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index c678083..38cd2fe 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -238,5 +238,5 @@ static int __init asm9260_timer_init(struct device_node *np)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
+TIMER_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
 		asm9260_timer_init);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index dce4430..82828d3 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -148,5 +148,5 @@ static int __init bcm2835_timer_init(struct device_node *node)
 	iounmap(base);
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
+TIMER_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
 			bcm2835_timer_init);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index fda5e147..5c40be9 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -198,9 +198,9 @@ static int __init kona_timer_init(struct device_node *node)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
+TIMER_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
 /*
  * bcm,kona-timer is deprecated by brcm,kona-timer
  * being kept here for driver compatibility
  */
-CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
+TIMER_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 44e5e95..29d5175 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -18,6 +18,7 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/slab.h>
@@ -539,4 +540,4 @@ static int __init ttc_timer_init(struct device_node *timer)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
+TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
deleted file mode 100644
index eb89b50..0000000
--- a/drivers/clocksource/clkevt-probe.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2016, Linaro Ltd.  All rights reserved.
- * Daniel Lezcano <daniel.lezcano@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/clockchips.h>
-
-extern struct of_device_id __clkevt_of_table[];
-
-static const struct of_device_id __clkevt_of_table_sentinel
-	__used __section(__clkevt_of_table_end);
-
-int __init clockevent_probe(void)
-{
-	struct device_node *np;
-	const struct of_device_id *match;
-	of_init_fn_1_ret init_func;
-	int ret, clockevents = 0;
-
-	for_each_matching_node_and_match(np, __clkevt_of_table, &match) {
-		if (!of_device_is_available(np))
-			continue;
-
-		init_func = match->data;
-
-		ret = init_func(np);
-		if (ret) {
-			pr_warn("Failed to initialize '%s' (%d)\n",
-				np->name, ret);
-			continue;
-		}
-
-		clockevents++;
-	}
-
-	if (!clockevents) {
-		pr_crit("%s: no matching clockevent found\n", __func__);
-		return -ENODEV;
-	}
-
-	return 0;
-}
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index c69e277..c1b96dc 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -86,5 +86,5 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
 #endif
 	return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
 }
-CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
+TIMER_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
 		       clksrc_dbx500_prcmu_init);
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
index 03cc492..a1d01eb 100644
--- a/drivers/clocksource/clksrc_st_lpc.c
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -132,4 +132,4 @@ static int __init st_clksrc_of_register(struct device_node *np)
 
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
+TIMER_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index 24db6d6..a8dd805 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -103,7 +103,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
 	BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
 }
 
-#ifdef CONFIG_CLKSRC_OF
+#ifdef CONFIG_TIMER_OF
 static int __init clps711x_timer_init(struct device_node *np)
 {
 	unsigned int irq = irq_of_parse_and_map(np, 0);
@@ -119,5 +119,5 @@ static int __init clps711x_timer_init(struct device_node *np)
 		return -EINVAL;
 	}
 }
-CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
+TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
 #endif
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index aee6c0d..69866cd 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -167,7 +167,7 @@ static int __init dw_apb_timer_init(struct device_node *timer)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
+TIMER_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
+TIMER_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+TIMER_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+TIMER_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 670ff0f..7a244b6 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -610,5 +610,5 @@ static int __init mct_init_ppi(struct device_node *np)
 {
 	return mct_init_dt(np, MCT_INT_PPI);
 }
-CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
-CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
+TIMER_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
+TIMER_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 738515b..3ee7e6f 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -329,13 +329,13 @@ static int __init ftm_timer_init(struct device_node *np)
 	priv->clkevt_base = of_iomap(np, 0);
 	if (!priv->clkevt_base) {
 		pr_err("ftm: unable to map event timer registers\n");
-		goto err;
+		goto err_clkevt;
 	}
 
 	priv->clksrc_base = of_iomap(np, 1);
 	if (!priv->clksrc_base) {
 		pr_err("ftm: unable to map source timer registers\n");
-		goto err;
+		goto err_clksrc;
 	}
 
 	ret = -EINVAL;
@@ -366,7 +366,11 @@ static int __init ftm_timer_init(struct device_node *np)
 	return 0;
 
 err:
+	iounmap(priv->clksrc_base);
+err_clksrc:
+	iounmap(priv->clkevt_base);
+err_clkevt:
 	kfree(priv);
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
+TIMER_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 5b27fb9..dfbd4f8 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -187,5 +187,5 @@ static int __init h8300_16timer_init(struct device_node *node)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
+TIMER_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
 			   h8300_16timer_init);
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index 804c489..f6ffb0c 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -207,4 +207,4 @@ static int __init h8300_8timer_init(struct device_node *node)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
+TIMER_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 72e1cf2..45a8d17 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -154,4 +154,4 @@ static int __init h8300_tpu_init(struct device_node *node)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
+TIMER_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 7c61226..5d3d88e 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -246,4 +246,4 @@ static int __init jcore_pit_init(struct device_node *node)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
+TIMER_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 39d21f6..92f2099 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -174,5 +174,5 @@ static int __init meson6_timer_init(struct device_node *node)
 					1, 0xfffe);
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
+TIMER_OF_DECLARE(meson6, "amlogic,meson6-timer",
 		       meson6_timer_init);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 3f52ee2..17b861e 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -167,10 +167,11 @@ static int __init gic_clocksource_of_init(struct device_node *node)
 
 	clk = of_clk_get(node, 0);
 	if (!IS_ERR(clk)) {
-		if (clk_prepare_enable(clk) < 0) {
+		ret = clk_prepare_enable(clk);
+		if (ret < 0) {
 			pr_err("GIC failed to enable clock\n");
 			clk_put(clk);
-			return PTR_ERR(clk);
+			return ret;
 		}
 
 		gic_frequency = clk_get_rate(clk);
@@ -200,5 +201,5 @@ static int __init gic_clocksource_of_init(struct device_node *node)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
+TIMER_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
 		       gic_clocksource_of_init);
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
deleted file mode 100644
index 7f34306..0000000
--- a/drivers/clocksource/moxart_timer.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * MOXA ART SoCs timer handling.
- *
- * Copyright (C) 2013 Jonas Jensen
- *
- * Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/clocksource.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
-
-#define TIMER1_BASE		0x00
-#define TIMER2_BASE		0x10
-#define TIMER3_BASE		0x20
-
-#define REG_COUNT		0x0 /* writable */
-#define REG_LOAD		0x4
-#define REG_MATCH1		0x8
-#define REG_MATCH2		0xC
-
-#define TIMER_CR		0x30
-#define TIMER_INTR_STATE	0x34
-#define TIMER_INTR_MASK		0x38
-
-/*
- * Moxart TIMER_CR flags:
- *
- * MOXART_CR_*_CLOCK	0: PCLK, 1: EXT1CLK
- * MOXART_CR_*_INT	overflow interrupt enable bit
- */
-#define MOXART_CR_1_ENABLE	BIT(0)
-#define MOXART_CR_1_CLOCK	BIT(1)
-#define MOXART_CR_1_INT	BIT(2)
-#define MOXART_CR_2_ENABLE	BIT(3)
-#define MOXART_CR_2_CLOCK	BIT(4)
-#define MOXART_CR_2_INT	BIT(5)
-#define MOXART_CR_3_ENABLE	BIT(6)
-#define MOXART_CR_3_CLOCK	BIT(7)
-#define MOXART_CR_3_INT	BIT(8)
-#define MOXART_CR_COUNT_UP	BIT(9)
-
-#define MOXART_TIMER1_ENABLE	(MOXART_CR_2_ENABLE | MOXART_CR_1_ENABLE)
-#define MOXART_TIMER1_DISABLE	(MOXART_CR_2_ENABLE)
-
-/*
- * The ASpeed variant of the IP block has a different layout
- * for the control register
- */
-#define ASPEED_CR_1_ENABLE	BIT(0)
-#define ASPEED_CR_1_CLOCK	BIT(1)
-#define ASPEED_CR_1_INT		BIT(2)
-#define ASPEED_CR_2_ENABLE	BIT(4)
-#define ASPEED_CR_2_CLOCK	BIT(5)
-#define ASPEED_CR_2_INT		BIT(6)
-#define ASPEED_CR_3_ENABLE	BIT(8)
-#define ASPEED_CR_3_CLOCK	BIT(9)
-#define ASPEED_CR_3_INT		BIT(10)
-
-#define ASPEED_TIMER1_ENABLE   (ASPEED_CR_2_ENABLE | ASPEED_CR_1_ENABLE)
-#define ASPEED_TIMER1_DISABLE  (ASPEED_CR_2_ENABLE)
-
-struct moxart_timer {
-	void __iomem *base;
-	unsigned int t1_disable_val;
-	unsigned int t1_enable_val;
-	unsigned int count_per_tick;
-	struct clock_event_device clkevt;
-};
-
-static inline struct moxart_timer *to_moxart(struct clock_event_device *evt)
-{
-	return container_of(evt, struct moxart_timer, clkevt);
-}
-
-static inline void moxart_disable(struct clock_event_device *evt)
-{
-	struct moxart_timer *timer = to_moxart(evt);
-
-	writel(timer->t1_disable_val, timer->base + TIMER_CR);
-}
-
-static inline void moxart_enable(struct clock_event_device *evt)
-{
-	struct moxart_timer *timer = to_moxart(evt);
-
-	writel(timer->t1_enable_val, timer->base + TIMER_CR);
-}
-
-static int moxart_shutdown(struct clock_event_device *evt)
-{
-	moxart_disable(evt);
-	return 0;
-}
-
-static int moxart_set_oneshot(struct clock_event_device *evt)
-{
-	moxart_disable(evt);
-	writel(~0, to_moxart(evt)->base + TIMER1_BASE + REG_LOAD);
-	return 0;
-}
-
-static int moxart_set_periodic(struct clock_event_device *evt)
-{
-	struct moxart_timer *timer = to_moxart(evt);
-
-	moxart_disable(evt);
-	writel(timer->count_per_tick, timer->base + TIMER1_BASE + REG_LOAD);
-	writel(0, timer->base + TIMER1_BASE + REG_MATCH1);
-	moxart_enable(evt);
-	return 0;
-}
-
-static int moxart_clkevt_next_event(unsigned long cycles,
-				    struct clock_event_device *evt)
-{
-	struct moxart_timer *timer = to_moxart(evt);
-	u32 u;
-
-	moxart_disable(evt);
-
-	u = readl(timer->base + TIMER1_BASE + REG_COUNT) - cycles;
-	writel(u, timer->base + TIMER1_BASE + REG_MATCH1);
-
-	moxart_enable(evt);
-
-	return 0;
-}
-
-static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = dev_id;
-	evt->event_handler(evt);
-	return IRQ_HANDLED;
-}
-
-static int __init moxart_timer_init(struct device_node *node)
-{
-	int ret, irq;
-	unsigned long pclk;
-	struct clk *clk;
-	struct moxart_timer *timer;
-
-	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
-	if (!timer)
-		return -ENOMEM;
-
-	timer->base = of_iomap(node, 0);
-	if (!timer->base) {
-		pr_err("%s: of_iomap failed\n", node->full_name);
-		ret = -ENXIO;
-		goto out_free;
-	}
-
-	irq = irq_of_parse_and_map(node, 0);
-	if (irq <= 0) {
-		pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
-		ret = -EINVAL;
-		goto out_unmap;
-	}
-
-	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk))  {
-		pr_err("%s: of_clk_get failed\n", node->full_name);
-		ret = PTR_ERR(clk);
-		goto out_unmap;
-	}
-
-	pclk = clk_get_rate(clk);
-
-	if (of_device_is_compatible(node, "moxa,moxart-timer")) {
-		timer->t1_enable_val = MOXART_TIMER1_ENABLE;
-		timer->t1_disable_val = MOXART_TIMER1_DISABLE;
-	} else if (of_device_is_compatible(node, "aspeed,ast2400-timer")) {
-		timer->t1_enable_val = ASPEED_TIMER1_ENABLE;
-		timer->t1_disable_val = ASPEED_TIMER1_DISABLE;
-	} else {
-		pr_err("%s: unknown platform\n", node->full_name);
-		ret = -EINVAL;
-		goto out_unmap;
-	}
-
-	timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
-
-	timer->clkevt.name = node->name;
-	timer->clkevt.rating = 200;
-	timer->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
-					CLOCK_EVT_FEAT_ONESHOT;
-	timer->clkevt.set_state_shutdown = moxart_shutdown;
-	timer->clkevt.set_state_periodic = moxart_set_periodic;
-	timer->clkevt.set_state_oneshot = moxart_set_oneshot;
-	timer->clkevt.tick_resume = moxart_set_oneshot;
-	timer->clkevt.set_next_event = moxart_clkevt_next_event;
-	timer->clkevt.cpumask = cpumask_of(0);
-	timer->clkevt.irq = irq;
-
-	ret = clocksource_mmio_init(timer->base + TIMER2_BASE + REG_COUNT,
-				    "moxart_timer", pclk, 200, 32,
-				    clocksource_mmio_readl_down);
-	if (ret) {
-		pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
-		goto out_unmap;
-	}
-
-	ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER,
-			  node->name, &timer->clkevt);
-	if (ret) {
-		pr_err("%s: setup_irq failed\n", node->full_name);
-		goto out_unmap;
-	}
-
-	/* Clear match registers */
-	writel(0, timer->base + TIMER1_BASE + REG_MATCH1);
-	writel(0, timer->base + TIMER1_BASE + REG_MATCH2);
-	writel(0, timer->base + TIMER2_BASE + REG_MATCH1);
-	writel(0, timer->base + TIMER2_BASE + REG_MATCH2);
-
-	/*
-	 * Start timer 2 rolling as our main wall clock source, keep timer 1
-	 * disabled
-	 */
-	writel(0, timer->base + TIMER_CR);
-	writel(~0, timer->base + TIMER2_BASE + REG_LOAD);
-	writel(timer->t1_disable_val, timer->base + TIMER_CR);
-
-	/*
-	 * documentation is not publicly available:
-	 * min_delta / max_delta obtained by trial-and-error,
-	 * max_delta 0xfffffffe should be ok because count
-	 * register size is u32
-	 */
-	clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe);
-
-	return 0;
-
-out_unmap:
-	iounmap(timer->base);
-out_free:
-	kfree(timer);
-	return ret;
-}
-CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
-CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init);
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 3e4431e..aa4d63af 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -274,4 +274,4 @@ static int __init mps2_timer_init(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
+TIMER_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 9065949..f9b724f 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -265,4 +265,4 @@ static int __init mtk_timer_init(struct device_node *node)
 
 	return -EINVAL;
 }
-CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
+TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 99b77af..a03434e 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -293,4 +293,4 @@ static int __init mxs_timer_init(struct device_node *np)
 
 	return setup_irq(irq, &mxs_timer_irq);
 }
-CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
+TIMER_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 7d44de3..8e4ddb9 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -284,5 +284,5 @@ static int __init nmdk_timer_of_init(struct device_node *node)
 
 	return nmdk_timer_init(base, irq, pclk, clk);
 }
-CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
+TIMER_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
 		       nmdk_timer_of_init);
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
new file mode 100644
index 0000000..d19c53c
--- /dev/null
+++ b/drivers/clocksource/owl-timer.c
@@ -0,0 +1,172 @@
+/*
+ * Actions Semi Owl timer
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 SUSE Linux GmbH
+ * Author: Andreas Färber
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define OWL_Tx_CTL		0x0
+#define OWL_Tx_CMP		0x4
+#define OWL_Tx_VAL		0x8
+
+#define OWL_Tx_CTL_PD		BIT(0)
+#define OWL_Tx_CTL_INTEN	BIT(1)
+#define OWL_Tx_CTL_EN		BIT(2)
+
+static void __iomem *owl_timer_base;
+static void __iomem *owl_clksrc_base;
+static void __iomem *owl_clkevt_base;
+
+static inline void owl_timer_reset(void __iomem *base)
+{
+	writel(0, base + OWL_Tx_CTL);
+	writel(0, base + OWL_Tx_VAL);
+	writel(0, base + OWL_Tx_CMP);
+}
+
+static inline void owl_timer_set_enabled(void __iomem *base, bool enabled)
+{
+	u32 ctl = readl(base + OWL_Tx_CTL);
+
+	/* PD bit is cleared when set */
+	ctl &= ~OWL_Tx_CTL_PD;
+
+	if (enabled)
+		ctl |= OWL_Tx_CTL_EN;
+	else
+		ctl &= ~OWL_Tx_CTL_EN;
+
+	writel(ctl, base + OWL_Tx_CTL);
+}
+
+static u64 notrace owl_timer_sched_read(void)
+{
+	return (u64)readl(owl_clksrc_base + OWL_Tx_VAL);
+}
+
+static int owl_timer_set_state_shutdown(struct clock_event_device *evt)
+{
+	owl_timer_set_enabled(owl_clkevt_base, false);
+
+	return 0;
+}
+
+static int owl_timer_set_state_oneshot(struct clock_event_device *evt)
+{
+	owl_timer_reset(owl_clkevt_base);
+
+	return 0;
+}
+
+static int owl_timer_tick_resume(struct clock_event_device *evt)
+{
+	return 0;
+}
+
+static int owl_timer_set_next_event(unsigned long evt,
+				    struct clock_event_device *ev)
+{
+	void __iomem *base = owl_clkevt_base;
+
+	owl_timer_set_enabled(base, false);
+	writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL);
+	writel(0, base + OWL_Tx_VAL);
+	writel(evt, base + OWL_Tx_CMP);
+	owl_timer_set_enabled(base, true);
+
+	return 0;
+}
+
+static struct clock_event_device owl_clockevent = {
+	.name			= "owl_tick",
+	.rating			= 200,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_DYNIRQ,
+	.set_state_shutdown	= owl_timer_set_state_shutdown,
+	.set_state_oneshot	= owl_timer_set_state_oneshot,
+	.tick_resume		= owl_timer_tick_resume,
+	.set_next_event		= owl_timer_set_next_event,
+};
+
+static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+	writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL);
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static int __init owl_timer_init(struct device_node *node)
+{
+	struct clk *clk;
+	unsigned long rate;
+	int timer1_irq, ret;
+
+	owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
+	if (IS_ERR(owl_timer_base)) {
+		pr_err("Can't map timer registers");
+		return PTR_ERR(owl_timer_base);
+	}
+
+	owl_clksrc_base = owl_timer_base + 0x08;
+	owl_clkevt_base = owl_timer_base + 0x14;
+
+	timer1_irq = of_irq_get_byname(node, "timer1");
+	if (timer1_irq <= 0) {
+		pr_err("Can't parse timer1 IRQ");
+		return -EINVAL;
+	}
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	rate = clk_get_rate(clk);
+
+	owl_timer_reset(owl_clksrc_base);
+	owl_timer_set_enabled(owl_clksrc_base, true);
+
+	sched_clock_register(owl_timer_sched_read, 32, rate);
+	clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
+			      rate, 200, 32, clocksource_mmio_readl_up);
+
+	owl_timer_reset(owl_clkevt_base);
+
+	ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER,
+			  "owl-timer", &owl_clockevent);
+	if (ret) {
+		pr_err("failed to request irq %d\n", timer1_irq);
+		return ret;
+	}
+
+	owl_clockevent.cpumask = cpumask_of(0);
+	owl_clockevent.irq = timer1_irq;
+
+	clockevents_config_and_register(&owl_clockevent, rate,
+					0xf, 0xffffffff);
+
+	return 0;
+}
+CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
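(Note that the newly added owl-timer.c still registers through CLOCKSOURCE_OF_DECLARE even though the rest of this series moves drivers to TIMER_OF_DECLARE. This is expected to build because the rename keeps the old macro as a thin compatibility alias in <linux/clocksource.h>, roughly as sketched below; this definition is an assumption for explanation and is not taken from this diff:)

/* Assumed transitional definition in include/linux/clocksource.h */
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
	TIMER_OF_DECLARE(name, compat, fn)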
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index a10fa667..08cd6ea 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -216,7 +216,7 @@ static int __init pxa_timer_dt_init(struct device_node *np)
 
 	return pxa_timer_common_init(irq, clk_get_rate(clk));
 }
-CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
+TIMER_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
 
 /*
  * Legacy timer init for non device-tree boards.
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index ee358cd..89816f8 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -254,5 +254,5 @@ static int __init msm_dt_timer_init(struct device_node *np)
 
 	return msm_timer_init(freq, 32, irq, !!percpu_offset);
 }
-CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
-CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
+TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
+TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index c76f576..6cffd7c 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -262,4 +262,4 @@ static int __init ostm_init(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
+TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index 49c02be..c27f4c8 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -303,5 +303,5 @@ static int __init rk_timer_init(struct device_node *np)
 	return -EINVAL;
 }
 
-CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init);
-CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init);
+TIMER_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer", rk_timer_init);
+TIMER_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer", rk_timer_init);
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index a68e653..6d5d126 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -418,7 +418,7 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
 	_samsung_pwm_clocksource_init();
 }
 
-#ifdef CONFIG_CLKSRC_OF
+#ifdef CONFIG_TIMER_OF
 static int __init samsung_pwm_alloc(struct device_node *np,
 				    const struct samsung_pwm_variant *variant)
 {
@@ -466,7 +466,7 @@ static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
 {
 	return samsung_pwm_alloc(np, &s3c24xx_variant);
 }
-CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
+TIMER_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
 
 static const struct samsung_pwm_variant s3c64xx_variant = {
 	.bits		= 32,
@@ -479,7 +479,7 @@ static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
 {
 	return samsung_pwm_alloc(np, &s3c64xx_variant);
 }
-CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
+TIMER_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
 
 static const struct samsung_pwm_variant s5p64x0_variant = {
 	.bits		= 32,
@@ -492,7 +492,7 @@ static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
 {
 	return samsung_pwm_alloc(np, &s5p64x0_variant);
 }
-CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
+TIMER_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
 
 static const struct samsung_pwm_variant s5p_variant = {
 	.bits		= 32,
@@ -505,5 +505,5 @@ static int __init s5p_pwm_clocksource_init(struct device_node *np)
 {
 	return samsung_pwm_alloc(np, &s5p_variant);
 }
-CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
+TIMER_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
 #endif
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 4452d5c..6e0180a 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -24,6 +24,8 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
+#include "timer-of.h"
+
 #define TIMER_IRQ_EN_REG	0x00
 #define TIMER_IRQ_EN(val)		BIT(val)
 #define TIMER_IRQ_ST_REG	0x04
@@ -39,38 +41,37 @@
 
 #define TIMER_SYNC_TICKS	3
 
-static void __iomem *timer_base;
-static u32 ticks_per_jiffy;
-
 /*
  * When we disable a timer, we need to wait at least for 2 cycles of
  * the timer source clock. We will use for that the clocksource timer
  * that is already setup and runs at the same frequency than the other
  * timers, and we never will be disabled.
  */
-static void sun4i_clkevt_sync(void)
+static void sun4i_clkevt_sync(void __iomem *base)
 {
-	u32 old = readl(timer_base + TIMER_CNTVAL_REG(1));
+	u32 old = readl(base + TIMER_CNTVAL_REG(1));
 
-	while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
+	while ((old - readl(base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
 		cpu_relax();
 }
 
-static void sun4i_clkevt_time_stop(u8 timer)
+static void sun4i_clkevt_time_stop(void __iomem *base, u8 timer)
 {
-	u32 val = readl(timer_base + TIMER_CTL_REG(timer));
-	writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer));
-	sun4i_clkevt_sync();
+	u32 val = readl(base + TIMER_CTL_REG(timer));
+	writel(val & ~TIMER_CTL_ENABLE, base + TIMER_CTL_REG(timer));
+	sun4i_clkevt_sync(base);
 }
 
-static void sun4i_clkevt_time_setup(u8 timer, unsigned long delay)
+static void sun4i_clkevt_time_setup(void __iomem *base, u8 timer,
+				    unsigned long delay)
 {
-	writel(delay, timer_base + TIMER_INTVAL_REG(timer));
+	writel(delay, base + TIMER_INTVAL_REG(timer));
 }
 
-static void sun4i_clkevt_time_start(u8 timer, bool periodic)
+static void sun4i_clkevt_time_start(void __iomem *base, u8 timer,
+				    bool periodic)
 {
-	u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+	u32 val = readl(base + TIMER_CTL_REG(timer));
 
 	if (periodic)
 		val &= ~TIMER_CTL_ONESHOT;
@@ -78,115 +79,106 @@ static void sun4i_clkevt_time_start(u8 timer, bool periodic)
 		val |= TIMER_CTL_ONESHOT;
 
 	writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
-	       timer_base + TIMER_CTL_REG(timer));
+	       base + TIMER_CTL_REG(timer));
 }
 
 static int sun4i_clkevt_shutdown(struct clock_event_device *evt)
 {
-	sun4i_clkevt_time_stop(0);
+	struct timer_of *to = to_timer_of(evt);
+
+	sun4i_clkevt_time_stop(timer_of_base(to), 0);
+
 	return 0;
 }
 
 static int sun4i_clkevt_set_oneshot(struct clock_event_device *evt)
 {
-	sun4i_clkevt_time_stop(0);
-	sun4i_clkevt_time_start(0, false);
+	struct timer_of *to = to_timer_of(evt);
+
+	sun4i_clkevt_time_stop(timer_of_base(to), 0);
+	sun4i_clkevt_time_start(timer_of_base(to), 0, false);
+
 	return 0;
 }
 
 static int sun4i_clkevt_set_periodic(struct clock_event_device *evt)
 {
-	sun4i_clkevt_time_stop(0);
-	sun4i_clkevt_time_setup(0, ticks_per_jiffy);
-	sun4i_clkevt_time_start(0, true);
+	struct timer_of *to = to_timer_of(evt);
+
+	sun4i_clkevt_time_stop(timer_of_base(to), 0);
+	sun4i_clkevt_time_setup(timer_of_base(to), 0, timer_of_period(to));
+	sun4i_clkevt_time_start(timer_of_base(to), 0, true);
+
 	return 0;
 }
 
 static int sun4i_clkevt_next_event(unsigned long evt,
-				   struct clock_event_device *unused)
+				   struct clock_event_device *clkevt)
 {
-	sun4i_clkevt_time_stop(0);
-	sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
-	sun4i_clkevt_time_start(0, false);
+	struct timer_of *to = to_timer_of(clkevt);
+
+	sun4i_clkevt_time_stop(timer_of_base(to), 0);
+	sun4i_clkevt_time_setup(timer_of_base(to), 0, evt - TIMER_SYNC_TICKS);
+	sun4i_clkevt_time_start(timer_of_base(to), 0, false);
 
 	return 0;
 }
 
-static struct clock_event_device sun4i_clockevent = {
-	.name = "sun4i_tick",
-	.rating = 350,
-	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_state_shutdown = sun4i_clkevt_shutdown,
-	.set_state_periodic = sun4i_clkevt_set_periodic,
-	.set_state_oneshot = sun4i_clkevt_set_oneshot,
-	.tick_resume = sun4i_clkevt_shutdown,
-	.set_next_event = sun4i_clkevt_next_event,
-};
-
-static void sun4i_timer_clear_interrupt(void)
+static void sun4i_timer_clear_interrupt(void __iomem *base)
 {
-	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+	writel(TIMER_IRQ_EN(0), base + TIMER_IRQ_ST_REG);
 }
 
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+	struct timer_of *to = to_timer_of(evt);
 
-	sun4i_timer_clear_interrupt();
+	sun4i_timer_clear_interrupt(timer_of_base(to));
 	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
 }
 
-static struct irqaction sun4i_timer_irq = {
-	.name = "sun4i_timer0",
-	.flags = IRQF_TIMER | IRQF_IRQPOLL,
-	.handler = sun4i_timer_interrupt,
-	.dev_id = &sun4i_clockevent,
+static struct timer_of to = {
+	.flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+	.clkevt = {
+		.name = "sun4i_tick",
+		.rating = 350,
+		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+		.set_state_shutdown = sun4i_clkevt_shutdown,
+		.set_state_periodic = sun4i_clkevt_set_periodic,
+		.set_state_oneshot = sun4i_clkevt_set_oneshot,
+		.tick_resume = sun4i_clkevt_shutdown,
+		.set_next_event = sun4i_clkevt_next_event,
+		.cpumask = cpu_possible_mask,
+	},
+
+	.of_irq = {
+		.handler = sun4i_timer_interrupt,
+		.flags = IRQF_TIMER | IRQF_IRQPOLL,
+	},
 };
 
 static u64 notrace sun4i_timer_sched_read(void)
 {
-	return ~readl(timer_base + TIMER_CNTVAL_REG(1));
+	return ~readl(timer_of_base(&to) + TIMER_CNTVAL_REG(1));
 }
 
 static int __init sun4i_timer_init(struct device_node *node)
 {
-	unsigned long rate = 0;
-	struct clk *clk;
-	int ret, irq;
+	int ret;
 	u32 val;
 
-	timer_base = of_iomap(node, 0);
-	if (!timer_base) {
-		pr_crit("Can't map registers\n");
-		return -ENXIO;
-	}
-
-	irq = irq_of_parse_and_map(node, 0);
-	if (irq <= 0) {
-		pr_crit("Can't parse IRQ\n");
-		return -EINVAL;
-	}
-
-	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk)) {
-		pr_crit("Can't get timer clock\n");
-		return PTR_ERR(clk);
-	}
-
-	ret = clk_prepare_enable(clk);
-	if (ret) {
-		pr_err("Failed to prepare clock\n");
+	ret = timer_of_init(node, &to);
+	if (ret)
 		return ret;
-	}
 
-	rate = clk_get_rate(clk);
-
-	writel(~0, timer_base + TIMER_INTVAL_REG(1));
+	writel(~0, timer_of_base(&to) + TIMER_INTVAL_REG(1));
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
 	       TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
-	       timer_base + TIMER_CTL_REG(1));
+	       timer_of_base(&to) + TIMER_CTL_REG(1));
 
 	/*
 	 * sched_clock_register does not have priorities, and on sun6i and
@@ -195,43 +187,34 @@ static int __init sun4i_timer_init(struct device_node *node)
 	if (of_machine_is_compatible("allwinner,sun4i-a10") ||
 	    of_machine_is_compatible("allwinner,sun5i-a13") ||
 	    of_machine_is_compatible("allwinner,sun5i-a10s"))
-		sched_clock_register(sun4i_timer_sched_read, 32, rate);
+		sched_clock_register(sun4i_timer_sched_read, 32,
+				     timer_of_rate(&to));
 
-	ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
-				    rate, 350, 32, clocksource_mmio_readl_down);
+	ret = clocksource_mmio_init(timer_of_base(&to) + TIMER_CNTVAL_REG(1),
+				    node->name, timer_of_rate(&to), 350, 32,
+				    clocksource_mmio_readl_down);
 	if (ret) {
 		pr_err("Failed to register clocksource\n");
 		return ret;
 	}
 
-	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
-
 	writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
-	       timer_base + TIMER_CTL_REG(0));
+	       timer_of_base(&to) + TIMER_CTL_REG(0));
 
 	/* Make sure timer is stopped before playing with interrupts */
-	sun4i_clkevt_time_stop(0);
+	sun4i_clkevt_time_stop(timer_of_base(&to), 0);
 
 	/* clear timer0 interrupt */
-	sun4i_timer_clear_interrupt();
+	sun4i_timer_clear_interrupt(timer_of_base(&to));
 
-	sun4i_clockevent.cpumask = cpu_possible_mask;
-	sun4i_clockevent.irq = irq;
-
-	clockevents_config_and_register(&sun4i_clockevent, rate,
+	clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
 					TIMER_SYNC_TICKS, 0xffffffff);
 
-	ret = setup_irq(irq, &sun4i_timer_irq);
-	if (ret) {
-		pr_err("failed to setup irq %d\n", irq);
-		return ret;
-	}
-
 	/* Enable timer0 interrupt */
-	val = readl(timer_base + TIMER_IRQ_EN_REG);
-	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+	val = readl(timer_of_base(&to) + TIMER_IRQ_EN_REG);
+	writel(val | TIMER_IRQ_EN(0), timer_of_base(&to) + TIMER_IRQ_EN_REG);
 
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
+TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
 		       sun4i_timer_init);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 12fcef8..c4e1c2e 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -53,4 +53,4 @@ static int __init tango_clocksource_init(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
+TIMER_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index d4ca996..59e8aee 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -9,6 +9,7 @@
 #include <linux/ioport.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
 #include <linux/atmel_tc.h>
 
 
@@ -40,6 +41,14 @@
  */
 
 static void __iomem *tcaddr;
+static struct
+{
+	u32 cmr;
+	u32 imr;
+	u32 rc;
+	bool clken;
+} tcb_cache[3];
+static u32 bmr_cache;
 
 static u64 tc_get_cycles(struct clocksource *cs)
 {
@@ -48,9 +57,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
 
 	raw_local_irq_save(flags);
 	do {
-		upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
-		lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-	} while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));
+		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
+		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
 
 	raw_local_irq_restore(flags);
 	return (upper << 16) | lower;
@@ -58,7 +67,47 @@ static u64 tc_get_cycles(struct clocksource *cs)
 
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
+	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+}
+
+static void tc_clksrc_suspend(struct clocksource *cs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
+		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
+		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
+		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
+					ATMEL_TC_CLKSTA);
+	}
+
+	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
+}
+
+static void tc_clksrc_resume(struct clocksource *cs)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+		/* Restore registers for the channel, RA and RB are not used */
+		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
+		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
+		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
+		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
+		/* Disable all the interrupts */
+		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
+		/* Reenable interrupts that were enabled before suspending */
+		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
+		/* Start the clock if it was used */
+		if (tcb_cache[i].clken)
+			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
+	}
+
+	/* Dual channel, chain channels */
+	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
+	/* Finally, trigger all the channels*/
+	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 }
 
 static struct clocksource clksrc = {
@@ -67,6 +116,8 @@ static struct clocksource clksrc = {
 	.read           = tc_get_cycles,
 	.mask           = CLOCKSOURCE_MASK(32),
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	.suspend	= tc_clksrc_suspend,
+	.resume		= tc_clksrc_resume,
 };
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -96,8 +147,8 @@ static int tc_shutdown(struct clock_event_device *d)
 	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
 	void __iomem		*regs = tcd->regs;
 
-	__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
-	__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
 	if (!clockevent_state_detached(d))
 		clk_disable(tcd->clk);
 
@@ -115,9 +166,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
 	clk_enable(tcd->clk);
 
 	/* slow clock, count up to RC, then irq and stop */
-	__raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
 		     ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
-	__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
 
 	/* set_next_event() configures and starts the timer */
 	return 0;
@@ -137,25 +188,25 @@ static int tc_set_periodic(struct clock_event_device *d)
 	clk_enable(tcd->clk);
 
 	/* slow clock, count up to RC, then irq and restart */
-	__raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 		     regs + ATMEL_TC_REG(2, CMR));
-	__raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* Enable clock and interrupts on RC compare */
-	__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
 
 	/* go go gadget! */
-	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
+	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
 		     ATMEL_TC_REG(2, CCR));
 	return 0;
 }
 
 static int tc_next_event(unsigned long delta, struct clock_event_device *d)
 {
-	__raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));
+	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* go go gadget! */
-	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
 			tcaddr + ATMEL_TC_REG(2, CCR));
 	return 0;
 }
@@ -179,7 +230,7 @@ static irqreturn_t ch2_irq(int irq, void *handle)
 	struct tc_clkevt_device	*dev = handle;
 	unsigned int		sr;
 
-	sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
+	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
 	if (sr & ATMEL_TC_CPCS) {
 		dev->clkevt.event_handler(&dev->clkevt);
 		return IRQ_HANDLED;
@@ -239,43 +290,43 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
 {
 	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
-	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
+	writel(mck_divisor_idx			/* likely divide-by-8 */
 			| ATMEL_TC_WAVE
 			| ATMEL_TC_WAVESEL_UP		/* free-run */
 			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
 			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
 			tcaddr + ATMEL_TC_REG(0, CMR));
-	__raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
-	__raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
-	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
-	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
+	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
+	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
+	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
 
 	/* channel 1:  waveform mode, input TIOA0 */
-	__raw_writel(ATMEL_TC_XC1			/* input: TIOA0 */
+	writel(ATMEL_TC_XC1			/* input: TIOA0 */
 			| ATMEL_TC_WAVE
 			| ATMEL_TC_WAVESEL_UP,		/* free-run */
 			tcaddr + ATMEL_TC_REG(1, CMR));
-	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
-	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
+	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
+	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
 
 	/* chain channel 0 to channel 1*/
-	__raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
+	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
 	/* then reset all the timers */
-	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 }
 
 static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
 {
 	/* channel 0:  waveform mode, input mclk/8 */
-	__raw_writel(mck_divisor_idx			/* likely divide-by-8 */
+	writel(mck_divisor_idx			/* likely divide-by-8 */
 			| ATMEL_TC_WAVE
 			| ATMEL_TC_WAVESEL_UP,		/* free-run */
 			tcaddr + ATMEL_TC_REG(0, CMR));
-	__raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
-	__raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
+	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
 
 	/* then reset all the timers */
-	__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 }
 
 static int __init tcb_clksrc_init(void)
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index b9990b9..c337a81 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -237,7 +237,7 @@ static int __init tegra20_init_timer(struct device_node *np)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
+TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
 
 static int __init tegra20_init_rtc(struct device_node *np)
 {
@@ -261,4 +261,4 @@ static int __init tegra20_init_rtc(struct device_node *np)
 
 	return register_persistent_clock(NULL, tegra_read_persistent_clock64);
 }
-CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
+TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index aea4380..edf1a46 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -351,7 +351,7 @@ static int __init armada_xp_timer_init(struct device_node *np)
 
 	return armada_370_xp_timer_common_init(np);
 }
-CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+TIMER_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
 		       armada_xp_timer_init);
 
 static int __init armada_375_timer_init(struct device_node *np)
@@ -389,7 +389,7 @@ static int __init armada_375_timer_init(struct device_node *np)
 
 	return armada_370_xp_timer_common_init(np);
 }
-CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
+TIMER_OF_DECLARE(armada_375, "marvell,armada-375-timer",
 		       armada_375_timer_init);
 
 static int __init armada_370_timer_init(struct device_node *np)
@@ -412,5 +412,5 @@ static int __init armada_370_timer_init(struct device_node *np)
 
 	return armada_370_xp_timer_common_init(np);
 }
-CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+TIMER_OF_DECLARE(armada_370, "marvell,armada-370-timer",
 		       armada_370_timer_init);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index ce0f97b..257e810 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -283,5 +283,5 @@ static int __init efm32_timer_init(struct device_node *np)
 
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
-CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
+TIMER_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
+TIMER_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index 9649cfd..d51a62a 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -311,4 +311,4 @@ static int __init lpc32xx_timer_init(struct device_node *np)
 
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
+TIMER_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index b9b97f6..1220206 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -189,4 +189,4 @@ static int __init orion_timer_init(struct device_node *np)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
+TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 3710e4d..a2dd85d 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -214,5 +214,5 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
 	sched_clock_register(pistachio_read_sched_clock, 32, rate);
 	return clocksource_register_hz(&pcs_gpt.cs, rate);
 }
-CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
+TIMER_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
 		       pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 50300ee..62c4bbc 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -283,4 +283,4 @@ static int __init sirfsoc_of_timer_init(struct device_node *np)
 
 	return sirfsoc_atlas7_timer_init(np);
 }
-CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
+TIMER_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index cc11235..ec8a437 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -255,5 +255,5 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
+TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
 		       at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index be4ac76..d2e660f 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -260,5 +260,5 @@ static int __init atmel_st_timer_init(struct device_node *node)
 	/* register clocksource */
 	return clocksource_register_hz(&clk32k, sclk_rate);
 }
-CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
+TIMER_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
 		       atmel_st_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index 94a161e..1e984a4 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -203,5 +203,5 @@ static int __init digicolor_timer_init(struct device_node *node)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
+TIMER_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
 		       digicolor_timer_init);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index b4a6f1e..66dd909 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -11,12 +11,13 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
 #include <linux/sched_clock.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
 
 /*
  * Register definitions for the timers
@@ -37,267 +38,368 @@
 #define TIMER_INTR_STATE	(0x34)
 #define TIMER_INTR_MASK		(0x38)
 
-#define TIMER_1_CR_ENABLE	(1 << 0)
-#define TIMER_1_CR_CLOCK	(1 << 1)
-#define TIMER_1_CR_INT		(1 << 2)
-#define TIMER_2_CR_ENABLE	(1 << 3)
-#define TIMER_2_CR_CLOCK	(1 << 4)
-#define TIMER_2_CR_INT		(1 << 5)
-#define TIMER_3_CR_ENABLE	(1 << 6)
-#define TIMER_3_CR_CLOCK	(1 << 7)
-#define TIMER_3_CR_INT		(1 << 8)
-#define TIMER_1_CR_UPDOWN	(1 << 9)
-#define TIMER_2_CR_UPDOWN	(1 << 10)
-#define TIMER_3_CR_UPDOWN	(1 << 11)
-#define TIMER_DEFAULT_FLAGS	(TIMER_1_CR_UPDOWN | \
-				 TIMER_3_CR_ENABLE | \
-				 TIMER_3_CR_UPDOWN)
+#define TIMER_1_CR_ENABLE	BIT(0)
+#define TIMER_1_CR_CLOCK	BIT(1)
+#define TIMER_1_CR_INT		BIT(2)
+#define TIMER_2_CR_ENABLE	BIT(3)
+#define TIMER_2_CR_CLOCK	BIT(4)
+#define TIMER_2_CR_INT		BIT(5)
+#define TIMER_3_CR_ENABLE	BIT(6)
+#define TIMER_3_CR_CLOCK	BIT(7)
+#define TIMER_3_CR_INT		BIT(8)
+#define TIMER_1_CR_UPDOWN	BIT(9)
+#define TIMER_2_CR_UPDOWN	BIT(10)
+#define TIMER_3_CR_UPDOWN	BIT(11)
 
-#define TIMER_1_INT_MATCH1	(1 << 0)
-#define TIMER_1_INT_MATCH2	(1 << 1)
-#define TIMER_1_INT_OVERFLOW	(1 << 2)
-#define TIMER_2_INT_MATCH1	(1 << 3)
-#define TIMER_2_INT_MATCH2	(1 << 4)
-#define TIMER_2_INT_OVERFLOW	(1 << 5)
-#define TIMER_3_INT_MATCH1	(1 << 6)
-#define TIMER_3_INT_MATCH2	(1 << 7)
-#define TIMER_3_INT_OVERFLOW	(1 << 8)
+/*
+ * The Aspeed AST2400 moves bits around in the control register
+ * and lacks bits for setting the timer to count upwards.
+ */
+#define TIMER_1_CR_ASPEED_ENABLE	BIT(0)
+#define TIMER_1_CR_ASPEED_CLOCK		BIT(1)
+#define TIMER_1_CR_ASPEED_INT		BIT(2)
+#define TIMER_2_CR_ASPEED_ENABLE	BIT(4)
+#define TIMER_2_CR_ASPEED_CLOCK		BIT(5)
+#define TIMER_2_CR_ASPEED_INT		BIT(6)
+#define TIMER_3_CR_ASPEED_ENABLE	BIT(8)
+#define TIMER_3_CR_ASPEED_CLOCK		BIT(9)
+#define TIMER_3_CR_ASPEED_INT		BIT(10)
+
+#define TIMER_1_INT_MATCH1	BIT(0)
+#define TIMER_1_INT_MATCH2	BIT(1)
+#define TIMER_1_INT_OVERFLOW	BIT(2)
+#define TIMER_2_INT_MATCH1	BIT(3)
+#define TIMER_2_INT_MATCH2	BIT(4)
+#define TIMER_2_INT_OVERFLOW	BIT(5)
+#define TIMER_3_INT_MATCH1	BIT(6)
+#define TIMER_3_INT_MATCH2	BIT(7)
+#define TIMER_3_INT_OVERFLOW	BIT(8)
 #define TIMER_INT_ALL_MASK	0x1ff
 
-static unsigned int tick_rate;
-static void __iomem *base;
+struct fttmr010 {
+	void __iomem *base;
+	unsigned int tick_rate;
+	bool count_down;
+	u32 t1_enable_val;
+	struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+	struct delay_timer delay_timer;
+#endif
+};
 
-static u64 notrace fttmr010_read_sched_clock(void)
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct fttmr010 *local_fttmr;
+
+static inline struct fttmr010 *to_fttmr010(struct clock_event_device *evt)
 {
-	return readl(base + TIMER3_COUNT);
+	return container_of(evt, struct fttmr010, clkevt);
+}
+
+static unsigned long fttmr010_read_current_timer_up(void)
+{
+	return readl(local_fttmr->base + TIMER2_COUNT);
+}
+
+static unsigned long fttmr010_read_current_timer_down(void)
+{
+	return ~readl(local_fttmr->base + TIMER2_COUNT);
+}
+
+static u64 notrace fttmr010_read_sched_clock_up(void)
+{
+	return fttmr010_read_current_timer_up();
+}
+
+static u64 notrace fttmr010_read_sched_clock_down(void)
+{
+	return fttmr010_read_current_timer_down();
 }
 
 static int fttmr010_timer_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
+	struct fttmr010 *fttmr010 = to_fttmr010(evt);
 	u32 cr;
 
-	/* Setup the match register */
-	cr = readl(base + TIMER1_COUNT);
-	writel(cr + cycles, base + TIMER1_MATCH1);
-	if (readl(base + TIMER1_COUNT) - cr > cycles)
-		return -ETIME;
+	/* Stop */
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr &= ~fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
+
+	/* Setup the match register forward/backward in time */
+	cr = readl(fttmr010->base + TIMER1_COUNT);
+	if (fttmr010->count_down)
+		cr -= cycles;
+	else
+		cr += cycles;
+	writel(cr, fttmr010->base + TIMER1_MATCH1);
+
+	/* Start */
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr |= fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
 
 	return 0;
 }
 
 static int fttmr010_timer_shutdown(struct clock_event_device *evt)
 {
+	struct fttmr010 *fttmr010 = to_fttmr010(evt);
 	u32 cr;
 
-	/*
-	 * Disable also for oneshot: the set_next() call will arm the timer
-	 * instead.
-	 */
-	/* Stop timer and interrupt. */
-	cr = readl(base + TIMER_CR);
-	cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
-	writel(cr, base + TIMER_CR);
+	/* Stop */
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr &= ~fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
 
-	/* Setup counter start from 0 */
-	writel(0, base + TIMER1_COUNT);
-	writel(0, base + TIMER1_LOAD);
+	return 0;
+}
 
-	/* enable interrupt */
-	cr = readl(base + TIMER_INTR_MASK);
+static int fttmr010_timer_set_oneshot(struct clock_event_device *evt)
+{
+	struct fttmr010 *fttmr010 = to_fttmr010(evt);
+	u32 cr;
+
+	/* Stop */
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr &= ~fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
+
+	/* Setup counter start from 0 or ~0 */
+	writel(0, fttmr010->base + TIMER1_COUNT);
+	if (fttmr010->count_down)
+		writel(~0, fttmr010->base + TIMER1_LOAD);
+	else
+		writel(0, fttmr010->base + TIMER1_LOAD);
+
+	/* Enable interrupt */
+	cr = readl(fttmr010->base + TIMER_INTR_MASK);
 	cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
 	cr |= TIMER_1_INT_MATCH1;
-	writel(cr, base + TIMER_INTR_MASK);
-
-	/* start the timer */
-	cr = readl(base + TIMER_CR);
-	cr |= TIMER_1_CR_ENABLE;
-	writel(cr, base + TIMER_CR);
+	writel(cr, fttmr010->base + TIMER_INTR_MASK);
 
 	return 0;
 }
 
 static int fttmr010_timer_set_periodic(struct clock_event_device *evt)
 {
-	u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
+	struct fttmr010 *fttmr010 = to_fttmr010(evt);
+	u32 period = DIV_ROUND_CLOSEST(fttmr010->tick_rate, HZ);
 	u32 cr;
 
-	/* Stop timer and interrupt */
-	cr = readl(base + TIMER_CR);
-	cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
-	writel(cr, base + TIMER_CR);
+	/* Stop */
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr &= ~fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
 
-	/* Setup timer to fire at 1/HT intervals. */
-	cr = 0xffffffff - (period - 1);
-	writel(cr, base + TIMER1_COUNT);
-	writel(cr, base + TIMER1_LOAD);
+	/* Setup timer to fire at 1/HZ intervals. */
+	if (fttmr010->count_down) {
+		writel(period, fttmr010->base + TIMER1_LOAD);
+		writel(0, fttmr010->base + TIMER1_MATCH1);
+	} else {
+		cr = 0xffffffff - (period - 1);
+		writel(cr, fttmr010->base + TIMER1_COUNT);
+		writel(cr, fttmr010->base + TIMER1_LOAD);
 
-	/* enable interrupt on overflow */
-	cr = readl(base + TIMER_INTR_MASK);
-	cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
-	cr |= TIMER_1_INT_OVERFLOW;
-	writel(cr, base + TIMER_INTR_MASK);
+		/* Enable interrupt on overflow */
+		cr = readl(fttmr010->base + TIMER_INTR_MASK);
+		cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
+		cr |= TIMER_1_INT_OVERFLOW;
+		writel(cr, fttmr010->base + TIMER_INTR_MASK);
+	}
 
 	/* Start the timer */
-	cr = readl(base + TIMER_CR);
-	cr |= TIMER_1_CR_ENABLE;
-	cr |= TIMER_1_CR_INT;
-	writel(cr, base + TIMER_CR);
+	cr = readl(fttmr010->base + TIMER_CR);
+	cr |= fttmr010->t1_enable_val;
+	writel(cr, fttmr010->base + TIMER_CR);
 
 	return 0;
 }
 
-/* Use TIMER1 as clock event */
-static struct clock_event_device fttmr010_clockevent = {
-	.name			= "TIMER1",
-	/* Reasonably fast and accurate clock event */
-	.rating			= 300,
-	.shift                  = 32,
-	.features		= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event		= fttmr010_timer_set_next_event,
-	.set_state_shutdown	= fttmr010_timer_shutdown,
-	.set_state_periodic	= fttmr010_timer_set_periodic,
-	.set_state_oneshot	= fttmr010_timer_shutdown,
-	.tick_resume		= fttmr010_timer_shutdown,
-};
-
 /*
  * IRQ handler for the timer
  */
 static irqreturn_t fttmr010_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = &fttmr010_clockevent;
+	struct clock_event_device *evt = dev_id;
 
 	evt->event_handler(evt);
 	return IRQ_HANDLED;
 }
 
-static struct irqaction fttmr010_timer_irq = {
-	.name		= "Faraday FTTMR010 Timer Tick",
-	.flags		= IRQF_TIMER,
-	.handler	= fttmr010_timer_interrupt,
-};
-
-static int __init fttmr010_timer_common_init(struct device_node *np)
+static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
 {
+	struct fttmr010 *fttmr010;
 	int irq;
+	struct clk *clk;
+	int ret;
+	u32 val;
 
-	base = of_iomap(np, 0);
-	if (!base) {
-		pr_err("Can't remap registers");
-		return -ENXIO;
-	}
-	/* IRQ for timer 1 */
-	irq = irq_of_parse_and_map(np, 0);
-	if (irq <= 0) {
-		pr_err("Can't parse IRQ");
-		return -EINVAL;
-	}
-
-	/*
-	 * Reset the interrupt mask and status
-	 */
-	writel(TIMER_INT_ALL_MASK, base + TIMER_INTR_MASK);
-	writel(0, base + TIMER_INTR_STATE);
-	writel(TIMER_DEFAULT_FLAGS, base + TIMER_CR);
-
-	/*
-	 * Setup free-running clocksource timer (interrupts
-	 * disabled.)
-	 */
-	writel(0, base + TIMER3_COUNT);
-	writel(0, base + TIMER3_LOAD);
-	writel(0, base + TIMER3_MATCH1);
-	writel(0, base + TIMER3_MATCH2);
-	clocksource_mmio_init(base + TIMER3_COUNT,
-			      "fttmr010_clocksource", tick_rate,
-			      300, 32, clocksource_mmio_readl_up);
-	sched_clock_register(fttmr010_read_sched_clock, 32, tick_rate);
-
-	/*
-	 * Setup clockevent timer (interrupt-driven.)
-	 */
-	writel(0, base + TIMER1_COUNT);
-	writel(0, base + TIMER1_LOAD);
-	writel(0, base + TIMER1_MATCH1);
-	writel(0, base + TIMER1_MATCH2);
-	setup_irq(irq, &fttmr010_timer_irq);
-	fttmr010_clockevent.cpumask = cpumask_of(0);
-	clockevents_config_and_register(&fttmr010_clockevent, tick_rate,
-					1, 0xffffffff);
-
-	return 0;
-}
-
-static int __init fttmr010_timer_of_init(struct device_node *np)
-{
 	/*
 	 * These implementations require a clock reference.
 	 * FIXME: we currently only support clocking using PCLK
 	 * and using EXTCLK is not supported in the driver.
 	 */
-	struct clk *clk;
-
 	clk = of_clk_get_by_name(np, "PCLK");
 	if (IS_ERR(clk)) {
-		pr_err("could not get PCLK");
+		pr_err("could not get PCLK\n");
 		return PTR_ERR(clk);
 	}
-	tick_rate = clk_get_rate(clk);
-
-	return fttmr010_timer_common_init(np);
-}
-CLOCKSOURCE_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_of_init);
-
-/*
- * Gemini-specific: relevant registers in the global syscon
- */
-#define GLOBAL_STATUS		0x04
-#define CPU_AHB_RATIO_MASK	(0x3 << 18)
-#define CPU_AHB_1_1		(0x0 << 18)
-#define CPU_AHB_3_2		(0x1 << 18)
-#define CPU_AHB_24_13		(0x2 << 18)
-#define CPU_AHB_2_1		(0x3 << 18)
-#define REG_TO_AHB_SPEED(reg)	((((reg) >> 15) & 0x7) * 10 + 130)
-
-static int __init gemini_timer_of_init(struct device_node *np)
-{
-	static struct regmap *map;
-	int ret;
-	u32 val;
-
-	map = syscon_regmap_lookup_by_phandle(np, "syscon");
-	if (IS_ERR(map)) {
-		pr_err("Can't get regmap for syscon handle\n");
-		return -ENODEV;
-	}
-	ret = regmap_read(map, GLOBAL_STATUS, &val);
+	ret = clk_prepare_enable(clk);
 	if (ret) {
-		pr_err("Can't read syscon status register\n");
-		return -ENXIO;
+		pr_err("failed to enable PCLK\n");
+		return ret;
 	}
 
-	tick_rate = REG_TO_AHB_SPEED(val) * 1000000;
-	pr_info("Bus: %dMHz ", tick_rate / 1000000);
+	fttmr010 = kzalloc(sizeof(*fttmr010), GFP_KERNEL);
+	if (!fttmr010) {
+		ret = -ENOMEM;
+		goto out_disable_clock;
+	}
+	fttmr010->tick_rate = clk_get_rate(clk);
 
-	tick_rate /= 6;		/* APB bus run AHB*(1/6) */
-
-	switch (val & CPU_AHB_RATIO_MASK) {
-	case CPU_AHB_1_1:
-		pr_cont("(1/1)\n");
-		break;
-	case CPU_AHB_3_2:
-		pr_cont("(3/2)\n");
-		break;
-	case CPU_AHB_24_13:
-		pr_cont("(24/13)\n");
-		break;
-	case CPU_AHB_2_1:
-		pr_cont("(2/1)\n");
-		break;
+	fttmr010->base = of_iomap(np, 0);
+	if (!fttmr010->base) {
+		pr_err("Can't remap registers\n");
+		ret = -ENXIO;
+		goto out_free;
+	}
+	/* IRQ for timer 1 */
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq <= 0) {
+		pr_err("Can't parse IRQ\n");
+		ret = -EINVAL;
+		goto out_unmap;
 	}
 
-	return fttmr010_timer_common_init(np);
+	/*
+	 * The Aspeed AST2400 moves bits around in the control register,
+	 * otherwise it works the same.
+	 */
+	if (is_aspeed) {
+		fttmr010->t1_enable_val = TIMER_1_CR_ASPEED_ENABLE |
+			TIMER_1_CR_ASPEED_INT;
+		/* Only downward counting is available */
+		fttmr010->count_down = true;
+	} else {
+		fttmr010->t1_enable_val = TIMER_1_CR_ENABLE | TIMER_1_CR_INT;
+	}
+
+	/*
+	 * Reset the interrupt mask and status
+	 */
+	writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
+	writel(0, fttmr010->base + TIMER_INTR_STATE);
+
+	/*
+	 * Enable timer 1 count up, timer 2 count up, except on Aspeed,
+	 * where everything just counts down.
+	 */
+	if (is_aspeed)
+		val = TIMER_2_CR_ASPEED_ENABLE;
+	else {
+		val = TIMER_2_CR_ENABLE;
+		if (!fttmr010->count_down)
+			val |= TIMER_1_CR_UPDOWN | TIMER_2_CR_UPDOWN;
+	}
+	writel(val, fttmr010->base + TIMER_CR);
+
+	/*
+	 * Setup free-running clocksource timer (interrupts
+	 * disabled.)
+	 */
+	local_fttmr = fttmr010;
+	writel(0, fttmr010->base + TIMER2_COUNT);
+	writel(0, fttmr010->base + TIMER2_MATCH1);
+	writel(0, fttmr010->base + TIMER2_MATCH2);
+
+	if (fttmr010->count_down) {
+		writel(~0, fttmr010->base + TIMER2_LOAD);
+		clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
+				      "FTTMR010-TIMER2",
+				      fttmr010->tick_rate,
+				      300, 32, clocksource_mmio_readl_down);
+		sched_clock_register(fttmr010_read_sched_clock_down, 32,
+				     fttmr010->tick_rate);
+	} else {
+		writel(0, fttmr010->base + TIMER2_LOAD);
+		clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
+				      "FTTMR010-TIMER2",
+				      fttmr010->tick_rate,
+				      300, 32, clocksource_mmio_readl_up);
+		sched_clock_register(fttmr010_read_sched_clock_up, 32,
+				     fttmr010->tick_rate);
+	}
+
+	/*
+	 * Setup clockevent timer (interrupt-driven) on timer 1.
+	 */
+	writel(0, fttmr010->base + TIMER1_COUNT);
+	writel(0, fttmr010->base + TIMER1_LOAD);
+	writel(0, fttmr010->base + TIMER1_MATCH1);
+	writel(0, fttmr010->base + TIMER1_MATCH2);
+	ret = request_irq(irq, fttmr010_timer_interrupt, IRQF_TIMER,
+			  "FTTMR010-TIMER1", &fttmr010->clkevt);
+	if (ret) {
+		pr_err("FTTMR010-TIMER1 failed to request IRQ\n");
+		goto out_unmap;
+	}
+
+	fttmr010->clkevt.name = "FTTMR010-TIMER1";
+	/* Reasonably fast and accurate clock event */
+	fttmr010->clkevt.rating = 300;
+	fttmr010->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
+		CLOCK_EVT_FEAT_ONESHOT;
+	fttmr010->clkevt.set_next_event = fttmr010_timer_set_next_event;
+	fttmr010->clkevt.set_state_shutdown = fttmr010_timer_shutdown;
+	fttmr010->clkevt.set_state_periodic = fttmr010_timer_set_periodic;
+	fttmr010->clkevt.set_state_oneshot = fttmr010_timer_set_oneshot;
+	fttmr010->clkevt.tick_resume = fttmr010_timer_shutdown;
+	fttmr010->clkevt.cpumask = cpumask_of(0);
+	fttmr010->clkevt.irq = irq;
+	clockevents_config_and_register(&fttmr010->clkevt,
+					fttmr010->tick_rate,
+					1, 0xffffffff);
+
+#ifdef CONFIG_ARM
+	/* Also use this timer for delays */
+	if (fttmr010->count_down)
+		fttmr010->delay_timer.read_current_timer =
+			fttmr010_read_current_timer_down;
+	else
+		fttmr010->delay_timer.read_current_timer =
+			fttmr010_read_current_timer_up;
+	fttmr010->delay_timer.freq = fttmr010->tick_rate;
+	register_current_timer_delay(&fttmr010->delay_timer);
+#endif
+
+	return 0;
+
+out_unmap:
+	iounmap(fttmr010->base);
+out_free:
+	kfree(fttmr010);
+out_disable_clock:
+	clk_disable_unprepare(clk);
+
+	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(gemini, "cortina,gemini-timer", gemini_timer_of_init);
+
+static __init int aspeed_timer_init(struct device_node *np)
+{
+	return fttmr010_common_init(np, true);
+}
+
+static __init int fttmr010_timer_init(struct device_node *np)
+{
+	return fttmr010_common_init(np, false);
+}
+
+TIMER_OF_DECLARE(fttmr010, "faraday,fttmr010", fttmr010_timer_init);
+TIMER_OF_DECLARE(gemini, "cortina,gemini-timer", fttmr010_timer_init);
+TIMER_OF_DECLARE(moxart, "moxa,moxart-timer", fttmr010_timer_init);
+TIMER_OF_DECLARE(ast2400, "aspeed,ast2400-timer", aspeed_timer_init);
+TIMER_OF_DECLARE(ast2500, "aspeed,ast2500-timer", aspeed_timer_init);
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index f595460..6ec6d79 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -545,15 +545,15 @@ static int __init imx6dl_timer_init_dt(struct device_node *np)
 	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
 }
 
-CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
-CLOCKSOURCE_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);
+TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
+TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
+TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
+TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
+TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
+TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
+TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 04ad306..2ff64d9 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -232,5 +232,5 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
+TIMER_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
 		       integrator_ap_timer_init_of);
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index ab68a47..0eee032 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -226,5 +226,5 @@ static int __init keystone_timer_init(struct device_node *np)
 	return error;
 }
 
-CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
+TIMER_OF_DECLARE(keystone_timer, "ti,keystone-timer",
 			   keystone_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index e74ea17..7b6bb0d 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -110,9 +110,9 @@ static int __init nps_setup_clocksource(struct device_node *node)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+TIMER_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
 		       nps_setup_clocksource);
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+TIMER_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
 		       nps_setup_clocksource);
 
 #ifdef CONFIG_EZNPS_MTM_EXT
@@ -279,6 +279,6 @@ static int __init nps_setup_clockevent(struct device_node *node)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+TIMER_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
 		       nps_setup_clockevent);
 #endif /* CONFIG_EZNPS_MTM_EXT */
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
new file mode 100644
index 0000000..f6e7491
--- /dev/null
+++ b/drivers/clocksource/timer-of.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017, Linaro Ltd.  All rights reserved.
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
+
+static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+{
+	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
+
+	struct clock_event_device *clkevt = &to->clkevt;
+
+	of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+		free_irq(of_irq->irq, clkevt);
+}
+
+static __init int timer_irq_init(struct device_node *np,
+				 struct of_timer_irq *of_irq)
+{
+	int ret;
+	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
+	struct clock_event_device *clkevt = &to->clkevt;
+
+	of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name) :
+		irq_of_parse_and_map(np, of_irq->index);
+	if (of_irq->irq <= 0) {
+		pr_err("Failed to map interrupt for %s\n", np->full_name);
+		return -EINVAL;
+	}
+
+	ret = of_irq->percpu ?
+		request_percpu_irq(of_irq->irq, of_irq->handler,
+				   np->full_name, clkevt) :
+		request_irq(of_irq->irq, of_irq->handler,
+			    of_irq->flags ? of_irq->flags : IRQF_TIMER,
+			    np->full_name, clkevt);
+	if (ret) {
+		pr_err("Failed to request irq %d for %s\n", of_irq->irq,
+		       np->full_name);
+		return ret;
+	}
+
+	clkevt->irq = of_irq->irq;
+
+	return 0;
+}
+
+static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+{
+	of_clk->rate = 0;
+	clk_disable_unprepare(of_clk->clk);
+	clk_put(of_clk->clk);
+}
+
+static __init int timer_clk_init(struct device_node *np,
+				 struct of_timer_clk *of_clk)
+{
+	int ret;
+
+	of_clk->clk = of_clk->name ? of_clk_get_by_name(np, of_clk->name) :
+		of_clk_get(np, of_clk->index);
+	if (IS_ERR(of_clk->clk)) {
+		pr_err("Failed to get clock for %s\n", np->full_name);
+		return PTR_ERR(of_clk->clk);
+	}
+
+	ret = clk_prepare_enable(of_clk->clk);
+	if (ret) {
+		pr_err("Failed to enable clock for %s\n", np->full_name);
+		goto out_clk_put;
+	}
+
+	of_clk->rate = clk_get_rate(of_clk->clk);
+	if (!of_clk->rate) {
+		ret = -EINVAL;
+		pr_err("Failed to get clock rate for %s\n", np->full_name);
+		goto out_clk_disable;
+	}
+
+	of_clk->period = DIV_ROUND_UP(of_clk->rate, HZ);
+out:
+	return ret;
+
+out_clk_disable:
+	clk_disable_unprepare(of_clk->clk);
+out_clk_put:
+	clk_put(of_clk->clk);
+
+	goto out;
+}
+
+static __init void timer_base_exit(struct of_timer_base *of_base)
+{
+	iounmap(of_base->base);
+}
+
+static __init int timer_base_init(struct device_node *np,
+				  struct of_timer_base *of_base)
+{
+	const char *name = of_base->name ? of_base->name : np->full_name;
+
+	of_base->base = of_io_request_and_map(np, of_base->index, name);
+	if (IS_ERR(of_base->base)) {
+		pr_err("Failed to iomap (%s)\n", name);
+		return PTR_ERR(of_base->base);
+	}
+
+	return 0;
+}
+
+int __init timer_of_init(struct device_node *np, struct timer_of *to)
+{
+	int ret = -EINVAL;
+	int flags = 0;
+
+	if (to->flags & TIMER_OF_BASE) {
+		ret = timer_base_init(np, &to->of_base);
+		if (ret)
+			goto out_fail;
+		flags |= TIMER_OF_BASE;
+	}
+
+	if (to->flags & TIMER_OF_CLOCK) {
+		ret = timer_clk_init(np, &to->of_clk);
+		if (ret)
+			goto out_fail;
+		flags |= TIMER_OF_CLOCK;
+	}
+
+	if (to->flags & TIMER_OF_IRQ) {
+		ret = timer_irq_init(np, &to->of_irq);
+		if (ret)
+			goto out_fail;
+		flags |= TIMER_OF_IRQ;
+	}
+
+	if (!to->clkevt.name)
+		to->clkevt.name = np->name;
+	return ret;
+
+out_fail:
+	if (flags & TIMER_OF_IRQ)
+		timer_irq_exit(&to->of_irq);
+
+	if (flags & TIMER_OF_CLOCK)
+		timer_clk_exit(&to->of_clk);
+
+	if (flags & TIMER_OF_BASE)
+		timer_base_exit(&to->of_base);
+	return ret;
+}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
new file mode 100644
index 0000000..e0d7272
--- /dev/null
+++ b/drivers/clocksource/timer-of.h
@@ -0,0 +1,69 @@
+#ifndef __TIMER_OF_H__
+#define __TIMER_OF_H__
+
+#include <linux/clockchips.h>
+
+#define TIMER_OF_BASE	0x1
+#define TIMER_OF_CLOCK	0x2
+#define TIMER_OF_IRQ	0x4
+
+struct of_timer_irq {
+	int irq;
+	int index;
+	int percpu;
+	const char *name;
+	unsigned long flags;
+	irq_handler_t handler;
+};
+
+struct of_timer_base {
+	void __iomem *base;
+	const char *name;
+	int index;
+};
+
+struct of_timer_clk {
+	struct clk *clk;
+	const char *name;
+	int index;
+	unsigned long rate;
+	unsigned long period;
+};
+
+struct timer_of {
+	unsigned int flags;
+	struct clock_event_device clkevt;
+	struct of_timer_base of_base;
+	struct of_timer_irq  of_irq;
+	struct of_timer_clk  of_clk;
+	void *private_data;
+};
+
+static inline struct timer_of *to_timer_of(struct clock_event_device *clkevt)
+{
+	return container_of(clkevt, struct timer_of, clkevt);
+}
+
+static inline void __iomem *timer_of_base(struct timer_of *to)
+{
+	return to->of_base.base;
+}
+
+static inline int timer_of_irq(struct timer_of *to)
+{
+	return to->of_irq.irq;
+}
+
+static inline unsigned long timer_of_rate(struct timer_of *to)
+{
+	return to->of_clk.rate;
+}
+
+static inline unsigned long timer_of_period(struct timer_of *to)
+{
+	return to->of_clk.period;
+}
+
+extern int __init timer_of_init(struct device_node *np,
+				struct timer_of *to);
+#endif
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
index d630bf4..eed6fef 100644
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -293,7 +293,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(ox810se_rps,
+TIMER_OF_DECLARE(ox810se_rps,
 		       "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
-CLOCKSOURCE_OF_DECLARE(ox820_rps,
+TIMER_OF_DECLARE(ox820_rps,
 		       "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index b4122ed..20ff33b 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -245,5 +245,5 @@ static int __init sirfsoc_prima2_timer_init(struct device_node *np)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
+TIMER_OF_DECLARE(sirfsoc_prima2_timer,
 	"sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/clksrc-probe.c b/drivers/clocksource/timer-probe.c
similarity index 72%
rename from drivers/clocksource/clksrc-probe.c
rename to drivers/clocksource/timer-probe.c
index ac701ff..da81e5d 100644
--- a/drivers/clocksource/clksrc-probe.c
+++ b/drivers/clocksource/timer-probe.c
@@ -19,20 +19,20 @@
 #include <linux/of.h>
 #include <linux/clocksource.h>
 
-extern struct of_device_id __clksrc_of_table[];
+extern struct of_device_id __timer_of_table[];
 
-static const struct of_device_id __clksrc_of_table_sentinel
-	__used __section(__clksrc_of_table_end);
+static const struct of_device_id __timer_of_table_sentinel
+	__used __section(__timer_of_table_end);
 
-void __init clocksource_probe(void)
+void __init timer_probe(void)
 {
 	struct device_node *np;
 	const struct of_device_id *match;
 	of_init_fn_1_ret init_func_ret;
-	unsigned clocksources = 0;
+	unsigned timers = 0;
 	int ret;
 
-	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+	for_each_matching_node_and_match(np, __timer_of_table, &match) {
 		if (!of_device_is_available(np))
 			continue;
 
@@ -45,11 +45,11 @@ void __init clocksource_probe(void)
 			continue;
 		}
 
-		clocksources++;
+		timers++;
 	}
 
-	clocksources += acpi_probe_device_table(clksrc);
+	timers += acpi_probe_device_table(timer);
 
-	if (!clocksources)
-		pr_crit("%s: no matching clocksources found\n", __func__);
+	if (!timers)
+		pr_crit("%s: no matching timers found\n", __func__);
 }
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 2d575a8c..3ac9dec 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -287,7 +287,7 @@ static int __init sp804_of_init(struct device_node *np)
 	iounmap(base);
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
+TIMER_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
 
 static int __init integrator_cp_of_init(struct device_node *np)
 {
@@ -335,4 +335,4 @@ static int __init integrator_cp_of_init(struct device_node *np)
 	iounmap(base);
 	return ret;
 }
-CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
+TIMER_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 1b2574c..174d1243 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -187,4 +187,4 @@ static int __init stm32_clockevent_init(struct device_node *np)
 	return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2e9c830..2a3fe83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -12,6 +12,7 @@
 
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
@@ -358,7 +359,7 @@ static int __init sun5i_timer_init(struct device_node *node)
 
 	return sun5i_setup_clockevent(node, timer_base, clk, irq);
 }
-CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
+TIMER_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
 			   sun5i_timer_init);
-CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
+TIMER_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
 			   sun5i_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 6240677..880a861 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -124,5 +124,5 @@ static int __init ti_32k_timer_init(struct device_node *np)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
+TIMER_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
 		ti_32k_timer_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 704e40c..be34b11 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -458,5 +458,5 @@ static int __init u300_timer_init_of(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
+TIMER_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
 		       u300_timer_init_of);
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 220b490..39725d3 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -38,7 +38,7 @@ static int __init versatile_sched_clock_init(struct device_node *node)
 
 	return 0;
 }
-CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
+TIMER_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
 		       versatile_sched_clock_init);
-CLOCKSOURCE_OF_DECLARE(versatile, "arm,versatile-sysreg",
+TIMER_OF_DECLARE(versatile, "arm,versatile-sysreg",
 		       versatile_sched_clock_init);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index e0849e2..0f92089 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -201,4 +201,4 @@ static int __init pit_timer_init(struct device_node *np)
 
 	return pit_clockevent_init(clk_rate, irq);
 }
-CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index d02b510..e0f7489 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -165,4 +165,4 @@ static int __init vt8500_timer_init(struct device_node *np)
 	return 0;
 }
 
-CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+TIMER_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index 9a53f5e..a6a0338 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -215,4 +215,4 @@ static int __init zevio_timer_init(struct device_node *node)
 	return zevio_timer_add(node);
 }
 
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
+TIMER_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e82bb3c..10be285 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -144,10 +144,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	cppc_dmi_max_khz = cppc_get_dmi_max_khz();
 
-	policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf;
+	/*
+	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+	 */
+	policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
+		cpu->perf_caps.highest_perf;
 	policy->max = cppc_dmi_max_khz;
-	policy->cpuinfo.min_freq = policy->min;
-	policy->cpuinfo.max_freq = policy->max;
+
+	/*
+	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
+	 * available if userspace wants to use any perf between lowest & lowest
+	 * nonlinear perf
+	 */
+	policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
+		cpu->perf_caps.highest_perf;
+	policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+
 	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
 	policy->shared_type = cpu->shared_type;
 
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 921b4a6..1c26292 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -31,6 +31,7 @@ static const struct of_device_id machines[] __initconst = {
 	{ .compatible = "arm,integrator-ap", },
 	{ .compatible = "arm,integrator-cp", },
 
+	{ .compatible = "hisilicon,hi3660", },
 	{ .compatible = "hisilicon,hi6220", },
 
 	{ .compatible = "fsl,imx27", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0e3f649..9bf97a3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -632,11 +632,21 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 
+__weak unsigned int arch_freq_get_on_cpu(int cpu)
+{
+	return 0;
+}
+
 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 {
 	ssize_t ret;
+	unsigned int freq;
 
-	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+	freq = arch_freq_get_on_cpu(policy->cpu);
+	if (freq)
+		ret = sprintf(buf, "%u\n", freq);
+	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
+			cpufreq_driver->get)
 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
 	else
 		ret = sprintf(buf, "%u\n", policy->cur);
@@ -887,7 +897,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	struct freq_attr *fattr = to_attr(attr);
 	ssize_t ret = -EINVAL;
 
-	get_online_cpus();
+	cpus_read_lock();
 
 	if (cpu_online(policy->cpu)) {
 		down_write(&policy->rwsem);
@@ -895,7 +905,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 		up_write(&policy->rwsem);
 	}
 
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return ret;
 }
@@ -2441,7 +2451,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	pr_debug("trying to register driver %s\n", driver_data->name);
 
 	/* Protect against concurrent CPU online/offline. */
-	get_online_cpus();
+	cpus_read_lock();
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
@@ -2468,14 +2478,16 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
 	    list_empty(&cpufreq_policy_list)) {
 		/* if all ->init() calls failed, unregister */
+		ret = -ENODEV;
 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
 			 driver_data->name);
 		goto err_if_unreg;
 	}
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-					cpuhp_cpufreq_online,
-					cpuhp_cpufreq_offline);
+	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+						   "cpufreq:online",
+						   cpuhp_cpufreq_online,
+						   cpuhp_cpufreq_offline);
 	if (ret < 0)
 		goto err_if_unreg;
 	hp_online = ret;
@@ -2493,7 +2505,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 out:
-	put_online_cpus();
+	cpus_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -2516,17 +2528,17 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	pr_debug("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
-	get_online_cpus();
+	cpus_read_lock();
 	subsys_interface_unregister(&cpufreq_interface);
 	remove_boost_sysfs_file();
-	cpuhp_remove_state_nocalls(hp_online);
+	cpuhp_remove_state_nocalls_cpuslocked(hp_online);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	cpufreq_driver = NULL;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return 0;
 }
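
show_scaling_cur_freq() now consults three sources in priority order: an architecture-provided estimate via the new weak arch_freq_get_on_cpu() hook, then the driver's ->get() callback (only for setpolicy drivers in the real code), then the cached policy->cur. A hedged userspace sketch of that ordering, with the driver checks collapsed into stubs and purely hypothetical frequencies:

#include <stdio.h>

/* stand-ins for the three sources, in the order the sysfs handler now
 * tries them; all values here are hypothetical */
static unsigned int arch_freq_get_on_cpu(int cpu) { (void)cpu; return 0; }
static unsigned int driver_get(int cpu)           { (void)cpu; return 1896000; }
static unsigned int policy_cur = 1800000;

static unsigned int scaling_cur_freq(int cpu)
{
	unsigned int freq = arch_freq_get_on_cpu(cpu);

	if (freq)
		return freq;		/* arch estimate wins when available */
	freq = driver_get(cpu);
	if (freq)
		return freq;		/* otherwise ask the driver */
	return policy_cur;		/* last resort: cached policy value */
}

int main(void)
{
	printf("scaling_cur_freq = %u kHz\n", scaling_cur_freq(0));
	return 0;
}
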
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 992f7c2..88220ff 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	/* cannot be lower than 11 otherwise freq will not fall */
-	if (ret != 1 || input < 11 || input > 100 ||
+	/* cannot be lower than 1 otherwise freq will not fall */
+	if (ret != 1 || input < 1 || input > 100 ||
 			input >= dbs_data->up_threshold)
 		return -EINVAL;
 
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 9180d34..b6b369c 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -173,12 +173,12 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
 	/* Enable PSTATE Change Event */
 	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
 	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
-	 __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
 
 	/* Enable PSTATE Change IRQ */
 	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
 	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
-	 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
 
 	/* Set initial performance index */
 	cpufreq_for_each_entry(pos, freq_table)
@@ -330,7 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 	struct resource res;
 	unsigned int cur_frequency;
 
-	np =  pdev->dev.of_node;
+	np = pdev->dev.of_node;
 	if (!np)
 		return -ENODEV;
 
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9c13f09..b6edd3c 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -101,7 +101,8 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	if (of_machine_is_compatible("fsl,imx6ul")) {
+	if (of_machine_is_compatible("fsl,imx6ul") ||
+	    of_machine_is_compatible("fsl,imx6ull")) {
 		/*
 		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
 		 * CPU may run at higher than 528MHz, this will lead to
@@ -215,7 +216,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_clk;
 	}
 
-	if (of_machine_is_compatible("fsl,imx6ul")) {
+	if (of_machine_is_compatible("fsl,imx6ul") ||
+	    of_machine_is_compatible("fsl,imx6ull")) {
 		pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
 		secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
 		if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7de5bd..48a98f1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -231,10 +231,8 @@ struct global_params {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *			current sample
  * @sample:		Storage for storing last Sample data
- * @min_perf:		Minimum capacity limit as a fraction of the maximum
- *			turbo P-state capacity.
- * @max_perf:		Maximum capacity limit as a fraction of the maximum
- *			turbo P-state capacity.
+ * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
+ * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
  * @epp_powersave:	Last saved HWP energy performance preference
@@ -266,8 +264,8 @@ struct cpudata {
 	u64	prev_tsc;
 	u64	prev_cummulative_iowait;
 	struct sample sample;
-	int32_t	min_perf;
-	int32_t	max_perf;
+	int32_t	min_perf_ratio;
+	int32_t	max_perf_ratio;
 #ifdef CONFIG_ACPI
 	struct acpi_processor_performance acpi_perf_data;
 	bool valid_pss_table;
@@ -571,9 +569,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
 	struct cpudata *cpu = all_cpu_data[0];
+	int turbo_pstate = cpu->pstate.turbo_pstate;
 
-	return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-			    cpu->pstate.turbo_pstate);
+	return turbo_pstate ?
+		DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
@@ -652,6 +651,12 @@ static const char * const energy_perf_strings[] = {
 	"power",
 	NULL
 };
+static const unsigned int epp_values[] = {
+	HWP_EPP_PERFORMANCE,
+	HWP_EPP_BALANCE_PERFORMANCE,
+	HWP_EPP_BALANCE_POWERSAVE,
+	HWP_EPP_POWERSAVE
+};
 
 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 {
@@ -663,17 +668,14 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 		return epp;
 
 	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
-		/*
-		 * Range:
-		 *	0x00-0x3F	:	Performance
-		 *	0x40-0x7F	:	Balance performance
-		 *	0x80-0xBF	:	Balance power
-		 *	0xC0-0xFF	:	Power
-		 * The EPP is a 8 bit value, but our ranges restrict the
-		 * value which can be set. Here only using top two bits
-		 * effectively.
-		 */
-		index = (epp >> 6) + 1;
+		if (epp == HWP_EPP_PERFORMANCE)
+			return 1;
+		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
+			return 2;
+		if (epp <= HWP_EPP_BALANCE_POWERSAVE)
+			return 3;
+		else
+			return 4;
 	} else if (static_cpu_has(X86_FEATURE_EPB)) {
 		/*
 		 * Range:
@@ -711,15 +713,8 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
 		value &= ~GENMASK_ULL(31, 24);
 
-		/*
-		 * If epp is not default, convert from index into
-		 * energy_perf_strings to epp value, by shifting 6
-		 * bits left to use only top two bits in epp.
-		 * The resultant epp need to shifted by 24 bits to
-		 * epp position in MSR_HWP_REQUEST.
-		 */
 		if (epp == -EINVAL)
-			epp = (pref_index - 1) << 6;
+			epp = epp_values[pref_index - 1];
 
 		value |= (u64)epp << 24;
 		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
@@ -793,25 +788,32 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 	NULL,
 };
 
+static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+				     int *current_max)
+{
+	u64 cap;
+
+	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+	if (global.no_turbo)
+		*current_max = HWP_GUARANTEED_PERF(cap);
+	else
+		*current_max = HWP_HIGHEST_PERF(cap);
+
+	*phy_max = HWP_HIGHEST_PERF(cap);
+}
+
 static void intel_pstate_hwp_set(unsigned int cpu)
 {
 	struct cpudata *cpu_data = all_cpu_data[cpu];
-	int min, hw_min, max, hw_max;
-	u64 value, cap;
+	int max, min;
+	u64 value;
 	s16 epp;
 
-	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
-	hw_min = HWP_LOWEST_PERF(cap);
-	if (global.no_turbo)
-		hw_max = HWP_GUARANTEED_PERF(cap);
-	else
-		hw_max = HWP_HIGHEST_PERF(cap);
+	max = cpu_data->max_perf_ratio;
+	min = cpu_data->min_perf_ratio;
 
-	max = fp_ext_toint(hw_max * cpu_data->max_perf);
 	if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
 		min = max;
-	else
-		min = fp_ext_toint(hw_max * cpu_data->min_perf);
 
 	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
@@ -1527,8 +1529,7 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
 
 	update_turbo_state();
 	pstate = intel_pstate_get_base_pstate(cpu);
-	pstate = max(cpu->pstate.min_pstate,
-		     fp_ext_toint(pstate * cpu->max_perf));
+	pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 	intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1615,9 +1616,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;
 
-	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
-		return cpu->pstate.turbo_pstate;
-
 	busy_frac = div_fp(sample->mperf, sample->tsc);
 
 	boost = cpu->iowait_boost;
@@ -1654,9 +1652,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
-	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
-		return cpu->pstate.turbo_pstate;
-
 	/*
 	 * perf_scaled is the ratio of the average P-state during the last
 	 * sampling period to the P-state requested last time (in percent).
@@ -1694,9 +1689,8 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 	int max_pstate = intel_pstate_get_base_pstate(cpu);
 	int min_pstate;
 
-	min_pstate = max(cpu->pstate.min_pstate,
-			 fp_ext_toint(max_pstate * cpu->min_perf));
-	max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+	min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+	max_pstate = max(min_pstate, cpu->max_perf_ratio);
 	return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
@@ -1732,16 +1726,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
 		fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_pstate_update_util_hwp(struct update_util_data *data,
-					 u64 time, unsigned int flags)
-{
-	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
-	u64 delta_ns = time - cpu->sample.time;
-
-	if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
-		intel_pstate_sample(cpu, time);
-}
-
 static void intel_pstate_update_util_pid(struct update_util_data *data,
 					 u64 time, unsigned int flags)
 {
@@ -1933,6 +1917,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
+	if (hwp_active)
+		return;
+
 	if (cpu->update_util_set)
 		return;
 
@@ -1966,52 +1953,61 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 {
 	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
+	int max_state, turbo_max;
 
-	max_policy_perf = div_ext_fp(policy->max, max_freq);
-	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
+	/*
+	 * HWP needs some special consideration, because on BDX the
+	 * HWP_REQUEST uses an abstract value to represent performance
+	 * rather than pure ratios.
+	 */
+	if (hwp_active) {
+		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+	} else {
+		max_state = intel_pstate_get_base_pstate(cpu);
+		turbo_max = cpu->pstate.turbo_pstate;
+	}
+
+	max_policy_perf = max_state * policy->max / max_freq;
 	if (policy->max == policy->min) {
 		min_policy_perf = max_policy_perf;
 	} else {
-		min_policy_perf = div_ext_fp(policy->min, max_freq);
+		min_policy_perf = max_state * policy->min / max_freq;
 		min_policy_perf = clamp_t(int32_t, min_policy_perf,
 					  0, max_policy_perf);
 	}
 
+	pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
+		 policy->cpu, max_state,
+		 min_policy_perf, max_policy_perf);
+
 	/* Normalize user input to [min_perf, max_perf] */
 	if (per_cpu_limits) {
-		cpu->min_perf = min_policy_perf;
-		cpu->max_perf = max_policy_perf;
+		cpu->min_perf_ratio = min_policy_perf;
+		cpu->max_perf_ratio = max_policy_perf;
 	} else {
 		int32_t global_min, global_max;
 
 		/* Global limits are in percent of the maximum turbo P-state. */
-		global_max = percent_ext_fp(global.max_perf_pct);
-		global_min = percent_ext_fp(global.min_perf_pct);
-		if (max_freq != cpu->pstate.turbo_freq) {
-			int32_t turbo_factor;
-
-			turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
-						  cpu->pstate.max_pstate);
-			global_min = mul_ext_fp(global_min, turbo_factor);
-			global_max = mul_ext_fp(global_max, turbo_factor);
-		}
+		global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+		global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
 		global_min = clamp_t(int32_t, global_min, 0, global_max);
 
-		cpu->min_perf = max(min_policy_perf, global_min);
-		cpu->min_perf = min(cpu->min_perf, max_policy_perf);
-		cpu->max_perf = min(max_policy_perf, global_max);
-		cpu->max_perf = max(min_policy_perf, cpu->max_perf);
+		pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+			 global_min, global_max);
+
+		cpu->min_perf_ratio = max(min_policy_perf, global_min);
+		cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
+		cpu->max_perf_ratio = min(max_policy_perf, global_max);
+		cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
 
 		/* Make sure min_perf <= max_perf */
-		cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
+		cpu->min_perf_ratio = min(cpu->min_perf_ratio,
+					  cpu->max_perf_ratio);
+
 	}
-
-	cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
-	cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
-
-	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
-		 fp_ext_toint(cpu->max_perf * 100),
-		 fp_ext_toint(cpu->min_perf * 100));
+	pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+		 cpu->max_perf_ratio,
+		 cpu->min_perf_ratio);
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
@@ -2038,10 +2034,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		 */
 		intel_pstate_clear_update_util_hook(policy->cpu);
 		intel_pstate_max_within_limits(cpu);
+	} else {
+		intel_pstate_set_update_util_hook(policy->cpu);
 	}
 
-	intel_pstate_set_update_util_hook(policy->cpu);
-
 	if (hwp_active)
 		intel_pstate_hwp_set(policy->cpu);
 
@@ -2114,8 +2110,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	cpu->max_perf = int_ext_tofp(1);
-	cpu->min_perf = 0;
+	cpu->max_perf_ratio = 0xFF;
+	cpu->min_perf_ratio = 0;
 
 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2557,7 +2553,6 @@ static int __init intel_pstate_init(void)
 		} else {
 			hwp_active++;
 			intel_pstate.attr = hwp_cpufreq_attrs;
-			pstate_funcs.update_util = intel_pstate_update_util_hwp;
 			goto hwp_cpu_matched;
 		}
 	} else {
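
The EPP handling above replaces the "shift the top two bits" scheme with explicit comparisons and a lookup table, so non-boundary hardware values still land in the right preference bucket. A standalone sketch of that mapping; the numeric constants follow the usual MSR_HWP_REQUEST encoding (0x00..0xFF) but are restated here only for illustration:

#include <stdio.h>

enum {
	HWP_EPP_PERFORMANCE         = 0x00,
	HWP_EPP_BALANCE_PERFORMANCE = 0x80,
	HWP_EPP_BALANCE_POWERSAVE   = 0xC0,
	HWP_EPP_POWERSAVE           = 0xFF,
};

static const unsigned int epp_values[] = {
	HWP_EPP_PERFORMANCE,
	HWP_EPP_BALANCE_PERFORMANCE,
	HWP_EPP_BALANCE_POWERSAVE,
	HWP_EPP_POWERSAVE,
};

/* raw EPP -> energy_perf_strings index, mirroring the new comparisons */
static int epp_to_index(unsigned int epp)
{
	if (epp == HWP_EPP_PERFORMANCE)
		return 1;
	if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
		return 2;
	if (epp <= HWP_EPP_BALANCE_POWERSAVE)
		return 3;
	return 4;
}

int main(void)
{
	unsigned int epp;

	for (epp = 0x00; epp <= 0xC0; epp += 0x40)
		printf("epp 0x%02x -> index %d -> written back as 0x%02x\n",
		       epp, epp_to_index(epp), epp_values[epp_to_index(epp) - 1]);
	return 0;
}
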
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 1b9bcd7..c2dd43f 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 		return PTR_ERR(priv.cpu_clk);
 	}
 
-	clk_prepare_enable(priv.cpu_clk);
+	err = clk_prepare_enable(priv.cpu_clk);
+	if (err) {
+		dev_err(priv.dev, "Unable to prepare cpuclk\n");
+		return err;
+	}
+
 	kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
 
 	priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 		goto out_cpu;
 	}
 
-	clk_prepare_enable(priv.ddr_clk);
+	err = clk_prepare_enable(priv.ddr_clk);
+	if (err) {
+		dev_err(priv.dev, "Unable to prepare ddrclk\n");
+		goto out_cpu;
+	}
 	kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
 
 	priv.powersave_clk = of_clk_get_by_name(np, "powersave");
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 		err = PTR_ERR(priv.powersave_clk);
 		goto out_ddr;
 	}
-	clk_prepare_enable(priv.powersave_clk);
+	err = clk_prepare_enable(priv.powersave_clk);
+	if (err) {
+		dev_err(priv.dev, "Unable to prepare powersave clk\n");
+		goto out_ddr;
+	}
 
 	of_node_put(np);
 	np = NULL;
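
The probe path now checks clk_prepare_enable() for each of the three clocks instead of ignoring its return value. A minimal kernel-style sketch of the pattern; the helper name is illustrative and not part of the driver:

#include <linux/clk.h>
#include <linux/device.h>

static int hypothetical_enable_clk(struct device *dev, struct clk *clk,
				   const char *name)
{
	int err = clk_prepare_enable(clk);

	if (err)
		dev_err(dev, "Unable to prepare %s\n", name);
	return err;	/* caller unwinds previously enabled clocks on error */
}
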
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 35dd4d7..b257fc7 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -226,7 +226,7 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	 * We don't support CPU hotplug. Don't unmap after the system
 	 * has already made it to a running state.
 	 */
-	if (system_state != SYSTEM_BOOTING)
+	if (system_state >= SYSTEM_RUNNING)
 		return 0;
 
 	if (sdcasr_mapbase)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index ea7a4e1..8de2364 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -30,46 +30,20 @@
 
 static struct scpi_ops *scpi_ops;
 
-static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
-{
-	int domain = topology_physical_package_id(cpu_dev->id);
-
-	if (domain < 0)
-		return ERR_PTR(-EINVAL);
-	return scpi_ops->dvfs_get_info(domain);
-}
-
 static int scpi_get_transition_latency(struct device *cpu_dev)
 {
-	struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
-
-	if (IS_ERR(info))
-		return PTR_ERR(info);
-	return info->latency;
+	return scpi_ops->get_transition_latency(cpu_dev);
 }
 
 static int scpi_init_opp_table(const struct cpumask *cpumask)
 {
-	int idx, ret;
-	struct scpi_opp *opp;
+	int ret;
 	struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
-	struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
 
-	if (IS_ERR(info))
-		return PTR_ERR(info);
-
-	if (!info->opps)
-		return -EIO;
-
-	for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
-		ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
-		if (ret) {
-			dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
-				 opp->freq, opp->m_volt);
-			while (idx-- > 0)
-				dev_pm_opp_remove(cpu_dev, (--opp)->freq);
-			return ret;
-		}
+	ret = scpi_ops->add_opps_to_device(cpu_dev);
+	if (ret) {
+		dev_warn(cpu_dev, "failed to add opps to the device\n");
+		return ret;
 	}
 
 	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index 992ce6f..3779742 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -24,7 +24,7 @@
 
 #include <asm/msr.h>
 
-struct cpufreq_frequency_table *freq_table;
+static struct cpufreq_frequency_table *freq_table;
 static struct sfi_freq_table_entry *sfi_cpufreq_array;
 static int num_freq_table_entries;
 
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0..f521448 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -4,6 +4,7 @@
 config ARM_CPUIDLE
         bool "Generic ARM/ARM64 CPU idle Driver"
         select DT_IDLE_STATES
+	select CPU_IDLE_MULTIPLE_DRIVERS
         help
           Select this to enable generic cpuidle driver for ARM.
           It provides a generic idle driver whose idle states are configured
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index f440d38..7080c38 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
 	return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 }
 
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
 	.name = "arm_idle",
 	.owner = THIS_MODULE,
 	/*
@@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
 static int __init arm_idle_init(void)
 {
 	int cpu, ret;
-	struct cpuidle_driver *drv = &arm_idle_driver;
+	struct cpuidle_driver *drv;
 	struct cpuidle_device *dev;
 
-	/*
-	 * Initialize idle states data, starting at index 1.
-	 * This driver is DT only, if no DT idle states are detected (ret == 0)
-	 * let the driver initialization fail accordingly since there is no
-	 * reason to initialize the idle driver if only wfi is supported.
-	 */
-	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
-	if (ret <= 0)
-		return ret ? : -ENODEV;
-
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		pr_err("Failed to register cpuidle driver\n");
-		return ret;
-	}
-
-	/*
-	 * Call arch CPU operations in order to initialize
-	 * idle states suspend back-end specific data
-	 */
 	for_each_possible_cpu(cpu) {
+
+		drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+		if (!drv) {
+			ret = -ENOMEM;
+			goto out_fail;
+		}
+
+		drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+		/*
+		 * Initialize idle states data, starting at index 1.  This
+		 * driver is DT only, if no DT idle states are detected (ret
+		 * == 0) let the driver initialization fail accordingly since
+		 * there is no reason to initialize the idle driver if only
+		 * wfi is supported.
+		 */
+		ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+		if (ret <= 0) {
+			ret = ret ? : -ENODEV;
+			goto out_fail;
+		}
+
+		ret = cpuidle_register_driver(drv);
+		if (ret) {
+			pr_err("Failed to register cpuidle driver\n");
+			goto out_fail;
+		}
+
+		/*
+		 * Call arch CPU operations in order to initialize
+		 * idle states suspend back-end specific data
+		 */
 		ret = arm_cpuidle_init(cpu);
 
 		/*
@@ -141,10 +154,11 @@ static int __init arm_idle_init(void)
 		dev = per_cpu(cpuidle_devices, cpu);
 		cpuidle_unregister_device(dev);
 		kfree(dev);
+		drv = cpuidle_get_driver();
+		cpuidle_unregister_driver(drv);
+		kfree(drv);
 	}
 
-	cpuidle_unregister_driver(drv);
-
 	return ret;
 }
 device_initcall(arm_idle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2706be7..60bb64f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -220,6 +220,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	entered_state = target_state->enter(dev, drv, index);
 	start_critical_timings();
 
+	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ffca4fc..ae8eb03 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 		if (!state_node)
 			break;
 
-		if (!of_device_is_available(state_node))
+		if (!of_device_is_available(state_node)) {
+			of_node_put(state_node);
 			continue;
+		}
 
 		if (!idle_state_valid(state_node, i, cpumask)) {
 			pr_warn("%s idle state not valid, bailing out\n",
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b2330fd..61b64c2 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -286,6 +286,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	struct device *device = get_cpu_device(dev->cpu);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 	int i;
+	int first_idx;
+	int idx;
 	unsigned int interactivity_req;
 	unsigned int expected_interval;
 	unsigned long nr_iowaiters, cpu_load;
@@ -335,11 +337,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		if (data->next_timer_us > polling_threshold &&
 		    latency_req > s->exit_latency && !s->disabled &&
 		    !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
-			data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+			first_idx = CPUIDLE_DRIVER_STATE_START;
 		else
-			data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+			first_idx = CPUIDLE_DRIVER_STATE_START - 1;
 	} else {
-		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+		first_idx = 0;
 	}
 
 	/*
@@ -359,20 +361,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
-	for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
+	idx = -1;
+	for (i = first_idx; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
 		if (s->disabled || su->disable)
 			continue;
+		if (idx == -1)
+			idx = i; /* first enabled state */
 		if (s->target_residency > data->predicted_us)
 			break;
 		if (s->exit_latency > latency_req)
 			break;
 
-		data->last_state_idx = i;
+		idx = i;
 	}
 
+	if (idx == -1)
+		idx = 0; /* No states enabled. Must use 0. */
+
+	data->last_state_idx = idx;
+
 	return data->last_state_idx;
 }
 
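The reworked selection loop keeps the candidate index in a local variable: it remembers the first enabled state, stops at the first state whose target residency or exit latency exceeds the constraints, and falls back to index 0 if every state is disabled. A standalone sketch with hypothetical idle states:

#include <stdio.h>

struct state { unsigned int target_residency_us, exit_latency_us; int disabled; };

static int select_state(const struct state *s, int count,
			unsigned int predicted_us, unsigned int latency_req)
{
	int i, idx = -1;

	for (i = 0; i < count; i++) {
		if (s[i].disabled)
			continue;
		if (idx == -1)
			idx = i;			/* first enabled state */
		if (s[i].target_residency_us > predicted_us)
			break;
		if (s[i].exit_latency_us > latency_req)
			break;
		idx = i;
	}
	return idx == -1 ? 0 : idx;	/* no states enabled: must use 0 */
}

int main(void)
{
	/* hypothetical idle states: WFI, a retention state, a power-down state */
	struct state states[] = {
		{ 1, 1, 0 }, { 100, 50, 0 }, { 2000, 500, 0 },
	};

	printf("selected state %d\n",
	       select_state(states, 3, /*predicted_us=*/400, /*latency_req=*/1000));
	return 0;
}
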
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fb1e60f..9c7951b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -89,6 +89,20 @@
 	  requires to have at least one CEX card in coprocessor mode
 	  available at runtime.
 
+config CRYPTO_PAES_S390
+	tristate "PAES cipher algorithms"
+	depends on S390
+	depends on ZCRYPT
+	depends on PKEY
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	help
+	  This is the s390 hardware accelerated implementation of the
+	  AES cipher algorithms for use with protected key.
+
+	  Select this option if you want to use the paes cipher
+	  for example to use protected key encrypted devices.
+
 config CRYPTO_SHA1_S390
 	tristate "SHA1 digest algorithm"
 	depends on S390
@@ -137,7 +151,6 @@
 	depends on S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
-	select PKEY
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  AES cipher algorithms (FIPS-197).
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6ed32aa..922d082 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -210,9 +210,12 @@ EXPORT_SYMBOL_GPL(kill_dax);
 static struct inode *dax_alloc_inode(struct super_block *sb)
 {
 	struct dax_device *dax_dev;
+	struct inode *inode;
 
 	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
-	return &dax_dev->inode;
+	inode = &dax_dev->inode;
+	inode->i_rdev = 0;
+	return inode;
 }
 
 static struct dax_device *to_dax_dev(struct inode *inode)
@@ -227,7 +230,8 @@ static void dax_i_callback(struct rcu_head *head)
 
 	kfree(dax_dev->host);
 	dax_dev->host = NULL;
-	ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+	if (inode->i_rdev)
+		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
 	kmem_cache_free(dax_cache, dax_dev);
 }
 
@@ -423,6 +427,7 @@ static void init_once(void *_dax_dev)
 	struct dax_device *dax_dev = _dax_dev;
 	struct inode *inode = &dax_dev->inode;
 
+	memset(dax_dev, 0, sizeof(*dax_dev));
 	inode_init_once(inode);
 }
 
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5c3e7b1..f6e7956 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev)
 	}
 	platform_set_drvdata(pdev, nocp);
 
-	clk_prepare_enable(nocp->clk);
+	ret = clk_prepare_enable(nocp->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare nocp clock\n");
+		return ret;
+	}
 
 	pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
 			dev_name(dev));
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 9b73509..d96e3dc 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -44,7 +44,7 @@ struct exynos_ppmu {
 	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
 	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
 
-struct __exynos_ppmu_events {
+static struct __exynos_ppmu_events {
 	char *name;
 	int id;
 } ppmu_events[] = {
@@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
 			dev_name(&pdev->dev), desc[i].name);
 	}
 
-	clk_prepare_enable(info->ppmu.clk);
+	ret = clk_prepare_enable(info->ppmu.clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
+		return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d37e8dd..ec24059 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
 	struct dma_device	dma_dev;
 	bool			m2m;
 	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 	void			(*hw_submit)(struct ep93xx_dma_chan *);
 	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 		| M2P_CONTROL_ENABLE;
 	m2p_set_control(edmac, control);
 
+	edmac->buffer = 0;
+
 	return 0;
 }
 
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+	unsigned long flags;
 	u32 control;
 
+	spin_lock_irqsave(&edmac->lock, flags);
 	control = readl(edmac->regs + M2P_CONTROL);
 	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 	m2p_set_control(edmac, control);
+	spin_unlock_irqrestore(&edmac->lock, flags);
 
 	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-		cpu_relax();
+		schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
 	m2p_set_control(edmac, 0);
 
-	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-		cpu_relax();
+	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1161,6 +1170,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 }
 
 /**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+	if (edmac->edma->hw_synchronize)
+		edmac->edma->hw_synchronize(edmac);
+}
+
+/**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
  *
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
 	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_synchronize = ep93xx_dma_synchronize;
 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 	} else {
 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+		edma->hw_synchronize = m2p_hw_synchronize;
 		edma->hw_setup = m2p_hw_setup;
 		edma->hw_shutdown = m2p_hw_shutdown;
 		edma->hw_submit = m2p_hw_submit;
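
With the new hw_synchronize hook wired into device_synchronize, dmaengine clients can rely on the standard non-blocking teardown sequence described in the kernel-doc above. A hedged sketch of that client-side pattern; the function name is illustrative:

#include <linux/dmaengine.h>

static void hypothetical_channel_teardown(struct dma_chan *chan)
{
	/* stop the channel without sleeping (usable from atomic context) ... */
	dmaengine_terminate_async(chan);

	/* ... then, from process context, wait until ep93xx_dma_synchronize()
	 * guarantees all descriptors and their callbacks have finished */
	dmaengine_synchronize(chan);

	/* now descriptor memory and callback resources may be freed */
}
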
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index a28a01f..f3e211f 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
 	struct mv_xor_v2_sw_desc *sw_desq;
 	int desc_size;
 	unsigned int npendings;
+	unsigned int hw_queue_idx;
 };
 
 /**
@@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
 }
 
 /*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-	/* read the index for the next available descriptor in the DESQ */
-	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
-/*
  * notify the engine of new descriptors, and update the available index.
  */
 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
 	return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-	u32 reg;
-
-	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-	reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
 	struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 	if (!ndescs)
 		return IRQ_NONE;
 
-	/*
-	 * Update IMSG threshold, to disable new IMSG interrupts until
-	 * end of the tasklet
-	 */
-	mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
 	/* schedule a tasklet to handle descriptors callbacks */
 	tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	int desq_ptr;
 	void *dest_hw_desc;
 	dma_cookie_t cookie;
 	struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_bh(&xor_dev->lock);
 	cookie = dma_cookie_assign(tx);
 
-	/* get the next available slot in the DESQ */
-	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
 	/* copy the HW descriptor from the SW descriptor to the DESQ */
-	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
 	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
 	xor_dev->npendings++;
+	xor_dev->hw_queue_idx++;
+	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+		xor_dev->hw_queue_idx = 0;
 
 	spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc	*
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
 	struct mv_xor_v2_sw_desc *sw_desc;
+	bool found = false;
 
 	/* Lock the channel */
 	spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 		return NULL;
 	}
 
-	/* get a free SW descriptor from the SW DESQ */
-	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-				   struct mv_xor_v2_sw_desc, free_list);
+	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+		if (async_tx_test_ack(&sw_desc->async_tx)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_bh(&xor_dev->lock);
+		return NULL;
+	}
+
 	list_del(&sw_desc->free_list);
 
 	/* Release the channel */
 	spin_unlock_bh(&xor_dev->lock);
 
-	/* set the async tx descriptor */
-	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-	async_tx_ack(&sw_desc->async_tx);
-
 	return sw_desc;
 }
 
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 		__func__, len, &src, &dest, flags);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		__func__, src_cnt, len, &dest, flags);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 		container_of(chan, struct mv_xor_v2_device, dmachan);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	/* set the HW descriptor */
 	hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
 	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
 	int pending_ptr, num_of_pending, i;
-	struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
 	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
 	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
 	/* get the pending descriptors parameters */
 	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-	/* next HW descriptor */
-	next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
 	/* loop over free descriptors */
 	for (i = 0; i < num_of_pending; i++) {
-
-		if (pending_ptr > MV_XOR_V2_DESC_NUM)
-			pending_ptr = 0;
-
-		if (next_pending_sw_desc != NULL)
-			next_pending_hw_desc++;
+		struct mv_xor_v2_descriptor *next_pending_hw_desc =
+			xor_dev->hw_desq_virt + pending_ptr;
 
 		/* get the SW descriptor related to the HW descriptor */
 		next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
 
 		/* increment the next descriptor */
 		pending_ptr++;
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+			pending_ptr = 0;
 	}
 
 	if (num_of_pending != 0) {
 		/* free the descriptores */
 		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
 	}
-
-	/* Update IMSG threshold, to enable new IMSG interrupts */
-	mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-	/* enable the DMA engine */
-	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
 	/*
 	 * This is a temporary solution, until we activate the
 	 * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
 	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+	/* enable the DMA engine */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
 	return 0;
 }
 
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, xor_dev);
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
 	/* add all SW descriptors to the free list */
 	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-		xor_dev->sw_desq[i].idx = i;
-		list_add(&xor_dev->sw_desq[i].free_list,
+		struct mv_xor_v2_sw_desc *sw_desc =
+			xor_dev->sw_desq + i;
+		sw_desc->idx = i;
+		dma_async_tx_descriptor_init(&sw_desc->async_tx,
+					     &xor_dev->dmachan);
+		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+		async_tx_ack(&sw_desc->async_tx);
+
+		list_add(&sw_desc->free_list,
 			 &xor_dev->free_sw_desc);
 	}
 
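The submit path above now maintains its own DESQ cursor in software rather than reading the hardware allocation pointer back on every submission. A tiny standalone sketch of the wrap-around logic with a hypothetical queue depth (the driver uses MV_XOR_V2_DESC_NUM):

#include <stdio.h>

#define DESC_NUM 4	/* hypothetical queue depth */

int main(void)
{
	unsigned int hw_queue_idx = 0, i;

	/* advance the ring cursor and wrap at the queue depth */
	for (i = 0; i < 6; i++) {
		printf("descriptor %u goes to DESQ slot %u\n", i, hw_queue_idx);
		if (++hw_queue_idx >= DESC_NUM)
			hw_queue_idx = 0;
	}
	return 0;
}
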
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index daf479c..8c1665c 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -916,12 +916,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		return NULL;
 	}
 
-	/* When the port_window is used, one frame must cover the window */
-	if (port_window) {
-		burst = port_window;
-		port_window_bytes = port_window * es_bytes[es];
-	}
-
 	/* Now allocate and setup the descriptor. */
 	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
 	if (!d)
@@ -931,6 +925,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	d->dev_addr = dev_addr;
 	d->es = es;
 
+	/* When the port_window is used, one frame must cover the window */
+	if (port_window) {
+		burst = port_window;
+		port_window_bytes = port_window * es_bytes[es];
+
+		d->ei = 1;
+		/*
+		 * One frame covers the port_window and by configuring
+		 * the source frame index to be -1 * (port_window - 1)
+		 * we instruct the sDMA that after a frame is processed
+		 * it should move back to the start of the window.
+		 */
+		d->fi = -(port_window_bytes - 1);
+	}
+
 	d->ccr = c->ccr | CCR_SYNC_FRAME;
 	if (dir == DMA_DEV_TO_MEM) {
 		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
@@ -955,14 +964,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		if (port_window) {
 			d->ccr |= CCR_DST_AMODE_DBLIDX;
-			d->ei = 1;
-			/*
-			 * One frame covers the port_window and by  configure
-			 * the source frame index to be -1 * (port_window - 1)
-			 * we instruct the sDMA that after a frame is processed
-			 * it should move back to the start of the window.
-			 */
-			d->fi = -(port_window_bytes - 1);
 
 			if (port_window_bytes >= 64)
 				d->csdp |= CSDP_DST_BURST_64;
@@ -1018,16 +1019,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		osg->addr = sg_dma_address(sgent);
 		osg->en = en;
 		osg->fn = sg_dma_len(sgent) / frame_bytes;
-		if (port_window && dir == DMA_DEV_TO_MEM) {
-			osg->ei = 1;
-			/*
-			 * One frame covers the port_window and by  configure
-			 * the source frame index to be -1 * (port_window - 1)
-			 * we instruct the sDMA that after a frame is processed
-			 * it should move back to the start of the window.
-			 */
-			osg->fi = -(port_window_bytes - 1);
-		}
 
 		if (d->using_ll) {
 			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 8b0da7f..e90a7a0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev)
 
 	for (i = 0; i < AMBA_NR_IRQS; i++) {
 		irq = adev->irq[i];
-		devm_free_irq(&adev->dev, irq, pl330);
+		if (irq)
+			devm_free_irq(&adev->dev, irq, pl330);
 	}
 
 	dma_async_device_unregister(&pl330->ddma);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index db41795..bd261c9 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	if (desc->hwdescs.use) {
 		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
 			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		if (dptr == 0)
+			dptr = desc->nchunks;
+		dptr--;
 		WARN_ON(dptr >= desc->nchunks);
 	} else {
 		running = desc->running;
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 72c6497..31a1451 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR			0x0008
 #define USB_DMASWR_SWR			(1 << 0)
 #define USB_DMAOR			0x0060
-#define USB_DMAOR_AE			(1 << 2)
+#define USB_DMAOR_AE			(1 << 1)
 #define USB_DMAOR_DME			(1 << 0)
 
 #define USB_DMASAR			0x0000
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 7717b09..db75d4b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -214,24 +214,16 @@ static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
 static unsigned long get_total_mem(void)
 {
 	struct device_node *np = NULL;
-	const unsigned int *reg, *reg_end;
-	int len, sw, aw;
-	unsigned long start, size, total_mem = 0;
+	struct resource res;
+	int ret;
+	unsigned long total_mem = 0;
 
 	for_each_node_by_type(np, "memory") {
-		aw = of_n_addr_cells(np);
-		sw = of_n_size_cells(np);
-		reg = (const unsigned int *)of_get_property(np, "reg", &len);
-		reg_end = reg + (len / sizeof(u32));
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret)
+			continue;
 
-		total_mem = 0;
-		do {
-			start = of_read_number(reg, aw);
-			reg += aw;
-			size = of_read_number(reg, sw);
-			reg += sw;
-			total_mem += size;
-		} while (reg < reg_end);
+		total_mem += resource_size(&res);
 	}
 	edac_dbg(0, "total_mem 0x%lx\n", total_mem);
 	return total_mem;
@@ -1839,7 +1831,7 @@ static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-static struct irq_domain_ops a10_eccmgr_ic_ops = {
+static const struct irq_domain_ops a10_eccmgr_ic_ops = {
 	.map = a10_eccmgr_irqdomain_map,
 	.xlate = irq_domain_xlate_twocell,
 };
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index f683919..8f5a56e 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -227,7 +227,7 @@
 #define			NREC_RDWR(x)		(((x)>>11) & 1)
 #define			NREC_RANK(x)		(((x)>>8) & 0x7)
 #define		NRECMEMB		0xC0
-#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			NREC_CAS(x)		(((x)>>16) & 0xFFF)
 #define			NREC_RAS(x)		((x) & 0x7FFF)
 #define		NRECFGLOG		0xC4
 #define		NREEECFBDA		0xC8
@@ -371,7 +371,7 @@ struct i5000_error_info {
 	/* These registers are input ONLY if there was a
 	 * Non-Recoverable Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
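
The non-recoverable log-B register is 32 bits wide, so it is now read with pci_read_config_dword() and the CAS field masked down to 12 bits. A standalone sketch of the corrected field extraction with a hypothetical register value:

#include <stdio.h>
#include <stdint.h>

/* CAS in bits 27:16 (12 bits), RAS in bits 14:0, per the corrected macros */
#define NREC_CAS(x)	(((x) >> 16) & 0xFFF)
#define NREC_RAS(x)	((x) & 0x7FFF)

int main(void)
{
	uint32_t nrecmemb = 0x0ABC1234;	/* hypothetical NRECMEMB contents */

	printf("CAS=0x%03lx RAS=0x%04lx\n",
	       (unsigned long)NREC_CAS(nrecmemb),
	       (unsigned long)NREC_RAS(nrecmemb));
	return 0;
}
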
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 37a9ba7..cd889ed 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -368,7 +368,7 @@ struct i5400_error_info {
 
 	/* These registers are input ONLY if there was a Non-Rec Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 2733fb5..4260579 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -18,10 +18,12 @@
  * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
  * 0c08: Xeon E3-1200 v3 Processor DRAM Controller
  * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
+ * 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
  *
  * Based on Intel specification:
  * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
  * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ * http://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
  *
  * According to the above datasheet (p.16):
  * "
@@ -57,6 +59,7 @@
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918
 
 #define IE31200_DIMMS			4
 #define IE31200_RANKS			8
@@ -376,7 +379,12 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
 	void __iomem *window;
 	struct ie31200_priv *priv;
 	u32 addr_decode, mad_offset;
-	bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8);
+
+	/*
+	 * Kaby Lake seems to work like Skylake. Please re-visit this logic
+	 * when adding new CPU support.
+	 */
+	bool skl = (pdev->device >= PCI_DEVICE_ID_INTEL_IE31200_HB_8);
 
 	edac_dbg(0, "MC:\n");
 
@@ -560,6 +568,9 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
 		PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		IE31200},
 	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
 		0,
 	}            /* 0 terminated list. */
 };
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ba35b7e..9a2658a 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -161,7 +161,7 @@ static const char * const smca_ls_mce_desc[] = {
 	"Sys Read data error thread 0",
 	"Sys read data error thread 1",
 	"DC tag error type 2",
-	"DC data error type 1 (poison comsumption)",
+	"DC data error type 1 (poison consumption)",
 	"DC data error type 2",
 	"DC data error type 3",
 	"DC tag error type 4",
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 14b7e7b..d3650df 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -32,21 +32,21 @@ static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
 	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
 	u32 cause;
 
-	cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+	cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
 	if (!cause)
 		return;
 
 	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
 	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
 	printk(KERN_ERR "Address Low: 0x%08x\n",
-	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
 	printk(KERN_ERR "Address High: 0x%08x\n",
-	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
 	printk(KERN_ERR "Attribute: 0x%08x\n",
-	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
 	printk(KERN_ERR "Command: 0x%08x\n",
-	       in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
-	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+	writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
 
 	if (cause & MV64X60_PCI_PE_MASK)
 		edac_pci_handle_pe(pci, pci->ctl_name);
@@ -61,7 +61,7 @@ static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
 	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
 	u32 val;
 
-	val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+	val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
 	if (!val)
 		return IRQ_NONE;
 
@@ -93,7 +93,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
 	if (!pci_serr)
 		return -ENOMEM;
 
-	out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
+	writel(readl(pci_serr) & ~0x1, pci_serr);
 	iounmap(pci_serr);
 
 	return 0;
@@ -116,7 +116,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
 	pdata = pci->pvt_info;
 
 	pdata->pci_hose = pdev->id;
-	pdata->name = "mpc85xx_pci_err";
+	pdata->name = "mv64x60_pci_err";
 	platform_set_drvdata(pdev, pci);
 	pci->dev = &pdev->dev;
 	pci->dev_name = dev_name(&pdev->dev);
@@ -161,10 +161,10 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
-	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
-	out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
-		 MV64X60_PCIx_ERR_MASK_VAL);
+	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
+	writel(MV64X60_PCIx_ERR_MASK_VAL,
+		  pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
 
 	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
 		edac_dbg(3, "failed edac_pci_add_device()\n");
@@ -233,23 +233,23 @@ static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
 	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
 	u32 cause;
 
-	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
 	if (!cause)
 		return;
 
 	printk(KERN_ERR "Error in internal SRAM\n");
 	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
 	printk(KERN_ERR "Address Low: 0x%08x\n",
-	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
 	printk(KERN_ERR "Address High: 0x%08x\n",
-	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
 	printk(KERN_ERR "Data Low: 0x%08x\n",
-	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
 	printk(KERN_ERR "Data High: 0x%08x\n",
-	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
 	printk(KERN_ERR "Parity: 0x%08x\n",
-	       in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
-	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
 
 	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
 }
@@ -260,7 +260,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
 	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
 	u32 cause;
 
-	cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
 	if (!cause)
 		return IRQ_NONE;
 
@@ -322,7 +322,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev)
 	}
 
 	/* setup SRAM err registers */
-	out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
 
 	edac_dev->mod_name = EDAC_MOD_STR;
 	edac_dev->ctl_name = pdata->name;
@@ -398,7 +398,7 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
 	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
 	u32 cause;
 
-	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
 	    MV64x60_CPU_CAUSE_MASK;
 	if (!cause)
 		return;
@@ -406,16 +406,16 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
 	printk(KERN_ERR "Error on CPU interface\n");
 	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
 	printk(KERN_ERR "Address Low: 0x%08x\n",
-	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
 	printk(KERN_ERR "Address High: 0x%08x\n",
-	       in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
 	printk(KERN_ERR "Data Low: 0x%08x\n",
-	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
 	printk(KERN_ERR "Data High: 0x%08x\n",
-	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
 	printk(KERN_ERR "Parity: 0x%08x\n",
-	       in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
-	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
 
 	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
 }
@@ -426,7 +426,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
 	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
 	u32 cause;
 
-	cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
 	    MV64x60_CPU_CAUSE_MASK;
 	if (!cause)
 		return IRQ_NONE;
@@ -515,9 +515,9 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev)
 	}
 
 	/* setup CPU err registers */
-	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
-	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
-	out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
+	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
+	writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
 
 	edac_dev->mod_name = EDAC_MOD_STR;
 	edac_dev->ctl_name = pdata->name;
@@ -596,13 +596,13 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
 	u32 comp_ecc;
 	u32 syndrome;
 
-	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
 	if (!reg)
 		return;
 
 	err_addr = reg & ~0x3;
-	sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
-	comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+	sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+	comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
 	syndrome = sdram_ecc ^ comp_ecc;
 
 	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
@@ -620,7 +620,7 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
 				     mci->ctl_name, "");
 
 	/* clear the error */
-	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
 }
 
 static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
@@ -629,7 +629,7 @@ static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
 	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
 	u32 reg;
 
-	reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
 	if (!reg)
 		return IRQ_NONE;
 
@@ -664,7 +664,7 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
 
 	get_total_mem(pdata);
 
-	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
 
 	csrow = mci->csrows[0];
 	dimm = csrow->channels[0]->dimm;
@@ -753,7 +753,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
 	if (!(ctl & MV64X60_SDRAM_ECC)) {
 		/* Non-ECC RAM? */
 		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
@@ -779,10 +779,10 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
 	mv64x60_init_csrows(mci, pdata);
 
 	/* setup MC registers */
-	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
-	ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
+	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
 	ctl = (ctl & 0xff00ffff) | 0x10000;
-	out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+	writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
 
 	res = edac_mc_add_mc(mci);
 	if (res) {
@@ -853,10 +853,10 @@ static struct platform_driver * const drivers[] = {
 
 static int __init mv64x60_edac_init(void)
 {
-	int ret = 0;
 
 	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
 	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+
 	/* make sure error reporting method is sane */
 	switch (edac_op_state) {
 	case EDAC_OPSTATE_POLL:
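
The mv64x60 hunks above replace the powerpc-only in_le32()/out_le32() accessors with the generic readl()/writel() helpers, which keep the little-endian register semantics but compile on any architecture. A rough sketch of the resulting read-modify-write idiom, not taken from the driver (the base pointer, the 0x44 offset and the field values are made-up placeholders):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: update one bitfield of a hypothetical control register
 * using the generic MMIO helpers, mirroring the pattern in the hunks above.
 */
static void example_rmw_ctl(void __iomem *base)
{
	u32 ctl;

	ctl = readl(base + 0x44);		/* read current value (little-endian) */
	ctl = (ctl & 0xff00ffff) | 0x10000;	/* modify one field */
	writel(ctl, base + 0x44);		/* write it back */
}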
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 1cad5a9..8e59949 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -131,7 +131,7 @@ static struct mem_ctl_info *pnd2_mci;
 
 #ifdef CONFIG_X86_INTEL_SBI_APL
 #include "linux/platform_data/sbi_apl.h"
-int sbi_send(int port, int off, int op, u32 *data)
+static int sbi_send(int port, int off, int op, u32 *data)
 {
 	struct sbi_apl_message sbi_arg;
 	int ret, read = 0;
@@ -160,7 +160,7 @@ int sbi_send(int port, int off, int op, u32 *data)
 	return ret;
 }
 #else
-int sbi_send(int port, int off, int op, u32 *data)
+static int sbi_send(int port, int off, int op, u32 *data)
 {
 	return -EUNATCH;
 }
@@ -168,14 +168,15 @@ int sbi_send(int port, int off, int op, u32 *data)
 
 static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
 {
-	int	ret = 0;
+	int ret = 0;
 
 	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
 	switch (sz) {
 	case 8:
 		ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
+		/* fall through */
 	case 4:
-		ret = sbi_send(port, off, op, (u32 *)data);
+		ret |= sbi_send(port, off, op, (u32 *)data);
 		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
 					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
 		break;
@@ -423,16 +424,21 @@ static void dnv_mk_region(char *name, struct region *rp, void *asym)
 
 static int apl_get_registers(void)
 {
+	int ret = -ENODEV;
 	int i;
 
 	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
 		return -ENODEV;
 
+	/*
+	 * RD_REGP() will fail for unpopulated or non-existent
+	 * DIMM slots. Return success if we find at least one DIMM.
+	 */
 	for (i = 0; i < APL_NUM_CHANNELS; i++)
-		if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
-			return -ENODEV;
+		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
+			ret = 0;
 
-	return 0;
+	return ret;
 }
 
 static int dnv_get_registers(void)
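
In the apl_rd_reg() hunk above, the size-8 case deliberately falls through to the size-4 case, so an 8-byte register is read as two 4-byte sbi_send() calls and the second return code is OR-ed into the first instead of overwriting it; the apl_get_registers() hunk likewise now reports success as long as at least one channel's drp0 register could be read. A stand-alone sketch of the fall-through/accumulate pattern, with a hypothetical read_half() standing in for sbi_send():

#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical 32-bit read helper, used only for this sketch. */
static int read_half(void *buf)
{
	*(u32 *)buf = 0;
	return 0;
}

/* Illustrative only: keep the error from either half of a split read. */
static int example_read_reg(void *data, size_t sz)
{
	int ret = 0;

	switch (sz) {
	case 8:
		ret = read_half(data + 4);	/* upper 32 bits */
		/* fall through */
	case 4:
		ret |= read_half(data);		/* lower 32 bits */
		break;
	default:
		return -EINVAL;
	}
	return ret;
}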
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ea21cb6..80d860c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -35,7 +35,7 @@ static LIST_HEAD(sbridge_edac_list);
 /*
  * Alter this version for the module when modifications are made
  */
-#define SBRIDGE_REVISION    " Ver: 1.1.1 "
+#define SBRIDGE_REVISION    " Ver: 1.1.2 "
 #define EDAC_MOD_STR      "sbridge_edac"
 
 /*
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
  * sbridge structs
  */
 
-#define NUM_CHANNELS		8	/* 2MC per socket, four chan per MC */
+#define NUM_CHANNELS		4	/* Max channels per MC */
 #define MAX_DIMMS		3	/* Max DIMMS per channel */
 #define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
@@ -294,6 +294,12 @@ enum type {
 	KNIGHTS_LANDING,
 };
 
+enum domain {
+	IMC0 = 0,
+	IMC1,
+	SOCK,
+};
+
 struct sbridge_pvt;
 struct sbridge_info {
 	enum type	type;
@@ -324,11 +330,14 @@ struct sbridge_channel {
 struct pci_id_descr {
 	int			dev_id;
 	int			optional;
+	enum domain		dom;
 };
 
 struct pci_id_table {
 	const struct pci_id_descr	*descr;
-	int				n_devs;
+	int				n_devs_per_imc;
+	int				n_devs_per_sock;
+	int				n_imcs_per_sock;
 	enum type			type;
 };
 
@@ -337,7 +346,9 @@ struct sbridge_dev {
 	u8			bus, mc;
 	u8			node_id, source_id;
 	struct pci_dev		**pdev;
+	enum domain		dom;
 	int			n_devs;
+	int			i_devs;
 	struct mem_ctl_info	*mci;
 };
 
@@ -352,11 +363,12 @@ struct knl_pvt {
 };
 
 struct sbridge_pvt {
-	struct pci_dev		*pci_ta, *pci_ddrio, *pci_ras;
+	/* Devices per socket */
+	struct pci_dev		*pci_ddrio;
 	struct pci_dev		*pci_sad0, *pci_sad1;
-	struct pci_dev		*pci_ha0, *pci_ha1;
 	struct pci_dev		*pci_br0, *pci_br1;
-	struct pci_dev		*pci_ha1_ta;
+	/* Devices per memory controller */
+	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
 	struct pci_dev		*pci_tad[NUM_CHANNELS];
 
 	struct sbridge_dev	*sbridge_dev;
@@ -373,39 +385,42 @@ struct sbridge_pvt {
 	struct knl_pvt knl;
 };
 
-#define PCI_DESCR(device_id, opt)	\
+#define PCI_DESCR(device_id, opt, domain)	\
 	.dev_id = (device_id),		\
-	.optional = opt
+	.optional = opt,	\
+	.dom = domain
 
 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 		/* Processor Home Agent */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },
 
 		/* Memory controller */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
 
 		/* System Address Decoder */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },
 
 		/* Broadcast Registers */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
 };
 
-#define PCI_ID_TABLE_ENTRY(A, T) {	\
+#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
 	.descr = A,			\
-	.n_devs = ARRAY_SIZE(A),	\
+	.n_devs_per_imc = N,	\
+	.n_devs_per_sock = ARRAY_SIZE(A),	\
+	.n_imcs_per_sock = M,	\
 	.type = T			\
 }
 
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -439,40 +454,39 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 
 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 		/* Processor Home Agent */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
 
 		/* Memory controller */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0)	},
-
-		/* System Address Decoder */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0)			},
-
-		/* Broadcast Registers */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1)			},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0)			},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
 
 		/* Optional, mode 2HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1)		},
-#if 0
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1)	},
-#endif
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
+
+		/* System Address Decoder */
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },
+
+		/* Broadcast Registers */
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },
+
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -498,9 +512,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
@@ -517,35 +531,33 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 static const struct pci_id_descr pci_dev_descr_haswell[] = {
 	/* first item must be the HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1)	},
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1)		},
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1)		},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -559,7 +571,7 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 /* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
-#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL  0x7843
+#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
 /* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 /* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
@@ -579,17 +591,17 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
  */
 
 static const struct pci_id_descr pci_dev_descr_knl[] = {
-	[0]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) },
-	[1]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) },
-	[2 ... 3]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)},
-	[4 ... 41]  = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) },
-	[42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) },
-	[48]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) },
-	[49]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) },
+	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
+	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
+	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
+	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
+	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
+	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
+	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
 	{0,}
 };
 
@@ -615,9 +627,9 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
@@ -632,32 +644,30 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {
 
 static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 	/* first item must be the HA */
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1)		},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
 
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1)	},
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1)	},
-
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1)	},
-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1)	},
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
+	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
+	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
 	{0,}			/* 0 terminated list. */
 };
 
@@ -709,7 +719,8 @@ static inline int numcol(u32 mtr)
 	return 1 << cols;
 }
 
-static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
+static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+					   struct sbridge_dev *prev)
 {
 	struct sbridge_dev *sbridge_dev;
 
@@ -722,16 +733,19 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
 				struct sbridge_dev, list);
 	}
 
-	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
-		if (sbridge_dev->bus == bus)
+	sbridge_dev = list_entry(prev ? prev->list.next
+				      : sbridge_edac_list.next, struct sbridge_dev, list);
+
+	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
+		if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
 			return sbridge_dev;
 	}
 
 	return NULL;
 }
 
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
-					   const struct pci_id_table *table)
+static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+					     const struct pci_id_table *table)
 {
 	struct sbridge_dev *sbridge_dev;
 
@@ -739,15 +753,17 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
 	if (!sbridge_dev)
 		return NULL;
 
-	sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
-				   GFP_KERNEL);
+	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
+				    sizeof(*sbridge_dev->pdev),
+				    GFP_KERNEL);
 	if (!sbridge_dev->pdev) {
 		kfree(sbridge_dev);
 		return NULL;
 	}
 
 	sbridge_dev->bus = bus;
-	sbridge_dev->n_devs = table->n_devs;
+	sbridge_dev->dom = dom;
+	sbridge_dev->n_devs = table->n_devs_per_imc;
 	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 
 	return sbridge_dev;
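
The get_sbridge_dev() change above takes the previously returned entry and resumes the scan from it with list_for_each_entry_from(), so one bus can now yield several per-IMC entries instead of always the first match; alloc_sbridge_dev() correspondingly records the domain and sizes the pdev array per IMC. A reduced sketch of the resume-from-previous lookup pattern over a generic list (struct item and find_next_on_bus() are placeholders, not the driver's types):

#include <linux/list.h>

struct item {
	struct list_head list;
	int bus;
};

static LIST_HEAD(items);

/* Illustrative only: return the next entry on 'bus' after 'prev', or the
 * first matching entry when prev is NULL; NULL when nothing is left.
 */
static struct item *find_next_on_bus(int bus, struct item *prev)
{
	struct item *it = list_entry(prev ? prev->list.next : items.next,
				     struct item, list);

	list_for_each_entry_from(it, &items, list)
		if (it->bus == bus)
			return it;

	return NULL;
}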
@@ -1044,79 +1060,6 @@ static int haswell_chan_hash(int idx, u64 addr)
 	return idx;
 }
 
-/****************************************************************************
-			Memory check routines
- ****************************************************************************/
-static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
-{
-	struct pci_dev *pdev = NULL;
-
-	do {
-		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
-		if (pdev && pdev->bus->number == bus)
-			break;
-	} while (pdev);
-
-	return pdev;
-}
-
-/**
- * check_if_ecc_is_active() - Checks if ECC is active
- * @bus:	Device bus
- * @type:	Memory controller type
- * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
- *	    disabled
- */
-static int check_if_ecc_is_active(const u8 bus, enum type type)
-{
-	struct pci_dev *pdev = NULL;
-	u32 mcmtr, id;
-
-	switch (type) {
-	case IVY_BRIDGE:
-		id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
-		break;
-	case HASWELL:
-		id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
-		break;
-	case SANDY_BRIDGE:
-		id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
-		break;
-	case BROADWELL:
-		id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
-		break;
-	case KNIGHTS_LANDING:
-		/*
-		 * KNL doesn't group things by bus the same way
-		 * SB/IB/Haswell does.
-		 */
-		id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	if (type != KNIGHTS_LANDING)
-		pdev = get_pdev_same_bus(bus, id);
-	else
-		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0);
-
-	if (!pdev) {
-		sbridge_printk(KERN_ERR, "Couldn't find PCI device "
-					"%04x:%04x! on bus %02d\n",
-					PCI_VENDOR_ID_INTEL, id, bus);
-		return -ENODEV;
-	}
-
-	pci_read_config_dword(pdev,
-			type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr);
-	if (!IS_ECC_ENABLED(mcmtr)) {
-		sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
-		return -ENODEV;
-	}
-	return 0;
-}
-
 /* Low bits of TAD limit, and some metadata. */
 static const u32 knl_tad_dram_limit_lo[] = {
 	0x400, 0x500, 0x600, 0x700,
@@ -1587,25 +1530,13 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
 	return 0;
 }
 
-static int get_dimm_config(struct mem_ctl_info *mci)
+static void get_source_id(struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
-	struct dimm_info *dimm;
-	unsigned i, j, banks, ranks, rows, cols, npages;
-	u64 size;
 	u32 reg;
-	enum edac_type mode;
-	enum mem_type mtype;
-	int channels = pvt->info.type == KNIGHTS_LANDING ?
-		KNL_MAX_CHANNELS : NUM_CHANNELS;
-	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
 
-	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
-		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
-		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
-	}
 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
-			pvt->info.type == KNIGHTS_LANDING)
+	    pvt->info.type == KNIGHTS_LANDING)
 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
 	else
 		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
@@ -1614,50 +1545,19 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
 	else
 		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
+}
 
-	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
-	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
-		 pvt->sbridge_dev->mc,
-		 pvt->sbridge_dev->node_id,
-		 pvt->sbridge_dev->source_id);
-
-	/* KNL doesn't support mirroring or lockstep,
-	 * and is always closed page
-	 */
-	if (pvt->info.type == KNIGHTS_LANDING) {
-		mode = EDAC_S4ECD4ED;
-		pvt->is_mirrored = false;
-
-		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
-			return -1;
-	} else {
-		pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
-		if (IS_MIRROR_ENABLED(reg)) {
-			edac_dbg(0, "Memory mirror is enabled\n");
-			pvt->is_mirrored = true;
-		} else {
-			edac_dbg(0, "Memory mirror is disabled\n");
-			pvt->is_mirrored = false;
-		}
-
-		pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
-		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
-			edac_dbg(0, "Lockstep is enabled\n");
-			mode = EDAC_S8ECD8ED;
-			pvt->is_lockstep = true;
-		} else {
-			edac_dbg(0, "Lockstep is disabled\n");
-			mode = EDAC_S4ECD4ED;
-			pvt->is_lockstep = false;
-		}
-		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
-			edac_dbg(0, "address map is on closed page mode\n");
-			pvt->is_close_pg = true;
-		} else {
-			edac_dbg(0, "address map is on open page mode\n");
-			pvt->is_close_pg = false;
-		}
-	}
+static int __populate_dimms(struct mem_ctl_info *mci,
+			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
+			    enum edac_type mode)
+{
+	struct sbridge_pvt *pvt = mci->pvt_info;
+	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
+							 : NUM_CHANNELS;
+	unsigned int i, j, banks, ranks, rows, cols, npages;
+	struct dimm_info *dimm;
+	enum mem_type mtype;
+	u64 size;
 
 	mtype = pvt->info.get_memory_type(pvt);
 	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
@@ -1688,8 +1588,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 		}
 
 		for (j = 0; j < max_dimms_per_channel; j++) {
-			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
-				       i, j, 0);
+			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
 			if (pvt->info.type == KNIGHTS_LANDING) {
 				pci_read_config_dword(pvt->knl.pci_channel[i],
 					knl_mtr_reg, &mtr);
@@ -1699,6 +1598,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 			}
 			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
 			if (IS_DIMM_PRESENT(mtr)) {
+				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
+					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
+						       pvt->sbridge_dev->source_id,
+						       pvt->sbridge_dev->dom, i);
+					return -ENODEV;
+				}
 				pvt->channel[i].dimms++;
 
 				ranks = numrank(pvt->info.type, mtr);
@@ -1717,7 +1622,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 				npages = MiB_TO_PAGES(size);
 
 				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
-					 pvt->sbridge_dev->mc, i/4, i%4, j,
+					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
 					 size, npages,
 					 banks, ranks, rows, cols);
 
@@ -1727,8 +1632,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 				dimm->mtype = mtype;
 				dimm->edac_mode = mode;
 				snprintf(dimm->label, sizeof(dimm->label),
-					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
-					 pvt->sbridge_dev->source_id, i/4, i%4, j);
+						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
+						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
 			}
 		}
 	}
@@ -1736,6 +1641,65 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 	return 0;
 }
 
+static int get_dimm_config(struct mem_ctl_info *mci)
+{
+	struct sbridge_pvt *pvt = mci->pvt_info;
+	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+	enum edac_type mode;
+	u32 reg;
+
+	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+		pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
+		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+	}
+	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
+	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
+		 pvt->sbridge_dev->mc,
+		 pvt->sbridge_dev->node_id,
+		 pvt->sbridge_dev->source_id);
+
+	/* KNL doesn't support mirroring or lockstep,
+	 * and is always closed page
+	 */
+	if (pvt->info.type == KNIGHTS_LANDING) {
+		mode = EDAC_S4ECD4ED;
+		pvt->is_mirrored = false;
+
+		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
+			return -1;
+		pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr);
+	} else {
+		pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+		if (IS_MIRROR_ENABLED(reg)) {
+			edac_dbg(0, "Memory mirror is enabled\n");
+			pvt->is_mirrored = true;
+		} else {
+			edac_dbg(0, "Memory mirror is disabled\n");
+			pvt->is_mirrored = false;
+		}
+
+		pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
+			edac_dbg(0, "Lockstep is enabled\n");
+			mode = EDAC_S8ECD8ED;
+			pvt->is_lockstep = true;
+		} else {
+			edac_dbg(0, "Lockstep is disabled\n");
+			mode = EDAC_S4ECD4ED;
+			pvt->is_lockstep = false;
+		}
+		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
+			edac_dbg(0, "address map is on closed page mode\n");
+			pvt->is_close_pg = true;
+		} else {
+			edac_dbg(0, "address map is on open page mode\n");
+			pvt->is_close_pg = false;
+		}
+	}
+
+	return __populate_dimms(mci, knl_mc_sizes, mode);
+}
+
 static void get_memory_layout(const struct mem_ctl_info *mci)
 {
 	struct sbridge_pvt *pvt = mci->pvt_info;
@@ -1816,8 +1780,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 	 */
 	prv = 0;
 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
-		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
-				      &reg);
+		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
 		limit = TAD_LIMIT(reg);
 		if (limit <= prv)
 			break;
@@ -1899,12 +1862,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 	}
 }
 
-static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
 {
 	struct sbridge_dev *sbridge_dev;
 
 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
-		if (sbridge_dev->node_id == node_id)
+		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
 			return sbridge_dev->mci;
 	}
 	return NULL;
@@ -1925,7 +1888,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	int			interleave_mode, shiftup = 0;
 	unsigned		sad_interleave[pvt->info.max_interleave];
 	u32			reg, dram_rule;
-	u8			ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0;
+	u8			ch_way, sck_way, pkg, sad_ha = 0;
 	u32			tad_offset;
 	u32			rir_way;
 	u32			mb, gb;
@@ -2038,13 +2001,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
 		*socket = sad_pkg_socket(pkg);
 		sad_ha = sad_pkg_ha(pkg);
-		if (sad_ha)
-			ch_add = 4;
 
 		if (a7mode) {
 			/* MCChanShiftUpEnable */
-			pci_read_config_dword(pvt->pci_ha0,
-					      HASWELL_HASYSDEFEATURE2, &reg);
+			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
 			shiftup = GET_BITFIELD(reg, 22, 22);
 		}
 
@@ -2056,8 +2016,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
 		*socket = sad_pkg_socket(pkg);
 		sad_ha = sad_pkg_ha(pkg);
-		if (sad_ha)
-			ch_add = 4;
 		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
 			 idx, *socket, sad_ha);
 	}
@@ -2068,7 +2026,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Move to the proper node structure, in order to access the
 	 * right PCI registers
 	 */
-	new_mci = get_mci_for_node_id(*socket);
+	new_mci = get_mci_for_node_id(*socket, sad_ha);
 	if (!new_mci) {
 		sprintf(msg, "Struct for socket #%u wasn't initialized",
 			*socket);
@@ -2081,14 +2039,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Step 2) Get memory channel
 	 */
 	prv = 0;
-	if (pvt->info.type == SANDY_BRIDGE)
-		pci_ha = pvt->pci_ha0;
-	else {
-		if (sad_ha)
-			pci_ha = pvt->pci_ha1;
-		else
-			pci_ha = pvt->pci_ha0;
-	}
+	pci_ha = pvt->pci_ha;
 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
 		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
 		limit = TAD_LIMIT(reg);
@@ -2139,9 +2090,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	}
 	*channel_mask = 1 << base_ch;
 
-	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-				tad_ch_nilv_offset[n_tads],
-				&tad_offset);
+	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
 
 	if (pvt->is_mirrored) {
 		*channel_mask |= 1 << ((base_ch + 2) % 4);
@@ -2192,9 +2141,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 	 * Step 3) Decode rank
 	 */
 	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
-		pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-				      rir_way_limit[n_rir],
-				      &reg);
+		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
 
 		if (!IS_RIR_VALID(reg))
 			continue;
@@ -2222,9 +2169,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
 	idx %= 1 << rir_way;
 
-	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
-			      rir_offset[n_rir][idx],
-			      &reg);
+	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
 	*rank = RIR_RNK_TGT(pvt->info.type, reg);
 
 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
@@ -2277,10 +2222,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 				 const unsigned devno,
 				 const int multi_bus)
 {
-	struct sbridge_dev *sbridge_dev;
+	struct sbridge_dev *sbridge_dev = NULL;
 	const struct pci_id_descr *dev_descr = &table->descr[devno];
 	struct pci_dev *pdev = NULL;
 	u8 bus = 0;
+	int i = 0;
 
 	sbridge_printk(KERN_DEBUG,
 		"Seeking for: PCI ID %04x:%04x\n",
@@ -2311,9 +2257,14 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 	}
 	bus = pdev->bus->number;
 
-	sbridge_dev = get_sbridge_dev(bus, multi_bus);
+next_imc:
+	sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
 	if (!sbridge_dev) {
-		sbridge_dev = alloc_sbridge_dev(bus, table);
+
+		if (dev_descr->dom == SOCK)
+			goto out_imc;
+
+		sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
 		if (!sbridge_dev) {
 			pci_dev_put(pdev);
 			return -ENOMEM;
@@ -2321,7 +2272,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 		(*num_mc)++;
 	}
 
-	if (sbridge_dev->pdev[devno]) {
+	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
 		sbridge_printk(KERN_ERR,
 			"Duplicated device for %04x:%04x\n",
 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
@@ -2329,8 +2280,16 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
 		return -ENODEV;
 	}
 
-	sbridge_dev->pdev[devno] = pdev;
+	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
 
+	/* pdev belongs to more than one IMC, do extra gets */
+	if (++i > 1)
+		pci_dev_get(pdev);
+
+	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
+		goto next_imc;
+
+out_imc:
 	/* Be sure that the device is enabled */
 	if (unlikely(pci_enable_device(pdev) < 0)) {
 		sbridge_printk(KERN_ERR,
@@ -2374,7 +2333,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
 	if (table->type == KNIGHTS_LANDING)
 		allow_dups = multi_bus = 1;
 	while (table && table->descr) {
-		for (i = 0; i < table->n_devs; i++) {
+		for (i = 0; i < table->n_devs_per_sock; i++) {
 			if (!allow_dups || i == 0 ||
 					table->descr[i].dev_id !=
 						table->descr[i-1].dev_id) {
@@ -2385,7 +2344,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
 							   table, i, multi_bus);
 				if (rc < 0) {
 					if (i == 0) {
-						i = table->n_devs;
+						i = table->n_devs_per_sock;
 						break;
 					}
 					sbridge_put_all_devices();
@@ -2399,6 +2358,13 @@ static int sbridge_get_all_devices(u8 *num_mc,
 	return 0;
 }
 
+/*
+ * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
+ * the format: XXXa. So we can convert from a device to the corresponding
+ * channel like this
+ */
+#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
+
 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 				 struct sbridge_dev *sbridge_dev)
 {
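
TAD_DEV_TO_CHAN() works because the *_IMC_HA*_TAD0..TAD3 device IDs end in the consecutive nibbles 0xa..0xd, so masking the low nibble and subtracting 0xa yields the channel index directly. Worked example with the Haswell ID defined earlier in this patch (0x2faa for HA0_TAD0; the later TADs are assumed to follow consecutively, as the switch cases below require):

	TAD_DEV_TO_CHAN(0x2faa) = (0x2faa & 0xf) - 0xa = 0	/* HA0_TAD0 -> channel 0 */
	TAD_DEV_TO_CHAN(0x2fad) = (0x2fad & 0xf) - 0xa = 3	/* HA0_TAD3 -> channel 3 */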
@@ -2423,7 +2389,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_br0 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
 			pvt->pci_ta = pdev;
@@ -2436,7 +2402,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2455,7 +2421,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
 	}
 
 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
+	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
 	    !pvt->pci_ras || !pvt->pci_ta)
 		goto enodev;
 
@@ -2488,19 +2454,26 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 
 		switch (pdev->device) {
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
+		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2520,19 +2493,6 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
 			pvt->pci_br1 = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
-		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4;
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		default:
 			goto error;
 		}
@@ -2544,13 +2504,12 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 	}
 
 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
 	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
 		goto enodev;
 
-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;
 
@@ -2593,32 +2552,27 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_sad1 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
+		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0;
-
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4;
-
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2630,12 +2584,6 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 			if (!pvt->pci_ddrio)
 				pvt->pci_ddrio = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
-			pvt->pci_ha1_ta = pdev;
-			break;
 		default:
 			break;
 		}
@@ -2647,13 +2595,12 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
 	}
 
 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
 	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
 		goto enodev;
 
-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;
 
@@ -2690,30 +2637,27 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->pci_sad1 = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
-			pvt->pci_ha0 = pdev;
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
+			pvt->pci_ha = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
 			pvt->pci_ta = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
+		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
 			pvt->pci_ras = pdev;
 			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
-		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0;
-			pvt->pci_tad[id] = pdev;
-			saw_chan_mask |= 1 << id;
-		}
-			break;
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
 		{
-			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4;
+			int id = TAD_DEV_TO_CHAN(pdev->device);
 			pvt->pci_tad[id] = pdev;
 			saw_chan_mask |= 1 << id;
 		}
@@ -2721,12 +2665,6 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
 			pvt->pci_ddrio = pdev;
 			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
-			pvt->pci_ha1 = pdev;
-			break;
-		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
-			pvt->pci_ha1_ta = pdev;
-			break;
 		default:
 			break;
 		}
@@ -2738,13 +2676,12 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
 	}
 
 	/* Check if everything were registered */
-	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
 	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
 		goto enodev;
 
-	if (saw_chan_mask != 0x0f && /* -EN */
-	    saw_chan_mask != 0x33 && /* -EP */
-	    saw_chan_mask != 0xff)   /* -EX */
+	if (saw_chan_mask != 0x0f && /* -EN/-EX */
+	    saw_chan_mask != 0x03)   /* -EP */
 		goto enodev;
 	return 0;
 
@@ -2812,7 +2749,7 @@ static int knl_mci_bind_devs(struct mem_ctl_info *mci,
 			pvt->knl.pci_cha[devidx] = pdev;
 			break;
 
-		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL:
+		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
 			devidx = -1;
 
 			/*
@@ -3006,7 +2943,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 
 	if (rc < 0)
 		goto err_parsing;
-	new_mci = get_mci_for_node_id(socket);
+	new_mci = get_mci_for_node_id(socket, ha);
 	if (!new_mci) {
 		strcpy(msg, "Error: socket got corrupted!");
 		goto err_parsing;
@@ -3053,7 +2990,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 	/* Call the helper to output message */
 	edac_mc_handle_error(tp_event, mci, core_err_cnt,
 			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
-			     4*ha+channel, dimm, -1,
+			     channel, dimm, -1,
 			     optype, msg);
 	return;
 err_parsing:
@@ -3078,7 +3015,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
 	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
 		return NOTIFY_DONE;
 
-	mci = get_mci_for_node_id(mce->socketid);
+	mci = get_mci_for_node_id(mce->socketid, IMC0);
 	if (!mci)
 		return NOTIFY_DONE;
 	pvt = mci->pvt_info;
@@ -3159,11 +3096,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 	struct pci_dev *pdev = sbridge_dev->pdev[0];
 	int rc;
 
-	/* Check the number of active and not disabled channels */
-	rc = check_if_ecc_is_active(sbridge_dev->bus, type);
-	if (unlikely(rc < 0))
-		return rc;
-
 	/* allocate a new MC control structure */
 	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 	layers[0].size = type == KNIGHTS_LANDING ?
@@ -3192,7 +3124,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
 	mci->edac_ctl_cap = EDAC_FLAG_NONE;
 	mci->edac_cap = EDAC_FLAG_NONE;
-	mci->mod_name = "sbridge_edac.c";
+	mci->mod_name = "sb_edac.c";
 	mci->mod_ver = SBRIDGE_REVISION;
 	mci->dev_name = pci_name(pdev);
 	mci->ctl_page_to_phys = NULL;
@@ -3215,12 +3147,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = ibridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
 		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case SANDY_BRIDGE:
 		pvt->info.rankcfgr = SB_RANK_CFG_A;
@@ -3238,12 +3172,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
 		pvt->info.interleave_pkg = sbridge_interleave_pkg;
 		pvt->info.get_width = sbridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
 		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case HASWELL:
 		/* rankcfgr isn't used */
@@ -3261,12 +3197,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = ibridge_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
 		rc = haswell_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case BROADWELL:
 		/* rankcfgr isn't used */
@@ -3284,12 +3222,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = broadwell_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
 
 		/* Store pci devices at mci for faster access */
 		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	case KNIGHTS_LANDING:
 		/* pvt->info.rankcfgr == ??? */
@@ -3307,17 +3247,22 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 		pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
 		pvt->info.get_width = knl_get_width;
-		mci->ctl_name = kasprintf(GFP_KERNEL,
-			"Knights Landing Socket#%d", mci->mc_idx);
 
 		rc = knl_mci_bind_devs(mci, sbridge_dev);
 		if (unlikely(rc < 0))
 			goto fail0;
+		get_source_id(mci);
+		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
+			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
 		break;
 	}
 
 	/* Get dimm basic config and the memory layout */
-	get_dimm_config(mci);
+	rc = get_dimm_config(mci);
+	if (rc < 0) {
+		edac_dbg(0, "MC: failed to get_dimm_config()\n");
+		goto fail;
+	}
 	get_memory_layout(mci);
 
 	/* record ptr to the generic device */
@@ -3327,13 +3272,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
 	if (unlikely(edac_mc_add_mc(mci))) {
 		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
 		rc = -EINVAL;
-		goto fail0;
+		goto fail;
 	}
 
 	return 0;
 
-fail0:
+fail:
 	kfree(mci->ctl_name);
+fail0:
 	edac_mc_free(mci);
 	sbridge_dev->mci = NULL;
 	return rc;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 86d585c..2d352b4 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -2080,7 +2080,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev,
 	if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
 		l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
 
-		thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
+		ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
 					      l2c, dfs_entries);
 
 		if (ret != dfs_entries) {
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 32f2dc8..6d50071 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -115,6 +115,7 @@
 
 config EXTCON_QCOM_SPMI_MISC
 	tristate "Qualcomm USB extcon support"
+	depends on ARCH_QCOM || COMPILE_TEST
 	help
 	  Say Y here to enable SPMI PMIC based USB cable detection
 	  support on Qualcomm PMICs such as PM8941.
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index e2d78cd..f84da4a 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1271,9 +1271,7 @@ static int arizona_extcon_get_micd_configs(struct device *dev,
 		goto out;
 
 	nconfs /= entries_per_config;
-
-	micd_configs = devm_kzalloc(dev,
-				    nconfs * sizeof(struct arizona_micd_range),
+	micd_configs = devm_kcalloc(dev, nconfs, sizeof(*micd_configs),
 				    GFP_KERNEL);
 	if (!micd_configs) {
 		ret = -ENOMEM;
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index 9d17984..d9f9afe 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -94,8 +94,7 @@ static int int3496_probe(struct platform_device *pdev)
 	struct int3496_data *data;
 	int ret;
 
-	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
-					acpi_int3496_default_gpios);
+	ret = devm_acpi_dev_add_driver_gpios(dev, acpi_int3496_default_gpios);
 	if (ret) {
 		dev_err(dev, "can't add GPIO ACPI mapping\n");
 		return ret;
@@ -169,8 +168,6 @@ static int int3496_remove(struct platform_device *pdev)
 	devm_free_irq(&pdev->dev, data->usb_id_irq, data);
 	cancel_delayed_work_sync(&data->work);
 
-	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
-
 	return 0;
 }
 
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index f422a78..8eccf7b 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -964,12 +964,12 @@ EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
 
 /**
  * extcon_register_notifier_all() - Register a notifier block for all connectors
- * @edev:	the extcon device that has the external connecotr.
+ * @edev:	the extcon device that has the external connector.
  * @nb:		a notifier block to be registered.
  *
- * This fucntion registers a notifier block in order to receive the state
+ * This function registers a notifier block in order to receive the state
  * change of all supported external connectors from extcon device.
- * And The second parameter given to the callback of nb (val) is
+ * And the second parameter given to the callback of nb (val) is
  * the current state and third parameter is the edev pointer.
  *
  * Returns 0 if success or error number if fail
@@ -1252,9 +1252,8 @@ int extcon_dev_register(struct extcon_dev *edev)
 	}
 
 	spin_lock_init(&edev->lock);
-
-	edev->nh = devm_kzalloc(&edev->dev,
-			sizeof(*edev->nh) * edev->max_supported, GFP_KERNEL);
+	edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
+				sizeof(*edev->nh), GFP_KERNEL);
 	if (!edev->nh) {
 		ret = -ENOMEM;
 		goto err_dev;
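
The allocation changes above (devm_kzalloc(n * size) -> devm_kcalloc(n, size)) still return a zeroed, device-managed buffer, but the element-count multiplication is now overflow-checked by the allocator instead of wrapping silently. A minimal sketch of the idiom (the element type and count are placeholders):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct slot { int dummy; };	/* placeholder element type */

static int example_alloc(struct device *dev, unsigned int count)
{
	struct slot *slots;

	/* zeroed, overflow-checked, freed automatically on driver detach */
	slots = devm_kcalloc(dev, count, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	return 0;
}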
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index f6cfc31..8043e51 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -39,6 +39,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/printk.h>
+#include <linux/pm_opp.h>
 #include <linux/scpi_protocol.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
@@ -684,6 +685,65 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
 	return info;
 }
 
+static int scpi_dev_domain_id(struct device *dev)
+{
+	struct of_phandle_args clkspec;
+
+	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
+				       0, &clkspec))
+		return -EINVAL;
+
+	return clkspec.args[0];
+}
+
+static struct scpi_dvfs_info *scpi_dvfs_info(struct device *dev)
+{
+	int domain = scpi_dev_domain_id(dev);
+
+	if (domain < 0)
+		return ERR_PTR(domain);
+
+	return scpi_dvfs_get_info(domain);
+}
+
+static int scpi_dvfs_get_transition_latency(struct device *dev)
+{
+	struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	if (!info->latency)
+		return 0;
+
+	return info->latency;
+}
+
+static int scpi_dvfs_add_opps_to_device(struct device *dev)
+{
+	int idx, ret;
+	struct scpi_opp *opp;
+	struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
+
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	if (!info->opps)
+		return -EIO;
+
+	for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
+		ret = dev_pm_opp_add(dev, opp->freq, opp->m_volt * 1000);
+		if (ret) {
+			dev_warn(dev, "failed to add opp %uHz %umV\n",
+				 opp->freq, opp->m_volt);
+			while (idx-- > 0)
+				dev_pm_opp_remove(dev, (--opp)->freq);
+			return ret;
+		}
+	}
+	return 0;
+}
+
 static int scpi_sensor_get_capability(u16 *sensors)
 {
 	struct sensor_capabilities cap_buf;
@@ -765,6 +825,9 @@ static struct scpi_ops scpi_ops = {
 	.dvfs_get_idx = scpi_dvfs_get_idx,
 	.dvfs_set_idx = scpi_dvfs_set_idx,
 	.dvfs_get_info = scpi_dvfs_get_info,
+	.device_domain_id = scpi_dev_domain_id,
+	.get_transition_latency = scpi_dvfs_get_transition_latency,
+	.add_opps_to_device = scpi_dvfs_add_opps_to_device,
 	.sensor_get_capability = scpi_sensor_get_capability,
 	.sensor_get_info = scpi_sensor_get_info,
 	.sensor_get_value = scpi_sensor_get_value,
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 44c0139..951b6c7 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,		0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,	0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,	0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,		0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_family,	0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,		0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,		0444, DMI_BOARD_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(board_version,	0444, DMI_BOARD_VERSION);
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void)
 	ADD_DMI_ATTR(product_version,   DMI_PRODUCT_VERSION);
 	ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
 	ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
+	ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
 	ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
 	ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
 	ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 54be60e..7830419 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
 
 	buf = dmi_early_remap(dmi_base, orig_dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;
 
 	dmi_decode_table(buf, decode, NULL);
 
@@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
 	const char *d = (const char *) dm;
 	const char *p;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= string)
 		return;
 
 	p = dmi_string(dm, d[string]);
@@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;
 	int is_ff = 1, is_00 = 1, i;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index + 16)
 		return;
 
+	d = (u8 *) dm + index;
 	for (i = 0; i < 16 && (is_ff || is_00); i++) {
 		if (d[i] != 0x00)
 			is_00 = 0;
@@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
 static void __init dmi_save_type(const struct dmi_header *dm, int slot,
 		int index)
 {
-	const u8 *d = (u8 *) dm + index;
+	const u8 *d;
 	char *s;
 
-	if (dmi_ident[slot])
+	if (dmi_ident[slot] || dm->length <= index)
 		return;
 
 	s = dmi_alloc(4);
 	if (!s)
 		return;
 
+	d = (u8 *) dm + index;
 	sprintf(s, "%u", *d & 0x7F);
 	dmi_ident[slot] = s;
 }
@@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
 
 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
 {
-	int i, count = *(u8 *)(dm + 1);
+	int i, count;
 	struct dmi_device *dev;
 
+	if (dm->length < 0x05)
+		return;
+
+	count = *(u8 *)(dm + 1);
 	for (i = 1; i <= count; i++) {
 		const char *devname = dmi_string(dm, i);
 
@@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
 	const char *name;
 	const u8 *d = (u8 *)dm;
 
+	if (dm->length < 0x0B)
+		return;
+
 	/* Skip disabled device */
 	if ((d[0x5] & 0x80) == 0)
 		return;
@@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
 	const char *d = (const char *)dm;
 	static int nr;
 
-	if (dm->type != DMI_ENTRY_MEM_DEVICE)
+	if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
 		return;
 	if (nr >= dmi_memdev_nr) {
 		pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -430,6 +439,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
 		dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
 		dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
 		dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+		dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
 		break;
 	case 2:		/* Base Board Information */
 		dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
@@ -649,6 +659,21 @@ void __init dmi_scan_machine(void)
 			goto error;
 
 		/*
+		 * Same logic as above: look for a 64-bit entry point
+		 * first and, if not found, fall back to the 32-bit entry point.
+		 */
+		memcpy_fromio(buf, p, 16);
+		for (q = p + 16; q < p + 0x10000; q += 16) {
+			memcpy_fromio(buf + 16, q, 16);
+			if (!dmi_smbios3_present(buf)) {
+				dmi_available = 1;
+				dmi_early_unmap(p, 0x10000);
+				goto out;
+			}
+			memcpy(buf, buf + 16, 16);
+		}
+
+		/*
 		 * Iterate over all possible DMI header addresses q.
 		 * Maintain the 32 bytes around q in buf.  On the
 		 * first iteration, substitute zero for the
@@ -658,7 +683,7 @@ void __init dmi_scan_machine(void)
 		memset(buf, 0, 16);
 		for (q = p; q < p + 0x10000; q += 16) {
 			memcpy_fromio(buf + 16, q, 16);
-			if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
+			if (!dmi_present(buf)) {
 				dmi_available = 1;
 				dmi_early_unmap(p, 0x10000);
 				goto out;
@@ -992,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
  *	@decode: Callback function
  *	@private_data: Private data to be passed to the callback function
  *
- *	Returns -1 when the DMI table can't be reached, 0 on success.
+ *	Returns 0 on success, -ENXIO if DMI is not selected or not present,
+ *	or a different negative error code if DMI walking fails.
  */
 int dmi_walk(void (*decode)(const struct dmi_header *, void *),
 	     void *private_data)
@@ -1000,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
 	u8 *buf;
 
 	if (!dmi_available)
-		return -1;
+		return -ENXIO;
 
 	buf = dmi_remap(dmi_base, dmi_len);
 	if (buf == NULL)
-		return -1;
+		return -ENOMEM;
 
 	dmi_decode_table(buf, decode, private_data);
 
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2e78b0b..394db40 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -112,6 +112,15 @@
 
 	  Most users should say N.
 
+config EFI_CAPSULE_QUIRK_QUARK_CSH
+	bool "Add support for Quark capsules with non-standard headers"
+	depends on X86 && !64BIT
+	select EFI_CAPSULE_LOADER
+	default y
+	help
+	  Add support for processing Quark X1000 EFI capsules, whose header
+	  layout deviates from the layout mandated by the UEFI specification.
+
 config EFI_TEST
 	tristate "EFI Runtime Service Tests Support"
 	depends on EFI
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 974c5a3..1cc41c3 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
@@ -166,3 +167,18 @@ void efi_virtmap_unload(void)
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
+
+
+static int __init arm_dmi_init(void)
+{
+	/*
+	 * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+	 * be called early because dmi_id_init(), which is an arch_initcall
+	 * itself, depends on dmi_scan_machine() having been called already.
+	 */
+	dmi_scan_machine();
+	if (dmi_available)
+		dmi_set_dump_stack_arch_desc();
+	return 0;
+}
+core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 9ae6c11..ec8ac5c 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,15 +20,9 @@
 
 #define NO_FURTHER_WRITE_ACTION -1
 
-struct capsule_info {
-	bool		header_obtained;
-	int		reset_type;
-	long		index;
-	size_t		count;
-	size_t		total_size;
-	struct page	**pages;
-	size_t		page_bytes_remain;
-};
+#ifndef phys_to_page
+#define phys_to_page(x)		pfn_to_page((x) >> PAGE_SHIFT)
+#endif
 
 /**
  * efi_free_all_buff_pages - free all previous allocated buffer pages
@@ -41,62 +35,67 @@ struct capsule_info {
 static void efi_free_all_buff_pages(struct capsule_info *cap_info)
 {
 	while (cap_info->index > 0)
-		__free_page(cap_info->pages[--cap_info->index]);
+		__free_page(phys_to_page(cap_info->pages[--cap_info->index]));
 
 	cap_info->index = NO_FURTHER_WRITE_ACTION;
 }
 
+int __efi_capsule_setup_info(struct capsule_info *cap_info)
+{
+	size_t pages_needed;
+	int ret;
+	void *temp_page;
+
+	pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
+
+	if (pages_needed == 0) {
+		pr_err("invalid capsule size\n");
+		return -EINVAL;
+	}
+
+	/* Check if the capsule binary is supported */
+	ret = efi_capsule_supported(cap_info->header.guid,
+				    cap_info->header.flags,
+				    cap_info->header.imagesize,
+				    &cap_info->reset_type);
+	if (ret) {
+		pr_err("capsule not supported\n");
+		return ret;
+	}
+
+	temp_page = krealloc(cap_info->pages,
+			     pages_needed * sizeof(void *),
+			     GFP_KERNEL | __GFP_ZERO);
+	if (!temp_page)
+		return -ENOMEM;
+
+	cap_info->pages = temp_page;
+
+	return 0;
+}
+
 /**
  * efi_capsule_setup_info - obtain the efi capsule header in the binary and
  *			    setup capsule_info structure
  * @cap_info: pointer to current instance of capsule_info structure
  * @kbuff: a mapped first page buffer pointer
  * @hdr_bytes: the total received number of bytes for efi header
+ *
+ * Platforms with non-standard capsule update mechanisms can override
+ * this __weak function so they can perform any required capsule
+ * image munging. See quark_quirk_function() for an example.
  **/
-static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
-				      void *kbuff, size_t hdr_bytes)
+int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+				  size_t hdr_bytes)
 {
-	efi_capsule_header_t *cap_hdr;
-	size_t pages_needed;
-	int ret;
-	void *temp_page;
-
 	/* Only process data block that is larger than efi header size */
 	if (hdr_bytes < sizeof(efi_capsule_header_t))
 		return 0;
 
-	/* Reset back to the correct offset of header */
-	cap_hdr = kbuff - cap_info->count;
-	pages_needed = ALIGN(cap_hdr->imagesize, PAGE_SIZE) >> PAGE_SHIFT;
+	memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+	cap_info->total_size = cap_info->header.imagesize;
 
-	if (pages_needed == 0) {
-		pr_err("%s: pages count invalid\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Check if the capsule binary supported */
-	ret = efi_capsule_supported(cap_hdr->guid, cap_hdr->flags,
-				    cap_hdr->imagesize,
-				    &cap_info->reset_type);
-	if (ret) {
-		pr_err("%s: efi_capsule_supported() failed\n",
-		       __func__);
-		return ret;
-	}
-
-	cap_info->total_size = cap_hdr->imagesize;
-	temp_page = krealloc(cap_info->pages,
-			     pages_needed * sizeof(void *),
-			     GFP_KERNEL | __GFP_ZERO);
-	if (!temp_page) {
-		pr_debug("%s: krealloc() failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	cap_info->pages = temp_page;
-	cap_info->header_obtained = true;
-
-	return 0;
+	return __efi_capsule_setup_info(cap_info);
 }
 
 /**
@@ -107,26 +106,17 @@ static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
 static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
 {
 	int ret;
-	void *cap_hdr_temp;
 
-	cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
-			VM_MAP, PAGE_KERNEL);
-	if (!cap_hdr_temp) {
-		pr_debug("%s: vmap() failed\n", __func__);
-		return -EFAULT;
-	}
-
-	ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
-	vunmap(cap_hdr_temp);
+	ret = efi_capsule_update(&cap_info->header, cap_info->pages);
 	if (ret) {
-		pr_err("%s: efi_capsule_update() failed\n", __func__);
+		pr_err("capsule update failed\n");
 		return ret;
 	}
 
 	/* Indicate capsule binary uploading is done */
 	cap_info->index = NO_FURTHER_WRITE_ACTION;
-	pr_info("%s: Successfully upload capsule file with reboot type '%s'\n",
-		__func__, !cap_info->reset_type ? "RESET_COLD" :
+	pr_info("Successfully uploaded capsule file with reboot type '%s'\n",
+		!cap_info->reset_type ? "RESET_COLD" :
 		cap_info->reset_type == 1 ? "RESET_WARM" :
 		"RESET_SHUTDOWN");
 	return 0;
@@ -171,37 +161,30 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
 	if (!cap_info->page_bytes_remain) {
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
-			pr_debug("%s: alloc_page() failed\n", __func__);
 			ret = -ENOMEM;
 			goto failed;
 		}
 
-		cap_info->pages[cap_info->index++] = page;
+		cap_info->pages[cap_info->index++] = page_to_phys(page);
 		cap_info->page_bytes_remain = PAGE_SIZE;
+	} else {
+		page = phys_to_page(cap_info->pages[cap_info->index - 1]);
 	}
 
-	page = cap_info->pages[cap_info->index - 1];
-
 	kbuff = kmap(page);
-	if (!kbuff) {
-		pr_debug("%s: kmap() failed\n", __func__);
-		ret = -EFAULT;
-		goto failed;
-	}
 	kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
 
 	/* Copy capsule binary data from user space to kernel space buffer */
 	write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
 	if (copy_from_user(kbuff, buff, write_byte)) {
-		pr_debug("%s: copy_from_user() failed\n", __func__);
 		ret = -EFAULT;
 		goto fail_unmap;
 	}
 	cap_info->page_bytes_remain -= write_byte;
 
 	/* Setup capsule binary info structure */
-	if (!cap_info->header_obtained) {
-		ret = efi_capsule_setup_info(cap_info, kbuff,
+	if (cap_info->header.headersize == 0) {
+		ret = efi_capsule_setup_info(cap_info, kbuff - cap_info->count,
 					     cap_info->count + write_byte);
 		if (ret)
 			goto fail_unmap;
@@ -211,11 +194,10 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
 	kunmap(page);
 
 	/* Submit the full binary to efi_capsule_update() API */
-	if (cap_info->header_obtained &&
+	if (cap_info->header.headersize > 0 &&
 	    cap_info->count >= cap_info->total_size) {
 		if (cap_info->count > cap_info->total_size) {
-			pr_err("%s: upload size exceeded header defined size\n",
-			       __func__);
+			pr_err("capsule upload size exceeded header defined size\n");
 			ret = -EINVAL;
 			goto failed;
 		}
@@ -249,7 +231,7 @@ static int efi_capsule_flush(struct file *file, fl_owner_t id)
 	struct capsule_info *cap_info = file->private_data;
 
 	if (cap_info->index > 0) {
-		pr_err("%s: capsule upload not complete\n", __func__);
+		pr_err("capsule upload not complete\n");
 		efi_free_all_buff_pages(cap_info);
 		ret = -ECANCELED;
 	}
@@ -328,8 +310,7 @@ static int __init efi_capsule_loader_init(void)
 
 	ret = misc_register(&efi_capsule_misc);
 	if (ret)
-		pr_err("%s: Failed to register misc char file note\n",
-		       __func__);
+		pr_err("Unable to register capsule loader device\n");
 
 	return ret;
 }
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 6eedff4..901b930 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -214,7 +214,7 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
  *
  * Return 0 on success, a converted EFI status code on failure.
  */
-int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
+int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
 {
 	u32 imagesize = capsule->imagesize;
 	efi_guid_t guid = capsule->guid;
@@ -247,16 +247,13 @@ int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
 		efi_capsule_block_desc_t *sglist;
 
 		sglist = kmap(sg_pages[i]);
-		if (!sglist) {
-			rv = -ENOMEM;
-			goto out;
-		}
 
 		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
-			u64 sz = min_t(u64, imagesize, PAGE_SIZE);
+			u64 sz = min_t(u64, imagesize,
+				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);
 
 			sglist[j].length = sz;
-			sglist[j].data = page_to_phys(*pages++);
+			sglist[j].data = *pages++;
 
 			imagesize -= sz;
 			count--;
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 04ca876..b58233e 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,6 +27,26 @@ struct bmp_header {
 	u32 size;
 } __packed;
 
+static bool efi_bgrt_addr_valid(u64 addr)
+{
+	efi_memory_desc_t *md;
+
+	for_each_efi_memory_desc(md) {
+		u64 size;
+		u64 end;
+
+		if (md->type != EFI_BOOT_SERVICES_DATA)
+			continue;
+
+		size = md->num_pages << EFI_PAGE_SHIFT;
+		end = md->phys_addr + size;
+		if (addr >= md->phys_addr && addr < end)
+			return true;
+	}
+
+	return false;
+}
+
 void __init efi_bgrt_init(struct acpi_table_header *table)
 {
 	void *image;
@@ -36,6 +56,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
 	if (acpi_disabled)
 		return;
 
+	if (!efi_enabled(EFI_MEMMAP))
+		return;
+
 	if (table->length < sizeof(bgrt_tab)) {
 		pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
 		       table->length, sizeof(bgrt_tab));
@@ -62,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
 		goto out;
 	}
 
+	if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+		pr_notice("Ignoring BGRT: invalid image address\n");
+		goto out;
+	}
 	image = early_memremap(bgrt->image_address, sizeof(bmp_header));
 	if (!image) {
 		pr_notice("Ignoring BGRT: failed to map image header memory\n");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index b372aad..045d6d3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -528,7 +528,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 		}
 	}
 
-	efi_memattr_init();
+	if (efi_enabled(EFI_MEMMAP))
+		efi_memattr_init();
 
 	/* Parse the EFI Properties table if it exists */
 	if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8c34d50..959777e 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -16,10 +16,10 @@
 
 /* BIOS variables */
 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t const efi_SecureBoot_name[] = {
+static const efi_char16_t efi_SecureBoot_name[] = {
 	'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0
 };
-static const efi_char16_t const efi_SetupMode_name[] = {
+static const efi_char16_t efi_SetupMode_name[] = {
 	'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0
 };
 
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 8cd578f..08129b7 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -71,18 +71,13 @@ copy_ucs2_from_user_len(efi_char16_t **dst, efi_char16_t __user *src,
 	if (!access_ok(VERIFY_READ, src, 1))
 		return -EFAULT;
 
-	buf = kmalloc(len, GFP_KERNEL);
-	if (!buf) {
+	buf = memdup_user(src, len);
+	if (IS_ERR(buf)) {
 		*dst = NULL;
-		return -ENOMEM;
+		return PTR_ERR(buf);
 	}
 	*dst = buf;
 
-	if (copy_from_user(*dst, src, len)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index 0271111..5273888 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -26,12 +26,52 @@
 
 /* CBMEM firmware console log descriptor. */
 struct cbmem_cons {
-	u32 buffer_size;
-	u32 buffer_cursor;
-	u8  buffer_body[0];
+	u32 size_dont_access_after_boot;
+	u32 cursor;
+	u8  body[0];
 } __packed;
 
+#define CURSOR_MASK ((1 << 28) - 1)
+#define OVERFLOW (1 << 31)
+
 static struct cbmem_cons __iomem *cbmem_console;
+static u32 cbmem_console_size;
+
+/*
+ * The cbmem_console structure is read again on every access because it may
+ * change at any time if runtime firmware logs new messages. This may rarely
+ * lead to race conditions where the firmware overwrites the beginning of the
+ * ring buffer with more lines after we have already read |cursor|. It should be
+ * rare and harmless enough that we don't spend extra effort working around it.
+ */
+static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
+{
+	u32 cursor = cbmem_console->cursor & CURSOR_MASK;
+	u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
+	u32 size = cbmem_console_size;
+	struct seg {	/* describes ring buffer segments in logical order */
+		u32 phys;	/* physical offset from start of mem buffer */
+		u32 len;	/* length of segment */
+	} seg[2] = { {0}, {0} };
+	size_t done = 0;
+	int i;
+
+	if (flags & OVERFLOW) {
+		if (cursor > size)	/* Shouldn't really happen, but... */
+			cursor = 0;
+		seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
+		seg[1] = (struct seg){.phys = 0, .len = cursor};
+	} else {
+		seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
+	}
+
+	for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
+		done += memory_read_from_buffer(buf + done, count - done, &pos,
+			cbmem_console->body + seg[i].phys, seg[i].len);
+		pos -= seg[i].len;
+	}
+	return done;
+}
 
 static int memconsole_coreboot_init(phys_addr_t physaddr)
 {
@@ -42,17 +82,17 @@ static int memconsole_coreboot_init(phys_addr_t physaddr)
 	if (!tmp_cbmc)
 		return -ENOMEM;
 
+	/* Read size only once to prevent overrun attack through /dev/mem. */
+	cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
 	cbmem_console = memremap(physaddr,
-				 tmp_cbmc->buffer_size + sizeof(*cbmem_console),
+				 cbmem_console_size + sizeof(*cbmem_console),
 				 MEMREMAP_WB);
 	memunmap(tmp_cbmc);
 
 	if (!cbmem_console)
 		return -ENOMEM;
 
-	memconsole_setup(cbmem_console->buffer_body,
-		min(cbmem_console->buffer_cursor, cbmem_console->buffer_size));
-
+	memconsole_setup(memconsole_coreboot_read);
 	return 0;
 }
 
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
index 1f279ee..8c1bf6d 100644
--- a/drivers/firmware/google/memconsole-x86-legacy.c
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -48,6 +48,15 @@ struct biosmemcon_ebda {
 	};
 } __packed;
 
+static char *memconsole_baseaddr;
+static size_t memconsole_length;
+
+static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
+{
+	return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+				       memconsole_length);
+}
+
 static void found_v1_header(struct biosmemcon_ebda *hdr)
 {
 	pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
@@ -56,7 +65,9 @@ static void found_v1_header(struct biosmemcon_ebda *hdr)
 		hdr->v1.buffer_addr, hdr->v1.start,
 		hdr->v1.end, hdr->v1.num_chars);
 
-	memconsole_setup(phys_to_virt(hdr->v1.buffer_addr), hdr->v1.num_chars);
+	memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+	memconsole_length = hdr->v1.num_chars;
+	memconsole_setup(memconsole_read);
 }
 
 static void found_v2_header(struct biosmemcon_ebda *hdr)
@@ -67,8 +78,9 @@ static void found_v2_header(struct biosmemcon_ebda *hdr)
 		hdr->v2.buffer_addr, hdr->v2.start,
 		hdr->v2.end, hdr->v2.num_bytes);
 
-	memconsole_setup(phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start),
-			 hdr->v2.end - hdr->v2.start);
+	memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
+	memconsole_length = hdr->v2.end - hdr->v2.start;
+	memconsole_setup(memconsole_read);
 }
 
 /*
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 94e200d..166f07c 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -22,15 +22,15 @@
 
 #include "memconsole.h"
 
-static char *memconsole_baseaddr;
-static size_t memconsole_length;
+static ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
 
 static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
 			       struct bin_attribute *bin_attr, char *buf,
 			       loff_t pos, size_t count)
 {
-	return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
-				       memconsole_length);
+	if (WARN_ON_ONCE(!memconsole_read_func))
+		return -EIO;
+	return memconsole_read_func(buf, pos, count);
 }
 
 static struct bin_attribute memconsole_bin_attr = {
@@ -38,16 +38,14 @@ static struct bin_attribute memconsole_bin_attr = {
 	.read = memconsole_read,
 };
 
-void memconsole_setup(void *baseaddr, size_t length)
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
 {
-	memconsole_baseaddr = baseaddr;
-	memconsole_length = length;
+	memconsole_read_func = read_func;
 }
 EXPORT_SYMBOL(memconsole_setup);
 
 int memconsole_sysfs_init(void)
 {
-	memconsole_bin_attr.size = memconsole_length;
 	return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
 }
 EXPORT_SYMBOL(memconsole_sysfs_init);
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
index 190fc03..ff1592d 100644
--- a/drivers/firmware/google/memconsole.h
+++ b/drivers/firmware/google/memconsole.h
@@ -18,13 +18,14 @@
 #ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
 #define __FIRMWARE_GOOGLE_MEMCONSOLE_H
 
+#include <linux/types.h>
+
 /*
  * memconsole_setup
  *
- * Initialize the memory console from raw (virtual) base
- * address and length.
+ * Initialize the memory console, passing the function to handle read accesses.
  */
-void memconsole_setup(void *baseaddr, size_t length);
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t));
 
 /*
  * memconsole_sysfs_init
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1e7860f..7894572 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -118,14 +118,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
-	info->key = kzalloc(key_len + 1, GFP_KERNEL);
+
+	info->key = kstrndup(key, key_len, GFP_KERNEL);
 	if (!info->key) {
 		ret = -ENOMEM;
 		goto free_info;
 	}
 
-	memcpy(info->key, key, key_len);
-
 	sysfs_bin_attr_init(&info->bin_attr);
 	info->bin_attr.attr.name = info->key;
 	info->bin_attr.attr.mode = 0444;
@@ -136,12 +135,12 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
 	info->value = value;
 
 	INIT_LIST_HEAD(&info->list);
-	list_add_tail(&info->list, &sec->attribs);
 
 	ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
 	if (ret)
 		goto free_info_key;
 
+	list_add_tail(&info->list, &sec->attribs);
 	return 0;
 
 free_info_key:
@@ -158,8 +157,8 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
 	struct vpd_attrib_info *temp;
 
 	list_for_each_entry_safe(info, temp, &sec->attribs, list) {
-		kfree(info->key);
 		sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+		kfree(info->key);
 		kfree(info);
 	}
 }
@@ -191,8 +190,7 @@ static int vpd_section_create_attribs(struct vpd_section *sec)
 static int vpd_section_init(const char *name, struct vpd_section *sec,
 			    phys_addr_t physaddr, size_t size)
 {
-	int ret;
-	int raw_len;
+	int err;
 
 	sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
 	if (!sec->baseaddr)
@@ -201,10 +199,11 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
 	sec->name = name;
 
 	/* We want to export the raw partition with name ${name}_raw */
-	raw_len = strlen(name) + 5;
-	sec->raw_name = kzalloc(raw_len, GFP_KERNEL);
-	strncpy(sec->raw_name, name, raw_len);
-	strncat(sec->raw_name, "_raw", raw_len);
+	sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
+	if (!sec->raw_name) {
+		err = -ENOMEM;
+		goto err_iounmap;
+	}
 
 	sysfs_bin_attr_init(&sec->bin_attr);
 	sec->bin_attr.attr.name = sec->raw_name;
@@ -213,14 +212,14 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
 	sec->bin_attr.read = vpd_section_read;
 	sec->bin_attr.private = sec;
 
-	ret = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
-	if (ret)
-		goto free_sec;
+	err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+	if (err)
+		goto err_free_raw_name;
 
 	sec->kobj = kobject_create_and_add(name, vpd_kobj);
 	if (!sec->kobj) {
-		ret = -EINVAL;
-		goto sysfs_remove;
+		err = -EINVAL;
+		goto err_sysfs_remove;
 	}
 
 	INIT_LIST_HEAD(&sec->attribs);
@@ -230,21 +229,20 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
 
 	return 0;
 
-sysfs_remove:
+err_sysfs_remove:
 	sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
-
-free_sec:
+err_free_raw_name:
 	kfree(sec->raw_name);
+err_iounmap:
 	iounmap(sec->baseaddr);
-
-	return ret;
+	return err;
 }
 
 static int vpd_section_destroy(struct vpd_section *sec)
 {
 	if (sec->enabled) {
 		vpd_section_attrib_destroy(sec);
-		kobject_del(sec->kobj);
+		kobject_put(sec->kobj);
 		sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
 		kfree(sec->raw_name);
 		iounmap(sec->baseaddr);
@@ -319,9 +317,6 @@ static int __init vpd_platform_init(void)
 	if (!vpd_kobj)
 		return -ENOMEM;
 
-	memset(&ro_vpd, 0, sizeof(ro_vpd));
-	memset(&rw_vpd, 0, sizeof(rw_vpd));
-
 	platform_driver_register(&vpd_driver);
 
 	return 0;
@@ -331,7 +326,7 @@ static void __exit vpd_platform_exit(void)
 {
 	vpd_section_destroy(&ro_vpd);
 	vpd_section_destroy(&rw_vpd);
-	kobject_del(vpd_kobj);
+	kobject_put(vpd_kobj);
 }
 
 module_init(vpd_platform_init);
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 84e4c9a..b251795 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -211,14 +211,17 @@ static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
 	int index;
 
 	index = tegra_bpmp_channel_get_thread_index(channel);
-	if (index < 0)
-		return index;
+	if (index < 0) {
+		err = index;
+		goto unlock;
+	}
 
 	spin_lock_irqsave(&bpmp->lock, flags);
 	err = __tegra_bpmp_channel_read(channel, data, size);
 	clear_bit(index, bpmp->threaded.allocated);
 	spin_unlock_irqrestore(&bpmp->lock, flags);
 
+unlock:
 	up(&bpmp->threaded.lock);
 
 	return err;
@@ -256,18 +259,18 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
 
 	index = find_first_zero_bit(bpmp->threaded.allocated, count);
 	if (index == count) {
-		channel = ERR_PTR(-EBUSY);
+		err = -EBUSY;
 		goto unlock;
 	}
 
 	channel = tegra_bpmp_channel_get_thread(bpmp, index);
 	if (!channel) {
-		channel = ERR_PTR(-EINVAL);
+		err = -EINVAL;
 		goto unlock;
 	}
 
 	if (!tegra_bpmp_master_free(channel)) {
-		channel = ERR_PTR(-EBUSY);
+		err = -EBUSY;
 		goto unlock;
 	}
 
@@ -275,16 +278,21 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
 
 	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
 					 data, size);
-	if (err < 0) {
-		clear_bit(index, bpmp->threaded.allocated);
-		goto unlock;
-	}
+	if (err < 0)
+		goto clear_allocated;
 
 	set_bit(index, bpmp->threaded.busy);
 
-unlock:
 	spin_unlock_irqrestore(&bpmp->lock, flags);
 	return channel;
+
+clear_allocated:
+	clear_bit(index, bpmp->threaded.allocated);
+unlock:
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+	up(&bpmp->threaded.lock);
+
+	return ERR_PTR(err);
 }
 
 static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
@@ -810,6 +818,10 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
 	if (err < 0)
 		goto free_mrq;
 
+	err = tegra_bpmp_init_powergates(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
 	platform_set_drvdata(pdev, bpmp);
 
 	return 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index 04c1a0e..6821ed0 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -6,7 +6,33 @@
 
 config FSI
 	tristate "FSI support"
+	select CRC4
 	---help---
 	  FSI - the FRU Support Interface - is a simple bus for low-level
 	  access to POWER-based hardware.
+
+if FSI
+
+config FSI_MASTER_GPIO
+	tristate "GPIO-based FSI master"
+	depends on GPIOLIB
+	select CRC4
+	---help---
+	This option enables an FSI master driver using GPIO lines.
+
+config FSI_MASTER_HUB
+	tristate "FSI hub master"
+	---help---
+	This option enables an FSI hub master driver.  A hub is a type of FSI
+	master that is connected to the upstream master via a slave.  Hubs
+	allow chaining of FSI links to an arbitrary depth.  This allows for
+	a high target device fanout.
+
+config FSI_SCOM
+	tristate "SCOM FSI client device driver"
+	---help---
+	This option enables an FSI based SCOM device driver.
+
+endif
+
 endmenu
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index db0e5e7..65eb99d 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -1,2 +1,5 @@
 
 obj-$(CONFIG_FSI) += fsi-core.o
+obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 3d55bd5..a485864 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -13,9 +13,830 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/crc4.h>
 #include <linux/device.h>
 #include <linux/fsi.h>
+#include <linux/idr.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include "fsi-master.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi.h>
+
+#define FSI_SLAVE_CONF_NEXT_MASK	GENMASK(31, 31)
+#define FSI_SLAVE_CONF_SLOTS_MASK	GENMASK(23, 16)
+#define FSI_SLAVE_CONF_SLOTS_SHIFT	16
+#define FSI_SLAVE_CONF_VERSION_MASK	GENMASK(15, 12)
+#define FSI_SLAVE_CONF_VERSION_SHIFT	12
+#define FSI_SLAVE_CONF_TYPE_MASK	GENMASK(11, 4)
+#define FSI_SLAVE_CONF_TYPE_SHIFT	4
+#define FSI_SLAVE_CONF_CRC_SHIFT	4
+#define FSI_SLAVE_CONF_CRC_MASK		GENMASK(3, 0)
+#define FSI_SLAVE_CONF_DATA_BITS	28
+
+#define FSI_PEEK_BASE			0x410
+
+static const int engine_page_size = 0x400;
+
+#define FSI_SLAVE_BASE			0x800
+
+/*
+ * FSI slave engine control register offsets
+ */
+#define FSI_SMODE		0x0	/* R/W: Mode register */
+#define FSI_SISC		0x8	/* R/W: Interrupt condition */
+#define FSI_SSTAT		0x14	/* R  : Slave status */
+#define FSI_LLMODE		0x100	/* R/W: Link layer mode register */
+
+/*
+ * SMODE fields
+ */
+#define FSI_SMODE_WSC		0x80000000	/* Warm start done */
+#define FSI_SMODE_ECRC		0x20000000	/* Hw CRC check */
+#define FSI_SMODE_SID_SHIFT	24		/* ID shift */
+#define FSI_SMODE_SID_MASK	3		/* ID Mask */
+#define FSI_SMODE_ED_SHIFT	20		/* Echo delay shift */
+#define FSI_SMODE_ED_MASK	0xf		/* Echo delay mask */
+#define FSI_SMODE_SD_SHIFT	16		/* Send delay shift */
+#define FSI_SMODE_SD_MASK	0xf		/* Send delay mask */
+#define FSI_SMODE_LBCRR_SHIFT	8		/* Clk ratio shift */
+#define FSI_SMODE_LBCRR_MASK	0xf		/* Clk ratio mask */
+
+/*
+ * LLMODE fields
+ */
+#define FSI_LLMODE_ASYNC	0x1
+
+#define FSI_SLAVE_SIZE_23b		0x800000
+
+static DEFINE_IDA(master_ida);
+
+struct fsi_slave {
+	struct device		dev;
+	struct fsi_master	*master;
+	int			id;
+	int			link;
+	uint32_t		size;	/* size of slave address space */
+};
+
+#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
+#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
+
+static const int slave_retries = 2;
+static int discard_errors;
+
+static int fsi_master_read(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, void *val, size_t size);
+static int fsi_master_write(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, const void *val, size_t size);
+static int fsi_master_break(struct fsi_master *master, int link);
+
+/*
+ * fsi_device_read() / fsi_device_write() / fsi_device_peek()
+ *
+ * FSI endpoint-device support
+ *
+ * Read / write / peek accessors for a client
+ *
+ * Parameters:
+ * dev:  Structure passed to FSI client device drivers on probe().
+ * addr: FSI address of given device.  Client should pass in its base address
+ *       plus desired offset to access its register space.
+ * val:  For read/peek this is the value read at the specified address. For
+ *       write this is value to write to the specified address.
+ *       The data in val must be FSI bus endian (big endian).
+ * size: Size in bytes of the operation.  Sizes supported are 1, 2 and 4 bytes.
+ *       Addresses must be aligned on size boundaries or an error will result.
+ */
+int fsi_device_read(struct fsi_device *dev, uint32_t addr, void *val,
+		size_t size)
+{
+	if (addr > dev->size || size > dev->size || addr > dev->size - size)
+		return -EINVAL;
+
+	return fsi_slave_read(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_read);
+
+int fsi_device_write(struct fsi_device *dev, uint32_t addr, const void *val,
+		size_t size)
+{
+	if (addr > dev->size || size > dev->size || addr > dev->size - size)
+		return -EINVAL;
+
+	return fsi_slave_write(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_write);
+
+int fsi_device_peek(struct fsi_device *dev, void *val)
+{
+	uint32_t addr = FSI_PEEK_BASE + ((dev->unit - 2) * sizeof(uint32_t));
+
+	return fsi_slave_read(dev->slave, addr, val, sizeof(uint32_t));
+}
+
+static void fsi_device_release(struct device *_device)
+{
+	struct fsi_device *device = to_fsi_dev(_device);
+
+	kfree(device);
+}
+
+static struct fsi_device *fsi_create_device(struct fsi_slave *slave)
+{
+	struct fsi_device *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	dev->dev.parent = &slave->dev;
+	dev->dev.bus = &fsi_bus_type;
+	dev->dev.release = fsi_device_release;
+
+	return dev;
+}
+
+/* FSI slave support */
+static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
+		uint8_t *idp)
+{
+	uint32_t addr = *addrp;
+	uint8_t id = *idp;
+
+	if (addr > slave->size)
+		return -EINVAL;
+
+	/* For 23 bit addressing, we encode the extra two bits in the slave
+	 * id (and the slave's actual ID needs to be 0).
+	 */
+	if (addr > 0x1fffff) {
+		if (slave->id != 0)
+			return -EINVAL;
+		id = (addr >> 21) & 0x3;
+		addr &= 0x1fffff;
+	}
+
+	*addrp = addr;
+	*idp = id;
+	return 0;
+}
+
+int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+{
+	struct fsi_master *master = slave->master;
+	uint32_t irq, stat;
+	int rc, link;
+	uint8_t id;
+
+	link = slave->link;
+	id = slave->id;
+
+	rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+			&irq, sizeof(irq));
+	if (rc)
+		return rc;
+
+	rc =  fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SSTAT,
+			&stat, sizeof(stat));
+	if (rc)
+		return rc;
+
+	dev_info(&slave->dev, "status: 0x%08x, sisc: 0x%08x\n",
+			be32_to_cpu(stat), be32_to_cpu(irq));
+
+	/* clear interrupts */
+	return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+			&irq, sizeof(irq));
+}
+
+static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
+
+int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
+		size_t size)
+{
+	struct fsi_master *master = slave->master;
+	int rc, link;
+	uint32_t reg;
+	uint8_t id;
+
+	if (discard_errors)
+		return -1;
+
+	link = slave->link;
+	id = slave->id;
+
+	dev_dbg(&slave->dev, "handling error on %s to 0x%08x[%zd]",
+			write ? "write" : "read", addr, size);
+
+	/* try a simple clear of error conditions, which may fail if we've lost
+	 * communication with the slave
+	 */
+	rc = fsi_slave_report_and_clear_errors(slave);
+	if (!rc)
+		return 0;
+
+	/* send a TERM and retry */
+	if (master->term) {
+		rc = master->term(master, link, id);
+		if (!rc) {
+			rc = fsi_master_read(master, link, id, 0,
+					&reg, sizeof(reg));
+			if (!rc)
+				rc = fsi_slave_report_and_clear_errors(slave);
+			if (!rc)
+				return 0;
+		}
+	}
+
+	/* getting serious, reset the slave via BREAK */
+	rc = fsi_master_break(master, link);
+	if (rc)
+		return rc;
+
+	rc = fsi_slave_set_smode(master, link, id);
+	if (rc)
+		return rc;
+
+	return fsi_slave_report_and_clear_errors(slave);
+}
+
+int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
+			void *val, size_t size)
+{
+	uint8_t id = slave->id;
+	int rc, err_rc, i;
+
+	rc = fsi_slave_calc_addr(slave, &addr, &id);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < slave_retries; i++) {
+		rc = fsi_master_read(slave->master, slave->link,
+				id, addr, val, size);
+		if (!rc)
+			break;
+
+		err_rc = fsi_slave_handle_error(slave, false, addr, size);
+		if (err_rc)
+			break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_read);
+
+int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
+			const void *val, size_t size)
+{
+	uint8_t id = slave->id;
+	int rc, err_rc, i;
+
+	rc = fsi_slave_calc_addr(slave, &addr, &id);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < slave_retries; i++) {
+		rc = fsi_master_write(slave->master, slave->link,
+				id, addr, val, size);
+		if (!rc)
+			break;
+
+		err_rc = fsi_slave_handle_error(slave, true, addr, size);
+		if (err_rc)
+			break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_write);
+
+int fsi_slave_claim_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size)
+{
+	if (addr + size < addr)
+		return -EINVAL;
+
+	if (addr + size > slave->size)
+		return -EINVAL;
+
+	/* todo: check for overlapping claims */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
+
+void fsi_slave_release_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size)
+{
+}
+EXPORT_SYMBOL_GPL(fsi_slave_release_range);
+
+static int fsi_slave_scan(struct fsi_slave *slave)
+{
+	uint32_t engine_addr;
+	uint32_t conf;
+	int rc, i;
+
+	/*
+	 * scan engines
+	 *
+	 * We keep the peek mode and slave engines for the core, so start
+	 * at the third slot in the configuration table. We also need to
+	 * skip the chip ID entry at the start of the address space.
+	 */
+	engine_addr = engine_page_size * 3;
+	for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
+		uint8_t slots, version, type, crc;
+		struct fsi_device *dev;
+
+		rc = fsi_slave_read(slave, (i + 1) * sizeof(conf),
+				&conf, sizeof(conf));
+		if (rc) {
+			dev_warn(&slave->dev,
+				"error reading slave registers\n");
+			return -1;
+		}
+		conf = be32_to_cpu(conf);
+
+		crc = crc4(0, conf, 32);
+		if (crc) {
+			dev_warn(&slave->dev,
+				"crc error in slave register at 0x%04x\n",
+				i);
+			return -1;
+		}
+
+		slots = (conf & FSI_SLAVE_CONF_SLOTS_MASK)
+			>> FSI_SLAVE_CONF_SLOTS_SHIFT;
+		version = (conf & FSI_SLAVE_CONF_VERSION_MASK)
+			>> FSI_SLAVE_CONF_VERSION_SHIFT;
+		type = (conf & FSI_SLAVE_CONF_TYPE_MASK)
+			>> FSI_SLAVE_CONF_TYPE_SHIFT;
+
+		/*
+		 * Unused address areas are marked by a zero type value; this
+		 * skips the defined address areas
+		 */
+		if (type != 0 && slots != 0) {
+
+			/* create device */
+			dev = fsi_create_device(slave);
+			if (!dev)
+				return -ENOMEM;
+
+			dev->slave = slave;
+			dev->engine_type = type;
+			dev->version = version;
+			dev->unit = i;
+			dev->addr = engine_addr;
+			dev->size = slots * engine_page_size;
+
+			dev_dbg(&slave->dev,
+			"engine[%i]: type %x, version %x, addr %x size %x\n",
+					dev->unit, dev->engine_type, version,
+					dev->addr, dev->size);
+
+			dev_set_name(&dev->dev, "%02x:%02x:%02x:%02x",
+					slave->master->idx, slave->link,
+					slave->id, i - 2);
+
+			rc = device_register(&dev->dev);
+			if (rc) {
+				dev_warn(&slave->dev, "add failed: %d\n", rc);
+				put_device(&dev->dev);
+			}
+		}
+
+		engine_addr += slots * engine_page_size;
+
+		if (!(conf & FSI_SLAVE_CONF_NEXT_MASK))
+			break;
+	}
+
+	return 0;
+}
+
+static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+	size_t total_len, read_len;
+	int rc;
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += read_len) {
+		read_len = min_t(size_t, count, 4);
+		read_len -= off & 0x3;
+
+		rc = fsi_slave_read(slave, off, buf + total_len, read_len);
+		if (rc)
+			return rc;
+
+		off += read_len;
+	}
+
+	return count;
+}
+
+static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
+		struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+	size_t total_len, write_len;
+	int rc;
+
+	if (off < 0)
+		return -EINVAL;
+
+	if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+		return -EINVAL;
+
+	for (total_len = 0; total_len < count; total_len += write_len) {
+		write_len = min_t(size_t, count, 4);
+		write_len -= off & 0x3;
+
+		rc = fsi_slave_write(slave, off, buf + total_len, write_len);
+		if (rc)
+			return rc;
+
+		off += write_len;
+	}
+
+	return count;
+}
+
+static struct bin_attribute fsi_slave_raw_attr = {
+	.attr = {
+		.name = "raw",
+		.mode = 0600,
+	},
+	.size = 0,
+	.read = fsi_slave_sysfs_raw_read,
+	.write = fsi_slave_sysfs_raw_write,
+};
+
+static ssize_t fsi_slave_sysfs_term_write(struct file *file,
+		struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+	struct fsi_master *master = slave->master;
+
+	if (!master->term)
+		return -ENODEV;
+
+	master->term(master, slave->link, slave->id);
+	return count;
+}
+
+static struct bin_attribute fsi_slave_term_attr = {
+	.attr = {
+		.name = "term",
+		.mode = 0200,
+	},
+	.size = 0,
+	.write = fsi_slave_sysfs_term_write,
+};
+
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+	return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+	return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+	return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+	return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id)
+{
+	return FSI_SMODE_WSC | FSI_SMODE_ECRC
+		| fsi_smode_sid(id)
+		| fsi_smode_echodly(0xf) | fsi_smode_senddly(0xf)
+		| fsi_smode_lbcrr(0x8);
+}
+
+static int fsi_slave_set_smode(struct fsi_master *master, int link, int id)
+{
+	uint32_t smode;
+
+	/* set our smode register with the slave ID field to 0; this enables
+	 * extended slave addressing
+	 */
+	smode = fsi_slave_smode(id);
+	smode = cpu_to_be32(smode);
+
+	return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SMODE,
+			&smode, sizeof(smode));
+}
+
+static void fsi_slave_release(struct device *dev)
+{
+	struct fsi_slave *slave = to_fsi_slave(dev);
+
+	kfree(slave);
+}
+
+static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+{
+	uint32_t chip_id, llmode;
+	struct fsi_slave *slave;
+	uint8_t crc;
+	int rc;
+
+	/* Currently, we only support single slaves on a link, and use the
+	 * full 23-bit address range
+	 */
+	if (id != 0)
+		return -EINVAL;
+
+	rc = fsi_master_read(master, link, id, 0, &chip_id, sizeof(chip_id));
+	if (rc) {
+		dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
+				link, id, rc);
+		return -ENODEV;
+	}
+	chip_id = be32_to_cpu(chip_id);
+
+	crc = crc4(0, chip_id, 32);
+	if (crc) {
+		dev_warn(&master->dev, "slave %02x:%02x invalid chip id CRC!\n",
+				link, id);
+		return -EIO;
+	}
+
+	dev_info(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
+			chip_id, master->idx, link, id);
+
+	rc = fsi_slave_set_smode(master, link, id);
+	if (rc) {
+		dev_warn(&master->dev,
+				"can't set smode on slave:%02x:%02x %d\n",
+				link, id, rc);
+		return -ENODEV;
+	}
+
+	/* If we're behind a master that doesn't provide a self-running bus
+	 * clock, put the slave into async mode
+	 */
+	if (master->flags & FSI_MASTER_FLAG_SWCLOCK) {
+		llmode = cpu_to_be32(FSI_LLMODE_ASYNC);
+		rc = fsi_master_write(master, link, id,
+				FSI_SLAVE_BASE + FSI_LLMODE,
+				&llmode, sizeof(llmode));
+		if (rc)
+			dev_warn(&master->dev,
+				"can't set llmode on slave:%02x:%02x %d\n",
+				link, id, rc);
+	}
+
+	/* We can communicate with a slave; create the slave device and
+	 * register.
+	 */
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	slave->master = master;
+	slave->dev.parent = &master->dev;
+	slave->dev.release = fsi_slave_release;
+	slave->link = link;
+	slave->id = id;
+	slave->size = FSI_SLAVE_SIZE_23b;
+
+	dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+	rc = device_register(&slave->dev);
+	if (rc < 0) {
+		dev_warn(&master->dev, "failed to create slave device: %d\n",
+				rc);
+		put_device(&slave->dev);
+		return rc;
+	}
+
+	rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
+	if (rc)
+		dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
+
+	rc = device_create_bin_file(&slave->dev, &fsi_slave_term_attr);
+	if (rc)
+		dev_warn(&slave->dev, "failed to create term attr: %d\n", rc);
+
+	rc = fsi_slave_scan(slave);
+	if (rc)
+		dev_dbg(&master->dev, "failed during slave scan with: %d\n",
+				rc);
+
+	return rc;
+}
+
+/* FSI master support */
+static int fsi_check_access(uint32_t addr, size_t size)
+{
+	if (size != 1 && size != 2 && size != 4)
+		return -EINVAL;
+
+	if ((addr & 0x3) != (size & 0x3))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fsi_master_read(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, void *val, size_t size)
+{
+	int rc;
+
+	trace_fsi_master_read(master, link, slave_id, addr, size);
+
+	rc = fsi_check_access(addr, size);
+	if (!rc)
+		rc = master->read(master, link, slave_id, addr, val, size);
+
+	trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+			false, val, rc);
+
+	return rc;
+}
+
+static int fsi_master_write(struct fsi_master *master, int link,
+		uint8_t slave_id, uint32_t addr, const void *val, size_t size)
+{
+	int rc;
+
+	trace_fsi_master_write(master, link, slave_id, addr, size, val);
+
+	rc = fsi_check_access(addr, size);
+	if (!rc)
+		rc = master->write(master, link, slave_id, addr, val, size);
+
+	trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+			true, val, rc);
+
+	return rc;
+}
+
+static int fsi_master_link_enable(struct fsi_master *master, int link)
+{
+	if (master->link_enable)
+		return master->link_enable(master, link);
+
+	return 0;
+}
+
+/*
+ * Issue a break command on this link
+ */
+static int fsi_master_break(struct fsi_master *master, int link)
+{
+	trace_fsi_master_break(master, link);
+
+	if (master->send_break)
+		return master->send_break(master, link);
+
+	return 0;
+}
+
+static int fsi_master_scan(struct fsi_master *master)
+{
+	int link, rc;
+
+	for (link = 0; link < master->n_links; link++) {
+		rc = fsi_master_link_enable(master, link);
+		if (rc) {
+			dev_dbg(&master->dev,
+				"enable link %d failed: %d\n", link, rc);
+			continue;
+		}
+		rc = fsi_master_break(master, link);
+		if (rc) {
+			dev_dbg(&master->dev,
+				"break to link %d failed: %d\n", link, rc);
+			continue;
+		}
+
+		fsi_slave_init(master, link, 0);
+	}
+
+	return 0;
+}
+
+static int fsi_slave_remove_device(struct device *dev, void *arg)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+static int fsi_master_remove_slave(struct device *dev, void *arg)
+{
+	device_for_each_child(dev, NULL, fsi_slave_remove_device);
+	device_unregister(dev);
+	return 0;
+}
+
+static void fsi_master_unscan(struct fsi_master *master)
+{
+	device_for_each_child(&master->dev, NULL, fsi_master_remove_slave);
+}
+
+static ssize_t master_rescan_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master *master = to_fsi_master(dev);
+	int rc;
+
+	fsi_master_unscan(master);
+	rc = fsi_master_scan(master);
+	if (rc < 0)
+		return rc;
+
+	return count;
+}
+
+static DEVICE_ATTR(rescan, 0200, NULL, master_rescan_store);
+
+static ssize_t master_break_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct fsi_master *master = to_fsi_master(dev);
+
+	fsi_master_break(master, 0);
+
+	return count;
+}
+
+static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+
+int fsi_master_register(struct fsi_master *master)
+{
+	int rc;
+
+	if (!master)
+		return -EINVAL;
+
+	master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+	dev_set_name(&master->dev, "fsi%d", master->idx);
+
+	rc = device_register(&master->dev);
+	if (rc) {
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	rc = device_create_file(&master->dev, &dev_attr_rescan);
+	if (rc) {
+		device_unregister(&master->dev);
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	rc = device_create_file(&master->dev, &dev_attr_break);
+	if (rc) {
+		device_unregister(&master->dev);
+		ida_simple_remove(&master_ida, master->idx);
+		return rc;
+	}
+
+	fsi_master_scan(master);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_master_register);
+
+void fsi_master_unregister(struct fsi_master *master)
+{
+	if (master->idx >= 0) {
+		ida_simple_remove(&master_ida, master->idx);
+		master->idx = -1;
+	}
+
+	fsi_master_unscan(master);
+	device_unregister(&master->dev);
+}
+EXPORT_SYMBOL_GPL(fsi_master_unregister);
 
 /* FSI core & Linux bus type definitions */
 
@@ -39,6 +860,23 @@ static int fsi_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
+int fsi_driver_register(struct fsi_driver *fsi_drv)
+{
+	if (!fsi_drv)
+		return -EINVAL;
+	if (!fsi_drv->id_table)
+		return -EINVAL;
+
+	return driver_register(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_register);
+
+void fsi_driver_unregister(struct fsi_driver *fsi_drv)
+{
+	driver_unregister(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_unregister);
+
 struct bus_type fsi_bus_type = {
 	.name		= "fsi",
 	.match		= fsi_bus_match,
@@ -57,3 +895,6 @@ static void fsi_exit(void)
 
 module_init(fsi_init);
 module_exit(fsi_exit);
+module_param(discard_errors, int, 0664);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
new file mode 100644
index 0000000..ae26187
--- /dev/null
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -0,0 +1,604 @@
+/*
+ * An FSI master controller, using a simple GPIO bit-banging interface
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "fsi-master.h"
+
+#define	FSI_GPIO_STD_DLY	1	/* Standard pin delay in ns */
+#define	FSI_ECHO_DELAY_CLOCKS	16	/* Number of clocks for echo delay */
+#define	FSI_PRE_BREAK_CLOCKS	50	/* Number of clocks to prep for break */
+#define	FSI_BREAK_CLOCKS	256	/* Number of clocks to issue break */
+#define	FSI_POST_BREAK_CLOCKS	16000	/* Number of clocks to set up cfam */
+#define	FSI_INIT_CLOCKS		5000	/* Clock out any old data */
+#define	FSI_GPIO_STD_DELAY	10	/* Standard GPIO delay in ns */
+					/* todo: adjust down as low as */
+					/* possible or eliminate */
+#define	FSI_GPIO_CMD_DPOLL      0x2
+#define	FSI_GPIO_CMD_TERM	0x3f
+#define FSI_GPIO_CMD_ABS_AR	0x4
+
+#define	FSI_GPIO_DPOLL_CLOCKS	100      /* < 21 will cause slave to hang */
+
+/* Bus errors */
+#define	FSI_GPIO_ERR_BUSY	1	/* Slave stuck in busy state */
+#define	FSI_GPIO_RESP_ERRA	2	/* Any (misc) Error */
+#define	FSI_GPIO_RESP_ERRC	3	/* Slave reports master CRC error */
+#define	FSI_GPIO_MTOE		4	/* Master time out error */
+#define	FSI_GPIO_CRC_INVAL	5	/* Master reports slave CRC error */
+
+/* Normal slave responses */
+#define	FSI_GPIO_RESP_BUSY	1
+#define	FSI_GPIO_RESP_ACK	0
+#define	FSI_GPIO_RESP_ACKD	4
+
+#define	FSI_GPIO_MAX_BUSY	100
+#define	FSI_GPIO_MTOE_COUNT	1000
+#define	FSI_GPIO_DRAIN_BITS	20
+#define	FSI_GPIO_CRC_SIZE	4
+#define	FSI_GPIO_MSG_ID_SIZE		2
+#define	FSI_GPIO_MSG_RESPID_SIZE	2
+#define	FSI_GPIO_PRIME_SLAVE_CLOCKS	100
+
+struct fsi_master_gpio {
+	struct fsi_master	master;
+	struct device		*dev;
+	spinlock_t		cmd_lock;	/* Lock for commands */
+	struct gpio_desc	*gpio_clk;
+	struct gpio_desc	*gpio_data;
+	struct gpio_desc	*gpio_trans;	/* Voltage translator */
+	struct gpio_desc	*gpio_enable;	/* FSI enable */
+	struct gpio_desc	*gpio_mux;	/* Mux control */
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_gpio.h>
+
+#define to_fsi_master_gpio(m) container_of(m, struct fsi_master_gpio, master)
+
+struct fsi_gpio_msg {
+	uint64_t	msg;
+	uint8_t		bits;
+};
+
+static void clock_toggle(struct fsi_master_gpio *master, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		ndelay(FSI_GPIO_STD_DLY);
+		gpiod_set_value(master->gpio_clk, 0);
+		ndelay(FSI_GPIO_STD_DLY);
+		gpiod_set_value(master->gpio_clk, 1);
+	}
+}
+
+static int sda_in(struct fsi_master_gpio *master)
+{
+	int in;
+
+	ndelay(FSI_GPIO_STD_DLY);
+	in = gpiod_get_value(master->gpio_data);
+	return in ? 1 : 0;
+}
+
+static void sda_out(struct fsi_master_gpio *master, int value)
+{
+	gpiod_set_value(master->gpio_data, value);
+}
+
+static void set_sda_input(struct fsi_master_gpio *master)
+{
+	gpiod_direction_input(master->gpio_data);
+	gpiod_set_value(master->gpio_trans, 0);
+}
+
+static void set_sda_output(struct fsi_master_gpio *master, int value)
+{
+	gpiod_set_value(master->gpio_trans, 1);
+	gpiod_direction_output(master->gpio_data, value);
+}
+
+static void clock_zeros(struct fsi_master_gpio *master, int count)
+{
+	set_sda_output(master, 1);
+	clock_toggle(master, count);
+}
+
+static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
+			uint8_t num_bits)
+{
+	uint8_t bit, in_bit;
+
+	set_sda_input(master);
+
+	for (bit = 0; bit < num_bits; bit++) {
+		clock_toggle(master, 1);
+		in_bit = sda_in(master);
+		msg->msg <<= 1;
+		msg->msg |= ~in_bit & 0x1;	/* Data is active low */
+	}
+	msg->bits += num_bits;
+
+	trace_fsi_master_gpio_in(master, num_bits, msg->msg);
+}
+
+static void serial_out(struct fsi_master_gpio *master,
+			const struct fsi_gpio_msg *cmd)
+{
+	uint8_t bit;
+	uint64_t msg = ~cmd->msg;	/* Data is active low */
+	uint64_t sda_mask = 0x1ULL << (cmd->bits - 1);
+	uint64_t last_bit = ~0;
+	int next_bit;
+
+	trace_fsi_master_gpio_out(master, cmd->bits, cmd->msg);
+
+	if (!cmd->bits) {
+		dev_warn(master->dev, "trying to output 0 bits\n");
+		return;
+	}
+	set_sda_output(master, 0);
+
+	/* Send the start bit */
+	sda_out(master, 0);
+	clock_toggle(master, 1);
+
+	/* Send the message */
+	for (bit = 0; bit < cmd->bits; bit++) {
+		next_bit = (msg & sda_mask) >> (cmd->bits - 1);
+		if (last_bit ^ next_bit) {
+			sda_out(master, next_bit);
+			last_bit = next_bit;
+		}
+		clock_toggle(master, 1);
+		msg <<= 1;
+	}
+}
+
+static void msg_push_bits(struct fsi_gpio_msg *msg, uint64_t data, int bits)
+{
+	msg->msg <<= bits;
+	msg->msg |= data & ((1ull << bits) - 1);
+	msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_gpio_msg *msg)
+{
+	uint8_t crc;
+	int top;
+
+	top = msg->bits & 0x3;
+
+	/* start bit, and any non-aligned top bits */
+	crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+	/* aligned bits */
+	crc = crc4(crc, msg->msg, msg->bits - top);
+
+	msg_push_bits(msg, crc, 4);
+}
+
+/*
+ * Encode an Absolute Address command
+ */
+static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
+		uint8_t id, uint32_t addr, size_t size, const void *data)
+{
+	bool write = !!data;
+	uint8_t ds;
+	int i;
+
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, id, 2);
+	msg_push_bits(cmd, FSI_GPIO_CMD_ABS_AR, 3);
+	msg_push_bits(cmd, write ? 0 : 1, 1);
+
+	/*
+	 * The read/write size is encoded in the lower bits of the address
+	 * (as it must be naturally-aligned), and the following ds bit.
+	 *
+	 *	size	addr:1	addr:0	ds
+	 *	1	x	x	0
+	 *	2	x	0	1
+	 *	4	0	1	1
+	 *
+	 */
+	ds = size > 1 ? 1 : 0;
+	addr &= ~(size - 1);
+	if (size == 4)
+		addr |= 1;
+
+	msg_push_bits(cmd, addr & ((1 << 21) - 1), 21);
+	msg_push_bits(cmd, ds, 1);
+	for (i = 0; write && i < size; i++)
+		msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+	msg_push_crc(cmd);
+}
+
+static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_GPIO_CMD_DPOLL, 3);
+	msg_push_crc(cmd);
+}
+
+static void echo_delay(struct fsi_master_gpio *master)
+{
+	set_sda_output(master, 1);
+	clock_toggle(master, FSI_ECHO_DELAY_CLOCKS);
+}
+
+static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+	cmd->bits = 0;
+	cmd->msg = 0;
+
+	msg_push_bits(cmd, slave_id, 2);
+	msg_push_bits(cmd, FSI_GPIO_CMD_TERM, 6);
+	msg_push_crc(cmd);
+}
+
+/*
+ * Store information on master errors so handler can detect and clean
+ * up the bus
+ */
+static void fsi_master_gpio_error(struct fsi_master_gpio *master, int error)
+{
+
+}
+
+static int read_one_response(struct fsi_master_gpio *master,
+		uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
+{
+	struct fsi_gpio_msg msg;
+	uint8_t id, tag;
+	uint32_t crc;
+	int i;
+
+	/* wait for the start bit */
+	for (i = 0; i < FSI_GPIO_MTOE_COUNT; i++) {
+		msg.bits = 0;
+		msg.msg = 0;
+		serial_in(master, &msg, 1);
+		if (msg.msg)
+			break;
+	}
+	if (i == FSI_GPIO_MTOE_COUNT) {
+		dev_dbg(master->dev,
+			"Master time out waiting for response\n");
+		fsi_master_gpio_error(master, FSI_GPIO_MTOE);
+		return -EIO;
+	}
+
+	msg.bits = 0;
+	msg.msg = 0;
+
+	/* Read slave ID & response tag */
+	serial_in(master, &msg, 4);
+
+	id = (msg.msg >> FSI_GPIO_MSG_RESPID_SIZE) & 0x3;
+	tag = msg.msg & 0x3;
+
+	/* If we have an ACK and we're expecting data, clock the data in too */
+	if (tag == FSI_GPIO_RESP_ACK && data_size)
+		serial_in(master, &msg, data_size * 8);
+
+	/* read CRC */
+	serial_in(master, &msg, FSI_GPIO_CRC_SIZE);
+
+	/* we have a whole message now; check CRC */
+	crc = crc4(0, 1, 1);
+	crc = crc4(crc, msg.msg, msg.bits);
+	if (crc) {
+		dev_dbg(master->dev, "ERR response CRC\n");
+		fsi_master_gpio_error(master, FSI_GPIO_CRC_INVAL);
+		return -EIO;
+	}
+
+	if (msgp)
+		*msgp = msg;
+	if (tagp)
+		*tagp = tag;
+
+	return 0;
+}
+
+static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
+{
+	struct fsi_gpio_msg cmd;
+	uint8_t tag;
+	int rc;
+
+	build_term_command(&cmd, slave);
+	serial_out(master, &cmd);
+	echo_delay(master);
+
+	rc = read_one_response(master, 0, NULL, &tag);
+	if (rc < 0) {
+		dev_err(master->dev,
+				"TERM failed; lost communication with slave\n");
+		return -EIO;
+	} else if (tag != FSI_GPIO_RESP_ACK) {
+		dev_err(master->dev, "TERM failed; response %d\n", tag);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int poll_for_response(struct fsi_master_gpio *master,
+		uint8_t slave, uint8_t size, void *data)
+{
+	struct fsi_gpio_msg response, cmd;
+	int busy_count = 0, rc, i;
+	uint8_t tag;
+	uint8_t *data_byte = data;
+
+retry:
+	rc = read_one_response(master, size, &response, &tag);
+	if (rc)
+		return rc;
+
+	switch (tag) {
+	case FSI_GPIO_RESP_ACK:
+		if (size && data) {
+			uint64_t val = response.msg;
+			/* clear crc & mask */
+			val >>= 4;
+			val &= (1ull << (size * 8)) - 1;
+
+			for (i = 0; i < size; i++) {
+				data_byte[size-i-1] = val;
+				val >>= 8;
+			}
+		}
+		break;
+	case FSI_GPIO_RESP_BUSY:
+		/*
+		 * It's necessary to clock the slave before issuing a
+		 * d-poll; this is not indicated in the hardware protocol
+		 * spec. < 20 clocks causes the slave to hang, 21 is OK.
+		 */
+		clock_zeros(master, FSI_GPIO_DPOLL_CLOCKS);
+		if (busy_count++ < FSI_GPIO_MAX_BUSY) {
+			build_dpoll_command(&cmd, slave);
+			serial_out(master, &cmd);
+			echo_delay(master);
+			goto retry;
+		}
+		dev_warn(master->dev,
+			"ERR slave is stuck in busy state, issuing TERM\n");
+		issue_term(master, slave);
+		rc = -EIO;
+		break;
+
+	case FSI_GPIO_RESP_ERRA:
+	case FSI_GPIO_RESP_ERRC:
+		dev_dbg(master->dev, "ERR%c received: 0x%x\n",
+			tag == FSI_GPIO_RESP_ERRA ? 'A' : 'C',
+			(int)response.msg);
+		fsi_master_gpio_error(master, response.msg);
+		rc = -EIO;
+		break;
+	}
+
+	/* Clock the slave enough to be ready for next operation */
+	clock_zeros(master, FSI_GPIO_PRIME_SLAVE_CLOCKS);
+	return rc;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+		struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&master->cmd_lock, flags);
+	serial_out(master, cmd);
+	echo_delay(master);
+	rc = poll_for_response(master, slave, resp_len, resp);
+	spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+	return rc;
+}
+
+static int fsi_master_gpio_read(struct fsi_master *_master, int link,
+		uint8_t id, uint32_t addr, void *val, size_t size)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+
+	if (link != 0)
+		return -ENODEV;
+
+	build_abs_ar_command(&cmd, id, addr, size, NULL);
+	return fsi_master_gpio_xfer(master, id, &cmd, size, val);
+}
+
+static int fsi_master_gpio_write(struct fsi_master *_master, int link,
+		uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+
+	if (link != 0)
+		return -ENODEV;
+
+	build_abs_ar_command(&cmd, id, addr, size, val);
+	return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+}
+
+static int fsi_master_gpio_term(struct fsi_master *_master,
+		int link, uint8_t id)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+	struct fsi_gpio_msg cmd;
+
+	if (link != 0)
+		return -ENODEV;
+
+	build_term_command(&cmd, id);
+	return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+}
+
+static int fsi_master_gpio_break(struct fsi_master *_master, int link)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+	if (link != 0)
+		return -ENODEV;
+
+	trace_fsi_master_gpio_break(master);
+
+	set_sda_output(master, 1);
+	sda_out(master, 1);
+	clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
+	sda_out(master, 0);
+	clock_toggle(master, FSI_BREAK_CLOCKS);
+	echo_delay(master);
+	sda_out(master, 1);
+	clock_toggle(master, FSI_POST_BREAK_CLOCKS);
+
+	/* Wait for logic reset to take effect */
+	udelay(200);
+
+	return 0;
+}
+
+static void fsi_master_gpio_init(struct fsi_master_gpio *master)
+{
+	gpiod_direction_output(master->gpio_mux, 1);
+	gpiod_direction_output(master->gpio_trans, 1);
+	gpiod_direction_output(master->gpio_enable, 1);
+	gpiod_direction_output(master->gpio_clk, 1);
+	gpiod_direction_output(master->gpio_data, 1);
+
+	/* todo: evaluate if clocks can be reduced */
+	clock_zeros(master, FSI_INIT_CLOCKS);
+}
+
+static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
+{
+	struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+	if (link != 0)
+		return -ENODEV;
+	gpiod_set_value(master->gpio_enable, 1);
+
+	return 0;
+}
+
+static int fsi_master_gpio_probe(struct platform_device *pdev)
+{
+	struct fsi_master_gpio *master;
+	struct gpio_desc *gpio;
+
+	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return -ENOMEM;
+
+	master->dev = &pdev->dev;
+	master->master.dev.parent = master->dev;
+
+	gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get clock gpio\n");
+		return PTR_ERR(gpio);
+	}
+	master->gpio_clk = gpio;
+
+	gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get data gpio\n");
+		return PTR_ERR(gpio);
+	}
+	master->gpio_data = gpio;
+
+	/* Optional GPIOs */
+	gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get trans gpio\n");
+		return PTR_ERR(gpio);
+	}
+	master->gpio_trans = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get enable gpio\n");
+		return PTR_ERR(gpio);
+	}
+	master->gpio_enable = gpio;
+
+	gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+	if (IS_ERR(gpio)) {
+		dev_err(&pdev->dev, "failed to get mux gpio\n");
+		return PTR_ERR(gpio);
+	}
+	master->gpio_mux = gpio;
+
+	master->master.n_links = 1;
+	master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+	master->master.read = fsi_master_gpio_read;
+	master->master.write = fsi_master_gpio_write;
+	master->master.term = fsi_master_gpio_term;
+	master->master.send_break = fsi_master_gpio_break;
+	master->master.link_enable = fsi_master_gpio_link_enable;
+	platform_set_drvdata(pdev, master);
+	spin_lock_init(&master->cmd_lock);
+
+	fsi_master_gpio_init(master);
+
+	return fsi_master_register(&master->master);
+}
+
+
+static int fsi_master_gpio_remove(struct platform_device *pdev)
+{
+	struct fsi_master_gpio *master = platform_get_drvdata(pdev);
+
+	devm_gpiod_put(&pdev->dev, master->gpio_clk);
+	devm_gpiod_put(&pdev->dev, master->gpio_data);
+	if (master->gpio_trans)
+		devm_gpiod_put(&pdev->dev, master->gpio_trans);
+	if (master->gpio_enable)
+		devm_gpiod_put(&pdev->dev, master->gpio_enable);
+	if (master->gpio_mux)
+		devm_gpiod_put(&pdev->dev, master->gpio_mux);
+	fsi_master_unregister(&master->master);
+
+	return 0;
+}
+
+static const struct of_device_id fsi_master_gpio_match[] = {
+	{ .compatible = "fsi-master-gpio" },
+	{ },
+};
+
+static struct platform_driver fsi_master_gpio_driver = {
+	.driver = {
+		.name		= "fsi-master-gpio",
+		.of_match_table	= fsi_master_gpio_match,
+	},
+	.probe	= fsi_master_gpio_probe,
+	.remove = fsi_master_gpio_remove,
+};
+
+module_platform_driver(fsi_master_gpio_driver);
+MODULE_LICENSE("GPL");
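
The size-in-address encoding performed by build_abs_ar_command() above is easy to misread, so here is a minimal stand-alone sketch of just that transformation. The helper name encode_abs_ar_addr() and the sample address are invented for illustration; they are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the addr/ds handling in build_abs_ar_command(): the transfer size
 * is folded into the naturally-aligned low address bits plus the trailing
 * ds bit (ds = 0 only for single-byte accesses). */
static uint32_t encode_abs_ar_addr(uint32_t addr, unsigned int size, uint8_t *ds)
{
	*ds = size > 1 ? 1 : 0;
	addr &= ~(uint32_t)(size - 1);		/* naturally align the address */
	if (size == 4)
		addr |= 1;			/* low bit set marks a 4-byte access */
	return addr & ((1 << 21) - 1);		/* only 21 address bits go on the wire */
}

int main(void)
{
	uint8_t ds;
	uint32_t a = encode_abs_ar_addr(0x1028, 4, &ds);

	printf("addr bits 0x%06x, ds %u\n", (unsigned int)a, ds);	/* 0x001029, ds 1 */
	return 0;
}
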
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
new file mode 100644
index 0000000..133b9bf
--- /dev/null
+++ b/drivers/fsi/fsi-master-hub.c
@@ -0,0 +1,327 @@
+/*
+ * FSI hub master driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "fsi-master.h"
+
+/* Control Registers */
+#define FSI_MMODE		0x0		/* R/W: mode */
+#define FSI_MDLYR		0x4		/* R/W: delay */
+#define FSI_MCRSP		0x8		/* R/W: clock rate */
+#define FSI_MENP0		0x10		/* R/W: enable */
+#define FSI_MLEVP0		0x18		/* R: plug detect */
+#define FSI_MSENP0		0x18		/* S: Set enable */
+#define FSI_MCENP0		0x20		/* C: Clear enable */
+#define FSI_MAEB		0x70		/* R: Error address */
+#define FSI_MVER		0x74		/* R: master version/type */
+#define FSI_MRESP0		0xd0		/* W: Port reset */
+#define FSI_MESRB0		0x1d0		/* R: Master error status */
+#define FSI_MRESB0		0x1d0		/* W: Reset bridge */
+#define FSI_MECTRL		0x2e0		/* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP		0x80000000	/* Enable interrupt polling */
+#define FSI_MMODE_ECRC		0x40000000	/* Enable error recovery */
+#define FSI_MMODE_EPC		0x10000000	/* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB	0x00000010	/* Timeout value LSB */
+						/*   MSB=1, LSB=0 is 0.8 ms */
+						/*   MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT	18		/* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK	0x3ff		/* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT	8		/* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK	0x3ff		/* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN	0x80000000	/* General reset */
+#define FSI_MRESB_RST_ERR	0x40000000	/* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000	/* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK	0x10000000	/* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR	0x08000000	/* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE	0x04000000	/* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL	0xfc000000	/* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE		0x8000		/* Enable machine check when */
+						/* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM	0x4000		/* Auto terminate */
+
+#define FSI_ENGID_HUB_MASTER		0x1c
+#define FSI_HUB_LINK_OFFSET		0x80000
+#define FSI_HUB_LINK_SIZE		0x80000
+#define FSI_HUB_MASTER_MAX_LINKS	8
+
+#define FSI_LINK_ENABLE_SETUP_TIME	10	/* in ms */
+
+/*
+ * FSI hub master support
+ *
+ * A hub master increases the number of potential target devices that the
+ * primary FSI master can access. For each link a primary master supports,
+ * each of those links can in turn be chained to a hub master with multiple
+ * links of its own.
+ *
+ * The hub is controlled by a set of control registers exposed as a regular fsi
+ * device (the hub->upstream device), and provides access to the downstream FSI
+ * bus as through an address range on the slave itself (->addr and ->size).
+ *
+ * [This differs from "cascaded" masters, which expose the entire downstream
+ * bus entirely through the fsi device address range, and so have a smaller
+ * accessible address space.]
+ */
+struct fsi_master_hub {
+	struct fsi_master	master;
+	struct fsi_device	*upstream;
+	uint32_t		addr, size;	/* slave-relative addr of */
+						/* master address space */
+};
+
+#define to_fsi_master_hub(m) container_of(m, struct fsi_master_hub, master)
+
+static int hub_master_read(struct fsi_master *master, int link,
+			uint8_t id, uint32_t addr, void *val, size_t size)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+	if (id != 0)
+		return -EINVAL;
+
+	addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+	return fsi_slave_read(hub->upstream->slave, addr, val, size);
+}
+
+static int hub_master_write(struct fsi_master *master, int link,
+			uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+	if (id != 0)
+		return -EINVAL;
+
+	addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+	return fsi_slave_write(hub->upstream->slave, addr, val, size);
+}
+
+static int hub_master_break(struct fsi_master *master, int link)
+{
+	uint32_t addr, cmd;
+
+	addr = 0x4;
+	cmd = cpu_to_be32(0xc0de0000);
+
+	return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
+}
+
+static int hub_master_link_enable(struct fsi_master *master, int link)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(master);
+	int idx, bit;
+	__be32 reg;
+	int rc;
+
+	idx = link / 32;
+	bit = link % 32;
+
+	reg = cpu_to_be32(0x80000000 >> bit);
+
+	rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), &reg, 4);
+
+	mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+	fsi_device_read(hub->upstream, FSI_MENP0 + (4 * idx), &reg, 4);
+
+	return rc;
+}
+
+static void hub_master_release(struct device *dev)
+{
+	struct fsi_master_hub *hub = to_fsi_master_hub(dev_to_fsi_master(dev));
+
+	kfree(hub);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+	return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+	return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int hub_master_init(struct fsi_master_hub *hub)
+{
+	struct fsi_device *dev = hub->upstream;
+	__be32 reg;
+	int rc;
+
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+			| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Initialize the MFSI (hub master) engine */
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+			| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+	rc = fsi_device_write(dev, FSI_MECTRL, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MMODE_EIP | FSI_MMODE_ECRC | FSI_MMODE_EPC
+			| fsi_mmode_crs0(1) | fsi_mmode_crs1(1)
+			| FSI_MMODE_P8_TO_LSB);
+	rc = fsi_device_write(dev, FSI_MMODE, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(0xffff0000);
+	rc = fsi_device_write(dev, FSI_MDLYR, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = ~0;
+	rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Leave enabled long enough for master logic to set up */
+	mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+	rc = fsi_device_write(dev, FSI_MCENP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	rc = fsi_device_read(dev, FSI_MAEB, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+	rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	rc = fsi_device_read(dev, FSI_MLEVP0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	/* Reset the master bridge */
+	reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+	rc = fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+	if (rc)
+		return rc;
+
+	reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+	return fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+}
+
+static int hub_master_probe(struct device *dev)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct fsi_master_hub *hub;
+	uint32_t reg, links;
+	__be32 __reg;
+	int rc;
+
+	rc = fsi_device_read(fsi_dev, FSI_MVER, &__reg, sizeof(__reg));
+	if (rc)
+		return rc;
+
+	reg = be32_to_cpu(__reg);
+	links = (reg >> 8) & 0xff;
+	dev_info(dev, "hub version %08x (%d links)\n", reg, links);
+
+	rc = fsi_slave_claim_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+			FSI_HUB_LINK_SIZE * links);
+	if (rc) {
+		dev_err(dev, "can't claim slave address range for links");
+		return rc;
+	}
+
+	hub = kzalloc(sizeof(*hub), GFP_KERNEL);
+	if (!hub) {
+		rc = -ENOMEM;
+		goto err_release;
+	}
+
+	hub->addr = FSI_HUB_LINK_OFFSET;
+	hub->size = FSI_HUB_LINK_SIZE * links;
+	hub->upstream = fsi_dev;
+
+	hub->master.dev.parent = dev;
+	hub->master.dev.release = hub_master_release;
+
+	hub->master.n_links = links;
+	hub->master.read = hub_master_read;
+	hub->master.write = hub_master_write;
+	hub->master.send_break = hub_master_break;
+	hub->master.link_enable = hub_master_link_enable;
+
+	dev_set_drvdata(dev, hub);
+
+	hub_master_init(hub);
+
+	rc = fsi_master_register(&hub->master);
+	if (!rc)
+		return 0;
+
+	kfree(hub);
+err_release:
+	fsi_slave_release_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+			FSI_HUB_LINK_SIZE * links);
+	return rc;
+}
+
+static int hub_master_remove(struct device *dev)
+{
+	struct fsi_master_hub *hub = dev_get_drvdata(dev);
+
+	fsi_master_unregister(&hub->master);
+	fsi_slave_release_range(hub->upstream->slave, hub->addr, hub->size);
+	return 0;
+}
+
+static struct fsi_device_id hub_master_ids[] = {
+	{
+		.engine_type = FSI_ENGID_HUB_MASTER,
+		.version = FSI_VERSION_ANY,
+	},
+	{ 0 }
+};
+
+static struct fsi_driver hub_master_driver = {
+	.id_table = hub_master_ids,
+	.drv = {
+		.name = "fsi-master-hub",
+		.bus = &fsi_bus_type,
+		.probe = hub_master_probe,
+		.remove = hub_master_remove,
+	}
+};
+
+module_fsi_driver(hub_master_driver);
+MODULE_LICENSE("GPL");
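
The address arithmetic in hub_master_read()/hub_master_write() above is the whole trick behind the hub: each downstream link gets its own window behind the upstream slave. A small stand-alone sketch follows; the constants mirror FSI_HUB_LINK_OFFSET and FSI_HUB_LINK_SIZE, and the function itself is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

#define HUB_LINK_OFFSET	0x80000		/* mirrors FSI_HUB_LINK_OFFSET */
#define HUB_LINK_SIZE	0x80000		/* mirrors FSI_HUB_LINK_SIZE */

/* Slave-relative address for register @addr behind downstream link @link */
static uint32_t hub_slave_addr(unsigned int link, uint32_t addr)
{
	return HUB_LINK_OFFSET + link * HUB_LINK_SIZE + addr;
}

int main(void)
{
	/* register 0x1028 behind link 2 lands at 0x181028 on the upstream slave */
	printf("0x%x\n", (unsigned int)hub_slave_addr(2, 0x1028));
	return 0;
}
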
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
new file mode 100644
index 0000000..12f7b11
--- /dev/null
+++ b/drivers/fsi/fsi-master.h
@@ -0,0 +1,43 @@
+/*
+ * FSI master definitions. These comprise the core <--> master interface,
+ * to allow the core to interact with the (hardware-specific) masters.
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DRIVERS_FSI_MASTER_H
+#define DRIVERS_FSI_MASTER_H
+
+#include <linux/device.h>
+
+#define FSI_MASTER_FLAG_SWCLOCK		0x1
+
+struct fsi_master {
+	struct device	dev;
+	int		idx;
+	int		n_links;
+	int		flags;
+	int		(*read)(struct fsi_master *, int link, uint8_t id,
+				uint32_t addr, void *val, size_t size);
+	int		(*write)(struct fsi_master *, int link, uint8_t id,
+				uint32_t addr, const void *val, size_t size);
+	int		(*term)(struct fsi_master *, int link, uint8_t id);
+	int		(*send_break)(struct fsi_master *, int link);
+	int		(*link_enable)(struct fsi_master *, int link);
+};
+
+#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
+
+extern int fsi_master_register(struct fsi_master *master);
+extern void fsi_master_unregister(struct fsi_master *master);
+
+#endif /* DRIVERS_FSI_MASTER_H */
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
new file mode 100644
index 0000000..98d062f
--- /dev/null
+++ b/drivers/fsi/fsi-scom.c
@@ -0,0 +1,263 @@
+/*
+ * SCOM FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#define FSI_ENGID_SCOM		0x5
+
+#define SCOM_FSI2PIB_DELAY	50
+
+/* SCOM engine register set */
+#define SCOM_DATA0_REG		0x00
+#define SCOM_DATA1_REG		0x04
+#define SCOM_CMD_REG		0x08
+#define SCOM_RESET_REG		0x1C
+
+#define SCOM_RESET_CMD		0x80000000
+#define SCOM_WRITE_CMD		0x80000000
+
+struct scom_device {
+	struct list_head link;
+	struct fsi_device *fsi_dev;
+	struct miscdevice mdev;
+	char	name[32];
+	int idx;
+};
+
+#define to_scom_dev(x)		container_of((x), struct scom_device, mdev)
+
+static struct list_head scom_devices;
+
+static DEFINE_IDA(scom_ida);
+
+static int put_scom(struct scom_device *scom_dev, uint64_t value,
+			uint32_t addr)
+{
+	int rc;
+	uint32_t data;
+
+	data = cpu_to_be32(SCOM_RESET_CMD);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	data = cpu_to_be32((value >> 32) & 0xffffffff);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	data = cpu_to_be32(value & 0xffffffff);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	data = cpu_to_be32(SCOM_WRITE_CMD | addr);
+	return fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+				sizeof(uint32_t));
+}
+
+static int get_scom(struct scom_device *scom_dev, uint64_t *value,
+			uint32_t addr)
+{
+	uint32_t result, data;
+	int rc;
+
+	*value = 0ULL;
+	data = cpu_to_be32(addr);
+	rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &result,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	*value |= (uint64_t)cpu_to_be32(result) << 32;
+	rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &result,
+				sizeof(uint32_t));
+	if (rc)
+		return rc;
+
+	*value |= cpu_to_be32(result);
+
+	return 0;
+}
+
+static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
+			loff_t *offset)
+{
+	int rc;
+	struct miscdevice *mdev =
+				(struct miscdevice *)filep->private_data;
+	struct scom_device *scom = to_scom_dev(mdev);
+	struct device *dev = &scom->fsi_dev->dev;
+	uint64_t val;
+
+	if (len != sizeof(uint64_t))
+		return -EINVAL;
+
+	rc = get_scom(scom, &val, *offset);
+	if (rc) {
+		dev_dbg(dev, "get_scom fail:%d\n", rc);
+		return rc;
+	}
+
+	rc = copy_to_user(buf, &val, len);
+	if (rc) {
+		dev_dbg(dev, "copy to user failed:%d\n", rc);
+		return -EFAULT;
+	}
+
+	return len;
+}
+
+static ssize_t scom_write(struct file *filep, const char __user *buf,
+			size_t len, loff_t *offset)
+{
+	int rc;
+	struct miscdevice *mdev = filep->private_data;
+	struct scom_device *scom = to_scom_dev(mdev);
+	struct device *dev = &scom->fsi_dev->dev;
+	uint64_t val;
+
+	if (len != sizeof(uint64_t))
+		return -EINVAL;
+
+	rc = copy_from_user(&val, buf, len);
+	if (rc) {
+		dev_dbg(dev, "copy from user failed:%d\n", rc);
+		return -EINVAL;
+	}
+
+	rc = put_scom(scom, val, *offset);
+	if (rc) {
+		dev_dbg(dev, "put_scom failed with:%d\n", rc);
+		return rc;
+	}
+
+	return len;
+}
+
+static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR:
+		break;
+	case SEEK_SET:
+		file->f_pos = offset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return offset;
+}
+
+static const struct file_operations scom_fops = {
+	.owner	= THIS_MODULE,
+	.llseek	= scom_llseek,
+	.read	= scom_read,
+	.write	= scom_write,
+};
+
+static int scom_probe(struct device *dev)
+{
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+	struct scom_device *scom;
+
+	scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+	if (!scom)
+		return -ENOMEM;
+
+	scom->idx = ida_simple_get(&scom_ida, 1, INT_MAX, GFP_KERNEL);
+	snprintf(scom->name, sizeof(scom->name), "scom%d", scom->idx);
+	scom->fsi_dev = fsi_dev;
+	scom->mdev.minor = MISC_DYNAMIC_MINOR;
+	scom->mdev.fops = &scom_fops;
+	scom->mdev.name = scom->name;
+	scom->mdev.parent = dev;
+	list_add(&scom->link, &scom_devices);
+
+	return misc_register(&scom->mdev);
+}
+
+static int scom_remove(struct device *dev)
+{
+	struct scom_device *scom, *scom_tmp;
+	struct fsi_device *fsi_dev = to_fsi_dev(dev);
+
+	list_for_each_entry_safe(scom, scom_tmp, &scom_devices, link) {
+		if (scom->fsi_dev == fsi_dev) {
+			list_del(&scom->link);
+			ida_simple_remove(&scom_ida, scom->idx);
+			misc_deregister(&scom->mdev);
+		}
+	}
+
+	return 0;
+}
+
+static struct fsi_device_id scom_ids[] = {
+	{
+		.engine_type = FSI_ENGID_SCOM,
+		.version = FSI_VERSION_ANY,
+	},
+	{ 0 }
+};
+
+static struct fsi_driver scom_drv = {
+	.id_table = scom_ids,
+	.drv = {
+		.name = "scom",
+		.bus = &fsi_bus_type,
+		.probe = scom_probe,
+		.remove = scom_remove,
+	}
+};
+
+static int scom_init(void)
+{
+	INIT_LIST_HEAD(&scom_devices);
+	return fsi_driver_register(&scom_drv);
+}
+
+static void scom_exit(void)
+{
+	struct list_head *pos;
+	struct scom_device *scom;
+
+	list_for_each(pos, &scom_devices) {
+		scom = list_entry(pos, struct scom_device, link);
+		misc_deregister(&scom->mdev);
+		devm_kfree(&scom->fsi_dev->dev, scom);
+	}
+	fsi_driver_unregister(&scom_drv);
+}
+
+module_init(scom_init);
+module_exit(scom_exit);
+MODULE_LICENSE("GPL");
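
Because scom_llseek()/scom_read()/scom_write() above take the SCOM address from the file offset and require exactly 8-byte transfers, userspace access maps naturally onto pread()/pwrite(). A hedged usage sketch follows; the /dev/scom1 path assumes the usual device node created for the first "scom%d" misc device, and the SCOM address 0x2010000 is an arbitrary example value.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/scom1", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* the driver reads the SCOM address from the file offset, so pread()
	 * with the address as offset performs one 64-bit register read */
	if (pread(fd, &val, sizeof(val), 0x2010000) != (ssize_t)sizeof(val)) {
		close(fd);
		return 1;
	}

	printf("scom 0x2010000 = 0x%016llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}
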
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index ccea609..4ca436e 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -646,6 +646,9 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
 	int rc;
 	int i;
 
+	if (!gpio->clk)
+		return -EINVAL;
+
 	rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
 	if (rc < 0) {
 		dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 2197368..e60156e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
 {
 	int reg;
 
-	if (gpio == 94)
-		return GPIOPANELCTL;
+	if (gpio >= CRYSTALCOVE_GPIO_NUM) {
+		/*
+		 * Virtual GPIO called from ACPI, for now we only support
+		 * the panel ctl.
+		 */
+		switch (gpio) {
+		case 0x5e:
+			return GPIOPANELCTL;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
 
 	if (reg_type == CTRL_IN) {
 		if (gpio < 8)
@@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
 static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-			    CTLO_INPUT_SET);
+	return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
 }
 
 static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
 				    int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-			    CTLO_OUTPUT_SET | value);
+	return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
 }
 
 static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
-	int ret;
 	unsigned int val;
+	int ret, reg = to_reg(gpio, CTRL_IN);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return 0;
 
-	ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+	ret = regmap_read(cg->regmap, reg, &val);
 	if (ret)
 		return ret;
 
@@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
 				 unsigned gpio, int value)
 {
 	struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+	int reg = to_reg(gpio, CTRL_OUT);
 
-	if (gpio > CRYSTALCOVE_VGPIO_NUM)
+	if (reg < 0)
 		return;
 
 	if (value)
-		regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+		regmap_update_bits(cg->regmap, reg, 1, 1);
 	else
-		regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+		regmap_update_bits(cg->regmap, reg, 1, 0);
 }
 
 static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type)
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
+	if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+		return 0;
+
 	switch (type) {
 	case IRQ_TYPE_NONE:
 		cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data)
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-	cg->set_irq_mask = false;
-	cg->update |= UPDATE_IRQ_MASK;
+	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+		cg->set_irq_mask = false;
+		cg->update |= UPDATE_IRQ_MASK;
+	}
 }
 
 static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data)
 	struct crystalcove_gpio *cg =
 		gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-	cg->set_irq_mask = true;
-	cg->update |= UPDATE_IRQ_MASK;
+	if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+		cg->set_irq_mask = true;
+		cg->update |= UPDATE_IRQ_MASK;
+	}
 }
 
 static struct irq_chip crystalcove_irqchip = {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 19a92ef..c83ea68 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 	u32 set;
 
 	if (!of_device_is_compatible(mvchip->chip.of_node,
-				     "marvell,armada-370-xp-gpio"))
+				     "marvell,armada-370-gpio"))
 		return 0;
 
 	if (IS_ERR(mvchip->clk))
@@ -747,7 +747,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 		set = U32_MAX;
 	else
 		return -EINVAL;
-	writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip));
+	writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
 
 	mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
 	if (!mvpwm)
@@ -768,6 +768,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
 	mvpwm->chip.dev = dev;
 	mvpwm->chip.ops = &mvebu_pwm_ops;
 	mvpwm->chip.npwm = mvchip->chip.ngpio;
+	/*
+	 * There may already be some PWM allocated, so we can't force
+	 * mvpwm->chip.base to a fixed point like mvchip->chip.base.
+	 * So, we let pwmchip_add() do the numbering and take the next free
+	 * region.
+	 */
+	mvpwm->chip.base = -1;
 
 	spin_lock_init(&mvpwm->lock);
 
@@ -845,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
 		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
 	},
 	{
-		.compatible = "marvell,armada-370-xp-gpio",
+		.compatible = "marvell,armada-370-gpio",
 		.data	    = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
 	},
 	{
@@ -1121,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
 						 mvchip);
 	}
 
-	/* Armada 370/XP has simple PWM support for GPIO lines */
+	/* Some MVEBU SoCs have simple PWM support for GPIO lines */
 	if (IS_ENABLED(CONFIG_PWM))
 		return mvebu_pwm_probe(pdev, mvchip, id);
 
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2185232..8fa5fcd 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -201,7 +201,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 			handler = acpi_gpio_irq_handler_evt;
 	}
 	if (!handler)
-		return AE_BAD_PARAMETER;
+		return AE_OK;
 
 	pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
 	if (pin < 0)
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4b44dd9..16fe974 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -479,6 +479,7 @@ static ssize_t export_store(struct class *class,
 		pr_debug("%s: status %d\n", __func__, status);
 	return status ? : len;
 }
+static CLASS_ATTR_WO(export);
 
 static ssize_t unexport_store(struct class *class,
 				struct class_attribute *attr,
@@ -514,18 +515,20 @@ static ssize_t unexport_store(struct class *class,
 		pr_debug("%s: status %d\n", __func__, status);
 	return status ? : len;
 }
+static CLASS_ATTR_WO(unexport);
 
-static struct class_attribute gpio_class_attrs[] = {
-	__ATTR(export, 0200, NULL, export_store),
-	__ATTR(unexport, 0200, NULL, unexport_store),
-	__ATTR_NULL,
+static struct attribute *gpio_class_attrs[] = {
+	&class_attr_export.attr,
+	&class_attr_unexport.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(gpio_class);
 
 static struct class gpio_class = {
 	.name =		"gpio",
 	.owner =	THIS_MODULE,
 
-	.class_attrs =	gpio_class_attrs,
+	.class_groups = gpio_class_groups,
 };
 
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5db4413..a42a1ee 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -708,7 +708,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
 
 	ge.timestamp = ktime_get_real_ns();
 
-	if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
+	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
+	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
 		int level = gpiod_get_value_cansleep(le->desc);
 
 		if (level)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4..1e8e112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
 				 adev->clock.default_dispclk / 100);
 			adev->clock.default_dispclk = 60000;
+		} else if (adev->clock.default_dispclk <= 60000) {
+			DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 62500;
 		}
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2d705e..ab6b0d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 	/* Vega 10 */
 	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a4831fe..a2c59a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
-	amdgpu_vram_mgr_init,
-	amdgpu_vram_mgr_fini,
-	amdgpu_vram_mgr_new,
-	amdgpu_vram_mgr_del,
-	amdgpu_vram_mgr_debug
+	.init		= amdgpu_vram_mgr_init,
+	.takedown	= amdgpu_vram_mgr_fini,
+	.get_node	= amdgpu_vram_mgr_new,
+	.put_node	= amdgpu_vram_mgr_del,
+	.debug		= amdgpu_vram_mgr_debug
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75..8a0818b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 {
 	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
-	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+	ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
 
 	memset(&args, 0, sizeof(args));
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a..5dffa27 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a..47bbc87f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f35529..d8c9a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c1..db30c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index fb08193..90332f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_RPTR);
+		v = RREG32(mmVCE_RB_RPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_RPTR2);
+		v = RREG32(mmVCE_RB_RPTR2);
 	else
-		return RREG32(mmVCE_RB_RPTR3);
+		v = RREG32(mmVCE_RB_RPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_WPTR);
+		v = RREG32(mmVCE_RB_WPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_WPTR2);
+		v = RREG32(mmVCE_RB_WPTR2);
 	else
-		return RREG32(mmVCE_RB_WPTR3);
+		v = RREG32(mmVCE_RB_WPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
 	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[2];
-	WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+		/* Program the ring registers through the instance 0 register
+		 * space when instance 0 is available; when only instance 1
+		 * is available, program them through the instance 1 space.
+		 */
+		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+			ring = &adev->vce.ring[0];
+			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[1];
+			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[2];
+			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+		}
+
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d5f53d0..83e40fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
 
 static struct phm_master_table_item
 vega10_thermal_start_thermal_controller_master_list[] = {
-	{NULL, tf_vega10_thermal_initialize},
-	{NULL, tf_vega10_thermal_set_temperature_range},
-	{NULL, tf_vega10_thermal_enable_alert},
+	{ .tableFunction = tf_vega10_thermal_initialize },
+	{ .tableFunction = tf_vega10_thermal_set_temperature_range },
+	{ .tableFunction = tf_vega10_thermal_enable_alert },
 /* We should restrict performance levels to low before we halt the SMC.
  * On the other hand we are still in boot state when we do this
  * so it would be pointless.
  * If this assumption changes we have to revisit this table.
  */
-	{NULL, tf_vega10_thermal_setup_fan_table},
-	{NULL, tf_vega10_thermal_start_smc_fan_control},
-	{NULL, NULL}
+	{ .tableFunction = tf_vega10_thermal_setup_fan_table },
+	{ .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+	{ }
 };
 
 static struct phm_master_table_header
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
 
 static struct phm_master_table_item
 vega10_thermal_set_temperature_range_master_list[] = {
-	{NULL, tf_vega10_thermal_disable_alert},
-	{NULL, tf_vega10_thermal_set_temperature_range},
-	{NULL, tf_vega10_thermal_enable_alert},
-	{NULL, NULL}
+	{ .tableFunction = tf_vega10_thermal_disable_alert },
+	{ .tableFunction = tf_vega10_thermal_set_temperature_range },
+	{ .tableFunction = tf_vega10_thermal_enable_alert },
+	{ }
 };
 
 struct phm_master_table_header
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827..53e78d0 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
 config DRM_DW_HDMI
 	tristate
 	select DRM_KMS_HELPER
+	select REGMAP_MMIO
 
 config DRM_DW_HDMI_AHB_AUDIO
 	tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719..aa885a6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		bool has_connectors =
 			!!new_crtc_state->connector_mask;
 
+		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
 		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
 					 crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
 		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 
+		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
 		/*
 		 * This only sets crtc->connectors_changed for routing changes,
 		 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
+		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
 		funcs = plane->helper_private;
 
 		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
 	drm_modeset_acquire_init(&ctx, 0);
 	while (1) {
+		err = drm_modeset_lock_all_ctx(dev, &ctx);
+		if (err)
+			goto out;
+
 		err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
 		if (err != -EDEADLK)
 			break;
 
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9f84761..48ca245 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	if (!connector)
 		return -ENOENT;
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	encoder = drm_connector_get_encoder(connector);
-	if (encoder)
-		out_resp->encoder_id = encoder->base.id;
-	else
-		out_resp->encoder_id = 0;
-
-	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
-			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
-			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
-			&out_resp->count_props);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
-	if (ret)
-		goto out_unref;
-
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
 		if (connector->encoder_ids[i] != 0)
 			encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 				if (put_user(connector->encoder_ids[i],
 					     encoder_ptr + copied)) {
 					ret = -EFAULT;
-					goto out_unref;
+					goto out;
 				}
 				copied++;
 			}
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
 				ret = -EFAULT;
+				mutex_unlock(&dev->mode_config.mutex);
+
 				goto out;
 			}
 			copied++;
 		}
 	}
 	out_resp->count_modes = mode_count;
-out:
 	mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	encoder = drm_connector_get_encoder(connector);
+	if (encoder)
+		out_resp->encoder_id = encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/* Only grab properties after probing, to make sure EDID and other
+	 * properties reflect the latest status. */
+	ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+			(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+			(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+			&out_resp->count_props);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
 	drm_connector_put(connector);
 
 	return ret;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3e5f521..213fb83 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+	u8 oui[3];
+	bool is_branch;
+	u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+	/* Analogix 7737 needs reduced M and N at HBR2 link rates */
+	{ OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+	const struct dpcd_quirk *quirk;
+	u32 quirks = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+		quirk = &dpcd_quirk_list[i];
+
+		if (quirk->is_branch != is_branch)
+			continue;
+
+		if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+			continue;
+
+		quirks |= quirk->quirks;
+	}
+
+	return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device descriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+		     bool is_branch)
+{
+	struct drm_dp_dpcd_ident *ident = &desc->ident;
+	unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+	int ret, dev_id_len;
+
+	ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+	if (ret < 0)
+		return ret;
+
+	desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+	dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+	DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+		      is_branch ? "branch" : "sink",
+		      (int)sizeof(ident->oui), ident->oui,
+		      dev_id_len, ident->device_id,
+		      ident->hw_rev >> 4, ident->hw_rev & 0xf,
+		      ident->sw_major_rev, ident->sw_minor_rev,
+		      desc->quirks);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
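/*
 * Illustrative sketch only (not part of the patch): how a driver might
 * consume the descriptor helpers added above.  The function name and the
 * caller-owned aux/desc pointers are assumptions; drm_dp_read_desc() and
 * drm_dp_has_quirk() are the interfaces used by the i915 call sites later
 * in this series.  Assumes <drm/drm_dp_helper.h>.
 */
static bool example_sink_needs_limited_m_n(struct drm_dp_aux *aux,
					   struct drm_dp_desc *desc,
					   bool is_branch)
{
	/* Reads DPCD 0x400/0x500 and caches the quirk mask in desc->quirks. */
	if (drm_dp_read_desc(aux, desc, is_branch) < 0)
		return false;	/* no descriptor read, assume no quirks */

	/* Acting on the shared quirk data is left to each driver. */
	return drm_dp_has_quirk(desc, DP_DPCD_QUIRK_LIMITED_M_N);
}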
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb4..37b8ad3 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
 	/* for a USB device */
-	drm_dev_unregister(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_modeset_unregister_all(dev);
+
+	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+	drm_minor_unregister(dev, DRM_MINOR_RENDER);
+	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
 	mutex_lock(&drm_global_mutex);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index c4a091e..e437fba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -106,9 +106,10 @@ struct etnaviv_gem_submit {
 	struct etnaviv_gpu *gpu;
 	struct ww_acquire_ctx ticket;
 	struct dma_fence *fence;
+	u32 flags;
 	unsigned int nr_bos;
 	struct etnaviv_gem_submit_bo bos[0];
-	u32 flags;
+	/* No new members here, the previous one is variable-length! */
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index de80ee1..1013765 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
 		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
-		bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
 		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
 						 explicit);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 09d3c4c..50294a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -82,14 +82,9 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-					struct drm_file *file)
-{
-	exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+	exynos_drm_subdrv_close(dev, file);
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
 	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
 				  | DRIVER_ATOMIC | DRIVER_RENDER,
 	.open			= exynos_drm_open,
-	.preclose		= exynos_drm_preclose,
 	.lastclose		= exynos_drm_lastclose,
 	.postclose		= exynos_drm_postclose,
 	.gem_free_object_unlocked = exynos_drm_gem_free_object,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cb31769..39c7405 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  *	drm framework doesn't support multiple irq yet.
  *	we can refer to the crtc to current hardware interrupt occurred through
  *	this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
  */
 struct exynos_drm_crtc {
 	struct drm_crtc			base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc4fda7..d404de8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
 	struct device *dev = dsi->dev;
 	struct device_node *node = dev->of_node;
-	struct device_node *ep;
 	int ret;
 
 	ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 	if (ret < 0)
 		return ret;
 
-	ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-	if (!ep) {
-		dev_err(dev, "no output port with endpoint specified\n");
-		return -EINVAL;
-	}
-
-	ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
 				     &dsi->burst_clk_rate);
 	if (ret < 0)
-		goto end;
+		return ret;
 
-	ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
 				     &dsi->esc_clk_rate);
 	if (ret < 0)
-		goto end;
-
-	of_node_put(ep);
+		return ret;
 
 	dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
 	if (!dsi->bridge_node)
 		return -EINVAL;
 
-end:
-	of_node_put(ep);
-
-	return ret;
+	return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+	struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+	of_node_put(dsi->bridge_node);
+
 	pm_runtime_disable(&pdev->dev);
 
 	component_del(&pdev->dev, &exynos_dsi_component_ops);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c..f77dcfa 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
 	 * Get the endpoint node. In our case, dsi has one output port1
 	 * to which the external HDMI bridge is connected.
 	 */
-	ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+	ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dca989e..24fe04d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine;
+	struct intel_vgpu_workload *pos, *n;
+	unsigned int tmp;
+
+	/* free the unsubmitted workloads in the queues. */
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		list_for_each_entry_safe(pos, n,
+			&vgpu->workload_q_head[engine->id], list) {
+			list_del_init(&pos->list);
+			free_workload(pos);
+		}
+	}
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+	clean_workloads(vgpu, ALL_ENGINES);
 	kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
 	unsigned int tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		/* free the unsubmited workload in the queue */
-		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			free_workload(pos);
-		}
-
+	clean_workloads(vgpu, engine_mask);
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
-	}
 }
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c995e54..0ffd696 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	i915_reg_t reg = {.reg = offset};
+	u32 v = *(u32 *)p_data;
+
+	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+		return intel_vgpu_default_mmio_write(vgpu,
+				offset, p_data, bytes);
 
 	switch (offset) {
 	case 0x4ddc:
-		vgpu_vreg(vgpu, offset) = 0x8000003c;
-		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
 		break;
 	case 0x42080:
-		vgpu_vreg(vgpu, offset) = 0x8000;
-		/* WaCompressedResourceDisplayNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceDisplayNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+		break;
+	case 0xe194:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+		break;
+	case 0x7014:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
 		break;
 	default:
 		return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d689e51..4bd1467 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	stats->count++;
 	stats->total += obj->base.size;
 	if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		struct drm_i915_gem_request *request;
 		struct task_struct *task;
 
+		mutex_lock(&dev->struct_mutex);
+
 		memset(&stats, 0, sizeof(stats));
 		stats.file_priv = file->driver_priv;
 		spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		 * still alive (e.g. get_pid(current) => fork() => exit()).
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
-		mutex_lock(&dev->struct_mutex);
 		request = list_first_entry_or_null(&file_priv->mm.request_list,
 						   struct drm_i915_gem_request,
 						   client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
+
 		mutex_unlock(&dev->struct_mutex);
 	}
 	mutex_unlock(&dev->filelist_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3036d48..4842867 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_fini;
 
 	pci_set_drvdata(pdev, &dev_priv->drm);
+	/*
+	 * Disable the system suspend direct complete optimization, which can
+	 * leave the device suspended skipping the driver's suspend handlers
+	 * if the device was already runtime suspended. This is needed due to
+	 * the difference in our runtime and system suspend sequence and
+	 * because the HDA driver may require us to enable the audio power
+	 * domain during system suspend.
+	 */
+	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
 	ret = i915_driver_init_early(dev_priv, ent);
 	if (ret < 0)
@@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	dev_priv->ipc_enabled = false;
 
-	/* Everything is in place, we can now relax! */
-	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-		 driver.name, driver.major, driver.minor, driver.patchlevel,
-		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
 		DRM_INFO("DRM_I915_DEBUG enabled\n");
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9b0949..2c453a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -562,7 +562,8 @@ struct intel_link_m_n {
 
 void intel_link_compute_m_n(int bpp, int nlanes,
 			    int pixel_clock, int link_clock,
-			    struct intel_link_m_n *m_n);
+			    struct intel_link_m_n *m_n,
+			    bool reduce_m_n);
 
 /* Interface history:
  *
@@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
 	return false;
 }
 
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 				int enable_ppgtt);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df..615f0a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	unsigned int max_segment;
+	gfp_t noreclaim;
 	int ret;
-	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * Fail silently without starting the shrinker
 	 */
 	mapping = obj->base.filp->f_mapping;
-	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
-	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	noreclaim = mapping_gfp_constraint(mapping,
+					   ~(__GFP_IO | __GFP_RECLAIM));
+	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
 	sg = st->sgl;
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		if (unlikely(IS_ERR(page))) {
-			i915_gem_shrink(dev_priv,
-					page_count,
-					I915_SHRINK_BOUND |
-					I915_SHRINK_UNBOUND |
-					I915_SHRINK_PURGEABLE);
+		const unsigned int shrink[] = {
+			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+			0,
+		}, *s = shrink;
+		gfp_t gfp = noreclaim;
+
+		do {
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-		}
-		if (unlikely(IS_ERR(page))) {
-			gfp_t reclaim;
+			if (likely(!IS_ERR(page)))
+				break;
+
+			if (!*s) {
+				ret = PTR_ERR(page);
+				goto err_sg;
+			}
+
+			i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+			cond_resched();
 
 			/* We've tried hard to allocate the memory by reaping
 			 * our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 * defer the oom here by reporting the ENOMEM back
 			 * to userspace.
 			 */
-			reclaim = mapping_gfp_mask(mapping);
-			reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
+			if (!*s) {
+				/* reclaim and warn, but no oom */
+				gfp = mapping_gfp_mask(mapping);
 
-			page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err_sg;
+				/* Our bo are always dirty and so we require
+				 * kswapd to reclaim our pages (direct reclaim
+				 * does not effectively begin pageout of our
+				 * buffers on its own). However, direct reclaim
+				 * only waits for kswapd when under allocation
+				 * congestion. So as a result __GFP_RECLAIM is
+				 * unreliable and fails to actually reclaim our
+				 * dirty pages -- unless you try over and over
+				 * again with !__GFP_NORETRY. However, we still
+				 * want to fail this allocation rather than
+				 * trigger the out-of-memory killer and for
+				 * this we want the future __GFP_MAYFAIL.
+				 */
 			}
-		}
+		} while (1);
+
 		if (!i ||
 		    sg->length >= max_segment ||
 		    page_to_pfn(page) != last_pfn + 1) {
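/*
 * Simplified, self-contained sketch (not the driver code) of the staged
 * allocation pattern used in the hunk above: try an opportunistic
 * no-reclaim allocation first, reap our own caches on failure, and only
 * allow full direct reclaim on the final attempt.  try_alloc() and
 * shrink_caches() are hypothetical stand-ins for
 * shmem_read_mapping_page_gfp() and i915_gem_shrink().
 */
#include <stddef.h>

#define SHRINK_ALL 0x7			/* stand-in for the I915_SHRINK_* mask */

void *try_alloc(int allow_reclaim);	/* hypothetical allocator */
void shrink_caches(unsigned int mode);	/* hypothetical shrinker */

static void *alloc_page_with_fallback(void)
{
	const unsigned int stages[] = { SHRINK_ALL, 0 };
	const unsigned int *s = stages;
	int allow_reclaim = 0;		/* first tries avoid direct reclaim */
	void *page;

	do {
		page = try_alloc(allow_reclaim);
		if (page)
			return page;

		if (!*s)
			return NULL;	/* every fallback stage already used */

		shrink_caches(*s++);	/* reap our own buffers, then retry */

		if (!*s)		/* last attempt: let the VM reclaim */
			allow_reclaim = 1;
	} while (1);
}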
@@ -3298,6 +3318,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
 	int ret;
 
+	/* If the device is asleep, we have no requests outstanding */
+	if (!READ_ONCE(i915->gt.awake))
+		return 0;
+
 	if (flags & I915_WAIT_LOCKED) {
 		struct i915_gem_timeline *tl;
 
@@ -4218,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 
 	mapping = obj->base.filp->f_mapping;
 	mapping_set_gfp_mask(mapping, mask);
+	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
 	i915_gem_object_init(obj, &i915_gem_object_ops);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8..9ad13ee 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -546,11 +546,12 @@ relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct reloc_cache *cache)
 {
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
 	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+			ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
 			if (ret)
 				goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 
 	reloc_cache_init(&cache, eb->i915);
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
 		if (ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a0563e1..f1989b8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+	struct drm_i915_private *dev_priv = vm->i915;
+
+	/*
+	 * Make sure the internal GAM fifo has been cleared of all GTT
+	 * writes before exiting stop_machine(). This guarantees that
+	 * any aperture accesses waiting to start in another process
+	 * cannot back up behind the GTT writes causing a hang.
+	 * The register can be any arbitrary GAM register.
+	 */
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+	struct i915_address_space *vm;
+	dma_addr_t addr;
+	u64 offset;
+	enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+	struct insert_page *arg = _arg;
+
+	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+					  dma_addr_t addr,
+					  u64 offset,
+					  enum i915_cache_level level,
+					  u32 unused)
+{
+	struct insert_page arg = { vm, addr, offset, level };
+
+	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	u64 start;
+	enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+
+	gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					     struct sg_table *st,
+					     u64 start,
+					     enum i915_cache_level level,
+					     u32 unused)
+{
+	struct insert_entries arg = { vm, st, start, level };
+
+	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+	struct i915_address_space *vm;
+	u64 start;
+	u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+	struct clear_range *arg = _arg;
+
+	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+	bxt_vtd_ggtt_wa(arg->vm);
+
+	return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+					  u64 start,
+					  u64 length)
+{
+	struct clear_range arg = { vm, start, length };
+
+	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
@@ -2313,7 +2408,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 		    appgtt->base.allocate_va_range) {
 			ret = appgtt->base.allocate_va_range(&appgtt->base,
 							     vma->node.start,
-							     vma->node.size);
+							     vma->size);
 			if (ret)
 				goto err_pages;
 		}
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+		ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+		if (ggtt->base.clear_range != nop_clear_range)
+			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+	}
+
 	ggtt->invalidate = gen6_ggtt_invalidate;
 
 	return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-	i915->ggtt.invalidate = gen6_ggtt_invalidate;
+	if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+		i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
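/*
 * Condensed sketch (not the driver code) of the stop_machine() wrapper
 * pattern introduced above for the BXT/VT-d workaround: marshal the update
 * arguments into a struct, run the update with every other CPU stopped,
 * and finish with a posting read so the write FIFO drains before anyone
 * resumes.  do_gtt_update() and flush_gtt_writes() are hypothetical
 * stand-ins for the gen8 insert/clear helpers and the GFX_FLSH_CNTL_GEN6
 * posting read.
 */
#include <linux/stop_machine.h>

struct gtt_update_args {
	void *vm;
	unsigned long long start;
	unsigned long long length;
};

static void do_gtt_update(struct gtt_update_args *args);	/* hypothetical */
static void flush_gtt_writes(void *vm);				/* hypothetical */

static int gtt_update_cb(void *data)
{
	struct gtt_update_args *args = data;

	do_gtt_update(args);		/* perform the GTT update... */
	flush_gtt_writes(args->vm);	/* ...and drain it before CPUs resume */

	return 0;
}

static void gtt_update_serialized(void *vm, unsigned long long start,
				  unsigned long long length)
{
	struct gtt_update_args args = { vm, start, length };

	/* Serializes the update against concurrent aperture access. */
	stop_machine(gtt_update_cb, &args, NULL);
}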
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc94..a74d0ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	req->head = req->ring->tail;
+	req->head = req->ring->emit;
 
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 129c58b..a4a920c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -123,7 +123,7 @@ struct drm_i915_gem_request {
 	 * It is used by the driver to then queue the request for execution.
 	 */
 	struct i915_sw_fence submit;
-	wait_queue_t submitq;
+	wait_queue_entry_t submitq;
 	wait_queue_head_t execute;
 
 	/* A list of everyone we wait upon, and everyone who waits upon us.
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 129ed30..57d9f7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
 		return;
 
 	mutex_unlock(&dev->struct_mutex);
-
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 				I915_SHRINK_ACTIVE);
 	intel_runtime_pm_put(dev_priv);
 
-	synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
 	return freed;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d43..fb5231f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 			obj->mm.quirked = false;
 		}
 		if (!i915_gem_object_is_tiled(obj)) {
-			GEM_BUG_ON(!obj->mm.quirked);
+			GEM_BUG_ON(obj->mm.quirked);
 			__i915_gem_object_pin_pages(obj);
 			obj->mm.quirked = true;
 		}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff..ab5140b 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
 	GEM_BUG_ON(freespace < wqi_size);
 
 	/* The GuC firmware wants the tail index in QWords, not bytes */
-	tail = rq->tail;
-	assert_ring_tail_valid(rq->ring, rq->tail);
-	tail >>= 3;
+	tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
 	GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
 
 	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fd97fe0..190f6aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 	u32 pipestat_mask;
 	u32 enable_mask;
 	enum pipe pipe;
-	u32 val;
 
 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
 			PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_LPE_PIPE_A_INTERRUPT |
+		I915_LPE_PIPE_B_INTERRUPT;
+
 	if (IS_CHERRYVIEW(dev_priv))
-		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+			I915_LPE_PIPE_C_INTERRUPT;
 
 	WARN_ON(dev_priv->irq_mask != ~0);
 
-	val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT |
-		I915_LPE_PIPE_C_INTERRUPT);
-
-	enable_mask |= val;
-
 	dev_priv->irq_mask = ~enable_mask;
 
 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4..1a78363 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
 	GEN5_FEATURES,
 	.platform = INTEL_IRONLAKE,
-	.is_mobile = 1,
+	.is_mobile = 1, .has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
 	.has_hw_contexts = 1, \
 	.has_logical_ring_contexts = 1, \
 	.has_guc = 1, \
-	.has_decoupled_mmio = 1, \
 	.has_aliasing_ppgtt = 1, \
 	.has_full_ppgtt = 1, \
 	.has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
 #define VGT_VERSION_MAJOR 1
 #define VGT_VERSION_MINOR 0
 
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-	INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
 /*
  * notifications from guest to vgpu device model
  */
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
-	uint16_t version_major;
-	uint16_t version_minor;
+	u16 version_major;
+	u16 version_minor;
 	u32 vgt_id;		/* ID of vGT instance */
 	u32 rsv1[12];		/* pad to offset 0x40 */
 	/*
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a7c63e..65b837e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c)	((port) ? c : a)	/* ports A and C only */
+#define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? a : c)	/* ports A and C only */
 #define _MMIO_MIPI(port, a, c)	_MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1			_MMIO(0x160004)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a277f8e..380de43 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 					struct list_head *continuation)
 {
 	wait_queue_head_t *x = &fence->wait;
-	wait_queue_t *pos, *next;
+	wait_queue_entry_t *pos, *next;
 	unsigned long flags;
 
 	debug_fence_deactivate(fence);
@@ -160,31 +160,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 
 	/*
 	 * To prevent unbounded recursion as we traverse the graph of
-	 * i915_sw_fences, we move the task_list from this, the next ready
-	 * fence, to the tail of the original fence's task_list
+	 * i915_sw_fences, we move the entry list from this, the next ready
+	 * fence, to the tail of the original fence's entry list
 	 * (and so added to the list to be woken).
 	 */
 
 	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
 	if (continuation) {
-		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+		list_for_each_entry_safe(pos, next, &x->head, entry) {
 			if (pos->func == autoremove_wake_function)
 				pos->func(pos, TASK_NORMAL, 0, continuation);
 			else
-				list_move_tail(&pos->task_list, continuation);
+				list_move_tail(&pos->entry, continuation);
 		}
 	} else {
 		LIST_HEAD(extra);
 
 		do {
-			list_for_each_entry_safe(pos, next,
-						 &x->task_list, task_list)
+			list_for_each_entry_safe(pos, next, &x->head, entry)
 				pos->func(pos, TASK_NORMAL, 0, &extra);
 
 			if (list_empty(&extra))
 				break;
 
-			list_splice_tail_init(&extra, &x->task_list);
+			list_splice_tail_init(&extra, &x->head);
 		} while (1);
 	}
 	spin_unlock_irqrestore(&x->lock, flags);
@@ -254,9 +253,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
 	__i915_sw_fence_commit(fence);
 }
 
-static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
 {
-	list_del(&wq->task_list);
+	list_del(&wq->entry);
 	__i915_sw_fence_complete(wq->private, key);
 	i915_sw_fence_put(wq->private);
 	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
@@ -267,7 +266,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
 static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 				    const struct i915_sw_fence * const signaler)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return false;
@@ -275,7 +274,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 	if (fence == signaler)
 		return true;
 
-	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+	list_for_each_entry(wq, &fence->wait.head, entry) {
 		if (wq->func != i915_sw_fence_wake)
 			continue;
 
@@ -288,12 +287,12 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 
 	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
 		return;
 
-	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+	list_for_each_entry(wq, &fence->wait.head, entry) {
 		if (wq->func != i915_sw_fence_wake)
 			continue;
 
@@ -320,7 +319,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
 
 static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 					  struct i915_sw_fence *signaler,
-					  wait_queue_t *wq, gfp_t gfp)
+					  wait_queue_entry_t *wq, gfp_t gfp)
 {
 	unsigned long flags;
 	int pending;
@@ -350,7 +349,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 		pending |= I915_SW_FENCE_FLAG_ALLOC;
 	}
 
-	INIT_LIST_HEAD(&wq->task_list);
+	INIT_LIST_HEAD(&wq->entry);
 	wq->flags = pending;
 	wq->func = i915_sw_fence_wake;
 	wq->private = i915_sw_fence_get(fence);
@@ -359,7 +358,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 	spin_lock_irqsave(&signaler->wait.lock, flags);
 	if (likely(!i915_sw_fence_done(signaler))) {
-		__add_wait_queue_tail(&signaler->wait, wq);
+		__add_wait_queue_entry_tail(&signaler->wait, wq);
 		pending = 1;
 	} else {
 		i915_sw_fence_wake(wq, 0, 0, NULL);
@@ -372,7 +371,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *signaler,
-				 wait_queue_t *wq)
+				 wait_queue_entry_t *wq)
 {
 	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
 }
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index d31cefbb..fd3c3bf 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
 
 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 				 struct i915_sw_fence *after,
-				 wait_queue_t *wq);
+				 wait_queue_entry_t *wq);
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 				     struct i915_sw_fence *after,
 				     gfp_t gfp);
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a97..2e73901 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
  */
 void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-	uint64_t magic;
-	uint32_t version;
+	u64 magic;
+	u16 version_major;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 	if (magic != VGT_MAGIC)
 		return;
 
-	version = INTEL_VGT_IF_VERSION_ENCODE(
-		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-	if (version != INTEL_VGT_IF_VERSION) {
+	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1aba470..f066e2d 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 				break;
 		}
 
+		if (!ret) {
+			ret = i915_gem_active_retire(&vma->last_fence,
+						     &vma->vm->i915->drm.struct_mutex);
+		}
+
 		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index eb638a1..42fb436 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -15,13 +15,9 @@ static struct intel_dsm_priv {
 	acpi_handle dhandle;
 } intel_dsm_priv;
 
-static const u8 intel_dsm_guid[] = {
-	0xd3, 0x73, 0xd8, 0x7e,
-	0xd0, 0xc2,
-	0x4f, 0x4e,
-	0xa8, 0x54,
-	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
-};
+static const guid_t intel_dsm_guid =
+	GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+		  0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
 
 static char *intel_dsm_port_name(u8 id)
 {
@@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)
 	int i;
 	union acpi_object *pkg, *connector_count;
 
-	pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+	pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
 			INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
 			NULL, ACPI_TYPE_PACKAGE);
 	if (!pkg) {
@@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 	if (!dhandle)
 		return false;
 
-	if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+	if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
 			    1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
 		DRM_DEBUG_KMS("no _DSM method for intel device\n");
 		return false;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3617927..9106ea3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
 static void skylake_pfit_enable(struct intel_crtc *crtc);
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 struct drm_modeset_acquire_ctx *ctx);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
 
 struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	int i, ret;
 
-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, ctx);
 	i915_redisable_vga(to_i915(dev));
 
 	if (!state)
@@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
-		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  unsigned int scaler_user, int *scaler_id,
 		  int src_w, int src_h, int dst_w, int dst_h)
 {
 	struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
 
-	need_scaling = drm_rotation_90_or_270(rotation) ?
-		(src_h != dst_w || src_w != dst_h):
-		(src_w != dst_w || src_h != dst_h);
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
+	need_scaling = src_w != dst_w || src_h != dst_h;
 
 	/*
 	 * if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		&state->scaler_state.scaler_id,
 		state->pipe_src_w, state->pipe_src_h,
 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	ret = skl_update_scaler(crtc_state, force_detach,
 				drm_plane_index(&intel_plane->base),
 				&plane_state->scaler_id,
-				plane_state->base.rotation,
 				drm_rect_width(&plane_state->base.src) >> 16,
 				drm_rect_height(&plane_state->base.src) >> 16,
 				drm_rect_width(&plane_state->base.dst),
@@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
 		intel_update_watermarks(intel_crtc);
 }
 
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+					struct drm_modeset_acquire_ctx *ctx)
 {
 	struct intel_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 		return;
 	}
 
-	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+	state->acquire_ctx = ctx;
 
 	/* Everything's already locked, -EDEADLK can't happen. */
 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -6101,7 +6105,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
 	pipe_config->fdi_lanes = lane;
 
 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-			       link_bw, &pipe_config->fdi_m_n);
+			       link_bw, &pipe_config->fdi_m_n, false);
 
 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6281,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void compute_m_n(unsigned int m, unsigned int n,
-			uint32_t *ret_m, uint32_t *ret_n)
+			uint32_t *ret_m, uint32_t *ret_n,
+			bool reduce_m_n)
 {
 	/*
 	 * Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6290,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
 	 * values. The passed in values are more likely to have the least
 	 * significant bits zero than M after rounding below, so do this first.
 	 */
-	while ((m & 1) == 0 && (n & 1) == 0) {
-		m >>= 1;
-		n >>= 1;
+	if (reduce_m_n) {
+		while ((m & 1) == 0 && (n & 1) == 0) {
+			m >>= 1;
+			n >>= 1;
+		}
 	}
 
 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6305,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
 void
 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 		       int pixel_clock, int link_clock,
-		       struct intel_link_m_n *m_n)
+		       struct intel_link_m_n *m_n,
+		       bool reduce_m_n)
 {
 	m_n->tu = 64;
 
 	compute_m_n(bits_per_pixel * pixel_clock,
 		    link_clock * nlanes * 8,
-		    &m_n->gmch_m, &m_n->gmch_n);
+		    &m_n->gmch_m, &m_n->gmch_n,
+		    reduce_m_n);
 
 	compute_m_n(pixel_clock, link_clock,
-		    &m_n->link_m, &m_n->link_n);
+		    &m_n->link_m, &m_n->link_n,
+		    reduce_m_n);
 }
 
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
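/*
 * Worked example (hypothetical numbers, not from the patch) of the
 * reduce_m_n behaviour added above: a 24 bpp stream at a 148500 kHz pixel
 * clock over four HBR2 lanes (270000 kHz symbol clock) starts with a data
 * ratio of 24 * 148500 : 270000 * 4 * 8 = 3564000 : 8640000.  The optional
 * pre-reduction halves both values while they stay even, giving
 * 111375 : 270000 before the power-of-two rounding; with
 * DP_DPCD_QUIRK_LIMITED_M_N set, the raw values go to the rounding step
 * unchanged.  A standalone sketch of that pre-reduction step:
 */
#include <stdio.h>

static void reduce_ratio(unsigned int *m, unsigned int *n)
{
	/* Halve m and n together for as long as both remain even. */
	while ((*m & 1) == 0 && (*n & 1) == 0) {
		*m >>= 1;
		*n >>= 1;
	}
}

int main(void)
{
	unsigned int m = 24 * 148500;		/* bpp * pixel clock (kHz) */
	unsigned int n = 270000 * 4 * 8;	/* link clock * lanes * 8 */

	reduce_ratio(&m, &n);
	printf("%u:%u\n", m, n);		/* prints 111375:270000 */

	return 0;
}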
@@ -12197,6 +12207,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 	 * type. For DP ports it behaves like most other platforms, but on HDMI
 	 * there's an extra 1 line difference. So we need to add two instead of
 	 * one to the value.
+	 *
+	 * On VLV/CHV DSI the scanline counter would appear to increment
+	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
+	 * that means we can't tell whether we're in vblank or not while
+	 * we're on that particular line. We must still set scanline_offset
+	 * to 1 so that the vblank timestamps come out correct when we query
+	 * the scanline counter from within the vblank interrupt handler.
+	 * However if queried just before the start of vblank we'll get an
+	 * answer that's slightly in the future.
 	 */
 	if (IS_GEN2(dev_priv)) {
 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -15013,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
 	intel_setup_outputs(dev_priv);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
 	drm_modeset_unlock_all(dev);
 
 	for_each_intel_crtc(dev, crtc) {
@@ -15050,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
 	return 0;
 }
 
-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+				struct drm_modeset_acquire_ctx *ctx)
 {
 	struct intel_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	struct drm_connector *crt = NULL;
 	struct intel_load_detect_pipe load_detect_temp;
-	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 	int ret;
 
 	/* We can't just switch on the pipe A, we need to set things up with a
@@ -15128,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
 }
 
-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+				struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15174,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		plane = crtc->plane;
 		crtc->base.primary->state->visible = true;
 		crtc->plane = !plane;
-		intel_crtc_disable_noatomic(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base, ctx);
 		crtc->plane = plane;
 	}
 
@@ -15184,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		 * resume. Force-enable the pipe to fix this, the update_dpms
 		 * call below we restore the pipe to the right state, but leave
 		 * the required bits on. */
-		intel_enable_pipe_a(dev);
+		intel_enable_pipe_a(dev, ctx);
 	}
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
 	if (crtc->active && !intel_crtc_has_encoders(crtc))
-		intel_crtc_disable_noatomic(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base, ctx);
 
 	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
 		/*
@@ -15488,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
  * and sanitizes it to the current state
  */
 static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+			     struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
@@ -15508,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
 	for_each_pipe(dev_priv, pipe) {
 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-		intel_sanitize_crtc(crtc);
+		intel_sanitize_crtc(crtc, ctx);
 		intel_dump_pipe_config(crtc, crtc->config,
 				       "[setup_hw_state]");
 	}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee77b51..fc691b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("common rates: %s\n", str);
 }
 
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
-	u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
-						      DP_SINK_OUI;
-
-	return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
-	       sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
-	struct intel_dp_desc *desc = &intel_dp->desc;
-	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
-		       DP_OUI_SUPPORT;
-	int dev_id_len;
-
-	if (!__intel_dp_read_desc(intel_dp, desc))
-		return false;
-
-	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
-	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
-		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
-		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
-		      dev_id_len, desc->device_id,
-		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
-		      desc->sw_major_rev, desc->sw_minor_rev);
-
-	return true;
-}
-
 static int rate_to_index(int find, const int *rates)
 {
 	int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
 	int common_len;
 	uint8_t link_bw, rate_select;
+	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+					   DP_DPCD_QUIRK_LIMITED_M_N);
 
 	common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1753,7 +1724,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	intel_link_compute_m_n(bpp, lane_count,
 			       adjusted_mode->crtc_clock,
 			       pipe_config->port_clock,
-			       &pipe_config->dp_m_n);
+			       &pipe_config->dp_m_n,
+			       reduce_m_n);
 
 	if (intel_connector->panel.downclock_mode != NULL &&
 		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			intel_link_compute_m_n(bpp, lane_count,
 				intel_connector->panel.downclock_mode->clock,
 				pipe_config->port_clock,
-				&pipe_config->dp_m2_n2);
+				&pipe_config->dp_m2_n2,
+				reduce_m_n);
 	}
 
 	/*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	if (!intel_dp_read_dpcd(intel_dp))
 		return false;
 
-	intel_dp_read_desc(intel_dp);
+	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+			 drm_dp_is_branch(intel_dp->dpcd));
 
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
 		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 
 	intel_dp_print_rates(intel_dp);
 
-	intel_dp_read_desc(intel_dp);
+	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+			 drm_dp_is_branch(intel_dp->dpcd));
 
 	intel_dp_configure_mst(intel_dp);
 
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e22..40ba313 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 	struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
 	struct intel_panel *panel = &connector->panel;
 
-	intel_dp_aux_enable_backlight(connector);
-
 	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
 		panel->backlight.max = 0xFFFF;
 	else
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c1f62eb..989e255 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	int lane_count, slots;
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int mst_pbn;
+	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+					   DP_DPCD_QUIRK_LIMITED_M_N);
 
 	pipe_config->has_pch_encoder = false;
 	bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	intel_link_compute_m_n(bpp, lane_count,
 			       adjusted_mode->crtc_clock,
 			       pipe_config->port_clock,
-			       &pipe_config->dp_m_n);
+			       &pipe_config->dp_m_n,
+			       reduce_m_n);
 
 	pipe_config->dp_m_n.tu = slots;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aaee394..f630c7a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -906,14 +906,6 @@ enum link_m_n_set {
 	M2_N2
 };
 
-struct intel_dp_desc {
-	u8 oui[3];
-	u8 device_id[6];
-	u8 hw_rev;
-	u8 sw_major_rev;
-	u8 sw_minor_rev;
-} __packed;
-
 struct intel_dp_compliance_data {
 	unsigned long edid;
 	uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
 	/* Max link BW for the sink as per DPCD registers */
 	int max_sink_link_bw;
 	/* sink or branch descriptor */
-	struct intel_dp_desc desc;
+	struct drm_dp_desc desc;
 	struct drm_dp_aux aux;
 	enum intel_display_power_domain aux_power_domain;
 	uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 }
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
-			  struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0..f94eacf 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	return 0;
 }
 
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	bool idle = true;
+
+	intel_runtime_pm_get(dev_priv);
+
+	/* No bit for gen2, so assume the CS parser is idle */
+	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+		idle = false;
+
+	intel_runtime_pm_put(dev_priv);
+
+	return idle;
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished processing all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
 	/* Any inflight/incomplete requests? */
 	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
 			       intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		return false;
 
 	/* Ring stopped? */
-	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+	if (!ring_is_idle(engine))
 		return false;
 
 	return true;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add..d93c584 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
 					    int *width, int *height)
 {
-	int w, h;
-
-	if (drm_rotation_90_or_270(cache->plane.rotation)) {
-		w = cache->plane.src_h;
-		h = cache->plane.src_w;
-	} else {
-		w = cache->plane.src_w;
-		h = cache->plane.src_h;
-	}
-
 	if (width)
-		*width = w;
+		*width = cache->plane.src_w;
 	if (height)
-		*height = h;
+		*height = cache->plane.src_h;
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
 	cache->plane.rotation = plane_state->base.rotation;
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
 	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
 	cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 668f004..292fedf 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask &= ~val;
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	POSTING_READ(VLV_IMR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask |= val;
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	POSTING_READ(VLV_IIR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
 	desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-	lpe_audio_irq_mask(&desc->irq_data);
-
 	lpe_audio_platdev_destroy(dev_priv);
 
 	irq_free_desc(dev_priv->lpe_audio.irq);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c63..62f44d3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
-	assert_ring_tail_valid(rq->ring, rq->tail);
-	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
 	/* True 32b PPGTT with dynamic page allocation: update PDP
 	 * registers and point the unallocated PDPs to scratch page.
@@ -1989,7 +1988,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ce->ring = ring;
 	ce->state = vma;
-	ce->initialised = engine->init_context == NULL;
+	ce->initialised |= engine->init_context == NULL;
 
 	return 0;
 
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 			ce->state->obj->mm.dirty = true;
 			i915_gem_object_unpin_map(ce->state->obj);
 
-			ce->ring->head = ce->ring->tail = 0;
-			intel_ring_update_space(ce->ring);
+			intel_ring_reset(ce->ring, 0);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 71cbe9c..5abef482 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 		return false;
 	}
 
-	intel_dp_read_desc(dp);
+	drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
 
 	DRM_DEBUG_KMS("Success: LSPCON init\n");
 	return true;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd60..078fd1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
 
 	/* n.b., src is 16.16 fixed point, dst is whole integer */
 	if (plane->id == PLANE_CURSOR) {
+		/*
+		 * Cursors only support 0/180 degree rotation,
+		 * hence no need to account for rotation here.
+		 */
 		src_w = pstate->base.src_w;
 		src_h = pstate->base.src_h;
 		dst_w = pstate->base.crtc_w;
 		dst_h = pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		src_w = drm_rect_width(&pstate->base.src);
 		src_h = drm_rect_height(&pstate->base.src);
 		dst_w = drm_rect_width(&pstate->base.dst);
 		dst_h = drm_rect_height(&pstate->base.dst);
 	}
 
-	if (drm_rotation_90_or_270(pstate->base.rotation))
-		swap(dst_w, dst_h);
-
 	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
 
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 	if (y && format != DRM_FORMAT_NV12)
 		return 0;
 
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	height = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	/* for planar format */
 	if (format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
 		return 8;
 
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
 	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
 	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(src_w, src_h);
-
 	/* Halve UV plane width and height for NV12 */
 	if (fb->format->format == DRM_FORMAT_NV12 && !y) {
 		src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 		width = intel_pstate->base.crtc_w;
 		height = intel_pstate->base.crtc_h;
 	} else {
+		/*
+		 * Src coordinates are already rotated by 270 degrees for
+		 * the 90/270 degree plane rotation cases (to match the
+		 * GTT mapping), hence no need to account for rotation here.
+		 */
 		width = drm_rect_width(&intel_pstate->base.src) >> 16;
 		height = drm_rect_height(&intel_pstate->base.src) >> 16;
 	}
 
-	if (drm_rotation_90_or_270(pstate->rotation))
-		swap(width, height);
-
 	cpp = fb->format->cpp[0];
 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
 
@@ -4335,11 +4347,19 @@ skl_compute_wm(struct drm_atomic_state *state)
 	struct drm_crtc_state *cstate;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_wm_values *results = &intel_state->wm_results;
+	struct drm_device *dev = state->dev;
 	struct skl_pipe_wm *pipe_wm;
 	bool changed = false;
 	int ret, i;
 
 	/*
+	 * When we distrust bios wm we always need to recompute to set the
+	 * expected DDB allocations for each CRTC.
+	 */
+	if (to_i915(dev)->wm.distrust_bios_wm)
+		changed = true;
+
+	/*
 	 * If this transaction isn't actually touching any CRTC's, don't
 	 * bother with watermark calculation.  Note that if we pass this
 	 * test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4369,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 	 */
 	for_each_new_crtc_in_state(state, crtc, cstate, i)
 		changed = true;
+
 	if (!changed)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0..559f1ab 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 	}
 
 	/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
-	if (intel_crtc->config->pipe_src_w > 3200 ||
-				intel_crtc->config->pipe_src_h > 2000) {
+	if (dev_priv->psr.psr2_support &&
+	    (intel_crtc->config->pipe_src_w > 3200 ||
+	     intel_crtc->config->pipe_src_h > 2000)) {
 		dev_priv->psr.psr2_support = false;
 		return false;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b..513a0f4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
 
 void intel_ring_update_space(struct intel_ring *ring)
 {
-	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+	ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
 }
 
 static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 
 	i915_gem_request_submit(request);
 
-	assert_ring_tail_valid(request->ring, request->tail);
-	I915_WRITE_TAIL(request->engine, request->tail);
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_set_tail(request->ring, request->tail));
 }
 
 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
 	return PTR_ERR(addr);
 }
 
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+	GEM_BUG_ON(!list_empty(&ring->request_list));
+	ring->tail = tail;
+	ring->head = tail;
+	ring->emit = tail;
+	intel_ring_update_space(ring);
+}
+
 void intel_ring_unpin(struct intel_ring *ring)
 {
 	GEM_BUG_ON(!ring->vma);
 	GEM_BUG_ON(!ring->vaddr);
 
+	/* Discard any unused bytes beyond that submitted to hw. */
+	intel_ring_reset(ring, ring->tail);
+
 	if (i915_vma_is_map_and_fenceable(ring->vma))
 		i915_vma_unpin_iomap(ring->vma);
 	else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	/* Restart from the beginning of the rings for convenience */
 	for_each_engine(engine, dev_priv, id)
-		engine->buffer->head = engine->buffer->tail;
+		intel_ring_reset(engine->buffer, 0);
 }
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 		unsigned space;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ring->tail,
+		space = __intel_ring_space(target->postfix, ring->emit,
 					   ring->size);
 		if (space >= bytes)
 			break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
-	int remain_actual = ring->size - ring->tail;
-	int remain_usable = ring->effective_size - ring->tail;
+	int remain_actual = ring->size - ring->emit;
+	int remain_usable = ring->effective_size - ring->emit;
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 
 	if (unlikely(need_wrap)) {
 		GEM_BUG_ON(remain_actual > ring->space);
-		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+		GEM_BUG_ON(ring->emit + remain_actual > ring->size);
 
 		/* Fill the tail with MI_NOOP */
-		memset(ring->vaddr + ring->tail, 0, remain_actual);
-		ring->tail = 0;
+		memset(ring->vaddr + ring->emit, 0, remain_actual);
+		ring->emit = 0;
 		ring->space -= remain_actual;
 	}
 
-	GEM_BUG_ON(ring->tail > ring->size - bytes);
-	cs = ring->vaddr + ring->tail;
-	ring->tail += bytes;
+	GEM_BUG_ON(ring->emit > ring->size - bytes);
+	cs = ring->vaddr + ring->emit;
+	ring->emit += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
 
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
 	int num_dwords =
-		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	u32 *cs;
 
 	if (num_dwords == 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a080..f7144fe 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,6 +145,7 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
+	u32 emit;
 
 	int space;
 	int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
 void intel_ring_unpin(struct intel_ring *ring);
 void intel_ring_free(struct intel_ring *ring);
 
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
 }
 
 static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 	GEM_BUG_ON(tail >= ring->size);
 }
 
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+	/* Whilst writes to the tail are strictly ordered, there is no
+	 * serialisation between readers and the writers. The tail may be
+	 * read by i915_gem_request_retire() just as it is being updated
+	 * by execlists, as although the breadcrumb is complete, the context
+	 * switch hasn't been seen.
+	 */
+	assert_ring_tail_valid(ring, tail);
+	ring->tail = tail;
+	return tail;
+}
 
 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c71..e6517ed 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	long timeout = msecs_to_jiffies_timeout(1);
 	int scanline, min, max, vblank_start;
 	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+		intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
 	DEFINE_WAIT(wait);
 
 	vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
 	drm_crtc_vblank_put(&crtc->base);
 
+	/*
+	 * On VLV/CHV DSI the scanline counter would appear to
+	 * increment approx. 1/3 of a scanline before start of vblank.
+	 * The registers still get latched at start of vblank however.
+	 * This means we must not write any registers on the first
+	 * line of vblank (since not the whole line is actually in
+	 * vblank). And unfortunately we can't use the interrupt to
+	 * wait here since it will fire too soon. We could use the
+	 * frame start interrupt instead since it will fire after the
+	 * critical scanline, but that would require more changes
+	 * in the interrupt code. So for now we'll just do the nasty
+	 * thing and poll for the bad scanline to pass us by.
+	 *
+	 * FIXME figure out if BXT+ DSI suffers from this as well
+	 */
+	while (need_vlv_dsi_wa && scanline == vblank_start)
+		scanline = intel_get_crtc_scanline(crtc);
+
 	crtc->debug.scanline_start = scanline;
 	crtc->debug.start_vbl_time = ktime_get();
 	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73a..f841152 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *                available in the work queue (note, the queue is shared,
  *                not per-engine). It is OK for this to be nonzero, but
  *                it should not be huge!
- *   q_fail: failed to enqueue a work item. This should never happen,
- *           because we check for space beforehand.
  *   b_fail: failed to ring the doorbell. This should never happen, unless
  *           somehow the hardware misbehaves, or maybe if the GuC firmware
  *           crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1afb8b06..12b85b3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = NULL;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
 		}
 
 		for_each_engine(engine, i915, id) {
-			if (dw == 0) {
+			if (!obj) {
 				obj = create_test_object(ctx, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
 				goto out_unlock;
 			}
 
-			if (++dw == max_dwords(obj))
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
 				dw = 0;
+			}
 			ndwords++;
 		}
 		ncontexts++;
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801f..8b05ecb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		ret = drm_of_find_panel_or_bridge(child,
 						  imx_ldb->lvds_mux ? 4 : 2, 0,
 						  &channel->panel, &channel->bridge);
-		if (ret)
+		if (ret && ret != -ENODEV)
 			return ret;
 
 		/* panel ddc only if there is no bridge */
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995..b5cc6e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
 
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
-	u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
+	int ret;
+	u32 val;
 
-	while (timeout_ms--) {
-		if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
-			break;
-
-		usleep_range(2, 4);
-	}
-
-	if (timeout_ms == 0) {
+	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+				 4, 2000000);
+	if (ret) {
 		DRM_WARN("polling dsi wait not busy timeout!\n");
 
 		mtk_dsi_enable(dsi);
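
readl_poll_timeout() from <linux/iopoll.h>, used above, re-reads the register into its 'val' argument until the condition is true or the timeout (in microseconds) expires, sleeping between reads, and returns 0 on success or -ETIMEDOUT. An illustrative poll against a made-up status register and bit:

	u32 status;
	int err;

	/* EXAMPLE_STATUS and EXAMPLE_BUSY are hypothetical; poll every ~10us,
	 * give up after 100ms.
	 */
	err = readl_poll_timeout(base + EXAMPLE_STATUS, status,
				 !(status & EXAMPLE_BUSY), 10, 100000);
	if (err)
		dev_warn(dev, "device still busy after 100ms\n");
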
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03..0a4ffd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
 	}
 
 	err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-	if (err) {
+	if (err < 0) {
 		dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
 			err);
 		return err;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5..10b227d 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
 	.max_register   = 0x1000,
 };
 
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
 	if (ret)
 		goto free_drm;
 
-	ret = component_bind_all(drm->dev, drm);
-	if (ret) {
-		dev_err(drm->dev, "Couldn't bind all components\n");
-		goto free_drm;
+	if (has_components) {
+		ret = component_bind_all(drm->dev, drm);
+		if (ret) {
+			dev_err(drm->dev, "Couldn't bind all components\n");
+			goto free_drm;
+		}
 	}
 
 	ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ static int meson_drv_bind(struct device *dev)
 	return ret;
 }
 
+static int meson_drv_bind(struct device *dev)
+{
+	return meson_drv_bind_master(dev, true);
+}
+
 static void meson_drv_unbind(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
 		count += meson_probe_remote(pdev, &match, np, remote);
 	}
 
+	if (count && !match)
+		return meson_drv_bind_master(&pdev->dev, false);
+
 	/* If some endpoints were found, initialize the nodes */
 	if (count) {
 		dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a..f4b5358 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
 	if (IS_G200_SE(mdev)) {
-		if (mdev->unique_rev_id >= 0x02) {
+		if  (mdev->unique_rev_id >= 0x04) {
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			WREG8(MGAREG_CRTCEXT_DATA, 0);
+		} else if (mdev->unique_rev_id >= 0x02) {
 			u8 hi_pri_lvl;
 			u32 bpp;
 			u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (30100 * 1024))
 				return MODE_BANDWIDTH;
+		} else {
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (55000 * 1024))
+				return MODE_BANDWIDTH;
 		}
 	} else if (mdev->type == G200_WB) {
 		if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5b8e23d..0a31cd6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -13,6 +13,7 @@
 	select QCOM_SCM
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
+	select PM_OPP
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index f8f48d0..9c34d78 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
 	.map = mdss_hw_irqdomain_map,
 	.xlate = irq_domain_xlate_onecell,
 };
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe..7d37412 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
 	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
 			sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
 
-	if (mdp5_state && mdp5_state->base.fb)
-		drm_framebuffer_reference(mdp5_state->base.fb);
+	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
 	return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
 			mdp5_pipe_release(state->state, old_hwpipe);
 			mdp5_pipe_release(state->state, old_right_hwpipe);
 		}
+	} else {
+		mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+		mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
 	}
 
 	return 0;
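
The mdp5 duplicate_state fix leans on __drm_atomic_helper_plane_duplicate_state(), which copies the embedded base state and takes the framebuffer/fence references, instead of open-coding only the fb reference (and it now also bails out on allocation failure). A sketch of the same pattern for a hypothetical driver-private plane state:

	struct example_plane_state {
		struct drm_plane_state base;
		/* driver-private members would follow */
	};

	#define to_example_plane_state(x) \
		container_of(x, struct example_plane_state, base)

	static struct drm_plane_state *
	example_plane_duplicate_state(struct drm_plane *plane)
	{
		struct example_plane_state *state;

		state = kmemdup(to_example_plane_state(plane->state),
				sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
		return &state->base;
	}
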
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 87b5695..9d498eb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 28b6f9b..1b26ca6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3f299c5..a2f89ba 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-	struct msm_fence_context *fctx;
 	struct dma_fence base;
+	struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-	struct msm_fence *f = to_msm_fence(fence);
-	kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
 	.wait = dma_fence_default_wait,
-	.release = msm_fence_release,
+	.release = dma_fence_free,
 };
 
 struct dma_fence *
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b..50289a2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_gem_object *msm_obj;
 	bool use_vram = false;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
 	case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
+	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+	mutex_lock(&dev->struct_mutex);
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+	mutex_unlock(&dev->struct_mutex);
+
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 60bb290..13403c6 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	return msm_obj->resv;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1c545eb..7832e64 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		if (!in_fence)
 			return -EINVAL;
 
-		/* TODO if we get an array-fence due to userspace merging multiple
-		 * fences, we need a way to determine if all the backing fences
-		 * are from our own context..
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
 		 */
-
-		if (in_fence->context != gpu->fctx->context) {
+		if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
 			ret = dma_fence_wait(in_fence, true);
 			if (ret)
 				return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 			goto out;
 		}
 
-		if ((submit_cmd.size + submit_cmd.submit_offset) >=
-				msm_obj->base.size) {
+		if (!submit_cmd.size ||
+			((submit_cmd.size + submit_cmd.submit_offset) >
+				msm_obj->base.size)) {
 			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
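
dma_fence_match_context(), used in the submit path above, returns true only when the fence (or, for a fence array, every backing fence) comes from the given context, so the explicit wait is limited to foreign fences. The call shape, repeated here as a reference:

	/* Only block on fences from another context; our own ring orders itself. */
	if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
		ret = dma_fence_wait(in_fence, true);
		if (ret)
			return ret;
	}
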
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38..0fdc88d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 		gpu->grp_clks[i] = get_clock(dev, name);
 
 		/* Remember the key clocks that we need to control later */
-		if (!strcmp(name, "core"))
+		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
 			gpu->core_clk = gpu->grp_clks[i];
-		else if (!strcmp(name, "rbbmtimer"))
+		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
 			gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
 		++i;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c..0abe776 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
 #include "mxsfb_drv.h"
 #include "mxsfb_regs.h"
 
+#define MXS_SET_ADDR		0x4
+#define MXS_CLR_ADDR		0x8
+#define MODULE_CLKGATE		BIT(30)
+#define MODULE_SFTRST		BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT		1000000
+
 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
 {
 	return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
 		clk_disable_unprepare(mxsfb->clk_disp_axi);
 }
 
+/*
+ * Clear the bit and poll it cleared.  This is usually called with
+ * a reset address and mask being either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	u32 reg;
+
+	writel(mask, addr + MXS_CLR_ADDR);
+	return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+	int ret;
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (ret)
+		return ret;
+
+	return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 {
 	struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 	 */
 	mxsfb_enable_axi_clk(mxsfb);
 
+	/* Mandatory eLCDIF reset as per the Reference Manual */
+	err = mxsfb_reset_block(mxsfb->base);
+	if (err)
+		return;
+
 	/* Clear the FIFOs */
 	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe..820a480 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
 	struct list_head head;
+	struct list_head exec;
 	u64 timestamp;
 	void (*func)(struct nvkm_alarm *);
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 39468c21..7459ef9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {
 }
 
 #ifdef CONFIG_VGA_SWITCHEROO
-static const char nouveau_dsm_muid[] = {
-	0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
-	0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
-};
+static const guid_t nouveau_dsm_muid =
+	GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+		  0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
 
-static const char nouveau_op_dsm_muid[] = {
-	0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
-	0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
-};
+static const guid_t nouveau_op_dsm_muid =
+	GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+		  0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
 
 static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
 {
@@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
 		args_buff[i] = (arg >> i * 8) & 0xFF;
 
 	*result = 0;
-	obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+	obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
 				      func, &argv4, ACPI_TYPE_BUFFER);
 	if (!obj) {
 		acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)
 		.integer.value = arg,
 	};
 
-	obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+	obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
 				      func, &argv4, ACPI_TYPE_INTEGER);
 	if (!obj) {
 		acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
 	if (!acpi_has_method(dhandle, "_DSM"))
 		return;
 
-	supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+	supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
 				      1 << NOUVEAU_DSM_POWER);
 	optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 36268e1..15a13d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	nouveau_fbcon_init(dev);
 	nouveau_led_init(dev);
 
-	if (nouveau_runtime_pm != 0) {
+	if (nouveau_pmops_runtime()) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	if (nouveau_runtime_pm != 0) {
+	if (nouveau_pmops_runtime()) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
 	}
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
 	return nouveau_do_resume(drm_dev, false);
 }
 
+bool
+nouveau_pmops_runtime()
+{
+	if (nouveau_runtime_pm == -1)
+		return nouveau_is_optimus() || nouveau_is_v1_dsm();
+	return nouveau_runtime_pm == 1;
+}
+
 static int
 nouveau_pmops_runtime_suspend(struct device *dev)
 {
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (nouveau_runtime_pm == 0) {
-		pm_runtime_forbid(dev);
-		return -EBUSY;
-	}
-
-	/* are we optimus enabled? */
-	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+	if (!nouveau_pmops_runtime()) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
 	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
 	int ret;
 
-	if (nouveau_runtime_pm == 0)
-		return -EINVAL;
+	if (!nouveau_pmops_runtime()) {
+		pm_runtime_forbid(dev);
+		return -EBUSY;
+	}
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 	struct drm_crtc *crtc;
 
-	if (nouveau_runtime_pm == 0) {
-		pm_runtime_forbid(dev);
-		return -EBUSY;
-	}
-
-	/* are we optimus enabled? */
-	if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-		DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+	if (!nouveau_pmops_runtime()) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
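
nouveau_pmops_runtime() centralises the decision of whether runtime PM is in effect (the runpm module parameter, Optimus, or v1 DSM), so the suspend/resume/idle handlers and the VGA switcheroo setup below all agree on one predicate. Call sites reduce to a single guard:

	if (!nouveau_pmops_runtime()) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}
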
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f..a11b6aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/object.h>
 #include <nvif/device.h>
 
-extern int nouveau_runtime_pm;
-
 struct nouveau_drm {
 	struct nouveau_cli client;
 	struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
 
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
 
 #include <nvkm/core/tegra.h>
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc..02fe0ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@ void
 nouveau_vga_init(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
-	bool runtime = false;
+	bool runtime = nouveau_pmops_runtime();
 
 	/* only relevant for PCI devices */
 	if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
 	if (pci_is_thunderbolt_attached(dev->pdev))
 		return;
 
-	if (nouveau_runtime_pm == 1)
-		runtime = true;
-	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-		runtime = true;
 	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
 
 	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
-	bool runtime = false;
+	bool runtime = nouveau_pmops_runtime();
 
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
 
 	if (pci_is_thunderbolt_attached(dev->pdev))
 		return;
 
-	if (nouveau_runtime_pm == 1)
-		runtime = true;
-	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-		runtime = true;
-
 	vga_switcheroo_unregister_client(dev->pdev);
 	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
 		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a766324..06e564a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 					asyc->set.dither = true;
 			}
 		} else {
-			asyc->set.mask = ~0;
+			if (asyc)
+				asyc->set.mask = ~0;
 			asyh->set.mask = ~0;
 		}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index e3e2f5e..f44682d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
 	struct nvkm_subdev *subdev = &mxm->subdev;
 	struct nvkm_device *device = subdev->device;
-	static char muid[] = {
-		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
-		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
-	};
+	static guid_t muid =
+		GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
+			  0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
 	u32 mxms_args[] = { 0x00000000 };
 	union acpi_object argv4 = {
 		.buffer.type = ACPI_TYPE_BUFFER,
@@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 	 * unless you pass in exactly the version it supports..
 	 */
 	rev = (version & 0xf0) << 4 | (version & 0x0f);
-	obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+	obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
 	if (!obj) {
 		nvkm_debug(subdev, "DSM MXMS failed\n");
 		return false;
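
Both _DSM conversions above (nouveau_acpi.c and nvkm/subdev/mxm) replace open-coded byte arrays with a guid_t built by GUID_INIT() from <linux/uuid.h>, matching the acpi_evaluate_dsm()/acpi_check_dsm() interfaces that now take a guid_t pointer. A sketch with a made-up UUID and placeholder revision/function numbers:

	static const guid_t example_dsm_guid =
		GUID_INIT(0x12345678, 0x9abc, 0xdef0,
			  0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

	obj = acpi_evaluate_dsm(handle, &example_dsm_guid, 1, 0x01, &argv4);
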
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86ea..2437f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 		/* Move to completed list.  We'll drop the lock before
 		 * executing the callback so it can reschedule itself.
 		 */
-		list_move_tail(&alarm->head, &exec);
+		list_del_init(&alarm->head);
+		list_add(&alarm->exec, &exec);
 	}
 
 	/* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 	spin_unlock_irqrestore(&tmr->lock, flags);
 
 	/* Execute completed callbacks. */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
+	list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+		list_del(&alarm->exec);
 		alarm->func(alarm);
 	}
 }
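
Giving each alarm a second list head appears intended to let the trigger path collect completed alarms on a private 'exec' list: the callbacks, which may immediately re-arm the alarm and re-use 'head', then run without touching the list being walked. The struct change, repeated with explanatory comments:

	struct nvkm_alarm {
		struct list_head head;	/* pending list, protected by tmr->lock */
		struct list_head exec;	/* local list used only while callbacks run */
		u64 timestamp;
		void (*func)(struct nvkm_alarm *);
	};
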
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 008c145..ca44233 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
 	u32 tmp, wm_mask;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 
 		/* watermark for high clocks */
 		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0bf1035..5346372 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 		dram_channels = evergreen_get_number_of_dram_channels(rdev);
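
The watermark change here and in cik.c above (and repeated for si.c below) promotes the multiplication to 64 bits and divides with div_u64() from <linux/math64.h>; with the previous unsigned long arithmetic, the product crtc_htotal * 1000000 can overflow 32 bits for 4k-class modes on 32-bit kernels. The overflow-safe form:

	/* Compute the line time in microseconds without a 32-bit intermediate. */
	line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
				  (u32)mode->clock);
	line_time = min(line_time, (u32)65535);
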
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c1c8e22..e562a78 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -375,7 +375,7 @@ struct radeon_fence {
 	unsigned		ring;
 	bool			is_vm_update;
 
-	wait_queue_t		fence_wake;
+	wait_queue_entry_t		fence_wake;
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480f..3178ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
 	    rdev->pdev->subsystem_vendor == 0x103c &&
 	    rdev->pdev->subsystem_device == 0x280a)
 		return;
+	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS400 &&
+	    rdev->pdev->subsystem_vendor == 0x1179 &&
+	    rdev->pdev->subsystem_device == 0xff31)
+	        return;
 
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf427..0a6444d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
 	 */
 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+	 */
+	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
 	/* macbook pro 8.2 */
 	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
 	{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ef09f0a6..e86f2bd 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
 {
 	struct radeon_fence *fence;
 	u64 seq;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 7431eb4..d34d1cf 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}
 
 	/* TODO: is this still necessary on NI+ ? */
-	if ((cmd == 0 || cmd == 1 || cmd == 0x3) &&
+	if ((cmd == 0 || cmd == 0x3) &&
 	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
 		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 			  start, end);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 76d1888..5303f25 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
 	fixed20_12 a, b, c;
 
 	if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9..ce5f2d1 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 				      struct drm_connector_state *conn_state)
 {
 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-	struct rockchip_dp_device *dp = to_dp(encoder);
-	int ret;
 
 	/*
 	 * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
 	s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
 	s->output_type = DRM_MODE_CONNECTOR_eDP;
-	if (dp->data->chip_type == RK3399_EDP) {
-		/*
-		 * For RK3399, VOP Lit must code the out mode to RGB888,
-		 * VOP Big must code the out mode to RGB10.
-		 */
-		ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-							encoder);
-		if (ret > 0)
-			s->output_mode = ROCKCHIP_OUT_MODE_P888;
-	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd..14fa1f8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
 	struct cdn_dp_device *dp = encoder_to_dp(encoder);
 	int ret, val;
-	struct rockchip_crtc_state *state;
 
 	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
 	if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
 	DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
 			  (ret) ? "LIT" : "BIG");
-	state = to_rockchip_crtc_state(encoder->crtc->state);
-	if (ret) {
+	if (ret)
 		val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-		state->output_mode = ROCKCHIP_OUT_MODE_P888;
-	} else {
+	else
 		val = DP_SEL_VOP_LIT << 16;
-		state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-	}
 
 	ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
 	if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d..45589d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
+	const struct vop_data *vop_data = vop->data;
 	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
 	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
 	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
 		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
 			      s->output_type);
 	}
+
+	/*
+	 * If the VOP does not support RGB10 output, force RGB10 down to RGB888.
+	 */
+	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+		s->output_mode = ROCKCHIP_OUT_MODE_P888;
 	VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
 	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85..9979fd0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@ struct vop_data {
 	const struct vop_intr *intr;
 	const struct vop_win_data *win;
 	unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10	BIT(0)
+	u64 feature;
 };
 
 /* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da4444..bafd698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
 	.init_table = rk3288_init_reg_table,
 	.table_size = ARRAY_SIZE(rk3288_init_reg_table),
+	.feature = VOP_FEATURE_OUTPUT_RGB10,
 	.intr = &rk3288_vop_intr,
 	.ctrl = &rk3288_ctrl_data,
 	.win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
 	.init_table = rk3399_init_reg_table,
 	.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+	.feature = VOP_FEATURE_OUTPUT_RGB10,
 	.intr = &rk3399_vop_intr,
 	.ctrl = &rk3399_ctrl_data,
 	/*
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 9a1e34e..81f86a6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -451,18 +451,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 
 
 #ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *
-tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
-{
-	struct tegra_drm_context *context;
-
-	mutex_lock(&file->lock);
-	context = idr_find(&file->contexts, id);
-	mutex_unlock(&file->lock);
-
-	return context;
-}
-
 static int tegra_gem_create(struct drm_device *drm, void *data,
 			    struct drm_file *file)
 {
@@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
 	if (err < 0)
 		return err;
 
-	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
 	if (err < 0) {
 		client->ops->close_channel(context);
 		return err;
@@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -EINVAL;
 		goto unlock;
@@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
@@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
 
 	mutex_lock(&fpriv->lock);
 
-	context = tegra_drm_file_get_context(fpriv, args->context);
+	context = idr_find(&fpriv->contexts, args->context);
 	if (!context) {
 		err = -ENODEV;
 		goto unlock;
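
Two related Tegra cleanups are visible here: contexts are now allocated with idr_alloc(..., 1, 0, GFP_KERNEL), which appears intended to keep 0 from ever being a valid context id, and the per-file lookup helper is dropped in favour of calling idr_find() directly while fpriv->lock is already held by the ioctl. A sketch of the allocate/lookup pair:

	/* Allocate an id >= 1 so that 0 can safely mean "no context". */
	int id = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	/* Later, with fpriv->lock held: */
	context = idr_find(&fpriv->contexts, id);
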
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2..1f013d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
 	list_for_each_entry_safe(entry, next, &man->list, head)
 		vmw_cmdbuf_res_free(man, entry);
 
+	drm_ht_remove(&man->resources);
 	kfree(man);
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c..4b948fba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..a1c68e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				return fifo_state->static_buffer;
 			else {
 				fifo_state->dynamic_buffer = vmalloc(bytes);
+				if (!fifo_state->dynamic_buffer)
+					goto out_err;
 				return fifo_state->dynamic_buffer;
 			}
 		}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2..1d2db5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-			       struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int crtc_x, int crtc_y,
-			       unsigned int crtc_w,
-			       unsigned int crtc_h,
-			       uint32_t src_x, uint32_t src_y,
-			       uint32_t src_w, uint32_t src_h)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-	struct vmw_surface *surface = NULL;
-	struct vmw_dma_buffer *dmabuf = NULL;
-	s32 hotspot_x, hotspot_y;
-	int ret;
-
-	hotspot_x = du->hotspot_x + fb->hot_x;
-	hotspot_y = du->hotspot_y + fb->hot_y;
-
-	/* A lot of the code assumes this */
-	if (crtc_w != 64 || crtc_h != 64) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-		dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-	else
-		surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-	if (surface && !surface->snooper.image) {
-		DRM_ERROR("surface not suitable for cursor\n");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* setup new image */
-	ret = 0;
-	if (surface) {
-		/* vmw_user_surface_lookup takes one reference */
-		du->cursor_surface = surface;
-
-		du->cursor_age = du->cursor_surface->snooper.age;
-
-		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-					      64, 64, hotspot_x, hotspot_y);
-	} else if (dmabuf) {
-		/* vmw_user_surface_lookup takes one reference */
-		du->cursor_dmabuf = dmabuf;
-
-		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-					       hotspot_x, hotspot_y);
-	} else {
-		vmw_cursor_update_position(dev_priv, false, 0, 0);
-		goto out;
-	}
-
-	if (!ret) {
-		du->cursor_x = crtc_x + du->set_gui_x;
-		du->cursor_y = crtc_y + du->set_gui_y;
-
-		vmw_cursor_update_position(dev_priv, true,
-					   du->cursor_x + hotspot_x,
-					   du->cursor_y + hotspot_y);
-	}
-
-out:
-	return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-	if (plane->fb) {
-		drm_framebuffer_unreference(plane->fb);
-		plane->fb = NULL;
-	}
-
-	return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 
 
 void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-				   struct drm_plane_state *old_state)
-{
-	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-	drm_atomic_set_fb_for_plane(plane->state, NULL);
-	vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
-void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 				  struct drm_plane_state *old_state)
 {
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 	 */
 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
 	    dmabuf && only_2d &&
+	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
 	    dev_priv->active_display_unit == vmw_du_screen_target) {
 		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
 					      dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2..5f8d678 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 			   u16 *r, u16 *g, u16 *b,
 			   uint32_t size,
 			   struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-			    uint32_t handle, uint32_t width, uint32_t height,
-			    int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-			       struct drm_crtc *crtc,
-			       struct drm_framebuffer *fb,
-			       int crtc_x, int crtc_y,
-			       unsigned int crtc_w,
-			       unsigned int crtc_h,
-			       uint32_t src_x, uint32_t src_y,
-			       uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 				     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 				       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-					struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 				   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bd..50be1f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
 	struct vmw_kms_dirty base;
 	SVGA3dTransferType  transfer;
 	s32 left, right, top, bottom;
+	s32 fb_left, fb_top;
 	u32 pitch;
 	union {
 		struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
 	dirty->num_hits = 1;
 
-	/* Calculate bounding box */
+	/* Calculate destination bounding box */
 	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
 	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
 	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
 	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+	/*
+	 * Calculate content bounding box.  We only need the top-left
+	 * coordinate because width and height will be the same as the
+	 * destination bounding box above
+	 */
+	ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+	ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
 	src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
 	src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-	src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+	src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
 	dst_pitch = ddirty->pitch;
 	dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-	dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+	dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
 	/* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
 	}
 
 out_cleanup:
-	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
 	ddirty->right = ddirty->bottom = S32_MIN;
 }
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 		SVGA3D_READ_HOST_VRAM;
 	ddirty.left = ddirty.top = S32_MAX;
 	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.fb_left = ddirty.fb_top = S32_MAX;
 	ddirty.pitch = vfb->base.pitches[0];
 	ddirty.buf = buf;
 	ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
 		DRM_ERROR("Failed to bind surface to STDU.\n");
 	else
 		crtc->primary->fb = plane->state->fb;
+
+	ret = vmw_stdu_update_st(dev_priv, stdu);
+
+	if (ret)
+		DRM_ERROR("Failed to update STDU.\n");
 }
 
 
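As an aside on the bounding-box bookkeeping in the vmwgfx_stdu.c hunks above: the dirty tracking now accumulates two rectangles while walking the clip list, the destination bounding box on the screen target (left/top/right/bottom) and the matching top-left of the content in the framebuffer (fb_left/fb_top), which is enough because both boxes share the same width and height. The following standalone C sketch illustrates that accumulation; the struct and field names are invented for the example and are not the driver's types.

/* Illustrative userspace sketch (not the driver code): accumulate a
 * destination bounding box and the matching content (framebuffer)
 * top-left from a list of clip rectangles. */
#include <stdio.h>
#include <limits.h>

struct clip {
	int unit_x1, unit_y1, unit_x2, unit_y2;	/* destination rect */
	int fb_x, fb_y;				/* top-left in the framebuffer */
};

struct bbox {
	int left, top, right, bottom;	/* destination bounding box */
	int fb_left, fb_top;		/* content bounding box top-left */
};

static void bbox_init(struct bbox *b)
{
	b->left = b->top = b->fb_left = b->fb_top = INT_MAX;
	b->right = b->bottom = INT_MIN;
}

static void bbox_add_clip(struct bbox *b, const struct clip *c)
{
	if (c->unit_x1 < b->left)   b->left = c->unit_x1;
	if (c->unit_y1 < b->top)    b->top = c->unit_y1;
	if (c->unit_x2 > b->right)  b->right = c->unit_x2;
	if (c->unit_y2 > b->bottom) b->bottom = c->unit_y2;
	/* Width/height of the content box equal the destination box,
	 * so only the top-left corner needs tracking. */
	if (c->fb_x < b->fb_left)   b->fb_left = c->fb_x;
	if (c->fb_y < b->fb_top)    b->fb_top = c->fb_y;
}

int main(void)
{
	struct clip clips[] = {
		{ 10, 10, 50, 40, 0, 0 },
		{ 30, 20, 90, 80, 20, 10 },
	};
	struct bbox b;
	unsigned int i;

	bbox_init(&b);
	for (i = 0; i < sizeof(clips) / sizeof(clips[0]); i++)
		bbox_add_clip(&b, &clips[i]);

	printf("dst box: (%d,%d)-(%d,%d), content top-left: (%d,%d)\n",
	       b.left, b.top, b.right, b.bottom, b.fb_left, b.fb_top);
	return 0;
}
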
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341..6b70bd2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	uint32_t backup_handle;
+	uint32_t backup_handle = 0;
 
 	if (req->multisample_count != 0)
 		return -EINVAL;
 
+	if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup,
 					     &user_srf->backup_base);
-		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-		    res->backup_size) {
-			DRM_ERROR("Surface backup buffer is too small.\n");
-			vmw_dmabuf_unreference(&res->backup);
-			ret = -EINVAL;
-			goto out_unlock;
+		if (ret == 0) {
+			if (res->backup->base.num_pages * PAGE_SIZE <
+			    res->backup_size) {
+				DRM_ERROR("Surface backup buffer is too small.\n");
+				vmw_dmabuf_unreference(&res->backup);
+				ret = -EINVAL;
+				goto out_unlock;
+			} else {
+				backup_handle = req->buffer_handle;
+			}
 		}
 	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
 				 dev_priv->stdu_max_height);
 
 		if (size.width > max_width || size.height > max_height) {
-			DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+			DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
 				  size.width, size.height,
 				  max_width, max_height);
 			return -EINVAL;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f05ebb1..ac65f52 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
 
 	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
 	if (IS_ERR(host->rst)) {
-		err = PTR_ERR(host->clk);
+		err = PTR_ERR(host->rst);
 		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
 		return err;
 	}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 16d5568..2fb5f43 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
 	spin_lock_irqsave(&ipu->lock, flags);
 
 	val = ipu_cm_read(ipu, IPU_CONF);
-	if (vdi) {
+	if (vdi)
 		val |= IPU_CONF_IC_INPUT;
-	} else {
+	else
 		val &= ~IPU_CONF_IC_INPUT;
-		if (csi_id == 1)
-			val |= IPU_CONF_CSI_SEL;
-		else
-			val &= ~IPU_CONF_CSI_SEL;
-	}
+
+	if (csi_id == 1)
+		val |= IPU_CONF_CSI_SEL;
+	else
+		val &= ~IPU_CONF_CSI_SEL;
+
 	ipu_cm_write(ipu, val, IPU_CONF);
 
 	spin_unlock_irqrestore(&ipu->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c555633..c35f74c 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
 	if (pre->in_use)
 		return -EBUSY;
 
-	clk_prepare_enable(pre->clk_axi);
-
 	/* first get the engine out of reset and remove clock gating */
 	writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-	u32 val;
-
-	val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-	writel(val, pre->regs + IPU_PRE_CTRL);
-
-	clk_disable_unprepare(pre->clk_axi);
+	writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
 	pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
 	if (!pre->buffer_virt)
 		return -ENOMEM;
 
+	clk_prepare_enable(pre->clk_axi);
+
 	pre->dev = dev;
 	platform_set_drvdata(pdev, pre);
 	mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
 	available_pres--;
 	mutex_unlock(&ipu_pre_list_mutex);
 
+	clk_disable_unprepare(pre->clk_axi);
+
 	if (pre->buffer_virt)
 		gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
 			      IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 92f1452..76875f62 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
 {
 	struct vga_device *vgadev, *conflict;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int rc = 0;
 
 	vga_check_first_use();
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index fe40e5e..687705c 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -275,10 +275,12 @@
 	 - Trio Linker Plus II
 
 config HID_ELECOM
-	tristate "ELECOM BM084 bluetooth mouse"
+	tristate "ELECOM HID devices"
 	depends on HID
 	---help---
-	Support for the ELECOM BM084 (bluetooth mouse).
+	Support for ELECOM devices:
+	  - BM084 Bluetooth Mouse
+	  - DEFT Trackball (Wired and wireless)
 
 config HID_ELO
 	tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 16df6cc9..a6268f2 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_IS_MULTITOUCH		BIT(3)
 #define QUIRK_NO_CONSUMER_USAGES	BIT(4)
 #define QUIRK_USE_KBD_BACKLIGHT		BIT(5)
+#define QUIRK_T100_KEYBOARD		BIT(6)
 
 #define I2C_KEYBOARD_QUIRKS			(QUIRK_FIX_NOTEBOOK_REPORT | \
 						 QUIRK_NO_INIT_REPORTS | \
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev)
 		drvdata->kbd_backlight->removed = true;
 		cancel_work_sync(&drvdata->kbd_backlight->work);
 	}
+
+	hid_hw_stop(hdev);
 }
 
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
 		rdesc[55] = 0xdd;
 	}
+	if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
+		 *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+		hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
+		rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
+	}
+
 	return rdesc;
 }
 
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = {
 		USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
 		USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+		USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+	  QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 37084b6..2241e79 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid)
 				 * hid-rmi should take care of them,
 				 * not hid-generic
 				 */
-				if (IS_ENABLED(CONFIG_HID_RMI))
-					hid->group = HID_GROUP_RMI;
+				hid->group = HID_GROUP_RMI;
 		break;
 	}
 
+	/* fall back to generic driver in case specific driver doesn't exist */
+	switch (hid->group) {
+	case HID_GROUP_MULTITOUCH_WIN_8:
+		/* fall-through */
+	case HID_GROUP_MULTITOUCH:
+		if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_SENSOR_HUB:
+		if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_RMI:
+		if (!IS_ENABLED(CONFIG_HID_RMI))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_WACOM:
+		if (!IS_ENABLED(CONFIG_HID_WACOM))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	case HID_GROUP_LOGITECH_DJ_DEVICE:
+		if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
+			hid->group = HID_GROUP_GENERIC;
+		break;
+	}
 	vfree(parser);
 	return 0;
 }
@@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
  * used as a driver. See hid_scan_report().
  */
 static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@@ -1851,59 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
 #endif
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+#if IS_ENABLED(CONFIG_HID_ELECOM)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+        { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+        { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@@ -1912,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@@ -1927,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
 #if IS_ENABLED(CONFIG_HID_LENOVO)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
 #endif
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1954,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@@ -1966,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -1992,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -2014,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+#endif
 #if IS_ENABLED(CONFIG_HID_ROCCAT)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2048,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
 #endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@@ -2069,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2080,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -2093,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2114,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+#endif
 	{ }
 };
 
@@ -2144,7 +2307,7 @@ struct hid_dynid {
  * Adds a new dynamic hid device ID to this driver,
  * and causes the driver to probe for all devices again.
  */
-static ssize_t store_new_id(struct device_driver *drv, const char *buf,
+static ssize_t new_id_store(struct device_driver *drv, const char *buf,
 		size_t count)
 {
 	struct hid_driver *hdrv = to_hid_driver(drv);
@@ -2176,7 +2339,13 @@ static ssize_t store_new_id(struct device_driver *drv, const char *buf,
 
 	return ret ? : count;
 }
-static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
+static DRIVER_ATTR_WO(new_id);
+
+static struct attribute *hid_drv_attrs[] = {
+	&driver_attr_new_id.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(hid_drv);
 
 static void hid_free_dynids(struct hid_driver *hdrv)
 {
@@ -2340,6 +2509,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
 static struct bus_type hid_bus_type = {
 	.name		= "hid",
 	.dev_groups	= hid_dev_groups,
+	.drv_groups	= hid_drv_groups,
 	.match		= hid_bus_match,
 	.probe		= hid_device_probe,
 	.remove		= hid_device_remove,
@@ -2779,8 +2949,6 @@ EXPORT_SYMBOL_GPL(hid_destroy_device);
 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
 		const char *mod_name)
 {
-	int ret;
-
 	hdrv->driver.name = hdrv->name;
 	hdrv->driver.bus = &hid_bus_type;
 	hdrv->driver.owner = owner;
@@ -2789,21 +2957,12 @@ int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
 	INIT_LIST_HEAD(&hdrv->dyn_list);
 	spin_lock_init(&hdrv->dyn_lock);
 
-	ret = driver_register(&hdrv->driver);
-	if (ret)
-		return ret;
-
-	ret = driver_create_file(&hdrv->driver, &driver_attr_new_id);
-	if (ret)
-		driver_unregister(&hdrv->driver);
-
-	return ret;
+	return driver_register(&hdrv->driver);
 }
 EXPORT_SYMBOL_GPL(__hid_register_driver);
 
 void hid_unregister_driver(struct hid_driver *hdrv)
 {
-	driver_remove_file(&hdrv->driver, &driver_attr_new_id);
 	driver_unregister(&hdrv->driver);
 	hid_free_dynids(hdrv);
 }
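A note on the pattern used throughout the hid-core.c hunks above: the reshuffled hid_have_special_driver[] table relies on IS_ENABLED() both as an #if guard around each driver's entries and, earlier in hid_scan_report(), as an ordinary C expression for the fall-back to HID_GROUP_GENERIC. The sketch below is a simplified userspace re-creation of that idiom (it ignores the CONFIG_*_MODULE case, and the config and device names are invented), showing why the same macro works in both contexts.

/* Simplified stand-in for the kernel's kconfig.h IS_ENABLED() machinery. */
#include <stdio.h>

#define CONFIG_DRIVER_A 1		/* pretend this driver is built in */
/* CONFIG_DRIVER_B intentionally left undefined */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

struct dev_id { unsigned short vendor, product; };

static const struct dev_id have_special_driver[] = {
#if IS_ENABLED(CONFIG_DRIVER_A)
	{ 0x046d, 0xc52b },	/* only listed when driver A is built */
#endif
#if IS_ENABLED(CONFIG_DRIVER_B)
	{ 0x056a, 0x0302 },	/* dropped: driver B is not built */
#endif
	{ 0, 0 }		/* sentinel */
};

int main(void)
{
	/* The same macro also works as a plain C expression, as in the
	 * runtime fall-back to a generic group shown above. */
	printf("driver A enabled: %d\n", IS_ENABLED(CONFIG_DRIVER_A));
	printf("driver B enabled: %d\n", IS_ENABLED(CONFIG_DRIVER_B));
	printf("table entries: %zu\n",
	       sizeof(have_special_driver) / sizeof(have_special_driver[0]));
	return 0;
}
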
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 6e3848a..e2c7465 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,10 +1,8 @@
 /*
- *  HID driver for Elecom BM084 (bluetooth mouse).
- *  Removes a non-existing horizontal wheel from
- *  the HID descriptor.
- *  (This module is based on "hid-ortek".)
- *
+ *  HID driver for ELECOM devices.
  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+ *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+ *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
  */
 
 /*
@@ -23,15 +21,61 @@
 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		unsigned int *rsize)
 {
-	if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-		hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
-		rdesc[47] = 0x00;
+	switch (hdev->product) {
+	case USB_DEVICE_ID_ELECOM_BM084:
+		/* The BM084 Bluetooth mouse includes a non-existing horizontal
+		 * wheel in the HID descriptor. */
+		if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
+			hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
+			rdesc[47] = 0x00;
+		}
+		break;
+	case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+	case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+		/* The DEFT trackball has eight buttons, but its descriptor only
+		 * reports five, disabling the three Fn buttons on the top of
+		 * the mouse.
+		 *
+		 * Apply the following diff to the descriptor:
+		 *
+		 * Collection (Physical),              Collection (Physical),
+		 *     Report ID (1),                      Report ID (1),
+		 *     Report Count (5),           ->      Report Count (8),
+		 *     Report Size (1),                    Report Size (1),
+		 *     Usage Page (Button),                Usage Page (Button),
+		 *     Usage Minimum (01h),                Usage Minimum (01h),
+		 *     Usage Maximum (05h),        ->      Usage Maximum (08h),
+		 *     Logical Minimum (0),                Logical Minimum (0),
+		 *     Logical Maximum (1),                Logical Maximum (1),
+		 *     Input (Variable),                   Input (Variable),
+		 *     Report Count (1),           ->      Report Count (0),
+		 *     Report Size (3),                    Report Size (3),
+		 *     Input (Constant),                   Input (Constant),
+		 *     Report Size (16),                   Report Size (16),
+		 *     Report Count (2),                   Report Count (2),
+		 *     Usage Page (Desktop),               Usage Page (Desktop),
+		 *     Usage (X),                          Usage (X),
+		 *     Usage (Y),                          Usage (Y),
+		 *     Logical Minimum (-32768),           Logical Minimum (-32768),
+		 *     Logical Maximum (32767),            Logical Maximum (32767),
+		 *     Input (Variable, Relative),         Input (Variable, Relative),
+		 * End Collection,                     End Collection,
+		 */
+		if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+			hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
+			rdesc[13] = 8; /* Button/Variable Report Count */
+			rdesc[21] = 8; /* Button/Variable Usage Maximum */
+			rdesc[29] = 0; /* Button/Constant Report Count */
+		}
+		break;
 	}
 	return rdesc;
 }
 
 static const struct hid_device_id elecom_devices[] = {
-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)},
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 643390b..4f9a393 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,7 @@
 #define USB_VENDOR_ID_ASUSTEK		0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2	0x175b
+#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD	0x17e0
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD	0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD	0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
@@ -318,6 +319,9 @@
 #define USB_VENDOR_ID_DELCOM		0x0fc5
 #define USB_DEVICE_ID_DELCOM_VISUAL_IND	0xb080
 
+#define USB_VENDOR_ID_DELL				0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE	0x301a
+
 #define USB_VENDOR_ID_DELORME		0x1163
 #define USB_DEVICE_ID_DELORME_EARTHMATE	0x0100
 #define USB_DEVICE_ID_DELORME_EM_LT20	0x0200
@@ -358,6 +362,8 @@
 
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRED	0x00fe
+#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS	0x00ff
 
 #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
 #define USB_DEVICE_ID_DREAM_CHEEKY_WN	0x0004
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 8daa8ce..0401503 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -872,10 +872,9 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
 static int i2c_hid_acpi_pdata(struct i2c_client *client,
 		struct i2c_hid_platform_data *pdata)
 {
-	static u8 i2c_hid_guid[] = {
-		0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
-		0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
-	};
+	static guid_t i2c_hid_guid =
+		GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+			  0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
 	union acpi_object *obj;
 	struct acpi_device *adev;
 	acpi_handle handle;
@@ -884,7 +883,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
 	if (!handle || acpi_bus_get_device(handle, &adev))
 		return -ENODEV;
 
-	obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
+	obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
 				      ACPI_TYPE_INTEGER);
 	if (!obj) {
 		dev_err(&client->dev, "device _DSM execution failed\n");
@@ -897,6 +896,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
 	return 0;
 }
 
+static void i2c_hid_acpi_fix_up_power(struct device *dev)
+{
+	acpi_handle handle = ACPI_HANDLE(dev);
+	struct acpi_device *adev;
+
+	if (handle && acpi_bus_get_device(handle, &adev) == 0)
+		acpi_device_fix_up_power(adev);
+}
+
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
 	{"ACPI0C50", 0 },
 	{"PNP0C50", 0 },
@@ -909,6 +917,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 {
 	return -ENODEV;
 }
+
+static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
 #endif
 
 #ifdef CONFIG_OF
@@ -1030,6 +1040,8 @@ static int i2c_hid_probe(struct i2c_client *client,
 	if (ret < 0)
 		goto err_regulator;
 
+	i2c_hid_acpi_fix_up_power(&client->dev);
+
 	pm_runtime_get_noresume(&client->dev);
 	pm_runtime_set_active(&client->dev);
 	pm_runtime_enable(&client->dev);
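On the i2c-hid change above that replaces the raw 16-byte _DSM UUID array with GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555, 0xAD, ...): the two spellings describe the same bytes because the first three GUID fields are stored little-endian while the trailing eight bytes stay in order. The standalone C sketch below (using a stand-in struct, not the kernel's guid_t) serializes the GUID form and checks it against the old byte array.

/* Illustrative userspace sketch of the GUID byte layout; not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct guid {
	uint32_t a;
	uint16_t b;
	uint16_t c;
	uint8_t  d[8];
};

static void guid_to_bytes(const struct guid *g, uint8_t out[16])
{
	out[0] = g->a & 0xff;		/* first three fields: little-endian */
	out[1] = (g->a >> 8) & 0xff;
	out[2] = (g->a >> 16) & 0xff;
	out[3] = (g->a >> 24) & 0xff;
	out[4] = g->b & 0xff;
	out[5] = (g->b >> 8) & 0xff;
	out[6] = g->c & 0xff;
	out[7] = (g->c >> 8) & 0xff;
	memcpy(&out[8], g->d, 8);	/* last eight bytes: kept in order */
}

int main(void)
{
	static const uint8_t raw[16] = {
		0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
		0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
	};
	struct guid g = {
		0x3CDFF6F7, 0x4267, 0x4555,
		{ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE },
	};
	uint8_t buf[16];

	guid_to_bytes(&g, buf);
	printf("layouts %s\n", memcmp(buf, raw, 16) == 0 ? "match" : "differ");
	return 0;
}
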
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 5f382fe..f272cdd 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -321,11 +321,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
 	len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute ishtp_cl_dev_attrs[] = {
-	__ATTR_RO(modalias),
-	__ATTR_NULL,
+static struct attribute *ishtp_cl_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(ishtp_cl_dev);
 
 static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
@@ -346,7 +348,7 @@ static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
 
 static struct bus_type ishtp_cl_bus_type = {
 	.name		= "ishtp",
-	.dev_attrs	= ishtp_cl_dev_attrs,
+	.dev_groups	= ishtp_cl_dev_groups,
 	.probe		= ishtp_cl_device_probe,
 	.remove		= ishtp_cl_device_remove,
 	.pm		= &ishtp_cl_bus_dev_pm_ops,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6316498..a88e7c7 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -85,6 +85,7 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 4b225fb..e274c9d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
 	unsigned char *data = wacom->data;
 
-	if (wacom->pen_input)
+	if (wacom->pen_input) {
 		dev_dbg(wacom->pen_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
-	else if (wacom->touch_input)
+
+		if (len == WACOM_PKGLEN_PENABLED ||
+		    data[0] == WACOM_REPORT_PENABLED)
+			return wacom_tpc_pen(wacom);
+	}
+	else if (wacom->touch_input) {
 		dev_dbg(wacom->touch_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
 
-	switch (len) {
-	case WACOM_PKGLEN_TPC1FG:
-		return wacom_tpc_single_touch(wacom, len);
-
-	case WACOM_PKGLEN_TPC2FG:
-		return wacom_tpc_mt_touch(wacom);
-
-	case WACOM_PKGLEN_PENABLED:
-		return wacom_tpc_pen(wacom);
-
-	default:
-		switch (data[0]) {
-		case WACOM_REPORT_TPC1FG:
-		case WACOM_REPORT_TPCHID:
-		case WACOM_REPORT_TPCST:
-		case WACOM_REPORT_TPC1FGE:
+		switch (len) {
+		case WACOM_PKGLEN_TPC1FG:
 			return wacom_tpc_single_touch(wacom, len);
 
-		case WACOM_REPORT_TPCMT:
-		case WACOM_REPORT_TPCMT2:
-			return wacom_mt_touch(wacom);
+		case WACOM_PKGLEN_TPC2FG:
+			return wacom_tpc_mt_touch(wacom);
 
-		case WACOM_REPORT_PENABLED:
-			return wacom_tpc_pen(wacom);
+		default:
+			switch (data[0]) {
+			case WACOM_REPORT_TPC1FG:
+			case WACOM_REPORT_TPCHID:
+			case WACOM_REPORT_TPCST:
+			case WACOM_REPORT_TPC1FGE:
+				return wacom_tpc_single_touch(wacom, len);
+
+			case WACOM_REPORT_TPCMT:
+			case WACOM_REPORT_TPCMT2:
+				return wacom_mt_touch(wacom);
+
+			}
 		}
 	}
 
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index c000780..f78cca9 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -102,12 +102,10 @@ static int nokia_modem_gpio_probe(struct device *dev)
 		return -EINVAL;
 	}
 
-	modem->gpios = devm_kzalloc(dev, gpio_count *
-				sizeof(struct nokia_modem_gpio), GFP_KERNEL);
-	if (!modem->gpios) {
-		dev_err(dev, "Could not allocate memory for gpios\n");
+	modem->gpios = devm_kcalloc(dev, gpio_count, sizeof(*modem->gpios),
+				    GFP_KERNEL);
+	if (!modem->gpios)
 		return -ENOMEM;
-	}
 
 	modem->gpio_amount = gpio_count;
 
@@ -156,10 +154,9 @@ static int nokia_modem_probe(struct device *dev)
 	}
 
 	modem = devm_kzalloc(dev, sizeof(*modem), GFP_KERNEL);
-	if (!modem) {
-		dev_err(dev, "Could not allocate memory for nokia_modem_device\n");
+	if (!modem)
 		return -ENOMEM;
-	}
+
 	dev_set_drvdata(dev, modem);
 	modem->device = dev;
 
@@ -182,7 +179,7 @@ static int nokia_modem_probe(struct device *dev)
 	}
 	enable_irq_wake(irq);
 
-	if(pm) {
+	if (pm) {
 		err = nokia_modem_gpio_probe(dev);
 		if (err < 0) {
 			dev_err(dev, "Could not probe GPIOs\n");
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 26b0510..93d28c0 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev)
 	dev->addr_len		= 1;
 	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;
 
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->header_ops		= &phonet_header_ops;
 }
 
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 9a29b34..88e48b3 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -384,10 +384,8 @@ static int ssi_add_controller(struct hsi_controller *ssi,
 	int err;
 
 	omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL);
-	if (!omap_ssi) {
-		dev_err(&pd->dev, "not enough memory for omap ssi\n");
+	if (!omap_ssi)
 		return -ENOMEM;
-	}
 
 	err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
 	if (err < 0)
@@ -421,8 +419,8 @@ static int ssi_add_controller(struct hsi_controller *ssi,
 		goto out_err;
 	}
 
-	omap_ssi->port = devm_kzalloc(&ssi->device,
-		sizeof(struct omap_ssi_port *) * ssi->num_ports, GFP_KERNEL);
+	omap_ssi->port = devm_kcalloc(&ssi->device, ssi->num_ports,
+				      sizeof(*omap_ssi->port), GFP_KERNEL);
 	if (!omap_ssi->port) {
 		err = -ENOMEM;
 		goto out_err;
@@ -467,7 +465,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
 		dev_err(&ssi->device, "runtime PM failed %d\n", err);
 		return err;
 	}
-	/* Reseting GDD */
+	/* Resetting GDD */
 	writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
 	/* Get FCK rate in KHz */
 	omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000);
diff --git a/drivers/hsi/hsi_boardinfo.c b/drivers/hsi/hsi_boardinfo.c
index e56bc6d..e381cb4 100644
--- a/drivers/hsi/hsi_boardinfo.c
+++ b/drivers/hsi/hsi_boardinfo.c
@@ -49,7 +49,7 @@ int __init hsi_register_board_info(struct hsi_board_info const *info,
 {
 	struct hsi_cl_info *cl_info;
 
-	cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL);
+	cl_info = kcalloc(len, sizeof(*cl_info), GFP_KERNEL);
 	if (!cl_info)
 		return -ENOMEM;
 
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index c2a2a97..9065efd 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -267,14 +267,13 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
 
 	cl->rx_cfg.num_channels = cells;
 	cl->tx_cfg.num_channels = cells;
-
-	cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+	cl->rx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
 	if (!cl->rx_cfg.channels) {
 		err = -ENOMEM;
 		goto err;
 	}
 
-	cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+	cl->tx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
 	if (!cl->tx_cfg.channels) {
 		err = -ENOMEM;
 		goto err2;
@@ -485,7 +484,7 @@ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
 	hsi = kzalloc(sizeof(*hsi), flags);
 	if (!hsi)
 		return NULL;
-	port = kzalloc(sizeof(*port)*n_ports, flags);
+	port = kcalloc(n_ports, sizeof(*port), flags);
 	if (!port) {
 		kfree(hsi);
 		return NULL;
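
The kzalloc(n * size) conversions in the HSI hunks above all follow the same pattern: kcalloc()/devm_kcalloc() zero the buffer and fail cleanly if n * size would overflow, and the allocation-failure dev_err() calls can be dropped because the allocator core already logs a warning. A minimal sketch of the pattern, using hypothetical names rather than the drivers' own:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct example_item { int id; };

    static int example_probe(struct device *dev, unsigned int count)
    {
    	struct example_item *items;

    	/* zeroed, overflow-checked; freed automatically when 'dev' is unbound */
    	items = devm_kcalloc(dev, count, sizeof(*items), GFP_KERNEL);
    	if (!items)
    		return -ENOMEM;	/* no dev_err(): the allocator already warns */

    	items[0].id = 1;	/* ... use the array ... */
    	return 0;
    }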
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 736ac76..e9bf0bb 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -630,9 +630,13 @@ void vmbus_close(struct vmbus_channel *channel)
 	 */
 	list_for_each_safe(cur, tmp, &channel->sc_list) {
 		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-		if (cur_channel->state != CHANNEL_OPENED_STATE)
-			continue;
 		vmbus_close_internal(cur_channel);
+		if (cur_channel->rescind) {
+			mutex_lock(&vmbus_connection.channel_mutex);
+			hv_process_channel_removal(cur_channel,
+					   cur_channel->offermsg.child_relid);
+			mutex_unlock(&vmbus_connection.channel_mutex);
+		}
 	}
 	/*
 	 * Now close the primary.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 735f936..0fabd41 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -428,7 +428,6 @@ void vmbus_free_channels(void)
 {
 	struct vmbus_channel *channel, *tmp;
 
-	mutex_lock(&vmbus_connection.channel_mutex);
 	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
 		listentry) {
 		/* hv_process_channel_removal() needs this */
@@ -436,7 +435,6 @@ void vmbus_free_channels(void)
 
 		vmbus_device_unregister(channel->device_obj);
 	}
-	mutex_unlock(&vmbus_connection.channel_mutex);
 }
 
 /*
@@ -483,8 +481,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 			list_add_tail(&newchannel->sc_list, &channel->sc_list);
 			channel->num_sc++;
 			spin_unlock_irqrestore(&channel->lock, flags);
-		} else
+		} else {
+			atomic_dec(&vmbus_connection.offer_in_progress);
 			goto err_free_chan;
+		}
 	}
 
 	dev_type = hv_get_dev_type(newchannel);
@@ -511,6 +511,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	if (!fnew) {
 		if (channel->sc_creation_callback != NULL)
 			channel->sc_creation_callback(newchannel);
+		atomic_dec(&vmbus_connection.offer_in_progress);
 		return;
 	}
 
@@ -532,9 +533,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
 	 */
-	mutex_lock(&vmbus_connection.channel_mutex);
 	ret = vmbus_device_register(newchannel->device_obj);
-	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	if (ret != 0) {
 		pr_err("unable to add child device object (relid %d)\n",
@@ -542,6 +541,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		kfree(newchannel->device_obj);
 		goto err_deq_chan;
 	}
+
+	atomic_dec(&vmbus_connection.offer_in_progress);
 	return;
 
 err_deq_chan:
@@ -797,6 +798,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 	newchannel = alloc_channel();
 	if (!newchannel) {
 		vmbus_release_relid(offer->child_relid);
+		atomic_dec(&vmbus_connection.offer_in_progress);
 		pr_err("Unable to allocate channel object\n");
 		return;
 	}
@@ -843,16 +845,38 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
 
+	/*
+	 * The offer msg and the corresponding rescind msg
+	 * from the host are guaranteed to be ordered -
+	 * offer comes in first and then the rescind.
+	 * Since we process these events in work elements,
+	 * and with preemption, we may end up processing
+	 * the events out of order. Given that we handle these
+	 * work elements on the same CPU, this is possible only
+	 * in the case of preemption. In any case wait here
+	 * until the offer processing has moved beyond the
+	 * point where the channel is discoverable.
+	 */
+
+	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+		/*
+		 * We wait here while a channel offer is currently
+		 * being processed.
+		 */
+		msleep(1);
+	}
+
 	mutex_lock(&vmbus_connection.channel_mutex);
 	channel = relid2channel(rescind->child_relid);
+	mutex_unlock(&vmbus_connection.channel_mutex);
 
 	if (channel == NULL) {
 		/*
-		 * This is very impossible, because in
-		 * vmbus_process_offer(), we have already invoked
-		 * vmbus_release_relid() on error.
+		 * We failed in processing the offer message;
+		 * we would have cleaned up the relid in that
+		 * failure path.
 		 */
-		goto out;
+		return;
 	}
 
 	spin_lock_irqsave(&channel->lock, flags);
@@ -864,7 +888,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	if (channel->device_obj) {
 		if (channel->chn_rescind_callback) {
 			channel->chn_rescind_callback(channel);
-			goto out;
+			return;
 		}
 		/*
 		 * We will have to unregister this device from the
@@ -875,13 +899,26 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 			vmbus_device_unregister(channel->device_obj);
 			put_device(dev);
 		}
-	} else {
-		hv_process_channel_removal(channel,
-			channel->offermsg.child_relid);
 	}
-
-out:
-	mutex_unlock(&vmbus_connection.channel_mutex);
+	if (channel->primary_channel != NULL) {
+		/*
+		 * Sub-channel is being rescinded. Following is the channel
+		 * close sequence when initiated from the driver (refer to
+		 * vmbus_close() for details):
+		 * 1. Close all sub-channels first
+		 * 2. Then close the primary channel.
+		 */
+		if (channel->state == CHANNEL_OPEN_STATE) {
+			/*
+			 * The channel is currently not open;
+			 * it is safe for us to clean up the channel.
+			 */
+			mutex_lock(&vmbus_connection.channel_mutex);
+			hv_process_channel_removal(channel,
+						channel->offermsg.child_relid);
+			mutex_unlock(&vmbus_connection.channel_mutex);
+		}
+	}
 }
 
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
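
The rescind handling above relies on an atomic in-flight counter rather than a lock: vmbus_on_msg_dpc() (in the vmbus_drv.c hunk further below) increments offer_in_progress before queuing CHANNELMSG_OFFERCHANNEL work, vmbus_process_offer() decrements it once the channel is discoverable (or on failure), and vmbus_onoffer_rescind() polls with msleep(1) until the count drops to zero. A stripped-down sketch of that pattern with hypothetical names, not the driver's own symbols:

    #include <linux/atomic.h>
    #include <linux/delay.h>
    #include <linux/workqueue.h>

    static atomic_t offers_in_flight = ATOMIC_INIT(0);

    static void queue_offer(struct work_struct *work)
    {
    	atomic_inc(&offers_in_flight);	/* before the work is queued */
    	schedule_work(work);
    }

    static void offer_work_fn(struct work_struct *work)
    {
    	/* ... make the new object discoverable ... */
    	atomic_dec(&offers_in_flight);	/* only once it can be looked up */
    }

    static void handle_rescind(void)
    {
    	/* later messages must not overtake offers still being processed */
    	while (atomic_read(&offers_in_flight) != 0)
    		msleep(1);
    	/* ... the object, if the offer succeeded, is now discoverable ... */
    }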
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index fce27fb..59c11ff 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -93,10 +93,13 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 	 * all the CPUs. This is needed for kexec to work correctly where
 	 * the CPU attempting to connect may not be CPU 0.
 	 */
-	if (version >= VERSION_WIN8_1)
+	if (version >= VERSION_WIN8_1) {
 		msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
-	else
+		vmbus_connection.connect_cpu = smp_processor_id();
+	} else {
 		msg->target_vcpu = 0;
+		vmbus_connection.connect_cpu = 0;
+	}
 
 	/*
 	 * Add to list before we send the request since we may
@@ -370,7 +373,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
 			break;
 		case HV_STATUS_INSUFFICIENT_MEMORY:
 		case HV_STATUS_INSUFFICIENT_BUFFERS:
-			ret = -ENOMEM;
+			ret = -ENOBUFS;
 			break;
 		case HV_STATUS_SUCCESS:
 			return ret;
@@ -387,7 +390,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
 		else
 			mdelay(usec / 1000);
 
-		if (usec < 256000)
+		if (retries < 22)
 			usec *= 2;
 	}
 	return ret;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 12e7bae..2ea1220 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -82,10 +82,15 @@ int hv_post_message(union hv_connection_id connection_id,
 	aligned_msg->message_type = message_type;
 	aligned_msg->payload_size = payload_size;
 	memcpy((void *)aligned_msg->payload, payload, payload_size);
-	put_cpu_ptr(hv_cpu);
 
 	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 
+	/* Preemption must remain disabled until after the hypercall
+	 * so some other thread can't get scheduled onto this cpu and
+	 * corrupt the per-cpu post_msg_page
+	 */
+	put_cpu_ptr(hv_cpu);
+
 	return status & 0xFFFF;
 }
 
@@ -96,7 +101,7 @@ static int hv_ce_set_next_event(unsigned long delta,
 
 	WARN_ON(!clockevent_state_oneshot(evt));
 
-	hv_get_current_tick(current_tick);
+	current_tick = hyperv_cs->read(NULL);
 	current_tick += delta;
 	hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
 	return 0;
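
The hv_post_message() change above is purely about ordering: put_cpu_ptr() re-enables preemption, so it must come after the hypercall that consumes the per-cpu post_msg_page, otherwise another task could be scheduled onto this CPU and overwrite the page. The general shape of the pattern, sketched with hypothetical names (issue_hypercall() is a stand-in, not a real API):

    #include <linux/percpu.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct msg_page { char buf[256]; };
    static DEFINE_PER_CPU(struct msg_page, msg_page);

    static int issue_hypercall(struct msg_page *page) { return 0; }	/* stub */

    /* caller guarantees len <= sizeof(page->buf) */
    static int post_message(const void *payload, size_t len)
    {
    	struct msg_page *page = get_cpu_ptr(&msg_page);	/* disables preemption */
    	int status;

    	memcpy(page->buf, payload, len);
    	status = issue_hypercall(page);

    	/* only now may another task run on this CPU and reuse the page */
    	put_cpu_ptr(&msg_page);
    	return status;
    }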
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index e99ff2d..9a90b91 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -112,7 +112,7 @@ static void kvp_poll_wrapper(void *channel)
 {
 	/* Transaction is finished, reset the state here to avoid races. */
 	kvp_transaction.state = HVUTIL_READY;
-	hv_kvp_onchannelcallback(channel);
+	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
 }
 
 static void kvp_register_done(void)
@@ -159,7 +159,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
 
 static void kvp_host_handshake_func(struct work_struct *dummy)
 {
-	hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+	tasklet_schedule(&kvp_transaction.recv_channel->callback_event);
 }
 
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
@@ -625,16 +625,17 @@ void hv_kvp_onchannelcallback(void *context)
 		     NEGO_IN_PROGRESS,
 		     NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
 
-	if (host_negotiatied == NEGO_NOT_STARTED &&
-	    kvp_transaction.state < HVUTIL_READY) {
+	if (kvp_transaction.state < HVUTIL_READY) {
 		/*
 		 * If userspace daemon is not connected and host is asking
 		 * us to negotiate we need to delay to not lose messages.
 		 * This is important for Failover IP setting.
 		 */
-		host_negotiatied = NEGO_IN_PROGRESS;
-		schedule_delayed_work(&kvp_host_handshake_work,
+		if (host_negotiatied == NEGO_NOT_STARTED) {
+			host_negotiatied = NEGO_IN_PROGRESS;
+			schedule_delayed_work(&kvp_host_handshake_work,
 				      HV_UTIL_NEGO_TIMEOUT * HZ);
+		}
 		return;
 	}
 	if (kvp_transaction.state > HVUTIL_READY)
@@ -702,6 +703,7 @@ void hv_kvp_onchannelcallback(void *context)
 				       VM_PKT_DATA_INBAND, 0);
 
 		host_negotiatied = NEGO_FINISHED;
+		hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
 	}
 
 }
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 186b100..14dce25 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -202,27 +202,39 @@ static void shutdown_onchannelcallback(void *context)
 /*
  * Set the host time in a process context.
  */
+static struct work_struct adj_time_work;
 
-struct adj_time_work {
-	struct work_struct work;
-	u64	host_time;
-	u64	ref_time;
-	u8	flags;
-};
+/*
+ * The last time sample, received from the host. PTP device responds to
+ * requests by using this data and the current partition-wide time reference
+ * count.
+ */
+static struct {
+	u64				host_time;
+	u64				ref_time;
+	spinlock_t			lock;
+} host_ts;
+
+static struct timespec64 hv_get_adj_host_time(void)
+{
+	struct timespec64 ts;
+	u64 newtime, reftime;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host_ts.lock, flags);
+	reftime = hyperv_cs->read(hyperv_cs);
+	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
+	ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+	spin_unlock_irqrestore(&host_ts.lock, flags);
+
+	return ts;
+}
 
 static void hv_set_host_time(struct work_struct *work)
 {
-	struct adj_time_work *wrk;
-	struct timespec64 host_ts;
-	u64 reftime, newtime;
+	struct timespec64 ts = hv_get_adj_host_time();
 
-	wrk = container_of(work, struct adj_time_work, work);
-
-	reftime = hyperv_cs->read(hyperv_cs);
-	newtime = wrk->host_time + (reftime - wrk->ref_time);
-	host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
-
-	do_settimeofday64(&host_ts);
+	do_settimeofday64(&ts);
 }
 
 /*
@@ -238,62 +250,35 @@ static void hv_set_host_time(struct work_struct *work)
  * typically used as a hint to the guest. The guest is under no obligation
  * to discipline the clock.
  */
-static struct adj_time_work  wrk;
-
-/*
- * The last time sample, received from the host. PTP device responds to
- * requests by using this data and the current partition-wide time reference
- * count.
- */
-static struct {
-	u64				host_time;
-	u64				ref_time;
-	struct system_time_snapshot	snap;
-	spinlock_t			lock;
-} host_ts;
-
 static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
 {
 	unsigned long flags;
 	u64 cur_reftime;
 
 	/*
-	 * This check is safe since we are executing in the
-	 * interrupt context and time synch messages are always
-	 * delivered on the same CPU.
+	 * Save the adjusted time sample from the host and the snapshot
+	 * of the current system time.
 	 */
-	if (adj_flags & ICTIMESYNCFLAG_SYNC) {
-		/* Queue a job to do do_settimeofday64() */
-		if (work_pending(&wrk.work))
-			return;
+	spin_lock_irqsave(&host_ts.lock, flags);
 
-		wrk.host_time = hosttime;
-		wrk.ref_time = reftime;
-		wrk.flags = adj_flags;
-		schedule_work(&wrk.work);
-	} else {
-		/*
-		 * Save the adjusted time sample from the host and the snapshot
-		 * of the current system time for PTP device.
-		 */
-		spin_lock_irqsave(&host_ts.lock, flags);
+	cur_reftime = hyperv_cs->read(hyperv_cs);
+	host_ts.host_time = hosttime;
+	host_ts.ref_time = cur_reftime;
 
-		cur_reftime = hyperv_cs->read(hyperv_cs);
-		host_ts.host_time = hosttime;
-		host_ts.ref_time = cur_reftime;
-		ktime_get_snapshot(&host_ts.snap);
+	/*
+	 * TimeSync v4 messages contain reference time (guest's Hyper-V
+	 * clocksource read when the time sample was generated), we can
+	 * improve the precision by adding the delta between now and the
+	 * time of generation. For older protocols we set
+	 * reftime == cur_reftime on call.
+	 */
+	host_ts.host_time += (cur_reftime - reftime);
 
-		/*
-		 * TimeSync v4 messages contain reference time (guest's Hyper-V
-		 * clocksource read when the time sample was generated), we can
-		 * improve the precision by adding the delta between now and the
-		 * time of generation.
-		 */
-		if (ts_srv_version > TS_VERSION_3)
-			host_ts.host_time += (cur_reftime - reftime);
+	spin_unlock_irqrestore(&host_ts.lock, flags);
 
-		spin_unlock_irqrestore(&host_ts.lock, flags);
-	}
+	/* Schedule work to do do_settimeofday64() */
+	if (adj_flags & ICTIMESYNCFLAG_SYNC)
+		schedule_work(&adj_time_work);
 }
 
 /*
@@ -341,8 +326,8 @@ static void timesync_onchannelcallback(void *context)
 					sizeof(struct vmbuspipe_hdr) +
 					sizeof(struct icmsg_hdr)];
 				adj_guesttime(timedatap->parenttime,
-						0,
-						timedatap->flags);
+					      hyperv_cs->read(hyperv_cs),
+					      timedatap->flags);
 			}
 		}
 
@@ -526,58 +511,17 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
 static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
 {
-	unsigned long flags;
-	u64 newtime, reftime;
-
-	spin_lock_irqsave(&host_ts.lock, flags);
-	reftime = hyperv_cs->read(hyperv_cs);
-	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
-	*ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
-	spin_unlock_irqrestore(&host_ts.lock, flags);
+	*ts = hv_get_adj_host_time();
 
 	return 0;
 }
 
-static int hv_ptp_get_syncdevicetime(ktime_t *device,
-				     struct system_counterval_t *system,
-				     void *ctx)
-{
-	system->cs = hyperv_cs;
-	system->cycles = host_ts.ref_time;
-	*device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);
-
-	return 0;
-}
-
-static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
-				 struct system_device_crosststamp *xtstamp)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&host_ts.lock, flags);
-
-	/*
-	 * host_ts contains the last time sample from the host and the snapshot
-	 * of system time. We don't need to calculate the time delta between
-	 * the reception and now as get_device_system_crosststamp() does the
-	 * required interpolation.
-	 */
-	ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
-					    NULL, &host_ts.snap, xtstamp);
-
-	spin_unlock_irqrestore(&host_ts.lock, flags);
-
-	return ret;
-}
-
 static struct ptp_clock_info ptp_hyperv_info = {
 	.name		= "hyperv",
 	.enable         = hv_ptp_enable,
 	.adjtime        = hv_ptp_adjtime,
 	.adjfreq        = hv_ptp_adjfreq,
 	.gettime64      = hv_ptp_gettime,
-	.getcrosststamp = hv_ptp_getcrosststamp,
 	.settime64      = hv_ptp_settime,
 	.owner		= THIS_MODULE,
 };
@@ -592,7 +536,7 @@ static int hv_timesync_init(struct hv_util_service *srv)
 
 	spin_lock_init(&host_ts.lock);
 
-	INIT_WORK(&wrk.work, hv_set_host_time);
+	INIT_WORK(&adj_time_work, hv_set_host_time);
 
 	/*
 	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
@@ -613,7 +557,7 @@ static void hv_timesync_deinit(void)
 {
 	if (hv_ptp_clock)
 		ptp_clock_unregister(hv_ptp_clock);
-	cancel_work_sync(&wrk.work);
+	cancel_work_sync(&adj_time_work);
 }
 
 static int __init init_hyperv_utils(void)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 6113e91..1b6a5e0 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -303,6 +303,13 @@ enum vmbus_connect_state {
 #define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT
 
 struct vmbus_connection {
+	/*
+	 * CPU on which the initial host contact was made.
+	 */
+	int connect_cpu;
+
+	atomic_t offer_in_progress;
+
 	enum vmbus_connect_state conn_state;
 
 	atomic_t next_gpadl_handle;
@@ -411,6 +418,10 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
 	if (!channel)
 		return;
 
+	if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
+		cb(channel);
+		return;
+	}
 	smp_call_function_single(channel->target_cpu, cb, channel, true);
 }
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0087b49..ed84e96 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -608,40 +608,6 @@ static void vmbus_free_dynids(struct hv_driver *drv)
 	spin_unlock(&drv->dynids.lock);
 }
 
-/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f */
-static int get_uuid_le(const char *str, uuid_le *uu)
-{
-	unsigned int b[16];
-	int i;
-
-	if (strlen(str) < 37)
-		return -1;
-
-	for (i = 0; i < 36; i++) {
-		switch (i) {
-		case 8: case 13: case 18: case 23:
-			if (str[i] != '-')
-				return -1;
-			break;
-		default:
-			if (!isxdigit(str[i]))
-				return -1;
-		}
-	}
-
-	/* unparse little endian output byte order */
-	if (sscanf(str,
-		   "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
-		   &b[3], &b[2], &b[1], &b[0],
-		   &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
-		   &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
-		return -1;
-
-	for (i = 0; i < 16; i++)
-		uu->b[i] = b[i];
-	return 0;
-}
-
 /*
  * store_new_id - sysfs frontend to vmbus_add_dynid()
  *
@@ -651,11 +617,12 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
 			    size_t count)
 {
 	struct hv_driver *drv = drv_to_hv_drv(driver);
-	uuid_le guid = NULL_UUID_LE;
+	uuid_le guid;
 	ssize_t retval;
 
-	if (get_uuid_le(buf, &guid) != 0)
-		return -EINVAL;
+	retval = uuid_le_to_bin(buf, &guid);
+	if (retval)
+		return retval;
 
 	if (hv_vmbus_get_id(drv, &guid))
 		return -EEXIST;
@@ -677,12 +644,14 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
 {
 	struct hv_driver *drv = drv_to_hv_drv(driver);
 	struct vmbus_dynid *dynid, *n;
-	uuid_le guid = NULL_UUID_LE;
-	size_t retval = -ENODEV;
+	uuid_le guid;
+	ssize_t retval;
 
-	if (get_uuid_le(buf, &guid))
-		return -EINVAL;
+	retval = uuid_le_to_bin(buf, &guid);
+	if (retval)
+		return retval;
 
+	retval = -ENODEV;
 	spin_lock(&drv->dynids.lock);
 	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
 		struct hv_vmbus_device_id *id = &dynid->id;
@@ -798,8 +767,10 @@ static void vmbus_device_release(struct device *device)
 	struct hv_device *hv_dev = device_to_hv_device(device);
 	struct vmbus_channel *channel = hv_dev->channel;
 
+	mutex_lock(&vmbus_connection.channel_mutex);
 	hv_process_channel_removal(channel,
 				   channel->offermsg.child_relid);
+	mutex_unlock(&vmbus_connection.channel_mutex);
 	kfree(hv_dev);
 
 }
@@ -877,7 +848,32 @@ void vmbus_on_msg_dpc(unsigned long data)
 		INIT_WORK(&ctx->work, vmbus_onmessage_work);
 		memcpy(&ctx->msg, msg, sizeof(*msg));
 
-		queue_work(vmbus_connection.work_queue, &ctx->work);
+		/*
+		 * The host can generate a rescind message while we
+		 * may still be handling the original offer. We deal with
+		 * this condition by ensuring the processing is done on the
+		 * same CPU.
+		 */
+		switch (hdr->msgtype) {
+		case CHANNELMSG_RESCIND_CHANNELOFFER:
+			/*
+			 * If we are handling the rescind message,
+			 * schedule the work on the global work queue.
+			 */
+			schedule_work_on(vmbus_connection.connect_cpu,
+					 &ctx->work);
+			break;
+
+		case CHANNELMSG_OFFERCHANNEL:
+			atomic_inc(&vmbus_connection.offer_in_progress);
+			queue_work_on(vmbus_connection.connect_cpu,
+				      vmbus_connection.work_queue,
+				      &ctx->work);
+			break;
+
+		default:
+			queue_work(vmbus_connection.work_queue, &ctx->work);
+		}
 	} else
 		entry->message_handler(hdr);
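+
+		/*
+		 * Aside on the new_id_store()/remove_id_store() hunks above: the
+		 * open-coded GUID parser is replaced by uuid_le_to_bin() from
+		 * <linux/uuid.h>, which accepts the canonical
+		 * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form and returns 0 or a
+		 * negative errno. A minimal usage sketch (hypothetical caller,
+		 * not vmbus code):
+		 *
+		 *	int ret = uuid_le_to_bin(buf, &guid);
+		 *	if (ret)
+		 *		return ret;	// -EINVAL on malformed input
+		 *	// 'guid' now holds the binary, little-endian form
+		 */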
 
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 22d5eaf..5ef2814 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -343,6 +343,7 @@
 
 config SENSORS_ASPEED
 	tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+	select REGMAP
 	help
 	  This driver provides support for ASPEED AST2400/AST2500 PWM
 	  and Fan Tacho controllers.
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 5140c27..357b426 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -34,7 +34,7 @@
 #include <linux/of_device.h>
 #include <linux/of.h>
 
-#include <linux/i2c/ads1015.h>
+#include <linux/platform_data/ads1015.h>
 
 /* ADS1015 registers */
 enum {
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index c803e3c..1baa213 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -22,6 +22,7 @@
 #include <linux/hwmon-vid.h>
 #include <linux/err.h>
 #include <linux/jiffies.h>
+#include <linux/util_macros.h>
 
 /* Indexes for the sysfs hooks */
 
@@ -78,6 +79,9 @@
 
 #define REG_TEMP_TRANGE_BASE	0x5F
 
+#define REG_ENHANCE_ACOUSTICS1	0x62
+#define REG_ENHANCE_ACOUSTICS2	0x63
+
 #define REG_PWM_MIN_BASE	0x64
 
 #define REG_TEMP_TMIN_BASE	0x67
@@ -208,6 +212,7 @@ struct adt7475_data {
 	u8 range[3];
 	u8 pwmctl[3];
 	u8 pwmchan[3];
+	u8 enh_acoustics[2];
 
 	u8 vid;
 	u8 vrm;
@@ -314,35 +319,6 @@ static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
 	i2c_smbus_write_byte_data(client, reg, val & 0xFF);
 }
 
-/*
- * Find the nearest value in a table - used for pwm frequency and
- * auto temp range
- */
-static int find_nearest(long val, const int *array, int size)
-{
-	int i;
-
-	if (val < array[0])
-		return 0;
-
-	if (val > array[size - 1])
-		return size - 1;
-
-	for (i = 0; i < size - 1; i++) {
-		int a, b;
-
-		if (val > array[i + 1])
-			continue;
-
-		a = val - array[i];
-		b = array[i + 1] - val;
-
-		return (a <= b) ? i : i + 1;
-	}
-
-	return 0;
-}
-
 static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
 			    char *buf)
 {
@@ -550,6 +526,88 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
+/* Assuming CONFIG6[SLOW] is 0 */
+static const int ad7475_st_map[] = {
+	37500, 18800, 12500, 7500, 4700, 3100, 1600, 800,
+};
+
+static ssize_t show_temp_st(struct device *dev, struct device_attribute *attr,
+				  char *buf)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	long val;
+
+	switch (sattr->index) {
+	case 0:
+		val = data->enh_acoustics[0] & 0xf;
+		break;
+	case 1:
+		val = (data->enh_acoustics[1] >> 4) & 0xf;
+		break;
+	case 2:
+	default:
+		val = data->enh_acoustics[1] & 0xf;
+		break;
+	}
+
+	if (val & 0x8)
+		return sprintf(buf, "%d\n", ad7475_st_map[val & 0x7]);
+	else
+		return sprintf(buf, "0\n");
+}
+
+static ssize_t set_temp_st(struct device *dev, struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	unsigned char reg;
+	int shift, idx;
+	ulong val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	switch (sattr->index) {
+	case 0:
+		reg = REG_ENHANCE_ACOUSTICS1;
+		shift = 0;
+		idx = 0;
+		break;
+	case 1:
+		reg = REG_ENHANCE_ACOUSTICS2;
+		shift = 0;
+		idx = 1;
+		break;
+	case 2:
+	default:
+		reg = REG_ENHANCE_ACOUSTICS2;
+		shift = 4;
+		idx = 1;
+		break;
+	}
+
+	if (val > 0) {
+		val = find_closest_descending(val, ad7475_st_map,
+					      ARRAY_SIZE(ad7475_st_map));
+		val |= 0x8;
+	}
+
+	mutex_lock(&data->lock);
+
+	data->enh_acoustics[idx] &= ~(0xf << shift);
+	data->enh_acoustics[idx] |= (val << shift);
+
+	i2c_smbus_write_byte_data(client, reg, data->enh_acoustics[idx]);
+
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
 /*
  * Table of autorange values - the user will write the value in millidegrees,
  * and we'll convert it
@@ -606,7 +664,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
 	val -= temp;
 
 	/* Find the nearest table entry to what the user wrote */
-	val = find_nearest(val, autorange_table, ARRAY_SIZE(autorange_table));
+	val = find_closest(val, autorange_table, ARRAY_SIZE(autorange_table));
 
 	data->range[sattr->index] &= ~0xF0;
 	data->range[sattr->index] |= val << 4;
@@ -728,6 +786,43 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
 	data->pwm[sattr->nr][sattr->index] = clamp_val(val, 0, 0xFF);
 	i2c_smbus_write_byte_data(client, reg,
 				  data->pwm[sattr->nr][sattr->index]);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t show_stall_disable(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	u8 mask = BIT(5 + sattr->index);
+
+	return sprintf(buf, "%d\n", !!(data->enh_acoustics[0] & mask));
+}
+
+static ssize_t set_stall_disable(struct device *dev,
+				 struct device_attribute *attr, const char *buf,
+				 size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adt7475_data *data = i2c_get_clientdata(client);
+	long val;
+	u8 mask = BIT(5 + sattr->index);
+
+	if (kstrtol(buf, 10, &val))
+		return -EINVAL;
+
+	mutex_lock(&data->lock);
+
+	data->enh_acoustics[0] &= ~mask;
+	if (val)
+		data->enh_acoustics[0] |= mask;
+
+	i2c_smbus_write_byte_data(client, REG_ENHANCE_ACOUSTICS1,
+				  data->enh_acoustics[0]);
 
 	mutex_unlock(&data->lock);
 
@@ -839,7 +934,7 @@ static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
 
 /* List of frequencies for the PWM */
 static const int pwmfreq_table[] = {
-	11, 14, 22, 29, 35, 44, 58, 88
+	11, 14, 22, 29, 35, 44, 58, 88, 22500
 };
 
 static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -847,9 +942,10 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
 {
 	struct adt7475_data *data = adt7475_update_device(dev);
 	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	int i = clamp_val(data->range[sattr->index] & 0xf, 0,
+			  ARRAY_SIZE(pwmfreq_table) - 1);
 
-	return sprintf(buf, "%d\n",
-		       pwmfreq_table[data->range[sattr->index] & 7]);
+	return sprintf(buf, "%d\n", pwmfreq_table[i]);
 }
 
 static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -864,13 +960,13 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
 	if (kstrtol(buf, 10, &val))
 		return -EINVAL;
 
-	out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
+	out = find_closest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
 
 	mutex_lock(&data->lock);
 
 	data->range[sattr->index] =
 		adt7475_read(TEMP_TRANGE_REG(sattr->index));
-	data->range[sattr->index] &= ~7;
+	data->range[sattr->index] &= ~0xf;
 	data->range[sattr->index] |= out;
 
 	i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
@@ -995,6 +1091,8 @@ static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
 			    THERM, 0);
 static SENSOR_DEVICE_ATTR_2(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
 			    set_temp, HYSTERSIS, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_smoothing, S_IRUGO | S_IWUSR, show_temp_st,
+			    set_temp_st, 0, 0);
 static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, INPUT, 1);
 static SENSOR_DEVICE_ATTR_2(temp2_alarm, S_IRUGO, show_temp, NULL, ALARM, 1);
 static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
@@ -1011,6 +1109,8 @@ static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
 			    THERM, 1);
 static SENSOR_DEVICE_ATTR_2(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
 			    set_temp, HYSTERSIS, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_smoothing, S_IRUGO | S_IWUSR, show_temp_st,
+			    set_temp_st, 0, 1);
 static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, INPUT, 2);
 static SENSOR_DEVICE_ATTR_2(temp3_alarm, S_IRUGO, show_temp, NULL, ALARM, 2);
 static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_temp, NULL, FAULT, 2);
@@ -1028,6 +1128,8 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
 			    THERM, 2);
 static SENSOR_DEVICE_ATTR_2(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
 			    set_temp, HYSTERSIS, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_smoothing, S_IRUGO | S_IWUSR, show_temp_st,
+			    set_temp_st, 0, 2);
 static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_tach, NULL, INPUT, 0);
 static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
 			    MIN, 0);
@@ -1056,6 +1158,8 @@ static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MIN, 0);
 static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MAX, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_stall_disable, S_IRUGO | S_IWUSR,
+			    show_stall_disable, set_stall_disable, 0, 0);
 static SENSOR_DEVICE_ATTR_2(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
 			    1);
 static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
@@ -1068,6 +1172,8 @@ static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MIN, 1);
 static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MAX, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_stall_disable, S_IRUGO | S_IWUSR,
+			    show_stall_disable, set_stall_disable, 0, 1);
 static SENSOR_DEVICE_ATTR_2(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
 			    2);
 static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
@@ -1080,6 +1186,8 @@ static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MIN, 2);
 static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
 			    set_pwm, MAX, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_stall_disable, S_IRUGO | S_IWUSR,
+			    show_stall_disable, set_stall_disable, 0, 2);
 
 /* Non-standard name, might need revisiting */
 static DEVICE_ATTR_RW(pwm_use_point2_pwm_at_crit);
@@ -1106,6 +1214,7 @@ static struct attribute *adt7475_attrs[] = {
 	&sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
 	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_smoothing.dev_attr.attr,
 	&sensor_dev_attr_temp2_input.dev_attr.attr,
 	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
@@ -1115,6 +1224,7 @@ static struct attribute *adt7475_attrs[] = {
 	&sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_temp2_crit.dev_attr.attr,
 	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_smoothing.dev_attr.attr,
 	&sensor_dev_attr_temp3_input.dev_attr.attr,
 	&sensor_dev_attr_temp3_fault.dev_attr.attr,
 	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
@@ -1125,6 +1235,7 @@ static struct attribute *adt7475_attrs[] = {
 	&sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_temp3_crit.dev_attr.attr,
 	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_smoothing.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
 	&sensor_dev_attr_fan1_min.dev_attr.attr,
 	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
@@ -1140,12 +1251,14 @@ static struct attribute *adt7475_attrs[] = {
 	&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_stall_disable.dev_attr.attr,
 	&sensor_dev_attr_pwm3.dev_attr.attr,
 	&sensor_dev_attr_pwm3_freq.dev_attr.attr,
 	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_stall_disable.dev_attr.attr,
 	&dev_attr_pwm_use_point2_pwm_at_crit.attr,
 	NULL,
 };
@@ -1164,6 +1277,7 @@ static struct attribute *pwm2_attrs[] = {
 	&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_stall_disable.dev_attr.attr,
 	NULL
 };
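
adt7475 now uses the generic find_closest()/find_closest_descending() helpers from <linux/util_macros.h> instead of its private find_nearest(); both return the index of the table entry nearest to the requested value (the table must be sorted ascending or descending, respectively). A small sketch, assuming a hypothetical freq_table rather than the driver's own tables:

    #include <linux/kernel.h>
    #include <linux/util_macros.h>

    static const int freq_table[] = { 11, 14, 22, 29, 35, 44, 58, 88, 22500 };

    /* map a user-requested frequency to the nearest supported table index */
    static int freq_to_index(long hz)
    {
    	return find_closest(hz, freq_table, ARRAY_SIZE(freq_table));
    }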
 
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 48403a2..ddfe66b 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/delay.h>
 #include <linux/hwmon.h>
@@ -145,14 +146,26 @@
 
 #define PWM_MAX 255
 
+#define BOTH_EDGES 0x02 /* 10b */
+
 #define M_PWM_DIV_H 0x00
 #define M_PWM_DIV_L 0x05
 #define M_PWM_PERIOD 0x5F
 #define M_TACH_CLK_DIV 0x00
-#define M_TACH_MODE 0x00
-#define M_TACH_UNIT 0x1000
+/*
+ * 5:4 Type N fan tach mode selection bit:
+ * 00: falling
+ * 01: rising
+ * 10: both
+ * 11: reserved.
+ */
+#define M_TACH_MODE 0x02 /* 10b */
+#define M_TACH_UNIT 0x00c0
 #define INIT_FAN_CTRL 0xFF
 
+/* How long we sleep in us while waiting for an RPM result. */
+#define ASPEED_RPM_STATUS_SLEEP_USEC	500
+
 struct aspeed_pwm_tacho_data {
 	struct regmap *regmap;
 	unsigned long clk_freq;
@@ -162,6 +175,7 @@ struct aspeed_pwm_tacho_data {
 	u8 type_pwm_clock_division_h[3];
 	u8 type_pwm_clock_division_l[3];
 	u8 type_fan_tach_clock_division[3];
+	u8 type_fan_tach_mode[3];
 	u16 type_fan_tach_unit[3];
 	u8 pwm_port_type[8];
 	u8 pwm_port_fan_ctrl[8];
@@ -494,11 +508,12 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data
 	return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit);
 }
 
-static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
 				      u8 fan_tach_ch)
 {
-	u32 raw_data, tach_div, clk_source, sec, val;
-	u8 fan_tach_ch_source, type;
+	u32 raw_data, tach_div, clk_source, msec, usec, val;
+	u8 fan_tach_ch_source, type, mode, both;
+	int ret;
 
 	regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
 	regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
@@ -506,13 +521,31 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
 	fan_tach_ch_source = priv->fan_tach_ch_source[fan_tach_ch];
 	type = priv->pwm_port_type[fan_tach_ch_source];
 
-	sec = (1000 / aspeed_get_fan_tach_ch_measure_period(priv, type));
-	msleep(sec);
+	msec = (1000 / aspeed_get_fan_tach_ch_measure_period(priv, type));
+	usec = msec * 1000;
 
-	regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val);
+	ret = regmap_read_poll_timeout(
+		priv->regmap,
+		ASPEED_PTCR_RESULT,
+		val,
+		(val & RESULT_STATUS_MASK),
+		ASPEED_RPM_STATUS_SLEEP_USEC,
+		usec);
+
+	/* return -ETIMEDOUT if we didn't get an answer. */
+	if (ret)
+		return ret;
+
 	raw_data = val & RESULT_VALUE_MASK;
 	tach_div = priv->type_fan_tach_clock_division[type];
-	tach_div = 0x4 << (tach_div * 2);
+	/*
+	 * We need the mode to determine if the raw_data is double (from
+	 * counting both edges).
+	 */
+	mode = priv->type_fan_tach_mode[type];
+	both = (mode & BOTH_EDGES) ? 1 : 0;
+
+	tach_div = (0x4 << both) << (tach_div * 2);
 	clk_source = priv->clk_freq;
 
 	if (raw_data == 0)
@@ -561,12 +594,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	int index = sensor_attr->index;
-	u32 rpm;
+	int rpm;
 	struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev);
 
 	rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+	if (rpm < 0)
+		return rpm;
 
-	return sprintf(buf, "%u\n", rpm);
+	return sprintf(buf, "%d\n", rpm);
 }
 
 static umode_t pwm_is_visible(struct kobject *kobj,
@@ -591,24 +626,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj,
 	return a->mode;
 }
 
-static SENSOR_DEVICE_ATTR(pwm0, 0644,
-			show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm1, 0644,
-			show_pwm, set_pwm, 1);
+			show_pwm, set_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, 0644,
-			show_pwm, set_pwm, 2);
+			show_pwm, set_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, 0644,
-			show_pwm, set_pwm, 3);
+			show_pwm, set_pwm, 2);
 static SENSOR_DEVICE_ATTR(pwm4, 0644,
-			show_pwm, set_pwm, 4);
+			show_pwm, set_pwm, 3);
 static SENSOR_DEVICE_ATTR(pwm5, 0644,
-			show_pwm, set_pwm, 5);
+			show_pwm, set_pwm, 4);
 static SENSOR_DEVICE_ATTR(pwm6, 0644,
-			show_pwm, set_pwm, 6);
+			show_pwm, set_pwm, 5);
 static SENSOR_DEVICE_ATTR(pwm7, 0644,
+			show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
 			show_pwm, set_pwm, 7);
 static struct attribute *pwm_dev_attrs[] = {
-	&sensor_dev_attr_pwm0.dev_attr.attr,
 	&sensor_dev_attr_pwm1.dev_attr.attr,
 	&sensor_dev_attr_pwm2.dev_attr.attr,
 	&sensor_dev_attr_pwm3.dev_attr.attr,
@@ -616,6 +650,7 @@ static struct attribute *pwm_dev_attrs[] = {
 	&sensor_dev_attr_pwm5.dev_attr.attr,
 	&sensor_dev_attr_pwm6.dev_attr.attr,
 	&sensor_dev_attr_pwm7.dev_attr.attr,
+	&sensor_dev_attr_pwm8.dev_attr.attr,
 	NULL,
 };
 
@@ -624,40 +659,39 @@ static const struct attribute_group pwm_dev_group = {
 	.is_visible = pwm_is_visible,
 };
 
-static SENSOR_DEVICE_ATTR(fan0_input, 0444,
-		show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, 0444,
-		show_rpm, NULL, 1);
+		show_rpm, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan2_input, 0444,
-		show_rpm, NULL, 2);
+		show_rpm, NULL, 1);
 static SENSOR_DEVICE_ATTR(fan3_input, 0444,
-		show_rpm, NULL, 3);
+		show_rpm, NULL, 2);
 static SENSOR_DEVICE_ATTR(fan4_input, 0444,
-		show_rpm, NULL, 4);
+		show_rpm, NULL, 3);
 static SENSOR_DEVICE_ATTR(fan5_input, 0444,
-		show_rpm, NULL, 5);
+		show_rpm, NULL, 4);
 static SENSOR_DEVICE_ATTR(fan6_input, 0444,
-		show_rpm, NULL, 6);
+		show_rpm, NULL, 5);
 static SENSOR_DEVICE_ATTR(fan7_input, 0444,
-		show_rpm, NULL, 7);
+		show_rpm, NULL, 6);
 static SENSOR_DEVICE_ATTR(fan8_input, 0444,
-		show_rpm, NULL, 8);
+		show_rpm, NULL, 7);
 static SENSOR_DEVICE_ATTR(fan9_input, 0444,
-		show_rpm, NULL, 9);
+		show_rpm, NULL, 8);
 static SENSOR_DEVICE_ATTR(fan10_input, 0444,
-		show_rpm, NULL, 10);
+		show_rpm, NULL, 9);
 static SENSOR_DEVICE_ATTR(fan11_input, 0444,
-		show_rpm, NULL, 11);
+		show_rpm, NULL, 10);
 static SENSOR_DEVICE_ATTR(fan12_input, 0444,
-		show_rpm, NULL, 12);
+		show_rpm, NULL, 11);
 static SENSOR_DEVICE_ATTR(fan13_input, 0444,
-		show_rpm, NULL, 13);
+		show_rpm, NULL, 12);
 static SENSOR_DEVICE_ATTR(fan14_input, 0444,
-		show_rpm, NULL, 14);
+		show_rpm, NULL, 13);
 static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+		show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
 		show_rpm, NULL, 15);
 static struct attribute *fan_dev_attrs[] = {
-	&sensor_dev_attr_fan0_input.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
 	&sensor_dev_attr_fan2_input.dev_attr.attr,
 	&sensor_dev_attr_fan3_input.dev_attr.attr,
@@ -673,6 +707,7 @@ static struct attribute *fan_dev_attrs[] = {
 	&sensor_dev_attr_fan13_input.dev_attr.attr,
 	&sensor_dev_attr_fan14_input.dev_attr.attr,
 	&sensor_dev_attr_fan15_input.dev_attr.attr,
+	&sensor_dev_attr_fan16_input.dev_attr.attr,
 	NULL
 };
 
@@ -696,6 +731,7 @@ static void aspeed_create_type(struct aspeed_pwm_tacho_data *priv)
 	aspeed_set_tacho_type_enable(priv->regmap, TYPEM, true);
 	priv->type_fan_tach_clock_division[TYPEM] = M_TACH_CLK_DIV;
 	priv->type_fan_tach_unit[TYPEM] = M_TACH_UNIT;
+	priv->type_fan_tach_mode[TYPEM] = M_TACH_MODE;
 	aspeed_set_tacho_type_values(priv->regmap, TYPEM, M_TACH_MODE,
 				     M_TACH_UNIT, M_TACH_CLK_DIV);
 }
@@ -802,7 +838,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
 		if (ret)
 			return ret;
 	}
-	of_node_put(np);
 
 	priv->groups[0] = &pwm_dev_group;
 	priv->groups[1] = &fan_dev_group;
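
aspeed_get_fan_tach_ch_rpm() above now polls the result register with regmap_read_poll_timeout() instead of sleeping for a fixed period: the macro re-reads the register every sleep_us microseconds until the condition holds or timeout_us expires, returning 0 on success or -ETIMEDOUT. A sketch of the call shape, with hypothetical register and bit names:

    #include <linux/bitops.h>
    #include <linux/regmap.h>

    #define STATUS_REG	0x08		/* hypothetical register */
    #define READY_BIT	BIT(0)		/* hypothetical status bit */

    /* re-read STATUS_REG every 500 us until READY_BIT is set, giving up
     * after 10 ms; 'val' receives the last value read */
    static int wait_ready(struct regmap *map)
    {
    	unsigned int val;

    	return regmap_read_poll_timeout(map, STATUS_REG, val,
    					val & READY_BIT, 500, 10000);
    }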
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 0043a4c..57d6958 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -30,7 +30,7 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
-#include <linux/i2c/ds620.h>
+#include <linux/platform_data/ds620.h>
 
 /*
  * Many DS620 constants specified below
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 6d2e660..5ccdd0b 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -50,22 +50,34 @@ enum sensors {
 	TEMP,
 	POWER_SUPPLY,
 	POWER_INPUT,
+	CURRENT,
 	MAX_SENSOR_TYPE,
 };
 
 #define INVALID_INDEX (-1U)
 
+/*
+ * 'compatible' string properties for sensor types as defined in old
+ * PowerNV firmware (skiboot). These are ordered as 'enum sensors'.
+ */
+static const char * const legacy_compatibles[] = {
+	"ibm,opal-sensor-cooling-fan",
+	"ibm,opal-sensor-amb-temp",
+	"ibm,opal-sensor-power-supply",
+	"ibm,opal-sensor-power"
+};
+
 static struct sensor_group {
-	const char *name;
-	const char *compatible;
+	const char *name; /* matches property 'sensor-type' */
 	struct attribute_group group;
 	u32 attr_count;
 	u32 hwmon_index;
 } sensor_groups[] = {
-	{"fan", "ibm,opal-sensor-cooling-fan"},
-	{"temp", "ibm,opal-sensor-amb-temp"},
-	{"in", "ibm,opal-sensor-power-supply"},
-	{"power", "ibm,opal-sensor-power"}
+	{ "fan"   },
+	{ "temp"  },
+	{ "in"    },
+	{ "power" },
+	{ "curr"  },
 };
 
 struct sensor_data {
@@ -239,8 +251,8 @@ static int get_sensor_type(struct device_node *np)
 	enum sensors type;
 	const char *str;
 
-	for (type = 0; type < MAX_SENSOR_TYPE; type++) {
-		if (of_device_is_compatible(np, sensor_groups[type].compatible))
+	for (type = 0; type < ARRAY_SIZE(legacy_compatibles); type++) {
+		if (of_device_is_compatible(np, legacy_compatibles[type]))
 			return type;
 	}
 
@@ -298,10 +310,14 @@ static int populate_attr_groups(struct platform_device *pdev)
 		sensor_groups[type].attr_count++;
 
 		/*
-		 * add a new attribute for labels
+		 * add attributes for labels, min and max
 		 */
 		if (!of_property_read_string(np, "label", &label))
 			sensor_groups[type].attr_count++;
+		if (of_find_property(np, "sensor-data-min", NULL))
+			sensor_groups[type].attr_count++;
+		if (of_find_property(np, "sensor-data-max", NULL))
+			sensor_groups[type].attr_count++;
 	}
 
 	of_node_put(opal);
@@ -337,6 +353,41 @@ static void create_hwmon_attr(struct sensor_data *sdata, const char *attr_name,
 	sdata->dev_attr.show = show;
 }
 
+static void populate_sensor(struct sensor_data *sdata, int od, int hd, int sid,
+			    const char *attr_name, enum sensors type,
+			    const struct attribute_group *pgroup,
+			    ssize_t (*show)(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf))
+{
+	sdata->id = sid;
+	sdata->type = type;
+	sdata->opal_index = od;
+	sdata->hwmon_index = hd;
+	create_hwmon_attr(sdata, attr_name, show);
+	pgroup->attrs[sensor_groups[type].attr_count++] = &sdata->dev_attr.attr;
+}
+
+static char *get_max_attr(enum sensors type)
+{
+	switch (type) {
+	case POWER_INPUT:
+		return "input_highest";
+	default:
+		return "highest";
+	}
+}
+
+static char *get_min_attr(enum sensors type)
+{
+	switch (type) {
+	case POWER_INPUT:
+		return "input_lowest";
+	default:
+		return "lowest";
+	}
+}
+
 /*
  * Iterate through the device tree for each child of 'sensors' node, create
  * a sysfs attribute file, the file is named by translating the DT node name
@@ -417,16 +468,31 @@ static int create_device_attrs(struct platform_device *pdev)
 			 * attribute. They are related to the same
 			 * sensor.
 			 */
-			sdata[count].type = type;
-			sdata[count].opal_index = sdata[count - 1].opal_index;
-			sdata[count].hwmon_index = sdata[count - 1].hwmon_index;
 
 			make_sensor_label(np, &sdata[count], label);
+			populate_sensor(&sdata[count], opal_index,
+					sdata[count - 1].hwmon_index,
+					sensor_id, "label", type, pgroups[type],
+					show_label);
+			count++;
+		}
 
-			create_hwmon_attr(&sdata[count], "label", show_label);
+		if (!of_property_read_u32(np, "sensor-data-max", &sensor_id)) {
+			attr_name = get_max_attr(type);
+			populate_sensor(&sdata[count], opal_index,
+					sdata[count - 1].hwmon_index,
+					sensor_id, attr_name, type,
+					pgroups[type], show_sensor);
+			count++;
+		}
 
-			pgroups[type]->attrs[sensor_groups[type].attr_count++] =
-				&sdata[count++].dev_attr.attr;
+		if (!of_property_read_u32(np, "sensor-data-min", &sensor_id)) {
+			attr_name = get_min_attr(type);
+			populate_sensor(&sdata[count], opal_index,
+					sdata[count - 1].hwmon_index,
+					sensor_id, attr_name, type,
+					pgroups[type], show_sensor);
+			count++;
 		}
 	}
 
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 4680d89..082f0a0 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -23,7 +23,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/jiffies.h>
-#include <linux/i2c/ltc4245.h>
+#include <linux/platform_data/ltc4245.h>
 
 /* Here are names of the chip's registers (a.k.a. commands) */
 enum ltc4245_cmd {
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index dac6d85..f98a83c 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -32,7 +32,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
-#include <linux/i2c/max6639.h>
+#include <linux/platform_data/max6639.h>
 
 /* Addresses to scan */
 static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 2458b40..c219e43 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -40,6 +40,8 @@
  * nct6791d    15      6       6       2+6    0xc800 0xc1    0x5ca3
  * nct6792d    15      6       6       2+6    0xc910 0xc1    0x5ca3
  * nct6793d    15      6       6       2+6    0xd120 0xc1    0x5ca3
+ * nct6795d    14      6       6       2+6    0xd350 0xc1    0x5ca3
+ *
  *
  * #temp lists the number of monitored temperature sources (first value) plus
  * the number of directly connectable temperature sensors (second value).
@@ -58,13 +60,15 @@
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/acpi.h>
+#include <linux/bitops.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
 #include "lm75.h"
 
 #define USE_ALTERNATE
 
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 };
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
+	     nct6795 };
 
 /* used to set data->name = nct6775_device_names[data->sio_kind] */
 static const char * const nct6775_device_names[] = {
@@ -75,6 +79,7 @@ static const char * const nct6775_device_names[] = {
 	"nct6791",
 	"nct6792",
 	"nct6793",
+	"nct6795",
 };
 
 static const char * const nct6775_sio_names[] __initconst = {
@@ -85,6 +90,7 @@ static const char * const nct6775_sio_names[] __initconst = {
 	"NCT6791D",
 	"NCT6792D",
 	"NCT6793D",
+	"NCT6795D",
 };
 
 static unsigned short force_id;
@@ -104,6 +110,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define NCT6775_LD_ACPI		0x0a
 #define NCT6775_LD_HWM		0x0b
 #define NCT6775_LD_VID		0x0d
+#define NCT6775_LD_12		0x12
 
 #define SIO_REG_LDSEL		0x07	/* Logical device select */
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
@@ -117,6 +124,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define SIO_NCT6791_ID		0xc800
 #define SIO_NCT6792_ID		0xc910
 #define SIO_NCT6793_ID		0xd120
+#define SIO_NCT6795_ID		0xd350
 #define SIO_ID_MASK		0xFFF0
 
 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -360,12 +368,24 @@ static const char *const nct6775_temp_label[] = {
 	"PCH_DIM3_TEMP"
 };
 
-static const u16 NCT6775_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6775_temp_label) - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x661, 0x662, 0x664 };
+#define NCT6775_TEMP_MASK	0x001ffffe
 
-static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
-	= { 0, 0, 0, 0, 0xa00, 0xa01, 0xa02, 0xa03, 0xa04, 0xa05, 0xa06,
-	    0xa07 };
+static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
+	[13] = 0x661,
+	[14] = 0x662,
+	[15] = 0x664,
+};
+
+static const u16 NCT6775_REG_TEMP_CRIT[32] = {
+	[4] = 0xa00,
+	[5] = 0xa01,
+	[6] = 0xa02,
+	[7] = 0xa03,
+	[8] = 0xa04,
+	[9] = 0xa05,
+	[10] = 0xa06,
+	[11] = 0xa07
+};
 
 /* NCT6776 specific data */
 
@@ -434,11 +454,18 @@ static const char *const nct6776_temp_label[] = {
 	"BYTE_TEMP"
 };
 
-static const u16 NCT6776_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6776_temp_label) - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x401, 0x402, 0x404 };
+#define NCT6776_TEMP_MASK	0x007ffffe
 
-static const u16 NCT6776_REG_TEMP_CRIT[ARRAY_SIZE(nct6776_temp_label) - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
+static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
+	[14] = 0x401,
+	[15] = 0x402,
+	[16] = 0x404,
+};
+
+static const u16 NCT6776_REG_TEMP_CRIT[32] = {
+	[11] = 0x709,
+	[12] = 0x70a,
+};
 
 /* NCT6779 specific data */
 
@@ -525,17 +552,19 @@ static const char *const nct6779_temp_label[] = {
 	"Virtual_TEMP"
 };
 
-#define NCT6779_NUM_LABELS	(ARRAY_SIZE(nct6779_temp_label) - 5)
-#define NCT6791_NUM_LABELS	ARRAY_SIZE(nct6779_temp_label)
+#define NCT6779_TEMP_MASK	0x07ffff7e
+#define NCT6791_TEMP_MASK	0x87ffff7e
 
-static const u16 NCT6779_REG_TEMP_ALTERNATE[NCT6791_NUM_LABELS - 1]
+static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
 	= { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
 	    0, 0, 0, 0, 0, 0, 0, 0,
 	    0, 0x400, 0x401, 0x402, 0x404, 0x405, 0x406, 0x407,
 	    0x408, 0 };
 
-static const u16 NCT6779_REG_TEMP_CRIT[NCT6791_NUM_LABELS - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
+static const u16 NCT6779_REG_TEMP_CRIT[32] = {
+	[15] = 0x709,
+	[16] = 0x70a,
+};
 
 /* NCT6791 specific data */
 
@@ -602,6 +631,8 @@ static const char *const nct6792_temp_label[] = {
 	"Virtual_TEMP"
 };
 
+#define NCT6792_TEMP_MASK	0x9fffff7e
+
 static const char *const nct6793_temp_label[] = {
 	"",
 	"SYSTIN",
@@ -637,6 +668,45 @@ static const char *const nct6793_temp_label[] = {
 	"Virtual_TEMP"
 };
 
+#define NCT6793_TEMP_MASK	0xbfff037e
+
+static const char *const nct6795_temp_label[] = {
+	"",
+	"SYSTIN",
+	"CPUTIN",
+	"AUXTIN0",
+	"AUXTIN1",
+	"AUXTIN2",
+	"AUXTIN3",
+	"",
+	"SMBUSMASTER 0",
+	"SMBUSMASTER 1",
+	"SMBUSMASTER 2",
+	"SMBUSMASTER 3",
+	"SMBUSMASTER 4",
+	"SMBUSMASTER 5",
+	"SMBUSMASTER 6",
+	"SMBUSMASTER 7",
+	"PECI Agent 0",
+	"PECI Agent 1",
+	"PCH_CHIP_CPU_MAX_TEMP",
+	"PCH_CHIP_TEMP",
+	"PCH_CPU_TEMP",
+	"PCH_MCH_TEMP",
+	"PCH_DIM0_TEMP",
+	"PCH_DIM1_TEMP",
+	"PCH_DIM2_TEMP",
+	"PCH_DIM3_TEMP",
+	"BYTE_TEMP0",
+	"BYTE_TEMP1",
+	"PECI Agent 0 Calibration",
+	"PECI Agent 1 Calibration",
+	"",
+	"Virtual_TEMP"
+};
+
+#define NCT6795_TEMP_MASK	0xbfffff7e
+
 /* NCT6102D/NCT6106D specific data */
 
 #define NCT6106_REG_VBAT	0x318
@@ -731,11 +801,16 @@ static const s8 NCT6106_BEEP_BITS[] = {
 	34, -1				/* intrusion0, intrusion1 */
 };
 
-static const u16 NCT6106_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6776_temp_label) - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x51, 0x52, 0x54 };
+static const u16 NCT6106_REG_TEMP_ALTERNATE[32] = {
+	[14] = 0x51,
+	[15] = 0x52,
+	[16] = 0x54,
+};
 
-static const u16 NCT6106_REG_TEMP_CRIT[ARRAY_SIZE(nct6776_temp_label) - 1]
-	= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x204, 0x205 };
+static const u16 NCT6106_REG_TEMP_CRIT[32] = {
+	[11] = 0x204,
+	[12] = 0x205,
+};
 
 static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
 {
@@ -810,7 +885,7 @@ static u16 fan_to_reg(u32 fan, unsigned int divreg)
 static inline unsigned int
 div_from_reg(u8 reg)
 {
-	return 1 << reg;
+	return BIT(reg);
 }
 
 /*
@@ -850,7 +925,7 @@ struct nct6775_data {
 	u8 temp_src[NUM_TEMP];
 	u16 reg_temp_config[NUM_TEMP];
 	const char * const *temp_label;
-	int temp_label_num;
+	u32 temp_mask;
 
 	u16 REG_CONFIG;
 	u16 REG_VBAT;
@@ -1155,6 +1230,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
 	case nct6791:
 	case nct6792:
 	case nct6793:
+	case nct6795:
 		return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
 		  ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
 		  reg == 0x402 ||
@@ -1276,7 +1352,7 @@ static void nct6775_update_fan_div(struct nct6775_data *data)
 	data->fan_div[1] = (i & 0x70) >> 4;
 	i = nct6775_read_value(data, NCT6775_REG_FANDIV2);
 	data->fan_div[2] = i & 0x7;
-	if (data->has_fan & (1 << 3))
+	if (data->has_fan & BIT(3))
 		data->fan_div[3] = (i & 0x70) >> 4;
 }
 
@@ -1298,7 +1374,7 @@ static void nct6775_init_fan_div(struct nct6775_data *data)
 	 * We'll compute a better divider later on.
 	 */
 	for (i = 0; i < ARRAY_SIZE(data->fan_div); i++) {
-		if (!(data->has_fan & (1 << i)))
+		if (!(data->has_fan & BIT(i)))
 			continue;
 		if (data->fan_div[i] == 0) {
 			data->fan_div[i] = 7;
@@ -1321,7 +1397,7 @@ static void nct6775_init_fan_common(struct device *dev,
 	 * prevents the unnecessary warning when fanX_min is reported as 0.
 	 */
 	for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
-		if (data->has_fan_min & (1 << i)) {
+		if (data->has_fan_min & BIT(i)) {
 			reg = nct6775_read_value(data, data->REG_FAN_MIN[i]);
 			if (!reg)
 				nct6775_write_value(data, data->REG_FAN_MIN[i],
@@ -1356,7 +1432,7 @@ static void nct6775_select_fan_div(struct device *dev,
 			div_from_reg(fan_div));
 
 		/* Preserve min limit if possible */
-		if (data->has_fan_min & (1 << nr)) {
+		if (data->has_fan_min & BIT(nr)) {
 			fan_min = data->fan_min[nr];
 			if (fan_div > data->fan_div[nr]) {
 				if (fan_min != 255 && fan_min > 1)
@@ -1387,7 +1463,7 @@ static void nct6775_update_pwm(struct device *dev)
 	bool duty_is_dc;
 
 	for (i = 0; i < data->pwm_num; i++) {
-		if (!(data->has_pwm & (1 << i)))
+		if (!(data->has_pwm & BIT(i)))
 			continue;
 
 		duty_is_dc = data->REG_PWM_MODE[i] &&
@@ -1457,7 +1533,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
 	u16 reg_t;
 
 	for (i = 0; i < data->pwm_num; i++) {
-		if (!(data->has_pwm & (1 << i)))
+		if (!(data->has_pwm & BIT(i)))
 			continue;
 
 		for (j = 0; j < ARRAY_SIZE(data->fan_time); j++) {
@@ -1507,6 +1583,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
 		case nct6791:
 		case nct6792:
 		case nct6793:
+		case nct6795:
 			reg = nct6775_read_value(data,
 					data->REG_CRITICAL_PWM_ENABLE[i]);
 			if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -1534,7 +1611,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
 
 		/* Measured voltages and limits */
 		for (i = 0; i < data->in_num; i++) {
-			if (!(data->have_in & (1 << i)))
+			if (!(data->have_in & BIT(i)))
 				continue;
 
 			data->in[i][0] = nct6775_read_value(data,
@@ -1549,14 +1626,14 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
 		for (i = 0; i < ARRAY_SIZE(data->rpm); i++) {
 			u16 reg;
 
-			if (!(data->has_fan & (1 << i)))
+			if (!(data->has_fan & BIT(i)))
 				continue;
 
 			reg = nct6775_read_value(data, data->REG_FAN[i]);
 			data->rpm[i] = data->fan_from_reg(reg,
 							  data->fan_div[i]);
 
-			if (data->has_fan_min & (1 << i))
+			if (data->has_fan_min & BIT(i))
 				data->fan_min[i] = nct6775_read_value(data,
 					   data->REG_FAN_MIN[i]);
 			data->fan_pulses[i] =
@@ -1571,7 +1648,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
 
 		/* Measured temperatures and limits */
 		for (i = 0; i < NUM_TEMP; i++) {
-			if (!(data->have_temp & (1 << i)))
+			if (!(data->have_temp & BIT(i)))
 				continue;
 			for (j = 0; j < ARRAY_SIZE(data->reg_temp); j++) {
 				if (data->reg_temp[j][i])
@@ -1580,7 +1657,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
 						data->reg_temp[j][i]);
 			}
 			if (i >= NUM_TEMP_FIXED ||
-			    !(data->have_temp_fixed & (1 << i)))
+			    !(data->have_temp_fixed & BIT(i)))
 				continue;
 			data->temp_offset[i]
 			  = nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
@@ -1801,7 +1878,7 @@ static umode_t nct6775_in_is_visible(struct kobject *kobj,
 	struct nct6775_data *data = dev_get_drvdata(dev);
 	int in = index / 5;	/* voltage index */
 
-	if (!(data->have_in & (1 << in)))
+	if (!(data->have_in & BIT(in)))
 		return 0;
 
 	return attr->mode;
@@ -1911,7 +1988,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
 		 * even with the highest divider (128)
 		 */
 		data->fan_min[nr] = 254;
-		new_div = 7; /* 128 == (1 << 7) */
+		new_div = 7; /* 128 == BIT(7) */
 		dev_warn(dev,
 			 "fan%u low limit %lu below minimum %u, set to minimum\n",
 			 nr + 1, val, data->fan_from_reg_min(254, 7));
@@ -1921,7 +1998,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
 		 * even with the lowest divider (1)
 		 */
 		data->fan_min[nr] = 1;
-		new_div = 0; /* 1 == (1 << 0) */
+		new_div = 0; /* 1 == BIT(0) */
 		dev_warn(dev,
 			 "fan%u low limit %lu above maximum %u, set to maximum\n",
 			 nr + 1, val, data->fan_from_reg_min(1, 0));
@@ -2008,14 +2085,14 @@ static umode_t nct6775_fan_is_visible(struct kobject *kobj,
 	int fan = index / 6;	/* fan index */
 	int nr = index % 6;	/* attribute index */
 
-	if (!(data->has_fan & (1 << fan)))
+	if (!(data->has_fan & BIT(fan)))
 		return 0;
 
 	if (nr == 1 && data->ALARM_BITS[FAN_ALARM_BASE + fan] == -1)
 		return 0;
 	if (nr == 2 && data->BEEP_BITS[FAN_ALARM_BASE + fan] == -1)
 		return 0;
-	if (nr == 4 && !(data->has_fan_min & (1 << fan)))
+	if (nr == 4 && !(data->has_fan_min & BIT(fan)))
 		return 0;
 	if (nr == 5 && data->kind != nct6775)
 		return 0;
@@ -2193,7 +2270,10 @@ static umode_t nct6775_temp_is_visible(struct kobject *kobj,
 	int temp = index / 10;	/* temp index */
 	int nr = index % 10;	/* attribute index */
 
-	if (!(data->have_temp & (1 << temp)))
+	if (!(data->have_temp & BIT(temp)))
+		return 0;
+
+	if (nr == 1 && !data->temp_label)
 		return 0;
 
 	if (nr == 2 && find_temp_source(data, temp, data->num_temp_alarms) < 0)
@@ -2215,7 +2295,7 @@ static umode_t nct6775_temp_is_visible(struct kobject *kobj,
 		return 0;
 
 	/* offset and type only apply to fixed sensors */
-	if (nr > 7 && !(data->have_temp_fixed & (1 << temp)))
+	if (nr > 7 && !(data->have_temp_fixed & BIT(temp)))
 		return 0;
 
 	return attr->mode;
@@ -2484,7 +2564,7 @@ show_pwm_temp_sel_common(struct nct6775_data *data, char *buf, int src)
 	int i, sel = 0;
 
 	for (i = 0; i < NUM_TEMP; i++) {
-		if (!(data->have_temp & (1 << i)))
+		if (!(data->have_temp & BIT(i)))
 			continue;
 		if (src == data->temp_src[i]) {
 			sel = i + 1;
@@ -2520,7 +2600,7 @@ store_pwm_temp_sel(struct device *dev, struct device_attribute *attr,
 		return err;
 	if (val == 0 || val > NUM_TEMP)
 		return -EINVAL;
-	if (!(data->have_temp & (1 << (val - 1))) || !data->temp_src[val - 1])
+	if (!(data->have_temp & BIT(val - 1)) || !data->temp_src[val - 1])
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
@@ -2562,7 +2642,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
 		return err;
 	if (val > NUM_TEMP)
 		return -EINVAL;
-	if (val && (!(data->have_temp & (1 << (val - 1))) ||
+	if (val && (!(data->have_temp & BIT(val - 1)) ||
 		    !data->temp_src[val - 1]))
 		return -EINVAL;
 
@@ -2923,6 +3003,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
 		case nct6791:
 		case nct6792:
 		case nct6793:
+		case nct6795:
 			nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
 					    val);
 			reg = nct6775_read_value(data,
@@ -2995,7 +3076,7 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
 	int pwm = index / 36;	/* pwm index */
 	int nr = index % 36;	/* attribute index */
 
-	if (!(data->has_pwm & (1 << pwm)))
+	if (!(data->has_pwm & BIT(pwm)))
 		return 0;
 
 	if ((nr >= 14 && nr <= 18) || nr == 21)   /* weight */
@@ -3246,7 +3327,7 @@ static inline void nct6775_init_device(struct nct6775_data *data)
 
 	/* Enable temperature sensors if needed */
 	for (i = 0; i < NUM_TEMP; i++) {
-		if (!(data->have_temp & (1 << i)))
+		if (!(data->have_temp & BIT(i)))
 			continue;
 		if (!data->reg_temp_config[i])
 			continue;
@@ -3264,7 +3345,7 @@ static inline void nct6775_init_device(struct nct6775_data *data)
 	diode = nct6775_read_value(data, data->REG_DIODE);
 
 	for (i = 0; i < data->temp_fixed_num; i++) {
-		if (!(data->have_temp_fixed & (1 << i)))
+		if (!(data->have_temp_fixed & BIT(i)))
 			continue;
 		if ((tmp & (data->DIODE_MASK << i)))	/* diode */
 			data->temp_type[i]
@@ -3290,8 +3371,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
 	if (data->kind == nct6775) {
 		regval = superio_inb(sioreg, 0x2c);
 
-		fan3pin = regval & (1 << 6);
-		pwm3pin = regval & (1 << 7);
+		fan3pin = regval & BIT(6);
+		pwm3pin = regval & BIT(7);
 
 		/* On NCT6775, fan4 shares pins with the fdc interface */
 		fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
@@ -3357,28 +3438,57 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
 		pwm4pin = false;
 		pwm5pin = false;
 		pwm6pin = false;
-	} else {	/* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */
+	} else { /* NCT6779D, NCT6791D, NCT6792D, NCT6793D, or NCT6795D */
+		int regval_1b, regval_2a, regval_eb;
+
 		regval = superio_inb(sioreg, 0x1c);
 
-		fan3pin = !(regval & (1 << 5));
-		fan4pin = !(regval & (1 << 6));
-		fan5pin = !(regval & (1 << 7));
+		fan3pin = !(regval & BIT(5));
+		fan4pin = !(regval & BIT(6));
+		fan5pin = !(regval & BIT(7));
 
-		pwm3pin = !(regval & (1 << 0));
-		pwm4pin = !(regval & (1 << 1));
-		pwm5pin = !(regval & (1 << 2));
+		pwm3pin = !(regval & BIT(0));
+		pwm4pin = !(regval & BIT(1));
+		pwm5pin = !(regval & BIT(2));
 
-		fan4min = fan4pin;
+		regval = superio_inb(sioreg, 0x2d);
+		switch (data->kind) {
+		case nct6791:
+		case nct6792:
+			fan6pin = regval & BIT(1);
+			pwm6pin = regval & BIT(0);
+			break;
+		case nct6793:
+		case nct6795:
+			regval_1b = superio_inb(sioreg, 0x1b);
+			regval_2a = superio_inb(sioreg, 0x2a);
 
-		if (data->kind == nct6791 || data->kind == nct6792 ||
-		    data->kind == nct6793) {
-			regval = superio_inb(sioreg, 0x2d);
-			fan6pin = (regval & (1 << 1));
-			pwm6pin = (regval & (1 << 0));
-		} else {	/* NCT6779D */
+			if (!pwm5pin)
+				pwm5pin = regval & BIT(7);
+			fan6pin = regval & BIT(1);
+			pwm6pin = regval & BIT(0);
+			if (!fan5pin)
+				fan5pin = regval_1b & BIT(5);
+
+			superio_select(sioreg, NCT6775_LD_12);
+			regval_eb = superio_inb(sioreg, 0xeb);
+			if (!fan5pin)
+				fan5pin = regval_eb & BIT(5);
+			if (!pwm5pin)
+				pwm5pin = (regval_eb & BIT(4)) &&
+					   !(regval_2a & BIT(0));
+			if (!fan6pin)
+				fan6pin = regval_eb & BIT(3);
+			if (!pwm6pin)
+				pwm6pin = regval_eb & BIT(2);
+			break;
+		default:	/* NCT6779D */
 			fan6pin = false;
 			pwm6pin = false;
+			break;
 		}
+
+		fan4min = fan4pin;
 	}
 
 	/* fan 1 and 2 (0x03) are always present */
@@ -3403,16 +3513,15 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
 			continue;
 		src = nct6775_read_value(data, regp[i]);
 		src &= 0x1f;
-		if (!src || (*mask & (1 << src)))
+		if (!src || (*mask & BIT(src)))
 			continue;
-		if (src >= data->temp_label_num ||
-		    !strlen(data->temp_label[src]))
+		if (!(data->temp_mask & BIT(src)))
 			continue;
 
 		index = __ffs(*available);
 		nct6775_write_value(data, data->REG_TEMP_SOURCE[index], src);
-		*available &= ~(1 << index);
-		*mask |= 1 << src;
+		*available &= ~BIT(index);
+		*mask |= BIT(src);
 	}
 }
 
@@ -3464,7 +3573,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->fan_from_reg_min = fan_from_reg13;
 
 		data->temp_label = nct6776_temp_label;
-		data->temp_label_num = ARRAY_SIZE(nct6776_temp_label);
+		data->temp_mask = NCT6776_TEMP_MASK;
 
 		data->REG_VBAT = NCT6106_REG_VBAT;
 		data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3542,7 +3651,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->speed_tolerance_limit = 15;
 
 		data->temp_label = nct6775_temp_label;
-		data->temp_label_num = ARRAY_SIZE(nct6775_temp_label);
+		data->temp_mask = NCT6775_TEMP_MASK;
 
 		data->REG_CONFIG = NCT6775_REG_CONFIG;
 		data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3614,7 +3723,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->speed_tolerance_limit = 63;
 
 		data->temp_label = nct6776_temp_label;
-		data->temp_label_num = ARRAY_SIZE(nct6776_temp_label);
+		data->temp_mask = NCT6776_TEMP_MASK;
 
 		data->REG_CONFIG = NCT6775_REG_CONFIG;
 		data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3686,7 +3795,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		data->speed_tolerance_limit = 63;
 
 		data->temp_label = nct6779_temp_label;
-		data->temp_label_num = NCT6779_NUM_LABELS;
+		data->temp_mask = NCT6779_TEMP_MASK;
 
 		data->REG_CONFIG = NCT6775_REG_CONFIG;
 		data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3746,6 +3855,7 @@ static int nct6775_probe(struct platform_device *pdev)
 	case nct6791:
 	case nct6792:
 	case nct6793:
+	case nct6795:
 		data->in_num = 15;
 		data->pwm_num = 6;
 		data->auto_pwm_num = 4;
@@ -3767,15 +3877,21 @@ static int nct6775_probe(struct platform_device *pdev)
 		default:
 		case nct6791:
 			data->temp_label = nct6779_temp_label;
+			data->temp_mask = NCT6791_TEMP_MASK;
 			break;
 		case nct6792:
 			data->temp_label = nct6792_temp_label;
+			data->temp_mask = NCT6792_TEMP_MASK;
 			break;
 		case nct6793:
 			data->temp_label = nct6793_temp_label;
+			data->temp_mask = NCT6793_TEMP_MASK;
+			break;
+		case nct6795:
+			data->temp_label = nct6795_temp_label;
+			data->temp_mask = NCT6795_TEMP_MASK;
 			break;
 		}
-		data->temp_label_num = NCT6791_NUM_LABELS;
 
 		data->REG_CONFIG = NCT6775_REG_CONFIG;
 		data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3843,7 +3959,7 @@ static int nct6775_probe(struct platform_device *pdev)
 	default:
 		return -ENODEV;
 	}
-	data->have_in = (1 << data->in_num) - 1;
+	data->have_in = BIT(data->in_num) - 1;
 	data->have_temp = 0;
 
 	/*
@@ -3861,10 +3977,10 @@ static int nct6775_probe(struct platform_device *pdev)
 			continue;
 
 		src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
-		if (!src || (mask & (1 << src)))
-			available |= 1 << i;
+		if (!src || (mask & BIT(src)))
+			available |= BIT(i);
 
-		mask |= 1 << src;
+		mask |= BIT(src);
 	}
 
 	/*
@@ -3881,23 +3997,22 @@ static int nct6775_probe(struct platform_device *pdev)
 			continue;
 
 		src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
-		if (!src || (mask & (1 << src)))
+		if (!src || (mask & BIT(src)))
 			continue;
 
-		if (src >= data->temp_label_num ||
-		    !strlen(data->temp_label[src])) {
+		if (!(data->temp_mask & BIT(src))) {
 			dev_info(dev,
 				 "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n",
 				 src, i, data->REG_TEMP_SOURCE[i], reg_temp[i]);
 			continue;
 		}
 
-		mask |= 1 << src;
+		mask |= BIT(src);
 
 		/* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
 		if (src <= data->temp_fixed_num) {
-			data->have_temp |= 1 << (src - 1);
-			data->have_temp_fixed |= 1 << (src - 1);
+			data->have_temp |= BIT(src - 1);
+			data->have_temp_fixed |= BIT(src - 1);
 			data->reg_temp[0][src - 1] = reg_temp[i];
 			data->reg_temp[1][src - 1] = reg_temp_over[i];
 			data->reg_temp[2][src - 1] = reg_temp_hyst[i];
@@ -3917,7 +4032,7 @@ static int nct6775_probe(struct platform_device *pdev)
 			continue;
 
 		/* Use dynamic index for other sources */
-		data->have_temp |= 1 << s;
+		data->have_temp |= BIT(s);
 		data->reg_temp[0][s] = reg_temp[i];
 		data->reg_temp[1][s] = reg_temp_over[i];
 		data->reg_temp[2][s] = reg_temp_hyst[i];
@@ -3945,8 +4060,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		if (!src)
 			continue;
 
-		if (src >= data->temp_label_num ||
-		    !strlen(data->temp_label[src])) {
+		if (!(data->temp_mask & BIT(src))) {
 			dev_info(dev,
 				 "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n",
 				 src, i, data->REG_TEMP_SEL[i],
@@ -3960,17 +4074,17 @@ static int nct6775_probe(struct platform_device *pdev)
 		 * are no duplicates.
 		 */
 		if (src != TEMP_SOURCE_VIRTUAL) {
-			if (mask & (1 << src))
+			if (mask & BIT(src))
 				continue;
-			mask |= 1 << src;
+			mask |= BIT(src);
 		}
 
 		/* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */
 		if (src <= data->temp_fixed_num) {
-			if (data->have_temp & (1 << (src - 1)))
+			if (data->have_temp & BIT(src - 1))
 				continue;
-			data->have_temp |= 1 << (src - 1);
-			data->have_temp_fixed |= 1 << (src - 1);
+			data->have_temp |= BIT(src - 1);
+			data->have_temp_fixed |= BIT(src - 1);
 			data->reg_temp[0][src - 1] = reg_temp_mon[i];
 			data->temp_src[src - 1] = src;
 			continue;
@@ -3980,7 +4094,7 @@ static int nct6775_probe(struct platform_device *pdev)
 			continue;
 
 		/* Use dynamic index for other sources */
-		data->have_temp |= 1 << s;
+		data->have_temp |= BIT(s);
 		data->reg_temp[0][s] = reg_temp_mon[i];
 		data->temp_src[s] = src;
 		s++;
@@ -3993,16 +4107,18 @@ static int nct6775_probe(struct platform_device *pdev)
 	 * The temperature is already monitored if the respective bit in <mask>
 	 * is set.
 	 */
-	for (i = 0; i < data->temp_label_num - 1; i++) {
+	for (i = 0; i < 32; i++) {
+		if (!(data->temp_mask & BIT(i + 1)))
+			continue;
 		if (!reg_temp_alternate[i])
 			continue;
-		if (mask & (1 << (i + 1)))
+		if (mask & BIT(i + 1))
 			continue;
 		if (i < data->temp_fixed_num) {
-			if (data->have_temp & (1 << i))
+			if (data->have_temp & BIT(i))
 				continue;
-			data->have_temp |= 1 << i;
-			data->have_temp_fixed |= 1 << i;
+			data->have_temp |= BIT(i);
+			data->have_temp_fixed |= BIT(i);
 			data->reg_temp[0][i] = reg_temp_alternate[i];
 			if (i < num_reg_temp) {
 				data->reg_temp[1][i] = reg_temp_over[i];
@@ -4015,7 +4131,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		if (s >= NUM_TEMP)	/* Abort if no more space */
 			break;
 
-		data->have_temp |= 1 << s;
+		data->have_temp |= BIT(s);
 		data->reg_temp[0][s] = reg_temp_alternate[i];
 		data->temp_src[s] = i + 1;
 		s++;
@@ -4042,6 +4158,7 @@ static int nct6775_probe(struct platform_device *pdev)
 	case nct6791:
 	case nct6792:
 	case nct6793:
+	case nct6795:
 		break;
 	}
 
@@ -4075,6 +4192,7 @@ static int nct6775_probe(struct platform_device *pdev)
 		case nct6791:
 		case nct6792:
 		case nct6793:
+		case nct6795:
 			tmp |= 0x7e;
 			break;
 		}
@@ -4173,14 +4291,14 @@ static int __maybe_unused nct6775_resume(struct device *dev)
 		superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
 
 	if (data->kind == nct6791 || data->kind == nct6792 ||
-	    data->kind == nct6793)
+	    data->kind == nct6793 || data->kind == nct6795)
 		nct6791_enable_io_mapping(sioreg);
 
 	superio_exit(sioreg);
 
 	/* Restore limits */
 	for (i = 0; i < data->in_num; i++) {
-		if (!(data->have_in & (1 << i)))
+		if (!(data->have_in & BIT(i)))
 			continue;
 
 		nct6775_write_value(data, data->REG_IN_MINMAX[0][i],
@@ -4190,7 +4308,7 @@ static int __maybe_unused nct6775_resume(struct device *dev)
 	}
 
 	for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
-		if (!(data->has_fan_min & (1 << i)))
+		if (!(data->has_fan_min & BIT(i)))
 			continue;
 
 		nct6775_write_value(data, data->REG_FAN_MIN[i],
@@ -4198,7 +4316,7 @@ static int __maybe_unused nct6775_resume(struct device *dev)
 	}
 
 	for (i = 0; i < NUM_TEMP; i++) {
-		if (!(data->have_temp & (1 << i)))
+		if (!(data->have_temp & BIT(i)))
 			continue;
 
 		for (j = 1; j < ARRAY_SIZE(data->reg_temp); j++)
@@ -4270,6 +4388,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 	case SIO_NCT6793_ID:
 		sio_data->kind = nct6793;
 		break;
+	case SIO_NCT6795_ID:
+		sio_data->kind = nct6795;
+		break;
 	default:
 		if (val != 0xffff)
 			pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4296,7 +4417,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 	}
 
 	if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
-	    sio_data->kind == nct6793)
+	    sio_data->kind == nct6793 || sio_data->kind == nct6795)
 		nct6791_enable_io_mapping(sioaddr);
 
 	superio_exit(sioaddr);
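
The nct6775 changes above replace the temp_label_num bound (label-array length plus an empty-string check) with a per-chip temp_mask bitmap, where bit N is set when temperature source N is valid for that chip. A minimal userspace sketch of how such a mask corresponds to a label table (hypothetical helper and demo labels, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Illustrative only: derive a "valid source" bitmap from a label table,
 * mirroring how the NCT67xx_TEMP_MASK constants relate to the
 * nct67xx_temp_label arrays. Bit n is set when label[n] is non-empty.
 */
static unsigned int labels_to_mask(const char *const *labels, int count)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < count && i < 32; i++)
		if (labels[i] && strlen(labels[i]))
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	static const char *const demo_label[] = {
		"", "SYSTIN", "CPUTIN", "AUXTIN0", "", "Virtual_TEMP"
	};
	unsigned int mask = labels_to_mask(demo_label, 6);

	/* Source 0 and source 4 have empty labels, so their bits stay clear. */
	printf("mask = 0x%08x\n", mask);	/* prints 0x0000002e */
	return 0;
}

With the mask in hand, validity checks reduce to testing data->temp_mask & BIT(src), which is exactly what the updated driver code does.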
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index cad1229..68d717a 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -37,6 +37,16 @@
 	  This driver can also be built as a module. If so, the module will
 	  be called adm1275.
 
+config SENSORS_IR35221
+	tristate "Infineon IR35221"
+	default n
+	help
+	  If you say yes here you get hardware monitoring support for the
+	  Infineon IR35221 controller.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called ir35221.
+
 config SENSORS_LM25066
 	tristate "National Semiconductor LM25066 and compatibles"
 	default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 5621320..75bb7ca 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_PMBUS)		+= pmbus_core.o
 obj-$(CONFIG_SENSORS_PMBUS)	+= pmbus.o
 obj-$(CONFIG_SENSORS_ADM1275)	+= adm1275.o
+obj-$(CONFIG_SENSORS_IR35221)	+= ir35221.o
 obj-$(CONFIG_SENSORS_LM25066)	+= lm25066.o
 obj-$(CONFIG_SENSORS_LTC2978)	+= ltc2978.o
 obj-$(CONFIG_SENSORS_LTC3815)	+= ltc3815.o
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
new file mode 100644
index 0000000..8b906b4
--- /dev/null
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -0,0 +1,337 @@
+/*
+ * Hardware monitoring driver for IR35221
+ *
+ * Copyright (C) IBM Corporation 2017.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define IR35221_MFR_VIN_PEAK		0xc5
+#define IR35221_MFR_VOUT_PEAK		0xc6
+#define IR35221_MFR_IOUT_PEAK		0xc7
+#define IR35221_MFR_TEMP_PEAK		0xc8
+#define IR35221_MFR_VIN_VALLEY		0xc9
+#define IR35221_MFR_VOUT_VALLEY		0xca
+#define IR35221_MFR_IOUT_VALLEY		0xcb
+#define IR35221_MFR_TEMP_VALLEY		0xcc
+
+static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
+{
+	s16 exponent;
+	s32 mantissa;
+	long val;
+
+	/* We only modify LINEAR11 formats */
+	exponent = ((s16)data) >> 11;
+	mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
+
+	val = mantissa * 1000L;
+
+	/* scale result to micro-units for power sensors */
+	if (class == PSC_POWER)
+		val = val * 1000L;
+
+	if (exponent >= 0)
+		val <<= exponent;
+	else
+		val >>= -exponent;
+
+	return val;
+}
+
+#define MAX_MANTISSA	(1023 * 1000)
+#define MIN_MANTISSA	(511 * 1000)
+
+static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
+{
+	s16 exponent = 0, mantissa;
+	bool negative = false;
+
+	if (val == 0)
+		return 0;
+
+	if (val < 0) {
+		negative = true;
+		val = -val;
+	}
+
+	/* Power is in uW. Convert to mW before converting. */
+	if (class == PSC_POWER)
+		val = DIV_ROUND_CLOSEST(val, 1000L);
+
+	/* Reduce large mantissa until it fits into 10 bit */
+	while (val >= MAX_MANTISSA && exponent < 15) {
+		exponent++;
+		val >>= 1;
+	}
+	/* Increase small mantissa to improve precision */
+	while (val < MIN_MANTISSA && exponent > -15) {
+		exponent--;
+		val <<= 1;
+	}
+
+	/* Convert mantissa from milli-units to units */
+	mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+	/* Ensure that resulting number is within range */
+	if (mantissa > 0x3ff)
+		mantissa = 0x3ff;
+
+	/* restore sign */
+	if (negative)
+		mantissa = -mantissa;
+
+	/* Convert to 5 bit exponent, 11 bit mantissa */
+	return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
+static u16 ir35221_scale_result(s16 data, int shift,
+				enum pmbus_sensor_classes class)
+{
+	long val;
+
+	val = ir35221_reg2data(data, class);
+
+	if (shift < 0)
+		val >>= -shift;
+	else
+		val <<= shift;
+
+	return ir35221_data2reg(val, class);
+}
+
+static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
+{
+	int ret;
+
+	switch (reg) {
+	case PMBUS_IOUT_OC_FAULT_LIMIT:
+	case PMBUS_IOUT_OC_WARN_LIMIT:
+		ret = pmbus_read_word_data(client, page, reg);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
+		break;
+	case PMBUS_VIN_OV_FAULT_LIMIT:
+	case PMBUS_VIN_OV_WARN_LIMIT:
+	case PMBUS_VIN_UV_WARN_LIMIT:
+		ret = pmbus_read_word_data(client, page, reg);
+		ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
+		break;
+	case PMBUS_IIN_OC_WARN_LIMIT:
+		ret = pmbus_read_word_data(client, page, reg);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
+		break;
+	case PMBUS_READ_VIN:
+		ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
+		break;
+	case PMBUS_READ_IIN:
+		ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
+		if (ret < 0)
+			break;
+		if (page == 0)
+			ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
+		else
+			ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
+		break;
+	case PMBUS_READ_POUT:
+		ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -1, PSC_POWER);
+		break;
+	case PMBUS_READ_PIN:
+		ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -1, PSC_POWER);
+		break;
+	case PMBUS_READ_IOUT:
+		ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
+		if (ret < 0)
+			break;
+		if (page == 0)
+			ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
+		else
+			ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
+		break;
+	case PMBUS_VIRT_READ_VIN_MAX:
+		ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
+		break;
+	case PMBUS_VIRT_READ_VOUT_MAX:
+		ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
+		break;
+	case PMBUS_VIRT_READ_IOUT_MAX:
+		ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
+		if (ret < 0)
+			break;
+		if (page == 0)
+			ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
+		else
+			ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
+		break;
+	case PMBUS_VIRT_READ_TEMP_MAX:
+		ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
+		break;
+	case PMBUS_VIRT_READ_VIN_MIN:
+		ret = pmbus_read_word_data(client, page,
+					   IR35221_MFR_VIN_VALLEY);
+		if (ret < 0)
+			break;
+		ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
+		break;
+	case PMBUS_VIRT_READ_VOUT_MIN:
+		ret = pmbus_read_word_data(client, page,
+					   IR35221_MFR_VOUT_VALLEY);
+		break;
+	case PMBUS_VIRT_READ_IOUT_MIN:
+		ret = pmbus_read_word_data(client, page,
+					   IR35221_MFR_IOUT_VALLEY);
+		if (ret < 0)
+			break;
+		if (page == 0)
+			ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
+		else
+			ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
+		break;
+	case PMBUS_VIRT_READ_TEMP_MIN:
+		ret = pmbus_read_word_data(client, page,
+					   IR35221_MFR_TEMP_VALLEY);
+		break;
+	default:
+		ret = -ENODATA;
+		break;
+	}
+
+	return ret;
+}
+
+static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
+				   u16 word)
+{
+	int ret;
+	u16 val;
+
+	switch (reg) {
+	case PMBUS_IOUT_OC_FAULT_LIMIT:
+	case PMBUS_IOUT_OC_WARN_LIMIT:
+		val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
+		ret = pmbus_write_word_data(client, page, reg, val);
+		break;
+	case PMBUS_VIN_OV_FAULT_LIMIT:
+	case PMBUS_VIN_OV_WARN_LIMIT:
+	case PMBUS_VIN_UV_WARN_LIMIT:
+		val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
+		ret = pmbus_write_word_data(client, page, reg, val);
+		break;
+	case PMBUS_IIN_OC_WARN_LIMIT:
+		val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
+		ret = pmbus_write_word_data(client, page, reg, val);
+		break;
+	default:
+		ret = -ENODATA;
+		break;
+	}
+
+	return ret;
+}
+
+static int ir35221_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct pmbus_driver_info *info;
+	u8 buf[I2C_SMBUS_BLOCK_MAX];
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_READ_BYTE_DATA
+				| I2C_FUNC_SMBUS_READ_WORD_DATA
+				| I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+		return -ENODEV;
+
+	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to read PMBUS_MFR_ID\n");
+		return ret;
+	}
+	if (ret != 2 || strncmp(buf, "RI", strlen("RI"))) {
+		dev_err(&client->dev, "MFR_ID unrecognised\n");
+		return -ENODEV;
+	}
+
+	ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to read PMBUS_MFR_MODEL\n");
+		return ret;
+	}
+	if (ret != 2 || !(buf[0] == 0x6c && buf[1] == 0x00)) {
+		dev_err(&client->dev, "MFR_MODEL unrecognised\n");
+		return -ENODEV;
+	}
+
+	info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
+			    GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->write_word_data = ir35221_write_word_data;
+	info->read_word_data = ir35221_read_word_data;
+
+	info->pages = 2;
+	info->format[PSC_VOLTAGE_IN] = linear;
+	info->format[PSC_VOLTAGE_OUT] = linear;
+	info->format[PSC_CURRENT_IN] = linear;
+	info->format[PSC_CURRENT_OUT] = linear;
+	info->format[PSC_POWER] = linear;
+	info->format[PSC_TEMPERATURE] = linear;
+
+	info->func[0] = PMBUS_HAVE_VIN
+		| PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN
+		| PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN
+		| PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+		| PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+		| PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP;
+	info->func[1] = info->func[0];
+
+	return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id ir35221_id[] = {
+	{"ir35221", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ir35221_id);
+
+static struct i2c_driver ir35221_driver = {
+	.driver = {
+		.name	= "ir35221",
+	},
+	.probe		= ir35221_probe,
+	.remove		= pmbus_do_remove,
+	.id_table	= ir35221_id,
+};
+
+module_i2c_driver(ir35221_driver);
+
+MODULE_AUTHOR("Samuel Mendoza-Jonas <sam@mendozajonas.com");
+MODULE_DESCRIPTION("PMBus driver for IR35221");
+MODULE_LICENSE("GPL");
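
The ir35221 helpers above convert between raw register words and the PMBus LINEAR11 encoding (5-bit signed exponent in the top bits, 11-bit signed mantissa below). A standalone sketch of the decode step, assuming the same milli-unit scaling that ir35221_reg2data() uses for non-power sensor classes (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Decode a PMBus LINEAR11 word into milli-units, mirroring the
 * sign extension and scaling in ir35221_reg2data() for non-power
 * classes. Userspace sketch for illustration.
 */
static long linear11_to_milli(uint16_t word)
{
	int16_t exponent = (int16_t)word >> 11;			  /* top 5 bits */
	int32_t mantissa = ((int16_t)((word & 0x7ff) << 5)) >> 5; /* low 11 bits */
	long val = (long)mantissa * 1000L;

	if (exponent >= 0)
		val <<= exponent;
	else
		val >>= -exponent;
	return val;
}

int main(void)
{
	/* exponent = -2 (0b11110), mantissa = 10 -> 10 * 2^-2 = 2.5 units */
	uint16_t word = (uint16_t)((0x1e << 11) | 10);

	printf("%ld milli-units\n", linear11_to_milli(word));	/* 2500 */
	return 0;
}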
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 44ca8a9..7718e58 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -25,7 +25,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/i2c.h>
-#include <linux/i2c/pmbus.h>
+#include <linux/pmbus.h>
 #include "pmbus.h"
 
 /*
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ba59eae..f1eff6b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -28,7 +28,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/jiffies.h>
-#include <linux/i2c/pmbus.h>
+#include <linux/pmbus.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include "pmbus.h"
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 3518f0c..b74dbec 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -26,7 +26,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/i2c/pmbus.h>
+#include <linux/pmbus.h>
 #include "pmbus.h"
 
 enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 };
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index a8712c5..3ed9458 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -25,7 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/i2c/pmbus.h>
+#include <linux/pmbus.h>
 #include "pmbus.h"
 
 #define UCD9200_PHASE_INFO	0xd2
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index f9af393..70cc0d1 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -40,31 +40,22 @@ struct pwm_fan_ctx {
 
 static int  __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
 {
-	struct pwm_args pargs;
-	unsigned long duty;
+	unsigned long period;
 	int ret = 0;
-
-	pwm_get_args(ctx->pwm, &pargs);
+	struct pwm_state state = { };
 
 	mutex_lock(&ctx->lock);
 	if (ctx->pwm_value == pwm)
 		goto exit_set_pwm_err;
 
-	duty = DIV_ROUND_UP(pwm * (pargs.period - 1), MAX_PWM);
-	ret = pwm_config(ctx->pwm, duty, pargs.period);
-	if (ret)
-		goto exit_set_pwm_err;
+	pwm_init_state(ctx->pwm, &state);
+	period = ctx->pwm->args.period;
+	state.duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+	state.enabled = pwm ? true : false;
 
-	if (pwm == 0)
-		pwm_disable(ctx->pwm);
-
-	if (ctx->pwm_value == 0) {
-		ret = pwm_enable(ctx->pwm);
-		if (ret)
-			goto exit_set_pwm_err;
-	}
-
-	ctx->pwm_value = pwm;
+	ret = pwm_apply_state(ctx->pwm, &state);
+	if (!ret)
+		ctx->pwm_value = pwm;
 exit_set_pwm_err:
 	mutex_unlock(&ctx->lock);
 	return ret;
@@ -218,10 +209,9 @@ static int pwm_fan_probe(struct platform_device *pdev)
 {
 	struct thermal_cooling_device *cdev;
 	struct pwm_fan_ctx *ctx;
-	struct pwm_args pargs;
 	struct device *hwmon;
-	int duty_cycle;
 	int ret;
+	struct pwm_state state = { };
 
 	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -237,37 +227,25 @@ static int pwm_fan_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ctx);
 
-	/*
-	 * FIXME: pwm_apply_args() should be removed when switching to the
-	 * atomic PWM API.
-	 */
-	pwm_apply_args(ctx->pwm);
-
-	/* Set duty cycle to maximum allowed */
-	pwm_get_args(ctx->pwm, &pargs);
-
-	duty_cycle = pargs.period - 1;
 	ctx->pwm_value = MAX_PWM;
 
-	ret = pwm_config(ctx->pwm, duty_cycle, pargs.period);
+	/* Set duty cycle to maximum allowed and enable PWM output */
+	pwm_init_state(ctx->pwm, &state);
+	state.duty_cycle = ctx->pwm->args.period - 1;
+	state.enabled = true;
+
+	ret = pwm_apply_state(ctx->pwm, &state);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to configure PWM\n");
 		return ret;
 	}
 
-	/* Enbale PWM output */
-	ret = pwm_enable(ctx->pwm);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable PWM\n");
-		return ret;
-	}
-
 	hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
 						       ctx, pwm_fan_groups);
 	if (IS_ERR(hwmon)) {
 		dev_err(&pdev->dev, "Failed to register hwmon device\n");
-		pwm_disable(ctx->pwm);
-		return PTR_ERR(hwmon);
+		ret = PTR_ERR(hwmon);
+		goto err_pwm_disable;
 	}
 
 	ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
@@ -282,14 +260,20 @@ static int pwm_fan_probe(struct platform_device *pdev)
 		if (IS_ERR(cdev)) {
 			dev_err(&pdev->dev,
 				"Failed to register pwm-fan as cooling device");
-			pwm_disable(ctx->pwm);
-			return PTR_ERR(cdev);
+			ret = PTR_ERR(cdev);
+			goto err_pwm_disable;
 		}
 		ctx->cdev = cdev;
 		thermal_cdev_update(cdev);
 	}
 
 	return 0;
+
+err_pwm_disable:
+	state.enabled = false;
+	pwm_apply_state(ctx->pwm, &state);
+
+	return ret;
 }
 
 static int pwm_fan_remove(struct platform_device *pdev)
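
The pwm-fan conversion above moves to the atomic PWM API: duty cycle and enable state are filled into one struct pwm_state and committed with a single pwm_apply_state() call, instead of separate pwm_config()/pwm_enable()/pwm_disable() steps. The duty-cycle mapping itself is unchanged; a small userspace sketch of that arithmetic (the period value is made up for the example):

#include <stdio.h>

#define MAX_PWM	255

/* Same rounding as the driver: DIV_ROUND_UP(pwm * (period - 1), MAX_PWM). */
static unsigned long pwm_to_duty_ns(unsigned long pwm, unsigned long period_ns)
{
	return (pwm * (period_ns - 1) + MAX_PWM - 1) / MAX_PWM;
}

int main(void)
{
	unsigned long period_ns = 40000;	/* 25 kHz, example value only */

	printf("pwm=0   -> %lu ns\n", pwm_to_duty_ns(0, period_ns));	/* 0 */
	printf("pwm=128 -> %lu ns\n", pwm_to_duty_ns(128, period_ns));
	printf("pwm=255 -> %lu ns\n", pwm_to_duty_ns(255, period_ns));	/* period - 1 */
	return 0;
}

A pwm value of 0 maps to a zero duty cycle, which is why the driver also clears state.enabled in that case.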
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 094f948..a586480 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -16,6 +16,7 @@
 
 #include <linux/hwmon.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/scpi_protocol.h>
 #include <linux/slab.h>
@@ -23,6 +24,7 @@
 #include <linux/thermal.h>
 
 struct sensor_data {
+	unsigned int scale;
 	struct scpi_sensor_info info;
 	struct device_attribute dev_attr_input;
 	struct device_attribute dev_attr_label;
@@ -44,6 +46,30 @@ struct scpi_sensors {
 	const struct attribute_group *groups[2];
 };
 
+static const u32 gxbb_scpi_scale[] = {
+	[TEMPERATURE]	= 1,		/* (celsius)		*/
+	[VOLTAGE]	= 1000,		/* (millivolts)		*/
+	[CURRENT]	= 1000,		/* (milliamperes)	*/
+	[POWER]		= 1000000,	/* (microwatts)		*/
+	[ENERGY]	= 1000000,	/* (microjoules)	*/
+};
+
+static const u32 scpi_scale[] = {
+	[TEMPERATURE]	= 1000,		/* (millicelsius)	*/
+	[VOLTAGE]	= 1000,		/* (millivolts)		*/
+	[CURRENT]	= 1000,		/* (milliamperes)	*/
+	[POWER]		= 1000000,	/* (microwatts)		*/
+	[ENERGY]	= 1000000,	/* (microjoules)	*/
+};
+
+static void scpi_scale_reading(u64 *value, struct sensor_data *sensor)
+{
+	if (scpi_scale[sensor->info.class] != sensor->scale) {
+		*value *= scpi_scale[sensor->info.class];
+		do_div(*value, sensor->scale);
+	}
+}
+
 static int scpi_read_temp(void *dev, int *temp)
 {
 	struct scpi_thermal_zone *zone = dev;
@@ -57,6 +83,8 @@ static int scpi_read_temp(void *dev, int *temp)
 	if (ret)
 		return ret;
 
+	scpi_scale_reading(&value, sensor);
+
 	*temp = value;
 	return 0;
 }
@@ -77,6 +105,8 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
 	if (ret)
 		return ret;
 
+	scpi_scale_reading(&value, sensor);
+
 	return sprintf(buf, "%llu\n", value);
 }
 
@@ -94,14 +124,23 @@ static struct thermal_zone_of_device_ops scpi_sensor_ops = {
 	.get_temp = scpi_read_temp,
 };
 
+static const struct of_device_id scpi_of_match[] = {
+	{.compatible = "arm,scpi-sensors", .data = &scpi_scale},
+	{.compatible = "amlogic,meson-gxbb-scpi-sensors", .data = &gxbb_scpi_scale},
+	{},
+};
+MODULE_DEVICE_TABLE(of, scpi_of_match);
+
 static int scpi_hwmon_probe(struct platform_device *pdev)
 {
 	u16 nr_sensors, i;
+	const u32 *scale;
 	int num_temp = 0, num_volt = 0, num_current = 0, num_power = 0;
 	int num_energy = 0;
 	struct scpi_ops *scpi_ops;
 	struct device *hwdev, *dev = &pdev->dev;
 	struct scpi_sensors *scpi_sensors;
+	const struct of_device_id *of_id;
 	int idx, ret;
 
 	scpi_ops = get_scpi_ops();
@@ -131,6 +170,13 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 
 	scpi_sensors->scpi_ops = scpi_ops;
 
+	of_id = of_match_device(scpi_of_match, &pdev->dev);
+	if (!of_id) {
+		dev_err(&pdev->dev, "Unable to initialize scpi-hwmon data\n");
+		return -ENODEV;
+	}
+	scale = of_id->data;
+
 	for (i = 0, idx = 0; i < nr_sensors; i++) {
 		struct sensor_data *sensor = &scpi_sensors->data[idx];
 
@@ -178,6 +224,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 			continue;
 		}
 
+		sensor->scale = scale[sensor->info.class];
+
 		sensor->dev_attr_input.attr.mode = S_IRUGO;
 		sensor->dev_attr_input.show = scpi_show_sensor;
 		sensor->dev_attr_input.attr.name = sensor->input;
@@ -247,12 +295,6 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id scpi_of_match[] = {
-	{.compatible = "arm,scpi-sensors"},
-	{},
-};
-MODULE_DEVICE_TABLE(of, scpi_of_match);
-
 static struct platform_driver scpi_hwmon_platdrv = {
 	.driver = {
 		.name	= "scpi-hwmon",
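
The scpi-hwmon change above rescales raw firmware readings from the per-platform unit (taken from the matched of_device_id data) into the fixed units sysfs expects, and only when the two differ. A standalone sketch of that rescaling using the temperature entries from the tables above (userspace illustration, not driver code):

#include <stdio.h>
#include <stdint.h>

/* hwmon expects millicelsius; the GXBB firmware reports whole celsius. */
#define HWMON_TEMP_SCALE	1000u
#define GXBB_TEMP_SCALE		1u

/* Mirrors scpi_scale_reading(): multiply up to the hwmon unit, then
 * divide by the firmware's unit. Skipped when the scales already match.
 */
static uint64_t scale_reading(uint64_t value, uint32_t hwmon_scale,
			      uint32_t sensor_scale)
{
	if (hwmon_scale != sensor_scale) {
		value *= hwmon_scale;
		value /= sensor_scale;
	}
	return value;
}

int main(void)
{
	/* 48 celsius from GXBB firmware becomes 48000 millicelsius. */
	printf("%llu\n", (unsigned long long)
	       scale_reading(48, HWMON_TEMP_SCALE, GXBB_TEMP_SCALE));
	return 0;
}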
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 130cb21..8d55d6d 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -89,4 +89,18 @@
 	  logging useful software events or data coming from various entities
 	  in the system, possibly running different OSs
 
+config CORESIGHT_CPU_DEBUG
+	tristate "CoreSight CPU Debug driver"
+	depends on ARM || ARM64
+	depends on DEBUG_FS
+	help
+	  This driver provides support for the CoreSight CPU debug module,
+	  which is primarily used to dump sample-based profiling registers
+	  on panic. The driver parses the context registers so the program
+	  counter (PC), secure state, exception level, etc. can be determined
+	  quickly. Before using the debug functionality, the platform needs
+	  to ensure the clock and power domains are enabled properly; see
+	  Documentation/trace/coresight-cpu-debug.txt for a detailed
+	  description and usage examples.
+
 endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index af480d9..433d590 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -16,3 +16,4 @@
 					coresight-etm4x-sysfs.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
 obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
new file mode 100644
index 0000000..64a77e0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2017 Linaro Limited. All rights reserved.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "coresight-priv.h"
+
+#define EDPCSR				0x0A0
+#define EDCIDSR				0x0A4
+#define EDVIDSR				0x0A8
+#define EDPCSR_HI			0x0AC
+#define EDOSLAR				0x300
+#define EDPRCR				0x310
+#define EDPRSR				0x314
+#define EDDEVID1			0xFC4
+#define EDDEVID				0xFC8
+
+#define EDPCSR_PROHIBITED		0xFFFFFFFF
+
+/* bits definition for EDPCSR */
+#define EDPCSR_THUMB			BIT(0)
+#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
+#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)
+
+/* bits definition for EDPRCR */
+#define EDPRCR_COREPURQ			BIT(3)
+#define EDPRCR_CORENPDRQ		BIT(0)
+
+/* bits definition for EDPRSR */
+#define EDPRSR_DLK			BIT(6)
+#define EDPRSR_PU			BIT(0)
+
+/* bits definition for EDVIDSR */
+#define EDVIDSR_NS			BIT(31)
+#define EDVIDSR_E2			BIT(30)
+#define EDVIDSR_E3			BIT(29)
+#define EDVIDSR_HV			BIT(28)
+#define EDVIDSR_VMID			GENMASK(7, 0)
+
+/*
+ * bits definition for EDDEVID1:PSCROffset
+ *
+ * NOTE: armv8 and armv7 have different definition for the register,
+ * so consolidate the bits definition as below:
+ *
+ * 0b0000 - Sample offset applies based on the instruction state, we
+ *          rely on EDDEVID to check if EDPCSR is implemented or not
+ * 0b0001 - No offset applies.
+ * 0b0010 - No offset applies, but do not use in AArch32 mode
+ *
+ */
+#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
+#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
+#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)
+
+/* bits definition for EDDEVID */
+#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
+#define EDDEVID_IMPL_EDPCSR		(0x1)
+#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
+#define EDDEVID_IMPL_FULL		(0x3)
+
+#define DEBUG_WAIT_SLEEP		1000
+#define DEBUG_WAIT_TIMEOUT		32000
+
+struct debug_drvdata {
+	void __iomem	*base;
+	struct device	*dev;
+	int		cpu;
+
+	bool		edpcsr_present;
+	bool		edcidsr_present;
+	bool		edvidsr_present;
+	bool		pc_has_offset;
+
+	u32		edpcsr;
+	u32		edpcsr_hi;
+	u32		edprsr;
+	u32		edvidsr;
+	u32		edcidsr;
+};
+
+static DEFINE_MUTEX(debug_lock);
+static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
+static int debug_count;
+static struct dentry *debug_debugfs_dir;
+
+static bool debug_enable;
+module_param_named(enable, debug_enable, bool, 0600);
+MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
+
+static void debug_os_unlock(struct debug_drvdata *drvdata)
+{
+	/* Unlocks the debug registers */
+	writel_relaxed(0x0, drvdata->base + EDOSLAR);
+
+	/* Make sure the registers are unlocked before accessing */
+	wmb();
+}
+
+/*
+ * According to ARM DDI 0487A.k, the access permission must be
+ * checked before accessing the external debug registers; if either
+ * of the conditions below is met, the debug registers must not be
+ * accessed, to avoid a lockup:
+ *
+ * - the CPU power domain is powered off;
+ * - the OS Double Lock is locked.
+ *
+ * Reading EDPRSR tells us whether either condition applies.
+ */
+static bool debug_access_permitted(struct debug_drvdata *drvdata)
+{
+	/* CPU is powered off */
+	if (!(drvdata->edprsr & EDPRSR_PU))
+		return false;
+
+	/* The OS Double Lock is locked */
+	if (drvdata->edprsr & EDPRSR_DLK)
+		return false;
+
+	return true;
+}
+
+static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
+{
+	u32 edprcr;
+
+try_again:
+
+	/*
+	 * Send request to power management controller and assert
+	 * DBGPWRUPREQ signal; if power management controller has
+	 * sane implementation, it should enable CPU power domain
+	 * in case CPU is in low power state.
+	 */
+	edprcr = readl_relaxed(drvdata->base + EDPRCR);
+	edprcr |= EDPRCR_COREPURQ;
+	writel_relaxed(edprcr, drvdata->base + EDPRCR);
+
+	/* Wait for CPU to be powered up (timeout~=32ms) */
+	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
+			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
+			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
+		/*
+		 * Unfortunately the CPU cannot be powered up, so bail
+		 * out; later accesses to the other registers will not
+		 * be permitted. In this case CPU low power states should
+		 * be disabled to keep the CPU power domain enabled.
+		 */
+		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
+			__func__, drvdata->cpu);
+		return;
+	}
+
+	/*
+	 * At this point the CPU is powered up, so set the no powerdown
+	 * request bit so we don't lose power and emulate power down.
+	 */
+	edprcr = readl_relaxed(drvdata->base + EDPRCR);
+	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
+	writel_relaxed(edprcr, drvdata->base + EDPRCR);
+
+	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
+
+	/* The core power domain was switched off while in use, try again */
+	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
+		goto try_again;
+}
+
+static void debug_read_regs(struct debug_drvdata *drvdata)
+{
+	u32 save_edprcr;
+
+	CS_UNLOCK(drvdata->base);
+
+	/* Unlock os lock */
+	debug_os_unlock(drvdata);
+
+	/* Save EDPRCR register */
+	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);
+
+	/*
+	 * Ensure the CPU power domain is enabled so that the
+	 * registers are accessible.
+	 */
+	debug_force_cpu_powered_up(drvdata);
+
+	if (!debug_access_permitted(drvdata))
+		goto out;
+
+	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
+
+	/*
+	 * As described in ARM DDI 0487A.k, if the processing
+	 * element (PE) is in debug state, or sample-based
+	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
+	 * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
+	 * UNKNOWN, so bail out directly in this case.
+	 */
+	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
+		goto out;
+
+	/*
+	 * A read of the EDPCSR normally has the side-effect of
+	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
+	 * at this point it's safe to read their values.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT))
+		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
+
+	if (drvdata->edcidsr_present)
+		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
+
+	if (drvdata->edvidsr_present)
+		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
+
+out:
+	/* Restore EDPRCR register */
+	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);
+
+	CS_LOCK(drvdata->base);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
+{
+	return (unsigned long)drvdata->edpcsr_hi << 32 |
+	       (unsigned long)drvdata->edpcsr;
+}
+#else
+static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
+{
+	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
+	unsigned long pc;
+
+	pc = (unsigned long)drvdata->edpcsr;
+
+	if (drvdata->pc_has_offset) {
+		arm_inst_offset = 8;
+		thumb_inst_offset = 4;
+	}
+
+	/* Handle thumb instruction */
+	if (pc & EDPCSR_THUMB) {
+		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
+		return pc;
+	}
+
+	/*
+	 * Handle the arm instruction offset; if the arm instruction
+	 * is not 4-byte aligned, the behaviour is implementation
+	 * defined, so keep the original value for this case and
+	 * print a notice.
+	 */
+	if (pc & BIT(1))
+		dev_emerg(drvdata->dev,
+			  "Instruction offset is implementation defined\n");
+	else
+		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
+
+	return pc;
+}
+#endif
+
+static void debug_dump_regs(struct debug_drvdata *drvdata)
+{
+	struct device *dev = drvdata->dev;
+	unsigned long pc;
+
+	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
+		  drvdata->edprsr,
+		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
+		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
+
+	if (!debug_access_permitted(drvdata)) {
+		dev_emerg(dev, "No permission to access debug registers!\n");
+		return;
+	}
+
+	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
+		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
+		return;
+	}
+
+	pc = debug_adjust_pc(drvdata);
+	dev_emerg(dev, " EDPCSR:  [<%p>] %pS\n", (void *)pc, (void *)pc);
+
+	if (drvdata->edcidsr_present)
+		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
+
+	if (drvdata->edvidsr_present)
+		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
+			  drvdata->edvidsr,
+			  drvdata->edvidsr & EDVIDSR_NS ?
+			  "Non-secure" : "Secure",
+			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
+				(drvdata->edvidsr & EDVIDSR_E2 ?
+				 "EL2" : "EL1/0"),
+			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
+			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
+}
+
+static void debug_init_arch_data(void *info)
+{
+	struct debug_drvdata *drvdata = info;
+	u32 mode, pcsr_offset;
+	u32 eddevid, eddevid1;
+
+	CS_UNLOCK(drvdata->base);
+
+	/* Read device info */
+	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
+	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
+
+	CS_LOCK(drvdata->base);
+
+	/* Parse implementation feature */
+	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
+	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
+
+	drvdata->edpcsr_present  = false;
+	drvdata->edcidsr_present = false;
+	drvdata->edvidsr_present = false;
+	drvdata->pc_has_offset   = false;
+
+	switch (mode) {
+	case EDDEVID_IMPL_FULL:
+		drvdata->edvidsr_present = true;
+		/* Fall through */
+	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
+		drvdata->edcidsr_present = true;
+		/* Fall through */
+	case EDDEVID_IMPL_EDPCSR:
+		/*
+		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
+		 * the PC sampling value carries an offset; if it reads
+		 * back as 0x2, this means the debug module does not
+		 * sample the instruction set state when an armv8 CPU is
+		 * in AArch32 state.
+		 */
+		drvdata->edpcsr_present =
+			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
+			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
+
+		drvdata->pc_has_offset =
+			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Dump out information on panic.
+ */
+static int debug_notifier_call(struct notifier_block *self,
+			       unsigned long v, void *p)
+{
+	int cpu;
+	struct debug_drvdata *drvdata;
+
+	mutex_lock(&debug_lock);
+
+	/* Bail out if the functionality is disabled */
+	if (!debug_enable)
+		goto skip_dump;
+
+	pr_emerg("ARM external debug module:\n");
+
+	for_each_possible_cpu(cpu) {
+		drvdata = per_cpu(debug_drvdata, cpu);
+		if (!drvdata)
+			continue;
+
+		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
+
+		debug_read_regs(drvdata);
+		debug_dump_regs(drvdata);
+	}
+
+skip_dump:
+	mutex_unlock(&debug_lock);
+	return 0;
+}
+
+static struct notifier_block debug_notifier = {
+	.notifier_call = debug_notifier_call,
+};
+
+static int debug_enable_func(void)
+{
+	struct debug_drvdata *drvdata;
+	int cpu, ret = 0;
+	cpumask_t mask;
+
+	/*
+	 * Use a cpumask to track which debug power domains have
+	 * been powered on, so they can be rolled back on failure.
+	 */
+	cpumask_clear(&mask);
+
+	for_each_possible_cpu(cpu) {
+		drvdata = per_cpu(debug_drvdata, cpu);
+		if (!drvdata)
+			continue;
+
+		ret = pm_runtime_get_sync(drvdata->dev);
+		if (ret < 0)
+			goto err;
+		else
+			cpumask_set_cpu(cpu, &mask);
+	}
+
+	return 0;
+
+err:
+	/*
+	 * If pm_runtime_get_sync() failed, roll back all the other
+	 * CPUs that were enabled before the failure.
+	 */
+	for_each_cpu(cpu, &mask) {
+		drvdata = per_cpu(debug_drvdata, cpu);
+		pm_runtime_put_noidle(drvdata->dev);
+	}
+
+	return ret;
+}
+
+static int debug_disable_func(void)
+{
+	struct debug_drvdata *drvdata;
+	int cpu, ret, err = 0;
+
+	/*
+	 * Disable the debug power domains; if an error is encountered,
+	 * record it but keep iterating over the remaining CPUs so they
+	 * are still disabled.
+	 */
+	for_each_possible_cpu(cpu) {
+		drvdata = per_cpu(debug_drvdata, cpu);
+		if (!drvdata)
+			continue;
+
+		ret = pm_runtime_put(drvdata->dev);
+		if (ret < 0)
+			err = ret;
+	}
+
+	return err;
+}
+
+static ssize_t debug_func_knob_write(struct file *f,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u8 val;
+	int ret;
+
+	ret = kstrtou8_from_user(buf, count, 2, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&debug_lock);
+
+	if (val == debug_enable)
+		goto out;
+
+	if (val)
+		ret = debug_enable_func();
+	else
+		ret = debug_disable_func();
+
+	if (ret) {
+		pr_err("%s: unable to %s debug function: %d\n",
+		       __func__, val ? "enable" : "disable", ret);
+		goto err;
+	}
+
+	debug_enable = val;
+out:
+	ret = count;
+err:
+	mutex_unlock(&debug_lock);
+	return ret;
+}
+
+static ssize_t debug_func_knob_read(struct file *f,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+	char buf[3];
+
+	mutex_lock(&debug_lock);
+	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
+	mutex_unlock(&debug_lock);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
+	return ret;
+}
+
+static const struct file_operations debug_func_knob_fops = {
+	.open	= simple_open,
+	.read	= debug_func_knob_read,
+	.write	= debug_func_knob_write,
+};
+
+static int debug_func_init(void)
+{
+	struct dentry *file;
+	int ret;
+
+	/* Create debugfs node */
+	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
+	if (!debug_debugfs_dir) {
+		pr_err("%s: unable to create debugfs directory\n", __func__);
+		return -ENOMEM;
+	}
+
+	file = debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
+				   &debug_func_knob_fops);
+	if (!file) {
+		pr_err("%s: unable to create enable knob file\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Register function to be called for panic */
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+					     &debug_notifier);
+	if (ret) {
+		pr_err("%s: unable to register notifier: %d\n",
+		       __func__, ret);
+		goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(debug_debugfs_dir);
+	return ret;
+}
+
+static void debug_func_exit(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &debug_notifier);
+	debugfs_remove_recursive(debug_debugfs_dir);
+}
+
+static int debug_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	void __iomem *base;
+	struct device *dev = &adev->dev;
+	struct debug_drvdata *drvdata;
+	struct resource *res = &adev->res;
+	struct device_node *np = adev->dev.of_node;
+	int ret;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
+	if (per_cpu(debug_drvdata, drvdata->cpu)) {
+		dev_err(dev, "CPU%d drvdata has already been initialized\n",
+			drvdata->cpu);
+		return -EBUSY;
+	}
+
+	drvdata->dev = &adev->dev;
+	amba_set_drvdata(adev, drvdata);
+
+	/* Validity for the resource is already checked by the AMBA core */
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	drvdata->base = base;
+
+	get_online_cpus();
+	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
+	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
+				       drvdata, 1);
+	put_online_cpus();
+
+	if (ret) {
+		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
+		goto err;
+	}
+
+	if (!drvdata->edpcsr_present) {
+		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
+			drvdata->cpu);
+		ret = -ENXIO;
+		goto err;
+	}
+
+	if (!debug_count++) {
+		ret = debug_func_init();
+		if (ret)
+			goto err_func_init;
+	}
+
+	mutex_lock(&debug_lock);
+	/* Turn off debug power domain if debugging is disabled */
+	if (!debug_enable)
+		pm_runtime_put(dev);
+	mutex_unlock(&debug_lock);
+
+	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
+	return 0;
+
+err_func_init:
+	debug_count--;
+err:
+	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+	return ret;
+}
+
+static int debug_remove(struct amba_device *adev)
+{
+	struct device *dev = &adev->dev;
+	struct debug_drvdata *drvdata = amba_get_drvdata(adev);
+
+	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+
+	mutex_lock(&debug_lock);
+	/* Turn off debug power domain before rmmod the module */
+	if (debug_enable)
+		pm_runtime_put(dev);
+	mutex_unlock(&debug_lock);
+
+	if (!--debug_count)
+		debug_func_exit();
+
+	return 0;
+}
+
+static struct amba_id debug_ids[] = {
+	{       /* Debug for Cortex-A53 */
+		.id	= 0x000bbd03,
+		.mask	= 0x000fffff,
+	},
+	{       /* Debug for Cortex-A57 */
+		.id	= 0x000bbd07,
+		.mask	= 0x000fffff,
+	},
+	{       /* Debug for Cortex-A72 */
+		.id	= 0x000bbd08,
+		.mask	= 0x000fffff,
+	},
+	{ 0, 0 },
+};
+
+static struct amba_driver debug_driver = {
+	.drv = {
+		.name   = "coresight-cpu-debug",
+		.suppress_bind_attrs = true,
+	},
+	.probe		= debug_probe,
+	.remove		= debug_remove,
+	.id_table	= debug_ids,
+};
+
+module_amba_driver(debug_driver);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
+MODULE_LICENSE("GPL");
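
On 32-bit parts, debug_adjust_pc() above compensates for the sampled-PC offset implied by EDDEVID1.PCSROffset == 0b0000: 8 bytes in ARM state, 4 in Thumb state, with bit 0 of EDPCSR flagging Thumb. A standalone sketch of that correction for the pc_has_offset case (masks copied from the driver; the implementation-defined bit(1) case and the no-offset case are left out for brevity):

#include <stdio.h>
#include <stdint.h>

#define EDPCSR_THUMB		0x1u
#define EDPCSR_THUMB_INST_MASK	0xfffffffeu	/* GENMASK(31, 1) */
#define EDPCSR_ARM_INST_MASK	0xfffffffcu	/* GENMASK(31, 2) */

/* Simplified mirror of the 32-bit debug_adjust_pc() path when
 * pc_has_offset is set; illustrative userspace code only.
 */
static uint32_t adjust_pc(uint32_t edpcsr)
{
	if (edpcsr & EDPCSR_THUMB)
		return (edpcsr & EDPCSR_THUMB_INST_MASK) - 4;
	return (edpcsr & EDPCSR_ARM_INST_MASK) - 8;
}

int main(void)
{
	/* ARM-state sample: the raw value points 8 bytes past the PC. */
	printf("0x%08x\n", adjust_pc(0xc0101008));	/* 0xc0101000 */
	/* Thumb-state sample (bit 0 set): the offset is 4 bytes. */
	printf("0x%08x\n", adjust_pc(0xc0102005));	/* 0xc0102000 */
	return 0;
}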
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 979ea6e..d5b9642 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -375,7 +375,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
 
 	/*
 	 * Entries should be aligned to the frame size.  If they are not
-	 * go back to the last alignement point to give decoding tools a
+	 * go back to the last alignment point to give decoding tools a
 	 * chance to fix things.
 	 */
 	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
@@ -675,11 +675,8 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
 
 	drvdata->buf = devm_kzalloc(dev,
 				    drvdata->buffer_depth * 4, GFP_KERNEL);
-	if (!drvdata->buf) {
-		dev_err(dev, "Failed to allocate %u bytes for buffer data\n",
-			drvdata->buffer_depth * 4);
+	if (!drvdata->buf)
 		return -ENOMEM;
-	}
 
 	desc.type = CORESIGHT_DEV_TYPE_SINK;
 	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 288a423..8f546f5 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -201,6 +201,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 	event_data = alloc_event_data(event_cpu);
 	if (!event_data)
 		return NULL;
+	INIT_WORK(&event_data->work, free_event_data);
 
 	/*
 	 * In theory nothing prevent tracers in a trace session from being
@@ -217,8 +218,6 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 	if (!sink)
 		goto err;
 
-	INIT_WORK(&event_data->work, free_event_data);
-
 	mask = &event_data->mask;
 
 	/* Setup the path for each CPU in a trace session */
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index a51b6b6..93ee8fc 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -587,7 +587,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
 	 * after cpu online mask indicates the cpu is offline but before the
 	 * DYING hotplug callback is serviced by the ETM driver.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	spin_lock(&drvdata->spinlock);
 
 	/*
@@ -597,7 +597,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
 	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
 	spin_unlock(&drvdata->spinlock);
-	put_online_cpus();
+	cpus_read_unlock();
 
 	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
@@ -795,7 +795,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 
 	drvdata->cpu = pdata ? pdata->cpu : 0;
 
-	get_online_cpus();
+	cpus_read_lock();
 	etmdrvdata[drvdata->cpu] = drvdata;
 
 	if (smp_call_function_single(drvdata->cpu,
@@ -803,17 +803,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(dev, "ETM arch init failed\n");
 
 	if (!etm_count++) {
-		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
-					  "arm/coresight:starting",
-					  etm_starting_cpu, etm_dying_cpu);
-		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-						"arm/coresight:online",
-						etm_online_cpu, NULL);
+		cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+						     "arm/coresight:starting",
+						     etm_starting_cpu, etm_dying_cpu);
+		ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+							   "arm/coresight:online",
+							   etm_online_cpu, NULL);
 		if (ret < 0)
 			goto err_arch_supported;
 		hp_online = ret;
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 
 	if (etm_arch_supported(drvdata->arch) == false) {
 		ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index d1340fb..532adc9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -371,7 +371,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
 	 * after cpu online mask indicates the cpu is offline but before the
 	 * DYING hotplug callback is serviced by the ETM driver.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	spin_lock(&drvdata->spinlock);
 
 	/*
@@ -381,7 +381,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
 	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
 
 	spin_unlock(&drvdata->spinlock);
-	put_online_cpus();
+	cpus_read_unlock();
 
 	dev_info(drvdata->dev, "ETM tracing disabled\n");
 }
@@ -982,7 +982,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 
 	drvdata->cpu = pdata ? pdata->cpu : 0;
 
-	get_online_cpus();
+	cpus_read_lock();
 	etmdrvdata[drvdata->cpu] = drvdata;
 
 	if (smp_call_function_single(drvdata->cpu,
@@ -990,18 +990,18 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(dev, "ETM arch init failed\n");
 
 	if (!etm4_count++) {
-		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
-					  "arm/coresight4:starting",
-					  etm4_starting_cpu, etm4_dying_cpu);
-		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-						"arm/coresight4:online",
-						etm4_online_cpu, NULL);
+		cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+						     "arm/coresight4:starting",
+						     etm4_starting_cpu, etm4_dying_cpu);
+		ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+							   "arm/coresight4:online",
+							   etm4_online_cpu, NULL);
 		if (ret < 0)
 			goto err_arch_supported;
 		hp_online = ret;
 	}
 
-	put_online_cpus();
+	cpus_read_unlock();
 
 	if (etm4_arch_supported(drvdata->arch) == false) {
 		ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index aec61a6..e3b9fb8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -166,9 +166,6 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
 	if (!used)
 		kfree(buf);
 
-	if (!ret)
-		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
-
 	return ret;
 }
 
@@ -204,15 +201,27 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
 
 static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
 {
+	int ret;
+	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
 	switch (mode) {
 	case CS_MODE_SYSFS:
-		return tmc_enable_etf_sink_sysfs(csdev);
+		ret = tmc_enable_etf_sink_sysfs(csdev);
+		break;
 	case CS_MODE_PERF:
-		return tmc_enable_etf_sink_perf(csdev);
+		ret = tmc_enable_etf_sink_perf(csdev);
+		break;
+	/* We shouldn't be here */
+	default:
+		ret = -EINVAL;
+		break;
 	}
 
-	/* We shouldn't be here */
-	return -EINVAL;
+	if (ret)
+		return ret;
+
+	dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+	return 0;
 }
 
 static void tmc_disable_etf_sink(struct coresight_device *csdev)
@@ -273,7 +282,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
 	drvdata->mode = CS_MODE_DISABLED;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	dev_info(drvdata->dev, "TMC disabled\n");
+	dev_info(drvdata->dev, "TMC-ETF disabled\n");
 }
 
 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index d8517d2..8644887 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -362,6 +362,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 		desc.type = CORESIGHT_DEV_TYPE_SINK;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
 		desc.ops = &tmc_etr_cs_ops;
+		/*
+		 * ETR configuration uses a 40-bit AXI master in place of
+		 * the embedded SRAM of ETB/ETF.
+		 */
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+		if (ret)
+			goto out;
 	} else {
 		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
 		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 0c37356..6a0202b7 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,14 +253,22 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 	return 0;
 }
 
-static void coresight_disable_source(struct coresight_device *csdev)
+/**
+ *  coresight_disable_source - Drop the reference count by 1 and disable
+ *  the device if there are no users left.
+ *
+ *  @csdev - The coresight device to disable
+ *
+ *  Returns true if the device has been disabled.
+ */
+static bool coresight_disable_source(struct coresight_device *csdev)
 {
 	if (atomic_dec_return(csdev->refcnt) == 0) {
-		if (source_ops(csdev)->disable) {
+		if (source_ops(csdev)->disable)
 			source_ops(csdev)->disable(csdev, NULL);
-			csdev->enable = false;
-		}
+		csdev->enable = false;
 	}
+	return !csdev->enable;
 }
 
 void coresight_disable_path(struct list_head *path)
@@ -550,6 +558,9 @@ int coresight_enable(struct coresight_device *csdev)
 	int cpu, ret = 0;
 	struct coresight_device *sink;
 	struct list_head *path;
+	enum coresight_dev_subtype_source subtype;
+
+	subtype = csdev->subtype.source_subtype;
 
 	mutex_lock(&coresight_mutex);
 
@@ -557,8 +568,16 @@ int coresight_enable(struct coresight_device *csdev)
 	if (ret)
 		goto out;
 
-	if (csdev->enable)
+	if (csdev->enable) {
+		/*
+		 * There could be multiple applications driving the software
+		 * source. So keep the refcount for each such user when the
+		 * source is already enabled.
+		 */
+		if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+			atomic_inc(csdev->refcnt);
 		goto out;
+	}
 
 	/*
 	 * Search for a valid sink for this session but don't reset the
@@ -585,7 +604,7 @@ int coresight_enable(struct coresight_device *csdev)
 	if (ret)
 		goto err_source;
 
-	switch (csdev->subtype.source_subtype) {
+	switch (subtype) {
 	case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
 		/*
 		 * When working from sysFS it is important to keep track
@@ -629,7 +648,7 @@ void coresight_disable(struct coresight_device *csdev)
 	if (ret)
 		goto out;
 
-	if (!csdev->enable)
+	if (!csdev->enable || !coresight_disable_source(csdev))
 		goto out;
 
 	switch (csdev->subtype.source_subtype) {
@@ -647,7 +666,6 @@ void coresight_disable(struct coresight_device *csdev)
 		break;
 	}
 
-	coresight_disable_source(csdev);
 	coresight_disable_path(path);
 	coresight_release_path(path);
 
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 09142e9..a187941 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -52,7 +52,7 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
 			       endpoint, of_dev_node_match);
 }
 
-static void of_coresight_get_ports(struct device_node *node,
+static void of_coresight_get_ports(const struct device_node *node,
 				   int *nr_inport, int *nr_outport)
 {
 	struct device_node *ep = NULL;
@@ -101,14 +101,40 @@ static int of_coresight_alloc_memory(struct device *dev,
 	return 0;
 }
 
-struct coresight_platform_data *of_get_coresight_platform_data(
-				struct device *dev, struct device_node *node)
+int of_coresight_get_cpu(const struct device_node *node)
 {
-	int i = 0, ret = 0, cpu;
+	int cpu;
+	bool found;
+	struct device_node *dn, *np;
+
+	dn = of_parse_phandle(node, "cpu", 0);
+
+	/* Affinity defaults to CPU0 */
+	if (!dn)
+		return 0;
+
+	for_each_possible_cpu(cpu) {
+		np = of_cpu_device_node_get(cpu);
+		found = (dn == np);
+		of_node_put(np);
+		if (found)
+			break;
+	}
+	of_node_put(dn);
+
+	/* Affinity to CPU0 if no cpu nodes are found */
+	return found ? cpu : 0;
+}
+EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
+
+struct coresight_platform_data *
+of_get_coresight_platform_data(struct device *dev,
+			       const struct device_node *node)
+{
+	int i = 0, ret = 0;
 	struct coresight_platform_data *pdata;
 	struct of_endpoint endpoint, rendpoint;
 	struct device *rdev;
-	struct device_node *dn;
 	struct device_node *ep = NULL;
 	struct device_node *rparent = NULL;
 	struct device_node *rport = NULL;
@@ -175,16 +201,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
 		} while (ep);
 	}
 
-	/* Affinity defaults to CPU0 */
-	pdata->cpu = 0;
-	dn = of_parse_phandle(node, "cpu", 0);
-	for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
-		if (dn == of_get_cpu_node(cpu, NULL)) {
-			pdata->cpu = cpu;
-			break;
-		}
-	}
-	of_node_put(dn);
+	pdata->cpu = of_coresight_get_cpu(node);
 
 	return pdata;
 }
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 7563ece..8da567a 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,6 @@ static int intel_th_remove(struct device *dev)
 
 static struct bus_type intel_th_bus = {
 	.name		= "intel_th",
-	.dev_attrs	= NULL,
 	.match		= intel_th_match,
 	.probe		= intel_th_probe,
 	.remove		= intel_th_remove,
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 95ed171..54a47b4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 		 * the first read operation, otherwise the first read cost
 		 * one extra clock cycle.
 		 */
-		temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
 		temp |= I2CR_MTX;
-		writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 	}
 	msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
 
@@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
 				 * the first read operation, otherwise the first read cost
 				 * one extra clock cycle.
 				 */
-				temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+				temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
 				temp |= I2CR_MTX;
-				writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+				imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
 			}
 		} else if (i == (msgs->len - 2)) {
 			dev_dbg(&i2c_imx->adapter.dev,
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f573448..e98e44e 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
 
 	/* unmap the data buffer */
 	if (dma_size != 0)
-		dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+		dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
 
 	if (unlikely(!time_left)) {
 		dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 214bf28..8be3e6c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	rcar_i2c_write(priv, ICFBSCR, TCYC06);
 
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
-			 priv->msg->len, priv->dma_direction);
+			 sg_dma_len(&priv->sg), priv->dma_direction);
 
 	priv->dma_direction = DMA_NONE;
 }
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 1e160fc..2c64d0e 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -30,6 +30,19 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-mux-gpio.
 
+config I2C_MUX_GPMUX
+	tristate "General Purpose I2C multiplexer"
+	select MULTIPLEXER
+	depends on OF || COMPILE_TEST
+	help
+	  If you say yes to this option, support will be included for a
+	  general purpose I2C multiplexer. This driver provides access to
+	  I2C busses connected through a MUX, which in turn is controlled
+	  by a MUX-controller from the MUX subsystem.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-mux-gpmux.
+
 config I2C_MUX_LTC4306
 	tristate "LTC LTC4306/5 I2C multiplexer"
 	select GPIOLIB
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index ff7618c..4a67d31 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_I2C_DEMUX_PINCTRL)		+= i2c-demux-pinctrl.o
 
 obj-$(CONFIG_I2C_MUX_GPIO)	+= i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_GPMUX)	+= i2c-mux-gpmux.o
 obj-$(CONFIG_I2C_MUX_LTC4306)	+= i2c-mux-ltc4306.o
 obj-$(CONFIG_I2C_MUX_MLXCPLD)	+= i2c-mux-mlxcpld.o
 obj-$(CONFIG_I2C_MUX_PCA9541)	+= i2c-mux-pca9541.o
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
new file mode 100644
index 0000000..92cf5f4
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -0,0 +1,173 @@
+/*
+ * General Purpose I2C multiplexer
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct mux {
+	struct mux_control *control;
+
+	bool do_not_deselect;
+};
+
+static int i2c_mux_select(struct i2c_mux_core *muxc, u32 chan)
+{
+	struct mux *mux = i2c_mux_priv(muxc);
+	int ret;
+
+	ret = mux_control_select(mux->control, chan);
+	mux->do_not_deselect = ret < 0;
+
+	return ret;
+}
+
+static int i2c_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+	struct mux *mux = i2c_mux_priv(muxc);
+
+	if (mux->do_not_deselect)
+		return 0;
+
+	return mux_control_deselect(mux->control);
+}
+
+static struct i2c_adapter *mux_parent_adapter(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *parent_np;
+	struct i2c_adapter *parent;
+
+	parent_np = of_parse_phandle(np, "i2c-parent", 0);
+	if (!parent_np) {
+		dev_err(dev, "Cannot parse i2c-parent\n");
+		return ERR_PTR(-ENODEV);
+	}
+	parent = of_find_i2c_adapter_by_node(parent_np);
+	of_node_put(parent_np);
+	if (!parent)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return parent;
+}
+
+static const struct of_device_id i2c_mux_of_match[] = {
+	{ .compatible = "i2c-mux", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_of_match);
+
+static int i2c_mux_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *child;
+	struct i2c_mux_core *muxc;
+	struct mux *mux;
+	struct i2c_adapter *parent;
+	int children;
+	int ret;
+
+	if (!np)
+		return -ENODEV;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux)
+		return -ENOMEM;
+
+	mux->control = devm_mux_control_get(dev, NULL);
+	if (IS_ERR(mux->control)) {
+		if (PTR_ERR(mux->control) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get control-mux\n");
+		return PTR_ERR(mux->control);
+	}
+
+	parent = mux_parent_adapter(dev);
+	if (IS_ERR(parent)) {
+		if (PTR_ERR(parent) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get i2c-parent adapter\n");
+		return PTR_ERR(parent);
+	}
+
+	children = of_get_child_count(np);
+
+	muxc = i2c_mux_alloc(parent, dev, children, 0, 0,
+			     i2c_mux_select, i2c_mux_deselect);
+	if (!muxc) {
+		ret = -ENOMEM;
+		goto err_parent;
+	}
+	muxc->priv = mux;
+
+	platform_set_drvdata(pdev, muxc);
+
+	muxc->mux_locked = of_property_read_bool(np, "mux-locked");
+
+	for_each_child_of_node(np, child) {
+		u32 chan;
+
+		ret = of_property_read_u32(child, "reg", &chan);
+		if (ret < 0) {
+			dev_err(dev, "no reg property for node '%s'\n",
+				child->name);
+			goto err_children;
+		}
+
+		if (chan >= mux_control_states(mux->control)) {
+			dev_err(dev, "invalid reg %u\n", chan);
+			ret = -EINVAL;
+			goto err_children;
+		}
+
+		ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
+		if (ret)
+			goto err_children;
+	}
+
+	dev_info(dev, "%d-port mux on %s adapter\n", children, parent->name);
+
+	return 0;
+
+err_children:
+	i2c_mux_del_adapters(muxc);
+err_parent:
+	i2c_put_adapter(parent);
+
+	return ret;
+}
+
+static int i2c_mux_remove(struct platform_device *pdev)
+{
+	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+
+	i2c_mux_del_adapters(muxc);
+	i2c_put_adapter(muxc->parent);
+
+	return 0;
+}
+
+static struct platform_driver i2c_mux_driver = {
+	.probe	= i2c_mux_probe,
+	.remove	= i2c_mux_remove,
+	.driver	= {
+		.name	= "i2c-mux-gpmux",
+		.of_match_table = i2c_mux_of_match,
+	},
+};
+module_platform_driver(i2c_mux_driver);
+
+MODULE_DESCRIPTION("General Purpose I2C multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 5901937..14d1e7d 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -93,7 +93,6 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 	int error;
 
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_MISC;
 	rq->special = (char *)pc;
 
@@ -200,7 +199,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 	memset(sense, 0, sizeof(*sense));
 
 	blk_rq_init(rq->q, sense_rq);
-	scsi_req_init(sense_rq);
+	scsi_req_init(req);
 
 	err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
 			      GFP_NOIO);
@@ -273,7 +272,7 @@ void ide_retry_pc(ide_drive_t *drive)
 	ide_requeue_and_plug(drive, failed_rq);
 	if (ide_queue_sense_rq(drive, pc)) {
 		blk_start_request(failed_rq);
-		ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+		ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
 	}
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -437,7 +436,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
 	/* No more interrupts */
 	if ((stat & ATA_DRQ) == 0) {
-		int uptodate, error;
+		int uptodate;
+		blk_status_t error;
 
 		debug_log("Packet command completed, %d bytes transferred\n",
 			  blk_rq_bytes(rq));
@@ -490,7 +490,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
 		if (ata_misc_request(rq)) {
 			scsi_req(rq)->result = 0;
-			error = 0;
+			error = BLK_STS_OK;
 		} else {
 
 			if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
@@ -498,7 +498,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 					scsi_req(rq)->result = -EIO;
 			}
 
-			error = uptodate ? 0 : -EIO;
+			error = uptodate ? BLK_STS_OK : BLK_STS_IOERR;
 		}
 
 		ide_complete_rq(drive, error, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 07e5ff3..81e18f96 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -228,7 +228,7 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
 		scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
 		cdrom_analyze_sense_data(drive, failed);
 
-		if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
+		if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed)))
 			BUG();
 	} else
 		cdrom_analyze_sense_data(drive, NULL);
@@ -438,7 +438,6 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 
 		rq = blk_get_request(drive->queue,
 			write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,  __GFP_RECLAIM);
-		scsi_req_init(rq);
 		memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
 		ide_req(rq)->type = ATA_PRIV_PC;
 		rq->rq_flags |= rq_flags;
@@ -508,7 +507,7 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
 		nr_bytes -= cmd->last_xfer_len;
 
 	if (nr_bytes > 0) {
-		ide_complete_rq(drive, 0, nr_bytes);
+		ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
 		return true;
 	}
 
@@ -674,7 +673,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 out_end:
 	if (blk_rq_is_scsi(rq) && rc == 0) {
 		scsi_req(rq)->resid_len = 0;
-		blk_end_request_all(rq, 0);
+		blk_end_request_all(rq, BLK_STS_OK);
 		hwif->rq = NULL;
 	} else {
 		if (sense && uptodate)
@@ -699,7 +698,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 				scsi_req(rq)->resid_len += cmd->last_xfer_len;
 		}
 
-		ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
+		ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
 
 		if (sense && rc == 2)
 			ide_error(drive, "request sense failure", stat);
@@ -844,7 +843,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 	if (nsectors == 0)
 		nsectors = 1;
 
-	ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+	ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9);
 
 	return ide_stopped;
 }
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 55cd736..9d26c97 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -304,7 +304,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 	int ret;
 
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_MISC;
 	rq->rq_flags = RQF_QUIET;
 	blk_execute_rq(drive->queue, cd->disk, rq, 0);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 9b69c32..ef7c8c4 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -166,7 +166,6 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
 		return setting->set(drive, arg);
 
 	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_MISC;
 	scsi_req(rq)->cmd_len = 5;
 	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 7c06237..241983d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -478,7 +478,6 @@ static int set_multcount(ide_drive_t *drive, int arg)
 		return -EBUSY;
 
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
 	drive->mult_req = arg;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 51c8122..54d4d78 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -104,7 +104,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
 				ide_finish_cmd(drive, cmd, stat);
 			else
-				ide_complete_rq(drive, 0,
+				ide_complete_rq(drive, BLK_STS_OK,
 						blk_rq_sectors(cmd->rq) << 9);
 			return ide_stopped;
 		}
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 4b7ffd7..47d5f337 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -135,7 +135,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 			return ide_stopped;
 		}
 		scsi_req(rq)->result = err;
-		ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+		ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
 		return ide_stopped;
 	}
 
@@ -143,7 +143,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 }
 EXPORT_SYMBOL_GPL(ide_error);
 
-static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
 {
 	struct request *rq = drive->hwif->rq;
 
@@ -151,7 +151,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
 		if (err <= 0 && scsi_req(rq)->result == 0)
 			scsi_req(rq)->result = -EIO;
-		ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
+		ide_complete_rq(drive, err, blk_rq_bytes(rq));
 	}
 }
 
@@ -191,7 +191,7 @@ static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
 	}
 	/* done polling */
 	hwif->polling = 0;
-	ide_complete_drive_reset(drive, 0);
+	ide_complete_drive_reset(drive, BLK_STS_OK);
 	return ide_stopped;
 }
 
@@ -225,7 +225,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	const struct ide_port_ops *port_ops = hwif->port_ops;
 	u8 tmp;
-	int err = 0;
+	blk_status_t err = BLK_STS_OK;
 
 	if (port_ops && port_ops->reset_poll) {
 		err = port_ops->reset_poll(drive);
@@ -247,7 +247,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
 		printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
 			hwif->name, tmp);
 		drive->failures++;
-		err = -EIO;
+		err = BLK_STS_IOERR;
 	} else  {
 		tmp = ide_read_error(drive);
 
@@ -257,7 +257,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
 		} else {
 			ide_reset_report_error(hwif, tmp);
 			drive->failures++;
-			err = -EIO;
+			err = BLK_STS_IOERR;
 		}
 	}
 out:
@@ -392,7 +392,7 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
 
 	if (io_ports->ctl_addr == 0) {
 		spin_unlock_irqrestore(&hwif->lock, flags);
-		ide_complete_drive_reset(drive, -ENXIO);
+		ide_complete_drive_reset(drive, BLK_STS_IOERR);
 		return ide_stopped;
 	}
 
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8ac6048..627b1f6 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -143,7 +143,7 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
 
 		drive->failed_pc = NULL;
 		drive->pc_callback(drive, 0);
-		ide_complete_rq(drive, -EIO, done);
+		ide_complete_rq(drive, BLK_STS_IOERR, done);
 		return ide_stopped;
 	}
 
@@ -248,7 +248,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 
 		if (ata_misc_request(rq)) {
 			scsi_req(rq)->result = 0;
-			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+			ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
 			return ide_stopped;
 		} else
 			goto out_end;
@@ -303,7 +303,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 	drive->failed_pc = NULL;
 	if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
 		scsi_req(rq)->result = -EIO;
-	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
 	return ide_stopped;
 }
 
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 323af72..3a234701 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -54,7 +54,7 @@
 #include <linux/uaccess.h>
 #include <asm/io.h>
 
-int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	       unsigned int nr_bytes)
 {
 	/*
@@ -112,7 +112,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 	}
 }
 
-int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
+int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->rq;
@@ -122,7 +122,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 	 * if failfast is set on a request, override number of sectors
 	 * and complete the whole request right now
 	 */
-	if (blk_noretry_request(rq) && error <= 0)
+	if (blk_noretry_request(rq) && error)
 		nr_bytes = blk_rq_sectors(rq) << 9;
 
 	rc = ide_end_rq(drive, rq, error, nr_bytes);
@@ -149,7 +149,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 			scsi_req(rq)->result = -EIO;
 	}
 
-	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -272,7 +272,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
  	printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
 	scsi_req(rq)->result = 0;
-	ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
 
  	return ide_stopped;
 }
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 8c0d172..3661abb 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -126,7 +126,6 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 		struct request *rq;
 
 		rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-		scsi_req_init(rq);
 		ide_req(rq)->type = ATA_PRIV_TASKFILE;
 		blk_execute_rq(drive->queue, NULL, rq, 0);
 		err = scsi_req(rq)->result ? -EIO : 0;
@@ -224,7 +223,6 @@ static int generic_drive_reset(ide_drive_t *drive)
 	int ret = 0;
 
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_MISC;
 	scsi_req(rq)->cmd_len = 1;
 	scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 94e3107..1f264d5 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -32,7 +32,6 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	spin_unlock_irq(&hwif->lock);
 
 	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
@@ -48,7 +47,6 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	 * timeout has expired, so power management will be reenabled.
 	 */
 	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
-	scsi_req_init(rq);
 	if (IS_ERR(rq))
 		goto out;
 
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0977fc1..544f02d 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -19,7 +19,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -40,7 +39,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	return ret;
 }
 
-static void ide_end_sync_rq(struct request *rq, int error)
+static void ide_end_sync_rq(struct request *rq, blk_status_t error)
 {
 	complete(rq->end_io_data);
 }
@@ -57,7 +56,7 @@ static int ide_pm_execute_rq(struct request *rq)
 	if (unlikely(blk_queue_dying(q))) {
 		rq->rq_flags |= RQF_QUIET;
 		scsi_req(rq)->result = -ENXIO;
-		__blk_end_request_all(rq, 0);
+		__blk_end_request_all(rq, BLK_STS_OK);
 		spin_unlock_irq(q->queue_lock);
 		return -ENXIO;
 	}
@@ -91,7 +90,6 @@ int generic_ide_resume(struct device *dev)
 
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
 	rq->rq_flags |= RQF_PREEMPT;
 	rq->special = &rqpm;
@@ -235,7 +233,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 
 	drive->hwif->rq = NULL;
 
-	if (blk_end_request(rq, 0, 0))
+	if (blk_end_request(rq, BLK_STS_OK, 0))
 		BUG();
 }
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 0235625..01b2adf 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -741,12 +741,12 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
 	}
 }
 
-static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+static void ide_initialize_rq(struct request *rq)
 {
 	struct ide_request *req = blk_mq_rq_to_pdu(rq);
 
+	scsi_req_init(&req->sreq);
 	req->sreq.sense = req->sense;
-	return 0;
 }
 
 /*
@@ -771,8 +771,9 @@ static int ide_init_queue(ide_drive_t *drive)
 		return 1;
 
 	q->request_fn = do_ide_request;
-	q->init_rq_fn = ide_init_rq;
+	q->initialize_rq_fn = ide_initialize_rq;
 	q->cmd_size = sizeof(struct ide_request);
+	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return 1;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index a0651f9..fd57e8c 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -474,7 +474,7 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
 
 		drive->failed_pc = NULL;
 		drive->pc_callback(drive, 0);
-		ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+		ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
 		return ide_stopped;
 	}
 	ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries,
@@ -855,7 +855,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	BUG_ON(size < 0 || size % tape->blk_size);
 
 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_MISC;
 	scsi_req(rq)->cmd[13] = cmd;
 	rq->rq_disk = tape->disk;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d71199d..4efe4c6 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -318,7 +318,7 @@ static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
 		}
 
 		if (nr_bytes > 0)
-			ide_complete_rq(drive, 0, nr_bytes);
+			ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
 	}
 }
 
@@ -336,7 +336,7 @@ void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
 		ide_driveid_update(drive);
 	}
 
-	ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+	ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
 }
 
 /*
@@ -394,7 +394,7 @@ static ide_startstop_t task_pio_intr(ide_drive_t *drive)
 	if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
 		ide_finish_cmd(drive, cmd, stat);
 	else
-		ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
+		ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
 	return ide_stopped;
 out_err:
 	ide_error_cmd(drive, cmd);
@@ -433,7 +433,6 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 	rq = blk_get_request(drive->queue,
 		(cmd->tf_flags & IDE_TFLAG_WRITE) ?
 			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
-	scsi_req_init(rq);
 	ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
 	/*
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index 6a1849b..57eea5a 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -406,7 +406,7 @@ static int siimage_dma_test_irq(ide_drive_t *drive)
  *	yet.
  */
 
-static int sil_sata_reset_poll(ide_drive_t *drive)
+static blk_status_t sil_sata_reset_poll(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	void __iomem *sata_status_addr
@@ -419,11 +419,11 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
 		if ((sata_stat & 0x03) != 0x03) {
 			printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
 					    hwif->name, sata_stat);
-			return -ENXIO;
+			return BLK_STS_IOERR;
 		}
 	}
 
-	return 0;
+	return BLK_STS_OK;
 }
 
 /**
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 216d7ec..c2ae819 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -51,6 +51,8 @@
 /* un-comment DEBUG to enable pr_debug() statements */
 #define DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/tick.h>
@@ -65,7 +67,6 @@
 #include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4.1"
-#define PREFIX "intel_idle: "
 
 static struct cpuidle_driver intel_idle_driver = {
 	.name = "intel_idle",
@@ -1111,7 +1112,7 @@ static int __init intel_idle_probe(void)
 	const struct x86_cpu_id *id;
 
 	if (max_cstate == 0) {
-		pr_debug(PREFIX "disabled\n");
+		pr_debug("disabled\n");
 		return -EPERM;
 	}
 
@@ -1119,8 +1120,8 @@ static int __init intel_idle_probe(void)
 	if (!id) {
 		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 		    boot_cpu_data.x86 == 6)
-			pr_debug(PREFIX "does not run on family %d model %d\n",
-				boot_cpu_data.x86, boot_cpu_data.x86_model);
+			pr_debug("does not run on family %d model %d\n",
+				 boot_cpu_data.x86, boot_cpu_data.x86_model);
 		return -ENODEV;
 	}
 
@@ -1134,13 +1135,13 @@ static int __init intel_idle_probe(void)
 	    !mwait_substates)
 			return -ENODEV;
 
-	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
 
 	icpu = (const struct idle_cpu *)id->driver_data;
 	cpuidle_state_table = icpu->state_table;
 
-	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
-		" model 0x%X\n", boot_cpu_data.x86_model);
+	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+		 boot_cpu_data.x86_model);
 
 	return 0;
 }
@@ -1340,8 +1341,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
 			break;
 
 		if (cstate + 1 > max_cstate) {
-			printk(PREFIX "max_cstate %d reached\n",
-				max_cstate);
+			pr_info("max_cstate %d reached\n", max_cstate);
 			break;
 		}
 
@@ -1358,8 +1358,8 @@ static void __init intel_idle_cpuidle_driver_init(void)
 
 		/* if state marked as disabled, skip it */
 		if (cpuidle_state_table[cstate].disabled != 0) {
-			pr_debug(PREFIX "state %s is disabled",
-				cpuidle_state_table[cstate].name);
+			pr_debug("state %s is disabled\n",
+				 cpuidle_state_table[cstate].name);
 			continue;
 		}
 
@@ -1395,7 +1395,7 @@ static int intel_idle_cpu_init(unsigned int cpu)
 	dev->cpu = cpu;
 
 	if (cpuidle_register_device(dev)) {
-		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+		pr_debug("cpuidle_register_device %d failed!\n", cpu);
 		return -EIO;
 	}
 
@@ -1447,8 +1447,8 @@ static int __init intel_idle_init(void)
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		struct cpuidle_driver *drv = cpuidle_get_driver();
-		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
-			drv ? drv->name : "none");
+		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
+		       drv ? drv->name : "none");
 		goto init_driver_fail;
 	}
 
@@ -1460,8 +1460,8 @@ static int __init intel_idle_init(void)
 	if (retval < 0)
 		goto hp_setup_fail;
 
-	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
-		lapic_timer_reliable_states);
+	pr_debug("lapic_timer_reliable_states 0x%x\n",
+		 lapic_timer_reliable_states);
 
 	return 0;
 
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index a918270..b3c8c6e 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -83,6 +83,7 @@
 source "drivers/iio/imu/Kconfig"
 source "drivers/iio/light/Kconfig"
 source "drivers/iio/magnetometer/Kconfig"
+source "drivers/iio/multiplexer/Kconfig"
 source "drivers/iio/orientation/Kconfig"
 if IIO_TRIGGER
    source "drivers/iio/trigger/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 33fa402..93c769c 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -28,6 +28,7 @@
 obj-y += imu/
 obj-y += light/
 obj-y += magnetometer/
+obj-y += multiplexer/
 obj-y += orientation/
 obj-y += potentiometer/
 obj-y += potentiostat/
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 43a6cb0..2238a26 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -347,7 +347,7 @@ static int accel_3d_parse_report(struct platform_device *pdev,
 static int hid_accel_3d_probe(struct platform_device *pdev)
 {
 	int ret = 0;
-	static const char *name;
+	const char *name;
 	struct iio_dev *indio_dev;
 	struct accel_3d_state *accel_state;
 	const struct iio_chan_spec *channel_spec;
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index bf27044..1f53f08 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -27,7 +27,6 @@
 
 #define MMA9551_DRV_NAME		"mma9551"
 #define MMA9551_IRQ_NAME		"mma9551_event"
-#define MMA9551_GPIO_NAME		"mma9551_int"
 #define MMA9551_GPIO_COUNT		4
 
 /* Tilt application (inclination in IIO terms). */
@@ -418,8 +417,7 @@ static int mma9551_gpio_probe(struct iio_dev *indio_dev)
 	struct device *dev = &data->client->dev;
 
 	for (i = 0; i < MMA9551_GPIO_COUNT; i++) {
-		gpio = devm_gpiod_get_index(dev, MMA9551_GPIO_NAME, i,
-					    GPIOD_IN);
+		gpio = devm_gpiod_get_index(dev, NULL, i, GPIOD_IN);
 		if (IS_ERR(gpio)) {
 			dev_err(dev, "acpi gpio get index failed\n");
 			return PTR_ERR(gpio);
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 784670e..07d1489 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -710,6 +710,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
 int st_accel_common_probe(struct iio_dev *indio_dev)
 {
 	struct st_sensor_data *adata = iio_priv(indio_dev);
+	struct st_sensors_platform_data *pdata =
+		(struct st_sensors_platform_data *)adata->dev->platform_data;
 	int irq = adata->get_irq_data_ready(indio_dev);
 	int err;
 
@@ -736,9 +738,8 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
 					&adata->sensor_settings->fs.fs_avl[0];
 	adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
 
-	if (!adata->dev->platform_data)
-		adata->dev->platform_data =
-			(struct st_sensors_platform_data *)&default_accel_pdata;
+	if (!pdata)
+		pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
 
 	err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
 	if (err < 0)
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 29a15f2..1a867f5 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -47,15 +47,11 @@ static int st_accel_spi_remove(struct spi_device *spi)
 }
 
 static const struct spi_device_id st_accel_id_table[] = {
-	{ LSM303DLH_ACCEL_DEV_NAME },
-	{ LSM303DLHC_ACCEL_DEV_NAME },
 	{ LIS3DH_ACCEL_DEV_NAME },
 	{ LSM330D_ACCEL_DEV_NAME },
 	{ LSM330DL_ACCEL_DEV_NAME },
 	{ LSM330DLC_ACCEL_DEV_NAME },
 	{ LIS331DLH_ACCEL_DEV_NAME },
-	{ LSM303DL_ACCEL_DEV_NAME },
-	{ LSM303DLM_ACCEL_DEV_NAME },
 	{ LSM330_ACCEL_DEV_NAME },
 	{ LSM303AGR_ACCEL_DEV_NAME },
 	{ LIS2DH12_ACCEL_DEV_NAME },
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 401f47b..614fa41 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -679,6 +679,18 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called ti-adc0832.
 
+config TI_ADC084S021
+	tristate "Texas Instruments ADC084S021"
+	depends on SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  If you say yes here you get support for Texas Instruments ADC084S021
+	  chips.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called ti-adc084s021.
+
 config TI_ADC12138
 	tristate "Texas Instruments ADC12130/ADC12132/ADC12138"
 	depends on SPI
@@ -691,6 +703,18 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called ti-adc12138.
 
+config TI_ADC108S102
+	tristate "Texas Instruments ADC108S102 and ADC128S102 driver"
+	depends on SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for Texas Instruments ADC108S102 and
+	  ADC128S102 ADCs.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called ti-adc108s102.
+
 config TI_ADC128S052
 	tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
 	depends on SPI
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9339bec..b546736a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -62,7 +62,9 @@
 obj-$(CONFIG_STM32_ADC) += stm32-adc.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
+obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
 obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
+obj-$(CONFIG_TI_ADC108S102) += ti-adc108s102.o
 obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
 obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
 obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 1817ebf..34e353c 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -272,11 +272,9 @@ static ssize_t ad7791_write_frequency(struct device *dev,
 	struct ad7791_state *st = iio_priv(indio_dev);
 	int i, ret;
 
-	for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++)
-		if (sysfs_streq(ad7791_sample_freq_avail[i], buf))
-			break;
-	if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
-		return -EINVAL;
+	i = sysfs_match_string(ad7791_sample_freq_avail, buf);
+	if (i < 0)
+		return i;
 
 	ret = iio_device_claim_direct_mode(indio_dev);
 	if (ret)
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 62670cb..e0ea411 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -212,7 +212,10 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 	}
 
 	/* Start all channels in normal mode. */
-	clk_prepare_enable(data->clk_scaler->clk);
+	ret = clk_prepare_enable(data->clk_scaler->clk);
+	if (ret)
+		goto clk_enable_error;
+
 	adc_engine_control_reg_val = GENMASK(31, 16) |
 		ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE;
 	writel(adc_engine_control_reg_val,
@@ -236,6 +239,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 	writel(ASPEED_OPERATION_MODE_POWER_DOWN,
 		data->base + ASPEED_REG_ENGINE_CONTROL);
 	clk_disable_unprepare(data->clk_scaler->clk);
+clk_enable_error:
 	clk_hw_unregister_divider(data->clk_scaler);
 
 scaler_error:
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 21d38c8..7f4f9c4 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
 	iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
 }
 
-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
 {
 	u32 channel_intr_status;
 	u32 intr_status;
@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
 	return IRQ_NONE;
 }
 
-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
 {
 	irqreturn_t retval = IRQ_NONE;
 	struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
 	adc_priv = iio_priv(indio_dev);
 
 	regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
-	dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+	dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
 			intr_status);
 
 	intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
 	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
-				iproc_adc_interrupt_thread,
 				iproc_adc_interrupt_handler,
+				iproc_adc_interrupt_thread,
 				IRQF_SHARED, "iproc-adc", indio_dev);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error %d\n", ret);
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 62d37f8..6e419d5 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -52,6 +52,10 @@
 #define CPCAP_BIT_RAND0			BIT(1)	/* Set with CAL_MODE */
 #define CPCAP_BIT_ADEN			BIT(0)	/* Currently unused */
 
+#define CPCAP_REG_ADCC1_DEFAULTS	(CPCAP_BIT_ADEN_AUTO_CLR | \
+					 CPCAP_BIT_ADC_CLK_SEL0 |  \
+					 CPCAP_BIT_RAND1)
+
 /* Register CPCAP_REG_ADCC2 bits */
 #define CPCAP_BIT_CAL_FACTOR_ENABLE	BIT(15)	/* Currently unused */
 #define CPCAP_BIT_BATDETB_EN		BIT(14)	/* Currently unused */
@@ -62,7 +66,7 @@
 #define CPCAP_BIT_ADC_PS_FACTOR0	BIT(9)
 #define CPCAP_BIT_AD4_SELECT		BIT(8)	/* Currently unused */
 #define CPCAP_BIT_ADC_BUSY		BIT(7)	/* Currently unused */
-#define CPCAP_BIT_THERMBIAS_EN		BIT(6)	/* Currently unused */
+#define CPCAP_BIT_THERMBIAS_EN		BIT(6)	/* Bias for AD0_BATTDETB */
 #define CPCAP_BIT_ADTRIG_DIS		BIT(5)	/* Disable interrupt */
 #define CPCAP_BIT_LIADC			BIT(4)	/* Currently unused */
 #define CPCAP_BIT_TS_REFEN		BIT(3)	/* Currently unused */
@@ -70,6 +74,12 @@
 #define CPCAP_BIT_TS_M1			BIT(1)	/* Currently unused */
 #define CPCAP_BIT_TS_M0			BIT(0)	/* Currently unused */
 
+#define CPCAP_REG_ADCC2_DEFAULTS	(CPCAP_BIT_AD4_SELECT | \
+					 CPCAP_BIT_ADTRIG_DIS | \
+					 CPCAP_BIT_LIADC | \
+					 CPCAP_BIT_TS_M2 | \
+					 CPCAP_BIT_TS_M1)
+
 #define CPCAP_MAX_TEMP_LVL		27
 #define CPCAP_FOUR_POINT_TWO_ADC	801
 #define ST_ADC_CAL_CHRGI_HIGH_THRESHOLD	530
@@ -78,7 +88,7 @@
 #define ST_ADC_CAL_BATTI_LOW_THRESHOLD	494
 #define ST_ADC_CALIBRATE_DIFF_THRESHOLD	3
 
-#define CPCAP_ADC_MAX_RETRIES		5	/* Calibration and quirk */
+#define CPCAP_ADC_MAX_RETRIES		5	/* Calibration */
 
 /**
  * struct cpcap_adc_ato - timing settings for cpcap adc
@@ -124,10 +134,10 @@ struct cpcap_adc {
  */
 enum cpcap_adc_channel {
 	/* Bank0 channels */
-	CPCAP_ADC_AD0_BATTDETB,	/* Battery detection */
+	CPCAP_ADC_AD0,		/* Battery temperature */
 	CPCAP_ADC_BATTP,	/* Battery voltage */
 	CPCAP_ADC_VBUS,		/* USB VBUS voltage */
-	CPCAP_ADC_AD3,		/* Battery temperature when charging */
+	CPCAP_ADC_AD3,		/* Die temperature when charging */
 	CPCAP_ADC_BPLUS_AD4,	/* Another battery or system voltage */
 	CPCAP_ADC_CHG_ISENSE,	/* Calibrated charge current */
 	CPCAP_ADC_BATTI,	/* Calibrated system current */
@@ -217,7 +227,7 @@ struct cpcap_adc_request {
 /* Phasing table for channels. Note that channels 16 & 17 use BATTP and BATTI */
 static const struct cpcap_adc_phasing_tbl bank_phasing[] = {
 	/* Bank0 */
-	[CPCAP_ADC_AD0_BATTDETB] = {0, 0x80, 0x80,    0, 1023},
+	[CPCAP_ADC_AD0] =          {0, 0x80, 0x80,    0, 1023},
 	[CPCAP_ADC_BATTP] =        {0, 0x80, 0x80,    0, 1023},
 	[CPCAP_ADC_VBUS] =         {0, 0x80, 0x80,    0, 1023},
 	[CPCAP_ADC_AD3] =          {0, 0x80, 0x80,    0, 1023},
@@ -243,7 +253,7 @@ static const struct cpcap_adc_phasing_tbl bank_phasing[] = {
  */
 static struct cpcap_adc_conversion_tbl bank_conversion[] = {
 	/* Bank0 */
-	[CPCAP_ADC_AD0_BATTDETB] = {
+	[CPCAP_ADC_AD0] = {
 		IIO_CHAN_INFO_PROCESSED,    0,    0, 0,     1,    1,
 	},
 	[CPCAP_ADC_BATTP] = {
@@ -541,6 +551,15 @@ static void cpcap_adc_setup_bank(struct cpcap_adc *ddata,
 		return;
 
 	switch (req->channel) {
+	case CPCAP_ADC_AD0:
+		value2 |= CPCAP_BIT_THERMBIAS_EN;
+		error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+					   CPCAP_BIT_THERMBIAS_EN,
+					   value2);
+		if (error)
+			return;
+		usleep_range(800, 1000);
+		break;
 	case CPCAP_ADC_AD8 ... CPCAP_ADC_TSY2_AD15:
 		value1 |= CPCAP_BIT_AD_SEL1;
 		break;
@@ -583,7 +602,8 @@ static void cpcap_adc_setup_bank(struct cpcap_adc *ddata,
 	error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
 				   CPCAP_BIT_ATOX_PS_FACTOR |
 				   CPCAP_BIT_ADC_PS_FACTOR1 |
-				   CPCAP_BIT_ADC_PS_FACTOR0,
+				   CPCAP_BIT_ADC_PS_FACTOR0 |
+				   CPCAP_BIT_THERMBIAS_EN,
 				   value2);
 	if (error)
 		return;
@@ -614,27 +634,6 @@ static void cpcap_adc_setup_bank(struct cpcap_adc *ddata,
 	}
 }
 
-/*
- * Occasionally the ADC does not seem to start and there will be no
- * interrupt. Let's re-init interrupt to prevent the ADC from hanging
- * for the next request. It is unclear why this happens, but the next
- * request will usually work after doing this.
- */
-static void cpcap_adc_quirk_reset_lost_irq(struct cpcap_adc *ddata)
-{
-	int error;
-
-	dev_info(ddata->dev, "lost ADC irq, attempting to reinit\n");
-	disable_irq(ddata->irq);
-	error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
-				   CPCAP_BIT_ADTRIG_DIS,
-				   CPCAP_BIT_ADTRIG_DIS);
-	if (error)
-		dev_warn(ddata->dev, "%s reset failed: %i\n",
-			 __func__, error);
-	enable_irq(ddata->irq);
-}
-
 static int cpcap_adc_start_bank(struct cpcap_adc *ddata,
 				struct cpcap_adc_request *req)
 {
@@ -652,7 +651,6 @@ static int cpcap_adc_start_bank(struct cpcap_adc *ddata,
 			return 0;
 
 		if (error == 0) {
-			cpcap_adc_quirk_reset_lost_irq(ddata);
 			error = -ETIMEDOUT;
 			continue;
 		}
@@ -664,6 +662,21 @@ static int cpcap_adc_start_bank(struct cpcap_adc *ddata,
 	return error;
 }
 
+static int cpcap_adc_stop_bank(struct cpcap_adc *ddata)
+{
+	int error;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC1,
+				   0xffff,
+				   CPCAP_REG_ADCC1_DEFAULTS);
+	if (error)
+		return error;
+
+	return regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+				  0xffff,
+				  CPCAP_REG_ADCC2_DEFAULTS);
+}
+
 static void cpcap_adc_phase(struct cpcap_adc_request *req)
 {
 	const struct cpcap_adc_conversion_tbl *conv_tbl = req->conv_tbl;
@@ -758,7 +771,7 @@ static void cpcap_adc_convert(struct cpcap_adc_request *req)
 		return;
 
 	/* Temperatures use a lookup table instead of conversion table */
-	if ((req->channel == CPCAP_ADC_AD0_BATTDETB) ||
+	if ((req->channel == CPCAP_ADC_AD0) ||
 	    (req->channel == CPCAP_ADC_AD3)) {
 		req->result =
 			cpcap_adc_table_to_millicelcius(req->result);
@@ -820,7 +833,7 @@ static int cpcap_adc_init_request(struct cpcap_adc_request *req,
 	req->conv_tbl = bank_conversion;
 
 	switch (channel) {
-	case CPCAP_ADC_AD0_BATTDETB ... CPCAP_ADC_USB_ID:
+	case CPCAP_ADC_AD0 ... CPCAP_ADC_USB_ID:
 		req->bank_index = channel;
 		break;
 	case CPCAP_ADC_AD8 ... CPCAP_ADC_TSY2_AD15:
@@ -839,6 +852,22 @@ static int cpcap_adc_init_request(struct cpcap_adc_request *req,
 	return 0;
 }
 
+static int cpcap_adc_read_st_die_temp(struct cpcap_adc *ddata,
+				      int addr, int *val)
+{
+	int error;
+
+	error = regmap_read(ddata->reg, addr, val);
+	if (error)
+		return error;
+
+	/* Convert the raw reading to die temperature in millidegrees Celsius */
+	*val -= 282;
+	*val = *val * 114 + 25000;
+
+	return 0;
+}
+
 static int cpcap_adc_read(struct iio_dev *indio_dev,
 			  struct iio_chan_spec const *chan,
 			  int *val, int *val2, long mask)
@@ -860,6 +889,9 @@ static int cpcap_adc_read(struct iio_dev *indio_dev,
 		error = regmap_read(ddata->reg, chan->address, val);
 		if (error)
 			goto err_unlock;
+		error = cpcap_adc_stop_bank(ddata);
+		if (error)
+			goto err_unlock;
 		mutex_unlock(&ddata->lock);
 		break;
 	case IIO_CHAN_INFO_PROCESSED:
@@ -867,7 +899,19 @@ static int cpcap_adc_read(struct iio_dev *indio_dev,
 		error = cpcap_adc_start_bank(ddata, &req);
 		if (error)
 			goto err_unlock;
-		error = cpcap_adc_read_bank_scaled(ddata, &req);
+		if ((ddata->vendor == CPCAP_VENDOR_ST) &&
+		    (chan->channel == CPCAP_ADC_AD3)) {
+			error = cpcap_adc_read_st_die_temp(ddata,
+							   chan->address,
+							   &req.result);
+			if (error)
+				goto err_unlock;
+		} else {
+			error = cpcap_adc_read_bank_scaled(ddata, &req);
+			if (error)
+				goto err_unlock;
+		}
+		error = cpcap_adc_stop_bank(ddata);
 		if (error)
 			goto err_unlock;
 		mutex_unlock(&ddata->lock);
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 678e8c7..adf7dc7 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -105,6 +105,26 @@ static int hi8435_writew(struct hi8435_priv *priv, u8 reg, u16 val)
 	return spi_write(priv->spi, priv->reg_buffer, 3);
 }
 
+static int hi8435_read_raw(struct iio_dev *idev,
+			   const struct iio_chan_spec *chan,
+			   int *val, int *val2, long mask)
+{
+	struct hi8435_priv *priv = iio_priv(idev);
+	u32 tmp;
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+		if (ret < 0)
+			return ret;
+		*val = !!(tmp & BIT(chan->channel));
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
 static int hi8435_read_event_config(struct iio_dev *idev,
 				    const struct iio_chan_spec *chan,
 				    enum iio_event_type type,
@@ -121,10 +141,21 @@ static int hi8435_write_event_config(struct iio_dev *idev,
 				     enum iio_event_direction dir, int state)
 {
 	struct hi8435_priv *priv = iio_priv(idev);
+	int ret;
+	u32 tmp;
 
-	priv->event_scan_mask &= ~BIT(chan->channel);
-	if (state)
+	if (state) {
+		ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp);
+		if (ret < 0)
+			return ret;
+		if (tmp & BIT(chan->channel))
+			priv->event_prev_val |= BIT(chan->channel);
+		else
+			priv->event_prev_val &= ~BIT(chan->channel);
+
 		priv->event_scan_mask |= BIT(chan->channel);
+	} else
+		priv->event_scan_mask &= ~BIT(chan->channel);
 
 	return 0;
 }
@@ -325,6 +356,7 @@ static const struct iio_enum hi8435_sensing_mode = {
 
 static const struct iio_chan_spec_ext_info hi8435_ext_info[] = {
 	IIO_ENUM("sensing_mode", IIO_SEPARATE, &hi8435_sensing_mode),
+	IIO_ENUM_AVAILABLE("sensing_mode", &hi8435_sensing_mode),
 	{},
 };
 
@@ -333,6 +365,7 @@ static const struct iio_chan_spec_ext_info hi8435_ext_info[] = {
 	.type = IIO_VOLTAGE,				\
 	.indexed = 1,					\
 	.channel = num,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
 	.event_spec = hi8435_events,			\
 	.num_event_specs = ARRAY_SIZE(hi8435_events),	\
 	.ext_info = hi8435_ext_info,			\
@@ -376,11 +409,12 @@ static const struct iio_chan_spec hi8435_channels[] = {
 
 static const struct iio_info hi8435_info = {
 	.driver_module = THIS_MODULE,
-	.read_event_config = &hi8435_read_event_config,
+	.read_raw = hi8435_read_raw,
+	.read_event_config = hi8435_read_event_config,
 	.write_event_config = hi8435_write_event_config,
-	.read_event_value = &hi8435_read_event_value,
-	.write_event_value = &hi8435_write_event_value,
-	.debugfs_reg_access = &hi8435_debugfs_reg_access,
+	.read_event_value = hi8435_read_event_value,
+	.write_event_value = hi8435_write_event_value,
+	.debugfs_reg_access = hi8435_debugfs_reg_access,
 };
 
 static void hi8435_iio_push_event(struct iio_dev *idev, unsigned int val)
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index db98382..232c0b8 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -42,13 +42,14 @@
 #define INA2XX_CURRENT                  0x04	/* readonly */
 #define INA2XX_CALIBRATION              0x05
 
-#define INA226_ALERT_MASK		GENMASK(2, 1)
-#define INA266_CVRF			BIT(3)
+#define INA226_MASK_ENABLE		0x06
+#define INA226_CVRF			BIT(3)
 
 #define INA2XX_MAX_REGISTERS            8
 
 /* settings - depend on use case */
 #define INA219_CONFIG_DEFAULT           0x399F	/* PGA=8 */
+#define INA219_DEFAULT_IT		532
 #define INA226_CONFIG_DEFAULT           0x4327
 #define INA226_DEFAULT_AVG              4
 #define INA226_DEFAULT_IT		1110
@@ -56,19 +57,24 @@
 #define INA2XX_RSHUNT_DEFAULT           10000
 
 /*
- * bit mask for reading the averaging setting in the configuration register
+ * bit masks for reading the settings in the configuration register
  * FIXME: use regmap_fields.
  */
 #define INA2XX_MODE_MASK	GENMASK(3, 0)
 
+/* Averaging for VBus/VShunt/Power */
 #define INA226_AVG_MASK		GENMASK(11, 9)
 #define INA226_SHIFT_AVG(val)	((val) << 9)
 
 /* Integration time for VBus */
+#define INA219_ITB_MASK		GENMASK(10, 7)
+#define INA219_SHIFT_ITB(val)	((val) << 7)
 #define INA226_ITB_MASK		GENMASK(8, 6)
 #define INA226_SHIFT_ITB(val)	((val) << 6)
 
 /* Integration time for VShunt */
+#define INA219_ITS_MASK		GENMASK(6, 3)
+#define INA219_SHIFT_ITS(val)	((val) << 3)
 #define INA226_ITS_MASK		GENMASK(5, 3)
 #define INA226_SHIFT_ITS(val)	((val) << 3)
 
@@ -108,6 +114,7 @@ struct ina2xx_config {
 	int bus_voltage_shift;
 	int bus_voltage_lsb;	/* uV */
 	int power_lsb;		/* uW */
+	enum ina2xx_ids chip_id;
 };
 
 struct ina2xx_chip_info {
@@ -130,6 +137,7 @@ static const struct ina2xx_config ina2xx_config[] = {
 		.bus_voltage_shift = 3,
 		.bus_voltage_lsb = 4000,
 		.power_lsb = 20000,
+		.chip_id = ina219,
 	},
 	[ina226] = {
 		.config_default = INA226_CONFIG_DEFAULT,
@@ -138,6 +146,7 @@ static const struct ina2xx_config ina2xx_config[] = {
 		.bus_voltage_shift = 0,
 		.bus_voltage_lsb = 1250,
 		.power_lsb = 25000,
+		.chip_id = ina226,
 	},
 };
 
@@ -283,6 +292,66 @@ static int ina226_set_int_time_vshunt(struct ina2xx_chip_info *chip,
 	return 0;
 }
 
+/* Conversion times in microseconds */
+static const int ina219_conv_time_tab_subsample[] = { 84, 148, 276, 532 };
+static const int ina219_conv_time_tab_average[] = { 532, 1060, 2130, 4260,
+						    8510, 17020, 34050, 68100};
+
+static int ina219_lookup_int_time(unsigned int *val_us, int *bits)
+{
+	if (*val_us > 68100 || *val_us < 84)
+		return -EINVAL;
+
+	if (*val_us <= 532) {
+		*bits = find_closest(*val_us, ina219_conv_time_tab_subsample,
+				    ARRAY_SIZE(ina219_conv_time_tab_subsample));
+		*val_us = ina219_conv_time_tab_subsample[*bits];
+	} else {
+		*bits = find_closest(*val_us, ina219_conv_time_tab_average,
+				    ARRAY_SIZE(ina219_conv_time_tab_average));
+		*val_us = ina219_conv_time_tab_average[*bits];
+		*bits |= 0x8;
+	}
+
+	return 0;
+}
+
+static int ina219_set_int_time_vbus(struct ina2xx_chip_info *chip,
+				    unsigned int val_us, unsigned int *config)
+{
+	int bits, ret;
+	unsigned int val_us_best = val_us;
+
+	ret = ina219_lookup_int_time(&val_us_best, &bits);
+	if (ret)
+		return ret;
+
+	chip->int_time_vbus = val_us_best;
+
+	*config &= ~INA219_ITB_MASK;
+	*config |= INA219_SHIFT_ITB(bits) & INA219_ITB_MASK;
+
+	return 0;
+}
+
+static int ina219_set_int_time_vshunt(struct ina2xx_chip_info *chip,
+				      unsigned int val_us, unsigned int *config)
+{
+	int bits, ret;
+	unsigned int val_us_best = val_us;
+
+	ret = ina219_lookup_int_time(&val_us_best, &bits);
+	if (ret)
+		return ret;
+
+	chip->int_time_vshunt = val_us_best;
+
+	*config &= ~INA219_ITS_MASK;
+	*config |= INA219_SHIFT_ITS(bits) & INA219_ITS_MASK;
+
+	return 0;
+}
+
 static int ina2xx_write_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan,
 			    int val, int val2, long mask)
@@ -308,10 +377,21 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
 		break;
 
 	case IIO_CHAN_INFO_INT_TIME:
-		if (chan->address == INA2XX_SHUNT_VOLTAGE)
-			ret = ina226_set_int_time_vshunt(chip, val2, &tmp);
-		else
-			ret = ina226_set_int_time_vbus(chip, val2, &tmp);
+		if (chip->config->chip_id == ina226) {
+			if (chan->address == INA2XX_SHUNT_VOLTAGE)
+				ret = ina226_set_int_time_vshunt(chip, val2,
+								 &tmp);
+			else
+				ret = ina226_set_int_time_vbus(chip, val2,
+							       &tmp);
+		} else {
+			if (chan->address == INA2XX_SHUNT_VOLTAGE)
+				ret = ina219_set_int_time_vshunt(chip, val2,
+								 &tmp);
+			else
+				ret = ina219_set_int_time_vbus(chip, val2,
+							       &tmp);
+		}
 		break;
 
 	default:
@@ -412,13 +492,30 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
 	return len;
 }
 
-#define INA2XX_CHAN(_type, _index, _address) { \
+#define INA219_CHAN(_type, _index, _address) { \
 	.type = (_type), \
 	.address = (_address), \
 	.indexed = 1, \
 	.channel = (_index), \
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
-	| BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+			      BIT(IIO_CHAN_INFO_SCALE), \
+	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+	.scan_index = (_index), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = 16, \
+		.storagebits = 16, \
+		.endianness = IIO_CPU, \
+	} \
+}
+
+#define INA226_CHAN(_type, _index, _address) { \
+	.type = (_type), \
+	.address = (_address), \
+	.indexed = 1, \
+	.channel = (_index), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+			      BIT(IIO_CHAN_INFO_SCALE), \
 	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
 				   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
 	.scan_index = (_index), \
@@ -434,7 +531,7 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
  * Sampling Freq is a consequence of the integration times of
  * the Voltage channels.
  */
-#define INA2XX_CHAN_VOLTAGE(_index, _address) { \
+#define INA219_CHAN_VOLTAGE(_index, _address) { \
 	.type = IIO_VOLTAGE, \
 	.address = (_address), \
 	.indexed = 1, \
@@ -442,6 +539,7 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
 			      BIT(IIO_CHAN_INFO_SCALE) | \
 			      BIT(IIO_CHAN_INFO_INT_TIME), \
+	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
 	.scan_index = (_index), \
 	.scan_type = { \
 		.sign = 'u', \
@@ -451,11 +549,39 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
 	} \
 }
 
-static const struct iio_chan_spec ina2xx_channels[] = {
-	INA2XX_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE),
-	INA2XX_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE),
-	INA2XX_CHAN(IIO_POWER, 2, INA2XX_POWER),
-	INA2XX_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
+#define INA226_CHAN_VOLTAGE(_index, _address) { \
+	.type = IIO_VOLTAGE, \
+	.address = (_address), \
+	.indexed = 1, \
+	.channel = (_index), \
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+			      BIT(IIO_CHAN_INFO_SCALE) | \
+			      BIT(IIO_CHAN_INFO_INT_TIME), \
+	.info_mask_shared_by_dir = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+				   BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+	.scan_index = (_index), \
+	.scan_type = { \
+		.sign = 'u', \
+		.realbits = 16, \
+		.storagebits = 16, \
+		.endianness = IIO_LE, \
+	} \
+}
+
+
+static const struct iio_chan_spec ina226_channels[] = {
+	INA226_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE),
+	INA226_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE),
+	INA226_CHAN(IIO_POWER, 2, INA2XX_POWER),
+	INA226_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec ina219_channels[] = {
+	INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE),
+	INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE),
+	INA219_CHAN(IIO_POWER, 2, INA2XX_POWER),
+	INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT),
 	IIO_CHAN_SOFT_TIMESTAMP(4),
 };
 
@@ -481,12 +607,12 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
 	 */
 	if (!chip->allow_async_readout)
 		do {
-			ret = regmap_read(chip->regmap, INA226_ALERT_MASK,
+			ret = regmap_read(chip->regmap, INA226_MASK_ENABLE,
 					  &alert);
 			if (ret < 0)
 				return ret;
 
-			alert &= INA266_CVRF;
+			alert &= INA226_CVRF;
 		} while (!alert);
 
 	/*
@@ -590,7 +716,14 @@ static int ina2xx_debug_reg(struct iio_dev *indio_dev,
 }
 
 /* Possible integration times for vshunt and vbus */
-static IIO_CONST_ATTR_INT_TIME_AVAIL("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
+static IIO_CONST_ATTR_NAMED(ina219_integration_time_available,
+			    integration_time_available,
+			    "0.000084 0.000148 0.000276 0.000532 0.001060 0.002130 0.004260 0.008510 0.017020 0.034050 0.068100");
+
+static IIO_CONST_ATTR_NAMED(ina226_integration_time_available,
+			    integration_time_available,
+			    "0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
+
 
 static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
 		       ina2xx_allow_async_readout_show,
@@ -600,20 +733,39 @@ static IIO_DEVICE_ATTR(in_shunt_resistor, S_IRUGO | S_IWUSR,
 		       ina2xx_shunt_resistor_show,
 		       ina2xx_shunt_resistor_store, 0);
 
-static struct attribute *ina2xx_attributes[] = {
+static struct attribute *ina219_attributes[] = {
 	&iio_dev_attr_in_allow_async_readout.dev_attr.attr,
-	&iio_const_attr_integration_time_available.dev_attr.attr,
+	&iio_const_attr_ina219_integration_time_available.dev_attr.attr,
 	&iio_dev_attr_in_shunt_resistor.dev_attr.attr,
 	NULL,
 };
 
-static const struct attribute_group ina2xx_attribute_group = {
-	.attrs = ina2xx_attributes,
+static struct attribute *ina226_attributes[] = {
+	&iio_dev_attr_in_allow_async_readout.dev_attr.attr,
+	&iio_const_attr_ina226_integration_time_available.dev_attr.attr,
+	&iio_dev_attr_in_shunt_resistor.dev_attr.attr,
+	NULL,
 };
 
-static const struct iio_info ina2xx_info = {
+static const struct attribute_group ina219_attribute_group = {
+	.attrs = ina219_attributes,
+};
+
+static const struct attribute_group ina226_attribute_group = {
+	.attrs = ina226_attributes,
+};
+
+static const struct iio_info ina219_info = {
 	.driver_module = THIS_MODULE,
-	.attrs = &ina2xx_attribute_group,
+	.attrs = &ina219_attribute_group,
+	.read_raw = ina2xx_read_raw,
+	.write_raw = ina2xx_write_raw,
+	.debugfs_reg_access = ina2xx_debug_reg,
+};
+
+static const struct iio_info ina226_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &ina226_attribute_group,
 	.read_raw = ina2xx_read_raw,
 	.write_raw = ina2xx_write_raw,
 	.debugfs_reg_access = ina2xx_debug_reg,
@@ -684,6 +836,10 @@ static int ina2xx_probe(struct i2c_client *client,
 		ina226_set_average(chip, INA226_DEFAULT_AVG, &val);
 		ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val);
 		ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val);
+	} else {
+		chip->avg = 1;
+		ina219_set_int_time_vbus(chip, INA219_DEFAULT_IT, &val);
+		ina219_set_int_time_vshunt(chip, INA219_DEFAULT_IT, &val);
 	}
 
 	ret = ina2xx_init(chip, val);
@@ -695,10 +851,16 @@ static int ina2xx_probe(struct i2c_client *client,
 	indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->dev.of_node = client->dev.of_node;
-	indio_dev->channels = ina2xx_channels;
-	indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
+	if (id->driver_data == ina226) {
+		indio_dev->channels = ina226_channels;
+		indio_dev->num_channels = ARRAY_SIZE(ina226_channels);
+		indio_dev->info = &ina226_info;
+	} else {
+		indio_dev->channels = ina219_channels;
+		indio_dev->num_channels = ARRAY_SIZE(ina219_channels);
+		indio_dev->info = &ina219_info;
+	}
 	indio_dev->name = id->name;
-	indio_dev->info = &ina2xx_info;
 	indio_dev->setup_ops = &ina2xx_setup_ops;
 
 	buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index 0de709b..6a5b9a9 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -76,10 +76,14 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 			    long mask)
 {
 	struct lpc32xx_adc_state *st = iio_priv(indio_dev);
-
+	int ret;
 	if (mask == IIO_CHAN_INFO_RAW) {
 		mutex_lock(&indio_dev->mlock);
-		clk_prepare_enable(st->clk);
+		ret = clk_prepare_enable(st->clk);
+		if (ret) {
+			mutex_unlock(&indio_dev->mlock);
+			return ret;
+		}
 		/* Measurement setup */
 		__raw_writel(LPC32XXAD_INTERNAL | (chan->address) |
 			     LPC32XXAD_REFp | LPC32XXAD_REFm,
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index ec82106..b0526e4 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -438,10 +438,10 @@ static ssize_t max9611_shunt_resistor_show(struct device *dev,
 	struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
 	unsigned int i, r;
 
-	i = max9611->shunt_resistor_uohm / 1000;
-	r = max9611->shunt_resistor_uohm % 1000;
+	i = max9611->shunt_resistor_uohm / 1000000;
+	r = max9611->shunt_resistor_uohm % 1000000;
 
-	return sprintf(buf, "%u.%03u\n", i, r);
+	return sprintf(buf, "%u.%06u\n", i, r);
 }
 
 static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
@@ -536,8 +536,8 @@ static int max9611_probe(struct i2c_client *client,
 	int ret;
 
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
-	if (IS_ERR(indio_dev))
-		return PTR_ERR(indio_dev);
+	if (!indio_dev)
+		return -ENOMEM;
 
 	i2c_set_clientdata(client, indio_dev);
 
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index dd4190b..83da50e 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -220,6 +220,7 @@ enum meson_sar_adc_chan7_mux_sel {
 };
 
 struct meson_sar_adc_data {
+	bool					has_bl30_integration;
 	unsigned int				resolution;
 	const char				*name;
 };
@@ -437,19 +438,24 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
 
 	mutex_lock(&indio_dev->mlock);
 
-	/* prevent BL30 from using the SAR ADC while we are using it */
-	regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
-			   MESON_SAR_ADC_DELAY_KERNEL_BUSY,
-			   MESON_SAR_ADC_DELAY_KERNEL_BUSY);
+	if (priv->data->has_bl30_integration) {
+		/* prevent BL30 from using the SAR ADC while we are using it */
+		regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
+				MESON_SAR_ADC_DELAY_KERNEL_BUSY,
+				MESON_SAR_ADC_DELAY_KERNEL_BUSY);
 
-	/* wait until BL30 releases it's lock (so we can use the SAR ADC) */
-	do {
-		udelay(1);
-		regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
-	} while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
+		/*
+		 * wait until BL30 releases its lock (so we can use the SAR
+		 * ADC)
+		 */
+		do {
+			udelay(1);
+			regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
+		} while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
 
-	if (timeout < 0)
-		return -ETIMEDOUT;
+		if (timeout < 0)
+			return -ETIMEDOUT;
+	}
 
 	return 0;
 }
@@ -458,9 +464,10 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
 {
 	struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
 
-	/* allow BL30 to use the SAR ADC again */
-	regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
-			   MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0);
+	if (priv->data->has_bl30_integration)
+		/* allow BL30 to use the SAR ADC again */
+		regmap_update_bits(priv->regmap, MESON_SAR_ADC_DELAY,
+				MESON_SAR_ADC_DELAY_KERNEL_BUSY, 0);
 
 	mutex_unlock(&indio_dev->mlock);
 }
@@ -468,13 +475,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
 static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
 {
 	struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
-	int count;
+	unsigned int count, tmp;
 
 	for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
 		if (!meson_sar_adc_get_fifo_count(indio_dev))
 			break;
 
-		regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0);
+		regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
 	}
 }
 
@@ -614,14 +621,16 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
 	 */
 	meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_CH7_INPUT);
 
-	/*
-	 * leave sampling delay and the input clocks as configured by BL30 to
-	 * make sure BL30 gets the values it expects when reading the
-	 * temperature sensor.
-	 */
-	regmap_read(priv->regmap, MESON_SAR_ADC_REG3, &regval);
-	if (regval & MESON_SAR_ADC_REG3_BL30_INITIALIZED)
-		return 0;
+	if (priv->data->has_bl30_integration) {
+		/*
+		 * leave sampling delay and the input clocks as configured by
+		 * BL30 to make sure BL30 gets the values it expects when
+		 * reading the temperature sensor.
+		 */
+		regmap_read(priv->regmap, MESON_SAR_ADC_REG3, &regval);
+		if (regval & MESON_SAR_ADC_REG3_BL30_INITIALIZED)
+			return 0;
+	}
 
 	meson_sar_adc_stop_sample_engine(indio_dev);
 
@@ -834,23 +843,46 @@ static const struct iio_info meson_sar_adc_iio_info = {
 	.driver_module = THIS_MODULE,
 };
 
-struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
+static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
+	.has_bl30_integration = false,
+	.resolution = 10,
+	.name = "meson-meson8-saradc",
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
+	.has_bl30_integration = false,
+	.resolution = 10,
+	.name = "meson-meson8b-saradc",
+};
+
+static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
+	.has_bl30_integration = true,
 	.resolution = 10,
 	.name = "meson-gxbb-saradc",
 };
 
-struct meson_sar_adc_data meson_sar_adc_gxl_data = {
+static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
+	.has_bl30_integration = true,
 	.resolution = 12,
 	.name = "meson-gxl-saradc",
 };
 
-struct meson_sar_adc_data meson_sar_adc_gxm_data = {
+static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
+	.has_bl30_integration = true,
 	.resolution = 12,
 	.name = "meson-gxm-saradc",
 };
 
 static const struct of_device_id meson_sar_adc_of_match[] = {
 	{
+		.compatible = "amlogic,meson8-saradc",
+		.data = &meson_sar_adc_meson8_data,
+	},
+	{
+		.compatible = "amlogic,meson8b-saradc",
+		.data = &meson_sar_adc_meson8b_data,
+	},
+	{
 		.compatible = "amlogic,meson-gxbb-saradc",
 		.data = &meson_sar_adc_gxbb_data,
 	}, {
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index b0c7d8e..d32b346 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -48,7 +48,7 @@
 
 #define VREF_MV_BASE 1850
 
-const char *mx23_lradc_adc_irq_names[] = {
+static const char *mx23_lradc_adc_irq_names[] = {
 	"mxs-lradc-channel0",
 	"mxs-lradc-channel1",
 	"mxs-lradc-channel2",
@@ -57,7 +57,7 @@ const char *mx23_lradc_adc_irq_names[] = {
 	"mxs-lradc-channel5",
 };
 
-const char *mx28_lradc_adc_irq_names[] = {
+static const char *mx28_lradc_adc_irq_names[] = {
 	"mxs-lradc-thresh0",
 	"mxs-lradc-thresh1",
 	"mxs-lradc-channel0",
@@ -344,20 +344,20 @@ static ssize_t mxs_lradc_adc_show_scale_avail(struct device *dev,
 	IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, 0444,\
 			mxs_lradc_adc_show_scale_avail, NULL, ch)
 
-SHOW_SCALE_AVAILABLE_ATTR(0);
-SHOW_SCALE_AVAILABLE_ATTR(1);
-SHOW_SCALE_AVAILABLE_ATTR(2);
-SHOW_SCALE_AVAILABLE_ATTR(3);
-SHOW_SCALE_AVAILABLE_ATTR(4);
-SHOW_SCALE_AVAILABLE_ATTR(5);
-SHOW_SCALE_AVAILABLE_ATTR(6);
-SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(10);
-SHOW_SCALE_AVAILABLE_ATTR(11);
-SHOW_SCALE_AVAILABLE_ATTR(12);
-SHOW_SCALE_AVAILABLE_ATTR(13);
-SHOW_SCALE_AVAILABLE_ATTR(14);
-SHOW_SCALE_AVAILABLE_ATTR(15);
+static SHOW_SCALE_AVAILABLE_ATTR(0);
+static SHOW_SCALE_AVAILABLE_ATTR(1);
+static SHOW_SCALE_AVAILABLE_ATTR(2);
+static SHOW_SCALE_AVAILABLE_ATTR(3);
+static SHOW_SCALE_AVAILABLE_ATTR(4);
+static SHOW_SCALE_AVAILABLE_ATTR(5);
+static SHOW_SCALE_AVAILABLE_ATTR(6);
+static SHOW_SCALE_AVAILABLE_ATTR(7);
+static SHOW_SCALE_AVAILABLE_ATTR(10);
+static SHOW_SCALE_AVAILABLE_ATTR(11);
+static SHOW_SCALE_AVAILABLE_ATTR(12);
+static SHOW_SCALE_AVAILABLE_ATTR(13);
+static SHOW_SCALE_AVAILABLE_ATTR(14);
+static SHOW_SCALE_AVAILABLE_ATTR(15);
 
 static struct attribute *mxs_lradc_adc_attributes[] = {
 	&iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
@@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
 	adc->dev = dev;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
 	adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
-	if (IS_ERR(adc->base))
-		return PTR_ERR(adc->base);
+	if (!adc->base)
+		return -ENOMEM;
 
 	init_completion(&adc->completion);
 	spin_lock_init(&adc->lock);
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 018ed36..27a3181 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -73,7 +73,7 @@ enum rcar_gyroadc_model {
 struct rcar_gyroadc {
 	struct device			*dev;
 	void __iomem			*regs;
-	struct clk			*iclk;
+	struct clk			*clk;
 	struct regulator		*vref[8];
 	unsigned int			num_channels;
 	enum rcar_gyroadc_model		model;
@@ -83,7 +83,7 @@ struct rcar_gyroadc {
 
 static void rcar_gyroadc_hw_init(struct rcar_gyroadc *priv)
 {
-	const unsigned long clk_mhz = clk_get_rate(priv->iclk) / 1000000;
+	const unsigned long clk_mhz = clk_get_rate(priv->clk) / 1000000;
 	const unsigned long clk_mul =
 		(priv->mode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) ? 10 : 5;
 	unsigned long clk_len = clk_mhz * clk_mul;
@@ -510,9 +510,9 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->regs))
 		return PTR_ERR(priv->regs);
 
-	priv->iclk = devm_clk_get(dev, "if");
-	if (IS_ERR(priv->iclk)) {
-		ret = PTR_ERR(priv->iclk);
+	priv->clk = devm_clk_get(dev, "fck");
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
 		if (ret != -EPROBE_DEFER)
 			dev_err(dev, "Failed to get IF clock (ret=%i)\n", ret);
 		return ret;
@@ -536,7 +536,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
 	indio_dev->info = &rcar_gyroadc_iio_info;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	ret = clk_prepare_enable(priv->iclk);
+	ret = clk_prepare_enable(priv->clk);
 	if (ret) {
 		dev_err(dev, "Could not prepare or enable the IF clock.\n");
 		goto err_clk_if_enable;
@@ -565,7 +565,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
-	clk_disable_unprepare(priv->iclk);
+	clk_disable_unprepare(priv->clk);
 err_clk_if_enable:
 	rcar_gyroadc_deinit_supplies(indio_dev);
 
@@ -584,7 +584,7 @@ static int rcar_gyroadc_remove(struct platform_device *pdev)
 	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 	pm_runtime_set_suspended(dev);
-	clk_disable_unprepare(priv->iclk);
+	clk_disable_unprepare(priv->clk);
 	rcar_gyroadc_deinit_supplies(indio_dev);
 
 	return 0;
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 22b7c93..e09233b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -49,19 +49,66 @@
 /* STM32 F4 maximum analog clock rate (from datasheet) */
 #define STM32F4_ADC_MAX_CLK_RATE	36000000
 
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV			BIT(18)
+#define STM32H7_EOC_MST			BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT		18
+#define STM32H7_PRESC_MASK		GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT		16
+#define STM32H7_CKMODE_MASK		GENMASK(17, 16)
+
+/* STM32 H7 maximum analog clock rate (from datasheet) */
+#define STM32H7_ADC_MAX_CLK_RATE	72000000
+
+/**
+ * stm32_adc_common_regs - stm32 common registers, compatible dependent data
+ * @csr:	common status register offset
+ * @eoc1_msk:	adc1 end of conversion flag in @csr
+ * @eoc2_msk:	adc2 end of conversion flag in @csr
+ * @eoc3_msk:	adc3 end of conversion flag in @csr
+ */
+struct stm32_adc_common_regs {
+	u32 csr;
+	u32 eoc1_msk;
+	u32 eoc2_msk;
+	u32 eoc3_msk;
+};
+
+struct stm32_adc_priv;
+
+/**
+ * stm32_adc_priv_cfg - stm32 core compatible configuration data
+ * @regs:	common registers for all instances
+ * @clk_sel:	clock selection routine
+ */
+struct stm32_adc_priv_cfg {
+	const struct stm32_adc_common_regs *regs;
+	int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
+};
+
 /**
  * struct stm32_adc_priv - stm32 ADC core private data
  * @irq:		irq for ADC block
  * @domain:		irq domain reference
  * @aclk:		clock reference for the analog circuitry
+ * @bclk:		bus clock common for all ADCs, depends on part used
  * @vref:		regulator reference
+ * @cfg:		compatible configuration data
  * @common:		common data for all ADC instances
  */
 struct stm32_adc_priv {
 	int				irq;
 	struct irq_domain		*domain;
 	struct clk			*aclk;
+	struct clk			*bclk;
 	struct regulator		*vref;
+	const struct stm32_adc_priv_cfg	*cfg;
 	struct stm32_adc_common		common;
 };
 
@@ -85,14 +132,23 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
 	u32 val;
 	int i;
 
+	/* stm32f4 has one clk input for analog (mandatory), enforce it here */
+	if (!priv->aclk) {
+		dev_err(&pdev->dev, "No 'adc' clock found\n");
+		return -ENOENT;
+	}
+
 	rate = clk_get_rate(priv->aclk);
 	for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
 		if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
 			break;
 	}
-	if (i >= ARRAY_SIZE(stm32f4_pclk_div))
+	if (i >= ARRAY_SIZE(stm32f4_pclk_div)) {
+		dev_err(&pdev->dev, "adc clk selection failed\n");
 		return -EINVAL;
+	}
 
+	priv->common.rate = rate;
 	val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
 	val &= ~STM32F4_ADC_ADCPRE_MASK;
 	val |= i << STM32F4_ADC_ADCPRE_SHIFT;
@@ -104,6 +160,126 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
 	return 0;
 }
 
+/**
+ * struct stm32h7_adc_ck_spec - specification for stm32h7 adc clock
+ * @ckmode: ADC clock mode, Async or sync with prescaler.
+ * @presc: prescaler bitfield for async clock mode
+ * @div: prescaler division ratio
+ */
+struct stm32h7_adc_ck_spec {
+	u32 ckmode;
+	u32 presc;
+	int div;
+};
+
+static const struct stm32h7_adc_ck_spec stm32h7_adc_ckmodes_spec[] = {
+	/* 00: CK_ADC[1..3]: Asynchronous clock modes */
+	{ 0, 0, 1 },
+	{ 0, 1, 2 },
+	{ 0, 2, 4 },
+	{ 0, 3, 6 },
+	{ 0, 4, 8 },
+	{ 0, 5, 10 },
+	{ 0, 6, 12 },
+	{ 0, 7, 16 },
+	{ 0, 8, 32 },
+	{ 0, 9, 64 },
+	{ 0, 10, 128 },
+	{ 0, 11, 256 },
+	/* HCLK used: Synchronous clock modes (1, 2 or 4 prescaler) */
+	{ 1, 0, 1 },
+	{ 2, 0, 2 },
+	{ 3, 0, 4 },
+};
+
+static int stm32h7_adc_clk_sel(struct platform_device *pdev,
+			       struct stm32_adc_priv *priv)
+{
+	u32 ckmode, presc, val;
+	unsigned long rate;
+	int i, div;
+
+	/* stm32h7 bus clock is common for all ADC instances (mandatory) */
+	if (!priv->bclk) {
+		dev_err(&pdev->dev, "No 'bus' clock found\n");
+		return -ENOENT;
+	}
+
+	/*
+	 * stm32h7 can use either 'bus' or 'adc' clock for analog circuitry.
+	 * So the bus clock is mandatory here, and the adc clock is optional.
+	 * If the optional 'adc' clock has been found, try to use it first.
+	 */
+	if (priv->aclk) {
+		/*
+		 * Asynchronous clock modes (e.g. ckmode == 0)
+		 * From spec: PLL output mustn't exceed max rate
+		 */
+		rate = clk_get_rate(priv->aclk);
+
+		for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
+			ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
+			presc = stm32h7_adc_ckmodes_spec[i].presc;
+			div = stm32h7_adc_ckmodes_spec[i].div;
+
+			if (ckmode)
+				continue;
+
+			if ((rate / div) <= STM32H7_ADC_MAX_CLK_RATE)
+				goto out;
+		}
+	}
+
+	/* Synchronous clock modes (e.g. ckmode is 1, 2 or 3) */
+	rate = clk_get_rate(priv->bclk);
+
+	for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
+		ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
+		presc = stm32h7_adc_ckmodes_spec[i].presc;
+		div = stm32h7_adc_ckmodes_spec[i].div;
+
+		if (!ckmode)
+			continue;
+
+		if ((rate / div) <= STM32H7_ADC_MAX_CLK_RATE)
+			goto out;
+	}
+
+	dev_err(&pdev->dev, "adc clk selection failed\n");
+	return -EINVAL;
+
+out:
+	/* rate used later by each ADC instance to control BOOST mode */
+	priv->common.rate = rate;
+
+	/* Set common clock mode and prescaler */
+	val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
+	val &= ~(STM32H7_CKMODE_MASK | STM32H7_PRESC_MASK);
+	val |= ckmode << STM32H7_CKMODE_SHIFT;
+	val |= presc << STM32H7_PRESC_SHIFT;
+	writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
+
+	dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
+		ckmode ? "bus" : "adc", div, rate / (div * 1000));
+
+	return 0;
+}
+
+/* STM32F4 common registers definitions */
+static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
+	.csr = STM32F4_ADC_CSR,
+	.eoc1_msk = STM32F4_EOC1,
+	.eoc2_msk = STM32F4_EOC2,
+	.eoc3_msk = STM32F4_EOC3,
+};
+
+/* STM32H7 common registers definitions */
+static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
+	.csr = STM32H7_ADC_CSR,
+	.eoc1_msk = STM32H7_EOC_MST,
+	.eoc2_msk = STM32H7_EOC_SLV,
+};
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -112,15 +288,15 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
 	u32 status;
 
 	chained_irq_enter(chip, desc);
-	status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
+	status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-	if (status & STM32F4_EOC1)
+	if (status & priv->cfg->regs->eoc1_msk)
 		generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-	if (status & STM32F4_EOC2)
+	if (status & priv->cfg->regs->eoc2_msk)
 		generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-	if (status & STM32F4_EOC3)
+	if (status & priv->cfg->regs->eoc3_msk)
 		generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
 	chained_irq_exit(chip, desc);
@@ -186,6 +362,7 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
 static int stm32_adc_probe(struct platform_device *pdev)
 {
 	struct stm32_adc_priv *priv;
+	struct device *dev = &pdev->dev;
 	struct device_node *np = pdev->dev.of_node;
 	struct resource *res;
 	int ret;
@@ -197,6 +374,9 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->cfg = (const struct stm32_adc_priv_cfg *)
+		of_match_device(dev->driver->of_match_table, dev)->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->common.base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv->common.base))
@@ -227,25 +407,48 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	priv->aclk = devm_clk_get(&pdev->dev, "adc");
 	if (IS_ERR(priv->aclk)) {
 		ret = PTR_ERR(priv->aclk);
-		dev_err(&pdev->dev, "Can't get 'adc' clock\n");
-		goto err_regulator_disable;
+		if (ret == -ENOENT) {
+			priv->aclk = NULL;
+		} else {
+			dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+			goto err_regulator_disable;
+		}
 	}
 
-	ret = clk_prepare_enable(priv->aclk);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "adc clk enable failed\n");
-		goto err_regulator_disable;
+	if (priv->aclk) {
+		ret = clk_prepare_enable(priv->aclk);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "adc clk enable failed\n");
+			goto err_regulator_disable;
+		}
 	}
 
-	ret = stm32f4_adc_clk_sel(pdev, priv);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "adc clk selection failed\n");
-		goto err_clk_disable;
+	priv->bclk = devm_clk_get(&pdev->dev, "bus");
+	if (IS_ERR(priv->bclk)) {
+		ret = PTR_ERR(priv->bclk);
+		if (ret == -ENOENT) {
+			priv->bclk = NULL;
+		} else {
+			dev_err(&pdev->dev, "Can't get 'bus' clock\n");
+			goto err_aclk_disable;
+		}
 	}
 
+	if (priv->bclk) {
+		ret = clk_prepare_enable(priv->bclk);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "adc clk enable failed\n");
+			goto err_aclk_disable;
+		}
+	}
+
+	ret = priv->cfg->clk_sel(pdev, priv);
+	if (ret < 0)
+		goto err_bclk_disable;
+
 	ret = stm32_adc_irq_probe(pdev, priv);
 	if (ret < 0)
-		goto err_clk_disable;
+		goto err_bclk_disable;
 
 	platform_set_drvdata(pdev, &priv->common);
 
@@ -260,8 +463,13 @@ static int stm32_adc_probe(struct platform_device *pdev)
 err_irq_remove:
 	stm32_adc_irq_remove(pdev, priv);
 
-err_clk_disable:
-	clk_disable_unprepare(priv->aclk);
+err_bclk_disable:
+	if (priv->bclk)
+		clk_disable_unprepare(priv->bclk);
+
+err_aclk_disable:
+	if (priv->aclk)
+		clk_disable_unprepare(priv->aclk);
 
 err_regulator_disable:
 	regulator_disable(priv->vref);
@@ -276,15 +484,34 @@ static int stm32_adc_remove(struct platform_device *pdev)
 
 	of_platform_depopulate(&pdev->dev);
 	stm32_adc_irq_remove(pdev, priv);
-	clk_disable_unprepare(priv->aclk);
+	if (priv->bclk)
+		clk_disable_unprepare(priv->bclk);
+	if (priv->aclk)
+		clk_disable_unprepare(priv->aclk);
 	regulator_disable(priv->vref);
 
 	return 0;
 }
 
+static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
+	.regs = &stm32f4_adc_common_regs,
+	.clk_sel = stm32f4_adc_clk_sel,
+};
+
+static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
+	.regs = &stm32h7_adc_common_regs,
+	.clk_sel = stm32h7_adc_clk_sel,
+};
+
 static const struct of_device_id stm32_adc_of_match[] = {
-	{ .compatible = "st,stm32f4-adc-core" },
-	{},
+	{
+		.compatible = "st,stm32f4-adc-core",
+		.data = (void *)&stm32f4_adc_priv_cfg
+	}, {
+		.compatible = "st,stm32h7-adc-core",
+		.data = (void *)&stm32h7_adc_priv_cfg
+	}, {
+	},
 };
 MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
 
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 2ec7abb..250ee95 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -43,11 +43,13 @@
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:		control registers base cpu addr
  * @phys_base:		control registers base physical addr
+ * @rate:		clock rate used for analog circuitry
  * @vref_mv:		vref voltage (mv)
  */
 struct stm32_adc_common {
 	void __iomem			*base;
 	phys_addr_t			phys_base;
+	unsigned long			rate;
 	int				vref_mv;
 };
 
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index c28e7ff..5bfcc1f 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -31,9 +31,11 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "stm32-adc-core.h"
 
@@ -76,6 +78,78 @@
 #define STM32F4_DMA			BIT(8)
 #define STM32F4_ADON			BIT(0)
 
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR			0x00
+#define STM32H7_ADC_IER			0x04
+#define STM32H7_ADC_CR			0x08
+#define STM32H7_ADC_CFGR		0x0C
+#define STM32H7_ADC_PCSEL		0x1C
+#define STM32H7_ADC_SQR1		0x30
+#define STM32H7_ADC_SQR2		0x34
+#define STM32H7_ADC_SQR3		0x38
+#define STM32H7_ADC_SQR4		0x3C
+#define STM32H7_ADC_DR			0x40
+#define STM32H7_ADC_CALFACT		0xC4
+#define STM32H7_ADC_CALFACT2		0xC8
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32H7_EOC			BIT(2)
+#define STM32H7_ADRDY			BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE			STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL			BIT(31)
+#define STM32H7_ADCALDIF		BIT(30)
+#define STM32H7_DEEPPWD			BIT(29)
+#define STM32H7_ADVREGEN		BIT(28)
+#define STM32H7_LINCALRDYW6		BIT(27)
+#define STM32H7_LINCALRDYW5		BIT(26)
+#define STM32H7_LINCALRDYW4		BIT(25)
+#define STM32H7_LINCALRDYW3		BIT(24)
+#define STM32H7_LINCALRDYW2		BIT(23)
+#define STM32H7_LINCALRDYW1		BIT(22)
+#define STM32H7_ADCALLIN		BIT(16)
+#define STM32H7_BOOST			BIT(8)
+#define STM32H7_ADSTP			BIT(4)
+#define STM32H7_ADSTART			BIT(2)
+#define STM32H7_ADDIS			BIT(1)
+#define STM32H7_ADEN			BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT		10
+#define STM32H7_EXTEN_MASK		GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT		5
+#define STM32H7_EXTSEL_MASK		GENMASK(9, 5)
+#define STM32H7_RES_SHIFT		2
+#define STM32H7_RES_MASK		GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT		0
+#define STM32H7_DMNGT_MASK		GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+	STM32H7_DMNGT_DR_ONLY,		/* Regular data in DR only */
+	STM32H7_DMNGT_DMA_ONESHOT,	/* DMA one shot mode */
+	STM32H7_DMNGT_DFSDM,		/* DFSDM mode */
+	STM32H7_DMNGT_DMA_CIRC,		/* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT		16
+#define STM32H7_CALFACT_D_MASK		GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT		0
+#define STM32H7_CALFACT_S_MASK		GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT	0
+#define STM32H7_LINCALFACT_MASK		GENMASK(29, 0)
+
+/* Number of linear calibration shadow registers / LINCALRDYW control bits */
+#define STM32H7_LINCALFACT_NUM		6
+
+/* BOOST bit must be set on STM32H7 when ADC clock is above 20MHz */
+#define STM32H7_BOOST_CLKRATE		20000000UL
+
 #define STM32_ADC_MAX_SQ		16	/* SQ1..SQ16 */
 #define STM32_ADC_TIMEOUT_US		100000
 #define STM32_ADC_TIMEOUT	(msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
@@ -121,6 +195,18 @@ struct stm32_adc_trig_info {
 };
 
 /**
+ * struct stm32_adc_calib - optional adc calibration data
+ * @calfact_s: Calibration offset for single ended channels
+ * @calfact_d: Calibration offset in differential
+ * @lincalfact: Linearity calibration factor
+ */
+struct stm32_adc_calib {
+	u32			calfact_s;
+	u32			calfact_d;
+	u32			lincalfact[STM32H7_LINCALFACT_NUM];
+};
+
+/**
  * stm32_adc_regs - stm32 ADC misc registers & bitfield desc
  * @reg:		register offset
  * @mask:		bitfield mask
@@ -133,9 +219,56 @@ struct stm32_adc_regs {
 };
 
 /**
+ * stm32_adc_regspec - stm32 registers definition, compatible dependent data
+ * @dr:			data register offset
+ * @ier_eoc:		interrupt enable register & eocie bitfield
+ * @isr_eoc:		interrupt status register & eoc bitfield
+ * @sqr:		reference to sequence registers array
+ * @exten:		trigger control register & bitfield
+ * @extsel:		trigger selection register & bitfield
+ * @res:		resolution selection register & bitfield
+ */
+struct stm32_adc_regspec {
+	const u32 dr;
+	const struct stm32_adc_regs ier_eoc;
+	const struct stm32_adc_regs isr_eoc;
+	const struct stm32_adc_regs *sqr;
+	const struct stm32_adc_regs exten;
+	const struct stm32_adc_regs extsel;
+	const struct stm32_adc_regs res;
+};
+
+struct stm32_adc;
+
+/**
+ * stm32_adc_cfg - stm32 compatible configuration data
+ * @regs:		registers descriptions
+ * @adc_info:		per instance input channels definitions
+ * @trigs:		external trigger sources
+ * @clk_required:	clock is required
+ * @selfcalib:		optional routine for self-calibration
+ * @prepare:		optional prepare routine (power-up, enable)
+ * @start_conv:		routine to start conversions
+ * @stop_conv:		routine to stop conversions
+ * @unprepare:		optional unprepare routine (disable, power-down)
+ */
+struct stm32_adc_cfg {
+	const struct stm32_adc_regspec	*regs;
+	const struct stm32_adc_info	*adc_info;
+	struct stm32_adc_trig_info	*trigs;
+	bool clk_required;
+	int (*selfcalib)(struct stm32_adc *);
+	int (*prepare)(struct stm32_adc *);
+	void (*start_conv)(struct stm32_adc *, bool dma);
+	void (*stop_conv)(struct stm32_adc *);
+	void (*unprepare)(struct stm32_adc *);
+};
+
+/**
  * struct stm32_adc - private data of each ADC IIO instance
  * @common:		reference to ADC block common data
  * @offset:		ADC instance register offset in ADC block
+ * @cfg:		compatible configuration data
  * @completion:		end of single conversion completion
  * @buffer:		data buffer
  * @clk:		clock for this adc instance
@@ -149,10 +282,13 @@ struct stm32_adc_regs {
  * @rx_buf:		dma rx buffer cpu address
  * @rx_dma_buf:		dma rx buffer bus address
  * @rx_buf_sz:		dma rx buffer size
+ * @pcsel:		bitmask to preselect channels on some devices
+ * @cal:		optional calibration data on some devices
  */
 struct stm32_adc {
 	struct stm32_adc_common	*common;
 	u32			offset;
+	const struct stm32_adc_cfg	*cfg;
 	struct completion	completion;
 	u16			buffer[STM32_ADC_MAX_SQ];
 	struct clk		*clk;
@@ -166,6 +302,8 @@ struct stm32_adc {
 	u8			*rx_buf;
 	dma_addr_t		rx_dma_buf;
 	unsigned int		rx_buf_sz;
+	u32			pcsel;
+	struct stm32_adc_calib	cal;
 };
 
 /**
@@ -180,8 +318,26 @@ struct stm32_adc_chan_spec {
 	const char		*name;
 };
 
-/* Input definitions common for all STM32F4 instances */
-static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
+/**
+ * struct stm32_adc_info - stm32 ADC, per instance config data
+ * @channels:		Reference to stm32 channels spec
+ * @max_channels:	Number of channels
+ * @resolutions:	available resolutions
+ * @num_res:		number of available resolutions
+ */
+struct stm32_adc_info {
+	const struct stm32_adc_chan_spec *channels;
+	int max_channels;
+	const unsigned int *resolutions;
+	const unsigned int num_res;
+};
+
+/*
+ * Input definitions common for all instances:
+ * stm32f4 can have up to 16 channels
+ * stm32h7 can have up to 20 channels
+ */
+static const struct stm32_adc_chan_spec stm32_adc_channels[] = {
 	{ IIO_VOLTAGE, 0, "in0" },
 	{ IIO_VOLTAGE, 1, "in1" },
 	{ IIO_VOLTAGE, 2, "in2" },
@@ -198,6 +354,10 @@ static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
 	{ IIO_VOLTAGE, 13, "in13" },
 	{ IIO_VOLTAGE, 14, "in14" },
 	{ IIO_VOLTAGE, 15, "in15" },
+	{ IIO_VOLTAGE, 16, "in16" },
+	{ IIO_VOLTAGE, 17, "in17" },
+	{ IIO_VOLTAGE, 18, "in18" },
+	{ IIO_VOLTAGE, 19, "in19" },
 };
 
 static const unsigned int stm32f4_adc_resolutions[] = {
@@ -205,6 +365,25 @@ static const unsigned int stm32f4_adc_resolutions[] = {
 	12, 10, 8, 6,
 };
 
+static const struct stm32_adc_info stm32f4_adc_info = {
+	.channels = stm32_adc_channels,
+	.max_channels = 16,
+	.resolutions = stm32f4_adc_resolutions,
+	.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
+};
+
+static const unsigned int stm32h7_adc_resolutions[] = {
+	/* sorted values so the index matches RES[2:0] in STM32H7_ADC_CFGR */
+	16, 14, 12, 10, 8,
+};
+
+static const struct stm32_adc_info stm32h7_adc_info = {
+	.channels = stm32_adc_channels,
+	.max_channels = 20,
+	.resolutions = stm32h7_adc_resolutions,
+	.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
+};
+
 /**
  * stm32f4_sq - describe regular sequence registers
  * - L: sequence len (register & bit field)
@@ -252,6 +431,69 @@ static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
 	{}, /* sentinel */
 };
 
+static const struct stm32_adc_regspec stm32f4_adc_regspec = {
+	.dr = STM32F4_ADC_DR,
+	.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
+	.isr_eoc = { STM32F4_ADC_SR, STM32F4_EOC },
+	.sqr = stm32f4_sq,
+	.exten = { STM32F4_ADC_CR2, STM32F4_EXTEN_MASK, STM32F4_EXTEN_SHIFT },
+	.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
+		    STM32F4_EXTSEL_SHIFT },
+	.res = { STM32F4_ADC_CR1, STM32F4_RES_MASK, STM32F4_RES_SHIFT },
+};
+
+static const struct stm32_adc_regs stm32h7_sq[STM32_ADC_MAX_SQ + 1] = {
+	/* L: len bit field description to be kept as first element */
+	{ STM32H7_ADC_SQR1, GENMASK(3, 0), 0 },
+	/* SQ1..SQ16 registers & bit fields (reg, mask, shift) */
+	{ STM32H7_ADC_SQR1, GENMASK(10, 6), 6 },
+	{ STM32H7_ADC_SQR1, GENMASK(16, 12), 12 },
+	{ STM32H7_ADC_SQR1, GENMASK(22, 18), 18 },
+	{ STM32H7_ADC_SQR1, GENMASK(28, 24), 24 },
+	{ STM32H7_ADC_SQR2, GENMASK(4, 0), 0 },
+	{ STM32H7_ADC_SQR2, GENMASK(10, 6), 6 },
+	{ STM32H7_ADC_SQR2, GENMASK(16, 12), 12 },
+	{ STM32H7_ADC_SQR2, GENMASK(22, 18), 18 },
+	{ STM32H7_ADC_SQR2, GENMASK(28, 24), 24 },
+	{ STM32H7_ADC_SQR3, GENMASK(4, 0), 0 },
+	{ STM32H7_ADC_SQR3, GENMASK(10, 6), 6 },
+	{ STM32H7_ADC_SQR3, GENMASK(16, 12), 12 },
+	{ STM32H7_ADC_SQR3, GENMASK(22, 18), 18 },
+	{ STM32H7_ADC_SQR3, GENMASK(28, 24), 24 },
+	{ STM32H7_ADC_SQR4, GENMASK(4, 0), 0 },
+	{ STM32H7_ADC_SQR4, GENMASK(10, 6), 6 },
+};
+
+/* STM32H7 external trigger sources for all instances */
+static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
+	{ TIM1_CH1, STM32_EXT0 },
+	{ TIM1_CH2, STM32_EXT1 },
+	{ TIM1_CH3, STM32_EXT2 },
+	{ TIM2_CH2, STM32_EXT3 },
+	{ TIM3_TRGO, STM32_EXT4 },
+	{ TIM4_CH4, STM32_EXT5 },
+	{ TIM8_TRGO, STM32_EXT7 },
+	{ TIM8_TRGO2, STM32_EXT8 },
+	{ TIM1_TRGO, STM32_EXT9 },
+	{ TIM1_TRGO2, STM32_EXT10 },
+	{ TIM2_TRGO, STM32_EXT11 },
+	{ TIM4_TRGO, STM32_EXT12 },
+	{ TIM6_TRGO, STM32_EXT13 },
+	{ TIM3_CH4, STM32_EXT15 },
+	{},
+};
+
+static const struct stm32_adc_regspec stm32h7_adc_regspec = {
+	.dr = STM32H7_ADC_DR,
+	.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
+	.isr_eoc = { STM32H7_ADC_ISR, STM32H7_EOC },
+	.sqr = stm32h7_sq,
+	.exten = { STM32H7_ADC_CFGR, STM32H7_EXTEN_MASK, STM32H7_EXTEN_SHIFT },
+	.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
+		    STM32H7_EXTSEL_SHIFT },
+	.res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT },
+};
+
 /**
  * STM32 ADC registers access routines
  * @adc: stm32 adc instance
@@ -265,6 +507,12 @@ static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
 	return readl_relaxed(adc->common->base + adc->offset + reg);
 }
 
+#define stm32_adc_readl_addr(addr)	stm32_adc_readl(adc, addr)
+
+#define stm32_adc_readl_poll_timeout(reg, val, cond, sleep_us, timeout_us) \
+	readx_poll_timeout(stm32_adc_readl_addr, reg, val, \
+			   cond, sleep_us, timeout_us)
+
 static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
 {
 	return readw_relaxed(adc->common->base + adc->offset + reg);
@@ -299,7 +547,8 @@ static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
  */
 static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
 {
-	stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+	stm32_adc_set_bits(adc, adc->cfg->regs->ier_eoc.reg,
+			   adc->cfg->regs->ier_eoc.mask);
 };
 
 /**
@@ -308,19 +557,22 @@ static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
  */
 static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
 {
-	stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+	stm32_adc_clr_bits(adc, adc->cfg->regs->ier_eoc.reg,
+			   adc->cfg->regs->ier_eoc.mask);
 }
 
 static void stm32_adc_set_res(struct stm32_adc *adc)
 {
-	u32 val = stm32_adc_readl(adc, STM32F4_ADC_CR1);
+	const struct stm32_adc_regs *res = &adc->cfg->regs->res;
+	u32 val;
 
-	val = (val & ~STM32F4_RES_MASK) | (adc->res << STM32F4_RES_SHIFT);
-	stm32_adc_writel(adc, STM32F4_ADC_CR1, val);
+	val = stm32_adc_readl(adc, res->reg);
+	val = (val & ~res->mask) | (adc->res << res->shift);
+	stm32_adc_writel(adc, res->reg, val);
 }
 
 /**
- * stm32_adc_start_conv() - Start conversions for regular channels.
+ * stm32f4_adc_start_conv() - Start conversions for regular channels.
  * @adc: stm32 adc instance
  * @dma: use dma to transfer conversion result
  *
@@ -329,7 +581,7 @@ static void stm32_adc_set_res(struct stm32_adc *adc)
  * conversions, in IIO buffer modes. Otherwise, use ADC interrupt with direct
  * DR read instead (e.g. read_raw, or triggered buffer mode without DMA).
  */
-static void stm32_adc_start_conv(struct stm32_adc *adc, bool dma)
+static void stm32f4_adc_start_conv(struct stm32_adc *adc, bool dma)
 {
 	stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
 
@@ -347,7 +599,7 @@ static void stm32_adc_start_conv(struct stm32_adc *adc, bool dma)
 		stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
 }
 
-static void stm32_adc_stop_conv(struct stm32_adc *adc)
+static void stm32f4_adc_stop_conv(struct stm32_adc *adc)
 {
 	stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
 	stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
@@ -357,6 +609,324 @@ static void stm32_adc_stop_conv(struct stm32_adc *adc)
 			   STM32F4_ADON | STM32F4_DMA | STM32F4_DDS);
 }
 
+static void stm32h7_adc_start_conv(struct stm32_adc *adc, bool dma)
+{
+	enum stm32h7_adc_dmngt dmngt;
+	unsigned long flags;
+	u32 val;
+
+	if (dma)
+		dmngt = STM32H7_DMNGT_DMA_CIRC;
+	else
+		dmngt = STM32H7_DMNGT_DR_ONLY;
+
+	spin_lock_irqsave(&adc->lock, flags);
+	val = stm32_adc_readl(adc, STM32H7_ADC_CFGR);
+	val = (val & ~STM32H7_DMNGT_MASK) | (dmngt << STM32H7_DMNGT_SHIFT);
+	stm32_adc_writel(adc, STM32H7_ADC_CFGR, val);
+	spin_unlock_irqrestore(&adc->lock, flags);
+
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTART);
+}
+
+static void stm32h7_adc_stop_conv(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int ret;
+	u32 val;
+
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTP);
+
+	ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+					   !(val & (STM32H7_ADSTART)),
+					   100, STM32_ADC_TIMEOUT_US);
+	if (ret)
+		dev_warn(&indio_dev->dev, "stop failed\n");
+
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR, STM32H7_DMNGT_MASK);
+}
+
+static void stm32h7_adc_exit_pwr_down(struct stm32_adc *adc)
+{
+	/* Exit deep power down, then enable ADC voltage regulator */
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADVREGEN);
+
+	if (adc->common->rate > STM32H7_BOOST_CLKRATE)
+		stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST);
+
+	/* Wait for startup time */
+	usleep_range(10, 20);
+}
+
+static void stm32h7_adc_enter_pwr_down(struct stm32_adc *adc)
+{
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_BOOST);
+
+	/* Setting DEEPPWD disables ADC vreg and clears ADVREGEN */
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_DEEPPWD);
+}
+
+static int stm32h7_adc_enable(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int ret;
+	u32 val;
+
+	/* Clear ADRDY by writing one, then enable ADC */
+	stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
+
+	/* Poll for ADRDY to be set (after adc startup time) */
+	ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_ISR, val,
+					   val & STM32H7_ADRDY,
+					   100, STM32_ADC_TIMEOUT_US);
+	if (ret) {
+		stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
+		dev_err(&indio_dev->dev, "Failed to enable ADC\n");
+	}
+
+	return ret;
+}
+
+static void stm32h7_adc_disable(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int ret;
+	u32 val;
+
+	/* Disable ADC and wait until it's effectively disabled */
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
+	ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+					   !(val & STM32H7_ADEN), 100,
+					   STM32_ADC_TIMEOUT_US);
+	if (ret)
+		dev_warn(&indio_dev->dev, "Failed to disable\n");
+}
+
+/**
+ * stm32h7_adc_read_selfcalib() - read calibration shadow regs, save result
+ * @adc: stm32 adc instance
+ */
+static int stm32h7_adc_read_selfcalib(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int i, ret;
+	u32 lincalrdyw_mask, val;
+
+	/* Enable adc so LINCALRDYW1..6 bits are writable */
+	ret = stm32h7_adc_enable(adc);
+	if (ret)
+		return ret;
+
+	/* Read linearity calibration */
+	lincalrdyw_mask = STM32H7_LINCALRDYW6;
+	for (i = STM32H7_LINCALFACT_NUM - 1; i >= 0; i--) {
+		/* Clear STM32H7_LINCALRDYW[6..1]: transfer calib to CALFACT2 */
+		stm32_adc_clr_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
+
+		/* Poll: wait for calib data to be ready in CALFACT2 register */
+		ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+						   !(val & lincalrdyw_mask),
+						   100, STM32_ADC_TIMEOUT_US);
+		if (ret) {
+			dev_err(&indio_dev->dev, "Failed to read calfact\n");
+			goto disable;
+		}
+
+		val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT2);
+		adc->cal.lincalfact[i] = (val & STM32H7_LINCALFACT_MASK);
+		adc->cal.lincalfact[i] >>= STM32H7_LINCALFACT_SHIFT;
+
+		lincalrdyw_mask >>= 1;
+	}
+
+	/* Read offset calibration */
+	val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT);
+	adc->cal.calfact_s = (val & STM32H7_CALFACT_S_MASK);
+	adc->cal.calfact_s >>= STM32H7_CALFACT_S_SHIFT;
+	adc->cal.calfact_d = (val & STM32H7_CALFACT_D_MASK);
+	adc->cal.calfact_d >>= STM32H7_CALFACT_D_SHIFT;
+
+disable:
+	stm32h7_adc_disable(adc);
+
+	return ret;
+}
+
+/**
+ * stm32h7_adc_restore_selfcalib() - Restore saved self-calibration result
+ * @adc: stm32 adc instance
+ * Note: ADC must be enabled, with no on-going conversions.
+ */
+static int stm32h7_adc_restore_selfcalib(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int i, ret;
+	u32 lincalrdyw_mask, val;
+
+	val = (adc->cal.calfact_s << STM32H7_CALFACT_S_SHIFT) |
+		(adc->cal.calfact_d << STM32H7_CALFACT_D_SHIFT);
+	stm32_adc_writel(adc, STM32H7_ADC_CALFACT, val);
+
+	lincalrdyw_mask = STM32H7_LINCALRDYW6;
+	for (i = STM32H7_LINCALFACT_NUM - 1; i >= 0; i--) {
+		/*
+		 * Write saved calibration data to shadow registers:
+		 * Write CALFACT2, and set LINCALRDYW[6..1] bit to trigger
+		 * data write. Then poll to wait for complete transfer.
+		 */
+		val = adc->cal.lincalfact[i] << STM32H7_LINCALFACT_SHIFT;
+		stm32_adc_writel(adc, STM32H7_ADC_CALFACT2, val);
+		stm32_adc_set_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
+		ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+						   val & lincalrdyw_mask,
+						   100, STM32_ADC_TIMEOUT_US);
+		if (ret) {
+			dev_err(&indio_dev->dev, "Failed to write calfact\n");
+			return ret;
+		}
+
+		/*
+		 * Read back calibration data, which has two effects:
+		 * - It ensures bits LINCALRDYW[6..1] are kept cleared
+		 *   for the next time calibration needs to be restored.
+		 * - Clearing the bit triggers a read back, so the data just
+		 *   written can be checked for consistency.
+		 */
+		stm32_adc_clr_bits(adc, STM32H7_ADC_CR, lincalrdyw_mask);
+		ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+						   !(val & lincalrdyw_mask),
+						   100, STM32_ADC_TIMEOUT_US);
+		if (ret) {
+			dev_err(&indio_dev->dev, "Failed to read calfact\n");
+			return ret;
+		}
+		val = stm32_adc_readl(adc, STM32H7_ADC_CALFACT2);
+		if (val != adc->cal.lincalfact[i] << STM32H7_LINCALFACT_SHIFT) {
+			dev_err(&indio_dev->dev, "calfact not consistent\n");
+			return -EIO;
+		}
+
+		lincalrdyw_mask >>= 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Fixed timeout value for ADC calibration.
+ * Worst cases:
+ * - low clock frequency
+ * - maximum prescalers
+ * Calibration requires:
+ * - 131,072 ADC clock cycles for the linear calibration
+ * - 20 ADC clock cycles for the offset calibration
+ *
+ * Set to 100ms for now
+ */
+#define STM32H7_ADC_CALIB_TIMEOUT_US		100000
+
+/**
+ * stm32h7_adc_selfcalib() - Procedure to calibrate ADC (from power down)
+ * @adc: stm32 adc instance
+ * Exit from power down, calibrate ADC, then return to power down.
+ */
+static int stm32h7_adc_selfcalib(struct stm32_adc *adc)
+{
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+	int ret;
+	u32 val;
+
+	stm32h7_adc_exit_pwr_down(adc);
+
+	/*
+	 * Select calibration mode:
+	 * - Offset calibration for single ended inputs
+	 * - No linearity calibration (do it later, before reading it)
+	 */
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALDIF);
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADCALLIN);
+
+	/* Start calibration, then wait for completion */
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL);
+	ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+					   !(val & STM32H7_ADCAL), 100,
+					   STM32H7_ADC_CALIB_TIMEOUT_US);
+	if (ret) {
+		dev_err(&indio_dev->dev, "calibration failed\n");
+		goto pwr_dwn;
+	}
+
+	/*
+	 * Select calibration mode, then start calibration:
+	 * - Offset calibration for differential input
+	 * - Linearity calibration (needs to be done only once for single/diff)
+	 *   will run simultaneously with offset calibration.
+	 */
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR,
+			   STM32H7_ADCALDIF | STM32H7_ADCALLIN);
+	stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADCAL);
+	ret = stm32_adc_readl_poll_timeout(STM32H7_ADC_CR, val,
+					   !(val & STM32H7_ADCAL), 100,
+					   STM32H7_ADC_CALIB_TIMEOUT_US);
+	if (ret) {
+		dev_err(&indio_dev->dev, "calibration failed\n");
+		goto pwr_dwn;
+	}
+
+	stm32_adc_clr_bits(adc, STM32H7_ADC_CR,
+			   STM32H7_ADCALDIF | STM32H7_ADCALLIN);
+
+	/* Read calibration result for future reference */
+	ret = stm32h7_adc_read_selfcalib(adc);
+
+pwr_dwn:
+	stm32h7_adc_enter_pwr_down(adc);
+
+	return ret;
+}
+
+/**
+ * stm32h7_adc_prepare() - Leave power down mode to enable ADC.
+ * @adc: stm32 adc instance
+ * Leave power down mode.
+ * Enable ADC.
+ * Restore calibration data.
+ * Pre-select channels that may be used in PCSEL (required by input MUX / IO).
+ */
+static int stm32h7_adc_prepare(struct stm32_adc *adc)
+{
+	int ret;
+
+	stm32h7_adc_exit_pwr_down(adc);
+
+	ret = stm32h7_adc_enable(adc);
+	if (ret)
+		goto pwr_dwn;
+
+	ret = stm32h7_adc_restore_selfcalib(adc);
+	if (ret)
+		goto disable;
+
+	stm32_adc_writel(adc, STM32H7_ADC_PCSEL, adc->pcsel);
+
+	return 0;
+
+disable:
+	stm32h7_adc_disable(adc);
+pwr_dwn:
+	stm32h7_adc_enter_pwr_down(adc);
+
+	return ret;
+}
+
+static void stm32h7_adc_unprepare(struct stm32_adc *adc)
+{
+	stm32h7_adc_disable(adc);
+	stm32h7_adc_enter_pwr_down(adc);
+}
+
 /**
  * stm32_adc_conf_scan_seq() - Build regular channels scan sequence
  * @indio_dev: IIO device
@@ -371,6 +941,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
 				   const unsigned long *scan_mask)
 {
 	struct stm32_adc *adc = iio_priv(indio_dev);
+	const struct stm32_adc_regs *sqr = adc->cfg->regs->sqr;
 	const struct iio_chan_spec *chan;
 	u32 val, bit;
 	int i = 0;
@@ -388,20 +959,20 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
 		dev_dbg(&indio_dev->dev, "%s chan %d to SQ%d\n",
 			__func__, chan->channel, i);
 
-		val = stm32_adc_readl(adc, stm32f4_sq[i].reg);
-		val &= ~stm32f4_sq[i].mask;
-		val |= chan->channel << stm32f4_sq[i].shift;
-		stm32_adc_writel(adc, stm32f4_sq[i].reg, val);
+		val = stm32_adc_readl(adc, sqr[i].reg);
+		val &= ~sqr[i].mask;
+		val |= chan->channel << sqr[i].shift;
+		stm32_adc_writel(adc, sqr[i].reg, val);
 	}
 
 	if (!i)
 		return -EINVAL;
 
 	/* Sequence len */
-	val = stm32_adc_readl(adc, stm32f4_sq[0].reg);
-	val &= ~stm32f4_sq[0].mask;
-	val |= ((i - 1) << stm32f4_sq[0].shift);
-	stm32_adc_writel(adc, stm32f4_sq[0].reg, val);
+	val = stm32_adc_readl(adc, sqr[0].reg);
+	val &= ~sqr[0].mask;
+	val |= ((i - 1) << sqr[0].shift);
+	stm32_adc_writel(adc, sqr[0].reg, val);
 
 	return 0;
 }
@@ -412,19 +983,21 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
  *
  * Returns trigger extsel value, if trig matches, -EINVAL otherwise.
  */
-static int stm32_adc_get_trig_extsel(struct iio_trigger *trig)
+static int stm32_adc_get_trig_extsel(struct iio_dev *indio_dev,
+				     struct iio_trigger *trig)
 {
+	struct stm32_adc *adc = iio_priv(indio_dev);
 	int i;
 
 	/* lookup triggers registered by stm32 timer trigger driver */
-	for (i = 0; stm32f4_adc_trigs[i].name; i++) {
+	for (i = 0; adc->cfg->trigs[i].name; i++) {
 		/**
 		 * Checking both stm32 timer trigger type and trig name
 		 * should be safe against arbitrary trigger names.
 		 */
 		if (is_stm32_timer_trigger(trig) &&
-		    !strcmp(stm32f4_adc_trigs[i].name, trig->name)) {
-			return stm32f4_adc_trigs[i].extsel;
+		    !strcmp(adc->cfg->trigs[i].name, trig->name)) {
+			return adc->cfg->trigs[i].extsel;
 		}
 	}
 
@@ -449,7 +1022,7 @@ static int stm32_adc_set_trig(struct iio_dev *indio_dev,
 	int ret;
 
 	if (trig) {
-		ret = stm32_adc_get_trig_extsel(trig);
+		ret = stm32_adc_get_trig_extsel(indio_dev, trig);
 		if (ret < 0)
 			return ret;
 
@@ -459,11 +1032,11 @@ static int stm32_adc_set_trig(struct iio_dev *indio_dev,
 	}
 
 	spin_lock_irqsave(&adc->lock, flags);
-	val = stm32_adc_readl(adc, STM32F4_ADC_CR2);
-	val &= ~(STM32F4_EXTEN_MASK | STM32F4_EXTSEL_MASK);
-	val |= exten << STM32F4_EXTEN_SHIFT;
-	val |= extsel << STM32F4_EXTSEL_SHIFT;
-	stm32_adc_writel(adc, STM32F4_ADC_CR2, val);
+	val = stm32_adc_readl(adc, adc->cfg->regs->exten.reg);
+	val &= ~(adc->cfg->regs->exten.mask | adc->cfg->regs->extsel.mask);
+	val |= exten << adc->cfg->regs->exten.shift;
+	val |= extsel << adc->cfg->regs->extsel.shift;
+	stm32_adc_writel(adc, adc->cfg->regs->exten.reg, val);
 	spin_unlock_irqrestore(&adc->lock, flags);
 
 	return 0;
@@ -515,6 +1088,7 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
 				 int *res)
 {
 	struct stm32_adc *adc = iio_priv(indio_dev);
+	const struct stm32_adc_regspec *regs = adc->cfg->regs;
 	long timeout;
 	u32 val;
 	int ret;
@@ -523,21 +1097,27 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
 
 	adc->bufi = 0;
 
+	if (adc->cfg->prepare) {
+		ret = adc->cfg->prepare(adc);
+		if (ret)
+			return ret;
+	}
+
 	/* Program chan number in regular sequence (SQ1) */
-	val = stm32_adc_readl(adc, stm32f4_sq[1].reg);
-	val &= ~stm32f4_sq[1].mask;
-	val |= chan->channel << stm32f4_sq[1].shift;
-	stm32_adc_writel(adc, stm32f4_sq[1].reg, val);
+	val = stm32_adc_readl(adc, regs->sqr[1].reg);
+	val &= ~regs->sqr[1].mask;
+	val |= chan->channel << regs->sqr[1].shift;
+	stm32_adc_writel(adc, regs->sqr[1].reg, val);
 
 	/* Set regular sequence len (0 for 1 conversion) */
-	stm32_adc_clr_bits(adc, stm32f4_sq[0].reg, stm32f4_sq[0].mask);
+	stm32_adc_clr_bits(adc, regs->sqr[0].reg, regs->sqr[0].mask);
 
 	/* Trigger detection disabled (conversion can be launched in SW) */
-	stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+	stm32_adc_clr_bits(adc, regs->exten.reg, regs->exten.mask);
 
 	stm32_adc_conv_irq_enable(adc);
 
-	stm32_adc_start_conv(adc, false);
+	adc->cfg->start_conv(adc, false);
 
 	timeout = wait_for_completion_interruptible_timeout(
 					&adc->completion, STM32_ADC_TIMEOUT);
@@ -550,10 +1130,13 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
 		ret = IIO_VAL_INT;
 	}
 
-	stm32_adc_stop_conv(adc);
+	adc->cfg->stop_conv(adc);
 
 	stm32_adc_conv_irq_disable(adc);
 
+	if (adc->cfg->unprepare)
+		adc->cfg->unprepare(adc);
+
 	return ret;
 }
 
@@ -590,11 +1173,12 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
 {
 	struct stm32_adc *adc = data;
 	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
-	u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
+	const struct stm32_adc_regspec *regs = adc->cfg->regs;
+	u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg);
 
-	if (status & STM32F4_EOC) {
+	if (status & regs->isr_eoc.mask) {
 		/* Reading DR also clears EOC status flag */
-		adc->buffer[adc->bufi] = stm32_adc_readw(adc, STM32F4_ADC_DR);
+		adc->buffer[adc->bufi] = stm32_adc_readw(adc, regs->dr);
 		if (iio_buffer_enabled(indio_dev)) {
 			adc->bufi++;
 			if (adc->bufi >= adc->num_conv) {
@@ -621,7 +1205,7 @@ static irqreturn_t stm32_adc_isr(int irq, void *data)
 static int stm32_adc_validate_trigger(struct iio_dev *indio_dev,
 				      struct iio_trigger *trig)
 {
-	return stm32_adc_get_trig_extsel(trig) < 0 ? -EINVAL : 0;
+	return stm32_adc_get_trig_extsel(indio_dev, trig) < 0 ? -EINVAL : 0;
 }
 
 static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
@@ -777,10 +1361,16 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	int ret;
 
+	if (adc->cfg->prepare) {
+		ret = adc->cfg->prepare(adc);
+		if (ret)
+			return ret;
+	}
+
 	ret = stm32_adc_set_trig(indio_dev, indio_dev->trig);
 	if (ret) {
 		dev_err(&indio_dev->dev, "Can't set trigger\n");
-		return ret;
+		goto err_unprepare;
 	}
 
 	ret = stm32_adc_dma_start(indio_dev);
@@ -799,7 +1389,7 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
 	if (!adc->dma_chan)
 		stm32_adc_conv_irq_enable(adc);
 
-	stm32_adc_start_conv(adc, !!adc->dma_chan);
+	adc->cfg->start_conv(adc, !!adc->dma_chan);
 
 	return 0;
 
@@ -808,6 +1398,9 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
 		dmaengine_terminate_all(adc->dma_chan);
 err_clr_trig:
 	stm32_adc_set_trig(indio_dev, NULL);
+err_unprepare:
+	if (adc->cfg->unprepare)
+		adc->cfg->unprepare(adc);
 
 	return ret;
 }
@@ -817,7 +1410,7 @@ static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
 	struct stm32_adc *adc = iio_priv(indio_dev);
 	int ret;
 
-	stm32_adc_stop_conv(adc);
+	adc->cfg->stop_conv(adc);
 	if (!adc->dma_chan)
 		stm32_adc_conv_irq_disable(adc);
 
@@ -831,6 +1424,9 @@ static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
 	if (stm32_adc_set_trig(indio_dev, NULL))
 		dev_err(&indio_dev->dev, "Can't clear trigger\n");
 
+	if (adc->cfg->unprepare)
+		adc->cfg->unprepare(adc);
+
 	return ret;
 }
 
@@ -895,12 +1491,12 @@ static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
 	u32 res;
 
 	if (of_property_read_u32(node, "assigned-resolution-bits", &res))
-		res = stm32f4_adc_resolutions[0];
+		res = adc->cfg->adc_info->resolutions[0];
 
-	for (i = 0; i < ARRAY_SIZE(stm32f4_adc_resolutions); i++)
-		if (res == stm32f4_adc_resolutions[i])
+	for (i = 0; i < adc->cfg->adc_info->num_res; i++)
+		if (res == adc->cfg->adc_info->resolutions[i])
 			break;
-	if (i >= ARRAY_SIZE(stm32f4_adc_resolutions)) {
+	if (i >= adc->cfg->adc_info->num_res) {
 		dev_err(&indio_dev->dev, "Bad resolution: %u bits\n", res);
 		return -EINVAL;
 	}
@@ -926,14 +1522,19 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
 	chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
 	chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
 	chan->scan_type.sign = 'u';
-	chan->scan_type.realbits = stm32f4_adc_resolutions[adc->res];
+	chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
 	chan->scan_type.storagebits = 16;
 	chan->ext_info = stm32_adc_ext_info;
+
+	/* pre-build selected channels mask */
+	adc->pcsel |= BIT(chan->channel);
 }
 
 static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 {
 	struct device_node *node = indio_dev->dev.of_node;
+	struct stm32_adc *adc = iio_priv(indio_dev);
+	const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
 	struct property *prop;
 	const __be32 *cur;
 	struct iio_chan_spec *channels;
@@ -942,7 +1543,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 
 	num_channels = of_property_count_u32_elems(node, "st,adc-channels");
 	if (num_channels < 0 ||
-	    num_channels >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+	    num_channels >= adc_info->max_channels) {
 		dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
 		return num_channels < 0 ? num_channels : -EINVAL;
 	}
@@ -953,12 +1554,12 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
 		return -ENOMEM;
 
 	of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
-		if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+		if (val >= adc_info->max_channels) {
 			dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
 			return -EINVAL;
 		}
 		stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
-					&stm32f4_adc123_channels[val],
+					&adc_info->channels[val],
 					scan_index);
 		scan_index++;
 	}
@@ -990,7 +1591,7 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
 	/* Configure DMA channel to read data register */
 	memset(&config, 0, sizeof(config));
 	config.src_addr = (dma_addr_t)adc->common->phys_base;
-	config.src_addr += adc->offset + STM32F4_ADC_DR;
+	config.src_addr += adc->offset + adc->cfg->regs->dr;
 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 
 	ret = dmaengine_slave_config(adc->dma_chan, &config);
@@ -1011,6 +1612,7 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
 static int stm32_adc_probe(struct platform_device *pdev)
 {
 	struct iio_dev *indio_dev;
+	struct device *dev = &pdev->dev;
 	struct stm32_adc *adc;
 	int ret;
 
@@ -1025,6 +1627,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	adc->common = dev_get_drvdata(pdev->dev.parent);
 	spin_lock_init(&adc->lock);
 	init_completion(&adc->completion);
+	adc->cfg = (const struct stm32_adc_cfg *)
+		of_match_device(dev->driver->of_match_table, dev)->data;
 
 	indio_dev->name = dev_name(&pdev->dev);
 	indio_dev->dev.parent = &pdev->dev;
@@ -1055,14 +1659,21 @@ static int stm32_adc_probe(struct platform_device *pdev)
 
 	adc->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(adc->clk)) {
-		dev_err(&pdev->dev, "Can't get clock\n");
-		return PTR_ERR(adc->clk);
+		ret = PTR_ERR(adc->clk);
+		if (ret == -ENOENT && !adc->cfg->clk_required) {
+			adc->clk = NULL;
+		} else {
+			dev_err(&pdev->dev, "Can't get clock\n");
+			return ret;
+		}
 	}
 
-	ret = clk_prepare_enable(adc->clk);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "clk enable failed\n");
-		return ret;
+	if (adc->clk) {
+		ret = clk_prepare_enable(adc->clk);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "clk enable failed\n");
+			return ret;
+		}
 	}
 
 	ret = stm32_adc_of_get_resolution(indio_dev);
@@ -1070,6 +1681,12 @@ static int stm32_adc_probe(struct platform_device *pdev)
 		goto err_clk_disable;
 	stm32_adc_set_res(adc);
 
+	if (adc->cfg->selfcalib) {
+		ret = adc->cfg->selfcalib(adc);
+		if (ret)
+			goto err_clk_disable;
+	}
+
 	ret = stm32_adc_chan_of_init(indio_dev);
 	if (ret < 0)
 		goto err_clk_disable;
@@ -1106,7 +1723,8 @@ static int stm32_adc_probe(struct platform_device *pdev)
 		dma_release_channel(adc->dma_chan);
 	}
 err_clk_disable:
-	clk_disable_unprepare(adc->clk);
+	if (adc->clk)
+		clk_disable_unprepare(adc->clk);
 
 	return ret;
 }
@@ -1124,13 +1742,35 @@ static int stm32_adc_remove(struct platform_device *pdev)
 				  adc->rx_buf, adc->rx_dma_buf);
 		dma_release_channel(adc->dma_chan);
 	}
-	clk_disable_unprepare(adc->clk);
+	if (adc->clk)
+		clk_disable_unprepare(adc->clk);
 
 	return 0;
 }
 
+static const struct stm32_adc_cfg stm32f4_adc_cfg = {
+	.regs = &stm32f4_adc_regspec,
+	.adc_info = &stm32f4_adc_info,
+	.trigs = stm32f4_adc_trigs,
+	.clk_required = true,
+	.start_conv = stm32f4_adc_start_conv,
+	.stop_conv = stm32f4_adc_stop_conv,
+};
+
+static const struct stm32_adc_cfg stm32h7_adc_cfg = {
+	.regs = &stm32h7_adc_regspec,
+	.adc_info = &stm32h7_adc_info,
+	.trigs = stm32h7_adc_trigs,
+	.selfcalib = stm32h7_adc_selfcalib,
+	.start_conv = stm32h7_adc_start_conv,
+	.stop_conv = stm32h7_adc_stop_conv,
+	.prepare = stm32h7_adc_prepare,
+	.unprepare = stm32h7_adc_unprepare,
+};
+
 static const struct of_device_id stm32_adc_of_match[] = {
-	{ .compatible = "st,stm32f4-adc" },
+	{ .compatible = "st,stm32f4-adc", .data = (void *)&stm32f4_adc_cfg },
+	{ .compatible = "st,stm32h7-adc", .data = (void *)&stm32h7_adc_cfg },
 	{},
 };
 MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
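
The probe() change above selects every F4/H7 difference through a single const stm32_adc_cfg, looked up via the of_device_id .data pointer. A minimal, self-contained sketch of that compatible-string-to-config lookup pattern (hypothetical names, plain user-space C rather than the driver's API):

#include <stdio.h>
#include <string.h>

/* Hypothetical per-variant config, mirroring the role of struct stm32_adc_cfg */
struct adc_cfg {
	const char *name;
	int has_selfcalib;	/* H7-only feature in the driver above */
	int clk_required;	/* F4-only requirement in the driver above */
};

struct of_match {
	const char *compatible;
	const struct adc_cfg *data;
};

static const struct adc_cfg f4_cfg = { "stm32f4", 0, 1 };
static const struct adc_cfg h7_cfg = { "stm32h7", 1, 0 };

static const struct of_match match_table[] = {
	{ "st,stm32f4-adc", &f4_cfg },
	{ "st,stm32h7-adc", &h7_cfg },
	{ NULL, NULL },		/* sentinel, like the kernel table */
};

/* Stand-in for of_match_device(): return .data for a matching compatible */
static const struct adc_cfg *lookup(const char *compatible)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct adc_cfg *cfg = lookup("st,stm32h7-adc");

	if (cfg)
		printf("%s: selfcalib=%d clk_required=%d\n",
		       cfg->name, cfg->has_selfcalib, cfg->clk_required);
	return 0;
}

With the table in place, adding a new variant only means adding another cfg instance and one table entry, which is exactly what the st,stm32h7-adc line in the hunk above does.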
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index b235273..81d4c39 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -105,6 +105,8 @@ struct sun4i_gpadc_iio {
 	bool				no_irq;
 	/* prevents concurrent reads of temperature and ADC */
 	struct mutex			mutex;
+	struct thermal_zone_device	*tzd;
+	struct device			*sensor_device;
 };
 
 #define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) {		\
@@ -502,7 +504,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
 {
 	struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
 	const struct of_device_id *of_dev;
-	struct thermal_zone_device *tzd;
 	struct resource *mem;
 	void __iomem *base;
 	int ret;
@@ -532,13 +533,14 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
 	if (!IS_ENABLED(CONFIG_THERMAL_OF))
 		return 0;
 
-	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info,
-						   &sun4i_ts_tz_ops);
-	if (IS_ERR(tzd))
+	info->sensor_device = &pdev->dev;
+	info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
+						    info, &sun4i_ts_tz_ops);
+	if (IS_ERR(info->tzd))
 		dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
-			PTR_ERR(tzd));
+			PTR_ERR(info->tzd));
 
-	return PTR_ERR_OR_ZERO(tzd);
+	return PTR_ERR_OR_ZERO(info->tzd);
 }
 
 static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -584,15 +586,15 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
 		 * of_node, and the device from this driver as third argument to
 		 * return the temperature.
 		 */
-		struct thermal_zone_device *tzd;
-		tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0,
-							   info,
-							   &sun4i_ts_tz_ops);
-		if (IS_ERR(tzd)) {
+		info->sensor_device = pdev->dev.parent;
+		info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
+							    0, info,
+							    &sun4i_ts_tz_ops);
+		if (IS_ERR(info->tzd)) {
 			dev_err(&pdev->dev,
 				"could not register thermal sensor: %ld\n",
-				PTR_ERR(tzd));
-			return PTR_ERR(tzd);
+				PTR_ERR(info->tzd));
+			return PTR_ERR(info->tzd);
 		}
 	} else {
 		indio_dev->num_channels =
@@ -688,7 +690,13 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
 
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF))
+
+	if (!IS_ENABLED(CONFIG_THERMAL_OF))
+		return 0;
+
+	thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
+
+	if (!info->no_irq)
 		iio_map_array_unregister(indio_dev);
 
 	return 0;
@@ -700,6 +708,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = {
 	{ "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
 
 static struct platform_driver sun4i_gpadc_driver = {
 	.driver = {
@@ -711,6 +720,7 @@ static struct platform_driver sun4i_gpadc_driver = {
 	.probe = sun4i_gpadc_probe,
 	.remove = sun4i_gpadc_remove,
 };
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
 
 module_platform_driver(sun4i_gpadc_driver);
 
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
new file mode 100644
index 0000000..a355121
--- /dev/null
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -0,0 +1,275 @@
+/**
+ * Copyright (C) 2017 Axis Communications AB
+ *
+ * Driver for Texas Instruments' ADC084S021 ADC chip.
+ * Datasheets can be found here:
+ * http://www.ti.com/lit/ds/symlink/adc084s021.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define ADC084S021_DRIVER_NAME "adc084s021"
+
+struct adc084s021 {
+	struct spi_device *spi;
+	struct spi_message message;
+	struct spi_transfer spi_trans;
+	struct regulator *reg;
+	struct mutex lock;
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache line.
+	 */
+	u16 tx_buf[4] ____cacheline_aligned;
+	__be16 rx_buf[5]; /* First 16-bits are trash */
+};
+
+#define ADC084S021_VOLTAGE_CHANNEL(num)                  \
+	{                                                      \
+		.type = IIO_VOLTAGE,                                 \
+		.channel = (num),                                    \
+		.indexed = 1,                                        \
+		.scan_index = (num),                                 \
+		.scan_type = {                                       \
+			.sign = 'u',                                       \
+			.realbits = 8,                                     \
+			.storagebits = 16,                                 \
+			.shift = 4,                                        \
+			.endianness = IIO_BE,                              \
+		},                                                   \
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),        \
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+	}
+
+static const struct iio_chan_spec adc084s021_channels[] = {
+	ADC084S021_VOLTAGE_CHANNEL(0),
+	ADC084S021_VOLTAGE_CHANNEL(1),
+	ADC084S021_VOLTAGE_CHANNEL(2),
+	ADC084S021_VOLTAGE_CHANNEL(3),
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/**
+ * Read an ADC channel and return its value.
+ *
+ * @adc: The ADC SPI data.
+ * @data: Buffer for converted data.
+ */
+static int adc084s021_adc_conversion(struct adc084s021 *adc, void *data)
+{
+	int n_words = (adc->spi_trans.len >> 1) - 1; /* Discard first word */
+	int ret, i = 0;
+	u16 *p = data;
+
+	/* Do the transfer */
+	ret = spi_sync(adc->spi, &adc->message);
+	if (ret < 0)
+		return ret;
+
+	for (; i < n_words; i++)
+		*(p + i) = adc->rx_buf[i + 1];
+
+	return ret;
+}
+
+static int adc084s021_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *channel, int *val,
+			   int *val2, long mask)
+{
+	struct adc084s021 *adc = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret < 0)
+			return ret;
+
+		ret = regulator_enable(adc->reg);
+		if (ret) {
+			iio_device_release_direct_mode(indio_dev);
+			return ret;
+		}
+
+		adc->tx_buf[0] = channel->channel << 3;
+		ret = adc084s021_adc_conversion(adc, val);
+		iio_device_release_direct_mode(indio_dev);
+		regulator_disable(adc->reg);
+		if (ret < 0)
+			return ret;
+
+		*val = be16_to_cpu(*val);
+		*val = (*val >> channel->scan_type.shift) & 0xff;
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_enable(adc->reg);
+		if (ret)
+			return ret;
+
+		ret = regulator_get_voltage(adc->reg);
+		regulator_disable(adc->reg);
+		if (ret < 0)
+			return ret;
+
+		*val = ret / 1000;
+
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * Read enabled ADC channels and push data to the buffer.
+ *
+ * @irq: The interrupt number (not used).
+ * @pollfunc: Pointer to the poll func.
+ */
+static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
+{
+	struct iio_poll_func *pf = pollfunc;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct adc084s021 *adc = iio_priv(indio_dev);
+	__be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */
+
+	mutex_lock(&adc->lock);
+
+	if (adc084s021_adc_conversion(adc, &data) < 0)
+		dev_err(&adc->spi->dev, "Failed to read data\n");
+
+	iio_push_to_buffers_with_timestamp(indio_dev, data,
+					   iio_get_time_ns(indio_dev));
+	mutex_unlock(&adc->lock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int adc084s021_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct adc084s021 *adc = iio_priv(indio_dev);
+	int scan_index;
+	int i = 0;
+
+	for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+			 indio_dev->masklength) {
+		const struct iio_chan_spec *channel =
+			&indio_dev->channels[scan_index];
+		adc->tx_buf[i++] = channel->channel << 3;
+	}
+	adc->spi_trans.len = 2 + (i * sizeof(__be16)); /* Trash + channels */
+
+	return regulator_enable(adc->reg);
+}
+
+static int adc084s021_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct adc084s021 *adc = iio_priv(indio_dev);
+
+	adc->spi_trans.len = 4; /* Trash + single channel */
+
+	return regulator_disable(adc->reg);
+}
+
+static const struct iio_info adc084s021_info = {
+	.read_raw = adc084s021_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = {
+	.preenable = adc084s021_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = iio_triggered_buffer_predisable,
+	.postdisable = adc084s021_buffer_postdisable,
+};
+
+static int adc084s021_probe(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev;
+	struct adc084s021 *adc;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+	if (!indio_dev) {
+		dev_err(&spi->dev, "Failed to allocate IIO device\n");
+		return -ENOMEM;
+	}
+
+	adc = iio_priv(indio_dev);
+	adc->spi = spi;
+
+	/* Connect the SPI device and the iio dev */
+	spi_set_drvdata(spi, indio_dev);
+
+	/* Initiate the Industrial I/O device */
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->dev.of_node = spi->dev.of_node;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &adc084s021_info;
+	indio_dev->channels = adc084s021_channels;
+	indio_dev->num_channels = ARRAY_SIZE(adc084s021_channels);
+
+	/* Create SPI transfer for channel reads */
+	adc->spi_trans.tx_buf = adc->tx_buf;
+	adc->spi_trans.rx_buf = adc->rx_buf;
+	adc->spi_trans.len = 4; /* Trash + single channel */
+	spi_message_init_with_transfers(&adc->message, &adc->spi_trans, 1);
+
+	adc->reg = devm_regulator_get(&spi->dev, "vref");
+	if (IS_ERR(adc->reg))
+		return PTR_ERR(adc->reg);
+
+	mutex_init(&adc->lock);
+
+	/* Setup triggered buffer with pollfunction */
+	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
+					    adc084s021_buffer_trigger_handler,
+					    &adc084s021_buffer_setup_ops);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to setup triggered buffer\n");
+		return ret;
+	}
+
+	return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id adc084s021_of_match[] = {
+	{ .compatible = "ti,adc084s021", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, adc084s021_of_match);
+
+static const struct spi_device_id adc084s021_id[] = {
+	{ ADC084S021_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, adc084s021_id);
+
+static struct spi_driver adc084s021_driver = {
+	.driver = {
+		.name = ADC084S021_DRIVER_NAME,
+		.of_match_table = of_match_ptr(adc084s021_of_match),
+	},
+	.probe = adc084s021_probe,
+	.id_table = adc084s021_id,
+};
+module_spi_driver(adc084s021_driver);
+
+MODULE_AUTHOR("Mårten Lindahl <martenli@axis.com>");
+MODULE_DESCRIPTION("Texas Instruments ADC084S021");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
new file mode 100644
index 0000000..de4e5ac
--- /dev/null
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -0,0 +1,348 @@
+/*
+ * TI ADC108S102 SPI ADC driver
+ *
+ * Copyright (c) 2013-2015 Intel Corporation.
+ * Copyright (c) 2017 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * This IIO device driver is designed to work with the following
+ * analog to digital converters from Texas Instruments:
+ *  ADC108S102
+ *  ADC128S102
+ * The communication with ADC chip is via the SPI bus (mode 3).
+ */
+
+#include <linux/acpi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/types.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/*
+ * In case of ACPI, we use the hard-wired 5000 mV of the Galileo and IOT2000
+ * boards as default for the reference pin VA. Device tree users encode that
+ * via the vref-supply regulator.
+ */
+#define ADC108S102_VA_MV_ACPI_DEFAULT	5000
+
+/*
+ * By defining the ADC resolution as 12 bits, we can use the same driver for
+ * both the ADC108S102 (10-bit resolution) and ADC128S102 (12-bit resolution)
+ * chips. The ADC108S102 effectively returns a 12-bit result with the 2
+ * least-significant bits unset.
+ */
+#define ADC108S102_BITS		12
+#define ADC108S102_MAX_CHANNELS	8
+
+/*
+ * 16-bit SPI command format:
+ *   [15:14] Ignored
+ *   [13:11] 3-bit channel address
+ *   [10:0]  Ignored
+ */
+#define ADC108S102_CMD(ch)		((u16)(ch) << 11)
+
+/*
+ * 16-bit SPI response format:
+ *   [15:12] Zeros
+ *   [11:0]  12-bit ADC sample (for ADC108S102, [1:0] will always be 0).
+ */
+#define ADC108S102_RES_DATA(res)	((u16)res & GENMASK(11, 0))
+
+struct adc108s102_state {
+	struct spi_device		*spi;
+	struct regulator		*reg;
+	u32				va_millivolt;
+	/* SPI transfer used by triggered buffer handler */
+	struct spi_transfer		ring_xfer;
+	/* SPI transfer used by direct scan */
+	struct spi_transfer		scan_single_xfer;
+	/* SPI message used by ring_xfer SPI transfer */
+	struct spi_message		ring_msg;
+	/* SPI message used by scan_single_xfer SPI transfer */
+	struct spi_message		scan_single_msg;
+
+	/*
+	 * SPI message buffers:
+	 *  tx_buf: |C0|C1|C2|C3|C4|C5|C6|C7|XX|
+	 *  rx_buf: |XX|R0|R1|R2|R3|R4|R5|R6|R7|tt|tt|tt|tt|
+	 *
+	 *  tx_buf: 8 channel read commands, plus 1 dummy command
+	 *  rx_buf: 1 dummy response, 8 channel responses, plus 64-bit timestamp
+	 */
+	__be16				rx_buf[13] ____cacheline_aligned;
+	__be16				tx_buf[9] ____cacheline_aligned;
+};
+
+#define ADC108S102_V_CHAN(index)					\
+	{								\
+		.type = IIO_VOLTAGE,					\
+		.indexed = 1,						\
+		.channel = index,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+			BIT(IIO_CHAN_INFO_SCALE),			\
+		.address = index,					\
+		.scan_index = index,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = ADC108S102_BITS,			\
+			.storagebits = 16,				\
+			.endianness = IIO_BE,				\
+		},							\
+	}
+
+static const struct iio_chan_spec adc108s102_channels[] = {
+	ADC108S102_V_CHAN(0),
+	ADC108S102_V_CHAN(1),
+	ADC108S102_V_CHAN(2),
+	ADC108S102_V_CHAN(3),
+	ADC108S102_V_CHAN(4),
+	ADC108S102_V_CHAN(5),
+	ADC108S102_V_CHAN(6),
+	ADC108S102_V_CHAN(7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+static int adc108s102_update_scan_mode(struct iio_dev *indio_dev,
+		unsigned long const *active_scan_mask)
+{
+	struct adc108s102_state *st = iio_priv(indio_dev);
+	unsigned int bit, cmds;
+
+	/*
+	 * Fill the first entries of tx_buf with a read command for each
+	 * channel enabled for sampling by the triggered buffer.
+	 */
+	cmds = 0;
+	for_each_set_bit(bit, active_scan_mask, ADC108S102_MAX_CHANNELS)
+		st->tx_buf[cmds++] = cpu_to_be16(ADC108S102_CMD(bit));
+
+	/* One dummy command added, to clock in the last response */
+	st->tx_buf[cmds++] = 0x00;
+
+	/* build SPI ring message */
+	st->ring_xfer.tx_buf = &st->tx_buf[0];
+	st->ring_xfer.rx_buf = &st->rx_buf[0];
+	st->ring_xfer.len = cmds * sizeof(st->tx_buf[0]);
+
+	spi_message_init_with_transfers(&st->ring_msg, &st->ring_xfer, 1);
+
+	return 0;
+}
+
+static irqreturn_t adc108s102_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct adc108s102_state *st = iio_priv(indio_dev);
+	int ret;
+
+	ret = spi_sync(st->spi, &st->ring_msg);
+	if (ret < 0)
+		goto out_notify;
+
+	/* Skip the dummy response in the first slot */
+	iio_push_to_buffers_with_timestamp(indio_dev,
+					   (u8 *)&st->rx_buf[1],
+					   iio_get_time_ns(indio_dev));
+
+out_notify:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int adc108s102_scan_direct(struct adc108s102_state *st, unsigned int ch)
+{
+	int ret;
+
+	st->tx_buf[0] = cpu_to_be16(ADC108S102_CMD(ch));
+	ret = spi_sync(st->spi, &st->scan_single_msg);
+	if (ret)
+		return ret;
+
+	/* Skip the dummy response in the first slot */
+	return be16_to_cpu(st->rx_buf[1]);
+}
+
+static int adc108s102_read_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int *val, int *val2, long m)
+{
+	struct adc108s102_state *st = iio_priv(indio_dev);
+	int ret;
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
+
+		ret = adc108s102_scan_direct(st, chan->address);
+
+		iio_device_release_direct_mode(indio_dev);
+
+		if (ret < 0)
+			return ret;
+
+		*val = ADC108S102_RES_DATA(ret);
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		if (chan->type != IIO_VOLTAGE)
+			break;
+
+		*val = st->va_millivolt;
+		*val2 = chan->scan_type.realbits;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info adc108s102_info = {
+	.read_raw		= &adc108s102_read_raw,
+	.update_scan_mode	= &adc108s102_update_scan_mode,
+	.driver_module		= THIS_MODULE,
+};
+
+static int adc108s102_probe(struct spi_device *spi)
+{
+	struct adc108s102_state *st;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	if (ACPI_COMPANION(&spi->dev)) {
+		st->va_millivolt = ADC108S102_VA_MV_ACPI_DEFAULT;
+	} else {
+		st->reg = devm_regulator_get(&spi->dev, "vref");
+		if (IS_ERR(st->reg))
+			return PTR_ERR(st->reg);
+
+		ret = regulator_enable(st->reg);
+		if (ret < 0) {
+			dev_err(&spi->dev, "Cannot enable vref regulator\n");
+			return ret;
+		}
+
+		ret = regulator_get_voltage(st->reg);
+		if (ret < 0) {
+			dev_err(&spi->dev, "vref get voltage failed\n");
+			return ret;
+		}
+
+		st->va_millivolt = ret / 1000;
+	}
+
+	spi_set_drvdata(spi, indio_dev);
+	st->spi = spi;
+
+	indio_dev->name = spi->modalias;
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = adc108s102_channels;
+	indio_dev->num_channels = ARRAY_SIZE(adc108s102_channels);
+	indio_dev->info = &adc108s102_info;
+
+	/* Setup default message */
+	st->scan_single_xfer.tx_buf = st->tx_buf;
+	st->scan_single_xfer.rx_buf = st->rx_buf;
+	st->scan_single_xfer.len = 2 * sizeof(st->tx_buf[0]);
+
+	spi_message_init_with_transfers(&st->scan_single_msg,
+					&st->scan_single_xfer, 1);
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 &adc108s102_trigger_handler, NULL);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to register IIO device\n");
+		goto error_cleanup_triggered_buffer;
+	}
+	return 0;
+
+error_cleanup_triggered_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+error_disable_reg:
+	regulator_disable(st->reg);
+
+	return ret;
+}
+
+static int adc108s102_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct adc108s102_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	regulator_disable(st->reg);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id adc108s102_of_match[] = {
+	{ .compatible = "ti,adc108s102" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, adc108s102_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id adc108s102_acpi_ids[] = {
+	{ "INT3495", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, adc108s102_acpi_ids);
+#endif
+
+static const struct spi_device_id adc108s102_id[] = {
+	{ "adc108s102", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, adc108s102_id);
+
+static struct spi_driver adc108s102_driver = {
+	.driver = {
+		.name   = "adc108s102",
+		.of_match_table = of_match_ptr(adc108s102_of_match),
+		.acpi_match_table = ACPI_PTR(adc108s102_acpi_ids),
+	},
+	.probe		= adc108s102_probe,
+	.remove		= adc108s102_remove,
+	.id_table	= adc108s102_id,
+};
+module_spi_driver(adc108s102_driver);
+
+MODULE_AUTHOR("Bogdan Pricop <bogdan.pricop@emutex.com>");
+MODULE_DESCRIPTION("Texas Instruments ADC108S102 and ADC128S102 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index f76d979..884b8e46 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -23,7 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 
-#include <linux/i2c/ads1015.h>
+#include <linux/platform_data/ads1015.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/types.h>
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 4282cec..6cbed7e 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -614,7 +614,7 @@ static int tiadc_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
 	if (indio_dev == NULL) {
 		dev_err(&pdev->dev, "failed to allocate iio device\n");
 		return -ENOMEM;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 0c74869..bd3d37f 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -36,7 +36,6 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/i2c/twl.h>
-#include <linux/i2c/twl4030-madc.h>
 #include <linux/module.h>
 #include <linux/stddef.h>
 #include <linux/mutex.h>
@@ -49,9 +48,121 @@
 
 #include <linux/iio/iio.h>
 
+#define TWL4030_MADC_MAX_CHANNELS 16
+
+#define TWL4030_MADC_CTRL1		0x00
+#define TWL4030_MADC_CTRL2		0x01
+
+#define TWL4030_MADC_RTSELECT_LSB	0x02
+#define TWL4030_MADC_SW1SELECT_LSB	0x06
+#define TWL4030_MADC_SW2SELECT_LSB	0x0A
+
+#define TWL4030_MADC_RTAVERAGE_LSB	0x04
+#define TWL4030_MADC_SW1AVERAGE_LSB	0x08
+#define TWL4030_MADC_SW2AVERAGE_LSB	0x0C
+
+#define TWL4030_MADC_CTRL_SW1		0x12
+#define TWL4030_MADC_CTRL_SW2		0x13
+
+#define TWL4030_MADC_RTCH0_LSB		0x17
+#define TWL4030_MADC_GPCH0_LSB		0x37
+
+#define TWL4030_MADC_MADCON	(1 << 0)	/* MADC power on */
+#define TWL4030_MADC_BUSY	(1 << 0)	/* MADC busy */
+/* MADC conversion completion */
+#define TWL4030_MADC_EOC_SW	(1 << 1)
+/* MADC SWx start conversion */
+#define TWL4030_MADC_SW_START	(1 << 5)
+#define TWL4030_MADC_ADCIN0	(1 << 0)
+#define TWL4030_MADC_ADCIN1	(1 << 1)
+#define TWL4030_MADC_ADCIN2	(1 << 2)
+#define TWL4030_MADC_ADCIN3	(1 << 3)
+#define TWL4030_MADC_ADCIN4	(1 << 4)
+#define TWL4030_MADC_ADCIN5	(1 << 5)
+#define TWL4030_MADC_ADCIN6	(1 << 6)
+#define TWL4030_MADC_ADCIN7	(1 << 7)
+#define TWL4030_MADC_ADCIN8	(1 << 8)
+#define TWL4030_MADC_ADCIN9	(1 << 9)
+#define TWL4030_MADC_ADCIN10	(1 << 10)
+#define TWL4030_MADC_ADCIN11	(1 << 11)
+#define TWL4030_MADC_ADCIN12	(1 << 12)
+#define TWL4030_MADC_ADCIN13	(1 << 13)
+#define TWL4030_MADC_ADCIN14	(1 << 14)
+#define TWL4030_MADC_ADCIN15	(1 << 15)
+
+/* Fixed channels */
+#define TWL4030_MADC_BTEMP	TWL4030_MADC_ADCIN1
+#define TWL4030_MADC_VBUS	TWL4030_MADC_ADCIN8
+#define TWL4030_MADC_VBKB	TWL4030_MADC_ADCIN9
+#define TWL4030_MADC_ICHG	TWL4030_MADC_ADCIN10
+#define TWL4030_MADC_VCHG	TWL4030_MADC_ADCIN11
+#define TWL4030_MADC_VBAT	TWL4030_MADC_ADCIN12
+
+/* Step size and prescaler ratio */
+#define TEMP_STEP_SIZE          147
+#define TEMP_PSR_R              100
+#define CURR_STEP_SIZE		147
+#define CURR_PSR_R1		44
+#define CURR_PSR_R2		88
+
+#define TWL4030_BCI_BCICTL1	0x23
+#define TWL4030_BCI_CGAIN	0x020
+#define TWL4030_BCI_MESBAT	(1 << 1)
+#define TWL4030_BCI_TYPEN	(1 << 4)
+#define TWL4030_BCI_ITHEN	(1 << 3)
+
+#define REG_BCICTL2             0x024
+#define TWL4030_BCI_ITHSENS	0x007
+
+/* Register and bits for GPBR1 register */
+#define TWL4030_REG_GPBR1		0x0c
+#define TWL4030_GPBR1_MADC_HFCLK_EN	(1 << 7)
+
 #define TWL4030_USB_SEL_MADC_MCPC	(1<<3)
 #define TWL4030_USB_CARKIT_ANA_CTRL	0xBB
 
+struct twl4030_madc_conversion_method {
+	u8 sel;
+	u8 avg;
+	u8 rbase;
+	u8 ctrl;
+};
+
+/**
+ * struct twl4030_madc_request - madc request packet for channel conversion
+ * @channels:	16 bit bitmap for individual channels
+ * @do_avg:	sample the input channel for 4 consecutive cycles
+ * @method:	RT, SW1, SW2
+ * @type:	Polling or interrupt based method
+ * @active:	Flag if request is active
+ * @result_pending: Flag set by the irq handler when the result is ready
+ * @raw:	Return raw value, do not convert it
+ * @rbuf:	Result buffer
+ */
+struct twl4030_madc_request {
+	unsigned long channels;
+	bool do_avg;
+	u16 method;
+	u16 type;
+	bool active;
+	bool result_pending;
+	bool raw;
+	int rbuf[TWL4030_MADC_MAX_CHANNELS];
+};
+
+enum conversion_methods {
+	TWL4030_MADC_RT,
+	TWL4030_MADC_SW1,
+	TWL4030_MADC_SW2,
+	TWL4030_MADC_NUM_METHODS
+};
+
+enum sample_type {
+	TWL4030_MADC_WAIT,
+	TWL4030_MADC_IRQ_ONESHOT,
+	TWL4030_MADC_IRQ_REARM
+};
+
 /**
  * struct twl4030_madc_data - a container for madc info
  * @dev:		Pointer to device structure for madc
@@ -72,6 +183,8 @@ struct twl4030_madc_data {
 	u8 isr;
 };
 
+static int twl4030_madc_conversion(struct twl4030_madc_request *req);
+
 static int twl4030_madc_read(struct iio_dev *iio_dev,
 			     const struct iio_chan_spec *chan,
 			     int *val, int *val2, long mask)
@@ -84,7 +197,6 @@ static int twl4030_madc_read(struct iio_dev *iio_dev,
 
 	req.channels = BIT(chan->channel);
 	req.active = false;
-	req.func_cb = NULL;
 	req.type = TWL4030_MADC_WAIT;
 	req.raw = !(mask == IIO_CHAN_INFO_PROCESSED);
 	req.do_avg = (mask == IIO_CHAN_INFO_AVERAGE_RAW);
@@ -341,37 +453,6 @@ static int twl4030_madc_read_channels(struct twl4030_madc_data *madc,
 }
 
 /*
- * Enables irq.
- * @madc - pointer to twl4030_madc_data struct
- * @id - irq number to be enabled
- * can take one of TWL4030_MADC_RT, TWL4030_MADC_SW1, TWL4030_MADC_SW2
- * corresponding to RT, SW1, SW2 conversion requests.
- * If the i2c read fails it returns an error else returns 0.
- */
-static int twl4030_madc_enable_irq(struct twl4030_madc_data *madc, u8 id)
-{
-	u8 val;
-	int ret;
-
-	ret = twl_i2c_read_u8(TWL4030_MODULE_MADC, &val, madc->imr);
-	if (ret) {
-		dev_err(madc->dev, "unable to read imr register 0x%X\n",
-			madc->imr);
-		return ret;
-	}
-
-	val &= ~(1 << id);
-	ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, val, madc->imr);
-	if (ret) {
-		dev_err(madc->dev,
-			"unable to write imr register 0x%X\n", madc->imr);
-		return ret;
-	}
-
-	return 0;
-}
-
-/*
  * Disables irq.
  * @madc - pointer to twl4030_madc_data struct
  * @id - irq number to be disabled
@@ -440,11 +521,6 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
 		/* Read results */
 		len = twl4030_madc_read_channels(madc, method->rbase,
 						 r->channels, r->rbuf, r->raw);
-		/* Return results to caller */
-		if (r->func_cb != NULL) {
-			r->func_cb(len, r->channels, r->rbuf);
-			r->func_cb = NULL;
-		}
 		/* Free request */
 		r->result_pending = 0;
 		r->active = 0;
@@ -466,11 +542,6 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
 		/* Read results */
 		len = twl4030_madc_read_channels(madc, method->rbase,
 						 r->channels, r->rbuf, r->raw);
-		/* Return results to caller */
-		if (r->func_cb != NULL) {
-			r->func_cb(len, r->channels, r->rbuf);
-			r->func_cb = NULL;
-		}
 		/* Free request */
 		r->result_pending = 0;
 		r->active = 0;
@@ -480,23 +551,6 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
 	return IRQ_HANDLED;
 }
 
-static int twl4030_madc_set_irq(struct twl4030_madc_data *madc,
-				struct twl4030_madc_request *req)
-{
-	struct twl4030_madc_request *p;
-	int ret;
-
-	p = &madc->requests[req->method];
-	memcpy(p, req, sizeof(*req));
-	ret = twl4030_madc_enable_irq(madc, req->method);
-	if (ret < 0) {
-		dev_err(madc->dev, "enable irq failed!!\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 /*
  * Function which enables the madc conversion
  * by writing to the control register.
@@ -568,7 +622,7 @@ static int twl4030_madc_wait_conversion_ready(struct twl4030_madc_data *madc,
  * be a negative error value in the corresponding array element.
  * returns 0 if succeeds else error value
  */
-int twl4030_madc_conversion(struct twl4030_madc_request *req)
+static int twl4030_madc_conversion(struct twl4030_madc_request *req)
 {
 	const struct twl4030_madc_conversion_method *method;
 	int ret;
@@ -605,17 +659,6 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
 			goto out;
 		}
 	}
-	if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
-		ret = twl4030_madc_set_irq(twl4030_madc, req);
-		if (ret < 0)
-			goto out;
-		ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
-		if (ret < 0)
-			goto out;
-		twl4030_madc->requests[req->method].active = 1;
-		ret = 0;
-		goto out;
-	}
 	/* With RT method we should not be here anymore */
 	if (req->method == TWL4030_MADC_RT) {
 		ret = -EINVAL;
@@ -640,28 +683,6 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(twl4030_madc_conversion);
-
-int twl4030_get_madc_conversion(int channel_no)
-{
-	struct twl4030_madc_request req;
-	int temp = 0;
-	int ret;
-
-	req.channels = (1 << channel_no);
-	req.method = TWL4030_MADC_SW2;
-	req.active = 0;
-	req.raw = 0;
-	req.func_cb = NULL;
-	ret = twl4030_madc_conversion(&req);
-	if (ret < 0)
-		return ret;
-	if (req.rbuf[channel_no] > 0)
-		temp = req.rbuf[channel_no];
-
-	return temp;
-}
-EXPORT_SYMBOL_GPL(twl4030_get_madc_conversion);
 
 /**
  * twl4030_madc_set_current_generator() - setup bias current
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 56cf590..4a60497 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1204,7 +1204,10 @@ static int xadc_probe(struct platform_device *pdev)
 		ret = PTR_ERR(xadc->clk);
 		goto err_free_samplerate_trigger;
 	}
-	clk_prepare_enable(xadc->clk);
+
+	ret = clk_prepare_enable(xadc->clk);
+	if (ret)
+		goto err_free_samplerate_trigger;
 
 	ret = xadc->ops->setup(pdev, indio_dev, irq);
 	if (ret)
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index dd99d27..ff03324 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/poll.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/sizes.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 9fabed4..2b5a320 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -14,6 +14,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
 #include <linux/iio/buffer-dma.h>
 #include <linux/iio/buffer-dmaengine.h>
 
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 39188b7..8010537 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -16,7 +16,7 @@
 
 config HID_SENSOR_IIO_TRIGGER
 	tristate "Common module (trigger) for all HID Sensor IIO drivers"
-	depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+	depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
 	select IIO_TRIGGER
 	help
 	  Say yes here to build trigger support for HID sensors.
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 1c0874c..f5d4d78 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -69,6 +69,12 @@ static struct {
 	{HID_USAGE_SENSOR_TIME_TIMESTAMP, HID_USAGE_SENSOR_UNITS_MILLISECOND,
 		1000000, 0},
 
+	{HID_USAGE_SENSOR_DEVICE_ORIENTATION, 0, 1, 0},
+
+	{HID_USAGE_SENSOR_RELATIVE_ORIENTATION, 0, 1, 0},
+
+	{HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION, 0, 1, 0},
+
 	{HID_USAGE_SENSOR_TEMPERATURE, 0, 1000, 0},
 	{HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_UNITS_DEGREES, 1000, 0},
 
@@ -230,7 +236,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
 	ret = sensor_hub_set_feature(st->hsdev, st->poll.report_id,
 				     st->poll.index, sizeof(value), &value);
 	if (ret < 0 || value < 0)
-		ret = -EINVAL;
+		return -EINVAL;
 
 	ret = sensor_hub_get_feature(st->hsdev,
 				     st->poll.report_id,
@@ -283,7 +289,7 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
 				     st->sensitivity.index, sizeof(value),
 				     &value);
 	if (ret < 0 || value < 0)
-		ret = -EINVAL;
+		return -EINVAL;
 
 	ret = sensor_hub_get_feature(st->hsdev,
 				     st->sensitivity.report_id,
@@ -404,6 +410,48 @@ int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
 
 }
 
+static void hid_sensor_get_report_latency_info(struct hid_sensor_hub_device *hsdev,
+					       u32 usage_id,
+					       struct hid_sensor_common *st)
+{
+	sensor_hub_input_get_attribute_info(hsdev, HID_FEATURE_REPORT,
+					    usage_id,
+					    HID_USAGE_SENSOR_PROP_REPORT_LATENCY,
+					    &st->report_latency);
+
+	hid_dbg(hsdev->hdev, "Report latency attributes: %x:%x\n",
+		st->report_latency.index, st->report_latency.report_id);
+}
+
+int hid_sensor_get_report_latency(struct hid_sensor_common *st)
+{
+	int ret;
+	int value;
+
+	ret = sensor_hub_get_feature(st->hsdev, st->report_latency.report_id,
+				     st->report_latency.index, sizeof(value),
+				     &value);
+	if (ret < 0)
+		return ret;
+
+	return value;
+}
+EXPORT_SYMBOL(hid_sensor_get_report_latency);
+
+int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency_ms)
+{
+	return sensor_hub_set_feature(st->hsdev, st->report_latency.report_id,
+				      st->report_latency.index,
+				      sizeof(latency_ms), &latency_ms);
+}
+EXPORT_SYMBOL(hid_sensor_set_report_latency);
+
+bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st)
+{
+	return st->report_latency.index > 0 && st->report_latency.report_id > 0;
+}
+EXPORT_SYMBOL(hid_sensor_batch_mode_supported);
+
 int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
 					u32 usage_id,
 					struct hid_sensor_common *st)
@@ -445,6 +493,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
 	} else
 		st->timestamp_ns_scale = 1000000000;
 
+	hid_sensor_get_report_latency_info(hsdev, usage_id, st);
+
 	hid_dbg(hsdev->hdev, "common attributes: %x:%x, %x:%x, %x:%x %x:%x %x:%x\n",
 		st->poll.index, st->poll.report_id,
 		st->report_state.index, st->report_state.report_id,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 0b5dea0..16ade0a 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -26,9 +26,84 @@
 #include <linux/hid-sensor-hub.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
 #include <linux/iio/sysfs.h>
 #include "hid-sensor-trigger.h"
 
+static ssize_t _hid_sensor_set_report_latency(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+	int integer, fract, ret;
+	int latency;
+
+	ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+	if (ret)
+		return ret;
+
+	latency = integer * 1000 + fract / 1000;
+	ret = hid_sensor_set_report_latency(attrb, latency);
+	if (ret < 0)
+		return len;
+
+	attrb->latency_ms = hid_sensor_get_report_latency(attrb);
+
+	return len;
+}
+
+static ssize_t _hid_sensor_get_report_latency(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+	int latency;
+
+	latency = hid_sensor_get_report_latency(attrb);
+	if (latency < 0)
+		return latency;
+
+	return sprintf(buf, "%d.%06u\n", latency / 1000, (latency % 1000) * 1000);
+}
+
+static ssize_t _hid_sensor_get_fifo_state(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct hid_sensor_common *attrb = iio_device_get_drvdata(indio_dev);
+	int latency;
+
+	latency = hid_sensor_get_report_latency(attrb);
+	if (latency < 0)
+		return latency;
+
+	return sprintf(buf, "%d\n", !!latency);
+}
+
+static IIO_DEVICE_ATTR(hwfifo_timeout, 0644,
+		       _hid_sensor_get_report_latency,
+		       _hid_sensor_set_report_latency, 0);
+static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
+		       _hid_sensor_get_fifo_state, NULL, 0);
+
+static const struct attribute *hid_sensor_fifo_attributes[] = {
+	&iio_dev_attr_hwfifo_timeout.dev_attr.attr,
+	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
+	NULL,
+};
+
+static void hid_sensor_setup_batch_mode(struct iio_dev *indio_dev,
+					struct hid_sensor_common *st)
+{
+	if (!hid_sensor_batch_mode_supported(st))
+		return;
+
+	iio_buffer_set_attrs(indio_dev->buffer, hid_sensor_fifo_attributes);
+}
+
 static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 {
 	int state_val;
@@ -141,6 +216,9 @@ static void hid_sensor_set_power_work(struct work_struct *work)
 				       sizeof(attrb->raw_hystersis),
 				       &attrb->raw_hystersis);
 
+	if (attrb->latency_ms > 0)
+		hid_sensor_set_report_latency(attrb, attrb->latency_ms);
+
 	_hid_sensor_power_state(attrb, true);
 }
 
@@ -192,6 +270,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
 	attrb->trigger = trig;
 	indio_dev->trig = iio_trigger_get(trig);
 
+	hid_sensor_setup_batch_mode(indio_dev, attrb);
+
 	ret = pm_runtime_set_active(&indio_dev->dev);
 	if (ret)
 		goto error_unreg_trigger;
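
The hwfifo_timeout attribute added above accepts a value in seconds, stores it internally in milliseconds, and prints it back as seconds.microseconds. A standalone sketch of that round trip, with the fixed-point parsing of iio_str_to_fixpoint() simplified to assumed values:

#include <stdio.h>

int main(void)
{
	/* "0.25" parsed with a fract multiplier of 100000: integer = 0, fract = 250000 */
	int integer = 0, fract = 250000;

	/* Store side: convert to milliseconds, as _hid_sensor_set_report_latency() does */
	int latency_ms = integer * 1000 + fract / 1000;

	/* Show side: print back as seconds.microseconds */
	printf("latency = %d ms -> %d.%06u s\n",
	       latency_ms, latency_ms / 1000,
	       (unsigned int)((latency_ms % 1000) * 1000));
	return 0;
}

Writing 0.25 to hwfifo_timeout thus becomes 250 ms of report latency in the sensor hub, and reading the attribute back shows 0.250000.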
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index df5abc4..25bed2d 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -13,7 +13,8 @@
 	  AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, AD5627, AD5627R,
 	  AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, AD5666,
 	  AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
-	  LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to Analog Converter.
+	  LTC2617, LTC2619, LTC2626, LTC2627, LTC2629, LTC2631, LTC2633, LTC2635
+	  Digital to Analog Converter.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad5064.
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 6803e4a..3f9399c 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -2,8 +2,8 @@
  * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R,
  * AD5627, AD5627R, AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R,
  * AD5666, AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
- * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to analog converters
- * driver
+ * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629, LTC2631, LTC2633, LTC2635
+ * Digital to analog converters driver
  *
  * Copyright 2011 Analog Devices Inc.
  *
@@ -168,6 +168,24 @@ enum ad5064_type {
 	ID_LTC2626,
 	ID_LTC2627,
 	ID_LTC2629,
+	ID_LTC2631_L12,
+	ID_LTC2631_H12,
+	ID_LTC2631_L10,
+	ID_LTC2631_H10,
+	ID_LTC2631_L8,
+	ID_LTC2631_H8,
+	ID_LTC2633_L12,
+	ID_LTC2633_H12,
+	ID_LTC2633_L10,
+	ID_LTC2633_H10,
+	ID_LTC2633_L8,
+	ID_LTC2633_H8,
+	ID_LTC2635_L12,
+	ID_LTC2635_H12,
+	ID_LTC2635_L10,
+	ID_LTC2635_H10,
+	ID_LTC2635_L8,
+	ID_LTC2635_H8,
 };
 
 static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -425,6 +443,19 @@ static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0, ad5064_ext_info);
 static DECLARE_AD5064_CHANNELS(ltc2607_channels, 16, 0, ltc2617_ext_info);
 static DECLARE_AD5064_CHANNELS(ltc2617_channels, 14, 2, ltc2617_ext_info);
 static DECLARE_AD5064_CHANNELS(ltc2627_channels, 12, 4, ltc2617_ext_info);
+#define ltc2631_12_channels ltc2627_channels
+static DECLARE_AD5064_CHANNELS(ltc2631_10_channels, 10, 6, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2631_8_channels, 8, 8, ltc2617_ext_info);
+
+#define LTC2631_INFO(vref, pchannels, nchannels)	\
+	{						\
+		.shared_vref = true,			\
+		.internal_vref = vref,			\
+		.channels = pchannels,			\
+		.num_channels = nchannels,		\
+		.regmap_type = AD5064_REGMAP_LTC,	\
+	}
+
 
 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 	[ID_AD5024] = {
@@ -724,6 +755,24 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
 		.num_channels = 4,
 		.regmap_type = AD5064_REGMAP_LTC,
 	},
+	[ID_LTC2631_L12] = LTC2631_INFO(2500000, ltc2631_12_channels, 1),
+	[ID_LTC2631_H12] = LTC2631_INFO(4096000, ltc2631_12_channels, 1),
+	[ID_LTC2631_L10] = LTC2631_INFO(2500000, ltc2631_10_channels, 1),
+	[ID_LTC2631_H10] = LTC2631_INFO(4096000, ltc2631_10_channels, 1),
+	[ID_LTC2631_L8] = LTC2631_INFO(2500000, ltc2631_8_channels, 1),
+	[ID_LTC2631_H8] = LTC2631_INFO(4096000, ltc2631_8_channels, 1),
+	[ID_LTC2633_L12] = LTC2631_INFO(2500000, ltc2631_12_channels, 2),
+	[ID_LTC2633_H12] = LTC2631_INFO(4096000, ltc2631_12_channels, 2),
+	[ID_LTC2633_L10] = LTC2631_INFO(2500000, ltc2631_10_channels, 2),
+	[ID_LTC2633_H10] = LTC2631_INFO(4096000, ltc2631_10_channels, 2),
+	[ID_LTC2633_L8] = LTC2631_INFO(2500000, ltc2631_8_channels, 2),
+	[ID_LTC2633_H8] = LTC2631_INFO(4096000, ltc2631_8_channels, 2),
+	[ID_LTC2635_L12] = LTC2631_INFO(2500000, ltc2631_12_channels, 4),
+	[ID_LTC2635_H12] = LTC2631_INFO(4096000, ltc2631_12_channels, 4),
+	[ID_LTC2635_L10] = LTC2631_INFO(2500000, ltc2631_10_channels, 4),
+	[ID_LTC2635_H10] = LTC2631_INFO(4096000, ltc2631_10_channels, 4),
+	[ID_LTC2635_L8] = LTC2631_INFO(2500000, ltc2631_8_channels, 4),
+	[ID_LTC2635_H8] = LTC2631_INFO(4096000, ltc2631_8_channels, 4),
 };
 
 static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -982,6 +1031,24 @@ static const struct i2c_device_id ad5064_i2c_ids[] = {
 	{"ltc2626", ID_LTC2626},
 	{"ltc2627", ID_LTC2627},
 	{"ltc2629", ID_LTC2629},
+	{"ltc2631-l12", ID_LTC2631_L12},
+	{"ltc2631-h12", ID_LTC2631_H12},
+	{"ltc2631-l10", ID_LTC2631_L10},
+	{"ltc2631-h10", ID_LTC2631_H10},
+	{"ltc2631-l8", ID_LTC2631_L8},
+	{"ltc2631-h8", ID_LTC2631_H8},
+	{"ltc2633-l12", ID_LTC2633_L12},
+	{"ltc2633-h12", ID_LTC2633_H12},
+	{"ltc2633-l10", ID_LTC2633_L10},
+	{"ltc2633-h10", ID_LTC2633_H10},
+	{"ltc2633-l8", ID_LTC2633_L8},
+	{"ltc2633-h8", ID_LTC2633_H8},
+	{"ltc2635-l12", ID_LTC2635_L12},
+	{"ltc2635-h12", ID_LTC2635_H12},
+	{"ltc2635-l10", ID_LTC2635_L10},
+	{"ltc2635-h10", ID_LTC2635_H10},
+	{"ltc2635-l8", ID_LTC2635_L8},
+	{"ltc2635-h8", ID_LTC2635_H8},
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index c715466..9451026 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -57,12 +57,15 @@ struct hts221_hw {
 
 	struct hts221_sensor sensors[HTS221_SENSOR_MAX];
 
+	bool enabled;
 	u8 odr;
 
 	const struct hts221_transfer_function *tf;
 	struct hts221_transfer_buffer tb;
 };
 
+extern const struct dev_pm_ops hts221_pm_ops;
+
 int hts221_config_drdy(struct hts221_hw *hw, bool enable);
 int hts221_probe(struct iio_dev *iio_dev);
 int hts221_power_on(struct hts221_hw *hw);
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 3f3ef4a1..a56da39 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/iio/sysfs.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
 #include <asm/unaligned.h>
 
 #include "hts221.h"
@@ -307,15 +308,30 @@ hts221_sysfs_temp_oversampling_avail(struct device *dev,
 
 int hts221_power_on(struct hts221_hw *hw)
 {
-	return hts221_update_odr(hw, hw->odr);
+	int err;
+
+	err = hts221_update_odr(hw, hw->odr);
+	if (err < 0)
+		return err;
+
+	hw->enabled = true;
+
+	return 0;
 }
 
 int hts221_power_off(struct hts221_hw *hw)
 {
-	u8 data[] = {0x00, 0x00};
+	__le16 data = 0;
+	int err;
 
-	return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
-			     data);
+	err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+			    (u8 *)&data);
+	if (err < 0)
+		return err;
+
+	hw->enabled = false;
+
+	return 0;
 }
 
 static int hts221_parse_temp_caldata(struct hts221_hw *hw)
@@ -682,6 +698,36 @@ int hts221_probe(struct iio_dev *iio_dev)
 }
 EXPORT_SYMBOL(hts221_probe);
 
+static int __maybe_unused hts221_suspend(struct device *dev)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	__le16 data = 0;
+	int err;
+
+	err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+			    (u8 *)&data);
+
+	return err < 0 ? err : 0;
+}
+
+static int __maybe_unused hts221_resume(struct device *dev)
+{
+	struct iio_dev *iio_dev = dev_get_drvdata(dev);
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	int err = 0;
+
+	if (hw->enabled)
+		err = hts221_update_odr(hw, hw->odr);
+
+	return err;
+}
+
+const struct dev_pm_ops hts221_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(hts221_suspend, hts221_resume)
+};
+EXPORT_SYMBOL(hts221_pm_ops);
+
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 8333c02..f38e4b7 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -105,6 +105,7 @@ MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
 static struct i2c_driver hts221_driver = {
 	.driver = {
 		.name = "hts221_i2c",
+		.pm = &hts221_pm_ops,
 		.of_match_table = of_match_ptr(hts221_i2c_of_match),
 		.acpi_match_table = ACPI_PTR(hts221_acpi_match),
 	},
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 70df5e7..57cbc25 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -113,6 +113,7 @@ MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
 static struct spi_driver hts221_driver = {
 	.driver = {
 		.name = "hts221_spi",
+		.pm = &hts221_pm_ops,
 		.of_match_table = of_match_ptr(hts221_spi_of_match),
 	},
 	.probe = hts221_spi_probe,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 96dabbd..44830bc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
 static const struct inv_mpu6050_reg_map reg_set_6500 = {
 	.sample_rate_div	= INV_MPU6050_REG_SAMPLE_RATE_DIV,
 	.lpf                    = INV_MPU6050_REG_CONFIG,
+	.accel_lpf              = INV_MPU6500_REG_ACCEL_CONFIG_2,
 	.user_ctrl              = INV_MPU6050_REG_USER_CTRL,
 	.fifo_en                = INV_MPU6050_REG_FIFO_EN,
 	.gyro_config            = INV_MPU6050_REG_GYRO_CONFIG,
@@ -187,7 +188,6 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
 	int result = 0;
 
 	if (power_on) {
-		/* Already under indio-dev->mlock mutex */
 		if (!st->powerup_count)
 			result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
 		if (!result)
@@ -211,6 +211,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
 EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
 
 /**
+ *  inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
+ *
+ *  MPU60xx/MPU9150 use only one register for accelerometer and gyroscope.
+ *  MPU6500 and above have a dedicated register for the accelerometer.
+ */
+static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
+				    enum inv_mpu6050_filter_e val)
+{
+	int result;
+
+	result = regmap_write(st->map, st->reg->lpf, val);
+	if (result)
+		return result;
+
+	switch (st->chip_type) {
+	case INV_MPU6050:
+	case INV_MPU6000:
+	case INV_MPU9150:
+		/* old chips, nothing to do */
+		result = 0;
+		break;
+	default:
+		/* set accel lpf */
+		result = regmap_write(st->map, st->reg->accel_lpf, val);
+		break;
+	}
+
+	return result;
+}
+
+/**
  *  inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
  *
  *  Initial configuration:
@@ -233,8 +264,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
 	if (result)
 		return result;
 
-	d = INV_MPU6050_FILTER_20HZ;
-	result = regmap_write(st->map, st->reg->lpf, d);
+	result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
 	if (result)
 		return result;
 
@@ -298,50 +328,37 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
 		int result;
 
 		ret = IIO_VAL_INT;
-		result = 0;
-		mutex_lock(&indio_dev->mlock);
-		if (!st->chip_config.enable) {
-			result = inv_mpu6050_set_power_itg(st, true);
-			if (result)
-				goto error_read_raw;
-		}
-		/* when enable is on, power is already on */
+		mutex_lock(&st->lock);
+		result = iio_device_claim_direct_mode(indio_dev);
+		if (result)
+			goto error_read_raw_unlock;
+		result = inv_mpu6050_set_power_itg(st, true);
+		if (result)
+			goto error_read_raw_release;
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
-			if (!st->chip_config.gyro_fifo_enable ||
-			    !st->chip_config.enable) {
-				result = inv_mpu6050_switch_engine(st, true,
-						INV_MPU6050_BIT_PWR_GYRO_STBY);
-				if (result)
-					goto error_read_raw;
-			}
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+			if (result)
+				goto error_read_raw_power_off;
 			ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
 						      chan->channel2, val);
-			if (!st->chip_config.gyro_fifo_enable ||
-			    !st->chip_config.enable) {
-				result = inv_mpu6050_switch_engine(st, false,
-						INV_MPU6050_BIT_PWR_GYRO_STBY);
-				if (result)
-					goto error_read_raw;
-			}
+			result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_GYRO_STBY);
+			if (result)
+				goto error_read_raw_power_off;
 			break;
 		case IIO_ACCEL:
-			if (!st->chip_config.accl_fifo_enable ||
-			    !st->chip_config.enable) {
-				result = inv_mpu6050_switch_engine(st, true,
-						INV_MPU6050_BIT_PWR_ACCL_STBY);
-				if (result)
-					goto error_read_raw;
-			}
+			result = inv_mpu6050_switch_engine(st, true,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+			if (result)
+				goto error_read_raw_power_off;
 			ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
 						      chan->channel2, val);
-			if (!st->chip_config.accl_fifo_enable ||
-			    !st->chip_config.enable) {
-				result = inv_mpu6050_switch_engine(st, false,
-						INV_MPU6050_BIT_PWR_ACCL_STBY);
-				if (result)
-					goto error_read_raw;
-			}
+			result = inv_mpu6050_switch_engine(st, false,
+					INV_MPU6050_BIT_PWR_ACCL_STBY);
+			if (result)
+				goto error_read_raw_power_off;
 			break;
 		case IIO_TEMP:
 			/* wait for stabilization */
@@ -353,10 +370,12 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
 			ret = -EINVAL;
 			break;
 		}
-error_read_raw:
-		if (!st->chip_config.enable)
-			result |= inv_mpu6050_set_power_itg(st, false);
-		mutex_unlock(&indio_dev->mlock);
+error_read_raw_power_off:
+		result |= inv_mpu6050_set_power_itg(st, false);
+error_read_raw_release:
+		iio_device_release_direct_mode(indio_dev);
+error_read_raw_unlock:
+		mutex_unlock(&st->lock);
 		if (result)
 			return result;
 
@@ -365,13 +384,17 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
+			mutex_lock(&st->lock);
 			*val  = 0;
 			*val2 = gyro_scale_6050[st->chip_config.fsr];
+			mutex_unlock(&st->lock);
 
 			return IIO_VAL_INT_PLUS_NANO;
 		case IIO_ACCEL:
+			mutex_lock(&st->lock);
 			*val = 0;
 			*val2 = accel_scale[st->chip_config.accl_fs];
+			mutex_unlock(&st->lock);
 
 			return IIO_VAL_INT_PLUS_MICRO;
 		case IIO_TEMP:
@@ -394,12 +417,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
 	case IIO_CHAN_INFO_CALIBBIAS:
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
+			mutex_lock(&st->lock);
 			ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
 						chan->channel2, val);
+			mutex_unlock(&st->lock);
 			return IIO_VAL_INT;
 		case IIO_ACCEL:
+			mutex_lock(&st->lock);
 			ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
 						chan->channel2, val);
+			mutex_unlock(&st->lock);
 			return IIO_VAL_INT;
 
 		default:
@@ -475,18 +502,17 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
 	struct inv_mpu6050_state  *st = iio_priv(indio_dev);
 	int result;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&st->lock);
 	/*
 	 * we should only update scale when the chip is disabled, i.e.
 	 * not running
 	 */
-	if (st->chip_config.enable) {
-		result = -EBUSY;
-		goto error_write_raw;
-	}
+	result = iio_device_claim_direct_mode(indio_dev);
+	if (result)
+		goto error_write_raw_unlock;
 	result = inv_mpu6050_set_power_itg(st, true);
 	if (result)
-		goto error_write_raw;
+		goto error_write_raw_release;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_SCALE:
@@ -522,9 +548,11 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
 		break;
 	}
 
-error_write_raw:
 	result |= inv_mpu6050_set_power_itg(st, false);
-	mutex_unlock(&indio_dev->mlock);
+error_write_raw_release:
+	iio_device_release_direct_mode(indio_dev);
+error_write_raw_unlock:
+	mutex_unlock(&st->lock);
 
 	return result;
 }
@@ -537,6 +565,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
 *                  would be aliasing. This function basically searches for the
 *                  correct low pass parameters based on the fifo rate, e.g.,
 *                  sampling frequency.
+ *
+ *  lpf is set automatically when setting sampling rate to avoid any aliases.
  */
 static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
 {
@@ -552,7 +582,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
 	while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
 		i++;
 	data = d[i];
-	result = regmap_write(st->map, st->reg->lpf, data);
+	result = inv_mpu6050_set_lpf_regs(st, data);
 	if (result)
 		return result;
 	st->chip_config.lpf = data;
@@ -578,31 +608,35 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
 	if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
 	    fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
 		return -EINVAL;
-	if (fifo_rate == st->chip_config.fifo_rate)
-		return count;
 
-	mutex_lock(&indio_dev->mlock);
-	if (st->chip_config.enable) {
-		result = -EBUSY;
-		goto fifo_rate_fail;
+	mutex_lock(&st->lock);
+	if (fifo_rate == st->chip_config.fifo_rate) {
+		result = 0;
+		goto fifo_rate_fail_unlock;
 	}
+	result = iio_device_claim_direct_mode(indio_dev);
+	if (result)
+		goto fifo_rate_fail_unlock;
 	result = inv_mpu6050_set_power_itg(st, true);
 	if (result)
-		goto fifo_rate_fail;
+		goto fifo_rate_fail_release;
 
 	d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
 	result = regmap_write(st->map, st->reg->sample_rate_div, d);
 	if (result)
-		goto fifo_rate_fail;
+		goto fifo_rate_fail_power_off;
 	st->chip_config.fifo_rate = fifo_rate;
 
 	result = inv_mpu6050_set_lpf(st, fifo_rate);
 	if (result)
-		goto fifo_rate_fail;
+		goto fifo_rate_fail_power_off;
 
-fifo_rate_fail:
+fifo_rate_fail_power_off:
 	result |= inv_mpu6050_set_power_itg(st, false);
-	mutex_unlock(&indio_dev->mlock);
+fifo_rate_fail_release:
+	iio_device_release_direct_mode(indio_dev);
+fifo_rate_fail_unlock:
+	mutex_unlock(&st->lock);
 	if (result)
 		return result;
 
@@ -617,8 +651,13 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
 		   char *buf)
 {
 	struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+	unsigned fifo_rate;
 
-	return sprintf(buf, "%d\n", st->chip_config.fifo_rate);
+	mutex_lock(&st->lock);
+	fifo_rate = st->chip_config.fifo_rate;
+	mutex_unlock(&st->lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", fifo_rate);
 }
 
 /**
@@ -645,7 +684,8 @@ static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
 	case ATTR_ACCL_MATRIX:
 		m = st->plat_data.orientation;
 
-		return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+		return scnprintf(buf, PAGE_SIZE,
+			"%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
 			m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
 	default:
 		return -EINVAL;
@@ -770,10 +810,35 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
 {
 	int result;
 	unsigned int regval;
+	int i;
 
 	st->hw  = &hw_info[st->chip_type];
 	st->reg = hw_info[st->chip_type].reg;
 
+	/* check chip self-identification */
+	result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
+	if (result)
+		return result;
+	if (regval != st->hw->whoami) {
+		/* check whoami against all possible values */
+		for (i = 0; i < INV_NUM_PARTS; ++i) {
+			if (regval == hw_info[i].whoami) {
+				dev_warn(regmap_get_device(st->map),
+					"whoami mismatch got %#02x (%s) "
+					"expected %#02hhx (%s)\n",
+					regval, hw_info[i].name,
+					st->hw->whoami, st->hw->name);
+				break;
+			}
+		}
+		if (i >= INV_NUM_PARTS) {
+			dev_err(regmap_get_device(st->map),
+				"invalid whoami %#02x expected %#02hhx (%s)\n",
+				regval, st->hw->whoami, st->hw->name);
+			return -ENODEV;
+		}
+	}
+
 	/* reset to make sure the previous state is not there */
 	result = regmap_write(st->map, st->reg->pwr_mgmt_1,
 			      INV_MPU6050_BIT_H_RESET);
@@ -781,16 +846,6 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
 		return result;
 	msleep(INV_MPU6050_POWER_UP_TIME);
 
-	/* check chip self-identification */
-	result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
-	if (result)
-		return result;
-	if (regval != st->hw->whoami) {
-		dev_warn(regmap_get_device(st->map),
-				"whoami mismatch got %#02x expected %#02hhx for %s\n",
-				regval, st->hw->whoami, st->hw->name);
-	}
-
 	/*
 	 * toggle power state. After reset, the sleep bit could be on
 	 * or off depending on the OTP settings. Toggling power would
@@ -836,6 +891,7 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
 		return -ENODEV;
 	}
 	st = iio_priv(indio_dev);
+	mutex_init(&st->lock);
 	st->chip_type = chip_type;
 	st->powerup_count = 0;
 	st->irq = irq;
@@ -929,12 +985,26 @@ EXPORT_SYMBOL_GPL(inv_mpu_core_remove);
 
 static int inv_mpu_resume(struct device *dev)
 {
-	return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), true);
+	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+	int result;
+
+	mutex_lock(&st->lock);
+	result = inv_mpu6050_set_power_itg(st, true);
+	mutex_unlock(&st->lock);
+
+	return result;
 }
 
 static int inv_mpu_suspend(struct device *dev)
 {
-	return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), false);
+	struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+	int result;
+
+	mutex_lock(&st->lock);
+	result = inv_mpu6050_set_power_itg(st, false);
+	mutex_unlock(&st->lock);
+
+	return result;
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 64b5f5b..fcd7a92 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -32,7 +32,7 @@ static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
 	int ret = 0;
 
 	/* Use the same mutex which was used everywhere to protect power-op */
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&st->lock);
 	if (!st->powerup_count) {
 		ret = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
 		if (ret)
@@ -48,7 +48,7 @@ static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
 				   INV_MPU6050_BIT_BYPASS_EN);
 	}
 write_error:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&st->lock);
 
 	return ret;
 }
@@ -58,14 +58,14 @@ static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
 	struct iio_dev *indio_dev = i2c_mux_priv(muxc);
 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&st->lock);
 	/* It doesn't really matter if any of the calls fail */
 	regmap_write(st->map, st->reg->int_pin_cfg, INV_MPU6050_INT_PIN_CFG);
 	st->powerup_count--;
 	if (!st->powerup_count)
 		regmap_write(st->map, st->reg->pwr_mgmt_1,
 			     INV_MPU6050_BIT_SLEEP);
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&st->lock);
 
 	return 0;
 }
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index ef13de7..0657941 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -14,6 +14,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/kfifo.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
 #include <linux/regmap.h>
@@ -28,6 +29,7 @@
  *  struct inv_mpu6050_reg_map - Notable registers.
  *  @sample_rate_div:	Divider applied to gyro output rate.
  *  @lpf:		Configures internal low pass filter.
+ *  @accel_lpf:		Configures accelerometer low pass filter.
  *  @user_ctrl:		Enables/resets the FIFO.
  *  @fifo_en:		Determines which data will appear in FIFO.
  *  @gyro_config:	gyro config register.
@@ -47,6 +49,7 @@
 struct inv_mpu6050_reg_map {
 	u8 sample_rate_div;
 	u8 lpf;
+	u8 accel_lpf;
 	u8 user_ctrl;
 	u8 fifo_en;
 	u8 gyro_config;
@@ -80,7 +83,6 @@ enum inv_devices {
  *  @fsr:		Full scale range.
  *  @lpf:		Digital low pass filter frequency.
  *  @accl_fs:		accel full scale range.
- *  @enable:		master enable state.
  *  @accl_fifo_enable:	enable accel data output
  *  @gyro_fifo_enable:	enable gyro data output
  *  @fifo_rate:		FIFO update rate.
@@ -89,7 +91,6 @@ struct inv_mpu6050_chip_config {
 	unsigned int fsr:2;
 	unsigned int lpf:3;
 	unsigned int accl_fs:2;
-	unsigned int enable:1;
 	unsigned int accl_fifo_enable:1;
 	unsigned int gyro_fifo_enable:1;
 	u16 fifo_rate;
@@ -112,6 +113,7 @@ struct inv_mpu6050_hw {
 /*
  *  struct inv_mpu6050_state - Driver state variables.
  *  @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
+ *  @lock:              Chip access lock.
  *  @trig:              IIO trigger.
  *  @chip_config:	Cached attribute information.
  *  @reg:		Map of important registers.
@@ -126,6 +128,7 @@ struct inv_mpu6050_hw {
  */
 struct inv_mpu6050_state {
 #define TIMESTAMP_FIFO_SIZE 16
+	struct mutex lock;
 	struct iio_trigger  *trig;
 	struct inv_mpu6050_chip_config chip_config;
 	const struct inv_mpu6050_reg_map *reg;
@@ -188,6 +191,7 @@ struct inv_mpu6050_state {
 #define INV_MPU6050_FIFO_THRESHOLD           500
 
 /* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_CONFIG_2      0x1D
 #define INV_MPU6500_REG_ACCEL_OFFSET        0x77
 
 /* delay time in milliseconds */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 3a9f3ea..ff81c6a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -128,7 +128,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 	u16 fifo_count;
 	s64 timestamp;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&st->lock);
 	if (!(st->chip_config.accl_fifo_enable |
 		st->chip_config.gyro_fifo_enable))
 		goto end_session;
@@ -178,7 +178,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 	}
 
 end_session:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&st->lock);
 	iio_trigger_notify_done(indio_dev->trig);
 
 	return IRQ_HANDLED;
@@ -186,7 +186,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
 flush_fifo:
 	/* Flush HW and SW FIFOs. */
 	inv_reset_fifo(indio_dev);
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&st->lock);
 	iio_trigger_notify_done(indio_dev->trig);
 
 	return IRQ_HANDLED;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index e8818d4..540070f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -90,7 +90,6 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
 		if (result)
 			return result;
 	}
-	st->chip_config.enable = enable;
 
 	return 0;
 }
@@ -103,7 +102,15 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
 static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
 					      bool state)
 {
-	return inv_mpu6050_set_enable(iio_trigger_get_drvdata(trig), state);
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct inv_mpu6050_state *st = iio_priv(indio_dev);
+	int result;
+
+	mutex_lock(&st->lock);
+	result = inv_mpu6050_set_enable(indio_dev, state);
+	mutex_unlock(&st->lock);
+
+	return result;
 }
 
 static const struct iio_trigger_ops inv_mpu_trigger_ops = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 4839db7..46352c7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -135,6 +135,8 @@ struct st_lsm6dsx_hw {
 #endif /* CONFIG_SPI_MASTER */
 };
 
+extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
+
 int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
 		     const struct st_lsm6dsx_transfer_function *tf_ops);
 int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor);
@@ -144,5 +146,8 @@ int st_lsm6dsx_write_with_mask(struct st_lsm6dsx_hw *hw, u8 addr, u8 mask,
 			       u8 val);
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor,
 				u16 watermark);
+int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw);
+int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
+			     enum st_lsm6dsx_fifo_mode fifo_mode);
 
 #endif /* ST_LSM6DSX_H */
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index c8e5cfd..2a72acc 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -37,6 +37,8 @@
 #define ST_LSM6DSX_REG_FIFO_THH_ADDR		0x07
 #define ST_LSM6DSX_FIFO_TH_MASK			GENMASK(11, 0)
 #define ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR	0x08
+#define ST_LSM6DSX_REG_HLACTIVE_ADDR		0x12
+#define ST_LSM6DSX_REG_HLACTIVE_MASK		BIT(5)
 #define ST_LSM6DSX_REG_FIFO_MODE_ADDR		0x0a
 #define ST_LSM6DSX_FIFO_MODE_MASK		GENMASK(2, 0)
 #define ST_LSM6DSX_FIFO_ODR_MASK		GENMASK(6, 3)
@@ -130,8 +132,8 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
 	return 0;
 }
 
-static int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
-				    enum st_lsm6dsx_fifo_mode fifo_mode)
+int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
+			     enum st_lsm6dsx_fifo_mode fifo_mode)
 {
 	u8 data;
 	int err;
@@ -303,7 +305,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
 	return read_len;
 }
 
-static int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
+int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
 {
 	int err;
 
@@ -417,6 +419,7 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
 {
 	struct iio_buffer *buffer;
 	unsigned long irq_type;
+	bool irq_active_low;
 	int i, err;
 
 	irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
@@ -424,12 +427,23 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
 	switch (irq_type) {
 	case IRQF_TRIGGER_HIGH:
 	case IRQF_TRIGGER_RISING:
+		irq_active_low = false;
+		break;
+	case IRQF_TRIGGER_LOW:
+	case IRQF_TRIGGER_FALLING:
+		irq_active_low = true;
 		break;
 	default:
 		dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
 		return -EINVAL;
 	}
 
+	err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_HLACTIVE_ADDR,
+					 ST_LSM6DSX_REG_HLACTIVE_MASK,
+					 irq_active_low);
+	if (err < 0)
+		return err;
+
 	err = devm_request_threaded_irq(hw->dev, hw->irq,
 					st_lsm6dsx_handler_irq,
 					st_lsm6dsx_handler_thread,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 462a27b..b485540 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/pm.h>
 
 #include <linux/platform_data/st_sensors_pdata.h>
 
@@ -731,6 +732,57 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
 }
 EXPORT_SYMBOL(st_lsm6dsx_probe);
 
+static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
+{
+	struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
+	struct st_lsm6dsx_sensor *sensor;
+	int i, err = 0;
+
+	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
+		sensor = iio_priv(hw->iio_devs[i]);
+		if (!(hw->enable_mask & BIT(sensor->id)))
+			continue;
+
+		err = st_lsm6dsx_write_with_mask(hw,
+				st_lsm6dsx_odr_table[sensor->id].reg.addr,
+				st_lsm6dsx_odr_table[sensor->id].reg.mask, 0);
+		if (err < 0)
+			return err;
+	}
+
+	if (hw->fifo_mode != ST_LSM6DSX_FIFO_BYPASS)
+		err = st_lsm6dsx_flush_fifo(hw);
+
+	return err;
+}
+
+static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
+{
+	struct st_lsm6dsx_hw *hw = dev_get_drvdata(dev);
+	struct st_lsm6dsx_sensor *sensor;
+	int i, err = 0;
+
+	for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
+		sensor = iio_priv(hw->iio_devs[i]);
+		if (!(hw->enable_mask & BIT(sensor->id)))
+			continue;
+
+		err = st_lsm6dsx_set_odr(sensor, sensor->odr);
+		if (err < 0)
+			return err;
+	}
+
+	if (hw->enable_mask)
+		err = st_lsm6dsx_set_fifo_mode(hw, ST_LSM6DSX_FIFO_CONT);
+
+	return err;
+}
+
+const struct dev_pm_ops st_lsm6dsx_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(st_lsm6dsx_suspend, st_lsm6dsx_resume)
+};
+EXPORT_SYMBOL(st_lsm6dsx_pm_ops);
+
 MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
 MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics st_lsm6dsx driver");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 09a51cf..305fec7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -98,6 +98,7 @@ MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
 static struct i2c_driver st_lsm6dsx_driver = {
 	.driver = {
 		.name = "st_lsm6dsx_i2c",
+		.pm = &st_lsm6dsx_pm_ops,
 		.of_match_table = of_match_ptr(st_lsm6dsx_i2c_of_match),
 	},
 	.probe = st_lsm6dsx_i2c_probe,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index f765a50..95472f1 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -115,6 +115,7 @@ MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
 static struct spi_driver st_lsm6dsx_driver = {
 	.driver = {
 		.name = "st_lsm6dsx_spi",
+		.pm = &st_lsm6dsx_pm_ops,
 		.of_match_table = of_match_ptr(st_lsm6dsx_spi_of_match),
 	},
 	.probe = st_lsm6dsx_spi_probe,
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 57c14da..17ec4ce 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -478,21 +478,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
 	size_t len)
 {
 	const struct iio_enum *e = (const struct iio_enum *)priv;
-	unsigned int i;
 	int ret;
 
 	if (!e->set)
 		return -EINVAL;
 
-	for (i = 0; i < e->num_items; i++) {
-		if (sysfs_streq(buf, e->items[i]))
-			break;
-	}
+	ret = __sysfs_match_string(e->items, e->num_items, buf);
+	if (ret < 0)
+		return ret;
 
-	if (i == e->num_items)
-		return -EINVAL;
-
-	ret = e->set(indio_dev, chan, i);
+	ret = e->set(indio_dev, chan, ret);
 	return ret ? ret : len;
 }
 EXPORT_SYMBOL_GPL(iio_enum_write);
@@ -1089,7 +1084,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
 {
 	int i, ret, attrcount = 0;
 
-	for_each_set_bit(i, infomask, sizeof(infomask)*8) {
+	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
 		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
 			return -EINVAL;
 		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
@@ -1118,7 +1113,7 @@ static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
 	int i, ret, attrcount = 0;
 	char *avail_postfix;
 
-	for_each_set_bit(i, infomask, sizeof(infomask) * 8) {
+	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
 		avail_postfix = kasprintf(GFP_KERNEL,
 					  "%s_available",
 					  iio_chan_info_postfix[i]);
@@ -1428,7 +1423,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
 static void iio_dev_release(struct device *device)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(device);
-	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
+	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
 		iio_device_unregister_trigger_consumer(indio_dev);
 	iio_device_unregister_eventset(indio_dev);
 	iio_device_unregister_sysfs(indio_dev);
@@ -1710,7 +1705,7 @@ int iio_device_register(struct iio_dev *indio_dev)
 			"Failed to register event set\n");
 		goto error_free_sysfs;
 	}
-	if (indio_dev->modes & (INDIO_BUFFER_TRIGGERED | INDIO_EVENT_TRIGGERED))
+	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
 		iio_device_register_trigger_consumer(indio_dev);
 
 	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 978e1592..4061fed 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
 	return len;
 
 out_trigger_put:
-	iio_trigger_put(trig);
+	if (trig)
+		iio_trigger_put(trig);
 	return ret;
 }
 
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 7a13535..da3d06b 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -750,11 +750,9 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
 err_unlock:
 	mutex_unlock(&chan->indio_dev->info_exist_lock);
 
-	if (ret >= 0 && type != IIO_VAL_INT) {
+	if (ret >= 0 && type != IIO_VAL_INT)
 		/* raw values are assumed to be IIO_VAL_INT */
 		ret = -EINVAL;
-		goto err_unlock;
-	}
 
 	return ret;
 }
@@ -869,3 +867,63 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iio_write_channel_raw);
+
+unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
+{
+	const struct iio_chan_spec_ext_info *ext_info;
+	unsigned int i = 0;
+
+	if (!chan->channel->ext_info)
+		return i;
+
+	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
+		++i;
+
+	return i;
+}
+EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
+
+static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
+						const struct iio_channel *chan,
+						const char *attr)
+{
+	const struct iio_chan_spec_ext_info *ext_info;
+
+	if (!chan->channel->ext_info)
+		return NULL;
+
+	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
+		if (!strcmp(attr, ext_info->name))
+			return ext_info;
+	}
+
+	return NULL;
+}
+
+ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
+				  const char *attr, char *buf)
+{
+	const struct iio_chan_spec_ext_info *ext_info;
+
+	ext_info = iio_lookup_ext_info(chan, attr);
+	if (!ext_info)
+		return -EINVAL;
+
+	return ext_info->read(chan->indio_dev, ext_info->private,
+			      chan->channel, buf);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
+
+ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
+				   const char *buf, size_t len)
+{
+	const struct iio_chan_spec_ext_info *ext_info;
+
+	ext_info = iio_lookup_ext_info(chan, attr);
+	if (!ext_info)
+		return -EINVAL;
+
+	return ext_info->write(chan->indio_dev, ext_info->private,
+			       chan->channel, buf, len);
+}
+EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 33e755d..2356ed9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -172,6 +172,16 @@
 	 in lux, proximity infrared sensing and normal infrared sensing.
 	 Data from sensor is accessible via sysfs.
 
+config SENSORS_ISL29028
+	tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	 Provides a driver for the Intersil ISL29028 device.
+	 This driver supports the sysfs interface to read the ALS, IR intensity
+	 and proximity values via IIO. The ISL29028 provides concurrent sensing
+	 of ambient light and proximity.
+
 config ISL29125
 	tristate "Intersil ISL29125 digital color light sensor"
 	depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 681363c..fa32fa4 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_HID_SENSOR_ALS)	+= hid-sensor-als.o
 obj-$(CONFIG_HID_SENSOR_PROX)	+= hid-sensor-prox.o
 obj-$(CONFIG_SENSORS_ISL29018)	+= isl29018.o
+obj-$(CONFIG_SENSORS_ISL29028)	+= isl29028.o
 obj-$(CONFIG_ISL29125)		+= isl29125.o
 obj-$(CONFIG_JSA1212)		+= jsa1212.o
 obj-$(CONFIG_SENSORS_LM3533)	+= lm3533-als.o
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 917dd8b..61f5924 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -807,6 +807,7 @@ static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume);
 #define ISL29018_PM_OPS NULL
 #endif
 
+#ifdef CONFIG_ACPI
 static const struct acpi_device_id isl29018_acpi_match[] = {
 	{"ISL29018", isl29018},
 	{"ISL29023", isl29023},
@@ -814,6 +815,7 @@ static const struct acpi_device_id isl29018_acpi_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match);
+#endif
 
 static const struct i2c_device_id isl29018_id[] = {
 	{"isl29018", isl29018},
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
similarity index 89%
rename from drivers/staging/iio/light/isl29028.c
rename to drivers/iio/light/isl29028.c
index 5375e7a..3d09c1f 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -16,6 +16,10 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Datasheets:
+ *  - http://www.intersil.com/content/dam/Intersil/documents/isl2/isl29028.pdf
+ *  - http://www.intersil.com/content/dam/Intersil/documents/isl2/isl29030.pdf
  */
 
 #include <linux/module.h>
@@ -64,8 +68,25 @@
 
 #define ISL29028_POWER_OFF_DELAY_MS		2000
 
-static const unsigned int isl29028_prox_sleep_time[] = {800, 400, 200, 100, 75,
-							50, 12, 0};
+struct isl29028_prox_data {
+	int sampling_int;
+	int sampling_fract;
+	int sleep_time;
+};
+
+static const struct isl29028_prox_data isl29028_prox_data[] = {
+	{   1, 250000, 800 },
+	{   2, 500000, 400 },
+	{   5,      0, 200 },
+	{  10,      0, 100 },
+	{  13, 300000,  75 },
+	{  20,      0,  50 },
+	{  80,      0,  13 }, /*
+			       * Note: Data sheet lists 12.5 ms sleep time.
+			       * Round up a half millisecond for msleep().
+			       */
+	{ 100,  0,   0 }
+};
 
 enum isl29028_als_ir_mode {
 	ISL29028_MODE_NONE = 0,
@@ -76,32 +97,37 @@ enum isl29028_als_ir_mode {
 struct isl29028_chip {
 	struct mutex			lock;
 	struct regmap			*regmap;
-	unsigned int			prox_sampling;
+	int				prox_sampling_int;
+	int				prox_sampling_frac;
 	bool				enable_prox;
 	int				lux_scale;
 	enum isl29028_als_ir_mode	als_ir_mode;
 };
 
-static int isl29028_find_prox_sleep_time_index(int sampling)
+static int isl29028_find_prox_sleep_index(int sampling_int, int sampling_fract)
 {
-	unsigned int period = DIV_ROUND_UP(1000, sampling);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(isl29028_prox_sleep_time); ++i) {
-		if (period >= isl29028_prox_sleep_time[i])
-			break;
+	for (i = 0; i < ARRAY_SIZE(isl29028_prox_data); ++i) {
+		if (isl29028_prox_data[i].sampling_int == sampling_int &&
+		    isl29028_prox_data[i].sampling_fract == sampling_fract)
+			return i;
 	}
 
-	return i;
+	return -EINVAL;
 }
 
 static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
-					unsigned int sampling)
+					int sampling_int, int sampling_fract)
 {
 	struct device *dev = regmap_get_device(chip->regmap);
 	int sleep_index, ret;
 
-	sleep_index = isl29028_find_prox_sleep_time_index(sampling);
+	sleep_index = isl29028_find_prox_sleep_index(sampling_int,
+						     sampling_fract);
+	if (sleep_index < 0)
+		return sleep_index;
+
 	ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
 				 ISL29028_CONF_PROX_SLP_MASK,
 				 sleep_index << ISL29028_CONF_PROX_SLP_SH);
@@ -112,16 +138,18 @@ static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
 		return ret;
 	}
 
-	chip->prox_sampling = sampling;
+	chip->prox_sampling_int = sampling_int;
+	chip->prox_sampling_frac = sampling_fract;
 
 	return ret;
 }
 
 static int isl29028_enable_proximity(struct isl29028_chip *chip)
 {
-	int sleep_index, ret;
+	int prox_index, ret;
 
-	ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
+	ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling_int,
+					   chip->prox_sampling_frac);
 	if (ret < 0)
 		return ret;
 
@@ -132,8 +160,12 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip)
 		return ret;
 
 	/* Wait for conversion to be complete for first sample */
-	sleep_index = isl29028_find_prox_sleep_time_index(chip->prox_sampling);
-	msleep(isl29028_prox_sleep_time[sleep_index]);
+	prox_index = isl29028_find_prox_sleep_index(chip->prox_sampling_int,
+						    chip->prox_sampling_frac);
+	if (prox_index < 0)
+		return prox_index;
+
+	msleep(isl29028_prox_data[prox_index].sleep_time);
 
 	return 0;
 }
@@ -361,7 +393,7 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
 			break;
 		}
 
-		ret = isl29028_set_proxim_sampling(chip, val);
+		ret = isl29028_set_proxim_sampling(chip, val, val2);
 		break;
 	case IIO_LIGHT:
 		if (mask != IIO_CHAN_INFO_SCALE) {
@@ -439,7 +471,8 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
 		if (chan->type != IIO_PROXIMITY)
 			break;
 
-		*val = chip->prox_sampling;
+		*val = chip->prox_sampling_int;
+		*val2 = chip->prox_sampling_frac;
 		ret = IIO_VAL_INT;
 		break;
 	case IIO_CHAN_INFO_SCALE:
@@ -472,7 +505,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
 }
 
 static IIO_CONST_ATTR(in_proximity_sampling_frequency_available,
-				"1 3 5 10 13 20 83 100");
+				"1.25 2.5 5 10 13.3 20 80 100");
 static IIO_CONST_ATTR(in_illuminance_scale_available, "125 2000");
 
 #define ISL29028_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr)
@@ -571,7 +604,8 @@ static int isl29028_probe(struct i2c_client *client,
 	}
 
 	chip->enable_prox  = false;
-	chip->prox_sampling = 20;
+	chip->prox_sampling_int = 20;
+	chip->prox_sampling_frac = 0;
 	chip->lux_scale = 2000;
 
 	ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
@@ -664,6 +698,7 @@ static const struct dev_pm_ops isl29028_pm_ops = {
 
 static const struct i2c_device_id isl29028_id[] = {
 	{"isl29028", 0},
+	{"isl29030", 0},
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, isl29028_id);
@@ -671,6 +706,7 @@ MODULE_DEVICE_TABLE(i2c, isl29028_id);
 static const struct of_device_id isl29028_of_match[] = {
 	{ .compatible = "isl,isl29028", }, /* for backward compat., don't use */
 	{ .compatible = "isil,isl29028", },
+	{ .compatible = "isil,isl29030", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, isl29028_of_match);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b30e0c1..67838ed 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
 static const struct reg_field reg_field_it =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
 static const struct reg_field reg_field_als_intr =
-				REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
 				REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+				REG_FIELD(LTR501_INTR, 0, 0);
 static const struct reg_field reg_field_als_rate =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
 static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 7de0f39..9d0c2e8 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -9,7 +9,7 @@
  *
  * IIO driver for RPR-0521RS (7-bit I2C slave address 0x38).
  *
- * TODO: illuminance channel, PM support, buffer
+ * TODO: illuminance channel, buffer
  */
 
 #include <linux/module.h>
@@ -30,6 +30,7 @@
 #define RPR0521_REG_PXS_DATA		0x44 /* 16-bit, little endian */
 #define RPR0521_REG_ALS_DATA0		0x46 /* 16-bit, little endian */
 #define RPR0521_REG_ALS_DATA1		0x48 /* 16-bit, little endian */
+#define RPR0521_REG_PS_OFFSET_LSB	0x53
 #define RPR0521_REG_ID			0x92
 
 #define RPR0521_MODE_ALS_MASK		BIT(7)
@@ -77,9 +78,9 @@ static const struct rpr0521_gain rpr0521_pxs_gain[3] = {
 };
 
 enum rpr0521_channel {
+	RPR0521_CHAN_PXS,
 	RPR0521_CHAN_ALS_DATA0,
 	RPR0521_CHAN_ALS_DATA1,
-	RPR0521_CHAN_PXS,
 };
 
 struct rpr0521_reg_desc {
@@ -88,6 +89,10 @@ struct rpr0521_reg_desc {
 };
 
 static const struct rpr0521_reg_desc rpr0521_data_reg[] = {
+	[RPR0521_CHAN_PXS]	= {
+		.address	= RPR0521_REG_PXS_DATA,
+		.device_mask	= RPR0521_MODE_PXS_MASK,
+	},
 	[RPR0521_CHAN_ALS_DATA0] = {
 		.address	= RPR0521_REG_ALS_DATA0,
 		.device_mask	= RPR0521_MODE_ALS_MASK,
@@ -96,10 +101,6 @@ static const struct rpr0521_reg_desc rpr0521_data_reg[] = {
 		.address	= RPR0521_REG_ALS_DATA1,
 		.device_mask	= RPR0521_MODE_ALS_MASK,
 	},
-	[RPR0521_CHAN_PXS]	= {
-		.address	= RPR0521_REG_PXS_DATA,
-		.device_mask	= RPR0521_MODE_PXS_MASK,
-	},
 };
 
 static const struct rpr0521_gain_info {
@@ -109,6 +110,13 @@ static const struct rpr0521_gain_info {
 	const struct rpr0521_gain *gain;
 	int size;
 } rpr0521_gain[] = {
+	[RPR0521_CHAN_PXS] = {
+		.reg	= RPR0521_REG_PXS_CTRL,
+		.mask	= RPR0521_PXS_GAIN_MASK,
+		.shift	= RPR0521_PXS_GAIN_SHIFT,
+		.gain	= rpr0521_pxs_gain,
+		.size	= ARRAY_SIZE(rpr0521_pxs_gain),
+	},
 	[RPR0521_CHAN_ALS_DATA0] = {
 		.reg	= RPR0521_REG_ALS_CTRL,
 		.mask	= RPR0521_ALS_DATA0_GAIN_MASK,
@@ -123,13 +131,30 @@ static const struct rpr0521_gain_info {
 		.gain	= rpr0521_als_gain,
 		.size	= ARRAY_SIZE(rpr0521_als_gain),
 	},
-	[RPR0521_CHAN_PXS] = {
-		.reg	= RPR0521_REG_PXS_CTRL,
-		.mask	= RPR0521_PXS_GAIN_MASK,
-		.shift	= RPR0521_PXS_GAIN_SHIFT,
-		.gain	= rpr0521_pxs_gain,
-		.size	= ARRAY_SIZE(rpr0521_pxs_gain),
-	},
+};
+
+struct rpr0521_samp_freq {
+	int	als_hz;
+	int	als_uhz;
+	int	pxs_hz;
+	int	pxs_uhz;
+};
+
+static const struct rpr0521_samp_freq rpr0521_samp_freq_i[13] = {
+/*	{ALS, PXS},		   W==currently writable option */
+	{0, 0, 0, 0},		/* W0000, 0=standby */
+	{0, 0, 100, 0},		/*  0001 */
+	{0, 0, 25, 0},		/*  0010 */
+	{0, 0, 10, 0},		/*  0011 */
+	{0, 0, 2, 500000},	/*  0100 */
+	{10, 0, 20, 0},		/*  0101 */
+	{10, 0, 10, 0},		/* W0110 */
+	{10, 0, 2, 500000},	/*  0111 */
+	{2, 500000, 20, 0},	/*  1000, measurement 100ms, sleep 300ms */
+	{2, 500000, 10, 0},	/*  1001, measurement 100ms, sleep 300ms */
+	{2, 500000, 0, 0},	/*  1010, high sensitivity mode */
+	{2, 500000, 2, 500000},	/* W1011, high sensitivity mode */
+	{20, 0, 20, 0}	/* 1100, ALS_data x 0.5, see specification P.18 */
 };
 
 struct rpr0521_data {
@@ -142,9 +167,11 @@ struct rpr0521_data {
 	bool als_dev_en;
 	bool pxs_dev_en;
 
-	/* optimize runtime pm ops - enable device only if needed */
+	/* optimize runtime pm ops - enable/disable device only if needed */
 	bool als_ps_need_en;
 	bool pxs_ps_need_en;
+	bool als_need_dis;
+	bool pxs_need_dis;
 
 	struct regmap *regmap;
 };
@@ -152,9 +179,16 @@ struct rpr0521_data {
 static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
 static IIO_CONST_ATTR(in_proximity_scale_available, RPR0521_PXS_SCALE_AVAIL);
 
+/*
+ * Start with the easy frequencies first; the whole table of frequency
+ * combinations is more complicated.
+ */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("2.5 10");
+
 static struct attribute *rpr0521_attributes[] = {
 	&iio_const_attr_in_intensity_scale_available.dev_attr.attr,
 	&iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
 	NULL,
 };
 
@@ -164,12 +198,21 @@ static const struct attribute_group rpr0521_attribute_group = {
 
 static const struct iio_chan_spec rpr0521_channels[] = {
 	{
+		.type = IIO_PROXIMITY,
+		.address = RPR0521_CHAN_PXS,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_OFFSET) |
+			BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+	},
+	{
 		.type = IIO_INTENSITY,
 		.modified = 1,
 		.address = RPR0521_CHAN_ALS_DATA0,
 		.channel2 = IIO_MOD_LIGHT_BOTH,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 			BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	},
 	{
 		.type = IIO_INTENSITY,
@@ -178,13 +221,8 @@ static const struct iio_chan_spec rpr0521_channels[] = {
 		.channel2 = IIO_MOD_LIGHT_IR,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 			BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	},
-	{
-		.type = IIO_PROXIMITY,
-		.address = RPR0521_CHAN_PXS,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
-			BIT(IIO_CHAN_INFO_SCALE),
-	}
 };
 
 static int rpr0521_als_enable(struct rpr0521_data *data, u8 status)
@@ -197,7 +235,10 @@ static int rpr0521_als_enable(struct rpr0521_data *data, u8 status)
 	if (ret < 0)
 		return ret;
 
-	data->als_dev_en = true;
+	if (status & RPR0521_MODE_ALS_MASK)
+		data->als_dev_en = true;
+	else
+		data->als_dev_en = false;
 
 	return 0;
 }
@@ -212,7 +253,10 @@ static int rpr0521_pxs_enable(struct rpr0521_data *data, u8 status)
 	if (ret < 0)
 		return ret;
 
-	data->pxs_dev_en = true;
+	if (status & RPR0521_MODE_PXS_MASK)
+		data->pxs_dev_en = true;
+	else
+		data->pxs_dev_en = false;
 
 	return 0;
 }
@@ -224,40 +268,32 @@ static int rpr0521_pxs_enable(struct rpr0521_data *data, u8 status)
  * @on: state to be set for devices in @device_mask
  * @device_mask: bitmask specifying for which device we need to update @on state
  *
- * We rely on rpr0521_runtime_resume to enable our @device_mask devices, but
- * if (for example) PXS was enabled (pxs_dev_en = true) by a previous call to
- * rpr0521_runtime_resume and we want to enable ALS we MUST set ALS enable
- * bit of RPR0521_REG_MODE_CTRL here because rpr0521_runtime_resume will not
- * be called twice.
+ * Calls to this function must be balanced so that each ON has a matching
+ * OFF; otherwise the runtime PM usage_count gets out of sync.
  */
 static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
 				   u8 device_mask)
 {
 #ifdef CONFIG_PM
 	int ret;
-	u8 update_mask = 0;
 
 	if (device_mask & RPR0521_MODE_ALS_MASK) {
-		if (on && !data->als_ps_need_en && data->pxs_dev_en)
-			update_mask |= RPR0521_MODE_ALS_MASK;
-		else
-			data->als_ps_need_en = on;
+		data->als_ps_need_en = on;
+		data->als_need_dis = !on;
 	}
 
 	if (device_mask & RPR0521_MODE_PXS_MASK) {
-		if (on && !data->pxs_ps_need_en && data->als_dev_en)
-			update_mask |= RPR0521_MODE_PXS_MASK;
-		else
-			data->pxs_ps_need_en = on;
+		data->pxs_ps_need_en = on;
+		data->pxs_need_dis = !on;
 	}
 
-	if (update_mask) {
-		ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
-					 update_mask, update_mask);
-		if (ret < 0)
-			return ret;
-	}
-
+	/*
+	 * On: _resume() is called only when we are suspended.
+	 * Off: _suspend() is called after a delay if _resume() is not
+	 * called before that.
+	 * Note: if either measurement is re-enabled before _suspend(),
+	 * both stay enabled until _suspend().
+	 */
 	if (on) {
 		ret = pm_runtime_get_sync(&data->client->dev);
 	} else {
@@ -273,6 +309,23 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
 
 		return ret;
 	}
+
+	if (on) {
+		/* If _resume() was not called, enable measurement now. */
+		if (data->als_ps_need_en) {
+			ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
+			if (ret)
+				return ret;
+			data->als_ps_need_en = false;
+		}
+
+		if (data->pxs_ps_need_en) {
+			ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
+			if (ret)
+				return ret;
+			data->pxs_ps_need_en = false;
+		}
+	}
 #endif
 	return 0;
 }
@@ -314,6 +367,106 @@ static int rpr0521_set_gain(struct rpr0521_data *data, int chan,
 				  idx << rpr0521_gain[chan].shift);
 }
 
+static int rpr0521_read_samp_freq(struct rpr0521_data *data,
+				enum iio_chan_type chan_type,
+			    int *val, int *val2)
+{
+	int reg, ret;
+
+	ret = regmap_read(data->regmap, RPR0521_REG_MODE_CTRL, &reg);
+	if (ret < 0)
+		return ret;
+
+	reg &= RPR0521_MODE_MEAS_TIME_MASK;
+	if (reg >= ARRAY_SIZE(rpr0521_samp_freq_i))
+		return -EINVAL;
+
+	switch (chan_type) {
+	case IIO_INTENSITY:
+		*val = rpr0521_samp_freq_i[reg].als_hz;
+		*val2 = rpr0521_samp_freq_i[reg].als_uhz;
+		return 0;
+
+	case IIO_PROXIMITY:
+		*val = rpr0521_samp_freq_i[reg].pxs_hz;
+		*val2 = rpr0521_samp_freq_i[reg].pxs_uhz;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rpr0521_write_samp_freq_common(struct rpr0521_data *data,
+				enum iio_chan_type chan_type,
+				int val, int val2)
+{
+	int i;
+
+	/*
+	 * Ignore the channel: for simplicity, both PXS and ALS are set up
+	 * to the same frequency.
+	 */
+	switch (val) {
+	case 0:
+		i = 0;
+		break;
+
+	case 2:
+		if (val2 != 500000)
+			return -EINVAL;
+
+		i = 11;
+		break;
+
+	case 10:
+		i = 6;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(data->regmap,
+		RPR0521_REG_MODE_CTRL,
+		RPR0521_MODE_MEAS_TIME_MASK,
+		i);
+}
+
+static int rpr0521_read_ps_offset(struct rpr0521_data *data, int *offset)
+{
+	int ret;
+	__le16 buffer;
+
+	ret = regmap_bulk_read(data->regmap,
+		RPR0521_REG_PS_OFFSET_LSB, &buffer, sizeof(buffer));
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Failed to read PS OFFSET register\n");
+		return ret;
+	}
+	*offset = le16_to_cpu(buffer);
+
+	return ret;
+}
+
+static int rpr0521_write_ps_offset(struct rpr0521_data *data, int offset)
+{
+	int ret;
+	__le16 buffer;
+
+	buffer = cpu_to_le16(offset & 0x3ff);
+	ret = regmap_raw_write(data->regmap,
+		RPR0521_REG_PS_OFFSET_LSB, &buffer, sizeof(buffer));
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Failed to write PS OFFSET register\n");
+		return ret;
+	}
+
+	return ret;
+}
+
 static int rpr0521_read_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan, int *val,
 			    int *val2, long mask)
@@ -339,7 +492,7 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev,
 
 		ret = regmap_bulk_read(data->regmap,
 				       rpr0521_data_reg[chan->address].address,
-				       &raw_data, 2);
+				       &raw_data, sizeof(raw_data));
 		if (ret < 0) {
 			rpr0521_set_power_state(data, false, device_mask);
 			mutex_unlock(&data->lock);
@@ -354,6 +507,7 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev,
 		*val = le16_to_cpu(raw_data);
 
 		return IIO_VAL_INT;
+
 	case IIO_CHAN_INFO_SCALE:
 		mutex_lock(&data->lock);
 		ret = rpr0521_get_gain(data, chan->address, val, val2);
@@ -362,6 +516,25 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev,
 			return ret;
 
 		return IIO_VAL_INT_PLUS_MICRO;
+
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(&data->lock);
+		ret = rpr0521_read_samp_freq(data, chan->type, val, val2);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+
+		return IIO_VAL_INT_PLUS_MICRO;
+
+	case IIO_CHAN_INFO_OFFSET:
+		mutex_lock(&data->lock);
+		ret = rpr0521_read_ps_offset(data, val);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+
+		return IIO_VAL_INT;
+
 	default:
 		return -EINVAL;
 	}
@@ -381,6 +554,22 @@ static int rpr0521_write_raw(struct iio_dev *indio_dev,
 		mutex_unlock(&data->lock);
 
 		return ret;
+
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(&data->lock);
+		ret = rpr0521_write_samp_freq_common(data, chan->type,
+						     val, val2);
+		mutex_unlock(&data->lock);
+
+		return ret;
+
+	case IIO_CHAN_INFO_OFFSET:
+		mutex_lock(&data->lock);
+		ret = rpr0521_write_ps_offset(data, val);
+		mutex_unlock(&data->lock);
+
+		return ret;
+
 	default:
 		return -EINVAL;
 	}
@@ -419,12 +608,14 @@ static int rpr0521_init(struct rpr0521_data *data)
 		return ret;
 	}
 
+#ifndef CONFIG_PM
 	ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
 	if (ret < 0)
 		return ret;
 	ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
 	if (ret < 0)
 		return ret;
+#endif
 
 	return 0;
 }
@@ -510,13 +701,26 @@ static int rpr0521_probe(struct i2c_client *client,
 
 	ret = pm_runtime_set_active(&client->dev);
 	if (ret < 0)
-		return ret;
+		goto err_poweroff;
 
 	pm_runtime_enable(&client->dev);
 	pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
 	pm_runtime_use_autosuspend(&client->dev);
 
-	return iio_device_register(indio_dev);
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto err_pm_disable;
+
+	return 0;
+
+err_pm_disable:
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+err_poweroff:
+	rpr0521_poweroff(data);
+
+	return ret;
 }
 
 static int rpr0521_remove(struct i2c_client *client)
@@ -541,9 +745,16 @@ static int rpr0521_runtime_suspend(struct device *dev)
 	struct rpr0521_data *data = iio_priv(indio_dev);
 	int ret;
 
-	/* disable channels and sets {als,pxs}_dev_en to false */
 	mutex_lock(&data->lock);
+	/* If measurements are enabled, enable them on resume */
+	if (!data->als_need_dis)
+		data->als_ps_need_en = data->als_dev_en;
+	if (!data->pxs_need_dis)
+		data->pxs_ps_need_en = data->pxs_dev_en;
+
+	/* Disable channels and set {als,pxs}_dev_en to false */
 	ret = rpr0521_poweroff(data);
+	regcache_mark_dirty(data->regmap);
 	mutex_unlock(&data->lock);
 
 	return ret;
@@ -555,6 +766,7 @@ static int rpr0521_runtime_resume(struct device *dev)
 	struct rpr0521_data *data = iio_priv(indio_dev);
 	int ret;
 
+	regcache_sync(data->regmap);
 	if (data->als_ps_need_en) {
 		ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
 		if (ret < 0)
@@ -568,6 +780,7 @@ static int rpr0521_runtime_resume(struct device *dev)
 			return ret;
 		data->pxs_ps_need_en = false;
 	}
+	msleep(100);	/* wait for the first measurement result */
 
 	return 0;
 }
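The rpr0521 runtime PM changes above lean on regmap's register cache: the cache is marked dirty when the device loses power in runtime suspend and synced back to the hardware on resume, so configuration written before the suspend is restored. A minimal sketch of that pattern for a hypothetical regmap-backed IIO sensor (the foo_* names and the power-switching steps are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/regmap.h>

struct foo_data {			/* hypothetical driver state */
	struct regmap *regmap;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct foo_data *data = iio_priv(indio_dev);

	/* ... power the sensor down here ... */
	regcache_mark_dirty(data->regmap);	/* cache no longer matches hardware */

	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct foo_data *data = iio_priv(indio_dev);

	/* ... power the sensor back up here ... */
	return regcache_sync(data->regmap);	/* replay cached writes to the device */
}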
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index a78b602..1679181 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -3,7 +3,7 @@
  * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
  *
  * Copyright (c) 2011, TAOS Corporation.
- * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
+ * Copyright (c) 2016-2017 Brian Masney <masneyb@onstation.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/pm_runtime.h>
 
 /* Device Registers and Masks */
 #define TSL2583_CNTRL			0x00
@@ -64,6 +65,8 @@
 #define TSL2583_CHIP_ID			0x90
 #define TSL2583_CHIP_ID_MASK		0xf0
 
+#define TSL2583_POWER_OFF_DELAY_MS	2000
+
 /* Per-device data */
 struct tsl2583_als_info {
 	u16 als_ch0;
@@ -108,7 +111,6 @@ struct tsl2583_chip {
 	struct tsl2583_settings als_settings;
 	int als_time_scale;
 	int als_saturation;
-	bool suspended;
 };
 
 struct gainadj {
@@ -460,8 +462,6 @@ static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
 	if (ret < 0)
 		return ret;
 
-	chip->suspended = false;
-
 	return ret;
 }
 
@@ -513,11 +513,6 @@ static ssize_t in_illuminance_calibrate_store(struct device *dev,
 
 	mutex_lock(&chip->als_mutex);
 
-	if (chip->suspended) {
-		ret = -EBUSY;
-		goto done;
-	}
-
 	ret = tsl2583_als_calibrate(indio_dev);
 	if (ret < 0)
 		goto done;
@@ -645,20 +640,36 @@ static const struct iio_chan_spec tsl2583_channels[] = {
 	},
 };
 
+static int tsl2583_set_pm_runtime_busy(struct tsl2583_chip *chip, bool on)
+{
+	int ret;
+
+	if (on) {
+		ret = pm_runtime_get_sync(&chip->client->dev);
+		if (ret < 0)
+			pm_runtime_put_noidle(&chip->client->dev);
+	} else {
+		pm_runtime_mark_last_busy(&chip->client->dev);
+		ret = pm_runtime_put_autosuspend(&chip->client->dev);
+	}
+
+	return ret;
+}
+
 static int tsl2583_read_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan,
 			    int *val, int *val2, long mask)
 {
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int ret = -EINVAL;
+	int ret, pm_ret;
+
+	ret = tsl2583_set_pm_runtime_busy(chip, true);
+	if (ret < 0)
+		return ret;
 
 	mutex_lock(&chip->als_mutex);
 
-	if (chip->suspended) {
-		ret = -EBUSY;
-		goto read_done;
-	}
-
+	ret = -EINVAL;
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
 		if (chan->type == IIO_LIGHT) {
@@ -719,6 +730,18 @@ static int tsl2583_read_raw(struct iio_dev *indio_dev,
 read_done:
 	mutex_unlock(&chip->als_mutex);
 
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Preserve the ret variable if the call to
+	 * tsl2583_set_pm_runtime_busy() is successful so the reading
+	 * (if applicable) is returned to user space.
+	 */
+	pm_ret = tsl2583_set_pm_runtime_busy(chip, false);
+	if (pm_ret < 0)
+		return pm_ret;
+
 	return ret;
 }
 
@@ -727,15 +750,15 @@ static int tsl2583_write_raw(struct iio_dev *indio_dev,
 			     int val, int val2, long mask)
 {
 	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int ret = -EINVAL;
+	int ret;
+
+	ret = tsl2583_set_pm_runtime_busy(chip, true);
+	if (ret < 0)
+		return ret;
 
 	mutex_lock(&chip->als_mutex);
 
-	if (chip->suspended) {
-		ret = -EBUSY;
-		goto write_done;
-	}
-
+	ret = -EINVAL;
 	switch (mask) {
 	case IIO_CHAN_INFO_CALIBBIAS:
 		if (chan->type == IIO_LIGHT) {
@@ -767,9 +790,15 @@ static int tsl2583_write_raw(struct iio_dev *indio_dev,
 		break;
 	}
 
-write_done:
 	mutex_unlock(&chip->als_mutex);
 
+	if (ret < 0)
+		return ret;
+
+	ret = tsl2583_set_pm_runtime_busy(chip, false);
+	if (ret < 0)
+		return ret;
+
 	return ret;
 }
 
@@ -803,7 +832,6 @@ static int tsl2583_probe(struct i2c_client *clientp,
 	i2c_set_clientdata(clientp, indio_dev);
 
 	mutex_init(&chip->als_mutex);
-	chip->suspended = true;
 
 	ret = i2c_smbus_read_byte_data(clientp,
 				       TSL2583_CMD_REG | TSL2583_CHIPID);
@@ -826,6 +854,11 @@ static int tsl2583_probe(struct i2c_client *clientp,
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->name = chip->client->name;
 
+	pm_runtime_enable(&clientp->dev);
+	pm_runtime_set_autosuspend_delay(&clientp->dev,
+					 TSL2583_POWER_OFF_DELAY_MS);
+	pm_runtime_use_autosuspend(&clientp->dev);
+
 	ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
 	if (ret) {
 		dev_err(&clientp->dev, "%s: iio registration failed\n",
@@ -836,16 +869,25 @@ static int tsl2583_probe(struct i2c_client *clientp,
 	/* Load up the V2 defaults (these are hard coded defaults for now) */
 	tsl2583_defaults(chip);
 
-	/* Make sure the chip is on */
-	ret = tsl2583_chip_init_and_power_on(indio_dev);
-	if (ret < 0)
-		return ret;
-
 	dev_info(&clientp->dev, "Light sensor found.\n");
 
 	return 0;
 }
 
+static int tsl2583_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	return tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+}
+
 static int __maybe_unused tsl2583_suspend(struct device *dev)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -855,7 +897,6 @@ static int __maybe_unused tsl2583_suspend(struct device *dev)
 	mutex_lock(&chip->als_mutex);
 
 	ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
-	chip->suspended = true;
 
 	mutex_unlock(&chip->als_mutex);
 
@@ -877,7 +918,11 @@ static int __maybe_unused tsl2583_resume(struct device *dev)
 	return ret;
 }
 
-static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
+static const struct dev_pm_ops tsl2583_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(tsl2583_suspend, tsl2583_resume, NULL)
+};
 
 static struct i2c_device_id tsl2583_idtable[] = {
 	{ "tsl2580", 0 },
@@ -904,6 +949,7 @@ static struct i2c_driver tsl2583_driver = {
 	},
 	.id_table = tsl2583_idtable,
 	.probe = tsl2583_probe,
+	.remove = tsl2583_remove,
 };
 module_i2c_driver(tsl2583_driver);
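tsl2583 now drops its private "suspended" flag and lets runtime PM own the power state, with system sleep routed through pm_runtime_force_suspend()/pm_runtime_force_resume(). A minimal sketch of that dev_pm_ops wiring, reusing the hypothetical foo_runtime_suspend()/foo_runtime_resume() callbacks from the sketch above:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Runtime PM does the real work; system sleep simply forces it through. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

The driver then points .driver.pm at foo_pm_ops and enables runtime PM with an autosuspend delay in probe, as tsl2583 does above.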
 
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6325e7d..f3cb4dc 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
 }
 
 static const struct spi_device_id st_magn_id_table[] = {
-	{ LSM303DLHC_MAGN_DEV_NAME },
-	{ LSM303DLM_MAGN_DEV_NAME },
 	{ LIS3MDL_MAGN_DEV_NAME },
 	{ LSM303AGR_MAGN_DEV_NAME },
 	{},
diff --git a/drivers/iio/multiplexer/Kconfig b/drivers/iio/multiplexer/Kconfig
new file mode 100644
index 0000000..735a7b0
--- /dev/null
+++ b/drivers/iio/multiplexer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Multiplexer drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Multiplexers"
+
+config IIO_MUX
+	tristate "IIO multiplexer driver"
+	select MULTIPLEXER
+	depends on OF || COMPILE_TEST
+	help
+	  Say yes here to build support for the IIO multiplexer.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called iio-mux.
+
+endmenu
diff --git a/drivers/iio/multiplexer/Makefile b/drivers/iio/multiplexer/Makefile
new file mode 100644
index 0000000..68be3c4
--- /dev/null
+++ b/drivers/iio/multiplexer/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O multiplexer drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_IIO_MUX) += iio-mux.o
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
new file mode 100644
index 0000000..37ba007
--- /dev/null
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -0,0 +1,459 @@
+/*
+ * IIO multiplexer driver
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct mux_ext_info_cache {
+	char *data;
+	ssize_t size;
+};
+
+struct mux_child {
+	struct mux_ext_info_cache *ext_info_cache;
+};
+
+struct mux {
+	int cached_state;
+	struct mux_control *control;
+	struct iio_channel *parent;
+	struct iio_dev *indio_dev;
+	struct iio_chan_spec *chan;
+	struct iio_chan_spec_ext_info *ext_info;
+	struct mux_child *child;
+};
+
+static int iio_mux_select(struct mux *mux, int idx)
+{
+	struct mux_child *child = &mux->child[idx];
+	struct iio_chan_spec const *chan = &mux->chan[idx];
+	int ret;
+	int i;
+
+	ret = mux_control_select(mux->control, chan->channel);
+	if (ret < 0) {
+		mux->cached_state = -1;
+		return ret;
+	}
+
+	if (mux->cached_state == chan->channel)
+		return 0;
+
+	if (chan->ext_info) {
+		for (i = 0; chan->ext_info[i].name; ++i) {
+			const char *attr = chan->ext_info[i].name;
+			struct mux_ext_info_cache *cache;
+
+			cache = &child->ext_info_cache[i];
+
+			if (cache->size < 0)
+				continue;
+
+			ret = iio_write_channel_ext_info(mux->parent, attr,
+							 cache->data,
+							 cache->size);
+
+			if (ret < 0) {
+				mux_control_deselect(mux->control);
+				mux->cached_state = -1;
+				return ret;
+			}
+		}
+	}
+	mux->cached_state = chan->channel;
+
+	return 0;
+}
+
+static void iio_mux_deselect(struct mux *mux)
+{
+	mux_control_deselect(mux->control);
+}
+
+static int mux_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan,
+			int *val, int *val2, long mask)
+{
+	struct mux *mux = iio_priv(indio_dev);
+	int idx = chan - mux->chan;
+	int ret;
+
+	ret = iio_mux_select(mux, idx);
+	if (ret < 0)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_read_channel_raw(mux->parent, val);
+		break;
+
+	case IIO_CHAN_INFO_SCALE:
+		ret = iio_read_channel_scale(mux->parent, val, val2);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	iio_mux_deselect(mux);
+
+	return ret;
+}
+
+static int mux_read_avail(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  const int **vals, int *type, int *length,
+			  long mask)
+{
+	struct mux *mux = iio_priv(indio_dev);
+	int idx = chan - mux->chan;
+	int ret;
+
+	ret = iio_mux_select(mux, idx);
+	if (ret < 0)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		*type = IIO_VAL_INT;
+		ret = iio_read_avail_channel_raw(mux->parent, vals, length);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	iio_mux_deselect(mux);
+
+	return ret;
+}
+
+static int mux_write_raw(struct iio_dev *indio_dev,
+			 struct iio_chan_spec const *chan,
+			 int val, int val2, long mask)
+{
+	struct mux *mux = iio_priv(indio_dev);
+	int idx = chan - mux->chan;
+	int ret;
+
+	ret = iio_mux_select(mux, idx);
+	if (ret < 0)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_write_channel_raw(mux->parent, val);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	iio_mux_deselect(mux);
+
+	return ret;
+}
+
+static const struct iio_info mux_info = {
+	.read_raw = mux_read_raw,
+	.read_avail = mux_read_avail,
+	.write_raw = mux_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static ssize_t mux_read_ext_info(struct iio_dev *indio_dev, uintptr_t private,
+				 struct iio_chan_spec const *chan, char *buf)
+{
+	struct mux *mux = iio_priv(indio_dev);
+	int idx = chan - mux->chan;
+	ssize_t ret;
+
+	ret = iio_mux_select(mux, idx);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_read_channel_ext_info(mux->parent,
+					mux->ext_info[private].name,
+					buf);
+
+	iio_mux_deselect(mux);
+
+	return ret;
+}
+
+static ssize_t mux_write_ext_info(struct iio_dev *indio_dev, uintptr_t private,
+				  struct iio_chan_spec const *chan,
+				  const char *buf, size_t len)
+{
+	struct device *dev = indio_dev->dev.parent;
+	struct mux *mux = iio_priv(indio_dev);
+	int idx = chan - mux->chan;
+	char *new;
+	ssize_t ret;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+
+	ret = iio_mux_select(mux, idx);
+	if (ret < 0)
+		return ret;
+
+	new = devm_kmemdup(dev, buf, len + 1, GFP_KERNEL);
+	if (!new) {
+		iio_mux_deselect(mux);
+		return -ENOMEM;
+	}
+
+	new[len] = 0;
+
+	ret = iio_write_channel_ext_info(mux->parent,
+					 mux->ext_info[private].name,
+					 buf, len);
+	if (ret < 0) {
+		iio_mux_deselect(mux);
+		devm_kfree(dev, new);
+		return ret;
+	}
+
+	devm_kfree(dev, mux->child[idx].ext_info_cache[private].data);
+	mux->child[idx].ext_info_cache[private].data = new;
+	mux->child[idx].ext_info_cache[private].size = len;
+
+	iio_mux_deselect(mux);
+
+	return ret;
+}
+
+static int mux_configure_channel(struct device *dev, struct mux *mux,
+				 u32 state, const char *label, int idx)
+{
+	struct mux_child *child = &mux->child[idx];
+	struct iio_chan_spec *chan = &mux->chan[idx];
+	struct iio_chan_spec const *pchan = mux->parent->channel;
+	char *page = NULL;
+	int num_ext_info;
+	int i;
+	int ret;
+
+	chan->indexed = 1;
+	chan->output = pchan->output;
+	chan->datasheet_name = label;
+	chan->ext_info = mux->ext_info;
+
+	ret = iio_get_channel_type(mux->parent, &chan->type);
+	if (ret < 0) {
+		dev_err(dev, "failed to get parent channel type\n");
+		return ret;
+	}
+
+	if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
+		chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
+	if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
+		chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
+
+	if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
+		chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
+
+	if (state >= mux_control_states(mux->control)) {
+		dev_err(dev, "too many channels\n");
+		return -EINVAL;
+	}
+
+	chan->channel = state;
+
+	num_ext_info = iio_get_channel_ext_info_count(mux->parent);
+	if (num_ext_info) {
+		page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
+		if (!page)
+			return -ENOMEM;
+	}
+	child->ext_info_cache = devm_kzalloc(dev,
+					     sizeof(*child->ext_info_cache) *
+					     num_ext_info, GFP_KERNEL);
+	for (i = 0; i < num_ext_info; ++i) {
+		child->ext_info_cache[i].size = -1;
+
+		if (!pchan->ext_info[i].write)
+			continue;
+		if (!pchan->ext_info[i].read)
+			continue;
+
+		ret = iio_read_channel_ext_info(mux->parent,
+						mux->ext_info[i].name,
+						page);
+		if (ret < 0) {
+			dev_err(dev, "failed to get ext_info '%s'\n",
+				pchan->ext_info[i].name);
+			return ret;
+		}
+		if (ret >= PAGE_SIZE) {
+			dev_err(dev, "too large ext_info '%s'\n",
+				pchan->ext_info[i].name);
+			return -EINVAL;
+		}
+
+		child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
+							     GFP_KERNEL);
+		child->ext_info_cache[i].data[ret] = 0;
+		child->ext_info_cache[i].size = ret;
+	}
+
+	if (page)
+		devm_kfree(dev, page);
+
+	return 0;
+}
+
+/*
+ * Same as of_property_for_each_string(), but also keeps track of the
+ * index of each string.
+ */
+#define of_property_for_each_string_index(np, propname, prop, s, i)	\
+	for (prop = of_find_property(np, propname, NULL),		\
+	     s = of_prop_next_string(prop, NULL),			\
+	     i = 0;							\
+	     s;								\
+	     s = of_prop_next_string(prop, s),				\
+	     i++)
+
+static int mux_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+	struct iio_dev *indio_dev;
+	struct iio_channel *parent;
+	struct mux *mux;
+	struct property *prop;
+	const char *label;
+	u32 state;
+	int sizeof_ext_info;
+	int children;
+	int sizeof_priv;
+	int i;
+	int ret;
+
+	if (!np)
+		return -ENODEV;
+
+	parent = devm_iio_channel_get(dev, "parent");
+	if (IS_ERR(parent)) {
+		if (PTR_ERR(parent) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get parent channel\n");
+		return PTR_ERR(parent);
+	}
+
+	sizeof_ext_info = iio_get_channel_ext_info_count(parent);
+	if (sizeof_ext_info) {
+		sizeof_ext_info += 1; /* one extra entry for the sentinel */
+		sizeof_ext_info *= sizeof(*mux->ext_info);
+	}
+
+	children = 0;
+	of_property_for_each_string(np, "channels", prop, label) {
+		if (*label)
+			children++;
+	}
+	if (children <= 0) {
+		dev_err(dev, "not even a single child\n");
+		return -EINVAL;
+	}
+
+	sizeof_priv = sizeof(*mux);
+	sizeof_priv += sizeof(*mux->child) * children;
+	sizeof_priv += sizeof(*mux->chan) * children;
+	sizeof_priv += sizeof_ext_info;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof_priv);
+	if (!indio_dev)
+		return -ENOMEM;
+
+	mux = iio_priv(indio_dev);
+	mux->child = (struct mux_child *)(mux + 1);
+	mux->chan = (struct iio_chan_spec *)(mux->child + children);
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	mux->parent = parent;
+	mux->cached_state = -1;
+
+	indio_dev->name = dev_name(dev);
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &mux_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = mux->chan;
+	indio_dev->num_channels = children;
+	if (sizeof_ext_info) {
+		mux->ext_info = devm_kmemdup(dev,
+					     parent->channel->ext_info,
+					     sizeof_ext_info, GFP_KERNEL);
+		if (!mux->ext_info)
+			return -ENOMEM;
+
+		for (i = 0; mux->ext_info[i].name; ++i) {
+			if (parent->channel->ext_info[i].read)
+				mux->ext_info[i].read = mux_read_ext_info;
+			if (parent->channel->ext_info[i].write)
+				mux->ext_info[i].write = mux_write_ext_info;
+			mux->ext_info[i].private = i;
+		}
+	}
+
+	mux->control = devm_mux_control_get(dev, NULL);
+	if (IS_ERR(mux->control)) {
+		if (PTR_ERR(mux->control) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get control-mux\n");
+		return PTR_ERR(mux->control);
+	}
+
+	i = 0;
+	of_property_for_each_string_index(np, "channels", prop, label, state) {
+		if (!*label)
+			continue;
+
+		ret = mux_configure_channel(dev, mux, state, label, i++);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = devm_iio_device_register(dev, indio_dev);
+	if (ret) {
+		dev_err(dev, "failed to register iio device\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id mux_match[] = {
+	{ .compatible = "io-channel-mux" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_match);
+
+static struct platform_driver mux_driver = {
+	.probe = mux_probe,
+	.driver = {
+		.name = "iio-mux",
+		.of_match_table = mux_match,
+	},
+};
+module_platform_driver(mux_driver);
+
+MODULE_DESCRIPTION("IIO multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
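Because the new iio-mux driver exposes every multiplexer state as a regular IIO channel, an in-kernel consumer never touches the mux directly; selecting and deselecting the parent channel happens inside the driver's read path. A minimal consumer-side sketch, assuming a hypothetical binding that names one of the mux channels "sense":

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Hypothetical consumer: read one muxed channel by its io-channel-names entry. */
static int foo_read_muxed(struct device *dev, int *raw)
{
	struct iio_channel *chan;
	int ret;

	chan = devm_iio_channel_get(dev, "sense");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* iio-mux selects the matching mux state, reads the parent, then deselects. */
	ret = iio_read_channel_raw(chan, raw);

	return ret < 0 ? ret : 0;
}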
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index a97e802c..e9fa86c 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -31,6 +31,10 @@ struct dev_rot_state {
 	struct hid_sensor_common common_attributes;
 	struct hid_sensor_hub_attribute_info quaternion;
 	u32 sampled_vals[4];
+	int scale_pre_decml;
+	int scale_post_decml;
+	int scale_precision;
+	int value_offset;
 };
 
 /* Channel definitions */
@@ -41,6 +45,8 @@ static const struct iio_chan_spec dev_rot_channels[] = {
 		.channel2 = IIO_MOD_QUATERNION,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+					BIT(IIO_CHAN_INFO_OFFSET) |
+					BIT(IIO_CHAN_INFO_SCALE) |
 					BIT(IIO_CHAN_INFO_HYSTERESIS)
 	}
 };
@@ -80,6 +86,15 @@ static int dev_rot_read_raw(struct iio_dev *indio_dev,
 		} else
 			ret_type = -EINVAL;
 		break;
+	case IIO_CHAN_INFO_SCALE:
+		vals[0] = rot_state->scale_pre_decml;
+		vals[1] = rot_state->scale_post_decml;
+		return rot_state->scale_precision;
+
+	case IIO_CHAN_INFO_OFFSET:
+		*vals = rot_state->value_offset;
+		return IIO_VAL_INT;
+
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		ret_type = hid_sensor_read_samp_freq_value(
 			&rot_state->common_attributes, &vals[0], &vals[1]);
@@ -199,6 +214,11 @@ static int dev_rot_parse_report(struct platform_device *pdev,
 	dev_dbg(&pdev->dev, "dev_rot: attrib size %d\n",
 				st->quaternion.size);
 
+	st->scale_precision = hid_sensor_format_scale(
+				hsdev->usage,
+				&st->quaternion,
+				&st->scale_pre_decml, &st->scale_post_decml);
+
 	/* Set Sensitivity field ids, when there is no individual modifier */
 	if (st->common_attributes.sensitivity.index < 0) {
 		sensor_hub_input_get_attribute_info(hsdev,
@@ -218,7 +238,7 @@ static int dev_rot_parse_report(struct platform_device *pdev,
 static int hid_dev_rot_probe(struct platform_device *pdev)
 {
 	int ret;
-	static char *name = "dev_rotation";
+	static char *name;
 	struct iio_dev *indio_dev;
 	struct dev_rot_state *rot_state;
 	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
@@ -234,8 +254,21 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
 	rot_state->common_attributes.hsdev = hsdev;
 	rot_state->common_attributes.pdev = pdev;
 
-	ret = hid_sensor_parse_common_attributes(hsdev,
-				HID_USAGE_SENSOR_DEVICE_ORIENTATION,
+	switch (hsdev->usage) {
+	case HID_USAGE_SENSOR_DEVICE_ORIENTATION:
+		name = "dev_rotation";
+		break;
+	case HID_USAGE_SENSOR_RELATIVE_ORIENTATION:
+		name = "relative_orientation";
+		break;
+	case HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION:
+		name = "geomagnetic_orientation";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
 				&rot_state->common_attributes);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to setup common attributes\n");
@@ -252,8 +285,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
 
 	ret = dev_rot_parse_report(pdev, hsdev,
 				   (struct iio_chan_spec *)indio_dev->channels,
-				   HID_USAGE_SENSOR_DEVICE_ORIENTATION,
-				   rot_state);
+					hsdev->usage, rot_state);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to setup attributes\n");
 		return ret;
@@ -288,8 +320,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
 	rot_state->callbacks.send_event = dev_rot_proc_event;
 	rot_state->callbacks.capture_sample = dev_rot_capture_sample;
 	rot_state->callbacks.pdev = pdev;
-	ret = sensor_hub_register_callback(hsdev,
-					HID_USAGE_SENSOR_DEVICE_ORIENTATION,
+	ret = sensor_hub_register_callback(hsdev, hsdev->usage,
 					&rot_state->callbacks);
 	if (ret) {
 		dev_err(&pdev->dev, "callback reg failed\n");
@@ -314,7 +345,7 @@ static int hid_dev_rot_remove(struct platform_device *pdev)
 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 	struct dev_rot_state *rot_state = iio_priv(indio_dev);
 
-	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_DEVICE_ORIENTATION);
+	sensor_hub_remove_callback(hsdev, hsdev->usage);
 	iio_device_unregister(indio_dev);
 	hid_sensor_remove_trigger(&rot_state->common_attributes);
 	iio_triggered_buffer_cleanup(indio_dev);
@@ -327,6 +358,14 @@ static const struct platform_device_id hid_dev_rot_ids[] = {
 		/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
 		.name = "HID-SENSOR-20008a",
 	},
+	{
+		/* Relative orientation(AG) sensor */
+		.name = "HID-SENSOR-20008e",
+	},
+	{
+		/* Geomagnetic orientation(AM) sensor */
+		.name = "HID-SENSOR-2000c1",
+	},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(platform, hid_dev_rot_ids);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 5d16b25..eaa7cfc 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -23,7 +23,7 @@
 	select BMP280_SPI if (SPI_MASTER)
 	help
 	  Say yes here to build support for Bosch Sensortec BMP180 and BMP280
-	  pressure and temperature sensors. Also supports the BE280 with
+	  pressure and temperature sensors. Also supports the BME280 with
 	  an additional humidity sensor channel.
 
 	  To compile this driver as a module, choose M here: the core module
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index fd0edca..aa61ec1 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -568,6 +568,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
 int st_press_common_probe(struct iio_dev *indio_dev)
 {
 	struct st_sensor_data *press_data = iio_priv(indio_dev);
+	struct st_sensors_platform_data *pdata =
+		(struct st_sensors_platform_data *)press_data->dev->platform_data;
 	int irq = press_data->get_irq_data_ready(indio_dev);
 	int err;
 
@@ -603,10 +605,8 @@ int st_press_common_probe(struct iio_dev *indio_dev)
 	press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
 
 	/* Some devices don't support a data ready pin. */
-	if (!press_data->dev->platform_data &&
-				press_data->sensor_settings->drdy_irq.addr)
-		press_data->dev->platform_data =
-			(struct st_sensors_platform_data *)&default_press_pdata;
+	if (!pdata && press_data->sensor_settings->drdy_irq.addr)
+		pdata =	(struct st_sensors_platform_data *)&default_press_pdata;
 
 	err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
 	if (err < 0)
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index e58a0ad..c92a95f 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -867,12 +867,13 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev   *indio_dev,
 {
 	int          ret;
 	unsigned int val;
+	long     timeout;
 
 	zpa2326_dbg(indio_dev, "waiting for one shot completion interrupt");
 
-	ret = wait_for_completion_interruptible_timeout(
+	timeout = wait_for_completion_interruptible_timeout(
 		&private->data_ready, ZPA2326_CONVERSION_JIFFIES);
-	if (ret > 0)
+	if (timeout > 0)
 		/*
 		 * Interrupt handler completed before timeout: return operation
 		 * status.
@@ -882,13 +883,16 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev   *indio_dev,
 	/* Clear all interrupts just to be sure. */
 	regmap_read(private->regmap, ZPA2326_INT_SOURCE_REG, &val);
 
-	if (!ret)
+	if (!timeout) {
 		/* Timed out. */
+		zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
+			     timeout);
 		ret = -ETIME;
-
-	if (ret != -ERESTARTSYS)
-		zpa2326_warn(indio_dev, "no one shot interrupt occurred (%d)",
-			     ret);
+	} else if (timeout < 0) {
+		zpa2326_warn(indio_dev,
+			     "wait for one shot interrupt cancelled");
+		ret = -ERESTARTSYS;
+	}
 
 	return ret;
 }
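The zpa2326 change above matters because wait_for_completion_interruptible_timeout() returns a long with three distinct outcomes: a positive number of remaining jiffies on completion, zero on timeout, and -ERESTARTSYS when interrupted. Funnelling that result into the same int that later carries the error code conflated the cases. A minimal sketch of the intended handling, with the 200 ms timeout chosen purely for illustration:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int foo_wait_one_shot(struct completion *done)
{
	long timeout;

	timeout = wait_for_completion_interruptible_timeout(done,
						msecs_to_jiffies(200));
	if (timeout > 0)
		return 0;		/* completed before the timeout */
	if (!timeout)
		return -ETIME;		/* timed out */

	return -ERESTARTSYS;		/* interrupted by a signal */
}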
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index ddf9bee..0eeff29 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
 #define AS3935_AFE_PWR_BIT	BIT(0)
 
 #define AS3935_INT		0x03
-#define AS3935_INT_MASK		0x07
+#define AS3935_INT_MASK		0x0f
 #define AS3935_EVENT_INT	BIT(3)
-#define AS3935_NOISE_INT	BIT(1)
+#define AS3935_NOISE_INT	BIT(0)
 
 #define AS3935_DATA		0x07
 #define AS3935_DATA_MASK	0x3F
@@ -176,13 +176,13 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
 		if (ret)
 			return ret;
 
-		if (m == IIO_CHAN_INFO_RAW)
-			return IIO_VAL_INT;
-
 		/* storm out of range */
 		if (*val == AS3935_DATA_MASK)
 			return -EINVAL;
 
+		if (m == IIO_CHAN_INFO_RAW)
+			return IIO_VAL_INT;
+
 		if (m == IIO_CHAN_INFO_PROCESSED)
 			*val *= 1000;
 		break;
@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
 
 	st->buffer[0] = val & AS3935_DATA_MASK;
 	iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
-					   pf->timestamp);
+					   iio_get_time_ns(indio_dev));
 err_read:
 	iio_trigger_notify_done(indio_dev->trig);
 
@@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
 
 	switch (val) {
 	case AS3935_EVENT_INT:
-		iio_trigger_poll(st->trig);
+		iio_trigger_poll_chained(st->trig);
 		break;
 	case AS3935_NOISE_INT:
 		dev_warn(&st->spi->dev, "noise level is too high\n");
@@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-	mutex_lock(&st->lock);
-
 	/* mask disturber interrupt bit */
 	as3935_write(st, AS3935_INT, BIT(5));
 
@@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st)
 
 	mdelay(2);
 	as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-	mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev)
 	val &= ~AS3935_AFE_PWR_BIT;
 	ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+	calibrate_as3935(st);
+
 err_resume:
 	mutex_unlock(&st->lock);
 
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 9ea147f..f42b3a1 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -878,8 +878,7 @@ static void sx9500_gpio_probe(struct i2c_client *client,
 
 	dev = &client->dev;
 
-	data->gpiod_rst = devm_gpiod_get_index(dev, SX9500_GPIO_RESET,
-					       0, GPIOD_OUT_HIGH);
+	data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH);
 	if (IS_ERR(data->gpiod_rst)) {
 		dev_warn(dev, "gpio get reset pin failed\n");
 		data->gpiod_rst = NULL;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 5572142..d70e2e5 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -267,6 +267,7 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
 	{"max6675", MAX6675},
 	{"max31855", MAX31855},
+	{"max31856", MAX31855},
 	{},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 25248d6..d22bc56 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -14,19 +14,19 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#define MAX_TRIGGERS 6
+#define MAX_TRIGGERS 7
 #define MAX_VALIDS 5
 
 /* List the triggers created by each timer */
 static const void *triggers_table[][MAX_TRIGGERS] = {
-	{ TIM1_TRGO, TIM1_CH1, TIM1_CH2, TIM1_CH3, TIM1_CH4,},
+	{ TIM1_TRGO, TIM1_TRGO2, TIM1_CH1, TIM1_CH2, TIM1_CH3, TIM1_CH4,},
 	{ TIM2_TRGO, TIM2_CH1, TIM2_CH2, TIM2_CH3, TIM2_CH4,},
 	{ TIM3_TRGO, TIM3_CH1, TIM3_CH2, TIM3_CH3, TIM3_CH4,},
 	{ TIM4_TRGO, TIM4_CH1, TIM4_CH2, TIM4_CH3, TIM4_CH4,},
 	{ TIM5_TRGO, TIM5_CH1, TIM5_CH2, TIM5_CH3, TIM5_CH4,},
 	{ TIM6_TRGO,},
 	{ TIM7_TRGO,},
-	{ TIM8_TRGO, TIM8_CH1, TIM8_CH2, TIM8_CH3, TIM8_CH4,},
+	{ TIM8_TRGO, TIM8_TRGO2, TIM8_CH1, TIM8_CH2, TIM8_CH3, TIM8_CH4,},
 	{ TIM9_TRGO, TIM9_CH1, TIM9_CH2,},
 	{ }, /* timer 10 */
 	{ }, /* timer 11 */
@@ -56,9 +56,16 @@ struct stm32_timer_trigger {
 	u32 max_arr;
 	const void *triggers;
 	const void *valids;
+	bool has_trgo2;
 };
 
+static bool stm32_timer_is_trgo2_name(const char *name)
+{
+	return !!strstr(name, "trgo2");
+}
+
 static int stm32_timer_start(struct stm32_timer_trigger *priv,
+			     struct iio_trigger *trig,
 			     unsigned int frequency)
 {
 	unsigned long long prd, div;
@@ -102,7 +109,12 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
 
 	/* Force master mode to update mode */
-	regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0x20);
+	if (stm32_timer_is_trgo2_name(trig->name))
+		regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2,
+				   0x2 << TIM_CR2_MMS2_SHIFT);
+	else
+		regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS,
+				   0x2 << TIM_CR2_MMS_SHIFT);
 
 	/* Make sure that registers are updated */
 	regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
@@ -150,7 +162,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
 	if (freq == 0) {
 		stm32_timer_stop(priv);
 	} else {
-		ret = stm32_timer_start(priv, freq);
+		ret = stm32_timer_start(priv, trig, freq);
 		if (ret)
 			return ret;
 	}
@@ -183,6 +195,9 @@ static IIO_DEV_ATTR_SAMP_FREQ(0660,
 			      stm32_tt_read_frequency,
 			      stm32_tt_store_frequency);
 
+#define MASTER_MODE_MAX		7
+#define MASTER_MODE2_MAX	15
+
 static char *master_mode_table[] = {
 	"reset",
 	"enable",
@@ -191,7 +206,16 @@ static char *master_mode_table[] = {
 	"OC1REF",
 	"OC2REF",
 	"OC3REF",
-	"OC4REF"
+	"OC4REF",
+	/* Master mode selection 2 only */
+	"OC5REF",
+	"OC6REF",
+	"compare_pulse_OC4REF",
+	"compare_pulse_OC6REF",
+	"compare_pulse_OC4REF_r_or_OC6REF_r",
+	"compare_pulse_OC4REF_r_or_OC6REF_f",
+	"compare_pulse_OC5REF_r_or_OC6REF_r",
+	"compare_pulse_OC5REF_r_or_OC6REF_f",
 };
 
 static ssize_t stm32_tt_show_master_mode(struct device *dev,
@@ -199,10 +223,15 @@ static ssize_t stm32_tt_show_master_mode(struct device *dev,
 					 char *buf)
 {
 	struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
+	struct iio_trigger *trig = to_iio_trigger(dev);
 	u32 cr2;
 
 	regmap_read(priv->regmap, TIM_CR2, &cr2);
-	cr2 = (cr2 & TIM_CR2_MMS) >> TIM_CR2_MMS_SHIFT;
+
+	if (stm32_timer_is_trgo2_name(trig->name))
+		cr2 = (cr2 & TIM_CR2_MMS2) >> TIM_CR2_MMS2_SHIFT;
+	else
+		cr2 = (cr2 & TIM_CR2_MMS) >> TIM_CR2_MMS_SHIFT;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", master_mode_table[cr2]);
 }
@@ -212,13 +241,25 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
 					  const char *buf, size_t len)
 {
 	struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
+	struct iio_trigger *trig = to_iio_trigger(dev);
+	u32 mask, shift, master_mode_max;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(master_mode_table); i++) {
+	if (stm32_timer_is_trgo2_name(trig->name)) {
+		mask = TIM_CR2_MMS2;
+		shift = TIM_CR2_MMS2_SHIFT;
+		master_mode_max = MASTER_MODE2_MAX;
+	} else {
+		mask = TIM_CR2_MMS;
+		shift = TIM_CR2_MMS_SHIFT;
+		master_mode_max = MASTER_MODE_MAX;
+	}
+
+	for (i = 0; i <= master_mode_max; i++) {
 		if (!strncmp(master_mode_table[i], buf,
 			     strlen(master_mode_table[i]))) {
-			regmap_update_bits(priv->regmap, TIM_CR2,
-					   TIM_CR2_MMS, i << TIM_CR2_MMS_SHIFT);
+			regmap_update_bits(priv->regmap, TIM_CR2, mask,
+					   i << shift);
 			/* Make sure that registers are updated */
 			regmap_update_bits(priv->regmap, TIM_EGR,
 					   TIM_EGR_UG, TIM_EGR_UG);
@@ -229,8 +270,31 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
 	return -EINVAL;
 }
 
-static IIO_CONST_ATTR(master_mode_available,
-	"reset enable update compare_pulse OC1REF OC2REF OC3REF OC4REF");
+static ssize_t stm32_tt_show_master_mode_avail(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct iio_trigger *trig = to_iio_trigger(dev);
+	unsigned int i, master_mode_max;
+	size_t len = 0;
+
+	if (stm32_timer_is_trgo2_name(trig->name))
+		master_mode_max = MASTER_MODE2_MAX;
+	else
+		master_mode_max = MASTER_MODE_MAX;
+
+	for (i = 0; i <= master_mode_max; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"%s ", master_mode_table[i]);
+
+	/* replace trailing space by newline */
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(master_mode_available, 0444,
+		       stm32_tt_show_master_mode_avail, NULL, 0);
 
 static IIO_DEVICE_ATTR(master_mode, 0660,
 		       stm32_tt_show_master_mode,
@@ -240,7 +304,7 @@ static IIO_DEVICE_ATTR(master_mode, 0660,
 static struct attribute *stm32_trigger_attrs[] = {
 	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_dev_attr_master_mode.dev_attr.attr,
-	&iio_const_attr_master_mode_available.dev_attr.attr,
+	&iio_dev_attr_master_mode_available.dev_attr.attr,
 	NULL,
 };
 
@@ -264,6 +328,12 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
 
 	while (cur && *cur) {
 		struct iio_trigger *trig;
+		bool cur_is_trgo2 = stm32_timer_is_trgo2_name(*cur);
+
+		if (cur_is_trgo2 && !priv->has_trgo2) {
+			cur++;
+			continue;
+		}
 
 		trig = devm_iio_trigger_alloc(priv->dev, "%s", *cur);
 		if  (!trig)
@@ -277,7 +347,7 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
 		 * should only be available on trgo trigger which
 		 * is always the first in the list.
 		 */
-		if (cur == priv->triggers)
+		if (cur == priv->triggers || cur_is_trgo2)
 			trig->dev.groups = stm32_trigger_attr_groups;
 
 		iio_trigger_set_drvdata(trig, priv);
@@ -347,12 +417,70 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
 	return -EINVAL;
 }
 
+static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
+					  struct iio_trigger *trig)
+{
+	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	const char * const *cur = priv->valids;
+	unsigned int i = 0;
+
+	if (!is_stm32_timer_trigger(trig))
+		return -EINVAL;
+
+	while (cur && *cur) {
+		if (!strncmp(trig->name, *cur, strlen(trig->name))) {
+			regmap_update_bits(priv->regmap,
+					   TIM_SMCR, TIM_SMCR_TS,
+					   i << TIM_SMCR_TS_SHIFT);
+			return 0;
+		}
+		cur++;
+		i++;
+	}
+
+	return -EINVAL;
+}
+
 static const struct iio_info stm32_trigger_info = {
 	.driver_module = THIS_MODULE,
+	.validate_trigger = stm32_counter_validate_trigger,
 	.read_raw = stm32_counter_read_raw,
 	.write_raw = stm32_counter_write_raw
 };
 
+static const char *const stm32_trigger_modes[] = {
+	"trigger",
+};
+
+static int stm32_set_trigger_mode(struct iio_dev *indio_dev,
+				  const struct iio_chan_spec *chan,
+				  unsigned int mode)
+{
+	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+
+	regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, TIM_SMCR_SMS);
+
+	return 0;
+}
+
+static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
+				  const struct iio_chan_spec *chan)
+{
+	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 smcr;
+
+	regmap_read(priv->regmap, TIM_SMCR, &smcr);
+
+	return smcr == TIM_SMCR_SMS ? 0 : -EINVAL;
+}
+
+static const struct iio_enum stm32_trigger_mode_enum = {
+	.items = stm32_trigger_modes,
+	.num_items = ARRAY_SIZE(stm32_trigger_modes),
+	.set = stm32_set_trigger_mode,
+	.get = stm32_get_trigger_mode
+};
+
 static const char *const stm32_enable_modes[] = {
 	"always",
 	"gated",
@@ -536,6 +664,8 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
 	IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_quadrature_mode_enum),
 	IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
 	IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum),
+	IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
+	IIO_ENUM_AVAILABLE("trigger_mode", &stm32_trigger_mode_enum),
 	{}
 };
 
@@ -560,6 +690,7 @@ static struct stm32_timer_trigger *stm32_setup_counter_device(struct device *dev
 	indio_dev->name = dev_name(dev);
 	indio_dev->dev.parent = dev;
 	indio_dev->info = &stm32_trigger_info;
+	indio_dev->modes = INDIO_HARDWARE_TRIGGERED;
 	indio_dev->num_channels = 1;
 	indio_dev->channels = &stm32_trigger_channel;
 	indio_dev->dev.of_node = dev->of_node;
@@ -584,6 +715,20 @@ bool is_stm32_timer_trigger(struct iio_trigger *trig)
 }
 EXPORT_SYMBOL(is_stm32_timer_trigger);
 
+static void stm32_timer_detect_trgo2(struct stm32_timer_trigger *priv)
+{
+	u32 val;
+
+	/*
+	 * Master mode selection 2 bits can only be written and read back when
+	 * timer supports it.
+	 */
+	regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, TIM_CR2_MMS2);
+	regmap_read(priv->regmap, TIM_CR2, &val);
+	regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+	priv->has_trgo2 = !!val;
+}
+
 static int stm32_timer_trigger_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -614,6 +759,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
 	priv->max_arr = ddata->max_arr;
 	priv->triggers = triggers_table[index];
 	priv->valids = valids_table[index];
+	stm32_timer_detect_trgo2(priv);
 
 	ret = stm32_setup_iio_triggers(priv);
 	if (ret)
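TRGO2 support is detected above by writing the MMS2 bits and reading them back, a common way of probing for register fields that only exist on some instances of an IP block. A minimal sketch of that write/read-back idiom over regmap (the foo_* name, register and mask are placeholders):

#include <linux/regmap.h>
#include <linux/types.h>

/* Returns true if the bits in @mask are actually implemented at @reg. */
static bool foo_field_exists(struct regmap *map, unsigned int reg, unsigned int mask)
{
	unsigned int val = 0;

	regmap_update_bits(map, reg, mask, mask);	/* try to set the field */
	regmap_read(map, reg, &val);
	regmap_update_bits(map, reg, mask, 0);		/* restore the reset value */

	return val & mask;
}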
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 6ebd9ad..e3cdaff 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -10,7 +10,8 @@
 ib_core-y :=			packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
 				device.o fmr_pool.o cache.o netlink.o \
 				roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
-				multicast.o mad.o smi.o agent.o mad_rmpp.o
+				multicast.o mad.o smi.o agent.o mad_rmpp.o \
+				security.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
 ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 02971e2..ece6926 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 		return ret;
 
 	rt = (struct rt6_info *)dst;
-	if (ipv6_addr_any(&fl6.saddr)) {
-		ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
-					 &fl6.daddr, 0, &fl6.saddr);
-		if (ret)
-			goto put;
-
+	if (ipv6_addr_any(&src_in->sin6_addr)) {
 		src_in->sin6_family = AF_INET6;
 		src_in->sin6_addr = fl6.saddr;
 	}
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 
 	*pdst = dst;
 	return 0;
-put:
-	dst_release(dst);
-	return ret;
 }
 #else
 static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b1371eb..efc9430 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -53,6 +53,7 @@ struct ib_update_work {
 	struct work_struct work;
 	struct ib_device  *device;
 	u8                 port_num;
+	bool		   enforce_security;
 };
 
 union ib_gid zgid;
@@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_pkey);
 
+int ib_get_cached_subnet_prefix(struct ib_device *device,
+				u8                port_num,
+				u64              *sn_pfx)
+{
+	unsigned long flags;
+	int p;
+
+	if (port_num < rdma_start_port(device) ||
+	    port_num > rdma_end_port(device))
+		return -EINVAL;
+
+	p = port_num - rdma_start_port(device);
+	read_lock_irqsave(&device->cache.lock, flags);
+	*sn_pfx = device->cache.ports[p].subnet_prefix;
+	read_unlock_irqrestore(&device->cache.lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
+
 int ib_find_cached_pkey(struct ib_device *device,
 			u8                port_num,
 			u16               pkey,
@@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device   *device,
 EXPORT_SYMBOL(ib_get_cached_port_state);
 
 static void ib_cache_update(struct ib_device *device,
-			    u8                port)
+			    u8                port,
+			    bool	      enforce_security)
 {
 	struct ib_port_attr       *tprops = NULL;
 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
@@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device,
 	device->cache.ports[port - rdma_start_port(device)].port_state =
 		tprops->state;
 
+	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
+							tprops->subnet_prefix;
 	write_unlock_irq(&device->cache.lock);
 
+	if (enforce_security)
+		ib_security_cache_change(device,
+					 port,
+					 tprops->subnet_prefix);
+
 	kfree(gid_cache);
 	kfree(old_pkey_cache);
 	kfree(tprops);
@@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work)
 	struct ib_update_work *work =
 		container_of(_work, struct ib_update_work, work);
 
-	ib_cache_update(work->device, work->port_num);
+	ib_cache_update(work->device,
+			work->port_num,
+			work->enforce_security);
 	kfree(work);
 }
 
@@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler,
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
+			if (event->event == IB_EVENT_PKEY_CHANGE ||
+			    event->event == IB_EVENT_GID_CHANGE)
+				work->enforce_security = true;
+			else
+				work->enforce_security = false;
+
 			queue_work(ib_wq, &work->work);
 		}
 	}
@@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device)
 		goto out;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-		ib_cache_update(device, p + rdma_start_port(device));
+		ib_cache_update(device, p + rdma_start_port(device), true);
 
 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
 			      device, ib_cache_event);
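ib_get_cached_subnet_prefix() follows the existing cache convention: validate the port number, take the cache rwlock for reading, copy the value out, and drop the lock, while writers update the field under write_lock_irq() in ib_cache_update(). A minimal sketch of that reader-side idiom for a generic cached value (foo_cache is illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_cache {
	rwlock_t lock;
	u64 value;
};

static u64 foo_cache_read(struct foo_cache *cache)
{
	unsigned long flags;
	u64 val;

	read_lock_irqsave(&cache->lock, flags);	/* writers hold write_lock_irq() */
	val = cache->value;
	read_unlock_irqrestore(&cache->lock, flags);

	return val;
}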
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1844770..2b4d613 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 	primary_path->packet_life_time =
 		cm_req_get_primary_local_ack_timeout(req_msg);
 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-	sa_path_set_service_id(primary_path, req_msg->service_id);
+	primary_path->service_id = req_msg->service_id;
 
 	if (req_msg->alt_local_lid) {
 		alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 		alt_path->packet_life_time =
 			cm_req_get_alt_local_ack_timeout(req_msg);
 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-		sa_path_set_service_id(alt_path, req_msg->service_id);
+		alt_path->service_id = req_msg->service_id;
 	}
 }
 
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 91b7a2f..31bb82d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
 			ib->sib_pkey = path->pkey;
 			ib->sib_flowinfo = path->flow_label;
 			memcpy(&ib->sib_addr, &path->sgid, 16);
-			ib->sib_sid = sa_path_get_service_id(path);
+			ib->sib_sid = path->service_id;
 			ib->sib_scope_id = 0;
 		} else {
 			ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		memcpy(&req->local_gid, &req_param->primary_path->sgid,
 		       sizeof(req->local_gid));
 		req->has_gid	= true;
-		req->service_id	=
-			sa_path_get_service_id(req_param->primary_path);
+		req->service_id = req_param->primary_path->service_id;
 		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
 		if (req->pkey != req_param->bth_pkey)
 			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	struct rdma_route *rt;
 	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
 	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-	const __be64 service_id = sa_path_get_service_id(path);
+	const __be64 service_id =
+		ib_event->param.req_rcvd.primary_path->service_id;
 	int ret;
 
 	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	sa_path_set_service_id(&path_rec,
-			       rdma_get_service_id(&id_priv->id,
-						   cma_dst_addr(id_priv)));
+	path_rec.service_id = rdma_get_service_id(&id_priv->id,
+						  cma_dst_addr(id_priv));
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cb7d372..11ae675 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,6 +38,16 @@
 #include <linux/cgroup_rdma.h>
 
 #include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
+#include "mad_priv.h"
+
+struct pkey_index_qp_list {
+	struct list_head    pkey_index_list;
+	u16                 pkey_index;
+	/* Lock to hold while iterating the qp_list. */
+	spinlock_t          qp_list_lock;
+	struct list_head    qp_list;
+};
 
 #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
 int cma_configfs_init(void);
@@ -169,6 +179,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * ibnl_chk_listeners - Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 if there are listeners, or a negative value if there are none.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
 			      struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
@@ -176,4 +196,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
 			     struct netlink_callback *cb);
 
+int ib_get_cached_subnet_prefix(struct ib_device *device,
+				u8                port_num,
+				u64              *sn_pfx);
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+int ib_security_pkey_access(struct ib_device *dev,
+			    u8 port_num,
+			    u16 pkey_index,
+			    void *sec);
+
+void ib_security_destroy_port_pkey_list(struct ib_device *device);
+
+void ib_security_cache_change(struct ib_device *device,
+			      u8 port_num,
+			      u64 subnet_prefix);
+
+int ib_security_modify_qp(struct ib_qp *qp,
+			  struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask,
+			  struct ib_udata *udata);
+
+int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
+void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
+void ib_destroy_qp_security_end(struct ib_qp_security *sec);
+int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
+void ib_close_shared_qp_security(struct ib_qp_security *sec);
+int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+				enum ib_qp_type qp_type);
+void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
+int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
+#else
+static inline int ib_security_pkey_access(struct ib_device *dev,
+					  u8 port_num,
+					  u16 pkey_index,
+					  void *sec)
+{
+	return 0;
+}
+
+static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
+{
+}
+
+static inline void ib_security_cache_change(struct ib_device *device,
+					    u8 port_num,
+					    u64 subnet_prefix)
+{
+}
+
+static inline int ib_security_modify_qp(struct ib_qp *qp,
+					struct ib_qp_attr *qp_attr,
+					int qp_attr_mask,
+					struct ib_udata *udata)
+{
+	return qp->device->modify_qp(qp->real_qp,
+				     qp_attr,
+				     qp_attr_mask,
+				     udata);
+}
+
+static inline int ib_create_qp_security(struct ib_qp *qp,
+					struct ib_device *dev)
+{
+	return 0;
+}
+
+static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
+{
+}
+
+static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_open_shared_qp_security(struct ib_qp *qp,
+					     struct ib_device *dev)
+{
+	return 0;
+}
+
+static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+					      enum ib_qp_type qp_type)
+{
+	return 0;
+}
+
+static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
+{
+}
+
+static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
+					  u16 pkey_index)
+{
+	return 0;
+}
+#endif
 #endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 81d447d..631eaa9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -39,6 +39,8 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
 #include <rdma/rdma_netlink.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
@@ -82,6 +84,14 @@ static LIST_HEAD(client_list);
 static DEFINE_MUTEX(device_mutex);
 static DECLARE_RWSEM(lists_rwsem);
 
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+			      void *lsm_data);
+static void ib_policy_change_task(struct work_struct *work);
+static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
+
+static struct notifier_block ibdev_lsm_nb = {
+	.notifier_call = ib_security_change,
+};
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {
@@ -325,6 +335,64 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
 }
 EXPORT_SYMBOL(ib_get_device_fw_str);
 
+static int setup_port_pkey_list(struct ib_device *device)
+{
+	int i;
+
+	/*
+	 * device->port_pkey_list is indexed directly by the port number.
+	 * Therefore it is declared as a 1-based array with potential empty
+	 * slots at the beginning.
+	 */
+	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
+					 sizeof(*device->port_pkey_list),
+					 GFP_KERNEL);
+
+	if (!device->port_pkey_list)
+		return -ENOMEM;
+
+	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
+		spin_lock_init(&device->port_pkey_list[i].list_lock);
+		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
+	}
+
+	return 0;
+}
+
+static void ib_policy_change_task(struct work_struct *work)
+{
+	struct ib_device *dev;
+
+	down_read(&lists_rwsem);
+	list_for_each_entry(dev, &device_list, core_list) {
+		int i;
+
+		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
+			u64 sp;
+			int ret = ib_get_cached_subnet_prefix(dev,
+							      i,
+							      &sp);
+
+			WARN_ONCE(ret,
+				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
+				  ret);
+			ib_security_cache_change(dev, i, sp);
+		}
+	}
+	up_read(&lists_rwsem);
+}
+
+static int ib_security_change(struct notifier_block *nb, unsigned long event,
+			      void *lsm_data)
+{
+	if (event != LSM_POLICY_CHANGE)
+		return NOTIFY_DONE;
+
+	schedule_work(&ib_policy_change_work);
+
+	return NOTIFY_OK;
+}
+
 /**
  * ib_register_device - Register an IB device with IB core
  * @device:Device to register
@@ -385,6 +453,12 @@ int ib_register_device(struct ib_device *device,
 		goto out;
 	}
 
+	ret = setup_port_pkey_list(device);
+	if (ret) {
+		pr_warn("Couldn't create per port_pkey_list\n");
+		goto out;
+	}
+
 	ret = ib_cache_setup_one(device);
 	if (ret) {
 		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
@@ -468,6 +542,9 @@ void ib_unregister_device(struct ib_device *device)
 	ib_device_unregister_sysfs(device);
 	ib_cache_cleanup_one(device);
 
+	ib_security_destroy_port_pkey_list(device);
+	kfree(device->port_pkey_list);
+
 	down_write(&lists_rwsem);
 	spin_lock_irqsave(&device->client_data_lock, flags);
 	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
@@ -1082,10 +1159,18 @@ static int __init ib_core_init(void)
 		goto err_sa;
 	}
 
+	ret = register_lsm_notifier(&ibdev_lsm_nb);
+	if (ret) {
+		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
+		goto err_ibnl_clients;
+	}
+
 	ib_cache_setup();
 
 	return 0;
 
+err_ibnl_clients:
+	ib_remove_ibnl_clients();
 err_sa:
 	ib_sa_cleanup();
 err_mad:
@@ -1105,6 +1190,7 @@ static int __init ib_core_init(void)
 
 static void __exit ib_core_cleanup(void)
 {
+	unregister_lsm_notifier(&ibdev_lsm_nb);
 	ib_cache_cleanup();
 	ib_remove_ibnl_clients();
 	ib_sa_cleanup();
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 192ee3da..f8f53bb 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -40,9 +40,11 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/security.h>
 #include <rdma/ib_cache.h>
 
 #include "mad_priv.h"
+#include "core_priv.h"
 #include "mad_rmpp.h"
 #include "smi.h"
 #include "opa_smi.h"
@@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	atomic_set(&mad_agent_priv->refcount, 1);
 	init_completion(&mad_agent_priv->comp);
 
+	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
+	if (ret2) {
+		ret = ERR_PTR(ret2);
+		goto error4;
+	}
+
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
 	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
 
@@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 				if (method) {
 					if (method_in_use(&method,
 							   mad_reg_req))
-						goto error4;
+						goto error5;
 				}
 			}
 			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 					if (is_vendor_method_in_use(
 							vendor_class,
 							mad_reg_req))
-						goto error4;
+						goto error5;
 				}
 			}
 			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
 		}
 		if (ret2) {
 			ret = ERR_PTR(ret2);
-			goto error4;
+			goto error5;
 		}
 	}
 
@@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
 	return &mad_agent_priv->agent;
-
-error4:
+error5:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
+	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
+error4:
 	kfree(reg_req);
 error3:
 	kfree(mad_agent_priv);
@@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	struct ib_mad_agent *ret;
 	struct ib_mad_snoop_private *mad_snoop_priv;
 	int qpn;
+	int err;
 
 	/* Validate parameters */
 	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
@@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	mad_snoop_priv->agent.port_num = port_num;
 	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
 	init_completion(&mad_snoop_priv->comp);
+
+	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
+	if (err) {
+		ret = ERR_PTR(err);
+		goto error2;
+	}
+
 	mad_snoop_priv->snoop_index = register_snoop_agent(
 						&port_priv->qp_info[qpn],
 						mad_snoop_priv);
 	if (mad_snoop_priv->snoop_index < 0) {
 		ret = ERR_PTR(mad_snoop_priv->snoop_index);
-		goto error2;
+		goto error3;
 	}
 
 	atomic_set(&mad_snoop_priv->refcount, 1);
 	return &mad_snoop_priv->agent;
-
+error3:
+	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
 error2:
 	kfree(mad_snoop_priv);
 error1:
@@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	deref_mad_agent(mad_agent_priv);
 	wait_for_completion(&mad_agent_priv->comp);
 
+	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
+
 	kfree(mad_agent_priv->reg_req);
 	kfree(mad_agent_priv);
 }
@@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 	deref_snoop_agent(mad_snoop_priv);
 	wait_for_completion(&mad_snoop_priv->comp);
 
+	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
+
 	kfree(mad_snoop_priv);
 }
 
@@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 
 	/* Walk list of send WRs and post each on send list */
 	for (; send_buf; send_buf = next_send_buf) {
-
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
 		mad_agent_priv = mad_send_wr->mad_agent_priv;
 
+		ret = ib_mad_enforce_security(mad_agent_priv,
+					      mad_send_wr->send_wr.pkey_index);
+		if (ret)
+			goto error;
+
 		if (!send_buf->mad_agent->send_handler ||
 		    (send_buf->timeout_ms &&
 		     !send_buf->mad_agent->recv_handler)) {
@@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
+	int ret;
+
+	ret = ib_mad_enforce_security(mad_agent_priv,
+				      mad_recv_wc->wc->pkey_index);
+	if (ret) {
+		ib_free_recv_mad(mad_recv_wc);
+		deref_mad_agent(mad_agent_priv);
+		return;
+	}
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 						   mad_recv_wc);
 		deref_mad_agent(mad_agent_priv);
 	}
+
+	return;
 }
 
 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b784055..94931c4 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
 	struct list_head		list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
 		return -1;
 	return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
 		    const struct ibnl_client_cbs cb_table[])
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e335b09c..fb7aec4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -194,7 +194,7 @@ static u32 tid;
 	.field_name          = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-	{ PATH_REC_FIELD(ib.service_id),
+	{ PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits  = 0,
 	  .size_bits    = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
 	.field_name          = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-	{ OPA_PATH_REC_FIELD(opa.service_id),
+	{ OPA_PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits  = 0,
 	  .size_bits    = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
 	/* Now build the attributes */
 	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-		val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+		val64 = be64_to_cpu(sa_rec->service_id);
 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
 			sizeof(val64), &val64);
 	}
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
new file mode 100644
index 0000000..3e8c389
--- /dev/null
+++ b/drivers/infiniband/core/security.c
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+
+#include <linux/security.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include "core_priv.h"
+#include "mad_priv.h"
+
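+/* Find the pkey_index_qp_list entry tracking this port/pkey index,
+ * or NULL if no QP has been listed against it yet.
+ */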
+static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
+{
+	struct pkey_index_qp_list *pkey = NULL;
+	struct pkey_index_qp_list *tmp_pkey;
+	struct ib_device *dev = pp->sec->dev;
+
+	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
+	list_for_each_entry(tmp_pkey,
+			    &dev->port_pkey_list[pp->port_num].pkey_list,
+			    pkey_index_list) {
+		if (tmp_pkey->pkey_index == pp->pkey_index) {
+			pkey = tmp_pkey;
+			break;
+		}
+	}
+	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
+	return pkey;
+}
+
+static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
+				      u16 *pkey,
+				      u64 *subnet_prefix)
+{
+	struct ib_device *dev = pp->sec->dev;
+	int ret;
+
+	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
+	if (ret)
+		return ret;
+
+	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
+
+	return ret;
+}
+
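+/* Ask the LSM whether this QP, and any QPs sharing it, may use the
+ * given pkey on the given subnet prefix.
+ */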
+static int enforce_qp_pkey_security(u16 pkey,
+				    u64 subnet_prefix,
+				    struct ib_qp_security *qp_sec)
+{
+	struct ib_qp_security *shared_qp_sec;
+	int ret;
+
+	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
+	if (ret)
+		return ret;
+
+	if (qp_sec->qp == qp_sec->qp->real_qp) {
+		list_for_each_entry(shared_qp_sec,
+				    &qp_sec->shared_qp_list,
+				    shared_qp_list) {
+			ret = security_ib_pkey_access(shared_qp_sec->security,
+						      subnet_prefix,
+						      pkey);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex of the QP to which the security structure in *pps belongs.
+ *
+ * It takes separate ports_pkeys and security structures
+ * because in some cases the pps will be for new settings,
+ * or the pps will be for the real QP while the security
+ * structure is for a shared QP.
+ */
+static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
+				       struct ib_qp_security *sec)
+{
+	u64 subnet_prefix;
+	u16 pkey;
+	int ret = 0;
+
+	if (!pps)
+		return 0;
+
+	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
+		get_pkey_and_subnet_prefix(&pps->main,
+					   &pkey,
+					   &subnet_prefix);
+
+		ret = enforce_qp_pkey_security(pkey,
+					       subnet_prefix,
+					       sec);
+	}
+	if (ret)
+		return ret;
+
+	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
+		get_pkey_and_subnet_prefix(&pps->alt,
+					   &pkey,
+					   &subnet_prefix);
+
+		ret = enforce_qp_pkey_security(pkey,
+					       subnet_prefix,
+					       sec);
+	}
+
+	return ret;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static void qp_to_error(struct ib_qp_security *sec)
+{
+	struct ib_qp_security *shared_qp_sec;
+	struct ib_qp_attr attr = {
+		.qp_state = IB_QPS_ERR
+	};
+	struct ib_event event = {
+		.event = IB_EVENT_QP_FATAL
+	};
+
+	/* If the QP is in the process of being destroyed
+	 * the qp pointer in the security structure is
+	 * undefined.  It cannot be modified now.
+	 */
+	if (sec->destroying)
+		return;
+
+	ib_modify_qp(sec->qp,
+		     &attr,
+		     IB_QP_STATE);
+
+	if (sec->qp->event_handler && sec->qp->qp_context) {
+		event.element.qp = sec->qp;
+		sec->qp->event_handler(&event,
+				       sec->qp->qp_context);
+	}
+
+	list_for_each_entry(shared_qp_sec,
+			    &sec->shared_qp_list,
+			    shared_qp_list) {
+		struct ib_qp *qp = shared_qp_sec->qp;
+
+		if (qp->event_handler && qp->qp_context) {
+			event.element.qp = qp;
+			event.device = qp->device;
+			qp->event_handler(&event,
+					  qp->qp_context);
+		}
+	}
+}
+
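+/* Re-check every QP listed against this pkey index and queue any QP
+ * that no longer passes the LSM check for transition to the error state.
+ */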
+static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
+				  struct ib_device *device,
+				  u8 port_num,
+				  u64 subnet_prefix)
+{
+	struct ib_port_pkey *pp, *tmp_pp;
+	bool comp;
+	LIST_HEAD(to_error_list);
+	u16 pkey_val;
+
+	if (!ib_get_cached_pkey(device,
+				port_num,
+				pkey->pkey_index,
+				&pkey_val)) {
+		spin_lock(&pkey->qp_list_lock);
+		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
+			if (atomic_read(&pp->sec->error_list_count))
+				continue;
+
+			if (enforce_qp_pkey_security(pkey_val,
+						     subnet_prefix,
+						     pp->sec)) {
+				atomic_inc(&pp->sec->error_list_count);
+				list_add(&pp->to_error_list,
+					 &to_error_list);
+			}
+		}
+		spin_unlock(&pkey->qp_list_lock);
+	}
+
+	list_for_each_entry_safe(pp,
+				 tmp_pp,
+				 &to_error_list,
+				 to_error_list) {
+		mutex_lock(&pp->sec->mutex);
+		qp_to_error(pp->sec);
+		list_del(&pp->to_error_list);
+		atomic_dec(&pp->sec->error_list_count);
+		comp = pp->sec->destroying;
+		mutex_unlock(&pp->sec->mutex);
+
+		if (comp)
+			complete(&pp->sec->error_complete);
+	}
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static int port_pkey_list_insert(struct ib_port_pkey *pp)
+{
+	struct pkey_index_qp_list *tmp_pkey;
+	struct pkey_index_qp_list *pkey;
+	struct ib_device *dev;
+	u8 port_num = pp->port_num;
+	int ret = 0;
+
+	if (pp->state != IB_PORT_PKEY_VALID)
+		return 0;
+
+	dev = pp->sec->dev;
+
+	pkey = get_pkey_idx_qp_list(pp);
+
+	if (!pkey) {
+		bool found = false;
+
+		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
+		if (!pkey)
+			return -ENOMEM;
+
+		spin_lock(&dev->port_pkey_list[port_num].list_lock);
+		/* Check for the PKey again.  A racing process may
+		 * have created it.
+		 */
+		list_for_each_entry(tmp_pkey,
+				    &dev->port_pkey_list[port_num].pkey_list,
+				    pkey_index_list) {
+			if (tmp_pkey->pkey_index == pp->pkey_index) {
+				kfree(pkey);
+				pkey = tmp_pkey;
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			pkey->pkey_index = pp->pkey_index;
+			spin_lock_init(&pkey->qp_list_lock);
+			INIT_LIST_HEAD(&pkey->qp_list);
+			list_add(&pkey->pkey_index_list,
+				 &dev->port_pkey_list[port_num].pkey_list);
+		}
+		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
+	}
+
+	spin_lock(&pkey->qp_list_lock);
+	list_add(&pp->qp_list, &pkey->qp_list);
+	spin_unlock(&pkey->qp_list_lock);
+
+	pp->state = IB_PORT_PKEY_LISTED;
+
+	return ret;
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static void port_pkey_list_remove(struct ib_port_pkey *pp)
+{
+	struct pkey_index_qp_list *pkey;
+
+	if (pp->state != IB_PORT_PKEY_LISTED)
+		return;
+
+	pkey = get_pkey_idx_qp_list(pp);
+
+	spin_lock(&pkey->qp_list_lock);
+	list_del(&pp->qp_list);
+	spin_unlock(&pkey->qp_list_lock);
+
+	/* The setting may still be valid, e.g. after
+	 * a destroy operation has failed.
+	 */
+	pp->state = IB_PORT_PKEY_VALID;
+}
+
+static void destroy_qp_security(struct ib_qp_security *sec)
+{
+	security_ib_free_security(sec->security);
+	kfree(sec->ports_pkeys);
+	kfree(sec);
+}
+
+/* The caller of this function must hold the QP security
+ * mutex.
+ */
+static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
+					  const struct ib_qp_attr *qp_attr,
+					  int qp_attr_mask)
+{
+	struct ib_ports_pkeys *new_pps;
+	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;
+
+	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
+	if (!new_pps)
+		return NULL;
+
+	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
+		if (!qp_pps) {
+			new_pps->main.port_num = qp_attr->port_num;
+			new_pps->main.pkey_index = qp_attr->pkey_index;
+		} else {
+			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
+						  qp_attr->port_num :
+						  qp_pps->main.port_num;
+
+			new_pps->main.pkey_index =
+					(qp_attr_mask & IB_QP_PKEY_INDEX) ?
+					 qp_attr->pkey_index :
+					 qp_pps->main.pkey_index;
+		}
+		new_pps->main.state = IB_PORT_PKEY_VALID;
+	} else if (qp_pps) {
+		new_pps->main.port_num = qp_pps->main.port_num;
+		new_pps->main.pkey_index = qp_pps->main.pkey_index;
+		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
+			new_pps->main.state = IB_PORT_PKEY_VALID;
+	}
+
+	if (qp_attr_mask & IB_QP_ALT_PATH) {
+		new_pps->alt.port_num = qp_attr->alt_port_num;
+		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
+		new_pps->alt.state = IB_PORT_PKEY_VALID;
+	} else if (qp_pps) {
+		new_pps->alt.port_num = qp_pps->alt.port_num;
+		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
+		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
+			new_pps->alt.state = IB_PORT_PKEY_VALID;
+	}
+
+	new_pps->main.sec = qp->qp_sec;
+	new_pps->alt.sec = qp->qp_sec;
+	return new_pps;
+}
+
+int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
+{
+	struct ib_qp *real_qp = qp->real_qp;
+	int ret;
+
+	ret = ib_create_qp_security(qp, dev);
+
+	if (ret)
+		return ret;
+
+	mutex_lock(&real_qp->qp_sec->mutex);
+	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
+					  qp->qp_sec);
+
+	if (ret)
+		goto ret;
+
+	if (qp != real_qp)
+		list_add(&qp->qp_sec->shared_qp_list,
+			 &real_qp->qp_sec->shared_qp_list);
+ret:
+	mutex_unlock(&real_qp->qp_sec->mutex);
+	if (ret)
+		destroy_qp_security(qp->qp_sec);
+
+	return ret;
+}
+
+void ib_close_shared_qp_security(struct ib_qp_security *sec)
+{
+	struct ib_qp *real_qp = sec->qp->real_qp;
+
+	mutex_lock(&real_qp->qp_sec->mutex);
+	list_del(&sec->shared_qp_list);
+	mutex_unlock(&real_qp->qp_sec->mutex);
+
+	destroy_qp_security(sec);
+}
+
+int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
+{
+	int ret;
+
+	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
+	if (!qp->qp_sec)
+		return -ENOMEM;
+
+	qp->qp_sec->qp = qp;
+	qp->qp_sec->dev = dev;
+	mutex_init(&qp->qp_sec->mutex);
+	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
+	atomic_set(&qp->qp_sec->error_list_count, 0);
+	init_completion(&qp->qp_sec->error_complete);
+	ret = security_ib_alloc_security(&qp->qp_sec->security);
+	if (ret)
+		kfree(qp->qp_sec);
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_create_qp_security);
+
+void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
+{
+	mutex_lock(&sec->mutex);
+
+	/* Remove the QP from the lists so it won't get added to
+	 * a to_error_list during the destroy process.
+	 */
+	if (sec->ports_pkeys) {
+		port_pkey_list_remove(&sec->ports_pkeys->main);
+		port_pkey_list_remove(&sec->ports_pkeys->alt);
+	}
+
+	/* If the QP is already in one or more of those lists,
+	 * the destroying flag will ensure the QP-to-error flow
+	 * doesn't operate on an undefined QP.
+	 */
+	sec->destroying = true;
+
+	/* Record the error list count to know how many completions
+	 * to wait for.
+	 */
+	sec->error_comps_pending = atomic_read(&sec->error_list_count);
+
+	mutex_unlock(&sec->mutex);
+}
+
+void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
+{
+	int ret;
+	int i;
+
+	/* If a concurrent cache update is in progress this
+	 * QP security could be marked for an error state
+	 * transition.  Wait for this to complete.
+	 */
+	for (i = 0; i < sec->error_comps_pending; i++)
+		wait_for_completion(&sec->error_complete);
+
+	mutex_lock(&sec->mutex);
+	sec->destroying = false;
+
+	/* Restore the position in the lists and verify
+	 * access is still allowed in case a cache update
+	 * occurred while attempting to destroy.
+	 *
+	 * Because these settings were already listed
+	 * and removed during ib_destroy_qp_security_begin,
+	 * we know the pkey_index_qp_list for the PKey
+	 * already exists, so port_pkey_list_insert won't fail.
+	 */
+	if (sec->ports_pkeys) {
+		port_pkey_list_insert(&sec->ports_pkeys->main);
+		port_pkey_list_insert(&sec->ports_pkeys->alt);
+	}
+
+	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
+	if (ret)
+		qp_to_error(sec);
+
+	mutex_unlock(&sec->mutex);
+}
+
+void ib_destroy_qp_security_end(struct ib_qp_security *sec)
+{
+	int i;
+
+	/* If a concurrent cache update is occurring we must
+	 * wait until this QP security structure is processed
+	 * in the QP to error flow before destroying it because
+	 * the to_error_list is in use.
+	 */
+	for (i = 0; i < sec->error_comps_pending; i++)
+		wait_for_completion(&sec->error_complete);
+
+	destroy_qp_security(sec);
+}
+
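+/* Re-check every pkey index in use on this port against the (possibly
+ * changed) subnet prefix.
+ */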
+void ib_security_cache_change(struct ib_device *device,
+			      u8 port_num,
+			      u64 subnet_prefix)
+{
+	struct pkey_index_qp_list *pkey;
+
+	list_for_each_entry(pkey,
+			    &device->port_pkey_list[port_num].pkey_list,
+			    pkey_index_list) {
+		check_pkey_qps(pkey,
+			       device,
+			       port_num,
+			       subnet_prefix);
+	}
+}
+
+void ib_security_destroy_port_pkey_list(struct ib_device *device)
+{
+	struct pkey_index_qp_list *pkey, *tmp_pkey;
+	int i;
+
+	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
+		spin_lock(&device->port_pkey_list[i].list_lock);
+		list_for_each_entry_safe(pkey,
+					 tmp_pkey,
+					 &device->port_pkey_list[i].pkey_list,
+					 pkey_index_list) {
+			list_del(&pkey->pkey_index_list);
+			kfree(pkey);
+		}
+		spin_unlock(&device->port_pkey_list[i].list_lock);
+	}
+}
+
+int ib_security_modify_qp(struct ib_qp *qp,
+			  struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask,
+			  struct ib_udata *udata)
+{
+	int ret = 0;
+	struct ib_ports_pkeys *tmp_pps;
+	struct ib_ports_pkeys *new_pps;
+	bool special_qp = (qp->qp_type == IB_QPT_SMI ||
+			   qp->qp_type == IB_QPT_GSI ||
+			   qp->qp_type >= IB_QPT_RESERVED1);
+	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
+			   (qp_attr_mask & IB_QP_ALT_PATH));
+
+	if (pps_change && !special_qp) {
+		mutex_lock(&qp->qp_sec->mutex);
+		new_pps = get_new_pps(qp,
+				      qp_attr,
+				      qp_attr_mask);
+
+		/* Add this QP to the lists for the new port
+		 * and pkey settings before checking for permission
+		 * in case there is a concurrent cache update
+		 * occurring.  Walking the list for a cache change
+		 * doesn't acquire the security mutex unless it's
+		 * sending the QP to error.
+		 */
+		ret = port_pkey_list_insert(&new_pps->main);
+
+		if (!ret)
+			ret = port_pkey_list_insert(&new_pps->alt);
+
+		if (!ret)
+			ret = check_qp_port_pkey_settings(new_pps,
+							  qp->qp_sec);
+	}
+
+	if (!ret)
+		ret = qp->device->modify_qp(qp->real_qp,
+					    qp_attr,
+					    qp_attr_mask,
+					    udata);
+
+	if (pps_change && !special_qp) {
+		/* Clean up the lists and free the appropriate
+		 * ports_pkeys structure.
+		 */
+		if (ret) {
+			tmp_pps = new_pps;
+		} else {
+			tmp_pps = qp->qp_sec->ports_pkeys;
+			qp->qp_sec->ports_pkeys = new_pps;
+		}
+
+		if (tmp_pps) {
+			port_pkey_list_remove(&tmp_pps->main);
+			port_pkey_list_remove(&tmp_pps->alt);
+		}
+		kfree(tmp_pps);
+		mutex_unlock(&qp->qp_sec->mutex);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ib_security_modify_qp);
+
+int ib_security_pkey_access(struct ib_device *dev,
+			    u8 port_num,
+			    u16 pkey_index,
+			    void *sec)
+{
+	u64 subnet_prefix;
+	u16 pkey;
+	int ret;
+
+	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
+	if (ret)
+		return ret;
+
+	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
+
+	if (ret)
+		return ret;
+
+	return security_ib_pkey_access(sec, subnet_prefix, pkey);
+}
+EXPORT_SYMBOL(ib_security_pkey_access);
+
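+/* Per-agent LSM notifier: re-evaluate whether an SMI agent may still
+ * manage the subnet after a policy change.
+ */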
+static int ib_mad_agent_security_change(struct notifier_block *nb,
+					unsigned long event,
+					void *data)
+{
+	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);
+
+	if (event != LSM_POLICY_CHANGE)
+		return NOTIFY_DONE;
+
+	ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
+							     ag->device->name,
+							     ag->port_num);
+
+	return NOTIFY_OK;
+}
+
+int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
+				enum ib_qp_type qp_type)
+{
+	int ret;
+
+	ret = security_ib_alloc_security(&agent->security);
+	if (ret)
+		return ret;
+
+	if (qp_type != IB_QPT_SMI)
+		return 0;
+
+	ret = security_ib_endport_manage_subnet(agent->security,
+						agent->device->name,
+						agent->port_num);
+	if (ret)
+		return ret;
+
+	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
+	ret = register_lsm_notifier(&agent->lsm_nb);
+	if (ret)
+		return ret;
+
+	agent->smp_allowed = true;
+	agent->lsm_nb_reg = true;
+	return 0;
+}
+
+void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
+{
+	security_ib_free_security(agent->security);
+	if (agent->lsm_nb_reg)
+		unregister_lsm_notifier(&agent->lsm_nb);
+}
+
+int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
+{
+	int ret;
+
+	if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
+		return -EACCES;
+
+	ret = ib_security_pkey_access(map->agent.device,
+				      map->agent.port_num,
+				      pkey_index,
+				      map->agent.security);
+
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 3dbf811..21e60b1 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 
 		page = sg_page(sg);
-		if (umem->writable && dirty)
+		if (!PageDirty(page) && umem->writable && dirty)
 			set_page_dirty_lock(page);
 		put_page(page);
 	}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 0780b1a..8c4ec56 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 		struct vm_area_struct *vma;
 		struct hstate *h;
 
+		down_read(&mm->mmap_sem);
 		vma = find_vma(mm, ib_umem_start(umem));
-		if (!vma || !is_vm_hugetlb_page(vma))
+		if (!vma || !is_vm_hugetlb_page(vma)) {
+			up_read(&mm->mmap_sem);
 			return -EINVAL;
+		}
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
+		up_read(&mm->mmap_sem);
 		umem->hugetlb = 1;
 	} else {
 		umem->hugetlb = 0;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 70b7fb1..0ad3b05 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1508,6 +1508,10 @@ static int create_qp(struct ib_uverbs_file *file,
 	}
 
 	if (cmd->qp_type != IB_QPT_XRC_TGT) {
+		ret = ib_create_qp_security(qp, device);
+		if (ret)
+			goto err_cb;
+
 		qp->real_qp	  = qp;
 		qp->device	  = device;
 		qp->pd		  = pd;
@@ -2002,14 +2006,17 @@ static int modify_qp(struct ib_uverbs_file *file,
 			if (ret)
 				goto release_qp;
 		}
-		ret = qp->device->modify_qp(qp, attr,
+		ret = ib_security_modify_qp(qp,
+					    attr,
 					    modify_qp_mask(qp->qp_type,
 							   cmd->base.attr_mask),
 					    udata);
 	} else {
-		ret = ib_modify_qp(qp, attr,
-				   modify_qp_mask(qp->qp_type,
-						  cmd->base.attr_mask));
+		ret = ib_security_modify_qp(qp,
+					    attr,
+					    modify_qp_mask(qp->qp_type,
+							   cmd->base.attr_mask),
+					    NULL);
 	}
 
 release_qp:
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 8b9587f..94fd989 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-				struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+				       struct sa_path_rec *src)
 {
-	memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-	memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+	memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+	memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 
 	dst->dlid		= htons(ntohl(sa_path_get_dlid(src)));
 	dst->slid		= htons(ntohl(sa_path_get_slid(src)));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4792f52..c973a83c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <net/addrconf.h>
+#include <linux/security.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
@@ -713,12 +714,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 {
 	struct ib_qp *qp;
 	unsigned long flags;
+	int err;
 
 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
 	if (!qp)
 		return ERR_PTR(-ENOMEM);
 
 	qp->real_qp = real_qp;
+	err = ib_open_shared_qp_security(qp, real_qp->device);
+	if (err) {
+		kfree(qp);
+		return ERR_PTR(err);
+	}
+
+	qp->real_qp = real_qp;
 	atomic_inc(&real_qp->usecnt);
 	qp->device = real_qp->device;
 	qp->event_handler = event_handler;
@@ -804,6 +813,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return qp;
 
+	ret = ib_create_qp_security(qp, device);
+	if (ret) {
+		ib_destroy_qp(qp);
+		return ERR_PTR(ret);
+	}
+
 	qp->device     = device;
 	qp->real_qp    = qp;
 	qp->uobject    = NULL;
@@ -1266,7 +1281,7 @@ int ib_modify_qp(struct ib_qp *qp,
 			return ret;
 	}
 
-	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+	return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
@@ -1295,6 +1310,7 @@ int ib_close_qp(struct ib_qp *qp)
 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 
 	atomic_dec(&real_qp->usecnt);
+	ib_close_shared_qp_security(qp->qp_sec);
 	kfree(qp);
 
 	return 0;
@@ -1335,6 +1351,7 @@ int ib_destroy_qp(struct ib_qp *qp)
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
 	struct ib_rwq_ind_table *ind_tbl;
+	struct ib_qp_security *sec;
 	int ret;
 
 	WARN_ON_ONCE(qp->mrs_used > 0);
@@ -1350,6 +1367,9 @@ int ib_destroy_qp(struct ib_qp *qp)
 	rcq  = qp->recv_cq;
 	srq  = qp->srq;
 	ind_tbl = qp->rwq_ind_tbl;
+	sec  = qp->qp_sec;
+	if (sec)
+		ib_destroy_qp_security_begin(sec);
 
 	if (!qp->uobject)
 		rdma_rw_cleanup_mrs(qp);
@@ -1366,6 +1386,11 @@ int ib_destroy_qp(struct ib_qp *qp)
 			atomic_dec(&srq->usecnt);
 		if (ind_tbl)
 			atomic_dec(&ind_tbl->usecnt);
+		if (sec)
+			ib_destroy_qp_security_end(sec);
+	} else {
+		if (sec)
+			ib_destroy_qp_security_abort(sec);
 	}
 
 	return ret;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8..0877283 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT		(64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL		0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD	32
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e69..c7bd683 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
 #include "ib_verbs.h"
 #include <rdma/bnxt_re-abi.h>
 
+static int __from_ib_access_flags(int iflags)
+{
+	int qflags = 0;
+
+	if (iflags & IB_ACCESS_LOCAL_WRITE)
+		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+	if (iflags & IB_ACCESS_REMOTE_READ)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+	if (iflags & IB_ACCESS_REMOTE_WRITE)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+	if (iflags & IB_ACCESS_MW_BIND)
+		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+	if (iflags & IB_ZERO_BASED)
+		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+	if (iflags & IB_ACCESS_ON_DEMAND)
+		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+	return qflags;
+};
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+	enum ib_access_flags iflags = 0;
+
+	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+		iflags |= IB_ACCESS_LOCAL_WRITE;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+		iflags |= IB_ACCESS_REMOTE_WRITE;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+		iflags |= IB_ACCESS_REMOTE_READ;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+		iflags |= IB_ACCESS_REMOTE_ATOMIC;
+	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+		iflags |= IB_ACCESS_MW_BIND;
+	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+		iflags |= IB_ZERO_BASED;
+	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+		iflags |= IB_ACCESS_ON_DEMAND;
+	return iflags;
+};
+
 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
 			     struct bnxt_qplib_sge *sg_list, int num)
 {
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_total_mcast_qp_attach = 0;
 	ib_attr->max_ah = dev_attr->max_ah;
 
-	ib_attr->max_fmr = dev_attr->max_fmr;
-	ib_attr->max_map_per_fmr = 1;	/* ? */
+	ib_attr->max_fmr = 0;
+	ib_attr->max_map_per_fmr = 0;
 
 	ib_attr->max_srq = dev_attr->max_srq;
 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
 	return IB_LINK_LAYER_ETHERNET;
 }
 
+#define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
+
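+/* Pre-build the type-1 memory window bind WQE used for fencing; only the
+ * rkey is refreshed on each (re)bind.
+ */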
+static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+	struct bnxt_re_fence_data *fence = &pd->fence;
+	struct ib_mr *ib_mr = &fence->mr->ib_mr;
+	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+
+	memset(wqe, 0, sizeof(*wqe));
+	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+	wqe->bind.zero_based = false;
+	wqe->bind.parent_l_key = ib_mr->lkey;
+	wqe->bind.va = (u64)(unsigned long)fence->va;
+	wqe->bind.length = fence->size;
+	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+	/* Save the initial rkey in fence structure for now;
+	 * wqe->bind.r_key will be set at (re)bind time.
+	 */
+	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+					     qplib_qp);
+	struct ib_pd *ib_pd = qp->ib_qp.pd;
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct bnxt_re_fence_data *fence = &pd->fence;
+	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+	struct bnxt_qplib_swqe wqe;
+	int rc;
+
+	memcpy(&wqe, fence_wqe, sizeof(wqe));
+	wqe.bind.r_key = fence->bind_rkey;
+	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+	dev_dbg(rdev_to_dev(qp->rdev),
+		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+		wqe.bind.r_key, qp->qplib_qp.id, pd);
+	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+	if (rc) {
+		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+		return rc;
+	}
+	bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+	return rc;
+}
+
+static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+	struct bnxt_re_fence_data *fence = &pd->fence;
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct device *dev = &rdev->en_dev->pdev->dev;
+	struct bnxt_re_mr *mr = fence->mr;
+
+	if (fence->mw) {
+		bnxt_re_dealloc_mw(fence->mw);
+		fence->mw = NULL;
+	}
+	if (mr) {
+		if (mr->ib_mr.rkey)
+			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+					     true);
+		if (mr->ib_mr.lkey)
+			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+		kfree(mr);
+		fence->mr = NULL;
+	}
+	if (fence->dma_addr) {
+		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
+				 DMA_BIDIRECTIONAL);
+		fence->dma_addr = 0;
+	}
+}
+
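+/* Set up the fence resources for a kernel PD: DMA-map the fence buffer,
+ * allocate and register an MR over it, and create the type-1 MW that the
+ * bind WQE will target.
+ */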
+static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+{
+	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+	struct bnxt_re_fence_data *fence = &pd->fence;
+	struct bnxt_re_dev *rdev = pd->rdev;
+	struct device *dev = &rdev->en_dev->pdev->dev;
+	struct bnxt_re_mr *mr = NULL;
+	dma_addr_t dma_addr = 0;
+	struct ib_mw *mw;
+	u64 pbl_tbl;
+	int rc;
+
+	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+				  DMA_BIDIRECTIONAL);
+	rc = dma_mapping_error(dev, dma_addr);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+		rc = -EIO;
+		fence->dma_addr = 0;
+		goto fail;
+	}
+	fence->dma_addr = dma_addr;
+
+	/* Allocate a MR */
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	fence->mr = mr;
+	mr->rdev = rdev;
+	mr->qplib_mr.pd = &pd->qplib_pd;
+	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+		goto fail;
+	}
+
+	/* Register MR */
+	mr->ib_mr.lkey = mr->qplib_mr.lkey;
+	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
+	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
+	pbl_tbl = dma_addr;
+	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
+			       BNXT_RE_FENCE_PBL_SIZE, false);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+		goto fail;
+	}
+	mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+	/* Create a fence MW only for kernel consumers */
+	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
+	if (IS_ERR(mw)) {
+		dev_err(rdev_to_dev(rdev),
+			"Failed to create fence-MW for PD: %p\n", pd);
+		rc = PTR_ERR(mw);
+		goto fail;
+	}
+	fence->mw = mw;
+
+	bnxt_re_create_fence_wqe(pd);
+	return 0;
+
+fail:
+	bnxt_re_destroy_fence_mr(pd);
+	return rc;
+}
+
 /* Protection Domains */
 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 {
@@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	struct bnxt_re_dev *rdev = pd->rdev;
 	int rc;
 
+	bnxt_re_destroy_fence_mr(pd);
 	if (ib_pd->uobject && pd->dpi.dbr) {
 		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
 		struct bnxt_re_ucontext *ucntx;
@@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 		}
 	}
 
+	if (!udata)
+		if (bnxt_re_create_fence_mr(pd))
+			dev_warn(rdev_to_dev(rdev),
+				 "Failed to create Fence-MR\n");
 	return &pd->ib_pd;
 dbfail:
 	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.sq.max_sge = 2;
+	/* Q full delta can be 1 since it is an internal QP */
+	qp->qplib_qp.sq.q_full_delta = 1;
 
 	qp->qplib_qp.scq = qp1_qp->scq;
 	qp->qplib_qp.rcq = qp1_qp->rcq;
 
 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+	/* Q full delta can be 1 since it is an internal QP */
+	qp->qplib_qp.rq.q_full_delta = 1;
 
 	qp->qplib_qp.mtu = qp1_qp->mtu;
 
@@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
 				  IB_SIGNAL_ALL_WR) ? true : false);
 
-	entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
-	qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
-					dev_attr->max_qp_wqes + 1);
-
 	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
 	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
 		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
 						dev_attr->max_qp_wqes + 1);
 
+		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+						qp_init_attr->cap.max_recv_wr;
+
 		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 
 	if (qp_init_attr->qp_type == IB_QPT_GSI) {
+		/* Allocate 1 more than what's provided */
+		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
+		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+						dev_attr->max_qp_wqes + 1);
+		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+						qp_init_attr->cap.max_send_wr;
 		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 		}
 
 	} else {
+		/* Allocate 128 + 1 more than what's provided */
+		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
+					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
+		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
+						dev_attr->max_qp_wqes +
+						BNXT_QPLIB_RESERVED_QP_WRS + 1);
+		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+
+		/*
+		 * Reserve one slot for the phantom WQE. The application can
+		 * post one extra entry in this case, but allowing it avoids
+		 * an unexpected queue-full condition.
+		 */
+
+		qp->qplib_qp.sq.q_full_delta -= 1;
+
 		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
 		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
 		if (udata) {
@@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 
 	qp->ib_qp.qp_num = qp->qplib_qp.id;
 	spin_lock_init(&qp->sq_lock);
+	spin_lock_init(&qp->rq_lock);
 
 	if (udata) {
 		struct bnxt_re_qp_resp resp;
@@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
 	}
 }
 
-static int __from_ib_access_flags(int iflags)
-{
-	int qflags = 0;
-
-	if (iflags & IB_ACCESS_LOCAL_WRITE)
-		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
-	if (iflags & IB_ACCESS_REMOTE_READ)
-		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
-	if (iflags & IB_ACCESS_REMOTE_WRITE)
-		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
-	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
-		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
-	if (iflags & IB_ACCESS_MW_BIND)
-		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
-	if (iflags & IB_ZERO_BASED)
-		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
-	if (iflags & IB_ACCESS_ON_DEMAND)
-		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
-	return qflags;
-};
-
-static enum ib_access_flags __to_ib_access_flags(int qflags)
-{
-	enum ib_access_flags iflags = 0;
-
-	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
-		iflags |= IB_ACCESS_LOCAL_WRITE;
-	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
-		iflags |= IB_ACCESS_REMOTE_WRITE;
-	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
-		iflags |= IB_ACCESS_REMOTE_READ;
-	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
-		iflags |= IB_ACCESS_REMOTE_ATOMIC;
-	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
-		iflags |= IB_ACCESS_MW_BIND;
-	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
-		iflags |= IB_ZERO_BASED;
-	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
-		iflags |= IB_ACCESS_ON_DEMAND;
-	return iflags;
-};
-
 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
 				    struct bnxt_re_qp *qp1_qp,
 				    int qp_attr_mask)
@@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
 						dev_attr->max_qp_wqes + 1);
+		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+						qp_attr->cap.max_send_wr;
+		/*
+		 * Reserve one slot for the phantom WQE. An application can
+		 * post one extra entry in this case; allowing it avoids
+		 * an unexpected queue-full condition.
+		 */
+		qp->qplib_qp.sq.q_full_delta -= 1;
 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
 		if (qp->qplib_qp.rq.max_wqe) {
 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
 			qp->qplib_qp.rq.max_wqe =
 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+						       qp_attr->cap.max_recv_wr;
 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
 		} else {
 			/* SRQ was used prior, just ignore the RQ caps */
@@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
 	return payload_sz;
 }
 
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
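+	/* HW stall workaround for UD/GSI/raw-ethertype QPs: once wqe_cnt
+	 * reaches BNXT_RE_UD_QP_HW_STALL, re-program the QP to RTS and
+	 * reset the count.
+	 */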
+	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+	     qp->ib_qp.qp_type == IB_QPT_GSI ||
+	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+		int qp_attr_mask;
+		struct ib_qp_attr qp_attr;
+
+		qp_attr_mask = IB_QP_STATE;
+		qp_attr.qp_state = IB_QPS_RTS;
+		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+		qp->qplib_qp.wqe_cnt = 0;
+	}
+}
+
 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
 				       struct bnxt_re_qp *qp,
 				struct ib_send_wr *wr)
@@ -1928,6 +2137,7 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+	bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 	return rc;
 }
@@ -2024,6 +2234,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+	bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 
 	return rc;
@@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_qplib_swqe wqe;
 	int rc = 0, payload_sz = 0;
+	unsigned long flags;
+	u32 count = 0;
 
+	spin_lock_irqsave(&qp->rq_lock, flags);
 	while (wr) {
 		/* House keeping */
 		memset(&wqe, 0, sizeof(wqe));
@@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
+
+		/* Ring the doorbell once the number of posted RQEs reaches the threshold */
+		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+			bnxt_qplib_post_recv_db(&qp->qplib_qp);
+			count = 0;
+		}
+
 		wr = wr->next;
 	}
-	bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+	if (count)
+		bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+	spin_unlock_irqrestore(&qp->rq_lock, flags);
+
 	return rc;
 }
 
@@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
 }
 
+static int send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&qp->sq_lock, flags);
+
+	rc = bnxt_re_bind_fence_mw(lib_qp);
+	if (!rc) {
+		lib_qp->sq.phantom_wqe_cnt++;
+		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
+			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+			lib_qp->id, lib_qp->sq.hwq.prod,
+			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+			lib_qp->sq.phantom_wqe_cnt);
+	}
+
+	spin_unlock_irqrestore(&qp->sq_lock, flags);
+	return rc;
+}
+
 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 {
 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
 	struct bnxt_re_qp *qp;
 	struct bnxt_qplib_cqe *cqe;
 	int i, ncqe, budget;
+	struct bnxt_qplib_q *sq;
+	struct bnxt_qplib_qp *lib_qp;
 	u32 tbl_idx;
 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
 	unsigned long flags;
@@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 	}
 	cqe = &cq->cql[0];
 	while (budget) {
-		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
+		lib_qp = NULL;
+		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+		if (lib_qp) {
+			sq = &lib_qp->sq;
+			if (sq->send_phantom) {
+				qp = container_of(lib_qp,
+						  struct bnxt_re_qp, qplib_qp);
+				if (send_phantom_wqe(qp) == -ENOMEM)
+					dev_err(rdev_to_dev(cq->rdev),
+						"Phantom failed! Scheduled to send again\n");
+				else
+					sq->send_phantom = false;
+			}
+		}
+
 		if (!ncqe)
 			break;
 
@@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
 	struct bnxt_re_dev *rdev = mr->rdev;
 	int rc;
 
+	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+		return rc;
+	}
+
 	if (mr->npages && mr->pages) {
 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
 							&mr->qplib_frpl);
@@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
 		mr->npages = 0;
 		mr->pages = NULL;
 	}
-	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-
 	if (!IS_ERR_OR_NULL(mr->ib_umem))
 		ib_umem_release(mr->ib_umem);
 
@@ -2914,97 +3182,52 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
 	return ERR_PTR(rc);
 }
 
-/* Fast Memory Regions */
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
-				 struct ib_fmr_attr *fmr_attr)
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+			       struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_fmr *fmr;
+	struct bnxt_re_mw *mw;
 	int rc;
 
-	if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
-	    fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
-		dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
+	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+	if (!mw)
 		return ERR_PTR(-ENOMEM);
-	}
-	fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
-	if (!fmr)
-		return ERR_PTR(-ENOMEM);
+	mw->rdev = rdev;
+	mw->qplib_mw.pd = &pd->qplib_pd;
 
-	fmr->rdev = rdev;
-	fmr->qplib_fmr.pd = &pd->qplib_pd;
-	fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
-
-	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-	if (rc)
+	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
 		goto fail;
+	}
+	mw->ib_mw.rkey = mw->qplib_mw.rkey;
 
-	fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
-	fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
-	fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
+	atomic_inc(&rdev->mw_count);
+	return &mw->ib_mw;
 
-	atomic_inc(&rdev->mr_count);
-	return &fmr->ib_fmr;
 fail:
-	kfree(fmr);
+	kfree(mw);
 	return ERR_PTR(rc);
 }
 
-int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
-			 u64 iova)
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 {
-	struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-					     ib_fmr);
-	struct bnxt_re_dev *rdev = fmr->rdev;
+	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
+	struct bnxt_re_dev *rdev = mw->rdev;
 	int rc;
 
-	fmr->qplib_fmr.va = iova;
-	fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
-
-	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
-			       list_len, true);
-	if (rc)
-		dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
-			fmr->ib_fmr.lkey);
-	return rc;
-}
-
-int bnxt_re_unmap_fmr(struct list_head *fmr_list)
-{
-	struct bnxt_re_dev *rdev;
-	struct bnxt_re_fmr *fmr;
-	struct ib_fmr *ib_fmr;
-	int rc = 0;
-
-	/* Validate each FMRs inside the fmr_list */
-	list_for_each_entry(ib_fmr, fmr_list, list) {
-		fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
-		rdev = fmr->rdev;
-
-		if (rdev) {
-			rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
-						  &fmr->qplib_fmr, true);
-			if (rc)
-				break;
-		}
+	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+	if (rc) {
+		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+		return rc;
 	}
-	return rc;
-}
 
-int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
-{
-	struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
-					       ib_fmr);
-	struct bnxt_re_dev *rdev = fmr->rdev;
-	int rc;
-
-	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
-	if (rc)
-		dev_err(rdev_to_dev(rdev), "Failed to free FMR");
-
-	kfree(fmr);
-	atomic_dec(&rdev->mr_count);
+	kfree(mw);
+	atomic_dec(&rdev->mw_count);
 	return rc;
 }
 
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 5c3d717..6c160f6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
 	u32			refcnt;
 };
 
+#define BNXT_RE_FENCE_BYTES	64
+struct bnxt_re_fence_data {
+	u32 size;
+	u8 va[BNXT_RE_FENCE_BYTES];
+	dma_addr_t dma_addr;
+	struct bnxt_re_mr *mr;
+	struct ib_mw *mw;
+	struct bnxt_qplib_swqe bind_wqe;
+	u32 bind_rkey;
+};
+
 struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
 	struct bnxt_qplib_dpi	dpi;
+	struct bnxt_re_fence_data fence;
 };
 
 struct bnxt_re_ah {
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
 	struct bnxt_re_dev	*rdev;
 	struct ib_qp		ib_qp;
 	spinlock_t		sq_lock;	/* protect sq */
+	spinlock_t		rq_lock;	/* protect rq */
 	struct bnxt_qplib_qp	qplib_qp;
 	struct ib_umem		*sumem;
 	struct ib_umem		*rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg);
 int bnxt_re_dereg_mr(struct ib_mr *mr);
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-				 struct ib_fmr_attr *fmr_attr);
-int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
-			 u64 iova);
-int bnxt_re_unmap_fmr(struct list_head *fmr_list);
-int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+			       struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 5d35540..1fce5e7 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 	ibdev->dereg_mr			= bnxt_re_dereg_mr;
 	ibdev->alloc_mr			= bnxt_re_alloc_mr;
 	ibdev->map_mr_sg		= bnxt_re_map_mr_sg;
-	ibdev->alloc_fmr		= bnxt_re_alloc_fmr;
-	ibdev->map_phys_fmr		= bnxt_re_map_phys_fmr;
-	ibdev->unmap_fmr		= bnxt_re_unmap_fmr;
-	ibdev->dealloc_fmr		= bnxt_re_dealloc_fmr;
 
 	ibdev->reg_user_mr		= bnxt_re_reg_user_mr;
 	ibdev->alloc_ucontext		= bnxt_re_alloc_ucontext;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 43d08b5..f05500bc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_create_qp1 req;
-	struct creq_create_qp1_resp *resp;
+	struct creq_create_qp1_resp resp;
 	struct bnxt_qplib_pbl *pbl;
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
@@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
 	req.pd_id = cpu_to_le32(qp->pd->id);
 
-	resp = (struct creq_create_qp1_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
-		rc = -EINVAL;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
 		goto fail;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
-		rc = -ETIMEDOUT;
-		goto fail;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		rc = -EINVAL;
-		goto fail;
-	}
-	qp->id = le32_to_cpu(resp->xid);
+
+	qp->id = le32_to_cpu(resp.xid);
 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 	sq->flush_in_progress = false;
 	rq->flush_in_progress = false;
@@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
 	struct cmdq_create_qp req;
-	struct creq_create_qp_resp *resp;
+	struct creq_create_qp_resp resp;
 	struct bnxt_qplib_pbl *pbl;
 	struct sq_psn_search **psn_search_ptr;
 	unsigned long int psn_search, poff = 0;
@@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	}
 	req.pd_id = cpu_to_le32(qp->pd->id);
 
-	resp = (struct creq_create_qp_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
-		rc = -EINVAL;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
 		goto fail;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
-		rc = -ETIMEDOUT;
-		goto fail;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		rc = -EINVAL;
-		goto fail;
-	}
-	qp->id = le32_to_cpu(resp->xid);
+
+	qp->id = le32_to_cpu(resp.xid);
 	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 	sq->flush_in_progress = false;
 	rq->flush_in_progress = false;
@@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_modify_qp req;
-	struct creq_modify_qp_resp *resp;
+	struct creq_modify_qp_resp resp;
 	u16 cmd_flags = 0, pkey;
 	u32 temp32[4];
 	u32 bmask;
+	int rc;
 
 	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
 
@@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 
 	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
 
-	resp = (struct creq_modify_qp_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
+		return rc;
 	qp->cur_qp_state = qp->state;
 	return 0;
 }
@@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_query_qp req;
-	struct creq_query_qp_resp *resp;
+	struct creq_query_qp_resp resp;
+	struct bnxt_qplib_rcfw_sbuf *sbuf;
 	struct creq_query_qp_resp_sb *sb;
 	u16 cmd_flags = 0;
 	u32 temp32[4];
-	int i;
+	int i, rc = 0;
 
 	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
 
+	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+	if (!sbuf)
+		return -ENOMEM;
+	sb = sbuf->sb;
+
 	req.qp_cid = cpu_to_le32(qp->id);
 	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-	resp = (struct creq_query_qp_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     (void **)&sb, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  (void *)sbuf, 0);
+	if (rc)
+		goto bail;
 	/* Extract the context from the side buffer */
 	qp->state = sb->en_sqd_async_notify_state &
 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
 	memcpy(qp->smac, sb->src_mac, 6);
 	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
-	return 0;
+bail:
+	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+	return rc;
 }
 
 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_destroy_qp req;
-	struct creq_destroy_qp_resp *resp;
+	struct creq_destroy_qp_resp resp;
 	unsigned long flags;
 	u16 cmd_flags = 0;
+	int rc;
 
 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
 
 	req.qp_cid = cpu_to_le32(qp->id);
-	resp = (struct creq_destroy_qp_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
+		return rc;
 
 	/* Must walk the associated CQs to nullified the QP ptr */
 	spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		rc = -EINVAL;
 		goto done;
 	}
-	if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
-	    HWQ_CMP(sq->hwq.cons, &sq->hwq)) {
+
+	if (bnxt_qplib_queue_full(sq)) {
+		dev_err(&sq->hwq.pdev->dev,
+			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
+			sq->q_full_delta);
 		rc = -ENOMEM;
 		goto done;
 	}
@@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 	}
 
 	sq->hwq.prod++;
+
+	qp->wqe_cnt++;
+
 done:
 	return rc;
 }
@@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 		rc = -EINVAL;
 		goto done;
 	}
-	if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) ==
-	    HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
+	if (bnxt_qplib_queue_full(rq)) {
 		dev_err(&rq->hwq.pdev->dev,
 			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
 		rc = -EINVAL;
@@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_create_cq req;
-	struct creq_create_cq_resp *resp;
+	struct creq_create_cq_resp resp;
 	struct bnxt_qplib_pbl *pbl;
 	u16 cmd_flags = 0;
 	int rc;
@@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
 
-	resp = (struct creq_create_cq_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
-		rc = -ETIMEDOUT;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
 		goto fail;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		rc = -EINVAL;
-		goto fail;
-	}
-	cq->id = le32_to_cpu(resp->xid);
+
+	cq->id = le32_to_cpu(resp.xid);
 	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
 	init_waitqueue_head(&cq->waitq);
@@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_destroy_cq req;
-	struct creq_destroy_cq_resp *resp;
+	struct creq_destroy_cq_resp resp;
 	u16 cmd_flags = 0;
+	int rc;
 
 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
 
 	req.cq_cid = cpu_to_le32(cq->id);
-	resp = (struct creq_destroy_cq_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
+		return rc;
 	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
 	return 0;
 }
@@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
 	return rc;
 }
 
+/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
+ *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
+ */
+static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
+		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
+{
+	struct bnxt_qplib_q *sq = &qp->sq;
+	struct bnxt_qplib_swq *swq;
+	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+	struct cq_req *peek_req_hwcqe;
+	struct bnxt_qplib_qp *peek_qp;
+	struct bnxt_qplib_q *peek_sq;
+	int i, rc = 0;
+
+	/* Normal mode */
+	/* Check for the psn_search marking before completing */
+	swq = &sq->swq[sw_sq_cons];
+	if (swq->psn_search &&
+	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+		/* Unmark */
+		swq->psn_search->flags_next_psn = cpu_to_le32
+			(le32_to_cpu(swq->psn_search->flags_next_psn)
+				     & ~0x80000000);
+		dev_dbg(&cq->hwq.pdev->dev,
+			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+		sq->condition = true;
+		sq->send_phantom = true;
+
+		/* TODO: Only ARM if the previous SQE is ARMALL */
+		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
+
+		rc = -EAGAIN;
+		goto out;
+	}
+	if (sq->condition) {
+		/* Peek at the completions */
+		peek_raw_cq_cons = cq->hwq.cons;
+		peek_sw_cq_cons = cq_cons;
+		i = cq->hwq.max_elements;
+		while (i--) {
+			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
+			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
+						     [CQE_IDX(peek_sw_cq_cons)];
+			/* If the next hwcqe is VALID */
+			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
+					  cq->hwq.max_elements)) {
+				/* If the next hwcqe is a REQ */
+				if ((peek_hwcqe->cqe_type_toggle &
+				    CQ_BASE_CQE_TYPE_MASK) ==
+				    CQ_BASE_CQE_TYPE_REQ) {
+					peek_req_hwcqe = (struct cq_req *)
+							 peek_hwcqe;
+					peek_qp = (struct bnxt_qplib_qp *)
+						((unsigned long)
+						 le64_to_cpu
+						 (peek_req_hwcqe->qp_handle));
+					peek_sq = &peek_qp->sq;
+					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
+						peek_req_hwcqe->sq_cons_idx) - 1
+						, &sq->hwq);
+					/* If the hwcqe's sq's wr_id matches */
+					if (peek_sq == sq &&
+					    sq->swq[peek_sq_cons_idx].wr_id ==
+					    BNXT_QPLIB_FENCE_WRID) {
+						/*
+						 *  Unbreak only if the phantom
+						 *  comes back
+						 */
+						dev_dbg(&cq->hwq.pdev->dev,
+							"FP:Got Phantom CQE");
+						sq->condition = false;
+						sq->single = true;
+						rc = 0;
+						goto out;
+					}
+				}
+				/* Valid but not the phantom, so keep looping */
+			} else {
+				/* Not valid yet, just exit and wait */
+				rc = -EINVAL;
+				goto out;
+			}
+			peek_sw_cq_cons++;
+			peek_raw_cq_cons++;
+		}
+		dev_err(&cq->hwq.pdev->dev,
+			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
+		rc = -EINVAL;
+	}
+out:
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 				     struct cq_req *hwcqe,
-				     struct bnxt_qplib_cqe **pcqe, int *budget)
+				     struct bnxt_qplib_cqe **pcqe, int *budget,
+				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
 {
 	struct bnxt_qplib_qp *qp;
 	struct bnxt_qplib_q *sq;
 	struct bnxt_qplib_cqe *cqe;
-	u32 sw_cons, cqe_cons;
+	u32 sw_sq_cons, cqe_sq_cons;
+	struct bnxt_qplib_swq *swq;
 	int rc = 0;
 
 	qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 	}
 	sq = &qp->sq;
 
-	cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
-	if (cqe_cons > sq->hwq.max_elements) {
+	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
+	if (cqe_sq_cons > sq->hwq.max_elements) {
 		dev_err(&cq->hwq.pdev->dev,
 			"QPLIB: FP: CQ Process req reported ");
 		dev_err(&cq->hwq.pdev->dev,
 			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
-			cqe_cons, sq->hwq.max_elements);
+			cqe_sq_cons, sq->hwq.max_elements);
 		return -EINVAL;
 	}
 	/* If we were in the middle of flushing the SQ, continue */
@@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 
 	/* Require to walk the sq's swq to fabricate CQEs for all previously
 	 * signaled SWQEs due to CQE aggregation from the current sq cons
-	 * to the cqe_cons
+	 * to the cqe_sq_cons
 	 */
 	cqe = *pcqe;
 	while (*budget) {
-		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
-		if (sw_cons == cqe_cons)
+		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
+		if (sw_sq_cons == cqe_sq_cons)
+			/* Done */
 			break;
+
+		swq = &sq->swq[sw_sq_cons];
 		memset(cqe, 0, sizeof(*cqe));
 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
 		cqe->qp_handle = (u64)(unsigned long)qp;
 		cqe->src_qp = qp->id;
-		cqe->wr_id = sq->swq[sw_cons].wr_id;
-		cqe->type = sq->swq[sw_cons].type;
+		cqe->wr_id = swq->wr_id;
+		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+			goto skip;
+		cqe->type = swq->type;
 
 		/* For the last CQE, check for status.  For errors, regardless
 		 * of the request being signaled or not, it must complete with
 		 * the hwcqe error status
 		 */
-		if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons &&
+		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
 		    hwcqe->status != CQ_REQ_STATUS_OK) {
 			cqe->status = hwcqe->status;
 			dev_err(&cq->hwq.pdev->dev,
 				"QPLIB: FP: CQ Processed Req ");
 			dev_err(&cq->hwq.pdev->dev,
 				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
-				sw_cons, cqe->wr_id, cqe->status);
+				sw_sq_cons, cqe->wr_id, cqe->status);
 			cqe++;
 			(*budget)--;
 			sq->flush_in_progress = true;
 			/* Must block new posting of SQ and RQ */
 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+			sq->condition = false;
+			sq->single = false;
 		} else {
-			if (sq->swq[sw_cons].flags &
-			    SQ_SEND_FLAGS_SIGNAL_COMP) {
+			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+				/* Before we complete, do WA 9060 */
+				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+					      cqe_sq_cons)) {
+					*lib_qp = qp;
+					goto out;
+				}
 				cqe->status = CQ_REQ_STATUS_OK;
 				cqe++;
 				(*budget)--;
 			}
 		}
+skip:
 		sq->hwq.cons++;
+		if (sq->single)
+			break;
 	}
+out:
 	*pcqe = cqe;
-	if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) {
+	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
 		/* Out of budget */
 		rc = -EAGAIN;
 		goto done;
 	}
+	/*
+	 * Back to normal completion mode only after it has completed all of
+	 * the WC for this CQE
+	 */
+	sq->single = false;
 	if (!sq->flush_in_progress)
 		goto done;
 flush:
@@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
 }
 
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-		       int num_cqes)
+		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
 	unsigned long flags;
@@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		case CQ_BASE_CQE_TYPE_REQ:
 			rc = bnxt_qplib_cq_process_req(cq,
 						       (struct cq_req *)hw_cqe,
-						       &cqe, &budget);
+						       &cqe, &budget,
+						       sw_cons, lib_qp);
 			break;
 		case CQ_BASE_CQE_TYPE_RES_RC:
 			rc = bnxt_qplib_cq_process_res_rc(cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index f0150f8..36b7b7d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
 
 struct bnxt_qplib_swqe {
 	/* General */
+#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
 	u64				wr_id;
 	u8				reqs_type;
 	u8				type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
 	struct scatterlist		*sglist;
 	u32				nmap;
 	u32				max_wqe;
+	u16				q_full_delta;
 	u16				max_sge;
 	u32				psn;
 	bool				flush_in_progress;
+	bool				condition;
+	bool				single;
+	bool				send_phantom;
+	u32				phantom_wqe_cnt;
+	u32				phantom_cqe_cnt;
+	u32				next_cq_cons;
 };
 
 struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
 	u8				timeout;
 	u8				retry_cnt;
 	u8				rnr_retry;
+	u64				wqe_cnt;
 	u32				min_rnr_timer;
 	u32				max_rd_atomic;
 	u32				max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
 	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==		\
 	   !((raw_cons) & (cp_bit)))
 
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+						 &qplib_q->hwq);
+}
+
 struct bnxt_qplib_cqe {
 	u8				status;
 	u8				type;
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-		       int num);
+		       int num, struct bnxt_qplib_qp **qp);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb726..16e4275 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/prefetch.h>
+#include <linux/delay.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_rcfw.h"
 static void bnxt_qplib_service_creq(unsigned long data);
 
 /* Hardware communication channel */
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
 	u16 cbit;
 	int rc;
 
-	cookie &= RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
-	if (!test_bit(cbit, rcfw->cmdq_bitmap))
-		dev_warn(&rcfw->pdev->dev,
-			 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
-			 cbit, cookie);
-
 	rc = wait_event_timeout(rcfw->waitq,
 				!test_bit(cbit, rcfw->cmdq_bitmap),
 				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
-	if (!rc) {
-		dev_warn(&rcfw->pdev->dev,
-			 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
-			 RCFW_CMD_WAIT_TIME_MS, cookie);
-	}
-
-	return rc;
+	return rc ? 0 : -ETIMEDOUT;
 };
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
-	u32 count = -1;
+	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
 	u16 cbit;
 
-	cookie &= RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
 	if (!test_bit(cbit, rcfw->cmdq_bitmap))
 		goto done;
 	do {
+		mdelay(1); /* 1 msec */
 		bnxt_qplib_service_creq((unsigned long)rcfw);
 	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
 done:
-	return count;
+	return count ? 0 : -ETIMEDOUT;
 };
 
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				   struct cmdq_base *req, void **crsbe,
-				   u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+			  struct creq_base *resp, void *sb, u8 is_block)
 {
-	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
 	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
 	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
-	struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
-	struct bnxt_qplib_crsqe *crsqe = NULL;
-	struct bnxt_qplib_crsbe **crsb_ptr;
+	struct bnxt_qplib_crsq *crsqe;
 	u32 sw_prod, cmdq_prod;
-	u8 retry_cnt = 0xFF;
-	dma_addr_t dma_addr;
 	unsigned long flags;
 	u32 size, opcode;
 	u16 cookie, cbit;
 	int pg, idx;
 	u8 *preq;
 
-retry:
 	opcode = req->opcode;
 	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
 	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		dev_err(&rcfw->pdev->dev,
 			"QPLIB: RCFW not initialized, reject opcode 0x%x",
 			opcode);
-		return NULL;
+		return -EINVAL;
 	}
 
 	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
 	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
 		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
-		return NULL;
+		return -EINVAL;
 	}
 
 	/* Cmdq are in 16-byte units, each request can consume 1 or more
 	 * cmdqe
 	 */
 	spin_lock_irqsave(&cmdq->lock, flags);
-	if (req->cmd_size > cmdq->max_elements -
-	    ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
-	     (cmdq->max_elements - 1))) {
+	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
 		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
 		spin_unlock_irqrestore(&cmdq->lock, flags);
-
-		if (!retry_cnt--)
-			return NULL;
-		goto retry;
+		return -EAGAIN;
 	}
 
-	retry_cnt = 0xFF;
 
-	cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
+	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
 	if (is_block)
 		cookie |= RCFW_CMD_IS_BLOCKING;
+
+	set_bit(cbit, rcfw->cmdq_bitmap);
 	req->cookie = cpu_to_le16(cookie);
-	if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: RCFW MAX outstanding cmd reached!");
-		atomic_dec(&rcfw->seq_num);
+	crsqe = &rcfw->crsqe_tbl[cbit];
+	if (crsqe->resp) {
 		spin_unlock_irqrestore(&cmdq->lock, flags);
-
-		if (!retry_cnt--)
-			return NULL;
-		goto retry;
+		return -EBUSY;
 	}
-	/* Reserve a resp buffer slot if requested */
-	if (req->resp_size && crsbe) {
-		spin_lock(&crsb->lock);
-		sw_prod = HWQ_CMP(crsb->prod, crsb);
-		crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
-		*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
-					  [get_crsb_idx(sw_prod)];
-		bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
-		req->resp_addr = cpu_to_le64(dma_addr);
-		crsb->prod++;
-		spin_unlock(&crsb->lock);
+	memset(resp, 0, sizeof(*resp));
+	crsqe->resp = (struct creq_qp_event *)resp;
+	crsqe->resp->cookie = req->cookie;
+	crsqe->req_size = req->cmd_size;
+	if (req->resp_size && sb) {
+		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
 
-		req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /
-				 BNXT_QPLIB_CMDQE_UNITS;
+		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
+		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+				  BNXT_QPLIB_CMDQE_UNITS;
 	}
+
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
 	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		preq += min_t(u32, size, sizeof(*cmdqe));
 		size -= min_t(u32, size, sizeof(*cmdqe));
 		cmdq->prod++;
+		rcfw->seq_num++;
 	} while (size > 0);
 
+	rcfw->seq_num++;
+
 	cmdq_prod = cmdq->prod;
 	if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
-		/* The very first doorbell write is required to set this flag
-		 * which prompts the FW to reset its internal pointers
+		/* The very first doorbell write
+		 * is required to set this flag
+		 * which prompts the FW to reset
+		 * its internal pointers
 		 */
 		cmdq_prod |= FIRMWARE_FIRST_FLAG;
 		rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
 	}
-	sw_prod = HWQ_CMP(crsq->prod, crsq);
-	crsqe = &crsq->crsq[sw_prod];
-	memset(crsqe, 0, sizeof(*crsqe));
-	crsq->prod++;
-	crsqe->req_size = req->cmd_size;
 
 	/* ring CMDQ DB */
+	wmb();
 	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
 	       rcfw->cmdq_bar_reg_prod_off);
 	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@@ -214,9 +185,56 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 done:
 	spin_unlock_irqrestore(&cmdq->lock, flags);
 	/* Return the CREQ response pointer */
-	return crsqe ? &crsqe->qp_event : NULL;
+	return 0;
 }
 
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+				 struct cmdq_base *req,
+				 struct creq_base *resp,
+				 void *sb, u8 is_block)
+{
+	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
+	u16 cookie;
+	u8 opcode, retry_cnt = 0xFF;
+	int rc = 0;
+
+	do {
+		opcode = req->opcode;
+		rc = __send_message(rcfw, req, resp, sb, is_block);
+		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
+		if (!rc)
+			break;
+
+		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
+			/* send failed */
+			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+				cookie, opcode);
+			return rc;
+		}
+		is_block ? mdelay(1) : usleep_range(500, 1000);
+
+	} while (retry_cnt--);
+
+	if (is_block)
+		rc = __block_for_resp(rcfw, cookie);
+	else
+		rc = __wait_for_resp(rcfw, cookie);
+	if (rc) {
+		/* timed out */
+		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
+			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+		return rc;
+	}
+
+	if (evnt->status) {
+		/* failed with status */
+		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+			cookie, opcode, evnt->status);
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
 /* Completions */
 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
 					 struct creq_func_event *func_event)
@@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 				       struct creq_qp_event *qp_event)
 {
-	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
 	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
-	struct bnxt_qplib_crsqe *crsqe;
-	u16 cbit, cookie, blocked = 0;
+	struct bnxt_qplib_crsq *crsqe;
 	unsigned long flags;
-	u32 sw_cons;
+	u16 cbit, blocked = 0;
+	u16 cookie;
+	__le16  mcookie;
 
 	switch (qp_event->event) {
 	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	default:
 		/* Command Response */
 		spin_lock_irqsave(&cmdq->lock, flags);
-		sw_cons = HWQ_CMP(crsq->cons, crsq);
-		crsqe = &crsq->crsq[sw_cons];
-		crsq->cons++;
-		memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
-
-		cookie = le16_to_cpu(crsqe->qp_event.cookie);
+		cookie = le16_to_cpu(qp_event->cookie);
+		mcookie = qp_event->cookie;
 		blocked = cookie & RCFW_CMD_IS_BLOCKING;
 		cookie &= RCFW_MAX_COOKIE_VALUE;
 		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
+		crsqe = &rcfw->crsqe_tbl[cbit];
+		if (crsqe->resp &&
+		    crsqe->resp->cookie  == mcookie) {
+			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+			crsqe->resp = NULL;
+		} else {
+			dev_err(&rcfw->pdev->dev,
+				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
+				crsqe->resp ? "mismatch" : "collision",
+				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+		}
 		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
 			dev_warn(&rcfw->pdev->dev,
 				 "QPLIB: CMD bit %d was not requested", cbit);
-
 		cmdq->cons += crsqe->req_size;
-		spin_unlock_irqrestore(&cmdq->lock, flags);
+		crsqe->req_size = 0;
+
 		if (!blocked)
 			wake_up(&rcfw->waitq);
-		break;
+		spin_unlock_irqrestore(&cmdq->lock, flags);
 	}
 	return 0;
 }
@@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
 	struct creq_base *creqe, **creq_ptr;
 	u32 sw_cons, raw_cons;
 	unsigned long flags;
-	u32 type;
+	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 
-	/* Service the CREQ until empty */
+	/* Service the CREQ until budget is over */
 	spin_lock_irqsave(&creq->lock, flags);
 	raw_cons = creq->cons;
-	while (1) {
+	while (budget > 0) {
 		sw_cons = HWQ_CMP(raw_cons, creq);
 		creq_ptr = (struct creq_base **)creq->pbl_ptr;
 		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
 		type = creqe->type & CREQ_BASE_TYPE_MASK;
 		switch (type) {
 		case CREQ_BASE_TYPE_QP_EVENT:
-			if (!bnxt_qplib_process_qp_event
-			    (rcfw, (struct creq_qp_event *)creqe))
-				rcfw->creq_qp_event_processed++;
-			else {
-				dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
-				dev_warn(&rcfw->pdev->dev,
-					 "QPLIB: type = 0x%x not handled",
-					 type);
-			}
+			bnxt_qplib_process_qp_event
+				(rcfw, (struct creq_qp_event *)creqe);
+			rcfw->creq_qp_event_processed++;
 			break;
 		case CREQ_BASE_TYPE_FUNC_EVENT:
 			if (!bnxt_qplib_process_func_event
@@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
 			break;
 		}
 		raw_cons++;
+		budget--;
 	}
+
 	if (creq->cons != raw_cons) {
 		creq->cons = raw_cons;
 		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 /* RCFW */
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
 {
-	struct creq_deinitialize_fw_resp *resp;
 	struct cmdq_deinitialize_fw req;
+	struct creq_deinitialize_fw_resp resp;
 	u16 cmd_flags = 0;
+	int rc;
 
 	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
-	resp = (struct creq_deinitialize_fw_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp)
-		return -EINVAL;
-
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
-		return -ETIMEDOUT;
-
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
-		return -EFAULT;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  NULL, 0);
+	if (rc)
+		return rc;
 
 	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
 	return 0;
@@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
 {
-	struct creq_initialize_fw_resp *resp;
 	struct cmdq_initialize_fw req;
+	struct creq_initialize_fw_resp resp;
 	u16 cmd_flags = 0, level;
+	int rc;
 
 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
 
@@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 
 skip_ctx_setup:
 	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
-	resp = (struct creq_initialize_fw_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: RCFW: INITIALIZE_FW send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: RCFW: INITIALIZE_FW timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: RCFW: INITIALIZE_FW failed");
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  NULL, 0);
+	if (rc)
+		return rc;
 	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
 	return 0;
 }
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 {
-	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb);
-	kfree(rcfw->crsq.crsq);
+	kfree(rcfw->crsqe_tbl);
 	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
 	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
-
 	rcfw->pdev = NULL;
 }
 
@@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
 		goto fail;
 	}
 
-	rcfw->crsq.max_elements = rcfw->cmdq.max_elements;
-	rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements,
-				  sizeof(*rcfw->crsq.crsq), GFP_KERNEL);
-	if (!rcfw->crsq.crsq)
+	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
+				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+	if (!rcfw->crsqe_tbl)
 		goto fail;
 
-	rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
-	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
-				      &rcfw->crsb.max_elements,
-				      BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
-				      HWQ_TYPE_CTX)) {
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: HW channel CRSB allocation failed");
-		goto fail;
-	}
 	return 0;
 
 fail:
@@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 	int rc;
 
 	/* General */
-	atomic_set(&rcfw->seq_num, 0);
+	rcfw->seq_num = 0;
 	rcfw->flags = FIRMWARE_FIRST_FLAG;
 	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
 				  sizeof(unsigned long));
@@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 
 	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
 
-	/* CRSQ */
-	rcfw->crsq.prod = 0;
-	rcfw->crsq.cons = 0;
-
 	/* CREQ */
 	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
 	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
 	return 0;
 }
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+		struct bnxt_qplib_rcfw *rcfw,
+		u32 size)
+{
+	struct bnxt_qplib_rcfw_sbuf *sbuf;
+
+	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+	if (!sbuf)
+		return NULL;
+
+	sbuf->size = size;
+	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
+				       &sbuf->dma_addr, GFP_ATOMIC);
+	if (!sbuf->sb)
+		goto bail;
+
+	return sbuf;
+bail:
+	kfree(sbuf);
+	return NULL;
+}
+
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+			       struct bnxt_qplib_rcfw_sbuf *sbuf)
+{
+	if (sbuf->sb)
+		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
+				  sbuf->sb, sbuf->dma_addr);
+	kfree(sbuf);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index d3567d7..09ce121 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
 #define RCFW_MAX_OUTSTANDING_CMD	BNXT_QPLIB_CMDQE_MAX_CNT
 #define RCFW_MAX_COOKIE_VALUE		0x7FFF
 #define RCFW_CMD_IS_BLOCKING		0x8000
+#define RCFW_BLOCKED_CMD_WAIT_COUNT	0x4E20
 
 /* Cmdq contains a fix number of a 16-Byte slots */
 struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
 	u8			data[1024];
 };
 
-/* CRSQ SB */
-#define BNXT_QPLIB_CRSBE_MAX_CNT	4
-#define BNXT_QPLIB_CRSBE_UNITS		sizeof(struct bnxt_qplib_crsbe)
-#define BNXT_QPLIB_CRSBE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
-
-#define MAX_CRSB_IDX			(BNXT_QPLIB_CRSBE_MAX_CNT - 1)
-#define MAX_CRSB_IDX_PER_PG		(BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
-
-static inline u32 get_crsb_pg(u32 val)
-{
-	return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
-}
-
-static inline u32 get_crsb_idx(u32 val)
-{
-	return val & MAX_CRSB_IDX_PER_PG;
-}
-
-static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
-					    u32 prod, dma_addr_t *dma_addr)
-{
-		*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
-		*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
-			      BNXT_QPLIB_CRSBE_UNITS;
-}
-
 /* CREQ */
 /* Allocate 1 per QP for async error notification for now */
 #define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
 #define CREQ_DB(db, raw_cons, cp_bit)				\
 	writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
 
+#define CREQ_ENTRY_POLL_BUDGET		0x100
+
 /* HWQ */
-struct bnxt_qplib_crsqe {
-	struct creq_qp_event	qp_event;
+
+struct bnxt_qplib_crsq {
+	struct creq_qp_event	*resp;
 	u32			req_size;
 };
 
-struct bnxt_qplib_crsq {
-	struct bnxt_qplib_crsqe	*crsq;
-	u32			prod;
-	u32			cons;
-	u32			max_elements;
+struct bnxt_qplib_rcfw_sbuf {
+	void *sb;
+	dma_addr_t dma_addr;
+	u32 size;
 };
 
 /* RCFW Communication Channels */
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
 	wait_queue_head_t	waitq;
 	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
 					       struct creq_func_event *);
-	atomic_t		seq_num;
+	u32			seq_num;
 
 	/* Bar region info */
 	void __iomem		*cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
 
 	/* Actual Cmd and Resp Queues */
 	struct bnxt_qplib_hwq	cmdq;
-	struct bnxt_qplib_crsq	crsq;
-	struct bnxt_qplib_hwq	crsb;
+	struct bnxt_qplib_crsq	*crsqe_tbl;
 };
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 					(struct bnxt_qplib_rcfw *,
 					 struct creq_func_event *));
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				   struct cmdq_base *req, void **crsbe,
-				   u8 is_block);
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+				struct bnxt_qplib_rcfw *rcfw,
+				u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+			       struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+				 struct cmdq_base *req, struct creq_base *resp,
+				 void *sbuf, u8 is_block);
 
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6277d80..2e48555 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 
 #define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))
 
+#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
+				((HWQ_CMP(hwq->prod, hwq)\
+				- HWQ_CMP(hwq->cons, hwq))\
+				& (hwq->max_elements - 1)))
 enum bnxt_qplib_hwq_type {
 	HWQ_TYPE_CTX,
 	HWQ_TYPE_QUEUE,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7b31ecc..fde18cf 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr)
 {
 	struct cmdq_query_func req;
-	struct creq_query_func_resp *resp;
+	struct creq_query_func_resp resp;
+	struct bnxt_qplib_rcfw_sbuf *sbuf;
 	struct creq_query_func_resp_sb *sb;
 	u16 cmd_flags = 0;
 	u32 temp;
 	u8 *tqm_alloc;
-	int i;
+	int i, rc = 0;
 
 	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);
 
-	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
-	resp = (struct creq_query_func_resp *)
-		bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb,
-					     0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed ");
+	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+	if (!sbuf) {
 		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
+			"QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+		return -ENOMEM;
 	}
+
+	sb = sbuf->sb;
+	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  (void *)sbuf, 0);
+	if (rc)
+		goto bail;
+
 	/* Extract the context from the side buffer */
 	attr->max_qp = le32_to_cpu(sb->max_qp);
 	attr->max_qp_rd_atom =
@@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
 	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+	/*
+	 * 128 WQEs need to be reserved for the HW (8916). Prevent
+	 * reporting the max number.
+	 */
+	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
 	attr->max_qp_sges = sb->max_sge;
 	attr->max_cq = le32_to_cpu(sb->max_cq);
 	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
 		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
 	}
-	return 0;
+
+bail:
+	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+	return rc;
 }
 
 /* SGID */
@@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	/* Remove GID from the SGID table */
 	if (update) {
 		struct cmdq_delete_gid req;
-		struct creq_delete_gid_resp *resp;
+		struct creq_delete_gid_resp resp;
 		u16 cmd_flags = 0;
+		int rc;
 
 		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
 		if (sgid_tbl->hw_id[index] == 0xFFFF) {
@@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			return -EINVAL;
 		}
 		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
-		resp = (struct creq_delete_gid_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL,
-						     0);
-		if (!resp) {
-			dev_err(&res->pdev->dev,
-				"QPLIB: SP: DELETE_GID send failed");
-			return -EINVAL;
-		}
-		if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
-						   le16_to_cpu(req.cookie))) {
-			/* Cmd timed out */
-			dev_err(&res->pdev->dev,
-				"QPLIB: SP: DELETE_GID timed out");
-			return -ETIMEDOUT;
-		}
-		if (resp->status ||
-		    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-			dev_err(&res->pdev->dev,
-				"QPLIB: SP: DELETE_GID failed ");
-			dev_err(&res->pdev->dev,
-				"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-				resp->status, le16_to_cpu(req.cookie),
-				le16_to_cpu(resp->cookie));
-			return -EINVAL;
-		}
+		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+						  (void *)&resp, NULL, 0);
+		if (rc)
+			return rc;
 	}
 	memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
 	       sizeof(bnxt_qplib_gid_zero));
@@ -234,7 +215,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 						   struct bnxt_qplib_res,
 						   sgid_tbl);
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	int i, free_idx, rc = 0;
+	int i, free_idx;
 
 	if (!sgid_tbl) {
 		dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
@@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	}
 	if (update) {
 		struct cmdq_add_gid req;
-		struct creq_add_gid_resp *resp;
+		struct creq_add_gid_resp resp;
 		u16 cmd_flags = 0;
 		u32 temp32[4];
 		u16 temp16[3];
+		int rc;
 
 		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
 
@@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 		req.src_mac[1] = cpu_to_be16(temp16[1]);
 		req.src_mac[2] = cpu_to_be16(temp16[2]);
 
-		resp = (struct creq_add_gid_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-		if (!resp) {
-			dev_err(&res->pdev->dev,
-				"QPLIB: SP: ADD_GID send failed");
-			return -EINVAL;
-		}
-		if (!bnxt_qplib_rcfw_wait_for_resp(rcfw,
-						   le16_to_cpu(req.cookie))) {
-			/* Cmd timed out */
-			dev_err(&res->pdev->dev,
-				"QPIB: SP: ADD_GID timed out");
-			return -ETIMEDOUT;
-		}
-		if (resp->status ||
-		    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-			dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed ");
-			dev_err(&res->pdev->dev,
-				"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-				resp->status, le16_to_cpu(req.cookie),
-				le16_to_cpu(resp->cookie));
-			return -EINVAL;
-		}
-		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid);
+		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+						  (void *)&resp, NULL, 0);
+		if (rc)
+			return rc;
+		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
 	}
 	/* Add GID to the sgid_tbl */
 	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
@@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 
 	*index = free_idx;
 	/* unlock */
-	return rc;
+	return 0;
 }
 
 /* pkeys */
@@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_create_ah req;
-	struct creq_create_ah_resp *resp;
+	struct creq_create_ah_resp resp;
 	u16 cmd_flags = 0;
 	u32 temp32[4];
 	u16 temp16[3];
+	int rc;
 
 	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);
 
@@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
 	req.dest_mac[1] = cpu_to_le16(temp16[1]);
 	req.dest_mac[2] = cpu_to_le16(temp16[2]);
 
-	resp = (struct creq_create_ah_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 1);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
-	ah->id = le32_to_cpu(resp->xid);
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  NULL, 1);
+	if (rc)
+		return rc;
+
+	ah->id = le32_to_cpu(resp.xid);
 	return 0;
 }
 
@@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_destroy_ah req;
-	struct creq_destroy_ah_resp *resp;
+	struct creq_destroy_ah_resp resp;
 	u16 cmd_flags = 0;
+	int rc;
 
 	/* Clean up the AH table in the device */
 	RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);
 
 	req.ah_cid = cpu_to_le32(ah->id);
 
-	resp = (struct creq_destroy_ah_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 1);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  NULL, 1);
+	if (rc)
+		return rc;
 	return 0;
 }
 
@@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_deallocate_key req;
-	struct creq_deallocate_key_resp *resp;
+	struct creq_deallocate_key_resp resp;
 	u16 cmd_flags = 0;
+	int rc;
 
 	if (mrw->lkey == 0xFFFFFFFF) {
 		dev_info(&res->pdev->dev,
@@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 	else
 		req.key = cpu_to_le32(mrw->lkey);
 
-	resp = (struct creq_deallocate_key_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed ");
-		dev_err(&res->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+					  NULL, 0);
+	if (rc)
+		return rc;
+
 	/* Free the qplib's MRW memory */
 	if (mrw->hwq.max_elements)
 		bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
@@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_allocate_mrw req;
-	struct creq_allocate_mrw_resp *resp;
+	struct creq_allocate_mrw_resp resp;
 	u16 cmd_flags = 0;
 	unsigned long tmp;
+	int rc;
 
 	RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);
 
@@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
 	tmp = (unsigned long)mrw;
 	req.mrw_handle = cpu_to_le64(tmp);
 
-	resp = (struct creq_allocate_mrw_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, 0);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed");
-		return -EINVAL;
-	}
-	if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
-		/* Cmd timed out */
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
+	if (rc)
+		return rc;
+
 	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
 	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
 	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
-		mrw->rkey = le32_to_cpu(resp->xid);
+		mrw->rkey = le32_to_cpu(resp.xid);
 	else
-		mrw->lkey = le32_to_cpu(resp->xid);
+		mrw->lkey = le32_to_cpu(resp.xid);
 	return 0;
 }
 
@@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_deregister_mr req;
-	struct creq_deregister_mr_resp *resp;
+	struct creq_deregister_mr_resp resp;
 	u16 cmd_flags = 0;
 	int rc;
 
 	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);
 
 	req.lkey = cpu_to_le32(mrw->lkey);
-	resp = (struct creq_deregister_mr_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, block);
-	if (!resp) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed");
-		return -EINVAL;
-	}
-	if (block)
-		rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
-						    le16_to_cpu(req.cookie));
-	else
-		rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
-						   le16_to_cpu(req.cookie));
-	if (!rc) {
-		/* Cmd timed out */
-		dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out");
-		return -ETIMEDOUT;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed ");
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, block);
+	if (rc)
+		return rc;
 
 	/* Free the qplib's MR memory */
 	if (mrw->hwq.max_elements) {
@@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_register_mr req;
-	struct creq_register_mr_resp *resp;
+	struct creq_register_mr_resp resp;
 	u16 cmd_flags = 0, level;
 	int pg_ptrs, pages, i, rc;
 	dma_addr_t **pbl_ptr;
@@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
 	req.key = cpu_to_le32(mr->lkey);
 	req.mr_size = cpu_to_le64(mr->total_size);
 
-	resp = (struct creq_register_mr_resp *)
-			bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-						     NULL, block);
-	if (!resp) {
-		dev_err(&res->pdev->dev, "SP: REG_MR send failed");
-		rc = -EINVAL;
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, block);
+	if (rc)
 		goto fail;
-	}
-	if (block)
-		rc = bnxt_qplib_rcfw_block_for_resp(rcfw,
-						    le16_to_cpu(req.cookie));
-	else
-		rc = bnxt_qplib_rcfw_wait_for_resp(rcfw,
-						   le16_to_cpu(req.cookie));
-	if (!rc) {
-		/* Cmd timed out */
-		dev_err(&res->pdev->dev, "SP: REG_MR timed out");
-		rc = -ETIMEDOUT;
-		goto fail;
-	}
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed ");
-		dev_err(&res->pdev->dev,
-			"QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		rc = -EINVAL;
-		goto fail;
-	}
+
 	return 0;
 
 fail:
@@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct cmdq_map_tc_to_cos req;
-	struct creq_map_tc_to_cos_resp *resp;
+	struct creq_map_tc_to_cos_resp resp;
 	u16 cmd_flags = 0;
-	int tleft;
+	int rc = 0;
 
 	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
 	req.cos0 = cpu_to_le16(cids[0]);
 	req.cos1 = cpu_to_le16(cids[1]);
 
-	resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
-	if (!resp) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed");
-		return -EINVAL;
-	}
-
-	tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie));
-	if (!tleft) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out");
-		return -ETIMEDOUT;
-	}
-
-	if (resp->status ||
-	    le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
-		dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed ");
-		dev_err(&res->pdev->dev,
-			"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
-			resp->status, le16_to_cpu(req.cookie),
-			le16_to_cpu(resp->cookie));
-		return -EINVAL;
-	}
-
+	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+					  (void *)&resp, NULL, 0);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 1442a61..a543f95 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
 #ifndef __BNXT_QPLIB_SP_H__
 #define __BNXT_QPLIB_SP_H__
 
+#define BNXT_QPLIB_RESERVED_QP_WRS	128
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b6fe459..0910faf 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	c4iw_put_ep(&ep->parent_ep->com);
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 
 	pr_debug("%s rdev %p\n", __func__, rdev);
 	req->cmd = CPL_ABORT_NO_RST;
+	skb_get(skb);
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret) {
 		__state_set(&ep->com, DEAD);
 		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-	}
+	} else
+		kfree_skb(skb);
 }
 
 static int send_flowc(struct c4iw_ep *ep)
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+	       sizeof(struct tcphdr) +
 	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
 		child_ep->mtu = peer_mss + hdrs;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 329fb65e..ae0b79ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
 		kfree(entry);
 	}
 
-	list_for_each_safe(pos, nxt, &uctx->qpids) {
+	list_for_each_safe(pos, nxt, &uctx->cqids) {
 		entry = list_entry(pos, struct c4iw_qid_list, entry);
 		list_del_init(&entry->entry);
 		kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
 	if (!rdev->free_workq) {
 		err = -ENOMEM;
-		goto err_free_status_page;
+		goto err_free_status_page_and_wr_log;
 	}
 
 	rdev->status_page->db_off = 0;
 
 	return 0;
-err_free_status_page:
+err_free_status_page_and_wr_log:
+	if (c4iw_wr_log && rdev->wr_log)
+		kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
 	c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
 	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
+	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	c4iw_ocqp_pool_destroy(rdev);
 	c4iw_destroy_resource(&rdev->resource);
 }
 
@@ -971,7 +975,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 		 devp->rdev.lldi.sge_egrstatuspagesize);
 
 	devp->rdev.hw_queue.t4_eq_status_entries =
-		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+		devp->rdev.lldi.sge_egrstatuspagesize / 64;
 	devp->rdev.hw_queue.t4_max_eq_size = 65520;
 	devp->rdev.hw_queue.t4_max_iq_size = 65520;
 	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 5d6b1ee..2ba00b8 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
 	}
 }
 
-static void write_global_credit(struct hfi1_devdata *dd,
-				u8 vau, u16 total, u16 shared)
+/*
+ * Set up allocation unit value.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
 {
-	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
-		  ((u64)total <<
-		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
-		  ((u64)shared <<
-		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
-		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+	/* do not modify other values in the register */
+	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 }
 
 /*
  * Set up initial VL15 credits of the remote.  Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
  */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
 {
-	/* leave shared count at zero for both global and VL15 */
-	write_global_credit(dd, vau, vl15buf, 0);
+	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+	/* set initial values for total and shared credit limit */
+	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+	/*
+	 * Set total limit to be equal to VL15 credits.
+	 * Leave shared limit at 0.
+	 */
+	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 
 	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
 		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
-	write_global_credit(dd, 0, 0, 0);
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
 	/* reset the CM block */
 	pio_send_control(dd, PSC_CM_RESET);
+	/* reset cached value */
+	dd->vl15buf_cached = 0;
 }
 
 /* convert a vCU to a CU */
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
 {
 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
 						  link_up_work);
+	struct hfi1_devdata *dd = ppd->dd;
+
 	set_link_state(ppd, HLS_UP_INIT);
 
 	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
-	read_ltp_rtt(ppd->dd);
+	read_ltp_rtt(dd);
 	/*
 	 * OPA specifies that certain counters are cleared on a transition
 	 * to link up, so do that.
 	 */
-	clear_linkup_counters(ppd->dd);
+	clear_linkup_counters(dd);
 	/*
 	 * And (re)set link up default values.
 	 */
 	set_linkup_defaults(ppd);
 
+	/*
+	 * Set VL15 credits. Use cached value from verify cap interrupt.
+	 * In case of quick linkup or simulator, vl15 value will be set by
+	 * handle_linkup_change. VerifyCap interrupt handler will not be
+	 * called in those scenarios.
+	 */
+	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+		set_up_vl15(dd, dd->vl15buf_cached);
+
 	/* enforce link speed enabled */
 	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
 		/* oops - current speed is not enabled, bounce */
-		dd_dev_err(ppd->dd,
+		dd_dev_err(dd,
 			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
 			   ppd->link_speed_active, ppd->link_speed_enabled);
 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
 	 */
 	if (vau == 0)
 		vau = 1;
-	set_up_vl15(dd, vau, vl15buf);
+	set_up_vau(dd, vau);
+
+	/*
+	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
+	 * credits value and wait for link-up interrupt to set it.
+	 */
+	set_up_vl15(dd, 0);
+	dd->vl15buf_cached = vl15buf;
 
 	/* set up the LCB CRC mode */
 	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5bfa839..793514f 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -839,7 +839,9 @@
 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index da322e6..414a04a 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
 	/* initial vl15 credits to use */
 	u16 vl15_init;
 
+	/*
+	 * Cached value for vl15buf, read during verify cap interrupt. VL15
+	 * credits are to be kept at 0 and set when handling the link-up
+	 * interrupt. This removes the possibility of receiving VL15 MAD
+	 * packets before this HFI is ready.
+	 */
+	u16 vl15buf_cached;
+
 	/* Misc small ints */
 	u8 n_krcv_queues;
 	u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
 
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
 
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index ba265d0..04a5082d 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
 		 * the remote values.  Both sides must be using the values.
 		 */
 		if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
-			set_up_vl15(dd, dd->vau, dd->vl15_init);
+			set_up_vau(dd, dd->vau);
+			set_up_vl15(dd, dd->vl15_init);
 			assign_remote_cm_au_table(dd, dd->vcu);
 		}
 
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 93faf86..6a9f6f9 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
 	/*
 	 * Save BARs and command to rewrite after device reset.
 	 */
-	dd->pcibar0 = addr;
-	dd->pcibar1 = addr >> 32;
+	pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+	pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
 	pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
 	pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
 	pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 069bdaf..1080778 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2159,8 +2159,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 		ret = hfi1_rvt_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			/* peer will send again */
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
 		goto send_last;
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 50d140d..2f3bbca 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-	&cc_prescan_attr.attr
+	&cc_prescan_attr.attr,
+	NULL
 };
 
 static struct kobj_type port_cc_ktype = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index f3bc01b..6ae98aa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
 	}
 
 	ctrl_ird |= IETF_PEER_TO_PEER;
-	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
 		} else {
 			type = I40IW_CM_EVENT_CONNECTED;
 			cm_node->state = I40IW_CM_STATE_OFFLOADED;
-			i40iw_send_ack(cm_node);
 		}
+		i40iw_send_ack(cm_node);
 		break;
 	default:
 		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index f82483b..a027e20 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
 	struct i40iw_sc_dev *dev = vsi->dev;
 	struct i40iw_sc_qp *qp = NULL;
 	bool qs_handle_change = false;
-	bool mss_change = false;
 	unsigned long flags;
 	u16 qs_handle;
 	int i;
 
-	if (vsi->mss != l2params->mss) {
-		mss_change = true;
-		vsi->mss = l2params->mss;
-	}
+	vsi->mss = l2params->mss;
 
 	i40iw_fill_qos_list(l2params->qs_handle_list);
 	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
 		qs_handle = l2params->qs_handle_list[i];
 		if (vsi->qos[i].qs_handle != qs_handle)
 			qs_handle_change = true;
-		else if (!mss_change)
-			continue;       /* no MSS nor qs handle change */
 		spin_lock_irqsave(&vsi->qos[i].lock, flags);
 		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
 		while (qp) {
-			if (mss_change)
-				i40iw_qp_mss_modify(dev, qp);
 			if (qs_handle_change) {
 				qp->qs_handle = qs_handle;
 				/* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 
 	set_64bit_val(wqe,
 		      8,
-		      LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
 		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
 
 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
 		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
 		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-		 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
 		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
 		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
 		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 2728af3..e0f47cc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
 				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
 	if (status)
-		goto exit;
+		goto error;
 	info.fpm_query_buf_pa = mem.pa;
 	info.fpm_query_buf = mem.va;
 	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
 				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
 	if (status)
-		goto exit;
+		goto error;
 	info.fpm_commit_buf_pa = mem.pa;
 	info.fpm_commit_buf = mem.va;
 	info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	info.exception_lan_queue = 1;
 	info.vchnl_send = i40iw_virtchnl_send;
 	status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
-	if (status) {
-		kfree(iwdev->hmc_info_mem);
-		iwdev->hmc_info_mem = NULL;
-	}
+
+	if (status)
+		goto error;
 	memset(&vsi_info, 0, sizeof(vsi_info));
 	vsi_info.dev = &iwdev->sc_dev;
 	vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 		memset(&stats_info, 0, sizeof(stats_info));
 		stats_info.fcn_id = ldev->fid;
 		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+		if (!stats_info.pestat) {
+			status = I40IW_ERR_NO_MEMORY;
+			goto error;
+		}
 		stats_info.stats_initialize = true;
 		if (stats_info.pestat)
 			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
 	}
 	return status;
+error:
+	kfree(iwdev->hmc_info_mem);
+	iwdev->hmc_info_mem = NULL;
+	return status;
 }
 
 /**
@@ -1933,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
 {
 	struct i40iw_device *iwdev;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	iwdev = dev->back_dev;
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index aa66c1c..f27be3e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
 			    struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 7b76259..959ec81 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
 struct i40iw_modify_qp_info {
 	u64 rx_win0;
 	u64 rx_win1;
-	u16 new_mss;
 	u8 next_iwarp_state;
 	u8 termlen;
 	bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
 	bool dont_send_term;
 	bool dont_send_fin;
 	bool cached_var_valid;
-	bool mss_change;
 	bool force_loopback;
 };
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 409a378..56d9869 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -757,23 +757,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
 }
 
 /**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
-	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-	struct i40iw_modify_qp_info info;
-
-	memset(&info, 0, sizeof(info));
-	info.mss_change = true;
-	info.new_mss = qp->vsi->mss;
-	i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
-/**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp
  * @next_state: qp's next state
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index f4d1368..48fd327 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
 	if (!dev->vchnl_up)
 		return I40IW_ERR_NOT_READY;
 	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
-		if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
-			vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
-		else
-			vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
 		return I40IW_SUCCESS;
 	}
 	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index b469471..21d31cb 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	if (port < 0)
 		return;
 	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+	ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
 
 	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
 	if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772d..9ecc089 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2979,6 +2979,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+	switch (umr_fence_cap) {
+	case MLX5_CAP_UMR_FENCE_NONE:
+		return MLX5_FENCE_MODE_NONE;
+	case MLX5_CAP_UMR_FENCE_SMALL:
+		return MLX5_FENCE_MODE_INITIATOR_SMALL;
+	default:
+		return MLX5_FENCE_MODE_STRONG_ORDERING;
+	}
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
 	struct ib_srq_init_attr attr;
@@ -3680,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
 	dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
-	dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
-	dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
+	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
+		dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
+	}
 	if (mlx5_core_is_pf(mdev)) {
 		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
 		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
@@ -3693,6 +3707,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
 	mlx5_ib_internal_fill_odp_caps(dev);
 
+	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
 		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
 		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 38c877b..bdcf254 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
 	struct mlx5_ib_wq	rq;
 
 	u8			sq_signal_bits;
-	u8			fm_cache;
+	u8			next_fence;
 	struct mlx5_ib_wq	sq;
 
 	/* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_port	*port;
 	struct mlx5_sq_bfreg     bfreg;
 	struct mlx5_sq_bfreg     fp_bfreg;
+	u8				umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1..ebb6768 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-		     wr->send_flags & IB_SEND_FENCE))
-		return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-	if (unlikely(fence)) {
-		if (wr->send_flags & IB_SEND_FENCE)
-			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-		else
-			return fence;
-	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-		return MLX5_FENCE_MODE_FENCE;
-	}
-
-	return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 		     struct mlx5_wqe_ctrl_seg **ctrl,
 		     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
 		       struct mlx5_wqe_ctrl_seg *ctrl,
 		       u8 size, unsigned idx, u64 wr_id,
-		       int nreq, u8 fence, u8 next_fence,
-		       u32 mlx5_opcode)
+		       int nreq, u8 fence, u32 mlx5_opcode)
 {
 	u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 					     mlx5_opcode | ((u32)opmod << 24));
 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
-	qp->fm_cache = next_fence;
 	if (unlikely(qp->wq_sig))
 		ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		fence = qp->fm_cache;
 		num_sge = wr->num_sge;
 		if (unlikely(num_sge > qp->sq.max_gs)) {
 			mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
+		if (wr->opcode == IB_WR_LOCAL_INV ||
+		    wr->opcode == IB_WR_REG_MR) {
+			fence = dev->umr_fence;
+			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+		} else if (wr->send_flags & IB_SEND_FENCE) {
+			if (qp->next_fence)
+				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+			else
+				fence = MLX5_FENCE_MODE_FENCE;
+		} else {
+			fence = qp->next_fence;
+		}
+
 		switch (ibqp->qp_type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				goto out;
 
 			case IB_WR_LOCAL_INV:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
 				set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 
 			case IB_WR_REG_MR:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_UMR);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_UMR);
 				/*
 				 * SET_PSV WQEs are not signaled and solicited
 				 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
 				err = begin_wqe(qp, &seg, &ctrl, wr,
 						&idx, &size, nreq);
 				if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
 						 mr->sig->psv_wire.psv_idx, &seg,
 						 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
+				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				num_sge = 0;
 				goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-			   get_fence(fence, wr), next_fence,
+		qp->next_fence = next_fence;
+		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
 			   mlx5_ib_opcode[wr->opcode]);
 skip_psv:
 		if (0)
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 5b96010..a30aa65 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -815,7 +815,7 @@ static struct pci_driver nes_pci_driver = {
 	.remove = nes_remove,
 };
 
-static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
+static ssize_t adapter_show(struct device_driver *ddp, char *buf)
 {
 	unsigned int  devfn = 0xffffffff;
 	unsigned char bus_number = 0xff;
@@ -834,7 +834,7 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
 }
 
-static ssize_t nes_store_adapter(struct device_driver *ddp,
+static ssize_t adapter_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -843,7 +843,7 @@ static ssize_t nes_store_adapter(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
+static ssize_t eeprom_cmd_show(struct device_driver *ddp, char *buf)
 {
 	u32 eeprom_cmd = 0xdead;
 	u32 i = 0;
@@ -859,7 +859,7 @@ static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
 	return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
 }
 
-static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
+static ssize_t eeprom_cmd_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -880,7 +880,7 @@ static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
+static ssize_t eeprom_data_show(struct device_driver *ddp, char *buf)
 {
 	u32 eeprom_data = 0xdead;
 	u32 i = 0;
@@ -897,7 +897,7 @@ static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
 }
 
-static ssize_t nes_store_ee_data(struct device_driver *ddp,
+static ssize_t eeprom_data_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -918,7 +918,7 @@ static ssize_t nes_store_ee_data(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
+static ssize_t flash_cmd_show(struct device_driver *ddp, char *buf)
 {
 	u32 flash_cmd = 0xdead;
 	u32 i = 0;
@@ -935,7 +935,7 @@ static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
 }
 
-static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
+static ssize_t flash_cmd_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -956,7 +956,7 @@ static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
+static ssize_t flash_data_show(struct device_driver *ddp, char *buf)
 {
 	u32 flash_data = 0xdead;
 	u32 i = 0;
@@ -973,7 +973,7 @@ static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
 }
 
-static ssize_t nes_store_flash_data(struct device_driver *ddp,
+static ssize_t flash_data_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -994,12 +994,12 @@ static ssize_t nes_store_flash_data(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
+static ssize_t nonidx_addr_show(struct device_driver *ddp, char *buf)
 {
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
 }
 
-static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
+static ssize_t nonidx_addr_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -1010,7 +1010,7 @@ static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
+static ssize_t nonidx_data_show(struct device_driver *ddp, char *buf)
 {
 	u32 nonidx_data = 0xdead;
 	u32 i = 0;
@@ -1027,7 +1027,7 @@ static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
 }
 
-static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
+static ssize_t nonidx_data_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -1048,12 +1048,12 @@ static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
+static ssize_t idx_addr_show(struct device_driver *ddp, char *buf)
 {
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
 }
 
-static ssize_t nes_store_idx_addr(struct device_driver *ddp,
+static ssize_t idx_addr_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -1064,7 +1064,7 @@ static ssize_t nes_store_idx_addr(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
+static ssize_t idx_data_show(struct device_driver *ddp, char *buf)
 {
 	u32 idx_data = 0xdead;
 	u32 i = 0;
@@ -1081,7 +1081,7 @@ static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
 }
 
-static ssize_t nes_store_idx_data(struct device_driver *ddp,
+static ssize_t idx_data_store(struct device_driver *ddp,
 	const char *buf, size_t count)
 {
 	char *p = (char *)buf;
@@ -1102,11 +1102,7 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-
-/**
- * nes_show_wqm_quanta
- */
-static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
+static ssize_t wqm_quanta_show(struct device_driver *ddp, char *buf)
 {
 	u32 wqm_quanta_value = 0xdead;
 	u32 i = 0;
@@ -1123,12 +1119,8 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
 	return  snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
 }
 
-
-/**
- * nes_store_wqm_quanta
- */
-static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
-					const char *buf, size_t count)
+static ssize_t wqm_quanta_store(struct device_driver *ddp, const char *buf,
+				size_t count)
 {
 	unsigned long wqm_quanta_value;
 	u32 wqm_config1;
@@ -1153,26 +1145,16 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
-static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
-		   nes_show_adapter, nes_store_adapter);
-static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
-		   nes_show_ee_cmd, nes_store_ee_cmd);
-static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
-		   nes_show_ee_data, nes_store_ee_data);
-static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
-		   nes_show_flash_cmd, nes_store_flash_cmd);
-static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
-		   nes_show_flash_data, nes_store_flash_data);
-static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
-		   nes_show_nonidx_addr, nes_store_nonidx_addr);
-static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
-		   nes_show_nonidx_data, nes_store_nonidx_data);
-static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
-		   nes_show_idx_addr, nes_store_idx_addr);
-static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
-		   nes_show_idx_data, nes_store_idx_data);
-static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR,
-		   nes_show_wqm_quanta, nes_store_wqm_quanta);
+static DRIVER_ATTR_RW(adapter);
+static DRIVER_ATTR_RW(eeprom_cmd);
+static DRIVER_ATTR_RW(eeprom_data);
+static DRIVER_ATTR_RW(flash_cmd);
+static DRIVER_ATTR_RW(flash_data);
+static DRIVER_ATTR_RW(nonidx_addr);
+static DRIVER_ATTR_RW(nonidx_data);
+static DRIVER_ATTR_RW(idx_addr);
+static DRIVER_ATTR_RW(idx_data);
+static DRIVER_ATTR_RW(wqm_quanta);
 
 static int nes_create_driver_sysfs(struct pci_driver *drv)
 {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index fb983df..30b256a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 		ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
 	}
 	ctrl_ird |= IETF_PEER_TO_PEER;
-	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
 			type = NES_CM_EVENT_CONNECTED;
 			cm_node->state = NES_CM_STATE_TSA;
 		}
-
+		send_ack(cm_node, NULL);
 		break;
 	default:
 		WARN_ON(1);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index aa08c76..d961f79 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,7 +58,10 @@
 #define QEDR_MSG_QP   "  QP"
 #define QEDR_MSG_GSI  " GSI"
 
-#define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
+#define QEDR_CQ_MAGIC_NUMBER	(0x11223344)
+
+#define FW_PAGE_SIZE		(RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT		(12)
 
 struct qedr_dev;
 
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 3d7705c..d86dbe8 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
 		return rc;
 	}
 
-	vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-	if (vlan_id < VLAN_CFI_MASK)
-		has_vlan = true;
-	if (sgid_attr.ndev)
+	if (sgid_attr.ndev) {
+		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+		if (vlan_id < VLAN_CFI_MASK)
+			has_vlan = true;
+
 		dev_put(sgid_attr.ndev);
+	}
 
 	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
 		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 17685cf..d6723c3 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
 
 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl *pbl,
-			       struct qedr_pbl_info *pbl_info)
+			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
 	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
 	struct scatterlist *sg;
 	struct regpair *pbe;
+	u64 pg_addr;
 	int entry;
-	u32 addr;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	shift = umem->page_shift;
 
+	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
+		pg_addr = sg_dma_address(sg);
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			/* store the page address in pbe */
-			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      (pg_cnt << shift));
-			addr = upper_32_bits(sg_dma_address(sg) +
-					     (pg_cnt << shift));
-			pbe->hi = cpu_to_le32(addr);
-			pbe_cnt++;
-			total_num_pbes++;
-			pbe++;
+			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+				pbe->lo = cpu_to_le32(pg_addr);
+				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
 
-			if (total_num_pbes == pbl_info->num_pbes)
-				return;
+				pg_addr += BIT(pg_shift);
+				pbe_cnt++;
+				total_num_pbes++;
+				pbe++;
 
-			/* If the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct regpair *)pbl_tbl->va;
-				pbe_cnt = 0;
+				if (total_num_pbes == pbl_info->num_pbes)
+					return;
+
+				/* If the given pbl is full storing the pbes,
+				 * move to next pbl.
+				 */
+				if (pbe_cnt ==
+				    (pbl_info->pbl_size / sizeof(u64))) {
+					pbl_tbl++;
+					pbe = (struct regpair *)pbl_tbl->va;
+					pbe_cnt = 0;
+				}
+
+				fw_pg_cnt++;
 			}
 		}
 	}
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 				       u64 buf_addr, size_t buf_len,
 				       int access, int dmasync)
 {
-	int page_cnt;
+	u32 fw_pages;
 	int rc;
 
 	q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		return PTR_ERR(q->umem);
 	}
 
-	page_cnt = ib_umem_page_count(q->umem);
-	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	fw_pages = ib_umem_page_count(q->umem) <<
+	    (q->umem->page_shift - FW_PAGE_SHIFT);
+
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
 	if (rc)
 		goto err0;
 
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		goto err0;
 	}
 
-	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+			   FW_PAGE_SHIFT);
 
 	return 0;
 
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto err1;
 
 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-			   &mr->info.pbl_info);
+			   &mr->info.pbl_info, mr->umem->page_shift);
 
 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
 	if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
 		case IB_WC_REG_MR:
 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
 			break;
+		case IB_WC_RDMA_READ:
+		case IB_WC_SEND:
+			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index fc8b885..4ddbcac 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1956,8 +1956,10 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2f..1ac5b85 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@
 static inline u32 rxe_crc32(struct rxe_dev *rxe,
 			    u32 crc, void *next, size_t len)
 {
+	u32 retval;
 	int err;
 
 	SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 		return crc32_le(crc, next, len);
 	}
 
-	return *(u32 *)shash_desc_ctx(shash);
+	retval = *(u32 *)shash_desc_ctx(shash);
+	barrier_data(shash_desc_ctx(shash));
+	return retval;
 }
 
 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e..073e667 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 
 		sge = ibwr->sg_list;
 		for (i = 0; i < num_sge; i++, sge++) {
-			if (qp->is_user && copy_from_user(p, (__user void *)
-					    (uintptr_t)sge->addr, sge->length))
-				return -EFAULT;
-
-			else if (!qp->is_user)
-				memcpy(p, (void *)(uintptr_t)sge->addr,
-				       sge->length);
+			memcpy(p, (void *)(uintptr_t)sge->addr,
+					sge->length);
 
 			p += sge->length;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 874b243..7871379 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
 				    struct ethtool_link_ksettings *cmd)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(netdev);
+	struct ipoib_dev_priv *priv = ipoib_priv(netdev);
 	struct ib_port_attr attr;
 	int ret, speed, width;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f..efe7402 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	napi_enable(&priv->napi);
 	ipoib_ib_dev_stop(dev);
 	return -1;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2869d1a..1015a63 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1590,12 +1590,14 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
 	ipoib_transport_dev_cleanup(dev);
 
+	netif_napi_del(&priv->napi);
+
 	ipoib_cm_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
 	kfree(priv->rx_ring);
 
 out:
+	netif_napi_del(&priv->napi);
 	return -ENOMEM;
 }
 
@@ -2237,6 +2240,7 @@ static struct net_device *ipoib_add_port(const char *format,
 
 device_init_failed:
 	free_netdev(priv->dev);
+	kfree(priv);
 
 alloc_mem_failed:
 	return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
 
 static void ipoib_remove_one(struct ib_device *device, void *client_data)
 {
-	struct ipoib_dev_priv *priv, *tmp;
+	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
 	struct list_head *dev_list = client_data;
 
 	if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		flush_workqueue(priv->wq);
 
 		unregister_netdev(priv->dev);
-		free_netdev(priv->dev);
+		if (device->free_rdma_netdev)
+			device->free_rdma_netdev(priv->dev);
+		else
+			free_netdev(priv->dev);
+
+		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+			kfree(cpriv);
+
 		kfree(priv);
 	}
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fc..081b33d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	down_write(&ppriv->vlan_rwsem);
 
 	/*
@@ -167,8 +167,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	rtnl_unlock();
 
-	if (result)
+	if (result) {
 		free_netdev(priv->dev);
+		kfree(priv);
+	}
 
 	return result;
 }
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	if (dev) {
 		free_netdev(dev);
+		kfree(priv);
 		return 0;
 	}
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index def723a..2354c74 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
 	ch->path.sgid = target->sgid;
 	ch->path.dgid = target->orig_dgid;
 	ch->path.pkey = target->pkey;
-	sa_path_set_service_id(&ch->path, target->service_id);
+	ch->path.service_id = target->service_id;
 
 	return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(ch, qp);
+	ib_destroy_qp(qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 485900f..abc266e 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -213,7 +213,7 @@ static int tm2_touchkey_probe(struct i2c_client *client,
 	/* led device */
 	touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME;
 	touchkey->led_dev.brightness = LED_FULL;
-	touchkey->led_dev.max_brightness = LED_FULL;
+	touchkey->led_dev.max_brightness = LED_ON;
 	touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set;
 
 	error = devm_led_classdev_register(&client->dev, &touchkey->led_dev);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index f11807d..400869e 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -256,6 +256,42 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+					     struct platform_device *pdev)
+{
+	unsigned long long hrv = 0;
+	acpi_status status;
+
+	if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
+	    axp20x_pek->axp20x->variant == AXP288_ID) {
+		status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
+					       "_HRV", NULL, &hrv);
+		if (ACPI_FAILURE(status))
+			dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
+
+		/*
+		 * On Cherry Trail platforms (hrv == 3), do not register the
+		 * input device if there is an "INTCFD9" or "ACPI0011" gpio
+		 * button ACPI device, as that handles the power button too,
+		 * and otherwise we end up reporting all presses twice.
+		 */
+		if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+				 acpi_dev_present("ACPI0011", NULL, -1)))
+			return false;
+
+	}
+
+	return true;
+}
+#else
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
+					     struct platform_device *pdev)
+{
+	return true;
+}
+#endif
+
 static int axp20x_pek_probe(struct platform_device *pdev)
 {
 	struct axp20x_pek *axp20x_pek;
@@ -268,13 +304,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 
 	axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
 
-	/*
-	 * Do not register the input device if there is an "INTCFD9"
-	 * gpio button ACPI device, that handles the power button too,
-	 * and otherwise we end up reporting all presses twice.
-	 */
-	if (!acpi_dev_found("INTCFD9") ||
-	    !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) {
+	if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
 		error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
 		if (error)
 			return error;
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index e37d372..f600f3a 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
 
 	if (!btns_desc) {
 		dev_err(dev, "ACPI Button Descriptors not found\n");
-		return ERR_PTR(-ENODEV);
+		button_info = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	/* The first package describes the collection */
@@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev)
 	}
 	if (collection_uid == -1) {
 		dev_err(dev, "Invalid Button Collection Descriptor\n");
-		return ERR_PTR(-ENODEV);
+		button_info = ERR_PTR(-ENODEV);
+		goto out;
 	}
 
 	/* There are package.count - 1 buttons + 1 terminating empty entry */
 	button_info = devm_kcalloc(dev, btns_desc->package.count,
 				   sizeof(*button_info), GFP_KERNEL);
-	if (!button_info)
-		return ERR_PTR(-ENOMEM);
+	if (!button_info) {
+		button_info = ERR_PTR(-ENOMEM);
+		goto out;
+	}
 
 	/* Parse the button descriptors */
 	for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) {
 		if (soc_button_parse_btn_desc(dev,
 					      &btns_desc->package.elements[i],
 					      collection_uid,
-					      &button_info[btn]))
-			return ERR_PTR(-ENODEV);
+					      &button_info[btn])) {
+			button_info = ERR_PTR(-ENODEV);
+			goto out;
+		}
 	}
 
+out:
+	kfree(buf.pointer);
 	return button_info;
 }
 
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e73d968..f1fa1f1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu LIFEBOOK E557   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 		},
 	},
 	{
+		/* Fujitsu LIFEBOOK E546  does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+		},
+	},
+	{
 		/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 		},
 	},
 	{
+		/* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+		},
+	},
+	{
 		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 131df9d..16c3046 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -176,6 +176,12 @@ static const char * const smbus_pnp_ids[] = {
 	NULL
 };
 
+static const char * const forcepad_pnp_ids[] = {
+	"SYN300D",
+	"SYN3014",
+	NULL
+};
+
 /*
  * Send a command to the synpatics touchpad by special commands
  */
@@ -397,6 +403,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse,
 {
 	int error;
 
+	memset(info, 0, sizeof(*info));
+
 	error = synaptics_identify(psmouse, info);
 	if (error)
 		return error;
@@ -480,13 +488,6 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
 	{ }
 };
 
-/* This list has been kindly provided by Synaptics. */
-static const char * const forcepad_pnp_ids[] = {
-	"SYN300D",
-	"SYN3014",
-	NULL
-};
-
 /*****************************************************************************
  *	Synaptics communications functions
  ****************************************************************************/
@@ -1687,7 +1688,8 @@ enum {
 	SYNAPTICS_INTERTOUCH_ON,
 };
 
-static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET;
+static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ?
+		SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF;
 module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644);
 MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device.");
 
@@ -1737,8 +1739,16 @@ static int synaptics_setup_intertouch(struct psmouse *psmouse,
 
 	if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) {
 		if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
-		    !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids))
+		    !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) {
+
+			if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids))
+				psmouse_info(psmouse,
+					     "Your touchpad (%s) says it can support a different bus. "
+					     "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n",
+					     psmouse->ps2dev.serio->firmware_id);
+
 			return -ENXIO;
+		}
 	}
 
 	psmouse_info(psmouse, "Trying to set up SMBus access\n");
@@ -1810,6 +1820,15 @@ int synaptics_init(struct psmouse *psmouse)
 	}
 
 	if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) {
+		if ((!IS_ENABLED(CONFIG_RMI4_SMB) ||
+		     !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) &&
+		    /* Forcepads need F21, which is not ready */
+		    !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) {
+			psmouse_warn(psmouse,
+				     "The touchpad can support a better bus than the too old PS/2 protocol. "
+				     "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n");
+		}
+
 		error = synaptics_setup_intertouch(psmouse, &info, true);
 		if (!error)
 			return PSMOUSE_SYNAPTICS_SMBUS;
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 77dad04..ad71a5e 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
 	if (!serio)
 		return -ENOMEM;
 
-	serio->id.type = SERIO_8042;
+	serio->id.type = SERIO_PS_PSTHRU;
 	serio->write = rmi_f03_pt_write;
 	serio->port_data = f03;
 
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index dea63e2..f5206e2 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -31,9 +31,6 @@
 #define F54_GET_REPORT          1
 #define F54_FORCE_CAL           2
 
-/* Fixed sizes of reports */
-#define F54_QUERY_LEN			27
-
 /* F54 capabilities */
 #define F54_CAP_BASELINE	(1 << 2)
 #define F54_CAP_IMAGE8		(1 << 3)
@@ -95,7 +92,6 @@ struct rmi_f54_reports {
 struct f54_data {
 	struct rmi_function *fn;
 
-	u8 qry[F54_QUERY_LEN];
 	u8 num_rx_electrodes;
 	u8 num_tx_electrodes;
 	u8 capabilities;
@@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn)
 {
 	int error;
 	struct f54_data *f54;
+	u8 buf[6];
 
 	f54 = dev_get_drvdata(&fn->dev);
 
 	error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
-			       &f54->qry, sizeof(f54->qry));
+			       buf, sizeof(buf));
 	if (error) {
 		dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
 			__func__);
 		return error;
 	}
 
-	f54->num_rx_electrodes = f54->qry[0];
-	f54->num_tx_electrodes = f54->qry[1];
-	f54->capabilities = f54->qry[2];
-	f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8);
-	f54->family = f54->qry[5];
+	f54->num_rx_electrodes = buf[0];
+	f54->num_tx_electrodes = buf[1];
+	f54->capabilities = buf[2];
+	f54->clock_rate = buf[3] | (buf[4] << 8);
+	f54->family = buf[5];
 
 	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
 		f54->num_rx_electrodes);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 09720d9..f932a83 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
 		},
 	},
+	{
+		/* Fujitsu UH554 laptop */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 813dd68..0dbcf10 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -526,6 +526,7 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 
+	disable_irq(client->irq);
 	silead_ts_set_power(client, SILEAD_POWER_OFF);
 	return 0;
 }
@@ -551,6 +552,8 @@ static int __maybe_unused silead_ts_resume(struct device *dev)
 		return -ENODEV;
 	}
 
+	enable_irq(client->irq);
+
 	return 0;
 }
 
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5..5c9759e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3879,11 +3879,9 @@ static void irte_ga_prepare(void *entry,
 			    u8 vector, u32 dest_apicid, int devid)
 {
 	struct irte_ga *irte = (struct irte_ga *) entry;
-	struct iommu_dev_data *dev_data = search_dev_data(devid);
 
 	irte->lo.val                      = 0;
 	irte->hi.val                      = 0;
-	irte->lo.fields_remap.guest_mode  = dev_data ? dev_data->use_vapic : 0;
 	irte->lo.fields_remap.int_type    = delivery_mode;
 	irte->lo.fields_remap.dm          = dest_mode;
 	irte->hi.fields.vector            = vector;
@@ -3939,10 +3937,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
 	struct irte_ga *irte = (struct irte_ga *) entry;
 	struct iommu_dev_data *dev_data = search_dev_data(devid);
 
-	if (!dev_data || !dev_data->use_vapic) {
+	if (!dev_data || !dev_data->use_vapic ||
+	    !irte->lo.fields_remap.guest_mode) {
 		irte->hi.fields.vector = vector;
 		irte->lo.fields_remap.destination = dest_apicid;
-		irte->lo.fields_remap.guest_mode = 0;
 		modify_irte_ga(devid, index, irte, NULL);
 	}
 }
@@ -4386,21 +4384,29 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 }
 
 static struct irq_chip amd_ir_chip = {
-	.irq_ack = ir_ack_apic_edge,
-	.irq_set_affinity = amd_ir_set_affinity,
-	.irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
-	.irq_compose_msi_msg = ir_compose_msi_msg,
+	.name			= "AMD-IR",
+	.irq_ack		= ir_ack_apic_edge,
+	.irq_set_affinity	= amd_ir_set_affinity,
+	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
+	.irq_compose_msi_msg	= ir_compose_msi_msg,
 };
 
 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 {
-	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
+	struct fwnode_handle *fn;
+
+	fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
+	if (!fn)
+		return -ENOMEM;
+	iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
+	irq_domain_free_fwnode(fn);
 	if (!iommu->ir_domain)
 		return -ENOMEM;
 
 	iommu->ir_domain->parent = arch_get_ir_parent_domain();
-	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
-
+	iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
+							     "AMD-IR-MSI",
+							     iommu->index);
 	return 0;
 }
 
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index cbf7763..c8b0329 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1808,10 +1808,9 @@ IOMMU_INIT_POST(detect_intel_iommu);
  * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
  * "Remapping Hardware Unit Hot Plug".
  */
-static u8 dmar_hp_uuid[] = {
-	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
-	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
-};
+static guid_t dmar_hp_guid =
+	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
+		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
 
 /*
  * Currently there's only one revision and BIOS will not check the revision id,
@@ -1824,7 +1823,7 @@ static u8 dmar_hp_uuid[] = {
 
 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
 {
-	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
 }
 
 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
@@ -1843,7 +1842,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
 	if (!dmar_detect_dsm(handle, func))
 		return 0;
 
-	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
 				      func, NULL, ACPI_TYPE_BUFFER);
 	if (!obj)
 		return -ENODEV;
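
For readers checking the dmar.c conversion above: guid_t is the little-endian UUID form, so GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B, ...) encodes byte-for-byte the raw table it replaces (the first three fields are stored little-endian, the trailing eight bytes verbatim). A minimal userspace sketch verifying that encoding; the helper name and layout here are illustrative, not kernel code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Old-style raw byte table, as removed from dmar.c. */
static const uint8_t dmar_hp_uuid_bytes[16] = {
	0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
	0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF,
};

/* Serialize GUID_INIT(a, b, c, d0..d7) the way guid_t stores it:
 * 'a', 'b' and 'c' little-endian, the trailing eight bytes verbatim. */
static void guid_to_bytes(uint32_t a, uint16_t b, uint16_t c,
			  const uint8_t d[8], uint8_t out[16])
{
	out[0] = a & 0xff; out[1] = (a >> 8) & 0xff;
	out[2] = (a >> 16) & 0xff; out[3] = (a >> 24) & 0xff;
	out[4] = b & 0xff; out[5] = (b >> 8) & 0xff;
	out[6] = c & 0xff; out[7] = (c >> 8) & 0xff;
	memcpy(&out[8], d, 8);
}

int main(void)
{
	static const uint8_t d[8] = {
		0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF,
	};
	uint8_t out[16];

	guid_to_bytes(0xD8C1A3A6, 0xBE9B, 0x4C9B, d, out);
	assert(memcmp(out, dmar_hp_uuid_bytes, sizeof(out)) == 0);
	printf("GUID_INIT encoding matches the old byte table\n");
	return 0;
}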
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index fc2765c..8500ded 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4315,7 +4315,7 @@ int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 	struct acpi_dmar_atsr *atsr;
 	struct dmar_atsr_unit *atsru;
 
-	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+	if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
 		return 0;
 
 	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
@@ -4565,7 +4565,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 	struct acpi_dmar_atsr *atsr;
 	struct acpi_dmar_reserved_memory *rmrr;
 
-	if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
+	if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
 		return 0;
 
 	list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index a190cbd..8fc641e 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -500,8 +500,9 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
 static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 {
 	struct ir_table *ir_table;
-	struct page *pages;
+	struct fwnode_handle *fn;
 	unsigned long *bitmap;
+	struct page *pages;
 
 	if (iommu->ir_table)
 		return 0;
@@ -525,15 +526,24 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 		goto out_free_pages;
 	}
 
-	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
-						    0, INTR_REMAP_TABLE_ENTRIES,
-						    NULL, &intel_ir_domain_ops,
-						    iommu);
+	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
+	if (!fn)
+		goto out_free_bitmap;
+
+	iommu->ir_domain =
+		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
+					    0, INTR_REMAP_TABLE_ENTRIES,
+					    fn, &intel_ir_domain_ops,
+					    iommu);
+	irq_domain_free_fwnode(fn);
 	if (!iommu->ir_domain) {
 		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
 		goto out_free_bitmap;
 	}
-	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
+	iommu->ir_msi_domain =
+		arch_create_remap_msi_irq_domain(iommu->ir_domain,
+						 "INTEL-IR-MSI",
+						 iommu->seq_id);
 
 	ir_table->base = page_address(pages);
 	ir_table->bitmap = bitmap;
@@ -1205,10 +1215,11 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
 }
 
 static struct irq_chip intel_ir_chip = {
-	.irq_ack = ir_ack_apic_edge,
-	.irq_set_affinity = intel_ir_set_affinity,
-	.irq_compose_msi_msg = intel_ir_compose_msi_msg,
-	.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
+	.name			= "INTEL-IR",
+	.irq_ack		= ir_ack_apic_edge,
+	.irq_set_affinity	= intel_ir_set_affinity,
+	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
+	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
 };
 
 static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 9f44ee8..8cb6082 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -103,7 +103,7 @@ static bool of_iommu_driver_present(struct device_node *np)
 	 * it never will be. We don't want to defer indefinitely, nor attempt
 	 * to dereference __iommu_of_table after it's been freed.
 	 */
-	if (system_state > SYSTEM_BOOTING)
+	if (system_state >= SYSTEM_RUNNING)
 		return false;
 
 	return of_match_node(&__iommu_of_table, np);
@@ -118,6 +118,7 @@ static const struct iommu_ops
 
 	ops = iommu_ops_from_fwnode(fwnode);
 	if ((ops && !ops->of_xlate) ||
+	    !of_device_is_available(iommu_spec->np) ||
 	    (!ops && !of_iommu_driver_present(iommu_spec->np)))
 		return NULL;
 
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 			ops = ERR_PTR(err);
 	}
 
+	/* Ignore all other errors apart from EPROBE_DEFER */
+	if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+		dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+		ops = NULL;
+	}
+
 	return ops;
 }
 
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 1210244..a1e07a7 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -212,7 +212,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
 	int bus_nr;
 	struct ipack_bus_device *bus;
 
-	bus = kzalloc(sizeof(struct ipack_bus_device), GFP_KERNEL);
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
 	if (!bus)
 		return NULL;
 
@@ -402,7 +402,6 @@ static int ipack_device_read_id(struct ipack_device *dev)
 	 * ID ROM contents */
 	dev->id = kmalloc(dev->id_avail, GFP_KERNEL);
 	if (!dev->id) {
-		dev_err(&dev->dev, "dev->id alloc failed.\n");
 		ret = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 478f8ac..676232a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -268,6 +268,12 @@
 	select IRQ_DOMAIN
 	select STMP_DEVICE
 
+config MVEBU_GICP
+	bool
+
+config MVEBU_ICU
+	bool
+
 config MVEBU_ODMI
 	bool
 	select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b64c59b..e88d856 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -69,10 +69,12 @@
 obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
 obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
+obj-$(CONFIG_MVEBU_GICP)		+= irq-mvebu-gicp.o
+obj-$(CONFIG_MVEBU_ICU)			+= irq-mvebu-icu.o
 obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
 obj-$(CONFIG_MVEBU_PIC)			+= irq-mvebu-pic.o
 obj-$(CONFIG_LS_SCFG_MSI)		+= irq-ls-scfg-msi.o
 obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
-obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o
+obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o irq-aspeed-i2c-ic.o
 obj-$(CONFIG_STM32_EXTI) 		+= irq-stm32-exti.o
 obj-$(CONFIG_QCOM_IRQ_COMBINER)		+= qcom-irq-combiner.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index eb0d4d4..b207b2c 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -34,25 +34,104 @@
 #include <asm/smp_plat.h>
 #include <asm/mach/irq.h>
 
-/* Interrupt Controller Registers Map */
-#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
-#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
-#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
-#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
+/*
+ * Overall diagram of the Armada XP interrupt controller:
+ *
+ *    To CPU 0                 To CPU 1
+ *
+ *       /\                       /\
+ *       ||                       ||
+ * +---------------+     +---------------+
+ * |               |	 |               |
+ * |    per-CPU    |	 |    per-CPU    |
+ * |  mask/unmask  |	 |  mask/unmask  |
+ * |     CPU0      |	 |     CPU1      |
+ * |               |	 |               |
+ * +---------------+	 +---------------+
+ *        /\                       /\
+ *        ||                       ||
+ *        \\_______________________//
+ *                     ||
+ *            +-------------------+
+ *            |                   |
+ *            | Global interrupt  |
+ *            |    mask/unmask    |
+ *            |                   |
+ *            +-------------------+
+ *                     /\
+ *                     ||
+ *               interrupt from
+ *                   device
+ *
+ * The "global interrupt mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
+ * to "main_int_base".
+ *
+ * The "per-CPU mask/unmask" is modified using the
+ * ARMADA_370_XP_INT_SET_MASK_OFFS and
+ * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
+ * "per_cpu_int_base". This base address points to a special address,
+ * which automatically accesses the registers of the current CPU.
+ *
+ * The per-CPU mask/unmask can also be adjusted using the global
+ * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
+ * to configure interrupt affinity.
+ *
+ * Due to this model, all interrupts need to be masked/unmasked at two
+ * different levels: at the global level and at the per-CPU level.
+ *
+ * This driver takes the following approach to deal with this:
+ *
+ *  - For global interrupts:
+ *
+ *    At ->map() time, a global interrupt is unmasked at the per-CPU
+ *    mask/unmask level. It is therefore unmasked at this level for
+ *    the current CPU, running the ->map() code. This ensures the
+ *    interrupt is unmasked at this level in non-SMP
+ *    configurations. In SMP configurations, the ->set_affinity()
+ *    callback is called, which readjusts the per-CPU mask/unmask
+ *    for the interrupt through the ARMADA_370_XP_INT_SOURCE_CTL()
+ *    register.
+ *
+ *    The ->mask() and ->unmask() operations only mask/unmask the
+ *    interrupt at the "global" level.
+ *
+ *    So, a global interrupt is enabled at the per-CPU level as soon
+ *    as it is mapped. At run time, the masking/unmasking takes place
+ *    at the global level.
+ *
+ *  - For per-CPU interrupts
+ *
+ *    At ->map() time, a per-CPU interrupt is unmasked at the global
+ *    mask/unmask level.
+ *
+ *    The ->mask() and ->unmask() operations mask/unmask the interrupt
+ *    at the per-CPU level.
+ *
+ *    So, a per-CPU interrupt is enabled at the global level as soon
+ *    as it is mapped. At run time, the masking/unmasking takes place
+ *    at the per-CPU level.
+ */
 
+/* Registers relative to main_int_base */
 #define ARMADA_370_XP_INT_CONTROL		(0x00)
+#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x04)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
 #define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF
 #define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)	((BIT(0) | BIT(8)) << cpuid)
 
-#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
+/* Registers relative to per_cpu_int_base */
+#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x08)
+#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0x0c)
 #define ARMADA_375_PPI_CAUSE			(0x10)
-
-#define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
-#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
-#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS        (0x8)
+#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
+#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
+#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
+#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
+#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
 
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
 
@@ -281,13 +360,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 		irq_set_percpu_devid(virq);
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					handle_percpu_devid_irq);
-
 	} else {
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					handle_level_irq);
 	}
 	irq_set_probe(virq);
-	irq_clear_status_flags(virq, IRQ_NOAUTOEN);
 
 	return 0;
 }
@@ -345,16 +422,40 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
 		ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
+static void armada_xp_mpic_reenable_percpu(void)
+{
+	unsigned int irq;
+
+	/* Re-enable per-CPU interrupts that were enabled before suspend */
+	for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+		if (virq == 0)
+			continue;
+
+		data = irq_get_irq_data(virq);
+
+		if (!irq_percpu_is_enabled(virq))
+			continue;
+
+		armada_370_xp_irq_unmask(data);
+	}
+}
+
 static int armada_xp_mpic_starting_cpu(unsigned int cpu)
 {
 	armada_xp_mpic_perf_init();
 	armada_xp_mpic_smp_cpu_init();
+	armada_xp_mpic_reenable_percpu();
 	return 0;
 }
 
 static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
 	armada_xp_mpic_perf_init();
+	armada_xp_mpic_reenable_percpu();
 	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
 	return 0;
 }
@@ -502,16 +603,27 @@ static void armada_370_xp_mpic_resume(void)
 		if (virq == 0)
 			continue;
 
-		if (!is_percpu_irq(irq))
+		data = irq_get_irq_data(virq);
+
+		if (!is_percpu_irq(irq)) {
+			/* Non per-CPU interrupts */
 			writel(irq, per_cpu_int_base +
 			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-		else
+			if (!irqd_irq_disabled(data))
+				armada_370_xp_irq_unmask(data);
+		} else {
+			/* Per-CPU interrupts */
 			writel(irq, main_int_base +
 			       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
 
-		data = irq_get_irq_data(virq);
-		if (!irqd_irq_disabled(data))
-			armada_370_xp_irq_unmask(data);
+			/*
+			 * Re-enable on the current CPU,
+			 * armada_xp_mpic_reenable_percpu() will take
+			 * care of secondary CPUs when they come up.
+			 */
+			if (irq_percpu_is_enabled(virq))
+				armada_370_xp_irq_unmask(data);
+		}
 	}
 
 	/* Reconfigure doorbells for IPIs and MSIs */
@@ -563,7 +675,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 		irq_domain_add_linear(node, nr_irqs,
 				&armada_370_xp_mpic_irq_ops, NULL);
 	BUG_ON(!armada_370_xp_mpic_domain);
-	armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;
+	irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);
 
 	/* Setup for the boot CPU */
 	armada_xp_mpic_perf_init();
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
new file mode 100644
index 0000000..815b88d
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -0,0 +1,115 @@
+/*
+ *  Aspeed 24XX/25XX I2C Interrupt Controller.
+ *
+ *  Copyright (C) 2012-2017 ASPEED Technology Inc.
+ *  Copyright 2017 IBM Corporation
+ *  Copyright 2017 Google, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+
+#define ASPEED_I2C_IC_NUM_BUS 14
+
+struct aspeed_i2c_ic {
+	void __iomem		*base;
+	int			parent_irq;
+	struct irq_domain	*irq_domain;
+};
+
+/*
+ * The aspeed chip provides a single hardware interrupt for all of the I2C
+ * busses, so we use a dummy interrupt chip to translate this single interrupt
+ * into multiple interrupts, each associated with a single I2C bus.
+ */
+static void aspeed_i2c_ic_irq_handler(struct irq_desc *desc)
+{
+	struct aspeed_i2c_ic *i2c_ic = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long bit, status;
+	unsigned int bus_irq;
+
+	chained_irq_enter(chip, desc);
+	status = readl(i2c_ic->base);
+	for_each_set_bit(bit, &status, ASPEED_I2C_IC_NUM_BUS) {
+		bus_irq = irq_find_mapping(i2c_ic->irq_domain, bit);
+		generic_handle_irq(bus_irq);
+	}
+	chained_irq_exit(chip, desc);
+}
+
+/*
+ * Set simple handler and mark IRQ as valid. Nothing interesting to do here
+ * since we are using a dummy interrupt chip.
+ */
+static int aspeed_i2c_ic_map_irq_domain(struct irq_domain *domain,
+					unsigned int irq, irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops aspeed_i2c_ic_irq_domain_ops = {
+	.map = aspeed_i2c_ic_map_irq_domain,
+};
+
+static int __init aspeed_i2c_ic_of_init(struct device_node *node,
+					struct device_node *parent)
+{
+	struct aspeed_i2c_ic *i2c_ic;
+	int ret = 0;
+
+	i2c_ic = kzalloc(sizeof(*i2c_ic), GFP_KERNEL);
+	if (!i2c_ic)
+		return -ENOMEM;
+
+	i2c_ic->base = of_iomap(node, 0);
+	if (IS_ERR(i2c_ic->base)) {
+		ret = PTR_ERR(i2c_ic->base);
+		goto err_free_ic;
+	}
+
+	i2c_ic->parent_irq = irq_of_parse_and_map(node, 0);
+	if (i2c_ic->parent_irq < 0) {
+		ret = i2c_ic->parent_irq;
+		goto err_iounmap;
+	}
+
+	i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS,
+						   &aspeed_i2c_ic_irq_domain_ops,
+						   NULL);
+	if (!i2c_ic->irq_domain) {
+		ret = -ENOMEM;
+		goto err_iounmap;
+	}
+
+	i2c_ic->irq_domain->name = "aspeed-i2c-domain";
+
+	irq_set_chained_handler_and_data(i2c_ic->parent_irq,
+					 aspeed_i2c_ic_irq_handler, i2c_ic);
+
+	pr_info("i2c controller registered, irq %d\n", i2c_ic->parent_irq);
+
+	return 0;
+
+err_iounmap:
+	iounmap(i2c_ic->base);
+err_free_ic:
+	kfree(i2c_ic);
+	return ret;
+}
+
+IRQCHIP_DECLARE(ast2400_i2c_ic, "aspeed,ast2400-i2c-ic", aspeed_i2c_ic_of_init);
+IRQCHIP_DECLARE(ast2500_i2c_ic, "aspeed,ast2500-i2c-ic", aspeed_i2c_ic_of_init);
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index d24451d..03ba477 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -186,7 +186,7 @@ static int avic_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-static struct irq_domain_ops avic_dom_ops = {
+static const struct irq_domain_ops avic_dom_ops = {
 	.map = avic_map,
 	.xlate = irq_domain_xlate_onetwocell,
 };
@@ -227,4 +227,5 @@ static int __init avic_of_init(struct device_node *node,
 	return 0;
 }
 
-IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init);
+IRQCHIP_DECLARE(ast2400_vic, "aspeed,ast2400-vic", avic_of_init);
+IRQCHIP_DECLARE(ast2500_vic, "aspeed,ast2500-vic", avic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 863e073..993a842 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -280,7 +280,7 @@ static int gicv2m_allocate_domains(struct irq_domain *parent)
 		return -ENOMEM;
 	}
 
-	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
 	inner_domain->parent = parent;
 	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
 					       &gicv2m_msi_domain_info,
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index aee1c60..7793121 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -41,27 +41,22 @@ static struct irq_chip its_msi_irq_chip = {
 	.irq_write_msi_msg	= pci_msi_domain_write_msg,
 };
 
-struct its_pci_alias {
-	struct pci_dev	*pdev;
-	u32		count;
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev)
+static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
 {
-	int msi, msix;
+	int msi, msix, *count = data;
 
 	msi = max(pci_msi_vec_count(pdev), 0);
 	msix = max(pci_msix_vec_count(pdev), 0);
+	*count += max(msi, msix);
 
-	return max(msi, msix);
+	return 0;
 }
 
 static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
 {
-	struct its_pci_alias *dev_alias = data;
+	struct pci_dev **alias_dev = data;
 
-	if (pdev != dev_alias->pdev)
-		dev_alias->count += its_pci_msi_vec_count(pdev);
+	*alias_dev = pdev;
 
 	return 0;
 }
@@ -69,9 +64,9 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
 static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 			       int nvec, msi_alloc_info_t *info)
 {
-	struct pci_dev *pdev;
-	struct its_pci_alias dev_alias;
+	struct pci_dev *pdev, *alias_dev;
 	struct msi_domain_info *msi_info;
+	int alias_count = 0;
 
 	if (!dev_is_pci(dev))
 		return -EINVAL;
@@ -79,16 +74,20 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 	msi_info = msi_get_domain_info(domain->parent);
 
 	pdev = to_pci_dev(dev);
-	dev_alias.pdev = pdev;
-	dev_alias.count = nvec;
-
-	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
+	/*
+	 * If pdev is downstream of any aliasing bridges, take an upper
+	 * bound of how many other vectors could map to the same DevID.
+	 */
+	pci_for_each_dma_alias(pdev, its_get_pci_alias, &alias_dev);
+	if (alias_dev != pdev && alias_dev->subordinate)
+		pci_walk_bus(alias_dev->subordinate, its_pci_msi_vec_count,
+			     &alias_count);
 
 	/* ITS specific DeviceID, as the core ITS ignores dev. */
 	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
 	return msi_info->ops->msi_prepare(domain->parent,
-					  dev, dev_alias.count, info);
+					  dev, max(nvec, alias_count), info);
 }
 
 static struct msi_domain_ops its_pci_msi_ops = {
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 9e9dda3..249240d 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -86,7 +86,7 @@ static struct msi_domain_info its_pmsi_domain_info = {
 	.chip	= &its_pmsi_irq_chip,
 };
 
-static struct of_device_id its_device_id[] = {
+static const struct of_device_id its_device_id[] = {
 	{	.compatible	= "arm,gic-v3-its",	},
 	{},
 };
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 45ea1933..6893287 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -644,9 +644,12 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	target_col = &its_dev->its->collections[cpu];
-	its_send_movi(its_dev, target_col, id);
-	its_dev->event_map.col_map[id] = cpu;
+	/* don't set the affinity when the target cpu is the same as the current one */
+	if (cpu != its_dev->event_map.col_map[id]) {
+		target_col = &its_dev->its->collections[cpu];
+		its_send_movi(its_dev, target_col, id);
+		its_dev->event_map.col_map[id] = cpu;
+	}
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -688,9 +691,11 @@ static struct irq_chip its_irq_chip = {
  */
 #define IRQS_PER_CHUNK_SHIFT	5
 #define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
+#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
 
 static unsigned long *lpi_bitmap;
 static u32 lpi_chunks;
+static u32 lpi_id_bits;
 static DEFINE_SPINLOCK(lpi_lock);
 
 static int its_lpi_to_chunk(int lpi)
@@ -786,17 +791,13 @@ static void its_lpi_free(struct event_lpi_map *map)
 }
 
 /*
- * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
  * deal with (one configuration byte per interrupt). PENDBASE has to
  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
  */
-#define LPI_PROPBASE_SZ		SZ_64K
-#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)
-
-/*
- * This is how many bits of ID we need, including the useless ones.
- */
-#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)
+#define LPI_NRBITS		lpi_id_bits
+#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
 
 #define LPI_PROP_DEFAULT_PRIO	0xa0
 
@@ -804,6 +805,7 @@ static int __init its_alloc_lpi_tables(void)
 {
 	phys_addr_t paddr;
 
+	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
 	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
 					   get_order(LPI_PROPBASE_SZ));
 	if (!gic_rdists->prop_page) {
@@ -822,7 +824,7 @@ static int __init its_alloc_lpi_tables(void)
 	/* Make sure the GIC will observe the written configuration */
 	gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
 
-	return 0;
+	return its_lpi_init(lpi_id_bits);
 }
 
 static const char *its_base_type_string[] = {
@@ -1097,7 +1099,7 @@ static void its_cpu_init_lpis(void)
 		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
 		 */
 		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
-					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
+					get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
 		if (!pend_page) {
 			pr_err("Failed to allocate PENDBASE for CPU%d\n",
 			       smp_processor_id());
@@ -1661,7 +1663,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 	}
 
 	inner_domain->parent = its_parent;
-	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
 	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
 	info->ops = &its_msi_domain_ops;
 	info->data = its;
@@ -1801,7 +1803,7 @@ int its_cpu_init(void)
 	return 0;
 }
 
-static struct of_device_id its_device_id[] = {
+static const struct of_device_id its_device_id[] = {
 	{	.compatible	= "arm,gic-v3-its",	},
 	{},
 };
@@ -1833,6 +1835,78 @@ static int __init its_of_probe(struct device_node *node)
 
 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
 
+#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+struct its_srat_map {
+	/* numa node id */
+	u32	numa_node;
+	/* GIC ITS ID */
+	u32	its_id;
+};
+
+static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static int its_in_srat __initdata;
+
+static int __init acpi_get_its_numa_node(u32 its_id)
+{
+	int i;
+
+	for (i = 0; i < its_in_srat; i++) {
+		if (its_id == its_srat_maps[i].its_id)
+			return its_srat_maps[i].numa_node;
+	}
+	return NUMA_NO_NODE;
+}
+
+static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
+			 const unsigned long end)
+{
+	int node;
+	struct acpi_srat_gic_its_affinity *its_affinity;
+
+	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
+	if (!its_affinity)
+		return -EINVAL;
+
+	if (its_affinity->header.length < sizeof(*its_affinity)) {
+		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
+			its_affinity->header.length);
+		return -EINVAL;
+	}
+
+	if (its_in_srat >= MAX_NUMNODES) {
+		pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
+				MAX_NUMNODES);
+		return -EINVAL;
+	}
+
+	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
+
+	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
+		return 0;
+	}
+
+	its_srat_maps[its_in_srat].numa_node = node;
+	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
+	its_in_srat++;
+	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
+		its_affinity->proximity_domain, its_affinity->its_id, node);
+
+	return 0;
+}
+
+static void __init acpi_table_parse_srat_its(void)
+{
+	acpi_table_parse_entries(ACPI_SIG_SRAT,
+			sizeof(struct acpi_table_srat),
+			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+			gic_acpi_parse_srat_its, 0);
+}
+#else
+static void __init acpi_table_parse_srat_its(void)	{ }
+static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+#endif
+
 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
 					  const unsigned long end)
 {
@@ -1861,7 +1935,8 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
 		goto dom_err;
 	}
 
-	err = its_probe_one(&res, dom_handle, NUMA_NO_NODE);
+	err = its_probe_one(&res, dom_handle,
+			acpi_get_its_numa_node(its_entry->translation_id));
 	if (!err)
 		return 0;
 
@@ -1873,6 +1948,7 @@ static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
 
 static void __init its_acpi_probe(void)
 {
+	acpi_table_parse_srat_its();
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
 			      gic_acpi_parse_madt_its, 0);
 }
@@ -1898,8 +1974,5 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
 	}
 
 	gic_rdists = rdists;
-	its_alloc_lpi_tables();
-	its_lpi_init(rdists->id_bits);
-
-	return 0;
+	return its_alloc_lpi_tables();
 }
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c132f29..dbffb7a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -645,6 +645,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	int enabled;
 	u64 val;
 
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
 	if (gic_irq_in_rdist(d))
 		return -EINVAL;
 
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 1aec12c..7aafbb0 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -307,7 +307,7 @@ static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	return 0;
 }
 
-static struct irq_domain_ops i8259A_ops = {
+static const struct irq_domain_ops i8259A_ops = {
 	.map = i8259A_irq_domain_map,
 	.xlate = irq_domain_xlate_onecell,
 };
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 9463f35..bb36f57 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -200,7 +200,7 @@ static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
 					    &parent_fwspec);
 }
 
-static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
 	.translate	= imx_gpcv2_domain_translate,
 	.alloc		= imx_gpcv2_domain_alloc,
 	.free		= irq_domain_free_irqs_common,
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 31d6b5a..567b29c 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -228,7 +228,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
 	return 0;
 }
 
-static struct irq_domain_ops mbigen_domain_ops = {
+static const struct irq_domain_ops mbigen_domain_ops = {
 	.translate	= mbigen_domain_translate,
 	.alloc		= mbigen_irq_domain_alloc,
 	.free		= irq_domain_free_irqs_common,
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index b247f3c..0a8ed1c 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -240,7 +240,7 @@ static void mips_cpu_register_ipi_domain(struct device_node *of_node)
 					      ipi_domain_state);
 	if (!ipi_domain)
 		panic("Failed to add MIPS CPU IPI domain");
-	ipi_domain->bus_token = DOMAIN_BUS_IPI;
+	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
 }
 
 #else /* !CONFIG_GENERIC_IRQ_IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index eb7fbe1..832ebf40 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
 }
 
 #ifdef CONFIG_CLKSRC_MIPS_GIC
-u64 gic_read_count(void)
+u64 notrace gic_read_count(void)
 {
 	unsigned int hi, hi2, lo;
 
@@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void)
 	return bits;
 }
 
-void gic_write_compare(u64 cnt)
+void notrace gic_write_compare(u64 cnt)
 {
 	if (mips_cm_is64) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt)
 	}
 }
 
-void gic_write_cpu_compare(u64 cnt, int cpu)
+void notrace gic_write_cpu_compare(u64 cnt, int cpu)
 {
 	unsigned long flags;
 
@@ -874,7 +874,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
 	}
 }
 
-static struct irq_domain_ops gic_ipi_domain_ops = {
+static const struct irq_domain_ops gic_ipi_domain_ops = {
 	.xlate = gic_ipi_domain_xlate,
 	.alloc = gic_ipi_domain_alloc,
 	.free = gic_ipi_domain_free,
@@ -960,7 +960,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
 		panic("Failed to add GIC IPI domain");
 
 	gic_ipi_domain->name = "mips-gic-ipi";
-	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
+	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
 
 	if (node &&
 	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
new file mode 100644
index 0000000..b283fc9
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "irq-mvebu-gicp.h"
+
+#define GICP_SETSPI_NSR_OFFSET	0x0
+#define GICP_CLRSPI_NSR_OFFSET	0x8
+
+struct mvebu_gicp_spi_range {
+	unsigned int start;
+	unsigned int count;
+};
+
+struct mvebu_gicp {
+	struct mvebu_gicp_spi_range *spi_ranges;
+	unsigned int spi_ranges_cnt;
+	unsigned int spi_cnt;
+	unsigned long *spi_bitmap;
+	spinlock_t spi_lock;
+	struct resource *res;
+	struct device *dev;
+};
+
+static int gicp_idx_to_spi(struct mvebu_gicp *gicp, int idx)
+{
+	int i;
+
+	for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+		struct mvebu_gicp_spi_range *r = &gicp->spi_ranges[i];
+
+		if (idx < r->count)
+			return r->start + idx;
+
+		idx -= r->count;
+	}
+
+	return -EINVAL;
+}
+
+int mvebu_gicp_get_doorbells(struct device_node *dn, phys_addr_t *setspi,
+			     phys_addr_t *clrspi)
+{
+	struct platform_device *pdev;
+	struct mvebu_gicp *gicp;
+
+	pdev = of_find_device_by_node(dn);
+	if (!pdev)
+		return -ENODEV;
+
+	gicp = platform_get_drvdata(pdev);
+	if (!gicp)
+		return -ENODEV;
+
+	*setspi = gicp->res->start + GICP_SETSPI_NSR_OFFSET;
+	*clrspi = gicp->res->start + GICP_CLRSPI_NSR_OFFSET;
+
+	return 0;
+}
+
+static void gicp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mvebu_gicp *gicp = data->chip_data;
+	phys_addr_t setspi = gicp->res->start + GICP_SETSPI_NSR_OFFSET;
+
+	msg->data = data->hwirq;
+	msg->address_lo = lower_32_bits(setspi);
+	msg->address_hi = upper_32_bits(setspi);
+}
+
+static struct irq_chip gicp_irq_chip = {
+	.name			= "GICP",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_set_type		= irq_chip_set_type_parent,
+	.irq_compose_msi_msg	= gicp_compose_msi_msg,
+};
+
+static int gicp_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *args)
+{
+	struct mvebu_gicp *gicp = domain->host_data;
+	struct irq_fwspec fwspec;
+	unsigned int hwirq;
+	int ret;
+
+	spin_lock(&gicp->spi_lock);
+	hwirq = find_first_zero_bit(gicp->spi_bitmap, gicp->spi_cnt);
+	if (hwirq == gicp->spi_cnt) {
+		spin_unlock(&gicp->spi_lock);
+		return -ENOSPC;
+	}
+	__set_bit(hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+
+	fwspec.fwnode = domain->parent->fwnode;
+	fwspec.param_count = 3;
+	fwspec.param[0] = GIC_SPI;
+	fwspec.param[1] = gicp_idx_to_spi(gicp, hwirq) - 32;
+	/*
+	 * Assume edge rising for now, it will be properly set when
+	 * ->set_type() is called
+	 */
+	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+	if (ret) {
+		dev_err(gicp->dev, "Cannot allocate parent IRQ\n");
+		goto free_hwirq;
+	}
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &gicp_irq_chip, gicp);
+	if (ret)
+		goto free_irqs_parent;
+
+	return 0;
+
+free_irqs_parent:
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+free_hwirq:
+	spin_lock(&gicp->spi_lock);
+	__clear_bit(hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+	return ret;
+}
+
+static void gicp_irq_domain_free(struct irq_domain *domain,
+				 unsigned int virq, unsigned int nr_irqs)
+{
+	struct mvebu_gicp *gicp = domain->host_data;
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	if (d->hwirq >= gicp->spi_cnt) {
+		dev_err(gicp->dev, "Invalid hwirq %lu\n", d->hwirq);
+		return;
+	}
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+	spin_lock(&gicp->spi_lock);
+	__clear_bit(d->hwirq, gicp->spi_bitmap);
+	spin_unlock(&gicp->spi_lock);
+}
+
+static const struct irq_domain_ops gicp_domain_ops = {
+	.alloc	= gicp_irq_domain_alloc,
+	.free	= gicp_irq_domain_free,
+};
+
+static struct irq_chip gicp_msi_irq_chip = {
+	.name		= "GICP",
+	.irq_set_type	= irq_chip_set_type_parent,
+};
+
+static struct msi_domain_ops gicp_msi_ops = {
+};
+
+static struct msi_domain_info gicp_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &gicp_msi_ops,
+	.chip	= &gicp_msi_irq_chip,
+};
+
+static int mvebu_gicp_probe(struct platform_device *pdev)
+{
+	struct mvebu_gicp *gicp;
+	struct irq_domain *inner_domain, *plat_domain, *parent_domain;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *irq_parent_dn;
+	int ret, i;
+
+	gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
+	if (!gicp)
+		return -ENOMEM;
+
+	gicp->dev = &pdev->dev;
+
+	gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!gicp->res)
+		return -ENODEV;
+
+	ret = of_property_count_u32_elems(node, "marvell,spi-ranges");
+	if (ret < 0)
+		return ret;
+
+	gicp->spi_ranges_cnt = ret / 2;
+
+	gicp->spi_ranges =
+		devm_kzalloc(&pdev->dev,
+			     gicp->spi_ranges_cnt *
+			     sizeof(struct mvebu_gicp_spi_range),
+			     GFP_KERNEL);
+	if (!gicp->spi_ranges)
+		return -ENOMEM;
+
+	for (i = 0; i < gicp->spi_ranges_cnt; i++) {
+		of_property_read_u32_index(node, "marvell,spi-ranges",
+					   i * 2,
+					   &gicp->spi_ranges[i].start);
+
+		of_property_read_u32_index(node, "marvell,spi-ranges",
+					   i * 2 + 1,
+					   &gicp->spi_ranges[i].count);
+
+		gicp->spi_cnt += gicp->spi_ranges[i].count;
+	}
+
+	gicp->spi_bitmap = devm_kzalloc(&pdev->dev,
+				BITS_TO_LONGS(gicp->spi_cnt) * sizeof(long),
+				GFP_KERNEL);
+	if (!gicp->spi_bitmap)
+		return -ENOMEM;
+
+	irq_parent_dn = of_irq_find_parent(node);
+	if (!irq_parent_dn) {
+		dev_err(&pdev->dev, "failed to find parent IRQ node\n");
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(irq_parent_dn);
+	if (!parent_domain) {
+		dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
+		return -ENODEV;
+	}
+
+	inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+						   gicp->spi_cnt,
+						   of_node_to_fwnode(node),
+						   &gicp_domain_ops, gicp);
+	if (!inner_domain)
+		return -ENOMEM;
+
+
+	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+						     &gicp_msi_domain_info,
+						     inner_domain);
+	if (!plat_domain) {
+		irq_domain_remove(inner_domain);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, gicp);
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_gicp_of_match[] = {
+	{ .compatible = "marvell,ap806-gicp", },
+	{},
+};
+
+static struct platform_driver mvebu_gicp_driver = {
+	.probe  = mvebu_gicp_probe,
+	.driver = {
+		.name = "mvebu-gicp",
+		.of_match_table = mvebu_gicp_of_match,
+	},
+};
+builtin_platform_driver(mvebu_gicp_driver);
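
To make the index-to-SPI translation done by gicp_idx_to_spi() above concrete, here is a small standalone sketch with a hypothetical pair of "marvell,spi-ranges" blocks; the range values are made up for illustration, only the walk mirrors the driver's logic:

#include <stdio.h>

struct spi_range { unsigned int start, count; };

/* Same walk as gicp_idx_to_spi(): consume ranges until idx falls inside one. */
static int idx_to_spi(const struct spi_range *r, unsigned int nr_ranges, int idx)
{
	for (unsigned int i = 0; i < nr_ranges; i++) {
		if (idx < (int)r[i].count)
			return r[i].start + idx;
		idx -= r[i].count;
	}
	return -1;
}

int main(void)
{
	/* Hypothetical "marvell,spi-ranges": two blocks of 64 SPIs each. */
	const struct spi_range ranges[] = { { 64, 64 }, { 288, 64 } };

	printf("idx 0  -> SPI %d\n", idx_to_spi(ranges, 2, 0));   /* 64  */
	printf("idx 63 -> SPI %d\n", idx_to_spi(ranges, 2, 63));  /* 127 */
	printf("idx 64 -> SPI %d\n", idx_to_spi(ranges, 2, 64));  /* 288 */
	return 0;
}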
diff --git a/drivers/irqchip/irq-mvebu-gicp.h b/drivers/irqchip/irq-mvebu-gicp.h
new file mode 100644
index 0000000..98535e8
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-gicp.h
@@ -0,0 +1,11 @@
+#ifndef __MVEBU_GICP_H__
+#define __MVEBU_GICP_H__
+
+#include <linux/types.h>
+
+struct device_node;
+
+int mvebu_gicp_get_doorbells(struct device_node *dn, phys_addr_t *setspi,
+			     phys_addr_t *clrspi);
+
+#endif /* __MVEBU_GICP_H__ */
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
new file mode 100644
index 0000000..e18c48d
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Hanna Hawa <hannah@marvell.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interrupt-controller/mvebu-icu.h>
+
+#include "irq-mvebu-gicp.h"
+
+/* ICU registers */
+#define ICU_SETSPI_NSR_AL	0x10
+#define ICU_SETSPI_NSR_AH	0x14
+#define ICU_CLRSPI_NSR_AL	0x18
+#define ICU_CLRSPI_NSR_AH	0x1c
+#define ICU_INT_CFG(x)          (0x100 + 4 * (x))
+#define   ICU_INT_ENABLE	BIT(24)
+#define   ICU_IS_EDGE		BIT(28)
+#define   ICU_GROUP_SHIFT	29
+
+/* ICU definitions */
+#define ICU_MAX_IRQS		207
+#define ICU_SATA0_ICU_ID	109
+#define ICU_SATA1_ICU_ID	107
+
+struct mvebu_icu {
+	struct irq_chip irq_chip;
+	void __iomem *base;
+	struct irq_domain *domain;
+	struct device *dev;
+};
+
+struct mvebu_icu_irq_data {
+	struct mvebu_icu *icu;
+	unsigned int icu_group;
+	unsigned int type;
+};
+
+static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct irq_data *d = irq_get_irq_data(desc->irq);
+	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+	struct mvebu_icu *icu = icu_irqd->icu;
+	unsigned int icu_int;
+
+	if (msg->address_lo || msg->address_hi) {
+		/* Configure the ICU with irq number & type */
+		icu_int = msg->data | ICU_INT_ENABLE;
+		if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
+			icu_int |= ICU_IS_EDGE;
+		icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT;
+	} else {
+		/* De-configure the ICU */
+		icu_int = 0;
+	}
+
+	writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq));
+
+	/*
+	 * The SATA unit has 2 ports, and a dedicated ICU entry per
+	 * port. The ahci sata driver supports only one interrupt per
+	 * SATA unit. To solve this conflict, we configure the 2 SATA
+	 * wired interrupts in the south bridge into 1 GIC interrupt in
+	 * the north bridge. Even if only a single port is enabled,
+	 * both interrupts are configured whenever the SATA node is
+	 * enabled (regardless of which port is actually in use).
+	 */
+	if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) {
+		writel_relaxed(icu_int,
+			       icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID));
+		writel_relaxed(icu_int,
+			       icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID));
+	}
+}
+
+static int
+mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+			       unsigned long *hwirq, unsigned int *type)
+{
+	struct mvebu_icu *icu = d->host_data;
+	unsigned int icu_group;
+
+	/* Check the count of the parameters in dt */
+	if (WARN_ON(fwspec->param_count < 3)) {
+		dev_err(icu->dev, "wrong ICU parameter count %d\n",
+			fwspec->param_count);
+		return -EINVAL;
+	}
+
+	/* Only ICU group type is handled */
+	icu_group = fwspec->param[0];
+	if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
+	    icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
+		dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
+		return -EINVAL;
+	}
+
+	*hwirq = fwspec->param[1];
+	if (*hwirq >= ICU_MAX_IRQS) {
+		dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
+		return -EINVAL;
+	}
+
+	/* Mask the type to prevent wrong DT configuration */
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static int
+mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+			   unsigned int nr_irqs, void *args)
+{
+	int err;
+	unsigned long hwirq;
+	struct irq_fwspec *fwspec = args;
+	struct mvebu_icu *icu = platform_msi_get_host_data(domain);
+	struct mvebu_icu_irq_data *icu_irqd;
+
+	icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
+	if (!icu_irqd)
+		return -ENOMEM;
+
+	err = mvebu_icu_irq_domain_translate(domain, fwspec, &hwirq,
+					     &icu_irqd->type);
+	if (err) {
+		dev_err(icu->dev, "failed to translate ICU parameters\n");
+		goto free_irqd;
+	}
+
+	icu_irqd->icu_group = fwspec->param[0];
+	icu_irqd->icu = icu;
+
+	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+	if (err) {
+		dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
+		goto free_irqd;
+	}
+
+	/* Make sure there is no interrupt left pending by the firmware */
+	err = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
+	if (err)
+		goto free_msi;
+
+	err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+					    &icu->irq_chip, icu_irqd);
+	if (err) {
+		dev_err(icu->dev, "failed to set the data to IRQ domain\n");
+		goto free_msi;
+	}
+
+	return 0;
+
+free_msi:
+	platform_msi_domain_free(domain, virq, nr_irqs);
+free_irqd:
+	kfree(icu_irqd);
+	return err;
+}
+
+static void
+mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+			  unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_get_irq_data(virq);
+	struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
+
+	kfree(icu_irqd);
+
+	platform_msi_domain_free(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops mvebu_icu_domain_ops = {
+	.translate = mvebu_icu_irq_domain_translate,
+	.alloc     = mvebu_icu_irq_domain_alloc,
+	.free      = mvebu_icu_irq_domain_free,
+};
+
+static int mvebu_icu_probe(struct platform_device *pdev)
+{
+	struct mvebu_icu *icu;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *gicp_dn;
+	struct resource *res;
+	phys_addr_t setspi, clrspi;
+	u32 i, icu_int;
+	int ret;
+
+	icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
+			   GFP_KERNEL);
+	if (!icu)
+		return -ENOMEM;
+
+	icu->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	icu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(icu->base)) {
+		dev_err(&pdev->dev, "Failed to map icu base address.\n");
+		return PTR_ERR(icu->base);
+	}
+
+	icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					    "ICU.%x",
+					    (unsigned int)res->start);
+	if (!icu->irq_chip.name)
+		return -ENOMEM;
+
+	icu->irq_chip.irq_mask = irq_chip_mask_parent;
+	icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
+	icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
+	icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
+#ifdef CONFIG_SMP
+	icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
+#endif
+
+	/*
+	 * We're probed after MSI domains have been resolved, so force
+	 * resolution here.
+	 */
+	pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
+						 DOMAIN_BUS_PLATFORM_MSI);
+	if (!pdev->dev.msi_domain)
+		return -EPROBE_DEFER;
+
+	gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
+	if (!gicp_dn)
+		return -ENODEV;
+
+	ret = mvebu_gicp_get_doorbells(gicp_dn, &setspi, &clrspi);
+	if (ret)
+		return ret;
+
+	/* Set Clear/Set ICU SPI message address in AP */
+	writel_relaxed(upper_32_bits(setspi), icu->base + ICU_SETSPI_NSR_AH);
+	writel_relaxed(lower_32_bits(setspi), icu->base + ICU_SETSPI_NSR_AL);
+	writel_relaxed(upper_32_bits(clrspi), icu->base + ICU_CLRSPI_NSR_AH);
+	writel_relaxed(lower_32_bits(clrspi), icu->base + ICU_CLRSPI_NSR_AL);
+
+	/*
+	 * Clear all ICU interrupts of type SPI_NSR, as required to
+	 * avoid unpredictable SPI assignments done by firmware.
+	 */
+	for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
+		icu_int = readl(icu->base + ICU_INT_CFG(i));
+		if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
+			writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
+	}
+
+	icu->domain =
+		platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
+						  mvebu_icu_write_msg,
+						  &mvebu_icu_domain_ops, icu);
+	if (!icu->domain) {
+		dev_err(&pdev->dev, "Failed to create ICU domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_icu_of_match[] = {
+	{ .compatible = "marvell,cp110-icu", },
+	{},
+};
+
+static struct platform_driver mvebu_icu_driver = {
+	.probe  = mvebu_icu_probe,
+	.driver = {
+		.name = "mvebu-icu",
+		.of_match_table = mvebu_icu_of_match,
+	},
+};
+builtin_platform_driver(mvebu_icu_driver);
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 6a9a3e7..dd9d5d1 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -70,7 +70,7 @@ static struct or1k_pic_dev or1k_pic_level = {
 		.name = "or1k-PIC-level",
 		.irq_unmask = or1k_pic_unmask,
 		.irq_mask = or1k_pic_mask,
-		.irq_mask_ack = or1k_pic_mask,
+		.irq_mask_ack = or1k_pic_mask_ack,
 	},
 	.handle = handle_level_irq,
 	.flags = IRQ_LEVEL | IRQ_NOPROBE,
diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c
index c378768..b832759 100644
--- a/drivers/irqchip/irq-renesas-h8300h.c
+++ b/drivers/irqchip/irq-renesas-h8300h.c
@@ -67,7 +67,7 @@ static int irq_map(struct irq_domain *h, unsigned int virq,
        return 0;
 }
 
-static struct irq_domain_ops irq_ops = {
+static const struct irq_domain_ops irq_ops = {
        .map    = irq_map,
        .xlate  = irq_domain_xlate_onecell,
 };
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index af8c6c6..71d8139 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -73,7 +73,7 @@ static __init int irq_map(struct irq_domain *h, unsigned int virq,
        return 0;
 }
 
-static struct irq_domain_ops irq_ops = {
+static const struct irq_domain_ops irq_ops = {
        .map    = irq_map,
        .xlate  = irq_domain_xlate_onecell,
 };
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 668730c..a412b5d 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -25,6 +25,29 @@
 
 #define SUNXI_NMI_SRC_TYPE_MASK	0x00000003
 
+#define SUNXI_NMI_IRQ_BIT	BIT(0)
+
+#define SUN6I_R_INTC_CTRL	0x0c
+#define SUN6I_R_INTC_PENDING	0x10
+#define SUN6I_R_INTC_ENABLE	0x40
+
+/*
+ * For deprecated sun6i-a31-sc-nmi compatible.
+ * Registers are offset by 0x0c.
+ */
+#define SUN6I_R_INTC_NMI_OFFSET	0x0c
+#define SUN6I_NMI_CTRL		(SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET)
+#define SUN6I_NMI_PENDING	(SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET)
+#define SUN6I_NMI_ENABLE	(SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET)
+
+#define SUN7I_NMI_CTRL		0x00
+#define SUN7I_NMI_PENDING	0x04
+#define SUN7I_NMI_ENABLE	0x08
+
+#define SUN9I_NMI_CTRL		0x00
+#define SUN9I_NMI_ENABLE	0x04
+#define SUN9I_NMI_PENDING	0x08
+
 enum {
 	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
 	SUNXI_SRC_TYPE_EDGE_FALLING,
@@ -38,22 +61,28 @@ struct sunxi_sc_nmi_reg_offs {
 	u32 enable;
 };
 
-static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
-	.ctrl	= 0x00,
-	.pend	= 0x04,
-	.enable	= 0x08,
+static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = {
+	.ctrl	= SUN6I_R_INTC_CTRL,
+	.pend	= SUN6I_R_INTC_PENDING,
+	.enable	= SUN6I_R_INTC_ENABLE,
 };
 
-static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
-	.ctrl	= 0x00,
-	.pend	= 0x04,
-	.enable	= 0x34,
+static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
+	.ctrl	= SUN6I_NMI_CTRL,
+	.pend	= SUN6I_NMI_PENDING,
+	.enable	= SUN6I_NMI_ENABLE,
 };
 
-static struct sunxi_sc_nmi_reg_offs sun9i_reg_offs = {
-	.ctrl	= 0x00,
-	.pend	= 0x08,
-	.enable	= 0x04,
+static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
+	.ctrl	= SUN7I_NMI_CTRL,
+	.pend	= SUN7I_NMI_PENDING,
+	.enable	= SUN7I_NMI_ENABLE,
+};
+
+static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
+	.ctrl	= SUN9I_NMI_CTRL,
+	.pend	= SUN9I_NMI_PENDING,
+	.enable	= SUN9I_NMI_ENABLE,
 };
 
 static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
@@ -128,7 +157,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
 }
 
 static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
-					struct sunxi_sc_nmi_reg_offs *reg_offs)
+					const struct sunxi_sc_nmi_reg_offs *reg_offs)
 {
 	struct irq_domain *domain;
 	struct irq_chip_generic *gc;
@@ -187,8 +216,11 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
 	gc->chip_types[1].regs.type		= reg_offs->ctrl;
 	gc->chip_types[1].handler		= handle_edge_irq;
 
+	/* Disable any active interrupts */
 	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
-	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
+
+	/* Clear any pending NMI interrupts */
+	sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
 
 	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
 
@@ -200,6 +232,14 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
 	return ret;
 }
 
+static int __init sun6i_r_intc_irq_init(struct device_node *node,
+					struct device_node *parent)
+{
+	return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc",
+		sun6i_r_intc_irq_init);
+
 static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
 					struct device_node *parent)
 {
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_mx_irq_domain_ops,
 				&xtensa_mx_irq_chip);
 	irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
 {
 	struct irq_domain *root_domain =
-		irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
 				&xtensa_irq_domain_ops, &xtensa_irq_chip);
 	irq_set_default_host(root_domain);
 	return 0;
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 2265586..6aa3ea4 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -288,9 +288,4 @@ static struct platform_driver qcom_irq_combiner_probe = {
 	},
 	.probe = combiner_probe,
 };
-
-static int __init register_qcom_irq_combiner(void)
-{
-	return platform_driver_register(&qcom_irq_combiner_probe);
-}
-device_initcall(register_qcom_irq_combiner);
+builtin_platform_driver(qcom_irq_combiner_probe);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index d07dd51..8aa158a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
 		       id);
 		return NULL;
 	} else {
-		rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+		rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
 		if (!rs)
 			return NULL;
 		rs->state = CCPResetIdle;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 8b7faea..422dced 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
 		if (sk->sk_state != MISDN_BOUND)
 			continue;
 		if (!cskb)
-			cskb = skb_copy(skb, GFP_KERNEL);
+			cskb = skb_copy(skb, GFP_ATOMIC);
 		if (!cskb) {
 			printk(KERN_WARNING "%s no skb\n", __func__);
 			break;
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 1548259..2cfd938 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -242,7 +242,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
 		spin_lock_irqsave(lock, flags);
 		val = bcm6328_led_read(addr);
-		val |= (BIT(reg) << (((sel % 4) * 4) + 16));
+		val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16));
 		bcm6328_led_write(addr, val);
 		spin_unlock_irqrestore(lock, flags);
 	}
@@ -269,7 +269,7 @@ static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
 
 		spin_lock_irqsave(lock, flags);
 		val = bcm6328_led_read(addr);
-		val |= (BIT(reg) << ((sel % 4) * 4));
+		val |= (BIT(reg % 4) << ((sel % 4) * 4));
 		bcm6328_led_write(addr, val);
 		spin_unlock_irqrestore(lock, flags);
 	}
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index afa3b40..e95ea65 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -20,7 +20,6 @@
 #include <linux/sched/loadavg.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
-#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -163,30 +162,6 @@ static struct led_trigger heartbeat_led_trigger = {
 	.deactivate = heartbeat_trig_deactivate,
 };
 
-static int heartbeat_pm_notifier(struct notifier_block *nb,
-				 unsigned long pm_event, void *unused)
-{
-	int rc;
-
-	switch (pm_event) {
-	case PM_SUSPEND_PREPARE:
-	case PM_HIBERNATION_PREPARE:
-	case PM_RESTORE_PREPARE:
-		led_trigger_unregister(&heartbeat_led_trigger);
-		break;
-	case PM_POST_SUSPEND:
-	case PM_POST_HIBERNATION:
-	case PM_POST_RESTORE:
-		rc = led_trigger_register(&heartbeat_led_trigger);
-		if (rc)
-			pr_err("could not re-register heartbeat trigger\n");
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
 				     unsigned long code, void *unused)
 {
@@ -201,10 +176,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block heartbeat_pm_nb = {
-	.notifier_call = heartbeat_pm_notifier,
-};
-
 static struct notifier_block heartbeat_reboot_nb = {
 	.notifier_call = heartbeat_reboot_notifier,
 };
@@ -221,14 +192,12 @@ static int __init heartbeat_trig_init(void)
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &heartbeat_panic_nb);
 		register_reboot_notifier(&heartbeat_reboot_nb);
-		register_pm_notifier(&heartbeat_pm_nb);
 	}
 	return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
-	unregister_pm_notifier(&heartbeat_pm_nb);
 	unregister_reboot_notifier(&heartbeat_reboot_nb);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &heartbeat_panic_nb);
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 6a4aa60..ddae430 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -252,8 +252,9 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	}
 	mutex_unlock(&dev->mlock);
 
-	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
-		return -ENOMEM;
+	ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+	if (ret)
+		return ret;
 
 	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
 	if (!t) {
@@ -640,6 +641,7 @@ EXPORT_SYMBOL(nvm_max_phys_sects);
 int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
+	int ret;
 
 	if (!dev->ops->submit_io)
 		return -ENODEV;
@@ -647,7 +649,12 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
 	rqd->dev = tgt_dev;
-	return dev->ops->submit_io(dev, rqd);
+
+	/* In case of error, fail with right address format */
+	ret = dev->ops->submit_io(dev, rqd);
+	if (ret)
+		nvm_rq_dev_to_tgt(tgt_dev, rqd);
+	return ret;
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 59bcea8..024a8fc 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -31,9 +31,13 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	 */
 retry:
 	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
-	if (ret == NVM_IO_REQUEUE) {
+	switch (ret) {
+	case NVM_IO_REQUEUE:
 		io_schedule();
 		goto retry;
+	case NVM_IO_ERR:
+		pblk_pipeline_stop(pblk);
+		goto out;
 	}
 
 	if (unlikely(!bio_has_data(bio)))
@@ -58,6 +62,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
 	atomic_long_add(nr_entries, &pblk->req_writes);
 #endif
 
+	pblk_rl_inserted(&pblk->rl, nr_entries);
+
 out:
 	pblk_write_should_kick(pblk);
 	return ret;
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 5e44768..11fe0c5 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -17,7 +17,6 @@
  */
 
 #include "pblk.h"
-#include <linux/time.h>
 
 static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
 			 struct ppa_addr *ppa)
@@ -34,7 +33,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
 		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
 							line->id, pos);
 
-	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
+	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
 }
 
 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
@@ -54,6 +53,8 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
 		*ppa = rqd->ppa_addr;
 		pblk_mark_bb(pblk, line, ppa);
 	}
+
+	atomic_dec(&pblk->inflight_io);
 }
 
 /* Erase completion assumes that only one block is erased at the time */
@@ -61,13 +62,12 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
 
-	up(&pblk->erase_sem);
 	__pblk_end_io_erase(pblk, rqd);
-	mempool_free(rqd, pblk->r_rq_pool);
+	mempool_free(rqd, pblk->g_rq_pool);
 }
 
-static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
-				  u64 paddr)
+void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
+			   u64 paddr)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct list_head *move_list = NULL;
@@ -88,7 +88,7 @@ static void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 		spin_unlock(&line->lock);
 		return;
 	}
-	line->vsc--;
+	le32_add_cpu(line->vsc, -1);
 
 	if (line->state == PBLK_LINESTATE_CLOSED)
 		move_list = pblk_line_gc_list(pblk, line);
@@ -130,18 +130,6 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
 	__pblk_map_invalidate(pblk, line, paddr);
 }
 
-void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
-			     u64 paddr)
-{
-	__pblk_map_invalidate(pblk, line, paddr);
-
-	pblk_rb_sync_init(&pblk->rwb, NULL);
-	line->left_ssecs--;
-	if (!line->left_ssecs)
-		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
-	pblk_rb_sync_end(&pblk->rwb, NULL);
-}
-
 static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
 				  unsigned int nr_secs)
 {
@@ -172,8 +160,8 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
 		pool = pblk->w_rq_pool;
 		rq_size = pblk_w_rq_size;
 	} else {
-		pool = pblk->r_rq_pool;
-		rq_size = pblk_r_rq_size;
+		pool = pblk->g_rq_pool;
+		rq_size = pblk_g_rq_size;
 	}
 
 	rqd = mempool_alloc(pool, GFP_KERNEL);
@@ -189,7 +177,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
 	if (rw == WRITE)
 		pool = pblk->w_rq_pool;
 	else
-		pool = pblk->r_rq_pool;
+		pool = pblk->g_rq_pool;
 
 	mempool_free(rqd, pool);
 }
@@ -271,35 +259,26 @@ void pblk_end_io_sync(struct nvm_rq *rqd)
 	complete(waiting);
 }
 
-void pblk_flush_writer(struct pblk *pblk)
+void pblk_wait_for_meta(struct pblk *pblk)
 {
-	struct bio *bio;
-	int ret;
-	DECLARE_COMPLETION_ONSTACK(wait);
+	do {
+		if (!atomic_read(&pblk->inflight_io))
+			break;
 
-	bio = bio_alloc(GFP_KERNEL, 1);
-	if (!bio)
-		return;
+		schedule();
+	} while (1);
+}
 
-	bio->bi_iter.bi_sector = 0; /* internal bio */
-	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
-	bio->bi_private = &wait;
-	bio->bi_end_io = pblk_end_bio_sync;
+static void pblk_flush_writer(struct pblk *pblk)
+{
+	pblk_rb_flush(&pblk->rwb);
+	do {
+		if (!pblk_rb_sync_count(&pblk->rwb))
+			break;
 
-	ret = pblk_write_to_cache(pblk, bio, 0);
-	if (ret == NVM_IO_OK) {
-		if (!wait_for_completion_io_timeout(&wait,
-				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
-			pr_err("pblk: flush cache timed out\n");
-		}
-	} else if (ret != NVM_IO_DONE) {
-		pr_err("pblk: tear down bio failed\n");
-	}
-
-	if (bio->bi_error)
-		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);
-
-	bio_put(bio);
+		pblk_write_kick(pblk);
+		schedule();
+	} while (1);
 }
 
 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
@@ -307,28 +286,31 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct list_head *move_list = NULL;
+	int vsc = le32_to_cpu(*line->vsc);
 
-	if (!line->vsc) {
+	lockdep_assert_held(&line->lock);
+
+	if (!vsc) {
 		if (line->gc_group != PBLK_LINEGC_FULL) {
 			line->gc_group = PBLK_LINEGC_FULL;
 			move_list = &l_mg->gc_full_list;
 		}
-	} else if (line->vsc < lm->mid_thrs) {
+	} else if (vsc < lm->high_thrs) {
 		if (line->gc_group != PBLK_LINEGC_HIGH) {
 			line->gc_group = PBLK_LINEGC_HIGH;
 			move_list = &l_mg->gc_high_list;
 		}
-	} else if (line->vsc < lm->high_thrs) {
+	} else if (vsc < lm->mid_thrs) {
 		if (line->gc_group != PBLK_LINEGC_MID) {
 			line->gc_group = PBLK_LINEGC_MID;
 			move_list = &l_mg->gc_mid_list;
 		}
-	} else if (line->vsc < line->sec_in_line) {
+	} else if (vsc < line->sec_in_line) {
 		if (line->gc_group != PBLK_LINEGC_LOW) {
 			line->gc_group = PBLK_LINEGC_LOW;
 			move_list = &l_mg->gc_low_list;
 		}
-	} else if (line->vsc == line->sec_in_line) {
+	} else if (vsc == line->sec_in_line) {
 		if (line->gc_group != PBLK_LINEGC_EMPTY) {
 			line->gc_group = PBLK_LINEGC_EMPTY;
 			move_list = &l_mg->gc_empty_list;
@@ -338,7 +320,7 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
 		line->gc_group = PBLK_LINEGC_NONE;
 		move_list =  &l_mg->corrupt_list;
 		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
-						line->id, line->vsc,
+						line->id, vsc,
 						line->sec_in_line,
 						lm->high_thrs, lm->mid_thrs);
 	}
@@ -397,6 +379,11 @@ void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
 #endif
 }
 
+void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
+{
+	pblk->sec_per_write = sec_per_write;
+}
+
 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
@@ -431,21 +418,23 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
 		}
 	}
 #endif
+
+	atomic_inc(&pblk->inflight_io);
+
 	return nvm_submit_io(dev, rqd);
 }
 
 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 			      unsigned int nr_secs, unsigned int len,
-			      gfp_t gfp_mask)
+			      int alloc_type, gfp_t gfp_mask)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
-	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	void *kaddr = data;
 	struct page *page;
 	struct bio *bio;
 	int i, ret;
 
-	if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
+	if (alloc_type == PBLK_KMALLOC_META)
 		return bio_map_kern(dev->q, kaddr, len, gfp_mask);
 
 	bio = bio_kmalloc(gfp_mask, nr_secs);
@@ -478,7 +467,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 		   unsigned long secs_to_flush)
 {
-	int max = pblk->max_write_pgs;
+	int max = pblk->sec_per_write;
 	int min = pblk->min_write_pgs;
 	int secs_to_sync = 0;
 
@@ -492,12 +481,26 @@ int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 	return secs_to_sync;
 }
 
-static u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line,
-			     int nr_secs)
+void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 {
 	u64 addr;
 	int i;
 
+	addr = find_next_zero_bit(line->map_bitmap,
+					pblk->lm.sec_per_line, line->cur_sec);
+	line->cur_sec = addr - nr_secs;
+
+	for (i = 0; i < nr_secs; i++, line->cur_sec--)
+		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
+}
+
+u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
+{
+	u64 addr;
+	int i;
+
+	lockdep_assert_held(&line->lock);
+
 	/* logic error: ppa out-of-bounds. Prevent generating bad address */
 	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
 		WARN(1, "pblk: page allocation out of bounds\n");
@@ -528,27 +531,38 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
 	return addr;
 }
 
+u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
+{
+	u64 paddr;
+
+	spin_lock(&line->lock);
+	paddr = find_next_zero_bit(line->map_bitmap,
+					pblk->lm.sec_per_line, line->cur_sec);
+	spin_unlock(&line->lock);
+
+	return paddr;
+}
+
 /*
  * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
  * taking the per LUN semaphore.
  */
 static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
-				     u64 paddr, int dir)
+				     void *emeta_buf, u64 paddr, int dir)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
+	void *ppa_list, *meta_list;
 	struct bio *bio;
 	struct nvm_rq rqd;
-	struct ppa_addr *ppa_list;
-	dma_addr_t dma_ppa_list;
-	void *emeta = line->emeta;
+	dma_addr_t dma_ppa_list, dma_meta_list;
 	int min = pblk->min_write_pgs;
-	int left_ppas = lm->emeta_sec;
+	int left_ppas = lm->emeta_sec[0];
 	int id = line->id;
 	int rq_ppas, rq_len;
 	int cmd_op, bio_op;
-	int flags;
 	int i, j;
 	int ret;
 	DECLARE_COMPLETION_ONSTACK(wait);
@@ -556,25 +570,28 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 	if (dir == WRITE) {
 		bio_op = REQ_OP_WRITE;
 		cmd_op = NVM_OP_PWRITE;
-		flags = pblk_set_progr_mode(pblk, WRITE);
 	} else if (dir == READ) {
 		bio_op = REQ_OP_READ;
 		cmd_op = NVM_OP_PREAD;
-		flags = pblk_set_read_mode(pblk);
 	} else
 		return -EINVAL;
 
-	ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
-	if (!ppa_list)
+	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+							&dma_meta_list);
+	if (!meta_list)
 		return -ENOMEM;
 
+	ppa_list = meta_list + pblk_dma_meta_size;
+	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+
 next_rq:
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	rq_len = rq_ppas * geo->sec_size;
 
-	bio = pblk_bio_map_addr(pblk, emeta, rq_ppas, rq_len, GFP_KERNEL);
+	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
+					l_mg->emeta_alloc_type, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
 		goto free_rqd_dma;
@@ -584,27 +601,38 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 	bio_set_op_attrs(bio, bio_op, 0);
 
 	rqd.bio = bio;
-	rqd.opcode = cmd_op;
-	rqd.flags = flags;
-	rqd.nr_ppas = rq_ppas;
+	rqd.meta_list = meta_list;
 	rqd.ppa_list = ppa_list;
+	rqd.dma_meta_list = dma_meta_list;
 	rqd.dma_ppa_list = dma_ppa_list;
+	rqd.opcode = cmd_op;
+	rqd.nr_ppas = rq_ppas;
 	rqd.end_io = pblk_end_io_sync;
 	rqd.private = &wait;
 
 	if (dir == WRITE) {
+		struct pblk_sec_meta *meta_list = rqd.meta_list;
+
+		rqd.flags = pblk_set_progr_mode(pblk, WRITE);
 		for (i = 0; i < rqd.nr_ppas; ) {
 			spin_lock(&line->lock);
 			paddr = __pblk_alloc_page(pblk, line, min);
 			spin_unlock(&line->lock);
-			for (j = 0; j < min; j++, i++, paddr++)
+			for (j = 0; j < min; j++, i++, paddr++) {
+				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
 				rqd.ppa_list[i] =
 					addr_to_gen_ppa(pblk, paddr, id);
+			}
 		}
 	} else {
 		for (i = 0; i < rqd.nr_ppas; ) {
 			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
 			int pos = pblk_dev_ppa_to_pos(geo, ppa);
+			int read_type = PBLK_READ_RANDOM;
+
+			if (pblk_io_aligned(pblk, rq_ppas))
+				read_type = PBLK_READ_SEQUENTIAL;
+			rqd.flags = pblk_set_read_mode(pblk, read_type);
 
 			while (test_bit(pos, line->blk_bitmap)) {
 				paddr += min;
@@ -645,9 +673,11 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: emeta I/O timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 	reinit_completion(&wait);
 
-	bio_put(bio);
+	if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
+		bio_put(bio);
 
 	if (rqd.error) {
 		if (dir == WRITE)
@@ -656,12 +686,12 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
 			pblk_log_read_err(pblk, &rqd);
 	}
 
-	emeta += rq_len;
+	emeta_buf += rq_len;
 	left_ppas -= rq_ppas;
 	if (left_ppas)
 		goto next_rq;
 free_rqd_dma:
-	nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
+	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 	return ret;
 }
 
@@ -697,21 +727,24 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 		bio_op = REQ_OP_WRITE;
 		cmd_op = NVM_OP_PWRITE;
 		flags = pblk_set_progr_mode(pblk, WRITE);
-		lba_list = pblk_line_emeta_to_lbas(line->emeta);
+		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 	} else if (dir == READ) {
 		bio_op = REQ_OP_READ;
 		cmd_op = NVM_OP_PREAD;
-		flags = pblk_set_read_mode(pblk);
+		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
 	} else
 		return -EINVAL;
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-	if (!rqd.ppa_list)
+	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+							&rqd.dma_meta_list);
+	if (!rqd.meta_list)
 		return -ENOMEM;
 
+	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
+	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
+
 	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		ret = PTR_ERR(bio);
@@ -729,9 +762,15 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 	rqd.private = &wait;
 
 	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
+		struct pblk_sec_meta *meta_list = rqd.meta_list;
+
 		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-		if (dir == WRITE)
-			lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
+
+		if (dir == WRITE) {
+			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
+			meta_list[i].lba = lba_list[paddr] = addr_empty;
+		}
 	}
 
 	/*
@@ -750,6 +789,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: smeta I/O timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 
 	if (rqd.error) {
 		if (dir == WRITE)
@@ -759,7 +799,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
 	}
 
 free_ppa_list:
-	nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
+	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 
 	return ret;
 }
@@ -771,9 +811,11 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
 	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
 }
 
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line)
+int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
+			 void *emeta_buf)
 {
-	return pblk_line_submit_emeta_io(pblk, line, line->emeta_ssec, READ);
+	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
+						line->emeta_ssec, READ);
 }
 
 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -789,7 +831,7 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
 static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 {
 	struct nvm_rq rqd;
-	int ret;
+	int ret = 0;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
@@ -824,14 +866,14 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
 	rqd.private = pblk;
 	__pblk_end_io_erase(pblk, &rqd);
 
-	return 0;
+	return ret;
 }
 
 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct ppa_addr ppa;
-	int bit = -1;
+	int ret, bit = -1;
 
 	/* Erase only good blocks, one at a time */
 	do {
@@ -850,27 +892,59 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
 		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
 		spin_unlock(&line->lock);
 
-		if (pblk_blk_erase_sync(pblk, ppa)) {
+		ret = pblk_blk_erase_sync(pblk, ppa);
+		if (ret) {
 			pr_err("pblk: failed to erase line %d\n", line->id);
-			return -ENOMEM;
+			return ret;
 		}
 	} while (1);
 
 	return 0;
 }
 
+static void pblk_line_setup_metadata(struct pblk_line *line,
+				     struct pblk_line_mgmt *l_mg,
+				     struct pblk_line_meta *lm)
+{
+	int meta_line;
+
+	lockdep_assert_held(&l_mg->free_lock);
+
+retry_meta:
+	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
+	if (meta_line == PBLK_DATA_LINES) {
+		spin_unlock(&l_mg->free_lock);
+		io_schedule();
+		spin_lock(&l_mg->free_lock);
+		goto retry_meta;
+	}
+
+	set_bit(meta_line, &l_mg->meta_bitmap);
+	line->meta_line = meta_line;
+
+	line->smeta = l_mg->sline_meta[meta_line];
+	line->emeta = l_mg->eline_meta[meta_line];
+
+	memset(line->smeta, 0, lm->smeta_len);
+	memset(line->emeta->buf, 0, lm->emeta_len[0]);
+
+	line->emeta->mem = 0;
+	atomic_set(&line->emeta->sync, 0);
+}
+
 /* For now lines are always assumed full lines. Thus, smeta former and current
  * lun bitmaps are omitted.
  */
-static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
+static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
 				  struct pblk_line *cur)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-	struct line_smeta *smeta = line->smeta;
-	struct line_emeta *emeta = line->emeta;
+	struct pblk_emeta *emeta = line->emeta;
+	struct line_emeta *emeta_buf = emeta->buf;
+	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
 	int nr_blk_line;
 
 	/* After erasing the line, new bad blocks might appear and we risk
@@ -893,42 +967,44 @@ static int pblk_line_set_metadata(struct pblk *pblk, struct pblk_line *line,
 	}
 
 	/* Run-time metadata */
-	line->lun_bitmap = ((void *)(smeta)) + sizeof(struct line_smeta);
+	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);
 
 	/* Mark LUNs allocated in this line (all for now) */
 	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);
 
-	smeta->header.identifier = cpu_to_le32(PBLK_MAGIC);
-	memcpy(smeta->header.uuid, pblk->instance_uuid, 16);
-	smeta->header.id = cpu_to_le32(line->id);
-	smeta->header.type = cpu_to_le16(line->type);
-	smeta->header.version = cpu_to_le16(1);
+	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
+	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
+	smeta_buf->header.id = cpu_to_le32(line->id);
+	smeta_buf->header.type = cpu_to_le16(line->type);
+	smeta_buf->header.version = cpu_to_le16(1);
 
 	/* Start metadata */
-	smeta->seq_nr = cpu_to_le64(line->seq_nr);
-	smeta->window_wr_lun = cpu_to_le32(geo->nr_luns);
+	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
+	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
 
 	/* Fill metadata among lines */
 	if (cur) {
 		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
-		smeta->prev_id = cpu_to_le32(cur->id);
-		cur->emeta->next_id = cpu_to_le32(line->id);
+		smeta_buf->prev_id = cpu_to_le32(cur->id);
+		cur->emeta->buf->next_id = cpu_to_le32(line->id);
 	} else {
-		smeta->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
+		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
 	}
 
 	/* All smeta must be set at this point */
-	smeta->header.crc = cpu_to_le32(pblk_calc_meta_header_crc(pblk, smeta));
-	smeta->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta));
+	smeta_buf->header.crc = cpu_to_le32(
+			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
+	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));
 
 	/* End metadata */
-	memcpy(&emeta->header, &smeta->header, sizeof(struct line_header));
-	emeta->seq_nr = cpu_to_le64(line->seq_nr);
-	emeta->nr_lbas = cpu_to_le64(line->sec_in_line);
-	emeta->nr_valid_lbas = cpu_to_le64(0);
-	emeta->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
-	emeta->crc = cpu_to_le32(0);
-	emeta->prev_id = smeta->prev_id;
+	memcpy(&emeta_buf->header, &smeta_buf->header,
+						sizeof(struct line_header));
+	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
+	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
+	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
+	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
+	emeta_buf->crc = cpu_to_le32(0);
+	emeta_buf->prev_id = smeta_buf->prev_id;
 
 	return 1;
 }
@@ -965,7 +1041,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 	/* Mark smeta metadata sectors as bad sectors */
 	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
 	off = bit * geo->sec_per_pl;
-retry_smeta:
 	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
 	line->sec_in_line -= lm->smeta_sec;
 	line->smeta_ssec = off;
@@ -973,8 +1048,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 
 	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
 		pr_debug("pblk: line smeta I/O failed. Retry\n");
-		off += geo->sec_per_pl;
-		goto retry_smeta;
+		return 1;
 	}
 
 	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);
@@ -983,8 +1057,8 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 	 * blocks to make sure that there are enough sectors to store emeta
 	 */
 	bit = lm->sec_per_line;
-	off = lm->sec_per_line - lm->emeta_sec;
-	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec);
+	off = lm->sec_per_line - lm->emeta_sec[0];
+	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
 	while (nr_bb) {
 		off -= geo->sec_per_pl;
 		if (!test_bit(off, line->invalid_bitmap)) {
@@ -993,9 +1067,11 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
 		}
 	}
 
-	line->sec_in_line -= lm->emeta_sec;
+	line->sec_in_line -= lm->emeta_sec[0];
 	line->emeta_ssec = off;
-	line->vsc = line->left_ssecs = line->left_msecs = line->sec_in_line;
+	line->nr_valid_lbas = 0;
+	line->left_msecs = line->sec_in_line;
+	*line->vsc = cpu_to_le32(line->sec_in_line);
 
 	if (lm->sec_per_line - line->sec_in_line !=
 		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
@@ -1034,14 +1110,20 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
 
 	spin_lock(&line->lock);
 	if (line->state != PBLK_LINESTATE_FREE) {
+		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
+		mempool_free(line->map_bitmap, pblk->line_meta_pool);
 		spin_unlock(&line->lock);
-		WARN(1, "pblk: corrupted line state\n");
-		return -EINTR;
+		WARN(1, "pblk: corrupted line %d, state %d\n",
+							line->id, line->state);
+		return -EAGAIN;
 	}
+
 	line->state = PBLK_LINESTATE_OPEN;
 
 	atomic_set(&line->left_eblks, blk_in_line);
 	atomic_set(&line->left_seblks, blk_in_line);
+
+	line->meta_distance = lm->meta_distance;
 	spin_unlock(&line->lock);
 
 	/* Bad blocks do not need to be erased */
@@ -1091,15 +1173,15 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line *line = NULL;
-	int bit;
+	struct pblk_line *line;
+	int ret, bit;
 
 	lockdep_assert_held(&l_mg->free_lock);
 
-retry_get:
+retry:
 	if (list_empty(&l_mg->free_list)) {
 		pr_err("pblk: no free lines\n");
-		goto out;
+		return NULL;
 	}
 
 	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
@@ -1115,16 +1197,22 @@ struct pblk_line *pblk_line_get(struct pblk *pblk)
 		list_add_tail(&line->list, &l_mg->bad_list);
 
 		pr_debug("pblk: line %d is bad\n", line->id);
-		goto retry_get;
+		goto retry;
 	}
 
-	if (pblk_line_prepare(pblk, line)) {
-		pr_err("pblk: failed to prepare line %d\n", line->id);
-		list_add(&line->list, &l_mg->free_list);
-		return NULL;
+	ret = pblk_line_prepare(pblk, line);
+	if (ret) {
+		if (ret == -EAGAIN) {
+			list_add(&line->list, &l_mg->corrupt_list);
+			goto retry;
+		} else {
+			pr_err("pblk: failed to prepare line %d\n", line->id);
+			list_add(&line->list, &l_mg->free_list);
+			l_mg->nr_free_lines++;
+			return NULL;
+		}
 	}
 
-out:
 	return line;
 }
 
@@ -1134,6 +1222,7 @@ static struct pblk_line *pblk_line_retry(struct pblk *pblk,
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *retry_line;
 
+retry:
 	spin_lock(&l_mg->free_lock);
 	retry_line = pblk_line_get(pblk);
 	if (!retry_line) {
@@ -1150,23 +1239,25 @@ static struct pblk_line *pblk_line_retry(struct pblk *pblk,
 	l_mg->data_line = retry_line;
 	spin_unlock(&l_mg->free_lock);
 
-	if (pblk_line_erase(pblk, retry_line)) {
-		spin_lock(&l_mg->free_lock);
-		l_mg->data_line = NULL;
-		spin_unlock(&l_mg->free_lock);
-		return NULL;
-	}
-
 	pblk_rl_free_lines_dec(&pblk->rl, retry_line);
 
+	if (pblk_line_erase(pblk, retry_line))
+		goto retry;
+
 	return retry_line;
 }
 
+static void pblk_set_space_limit(struct pblk *pblk)
+{
+	struct pblk_rl *rl = &pblk->rl;
+
+	atomic_set(&rl->rb_space, 0);
+}
+
 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *line;
-	int meta_line;
 	int is_next = 0;
 
 	spin_lock(&l_mg->free_lock);
@@ -1180,30 +1271,37 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 	line->type = PBLK_LINETYPE_DATA;
 	l_mg->data_line = line;
 
-	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-	set_bit(meta_line, &l_mg->meta_bitmap);
-	line->smeta = l_mg->sline_meta[meta_line].meta;
-	line->emeta = l_mg->eline_meta[meta_line].meta;
-	line->meta_line = meta_line;
+	pblk_line_setup_metadata(line, l_mg, &pblk->lm);
 
 	/* Allocate next line for preparation */
 	l_mg->data_next = pblk_line_get(pblk);
-	if (l_mg->data_next) {
+	if (!l_mg->data_next) {
+		/* If we cannot get a new line, we need to stop the pipeline.
+		 * Only allow as many writes in as we can store safely and then
+		 * fail gracefully
+		 */
+		pblk_set_space_limit(pblk);
+
+		l_mg->data_next = NULL;
+	} else {
 		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
 		l_mg->data_next->type = PBLK_LINETYPE_DATA;
 		is_next = 1;
 	}
 	spin_unlock(&l_mg->free_lock);
 
+	if (pblk_line_erase(pblk, line)) {
+		line = pblk_line_retry(pblk, line);
+		if (!line)
+			return NULL;
+	}
+
 	pblk_rl_free_lines_dec(&pblk->rl, line);
 	if (is_next)
 		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
 
-	if (pblk_line_erase(pblk, line))
-		return NULL;
-
 retry_setup:
-	if (!pblk_line_set_metadata(pblk, line, NULL)) {
+	if (!pblk_line_init_metadata(pblk, line, NULL)) {
 		line = pblk_line_retry(pblk, line);
 		if (!line)
 			return NULL;
@@ -1222,69 +1320,89 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
 	return line;
 }
 
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
+static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
 {
-	struct pblk_line_meta *lm = &pblk->lm;
+	lockdep_assert_held(&pblk->l_mg.free_lock);
+
+	pblk_set_space_limit(pblk);
+	pblk->state = PBLK_STATE_STOPPING;
+}
+
+void pblk_pipeline_stop(struct pblk *pblk)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	int ret;
+
+	spin_lock(&l_mg->free_lock);
+	if (pblk->state == PBLK_STATE_RECOVERING ||
+					pblk->state == PBLK_STATE_STOPPED) {
+		spin_unlock(&l_mg->free_lock);
+		return;
+	}
+	pblk->state = PBLK_STATE_RECOVERING;
+	spin_unlock(&l_mg->free_lock);
+
+	pblk_flush_writer(pblk);
+	pblk_wait_for_meta(pblk);
+
+	ret = pblk_recov_pad(pblk);
+	if (ret) {
+		pr_err("pblk: could not close data on teardown(%d)\n", ret);
+		return;
+	}
+
+	flush_workqueue(pblk->bb_wq);
+	pblk_line_close_meta_sync(pblk);
+
+	spin_lock(&l_mg->free_lock);
+	pblk->state = PBLK_STATE_STOPPED;
+	l_mg->data_line = NULL;
+	l_mg->data_next = NULL;
+	spin_unlock(&l_mg->free_lock);
+}
+
+void pblk_line_replace_data(struct pblk *pblk)
+{
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *cur, *new;
 	unsigned int left_seblks;
-	int meta_line;
 	int is_next = 0;
 
 	cur = l_mg->data_line;
 	new = l_mg->data_next;
 	if (!new)
-		return NULL;
+		return;
 	l_mg->data_line = new;
 
-retry_line:
+	spin_lock(&l_mg->free_lock);
+	if (pblk->state != PBLK_STATE_RUNNING) {
+		l_mg->data_line = NULL;
+		l_mg->data_next = NULL;
+		spin_unlock(&l_mg->free_lock);
+		return;
+	}
+
+	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
+	spin_unlock(&l_mg->free_lock);
+
+retry_erase:
 	left_seblks = atomic_read(&new->left_seblks);
 	if (left_seblks) {
 		/* If line is not fully erased, erase it */
 		if (atomic_read(&new->left_eblks)) {
 			if (pblk_line_erase(pblk, new))
-				return NULL;
+				return;
 		} else {
 			io_schedule();
 		}
-		goto retry_line;
+		goto retry_erase;
 	}
 
-	spin_lock(&l_mg->free_lock);
-	/* Allocate next line for preparation */
-	l_mg->data_next = pblk_line_get(pblk);
-	if (l_mg->data_next) {
-		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
-		l_mg->data_next->type = PBLK_LINETYPE_DATA;
-		is_next = 1;
-	}
-
-retry_meta:
-	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-	if (meta_line == PBLK_DATA_LINES) {
-		spin_unlock(&l_mg->free_lock);
-		io_schedule();
-		spin_lock(&l_mg->free_lock);
-		goto retry_meta;
-	}
-
-	set_bit(meta_line, &l_mg->meta_bitmap);
-	new->smeta = l_mg->sline_meta[meta_line].meta;
-	new->emeta = l_mg->eline_meta[meta_line].meta;
-	new->meta_line = meta_line;
-
-	memset(new->smeta, 0, lm->smeta_len);
-	memset(new->emeta, 0, lm->emeta_len);
-	spin_unlock(&l_mg->free_lock);
-
-	if (is_next)
-		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
 retry_setup:
-	if (!pblk_line_set_metadata(pblk, new, cur)) {
+	if (!pblk_line_init_metadata(pblk, new, cur)) {
 		new = pblk_line_retry(pblk, new);
 		if (!new)
-			return NULL;
+			return;
 
 		goto retry_setup;
 	}
@@ -1292,12 +1410,30 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
 	if (!pblk_line_init_bb(pblk, new, 1)) {
 		new = pblk_line_retry(pblk, new);
 		if (!new)
-			return NULL;
+			return;
 
 		goto retry_setup;
 	}
 
-	return new;
+	/* Allocate next line for preparation */
+	spin_lock(&l_mg->free_lock);
+	l_mg->data_next = pblk_line_get(pblk);
+	if (!l_mg->data_next) {
+		/* If we cannot get a new line, we need to stop the pipeline.
+		 * Only allow as many writes in as we can store safely and then
+		 * fail gracefully
+		 */
+		pblk_stop_writes(pblk, new);
+		l_mg->data_next = NULL;
+	} else {
+		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
+		l_mg->data_next->type = PBLK_LINETYPE_DATA;
+		is_next = 1;
+	}
+	spin_unlock(&l_mg->free_lock);
+
+	if (is_next)
+		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
 }
 
 void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
@@ -1307,6 +1443,8 @@ void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
 	if (line->invalid_bitmap)
 		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
 
+	*line->vsc = cpu_to_le32(EMPTY_ENTRY);
+
 	line->map_bitmap = NULL;
 	line->invalid_bitmap = NULL;
 	line->smeta = NULL;
@@ -1339,8 +1477,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
 	struct nvm_rq *rqd;
 	int err;
 
-	rqd = mempool_alloc(pblk->r_rq_pool, GFP_KERNEL);
-	memset(rqd, 0, pblk_r_rq_size);
+	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
+	memset(rqd, 0, pblk_g_rq_size);
 
 	pblk_setup_e_rq(pblk, rqd, ppa);
 
@@ -1368,7 +1506,8 @@ struct pblk_line *pblk_line_get_data(struct pblk *pblk)
 	return pblk->l_mg.data_line;
 }
 
-struct pblk_line *pblk_line_get_data_next(struct pblk *pblk)
+/* For now, always erase next line */
+struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
 {
 	return pblk->l_mg.data_next;
 }
@@ -1378,18 +1517,58 @@ int pblk_line_is_full(struct pblk_line *line)
 	return (line->left_msecs == 0);
 }
 
+void pblk_line_close_meta_sync(struct pblk *pblk)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_line *line, *tline;
+	LIST_HEAD(list);
+
+	spin_lock(&l_mg->close_lock);
+	if (list_empty(&l_mg->emeta_list)) {
+		spin_unlock(&l_mg->close_lock);
+		return;
+	}
+
+	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
+	spin_unlock(&l_mg->close_lock);
+
+	list_for_each_entry_safe(line, tline, &list, list) {
+		struct pblk_emeta *emeta = line->emeta;
+
+		while (emeta->mem < lm->emeta_len[0]) {
+			int ret;
+
+			ret = pblk_submit_meta_io(pblk, line);
+			if (ret) {
+				pr_err("pblk: sync meta line %d failed (%d)\n",
+							line->id, ret);
+				return;
+			}
+		}
+	}
+
+	pblk_wait_for_meta(pblk);
+	flush_workqueue(pblk->close_wq);
+}
+
+static void pblk_line_should_sync_meta(struct pblk *pblk)
+{
+	if (pblk_rl_is_limit(&pblk->rl))
+		pblk_line_close_meta_sync(pblk);
+}
+
 void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct list_head *move_list;
 
-	line->emeta->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, line->emeta));
+#ifdef CONFIG_NVM_DEBUG
+	struct pblk_line_meta *lm = &pblk->lm;
 
-	if (pblk_line_submit_emeta_io(pblk, line, line->cur_sec, WRITE))
-		pr_err("pblk: line %d close I/O failed\n", line->id);
-
-	WARN(!bitmap_full(line->map_bitmap, line->sec_in_line),
+	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
 				"pblk: corrupt closed line %d\n", line->id);
+#endif
 
 	spin_lock(&l_mg->free_lock);
 	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
@@ -1410,6 +1589,31 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
 
 	spin_unlock(&line->lock);
 	spin_unlock(&l_mg->gc_lock);
+
+	pblk_gc_should_kick(pblk);
+}
+
+void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_emeta *emeta = line->emeta;
+	struct line_emeta *emeta_buf = emeta->buf;
+
+	/* No need for exact vsc value; avoid a big line lock and take approx. */
+	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
+	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
+
+	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
+	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
+
+	spin_lock(&l_mg->close_lock);
+	spin_lock(&line->lock);
+	list_add_tail(&line->list, &l_mg->emeta_list);
+	spin_unlock(&line->lock);
+	spin_unlock(&l_mg->close_lock);
+
+	pblk_line_should_sync_meta(pblk);
 }
 
 void pblk_line_close_ws(struct work_struct *work)
@@ -1449,7 +1653,8 @@ void pblk_line_mark_bb(struct work_struct *work)
 }
 
 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-		      void (*work)(struct work_struct *))
+		      void (*work)(struct work_struct *),
+		      struct workqueue_struct *wq)
 {
 	struct pblk_line_ws *line_ws;
 
@@ -1462,7 +1667,7 @@ void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 	line_ws->priv = priv;
 
 	INIT_WORK(&line_ws->ws, work);
-	queue_work(pblk->kw_wq, &line_ws->ws);
+	queue_work(wq, &line_ws->ws);
 }
 
 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
@@ -1471,7 +1676,7 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_lun *rlun;
-	int lun_id = ppa_list[0].g.ch * geo->luns_per_chnl + ppa_list[0].g.lun;
+	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
 	int ret;
 
 	/*
@@ -1488,10 +1693,10 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 	/* If the LUN has been locked for this same request, do no attempt to
 	 * lock it again
 	 */
-	if (test_and_set_bit(lun_id, lun_bitmap))
+	if (test_and_set_bit(pos, lun_bitmap))
 		return;
 
-	rlun = &pblk->luns[lun_id];
+	rlun = &pblk->luns[pos];
 	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
 	if (ret) {
 		switch (ret) {
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index eaf479c..6090d28 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -20,8 +20,7 @@
 
 static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
 {
-	kfree(gc_rq->data);
-	kfree(gc_rq->lba_list);
+	vfree(gc_rq->data);
 	kfree(gc_rq);
 }
 
@@ -37,10 +36,8 @@ static int pblk_gc_write(struct pblk *pblk)
 		return 1;
 	}
 
-	list_for_each_entry_safe(gc_rq, tgc_rq, &gc->w_list, list) {
-		list_move_tail(&gc_rq->list, &w_list);
-		gc->w_entries--;
-	}
+	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
+	gc->w_entries = 0;
 	spin_unlock(&gc->w_lock);
 
 	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
@@ -48,9 +45,8 @@ static int pblk_gc_write(struct pblk *pblk)
 				gc_rq->nr_secs, gc_rq->secs_to_gc,
 				gc_rq->line, PBLK_IOTYPE_GC);
 
-		kref_put(&gc_rq->line->ref, pblk_line_put);
-
 		list_del(&gc_rq->list);
+		kref_put(&gc_rq->line->ref, pblk_line_put);
 		pblk_gc_free_gc_rq(gc_rq);
 	}
 
@@ -66,52 +62,41 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
  * Responsible for managing all memory related to a gc request. Also in case of
  * failure
  */
-static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_line *line,
-				   u64 *lba_list, unsigned int nr_secs)
+static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_gc *gc = &pblk->gc;
-	struct pblk_gc_rq *gc_rq;
+	struct pblk_line *line = gc_rq->line;
 	void *data;
 	unsigned int secs_to_gc;
-	int ret = NVM_IO_OK;
+	int ret = 0;
 
-	data = kmalloc(nr_secs * geo->sec_size, GFP_KERNEL);
+	data = vmalloc(gc_rq->nr_secs * geo->sec_size);
 	if (!data) {
-		ret = NVM_IO_ERR;
-		goto free_lba_list;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	/* Read from GC victim block */
-	if (pblk_submit_read_gc(pblk, lba_list, data, nr_secs,
+	if (pblk_submit_read_gc(pblk, gc_rq->lba_list, data, gc_rq->nr_secs,
 							&secs_to_gc, line)) {
-		ret = NVM_IO_ERR;
+		ret = -EFAULT;
 		goto free_data;
 	}
 
 	if (!secs_to_gc)
-		goto free_data;
+		goto free_rq;
 
-	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
-	if (!gc_rq) {
-		ret = NVM_IO_ERR;
-		goto free_data;
-	}
-
-	gc_rq->line = line;
 	gc_rq->data = data;
-	gc_rq->lba_list = lba_list;
-	gc_rq->nr_secs = nr_secs;
 	gc_rq->secs_to_gc = secs_to_gc;
 
-	kref_get(&line->ref);
-
 retry:
 	spin_lock(&gc->w_lock);
-	if (gc->w_entries > 256) {
+	if (gc->w_entries >= PBLK_GC_W_QD) {
 		spin_unlock(&gc->w_lock);
-		usleep_range(256, 1024);
+		pblk_gc_writer_kick(&pblk->gc);
+		usleep_range(128, 256);
 		goto retry;
 	}
 	gc->w_entries++;
@@ -120,13 +105,14 @@ static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_line *line,
 
 	pblk_gc_writer_kick(&pblk->gc);
 
-	return NVM_IO_OK;
+	return 0;
 
+free_rq:
+	kfree(gc_rq);
 free_data:
-	kfree(data);
-free_lba_list:
-	kfree(lba_list);
-
+	vfree(data);
+out:
+	kref_put(&line->ref, pblk_line_put);
 	return ret;
 }
 
@@ -150,98 +136,48 @@ static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
 
 static void pblk_gc_line_ws(struct work_struct *work)
 {
+	struct pblk_line_ws *line_rq_ws = container_of(work,
+						struct pblk_line_ws, ws);
+	struct pblk *pblk = line_rq_ws->pblk;
+	struct pblk_gc *gc = &pblk->gc;
+	struct pblk_line *line = line_rq_ws->line;
+	struct pblk_gc_rq *gc_rq = line_rq_ws->priv;
+
+	up(&gc->gc_sem);
+
+	if (pblk_gc_move_valid_secs(pblk, gc_rq)) {
+		pr_err("pblk: could not GC all sectors: line:%d (%d/%d)\n",
+						line->id, *line->vsc,
+						gc_rq->nr_secs);
+	}
+
+	mempool_free(line_rq_ws, pblk->line_ws_pool);
+}
+
+static void pblk_gc_line_prepare_ws(struct work_struct *work)
+{
 	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
 									ws);
 	struct pblk *pblk = line_ws->pblk;
-	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *line = line_ws->line;
-	struct pblk_line_meta *lm = &pblk->lm;
-	__le64 *lba_list = line_ws->priv;
-	u64 *gc_list;
-	int sec_left;
-	int nr_ppas, bit;
-	int put_line = 1;
-
-	pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
-
-	spin_lock(&line->lock);
-	sec_left = line->vsc;
-	if (!sec_left) {
-		/* Lines are erased before being used (l_mg->data_/log_next) */
-		spin_unlock(&line->lock);
-		goto out;
-	}
-	spin_unlock(&line->lock);
-
-	if (sec_left < 0) {
-		pr_err("pblk: corrupted GC line (%d)\n", line->id);
-		put_line = 0;
-		pblk_put_line_back(pblk, line);
-		goto out;
-	}
-
-	bit = -1;
-next_rq:
-	gc_list = kmalloc_array(pblk->max_write_pgs, sizeof(u64), GFP_KERNEL);
-	if (!gc_list) {
-		put_line = 0;
-		pblk_put_line_back(pblk, line);
-		goto out;
-	}
-
-	nr_ppas = 0;
-	do {
-		bit = find_next_zero_bit(line->invalid_bitmap, lm->sec_per_line,
-								bit + 1);
-		if (bit > line->emeta_ssec)
-			break;
-
-		gc_list[nr_ppas++] = le64_to_cpu(lba_list[bit]);
-	} while (nr_ppas < pblk->max_write_pgs);
-
-	if (unlikely(!nr_ppas)) {
-		kfree(gc_list);
-		goto out;
-	}
-
-	if (pblk_gc_move_valid_secs(pblk, line, gc_list, nr_ppas)) {
-		pr_err("pblk: could not GC all sectors: line:%d (%d/%d/%d)\n",
-						line->id, line->vsc,
-						nr_ppas, nr_ppas);
-		put_line = 0;
-		pblk_put_line_back(pblk, line);
-		goto out;
-	}
-
-	sec_left -= nr_ppas;
-	if (sec_left > 0)
-		goto next_rq;
-
-out:
-	pblk_mfree(line->emeta, l_mg->emeta_alloc_type);
-	mempool_free(line_ws, pblk->line_ws_pool);
-	atomic_dec(&pblk->gc.inflight_gc);
-	if (put_line)
-		kref_put(&line->ref, pblk_line_put);
-}
-
-static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
-{
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line_ws *line_ws;
+	struct pblk_gc *gc = &pblk->gc;
+	struct line_emeta *emeta_buf;
+	struct pblk_line_ws *line_rq_ws;
+	struct pblk_gc_rq *gc_rq;
 	__le64 *lba_list;
+	int sec_left, nr_secs, bit;
 	int ret;
 
-	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
-	line->emeta = pblk_malloc(lm->emeta_len, l_mg->emeta_alloc_type,
+	emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
 								GFP_KERNEL);
-	if (!line->emeta) {
+	if (!emeta_buf) {
 		pr_err("pblk: cannot use GC emeta\n");
-		goto fail_free_ws;
+		return;
 	}
 
-	ret = pblk_line_read_emeta(pblk, line);
+	ret = pblk_line_read_emeta(pblk, line, emeta_buf);
 	if (ret) {
 		pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
 		goto fail_free_emeta;
@@ -251,39 +187,155 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
 	 * the line untouched. TODO: Implement a recovery routine that scans and
 	 * moves all sectors on the line.
 	 */
-	lba_list = pblk_recov_get_lba_list(pblk, line->emeta);
+	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
 	if (!lba_list) {
 		pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
 		goto fail_free_emeta;
 	}
 
-	line_ws->pblk = pblk;
-	line_ws->line = line;
-	line_ws->priv = lba_list;
+	sec_left = pblk_line_vsc(line);
+	if (sec_left < 0) {
+		pr_err("pblk: corrupted GC line (%d)\n", line->id);
+		goto fail_free_emeta;
+	}
 
-	INIT_WORK(&line_ws->ws, pblk_gc_line_ws);
-	queue_work(pblk->gc.gc_reader_wq, &line_ws->ws);
+	bit = -1;
+next_rq:
+	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
+	if (!gc_rq)
+		goto fail_free_emeta;
 
-	return 0;
+	nr_secs = 0;
+	do {
+		bit = find_next_zero_bit(line->invalid_bitmap, lm->sec_per_line,
+								bit + 1);
+		if (bit > line->emeta_ssec)
+			break;
 
-fail_free_emeta:
-	pblk_mfree(line->emeta, l_mg->emeta_alloc_type);
-fail_free_ws:
+		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
+	} while (nr_secs < pblk->max_write_pgs);
+
+	if (unlikely(!nr_secs)) {
+		kfree(gc_rq);
+		goto out;
+	}
+
+	gc_rq->nr_secs = nr_secs;
+	gc_rq->line = line;
+
+	line_rq_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+	if (!line_rq_ws)
+		goto fail_free_gc_rq;
+
+	line_rq_ws->pblk = pblk;
+	line_rq_ws->line = line;
+	line_rq_ws->priv = gc_rq;
+
+	down(&gc->gc_sem);
+	kref_get(&line->ref);
+
+	INIT_WORK(&line_rq_ws->ws, pblk_gc_line_ws);
+	queue_work(gc->gc_line_reader_wq, &line_rq_ws->ws);
+
+	sec_left -= nr_secs;
+	if (sec_left > 0)
+		goto next_rq;
+
+out:
+	pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
 	mempool_free(line_ws, pblk->line_ws_pool);
-	pblk_put_line_back(pblk, line);
 
-	return 1;
+	kref_put(&line->ref, pblk_line_put);
+	atomic_dec(&gc->inflight_gc);
+
+	return;
+
+fail_free_gc_rq:
+	kfree(gc_rq);
+fail_free_emeta:
+	pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+	pblk_put_line_back(pblk, line);
+	kref_put(&line->ref, pblk_line_put);
+	mempool_free(line_ws, pblk->line_ws_pool);
+	atomic_dec(&gc->inflight_gc);
+
+	pr_err("pblk: Failed to GC line %d\n", line->id);
 }
 
-static void pblk_gc_lines(struct pblk *pblk, struct list_head *gc_list)
+static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
 {
-	struct pblk_line *line, *tline;
+	struct pblk_gc *gc = &pblk->gc;
+	struct pblk_line_ws *line_ws;
 
-	list_for_each_entry_safe(line, tline, gc_list, list) {
-		if (pblk_gc_line(pblk, line))
-			pr_err("pblk: failed to GC line %d\n", line->id);
-		list_del(&line->list);
+	pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
+
+	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+	if (!line_ws)
+		return -ENOMEM;
+
+	line_ws->pblk = pblk;
+	line_ws->line = line;
+
+	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
+	queue_work(gc->gc_reader_wq, &line_ws->ws);
+
+	return 0;
+}
+
+static int pblk_gc_read(struct pblk *pblk)
+{
+	struct pblk_gc *gc = &pblk->gc;
+	struct pblk_line *line;
+
+	spin_lock(&gc->r_lock);
+	if (list_empty(&gc->r_list)) {
+		spin_unlock(&gc->r_lock);
+		return 1;
 	}
+
+	line = list_first_entry(&gc->r_list, struct pblk_line, list);
+	list_del(&line->list);
+	spin_unlock(&gc->r_lock);
+
+	pblk_gc_kick(pblk);
+
+	if (pblk_gc_line(pblk, line))
+		pr_err("pblk: failed to GC line %d\n", line->id);
+
+	return 0;
+}
+
+static void pblk_gc_reader_kick(struct pblk_gc *gc)
+{
+	wake_up_process(gc->gc_reader_ts);
+}
+
+static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
+						 struct list_head *group_list)
+{
+	struct pblk_line *line, *victim;
+	int line_vsc, victim_vsc;
+
+	victim = list_first_entry(group_list, struct pblk_line, list);
+	list_for_each_entry(line, group_list, list) {
+		line_vsc = le32_to_cpu(*line->vsc);
+		victim_vsc = le32_to_cpu(*victim->vsc);
+		if (line_vsc < victim_vsc)
+			victim = line;
+	}
+
+	return victim;
+}
+
+static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
+{
+	unsigned int nr_blocks_free, nr_blocks_need;
+
+	nr_blocks_need = pblk_rl_high_thrs(rl);
+	nr_blocks_free = pblk_rl_nr_free_blks(rl);
+
+	/* This is not critical, no need to take lock here */
+	return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
 }
 
 /*
@@ -296,71 +348,83 @@ static void pblk_gc_run(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_gc *gc = &pblk->gc;
-	struct pblk_line *line, *tline;
-	unsigned int nr_blocks_free, nr_blocks_need;
+	struct pblk_line *line;
 	struct list_head *group_list;
-	int run_gc, gc_group = 0;
-	int prev_gc = 0;
-	int inflight_gc = atomic_read(&gc->inflight_gc);
-	LIST_HEAD(gc_list);
+	bool run_gc;
+	int inflight_gc, gc_group = 0, prev_group = 0;
 
-	spin_lock(&l_mg->gc_lock);
-	list_for_each_entry_safe(line, tline, &l_mg->gc_full_list, list) {
+	do {
+		spin_lock(&l_mg->gc_lock);
+		if (list_empty(&l_mg->gc_full_list)) {
+			spin_unlock(&l_mg->gc_lock);
+			break;
+		}
+
+		line = list_first_entry(&l_mg->gc_full_list,
+							struct pblk_line, list);
+
 		spin_lock(&line->lock);
 		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
 		line->state = PBLK_LINESTATE_GC;
 		spin_unlock(&line->lock);
 
 		list_del(&line->list);
-		kref_put(&line->ref, pblk_line_put);
-	}
-	spin_unlock(&l_mg->gc_lock);
+		spin_unlock(&l_mg->gc_lock);
 
-	nr_blocks_need = pblk_rl_gc_thrs(&pblk->rl);
-	nr_blocks_free = pblk_rl_nr_free_blks(&pblk->rl);
-	run_gc = (nr_blocks_need > nr_blocks_free || gc->gc_forced);
+		kref_put(&line->ref, pblk_line_put);
+	} while (1);
+
+	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
+	if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
+		return;
 
 next_gc_group:
 	group_list = l_mg->gc_lists[gc_group++];
-	spin_lock(&l_mg->gc_lock);
-	while (run_gc && !list_empty(group_list)) {
-		/* No need to queue up more GC lines than we can handle */
-		if (!run_gc || inflight_gc > gc->gc_jobs_active) {
+
+	do {
+		spin_lock(&l_mg->gc_lock);
+		if (list_empty(group_list)) {
 			spin_unlock(&l_mg->gc_lock);
-			pblk_gc_lines(pblk, &gc_list);
-			return;
+			break;
 		}
 
-		line = list_first_entry(group_list, struct pblk_line, list);
-		nr_blocks_free += atomic_read(&line->blk_in_line);
+		line = pblk_gc_get_victim_line(pblk, group_list);
 
 		spin_lock(&line->lock);
 		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
 		line->state = PBLK_LINESTATE_GC;
-		list_move_tail(&line->list, &gc_list);
-		atomic_inc(&gc->inflight_gc);
-		inflight_gc++;
 		spin_unlock(&line->lock);
 
-		prev_gc = 1;
-		run_gc = (nr_blocks_need > nr_blocks_free || gc->gc_forced);
-	}
-	spin_unlock(&l_mg->gc_lock);
+		list_del(&line->list);
+		spin_unlock(&l_mg->gc_lock);
 
-	pblk_gc_lines(pblk, &gc_list);
+		spin_lock(&gc->r_lock);
+		list_add_tail(&line->list, &gc->r_list);
+		spin_unlock(&gc->r_lock);
 
-	if (!prev_gc && pblk->rl.rb_state > gc_group &&
-						gc_group < PBLK_NR_GC_LISTS)
+		inflight_gc = atomic_inc_return(&gc->inflight_gc);
+		pblk_gc_reader_kick(gc);
+
+		prev_group = 1;
+
+		/* No need to queue up more GC lines than we can handle */
+		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
+		if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
+			break;
+	} while (1);
+
+	if (!prev_group && pblk->rl.rb_state > gc_group &&
+						gc_group < PBLK_GC_NR_LISTS)
 		goto next_gc_group;
 }
 
-
-static void pblk_gc_kick(struct pblk *pblk)
+void pblk_gc_kick(struct pblk *pblk)
 {
 	struct pblk_gc *gc = &pblk->gc;
 
 	wake_up_process(gc->gc_ts);
 	pblk_gc_writer_kick(gc);
+	pblk_gc_reader_kick(gc);
 	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 }
 
@@ -398,42 +462,34 @@ static int pblk_gc_writer_ts(void *data)
 	return 0;
 }
 
+static int pblk_gc_reader_ts(void *data)
+{
+	struct pblk *pblk = data;
+
+	while (!kthread_should_stop()) {
+		if (!pblk_gc_read(pblk))
+			continue;
+		set_current_state(TASK_INTERRUPTIBLE);
+		io_schedule();
+	}
+
+	return 0;
+}
+
 static void pblk_gc_start(struct pblk *pblk)
 {
 	pblk->gc.gc_active = 1;
-
 	pr_debug("pblk: gc start\n");
 }
 
-int pblk_gc_status(struct pblk *pblk)
-{
-	struct pblk_gc *gc = &pblk->gc;
-	int ret;
-
-	spin_lock(&gc->lock);
-	ret = gc->gc_active;
-	spin_unlock(&gc->lock);
-
-	return ret;
-}
-
-static void __pblk_gc_should_start(struct pblk *pblk)
-{
-	struct pblk_gc *gc = &pblk->gc;
-
-	lockdep_assert_held(&gc->lock);
-
-	if (gc->gc_enabled && !gc->gc_active)
-		pblk_gc_start(pblk);
-}
-
 void pblk_gc_should_start(struct pblk *pblk)
 {
 	struct pblk_gc *gc = &pblk->gc;
 
-	spin_lock(&gc->lock);
-	__pblk_gc_should_start(pblk);
-	spin_unlock(&gc->lock);
+	if (gc->gc_enabled && !gc->gc_active)
+		pblk_gc_start(pblk);
+
+	pblk_gc_kick(pblk);
 }
 
 /*
@@ -442,10 +498,7 @@ void pblk_gc_should_start(struct pblk *pblk)
  */
 static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
 {
-	spin_lock(&pblk->gc.lock);
 	pblk->gc.gc_active = 0;
-	spin_unlock(&pblk->gc.lock);
-
 	pr_debug("pblk: gc stop\n");
 }
 
@@ -468,20 +521,25 @@ void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 	spin_unlock(&gc->lock);
 }
 
-void pblk_gc_sysfs_force(struct pblk *pblk, int force)
+int pblk_gc_sysfs_force(struct pblk *pblk, int force)
 {
 	struct pblk_gc *gc = &pblk->gc;
-	int rsv = 0;
+
+	if (force < 0 || force > 1)
+		return -EINVAL;
 
 	spin_lock(&gc->lock);
-	if (force) {
-		gc->gc_enabled = 1;
-		rsv = 64;
-	}
-	pblk_rl_set_gc_rsc(&pblk->rl, rsv);
 	gc->gc_forced = force;
-	__pblk_gc_should_start(pblk);
+
+	if (force)
+		gc->gc_enabled = 1;
+	else
+		gc->gc_enabled = 0;
 	spin_unlock(&gc->lock);
+
+	pblk_gc_should_start(pblk);
+
+	return 0;
 }
 
 int pblk_gc_init(struct pblk *pblk)
@@ -503,30 +561,58 @@ int pblk_gc_init(struct pblk *pblk)
 		goto fail_free_main_kthread;
 	}
 
+	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
+							"pblk-gc-reader-ts");
+	if (IS_ERR(gc->gc_reader_ts)) {
+		pr_err("pblk: could not allocate GC reader kthread\n");
+		ret = PTR_ERR(gc->gc_reader_ts);
+		goto fail_free_writer_kthread;
+	}
+
 	setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
 	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
 
 	gc->gc_active = 0;
 	gc->gc_forced = 0;
 	gc->gc_enabled = 1;
-	gc->gc_jobs_active = 8;
 	gc->w_entries = 0;
 	atomic_set(&gc->inflight_gc, 0);
 
-	gc->gc_reader_wq = alloc_workqueue("pblk-gc-reader-wq",
-			WQ_MEM_RECLAIM | WQ_UNBOUND, gc->gc_jobs_active);
+	/* Workqueue that reads valid sectors from a line and submits them to
+	 * the GC writer to be recycled.
+	 */
+	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
+			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
+	if (!gc->gc_line_reader_wq) {
+		pr_err("pblk: could not allocate GC line reader workqueue\n");
+		ret = -ENOMEM;
+		goto fail_free_reader_kthread;
+	}
+
+	/* Workqueue that prepares lines for GC */
+	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
+					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
 	if (!gc->gc_reader_wq) {
 		pr_err("pblk: could not allocate GC reader workqueue\n");
 		ret = -ENOMEM;
-		goto fail_free_writer_kthread;
+		goto fail_free_reader_line_wq;
 	}
 
 	spin_lock_init(&gc->lock);
 	spin_lock_init(&gc->w_lock);
+	spin_lock_init(&gc->r_lock);
+
+	sema_init(&gc->gc_sem, 128);
+
 	INIT_LIST_HEAD(&gc->w_list);
+	INIT_LIST_HEAD(&gc->r_list);
 
 	return 0;
 
+fail_free_reader_line_wq:
+	destroy_workqueue(gc->gc_line_reader_wq);
+fail_free_reader_kthread:
+	kthread_stop(gc->gc_reader_ts);
 fail_free_writer_kthread:
 	kthread_stop(gc->gc_writer_ts);
 fail_free_main_kthread:
@@ -540,6 +626,7 @@ void pblk_gc_exit(struct pblk *pblk)
 	struct pblk_gc *gc = &pblk->gc;
 
 	flush_workqueue(gc->gc_reader_wq);
+	flush_workqueue(gc->gc_line_reader_wq);
 
 	del_timer(&gc->gc_timer);
 	pblk_gc_stop(pblk, 1);
@@ -547,9 +634,15 @@ void pblk_gc_exit(struct pblk *pblk)
 	if (gc->gc_ts)
 		kthread_stop(gc->gc_ts);
 
-	if (pblk->gc.gc_reader_wq)
-		destroy_workqueue(pblk->gc.gc_reader_wq);
+	if (gc->gc_reader_wq)
+		destroy_workqueue(gc->gc_reader_wq);
+
+	if (gc->gc_line_reader_wq)
+		destroy_workqueue(gc->gc_line_reader_wq);
 
 	if (gc->gc_writer_ts)
 		kthread_stop(gc->gc_writer_ts);
+
+	if (gc->gc_reader_ts)
+		kthread_stop(gc->gc_reader_ts);
 }
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index ae8cd6d..1b0f612 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,9 +20,10 @@
 
 #include "pblk.h"
 
-static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
-					*pblk_w_rq_cache, *pblk_line_meta_cache;
+static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
+				*pblk_w_rq_cache, *pblk_line_meta_cache;
 static DECLARE_RWSEM(pblk_lock);
+struct bio_set *pblk_bio_set;
 
 static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
 			  struct bio *bio)
@@ -33,7 +34,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
 	 * constraint. Writes can be of arbitrary size.
 	 */
 	if (bio_data_dir(bio) == READ) {
-		blk_queue_split(q, &bio, q->bio_split);
+		blk_queue_split(q, &bio);
 		ret = pblk_submit_read(pblk, bio);
 		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
 			bio_put(bio);
@@ -46,7 +47,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
 	 * available for user I/O.
 	 */
 	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
-		blk_queue_split(q, &bio, q->bio_split);
+		blk_queue_split(q, &bio);
 
 	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
 }
@@ -199,9 +200,9 @@ static int pblk_init_global_caches(struct pblk *pblk)
 		return -ENOMEM;
 	}
 
-	pblk_r_rq_cache = kmem_cache_create("pblk_r_rq", pblk_r_rq_size,
+	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
 				0, 0, NULL);
-	if (!pblk_r_rq_cache) {
+	if (!pblk_g_rq_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
 		up_write(&pblk_lock);
@@ -213,7 +214,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
 	if (!pblk_w_rq_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
-		kmem_cache_destroy(pblk_r_rq_cache);
+		kmem_cache_destroy(pblk_g_rq_cache);
 		up_write(&pblk_lock);
 		return -ENOMEM;
 	}
@@ -225,7 +226,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
 	if (!pblk_line_meta_cache) {
 		kmem_cache_destroy(pblk_blk_ws_cache);
 		kmem_cache_destroy(pblk_rec_cache);
-		kmem_cache_destroy(pblk_r_rq_cache);
+		kmem_cache_destroy(pblk_g_rq_cache);
 		kmem_cache_destroy(pblk_w_rq_cache);
 		up_write(&pblk_lock);
 		return -ENOMEM;
@@ -239,27 +240,10 @@ static int pblk_core_init(struct pblk *pblk)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	int max_write_ppas;
-	int mod;
 
-	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
-	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
-	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
-				max_write_ppas : nvm_max_phys_sects(dev);
 	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
 						geo->nr_planes * geo->nr_luns;
 
-	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
-		pr_err("pblk: cannot support device max_phys_sect\n");
-		return -EINVAL;
-	}
-
-	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
-	if (mod) {
-		pr_err("pblk: bad configuration of sectors/pages\n");
-		return -EINVAL;
-	}
-
 	if (pblk_init_global_caches(pblk))
 		return -ENOMEM;
 
@@ -267,7 +251,7 @@ static int pblk_core_init(struct pblk *pblk)
 	if (!pblk->page_pool)
 		return -ENOMEM;
 
-	pblk->line_ws_pool = mempool_create_slab_pool(geo->nr_luns,
+	pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
 							pblk_blk_ws_cache);
 	if (!pblk->line_ws_pool)
 		goto free_page_pool;
@@ -276,41 +260,51 @@ static int pblk_core_init(struct pblk *pblk)
 	if (!pblk->rec_pool)
 		goto free_blk_ws_pool;
 
-	pblk->r_rq_pool = mempool_create_slab_pool(64, pblk_r_rq_cache);
-	if (!pblk->r_rq_pool)
+	pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
+							pblk_g_rq_cache);
+	if (!pblk->g_rq_pool)
 		goto free_rec_pool;
 
-	pblk->w_rq_pool = mempool_create_slab_pool(64, pblk_w_rq_cache);
+	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
+							pblk_w_rq_cache);
 	if (!pblk->w_rq_pool)
-		goto free_r_rq_pool;
+		goto free_g_rq_pool;
 
 	pblk->line_meta_pool =
-			mempool_create_slab_pool(16, pblk_line_meta_cache);
+			mempool_create_slab_pool(PBLK_META_POOL_SIZE,
+							pblk_line_meta_cache);
 	if (!pblk->line_meta_pool)
 		goto free_w_rq_pool;
 
-	pblk->kw_wq = alloc_workqueue("pblk-aux-wq",
-					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
-	if (!pblk->kw_wq)
+	pblk->close_wq = alloc_workqueue("pblk-close-wq",
+			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
+	if (!pblk->close_wq)
 		goto free_line_meta_pool;
 
+	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
+			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+	if (!pblk->bb_wq)
+		goto free_close_wq;
+
 	if (pblk_set_ppaf(pblk))
-		goto free_kw_wq;
+		goto free_bb_wq;
 
 	if (pblk_rwb_init(pblk))
-		goto free_kw_wq;
+		goto free_bb_wq;
 
 	INIT_LIST_HEAD(&pblk->compl_list);
 	return 0;
 
-free_kw_wq:
-	destroy_workqueue(pblk->kw_wq);
+free_bb_wq:
+	destroy_workqueue(pblk->bb_wq);
+free_close_wq:
+	destroy_workqueue(pblk->close_wq);
 free_line_meta_pool:
 	mempool_destroy(pblk->line_meta_pool);
 free_w_rq_pool:
 	mempool_destroy(pblk->w_rq_pool);
-free_r_rq_pool:
-	mempool_destroy(pblk->r_rq_pool);
+free_g_rq_pool:
+	mempool_destroy(pblk->g_rq_pool);
 free_rec_pool:
 	mempool_destroy(pblk->rec_pool);
 free_blk_ws_pool:
@@ -322,19 +316,22 @@ static int pblk_core_init(struct pblk *pblk)
 
 static void pblk_core_free(struct pblk *pblk)
 {
-	if (pblk->kw_wq)
-		destroy_workqueue(pblk->kw_wq);
+	if (pblk->close_wq)
+		destroy_workqueue(pblk->close_wq);
+
+	if (pblk->bb_wq)
+		destroy_workqueue(pblk->bb_wq);
 
 	mempool_destroy(pblk->page_pool);
 	mempool_destroy(pblk->line_ws_pool);
 	mempool_destroy(pblk->rec_pool);
-	mempool_destroy(pblk->r_rq_pool);
+	mempool_destroy(pblk->g_rq_pool);
 	mempool_destroy(pblk->w_rq_pool);
 	mempool_destroy(pblk->line_meta_pool);
 
 	kmem_cache_destroy(pblk_blk_ws_cache);
 	kmem_cache_destroy(pblk_rec_cache);
-	kmem_cache_destroy(pblk_r_rq_cache);
+	kmem_cache_destroy(pblk_g_rq_cache);
 	kmem_cache_destroy(pblk_w_rq_cache);
 	kmem_cache_destroy(pblk_line_meta_cache);
 }
@@ -344,6 +341,12 @@ static void pblk_luns_free(struct pblk *pblk)
 	kfree(pblk->luns);
 }
 
+static void pblk_free_line_bitmaps(struct pblk_line *line)
+{
+	kfree(line->blk_bitmap);
+	kfree(line->erase_bitmap);
+}
+
 static void pblk_lines_free(struct pblk *pblk)
 {
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -355,8 +358,7 @@ static void pblk_lines_free(struct pblk *pblk)
 		line = &pblk->lines[i];
 
 		pblk_line_free(pblk, line);
-		kfree(line->blk_bitmap);
-		kfree(line->erase_bitmap);
+		pblk_free_line_bitmaps(line);
 	}
 	spin_unlock(&l_mg->free_lock);
 }
@@ -368,11 +370,15 @@ static void pblk_line_meta_free(struct pblk *pblk)
 
 	kfree(l_mg->bb_template);
 	kfree(l_mg->bb_aux);
+	kfree(l_mg->vsc_list);
 
+	spin_lock(&l_mg->free_lock);
 	for (i = 0; i < PBLK_DATA_LINES; i++) {
-		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
-		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
+		kfree(l_mg->sline_meta[i]);
+		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
+		kfree(l_mg->eline_meta[i]);
 	}
+	spin_unlock(&l_mg->free_lock);
 
 	kfree(pblk->lines);
 }
@@ -411,13 +417,31 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
 	return ret;
 }
 
-static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
+static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
+			int blk_per_line)
 {
-	struct pblk_line_meta *lm = &pblk->lm;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
 	struct pblk_lun *rlun;
 	int bb_cnt = 0;
 	int i;
 
+	for (i = 0; i < blk_per_line; i++) {
+		rlun = &pblk->luns[i];
+		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
+			continue;
+
+		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
+		bb_cnt++;
+	}
+
+	return bb_cnt;
+}
+
+static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
+{
+	struct pblk_line_meta *lm = &pblk->lm;
+
 	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
 	if (!line->blk_bitmap)
 		return -ENOMEM;
@@ -428,16 +452,7 @@ static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < lm->blk_per_line; i++) {
-		rlun = &pblk->luns[i];
-		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
-			continue;
-
-		set_bit(i, line->blk_bitmap);
-		bb_cnt++;
-	}
-
-	return bb_cnt;
+	return 0;
 }
 
 static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
@@ -505,12 +520,32 @@ static int pblk_lines_configure(struct pblk *pblk, int flags)
 }
 
 /* See comment over struct line_emeta definition */
-static unsigned int calc_emeta_len(struct pblk *pblk, struct pblk_line_meta *lm)
+static unsigned int calc_emeta_len(struct pblk *pblk)
 {
-	return (sizeof(struct line_emeta) +
-			((lm->sec_per_line - lm->emeta_sec) * sizeof(u64)) +
-			(pblk->l_mg.nr_lines * sizeof(u32)) +
-			lm->blk_bitmap_len);
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+
+	/* Round to sector size so that lba_list starts on its own sector */
+	lm->emeta_sec[1] = DIV_ROUND_UP(
+			sizeof(struct line_emeta) + lm->blk_bitmap_len,
+			geo->sec_size);
+	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;
+
+	/* Round to sector size so that vsc_list starts on its own sector */
+	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
+	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
+			geo->sec_size);
+	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;
+
+	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
+			geo->sec_size);
+	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;
+
+	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);
+
+	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
 }
 
 static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
@@ -534,6 +569,78 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
 	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
 }
 
+static int pblk_lines_alloc_metadata(struct pblk *pblk)
+{
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line_meta *lm = &pblk->lm;
+	int i;
+
+	/* smeta is always small enough to fit in a kmalloc allocation, whereas
+	 * emeta depends on the number of LUNs allocated to the pblk instance
+	 */
+	for (i = 0; i < PBLK_DATA_LINES; i++) {
+		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
+		if (!l_mg->sline_meta[i])
+			goto fail_free_smeta;
+	}
+
+	/* emeta allocates three different buffers for managing metadata with
+	 * in-memory and in-media layouts
+	 */
+	for (i = 0; i < PBLK_DATA_LINES; i++) {
+		struct pblk_emeta *emeta;
+
+		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
+		if (!emeta)
+			goto fail_free_emeta;
+
+		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
+			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
+
+			emeta->buf = vmalloc(lm->emeta_len[0]);
+			if (!emeta->buf) {
+				kfree(emeta);
+				goto fail_free_emeta;
+			}
+
+			emeta->nr_entries = lm->emeta_sec[0];
+			l_mg->eline_meta[i] = emeta;
+		} else {
+			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
+
+			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
+			if (!emeta->buf) {
+				kfree(emeta);
+				goto fail_free_emeta;
+			}
+
+			emeta->nr_entries = lm->emeta_sec[0];
+			l_mg->eline_meta[i] = emeta;
+		}
+	}
+
+	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
+	if (!l_mg->vsc_list)
+		goto fail_free_emeta;
+
+	for (i = 0; i < l_mg->nr_lines; i++)
+		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);
+
+	return 0;
+
+fail_free_emeta:
+	while (--i >= 0) {
+		vfree(l_mg->eline_meta[i]->buf);
+		kfree(l_mg->eline_meta[i]);
+	}
+
+fail_free_smeta:
+	for (i = 0; i < PBLK_DATA_LINES; i++)
+		kfree(l_mg->sline_meta[i]);
+
+	return -ENOMEM;
+}
+
 static int pblk_lines_init(struct pblk *pblk)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
@@ -542,10 +649,32 @@ static int pblk_lines_init(struct pblk *pblk)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line *line;
 	unsigned int smeta_len, emeta_len;
-	long nr_bad_blks, nr_meta_blks, nr_free_blks;
-	int bb_distance;
-	int i;
-	int ret;
+	long nr_bad_blks, nr_free_blks;
+	int bb_distance, max_write_ppas, mod;
+	int i, ret;
+
+	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
+	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
+				max_write_ppas : nvm_max_phys_sects(dev);
+	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
+
+	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
+		pr_err("pblk: cannot support device max_phys_sect\n");
+		return -EINVAL;
+	}
+
+	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+	if (mod) {
+		pr_err("pblk: bad configuration of sectors/pages\n");
+		return -EINVAL;
+	}
+
+	l_mg->nr_lines = geo->blks_per_lun;
+	l_mg->log_line = l_mg->data_line = NULL;
+	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
+	l_mg->nr_free_lines = 0;
+	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
 
 	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
 	lm->blk_per_line = geo->nr_luns;
@@ -554,20 +683,17 @@ static int pblk_lines_init(struct pblk *pblk)
 	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
 	lm->high_thrs = lm->sec_per_line / 2;
 	lm->mid_thrs = lm->sec_per_line / 4;
+	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
 
 	/* Calculate necessary pages for smeta. See comment over struct
 	 * line_smeta definition
 	 */
-	lm->smeta_len = sizeof(struct line_smeta) +
-				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
-
 	i = 1;
 add_smeta_page:
 	lm->smeta_sec = i * geo->sec_per_pl;
 	lm->smeta_len = lm->smeta_sec * geo->sec_size;
 
-	smeta_len = sizeof(struct line_smeta) +
-				PBLK_LINE_NR_LUN_BITMAP * lm->lun_bitmap_len;
+	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
 	if (smeta_len > lm->smeta_len) {
 		i++;
 		goto add_smeta_page;
@@ -578,66 +704,28 @@ static int pblk_lines_init(struct pblk *pblk)
 	 */
 	i = 1;
 add_emeta_page:
-	lm->emeta_sec = i * geo->sec_per_pl;
-	lm->emeta_len = lm->emeta_sec * geo->sec_size;
+	lm->emeta_sec[0] = i * geo->sec_per_pl;
+	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;
 
-	emeta_len = calc_emeta_len(pblk, lm);
-	if (emeta_len > lm->emeta_len) {
+	emeta_len = calc_emeta_len(pblk);
+	if (emeta_len > lm->emeta_len[0]) {
 		i++;
 		goto add_emeta_page;
 	}
+
 	lm->emeta_bb = geo->nr_luns - i;
-
-	nr_meta_blks = (lm->smeta_sec + lm->emeta_sec +
-				(geo->sec_per_blk / 2)) / geo->sec_per_blk;
-	lm->min_blk_line = nr_meta_blks + 1;
-
-	l_mg->nr_lines = geo->blks_per_lun;
-	l_mg->log_line = l_mg->data_line = NULL;
-	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
-	l_mg->nr_free_lines = 0;
-	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
-
-	/* smeta is always small enough to fit on a kmalloc memory allocation,
-	 * emeta depends on the number of LUNs allocated to the pblk instance
-	 */
-	l_mg->smeta_alloc_type = PBLK_KMALLOC_META;
-	for (i = 0; i < PBLK_DATA_LINES; i++) {
-		l_mg->sline_meta[i].meta = kmalloc(lm->smeta_len, GFP_KERNEL);
-		if (!l_mg->sline_meta[i].meta)
-			while (--i >= 0) {
-				kfree(l_mg->sline_meta[i].meta);
-				ret = -ENOMEM;
-				goto fail;
-			}
+	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
+							geo->sec_per_blk);
+	if (lm->min_blk_line > lm->blk_per_line) {
+		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
+							lm->blk_per_line);
+		ret = -EINVAL;
+		goto fail;
 	}
 
-	if (lm->emeta_len > KMALLOC_MAX_CACHE_SIZE) {
-		l_mg->emeta_alloc_type = PBLK_VMALLOC_META;
-
-		for (i = 0; i < PBLK_DATA_LINES; i++) {
-			l_mg->eline_meta[i].meta = vmalloc(lm->emeta_len);
-			if (!l_mg->eline_meta[i].meta)
-				while (--i >= 0) {
-					vfree(l_mg->eline_meta[i].meta);
-					ret = -ENOMEM;
-					goto fail;
-				}
-		}
-	} else {
-		l_mg->emeta_alloc_type = PBLK_KMALLOC_META;
-
-		for (i = 0; i < PBLK_DATA_LINES; i++) {
-			l_mg->eline_meta[i].meta =
-					kmalloc(lm->emeta_len, GFP_KERNEL);
-			if (!l_mg->eline_meta[i].meta)
-				while (--i >= 0) {
-					kfree(l_mg->eline_meta[i].meta);
-					ret = -ENOMEM;
-					goto fail;
-				}
-		}
-	}
+	ret = pblk_lines_alloc_metadata(pblk);
+	if (ret)
+		goto fail;
 
 	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
 	if (!l_mg->bb_template) {
@@ -664,11 +752,14 @@ static int pblk_lines_init(struct pblk *pblk)
 	INIT_LIST_HEAD(&l_mg->gc_low_list);
 	INIT_LIST_HEAD(&l_mg->gc_empty_list);
 
+	INIT_LIST_HEAD(&l_mg->emeta_list);
+
 	l_mg->gc_lists[0] = &l_mg->gc_high_list;
 	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
 	l_mg->gc_lists[2] = &l_mg->gc_low_list;
 
 	spin_lock_init(&l_mg->free_lock);
+	spin_lock_init(&l_mg->close_lock);
 	spin_lock_init(&l_mg->gc_lock);
 
 	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
@@ -689,10 +780,16 @@ static int pblk_lines_init(struct pblk *pblk)
 		line->type = PBLK_LINETYPE_FREE;
 		line->state = PBLK_LINESTATE_FREE;
 		line->gc_group = PBLK_LINEGC_NONE;
+		line->vsc = &l_mg->vsc_list[i];
 		spin_lock_init(&line->lock);
 
-		nr_bad_blks = pblk_bb_line(pblk, line);
+		ret = pblk_alloc_line_bitmaps(pblk, line);
+		if (ret)
+			goto fail_free_lines;
+
+		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
 		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
+			pblk_free_line_bitmaps(line);
 			ret = -EINVAL;
 			goto fail_free_lines;
 		}
@@ -713,24 +810,20 @@ static int pblk_lines_init(struct pblk *pblk)
 
 	pblk_set_provision(pblk, nr_free_blks);
 
-	sema_init(&pblk->erase_sem, 1);
-
 	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
 	for (i = 0; i < geo->nr_luns; i++)
 		kfree(pblk->luns[i].bb_list);
 
 	return 0;
 fail_free_lines:
-	kfree(pblk->lines);
+	while (--i >= 0)
+		pblk_free_line_bitmaps(&pblk->lines[i]);
 fail_free_bb_aux:
 	kfree(l_mg->bb_aux);
 fail_free_bb_template:
 	kfree(l_mg->bb_template);
 fail_free_meta:
-	for (i = 0; i < PBLK_DATA_LINES; i++) {
-		pblk_mfree(l_mg->sline_meta[i].meta, l_mg->smeta_alloc_type);
-		pblk_mfree(l_mg->eline_meta[i].meta, l_mg->emeta_alloc_type);
-	}
+	pblk_line_meta_free(pblk);
 fail:
 	for (i = 0; i < geo->nr_luns; i++)
 		kfree(pblk->luns[i].bb_list);
@@ -754,6 +847,15 @@ static int pblk_writer_init(struct pblk *pblk)
 
 static void pblk_writer_stop(struct pblk *pblk)
 {
+	/* The pipeline must be stopped and the write buffer emptied before the
+	 * write thread is stopped
+	 */
+	WARN(pblk_rb_read_count(&pblk->rwb),
+			"Stopping not fully persisted write buffer\n");
+
+	WARN(pblk_rb_sync_count(&pblk->rwb),
+			"Stopping not fully synced write buffer\n");
+
 	if (pblk->writer_ts)
 		kthread_stop(pblk->writer_ts);
 	del_timer(&pblk->wtimer);
@@ -772,10 +874,9 @@ static void pblk_free(struct pblk *pblk)
 
 static void pblk_tear_down(struct pblk *pblk)
 {
-	pblk_flush_writer(pblk);
+	pblk_pipeline_stop(pblk);
 	pblk_writer_stop(pblk);
 	pblk_rb_sync_l2p(&pblk->rwb);
-	pblk_recov_pad(pblk);
 	pblk_rwb_free(pblk);
 	pblk_rl_free(&pblk->rl);
 
@@ -821,6 +922,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 
 	pblk->dev = dev;
 	pblk->disk = tdisk;
+	pblk->state = PBLK_STATE_RUNNING;
 
 	spin_lock_init(&pblk->trans_lock);
 	spin_lock_init(&pblk->lock);
@@ -836,8 +938,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	atomic_long_set(&pblk->req_writes, 0);
 	atomic_long_set(&pblk->sub_writes, 0);
 	atomic_long_set(&pblk->sync_writes, 0);
-	atomic_long_set(&pblk->compl_writes, 0);
 	atomic_long_set(&pblk->inflight_reads, 0);
+	atomic_long_set(&pblk->cache_reads, 0);
 	atomic_long_set(&pblk->sync_reads, 0);
 	atomic_long_set(&pblk->recov_writes, 0);
 	atomic_long_set(&pblk->recov_writes, 0);
@@ -946,11 +1048,20 @@ static struct nvm_tgt_type tt_pblk = {
 
 static int __init pblk_module_init(void)
 {
-	return nvm_register_tgt_type(&tt_pblk);
+	int ret;
+
+	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
+	if (!pblk_bio_set)
+		return -ENOMEM;
+	ret = nvm_register_tgt_type(&tt_pblk);
+	if (ret)
+		bioset_free(pblk_bio_set);
+	return ret;
 }
 
 static void pblk_module_exit(void)
 {
+	bioset_free(pblk_bio_set);
 	nvm_unregister_tgt_type(&tt_pblk);
 }
 
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 17c1695..fddb924 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -25,9 +25,9 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 			       unsigned int valid_secs)
 {
 	struct pblk_line *line = pblk_line_get_data(pblk);
-	struct line_emeta *emeta = line->emeta;
+	struct pblk_emeta *emeta = line->emeta;
 	struct pblk_w_ctx *w_ctx;
-	__le64 *lba_list = pblk_line_emeta_to_lbas(emeta);
+	__le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
 	u64 paddr;
 	int nr_secs = pblk->min_write_pgs;
 	int i;
@@ -51,18 +51,20 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 			w_ctx->ppa = ppa_list[i];
 			meta_list[i].lba = cpu_to_le64(w_ctx->lba);
 			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
-			le64_add_cpu(&line->emeta->nr_valid_lbas, 1);
+			line->nr_valid_lbas++;
 		} else {
-			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
-			lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
-			pblk_map_pad_invalidate(pblk, line, paddr);
+			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
+			lba_list[paddr] = meta_list[i].lba = addr_empty;
+			__pblk_map_invalidate(pblk, line, paddr);
 		}
 	}
 
 	if (pblk_line_is_full(line)) {
-		line = pblk_line_replace_data(pblk);
-		if (!line)
-			return;
+		struct pblk_line *prev_line = line;
+
+		pblk_line_replace_data(pblk);
+		pblk_line_close_meta(pblk, prev_line);
 	}
 
 	pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
@@ -91,8 +93,9 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct pblk_line *e_line = pblk_line_get_data_next(pblk);
+	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	struct pblk_line *e_line, *d_line;
 	unsigned int map_secs;
 	int min = pblk->min_write_pgs;
 	int i, erase_lun;
@@ -102,35 +105,63 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 		pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
 					lun_bitmap, &meta_list[i], map_secs);
 
-		erase_lun = rqd->ppa_list[i].g.lun * geo->nr_chnls +
-							rqd->ppa_list[i].g.ch;
+		erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
 
+		/* line can change after page map. We might also be writing the
+		 * last line.
+		 */
+		e_line = pblk_line_get_erase(pblk);
+		if (!e_line)
+			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
+							valid_secs, i + min);
+
+		spin_lock(&e_line->lock);
 		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
-			if (down_trylock(&pblk->erase_sem))
-				continue;
-
 			set_bit(erase_lun, e_line->erase_bitmap);
 			atomic_dec(&e_line->left_eblks);
+
 			*erase_ppa = rqd->ppa_list[i];
 			erase_ppa->g.blk = e_line->id;
 
+			spin_unlock(&e_line->lock);
+
 			/* Avoid evaluating e_line->left_eblks */
 			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
 							valid_secs, i + min);
 		}
+		spin_unlock(&e_line->lock);
 	}
 
-	/* Erase blocks that are bad in this line but might not be in next */
-	if (unlikely(ppa_empty(*erase_ppa))) {
-		struct pblk_line_meta *lm = &pblk->lm;
+	d_line = pblk_line_get_data(pblk);
 
-		i = find_first_zero_bit(e_line->erase_bitmap, lm->blk_per_line);
-		if (i == lm->blk_per_line)
+	/* line can change after page map. We might also be writing the
+	 * last line.
+	 */
+	e_line = pblk_line_get_erase(pblk);
+	if (!e_line)
+		return;
+
+	/* Erase blocks that are bad in this line but might not be in next */
+	if (unlikely(ppa_empty(*erase_ppa)) &&
+			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
+		int bit = -1;
+
+retry:
+		bit = find_next_bit(d_line->blk_bitmap,
+						lm->blk_per_line, bit + 1);
+		if (bit >= lm->blk_per_line)
 			return;
 
-		set_bit(i, e_line->erase_bitmap);
+		spin_lock(&e_line->lock);
+		if (test_bit(bit, e_line->erase_bitmap)) {
+			spin_unlock(&e_line->lock);
+			goto retry;
+		}
+		spin_unlock(&e_line->lock);
+
+		set_bit(bit, e_line->erase_bitmap);
 		atomic_dec(&e_line->left_eblks);
-		*erase_ppa = pblk->luns[i].bppa; /* set ch and lun */
+		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
 		erase_ppa->g.blk = e_line->id;
 	}
 }
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 045384d..5ecc154 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -150,6 +150,7 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx)
 	/* Release flags on context. Protect from writes and reads */
 	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
 	pblk_ppa_set_empty(&w_ctx->ppa);
+	w_ctx->lba = ADDR_EMPTY;
 }
 
 #define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
@@ -180,6 +181,14 @@ unsigned int pblk_rb_read_count(struct pblk_rb *rb)
 	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
 }
 
+unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
+{
+	unsigned int mem = READ_ONCE(rb->mem);
+	unsigned int sync = READ_ONCE(rb->sync);
+
+	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
+}
+
 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
 {
 	unsigned int subm;
@@ -199,12 +208,22 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
 	struct pblk_line *line;
 	struct pblk_rb_entry *entry;
 	struct pblk_w_ctx *w_ctx;
+	unsigned int user_io = 0, gc_io = 0;
 	unsigned int i;
+	int flags;
 
 	for (i = 0; i < to_update; i++) {
 		entry = &rb->entries[*l2p_upd];
 		w_ctx = &entry->w_ctx;
 
+		flags = READ_ONCE(entry->w_ctx.flags);
+		if (flags & PBLK_IOTYPE_USER)
+			user_io++;
+		else if (flags & PBLK_IOTYPE_GC)
+			gc_io++;
+		else
+			WARN(1, "pblk: unknown IO type\n");
+
 		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
 							entry->cacheline);
 
@@ -214,6 +233,8 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
 		*l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
 	}
 
+	pblk_rl_out(&pblk->rl, user_io, gc_io);
+
 	return 0;
 }
 
@@ -357,6 +378,9 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
 	/* Protect syncs */
 	smp_store_release(&rb->sync_point, sync_point);
 
+	if (!bio)
+		return 0;
+
 	spin_lock_irq(&rb->s_lock);
 	bio_list_add(&entry->w_ctx.bios, bio);
 	spin_unlock_irq(&rb->s_lock);
@@ -395,6 +419,17 @@ static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
 	return 1;
 }
 
+void pblk_rb_flush(struct pblk_rb *rb)
+{
+	struct pblk *pblk = container_of(rb, struct pblk, rwb);
+	unsigned int mem = READ_ONCE(rb->mem);
+
+	if (pblk_rb_sync_point_set(rb, NULL, mem))
+		return;
+
+	pblk_write_should_kick(pblk);
+}
+
 static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
 				   unsigned int *pos, struct bio *bio,
 				   int *io_ret)
@@ -431,15 +466,16 @@ int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 			   unsigned int nr_entries, unsigned int *pos)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
-	int flush_done;
+	int io_ret;
 
 	spin_lock(&rb->w_lock);
-	if (!pblk_rl_user_may_insert(&pblk->rl, nr_entries)) {
+	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
+	if (io_ret) {
 		spin_unlock(&rb->w_lock);
-		return NVM_IO_REQUEUE;
+		return io_ret;
 	}
 
-	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &flush_done)) {
+	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
 		spin_unlock(&rb->w_lock);
 		return NVM_IO_REQUEUE;
 	}
@@ -447,7 +483,7 @@ int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 	pblk_rl_user_in(&pblk->rl, nr_entries);
 	spin_unlock(&rb->w_lock);
 
-	return flush_done;
+	return io_ret;
 }
 
 /*
@@ -521,20 +557,18 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
  * This function is used by the write thread to form the write bio that will
  * persist data on the write buffer to the media.
  */
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
-				 struct pblk_c_ctx *c_ctx,
-				 unsigned int pos,
-				 unsigned int nr_entries,
-				 unsigned int count)
+unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
+				 struct bio *bio, unsigned int pos,
+				 unsigned int nr_entries, unsigned int count)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
+	struct request_queue *q = pblk->dev->q;
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_rb_entry *entry;
 	struct page *page;
-	unsigned int pad = 0, read = 0, to_read = nr_entries;
-	unsigned int user_io = 0, gc_io = 0;
+	unsigned int pad = 0, to_read = nr_entries;
 	unsigned int i;
 	int flags;
-	int ret;
 
 	if (count < nr_entries) {
 		pad = nr_entries - count;
@@ -553,15 +587,10 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
 		 */
 try:
 		flags = READ_ONCE(entry->w_ctx.flags);
-		if (!(flags & PBLK_WRITTEN_DATA))
+		if (!(flags & PBLK_WRITTEN_DATA)) {
+			io_schedule();
 			goto try;
-
-		if (flags & PBLK_IOTYPE_USER)
-			user_io++;
-		else if (flags & PBLK_IOTYPE_GC)
-			gc_io++;
-		else
-			WARN(1, "pblk: unknown IO type\n");
+		}
 
 		page = virt_to_page(entry->data);
 		if (!page) {
@@ -570,17 +599,17 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
 			flags |= PBLK_SUBMITTED_ENTRY;
 			/* Release flags on context. Protect from writes */
 			smp_store_release(&entry->w_ctx.flags, flags);
-			goto out;
+			return NVM_IO_ERR;
 		}
 
-		ret = bio_add_page(bio, page, rb->seg_size, 0);
-		if (ret != rb->seg_size) {
+		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
+								rb->seg_size) {
 			pr_err("pblk: could not add page to write bio\n");
 			flags &= ~PBLK_WRITTEN_DATA;
 			flags |= PBLK_SUBMITTED_ENTRY;
 			/* Release flags on context. Protect from writes */
 			smp_store_release(&entry->w_ctx.flags, flags);
-			goto out;
+			return NVM_IO_ERR;
 		}
 
 		if (flags & PBLK_FLUSH_ENTRY) {
@@ -607,14 +636,19 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
 		pos = (pos + 1) & (rb->nr_entries - 1);
 	}
 
-	read = to_read;
-	pblk_rl_out(&pblk->rl, user_io, gc_io);
+	if (pad) {
+		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
+			pr_err("pblk: could not pad page in write bio\n");
+			return NVM_IO_ERR;
+		}
+	}
+
 #ifdef CONFIG_NVM_DEBUG
 	atomic_long_add(pad, &((struct pblk *)
 			(container_of(rb, struct pblk, rwb)))->padded_writes);
 #endif
-out:
-	return read;
+
+	return NVM_IO_OK;
 }
 
 /*
@@ -623,15 +657,17 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
  * be directed to disk.
  */
 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-			u64 pos, int bio_iter)
+			struct ppa_addr ppa, int bio_iter)
 {
+	struct pblk *pblk = container_of(rb, struct pblk, rwb);
 	struct pblk_rb_entry *entry;
 	struct pblk_w_ctx *w_ctx;
+	struct ppa_addr l2p_ppa;
+	u64 pos = pblk_addr_to_cacheline(ppa);
 	void *data;
 	int flags;
 	int ret = 1;
 
-	spin_lock(&rb->w_lock);
 
 #ifdef CONFIG_NVM_DEBUG
 	/* Caller must ensure that the access will not cause an overflow */
@@ -641,8 +677,14 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 	w_ctx = &entry->w_ctx;
 	flags = READ_ONCE(w_ctx->flags);
 
+	spin_lock(&rb->w_lock);
+	spin_lock(&pblk->trans_lock);
+	l2p_ppa = pblk_trans_map_get(pblk, lba);
+	spin_unlock(&pblk->trans_lock);
+
 	/* Check if the entry has been overwritten or is scheduled to be */
-	if (w_ctx->lba != lba || flags & PBLK_WRITABLE_ENTRY) {
+	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
+						flags & PBLK_WRITABLE_ENTRY) {
 		ret = 0;
 		goto out;
 	}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4a12f14..4e5c48f 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -34,8 +34,7 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
 	BUG_ON(!pblk_addr_in_cache(ppa));
 #endif
 
-	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba,
-					pblk_addr_to_cacheline(ppa), bio_iter);
+	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
 }
 
 static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -76,6 +75,9 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			}
 			WARN_ON(test_and_set_bit(i, read_bitmap));
 			advanced_bio = 1;
+#ifdef CONFIG_NVM_DEBUG
+			atomic_long_inc(&pblk->cache_reads);
+#endif
 		} else {
 			/* Read from media non-cached sectors */
 			rqd->ppa_list[j++] = p;
@@ -85,6 +87,11 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
 	}
 
+	if (pblk_io_aligned(pblk, nr_secs))
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+	else
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
 #ifdef CONFIG_NVM_DEBUG
 	atomic_long_add(nr_secs, &pblk->inflight_reads);
 #endif
@@ -94,8 +101,6 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
 {
 	int err;
 
-	rqd->flags = pblk_set_read_mode(pblk);
-
 	err = pblk_submit_io(pblk, rqd);
 	if (err)
 		return NVM_IO_ERR;
@@ -107,27 +112,27 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 {
 	struct pblk *pblk = rqd->private;
 	struct nvm_tgt_dev *dev = pblk->dev;
-	struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 	struct bio *bio = rqd->bio;
 
 	if (rqd->error)
 		pblk_log_read_err(pblk, rqd);
 #ifdef CONFIG_NVM_DEBUG
 	else
-		WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n");
+		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
 #endif
 
-	if (rqd->nr_ppas > 1)
-		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
+	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 
 	bio_put(bio);
-	if (r_ctx->orig_bio) {
+	if (r_ctx->private) {
+		struct bio *orig_bio = r_ctx->private;
+
 #ifdef CONFIG_NVM_DEBUG
-		WARN_ONCE(r_ctx->orig_bio->bi_error,
-						"pblk: corrupted read bio\n");
+		WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
 #endif
-		bio_endio(r_ctx->orig_bio);
-		bio_put(r_ctx->orig_bio);
+		bio_endio(orig_bio);
+		bio_put(orig_bio);
 	}
 
 #ifdef CONFIG_NVM_DEBUG
@@ -136,6 +141,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
 #endif
 
 	pblk_free_rqd(pblk, rqd, READ);
+	atomic_dec(&pblk->inflight_io);
 }
 
 static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
@@ -173,6 +179,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 
 	rqd->bio = new_bio;
 	rqd->nr_ppas = nr_holes;
+	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 	rqd->end_io = NULL;
 
 	if (unlikely(nr_secs > 1 && nr_holes == 1)) {
@@ -280,9 +287,14 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			goto retry;
 		}
 		WARN_ON(test_and_set_bit(0, read_bitmap));
+#ifdef CONFIG_NVM_DEBUG
+		atomic_long_inc(&pblk->cache_reads);
+#endif
 	} else {
 		rqd->ppa_addr = ppa;
 	}
+
+	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 }
 
 int pblk_submit_read(struct pblk *pblk, struct bio *bio)
@@ -316,13 +328,16 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	 */
 	bio_init_idx = pblk_get_bi_idx(bio);
 
+	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+							&rqd->dma_meta_list);
+	if (!rqd->meta_list) {
+		pr_err("pblk: not able to allocate ppa list\n");
+		goto fail_rqd_free;
+	}
+
 	if (nr_secs > 1) {
-		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-						&rqd->dma_ppa_list);
-		if (!rqd->ppa_list) {
-			pr_err("pblk: not able to allocate ppa list\n");
-			goto fail_rqd_free;
-		}
+		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
+		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
 
 		pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
 	} else {
@@ -332,6 +347,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	bio_get(bio);
 	if (bitmap_full(&read_bitmap, nr_secs)) {
 		bio_endio(bio);
+		atomic_inc(&pblk->inflight_io);
 		pblk_end_io_read(rqd);
 		return NVM_IO_OK;
 	}
@@ -339,17 +355,17 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
 	/* All sectors are to be read from the device */
 	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
 		struct bio *int_bio = NULL;
-		struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
+		struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 
 		/* Clone read bio to deal with read errors internally */
-		int_bio = bio_clone_bioset(bio, GFP_KERNEL, fs_bio_set);
+		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
 		if (!int_bio) {
 			pr_err("pblk: could not clone read bio\n");
 			return NVM_IO_ERR;
 		}
 
 		rqd->bio = int_bio;
-		r_ctx->orig_bio = bio;
+		r_ctx->private = bio;
 
 		ret = pblk_submit_read_io(pblk, rqd);
 		if (ret) {
@@ -445,7 +461,6 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct request_queue *q = dev->q;
 	struct bio *bio;
 	struct nvm_rq rqd;
 	int ret, data_len;
@@ -453,22 +468,19 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
+	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+							&rqd.dma_meta_list);
+	if (!rqd.meta_list)
+		return NVM_IO_ERR;
+
 	if (nr_secs > 1) {
-		rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-		if (!rqd.ppa_list)
-			return NVM_IO_ERR;
+		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
+		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
 
 		*secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
 								nr_secs);
-		if (*secs_to_gc == 1) {
-			struct ppa_addr ppa;
-
-			ppa = rqd.ppa_list[0];
-			nvm_dev_dma_free(dev->parent, rqd.ppa_list,
-							rqd.dma_ppa_list);
-			rqd.ppa_addr = ppa;
-		}
+		if (*secs_to_gc == 1)
+			rqd.ppa_addr = rqd.ppa_list[0];
 	} else {
 		*secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
 	}
@@ -477,7 +489,8 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 		goto out;
 
 	data_len = (*secs_to_gc) * geo->sec_size;
-	bio = bio_map_kern(q, data, data_len, GFP_KERNEL);
+	bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
+						PBLK_KMALLOC_META, GFP_KERNEL);
 	if (IS_ERR(bio)) {
 		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
 		goto err_free_dma;
@@ -490,6 +503,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 	rqd.end_io = pblk_end_io_sync;
 	rqd.private = &wait;
 	rqd.nr_ppas = *secs_to_gc;
+	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 	rqd.bio = bio;
 
 	ret = pblk_submit_read_io(pblk, &rqd);
@@ -503,6 +517,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: GC read I/O timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 
 	if (rqd.error) {
 		atomic_long_inc(&pblk->read_failed_gc);
@@ -518,12 +533,10 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 #endif
 
 out:
-	if (rqd.nr_ppas > 1)
-		nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
+	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 	return NVM_IO_OK;
 
 err_free_dma:
-	if (rqd.nr_ppas > 1)
-		nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);
+	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
 	return NVM_IO_ERR;
 }
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index f8f8508..0e48d3e 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -120,18 +120,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 	return 0;
 }
 
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta)
+__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
 {
 	u32 crc;
 
-	crc = pblk_calc_emeta_crc(pblk, emeta);
-	if (le32_to_cpu(emeta->crc) != crc)
+	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
+	if (le32_to_cpu(emeta_buf->crc) != crc)
 		return NULL;
 
-	if (le32_to_cpu(emeta->header.identifier) != PBLK_MAGIC)
+	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
 		return NULL;
 
-	return pblk_line_emeta_to_lbas(emeta);
+	return emeta_to_lbas(pblk, emeta_buf);
 }
 
 static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -139,19 +139,20 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct line_emeta *emeta = line->emeta;
+	struct pblk_emeta *emeta = line->emeta;
+	struct line_emeta *emeta_buf = emeta->buf;
 	__le64 *lba_list;
 	int data_start;
 	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
 	int i;
 
-	lba_list = pblk_recov_get_lba_list(pblk, emeta);
+	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
 	if (!lba_list)
 		return 1;
 
 	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
-	nr_data_lbas = lm->sec_per_line - lm->emeta_sec;
-	nr_valid_lbas = le64_to_cpu(emeta->nr_valid_lbas);
+	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
+	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
 
 	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
 		struct ppa_addr ppa;
@@ -169,7 +170,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
 			if (test_and_set_bit(i, line->invalid_bitmap))
 				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
 			else
-				line->vsc--;
+				le32_add_cpu(line->vsc, -1);
 			spin_unlock(&line->lock);
 
 			continue;
@@ -181,7 +182,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
 
 	if (nr_valid_lbas != nr_lbas)
 		pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
-				line->id, line->emeta->nr_valid_lbas, nr_lbas);
+				line->id, emeta_buf->nr_valid_lbas, nr_lbas);
 
 	line->left_msecs = 0;
 
@@ -195,7 +196,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
 	struct pblk_line_meta *lm = &pblk->lm;
 	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
 
-	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec -
+	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
 				nr_bb * geo->sec_per_blk;
 }
 
@@ -240,7 +241,7 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 	r_ptr_int = r_ptr;
 
 next_read_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)
@@ -256,7 +257,6 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PREAD;
-	rqd->flags = pblk_set_read_mode(pblk);
 	rqd->meta_list = meta_list;
 	rqd->nr_ppas = rq_ppas;
 	rqd->ppa_list = ppa_list;
@@ -265,6 +265,11 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 	rqd->end_io = pblk_end_io_sync;
 	rqd->private = &wait;
 
+	if (pblk_io_aligned(pblk, rq_ppas))
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+	else
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
 	for (i = 0; i < rqd->nr_ppas; ) {
 		struct ppa_addr ppa;
 		int pos;
@@ -295,7 +300,7 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 		pr_err("pblk: L2P recovery read timed out\n");
 		return -EINTR;
 	}
-
+	atomic_dec(&pblk->inflight_io);
 	reinit_completion(&wait);
 
 	/* At this point, the read should not fail. If it does, it is a problem
@@ -322,47 +327,94 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
 	return 0;
 }
 
+static void pblk_recov_complete(struct kref *ref)
+{
+	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
+
+	complete(&pad_rq->wait);
+}
+
+static void pblk_end_io_recov(struct nvm_rq *rqd)
+{
+	struct pblk_pad_rq *pad_rq = rqd->private;
+	struct pblk *pblk = pad_rq->pblk;
+	struct nvm_tgt_dev *dev = pblk->dev;
+
+	kref_put(&pad_rq->ref, pblk_recov_complete);
+	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
+	pblk_free_rqd(pblk, rqd, WRITE);
+}
+
 static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
-			      struct pblk_recov_alloc p, int left_ppas)
+			      int left_ppas)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr *ppa_list;
 	struct pblk_sec_meta *meta_list;
+	struct pblk_pad_rq *pad_rq;
 	struct nvm_rq *rqd;
 	struct bio *bio;
 	void *data;
 	dma_addr_t dma_ppa_list, dma_meta_list;
-	__le64 *lba_list = pblk_line_emeta_to_lbas(line->emeta);
+	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
 	u64 w_ptr = line->cur_sec;
-	int left_line_ppas = line->left_msecs;
-	int rq_ppas, rq_len;
+	int left_line_ppas, rq_ppas, rq_len;
 	int i, j;
 	int ret = 0;
-	DECLARE_COMPLETION_ONSTACK(wait);
 
-	ppa_list = p.ppa_list;
-	meta_list = p.meta_list;
-	rqd = p.rqd;
-	data = p.data;
-	dma_ppa_list = p.dma_ppa_list;
-	dma_meta_list = p.dma_meta_list;
+	spin_lock(&line->lock);
+	left_line_ppas = line->left_msecs;
+	spin_unlock(&line->lock);
+
+	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
+	if (!pad_rq)
+		return -ENOMEM;
+
+	data = vzalloc(pblk->max_write_pgs * geo->sec_size);
+	if (!data) {
+		ret = -ENOMEM;
+		goto free_rq;
+	}
+
+	pad_rq->pblk = pblk;
+	init_completion(&pad_rq->wait);
+	kref_init(&pad_rq->ref);
 
 next_pad_rq:
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
-	if (!rq_ppas)
-		rq_ppas = pblk->min_write_pgs;
+	if (rq_ppas < pblk->min_write_pgs) {
+		pr_err("pblk: corrupted pad line %d\n", line->id);
+		goto free_rq;
+	}
+
 	rq_len = rq_ppas * geo->sec_size;
 
+	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
+	if (!meta_list) {
+		ret = -ENOMEM;
+		goto free_data;
+	}
+
+	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
+	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+
+	rqd = pblk_alloc_rqd(pblk, WRITE);
+	if (IS_ERR(rqd)) {
+		ret = PTR_ERR(rqd);
+		goto fail_free_meta;
+	}
+	memset(rqd, 0, pblk_w_rq_size);
+
 	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
+	if (IS_ERR(bio)) {
+		ret = PTR_ERR(bio);
+		goto fail_free_rqd;
+	}
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
-	memset(rqd, 0, pblk_r_rq_size);
-
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PWRITE;
 	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
@@ -371,8 +423,8 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 	rqd->ppa_list = ppa_list;
 	rqd->dma_ppa_list = dma_ppa_list;
 	rqd->dma_meta_list = dma_meta_list;
-	rqd->end_io = pblk_end_io_sync;
-	rqd->private = &wait;
+	rqd->end_io = pblk_end_io_recov;
+	rqd->private = pad_rq;
 
 	for (i = 0; i < rqd->nr_ppas; ) {
 		struct ppa_addr ppa;
@@ -390,34 +442,51 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
 
 		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
 			struct ppa_addr dev_ppa;
+			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
 			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
 
 			pblk_map_invalidate(pblk, dev_ppa);
-			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
-			lba_list[w_ptr] = cpu_to_le64(ADDR_EMPTY);
+			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
 			rqd->ppa_list[i] = dev_ppa;
 		}
 	}
 
+	kref_get(&pad_rq->ref);
+
 	ret = pblk_submit_io(pblk, rqd);
 	if (ret) {
 		pr_err("pblk: I/O submission failed: %d\n", ret);
-		return ret;
+		goto free_data;
 	}
 
-	if (!wait_for_completion_io_timeout(&wait,
-				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
-		pr_err("pblk: L2P recovery write timed out\n");
-	}
-	reinit_completion(&wait);
+	atomic_dec(&pblk->inflight_io);
 
 	left_line_ppas -= rq_ppas;
 	left_ppas -= rq_ppas;
-	if (left_ppas > 0 && left_line_ppas)
+	if (left_ppas && left_line_ppas)
 		goto next_pad_rq;
 
-	return 0;
+	kref_put(&pad_rq->ref, pblk_recov_complete);
+
+	if (!wait_for_completion_io_timeout(&pad_rq->wait,
+				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
+		pr_err("pblk: pad write timed out\n");
+		ret = -ETIME;
+	}
+
+free_rq:
+	kfree(pad_rq);
+free_data:
+	vfree(data);
+	return ret;
+
+fail_free_rqd:
+	pblk_free_rqd(pblk, rqd, WRITE);
+fail_free_meta:
+	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
+	kfree(pad_rq);
+	return ret;
 }
 
 /* When this function is called, it means that not all upper pages have been
@@ -456,7 +525,7 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 	rec_round = 0;
 
 next_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)
@@ -472,7 +541,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PREAD;
-	rqd->flags = pblk_set_read_mode(pblk);
 	rqd->meta_list = meta_list;
 	rqd->nr_ppas = rq_ppas;
 	rqd->ppa_list = ppa_list;
@@ -481,6 +549,11 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 	rqd->end_io = pblk_end_io_sync;
 	rqd->private = &wait;
 
+	if (pblk_io_aligned(pblk, rq_ppas))
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+	else
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
 	for (i = 0; i < rqd->nr_ppas; ) {
 		struct ppa_addr ppa;
 		int pos;
@@ -510,6 +583,7 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: L2P recovery read timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 	reinit_completion(&wait);
 
 	/* This should not happen since the read failed during normal recovery,
@@ -544,7 +618,7 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 		if (pad_secs > line->left_msecs)
 			pad_secs = line->left_msecs;
 
-		ret = pblk_recov_pad_oob(pblk, line, p, pad_secs);
+		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
 		if (ret)
 			pr_err("pblk: OOB padding failed (err:%d)\n", ret);
 
@@ -552,7 +626,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
 		if (ret)
 			pr_err("pblk: OOB read failed (err:%d)\n", ret);
 
-		line->left_ssecs = line->left_msecs;
 		left_ppas = 0;
 	}
 
@@ -591,7 +664,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	*done = 1;
 
 next_rq:
-	memset(rqd, 0, pblk_r_rq_size);
+	memset(rqd, 0, pblk_g_rq_size);
 
 	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
 	if (!rq_ppas)
@@ -607,7 +680,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 
 	rqd->bio = bio;
 	rqd->opcode = NVM_OP_PREAD;
-	rqd->flags = pblk_set_read_mode(pblk);
 	rqd->meta_list = meta_list;
 	rqd->nr_ppas = rq_ppas;
 	rqd->ppa_list = ppa_list;
@@ -616,6 +688,11 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	rqd->end_io = pblk_end_io_sync;
 	rqd->private = &wait;
 
+	if (pblk_io_aligned(pblk, rq_ppas))
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+	else
+		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
 	for (i = 0; i < rqd->nr_ppas; ) {
 		struct ppa_addr ppa;
 		int pos;
@@ -646,6 +723,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
 		pr_err("pblk: L2P recovery read timed out\n");
 	}
+	atomic_dec(&pblk->inflight_io);
 	reinit_completion(&wait);
 
 	/* Reached the end of the written line */
@@ -658,7 +736,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 		/* Roll back failed sectors */
 		line->cur_sec -= nr_error_bits;
 		line->left_msecs += nr_error_bits;
-		line->left_ssecs = line->left_msecs;
 		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);
 
 		left_ppas = 0;
@@ -770,8 +847,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line *line, *tline, *data_line = NULL;
-	struct line_smeta *smeta;
-	struct line_emeta *emeta;
+	struct pblk_smeta *smeta;
+	struct pblk_emeta *emeta;
+	struct line_smeta *smeta_buf;
 	int found_lines = 0, recovered_lines = 0, open_lines = 0;
 	int is_next = 0;
 	int meta_line;
@@ -784,8 +862,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 	spin_lock(&l_mg->free_lock);
 	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
 	set_bit(meta_line, &l_mg->meta_bitmap);
-	smeta = l_mg->sline_meta[meta_line].meta;
-	emeta = l_mg->eline_meta[meta_line].meta;
+	smeta = l_mg->sline_meta[meta_line];
+	emeta = l_mg->eline_meta[meta_line];
+	smeta_buf = (struct line_smeta *)smeta;
 	spin_unlock(&l_mg->free_lock);
 
 	/* Order data lines using their sequence number */
@@ -796,33 +875,33 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 
 		memset(smeta, 0, lm->smeta_len);
 		line->smeta = smeta;
-		line->lun_bitmap = ((void *)(smeta)) +
+		line->lun_bitmap = ((void *)(smeta_buf)) +
 						sizeof(struct line_smeta);
 
 		/* Lines that cannot be read are assumed as not written here */
 		if (pblk_line_read_smeta(pblk, line))
 			continue;
 
-		crc = pblk_calc_smeta_crc(pblk, smeta);
-		if (le32_to_cpu(smeta->crc) != crc)
+		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
+		if (le32_to_cpu(smeta_buf->crc) != crc)
 			continue;
 
-		if (le32_to_cpu(smeta->header.identifier) != PBLK_MAGIC)
+		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
 			continue;
 
-		if (le16_to_cpu(smeta->header.version) != 1) {
+		if (le16_to_cpu(smeta_buf->header.version) != 1) {
 			pr_err("pblk: found incompatible line version %u\n",
-					smeta->header.version);
+					smeta_buf->header.version);
 			return ERR_PTR(-EINVAL);
 		}
 
 		/* The first valid instance uuid is used for initialization */
 		if (!valid_uuid) {
-			memcpy(pblk->instance_uuid, smeta->header.uuid, 16);
+			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
 			valid_uuid = 1;
 		}
 
-		if (memcmp(pblk->instance_uuid, smeta->header.uuid, 16)) {
+		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
 			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
 					i);
 			continue;
@@ -830,9 +909,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 
 		/* Update line metadata */
 		spin_lock(&line->lock);
-		line->id = le32_to_cpu(line->smeta->header.id);
-		line->type = le16_to_cpu(line->smeta->header.type);
-		line->seq_nr = le64_to_cpu(line->smeta->seq_nr);
+		line->id = le32_to_cpu(smeta_buf->header.id);
+		line->type = le16_to_cpu(smeta_buf->header.type);
+		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
 		spin_unlock(&line->lock);
 
 		/* Update general metadata */
@@ -848,7 +927,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 		pblk_recov_line_add_ordered(&recov_list, line);
 		found_lines++;
 		pr_debug("pblk: recovering data line %d, seq:%llu\n",
-						line->id, smeta->seq_nr);
+						line->id, smeta_buf->seq_nr);
 	}
 
 	if (!found_lines) {
@@ -868,15 +947,15 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 
 		recovered_lines++;
 		/* Calculate where emeta starts based on the line bb */
-		off = lm->sec_per_line - lm->emeta_sec;
+		off = lm->sec_per_line - lm->emeta_sec[0];
 		nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
 		off -= nr_bb * geo->sec_per_pl;
 
-		memset(emeta, 0, lm->emeta_len);
-		line->emeta = emeta;
 		line->emeta_ssec = off;
+		line->emeta = emeta;
+		memset(line->emeta->buf, 0, lm->emeta_len[0]);
 
-		if (pblk_line_read_emeta(pblk, line)) {
+		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
 			pblk_recov_l2p_from_oob(pblk, line);
 			goto next;
 		}
@@ -941,58 +1020,26 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 }
 
 /*
- * Pad until smeta can be read on current data line
+ * Pad current line
  */
-void pblk_recov_pad(struct pblk *pblk)
+int pblk_recov_pad(struct pblk *pblk)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line *line;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
-	struct nvm_rq *rqd;
-	struct pblk_recov_alloc p;
-	struct ppa_addr *ppa_list;
-	struct pblk_sec_meta *meta_list;
-	void *data;
-	dma_addr_t dma_ppa_list, dma_meta_list;
+	int left_msecs;
+	int ret = 0;
 
 	spin_lock(&l_mg->free_lock);
 	line = l_mg->data_line;
+	left_msecs = line->left_msecs;
 	spin_unlock(&l_mg->free_lock);
 
-	rqd = pblk_alloc_rqd(pblk, READ);
-	if (IS_ERR(rqd))
-		return;
-
-	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
-	if (!meta_list)
-		goto free_rqd;
-
-	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
-	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
-
-	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
-	if (!data)
-		goto free_meta_list;
-
-	p.ppa_list = ppa_list;
-	p.meta_list = meta_list;
-	p.rqd = rqd;
-	p.data = data;
-	p.dma_ppa_list = dma_ppa_list;
-	p.dma_meta_list = dma_meta_list;
-
-	if (pblk_recov_pad_oob(pblk, line, p, line->left_msecs)) {
-		pr_err("pblk: Tear down padding failed\n");
-		goto free_data;
+	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
+	if (ret) {
+		pr_err("pblk: Tear down padding failed (%d)\n", ret);
+		return ret;
 	}
 
-	pblk_line_close(pblk, line);
-
-free_data:
-	kfree(data);
-free_meta_list:
-	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-free_rqd:
-	pblk_free_rqd(pblk, rqd, READ);
+	pblk_line_close_meta(pblk, line);
+	return ret;
 }
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index ab7cbb1..2e6a536 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -23,11 +23,35 @@ static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
 	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
 }
 
+int pblk_rl_is_limit(struct pblk_rl *rl)
+{
+	int rb_space;
+
+	rb_space = atomic_read(&rl->rb_space);
+
+	return (rb_space == 0);
+}
+
 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
 {
 	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
+	int rb_space = atomic_read(&rl->rb_space);
 
-	return (!(rb_user_cnt + nr_entries > rl->rb_user_max));
+	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
+		return NVM_IO_ERR;
+
+	if (rb_user_cnt >= rl->rb_user_max)
+		return NVM_IO_REQUEUE;
+
+	return NVM_IO_OK;
+}
+
+void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
+{
+	int rb_space = atomic_read(&rl->rb_space);
+
+	if (unlikely(rb_space >= 0))
+		atomic_sub(nr_entries, &rl->rb_space);
 }
 
 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
@@ -37,7 +61,7 @@ int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
 
 	/* If there is no user I/O let GC take over space on the write buffer */
 	rb_user_active = READ_ONCE(rl->rb_user_active);
-	return (!(rb_gc_cnt + nr_entries > rl->rb_gc_max && rb_user_active));
+	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
 }
 
 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
@@ -77,33 +101,32 @@ static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
 	unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
 
 	if (free_blocks >= rl->high) {
-		rl->rb_user_max = max - rl->rb_gc_rsv;
-		rl->rb_gc_max = rl->rb_gc_rsv;
+		rl->rb_user_max = max;
+		rl->rb_gc_max = 0;
 		rl->rb_state = PBLK_RL_HIGH;
 	} else if (free_blocks < rl->high) {
 		int shift = rl->high_pw - rl->rb_windows_pw;
 		int user_windows = free_blocks >> shift;
 		int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
-		int gc_max;
 
 		rl->rb_user_max = user_max;
-		gc_max = max - rl->rb_user_max;
-		rl->rb_gc_max = max(gc_max, rl->rb_gc_rsv);
+		rl->rb_gc_max = max - user_max;
 
-		if (free_blocks > rl->low)
-			rl->rb_state = PBLK_RL_MID;
-		else
-			rl->rb_state = PBLK_RL_LOW;
+		if (free_blocks <= rl->rsv_blocks) {
+			rl->rb_user_max = 0;
+			rl->rb_gc_max = max;
+		}
+
+		/* In the worst case, we will need to GC lines in the low list
+		 * (high valid sector count). If there are lines to GC on high
+		 * or mid lists, these will be prioritized
+		 */
+		rl->rb_state = PBLK_RL_LOW;
 	}
 
 	return rl->rb_state;
 }
 
-void pblk_rl_set_gc_rsc(struct pblk_rl *rl, int rsv)
-{
-	rl->rb_gc_rsv = rl->rb_gc_max = rsv;
-}
-
 void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
 {
 	struct pblk *pblk = container_of(rl, struct pblk, rl);
@@ -122,11 +145,15 @@ void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
 
 void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
 {
-	struct pblk *pblk = container_of(rl, struct pblk, rl);
 	int blk_in_line = atomic_read(&line->blk_in_line);
-	int ret;
 
 	atomic_sub(blk_in_line, &rl->free_blocks);
+}
+
+void pblk_gc_should_kick(struct pblk *pblk)
+{
+	struct pblk_rl *rl = &pblk->rl;
+	int ret;
 
 	/* Rates will not change that often - no need to lock update */
 	ret = pblk_rl_update_rates(rl, rl->rb_budget);
@@ -136,11 +163,16 @@ void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
 		pblk_gc_should_stop(pblk);
 }
 
-int pblk_rl_gc_thrs(struct pblk_rl *rl)
+int pblk_rl_high_thrs(struct pblk_rl *rl)
 {
 	return rl->high;
 }
 
+int pblk_rl_low_thrs(struct pblk_rl *rl)
+{
+	return rl->low;
+}
+
 int pblk_rl_sysfs_rate_show(struct pblk_rl *rl)
 {
 	return rl->rb_user_max;
@@ -161,24 +193,36 @@ void pblk_rl_free(struct pblk_rl *rl)
 
 void pblk_rl_init(struct pblk_rl *rl, int budget)
 {
+	struct pblk *pblk = container_of(rl, struct pblk, rl);
+	struct pblk_line_meta *lm = &pblk->lm;
+	int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
 	unsigned int rb_windows;
 
 	rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
-	rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
 	rl->high_pw = get_count_order(rl->high);
 
+	rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
+	if (rl->low < min_blocks)
+		rl->low = min_blocks;
+
+	rl->rsv_blocks = min_blocks;
+
 	/* This will always be a power-of-2 */
 	rb_windows = budget / PBLK_MAX_REQ_ADDRS;
-	rl->rb_windows_pw = get_count_order(rb_windows) + 1;
+	rl->rb_windows_pw = get_count_order(rb_windows);
 
 	/* To start with, all buffer is available to user I/O writers */
 	rl->rb_budget = budget;
 	rl->rb_user_max = budget;
-	atomic_set(&rl->rb_user_cnt, 0);
 	rl->rb_gc_max = 0;
 	rl->rb_state = PBLK_RL_HIGH;
+
+	atomic_set(&rl->rb_user_cnt, 0);
 	atomic_set(&rl->rb_gc_cnt, 0);
+	atomic_set(&rl->rb_space, -1);
 
 	setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+
 	rl->rb_user_active = 0;
+	rl->rb_gc_active = 0;
 }
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index f0af1d1..95fb434 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -49,30 +49,26 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
 
 static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
 	int free_blocks, total_blocks;
 	int rb_user_max, rb_user_cnt;
-	int rb_gc_max, rb_gc_rsv, rb_gc_cnt, rb_budget, rb_state;
+	int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
 
 	free_blocks = atomic_read(&pblk->rl.free_blocks);
 	rb_user_max = pblk->rl.rb_user_max;
 	rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
 	rb_gc_max = pblk->rl.rb_gc_max;
-	rb_gc_rsv = pblk->rl.rb_gc_rsv;
 	rb_gc_cnt = atomic_read(&pblk->rl.rb_gc_cnt);
 	rb_budget = pblk->rl.rb_budget;
 	rb_state = pblk->rl.rb_state;
 
-	total_blocks = geo->blks_per_lun * geo->nr_luns;
+	total_blocks = pblk->rl.total_blocks;
 
 	return snprintf(page, PAGE_SIZE,
-		"u:%u/%u,gc:%u/%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+		"u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
 				rb_user_cnt,
 				rb_user_max,
 				rb_gc_cnt,
 				rb_gc_max,
-				rb_gc_rsv,
 				rb_state,
 				rb_budget,
 				pblk->rl.low,
@@ -150,11 +146,11 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 	ssize_t sz = 0;
 	int nr_free_lines;
 	int cur_data, cur_log;
-	int free_line_cnt = 0, closed_line_cnt = 0;
+	int free_line_cnt = 0, closed_line_cnt = 0, emeta_line_cnt = 0;
 	int d_line_cnt = 0, l_line_cnt = 0;
 	int gc_full = 0, gc_high = 0, gc_mid = 0, gc_low = 0, gc_empty = 0;
-	int free = 0, bad = 0, cor = 0;
-	int msecs = 0, ssecs = 0, cur_sec = 0, vsc = 0, sec_in_line = 0;
+	int bad = 0, cor = 0;
+	int msecs = 0, cur_sec = 0, vsc = 0, sec_in_line = 0;
 	int map_weight = 0, meta_weight = 0;
 
 	spin_lock(&l_mg->free_lock);
@@ -166,6 +162,11 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 		free_line_cnt++;
 	spin_unlock(&l_mg->free_lock);
 
+	spin_lock(&l_mg->close_lock);
+	list_for_each_entry(line, &l_mg->emeta_list, list)
+		emeta_line_cnt++;
+	spin_unlock(&l_mg->close_lock);
+
 	spin_lock(&l_mg->gc_lock);
 	list_for_each_entry(line, &l_mg->gc_full_list, list) {
 		if (line->type == PBLK_LINETYPE_DATA)
@@ -212,8 +213,6 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 		gc_empty++;
 	}
 
-	list_for_each_entry(line, &l_mg->free_list, list)
-		free++;
 	list_for_each_entry(line, &l_mg->bad_list, list)
 		bad++;
 	list_for_each_entry(line, &l_mg->corrupt_list, list)
@@ -224,8 +223,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 	if (l_mg->data_line) {
 		cur_sec = l_mg->data_line->cur_sec;
 		msecs = l_mg->data_line->left_msecs;
-		ssecs = l_mg->data_line->left_ssecs;
-		vsc = l_mg->data_line->vsc;
+		vsc = le32_to_cpu(*l_mg->data_line->vsc);
 		sec_in_line = l_mg->data_line->sec_in_line;
 		meta_weight = bitmap_weight(&l_mg->meta_bitmap,
 							PBLK_DATA_LINES);
@@ -235,17 +233,20 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 	spin_unlock(&l_mg->free_lock);
 
 	if (nr_free_lines != free_line_cnt)
-		pr_err("pblk: corrupted free line list\n");
+		pr_err("pblk: corrupted free line list:%d/%d\n",
+						nr_free_lines, free_line_cnt);
 
 	sz = snprintf(page, PAGE_SIZE - sz,
 		"line: nluns:%d, nblks:%d, nsecs:%d\n",
 		geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
 
 	sz += snprintf(page + sz, PAGE_SIZE - sz,
-		"lines:d:%d,l:%d-f:%d(%d),b:%d,co:%d,c:%d(d:%d,l:%d)t:%d\n",
+		"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
 					cur_data, cur_log,
-					free, nr_free_lines, bad, cor,
+					nr_free_lines,
+					emeta_line_cnt, meta_weight,
 					closed_line_cnt,
+					bad, cor,
 					d_line_cnt, l_line_cnt,
 					l_mg->nr_lines);
 
@@ -255,9 +256,10 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
 			atomic_read(&pblk->gc.inflight_gc));
 
 	sz += snprintf(page + sz, PAGE_SIZE - sz,
-		"data (%d) cur:%d, left:%d/%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
-			cur_data, cur_sec, msecs, ssecs, vsc, sec_in_line,
-			map_weight, lm->sec_per_line, meta_weight);
+		"data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
+			cur_data, cur_sec, msecs, vsc, sec_in_line,
+			map_weight, lm->sec_per_line,
+			atomic_read(&pblk->inflight_io));
 
 	return sz;
 }
@@ -274,7 +276,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
 					lm->smeta_len, lm->smeta_sec);
 	sz += snprintf(page + sz, PAGE_SIZE - sz,
 				"emeta - len:%d, sec:%d, bb_start:%d\n",
-					lm->emeta_len, lm->emeta_sec,
+					lm->emeta_len[0], lm->emeta_sec[0],
 					lm->emeta_bb);
 	sz += snprintf(page + sz, PAGE_SIZE - sz,
 				"bitmap lengths: sec:%d, blk:%d, lun:%d\n",
@@ -290,6 +292,11 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
 	return sz;
 }
 
+static ssize_t pblk_sysfs_get_sec_per_write(struct pblk *pblk, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write);
+}
+
 #ifdef CONFIG_NVM_DEBUG
 static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
 {
@@ -303,35 +310,14 @@ static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page)
 			atomic_long_read(&pblk->padded_wb),
 			atomic_long_read(&pblk->sub_writes),
 			atomic_long_read(&pblk->sync_writes),
-			atomic_long_read(&pblk->compl_writes),
 			atomic_long_read(&pblk->recov_writes),
 			atomic_long_read(&pblk->recov_gc_writes),
 			atomic_long_read(&pblk->recov_gc_reads),
+			atomic_long_read(&pblk->cache_reads),
 			atomic_long_read(&pblk->sync_reads));
 }
 #endif
 
-static ssize_t pblk_sysfs_rate_store(struct pblk *pblk, const char *page,
-				     size_t len)
-{
-	struct pblk_gc *gc = &pblk->gc;
-	size_t c_len;
-	int value;
-
-	c_len = strcspn(page, "\n");
-	if (c_len >= len)
-		return -EINVAL;
-
-	if (kstrtouint(page, 0, &value))
-		return -EINVAL;
-
-	spin_lock(&gc->lock);
-	pblk_rl_set_gc_rsc(&pblk->rl, value);
-	spin_unlock(&gc->lock);
-
-	return len;
-}
-
 static ssize_t pblk_sysfs_gc_force(struct pblk *pblk, const char *page,
 				   size_t len)
 {
@@ -345,10 +331,30 @@ static ssize_t pblk_sysfs_gc_force(struct pblk *pblk, const char *page,
 	if (kstrtouint(page, 0, &force))
 		return -EINVAL;
 
-	if (force < 0 || force > 1)
+	pblk_gc_sysfs_force(pblk, force);
+
+	return len;
+}
+
+static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk,
+					     const char *page, size_t len)
+{
+	size_t c_len;
+	int sec_per_write;
+
+	c_len = strcspn(page, "\n");
+	if (c_len >= len)
 		return -EINVAL;
 
-	pblk_gc_sysfs_force(pblk, force);
+	if (kstrtouint(page, 0, &sec_per_write))
+		return -EINVAL;
+
+	if (sec_per_write < pblk->min_write_pgs
+				|| sec_per_write > pblk->max_write_pgs
+				|| sec_per_write % pblk->min_write_pgs != 0)
+		return -EINVAL;
+
+	pblk_set_sec_per_write(pblk, sec_per_write);
 
 	return len;
 }
@@ -398,9 +404,9 @@ static struct attribute sys_gc_force = {
 	.mode = 0200,
 };
 
-static struct attribute sys_gc_rl_max = {
-	.name = "gc_rl_max",
-	.mode = 0200,
+static struct attribute sys_max_sec_per_write = {
+	.name = "max_sec_per_write",
+	.mode = 0644,
 };
 
 #ifdef CONFIG_NVM_DEBUG
@@ -416,7 +422,7 @@ static struct attribute *pblk_attrs[] = {
 	&sys_errors_attr,
 	&sys_gc_state,
 	&sys_gc_force,
-	&sys_gc_rl_max,
+	&sys_max_sec_per_write,
 	&sys_rb_attr,
 	&sys_stats_ppaf_attr,
 	&sys_lines_attr,
@@ -448,6 +454,8 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr,
 		return pblk_sysfs_lines(pblk, buf);
 	else if (strcmp(attr->name, "lines_info") == 0)
 		return pblk_sysfs_lines_info(pblk, buf);
+	else if (strcmp(attr->name, "max_sec_per_write") == 0)
+		return pblk_sysfs_get_sec_per_write(pblk, buf);
 #ifdef CONFIG_NVM_DEBUG
 	else if (strcmp(attr->name, "stats") == 0)
 		return pblk_sysfs_stats_debug(pblk, buf);
@@ -460,10 +468,10 @@ static ssize_t pblk_sysfs_store(struct kobject *kobj, struct attribute *attr,
 {
 	struct pblk *pblk = container_of(kobj, struct pblk, kobj);
 
-	if (strcmp(attr->name, "gc_rl_max") == 0)
-		return pblk_sysfs_rate_store(pblk, buf, len);
-	else if (strcmp(attr->name, "gc_force") == 0)
+	if (strcmp(attr->name, "gc_force") == 0)
 		return pblk_sysfs_gc_force(pblk, buf, len);
+	else if (strcmp(attr->name, "max_sec_per_write") == 0)
+		return pblk_sysfs_set_sec_per_write(pblk, buf, len);
 
 	return 0;
 }
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index aef6fd7..d62a8f4 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -17,18 +17,6 @@
 
 #include "pblk.h"
 
-static void pblk_sync_line(struct pblk *pblk, struct pblk_line *line)
-{
-#ifdef CONFIG_NVM_DEBUG
-	atomic_long_inc(&pblk->sync_writes);
-#endif
-
-	/* Counter protected by rb sync lock */
-	line->left_ssecs--;
-	if (!line->left_ssecs)
-		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);
-}
-
 static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 				    struct pblk_c_ctx *c_ctx)
 {
@@ -39,21 +27,14 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
 
 	for (i = 0; i < c_ctx->nr_valid; i++) {
 		struct pblk_w_ctx *w_ctx;
-		struct ppa_addr p;
-		struct pblk_line *line;
 
 		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
-
-		p = rqd->ppa_list[i];
-		line = &pblk->lines[pblk_dev_ppa_to_line(p)];
-		pblk_sync_line(pblk, line);
-
 		while ((original_bio = bio_list_pop(&w_ctx->bios)))
 			bio_endio(original_bio);
 	}
 
 #ifdef CONFIG_NVM_DEBUG
-	atomic_long_add(c_ctx->nr_valid, &pblk->compl_writes);
+	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
 #endif
 
 	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
@@ -169,7 +150,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
 	}
 
 	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
-	queue_work(pblk->kw_wq, &recovery->ws_rec);
+	queue_work(pblk->close_wq, &recovery->ws_rec);
 
 out:
 	pblk_complete_write(pblk, rqd, c_ctx);
@@ -186,14 +167,50 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
 	}
 #ifdef CONFIG_NVM_DEBUG
 	else
-		WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n");
+		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
 #endif
 
 	pblk_complete_write(pblk, rqd, c_ctx);
+	atomic_dec(&pblk->inflight_io);
+}
+
+static void pblk_end_io_write_meta(struct nvm_rq *rqd)
+{
+	struct pblk *pblk = rqd->private;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
+	struct pblk_line *line = m_ctx->private;
+	struct pblk_emeta *emeta = line->emeta;
+	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
+	struct pblk_lun *rlun = &pblk->luns[pos];
+	int sync;
+
+	up(&rlun->wr_sem);
+
+	if (rqd->error) {
+		pblk_log_write_err(pblk, rqd);
+		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
+	}
+#ifdef CONFIG_NVM_DEBUG
+	else
+		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
+#endif
+
+	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
+	if (sync == emeta->nr_entries)
+		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
+								pblk->close_wq);
+
+	bio_put(rqd->bio);
+	pblk_free_rqd(pblk, rqd, READ);
+
+	atomic_dec(&pblk->inflight_io);
 }
 
 static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			   unsigned int nr_secs)
+			   unsigned int nr_secs,
+			   nvm_end_io_fn(*end_io))
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 
@@ -202,7 +219,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	rqd->nr_ppas = nr_secs;
 	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
 	rqd->private = pblk;
-	rqd->end_io = pblk_end_io_write;
+	rqd->end_io = end_io;
 
 	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 							&rqd->dma_meta_list);
@@ -219,11 +236,10 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 }
 
 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
-			   struct pblk_c_ctx *c_ctx)
+			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
 {
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_line *e_line = pblk_line_get_data_next(pblk);
-	struct ppa_addr erase_ppa;
+	struct pblk_line *e_line = pblk_line_get_erase(pblk);
 	unsigned int valid = c_ctx->nr_valid;
 	unsigned int padded = c_ctx->nr_padded;
 	unsigned int nr_secs = valid + padded;
@@ -231,40 +247,23 @@ static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	int ret = 0;
 
 	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
-	if (!lun_bitmap) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!lun_bitmap)
+		return -ENOMEM;
 	c_ctx->lun_bitmap = lun_bitmap;
 
-	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs);
+	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
 	if (ret) {
 		kfree(lun_bitmap);
-		goto out;
+		return ret;
 	}
 
-	ppa_set_empty(&erase_ppa);
 	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
 		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
 	else
 		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
-							valid, &erase_ppa);
+							valid, erase_ppa);
 
-out:
-	if (unlikely(e_line && !ppa_empty(erase_ppa))) {
-		if (pblk_blk_erase_async(pblk, erase_ppa)) {
-			struct nvm_tgt_dev *dev = pblk->dev;
-			struct nvm_geo *geo = &dev->geo;
-			int bit;
-
-			atomic_inc(&e_line->left_eblks);
-			bit = erase_ppa.g.lun * geo->nr_chnls + erase_ppa.g.ch;
-			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
-			up(&pblk->erase_sem);
-		}
-	}
-
-	return ret;
+	return 0;
 }
 
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -280,7 +279,7 @@ int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 
 	c_ctx->lun_bitmap = lun_bitmap;
 
-	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas);
+	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
 	if (ret)
 		return ret;
 
@@ -311,16 +310,237 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
 	return secs_to_sync;
 }
 
+static inline int pblk_valid_meta_ppa(struct pblk *pblk,
+				      struct pblk_line *meta_line,
+				      struct ppa_addr *ppa_list, int nr_ppas)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_line *data_line;
+	struct ppa_addr ppa, ppa_opt;
+	u64 paddr;
+	int i;
+
+	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
+	paddr = pblk_lookup_page(pblk, meta_line);
+	ppa = addr_to_gen_ppa(pblk, paddr, 0);
+
+	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
+		return 1;
+
+	/* Schedule a metadata I/O that is half the distance from the data I/O
+	 * with regard to the number of LUNs forming the pblk instance. This
+	 * balances LUN conflicts across every I/O.
+	 *
+	 * When the LUN configuration changes (e.g., due to GC), this distance
+	 * can align, which would result in a LUN deadlock. In this case, modify
+	 * the distance to not be optimal, but allow metadata I/Os to succeed.
+	 */
+	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
+	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
+		data_line->meta_distance--;
+		return 0;
+	}
+
+	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
+		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
+					ppa_list[i].g.lun == ppa_opt.g.lun)
+			return 1;
+
+	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
+		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
+			if (ppa_list[i].g.ch == ppa.g.ch &&
+						ppa_list[i].g.lun == ppa.g.lun)
+				return 0;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
+{
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_emeta *emeta = meta_line->emeta;
+	struct pblk_g_ctx *m_ctx;
+	struct pblk_lun *rlun;
+	struct bio *bio;
+	struct nvm_rq *rqd;
+	void *data;
+	u64 paddr;
+	int rq_ppas = pblk->min_write_pgs;
+	int id = meta_line->id;
+	int rq_len;
+	int i, j;
+	int ret;
+
+	rqd = pblk_alloc_rqd(pblk, READ);
+	if (IS_ERR(rqd)) {
+		pr_err("pblk: cannot allocate write req.\n");
+		return PTR_ERR(rqd);
+	}
+	m_ctx = nvm_rq_to_pdu(rqd);
+	m_ctx->private = meta_line;
+
+	rq_len = rq_ppas * geo->sec_size;
+	data = ((void *)emeta->buf) + emeta->mem;
+
+	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
+					l_mg->emeta_alloc_type, GFP_KERNEL);
+	if (IS_ERR(bio)) {
+		ret = PTR_ERR(bio);
+		goto fail_free_rqd;
+	}
+	bio->bi_iter.bi_sector = 0; /* internal bio */
+	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+	rqd->bio = bio;
+
+	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
+	if (ret)
+		goto fail_free_bio;
+
+	for (i = 0; i < rqd->nr_ppas; ) {
+		spin_lock(&meta_line->lock);
+		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
+		spin_unlock(&meta_line->lock);
+		for (j = 0; j < rq_ppas; j++, i++, paddr++)
+			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
+	}
+
+	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
+	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
+	if (ret) {
+		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
+		goto fail_free_bio;
+	}
+
+	emeta->mem += rq_len;
+	if (emeta->mem >= lm->emeta_len[0]) {
+		spin_lock(&l_mg->close_lock);
+		list_del(&meta_line->list);
+		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
+				"pblk: corrupt meta line %d\n", meta_line->id);
+		spin_unlock(&l_mg->close_lock);
+	}
+
+	ret = pblk_submit_io(pblk, rqd);
+	if (ret) {
+		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
+		goto fail_rollback;
+	}
+
+	return NVM_IO_OK;
+
+fail_rollback:
+	spin_lock(&l_mg->close_lock);
+	pblk_dealloc_page(pblk, meta_line, rq_ppas);
+	list_add(&meta_line->list, &meta_line->list);
+	spin_unlock(&l_mg->close_lock);
+fail_free_bio:
+	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
+		bio_put(bio);
+fail_free_rqd:
+	pblk_free_rqd(pblk, rqd, READ);
+	return ret;
+}
+
+static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
+			       int prev_n)
+{
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+	struct pblk_line *meta_line;
+
+	spin_lock(&l_mg->close_lock);
+retry:
+	if (list_empty(&l_mg->emeta_list)) {
+		spin_unlock(&l_mg->close_lock);
+		return 0;
+	}
+	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
+	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
+		goto retry;
+	spin_unlock(&l_mg->close_lock);
+
+	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
+		return 0;
+
+	return pblk_submit_meta_io(pblk, meta_line);
+}
+
+static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+	struct ppa_addr erase_ppa;
+	int err;
+
+	ppa_set_empty(&erase_ppa);
+
+	/* Assign lbas to ppas and populate request structure */
+	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
+	if (err) {
+		pr_err("pblk: could not setup write request: %d\n", err);
+		return NVM_IO_ERR;
+	}
+
+	if (likely(ppa_empty(erase_ppa))) {
+		/* Submit metadata write for previous data line */
+		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
+		if (err) {
+			pr_err("pblk: metadata I/O submission failed: %d", err);
+			return NVM_IO_ERR;
+		}
+
+		/* Submit data write for current data line */
+		err = pblk_submit_io(pblk, rqd);
+		if (err) {
+			pr_err("pblk: data I/O submission failed: %d\n", err);
+			return NVM_IO_ERR;
+		}
+	} else {
+		/* Submit data write for current data line */
+		err = pblk_submit_io(pblk, rqd);
+		if (err) {
+			pr_err("pblk: data I/O submission failed: %d\n", err);
+			return NVM_IO_ERR;
+		}
+
+		/* Submit available erase for next data line */
+		if (pblk_blk_erase_async(pblk, erase_ppa)) {
+			struct pblk_line *e_line = pblk_line_get_erase(pblk);
+			struct nvm_tgt_dev *dev = pblk->dev;
+			struct nvm_geo *geo = &dev->geo;
+			int bit;
+
+			atomic_inc(&e_line->left_eblks);
+			bit = pblk_ppa_to_pos(geo, erase_ppa);
+			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
+		}
+	}
+
+	return NVM_IO_OK;
+}
+
+static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+	struct bio *bio = rqd->bio;
+
+	if (c_ctx->nr_padded)
+		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
+}
+
 static int pblk_submit_write(struct pblk *pblk)
 {
 	struct bio *bio;
 	struct nvm_rq *rqd;
-	struct pblk_c_ctx *c_ctx;
-	unsigned int pgs_read;
 	unsigned int secs_avail, secs_to_sync, secs_to_com;
 	unsigned int secs_to_flush;
 	unsigned long pos;
-	int err;
 
 	/* If there are no sectors in the cache, flushes (bios without data)
 	 * will be cleared on the cache threads
@@ -338,7 +558,6 @@ static int pblk_submit_write(struct pblk *pblk)
 		pr_err("pblk: cannot allocate write req.\n");
 		return 1;
 	}
-	c_ctx = nvm_rq_to_pdu(rqd);
 
 	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
 	if (!bio) {
@@ -358,29 +577,14 @@ static int pblk_submit_write(struct pblk *pblk)
 	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
 	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
 
-	pgs_read = pblk_rb_read_to_bio(&pblk->rwb, bio, c_ctx, pos,
-						secs_to_sync, secs_avail);
-	if (!pgs_read) {
+	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
+								secs_avail)) {
 		pr_err("pblk: corrupted write bio\n");
 		goto fail_put_bio;
 	}
 
-	if (c_ctx->nr_padded)
-		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, c_ctx->nr_padded))
-			goto fail_put_bio;
-
-	/* Assign lbas to ppas and populate request structure */
-	err = pblk_setup_w_rq(pblk, rqd, c_ctx);
-	if (err) {
-		pr_err("pblk: could not setup write request\n");
+	if (pblk_submit_io_set(pblk, rqd))
 		goto fail_free_bio;
-	}
-
-	err = pblk_submit_io(pblk, rqd);
-	if (err) {
-		pr_err("pblk: I/O submission failed: %d\n", err);
-		goto fail_free_bio;
-	}
 
 #ifdef CONFIG_NVM_DEBUG
 	atomic_long_add(secs_to_sync, &pblk->sub_writes);
@@ -389,8 +593,7 @@ static int pblk_submit_write(struct pblk *pblk)
 	return 0;
 
 fail_free_bio:
-	if (c_ctx->nr_padded)
-		pblk_bio_free_pages(pblk, bio, secs_to_sync, c_ctx->nr_padded);
+	pblk_free_write_rqd(pblk, rqd);
 fail_put_bio:
 	bio_put(bio);
 fail_free_rqd:
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 99f3186..1593138 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -40,6 +40,12 @@
 #define PBLK_MAX_REQ_ADDRS (64)
 #define PBLK_MAX_REQ_ADDRS_PW (6)
 
+#define PBLK_WS_POOL_SIZE (128)
+#define PBLK_META_POOL_SIZE (128)
+#define PBLK_READ_REQ_POOL_SIZE (1024)
+
+#define PBLK_NR_CLOSE_JOBS (4)
+
 #define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
 
 #define PBLK_COMMAND_TIMEOUT_MS 30000
@@ -72,11 +78,15 @@ enum {
 	PBLK_BLK_ST_CLOSED =	0x2,
 };
 
+struct pblk_sec_meta {
+	u64 reserved;
+	__le64 lba;
+};
+
 /* The number of GC lists and the rate-limiter states go together. This way the
  * rate-limiter can dictate how much GC is needed based on resource utilization.
  */
-#define PBLK_NR_GC_LISTS 3
-#define PBLK_MAX_GC_JOBS 32
+#define PBLK_GC_NR_LISTS 3
 
 enum {
 	PBLK_RL_HIGH = 1,
@@ -84,14 +94,9 @@ enum {
 	PBLK_RL_LOW = 3,
 };
 
-struct pblk_sec_meta {
-	u64 reserved;
-	__le64 lba;
-};
-
 #define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
 
-/* write completion context */
+/* write buffer completion context */
 struct pblk_c_ctx {
 	struct list_head list;		/* Head for out-of-order completion */
 
@@ -101,9 +106,16 @@ struct pblk_c_ctx {
 	unsigned int nr_padded;
 };
 
-/* Read context */
-struct pblk_r_ctx {
-	struct bio *orig_bio;
+/* generic context */
+struct pblk_g_ctx {
+	void *private;
+};
+
+/* Pad context */
+struct pblk_pad_rq {
+	struct pblk *pblk;
+	struct completion wait;
+	struct kref ref;
 };
 
 /* Recovery context */
@@ -195,29 +207,39 @@ struct pblk_lun {
 struct pblk_gc_rq {
 	struct pblk_line *line;
 	void *data;
-	u64 *lba_list;
+	u64 lba_list[PBLK_MAX_REQ_ADDRS];
 	int nr_secs;
 	int secs_to_gc;
 	struct list_head list;
 };
 
 struct pblk_gc {
+	/* These states are not protected by a lock since (i) they are in the
+	 * fast path, and (ii) they are not critical.
+	 */
 	int gc_active;
 	int gc_enabled;
 	int gc_forced;
-	int gc_jobs_active;
-	atomic_t inflight_gc;
 
 	struct task_struct *gc_ts;
 	struct task_struct *gc_writer_ts;
+	struct task_struct *gc_reader_ts;
+
+	struct workqueue_struct *gc_line_reader_wq;
 	struct workqueue_struct *gc_reader_wq;
+
 	struct timer_list gc_timer;
 
+	struct semaphore gc_sem;
+	atomic_t inflight_gc;
 	int w_entries;
+
 	struct list_head w_list;
+	struct list_head r_list;
 
 	spinlock_t lock;
 	spinlock_t w_lock;
+	spinlock_t r_lock;
 };
 
 struct pblk_rl {
@@ -229,10 +251,8 @@ struct pblk_rl {
 				 */
 	unsigned int high_pw;	/* High rounded up as a power of 2 */
 
-#define PBLK_USER_HIGH_THRS 2	/* Begin write limit at 50 percent
-				 * available blks
-				 */
-#define PBLK_USER_LOW_THRS 20	/* Aggressive GC at 5% available blocks */
+#define PBLK_USER_HIGH_THRS 8	/* Begin write limit at 12% available blks */
+#define PBLK_USER_LOW_THRS 10	/* Aggressive GC at 10% available blocks */
 
 	int rb_windows_pw;	/* Number of rate windows in the write buffer
 				 * given as a power-of-2. This guarantees that
@@ -244,13 +264,19 @@ struct pblk_rl {
 				 */
 	int rb_budget;		/* Total number of entries available for I/O */
 	int rb_user_max;	/* Max buffer entries available for user I/O */
-	atomic_t rb_user_cnt;	/* User I/O buffer counter */
 	int rb_gc_max;		/* Max buffer entries available for GC I/O */
 	int rb_gc_rsv;		/* Reserved buffer entries for GC I/O */
 	int rb_state;		/* Rate-limiter current state */
+
+	atomic_t rb_user_cnt;	/* User I/O buffer counter */
 	atomic_t rb_gc_cnt;	/* GC I/O buffer counter */
+	atomic_t rb_space;	/* Space limit in case of reaching capacity */
+
+	int rsv_blocks;		/* Reserved blocks for GC */
 
 	int rb_user_active;
+	int rb_gc_active;
+
 	struct timer_list u_timer;
 
 	unsigned long long nr_secs;
@@ -258,8 +284,6 @@ struct pblk_rl {
 	atomic_t free_blocks;
 };
 
-#define PBLK_LINE_NR_LUN_BITMAP 2
-#define PBLK_LINE_NR_SEC_BITMAP 2
 #define PBLK_LINE_EMPTY (~0U)
 
 enum {
@@ -310,16 +334,19 @@ struct line_smeta {
 	__le32 window_wr_lun;	/* Number of parallel LUNs to write */
 
 	__le32 rsvd[2];
+
+	__le64 lun_bitmap[];
 };
 
 /*
- * Metadata Layout:
- *	1. struct pblk_emeta
- *	2. nr_lbas u64 forming lba list
- *	3. nr_lines (all) u32 valid sector count (vsc) (~0U: non-alloc line)
- *	4. nr_luns bits (u64 format) forming line bad block bitmap
- *
- *	3. and 4. will be part of FTL log
+ * Metadata layout in media:
+ *	First sector:
+ *		1. struct line_emeta
+ *		2. bad block bitmap (u64 * window_wr_lun)
+ *	Mid sectors (start at lbas_sector):
+ *		3. nr_lbas (u64) forming lba list
+ *	Last sectors (start at vsc_sector):
+ *		4. u32 valid sector count (vsc) for all lines (~0U: free line)
  */
 struct line_emeta {
 	struct line_header header;
@@ -339,6 +366,23 @@ struct line_emeta {
 	__le32 next_id;		/* Line id for next line */
 	__le64 nr_lbas;		/* Number of lbas mapped in line */
 	__le64 nr_valid_lbas;	/* Number of valid lbas mapped in line */
+	__le64 bb_bitmap[];	/* Updated bad block bitmap for line */
+};
+
+struct pblk_emeta {
+	struct line_emeta *buf;		/* emeta buffer in media format */
+	int mem;			/* Write offset - points to next
+					 * writable entry in memory
+					 */
+	atomic_t sync;			/* Synced - backpointer that signals the
+					 * last entry that has been successfully
+					 * persisted to media
+					 */
+	unsigned int nr_entries;	/* Number of emeta entries */
+};
+
+struct pblk_smeta {
+	struct line_smeta *buf;		/* smeta buffer in persistent format */
 };
 
 struct pblk_line {
@@ -355,9 +399,12 @@ struct pblk_line {
 
 	unsigned long *lun_bitmap;	/* Bitmap for LUNs mapped in line */
 
-	struct line_smeta *smeta;	/* Start metadata */
-	struct line_emeta *emeta;	/* End metadata */
+	struct pblk_smeta *smeta;	/* Start metadata */
+	struct pblk_emeta *emeta;	/* End metadata */
+
 	int meta_line;			/* Metadata line id */
+	int meta_distance;		/* Distance between data and metadata */
+
 	u64 smeta_ssec;			/* Sector where smeta starts */
 	u64 emeta_ssec;			/* Sector where emeta starts */
 
@@ -374,9 +421,10 @@ struct pblk_line {
 	atomic_t left_seblks;		/* Blocks left for sync erasing */
 
 	int left_msecs;			/* Sectors left for mapping */
-	int left_ssecs;			/* Sectors left to sync */
 	unsigned int cur_sec;		/* Sector map pointer */
-	unsigned int vsc;		/* Valid sector count in line */
+	unsigned int nr_valid_lbas;	/* Number of valid lbas in line */
+
+	__le32 *vsc;			/* Valid sector count in line */
 
 	struct kref ref;		/* Write buffer L2P references */
 
@@ -385,13 +433,15 @@ struct pblk_line {
 
 #define PBLK_DATA_LINES 4
 
-enum{
+enum {
 	PBLK_KMALLOC_META = 1,
 	PBLK_VMALLOC_META = 2,
 };
 
-struct pblk_line_metadata {
-	void *meta;
+enum {
+	PBLK_EMETA_TYPE_HEADER = 1,	/* struct line_emeta first sector */
+	PBLK_EMETA_TYPE_LLBA = 2,	/* lba list - type: __le64 */
+	PBLK_EMETA_TYPE_VSC = 3,	/* vsc list - type: __le32 */
 };
 
 struct pblk_line_mgmt {
@@ -404,7 +454,7 @@ struct pblk_line_mgmt {
 	struct list_head bad_list;	/* Full lines bad */
 
 	/* GC lists - use gc_lock */
-	struct list_head *gc_lists[PBLK_NR_GC_LISTS];
+	struct list_head *gc_lists[PBLK_GC_NR_LISTS];
 	struct list_head gc_high_list;	/* Full lines ready to GC, high isc */
 	struct list_head gc_mid_list;	/* Full lines ready to GC, mid isc */
 	struct list_head gc_low_list;	/* Full lines ready to GC, low isc */
@@ -417,13 +467,16 @@ struct pblk_line_mgmt {
 	struct pblk_line *log_next;	/* Next FTL log line */
 	struct pblk_line *data_next;	/* Next data line */
 
+	struct list_head emeta_list;	/* Lines queued to schedule emeta */
+
+	__le32 *vsc_list;		/* Valid sector counts for all lines */
+
 	/* Metadata allocation type: VMALLOC | KMALLOC */
-	int smeta_alloc_type;
 	int emeta_alloc_type;
 
 	/* Pre-allocated metadata for data lines */
-	struct pblk_line_metadata sline_meta[PBLK_DATA_LINES];
-	struct pblk_line_metadata eline_meta[PBLK_DATA_LINES];
+	struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
+	struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
 	unsigned long meta_bitmap;
 
 	/* Helpers for fast bitmap calculations */
@@ -434,25 +487,40 @@ struct pblk_line_mgmt {
 	unsigned long l_seq_nr;		/* Log line unique sequence number */
 
 	spinlock_t free_lock;
+	spinlock_t close_lock;
 	spinlock_t gc_lock;
 };
 
 struct pblk_line_meta {
 	unsigned int smeta_len;		/* Total length for smeta */
-	unsigned int smeta_sec;		/* Sectors needed for smeta*/
-	unsigned int emeta_len;		/* Total length for emeta */
-	unsigned int emeta_sec;		/* Sectors needed for emeta*/
+	unsigned int smeta_sec;		/* Sectors needed for smeta */
+
+	unsigned int emeta_len[4];	/* Lengths for emeta:
+					 *  [0]: Total length
+					 *  [1]: struct line_emeta length
+					 *  [2]: L2P portion length
+					 *  [3]: vsc list length
+					 */
+	unsigned int emeta_sec[4];	/* Sectors needed for emeta. Same layout
+					 * as emeta_len
+					 */
+
 	unsigned int emeta_bb;		/* Boundary for bb that affects emeta */
+
+	unsigned int vsc_list_len;	/* Length for vsc list */
 	unsigned int sec_bitmap_len;	/* Length for sector bitmap in line */
 	unsigned int blk_bitmap_len;	/* Length for block bitmap in line */
 	unsigned int lun_bitmap_len;	/* Length for lun bitmap in line */
 
 	unsigned int blk_per_line;	/* Number of blocks in a full line */
 	unsigned int sec_per_line;	/* Number of sectors in a line */
+	unsigned int dsec_per_line;	/* Number of data sectors in a line */
 	unsigned int min_blk_line;	/* Min. number of good blocks in line */
 
 	unsigned int mid_thrs;		/* Threshold for GC mid list */
 	unsigned int high_thrs;		/* Threshold for GC high list */
+
+	unsigned int meta_distance;	/* Distance between data and metadata */
 };
 
 struct pblk_addr_format {
@@ -470,6 +538,13 @@ struct pblk_addr_format {
 	u8	sec_offset;
 };
 
+enum {
+	PBLK_STATE_RUNNING = 0,
+	PBLK_STATE_STOPPING = 1,
+	PBLK_STATE_RECOVERING = 2,
+	PBLK_STATE_STOPPED = 3,
+};
+
 struct pblk {
 	struct nvm_tgt_dev *dev;
 	struct gendisk *disk;
@@ -487,6 +562,8 @@ struct pblk {
 
 	struct pblk_rb rwb;
 
+	int state;			/* pblk line state */
+
 	int min_write_pgs; /* Minimum amount of pages required by controller */
 	int max_write_pgs; /* Maximum amount of pages supported by controller */
 	int pgs_in_buffer; /* Number of pages that need to be held in buffer to
@@ -499,7 +576,7 @@ struct pblk {
 	/* pblk provisioning values. Used by rate limiter */
 	struct pblk_rl rl;
 
-	struct semaphore erase_sem;
+	int sec_per_write;
 
 	unsigned char instance_uuid[16];
 #ifdef CONFIG_NVM_DEBUG
@@ -511,8 +588,8 @@ struct pblk {
 	atomic_long_t req_writes;	/* Sectors stored on write buffer */
 	atomic_long_t sub_writes;	/* Sectors submitted from buffer */
 	atomic_long_t sync_writes;	/* Sectors synced to media */
-	atomic_long_t compl_writes;	/* Sectors completed in write bio */
 	atomic_long_t inflight_reads;	/* Inflight sector read requests */
+	atomic_long_t cache_reads;	/* Read requests that hit the cache */
 	atomic_long_t sync_reads;	/* Completed sector read requests */
 	atomic_long_t recov_writes;	/* Sectors submitted from recovery */
 	atomic_long_t recov_gc_writes;	/* Sectors submitted from write GC */
@@ -528,6 +605,8 @@ struct pblk {
 	atomic_long_t write_failed;
 	atomic_long_t erase_failed;
 
+	atomic_t inflight_io;		/* General inflight I/O counter */
+
 	struct task_struct *writer_ts;
 
 	/* Simple translation map of logical addresses to physical addresses.
@@ -542,11 +621,13 @@ struct pblk {
 	mempool_t *page_pool;
 	mempool_t *line_ws_pool;
 	mempool_t *rec_pool;
-	mempool_t *r_rq_pool;
+	mempool_t *g_rq_pool;
 	mempool_t *w_rq_pool;
 	mempool_t *line_meta_pool;
 
-	struct workqueue_struct *kw_wq;
+	struct workqueue_struct *close_wq;
+	struct workqueue_struct *bb_wq;
+
 	struct timer_list wtimer;
 
 	struct pblk_gc gc;
@@ -559,7 +640,7 @@ struct pblk_line_ws {
 	struct work_struct ws;
 };
 
-#define pblk_r_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_r_ctx))
+#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 #define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
 
 /*
@@ -579,18 +660,17 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
 			    struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
 			    unsigned int pos);
 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
+void pblk_rb_flush(struct pblk_rb *rb);
 
 void pblk_rb_sync_l2p(struct pblk_rb *rb);
-unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct bio *bio,
-				 struct pblk_c_ctx *c_ctx,
-				 unsigned int pos,
-				 unsigned int nr_entries,
-				 unsigned int count);
+unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
+				 struct bio *bio, unsigned int pos,
+				 unsigned int nr_entries, unsigned int count);
 unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
 				      struct list_head *list,
 				      unsigned int max);
 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-			u64 pos, int bio_iter);
+			struct ppa_addr ppa, int bio_iter);
 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 
 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
@@ -601,6 +681,7 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
 unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
 
 unsigned int pblk_rb_read_count(struct pblk_rb *rb);
+unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
 unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
 
 int pblk_rb_tear_down_check(struct pblk_rb *rb);
@@ -612,40 +693,50 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
  * pblk core
  */
 struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
+void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			struct pblk_c_ctx *c_ctx);
 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
-void pblk_flush_writer(struct pblk *pblk);
+void pblk_wait_for_meta(struct pblk *pblk);
 struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
 void pblk_discard(struct pblk *pblk, struct bio *bio);
 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 			      unsigned int nr_secs, unsigned int len,
-			      gfp_t gfp_mask);
+			      int alloc_type, gfp_t gfp_mask);
 struct pblk_line *pblk_line_get(struct pblk *pblk);
 struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
-struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
+void pblk_line_replace_data(struct pblk *pblk);
 int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 struct pblk_line *pblk_line_get_data(struct pblk *pblk);
-struct pblk_line *pblk_line_get_data_next(struct pblk *pblk);
+struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
 int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
 int pblk_line_is_full(struct pblk_line *line);
 void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close_ws(struct work_struct *work);
+void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
 void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
+void pblk_line_close_meta_sync(struct pblk *pblk);
+void pblk_line_close_ws(struct work_struct *work);
+void pblk_pipeline_stop(struct pblk *pblk);
 void pblk_line_mark_bb(struct work_struct *work);
 void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
-		      void (*work)(struct work_struct *));
+		      void (*work)(struct work_struct *),
+		      struct workqueue_struct *wq);
 u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
 int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line);
+int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
+			 void *emeta_buf);
 int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
 void pblk_line_put(struct kref *ref);
 struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
+u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
+void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
+u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 		   unsigned long secs_to_flush);
 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
@@ -656,11 +747,11 @@ void pblk_end_bio_sync(struct bio *bio);
 void pblk_end_io_sync(struct nvm_rq *rqd);
 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 		       int nr_pages);
-void pblk_map_pad_invalidate(struct pblk *pblk, struct pblk_line *line,
-			     u64 paddr);
 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 			 int nr_pages);
 void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
+void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
+			   u64 paddr);
 void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
 void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
 			   struct ppa_addr ppa);
@@ -702,6 +793,7 @@ void pblk_write_should_kick(struct pblk *pblk);
 /*
  * pblk read path
  */
+extern struct bio_set *pblk_bio_set;
 int pblk_submit_read(struct pblk *pblk, struct bio *bio);
 int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
 			unsigned int nr_secs, unsigned int *secs_to_gc,
@@ -711,7 +803,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
  */
 void pblk_submit_rec(struct work_struct *work);
 struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
-void pblk_recov_pad(struct pblk *pblk);
+int pblk_recov_pad(struct pblk *pblk);
 __le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
 int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 			struct pblk_rec_ctx *recovery, u64 *comp_bits,
@@ -720,33 +812,40 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
 /*
  * pblk gc
  */
-#define PBLK_GC_TRIES 3
+#define PBLK_GC_MAX_READERS 8	/* Max number of outstanding GC reader jobs */
+#define PBLK_GC_W_QD 128	/* Queue depth for inflight GC write I/Os */
+#define PBLK_GC_L_QD 4		/* Queue depth for inflight GC lines */
+#define PBLK_GC_RSV_LINE 1	/* Reserved lines for GC */
 
 int pblk_gc_init(struct pblk *pblk);
 void pblk_gc_exit(struct pblk *pblk);
 void pblk_gc_should_start(struct pblk *pblk);
 void pblk_gc_should_stop(struct pblk *pblk);
-int pblk_gc_status(struct pblk *pblk);
+void pblk_gc_should_kick(struct pblk *pblk);
+void pblk_gc_kick(struct pblk *pblk);
 void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 			      int *gc_active);
-void pblk_gc_sysfs_force(struct pblk *pblk, int force);
+int pblk_gc_sysfs_force(struct pblk *pblk, int force);
 
 /*
  * pblk rate limiter
  */
 void pblk_rl_init(struct pblk_rl *rl, int budget);
 void pblk_rl_free(struct pblk_rl *rl);
-int pblk_rl_gc_thrs(struct pblk_rl *rl);
+int pblk_rl_high_thrs(struct pblk_rl *rl);
+int pblk_rl_low_thrs(struct pblk_rl *rl);
 unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
 int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
+void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
 void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
 int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
 void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
 void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
-void pblk_rl_set_gc_rsc(struct pblk_rl *rl, int rsv);
 int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
 void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
 void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
+int pblk_rl_is_limit(struct pblk_rl *rl);
 
 /*
  * pblk sysfs
@@ -774,9 +873,30 @@ static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
 	return c_ctx - sizeof(struct nvm_rq);
 }
 
-static inline void *pblk_line_emeta_to_lbas(struct line_emeta *emeta)
+static inline void *emeta_to_bb(struct line_emeta *emeta)
 {
-	return (emeta) + 1;
+	return emeta->bb_bitmap;
+}
+
+static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
+{
+	return ((void *)emeta + pblk->lm.emeta_len[1]);
+}
+
+static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
+{
+	return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
+}
+
+static inline int pblk_line_vsc(struct pblk_line *line)
+{
+	int vsc;
+
+	spin_lock(&line->lock);
+	vsc = le32_to_cpu(*line->vsc);
+	spin_unlock(&line->lock);
+
+	return vsc;
 }
 
 #define NVM_MEM_PAGE_WRITE (8)
@@ -917,6 +1037,14 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
 	ppa_addr->ppa = ADDR_EMPTY;
 }
 
+static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
+{
+	if (lppa.ppa == rppa.ppa)
+		return true;
+
+	return false;
+}
+
 static inline int pblk_addr_in_cache(struct ppa_addr ppa)
 {
 	return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
@@ -964,11 +1092,11 @@ static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
 }
 
 static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
-					    struct line_smeta *smeta)
+					    struct line_header *header)
 {
 	u32 crc = ~(u32)0;
 
-	crc = crc32_le(crc, (unsigned char *)smeta + sizeof(crc),
+	crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
 				sizeof(struct line_header) - sizeof(crc));
 
 	return crc;
@@ -996,7 +1124,7 @@ static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
 
 	crc = crc32_le(crc, (unsigned char *)emeta +
 				sizeof(struct line_header) + sizeof(crc),
-				lm->emeta_len -
+				lm->emeta_len[0] -
 				sizeof(struct line_header) - sizeof(crc));
 
 	return crc;
@@ -1016,9 +1144,27 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
 	return flags;
 }
 
-static inline int pblk_set_read_mode(struct pblk *pblk)
+enum {
+	PBLK_READ_RANDOM	= 0,
+	PBLK_READ_SEQUENTIAL	= 1,
+};
+
+static inline int pblk_set_read_mode(struct pblk *pblk, int type)
 {
-	return NVM_IO_SNGL_ACCESS | NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	int flags;
+
+	flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
+	if (type == PBLK_READ_SEQUENTIAL)
+		flags |= geo->plane_mode >> 1;
+
+	return flags;
+}
+
+static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
+{
+	return !(nr_secs % pblk->min_write_pgs);
 }
 
 #ifdef CONFIG_NVM_DEBUG
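
The PBLK_READ_RANDOM/PBLK_READ_SEQUENTIAL split above only decides which plane-mode bits end up in the request flags; the obvious pairing is to pick the sequential mode when the request is aligned to the minimum write size and the random mode otherwise. A rough sketch of that call pattern (the local variable names are illustrative, not taken from this hunk):

	int flags;

	if (pblk_io_aligned(pblk, nr_secs))
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
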
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index cf0e28a..267f01a 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -279,8 +279,8 @@ static void rrpc_end_sync_bio(struct bio *bio)
 {
 	struct completion *waiting = bio->bi_private;
 
-	if (bio->bi_error)
-		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
+	if (bio->bi_status)
+		pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
 
 	complete(waiting);
 }
@@ -359,7 +359,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 			goto finished;
 		}
 		wait_for_completion_io(&wait);
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			rrpc_inflight_laddr_release(rrpc, rqd);
 			goto finished;
 		}
@@ -385,7 +385,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 		wait_for_completion_io(&wait);
 
 		rrpc_inflight_laddr_release(rrpc, rqd);
-		if (bio->bi_error)
+		if (bio->bi_status)
 			goto finished;
 
 		bio_reset(bio);
@@ -994,7 +994,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	struct nvm_rq *rqd;
 	int err;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	if (bio_op(bio) == REQ_OP_DISCARD) {
 		rrpc_discard(rrpc, bio);
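
The rrpc hunks are the first of many in this pull that switch completion handling from the old int bi_error field to the blk_status_t bi_status field. The shape of the conversion is the same everywhere: test bio->bi_status directly (zero still means success) and translate to or from an errno only at the boundaries, via blk_status_to_errno()/errno_to_blk_status(). A minimal sketch of a converted completion handler (example_endio is an illustrative name, not part of this series):

	static void example_endio(struct bio *bio)
	{
		struct completion *waiting = bio->bi_private;

		if (bio->bi_status)	/* a BLK_STS_* value, not an errno */
			pr_err("example: request failed (%d)\n",
			       blk_status_to_errno(bio->bi_status));

		complete(waiting);
	}
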
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index f757cef2..62f541f 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -133,7 +133,7 @@ static int macio_device_resume(struct device * dev)
 	return 0;
 }
 
-extern struct device_attribute macio_dev_attrs[];
+extern const struct attribute_group *macio_dev_groups[];
 
 struct bus_type macio_bus_type = {
        .name	= "macio",
@@ -144,7 +144,7 @@ struct bus_type macio_bus_type = {
        .shutdown = macio_device_shutdown,
        .suspend	= macio_device_suspend,
        .resume	= macio_device_resume,
-       .dev_attrs = macio_dev_attrs,
+       .dev_groups = macio_dev_groups,
 };
 
 static int __init macio_bus_driver_init(void)
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 0b1f9c7..2445274 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -10,7 +10,8 @@ field##_show (struct device *dev, struct device_attribute *attr,	\
 {									\
 	struct macio_dev *mdev = to_macio_device (dev);			\
 	return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
-}
+}									\
+static DEVICE_ATTR_RO(field);
 
 static ssize_t
 compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
@@ -37,6 +38,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
 
 	return length;
 }
+static DEVICE_ATTR_RO(compatible);
 
 static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -52,15 +54,26 @@ static ssize_t devspec_show(struct device *dev,
 	ofdev = to_platform_device(dev);
 	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
 }
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_RO(devspec);
 
 macio_config_of_attr (name, "%s\n");
 macio_config_of_attr (type, "%s\n");
 
-struct device_attribute macio_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(type),
-	__ATTR_RO(compatible),
-	__ATTR_RO(modalias),
-	__ATTR_RO(devspec),
-	__ATTR_NULL
+static struct attribute *macio_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_type.attr,
+	&dev_attr_compatible.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_devspec.attr,
+	NULL,
+};
+
+static const struct attribute_group macio_dev_group = {
+	.attrs = macio_dev_attrs,
+};
+
+const struct attribute_group *macio_dev_groups[] = {
+	&macio_dev_group,
+	NULL,
 };
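
The macio conversion follows the standard recipe for retiring bus_type.dev_attrs: each show() helper is wrapped with DEVICE_ATTR_RO(), the resulting dev_attr_* entries are collected into an attribute_group, and a NULL-terminated group array is hung off .dev_groups. The group array is open-coded and non-static here because macio_asic.c references it from another file; a single-file driver could instead use the ATTRIBUTE_GROUPS() shorthand, roughly like this (the "example" names are illustrative):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%s\n", dev_name(dev));
	}
	static DEVICE_ATTR_RO(example);

	static struct attribute *example_dev_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(example_dev);	/* emits example_dev_group and example_dev_groups[] */
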
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index dd9ecd35..ac91fd0 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -203,7 +203,7 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
 		struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
 		u32 id = chan - pcc_mbox_channels;
 
-		doorbell_ack = &pcct2_ss->doorbell_ack_register;
+		doorbell_ack = &pcct2_ss->platform_ack_register;
 		doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
 		doorbell_ack_write = pcct2_ss->ack_write_mask;
 
@@ -416,11 +416,11 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header,
 static int pcc_parse_subspace_irq(int id,
 				  struct acpi_pcct_hw_reduced *pcct_ss)
 {
-	pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->doorbell_interrupt,
+	pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
 						 (u32)pcct_ss->flags);
 	if (pcc_doorbell_irq[id] <= 0) {
 		pr_err("PCC GSI %d not registered\n",
-		       pcct_ss->doorbell_interrupt);
+		       pcct_ss->platform_interrupt);
 		return -EINVAL;
 	}
 
@@ -429,8 +429,8 @@ static int pcc_parse_subspace_irq(int id,
 		struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
 
 		pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
-				pcct2_ss->doorbell_ack_register.address,
-				pcct2_ss->doorbell_ack_register.bit_width / 8);
+				pcct2_ss->platform_ack_register.address,
+				pcct2_ss->platform_ack_register.bit_width / 8);
 		if (!pcc_doorbell_ack_vaddr[id]) {
 			pr_err("Failed to ioremap PCC ACK register\n");
 			return -ENOMEM;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c..dee542f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)
 
 /* Forward declarations */
 
-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-			      int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+			      blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+		const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 450d0e8..866dcf7 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
 
 	bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_btree_node_io_error(b);
 
-	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
 	closure_put(cl);
 }
 
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 9b80417..73da1f5 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	/* for waiting on btree reserve in btree_split() */
-	wait_queue_t		wait;
+	wait_queue_entry_t		wait;
 
 	/* Btree level at which we start taking write locks */
 	short			lock;
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 06f5505..35a5a72 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -110,7 +110,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 	struct bio_vec bv, cbv;
 	struct bvec_iter iter, citer = { 0 };
 
-	check = bio_clone(bio, GFP_NOIO);
+	check = bio_clone_kmalloc(bio, GFP_NOIO);
 	if (!check)
 		return;
 	check->bi_opf = REQ_OP_READ;
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index db45a88..6a9b850 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 
 /* IO errors */
 
-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
 	/*
 	 * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }
 
 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-			      int error, const char *m)
+			      blk_status_t error, const char *m)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }
 
 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-		    int error, const char *m)
+		    blk_status_t error, const char *m)
 {
 	struct closure *cl = bio->bi_private;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 1198e53..0352d05 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;
 
-	cache_set_err_on(bio->bi_error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_status, w->c, "journal io error");
 	closure_put(&w->c->journal.io);
 }
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 13b8a90..f633b30 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
-	if (bio->bi_error)
-		io->op.error = bio->bi_error;
+	if (bio->bi_status)
+		io->op.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
-		io->op.error = -EINTR;
+		io->op.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
 	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct data_insert_op *op = &io->op;
 
-	if (!op->error) {
+	if (!op->status) {
 		moving_init(io);
 
 		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 709c9cc..019b3df 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (ret == -ESRCH) {
 		op->replace_collision = true;
 	} else if (ret) {
-		op->error		= -ENOMEM;
+		op->status		= BLK_STS_RESOURCE;
 		op->insert_data_done	= true;
 	}
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = bio->bi_error;
+			op->status = bio->bi_status;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
 	 * from the backing device.
 	 */
 
-	if (bio->bi_error)
-		s->iop.error = bio->bi_error;
+	if (bio->bi_status)
+		s->iop.status = bio->bi_status;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
-		s->iop.error = -EINTR;
+		s->iop.status = BLK_STS_IOERR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = bio->bi_error;
+		s->iop.status = bio->bi_status;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		s->orig_bio->bi_error = s->iop.error;
+		s->orig_bio->bi_status = s->iop.status;
 		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.inode		= d->id;
 	s->iop.write_point	= hash_long((unsigned long) current, 16);
 	s->iop.write_prio	= 0;
-	s->iop.error		= 0;
+	s->iop.status		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
 	s->iop.wq		= bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
-		s->iop.error = 0;
+		s->iop.status = 0;
 		do_bio_hook(s, s->orig_bio);
 
 		/* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 				  !s->cache_miss, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-	if (s->iop.error)
+	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 	else if (s->iop.bio || verify(dc, &s->bio.bio))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 1ff3687..7689176 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -10,7 +10,7 @@ struct data_insert_op {
 	unsigned		inode;
 	uint16_t		write_point;
 	uint16_t		write_prio;
-	short			error;
+	blk_status_t		status;
 
 	union {
 		uint16_t	flags;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e57353e..8352fad 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_status, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
 
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-	cache_set_err_on(bio->bi_error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_status, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }
@@ -782,7 +782,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 
 	minor *= BCACHE_MINORS;
 
-	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
+					   BIOSET_NEED_BVECS |
+					   BIOSET_NEED_RESCUER)) ||
 	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
 		ida_simple_remove(&bcache_minor, minor);
 		return -ENOMEM;
@@ -1516,7 +1518,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 				sizeof(struct bbio) + sizeof(struct bio_vec) *
 				bucket_pages(c))) ||
 	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
-	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
+					   BIOSET_NEED_BVECS |
+					   BIOSET_NEED_RESCUER)) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
 						WQ_MEM_RECLAIM, 0)) ||
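
bioset_create() now takes an explicit flags word, so the bvec mempool and the rescuer workqueue have to be requested where they are still needed, as bcache does above. A minimal sketch of the updated call (pool size and front padding below are illustrative):

	struct bio_set *bs;

	bs = bioset_create(4, 0, BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
	if (!bs)
		return -ENOMEM;
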
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6ac2e48..42c66e7 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		SET_KEY_DIRTY(&w->key, false);
 
 	closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
 	struct dirty_io *io = w->private;
 
 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    bio->bi_error, "reading dirty data from cache");
+			    bio->bi_status, "reading dirty data from cache");
 
 	dirty_endio(bio);
 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index bf7419a..f4eace5 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
 	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
 	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
-		 *(__u32 *)(sb->uuid+0),
-		 *(__u32 *)(sb->uuid+4),
-		 *(__u32 *)(sb->uuid+8),
-		 *(__u32 *)(sb->uuid+12));
+		 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
+		 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
+		 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
+		 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
 	pr_debug("        events: %llu\n",
 		 (unsigned long long) le64_to_cpu(sb->events));
 	pr_debug("events cleared: %llu\n",
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index ae7da2c..82d2738 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error)
+		   struct dm_bio_prison_cell *cell, blk_status_t error)
 {
 	struct bio_list bios;
 	struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
 	dm_cell_release(prison, cell, &bios);
 
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	}
 }
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index cddd4ac..cec52ac 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 			       struct dm_bio_prison_cell *cell,
 			       struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-		   struct dm_bio_prison_cell *cell, int error);
+		   struct dm_bio_prison_cell *cell, blk_status_t error);
 
 /*
  * Visits the cell and then releases.  Guarantees no new inmates are
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cd81395..850ff6c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -145,8 +145,8 @@ struct dm_buffer {
 	enum data_mode data_mode;
 	unsigned char list_mode;		/* LIST_* */
 	unsigned hold_count;
-	int read_error;
-	int write_error;
+	blk_status_t read_error;
+	blk_status_t write_error;
 	unsigned long state;
 	unsigned long last_accessed;
 	struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
 	struct dm_buffer *b = context;
 
-	b->bio.bi_error = error ? -EIO : 0;
+	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
 	b->bio.bi_end_io(&b->bio);
 }
 
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 
 	r = dm_io(&io_req, 1, &region, NULL);
 	if (r) {
-		b->bio.bi_error = r;
+		b->bio.bi_status = errno_to_blk_status(r);
 		end_io(&b->bio);
 	}
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
 	bio_end_io_t *end_fn = bio->bi_private;
-	int error = bio->bi_error;
+	blk_status_t status = bio->bi_status;
 
 	/*
 	 * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
 	 */
 	bio_reset(bio);
 
-	bio->bi_error = error;
+	bio->bi_status = status;
 	end_fn(bio);
 }
 
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->write_error = bio->bi_error;
-	if (unlikely(bio->bi_error)) {
+	b->write_error = bio->bi_status;
+	if (unlikely(bio->bi_status)) {
 		struct dm_bufio_client *c = b->c;
-		int error = bio->bi_error;
-		(void)cmpxchg(&c->async_write_error, 0, error);
+
+		(void)cmpxchg(&c->async_write_error, 0,
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->read_error = bio->bi_error;
+	b->read_error = bio->bi_status;
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
 	if (b->read_error) {
-		int error = b->read_error;
+		int error = blk_status_to_errno(b->read_error);
 
 		dm_bufio_release(b);
 
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
  */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-	int a, f;
+	blk_status_t a;
+	int f;
 	unsigned long buffers_processed = 0;
 	struct dm_buffer *b, *tmp;
 
@@ -1334,7 +1336,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = REQ_PREFLUSH,
+		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index d682a05..c5ea03f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct continuation {
 	struct work_struct ws;
-	int input;
+	blk_status_t input;
 };
 
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
 	/*
 	 * The operation that everyone is waiting for.
 	 */
-	int (*commit_op)(void *context);
+	blk_status_t (*commit_op)(void *context);
 	void *commit_context;
 
 	/*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-	int r;
+	blk_status_t r;
 	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 
 	while ((bio = bio_list_pop(&bios))) {
 		if (r) {
-			bio->bi_error = r;
+			bio->bi_status = r;
 			bio_endio(bio);
 		} else
 			b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }
 
 static void batcher_init(struct batcher *b,
-			 int (*commit_op)(void *),
+			 blk_status_t (*commit_op)(void *),
 			 void *commit_context,
 			 void (*issue_op)(struct bio *bio, void *),
 			 void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio_endio(bio);
 		return;
 	}
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 
 	if (read_err || write_err)
-		mg->k.input = -EIO;
+		mg->k.input = BLK_STS_IOERR;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (bio->bi_error)
-		mg->k.input = bio->bi_error;
+	if (bio->bi_status)
+		mg->k.input = bio->bi_status;
 
 	queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
 		if (mg->overwrite_bio) {
 			if (success)
 				force_set_dirty(cache, cblock);
+			else if (mg->k.input)
+				mg->overwrite_bio->bi_status = mg->k.input;
 			else
-				mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
 			bio_endio(mg->overwrite_bio);
 		} else {
 			if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
 		r = copy(mg, is_policy_promote);
 		if (r) {
 			DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-			mg->k.input = -EIO;
+			mg->k.input = BLK_STS_IOERR;
 			mg_complete(mg, false);
 		}
 	}
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
  */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
 	struct cache *cache = context;
 
 	if (dm_cache_changed_this_transaction(cache->cmd))
-		return commit(cache, false);
+		return errno_to_blk_status(commit(cache, false));
 
 	return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
 	bio_list_init(&cache->deferred_bios);
 
 	while ((bio = bio_list_pop(&bios))) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
 		bio_endio(bio);
 	}
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct cache *cache = ti->private;
 	unsigned long flags;
@@ -2838,7 +2840,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 	bio_drop_shared_lock(cache, bio);
 	accounted_complete(cache, bio);
 
-	return 0;
+	return DM_ENDIO_DONE;
 }
 
 static int write_dirty_bitset(struct cache *cache)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ebf9e72..9e1b72e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -71,7 +71,7 @@ struct dm_crypt_io {
 	struct convert_context ctx;
 
 	atomic_t io_pending;
-	int error;
+	blk_status_t error;
 	sector_t sector;
 
 	struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
 	unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
 		 */
 		case -EBADMSG:
 			atomic_dec(&ctx->cc_pending);
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		/*
 		 * There was an error while processing the request.
 		 */
 		default:
 			atomic_dec(&ctx->cc_pending);
-			return -EIO;
+			return BLK_STS_IOERR;
 		}
 	}
 
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 	struct bio *base_bio = io->base_bio;
-	int error = io->error;
+	blk_status_t error = io->error;
 
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	else
 		kfree(io->integrity_metadata);
 
-	base_bio->bi_error = error;
+	base_bio->bi_status = error;
 	bio_endio(base_bio);
 }
 
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
 	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
-	int error;
+	blk_status_t error;
 
 	/*
 	 * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
 	if (rw == WRITE)
 		crypt_free_buffer_pages(cc, clone);
 
-	error = clone->bi_error;
+	error = clone->bi_status;
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
 
 	crypt_inc_pending(io);
 	if (kcryptd_io_read(io, GFP_NOIO))
-		io->error = -ENOMEM;
+		io->error = BLK_STS_RESOURCE;
 	crypt_dec_pending(io);
 }
 
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	sector_t sector;
 	struct rb_node **rbp, *parent;
 
-	if (unlikely(io->error < 0)) {
+	if (unlikely(io->error)) {
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
 		crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct bio *clone;
 	int crypt_finished;
 	sector_t sector = io->sector;
-	int r;
+	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 		goto dec;
 	}
 
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
-	int r = 0;
+	blk_status_t r;
 
 	crypt_inc_pending(io);
 
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 			   io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
+	if (r)
 		io->error = r;
 
 	if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error == -EBADMSG) {
 		DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
 			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-		io->error = -EILSEQ;
+		io->error = BLK_STS_PROTECTION;
 	} else if (error < 0)
-		io->error = -EIO;
+		io->error = BLK_STS_IOERR;
 
 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
@@ -2677,7 +2677,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, 0);
+	cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
+					    BIOSET_NEED_RESCUER));
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
 		goto bad;
@@ -2795,10 +2796,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	 * and is aligned to this size as defined in IO hints.
 	 */
 	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
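
Bio-based targets can no longer report a fatal error by returning a negative errno from ->map(); they return DM_MAPIO_KILL and the DM core completes the bio with BLK_STS_IOERR, as the dm-crypt hunks above show. A minimal sketch of a map hook using the new convention (example_c is a hypothetical per-target context, not part of this series):

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_c *ec = ti->private;	/* hypothetical */

		if (bio->bi_iter.bi_size & (ec->block_size - 1))
			return DM_MAPIO_KILL;	/* core sets BLK_STS_IOERR */

		bio->bi_bdev = ec->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}
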
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 13305a1..3d04d5c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -321,7 +321,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 		if (bio_data_dir(bio) == READ) {
 			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
 			    !test_bit(ERROR_WRITES, &fc->flags))
-				return -EIO;
+				return DM_MAPIO_KILL;
 			goto map_bio;
 		}
 
@@ -349,7 +349,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 		/*
 		 * By default, error all I/O.
 		 */
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 map_bio:
@@ -358,12 +358,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct flakey_c *fc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
 		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
 		    all_corrupt_bio_flags_match(bio, fc)) {
 			/*
@@ -377,11 +378,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
 			 * Error read during the down_interval if drop_writes
 			 * and error_writes were not configured.
 			 */
-			return -EIO;
+			*error = BLK_STS_IOERR;
 		}
 	}
 
-	return error;
+	return DM_ENDIO_DONE;
 }
 
 static void flakey_status(struct dm_target *ti, status_type_t type,
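
The ->end_io() hook changes shape as well: it now receives a pointer to the bio's blk_status_t and returns a DM_ENDIO_* code, so a target that wants to override the completion status writes through the pointer instead of returning an errno, exactly as flakey_end_io() does above. Sketch with hypothetical names:

	static int example_end_io(struct dm_target *ti, struct bio *bio,
				  blk_status_t *error)
	{
		struct example_c *ec = ti->private;	/* hypothetical */

		if (!*error && ec->fail_reads && bio_data_dir(bio) == READ)
			*error = BLK_STS_IOERR;	/* override the bio status */

		return DM_ENDIO_DONE;
	}
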
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c7f7c8d..1b224aa 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -246,7 +246,7 @@ struct dm_integrity_io {
 	unsigned metadata_offset;
 
 	atomic_t in_flight;
-	int bi_error;
+	blk_status_t bi_status;
 
 	struct completion *completion;
 
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
 			for (i = 0; i < commit_sections; i++)
 				rw_section_mac(ic, commit_start + i, true);
 		}
-		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp);
+		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
+			   commit_sections, &io_comp);
 	} else {
 		unsigned to_end;
 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
@@ -1104,18 +1105,21 @@ static void schedule_autocommit(struct dm_integrity_c *ic)
 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
 {
 	struct bio *bio;
-	spin_lock_irq(&ic->endio_wait.lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ic->endio_wait.lock, flags);
 	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 	bio_list_add(&ic->flush_bio_list, bio);
-	spin_unlock_irq(&ic->endio_wait.lock);
+	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+
 	queue_work(ic->commit_wq, &ic->commit_work);
 }
 
 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
 {
 	int r = dm_integrity_failed(ic);
-	if (unlikely(r) && !bio->bi_error)
-		bio->bi_error = r;
+	if (unlikely(r) && !bio->bi_status)
+		bio->bi_status = errno_to_blk_status(r);
 	bio_endio(bio);
 }
 
@@ -1123,7 +1127,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di
 {
 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-	if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
+	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
 		submit_flush_bio(ic, dio);
 	else
 		do_endio(ic, bio);
@@ -1142,9 +1146,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
 
 		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-		if (unlikely(dio->bi_error) && !bio->bi_error)
-			bio->bi_error = dio->bi_error;
-		if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
+		if (unlikely(dio->bi_status) && !bio->bi_status)
+			bio->bi_status = dio->bi_status;
+		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
 			dio->range.logical_sector += dio->range.n_sectors;
 			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
 			INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1318,7 +1322,7 @@ static void integrity_metadata(struct work_struct *w)
 	dec_in_flight(dio);
 	return;
 error:
-	dio->bi_error = r;
+	dio->bi_status = errno_to_blk_status(r);
 	dec_in_flight(dio);
 }
 
@@ -1331,7 +1335,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 	sector_t area, offset;
 
 	dio->ic = ic;
-	dio->bi_error = 0;
+	dio->bi_status = 0;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		submit_flush_bio(ic, dio);
@@ -1352,13 +1356,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
 		      (unsigned long long)ic->provided_data_sectors);
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
 		      ic->sectors_per_block,
 		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (ic->sectors_per_block > 1) {
@@ -1368,7 +1372,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 			if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
 				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-				return -EIO;
+				return DM_MAPIO_KILL;
 			}
 		}
 	}
@@ -1383,18 +1387,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
 				wanted_tag_size *= ic->tag_size;
 			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
 				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-				return -EIO;
+				return DM_MAPIO_KILL;
 			}
 		}
 	} else {
 		if (unlikely(bip != NULL)) {
 			DMERR("Unexpected integrity data when using internal hash");
-			return -EIO;
+			return DM_MAPIO_KILL;
 		}
 	}
 
 	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
 	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
@@ -2374,21 +2378,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
 	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
 }
 
-/* FIXME: use new kvmalloc */
-static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp)
-{
-	void *ptr = NULL;
-
-	if (size <= PAGE_SIZE)
-		ptr = kmalloc(size, GFP_KERNEL | gfp);
-	if (!ptr && size <= KMALLOC_MAX_SIZE)
-		ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp);
-	if (!ptr)
-		ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL);
-
-	return ptr;
-}
-
 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
 {
 	unsigned i;
@@ -2407,7 +2396,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
 	struct page_list *pl;
 	unsigned i;
 
-	pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO);
+	pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
 	if (!pl)
 		return NULL;
 
@@ -2437,7 +2426,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 	struct scatterlist **sl;
 	unsigned i;
 
-	sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO);
+	sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
 	if (!sl)
 		return NULL;
 
@@ -2453,7 +2442,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
 
 		n_pages = (end_index - start_index + 1);
 
-		s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0);
+		s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
 		if (!s) {
 			dm_integrity_free_journal_scatterlist(ic, sl);
 			return NULL;
@@ -2617,7 +2606,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 				goto bad;
 			}
 
-			sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0);
+			sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
 			if (!sg) {
 				*error = "Unable to allocate sg list";
 				r = -ENOMEM;
@@ -2673,7 +2662,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 				r = -ENOMEM;
 				goto bad;
 			}
-			ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO);
+			ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO);
 			if (!ic->sk_requests) {
 				*error = "Unable to allocate sk requests";
 				r = -ENOMEM;
@@ -2740,7 +2729,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 		r = -ENOMEM;
 		goto bad;
 	}
-	ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0);
+	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
 	if (!ic->journal_tree) {
 		*error = "Could not allocate memory for journal tree";
 		r = -ENOMEM;
@@ -3054,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->error = "The device is too small";
 		goto bad;
 	}
+	if (ti->len > ic->provided_data_sectors) {
+		r = -EINVAL;
+		ti->error = "Not enough provided sectors for requested mapping size";
+		goto bad;
+	}
 
 	if (!buffer_sectors)
 		buffer_sectors = 1;
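
The dm-integrity helper that open-coded a kmalloc-then-vmalloc fallback is dropped in favour of kvmalloc(), which implements that policy generically; kvfree() is the matching release. The patch calls plain kvmalloc() with a precomputed size; where the size is a count times an element size, kvmalloc_array() additionally checks the multiplication for overflow. Sketch with an illustrative count:

	struct page_list *pl;

	pl = kvmalloc_array(n, sizeof(*pl), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return -ENOMEM;
	/* ... use pl ... */
	kvfree(pl);
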
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3702e50..2503960 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -58,7 +58,8 @@ struct dm_io_client *dm_io_client_create(void)
 	if (!client->pool)
 		goto bad;
 
-	client->bios = bioset_create(min_ios, 0);
+	client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
+						  BIOSET_NEED_RESCUER));
 	if (!client->bios)
 		goto bad;
 
@@ -124,7 +125,7 @@ static void complete_io(struct io *io)
 	fn(error_bits, context);
 }
 
-static void dec_count(struct io *io, unsigned int region, int error)
+static void dec_count(struct io *io, unsigned int region, blk_status_t error)
 {
 	if (error)
 		set_bit(region, &io->error_bits);
@@ -137,9 +138,9 @@ static void endio(struct bio *bio)
 {
 	struct io *io;
 	unsigned region;
-	int error;
+	blk_status_t error;
 
-	if (bio->bi_error && bio_data_dir(bio) == READ)
+	if (bio->bi_status && bio_data_dir(bio) == READ)
 		zero_fill_bio(bio);
 
 	/*
@@ -147,7 +148,7 @@ static void endio(struct bio *bio)
 	 */
 	retrieve_io_and_region_from_bio(bio, &io, &region);
 
-	error = bio->bi_error;
+	error = bio->bi_status;
 	bio_put(bio);
 
 	dec_count(io, region, error);
@@ -317,9 +318,9 @@ static void do_region(int op, int op_flags, unsigned region,
 	else if (op == REQ_OP_WRITE_SAME)
 		special_cmd_max_sectors = q->limits.max_write_same_sectors;
 	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
-	     op == REQ_OP_WRITE_SAME)  &&
-	    special_cmd_max_sectors == 0) {
-		dec_count(io, region, -EOPNOTSUPP);
+	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
+		atomic_inc(&io->count);
+		dec_count(io, region, BLK_STS_NOTSUPP);
 		return;
 	}
 
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0555b44..41852ae 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
 	}
 
 	/*
-	 * Try to avoid low memory issues when a device is suspended.
+	 * Use __GFP_HIGH to avoid low memory issues when a device is
+	 * suspended and the ioctl is needed to resume it.
 	 * Use kmalloc() rather than vmalloc() when we can.
 	 */
 	dmi = NULL;
 	noio_flag = memalloc_noio_save();
-	dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+	dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
 	memalloc_noio_restore(noio_flag);
 
 	if (!dmi) {
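
The ioctl path keeps its existing guard against allocation-triggered I/O while a device is suspended; the tweak above only changes which reserves the allocation may dip into. The idiom in full (the size argument is illustrative):

	unsigned int noio_flag;
	void *dmi;

	noio_flag = memalloc_noio_save();	/* reclaim may not issue I/O here */
	dmi = kvmalloc(size, GFP_KERNEL | __GFP_HIGH);
	memalloc_noio_restore(noio_flag);
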
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 4dfe386..a1da0eb 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
 {
 	struct log_writes_c *lc = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		unsigned long flags;
 
-		DMERR("Error writing log block, error=%d", bio->bi_error);
+		DMERR("Error writing log block, error=%d", bio->bi_status);
 		spin_lock_irqsave(&lc->blocks_lock, flags);
 		lc->logging_enabled = false;
 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -586,7 +586,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 		spin_lock_irq(&lc->blocks_lock);
 		lc->logging_enabled = false;
 		spin_unlock_irq(&lc->blocks_lock);
-		return -ENOMEM;
+		return DM_MAPIO_KILL;
 	}
 	INIT_LIST_HEAD(&block->list);
 	pb->block = block;
@@ -639,7 +639,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 			spin_lock_irq(&lc->blocks_lock);
 			lc->logging_enabled = false;
 			spin_unlock_irq(&lc->blocks_lock);
-			return -ENOMEM;
+			return DM_MAPIO_KILL;
 		}
 
 		src = kmap_atomic(bv.bv_page);
@@ -664,7 +664,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int normal_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct log_writes_c *lc = ti->private;
 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@@ -686,7 +687,7 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
 	}
 
-	return error;
+	return DM_ENDIO_DONE;
 }
 
 /*
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3df056b..0e8ab5b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -559,13 +559,13 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			return DM_MAPIO_REQUEUE;
 		dm_report_EIO(m);
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
-	bio->bi_error = 0;
+	bio->bi_status = 0;
 	bio->bi_bdev = pgpath->path.dev->bdev;
 	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
@@ -621,11 +621,19 @@ static void process_queued_bios(struct work_struct *work)
 	blk_start_plug(&plug);
 	while ((bio = bio_list_pop(&bios))) {
 		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-		if (r < 0 || r == DM_MAPIO_REQUEUE) {
-			bio->bi_error = r;
+		switch (r) {
+		case DM_MAPIO_KILL:
+			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
-		} else if (r == DM_MAPIO_REMAPPED)
+			break;
+		case DM_MAPIO_REQUEUE:
+			bio->bi_status = BLK_STS_DM_REQUEUE;
+			bio_endio(bio);
+			break;
+		case DM_MAPIO_REMAPPED:
 			generic_make_request(bio);
+			break;
+		}
 	}
 	blk_finish_plug(&plug);
 }
@@ -1442,22 +1450,15 @@ static void activate_path_work(struct work_struct *work)
 	activate_or_offline_path(pgpath);
 }
 
-static int noretry_error(int error)
+static int noretry_error(blk_status_t error)
 {
 	switch (error) {
-	case -EBADE:
-		/*
-		 * EBADE signals an reservation conflict.
-		 * We shouldn't fail the path here as we can communicate with
-		 * the target.  We should failover to the next path, but in
-		 * doing so we might be causing a ping-pong between paths.
-		 * So just return the reservation conflict error.
-		 */
-	case -EOPNOTSUPP:
-	case -EREMOTEIO:
-	case -EILSEQ:
-	case -ENODATA:
-	case -ENOSPC:
+	case BLK_STS_NOTSUPP:
+	case BLK_STS_NOSPC:
+	case BLK_STS_TARGET:
+	case BLK_STS_NEXUS:
+	case BLK_STS_MEDIUM:
+	case BLK_STS_RESOURCE:
 		return 1;
 	}
 
@@ -1466,7 +1467,7 @@ static int noretry_error(int error)
 }
 
 static int multipath_end_io(struct dm_target *ti, struct request *clone,
-			    int error, union map_info *map_context)
+			    blk_status_t error, union map_info *map_context)
 {
 	struct dm_mpath_io *mpio = get_mpio(map_context);
 	struct pgpath *pgpath = mpio->pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 
 		if (atomic_read(&m->nr_valid_paths) == 0 &&
 		    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-			if (error == -EIO)
+			if (error == BLK_STS_IOERR)
 				dm_report_EIO(m);
 			/* complete with the original error */
 			r = DM_ENDIO_DONE;
@@ -1510,24 +1511,26 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 	return r;
 }
 
-static int do_end_io_bio(struct multipath *m, struct bio *clone,
-			 int error, struct dm_mpath_io *mpio)
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+		blk_status_t *error)
 {
+	struct multipath *m = ti->private;
+	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
+	struct pgpath *pgpath = mpio->pgpath;
 	unsigned long flags;
+	int r = DM_ENDIO_DONE;
 
-	if (!error)
-		return 0;	/* I/O complete */
+	if (!*error || noretry_error(*error))
+		goto done;
 
-	if (noretry_error(error))
-		return error;
-
-	if (mpio->pgpath)
-		fail_path(mpio->pgpath);
+	if (pgpath)
+		fail_path(pgpath);
 
 	if (atomic_read(&m->nr_valid_paths) == 0 &&
 	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 		dm_report_EIO(m);
-		return -EIO;
+		*error = BLK_STS_IOERR;
+		goto done;
 	}
 
 	/* Queue for the daemon to resubmit */
@@ -1539,23 +1542,11 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
 	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
 		queue_work(kmultipathd, &m->process_queued_bios);
 
-	return DM_ENDIO_INCOMPLETE;
-}
-
-static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
-{
-	struct multipath *m = ti->private;
-	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
-	struct pgpath *pgpath;
-	struct path_selector *ps;
-	int r;
-
-	BUG_ON(!mpio);
-
-	r = do_end_io_bio(m, clone, error, mpio);
-	pgpath = mpio->pgpath;
+	r = DM_ENDIO_INCOMPLETE;
+done:
 	if (pgpath) {
-		ps = &pgpath->pg->ps;
+		struct path_selector *ps = &pgpath->pg->ps;
+
 		if (ps->type->end_io)
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7d89322..b4b75da 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
 	/********************************************************************
 	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
 	 *
-	 * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
 	 */
 
 	__le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 	sb->layout = cpu_to_le32(mddev->layout);
 	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
 
+	/********************************************************************
+	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+	 *
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+	 */
 	sb->new_level = cpu_to_le32(mddev->new_level);
 	sb->new_layout = cpu_to_le32(mddev->new_layout);
 	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
 	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
 
 	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
-		/* Retrieve device size stored in superblock to be prepared for shrink */
-		rdev->sectors = le64_to_cpu(sb->sectors);
+		/*
+		 * Retrieve rdev size stored in superblock to be prepared for shrink.
+		 * Check extended superblock members are present otherwise the size
+		 * will not be set!
+		 */
+		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+			rdev->sectors = le64_to_cpu(sb->sectors);
+
 		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
 		if (rdev->recovery_offset == MaxSector)
 			set_bit(In_sync, &rdev->flags);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index a95cbb8..a4fbd91 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
 
 struct dm_raid1_bio_record {
 	struct mirror *m;
+	/* if details->bi_bdev == NULL, details were not saved */
 	struct dm_bio_details details;
 	region_t write_region;
 };
@@ -260,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = REQ_PREFLUSH,
+		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -490,9 +491,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
 		 * If device is suspended, complete the bio.
 		 */
 		if (dm_noflush_suspending(ms->ti))
-			bio->bi_error = DM_ENDIO_REQUEUE;
+			bio->bi_status = BLK_STS_DM_REQUEUE;
 		else
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 
 		bio_endio(bio);
 		return;
@@ -626,7 +627,7 @@ static void write_callback(unsigned long error, void *context)
 	 * degrade the array.
 	 */
 	if (bio_op(bio) == REQ_OP_DISCARD) {
-		bio->bi_error = -EOPNOTSUPP;
+		bio->bi_status = BLK_STS_NOTSUPP;
 		bio_endio(bio);
 		return;
 	}
@@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	struct dm_raid1_bio_record *bio_record =
 	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
 
+	bio_record->details.bi_bdev = NULL;
+
 	if (rw == WRITE) {
 		/* Save region for mirror_end_io() handler */
 		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1207,14 +1210,14 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
 	if (r < 0 && r != -EWOULDBLOCK)
-		return r;
+		return DM_MAPIO_KILL;
 
 	/*
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
 		if (bio->bi_opf & REQ_RAHEAD)
-			return -EWOULDBLOCK;
+			return DM_MAPIO_KILL;
 
 		queue_bio(ms, bio, rw);
 		return DM_MAPIO_SUBMITTED;
@@ -1226,7 +1229,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 */
 	m = choose_mirror(ms, bio->bi_iter.bi_sector);
 	if (unlikely(!m))
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	dm_bio_record(&bio_record->details, bio);
 	bio_record->m = m;
@@ -1236,7 +1239,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }
 
-static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1252,16 +1256,26 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		if (!(bio->bi_opf & REQ_PREFLUSH) &&
 		    bio_op(bio) != REQ_OP_DISCARD)
 			dm_rh_dec(ms->rh, bio_record->write_region);
-		return error;
+		return DM_ENDIO_DONE;
 	}
 
-	if (error == -EOPNOTSUPP)
-		return error;
+	if (*error == BLK_STS_NOTSUPP)
+		goto out;
 
-	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-		return error;
+	if (bio->bi_opf & REQ_RAHEAD)
+		goto out;
 
-	if (unlikely(error)) {
+	if (unlikely(*error)) {
+		if (!bio_record->details.bi_bdev) {
+			/*
+			 * There wasn't enough memory to record necessary
+			 * information for a retry or there was no other
+			 * mirror in-sync.
+			 */
+			DMERR_LIMIT("Mirror read failed.");
+			return DM_ENDIO_DONE;
+		}
+
 		m = bio_record->m;
 
 		DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,7 +1291,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 			bd = &bio_record->details;
 
 			dm_bio_restore(bd, bio);
-			bio->bi_error = 0;
+			bio_record->details.bi_bdev = NULL;
+			bio->bi_status = 0;
 
 			queue_bio(ms, bio, rw);
 			return DM_ENDIO_INCOMPLETE;
@@ -1285,7 +1300,10 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		DMERR("All replicated volumes dead, failing I/O");
 	}
 
-	return error;
+out:
+	bio_record->details.bi_bdev = NULL;
+
+	return DM_ENDIO_DONE;
 }
 
 static void mirror_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b639fa7..c6ebc5b 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -71,7 +71,7 @@ static void dm_old_start_queue(struct request_queue *q)
 
 static void dm_mq_start_queue(struct request_queue *q)
 {
-	blk_mq_start_stopped_hw_queues(q, true);
+	blk_mq_unquiesce_queue(q);
 	blk_mq_kick_requeue_list(q);
 }
 
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-	int error = clone->bi_error;
+	blk_status_t error = clone->bi_status;
 
 	bio_put(clone);
 
@@ -158,7 +158,7 @@ static void end_clone_bio(struct bio *clone)
 	 * Do not use blk_end_request() here, because it may complete
 	 * the original request before the clone, and break the ordering.
 	 */
-	blk_update_request(tio->orig, 0, nr_bytes);
+	blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
 }
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
@@ -216,7 +216,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
  * Must be called without clone's queue lock held,
  * see end_clone_request() for more details.
  */
-static void dm_end_request(struct request *clone, int error)
+static void dm_end_request(struct request *clone, blk_status_t error)
 {
 	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
@@ -285,7 +285,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	rq_completed(md, rw, false);
 }
 
-static void dm_done(struct request *clone, int error, bool mapped)
+static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 {
 	int r = DM_ENDIO_DONE;
 	struct dm_rq_target_io *tio = clone->end_io_data;
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
-	if (unlikely(error == -EREMOTEIO)) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (req_op(clone) == REQ_OP_WRITE_SAME &&
 		    !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
@@ -358,7 +358,7 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *rq, int error)
+static void dm_complete_request(struct request *rq, blk_status_t error)
 {
 	struct dm_rq_target_io *tio = tio_from_request(rq);
 
@@ -375,7 +375,7 @@ static void dm_complete_request(struct request *rq, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
  */
-static void dm_kill_unmapped_request(struct request *rq, int error)
+static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 {
 	rq->rq_flags |= RQF_FAILED;
 	dm_complete_request(rq, error);
@@ -384,7 +384,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
 /*
  * Called with the clone's queue lock held (in the case of .request_fn)
  */
-static void end_clone_request(struct request *clone, int error)
+static void end_clone_request(struct request *clone, blk_status_t error)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
@@ -401,7 +401,7 @@ static void end_clone_request(struct request *clone, int error)
 
 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
-	int r;
+	blk_status_t r;
 
 	if (blk_queue_io_stat(clone->q))
 		clone->rq_flags |= RQF_IO_STAT;
@@ -506,7 +506,7 @@ static int map_request(struct dm_rq_target_io *tio)
 		break;
 	case DM_MAPIO_KILL:
 		/* The target wants to complete the I/O */
-		dm_kill_unmapped_request(rq, -EIO);
+		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
 		break;
 	default:
 		DMWARN("unimplemented target map return value: %d", r);
@@ -727,7 +727,7 @@ static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return __dm_rq_init_rq(set->driver_data, rq);
 }
 
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
@@ -744,7 +744,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	if (ti->type->busy && ti->type->busy(ti))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	dm_start_request(md, rq);
 
@@ -762,10 +762,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
 		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops dm_mq_ops = {
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index f0020d2..9813922 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -24,7 +24,7 @@ struct dm_rq_target_io {
 	struct dm_target *ti;
 	struct request *orig, *clone;
 	struct kthread_work work;
-	int error;
+	blk_status_t error;
 	union map_info info;
 	struct dm_stats_aux stats_aux;
 	unsigned long duration_jiffies;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b93476c..c5534d2 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE,
+				 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
 		ps->valid = 0;
 
 	/*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e152d98..1ba4104 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
 {
 	void *callback_data = bio->bi_private;
 
-	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
+	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
 }
 
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1690,7 +1690,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
 	if (!s->valid)
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	/* FIXME: should only take write lock if we need
 	 * to copy an exception */
@@ -1698,7 +1698,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
 	    bio_data_dir(bio) == WRITE)) {
-		r = -EIO;
+		r = DM_MAPIO_KILL;
 		goto out_unlock;
 	}
 
@@ -1723,7 +1723,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 			if (!s->valid || s->snapshot_overflowed) {
 				free_pending_exception(pe);
-				r = -EIO;
+				r = DM_MAPIO_KILL;
 				goto out_unlock;
 			}
 
@@ -1741,7 +1741,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 					DMERR("Snapshot overflowed: Unable to allocate exception.");
 				} else
 					__invalidate_snapshot(s, -ENOMEM);
-				r = -EIO;
+				r = DM_MAPIO_KILL;
 				goto out_unlock;
 			}
 		}
@@ -1851,14 +1851,15 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 	return r;
 }
 
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	struct dm_snapshot *s = ti->private;
 
 	if (is_bio_tracked(bio))
 		stop_tracking_chunk(s, bio);
 
-	return 0;
+	return DM_ENDIO_DONE;
 }
 
 static void snapshot_merge_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 7515248..11621a0 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -375,20 +375,21 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
 	}
 }
 
-static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+		blk_status_t *error)
 {
 	unsigned i;
 	char major_minor[16];
 	struct stripe_c *sc = ti->private;
 
-	if (!error)
-		return 0; /* I/O complete */
+	if (!*error)
+		return DM_ENDIO_DONE; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-		return error;
+	if (bio->bi_opf & REQ_RAHEAD)
+		return DM_ENDIO_DONE;
 
-	if (error == -EOPNOTSUPP)
-		return error;
+	if (*error == BLK_STS_NOTSUPP)
+		return DM_ENDIO_DONE;
 
 	memset(major_minor, 0, sizeof(major_minor));
 	sprintf(major_minor, "%d:%d",
@@ -409,7 +410,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
 				schedule_work(&sc->trigger_event);
 		}
 
-	return error;
+	return DM_ENDIO_DONE;
 }
 
 static int stripe_iterate_devices(struct dm_target *ti,
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index b242b75..c0d7e60 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -128,7 +128,7 @@ static void io_err_dtr(struct dm_target *tt)
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-	return -EIO;
+	return DM_MAPIO_KILL;
 }
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 17ad50d..9dec2f8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
 	 * Even if r is set, there could be sub discards in flight that we
 	 * need to wait for.
 	 */
-	if (r && !op->parent_bio->bi_error)
-		op->parent_bio->bi_error = r;
+	if (r && !op->parent_bio->bi_status)
+		op->parent_bio->bi_status = errno_to_blk_status(r);
 	bio_endio(op->parent_bio);
 }
 
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }
 
 static void cell_error_with_code(struct pool *pool,
-				 struct dm_bio_prison_cell *cell, int error_code)
+		struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
 	dm_cell_error(pool->prison, cell, error_code);
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-	return pool->out_of_data_space ? -ENOSPC : -EIO;
+	return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }
 
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-	int error = get_pool_io_error_code(pool);
-
-	cell_error_with_code(pool, cell, error);
+	cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }
 
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 
 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+	cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }
 
 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
 	bio_list_init(master);
 }
 
-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(bios))) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	}
 }
 
-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+		blk_status_t error)
 {
 	struct bio_list bios;
 	unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
 	__merge_bio_list(&bios, &tc->retry_on_resume_list);
 	spin_unlock_irqrestore(&tc->lock, flags);
 
-	error_bio_list(&bios, DM_ENDIO_REQUEUE);
+	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
 	requeue_deferred_cells(tc);
 }
 
-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
 	struct thin_c *tc;
 
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 
 static void error_retry_list(struct pool *pool)
 {
-	int error = get_pool_io_error_code(pool);
-
-	error_retry_list_with_code(pool, error);
+	error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }
 
 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
 	 */
 	atomic_t prepare_actions;
 
-	int err;
+	blk_status_t status;
 	struct thin_c *tc;
 	dm_block_t virt_begin, virt_end;
 	dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
 	struct dm_thin_new_mapping *m = context;
 
-	m->err = read_err || write_err ? -EIO : 0;
+	m->status = read_err || write_err ? BLK_STS_IOERR : 0;
 	complete_mapping_preparation(m);
 }
 
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
 
 	bio->bi_end_io = m->saved_bi_end_io;
 
-	m->err = bio->bi_error;
+	m->status = bio->bi_status;
 	complete_mapping_preparation(m);
 }
 
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	struct bio *bio = m->bio;
 	int r;
 
-	if (m->err) {
+	if (m->status) {
 		cell_error(pool, m->cell);
 		goto out;
 	}
@@ -1094,6 +1091,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 		return;
 	}
 
+	/*
+	 * Increment the unmapped blocks.  This prevents a race between the
+	 * passdown io and reallocation of freed blocks.
+	 */
+	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
+
 	discard_parent = bio_alloc(GFP_NOIO, 1);
 	if (!discard_parent) {
 		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1124,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 			end_discard(&op, r);
 		}
 	}
-
-	/*
-	 * Increment the unmapped blocks.  This prevents a race between the
-	 * passdown io and reallocation of freed blocks.
-	 */
-	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-	if (r) {
-		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-		bio_io_error(m->bio);
-		cell_defer_no_holder(tc, m->cell);
-		mempool_free(m, pool->mapping_pool);
-		return;
-	}
 }
 
 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
 	enum pool_mode m = get_pool_mode(pool);
 
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
 	case PM_WRITE:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	case PM_OUT_OF_DATA_SPACE:
-		return pool->pf.error_if_no_space ? -ENOSPC : 0;
+		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
 	case PM_READ_ONLY:
 	case PM_FAIL:
-		return -EIO;
+		return BLK_STS_IOERR;
 	default:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-	int error = should_error_unserviceable_bio(pool);
+	blk_status_t error = should_error_unserviceable_bio(pool);
 
 	if (error) {
-		bio->bi_error = error;
+		bio->bi_status = error;
 		bio_endio(bio);
 	} else
 		retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
 	struct bio *bio;
 	struct bio_list bios;
-	int error;
+	blk_status_t error;
 
 	error = should_error_unserviceable_bio(pool);
 	if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 	unsigned count = 0;
 
 	if (tc->requeue_mode) {
-		error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+		error_thin_bio_list(tc, &tc->deferred_bio_list,
+				BLK_STS_DM_REQUEUE);
 		return;
 	}
 
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
 		pool->pf.error_if_no_space = true;
 		notify_of_pool_mode_change_to_oods(pool);
-		error_retry_list_with_code(pool, -ENOSPC);
+		error_retry_list_with_code(pool, BLK_STS_NOSPC);
 	}
 }
 
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	thin_hook_bio(tc, bio);
 
 	if (tc->requeue_mode) {
-		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio->bi_status = BLK_STS_DM_REQUEUE;
 		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
 	return thin_bio_map(ti, bio);
 }
 
-static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+		blk_status_t *err)
 {
 	unsigned long flags;
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -4212,7 +4211,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	if (h->cell)
 		cell_defer_no_holder(h->tc, h->cell);
 
-	return 0;
+	return DM_ENDIO_DONE;
 }
 
 static void thin_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 97de961..b46705e 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
 		return r;
 	}
 
-	if (likely(v->version >= 1))
+	if (likely(v->salt_size && (v->version >= 1)))
 		r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
 	return r;
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
 {
 	int r;
 
-	if (unlikely(!v->version)) {
+	if (unlikely(v->salt_size && (!v->version))) {
 		r = verity_hash_update(v, req, v->salt, v->salt_size, res);
 
 		if (r < 0) {
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io)
 /*
  * End one "io" structure with a given error.
  */
-static void verity_finish_io(struct dm_verity_io *io, int error)
+static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
 {
 	struct dm_verity *v = io->v;
 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
 	bio->bi_end_io = io->orig_bi_end_io;
-	bio->bi_error = error;
+	bio->bi_status = status;
 
 	verity_fec_finish_io(io);
 
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w)
 {
 	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
 
-	verity_finish_io(io, verity_verify_io(io));
+	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
 static void verity_end_io(struct bio *bio)
 {
 	struct dm_verity_io *io = bio->bi_private;
 
-	if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
-		verity_finish_io(io, bio->bi_error);
+	if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+		verity_finish_io(io, bio->bi_status);
 		return;
 	}
 
@@ -643,17 +643,17 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
 		DMERR_LIMIT("unaligned io");
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (bio_end_sector(bio) >>
 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
 		DMERR_LIMIT("io out of range");
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	if (bio_data_dir(bio) == WRITE)
-		return -EIO;
+		return DM_MAPIO_KILL;
 
 	io = dm_per_bio_data(bio, ti->per_io_data_size);
 	io->v = v;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b616f11..b65ca8d 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -39,7 +39,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 	case REQ_OP_READ:
 		if (bio->bi_opf & REQ_RAHEAD) {
 			/* readahead of null bytes only wastes buffer cache */
-			return -EIO;
+			return DM_MAPIO_KILL;
 		}
 		zero_fill_bio(bio);
 		break;
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 		/* writes get silently dropped */
 		break;
 	default:
-		return -EIO;
+		return DM_MAPIO_KILL;
 	}
 
 	bio_endio(bio);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ef9500..4029460 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
  */
 struct dm_io {
 	struct mapped_device *md;
-	int error;
+	blk_status_t status;
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-static void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, blk_status_t error)
 {
 	unsigned long flags;
-	int io_error;
+	blk_status_t io_error;
 	struct bio *bio;
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
 	if (unlikely(error)) {
 		spin_lock_irqsave(&io->endio_lock, flags);
-		if (!(io->error > 0 && __noflush_suspending(md)))
-			io->error = error;
+		if (!(io->status == BLK_STS_DM_REQUEUE &&
+				__noflush_suspending(md)))
+			io->status = error;
 		spin_unlock_irqrestore(&io->endio_lock, flags);
 	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
-		if (io->error == DM_ENDIO_REQUEUE) {
+		if (io->status == BLK_STS_DM_REQUEUE) {
 			/*
 			 * Target requested pushing back the I/O.
 			 */
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
 				bio_list_add_head(&md->deferred, io->bio);
 			else
 				/* noflush suspend was interrupted. */
-				io->error = -EIO;
+				io->status = BLK_STS_IOERR;
 			spin_unlock_irqrestore(&md->deferred_lock, flags);
 		}
 
-		io_error = io->error;
+		io_error = io->status;
 		bio = io->bio;
 		end_io_acct(io);
 		free_io(md, io);
 
-		if (io_error == DM_ENDIO_REQUEUE)
+		if (io_error == BLK_STS_DM_REQUEUE)
 			return;
 
 		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			bio->bi_error = io_error;
+			bio->bi_status = io_error;
 			bio_endio(bio);
 		}
 	}
@@ -838,31 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
 
 static void clone_endio(struct bio *bio)
 {
-	int error = bio->bi_error;
-	int r = error;
+	blk_status_t error = bio->bi_status;
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (endio) {
-		r = endio(tio->ti, bio, error);
-		if (r < 0 || r == DM_ENDIO_REQUEUE)
-			/*
-			 * error and requeue request are handled
-			 * in dec_pending().
-			 */
-			error = r;
-		else if (r == DM_ENDIO_INCOMPLETE)
-			/* The target will handle the io */
-			return;
-		else if (r) {
-			DMWARN("unimplemented target endio return value: %d", r);
-			BUG();
-		}
-	}
-
-	if (unlikely(r == -EREMOTEIO)) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
 		    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
 			disable_write_same(md);
@@ -871,6 +854,23 @@ static void clone_endio(struct bio *bio)
 			disable_write_zeroes(md);
 	}
 
+	if (endio) {
+		int r = endio(tio->ti, bio, &error);
+		switch (r) {
+		case DM_ENDIO_REQUEUE:
+			error = BLK_STS_DM_REQUEUE;
+			/*FALLTHRU*/
+		case DM_ENDIO_DONE:
+			break;
+		case DM_ENDIO_INCOMPLETE:
+			/* The target will handle the io */
+			return;
+		default:
+			DMWARN("unimplemented target endio return value: %d", r);
+			BUG();
+		}
+	}
+
 	free_tio(tio);
 	dec_pending(io, error);
 }
@@ -1036,7 +1036,8 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 
 		while ((bio = bio_list_pop(&list))) {
 			struct bio_set *bs = bio->bi_pool;
-			if (unlikely(!bs) || bs == fs_bio_set) {
+			if (unlikely(!bs) || bs == fs_bio_set ||
+			    !bs->rescue_workqueue) {
 				bio_list_add(&current->bio_list[i], bio);
 				continue;
 			}
@@ -1084,18 +1085,24 @@ static void __map_bio(struct dm_target_io *tio)
 	r = ti->type->map(ti, clone);
 	dm_offload_end(&o);
 
-	if (r == DM_MAPIO_REMAPPED) {
+	switch (r) {
+	case DM_MAPIO_SUBMITTED:
+		break;
+	case DM_MAPIO_REMAPPED:
 		/* the bio has been remapped so dispatch it */
-
 		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
 				      tio->io->bio->bi_bdev->bd_dev, sector);
-
 		generic_make_request(clone);
-	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
-		/* error the io and bail out, or requeue it if needed */
-		dec_pending(tio->io, r);
+		break;
+	case DM_MAPIO_KILL:
+		dec_pending(tio->io, BLK_STS_IOERR);
 		free_tio(tio);
-	} else if (r != DM_MAPIO_SUBMITTED) {
+		break;
+	case DM_MAPIO_REQUEUE:
+		dec_pending(tio->io, BLK_STS_DM_REQUEUE);
+		free_tio(tio);
+		break;
+	default:
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
 	}
@@ -1360,7 +1367,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 	ci.map = map;
 	ci.md = md;
 	ci.io = alloc_io(md);
-	ci.io->error = 0;
+	ci.io->status = 0;
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
@@ -1527,7 +1534,6 @@ void dm_init_normal_md_queue(struct mapped_device *md)
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
-	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
 static void cleanup_mapped_device(struct mapped_device *md)
@@ -1657,7 +1663,7 @@ static struct mapped_device *alloc_dev(int minor)
 
 	bio_init(&md->flush_bio, NULL, 0);
 	md->flush_bio.bi_bdev = md->bdev;
-	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
 
 	dm_stats_init(&md->stats);
 
@@ -2654,7 +2660,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
 		BUG();
 	}
 
-	pools->bs = bioset_create_nobvec(pool_size, front_pad);
+	pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
 	if (!pools->bs)
 		goto out;
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 7299ce2..03082e1 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
 	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
 	lock_comm(cinfo, 1);
 	ret = __sendmsg(cinfo, &cmsg);
-	if (ret)
+	if (ret) {
+		unlock_comm(cinfo);
 		return ret;
+	}
 	cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
 	ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
 	cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 10367ff..31bcbfb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -185,7 +185,7 @@ static int start_readonly;
 static bool create_on_open = true;
 
 /* bio_clone_mddev
- * like bio_clone, but with a local bio set
+ * like bio_clone_bioset, but with a local bio set
  */
 
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
@@ -265,7 +265,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int sectors;
 	int cpu;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	if (mddev == NULL || mddev->pers == NULL) {
 		bio_io_error(bio);
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
-			bio->bi_error = -EROFS;
+			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio)
 	struct md_rdev *rdev = bio->bi_private;
 	struct mddev *mddev = rdev->mddev;
 
-	if (bio->bi_error) {
-		pr_err("md: super_written gets error=%d\n", bio->bi_error);
+	if (bio->bi_status) {
+		pr_err("md: super_written gets error=%d\n", bio->bi_status);
 		md_error(mddev, rdev);
 		if (!test_bit(Faulty, &rdev->flags)
 		    && (bio->bi_opf & MD_FAILFAST)) {
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	    test_bit(FailFast, &rdev->flags) &&
 	    !test_bit(LastDev, &rdev->flags))
 		ff = MD_FAILFAST;
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
 
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 
 	submit_bio_wait(bio);
 
-	ret = !bio->bi_error;
+	ret = !bio->bi_status;
 	bio_put(bio);
 	return ret;
 }
@@ -825,7 +825,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
 	return -EINVAL;
 }
 
-static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
 	return	sb1->set_uuid0 == sb2->set_uuid0 &&
 		sb1->set_uuid1 == sb2->set_uuid1 &&
@@ -833,7 +833,7 @@ static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 		sb1->set_uuid3 == sb2->set_uuid3;
 }
 
-static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
 	int ret;
 	mdp_super_t *tmp1, *tmp2;
@@ -1025,12 +1025,12 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 	} else {
 		__u64 ev1, ev2;
 		mdp_super_t *refsb = page_address(refdev->sb_page);
-		if (!uuid_equal(refsb, sb)) {
+		if (!md_uuid_equal(refsb, sb)) {
 			pr_warn("md: %s has different UUID to %s\n",
 				b, bdevname(refdev->bdev,b2));
 			goto abort;
 		}
-		if (!sb_equal(refsb, sb)) {
+		if (!md_sb_equal(refsb, sb)) {
 			pr_warn("md: %s has same UUID but different superblock to %s\n",
 				b, bdevname(refdev->bdev, b2));
 			goto abort;
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
 
 static void no_op(struct percpu_ref *r) {}
 
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+	if (mddev->writes_pending.percpu_count_ptr)
+		return 0;
+	if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+		return -ENOMEM;
+	/* We want to start with the refcount at zero */
+	percpu_ref_put(&mddev->writes_pending);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
 static int md_alloc(dev_t dev, char *name)
 {
 	/*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
 	blk_queue_make_request(mddev->queue, md_make_request);
 	blk_set_stacking_limits(&mddev->queue->limits);
 
-	if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
-		goto abort;
-	/* We want to start with the refcount at zero */
-	percpu_ref_put(&mddev->writes_pending);
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
 		blk_cleanup_queue(mddev->queue);
@@ -5420,7 +5428,7 @@ int md_run(struct mddev *mddev)
 	}
 
 	if (mddev->bio_set == NULL) {
-		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
+		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (!mddev->bio_set)
 			return -ENOMEM;
 	}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 11f1514..0fa1de4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index e95d521..68d036e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
+static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
 {
 	struct bio *bio = mp_bh->master_bio;
 	struct mpconf *conf = mp_bh->mddev->private;
 
-	bio->bi_error = err;
+	bio->bi_status = status;
 	bio_endio(bio);
 	mempool_free(mp_bh, conf->pool);
 }
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio)
 	struct mpconf *conf = mp_bh->mddev->private;
 	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		multipath_end_bh_io(mp_bh, 0);
 	else if (!(bio->bi_opf & REQ_RAHEAD)) {
 		/*
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio)
 			(unsigned long long)bio->bi_iter.bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	} else
-		multipath_end_bh_io(mp_bh, bio->bi_error);
+		multipath_end_bh_io(mp_bh, bio->bi_status);
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread)
 			pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
 			       bdevname(bio->bi_bdev,b),
 			       (unsigned long long)bio->bi_iter.bi_sector);
-			multipath_end_bh_io(mp_bh, -EIO);
+			multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
 		} else {
 			pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
 			       bdevname(bio->bi_bdev,b),
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af5056d..98ca2c1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
 	struct r1conf *conf = r1_bio->mddev->private;
 
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 
 	bio_endio(bio);
 	/*
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 
 static void raid1_end_read_request(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
 	struct r1bio *r1_bio = bio->bi_private;
 	struct r1conf *conf = r1_bio->mddev->private;
 	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio)
 	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 	bool discard_error;
 
-	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
 	/*
 	 * 'one mirror IO has finished' event handler:
 	 */
-	if (bio->bi_error && !discard_error) {
+	if (bio->bi_status && !discard_error) {
 		set_bit(WriteErrorSeen,	&rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio)
 	 * or re-read if the read failed.
 	 * We don't do much here, just schedule handling by raid1d
 	 */
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 
 	if (atomic_dec_and_test(&r1_bio->remaining))
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio)
 
 static void end_sync_write(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
 	struct r1bio *r1_bio = get_resync_r1bio(bio);
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 		idx ++;
 	}
 	set_bit(R1BIO_Uptodate, &r1_bio->state);
-	bio->bi_error = 0;
+	bio->bi_status = 0;
 	return 1;
 }
 
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
-		int error;
+		blk_status_t status;
 		struct bio_vec *bi;
 		struct bio *b = r1_bio->bios[i];
 		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
 			continue;
 		/* fixup the bio for reuse, but preserve errno */
-		error = b->bi_error;
+		status = b->bi_status;
 		bio_reset(b);
-		b->bi_error = error;
+		b->bi_status = status;
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio)
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-		    !r1_bio->bios[primary]->bi_error) {
+		    !r1_bio->bios[primary]->bi_status) {
 			r1_bio->bios[primary]->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
 			break;
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
-		int error = sbio->bi_error;
+		blk_status_t status = sbio->bi_status;
 		struct page **ppages = get_resync_pages(pbio)->pages;
 		struct page **spages = get_resync_pages(sbio)->pages;
 		struct bio_vec *bi;
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio)
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
 		/* Now we can 'fixup' the error value */
-		sbio->bi_error = 0;
+		sbio->bi_status = 0;
 
 		bio_for_each_segment_all(bi, sbio, j)
 			page_len[j] = bi->bv_len;
 
-		if (!error) {
+		if (!status) {
 			for (j = vcnt; j-- ; ) {
 				if (memcmp(page_address(ppages[j]),
 					   page_address(spages[j]),
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && !error)) {
+			      && !status)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
 		struct bio *bio = r1_bio->bios[m];
 		if (bio->bi_end_io == NULL)
 			continue;
-		if (!bio->bi_error &&
+		if (!bio->bi_status &&
 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
 		}
-		if (bio->bi_error &&
+		if (bio->bi_status &&
 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
 				md_error(conf->mddev, rdev);
@@ -2955,7 +2955,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->r1bio_pool)
 		goto abort;
 
-	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
 	if (!conf->bio_split)
 		goto abort;
 
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
 			mdname(mddev));
 		return -EIO;
 	}
+	if (mddev_init_writes_pending(mddev) < 0)
+		return -ENOMEM;
 	/*
 	 * copy the already verified devices into our private RAID1
 	 * bookkeeping area. [whatever we allocate in run(),
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7f..57a250f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
 	struct r10conf *conf = r10_bio->mddev->private;
 
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 
 	bio_endio(bio);
 	/*
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 
 static void raid10_end_read_request(struct bio *bio)
 {
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
 	struct r10bio *r10_bio = bio->bi_private;
 	int slot, dev;
 	struct md_rdev *rdev;
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
 	struct bio *to_put = NULL;
 	bool discard_error;
 
-	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
 	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
-	if (bio->bi_error && !discard_error) {
+	if (bio->bi_status && !discard_error) {
 		if (repl)
 			/* Never record new bad blocks to replacement,
 			 * just fail it.
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
 			bio->bi_next = NULL;
 			bio->bi_bdev = rdev->bdev;
 			if (test_bit(Faulty, &rdev->flags)) {
-				bio->bi_error = -EIO;
+				bio->bi_status = BLK_STS_IOERR;
 				bio_endio(bio);
 			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
 					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
 		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
 {
 	struct r10conf *conf = r10_bio->mddev->private;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 	else
 		/* The write handler will notice the lack of
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
 	else
 		rdev = conf->mirrors[d].rdev;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		if (repl)
 			md_error(mddev, rdev);
 		else {
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
 	/* find the first device with a block */
 	for (i=0; i<conf->copies; i++)
-		if (!r10_bio->devs[i].bio->bi_error)
+		if (!r10_bio->devs[i].bio->bi_status)
 			break;
 
 	if (i == conf->copies)
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		tpages = get_resync_pages(tbio)->pages;
 		d = r10_bio->devs[i].devnum;
 		rdev = conf->mirrors[d].rdev;
-		if (!r10_bio->devs[i].bio->bi_error) {
+		if (!r10_bio->devs[i].bio->bi_status) {
 			/* We know that the bi_io_vec layout is the same for
 			 * both 'first' and 'i', so we just compare them.
 			 * All vec entries are PAGE_SIZE;
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 			rdev = conf->mirrors[dev].rdev;
 			if (r10_bio->devs[m].bio == NULL)
 				continue;
-			if (!r10_bio->devs[m].bio->bi_error) {
+			if (!r10_bio->devs[m].bio->bi_status) {
 				rdev_clear_badblocks(
 					rdev,
 					r10_bio->devs[m].addr,
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 			if (r10_bio->devs[m].repl_bio == NULL)
 				continue;
 
-			if (!r10_bio->devs[m].repl_bio->bi_error) {
+			if (!r10_bio->devs[m].repl_bio->bi_status) {
 				rdev_clear_badblocks(
 					rdev,
 					r10_bio->devs[m].addr,
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 					r10_bio->devs[m].addr,
 					r10_bio->sectors, 0);
 				rdev_dec_pending(rdev, conf->mddev);
-			} else if (bio != NULL && bio->bi_error) {
+			} else if (bio != NULL && bio->bi_status) {
 				fail = true;
 				if (!narrow_write_error(r10_bio, m)) {
 					md_error(conf->mddev, rdev);
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
 			bio = r10_bio->devs[i].bio;
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			rcu_read_lock();
 			rdev = rcu_dereference(conf->mirrors[d].rdev);
 			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 
 			sector = r10_bio->devs[i].addr;
 			bio->bi_next = biolist;
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
 		if (bio->bi_end_io == end_sync_read) {
 			md_sync_acct(bio->bi_bdev, nr_sectors);
-			bio->bi_error = 0;
+			bio->bi_status = 0;
 			generic_make_request(bio);
 		}
 	}
@@ -3552,7 +3552,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	if (!conf->r10bio_pool)
 		goto out;
 
-	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
 	if (!conf->bio_split)
 		goto out;
 
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
 	int first = 1;
 	bool discard_supported = false;
 
+	if (mddev_init_writes_pending(mddev) < 0)
+		return -ENOMEM;
+
 	if (mddev->private == NULL) {
 		conf = setup_conf(mddev);
 		if (IS_ERR(conf))
@@ -4394,7 +4397,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 	read_bio->bi_end_io = end_reshape_read;
 	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
 	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
-	read_bio->bi_error = 0;
+	read_bio->bi_status = 0;
 	read_bio->bi_vcnt = 0;
 	read_bio->bi_iter.bi_size = 0;
 	r10_bio->master_bio = read_bio;
@@ -4638,7 +4641,7 @@ static void end_reshape_write(struct bio *bio)
 		rdev = conf->mirrors[d].rdev;
 	}
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* FIXME should record badblock */
 		md_error(mddev, rdev);
 	}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 4c00bc2..bfa1e90 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio)
 	struct r5l_log *log = io->log;
 	unsigned long flags;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		md_error(log->rdev->mddev, log->rdev);
 
 	bio_put(bio);
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio)
 	unsigned long flags;
 	struct r5l_io_unit *io;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		md_error(log->rdev->mddev, log->rdev);
 
 	spin_lock_irqsave(&log->io_list_lock, flags);
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
 					     mb, PAGE_SIZE));
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  REQ_FUA, false)) {
+			  REQ_SYNC | REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
 		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
 						     mb, PAGE_SIZE));
 		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
-			     REQ_OP_WRITE, REQ_FUA, false);
+			     REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false);
 		sh->log_start = ctx->pos;
 		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
 		atomic_inc(&log->stripe_in_journal_count);
@@ -3063,7 +3063,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	if (!log->io_pool)
 		goto io_pool;
 
-	log->bs = bioset_create(R5L_POOL_SIZE, 0);
+	log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (!log->bs)
 		goto io_bs;
 
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 5d25beb..77cce35 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio)
 
 	pr_debug("%s: seq: %llu\n", __func__, io->seq);
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		md_error(ppl_conf->mddev, log->rdev);
 
 	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log)
 	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
 
 	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
-			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0,
-			  false)) {
+			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
+			  REQ_FUA, 0, false)) {
 		md_error(rdev->mddev, rdev);
 		ret = -EIO;
 	}
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
 		goto err;
 	}
 
-	ppl_conf->bs = bioset_create(conf->raid_disks, 0);
+	ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
 	if (!ppl_conf->bs) {
 		ret = -ENOMEM;
 		goto err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c4f765..62c965b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi)
 
 	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		bi->bi_error);
+		bi->bi_status);
 	if (i == disks) {
 		bio_reset(bi);
 		BUG();
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi)
 		s = sh->sector + rdev->new_data_offset;
 	else
 		s = sh->sector + rdev->data_offset;
-	if (!bi->bi_error) {
+	if (!bi->bi_status) {
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
 			/* Note that this cannot happen on a
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi)
 	}
 	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		bi->bi_error);
+		bi->bi_status);
 	if (i == disks) {
 		bio_reset(bi);
 		BUG();
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi)
 	}
 
 	if (replacement) {
-		if (bi->bi_error)
+		if (bi->bi_status)
 			md_error(conf->mddev, rdev);
 		else if (is_badblock(rdev, sh->sector,
 				     STRIPE_SECTORS,
 				     &first_bad, &bad_sectors))
 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
 	} else {
-		if (bi->bi_error) {
+		if (bi->bi_status) {
 			set_bit(STRIPE_DEGRADED, &sh->state);
 			set_bit(WriteErrorSeen, &rdev->flags);
 			set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi)
 	}
 	rdev_dec_pending(rdev, conf->mddev);
 
-	if (sh->batch_head && bi->bi_error && !replacement)
+	if (sh->batch_head && bi->bi_status && !replacement)
 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
 	bio_reset(bi);
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
 			md_write_end(conf->mddev);
 			bio_endio(bi);
 			bi = nextbi;
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
 			md_write_end(conf->mddev);
 			bio_endio(bi);
 			bi = bi2;
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
 
-				bi->bi_error = -EIO;
+				bi->bi_status = BLK_STS_IOERR;
 				bio_endio(bi);
 				bi = nextbi;
 			}
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
 			set_bit(STRIPE_INSYNC, &sh->state);
 		else {
 			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
 				/* don't try to repair!! */
 				set_bit(STRIPE_INSYNC, &sh->state);
-			else {
+				pr_warn_ratelimited("%s: mismatch sector in range "
+						    "%llu-%llu\n", mdname(conf->mddev),
+						    (unsigned long long) sh->sector,
+						    (unsigned long long) sh->sector +
+						    STRIPE_SECTORS);
+			} else {
 				sh->check_state = check_state_compute_run;
 				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
 				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 			}
 		} else {
 			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
-			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
 				/* don't try to repair!! */
 				set_bit(STRIPE_INSYNC, &sh->state);
-			else {
+				pr_warn_ratelimited("%s: mismatch sector in range "
+						    "%llu-%llu\n", mdname(conf->mddev),
+						    (unsigned long long) sh->sector,
+						    (unsigned long long) sh->sector +
+						    STRIPE_SECTORS);
+			} else {
 				int *target = &sh->ops.target;
 
 				sh->ops.target = -1;
@@ -5144,7 +5154,7 @@ static void raid5_align_endio(struct bio *bi)
 	struct mddev *mddev;
 	struct r5conf *conf;
 	struct md_rdev *rdev;
-	int error = bi->bi_error;
+	blk_status_t error = bi->bi_status;
 
 	bio_put(bi);
 
@@ -5721,7 +5731,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			release_stripe_plug(mddev, sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
-			bi->bi_error = -EIO;
+			bi->bi_status = BLK_STS_IOERR;
 			break;
 		}
 	}
@@ -6933,7 +6943,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 			goto abort;
 	}
 
-	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
 	if (!conf->bio_split)
 		goto abort;
 	conf->mddev = mddev;
@@ -7108,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
 	long long min_offset_diff = 0;
 	int first = 1;
 
+	if (mddev_init_writes_pending(mddev) < 0)
+		return -ENOMEM;
+
 	if (mddev->recovery_cp != MaxSector)
 		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
 			  mdname(mddev));
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b72edd2..55d9c2b 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,6 +2,12 @@
 # Multimedia device configuration
 #
 
+config CEC_CORE
+	tristate
+
+config CEC_NOTIFIER
+	bool
+
 menuconfig MEDIA_SUPPORT
 	tristate "Multimedia support"
 	depends on HAS_IOMEM
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 523fea3..044503a 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -4,8 +4,6 @@
 
 media-objs	:= media-device.o media-devnode.o media-entity.o
 
-obj-$(CONFIG_CEC_CORE) += cec/
-
 #
 # I2C drivers should come before other drivers, otherwise they'll fail
 # when compiled as builtin drivers
@@ -26,6 +24,8 @@
 # There are both core and drivers at RC subtree - merge before drivers
 obj-y += rc/
 
+obj-$(CONFIG_CEC_CORE) += cec/
+
 #
 # Finally, merge the drivers that require the core
 #
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index f944d93..43428cec 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,19 +1,6 @@
-config CEC_CORE
-	tristate
-	depends on MEDIA_CEC_SUPPORT
-	default y
-
-config MEDIA_CEC_NOTIFIER
-	bool
-
 config MEDIA_CEC_RC
 	bool "HDMI CEC RC integration"
 	depends on CEC_CORE && RC_CORE
+	depends on CEC_CORE=m || RC_CORE=y
 	---help---
 	  Pass on CEC remote control messages to the RC framework.
-
-config MEDIA_CEC_DEBUG
-	bool "HDMI CEC debugfs interface"
-	depends on CEC_CORE && DEBUG_FS
-	---help---
-	  Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 402a6c6..eaf408e 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,6 +1,6 @@
 cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
 
-ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
 endif
 
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f5fe01c..9dfc798 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
 		WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
 }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 /*
  * Log the current state of the CEC adapter.
  * Very useful for debugging.
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 0860fb4..999926f 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -271,16 +271,10 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
 			bool block, struct cec_msg __user *parg)
 {
 	struct cec_msg msg = {};
-	long err = 0;
+	long err;
 
 	if (copy_from_user(&msg, parg, sizeof(msg)))
 		return -EFAULT;
-	mutex_lock(&adap->lock);
-	if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
-		err = -ENONET;
-	mutex_unlock(&adap->lock);
-	if (err)
-		return err;
 
 	err = cec_receive_msg(fh, &msg, block);
 	if (err)
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f9ebff9..2f87748 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
 	put_device(&devnode->dev);
 }
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
 {
 	cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
 	}
 
 	dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 	if (!top_cec_dir)
 		return 0;
 
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
 	adap->rc = NULL;
 #endif
 	debugfs_remove_recursive(adap->cec_dir);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 	if (adap->notifier)
 		cec_notifier_unregister(adap->notifier);
 #endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
 		return ret;
 	}
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 	top_cec_dir = debugfs_create_dir("cec", NULL);
 	if (IS_ERR_OR_NULL(top_cec_dir)) {
 		pr_warn("cec: Failed to create debugfs cec dir\n");
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index fd181c9..aaa9471 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -220,7 +220,8 @@
 
 config VIDEO_ADV7604_CEC
 	bool "Enable Analog Devices ADV7604 CEC support"
-	depends on VIDEO_ADV7604 && CEC_CORE
+	depends on VIDEO_ADV7604
+	select CEC_CORE
 	---help---
 	  When selected the adv7604 will support the optional
 	  HDMI CEC feature.
@@ -240,7 +241,8 @@
 
 config VIDEO_ADV7842_CEC
 	bool "Enable Analog Devices ADV7842 CEC support"
-	depends on VIDEO_ADV7842 && CEC_CORE
+	depends on VIDEO_ADV7842
+	select CEC_CORE
 	---help---
 	  When selected the adv7842 will support the optional
 	  HDMI CEC feature.
@@ -478,7 +480,8 @@
 
 config VIDEO_ADV7511_CEC
 	bool "Enable Analog Devices ADV7511 CEC support"
-	depends on VIDEO_ADV7511 && CEC_CORE
+	depends on VIDEO_ADV7511
+	select CEC_CORE
 	---help---
 	  When selected the adv7511 will support the optional
 	  HDMI CEC feature.
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index acef4ec..3251cba 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -223,7 +223,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
 static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
 		u8 mask, u8 val)
 {
-	i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
+	i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
 }
 
 static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ac026ee..041cb80 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -501,8 +501,9 @@
 
 config VIDEO_SAMSUNG_S5P_CEC
        tristate "Samsung S5P CEC driver"
-       depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for Samsung S5P HDMI CEC interface. It uses the
          generic CEC framework interface.
@@ -511,8 +512,9 @@
 
 config VIDEO_STI_HDMI_CEC
        tristate "STMicroelectronics STiH4xx HDMI CEC driver"
-       depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on ARCH_STI || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for STIH4xx HDMI CEC interface. It uses the
          generic CEC framework interface.
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 57a842f..b7731b1 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_h264_if = {
-	vdec_h264_init,
-	vdec_h264_decode,
-	vdec_h264_get_param,
-	vdec_h264_deinit,
+	.init		= vdec_h264_init,
+	.decode		= vdec_h264_decode,
+	.get_param	= vdec_h264_get_param,
+	.deinit		= vdec_h264_deinit,
 };
 
 struct vdec_common_if *get_h264_dec_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 6e7a62a..b9fad6a 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec)
 }
 
 static struct vdec_common_if vdec_vp8_if = {
-	vdec_vp8_init,
-	vdec_vp8_decode,
-	vdec_vp8_get_param,
-	vdec_vp8_deinit,
+	.init		= vdec_vp8_init,
+	.decode		= vdec_vp8_decode,
+	.get_param	= vdec_vp8_get_param,
+	.deinit		= vdec_vp8_deinit,
 };
 
 struct vdec_common_if *get_vp8_dec_comm_if(void);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5539b18..1daee12 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec,
 }
 
 static struct vdec_common_if vdec_vp9_if = {
-	vdec_vp9_init,
-	vdec_vp9_decode,
-	vdec_vp9_get_param,
-	vdec_vp9_deinit,
+	.init		= vdec_vp9_init,
+	.decode		= vdec_vp9_decode,
+	.get_param	= vdec_vp9_get_param,
+	.deinit		= vdec_vp9_deinit,
 };
 
 struct vdec_common_if *get_vp9_dec_comm_if(void);
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b36ac19..154de92 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -26,7 +26,8 @@
 
 config VIDEO_VIVID_CEC
 	bool "Enable CEC emulation support"
-	depends on VIDEO_VIVID && CEC_CORE
+	depends on VIDEO_VIVID
+	select CEC_CORE
 	---help---
 	  When selected the vivid module will emulate the optional
 	  HDMI CEC feature.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 90f66dc7..a2fc1a1 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  */
 void ir_raw_event_handle(struct rc_dev *dev)
 {
-	if (!dev->raw)
+	if (!dev->raw || !dev->raw->thread)
 		return;
 
 	wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 {
 	int rc;
 	struct ir_raw_handler *handler;
+	struct task_struct *thread;
 
 	if (!dev)
 		return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
 	 * because the event is coming from userspace
 	 */
 	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-		dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
-					       "rc%u", dev->minor);
+		thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
+				     dev->minor);
 
-		if (IS_ERR(dev->raw->thread)) {
-			rc = PTR_ERR(dev->raw->thread);
+		if (IS_ERR(thread)) {
+			rc = PTR_ERR(thread);
 			goto out;
 		}
+
+		dev->raw->thread = thread;
 	}
 
 	mutex_lock(&ir_raw_handler_lock);
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index e12ec50..90a5f8f 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -183,9 +183,15 @@ static irqreturn_t sir_interrupt(int irq, void *dev_id)
 	static unsigned long delt;
 	unsigned long deltintr;
 	unsigned long flags;
+	int counter = 0;
 	int iir, lsr;
 
 	while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
+		if (++counter > 256) {
+			dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
+			break;
+		}
+
 		switch (iir & UART_IIR_ID) { /* FIXME this needs to be cleaned up */
 		case UART_IIR_MSI:
 			(void)inb(io + UART_MSR);
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 8937f39..18ead44 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,7 @@
 config USB_PULSE8_CEC
 	tristate "Pulse Eight HDMI CEC"
-	depends on USB_ACM && CEC_CORE
+	depends on USB_ACM
+	select CEC_CORE
 	select SERIO
 	select SERIO_SERPORT
 	---help---
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 3eb8660..030ef01 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -1,6 +1,7 @@
 config USB_RAINSHADOW_CEC
 	tristate "RainShadow Tech HDMI CEC"
-	depends on USB_ACM && CEC_CORE
+	depends on USB_ACM
+	select CEC_CORE
 	select SERIO
 	select SERIO_SERPORT
 	---help---
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 541ca54..4126552 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
 
 	while (true) {
 		unsigned long flags;
-		bool exit_loop;
+		bool exit_loop = false;
 		char data;
 
 		spin_lock_irqsave(&rain->buf_lock, flags);
@@ -336,6 +336,7 @@ static int rain_connect(struct serio *serio, struct serio_driver *drv)
 	serio_set_drvdata(serio, rain);
 	INIT_WORK(&rain->work, rain_irq_work_handler);
 	mutex_init(&rain->write_lock);
+	spin_lock_init(&rain->buf_lock);
 
 	err = serio_open(serio, drv);
 	if (err)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 94afbbf9..c0175ea 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
 
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
 		return NULL;
 
 	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 35910f9..99e644c 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
 	return of_platform_populate(np, NULL, NULL, dev);
 }
 
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
 {
 	struct atmel_ebi *ebi = dev_get_drvdata(dev);
 	struct atmel_ebi_dev *ebid;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 6d1b4b7..a80e17d 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -460,12 +460,12 @@ static int get_gpmc_timing_reg(
 		if (l)
 			time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
 		time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
-		pr_info("gpmc,%s = <%u> /* %u ns - %u ns; %i ticks%s*/\n",
+		pr_info("gpmc,%s = <%u>; /* %u ns - %u ns; %i ticks%s*/\n",
 			name, time_ns, time_ns_min, time_ns, l,
 			invalid ? "; invalid " : " ");
 	} else {
 		/* raw format */
-		pr_info("gpmc,%s = <%u>%s\n", name, l,
+		pr_info("gpmc,%s = <%u>;%s\n", name, l,
 			invalid ? " /* invalid */" : "");
 	}
 
@@ -2083,8 +2083,11 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
 	} else {
 		ret = of_property_read_u32(child, "bank-width",
 					   &gpmc_s.device_width);
-		if (ret < 0)
+		if (ret < 0) {
+			dev_err(&pdev->dev, "%s has no 'bank-width' property\n",
+				child->full_name);
 			goto err;
+		}
 	}
 
 	/* Reserve wait pin if it is required and valid */
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 22c1aee..2744b1b 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -357,7 +357,10 @@ static int aemif_probe(struct platform_device *pdev)
 		return PTR_ERR(aemif->clk);
 	}
 
-	clk_prepare_enable(aemif->clk);
+	ret = clk_prepare_enable(aemif->clk);
+	if (ret)
+		return ret;
+
 	aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
 
 	if (of_device_is_compatible(np, "ti,da850-aemif"))
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 99e651c..22de7f5 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1921,12 +1921,13 @@ static void msb_io_work(struct work_struct *work)
 		spin_lock_irqsave(&msb->q_lock, flags);
 
 		if (len)
-			if (!__blk_end_request(msb->req, 0, len))
+			if (!__blk_end_request(msb->req, BLK_STS_OK, len))
 				msb->req = NULL;
 
 		if (error && msb->req) {
+			blk_status_t ret = errno_to_blk_status(error);
 			dbg_verbose("IO: ending one sector of the request with error");
-			if (!__blk_end_request(msb->req, error, msb->page_size))
+			if (!__blk_end_request(msb->req, ret, msb->page_size))
 				msb->req = NULL;
 		}
 
@@ -2014,7 +2015,7 @@ static void msb_submit_req(struct request_queue *q)
 		WARN_ON(!msb->io_queue_stopped);
 
 		while ((req = blk_fetch_request(q)) != NULL)
-			__blk_end_request_all(req, -ENODEV);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 		return;
 	}
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c00d8a2..8897962 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -709,7 +709,8 @@ static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
 					       msb->req_sg);
 
 		if (!msb->seg_count) {
-			chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
+			chunk = __blk_end_request_cur(msb->block_req,
+					BLK_STS_RESOURCE);
 			continue;
 		}
 
@@ -776,7 +777,8 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
 		if (error && !t_len)
 			t_len = blk_rq_cur_bytes(msb->block_req);
 
-		chunk = __blk_end_request(msb->block_req, error, t_len);
+		chunk = __blk_end_request(msb->block_req,
+				errno_to_blk_status(error), t_len);
 
 		error = mspro_block_issue_req(card, chunk);
 
@@ -838,7 +840,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
 	if (msb->eject) {
 		while ((req = blk_fetch_request(q)) != NULL)
-			__blk_end_request_all(req, -ENODEV);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 
 		return;
 	}
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 75488e6..8d46e3a 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona,
 	int ret;
 
 	ret = regmap_read_poll_timeout(arizona->regmap,
-				       ARIZONA_INTERRUPT_RAW_STATUS_5, val,
-				       ((val & mask) == target),
+				       reg, val, ((val & mask) == target),
 				       ARIZONA_REG_POLL_DELAY_US,
 				       timeout_ms * 1000);
 	if (ret)
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 11cab15..8263605 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -328,11 +328,7 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
 		goto err_sleep_init;
 	}
 
-	/* Return if there is no sleep keepon data. */
-	if (!pmic_pdata->slp_keepon)
-		return 0;
-
-	if (pmic_pdata->slp_keepon->therm_keepon) {
+	if (pmic_pdata->slp_keepon.therm_keepon) {
 		ret = tps65910_reg_set_bits(tps65910,
 				TPS65910_SLEEP_KEEP_RES_ON,
 				SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
@@ -342,7 +338,7 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
 		}
 	}
 
-	if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+	if (pmic_pdata->slp_keepon.clkout32k_keepon) {
 		ret = tps65910_reg_set_bits(tps65910,
 				TPS65910_SLEEP_KEEP_RES_ON,
 				SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
@@ -352,7 +348,7 @@ static int tps65910_sleepinit(struct tps65910 *tps65910,
 		}
 	}
 
-	if (pmic_pdata->slp_keepon->i2chs_keepon) {
+	if (pmic_pdata->slp_keepon.i2chs_keepon) {
 		ret = tps65910_reg_set_bits(tps65910,
 				TPS65910_SLEEP_KEEP_RES_ON,
 				SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
@@ -415,6 +411,18 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
 	prop = of_property_read_bool(np, "ti,en-ck32k-xtal");
 	board_info->en_ck32k_xtal = prop;
 
+	prop = of_property_read_bool(np, "ti,sleep-enable");
+	board_info->en_dev_slp = prop;
+
+	prop = of_property_read_bool(np, "ti,sleep-keep-therm");
+	board_info->slp_keepon.therm_keepon = prop;
+
+	prop = of_property_read_bool(np, "ti,sleep-keep-ck32k");
+	board_info->slp_keepon.clkout32k_keepon = prop;
+
+	prop = of_property_read_bool(np, "ti,sleep-keep-hsclk");
+	board_info->slp_keepon.i2chs_keepon = prop;
+
 	board_info->irq = client->irq;
 	board_info->irq_base = -1;
 	board_info->pm_off = of_property_read_bool(np,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 07bbd4c..8136dc7 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -490,6 +490,14 @@
 	  ioctl()s, the driver also provides a read/write interface to a BMC ram
 	  region where the host LPC read/write region can be buffered.
 
+config ASPEED_LPC_SNOOP
+	tristate "Aspeed ast2500 HOST LPC snoop support"
+	depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+	help
+	  Provides a driver to control the LPC snoop interface which
+	  allows the BMC to listen on and save the data written by
+	  the host to an arbitrary LPC I/O port.
+
 config PCI_ENDPOINT_TEST
 	depends on PCI
 	select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 81ef3e6..b0b7664 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,7 @@
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_ASPEED_LPC_CTRL)	+= aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP)	+= aspeed-lpc-snoop.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST)	+= pci_endpoint_test.o
 
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index dfb72ec..84e5b94 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -32,7 +32,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/i2c/apds990x.h>
+#include <linux/platform_data/apds990x.h>
 
 /* Register map */
 #define APDS990X_ENABLE	 0x00 /* Enable of states and interrupts */
@@ -841,7 +841,7 @@ static ssize_t apds990x_prox_enable_store(struct device *dev,
 static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
 						   apds990x_prox_enable_store);
 
-static const char reporting_modes[][9] = {"trigger", "periodic"};
+static const char *reporting_modes[] = {"trigger", "periodic"};
 
 static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
@@ -856,13 +856,13 @@ static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
 				  const char *buf, size_t len)
 {
 	struct apds990x_chip *chip =  dev_get_drvdata(dev);
+	int ret;
 
-	if (sysfs_streq(buf, reporting_modes[0]))
-		chip->prox_continuous_mode = 0;
-	else if (sysfs_streq(buf, reporting_modes[1]))
-		chip->prox_continuous_mode = 1;
-	else
-		return -EINVAL;
+	ret = sysfs_match_string(reporting_modes, buf);
+	if (ret < 0)
+		return ret;
+
+	chip->prox_continuous_mode = ret;
 	return len;
 }
 
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
new file mode 100644
index 0000000..5939055
--- /dev/null
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2017 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED LPC snoop interface which
+ * allows the BMC to listen on and save the data written by
+ * the host to an arbitrary LPC I/O port.
+ *
+ * Typically used by the BMC to "watch" host boot progress via port
+ * 0x80 writes made by the BIOS during the boot process.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define DEVICE_NAME	"aspeed-lpc-snoop"
+
+#define NUM_SNOOP_CHANNELS 2
+#define SNOOP_FIFO_SIZE 2048
+
+#define HICR5	0x0
+#define HICR5_EN_SNP0W		BIT(0)
+#define HICR5_ENINT_SNP0W	BIT(1)
+#define HICR5_EN_SNP1W		BIT(2)
+#define HICR5_ENINT_SNP1W	BIT(3)
+
+#define HICR6	0x4
+#define HICR6_STR_SNP0W		BIT(0)
+#define HICR6_STR_SNP1W		BIT(1)
+#define SNPWADR	0x10
+#define SNPWADR_CH0_MASK	GENMASK(15, 0)
+#define SNPWADR_CH0_SHIFT	0
+#define SNPWADR_CH1_MASK	GENMASK(31, 16)
+#define SNPWADR_CH1_SHIFT	16
+#define SNPWDR	0x14
+#define SNPWDR_CH0_MASK		GENMASK(7, 0)
+#define SNPWDR_CH0_SHIFT	0
+#define SNPWDR_CH1_MASK		GENMASK(15, 8)
+#define SNPWDR_CH1_SHIFT	8
+#define HICRB	0x80
+#define HICRB_ENSNP0D		BIT(14)
+#define HICRB_ENSNP1D		BIT(15)
+
+struct aspeed_lpc_snoop {
+	struct regmap		*regmap;
+	int			irq;
+	struct kfifo		snoop_fifo[NUM_SNOOP_CHANNELS];
+};
+
+/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
+static void put_fifo_with_discard(struct kfifo *fifo, u8 val)
+{
+	if (!kfifo_initialized(fifo))
+		return;
+	if (kfifo_is_full(fifo))
+		kfifo_skip(fifo);
+	kfifo_put(fifo, val);
+}
+
+static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
+{
+	struct aspeed_lpc_snoop *lpc_snoop = arg;
+	u32 reg, data;
+
+	if (regmap_read(lpc_snoop->regmap, HICR6, &reg))
+		return IRQ_NONE;
+
+	/* Check if one of the snoop channels is interrupting */
+	reg &= (HICR6_STR_SNP0W | HICR6_STR_SNP1W);
+	if (!reg)
+		return IRQ_NONE;
+
+	/* Ack pending IRQs */
+	regmap_write(lpc_snoop->regmap, HICR6, reg);
+
+	/* Read and save most recent snooped data byte to FIFO */
+	regmap_read(lpc_snoop->regmap, SNPWDR, &data);
+
+	if (reg & HICR6_STR_SNP0W) {
+		u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
+
+		put_fifo_with_discard(&lpc_snoop->snoop_fifo[0], val);
+	}
+	if (reg & HICR6_STR_SNP1W) {
+		u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
+
+		put_fifo_with_discard(&lpc_snoop->snoop_fifo[1], val);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
+				       struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int rc;
+
+	lpc_snoop->irq = platform_get_irq(pdev, 0);
+	if (!lpc_snoop->irq)
+		return -ENODEV;
+
+	rc = devm_request_irq(dev, lpc_snoop->irq,
+			      aspeed_lpc_snoop_irq, IRQF_SHARED,
+			      DEVICE_NAME, lpc_snoop);
+	if (rc < 0) {
+		dev_warn(dev, "Unable to request IRQ %d\n", lpc_snoop->irq);
+		lpc_snoop->irq = 0;
+		return rc;
+	}
+
+	return 0;
+}
+
+static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+				  int channel, u16 lpc_port)
+{
+	int rc = 0;
+	u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+
+	/* Create FIFO data structure */
+	rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
+			 SNOOP_FIFO_SIZE, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	/* Enable LPC snoop channel at requested port */
+	switch (channel) {
+	case 0:
+		hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W;
+		snpwadr_mask = SNPWADR_CH0_MASK;
+		snpwadr_shift = SNPWADR_CH0_SHIFT;
+		hicrb_en = HICRB_ENSNP0D;
+		break;
+	case 1:
+		hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W;
+		snpwadr_mask = SNPWADR_CH1_MASK;
+		snpwadr_shift = SNPWADR_CH1_SHIFT;
+		hicrb_en = HICRB_ENSNP1D;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
+	regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
+			   lpc_port << snpwadr_shift);
+	regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en);
+
+	return rc;
+}
+
+static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+				     int channel)
+{
+	switch (channel) {
+	case 0:
+		regmap_update_bits(lpc_snoop->regmap, HICR5,
+				   HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
+				   0);
+		break;
+	case 1:
+		regmap_update_bits(lpc_snoop->regmap, HICR5,
+				   HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
+				   0);
+		break;
+	default:
+		return;
+	}
+
+	kfifo_free(&lpc_snoop->snoop_fifo[channel]);
+}
+
+static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+{
+	struct aspeed_lpc_snoop *lpc_snoop;
+	struct device *dev;
+	u32 port;
+	int rc;
+
+	dev = &pdev->dev;
+
+	lpc_snoop = devm_kzalloc(dev, sizeof(*lpc_snoop), GFP_KERNEL);
+	if (!lpc_snoop)
+		return -ENOMEM;
+
+	lpc_snoop->regmap = syscon_node_to_regmap(
+			pdev->dev.parent->of_node);
+	if (IS_ERR(lpc_snoop->regmap)) {
+		dev_err(dev, "Couldn't get regmap\n");
+		return -ENODEV;
+	}
+
+	dev_set_drvdata(&pdev->dev, lpc_snoop);
+
+	rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port);
+	if (rc) {
+		dev_err(dev, "no snoop ports configured\n");
+		return -ENODEV;
+	}
+
+	rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+	if (rc)
+		return rc;
+
+	rc = aspeed_lpc_enable_snoop(lpc_snoop, 0, port);
+	if (rc)
+		return rc;
+
+	/* Configuration of 2nd snoop channel port is optional */
+	if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+				       1, &port) == 0) {
+		rc = aspeed_lpc_enable_snoop(lpc_snoop, 1, port);
+		if (rc)
+			aspeed_lpc_disable_snoop(lpc_snoop, 0);
+	}
+
+	return rc;
+}
+
+static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+{
+	struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
+
+	/* Disable both snoop channels */
+	aspeed_lpc_disable_snoop(lpc_snoop, 0);
+	aspeed_lpc_disable_snoop(lpc_snoop, 1);
+
+	return 0;
+}
+
+static const struct of_device_id aspeed_lpc_snoop_match[] = {
+	{ .compatible = "aspeed,ast2500-lpc-snoop" },
+	{ },
+};
+
+static struct platform_driver aspeed_lpc_snoop_driver = {
+	.driver = {
+		.name		= DEVICE_NAME,
+		.of_match_table = aspeed_lpc_snoop_match,
+	},
+	.probe = aspeed_lpc_snoop_probe,
+	.remove = aspeed_lpc_snoop_remove,
+};
+
+module_platform_driver(aspeed_lpc_snoop_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_lpc_snoop_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Lippert <rlippert@google.com>");
+MODULE_DESCRIPTION("Linux driver to control Aspeed LPC snoop functionality");
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 845466e..38fcfe2 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -27,7 +27,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
-#include <linux/i2c/bh1770glc.h>
+#include <linux/platform_data/bh1770glc.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/workqueue.h>
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 4472ce1..8c32040 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -45,7 +45,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
 	mutex_init(&ctx->mapping_lock);
 	ctx->mapping = NULL;
 
-	if (cxl_is_psl8(afu)) {
+	if (cxl_is_power8()) {
 		spin_lock_init(&ctx->sste_lock);
 
 		/*
@@ -189,7 +189,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 		if (start + len > ctx->afu->adapter->ps_size)
 			return -EINVAL;
 
-		if (cxl_is_psl9(ctx->afu)) {
+		if (cxl_is_power9()) {
 			/*
 			 * Make sure there is a valid problem state
 			 * area space for this AFU.
@@ -324,7 +324,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
 {
 	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
-	if (cxl_is_psl8(ctx->afu))
+	if (cxl_is_power8())
 		free_page((u64)ctx->sstp);
 	if (ctx->ff_page)
 		__free_page(ctx->ff_page);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index c8568ea..a03f8e7 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -357,6 +357,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An     = {0x0A0};
 #define CXL_PSL9_DSISR_An_PF_RGP  0x0000000000000090ULL  /* PTE not found (Radix Guest (parent)) 0b10010000 */
 #define CXL_PSL9_DSISR_An_PF_HRH  0x0000000000000094ULL  /* PTE not found (HPT/Radix Host)       0b10010100 */
 #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL  /* PTE not found (STEG VA)              0b10011100 */
+#define CXL_PSL9_DSISR_An_URTCH   0x00000000000000B4ULL  /* Unsupported Radix Tree Configuration 0b10110100 */
 
 /****** CXL_PSL_TFC_An ******************************************************/
 #define CXL_PSL_TFC_An_A  (1ull << (63-28)) /* Acknowledge non-translation fault */
@@ -844,24 +845,15 @@ static inline bool cxl_is_power8(void)
 
 static inline bool cxl_is_power9(void)
 {
-	/* intermediate solution */
-	if (!cxl_is_power8() &&
-	   (cpu_has_feature(CPU_FTRS_POWER9) ||
-	    cpu_has_feature(CPU_FTR_POWER9_DD1)))
+	if (pvr_version_is(PVR_POWER9))
 		return true;
 	return false;
 }
 
-static inline bool cxl_is_psl8(struct cxl_afu *afu)
+static inline bool cxl_is_power9_dd1(void)
 {
-	if (afu->adapter->caia_major == 1)
-		return true;
-	return false;
-}
-
-static inline bool cxl_is_psl9(struct cxl_afu *afu)
-{
-	if (afu->adapter->caia_major == 2)
+	if ((pvr_version_is(PVR_POWER9)) &&
+	    cpu_has_feature(CPU_FTR_POWER9_DD1))
 		return true;
 	return false;
 }
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 5344448..c79e39b 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -187,7 +187,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
 
 static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 {
-	if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS))
+	if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
 		return true;
 
 	return false;
@@ -195,15 +195,22 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 
 static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
 {
-	if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM))
+	u64 crs; /* Translation Checkout Response Status */
+
+	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
 		return true;
 
-	if ((cxl_is_psl9(ctx->afu)) &&
-	   ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) &
-		(CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC |
-		 CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH |
-		 CXL_PSL9_DSISR_An_PF_STEG)))
-		return true;
+	if (cxl_is_power9()) {
+		crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
+		if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
+		    (crs == CXL_PSL9_DSISR_An_URTCH)) {
+			return true;
+		}
+	}
 
 	return false;
 }
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 17b433f..0761271 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
 	/* Do this outside the status_mutex to avoid a circular dependency with
 	 * the locking in cxl_mmap_fault() */
-	if (copy_from_user(&work, uwork,
-			   sizeof(struct cxl_ioctl_start_work))) {
-		rc = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&work, uwork, sizeof(work)))
+		return -EFAULT;
 
 	mutex_lock(&ctx->status_mutex);
 	if (ctx->status != OPENED) {
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 1703655..c1ba0d4 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -329,8 +329,15 @@ static int __init init_cxl(void)
 
 	cxl_debugfs_init();
 
-	if ((rc = register_cxl_calls(&cxl_calls)))
-		goto err;
+	/*
+	 * We don't register the callback on P9. The SLB callback is only
+	 * used for the PSL8 MMU and CX4.
+	 */
+	if (cxl_is_power8()) {
+		rc = register_cxl_calls(&cxl_calls);
+		if (rc)
+			goto err;
+	}
 
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		cxl_ops = &cxl_native_ops;
@@ -347,7 +354,8 @@ static int __init init_cxl(void)
 
 	return 0;
 err1:
-	unregister_cxl_calls(&cxl_calls);
+	if (cxl_is_power8())
+		unregister_cxl_calls(&cxl_calls);
 err:
 	cxl_debugfs_exit();
 	cxl_file_exit();
@@ -366,7 +374,8 @@ static void exit_cxl(void)
 
 	cxl_debugfs_exit();
 	cxl_file_exit();
-	unregister_cxl_calls(&cxl_calls);
+	if (cxl_is_power8())
+		unregister_cxl_calls(&cxl_calls);
 	idr_destroy(&cxl_adapter_idr);
 }
 
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 871a2f0..2b2f889 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -105,11 +105,16 @@ static int native_afu_reset(struct cxl_afu *afu)
 			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
 			   false);
 
-	/* Re-enable any masked interrupts */
-	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
-	serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
-	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
+	/*
+	 * Re-enable any masked interrupts when the AFU is not
+	 * activated to avoid side effects after attaching a process
+	 * in dedicated mode.
+	 */
+	if (afu->current_mode == 0) {
+		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
+		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+	}
 
 	return rc;
 }
@@ -139,9 +144,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
 
 	pr_devel("PSL purge request\n");
 
-	if (cxl_is_psl8(afu))
+	if (cxl_is_power8())
 		trans_fault = CXL_PSL_DSISR_TRANS;
-	if (cxl_is_psl9(afu))
+	if (cxl_is_power9())
 		trans_fault = CXL_PSL9_DSISR_An_TF;
 
 	if (!cxl_ops->link_ok(afu->adapter, afu)) {
@@ -603,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
 		if (!test_tsk_thread_flag(current, TIF_32BIT))
 			sr |= CXL_PSL_SR_An_SF;
 	}
-	if (cxl_is_psl9(ctx->afu)) {
+	if (cxl_is_power9()) {
 		if (radix_enabled())
 			sr |= CXL_PSL_SR_An_XLAT_ror;
 		else
@@ -1117,10 +1122,10 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
 
 static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
 {
-	if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS))
+	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
 		return true;
 
-	if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF))
+	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
 		return true;
 
 	return false;
@@ -1194,10 +1199,10 @@ static void native_irq_wait(struct cxl_context *ctx)
 		if (ph != ctx->pe)
 			return;
 		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
-		if (cxl_is_psl8(ctx->afu) &&
+		if (cxl_is_power8() &&
 		   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
 			return;
-		if (cxl_is_psl9(ctx->afu) &&
+		if (cxl_is_power9() &&
 		   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
 			return;
 		/*
@@ -1302,13 +1307,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+	if (adapter->native->err_virq == 0 ||
+	    adapter->native->err_virq !=
+	    irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->native->err_virq, adapter);
 	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
+	adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1354,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+	if (afu->serr_virq == 0 ||
+	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
 		return;
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
 	kfree(afu->err_irq_name);
+	afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1385,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+	if (afu->native->psl_virq == 0 ||
+	    afu->native->psl_virq !=
+	    irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;
 
 	cxl_unmap_irq(afu->native->psl_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
+	afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 6dc1ee5..1eb9859 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -436,7 +436,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
 	/* nMMU_ID Defaults to: b’000001001’*/
 	xsl_dsnctl |= ((u64)0x09 << (63-28));
 
-	if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+	if (!(cxl_is_power9_dd1())) {
 		/*
 		 * Used to identify CAPI packets which should be sorted into
 		 * the Non-Blocking queues by the PHB. This field should match
@@ -491,7 +491,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
 	cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
 
 	/* Disable vc dd1 fix */
-	if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1)))
+	if (cxl_is_power9_dd1())
 		cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
 
 	return 0;
@@ -1439,8 +1439,7 @@ int cxl_pci_reset(struct cxl *adapter)
 	 * The adapter is about to be reset, so ignore errors.
 	 * Not supported on P9 DD1
 	 */
-	if ((cxl_is_power8()) ||
-	    ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
 		cxl_data_cache_flush(adapter);
 
 	/* pcie_warm_reset requests a fundamental pci reset which includes a
@@ -1750,7 +1749,6 @@ static const struct cxl_service_layer_ops psl9_ops = {
 	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
 	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
 	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
-	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
 	.debugfs_stop_trace = cxl_stop_trace_psl9,
 	.write_timebase_ctrl = write_timebase_ctrl_psl9,
 	.timebase_read = timebase_read_psl9,
@@ -1889,8 +1887,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
 	 * Flush adapter datacache as its about to be removed.
 	 * Not supported on P9 DD1.
 	 */
-	if ((cxl_is_power8()) ||
-	    ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
 		cxl_data_cache_flush(adapter);
 
 	cxl_deconfigure_adapter(adapter);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index d1928fd..40c7908 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
 {
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	u8 version = mei_me_cl_ver(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
+	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
+			 cldev->name, uuid, version);
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -1038,7 +1040,7 @@ static void mei_cl_bus_dev_init(struct mei_device *bus,
  *
  * @bus: mei device
  */
-void mei_cl_bus_rescan(struct mei_device *bus)
+static void mei_cl_bus_rescan(struct mei_device *bus)
 {
 	struct mei_cl_device *cldev, *n;
 	struct mei_me_client *me_cl;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index e1e4d47..5c8286b 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -65,7 +65,7 @@
 #define HBM_MAJOR_VERSION_DOT              2
 
 /*
- * MEI version with notifcation support
+ * MEI version with notification support
  */
 #define HBM_MINOR_VERSION_EV               0
 #define HBM_MAJOR_VERSION_EV               2
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index c8ad9ee..d2f6914 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -215,12 +215,6 @@ int mei_start(struct mei_device *dev)
 		}
 	} while (ret);
 
-	/* we cannot start the device w/o hbm start message completed */
-	if (dev->dev_state == MEI_DEV_DISABLED) {
-		dev_err(dev->dev, "reset failed");
-		goto err;
-	}
-
 	if (mei_hbm_start_wait(dev)) {
 		dev_err(dev->dev, "HBM haven't started");
 		goto err;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c14e352..b0b8f18 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -235,6 +235,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
 	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
 }
 
+static inline int hdr_is_valid(u32 msg_hdr)
+{
+	struct mei_msg_hdr *mei_hdr;
+
+	mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
+	if (!msg_hdr || mei_hdr->reserved)
+		return -EBADMSG;
+
+	return 0;
+}
+
 /**
  * mei_irq_read_handler - bottom half read routine after ISR to
  * handle the read processing.
@@ -256,17 +267,18 @@ int mei_irq_read_handler(struct mei_device *dev,
 		dev->rd_msg_hdr = mei_read_hdr(dev);
 		(*slots)--;
 		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
-	}
-	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
-	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
 
-	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
-		dev_err(dev->dev, "corrupted message header 0x%08X\n",
+		ret = hdr_is_valid(dev->rd_msg_hdr);
+		if (ret) {
+			dev_err(dev->dev, "corrupted message header 0x%08X\n",
 				dev->rd_msg_hdr);
-		ret = -EBADMSG;
-		goto end;
+			goto end;
+		}
 	}
 
+	mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr;
+	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+
 	if (mei_slots2data(*slots) < mei_hdr->length) {
 		dev_err(dev->dev, "less data available than length=%08x.\n",
 				*slots);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 63a67c9..ebcd513 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -306,7 +306,6 @@ struct mei_hw_ops {
 };
 
 /* MEI bus API*/
-void mei_cl_bus_rescan(struct mei_device *bus);
 void mei_cl_bus_rescan_work(struct work_struct *work);
 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index c862cd4..b8069ee 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -309,6 +309,9 @@ static inline enum xp_retval
 xpc_send(short partid, int ch_number, u32 flags, void *payload,
 	 u16 payload_size)
 {
+	if (!xpc_interface.send)
+		return xpNotLoaded;
+
 	return xpc_interface.send(partid, ch_number, flags, payload,
 				  payload_size);
 }
@@ -317,6 +320,9 @@ static inline enum xp_retval
 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 		u16 payload_size, xpc_notify_func func, void *key)
 {
+	if (!xpc_interface.send_notify)
+		return xpNotLoaded;
+
 	return xpc_interface.send_notify(partid, ch_number, flags, payload,
 					 payload_size, func, key);
 }
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload,
 static inline void
 xpc_received(short partid, int ch_number, void *payload)
 {
-	return xpc_interface.received(partid, ch_number, payload);
+	if (xpc_interface.received)
+		xpc_interface.received(partid, ch_number, payload);
 }
 
 static inline enum xp_retval
 xpc_partid_to_nasids(short partid, void *nasids)
 {
+	if (!xpc_interface.partid_to_nasids)
+		return xpNotLoaded;
+
 	return xpc_interface.partid_to_nasids(partid, nasids);
 }
 
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 01be66d..6d7f557 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
- * Initialize the XPC interface to indicate that XPC isn't loaded.
+ * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
  */
-static enum xp_retval
-xpc_notloaded(void)
-{
-	return xpNotLoaded;
-}
-
-struct xpc_interface xpc_interface = {
-	(void (*)(int))xpc_notloaded,
-	(void (*)(int))xpc_notloaded,
-	(enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-	(enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
-			   void *))xpc_notloaded,
-	(void (*)(short, int, void *))xpc_notloaded,
-	(enum xp_retval(*)(short, void *))xpc_notloaded
-};
+struct xpc_interface xpc_interface = { };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
 /*
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface);
 void
 xpc_clear_interface(void)
 {
-	xpc_interface.connect = (void (*)(int))xpc_notloaded;
-	xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-	xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
-	    xpc_notloaded;
-	xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
-						       u16, xpc_notify_func,
-						       void *))xpc_notloaded;
-	xpc_interface.received = (void (*)(short, int, void *))
-	    xpc_notloaded;
-	xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
-	    xpc_notloaded;
+	memset(&xpc_interface, 0, sizeof(xpc_interface));
 }
 EXPORT_SYMBOL_GPL(xpc_clear_interface);
 
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
 	mutex_unlock(&registration->mutex);
 
-	xpc_interface.connect(ch_number);
+	if (xpc_interface.connect)
+		xpc_interface.connect(ch_number);
 
 	return xpSuccess;
 }
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number)
 	registration->assigned_limit = 0;
 	registration->idle_limit = 0;
 
-	xpc_interface.disconnect(ch_number);
+	if (xpc_interface.disconnect)
+		xpc_interface.disconnect(ch_number);
 
 	mutex_unlock(&registration->mutex);
 
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index 3d528a1..426ad91 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/sram.h>
 
+#include <asm/fncpy.h>
 #include <asm/set_memory.h>
 
 #include "sram.h"
@@ -58,20 +59,32 @@ int sram_add_protect_exec(struct sram_partition *part)
  * @src: Source address for the data to copy
  * @size: Size of copy to perform, which starting from dst, must reside in pool
  *
+ * Return: Address for copied data that can safely be called through function
+ *	   pointer, or NULL on failure.
+ *
  * This helper function allows sram driver to act as central control location
  * of 'protect-exec' pools which are normal sram pools but are always set
  * read-only and executable except when copying data to them, at which point
  * they are set to read-write non-executable, to make sure no memory is
  * writeable and executable at the same time. This region must be page-aligned
  * and is checked during probe, otherwise page attribute manipulation would
- * not be possible.
+ * not be possible. Care must be taken to only call the returned address, as
+ * the dst address is not guaranteed to be safely callable.
+ *
+ * NOTE: This function uses the fncpy macro to move code to the executable
+ * region. Some architectures have strict requirements for relocating
+ * executable code, so fncpy is a macro that must be defined by any arch
+ * making use of this functionality that guarantees a safe copy of exec
+ * data and returns a safe address that can be called as a C function
+ * pointer.
  */
-int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
-		   size_t size)
+void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+		     size_t size)
 {
 	struct sram_partition *part = NULL, *p;
 	unsigned long base;
 	int pages;
+	void *dst_cpy;
 
 	mutex_lock(&exec_pool_list_mutex);
 	list_for_each_entry(p, &exec_pool_list, list) {
@@ -81,10 +94,10 @@ int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
 	mutex_unlock(&exec_pool_list_mutex);
 
 	if (!part)
-		return -EINVAL;
+		return NULL;
 
 	if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
-		return -EINVAL;
+		return NULL;
 
 	base = (unsigned long)part->base;
 	pages = PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,13 +107,13 @@ int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
 	set_memory_nx((unsigned long)base, pages);
 	set_memory_rw((unsigned long)base, pages);
 
-	memcpy(dst, src, size);
+	dst_cpy = fncpy(dst, src, size);
 
 	set_memory_ro((unsigned long)base, pages);
 	set_memory_x((unsigned long)base, pages);
 
 	mutex_unlock(&part->lock);
 
-	return 0;
+	return dst_cpy;
 }
 EXPORT_SYMBOL_GPL(sram_exec_copy);
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index fc1ecda..42e8906 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -61,24 +61,6 @@
 
 	  If unsure, say 8 here.
 
-config MMC_BLOCK_BOUNCE
-	bool "Use bounce buffer for simple hosts"
-	depends on MMC_BLOCK
-	default y
-	help
-	  SD/MMC is a high latency protocol where it is crucial to
-	  send large requests in order to get high performance. Many
-	  controllers, however, are restricted to continuous memory
-	  (i.e. they can't do scatter-gather), something the kernel
-	  rarely can provide.
-
-	  Say Y here to help these restricted hosts by bouncing
-	  requests back and forth from a large buffer. You will get
-	  a big performance gain at the cost of up to 64 KiB of
-	  physical memory.
-
-	  If unsure, say Y here.
-
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8273b07..0cfac2d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -127,14 +127,6 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md);
-static int get_card_status(struct mmc_card *card, u32 *status, int retries);
-
-static void mmc_blk_requeue(struct request_queue *q, struct request *req)
-{
-	spin_lock_irq(q->queue_lock);
-	blk_requeue_request(q, req);
-	spin_unlock_irq(q->queue_lock);
-}
 
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
@@ -197,6 +189,8 @@ static ssize_t power_ro_lock_store(struct device *dev,
 	int ret;
 	struct mmc_blk_data *md, *part_md;
 	struct mmc_card *card;
+	struct mmc_queue *mq;
+	struct request *req;
 	unsigned long set;
 
 	if (kstrtoul(buf, 0, &set))
@@ -206,20 +200,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
 		return count;
 
 	md = mmc_blk_get(dev_to_disk(dev));
+	mq = &md->queue;
 	card = md->queue.card;
 
-	mmc_get_card(card);
-
-	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
-				card->ext_csd.boot_ro_lock |
-				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
-				card->ext_csd.part_time);
-	if (ret)
-		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
-	else
-		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
-
-	mmc_put_card(card);
+	/* Dispatch locking to the block layer */
+	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+	blk_execute_rq(mq->queue, NULL, req, 0);
+	ret = req_to_mmc_queue_req(req)->drv_op_result;
 
 	if (!ret) {
 		pr_info("%s: Locking boot partition ro until next power on\n",
@@ -392,7 +380,7 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
 		return -EINVAL;
 
 	do {
-		err = get_card_status(card, status, 5);
+		err = __mmc_send_status(card, status, 5);
 		if (err)
 			break;
 
@@ -450,7 +438,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 	struct mmc_request mrq = {};
 	struct scatterlist sg;
 	int err;
-	int is_rpmb = false;
+	bool is_rpmb = false;
 	u32 status = 0;
 
 	if (!card || !md || !idata)
@@ -570,9 +558,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 			     struct mmc_ioc_cmd __user *ic_ptr)
 {
 	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_ioc_data *idatas[1];
 	struct mmc_blk_data *md;
+	struct mmc_queue *mq;
 	struct mmc_card *card;
 	int err = 0, ioc_err = 0;
+	struct request *req;
 
 	/*
 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -598,17 +589,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
 		goto cmd_done;
 	}
 
-	mmc_get_card(card);
-
-	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
-
+	/*
+	 * Dispatch the ioctl() into the block request queue.
+	 */
+	mq = &md->queue;
+	req = blk_get_request(mq->queue,
+		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+		__GFP_RECLAIM);
+	idatas[0] = idata;
+	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->idata = idatas;
+	req_to_mmc_queue_req(req)->ioc_count = 1;
+	blk_execute_rq(mq->queue, NULL, req, 0);
+	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
 	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+	blk_put_request(req);
 
 cmd_done:
 	mmc_blk_put(md);
@@ -625,8 +620,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 	struct mmc_ioc_cmd __user *cmds = user->cmds;
 	struct mmc_card *card;
 	struct mmc_blk_data *md;
+	struct mmc_queue *mq;
 	int i, err = 0, ioc_err = 0;
 	__u64 num_of_cmds;
+	struct request *req;
 
 	/*
 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -668,21 +665,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
 		goto cmd_done;
 	}
 
-	mmc_get_card(card);
 
-	for (i = 0; i < num_of_cmds && !ioc_err; i++)
-		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
+	/*
+	 * Dispatch the ioctl()s into the block request queue.
+	 */
+	mq = &md->queue;
+	req = blk_get_request(mq->queue,
+		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+		__GFP_RECLAIM);
+	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+	req_to_mmc_queue_req(req)->idata = idata;
+	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
+	blk_execute_rq(mq->queue, NULL, req, 0);
+	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
 
 	/* copy to user if data and response */
 	for (i = 0; i < num_of_cmds && !err; i++)
 		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
 
+	blk_put_request(req);
+
 cmd_done:
 	mmc_blk_put(md);
 cmd_err:
@@ -852,21 +854,6 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
 	return 0;
 }
 
-static int get_card_status(struct mmc_card *card, u32 *status, int retries)
-{
-	struct mmc_command cmd = {};
-	int err;
-
-	cmd.opcode = MMC_SEND_STATUS;
-	if (!mmc_host_is_spi(card->host))
-		cmd.arg = card->rca << 16;
-	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &cmd, retries);
-	if (err == 0)
-		*status = cmd.resp[0];
-	return err;
-}
-
 static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 		bool hw_busy_detect, struct request *req, bool *gen_err)
 {
@@ -875,7 +862,7 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
 	u32 status;
 
 	do {
-		err = get_card_status(card, &status, 5);
+		err = __mmc_send_status(card, &status, 5);
 		if (err) {
 			pr_err("%s: error %d requesting status\n",
 			       req->rq_disk->disk_name, err);
@@ -1043,7 +1030,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	 * we can't be sure the returned status is for the r/w command.
 	 */
 	for (retry = 2; retry >= 0; retry--) {
-		err = get_card_status(card, &status, 0);
+		err = __mmc_send_status(card, &status, 0);
 		if (!err)
 			break;
 
@@ -1178,15 +1165,64 @@ int mmc_access_rpmb(struct mmc_queue *mq)
 	return false;
 }
 
+/*
+ * Non-block commands come back from the block layer after they have been
+ * queued and processed with all other requests, and are then issued in this
+ * function.
+ */
+static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_queue_req *mq_rq;
+	struct mmc_card *card = mq->card;
+	struct mmc_blk_data *md = mq->blkdata;
+	int ret;
+	int i;
+
+	mq_rq = req_to_mmc_queue_req(req);
+
+	switch (mq_rq->drv_op) {
+	case MMC_DRV_OP_IOCTL:
+		for (i = 0; i < mq_rq->ioc_count; i++) {
+			ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
+			if (ret)
+				break;
+		}
+		/* Always switch back to main area after RPMB access */
+		if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+			mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+		break;
+	case MMC_DRV_OP_BOOT_WP:
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+				 card->ext_csd.boot_ro_lock |
+				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+				 card->ext_csd.part_time);
+		if (ret)
+			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
+			       md->disk->disk_name, ret);
+		else
+			card->ext_csd.boot_ro_lock |=
+				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+		break;
+	default:
+		pr_err("%s: unknown driver specific operation\n",
+		       md->disk->disk_name);
+		ret = -EINVAL;
+		break;
+	}
+	mq_rq->drv_op_result = ret;
+	blk_end_request_all(req, ret);
+}
+
 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_DISCARD;
+	blk_status_t status = BLK_STS_OK;
 
 	if (!mmc_can_erase(card)) {
-		err = -EOPNOTSUPP;
+		status = BLK_STS_NOTSUPP;
 		goto fail;
 	}
 
@@ -1212,10 +1248,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 		if (!err)
 			err = mmc_erase(card, from, nr, arg);
 	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
-	if (!err)
+	if (err)
+		status = BLK_STS_IOERR;
+	else
 		mmc_blk_reset_success(md, type);
 fail:
-	blk_end_request(req, err, blk_rq_bytes(req));
+	blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1225,9 +1263,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
 	int err = 0, type = MMC_BLK_SECDISCARD;
+	blk_status_t status = BLK_STS_OK;
 
 	if (!(mmc_can_secure_erase_trim(card))) {
-		err = -EOPNOTSUPP;
+		status = BLK_STS_NOTSUPP;
 		goto out;
 	}
 
@@ -1254,8 +1293,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	err = mmc_erase(card, from, nr, arg);
 	if (err == -EIO)
 		goto out_retry;
-	if (err)
+	if (err) {
+		status = BLK_STS_IOERR;
 		goto out;
+	}
 
 	if (arg == MMC_SECURE_TRIM1_ARG) {
 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
@@ -1270,8 +1311,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
 		if (err == -EIO)
 			goto out_retry;
-		if (err)
+		if (err) {
+			status = BLK_STS_IOERR;
 			goto out;
+		}
 	}
 
 out_retry:
@@ -1280,7 +1323,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	if (!err)
 		mmc_blk_reset_success(md, type);
 out:
-	blk_end_request(req, err, blk_rq_bytes(req));
+	blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1290,10 +1333,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
 	int ret = 0;
 
 	ret = mmc_flush_cache(card);
-	if (ret)
-		ret = -EIO;
-
-	blk_end_request_all(req, ret);
+	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 /*
@@ -1324,16 +1364,25 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
 	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
 	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
+	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
 	 R1_CC_ERROR |		/* Card controller error */		\
 	 R1_ERROR)		/* General/unknown error */
 
+static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+{
+	if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
+		cmd->error = -EIO;
+
+	return cmd->error;
+}
+
 static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 					     struct mmc_async_req *areq)
 {
 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 						    areq);
 	struct mmc_blk_request *brq = &mq_mrq->brq;
-	struct request *req = mq_mrq->req;
+	struct request *req = mmc_queue_req_to_req(mq_mrq);
 	int need_retune = card->host->need_retune;
 	bool ecc_err = false;
 	bool gen_err = false;
@@ -1348,7 +1397,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 	 * stop.error indicates a problem with the stop command.  Data
 	 * may have been transferred, or may still be transferring.
 	 */
-	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+	if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
 	    brq->data.error) {
 		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
 		case ERR_RETRY:
@@ -1402,7 +1451,8 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 		return MMC_BLK_RETRY;
 	}
 
-	if (brq->data.error) {
+	/* Some errors (ECC) are flagged on the next command, so check stop, too */
+	if (brq->data.error || brq->stop.error) {
 		if (need_retune && !brq->retune_retry_done) {
 			pr_debug("%s: retrying because a re-tune was needed\n",
 				 req->rq_disk->disk_name);
@@ -1410,7 +1460,7 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
 			return MMC_BLK_RETRY;
 		}
 		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
-		       req->rq_disk->disk_name, brq->data.error,
+		       req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
 		       (unsigned)blk_rq_pos(req),
 		       (unsigned)blk_rq_sectors(req),
 		       brq->cmd.resp[0], brq->stop.resp[0]);
@@ -1440,7 +1490,7 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 	struct mmc_blk_data *md = mq->blkdata;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mqrq->brq;
-	struct request *req = mqrq->req;
+	struct request *req = mmc_queue_req_to_req(mqrq);
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
@@ -1545,7 +1595,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 {
 	u32 readcmd, writecmd;
 	struct mmc_blk_request *brq = &mqrq->brq;
-	struct request *req = mqrq->req;
+	struct request *req = mmc_queue_req_to_req(mqrq);
 	struct mmc_blk_data *md = mq->blkdata;
 	bool do_rel_wr, do_data_tag;
 
@@ -1641,8 +1691,8 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
 {
 	if (mmc_card_removed(card))
 		req->rq_flags |= RQF_QUIET;
-	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
-	mmc_queue_req_free(mq, mqrq);
+	while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
+	mq->qcnt--;
 }
 
 /**
@@ -1661,8 +1711,8 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
 	 */
 	if (mmc_card_removed(mq->card)) {
 		req->rq_flags |= RQF_QUIET;
-		blk_end_request_all(req, -EIO);
-		mmc_queue_req_free(mq, mqrq);
+		blk_end_request_all(req, BLK_STS_IOERR);
+		mq->qcnt--; /* FIXME: just set to 0? */
 		return;
 	}
 	/* Else proceed and try to restart the current async request */
@@ -1685,12 +1735,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 	bool req_pending = true;
 
 	if (new_req) {
-		mqrq_cur = mmc_queue_req_find(mq, new_req);
-		if (!mqrq_cur) {
-			WARN_ON(1);
-			mmc_blk_requeue(mq->queue, new_req);
-			new_req = NULL;
-		}
+		mqrq_cur = req_to_mmc_queue_req(new_req);
+		mq->qcnt++;
 	}
 
 	if (!mq->qcnt)
@@ -1731,7 +1777,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		 */
 		mq_rq =	container_of(old_areq, struct mmc_queue_req, areq);
 		brq = &mq_rq->brq;
-		old_req = mq_rq->req;
+		old_req = mmc_queue_req_to_req(mq_rq);
 		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
@@ -1743,7 +1789,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			 */
 			mmc_blk_reset_success(md, type);
 
-			req_pending = blk_end_request(old_req, 0,
+			req_pending = blk_end_request(old_req, BLK_STS_OK,
 						      brq->data.bytes_xfered);
 			/*
 			 * If the blk_end_request function returns non-zero even
@@ -1764,12 +1810,12 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 				if (req_pending)
 					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
 				else
-					mmc_queue_req_free(mq, mq_rq);
+					mq->qcnt--;
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
 			if (!req_pending) {
-				mmc_queue_req_free(mq, mq_rq);
+				mq->qcnt--;
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
@@ -1811,10 +1857,10 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 			 * time, so we only reach here after trying to
 			 * read a single sector.
 			 */
-			req_pending = blk_end_request(old_req, -EIO,
+			req_pending = blk_end_request(old_req, BLK_STS_IOERR,
 						      brq->data.blksz);
 			if (!req_pending) {
-				mmc_queue_req_free(mq, mq_rq);
+				mq->qcnt--;
 				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
 				return;
 			}
@@ -1844,7 +1890,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		}
 	} while (req_pending);
 
-	mmc_queue_req_free(mq, mq_rq);
+	mq->qcnt--;
 }
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
@@ -1860,28 +1906,59 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
 		if (req) {
-			blk_end_request_all(req, -EIO);
+			blk_end_request_all(req, BLK_STS_IOERR);
 		}
 		goto out;
 	}
 
-	if (req && req_op(req) == REQ_OP_DISCARD) {
-		/* complete ongoing async transfer before issuing discard */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
-		/* complete ongoing async transfer before issuing secure erase*/
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_secdiscard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
-		/* complete ongoing async transfer before issuing flush */
-		if (mq->qcnt)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		mmc_blk_issue_flush(mq, req);
+	if (req) {
+		switch (req_op(req)) {
+		case REQ_OP_DRV_IN:
+		case REQ_OP_DRV_OUT:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * ioctl()s
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_drv_op(mq, req);
+			break;
+		case REQ_OP_DISCARD:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * discard.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_discard_rq(mq, req);
+			break;
+		case REQ_OP_SECURE_ERASE:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * secure erase.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_secdiscard_rq(mq, req);
+			break;
+		case REQ_OP_FLUSH:
+			/*
+			 * Complete ongoing async transfer before issuing
+			 * flush.
+			 */
+			if (mq->qcnt)
+				mmc_blk_issue_rw_rq(mq, NULL);
+			mmc_blk_issue_flush(mq, req);
+			break;
+		default:
+			/* Normal request, just issue it */
+			mmc_blk_issue_rw_rq(mq, req);
+			card->host->context_info.is_waiting_last_req = false;
+			break;
+		}
 	} else {
-		mmc_blk_issue_rw_rq(mq, req);
+		/* No request, flushing the pipeline with NULL */
+		mmc_blk_issue_rw_rq(mq, NULL);
 		card->host->context_info.is_waiting_last_req = false;
 	}
 
@@ -2166,7 +2243,6 @@ static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
 	char cap_str[10];
-	int ret;
 
 	/*
 	 * Check that the card supports the command class(es) we need.
@@ -2176,15 +2252,9 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
-	ret = mmc_queue_alloc_shared_queue(card);
-	if (ret)
-		return ret;
-
 	md = mmc_blk_alloc(card);
-	if (IS_ERR(md)) {
-		mmc_queue_free_shared_queue(card);
+	if (IS_ERR(md))
 		return PTR_ERR(md);
-	}
 
 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
@@ -2222,7 +2292,6 @@ static int mmc_blk_probe(struct mmc_card *card)
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
-	mmc_queue_free_shared_queue(card);
 	return 0;
 }
 
@@ -2240,7 +2309,6 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
-	mmc_queue_free_shared_queue(card);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 82c45dd..2643126 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -53,12 +53,6 @@
 /* If the device is not responding */
 #define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
 
-/*
- * Background operations can take a long time, depending on the housekeeping
- * operations the card has to perform.
- */
-#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
-
 /* The max erase timeout, used when host->max_busy_timeout isn't specified */
 #define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
 
@@ -362,74 +356,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	return 0;
 }
 
-/**
- *	mmc_start_bkops - start BKOPS for supported cards
- *	@card: MMC card to start BKOPS
- *	@form_exception: A flag to indicate if this function was
- *			 called due to an exception raised by the card
- *
- *	Start background operations whenever requested.
- *	When the urgent BKOPS bit is set in a R1 command response
- *	then background operations should be started immediately.
-*/
-void mmc_start_bkops(struct mmc_card *card, bool from_exception)
-{
-	int err;
-	int timeout;
-	bool use_busy_signal;
-
-	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
-		return;
-
-	err = mmc_read_bkops_status(card);
-	if (err) {
-		pr_err("%s: Failed to read bkops status: %d\n",
-		       mmc_hostname(card->host), err);
-		return;
-	}
-
-	if (!card->ext_csd.raw_bkops_status)
-		return;
-
-	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
-	    from_exception)
-		return;
-
-	mmc_claim_host(card->host);
-	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
-		timeout = MMC_BKOPS_MAX_TIMEOUT;
-		use_busy_signal = true;
-	} else {
-		timeout = 0;
-		use_busy_signal = false;
-	}
-
-	mmc_retune_hold(card->host);
-
-	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			EXT_CSD_BKOPS_START, 1, timeout, 0,
-			use_busy_signal, true, false);
-	if (err) {
-		pr_warn("%s: Error %d starting bkops\n",
-			mmc_hostname(card->host), err);
-		mmc_retune_release(card->host);
-		goto out;
-	}
-
-	/*
-	 * For urgent bkops status (LEVEL_2 and more)
-	 * bkops executed synchronously, otherwise
-	 * the operation is in progress
-	 */
-	if (!use_busy_signal)
-		mmc_card_set_doing_bkops(card);
-	else
-		mmc_retune_release(card->host);
-out:
-	mmc_release_host(card->host);
-}
-EXPORT_SYMBOL(mmc_start_bkops);
-
 /*
  * mmc_wait_data_done() - done callback for data request
  * @mrq: done data request
@@ -749,71 +675,6 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 EXPORT_SYMBOL(mmc_wait_for_req);
 
 /**
- *	mmc_interrupt_hpi - Issue for High priority Interrupt
- *	@card: the MMC card associated with the HPI transfer
- *
- *	Issued High Priority Interrupt, and check for card status
- *	until out-of prg-state.
- */
-int mmc_interrupt_hpi(struct mmc_card *card)
-{
-	int err;
-	u32 status;
-	unsigned long prg_wait;
-
-	if (!card->ext_csd.hpi_en) {
-		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
-		return 1;
-	}
-
-	mmc_claim_host(card->host);
-	err = mmc_send_status(card, &status);
-	if (err) {
-		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
-		goto out;
-	}
-
-	switch (R1_CURRENT_STATE(status)) {
-	case R1_STATE_IDLE:
-	case R1_STATE_READY:
-	case R1_STATE_STBY:
-	case R1_STATE_TRAN:
-		/*
-		 * In idle and transfer states, HPI is not needed and the caller
-		 * can issue the next intended command immediately
-		 */
-		goto out;
-	case R1_STATE_PRG:
-		break;
-	default:
-		/* In all other states, it's illegal to issue HPI */
-		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
-			mmc_hostname(card->host), R1_CURRENT_STATE(status));
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = mmc_send_hpi_cmd(card, &status);
-	if (err)
-		goto out;
-
-	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
-	do {
-		err = mmc_send_status(card, &status);
-
-		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
-			break;
-		if (time_after(jiffies, prg_wait))
-			err = -ETIMEDOUT;
-	} while (!err);
-
-out:
-	mmc_release_host(card->host);
-	return err;
-}
-EXPORT_SYMBOL(mmc_interrupt_hpi);
-
-/**
  *	mmc_wait_for_cmd - start a command and wait for completion
  *	@host: MMC host to start command
  *	@cmd: MMC command to start
@@ -843,53 +704,6 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
 EXPORT_SYMBOL(mmc_wait_for_cmd);
 
 /**
- *	mmc_stop_bkops - stop ongoing BKOPS
- *	@card: MMC card to check BKOPS
- *
- *	Send HPI command to stop ongoing background operations to
- *	allow rapid servicing of foreground operations, e.g. read/
- *	writes. Wait until the card comes out of the programming state
- *	to avoid errors in servicing read/write requests.
- */
-int mmc_stop_bkops(struct mmc_card *card)
-{
-	int err = 0;
-
-	err = mmc_interrupt_hpi(card);
-
-	/*
-	 * If err is EINVAL, we can't issue an HPI.
-	 * It should complete the BKOPS.
-	 */
-	if (!err || (err == -EINVAL)) {
-		mmc_card_clr_doing_bkops(card);
-		mmc_retune_release(card->host);
-		err = 0;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(mmc_stop_bkops);
-
-int mmc_read_bkops_status(struct mmc_card *card)
-{
-	int err;
-	u8 *ext_csd;
-
-	mmc_claim_host(card->host);
-	err = mmc_get_ext_csd(card, &ext_csd);
-	mmc_release_host(card->host);
-	if (err)
-		return err;
-
-	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
-	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
-	kfree(ext_csd);
-	return 0;
-}
-EXPORT_SYMBOL(mmc_read_bkops_status);
-
-/**
  *	mmc_set_data_timeout - set the timeout for a data command
  *	@data: data phase for command
  *	@card: the MMC card associated with the data transfer
@@ -2597,6 +2411,8 @@ EXPORT_SYMBOL(mmc_set_blockcount);
 
 static void mmc_hw_reset_for_init(struct mmc_host *host)
 {
+	mmc_pwrseq_reset(host);
+
 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
 		return;
 	host->ops->hw_reset(host);
@@ -2836,8 +2652,11 @@ void mmc_stop_host(struct mmc_host *host)
 	host->removed = 1;
 	spin_unlock_irqrestore(&host->lock, flags);
 #endif
-	if (host->slot.cd_irq >= 0)
+	if (host->slot.cd_irq >= 0) {
+		if (host->slot.cd_wake_enabled)
+			disable_irq_wake(host->slot.cd_irq);
 		disable_irq(host->slot.cd_irq);
+	}
 
 	host->rescan_disable = 1;
 	cancel_delayed_work_sync(&host->detect);
@@ -2913,27 +2732,6 @@ int mmc_power_restore_host(struct mmc_host *host)
 }
 EXPORT_SYMBOL(mmc_power_restore_host);
 
-/*
- * Flush the cache to the non-volatile storage.
- */
-int mmc_flush_cache(struct mmc_card *card)
-{
-	int err = 0;
-
-	if (mmc_card_mmc(card) &&
-			(card->ext_csd.cache_size > 0) &&
-			(card->ext_csd.cache_ctrl & 1)) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_FLUSH_CACHE, 1, 0);
-		if (err)
-			pr_err("%s: cache flush error %d\n",
-					mmc_hostname(card->host), err);
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(mmc_flush_cache);
-
 #ifdef CONFIG_PM_SLEEP
 /* Do the card removal on suspend if card is assumed removeable
  * Do that in pm notifier while userspace isn't yet frozen, so we will be able
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 3f8c85d..1503412 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -30,6 +30,7 @@
 #include "host.h"
 #include "slot-gpio.h"
 #include "pwrseq.h"
+#include "sdio_ops.h"
 
 #define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
 
@@ -176,19 +177,17 @@ static void mmc_retune_timer(unsigned long data)
  */
 int mmc_of_parse(struct mmc_host *host)
 {
-	struct device_node *np;
+	struct device *dev = host->parent;
 	u32 bus_width;
 	int ret;
 	bool cd_cap_invert, cd_gpio_invert = false;
 	bool ro_cap_invert, ro_gpio_invert = false;
 
-	if (!host->parent || !host->parent->of_node)
+	if (!dev || !dev_fwnode(dev))
 		return 0;
 
-	np = host->parent->of_node;
-
 	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
-	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
 		dev_dbg(host->parent,
 			"\"bus-width\" property is missing, assuming 1 bit.\n");
 		bus_width = 1;
@@ -210,7 +209,7 @@ int mmc_of_parse(struct mmc_host *host)
 	}
 
 	/* f_max is obtained from the optional "max-frequency" property */
-	of_property_read_u32(np, "max-frequency", &host->f_max);
+	device_property_read_u32(dev, "max-frequency", &host->f_max);
 
 	/*
 	 * Configure CD and WP pins. They are both by default active low to
@@ -225,12 +224,12 @@ int mmc_of_parse(struct mmc_host *host)
 	 */
 
 	/* Parse Card Detection */
-	if (of_property_read_bool(np, "non-removable")) {
+	if (device_property_read_bool(dev, "non-removable")) {
 		host->caps |= MMC_CAP_NONREMOVABLE;
 	} else {
-		cd_cap_invert = of_property_read_bool(np, "cd-inverted");
+		cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
 
-		if (of_property_read_bool(np, "broken-cd"))
+		if (device_property_read_bool(dev, "broken-cd"))
 			host->caps |= MMC_CAP_NEEDS_POLL;
 
 		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -256,7 +255,7 @@ int mmc_of_parse(struct mmc_host *host)
 	}
 
 	/* Parse Write Protection */
-	ro_cap_invert = of_property_read_bool(np, "wp-inverted");
+	ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
 
 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
 	if (!ret)
@@ -264,64 +263,64 @@ int mmc_of_parse(struct mmc_host *host)
 	else if (ret != -ENOENT && ret != -ENOSYS)
 		return ret;
 
-	if (of_property_read_bool(np, "disable-wp"))
+	if (device_property_read_bool(dev, "disable-wp"))
 		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
 
 	/* See the comment on CD inversion above */
 	if (ro_cap_invert ^ ro_gpio_invert)
 		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
-	if (of_property_read_bool(np, "cap-sd-highspeed"))
+	if (device_property_read_bool(dev, "cap-sd-highspeed"))
 		host->caps |= MMC_CAP_SD_HIGHSPEED;
-	if (of_property_read_bool(np, "cap-mmc-highspeed"))
+	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
 		host->caps |= MMC_CAP_MMC_HIGHSPEED;
-	if (of_property_read_bool(np, "sd-uhs-sdr12"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
 		host->caps |= MMC_CAP_UHS_SDR12;
-	if (of_property_read_bool(np, "sd-uhs-sdr25"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
 		host->caps |= MMC_CAP_UHS_SDR25;
-	if (of_property_read_bool(np, "sd-uhs-sdr50"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
 		host->caps |= MMC_CAP_UHS_SDR50;
-	if (of_property_read_bool(np, "sd-uhs-sdr104"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
 		host->caps |= MMC_CAP_UHS_SDR104;
-	if (of_property_read_bool(np, "sd-uhs-ddr50"))
+	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
 		host->caps |= MMC_CAP_UHS_DDR50;
-	if (of_property_read_bool(np, "cap-power-off-card"))
+	if (device_property_read_bool(dev, "cap-power-off-card"))
 		host->caps |= MMC_CAP_POWER_OFF_CARD;
-	if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
 		host->caps |= MMC_CAP_HW_RESET;
-	if (of_property_read_bool(np, "cap-sdio-irq"))
+	if (device_property_read_bool(dev, "cap-sdio-irq"))
 		host->caps |= MMC_CAP_SDIO_IRQ;
-	if (of_property_read_bool(np, "full-pwr-cycle"))
+	if (device_property_read_bool(dev, "full-pwr-cycle"))
 		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
-	if (of_property_read_bool(np, "keep-power-in-suspend"))
+	if (device_property_read_bool(dev, "keep-power-in-suspend"))
 		host->pm_caps |= MMC_PM_KEEP_POWER;
-	if (of_property_read_bool(np, "wakeup-source") ||
-	    of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+	if (device_property_read_bool(dev, "wakeup-source") ||
+	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
 		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-	if (of_property_read_bool(np, "mmc-ddr-3_3v"))
+	if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
 		host->caps |= MMC_CAP_3_3V_DDR;
-	if (of_property_read_bool(np, "mmc-ddr-1_8v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
 		host->caps |= MMC_CAP_1_8V_DDR;
-	if (of_property_read_bool(np, "mmc-ddr-1_2v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
 		host->caps |= MMC_CAP_1_2V_DDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
 		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
 		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
 		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
 		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
 		host->caps2 |= MMC_CAP2_HS400_ES;
-	if (of_property_read_bool(np, "no-sdio"))
+	if (device_property_read_bool(dev, "no-sdio"))
 		host->caps2 |= MMC_CAP2_NO_SDIO;
-	if (of_property_read_bool(np, "no-sd"))
+	if (device_property_read_bool(dev, "no-sd"))
 		host->caps2 |= MMC_CAP2_NO_SD;
-	if (of_property_read_bool(np, "no-mmc"))
+	if (device_property_read_bool(dev, "no-mmc"))
 		host->caps2 |= MMC_CAP2_NO_MMC;
 
-	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
 	if (host->dsr_req && (host->dsr & ~0xffff)) {
 		dev_err(host->parent,
 			"device tree specified broken value for DSR: 0x%x, ignoring\n",
@@ -379,6 +378,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
+	INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
 	setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
 
 	/*
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2c87ded..4ffea14 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -27,6 +27,7 @@
 #include "mmc_ops.h"
 #include "quirks.h"
 #include "sd_ops.h"
+#include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS	500
 
@@ -1555,10 +1556,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 	/*
 	 * Fetch CID from card.
 	 */
-	if (mmc_host_is_spi(host))
-		err = mmc_send_cid(host, cid);
-	else
-		err = mmc_all_send_cid(host, cid);
+	err = mmc_send_cid(host, cid);
 	if (err)
 		goto err;
 
@@ -1653,12 +1651,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		mmc_set_erase_size(card);
 	}
 
-	/*
-	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
-	 * bit.  This bit will be lost every time after a reset or power off.
-	 */
-	if (card->ext_csd.partition_setting_completed ||
-	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
+	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
+	if (card->ext_csd.rev >= 3) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_ERASE_GROUP_DEF, 1,
 				 card->ext_csd.generic_cmd6_time);
@@ -2096,7 +2090,7 @@ static int mmc_runtime_resume(struct mmc_host *host)
 	return 0;
 }
 
-int mmc_can_reset(struct mmc_card *card)
+static int mmc_can_reset(struct mmc_card *card)
 {
 	u8 rst_n_function;
 
@@ -2105,7 +2099,6 @@ int mmc_can_reset(struct mmc_card *card)
 		return 0;
 	return 1;
 }
-EXPORT_SYMBOL(mmc_can_reset);
 
 static int mmc_reset(struct mmc_host *host)
 {
@@ -2127,6 +2120,7 @@ static int mmc_reset(struct mmc_host *host)
 	} else {
 		/* Do a brute force power cycle */
 		mmc_power_cycle(host, card->ocr);
+		mmc_pwrseq_reset(host);
 	}
 	return mmc_init_card(host, card->ocr, card);
 }
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 78f75f0..5f7c592 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -19,6 +19,7 @@
 #include <linux/mmc/mmc.h>
 
 #include "core.h"
+#include "card.h"
 #include "host.h"
 #include "mmc_ops.h"
 
@@ -54,7 +55,7 @@ static const u8 tuning_blk_pattern_8bit[] = {
 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 };
 
-int mmc_send_status(struct mmc_card *card, u32 *status)
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
 {
 	int err;
 	struct mmc_command cmd = {};
@@ -64,7 +65,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
 		cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 
-	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
+	err = mmc_wait_for_cmd(card->host, &cmd, retries);
 	if (err)
 		return err;
 
@@ -76,6 +77,12 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__mmc_send_status);
+
+int mmc_send_status(struct mmc_card *card, u32 *status)
+{
+	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
+}
 
 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 {
@@ -200,24 +207,6 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 	return err;
 }
 
-int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
-{
-	int err;
-	struct mmc_command cmd = {};
-
-	cmd.opcode = MMC_ALL_SEND_CID;
-	cmd.arg = 0;
-	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
-
-	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
-	if (err)
-		return err;
-
-	memcpy(cid, cmd.resp, sizeof(u32) * 4);
-
-	return 0;
-}
-
 int mmc_set_relative_addr(struct mmc_card *card)
 {
 	struct mmc_command cmd = {};
@@ -302,15 +291,11 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 	return 0;
 }
 
-int mmc_send_csd(struct mmc_card *card, u32 *csd)
+static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
 {
 	int ret, i;
 	__be32 *csd_tmp;
 
-	if (!mmc_host_is_spi(card->host))
-		return mmc_send_cxd_native(card->host, card->rca << 16,
-				csd, MMC_SEND_CSD);
-
 	csd_tmp = kzalloc(16, GFP_KERNEL);
 	if (!csd_tmp)
 		return -ENOMEM;
@@ -327,18 +312,20 @@ int mmc_send_csd(struct mmc_card *card, u32 *csd)
 	return ret;
 }
 
-int mmc_send_cid(struct mmc_host *host, u32 *cid)
+int mmc_send_csd(struct mmc_card *card, u32 *csd)
+{
+	if (mmc_host_is_spi(card->host))
+		return mmc_spi_send_csd(card, csd);
+
+	return mmc_send_cxd_native(card->host, card->rca << 16,	csd,
+				MMC_SEND_CSD);
+}
+
+static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
 {
 	int ret, i;
 	__be32 *cid_tmp;
 
-	if (!mmc_host_is_spi(host)) {
-		if (!host->card)
-			return -EINVAL;
-		return mmc_send_cxd_native(host, host->card->rca << 16,
-				cid, MMC_SEND_CID);
-	}
-
 	cid_tmp = kzalloc(16, GFP_KERNEL);
 	if (!cid_tmp)
 		return -ENOMEM;
@@ -355,6 +342,14 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)
 	return ret;
 }
 
+int mmc_send_cid(struct mmc_host *host, u32 *cid)
+{
+	if (mmc_host_is_spi(host))
+		return mmc_spi_send_cid(host, cid);
+
+	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
+}
+
 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 {
 	int err;
@@ -800,7 +795,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
 	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 }
 
-int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
 {
 	struct mmc_command cmd = {};
 	unsigned int opcode;
@@ -834,11 +829,208 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
 	return 0;
 }
 
+/**
+ *	mmc_interrupt_hpi - Issue a High Priority Interrupt
+ *	@card: the MMC card associated with the HPI transfer
+ *
+ *	Issue a High Priority Interrupt, then poll the card status
+ *	until it leaves the programming state.
+ */
+int mmc_interrupt_hpi(struct mmc_card *card)
+{
+	int err;
+	u32 status;
+	unsigned long prg_wait;
+
+	if (!card->ext_csd.hpi_en) {
+		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+		return 1;
+	}
+
+	mmc_claim_host(card->host);
+	err = mmc_send_status(card, &status);
+	if (err) {
+		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+		goto out;
+	}
+
+	switch (R1_CURRENT_STATE(status)) {
+	case R1_STATE_IDLE:
+	case R1_STATE_READY:
+	case R1_STATE_STBY:
+	case R1_STATE_TRAN:
+		/*
+		 * In idle and transfer states, HPI is not needed and the caller
+		 * can issue the next intended command immediately
+		 */
+		goto out;
+	case R1_STATE_PRG:
+		break;
+	default:
+		/* In all other states, it's illegal to issue HPI */
+		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+			mmc_hostname(card->host), R1_CURRENT_STATE(status));
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = mmc_send_hpi_cmd(card, &status);
+	if (err)
+		goto out;
+
+	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
+	do {
+		err = mmc_send_status(card, &status);
+
+		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+			break;
+		if (time_after(jiffies, prg_wait))
+			err = -ETIMEDOUT;
+	} while (!err);
+
+out:
+	mmc_release_host(card->host);
+	return err;
+}
+
 int mmc_can_ext_csd(struct mmc_card *card)
 {
 	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 }
 
+/**
+ *	mmc_stop_bkops - stop ongoing BKOPS
+ *	@card: MMC card to check BKOPS
+ *
+ *	Send HPI command to stop ongoing background operations to
+ *	allow rapid servicing of foreground operations, e.g. read/
+ *	writes. Wait until the card comes out of the programming state
+ *	to avoid errors in servicing read/write requests.
+ */
+int mmc_stop_bkops(struct mmc_card *card)
+{
+	int err = 0;
+
+	err = mmc_interrupt_hpi(card);
+
+	/*
+	 * If err is EINVAL, we can't issue an HPI.
+	 * It should complete the BKOPS.
+	 */
+	if (!err || (err == -EINVAL)) {
+		mmc_card_clr_doing_bkops(card);
+		mmc_retune_release(card->host);
+		err = 0;
+	}
+
+	return err;
+}
+
+static int mmc_read_bkops_status(struct mmc_card *card)
+{
+	int err;
+	u8 *ext_csd;
+
+	mmc_claim_host(card->host);
+	err = mmc_get_ext_csd(card, &ext_csd);
+	mmc_release_host(card->host);
+	if (err)
+		return err;
+
+	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
+	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+	kfree(ext_csd);
+	return 0;
+}
+
+/**
+ *	mmc_start_bkops - start BKOPS for supported cards
+ *	@card: MMC card to start BKOPS
+ *	@from_exception: A flag to indicate if this function was
+ *			 called due to an exception raised by the card
+ *
+ *	Start background operations whenever requested.
+ *	When the urgent BKOPS bit is set in an R1 command response
+ *	then background operations should be started immediately.
+*/
+void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+{
+	int err;
+	int timeout;
+	bool use_busy_signal;
+
+	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+		return;
+
+	err = mmc_read_bkops_status(card);
+	if (err) {
+		pr_err("%s: Failed to read bkops status: %d\n",
+		       mmc_hostname(card->host), err);
+		return;
+	}
+
+	if (!card->ext_csd.raw_bkops_status)
+		return;
+
+	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
+	    from_exception)
+		return;
+
+	mmc_claim_host(card->host);
+	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
+		timeout = MMC_OPS_TIMEOUT_MS;
+		use_busy_signal = true;
+	} else {
+		timeout = 0;
+		use_busy_signal = false;
+	}
+
+	mmc_retune_hold(card->host);
+
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			EXT_CSD_BKOPS_START, 1, timeout, 0,
+			use_busy_signal, true, false);
+	if (err) {
+		pr_warn("%s: Error %d starting bkops\n",
+			mmc_hostname(card->host), err);
+		mmc_retune_release(card->host);
+		goto out;
+	}
+
+	/*
+	 * For urgent bkops status (LEVEL_2 and higher) bkops is
+	 * executed synchronously; otherwise the operation is
+	 * in progress.
+	 */
+	if (!use_busy_signal)
+		mmc_card_set_doing_bkops(card);
+	else
+		mmc_retune_release(card->host);
+out:
+	mmc_release_host(card->host);
+}
+
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+	int err = 0;
+
+	if (mmc_card_mmc(card) &&
+			(card->ext_csd.cache_size > 0) &&
+			(card->ext_csd.cache_ctrl & 1)) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				EXT_CSD_FLUSH_CACHE, 1, 0);
+		if (err)
+			pr_err("%s: cache flush error %d\n",
+					mmc_hostname(card->host), err);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
 static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
 {
 	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 978bd2e..a1390d4 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -22,15 +22,14 @@ int mmc_deselect_cards(struct mmc_host *host);
 int mmc_set_dsr(struct mmc_host *host);
 int mmc_go_idle(struct mmc_host *host);
 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
-int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
 int mmc_set_relative_addr(struct mmc_card *card);
 int mmc_send_csd(struct mmc_card *card, u32 *csd);
+int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
 int mmc_send_status(struct mmc_card *card, u32 *status);
 int mmc_send_cid(struct mmc_host *host, u32 *cid);
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
 int mmc_interrupt_hpi(struct mmc_card *card);
 int mmc_can_ext_csd(struct mmc_card *card);
 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
@@ -42,9 +41,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 		unsigned int timeout_ms);
 int mmc_stop_bkops(struct mmc_card *card);
-int mmc_read_bkops_status(struct mmc_card *card);
 void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-int mmc_can_reset(struct mmc_card *card);
 int mmc_flush_cache(struct mmc_card *card);
 int mmc_cmdq_enable(struct mmc_card *card);
 int mmc_cmdq_disable(struct mmc_card *card);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index fd1b4b8..7a304a6 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3220,8 +3220,6 @@ static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
 	df = kmalloc(sizeof(*df), GFP_KERNEL);
 	if (!df) {
 		debugfs_remove(file);
-		dev_err(&card->dev,
-			"Can't allocate memory for internal usage.\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 9386c47..e3ad30f 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -76,6 +76,14 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
 		pwrseq->ops->power_off(host);
 }
 
+void mmc_pwrseq_reset(struct mmc_host *host)
+{
+	struct mmc_pwrseq *pwrseq = host->pwrseq;
+
+	if (pwrseq && pwrseq->ops->reset)
+		pwrseq->ops->reset(host);
+}
+
 void mmc_pwrseq_free(struct mmc_host *host)
 {
 	struct mmc_pwrseq *pwrseq = host->pwrseq;
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 39c911a..819386f 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -18,6 +18,7 @@ struct mmc_pwrseq_ops {
 	void (*pre_power_on)(struct mmc_host *host);
 	void (*post_power_on)(struct mmc_host *host);
 	void (*power_off)(struct mmc_host *host);
+	void (*reset)(struct mmc_host *host);
 };
 
 struct mmc_pwrseq {
@@ -36,6 +37,7 @@ int mmc_pwrseq_alloc(struct mmc_host *host);
 void mmc_pwrseq_pre_power_on(struct mmc_host *host);
 void mmc_pwrseq_post_power_on(struct mmc_host *host);
 void mmc_pwrseq_power_off(struct mmc_host *host);
+void mmc_pwrseq_reset(struct mmc_host *host);
 void mmc_pwrseq_free(struct mmc_host *host);
 
 #else
@@ -49,6 +51,7 @@ static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
 static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
 static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
 static inline void mmc_pwrseq_power_off(struct mmc_host *host) {}
+static inline void mmc_pwrseq_reset(struct mmc_host *host) {}
 static inline void mmc_pwrseq_free(struct mmc_host *host) {}
 
 #endif
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index adc9c0c..efb8a79 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -56,7 +56,7 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
 }
 
 static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
-	.post_power_on = mmc_pwrseq_emmc_reset,
+	.reset = mmc_pwrseq_emmc_reset,
 };
 
 static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 5c37b6b..affa737 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,35 +40,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
-struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
-					 struct request *req)
-{
-	struct mmc_queue_req *mqrq;
-	int i = ffz(mq->qslots);
-
-	if (i >= mq->qdepth)
-		return NULL;
-
-	mqrq = &mq->mqrq[i];
-	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
-		test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = req;
-	mq->qcnt += 1;
-	__set_bit(mqrq->task_id, &mq->qslots);
-
-	return mqrq;
-}
-
-void mmc_queue_req_free(struct mmc_queue *mq,
-			struct mmc_queue_req *mqrq)
-{
-	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
-		!test_bit(mqrq->task_id, &mq->qslots));
-	mqrq->req = NULL;
-	mq->qcnt -= 1;
-	__clear_bit(mqrq->task_id, &mq->qslots);
-}
-
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -133,7 +104,7 @@ static void mmc_request_fn(struct request_queue *q)
 	if (!mq) {
 		while ((req = blk_fetch_request(q)) != NULL) {
 			req->rq_flags |= RQF_QUIET;
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 		}
 		return;
 	}
@@ -149,11 +120,11 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len)
+static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 {
 	struct scatterlist *sg;
 
-	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
 	if (sg)
 		sg_init_table(sg, sg_len);
 
@@ -179,86 +150,11 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++)
-		mmc_queue_req_free_bufs(&mqrq[i]);
-}
-
-static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
-{
-	mmc_queue_reqs_free_bufs(mqrq, qdepth);
-	kfree(mqrq);
-}
-
-static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
-{
-	struct mmc_queue_req *mqrq;
-	int i;
-
-	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
-	if (mqrq) {
-		for (i = 0; i < qdepth; i++)
-			mqrq[i].task_id = i;
-	}
-
-	return mqrq;
-}
-
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
-				       unsigned int bouncesz)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mqrq[i].bounce_buf)
-			return -ENOMEM;
-
-		mqrq[i].sg = mmc_alloc_sg(1);
-		if (!mqrq[i].sg)
-			return -ENOMEM;
-
-		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
-		if (!mqrq[i].bounce_sg)
-			return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
-				   unsigned int bouncesz)
-{
-	int ret;
-
-	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
-	if (ret)
-		mmc_queue_reqs_free_bufs(mqrq, qdepth);
-
-	return !ret;
-}
-
 static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 {
 	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
 
-	if (host->max_segs != 1)
+	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
 		return 0;
 
 	if (bouncesz > host->max_req_size)
@@ -273,84 +169,58 @@ static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
 
 	return bouncesz;
 }
-#else
-static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
-					  int qdepth, unsigned int bouncesz)
-{
-	return false;
-}
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+/**
+ * mmc_init_request() - initialize the MMC-specific per-request data
+ * @q: the request queue
+ * @req: the request
+ * @gfp: memory allocation policy
+ */
+static int mmc_init_request(struct request_queue *q, struct request *req,
+			    gfp_t gfp)
 {
-	return 0;
-}
-#endif
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
+	struct mmc_queue *mq = q->queuedata;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
 
-static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
-			       int max_segs)
-{
-	int i;
-
-	for (i = 0; i < qdepth; i++) {
-		mqrq[i].sg = mmc_alloc_sg(max_segs);
-		if (!mqrq[i].sg)
+	if (card->bouncesz) {
+		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
+		if (!mq_rq->bounce_buf)
+			return -ENOMEM;
+		if (card->bouncesz > 512) {
+			mq_rq->sg = mmc_alloc_sg(1, gfp);
+			if (!mq_rq->sg)
+				return -ENOMEM;
+			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
+							gfp);
+			if (!mq_rq->bounce_sg)
+				return -ENOMEM;
+		}
+	} else {
+		mq_rq->bounce_buf = NULL;
+		mq_rq->bounce_sg = NULL;
+		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+		if (!mq_rq->sg)
 			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-void mmc_queue_free_shared_queue(struct mmc_card *card)
+static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
-	if (card->mqrq) {
-		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
-		card->mqrq = NULL;
-	}
-}
+	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
-{
-	struct mmc_host *host = card->host;
-	struct mmc_queue_req *mqrq;
-	unsigned int bouncesz;
-	int ret = 0;
+	/* It is OK to kfree(NULL) so this will be smooth */
+	kfree(mq_rq->bounce_sg);
+	mq_rq->bounce_sg = NULL;
 
-	if (card->mqrq)
-		return -EINVAL;
+	kfree(mq_rq->bounce_buf);
+	mq_rq->bounce_buf = NULL;
 
-	mqrq = mmc_queue_alloc_mqrqs(qdepth);
-	if (!mqrq)
-		return -ENOMEM;
-
-	card->mqrq = mqrq;
-	card->qdepth = qdepth;
-
-	bouncesz = mmc_queue_calc_bouncesz(host);
-
-	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
-		bouncesz = 0;
-		pr_warn("%s: unable to allocate bounce buffers\n",
-			mmc_card_name(card));
-	}
-
-	card->bouncesz = bouncesz;
-
-	if (!bouncesz) {
-		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
-		if (ret)
-			goto out_err;
-	}
-
-	return ret;
-
-out_err:
-	mmc_queue_free_shared_queue(card);
-	return ret;
-}
-
-int mmc_queue_alloc_shared_queue(struct mmc_card *card)
-{
-	return __mmc_queue_alloc_shared_queue(card, 2);
+	kfree(mq_rq->sg);
+	mq_rq->sg = NULL;
 }
 
 /**
@@ -373,13 +243,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request_fn, lock);
+	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
 		return -ENOMEM;
-
-	mq->mqrq = card->mqrq;
-	mq->qdepth = card->qdepth;
+	mq->queue->queue_lock = lock;
+	mq->queue->request_fn = mmc_request_fn;
+	mq->queue->init_rq_fn = mmc_init_request;
+	mq->queue->exit_rq_fn = mmc_exit_request;
+	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
 	mq->queue->queuedata = mq;
+	mq->qcnt = 0;
+	ret = blk_init_allocated_queue(mq->queue);
+	if (ret) {
+		blk_cleanup_queue(mq->queue);
+		return ret;
+	}
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -387,8 +265,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
+	card->bouncesz = mmc_queue_calc_bouncesz(host);
 	if (card->bouncesz) {
-		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
 		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, card->bouncesz);
@@ -413,7 +291,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	return 0;
 
 cleanup_queue:
-	mq->mqrq = NULL;
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -435,7 +312,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mq->mqrq = NULL;
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
@@ -492,12 +368,13 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
+	struct request *req = mmc_queue_req_to_req(mqrq);
 	int i;
 
 	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
 
@@ -519,7 +396,7 @@ void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mqrq->req) != WRITE)
+	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
 		return;
 
 	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
@@ -535,7 +412,7 @@ void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mqrq->req) != READ)
+	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
 		return;
 
 	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 871796c..361b464 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -3,19 +3,25 @@
 
 #include <linux/types.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 
-static inline bool mmc_req_is_special(struct request *req)
+static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
 {
-	return req &&
-		(req_op(req) == REQ_OP_FLUSH ||
-		 req_op(req) == REQ_OP_DISCARD ||
-		 req_op(req) == REQ_OP_SECURE_ERASE);
+	return blk_mq_rq_to_pdu(rq);
+}
+
+struct mmc_queue_req;
+
+static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
+{
+	return blk_mq_rq_from_pdu(mqr);
 }
 
 struct task_struct;
 struct mmc_blk_data;
+struct mmc_blk_ioc_data;
 
 struct mmc_blk_request {
 	struct mmc_request	mrq;
@@ -26,15 +32,27 @@ struct mmc_blk_request {
 	int			retune_retry_done;
 };
 
+/**
+ * enum mmc_drv_op - enumerates the operations in the mmc_queue_req
+ * @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_BOOT_WP: write protect boot partitions
+ */
+enum mmc_drv_op {
+	MMC_DRV_OP_IOCTL,
+	MMC_DRV_OP_BOOT_WP,
+};
+
 struct mmc_queue_req {
-	struct request		*req;
 	struct mmc_blk_request	brq;
 	struct scatterlist	*sg;
 	char			*bounce_buf;
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
-	int			task_id;
+	enum mmc_drv_op		drv_op;
+	int			drv_op_result;
+	struct mmc_blk_ioc_data	**idata;
+	unsigned int		ioc_count;
 };
 
 struct mmc_queue {
@@ -45,14 +63,15 @@ struct mmc_queue {
 	bool			asleep;
 	struct mmc_blk_data	*blkdata;
 	struct request_queue	*queue;
-	struct mmc_queue_req	*mqrq;
-	int			qdepth;
+	/*
+	 * FIXME: this counter is not a very reliable way of keeping
+	 * track of how many requests are ongoing. Switch to just
+	 * letting the block core keep track of requests and per-request
+	 * associated mmc_queue_req data.
+	 */
 	int			qcnt;
-	unsigned long		qslots;
 };
 
-extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
-extern void mmc_queue_free_shared_queue(struct mmc_card *card);
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 			  const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
@@ -66,8 +85,4 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
-extern struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *,
-						struct request *);
-extern void mmc_queue_req_free(struct mmc_queue *, struct mmc_queue_req *);
-
 #endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d109634..a1b0aa1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -294,12 +294,8 @@ static int mmc_read_switch(struct mmc_card *card)
 	err = -EIO;
 
 	status = kmalloc(64, GFP_KERNEL);
-	if (!status) {
-		pr_err("%s: could not allocate a buffer for "
-			"switch capabilities.\n",
-			mmc_hostname(card->host));
+	if (!status)
 		return -ENOMEM;
-	}
 
 	/*
 	 * Find out the card's support bits with a mode 0 operation.
@@ -359,11 +355,8 @@ int mmc_sd_switch_hs(struct mmc_card *card)
 		return 0;
 
 	status = kmalloc(64, GFP_KERNEL);
-	if (!status) {
-		pr_err("%s: could not allocate a buffer for "
-			"switch capabilities.\n", mmc_hostname(card->host));
+	if (!status)
 		return -ENOMEM;
-	}
 
 	err = mmc_sd_switch(card, 1, 0, 1, status);
 	if (err)
@@ -596,11 +589,8 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
 		return 0;
 
 	status = kmalloc(64, GFP_KERNEL);
-	if (!status) {
-		pr_err("%s: could not allocate a buffer for "
-			"switch capabilities.\n", mmc_hostname(card->host));
+	if (!status)
 		return -ENOMEM;
-	}
 
 	/* Set 4-bit bus width */
 	if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
@@ -798,11 +788,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
 		}
 	}
 
-	if (mmc_host_is_spi(host))
-		err = mmc_send_cid(host, cid);
-	else
-		err = mmc_all_send_cid(host, cid);
-
+	err = mmc_send_cid(host, cid);
 	return err;
 }
 
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fae732c..cc43687 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1104,6 +1104,12 @@ int mmc_attach_sdio(struct mmc_host *host)
 	 */
 	if (host->caps & MMC_CAP_POWER_OFF_CARD) {
 		/*
+		 * Do not allow runtime suspend until after SDIO function
+		 * devices are added.
+		 */
+		pm_runtime_get_noresume(&card->dev);
+
+		/*
 		 * Let runtime PM core know our card is active
 		 */
 		err = pm_runtime_set_active(&card->dev);
@@ -1155,19 +1161,23 @@ int mmc_attach_sdio(struct mmc_host *host)
 			goto remove_added;
 	}
 
+	if (host->caps & MMC_CAP_POWER_OFF_CARD)
+		pm_runtime_put(&card->dev);
+
 	mmc_claim_host(host);
 	return 0;
 
 
-remove_added:
-	/* Remove without lock if the device has been added. */
-	mmc_sdio_remove(host);
-	mmc_claim_host(host);
 remove:
-	/* And with lock if it hasn't been added. */
 	mmc_release_host(host);
-	if (host->card)
-		mmc_sdio_remove(host);
+remove_added:
+	/*
+	 * The devices are being deleted so it is not necessary to disable
+	 * runtime PM. Similarly we also don't pm_runtime_put() the SDIO card
+	 * because it needs to be active to remove any function devices that
+	 * were probed, and after that it gets deleted.
+	 */
+	mmc_sdio_remove(host);
 	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 6d4b720..c771843 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -95,12 +95,30 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
 void sdio_run_irqs(struct mmc_host *host)
 {
 	mmc_claim_host(host);
-	host->sdio_irq_pending = true;
-	process_sdio_pending_irqs(host);
+	if (host->sdio_irqs) {
+		host->sdio_irq_pending = true;
+		process_sdio_pending_irqs(host);
+		if (host->ops->ack_sdio_irq)
+			host->ops->ack_sdio_irq(host);
+	}
 	mmc_release_host(host);
 }
 EXPORT_SYMBOL_GPL(sdio_run_irqs);
 
+void sdio_irq_work(struct work_struct *work)
+{
+	struct mmc_host *host =
+		container_of(work, struct mmc_host, sdio_irq_work.work);
+
+	sdio_run_irqs(host);
+}
+
+void sdio_signal_irq(struct mmc_host *host)
+{
+	queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+}
+EXPORT_SYMBOL_GPL(sdio_signal_irq);
+
 static int sdio_irq_thread(void *_host)
 {
 	struct mmc_host *host = _host;
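
sdio_signal_irq() lets a host driver defer SDIO interrupt handling to the
system workqueue instead of the dedicated sdio_irq_thread: the host masks
its card interrupt in hardware, signals the core, and sdio_irq_work() later
runs sdio_run_irqs(), which processes the pending function interrupts and
then unmasks via the host's new ->ack_sdio_irq() callback. A minimal sketch
of the host-side half, modeled on the dw_mmc conversion later in this
series; my_host_irq() and my_mask_card_irq() are hypothetical driver
helpers:

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

/* Hypothetical, driver-specific masking of the card interrupt. */
static void my_mask_card_irq(struct mmc_host *mmc)
{
}

static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* Mask first, then let the core handle the rest from a work item. */
	my_mask_card_irq(mmc);
	sdio_signal_irq(mmc);

	return IRQ_HANDLED;
}

Hosts that use this model also set MMC_CAP2_SDIO_IRQ_NOTHREAD, as dw_mmc
does in dw_mci_init_slot() further down in this series.
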
diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h
index ee35cb4..96945ca 100644
--- a/drivers/mmc/core/sdio_ops.h
+++ b/drivers/mmc/core/sdio_ops.h
@@ -17,6 +17,7 @@
 
 struct mmc_host;
 struct mmc_card;
+struct work_struct;
 
 int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
 int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
@@ -25,6 +26,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
 int sdio_reset(struct mmc_host *host);
 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz);
+void sdio_irq_work(struct work_struct *work);
 
 static inline bool sdio_is_io_busy(u32 opcode, u32 arg)
 {
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index a8450a8..863f1db 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -151,6 +151,8 @@ void mmc_gpiod_request_cd_irq(struct mmc_host *host)
 
 	if (irq < 0)
 		host->caps |= MMC_CAP_NEEDS_POLL;
+	else if ((host->caps & MMC_CAP_CD_WAKE) && !enable_irq_wake(irq))
+		host->slot.cd_wake_enabled = true;
 }
 EXPORT_SYMBOL(mmc_gpiod_request_cd_irq);
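
mmc_gpiod_request_cd_irq() now arms the card-detect IRQ as a system-wakeup
source when the host advertises MMC_CAP_CD_WAKE and enable_irq_wake()
succeeds, recording the result in host->slot.cd_wake_enabled. A host driver
opts in by setting the capability before requesting the CD IRQ; a minimal
sketch, with my_host_setup_cd() standing in for a real driver's probe path:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static void my_host_setup_cd(struct mmc_host *mmc)	/* hypothetical */
{
	/* Allow card insertion/removal to wake the system from suspend. */
	mmc->caps |= MMC_CAP_CD_WAKE;

	/* The core then calls enable_irq_wake() on the CD GPIO IRQ. */
	mmc_gpiod_request_cd_irq(mmc);
}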
 
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2db84dd..5755b69 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -408,11 +408,11 @@
 
 config MMC_ATMELMCI
 	tristate "Atmel SD/MMC Driver (Multimedia Card Interface)"
-	depends on AVR32 || ARCH_AT91
+	depends on ARCH_AT91
 	help
-	  This selects the Atmel Multimedia Card Interface driver. If
-	  you have an AT32 (AVR32) or AT91 platform with a Multimedia
-	  Card slot, say Y or M here.
+	  This selects the Atmel Multimedia Card Interface driver.
+	  If you have an AT91 platform with a Multimedia Card slot,
+	  say Y or M here.
 
 	  If unsure, say N.
 
@@ -571,13 +571,13 @@
 	  T7L66XB and also HTC ASIC3
 
 config MMC_SDHI
-	tristate "SH-Mobile SDHI SD/SDIO controller support"
+	tristate "Renesas SDHI SD/SDIO controller support"
 	depends on SUPERH || ARM || ARM64
 	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	select MMC_TMIO_CORE
 	help
 	  This provides support for the SDHI SD/SDIO controller found in
-	  SuperH and ARM SH-Mobile SoCs
+	  Renesas SuperH, ARM and ARM64 based SoCs
 
 config MMC_CB710
 	tristate "ENE CB710 MMC/SD Interface support"
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 926347c..4d45471 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -36,9 +36,7 @@
 obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o
 obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o
 obj-$(CONFIG_MMC_TMIO_CORE)	+= tmio_mmc_core.o
-tmio_mmc_core-y			:= tmio_mmc_pio.o
-tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI))	+= tmio_mmc_dma.o
-obj-$(CONFIG_MMC_SDHI)		+= sh_mobile_sdhi.o
+obj-$(CONFIG_MMC_SDHI)		+= renesas_sdhi_core.o renesas_sdhi_sys_dmac.o
 obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 388e4a3..97de2d3 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -44,7 +44,7 @@
 #include <asm/unaligned.h>
 
 /*
- * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
+ * Superset of MCI IP registers integrated in Atmel AT91 Processor
  * Registers and bitfields marked with [2] are only available in MCI2
  */
 
@@ -172,13 +172,6 @@
 #define	atmci_writel(port, reg, value)			\
 	__raw_writel((value), (port)->regs + reg)
 
-/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */
-#ifdef CONFIG_AVR32
-#	define ATMCI_PDC_CONNECTED	0
-#else
-#	define ATMCI_PDC_CONNECTED	1
-#endif
-
 #define AUTOSUSPEND_DELAY	50
 
 #define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
@@ -667,10 +660,8 @@ atmci_of_init(struct platform_device *pdev)
 	}
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata) {
-		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
+	if (!pdata)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	for_each_child_of_node(np, cnp) {
 		if (of_property_read_u32(cnp, "reg", &slot_id)) {
@@ -1549,21 +1540,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 		break;
 	default:
-		/*
-		 * TODO: None of the currently available AVR32-based
-		 * boards allow MMC power to be turned off. Implement
-		 * power control when this can be tested properly.
-		 *
-		 * We also need to hook this into the clock management
-		 * somehow so that newly inserted cards aren't
-		 * subjected to a fast clock before we have a chance
-		 * to figure out what the maximum rate is. Currently,
-		 * there's no way to avoid this, and there never will
-		 * be for boards that don't support power control.
-		 */
 		break;
 	}
-
 }
 
 static int atmci_get_ro(struct mmc_host *mmc)
@@ -2464,7 +2442,7 @@ static void atmci_get_cap(struct atmel_mci *host)
 			"version: 0x%x\n", version);
 
 	host->caps.has_dma_conf_reg = 0;
-	host->caps.has_pdc = ATMCI_PDC_CONNECTED;
+	host->caps.has_pdc = 1;
 	host->caps.has_cfg_reg = 0;
 	host->caps.has_cstor_reg = 0;
 	host->caps.has_highspeed = 0;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 1f343a4..abba9a2 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1172,7 +1172,10 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
 		dev_err(dev, "unsupported block size (%d bytes)\n",
 			mrq->data->blksz);
-		mrq->cmd->error = -EINVAL;
+
+		if (mrq->cmd)
+			mrq->cmd->error = -EINVAL;
+
 		mmc_request_done(mmc, mrq);
 		return;
 	}
@@ -1194,7 +1197,10 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
 			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
 			edm);
 		bcm2835_dumpregs(host);
-		mrq->cmd->error = -EILSEQ;
+
+		if (mrq->cmd)
+			mrq->cmd->error = -EILSEQ;
+
 		bcm2835_finish_request(host);
 		mutex_unlock(&host->mutex);
 		return;
@@ -1207,7 +1213,7 @@ static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
 			if (!host->use_busy)
 				bcm2835_finish_command(host);
 		}
-	} else if (bcm2835_send_command(host, mrq->cmd)) {
+	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
 		if (host->data && host->dma_desc) {
 			/* DMA transfer starts now, PIO starts after irq */
 			bcm2835_start_dma(host);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index b8aaf0f..3686d77 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1035,10 +1035,12 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 	 * We only have a 3.3v supply, we cannot support any
 	 * of the UHS modes. We do support the high speed DDR
 	 * modes up to 52MHz.
+	 *
+	 * Disable bounce buffers for max_segs = 1
 	 */
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-		     MMC_CAP_3_3V_DDR;
+		     MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
 
 	if (host->use_sg)
 		mmc->max_segs = 16;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 25691cc..3502679 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -157,8 +157,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
 	 * HOLD register should be bypassed in case there is no phase shift
 	 * applied on CMD/DATA that is sent to the card.
 	 */
-	if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->cur_slot)
-		set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
+	if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->slot)
+		set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 372fb6e..a3f1c2b 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -25,6 +25,7 @@ struct dw_mci_rockchip_priv_data {
 	struct clk		*drv_clk;
 	struct clk		*sample_clk;
 	int			default_sample_phase;
+	int			num_phases;
 };
 
 static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
@@ -133,8 +134,8 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
 	}
 }
 
-#define NUM_PHASES			360
-#define TUNING_ITERATION_TO_PHASE(i)	(DIV_ROUND_UP((i) * 360, NUM_PHASES))
+#define TUNING_ITERATION_TO_PHASE(i, num_phases) \
+		(DIV_ROUND_UP((i) * 360, num_phases))
 
 static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 {
@@ -159,13 +160,15 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 		return -EIO;
 	}
 
-	ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
+	ranges = kmalloc_array(priv->num_phases / 2 + 1,
+			       sizeof(*ranges), GFP_KERNEL);
 	if (!ranges)
 		return -ENOMEM;
 
 	/* Try each phase and extract good ranges */
-	for (i = 0; i < NUM_PHASES; ) {
-		clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
+	for (i = 0; i < priv->num_phases; ) {
+		clk_set_phase(priv->sample_clk,
+			      TUNING_ITERATION_TO_PHASE(i, priv->num_phases));
 
 		v = !mmc_send_tuning(mmc, opcode, NULL);
 
@@ -179,7 +182,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 		if (v) {
 			ranges[range_count-1].end = i;
 			i++;
-		} else if (i == NUM_PHASES - 1) {
+		} else if (i == priv->num_phases - 1) {
 			/* No extra skipping rules if we're at the end */
 			i++;
 		} else {
@@ -188,11 +191,11 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 			 * one since testing bad phases is slow.  Skip
 			 * 20 degrees.
 			 */
-			i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
+			i += DIV_ROUND_UP(20 * priv->num_phases, 360);
 
 			/* Always test the last one */
-			if (i >= NUM_PHASES)
-				i = NUM_PHASES - 1;
+			if (i >= priv->num_phases)
+				i = priv->num_phases - 1;
 		}
 
 		prev_v = v;
@@ -210,7 +213,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 		range_count--;
 	}
 
-	if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
+	if (ranges[0].start == 0 && ranges[0].end == priv->num_phases - 1) {
 		clk_set_phase(priv->sample_clk, priv->default_sample_phase);
 		dev_info(host->dev, "All phases work, using default phase %d.",
 			 priv->default_sample_phase);
@@ -222,7 +225,7 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 		int len = (ranges[i].end - ranges[i].start + 1);
 
 		if (len < 0)
-			len += NUM_PHASES;
+			len += priv->num_phases;
 
 		if (longest_range_len < len) {
 			longest_range_len = len;
@@ -230,25 +233,30 @@ static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 		}
 
 		dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
-			TUNING_ITERATION_TO_PHASE(ranges[i].start),
-			TUNING_ITERATION_TO_PHASE(ranges[i].end),
+			TUNING_ITERATION_TO_PHASE(ranges[i].start,
+						  priv->num_phases),
+			TUNING_ITERATION_TO_PHASE(ranges[i].end,
+						  priv->num_phases),
 			len
 		);
 	}
 
 	dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
-		TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
-		TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
+		TUNING_ITERATION_TO_PHASE(ranges[longest_range].start,
+					  priv->num_phases),
+		TUNING_ITERATION_TO_PHASE(ranges[longest_range].end,
+					  priv->num_phases),
 		longest_range_len
 	);
 
 	middle_phase = ranges[longest_range].start + longest_range_len / 2;
-	middle_phase %= NUM_PHASES;
+	middle_phase %= priv->num_phases;
 	dev_info(host->dev, "Successfully tuned phase to %d\n",
-		 TUNING_ITERATION_TO_PHASE(middle_phase));
+		 TUNING_ITERATION_TO_PHASE(middle_phase, priv->num_phases));
 
 	clk_set_phase(priv->sample_clk,
-		      TUNING_ITERATION_TO_PHASE(middle_phase));
+		      TUNING_ITERATION_TO_PHASE(middle_phase,
+						priv->num_phases));
 
 free:
 	kfree(ranges);
@@ -264,6 +272,10 @@ static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
 	if (!priv)
 		return -ENOMEM;
 
+	if (of_property_read_u32(np, "rockchip,desired-num-phases",
+					&priv->num_phases))
+		priv->num_phases = 360;
+
 	if (of_property_read_u32(np, "rockchip,default-sample-phase",
 					&priv->default_sample_phase))
 		priv->default_sample_phase = 0;
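
The tuning loop above replaces the fixed 360-step sweep with a configurable
phase count read from the optional "rockchip,desired-num-phases" property,
trading tuning resolution for tuning time. A short worked example of the
arithmetic, comparing the default of 360 phases with a hypothetical 36:

#define TUNING_ITERATION_TO_PHASE(i, num_phases) \
		(DIV_ROUND_UP((i) * 360, num_phases))

/*
 * num_phases = 360: iteration i maps to i degrees, and a failing phase
 * skips DIV_ROUND_UP(20 * 360, 360) = 20 iterations, i.e. 20 degrees.
 * num_phases = 36:  iteration i maps to 10 * i degrees, and the same skip
 * is DIV_ROUND_UP(20 * 36, 360) = 2 iterations - again about 20 degrees.
 */
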
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index e45129f..a9dfb26 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -392,7 +392,7 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
 	cmdr = stop->opcode | SDMMC_CMD_STOP |
 		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
 
-	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
+	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
 		cmdr |= SDMMC_CMD_USE_HOLD_REG;
 
 	return cmdr;
@@ -480,7 +480,7 @@ static void dw_mci_dmac_complete_dma(void *arg)
 	if ((host->use_dma == TRANS_MODE_EDMAC) &&
 	    data && (data->flags & MMC_DATA_READ))
 		/* Invalidate cache after read */
-		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
+		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
 				    data->sg,
 				    data->sg_len,
 				    DMA_FROM_DEVICE);
@@ -820,7 +820,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
 
 	/* Flush cache before write */
 	if (host->data->flags & MMC_DATA_WRITE)
-		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
+		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
 				       sg_elems, DMA_TO_DEVICE);
 
 	dma_async_issue_pending(host->dms->ch);
@@ -1282,7 +1282,6 @@ static void __dw_mci_start_request(struct dw_mci *host,
 
 	mrq = slot->mrq;
 
-	host->cur_slot = slot;
 	host->mrq = mrq;
 
 	host->pending_events = 0;
@@ -1621,16 +1620,10 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 
 		if (card->type == MMC_TYPE_SDIO ||
 		    card->type == MMC_TYPE_SD_COMBO) {
-			if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
-				pm_runtime_get_noresume(mmc->parent);
-				set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
-			}
+			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
 			clk_en_a = clk_en_a_old & ~clken_low_pwr;
 		} else {
-			if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
-				pm_runtime_put_noidle(mmc->parent);
-				clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
-			}
+			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
 			clk_en_a = clk_en_a_old | clken_low_pwr;
 		}
 
@@ -1642,9 +1635,8 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 	}
 }
 
-static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
 {
-	struct dw_mci_slot *slot = mmc_priv(mmc);
 	struct dw_mci *host = slot->host;
 	unsigned long irqflags;
 	u32 int_mask;
@@ -1662,6 +1654,27 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
 	spin_unlock_irqrestore(&host->irq_lock, irqflags);
 }
 
+static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci *host = slot->host;
+
+	__dw_mci_enable_sdio_irq(slot, enb);
+
+	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
+	if (enb)
+		pm_runtime_get_noresume(host->dev);
+	else
+		pm_runtime_put_noidle(host->dev);
+}
+
+static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+
+	__dw_mci_enable_sdio_irq(slot, 1);
+}
+
 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1749,7 +1762,7 @@ static bool dw_mci_reset(struct dw_mci *host)
 
 ciu_out:
 	/* After a CTRL reset we need to have CIU set clock registers  */
-	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);
+	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
 
 	return ret;
 }
@@ -1763,6 +1776,7 @@ static const struct mmc_host_ops dw_mci_ops = {
 	.get_cd			= dw_mci_get_cd,
 	.hw_reset               = dw_mci_hw_reset,
 	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
+	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
 	.execute_tuning		= dw_mci_execute_tuning,
 	.card_busy		= dw_mci_card_busy,
 	.start_signal_voltage_switch = dw_mci_switch_voltage,
@@ -1775,11 +1789,11 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
 	__acquires(&host->lock)
 {
 	struct dw_mci_slot *slot;
-	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
+	struct mmc_host	*prev_mmc = host->slot->mmc;
 
 	WARN_ON(host->cmd || host->data);
 
-	host->cur_slot->mrq = NULL;
+	host->slot->mrq = NULL;
 	host->mrq = NULL;
 	if (!list_empty(&host->queue)) {
 		slot = list_entry(host->queue.next,
@@ -1929,7 +1943,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
 			err = dw_mci_command_complete(host, cmd);
 			if (cmd == mrq->sbc && !err) {
 				prev_state = state = STATE_SENDING_CMD;
-				__dw_mci_start_request(host, host->cur_slot,
+				__dw_mci_start_request(host, host->slot,
 						       mrq->cmd);
 				goto unlock;
 			}
@@ -2548,26 +2562,19 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
 
 static void dw_mci_handle_cd(struct dw_mci *host)
 {
-	int i;
+	struct dw_mci_slot *slot = host->slot;
 
-	for (i = 0; i < host->num_slots; i++) {
-		struct dw_mci_slot *slot = host->slot[i];
-
-		if (!slot)
-			continue;
-
-		if (slot->mmc->ops->card_event)
-			slot->mmc->ops->card_event(slot->mmc);
-		mmc_detect_change(slot->mmc,
-			msecs_to_jiffies(host->pdata->detect_delay_ms));
-	}
+	if (slot->mmc->ops->card_event)
+		slot->mmc->ops->card_event(slot->mmc);
+	mmc_detect_change(slot->mmc,
+		msecs_to_jiffies(host->pdata->detect_delay_ms));
 }
 
 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 {
 	struct dw_mci *host = dev_id;
 	u32 pending;
-	int i;
+	struct dw_mci_slot *slot = host->slot;
 
 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
 
@@ -2644,18 +2651,11 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 			dw_mci_handle_cd(host);
 		}
 
-		/* Handle SDIO Interrupts */
-		for (i = 0; i < host->num_slots; i++) {
-			struct dw_mci_slot *slot = host->slot[i];
-
-			if (!slot)
-				continue;
-
-			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
-				mci_writel(host, RINTSTS,
-					   SDMMC_INT_SDIO(slot->sdio_id));
-				mmc_signal_sdio_irq(slot->mmc);
-			}
+		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
+			mci_writel(host, RINTSTS,
+				   SDMMC_INT_SDIO(slot->sdio_id));
+			__dw_mci_enable_sdio_irq(slot, 0);
+			sdio_signal_irq(slot->mmc);
 		}
 
 	}
@@ -2687,7 +2687,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+static int dw_mci_init_slot(struct dw_mci *host)
 {
 	struct mmc_host *mmc;
 	struct dw_mci_slot *slot;
@@ -2700,15 +2700,15 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 		return -ENOMEM;
 
 	slot = mmc_priv(mmc);
-	slot->id = id;
-	slot->sdio_id = host->sdio_id0 + id;
+	slot->id = 0;
+	slot->sdio_id = host->sdio_id0 + slot->id;
 	slot->mmc = mmc;
 	slot->host = host;
-	host->slot[id] = slot;
+	host->slot = slot;
 
 	mmc->ops = &dw_mci_ops;
-	if (of_property_read_u32_array(host->dev->of_node,
-				       "clock-freq-min-max", freq, 2)) {
+	if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
+					   freq, 2)) {
 		mmc->f_min = DW_MCI_FREQ_MIN;
 		mmc->f_max = DW_MCI_FREQ_MAX;
 	} else {
@@ -2755,6 +2755,10 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	if (ret)
 		goto err_host_allocated;
 
+	/* Process SDIO IRQs through the sdio_irq_work. */
+	if (mmc->caps & MMC_CAP_SDIO_IRQ)
+		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
 	/* Useful defaults if platform data is unset. */
 	if (host->use_dma == TRANS_MODE_IDMAC) {
 		mmc->max_segs = host->ring_size;
@@ -2796,11 +2800,11 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
 	return ret;
 }
 
-static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
 {
 	/* Debugfs stuff is cleaned up by mmc core */
 	mmc_remove_host(slot->mmc);
-	slot->host->slot[id] = NULL;
+	slot->host->slot = NULL;
 	mmc_free_host(slot->mmc);
 }
 
@@ -2808,7 +2812,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
 {
 	int addr_config;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 
 	/*
 	* Check transfer mode from HCON[17:16]
@@ -2869,8 +2872,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
 		dev_info(host->dev, "Using internal DMA controller.\n");
 	} else {
 		/* TRANS_MODE_EDMAC: check dma bindings again */
-		if ((of_property_count_strings(np, "dma-names") < 0) ||
-		    (!of_find_property(np, "dmas", NULL))) {
+		if ((device_property_read_string_array(dev, "dma-names",
+						       NULL, 0) < 0) ||
+		    !device_property_present(dev, "dmas")) {
 			goto no_dma;
 		}
 		host->dma_ops = &dw_mci_edmac_ops;
@@ -2937,7 +2941,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 {
 	struct dw_mci_board *pdata;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 	const struct dw_mci_drv_data *drv_data = host->drv_data;
 	int ret;
 	u32 clock_frequency;
@@ -2954,20 +2957,22 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 	}
 
 	/* find out number of slots supported */
-	of_property_read_u32(np, "num-slots", &pdata->num_slots);
+	if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
+		dev_info(dev, "'num-slots' was deprecated.\n");
 
-	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
 		dev_info(dev,
 			 "fifo-depth property not found, using value of FIFOTH register as default\n");
 
-	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+	device_property_read_u32(dev, "card-detect-delay",
+				 &pdata->detect_delay_ms);
 
-	of_property_read_u32(np, "data-addr", &host->data_addr_override);
+	device_property_read_u32(dev, "data-addr", &host->data_addr_override);
 
-	if (of_get_property(np, "fifo-watermark-aligned", NULL))
+	if (device_property_present(dev, "fifo-watermark-aligned"))
 		host->wm_aligned = true;
 
-	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
+	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
 		pdata->bus_hz = clock_frequency;
 
 	if (drv_data && drv_data->parse_dt) {
@@ -2990,29 +2995,21 @@ static void dw_mci_enable_cd(struct dw_mci *host)
 {
 	unsigned long irqflags;
 	u32 temp;
-	int i;
-	struct dw_mci_slot *slot;
 
 	/*
 	 * No need for CD if all slots have a non-error GPIO
 	 * or broken card detection is found.
 	 */
-	for (i = 0; i < host->num_slots; i++) {
-		slot = host->slot[i];
-		if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
-			return;
-
-		if (mmc_gpio_get_cd(slot->mmc) < 0)
-			break;
-	}
-	if (i == host->num_slots)
+	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
 		return;
 
-	spin_lock_irqsave(&host->irq_lock, irqflags);
-	temp = mci_readl(host, INTMASK);
-	temp  |= SDMMC_INT_CD;
-	mci_writel(host, INTMASK, temp);
-	spin_unlock_irqrestore(&host->irq_lock, irqflags);
+	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
+		spin_lock_irqsave(&host->irq_lock, irqflags);
+		temp = mci_readl(host, INTMASK);
+		temp  |= SDMMC_INT_CD;
+		mci_writel(host, INTMASK, temp);
+		spin_unlock_irqrestore(&host->irq_lock, irqflags);
+	}
 }
 
 int dw_mci_probe(struct dw_mci *host)
@@ -3020,7 +3017,6 @@ int dw_mci_probe(struct dw_mci *host)
 	const struct dw_mci_drv_data *drv_data = host->drv_data;
 	int width, i, ret = 0;
 	u32 fifo_size;
-	int init_slots = 0;
 
 	if (!host->pdata) {
 		host->pdata = dw_mci_parse_dt(host);
@@ -3183,19 +3179,6 @@ int dw_mci_probe(struct dw_mci *host)
 	if (ret)
 		goto err_dmaunmap;
 
-	if (host->pdata->num_slots)
-		host->num_slots = host->pdata->num_slots;
-	else
-		host->num_slots = 1;
-
-	if (host->num_slots < 1 ||
-	    host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
-		dev_err(host->dev,
-			"Platform data must supply correct num_slots.\n");
-		ret = -ENODEV;
-		goto err_clk_ciu;
-	}
-
 	/*
 	 * Enable interrupts for command done, data over, data empty,
 	 * receive ready and error such as transmit, receive timeout, crc error
@@ -3211,20 +3194,9 @@ int dw_mci_probe(struct dw_mci *host)
 		 host->irq, width, fifo_size);
 
 	/* We need at least one slot to succeed */
-	for (i = 0; i < host->num_slots; i++) {
-		ret = dw_mci_init_slot(host, i);
-		if (ret)
-			dev_dbg(host->dev, "slot %d init failed\n", i);
-		else
-			init_slots++;
-	}
-
-	if (init_slots) {
-		dev_info(host->dev, "%d slots initialized\n", init_slots);
-	} else {
-		dev_dbg(host->dev,
-			"attempted to initialize %d slots, but failed on all\n",
-			host->num_slots);
+	ret = dw_mci_init_slot(host);
+	if (ret) {
+		dev_dbg(host->dev, "slot %d init failed\n", i);
 		goto err_dmaunmap;
 	}
 
@@ -3252,13 +3224,9 @@ EXPORT_SYMBOL(dw_mci_probe);
 
 void dw_mci_remove(struct dw_mci *host)
 {
-	int i;
-
-	for (i = 0; i < host->num_slots; i++) {
-		dev_dbg(host->dev, "remove slot %d\n", i);
-		if (host->slot[i])
-			dw_mci_cleanup_slot(host->slot[i], i);
-	}
+	dev_dbg(host->dev, "remove slot\n");
+	if (host->slot)
+		dw_mci_cleanup_slot(host->slot);
 
 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
@@ -3290,9 +3258,9 @@ int dw_mci_runtime_suspend(struct device *dev)
 
 	clk_disable_unprepare(host->ciu_clk);
 
-	if (host->cur_slot &&
-	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
-	     !mmc_card_is_removable(host->cur_slot->mmc)))
+	if (host->slot &&
+	    (mmc_can_gpio_cd(host->slot->mmc) ||
+	     !mmc_card_is_removable(host->slot->mmc)))
 		clk_disable_unprepare(host->biu_clk);
 
 	return 0;
@@ -3301,12 +3269,12 @@ EXPORT_SYMBOL(dw_mci_runtime_suspend);
 
 int dw_mci_runtime_resume(struct device *dev)
 {
-	int i, ret = 0;
+	int ret = 0;
 	struct dw_mci *host = dev_get_drvdata(dev);
 
-	if (host->cur_slot &&
-	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
-	     !mmc_card_is_removable(host->cur_slot->mmc))) {
+	if (host->slot &&
+	    (mmc_can_gpio_cd(host->slot->mmc) ||
+	     !mmc_card_is_removable(host->slot->mmc))) {
 		ret = clk_prepare_enable(host->biu_clk);
 		if (ret)
 			return ret;
@@ -3341,17 +3309,12 @@ int dw_mci_runtime_resume(struct device *dev)
 		   DW_MCI_ERROR_FLAGS);
 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
 
-	for (i = 0; i < host->num_slots; i++) {
-		struct dw_mci_slot *slot = host->slot[i];
 
-		if (!slot)
-			continue;
-		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
-			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
+	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
 
-		/* Force setup bus to guarantee available clock output */
-		dw_mci_setup_bus(slot, true);
-	}
+	/* Force setup bus to guarantee available clock output */
+	dw_mci_setup_bus(host->slot, true);
 
 	/* Now that slots are all setup, we can enable card detect */
 	dw_mci_enable_cd(host);
@@ -3359,9 +3322,9 @@ int dw_mci_runtime_resume(struct device *dev)
 	return 0;
 
 err:
-	if (host->cur_slot &&
-	    (mmc_can_gpio_cd(host->cur_slot->mmc) ||
-	     !mmc_card_is_removable(host->cur_slot->mmc)))
+	if (host->slot &&
+	    (mmc_can_gpio_cd(host->slot->mmc) ||
+	     !mmc_card_is_removable(host->slot->mmc)))
 		clk_disable_unprepare(host->biu_clk);
 
 	return ret;
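
dw_mci_parse_dt() and dw_mci_init_slot() now read configuration through the
unified device property API instead of of_property_*(), so the same code
works whether the properties come from devicetree or another firmware
interface such as ACPI. A minimal sketch of the recurring pattern of an
optional u32 with a driver fallback; the helper name and default value are
illustrative only:

#include <linux/device.h>
#include <linux/property.h>

static u32 my_read_fifo_depth(struct device *dev)
{
	u32 depth;

	/* device_property_read_u32() returns 0 on success. */
	if (device_property_read_u32(dev, "fifo-depth", &depth))
		depth = 32;	/* illustrative fallback only */

	return depth;
}
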
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index ce34736..75da375 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -20,8 +20,6 @@
 #include <linux/reset.h>
 #include <linux/interrupt.h>
 
-#define MAX_MCI_SLOTS	2
-
 enum dw_mci_state {
 	STATE_IDLE = 0,
 	STATE_SENDING_CMD,
@@ -134,7 +132,6 @@ struct dw_mci_dma_slave {
  * =======
  *
  * @lock is a softirq-safe spinlock protecting @queue as well as
- * @cur_slot, @mrq and @state. These must always be updated
  * at the same time while holding @lock.
  *
  * @irq_lock is an irq-safe spinlock protecting the INTMASK register
@@ -170,7 +167,6 @@ struct dw_mci {
 	struct scatterlist	*sg;
 	struct sg_mapping_iter	sg_miter;
 
-	struct dw_mci_slot	*cur_slot;
 	struct mmc_request	*mrq;
 	struct mmc_command	*cmd;
 	struct mmc_data		*data;
@@ -206,7 +202,6 @@ struct dw_mci {
 
 	u32			bus_hz;
 	u32			current_speed;
-	u32			num_slots;
 	u32			fifoth_val;
 	u16			verid;
 	struct device		*dev;
@@ -215,7 +210,7 @@ struct dw_mci {
 	void			*priv;
 	struct clk		*biu_clk;
 	struct clk		*ciu_clk;
-	struct dw_mci_slot	*slot[MAX_MCI_SLOTS];
+	struct dw_mci_slot	*slot;
 
 	/* FIFO push and pull */
 	int			fifo_depth;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 1842ed3..de962c2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -210,6 +210,15 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
 	int i;
 	bool use_desc_chain_mode = true;
 
+	/*
+	 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
+	 * reported. For some strange reason this occurs in descriptor
+	 * chain mode only. So let's fall back to bounce buffer mode
+	 * for command SD_IO_RW_EXTENDED.
+	 */
+	if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
+		return;
+
 	for_each_sg(data->sg, sg, data->sg_len, i)
 		/* check for 8 byte alignment */
 		if (sg->offset & 7) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5c1e178..5a672a5 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1774,7 +1774,7 @@ static int msdc_drv_remove(struct platform_device *pdev)
 	pm_runtime_disable(host->dev);
 	pm_runtime_put_noidle(host->dev);
 	dma_free_coherent(&pdev->dev,
-			sizeof(struct mt_gpdma_desc),
+			2 * sizeof(struct mt_gpdma_desc),
 			host->dma.gpd, host->dma.gpd_addr);
 	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
 			host->dma.bd, host->dma.bd_addr);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 8c39dcc..7c12f37 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -250,14 +250,14 @@ static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	struct mmc_ios *ios = &mmc->ios;
 
-	if (mmc->supply.vmmc) {
+	if (!IS_ERR(mmc->supply.vmmc)) {
 		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 		if (ret)
 			return ret;
 	}
 
 	/* Enable interface voltage rail, if needed */
-	if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
+	if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
 		ret = regulator_enable(mmc->supply.vqmmc);
 		if (ret) {
 			dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
@@ -269,7 +269,7 @@ static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
 	return 0;
 
 err_vqmmc:
-	if (mmc->supply.vmmc)
+	if (!IS_ERR(mmc->supply.vmmc))
 		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 
 	return ret;
@@ -281,7 +281,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
 	int status;
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 
-	if (mmc->supply.vqmmc && host->vqmmc_enabled) {
+	if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
 		ret = regulator_disable(mmc->supply.vqmmc);
 		if (ret) {
 			dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
@@ -290,7 +290,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
 		host->vqmmc_enabled = 0;
 	}
 
-	if (mmc->supply.vmmc) {
+	if (!IS_ERR(mmc->supply.vmmc)) {
 		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 		if (ret)
 			goto err_set_ocr;
@@ -299,7 +299,7 @@ static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
 	return 0;
 
 err_set_ocr:
-	if (mmc->supply.vqmmc) {
+	if (!IS_ERR(mmc->supply.vqmmc)) {
 		status = regulator_enable(mmc->supply.vqmmc);
 		if (status)
 			dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
@@ -313,7 +313,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
 {
 	int ret;
 
-	if (!host->pbias)
+	if (IS_ERR(host->pbias))
 		return 0;
 
 	if (power_on) {
@@ -363,7 +363,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
 	 * If we don't see a Vcc regulator, assume it's a fixed
 	 * voltage always-on regulator.
 	 */
-	if (!mmc->supply.vmmc)
+	if (IS_ERR(mmc->supply.vmmc))
 		return 0;
 
 	if (mmc_pdata(host)->before_set_reg)
@@ -415,7 +415,7 @@ static int omap_hsmmc_disable_boot_regulator(struct regulator *reg)
 {
 	int ret;
 
-	if (!reg)
+	if (IS_ERR(reg))
 		return 0;
 
 	if (regulator_is_enabled(reg)) {
@@ -466,36 +466,27 @@ static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host)
 
 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 {
-	int ocr_value = 0;
 	int ret;
 	struct mmc_host *mmc = host->mmc;
 
 	if (mmc_pdata(host)->set_power)
 		return 0;
 
-	mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
-	if (IS_ERR(mmc->supply.vmmc)) {
-		ret = PTR_ERR(mmc->supply.vmmc);
-		if ((ret != -ENODEV) && host->dev->of_node)
-			return ret;
-		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
-			PTR_ERR(mmc->supply.vmmc));
-		mmc->supply.vmmc = NULL;
-	} else {
-		ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
-		if (ocr_value > 0)
-			mmc_pdata(host)->ocr_mask = ocr_value;
-	}
+	ret = mmc_regulator_get_supply(mmc);
+	if (ret == -EPROBE_DEFER)
+		return ret;
 
 	/* Allow an aux regulator */
-	mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
 	if (IS_ERR(mmc->supply.vqmmc)) {
-		ret = PTR_ERR(mmc->supply.vqmmc);
-		if ((ret != -ENODEV) && host->dev->of_node)
-			return ret;
-		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
-			PTR_ERR(mmc->supply.vqmmc));
-		mmc->supply.vqmmc = NULL;
+		mmc->supply.vqmmc = devm_regulator_get_optional(host->dev,
+								"vmmc_aux");
+		if (IS_ERR(mmc->supply.vqmmc)) {
+			ret = PTR_ERR(mmc->supply.vqmmc);
+			if ((ret != -ENODEV) && host->dev->of_node)
+				return ret;
+			dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
+				PTR_ERR(mmc->supply.vqmmc));
+		}
 	}
 
 	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
@@ -508,7 +499,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
 		}
 		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
 			PTR_ERR(host->pbias));
-		host->pbias = NULL;
 	}
 
 	/* For eMMC do not power off when not in sleep state */
@@ -2146,7 +2136,8 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_irq;
 
-	mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
+	if (!mmc->ocr_avail)
+		mmc->ocr_avail = mmc_pdata(host)->ocr_mask;
 
 	omap_hsmmc_disable_irq(host);
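
The regulator handling in omap_hsmmc now goes through
mmc_regulator_get_supply(), which leaves mmc->supply.vmmc and
mmc->supply.vqmmc as ERR_PTR() values when a supply is absent rather than
NULL, hence the !IS_ERR() guards throughout the file. A minimal sketch of
the resulting pattern; my_enable_vmmc() is an illustrative helper, not part
of the driver:

#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/regulator/consumer.h>

static int my_enable_vmmc(struct mmc_host *mmc, int vdd)
{
	/* An absent optional supply is an ERR_PTR(), not NULL. */
	if (IS_ERR(mmc->supply.vmmc))
		return 0;

	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}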
 
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c763b40..59ab194 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,7 +702,11 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	pxamci_init_ocr(host);
 
-	mmc->caps = 0;
+	/*
+	 * This architecture used to disable bounce buffers through its
+	 * defconfig, now it is done at runtime as a host property.
+	 */
+	mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
 	host->cmdat = 0;
 	if (!cpu_is_pxa25x()) {
 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
new file mode 100644
index 0000000..ca83acc
--- /dev/null
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -0,0 +1,39 @@
+/*
+ * Renesas Mobile SDHI
+ *
+ * Copyright (C) 2017 Horms Solutions Ltd., Simon Horman
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RENESAS_SDHI_H
+#define RENESAS_SDHI_H
+
+#include <linux/platform_device.h>
+#include "tmio_mmc.h"
+
+struct renesas_sdhi_scc {
+	unsigned long clk_rate;	/* clock rate for SDR104 */
+	u32 tap;		/* sampling clock position for SDR104 */
+};
+
+struct renesas_sdhi_of_data {
+	unsigned long tmio_flags;
+	u32	      tmio_ocr_mask;
+	unsigned long capabilities;
+	unsigned long capabilities2;
+	enum dma_slave_buswidth dma_buswidth;
+	dma_addr_t dma_rx_offset;
+	unsigned int bus_shift;
+	int scc_offset;
+	struct renesas_sdhi_scc *taps;
+	int taps_num;
+};
+
+int renesas_sdhi_probe(struct platform_device *pdev,
+		       const struct tmio_mmc_dma_ops *dma_ops);
+int renesas_sdhi_remove(struct platform_device *pdev);
+#endif
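
This header formalizes the split of the old sh_mobile_sdhi driver into a
shared core plus per-DMA-backend glue: each backend supplies its
tmio_mmc_dma_ops and forwards probe and remove to the core. A minimal
sketch of how a backend platform driver wires this up, modeled on the
SYS-DMAC variant added below; the driver name is illustrative and the DMA
callbacks are elided:

#include <linux/module.h>
#include <linux/platform_device.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

/* Backend-specific DMA callbacks; contents elided in this sketch. */
static const struct tmio_mmc_dma_ops my_sdhi_dma_ops;

static int my_sdhi_probe(struct platform_device *pdev)
{
	/* The shared core handles clocks, tuning and host registration. */
	return renesas_sdhi_probe(pdev, &my_sdhi_dma_ops);
}

static struct platform_driver my_sdhi_driver = {
	.driver	= {
		.name	= "my_sdhi",
	},
	.probe	= my_sdhi_probe,
	.remove	= renesas_sdhi_remove,
};
module_platform_driver(my_sdhi_driver);

MODULE_LICENSE("GPL v2");
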
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/renesas_sdhi_core.c
similarity index 65%
rename from drivers/mmc/host/sh_mobile_sdhi.c
rename to drivers/mmc/host/renesas_sdhi_core.c
index bc6be0d..a4fb07d 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -1,8 +1,9 @@
 /*
- * SuperH Mobile SDHI
+ * Renesas SDHI
  *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
+ * Copyright (C) 2015-17 Renesas Electronics Corporation
+ * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2016-17 Horms Solutions, Simon Horman
  * Copyright (C) 2009 Magnus Damm
  *
  * This program is free software; you can redistribute it and/or modify
@@ -23,8 +24,6 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
@@ -35,6 +34,7 @@
 #include <linux/pinctrl/pinctrl-state.h>
 #include <linux/regulator/consumer.h>
 
+#include "renesas_sdhi.h"
 #include "tmio_mmc.h"
 
 #define EXT_ACC           0xe4
@@ -45,103 +45,10 @@
 #define SDHI_VER_GEN3_SD	0xcc10
 #define SDHI_VER_GEN3_SDMMC	0xcd10
 
-#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
+#define host_to_priv(host) \
+	container_of((host)->pdata, struct renesas_sdhi, mmc_data)
 
-struct sh_mobile_sdhi_scc {
-	unsigned long clk_rate;	/* clock rate for SDR104 */
-	u32 tap;		/* sampling clock position for SDR104 */
-};
-
-struct sh_mobile_sdhi_of_data {
-	unsigned long tmio_flags;
-	u32	      tmio_ocr_mask;
-	unsigned long capabilities;
-	unsigned long capabilities2;
-	enum dma_slave_buswidth dma_buswidth;
-	dma_addr_t dma_rx_offset;
-	unsigned bus_shift;
-	int scc_offset;
-	struct sh_mobile_sdhi_scc *taps;
-	int taps_num;
-};
-
-static const struct sh_mobile_sdhi_of_data of_default_cfg = {
-	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
-};
-
-static const struct sh_mobile_sdhi_of_data of_rz_compatible = {
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
-	.tmio_ocr_mask	= MMC_VDD_32_33,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
-};
-
-static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-			  TMIO_MMC_CLK_ACTUAL,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
-};
-
-/* Definitions for sampling clocks */
-static struct sh_mobile_sdhi_scc rcar_gen2_scc_taps[] = {
-	{
-		.clk_rate = 156000000,
-		.tap = 0x00000703,
-	},
-	{
-		.clk_rate = 0,
-		.tap = 0x00000300,
-	},
-};
-
-static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
-	.dma_buswidth	= DMA_SLAVE_BUSWIDTH_4_BYTES,
-	.dma_rx_offset	= 0x2000,
-	.scc_offset	= 0x0300,
-	.taps		= rcar_gen2_scc_taps,
-	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
-};
-
-/* Definitions for sampling clocks */
-static struct sh_mobile_sdhi_scc rcar_gen3_scc_taps[] = {
-	{
-		.clk_rate = 0,
-		.tap = 0x00000300,
-	},
-};
-
-static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
-	.bus_shift	= 2,
-	.scc_offset	= 0x1000,
-	.taps		= rcar_gen3_scc_taps,
-	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
-};
-
-static const struct of_device_id sh_mobile_sdhi_of_match[] = {
-	{ .compatible = "renesas,sdhi-shmobile" },
-	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
-	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
-	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
-	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
-	{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
-	{},
-};
-MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
-
-struct sh_mobile_sdhi {
+struct renesas_sdhi {
 	struct clk *clk;
 	struct clk *clk_cd;
 	struct tmio_mmc_data mmc_data;
@@ -151,13 +58,13 @@ struct sh_mobile_sdhi {
 	void __iomem *scc_ctl;
 };
 
-static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
+static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
 {
 	u32 val;
 
 	/*
 	 * see also
-	 *	sh_mobile_sdhi_of_data :: dma_buswidth
+	 *	renesas_sdhi_of_data :: dma_buswidth
 	 */
 	switch (sd_ctrl_read16(host, CTL_VERSION)) {
 	case SDHI_VER_GEN2_SDR50:
@@ -183,11 +90,12 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
 	sd_ctrl_write16(host, EXT_ACC, val);
 }
 
-static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
+static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 	int ret = clk_prepare_enable(priv->clk);
+
 	if (ret < 0)
 		return ret;
 
@@ -213,19 +121,19 @@ static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
 	mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
 
 	/* enable 16bit data access on SDBUF as default */
-	sh_mobile_sdhi_sdbuf_width(host, 16);
+	renesas_sdhi_sdbuf_width(host, 16);
 
 	return 0;
 }
 
-static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
-					      unsigned int new_clock)
+static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
+					    unsigned int new_clock)
 {
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 	unsigned int freq, diff, best_freq = 0, diff_min = ~0;
 	int i, ret;
 
-	/* tested only on RCar Gen2+ currently; may work for others */
+	/* tested only on R-Car Gen2+ currently; may work for others */
 	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
 		return clk_get_rate(priv->clk);
 
@@ -257,26 +165,27 @@ static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
 	return ret == 0 ? best_freq : clk_get_rate(priv->clk);
 }
 
-static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
+static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host)
 {
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 
 	clk_disable_unprepare(priv->clk);
 	clk_disable_unprepare(priv->clk_cd);
 }
 
-static int sh_mobile_sdhi_card_busy(struct mmc_host *mmc)
+static int renesas_sdhi_card_busy(struct mmc_host *mmc)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 
-	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
+	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
+		 TMIO_STAT_DAT0);
 }
 
-static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
-						      struct mmc_ios *ios)
+static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
+						    struct mmc_ios *ios)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 	struct pinctrl_state *pin_state;
 	int ret;
 
@@ -327,21 +236,21 @@ static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
 #define SH_MOBILE_SDHI_SCC_RVSREQ_RVSERR	BIT(2)
 
 static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
-				struct sh_mobile_sdhi *priv, int addr)
+				struct renesas_sdhi *priv, int addr)
 {
 	return readl(priv->scc_ctl + (addr << host->bus_shift));
 }
 
 static inline void sd_scc_write32(struct tmio_mmc_host *host,
-				  struct sh_mobile_sdhi *priv,
+				  struct renesas_sdhi *priv,
 				  int addr, u32 val)
 {
 	writel(val, priv->scc_ctl + (addr << host->bus_shift));
 }
 
-static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
+static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
 {
-	struct sh_mobile_sdhi *priv;
+	struct renesas_sdhi *priv;
 
 	priv = host_to_priv(host);
 
@@ -378,10 +287,10 @@ static unsigned int sh_mobile_sdhi_init_tuning(struct tmio_mmc_host *host)
 		SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_MASK;
 }
 
-static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
-					 unsigned long tap)
+static void renesas_sdhi_prepare_tuning(struct tmio_mmc_host *host,
+					unsigned long tap)
 {
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 
 	/* Set sampling clock position */
 	sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, tap);
@@ -389,9 +298,9 @@ static void sh_mobile_sdhi_prepare_tuning(struct tmio_mmc_host *host,
 
 #define SH_MOBILE_SDHI_MAX_TAP 3
 
-static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
+static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
 {
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 	unsigned long tap_cnt;  /* counter of tuning success */
 	unsigned long tap_set;  /* tap position */
 	unsigned long tap_start;/* start position of tuning success */
@@ -412,9 +321,9 @@ static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
 	tap_start = 0;
 	tap_end = 0;
 	for (i = 0; i < host->tap_num * 2; i++) {
-		if (test_bit(i, host->taps))
+		if (test_bit(i, host->taps)) {
 			ntap++;
-		else {
+		} else {
 			if (ntap > tap_cnt) {
 				tap_start = i - ntap;
 				tap_end = i - 1;
@@ -446,10 +355,9 @@ static int sh_mobile_sdhi_select_tuning(struct tmio_mmc_host *host)
 	return 0;
 }
 
-
-static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
+static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
 {
-	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	struct renesas_sdhi *priv = host_to_priv(host);
 
 	/* Check SCC error */
 	if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
@@ -464,9 +372,9 @@ static bool sh_mobile_sdhi_check_scc_error(struct tmio_mmc_host *host)
 	return false;
 }
 
-static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
+static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
 {
-	struct sh_mobile_sdhi *priv;
+	struct renesas_sdhi *priv;
 
 	priv = host_to_priv(host);
 
@@ -490,7 +398,7 @@ static void sh_mobile_sdhi_hw_reset(struct tmio_mmc_host *host)
 		       sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
 }
 
-static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
+static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host)
 {
 	int timeout = 1000;
 
@@ -506,10 +414,9 @@ static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
 	return 0;
 }
 
-static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
+static int renesas_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
 {
-	switch (addr)
-	{
+	switch (addr) {
 	case CTL_SD_CMD:
 	case CTL_STOP_INTERNAL_ACTION:
 	case CTL_XFER_BLK_COUNT:
@@ -519,14 +426,14 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
 	case CTL_TRANSACTION_CTL:
 	case CTL_DMA_ENABLE:
 	case EXT_ACC:
-		return sh_mobile_sdhi_wait_idle(host);
+		return renesas_sdhi_wait_idle(host);
 	}
 
 	return 0;
 }
 
-static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
-					 unsigned int direction, int blk_size)
+static int renesas_sdhi_multi_io_quirk(struct mmc_card *card,
+				       unsigned int direction, int blk_size)
 {
 	/*
 	 * In Renesas controllers, when performing a
@@ -543,30 +450,34 @@ static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
 	return blk_size;
 }
 
-static void sh_mobile_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
+static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
 	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
 
 	/* enable 32bit access in DMA mode if possible */
-	sh_mobile_sdhi_sdbuf_width(host, enable ? 32 : 16);
+	renesas_sdhi_sdbuf_width(host, enable ? 32 : 16);
 }
 
-static int sh_mobile_sdhi_probe(struct platform_device *pdev)
+int renesas_sdhi_probe(struct platform_device *pdev,
+		       const struct tmio_mmc_dma_ops *dma_ops)
 {
-	const struct sh_mobile_sdhi_of_data *of_data = of_device_get_match_data(&pdev->dev);
-	struct sh_mobile_sdhi *priv;
-	struct tmio_mmc_data *mmc_data;
 	struct tmio_mmc_data *mmd = pdev->dev.platform_data;
+	const struct renesas_sdhi_of_data *of_data;
+	struct tmio_mmc_data *mmc_data;
+	struct tmio_mmc_dma *dma_priv;
 	struct tmio_mmc_host *host;
+	struct renesas_sdhi *priv;
 	struct resource *res;
 	int irq, ret, i;
-	struct tmio_mmc_dma *dma_priv;
+
+	of_data = of_device_get_match_data(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -EINVAL;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct renesas_sdhi),
+			    GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -609,7 +520,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 		goto eprobe;
 	}
 
-
 	if (of_data) {
 		mmc_data->flags |= of_data->tmio_flags;
 		mmc_data->ocr_mask = of_data->tmio_ocr_mask;
@@ -621,18 +531,18 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 	}
 
 	host->dma		= dma_priv;
-	host->write16_hook	= sh_mobile_sdhi_write16_hook;
-	host->clk_enable	= sh_mobile_sdhi_clk_enable;
-	host->clk_update	= sh_mobile_sdhi_clk_update;
-	host->clk_disable	= sh_mobile_sdhi_clk_disable;
-	host->multi_io_quirk	= sh_mobile_sdhi_multi_io_quirk;
+	host->write16_hook	= renesas_sdhi_write16_hook;
+	host->clk_enable	= renesas_sdhi_clk_enable;
+	host->clk_update	= renesas_sdhi_clk_update;
+	host->clk_disable	= renesas_sdhi_clk_disable;
+	host->multi_io_quirk	= renesas_sdhi_multi_io_quirk;
 
 	/* SDR speeds are only available on Gen2+ */
 	if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
 		/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
-		host->card_busy	= sh_mobile_sdhi_card_busy;
+		host->card_busy	= renesas_sdhi_card_busy;
 		host->start_signal_voltage_switch =
-			sh_mobile_sdhi_start_signal_voltage_switch;
+			renesas_sdhi_start_signal_voltage_switch;
 	}
 
 	/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -643,7 +553,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 		*mmc_data = *mmd;
 
 	dma_priv->filter = shdma_chan_filter;
-	dma_priv->enable = sh_mobile_sdhi_enable_dma;
+	dma_priv->enable = renesas_sdhi_enable_dma;
 
 	mmc_data->alignment_shift = 1; /* 2-byte alignment */
 	mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
@@ -659,15 +569,13 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 	 */
 	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
 
-	/*
-	 * All SDHI have CMD12 controll bit
-	 */
+	/* All SDHI have CMD12 control bit */
 	mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
 
 	/* All SDHI have SDIO status bits which must be 1 */
 	mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
 
-	ret = tmio_mmc_host_probe(host, mmc_data);
+	ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
 	if (ret < 0)
 		goto efree;
 
@@ -675,7 +583,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 	if (of_data && of_data->scc_offset &&
 	    (host->mmc->caps & MMC_CAP_UHS_SDR104 ||
 	     host->mmc->caps2 & MMC_CAP2_HS200_1_8V_SDR)) {
-		const struct sh_mobile_sdhi_scc *taps = of_data->taps;
+		const struct renesas_sdhi_scc *taps = of_data->taps;
 		bool hit = false;
 
 		host->mmc->caps |= MMC_CAP_HW_RESET;
@@ -693,11 +601,11 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 			dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
 
 		priv->scc_ctl = host->ctl + of_data->scc_offset;
-		host->init_tuning = sh_mobile_sdhi_init_tuning;
-		host->prepare_tuning = sh_mobile_sdhi_prepare_tuning;
-		host->select_tuning = sh_mobile_sdhi_select_tuning;
-		host->check_scc_error = sh_mobile_sdhi_check_scc_error;
-		host->hw_reset = sh_mobile_sdhi_hw_reset;
+		host->init_tuning = renesas_sdhi_init_tuning;
+		host->prepare_tuning = renesas_sdhi_prepare_tuning;
+		host->select_tuning = renesas_sdhi_select_tuning;
+		host->check_scc_error = renesas_sdhi_check_scc_error;
+		host->hw_reset = renesas_sdhi_hw_reset;
 	}
 
 	i = 0;
@@ -707,7 +615,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 			break;
 		i++;
 		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
-				  dev_name(&pdev->dev), host);
+				       dev_name(&pdev->dev), host);
 		if (ret)
 			goto eirq;
 	}
@@ -732,8 +640,9 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
 eprobe:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
 
-static int sh_mobile_sdhi_remove(struct platform_device *pdev)
+int renesas_sdhi_remove(struct platform_device *pdev)
 {
 	struct mmc_host *mmc = platform_get_drvdata(pdev);
 	struct tmio_mmc_host *host = mmc_priv(mmc);
@@ -742,28 +651,4 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
 
 	return 0;
 }
-
-static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-			pm_runtime_force_resume)
-	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
-			tmio_mmc_host_runtime_resume,
-			NULL)
-};
-
-static struct platform_driver sh_mobile_sdhi_driver = {
-	.driver		= {
-		.name	= "sh_mobile_sdhi",
-		.pm	= &tmio_mmc_dev_pm_ops,
-		.of_match_table = sh_mobile_sdhi_of_match,
-	},
-	.probe		= sh_mobile_sdhi_probe,
-	.remove		= sh_mobile_sdhi_remove,
-};
-
-module_platform_driver(sh_mobile_sdhi_driver);
-
-MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sh_mobile_sdhi");
+EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
new file mode 100644
index 0000000..642a0dc
--- /dev/null
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -0,0 +1,484 @@
+/*
+ * DMA function for TMIO MMC implementations
+ *
+ * Copyright (C) 2016-17 Renesas Electronics Corporation
+ * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2017 Horms Solutions, Simon Horman
+ * Copyright (C) 2010-2011 Guennadi Liakhovetski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+
+#include "renesas_sdhi.h"
+#include "tmio_mmc.h"
+
+#define TMIO_MMC_MIN_DMA_LEN 8
+
+static const struct renesas_sdhi_of_data of_default_cfg = {
+	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
+};
+
+static const struct renesas_sdhi_of_data of_rz_compatible = {
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT,
+	.tmio_ocr_mask	= MMC_VDD_32_33,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
+static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+			  TMIO_MMC_CLK_ACTUAL,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
+/* Definitions for sampling clocks */
+static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
+	{
+		.clk_rate = 156000000,
+		.tap = 0x00000703,
+	},
+	{
+		.clk_rate = 0,
+		.tap = 0x00000300,
+	},
+};
+
+static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+			  MMC_CAP_CMD23,
+	.dma_buswidth	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+	.dma_rx_offset	= 0x2000,
+	.scc_offset	= 0x0300,
+	.taps		= rcar_gen2_scc_taps,
+	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
+};
+
+/* Definitions for sampling clocks */
+static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
+	{
+		.clk_rate = 0,
+		.tap = 0x00000300,
+	},
+};
+
+static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+			  MMC_CAP_CMD23,
+	.bus_shift	= 2,
+	.scc_offset	= 0x1000,
+	.taps		= rcar_gen3_scc_taps,
+	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
+};
+
+static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
+	{ .compatible = "renesas,sdhi-shmobile" },
+	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
+	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
+	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
+	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
+	{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
+
+static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
+					     bool enable)
+{
+	if (!host->chan_tx || !host->chan_rx)
+		return;
+
+	if (host->dma->enable)
+		host->dma->enable(host, enable);
+}
+
+static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
+{
+	renesas_sdhi_sys_dmac_enable_dma(host, false);
+
+	if (host->chan_rx)
+		dmaengine_terminate_all(host->chan_rx);
+	if (host->chan_tx)
+		dmaengine_terminate_all(host->chan_tx);
+
+	renesas_sdhi_sys_dmac_enable_dma(host, true);
+}
+
+static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
+{
+	struct tmio_mmc_host *host = arg;
+
+	spin_lock_irq(&host->lock);
+
+	if (!host->data)
+		goto out;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(host->chan_rx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(host->chan_tx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_TO_DEVICE);
+
+	spin_unlock_irq(&host->lock);
+
+	wait_for_completion(&host->dma_dataend);
+
+	spin_lock_irq(&host->lock);
+	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irq(&host->lock);
+}
+
+static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_rx;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
+			  (align & PAGE_MASK))) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
+	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
+	/* A single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
+
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0)
+		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
+					       DMA_CTRL_ACK);
+
+	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
+		desc->callback_param = host;
+
+		cookie = dmaengine_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		}
+	}
+pio:
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		renesas_sdhi_sys_dmac_enable_dma(host, false);
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+		/* Free the Tx channel too */
+		chan = host->chan_tx;
+		if (chan) {
+			host->chan_tx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+	}
+}
+
+static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_tx;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << host->pdata->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
+			  (align & PAGE_MASK))) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
+	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
+	/* A single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
+
+	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+	if (ret > 0)
+		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
+					       DMA_CTRL_ACK);
+
+	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
+		desc->callback_param = host;
+
+		cookie = dmaengine_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		}
+	}
+pio:
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		renesas_sdhi_sys_dmac_enable_dma(host, false);
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+		/* Free the Rx channel too */
+		chan = host->chan_rx;
+		if (chan) {
+			host->chan_rx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+	}
+}
+
+static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
+					    struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			renesas_sdhi_sys_dmac_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			renesas_sdhi_sys_dmac_start_dma_tx(host);
+	}
+}
+
+static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+	struct dma_chan *chan = NULL;
+
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
+
+	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
+}
+
+static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
+					      struct tmio_mmc_data *pdata)
+{
+	/* We can only either use DMA for both Tx and Rx or not use it at all */
+	if (!host->dma || (!host->pdev->dev.of_node &&
+			   (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+		return;
+
+	if (!host->chan_tx && !host->chan_rx) {
+		struct resource *res = platform_get_resource(host->pdev,
+							     IORESOURCE_MEM, 0);
+		struct dma_slave_config cfg = {};
+		dma_cap_mask_t mask;
+		int ret;
+
+		if (!res)
+			return;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		host->chan_tx = dma_request_slave_channel_compat(mask,
+					host->dma->filter, pdata->chan_priv_tx,
+					&host->pdev->dev, "tx");
+		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+			host->chan_tx);
+
+		if (!host->chan_tx)
+			return;
+
+		cfg.direction = DMA_MEM_TO_DEV;
+		cfg.dst_addr = res->start +
+			(CTL_SD_DATA_PORT << host->bus_shift);
+		cfg.dst_addr_width = host->dma->dma_buswidth;
+		if (!cfg.dst_addr_width)
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		cfg.src_addr = 0;
+		ret = dmaengine_slave_config(host->chan_tx, &cfg);
+		if (ret < 0)
+			goto ecfgtx;
+
+		host->chan_rx = dma_request_slave_channel_compat(mask,
+					host->dma->filter, pdata->chan_priv_rx,
+					&host->pdev->dev, "rx");
+		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+			host->chan_rx);
+
+		if (!host->chan_rx)
+			goto ereqrx;
+
+		cfg.direction = DMA_DEV_TO_MEM;
+		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
+		cfg.src_addr_width = host->dma->dma_buswidth;
+		if (!cfg.src_addr_width)
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		cfg.dst_addr = 0;
+		ret = dmaengine_slave_config(host->chan_rx, &cfg);
+		if (ret < 0)
+			goto ecfgrx;
+
+		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
+		if (!host->bounce_buf)
+			goto ebouncebuf;
+
+		init_completion(&host->dma_dataend);
+		tasklet_init(&host->dma_issue,
+			     renesas_sdhi_sys_dmac_issue_tasklet_fn,
+			     (unsigned long)host);
+	}
+
+	renesas_sdhi_sys_dmac_enable_dma(host, true);
+
+	return;
+
+ebouncebuf:
+ecfgrx:
+	dma_release_channel(host->chan_rx);
+	host->chan_rx = NULL;
+ereqrx:
+ecfgtx:
+	dma_release_channel(host->chan_tx);
+	host->chan_tx = NULL;
+}
+
+static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
+{
+	if (host->chan_tx) {
+		struct dma_chan *chan = host->chan_tx;
+
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->chan_rx) {
+		struct dma_chan *chan = host->chan_rx;
+
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->bounce_buf) {
+		free_pages((unsigned long)host->bounce_buf, 0);
+		host->bounce_buf = NULL;
+	}
+}
+
+static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
+	.start = renesas_sdhi_sys_dmac_start_dma,
+	.enable = renesas_sdhi_sys_dmac_enable_dma,
+	.request = renesas_sdhi_sys_dmac_request_dma,
+	.release = renesas_sdhi_sys_dmac_release_dma,
+	.abort = renesas_sdhi_sys_dmac_abort_dma,
+};
+
+static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
+{
+	return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops);
+}
+
+static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+			   tmio_mmc_host_runtime_resume,
+			   NULL)
+};
+
+static struct platform_driver renesas_sys_dmac_sdhi_driver = {
+	.driver		= {
+		.name	= "sh_mobile_sdhi",
+		.pm	= &renesas_sdhi_sys_dmac_dev_pm_ops,
+		.of_match_table = renesas_sdhi_sys_dmac_of_match,
+	},
+	.probe		= renesas_sdhi_sys_dmac_probe,
+	.remove		= renesas_sdhi_remove,
+};
+
+module_platform_driver(renesas_sys_dmac_sdhi_driver);
+
+MODULE_DESCRIPTION("Renesas SDHI driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh_mobile_sdhi");
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c6a9a1b..cf66a3d 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -274,7 +274,6 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
 		   MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
-	.caps2   = MMC_CAP2_HC_ERASE_SZ,
 	.flags   = SDHCI_ACPI_RUNTIME_PM,
 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -396,9 +395,6 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
 		if (child->status.present && child->status.enabled)
 			acpi_device_fix_up_power(child);
 
-	if (acpi_bus_get_status(device) || !device->status.present)
-		return -ENODEV;
-
 	if (sdhci_acpi_byt_defer(dev))
 		return -EPROBE_DEFER;
 
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 242c5dc..e2f6383 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -89,9 +89,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
 		goto err_clk;
 	}
 
-	/* Enable MMC_CAP2_HC_ERASE_SZ for better max discard calculations */
-	host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
-
 	sdhci_get_of_property(pdev);
 	mmc_of_parse(host->mmc);
 
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 23d8b8a..85140c9 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -95,7 +95,7 @@
 #define ESDHC_CTRL_BUSWIDTH_MASK	(0x3 << 1)
 
 /*
- * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC:
+ * There is an INT DMA ERR mismatch between eSDHC and STD SDHC SPEC:
  * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design,
  * but bit28 is used as the INT DMA ERR in fsl eSDHC design.
  * Define this macro DMA error INT for fsl eSDHC
@@ -110,16 +110,11 @@
  * In exact block transfer, the controller doesn't complete the
  * operations automatically as required at the end of the
  * transfer and remains on hold if the abort command is not sent.
- * As a result, the TC flag is not asserted and SW  received timeout
- * exeception. Bit1 of Vendor Spec registor is used to fix it.
+ * As a result, the TC flag is not asserted and SW received timeout
+ * exception. Bit1 of Vendor Spec register is used to fix it.
  */
 #define ESDHC_FLAG_MULTIBLK_NO_INT	BIT(1)
 /*
- * The flag enables the workaround for ESDHC errata ENGcm07207 which
- * affects i.MX25 and i.MX35.
- */
-#define ESDHC_FLAG_ENGCM07207		BIT(2)
-/*
  * The flag tells that the ESDHC controller is an USDHC block that is
  * integrated on the i.MX6 series.
  */
@@ -131,9 +126,11 @@
 /* The IP has SDHCI_CAPABILITIES_1 register */
 #define ESDHC_FLAG_HAVE_CAP1		BIT(6)
 /*
- * The IP has errata ERR004536
+ * The IP has erratum ERR004536
  * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow,
  * when reading data from the card
+ * This flag is also set for i.MX25 and i.MX35 in order to get
+ * SDHCI_QUIRK_BROKEN_ADMA, but for different reasons (ADMA capability bits).
  */
 #define ESDHC_FLAG_ERR004536		BIT(7)
 /* The IP supports HS200 mode */
@@ -141,7 +138,7 @@
 /* The IP supports HS400 mode */
 #define ESDHC_FLAG_HS400		BIT(9)
 
-/* A higher clock ferquency than this rate requires strobell dll control */
+/* A clock frequency higher than this rate requires strobe dll control */
 #define ESDHC_STROBE_DLL_CLK_FREQ	100000000
 
 struct esdhc_soc_data {
@@ -149,11 +146,11 @@ struct esdhc_soc_data {
 };
 
 static struct esdhc_soc_data esdhc_imx25_data = {
-	.flags = ESDHC_FLAG_ENGCM07207,
+	.flags = ESDHC_FLAG_ERR004536,
 };
 
 static struct esdhc_soc_data esdhc_imx35_data = {
-	.flags = ESDHC_FLAG_ENGCM07207,
+	.flags = ESDHC_FLAG_ERR004536,
 };
 
 static struct esdhc_soc_data esdhc_imx51_data = {
@@ -197,7 +194,7 @@ struct pltfm_imx_data {
 	struct clk *clk_ahb;
 	struct clk *clk_per;
 	enum {
-		NO_CMD_PENDING,      /* no multiblock command pending*/
+		NO_CMD_PENDING,      /* no multiblock command pending */
 		MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
 		WAIT_FOR_INT,        /* sent CMD12, waiting for response INT */
 	} multiblock_status;
@@ -286,7 +283,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 		 * ADMA2 capability of esdhc, but this bit is messed up on
 		 * some SOCs (e.g. on MX25, MX35 this bit is set, but they
 		 * don't actually support ADMA2). So set the BROKEN_ADMA
-		 * uirk on MX25/35 platforms.
+		 * quirk on MX25/35 platforms.
 		 */
 
 		if (val & SDHCI_CAN_DO_ADMA1) {
@@ -351,7 +348,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
 		if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
 			/*
 			 * Clear and then set D3CD bit to avoid missing the
-			 * card interrupt.  This is a eSDHC controller problem
+			 * card interrupt. This is an eSDHC controller problem
 			 * so we need to apply the following workaround: clear
 			 * and set D3CD bit will make eSDHC re-sample the card
 			 * interrupt. In case a card interrupt was lost,
@@ -579,7 +576,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
-	u32 new_val;
+	u32 new_val = 0;
 	u32 mask;
 
 	switch (reg) {
@@ -604,35 +601,52 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
 		 * Do not touch buswidth bits here. This is done in
 		 * esdhc_pltfm_bus_width.
 		 * Do not touch the D3CD bit either which is used for the
-		 * SDIO interrupt errata workaround.
+		 * SDIO interrupt erratum workaround.
 		 */
 		mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);
 
 		esdhc_clrset_le(host, mask, new_val, reg);
 		return;
+	case SDHCI_SOFTWARE_RESET:
+		if (val & SDHCI_RESET_DATA)
+			new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
+		break;
 	}
 	esdhc_clrset_le(host, 0xff, val, reg);
 
-	/*
-	 * The esdhc has a design violation to SDHC spec which tells
-	 * that software reset should not affect card detection circuit.
-	 * But esdhc clears its SYSCTL register bits [0..2] during the
-	 * software reset.  This will stop those clocks that card detection
-	 * circuit relies on.  To work around it, we turn the clocks on back
-	 * to keep card detection circuit functional.
-	 */
-	if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) {
-		esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
-		/*
-		 * The reset on usdhc fails to clear MIX_CTRL register.
-		 * Do it manually here.
-		 */
-		if (esdhc_is_usdhc(imx_data)) {
-			/* the tuning bits should be kept during reset */
-			new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
-			writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
-					host->ioaddr + ESDHC_MIX_CTRL);
-			imx_data->is_ddr = 0;
+	if (reg == SDHCI_SOFTWARE_RESET) {
+		if (val & SDHCI_RESET_ALL) {
+			/*
+			 * The esdhc has a design violation to SDHC spec which
+			 * tells that software reset should not affect card
+			 * detection circuit. But esdhc clears its SYSCTL
+			 * register bits [0..2] during the software reset. This
+			 * will stop those clocks that card detection circuit
+			 * relies on. To work around it, we turn the clocks on
+			 * back to keep card detection circuit functional.
+			 */
+			esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
+			/*
+			 * The reset on usdhc fails to clear MIX_CTRL register.
+			 * Do it manually here.
+			 */
+			if (esdhc_is_usdhc(imx_data)) {
+				/*
+				 * the tuning bits should be kept during reset
+				 */
+				new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
+				writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
+						host->ioaddr + ESDHC_MIX_CTRL);
+				imx_data->is_ddr = 0;
+			}
+		} else if (val & SDHCI_RESET_DATA) {
+			/*
+			 * The eSDHC DAT line software reset clears at least the
+			 * data transfer width on i.MX25, so make sure that the
+			 * Host Control register is unaffected.
+			 */
+			esdhc_clrset_le(host, 0xff, new_val,
+					SDHCI_HOST_CONTROL);
 		}
 	}
 }
@@ -657,7 +671,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
 	unsigned int host_clock = pltfm_host->clock;
-	int pre_div = 2;
+	int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
+	int pre_div = 1;
 	int div = 1;
 	u32 temp, val;
 
@@ -672,28 +687,23 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
 		return;
 	}
 
-	if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
-		pre_div = 1;
-
 	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
 	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
 		| ESDHC_CLOCK_MASK);
 	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
 
-	while (host_clock / pre_div / 16 > clock && pre_div < 256)
+	while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
+			pre_div < 256)
 		pre_div *= 2;
 
-	while (host_clock / pre_div / div > clock && div < 16)
+	while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
 		div++;
 
-	host->mmc->actual_clock = host_clock / pre_div / div;
+	host->mmc->actual_clock = host_clock / (div * pre_div * ddr_pre_div);
 	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
 		clock, host->mmc->actual_clock);
 
-	if (imx_data->is_ddr)
-		pre_div >>= 2;
-	else
-		pre_div >>= 1;
+	pre_div >>= 1;
 	div--;
 
 	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
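[Worked example of the divider search above, stand-alone and with made-up clock values: ddr_pre_div is folded in as 2 for DDR and 1 otherwise, pre_div doubles from 1 while host_clock / (16 * pre_div * ddr_pre_div) still exceeds the requested clock, then div counts up to 16; the register encoding (pre_div >>= 1, div--) is unchanged.]

	/* Stand-alone sketch of the divider selection; not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int host_clock = 198000000;	/* example base clock */
		unsigned int clock = 50000000;		/* example requested clock */
		int ddr_pre_div = 1;			/* 2 when is_ddr, else 1 */
		int pre_div = 1, div = 1;

		while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
		       pre_div < 256)
			pre_div *= 2;
		while (host_clock / (div * pre_div * ddr_pre_div) > clock &&
		       div < 16)
			div++;

		printf("actual clock %u with pre_div=%d div=%d\n",
		       host_clock / (div * pre_div * ddr_pre_div), pre_div, div);
		return 0;
	}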
@@ -763,7 +773,7 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
 	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
 	writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
 	dev_dbg(mmc_dev(host->mmc),
-		"tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
+		"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
 			val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
 }
 
@@ -807,7 +817,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
 	ret = mmc_send_tuning(host->mmc, opcode, NULL);
 	esdhc_post_tuning(host);
 
-	dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
 		ret ? "failed" : "passed", avg, ret);
 
 	return ret;
@@ -847,15 +857,15 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
 }
 
 /*
- * For HS400 eMMC, there is a data_strobe line, this signal is generated
+ * For HS400 eMMC, there is a data_strobe line. This signal is generated
  * by the device and used for data output and CRC status response output
  * in HS400 mode. The frequency of this signal follows the frequency of
- * CLK generated by host. Host receive the data which is aligned to the
+ * CLK generated by host. The host receives the data which is aligned to the
  * edge of data_strobe line. Due to the time delay between CLK line and
  * data_strobe line, if the delay time is larger than one clock cycle,
- * then CLK and data_strobe line will misaligned, read error shows up.
+ * then CLK and data_strobe line will be misaligned, read error shows up.
  * So when the CLK is higher than 100MHz, each clock cycle is short enough,
- * host should config the delay target.
+ * host should configure the delay target.
  */
 static void esdhc_set_strobe_dll(struct sdhci_host *host)
 {
@@ -895,7 +905,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
 	u32 ctrl;
 
-	/* Rest the tuning circurt */
+	/* Reset the tuning circuit */
 	if (esdhc_is_usdhc(imx_data)) {
 		if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
 			ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
@@ -976,7 +986,7 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
 
-	/* Doc Errata: the uSDHC actual maximum timeout count is 1 << 29 */
+	/* Doc Erratum: the uSDHC actual maximum timeout count is 1 << 29 */
 	return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
 }
 
@@ -1032,10 +1042,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
 
 		/*
 		 * ROM code will change the bit burst_length_enable setting
-		 * to zero if this usdhc is choosed to boot system. Change
+		 * to zero if this usdhc is chosen to boot system. Change
 		 * it back here, otherwise it will impact the performance a
 		 * lot. This bit is used to enable/disable the burst length
-		 * for the external AHB2AXI bridge, it's usefully especially
+		 * for the external AHB2AXI bridge. It's useful especially
 		 * for INCR transfer because without burst length indicator,
 		 * the AHB2AXI bridge does not know the burst length in
 		 * advance. And without burst length indicator, AHB INCR
@@ -1045,7 +1055,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
 			| ESDHC_BURST_LEN_EN_INCR,
 			host->ioaddr + SDHCI_HOST_CONTROL);
 		/*
-		* errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+		* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
 		* TO1.1, it's harmless for MX6SL
 		*/
 		writel(readl(host->ioaddr + 0x6c) | BIT(7),
@@ -1104,7 +1114,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 
 	mmc_of_parse_voltage(np, &host->ocr_mask);
 
-	/* sdr50 and sdr104 needs work on 1.8v signal voltage */
+	/* sdr50 and sdr104 need work on 1.8v signal voltage */
 	if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
 	    !IS_ERR(imx_data->pins_default)) {
 		imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1116,7 +1126,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 			dev_warn(mmc_dev(host->mmc),
 				"could not get ultra high speed state, work on normal mode\n");
 			/*
-			 * fall back to not support uhs by specify no 1.8v quirk
+			 * fall back to not supporting uhs by specifying no
+			 * 1.8v quirk
 			 */
 			host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
 		}
@@ -1250,14 +1261,20 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 
 	pltfm_host->clk = imx_data->clk_per;
 	pltfm_host->clock = clk_get_rate(pltfm_host->clk);
-	clk_prepare_enable(imx_data->clk_per);
-	clk_prepare_enable(imx_data->clk_ipg);
-	clk_prepare_enable(imx_data->clk_ahb);
+	err = clk_prepare_enable(imx_data->clk_per);
+	if (err)
+		goto free_sdhci;
+	err = clk_prepare_enable(imx_data->clk_ipg);
+	if (err)
+		goto disable_per_clk;
+	err = clk_prepare_enable(imx_data->clk_ahb);
+	if (err)
+		goto disable_ipg_clk;
 
 	imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR(imx_data->pinctrl)) {
 		err = PTR_ERR(imx_data->pinctrl);
-		goto disable_clk;
+		goto disable_ahb_clk;
 	}
 
 	imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl,
@@ -1265,11 +1282,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 	if (IS_ERR(imx_data->pins_default))
 		dev_warn(mmc_dev(host->mmc), "could not get default state\n");
 
-	if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
-		/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
-		host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
-			| SDHCI_QUIRK_BROKEN_ADMA;
-
 	if (esdhc_is_usdhc(imx_data)) {
 		host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -1297,13 +1309,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 	else
 		err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
 	if (err)
-		goto disable_clk;
+		goto disable_ahb_clk;
 
 	sdhci_esdhc_imx_hwinit(host);
 
 	err = sdhci_add_host(host);
 	if (err)
-		goto disable_clk;
+		goto disable_ahb_clk;
 
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
@@ -1313,10 +1325,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 
 	return 0;
 
-disable_clk:
-	clk_disable_unprepare(imx_data->clk_per);
-	clk_disable_unprepare(imx_data->clk_ipg);
+disable_ahb_clk:
 	clk_disable_unprepare(imx_data->clk_ahb);
+disable_ipg_clk:
+	clk_disable_unprepare(imx_data->clk_ipg);
+disable_per_clk:
+	clk_disable_unprepare(imx_data->clk_per);
 free_sdhci:
 	sdhci_pltfm_free(pdev);
 	return err;
@@ -1393,14 +1407,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+	int err;
 
 	if (!sdhci_sdio_irq_enabled(host)) {
-		clk_prepare_enable(imx_data->clk_per);
-		clk_prepare_enable(imx_data->clk_ipg);
+		err = clk_prepare_enable(imx_data->clk_per);
+		if (err)
+			return err;
+		err = clk_prepare_enable(imx_data->clk_ipg);
+		if (err)
+			goto disable_per_clk;
 	}
-	clk_prepare_enable(imx_data->clk_ahb);
+	err = clk_prepare_enable(imx_data->clk_ahb);
+	if (err)
+		goto disable_ipg_clk;
+	err = sdhci_runtime_resume_host(host);
+	if (err)
+		goto disable_ahb_clk;
 
-	return sdhci_runtime_resume_host(host);
+	return 0;
+
+disable_ahb_clk:
+	clk_disable_unprepare(imx_data->clk_ahb);
+disable_ipg_clk:
+	if (!sdhci_sdio_irq_enabled(host))
+		clk_disable_unprepare(imx_data->clk_ipg);
+disable_per_clk:
+	if (!sdhci_sdio_irq_enabled(host))
+		clk_disable_unprepare(imx_data->clk_per);
+	return err;
 }
 #endif
 
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c4bbd74..e7893f2 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -19,6 +19,7 @@
  */
 
 #define ESDHC_DEFAULT_QUIRKS	(SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
+				SDHCI_QUIRK_32BIT_DMA_ADDR | \
 				SDHCI_QUIRK_NO_BUSY_IRQ | \
 				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
 				SDHCI_QUIRK_PIO_NEEDS_DELAY | \
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index ea6b36c..b13c0a7 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -638,7 +638,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 
 	ret = mmc_of_parse(host->mmc);
 	if (ret) {
-		dev_err(&pdev->dev, "parsing dt failed (%u)\n", ret);
+		dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
 		goto unreg_clk;
 	}
 
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 92fc3f7..e1721ac 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -347,8 +347,7 @@ static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
 {
 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
-	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC |
-				  MMC_CAP2_HC_ERASE_SZ;
+	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
 	return 0;
 }
 
@@ -404,10 +403,9 @@ struct intel_host {
 	bool	d3_retune;
 };
 
-const u8 intel_dsm_uuid[] = {
-	0xA5, 0x3E, 0xC1, 0xF6, 0xCD, 0x65, 0x1F, 0x46,
-	0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61,
-};
+static const guid_t intel_dsm_guid =
+	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
 
 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
 		       unsigned int fn, u32 *result)
@@ -416,7 +414,7 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
 	int err = 0;
 	size_t len;
 
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), intel_dsm_uuid, 0, fn, NULL);
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
 	if (!obj)
 		return -EOPNOTSUPP;
 
@@ -543,6 +541,23 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
 	}
 }
 
+#define INTEL_HS400_ES_REG 0x78
+#define INTEL_HS400_ES_BIT BIT(0)
+
+static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
+					struct mmc_ios *ios)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u32 val;
+
+	val = sdhci_readl(host, INTEL_HS400_ES_REG);
+	if (ios->enhanced_strobe)
+		val |= INTEL_HS400_ES_BIT;
+	else
+		val &= ~INTEL_HS400_ES_BIT;
+	sdhci_writel(host, val, INTEL_HS400_ES_REG);
+}
+
 static const struct sdhci_ops sdhci_intel_byt_ops = {
 	.set_clock		= sdhci_set_clock,
 	.set_power		= sdhci_intel_set_power,
@@ -570,7 +585,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
 				 MMC_CAP_CMD_DURING_TFR |
 				 MMC_CAP_WAIT_WHILE_BUSY;
-	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
 	slot->hw_reset = sdhci_pci_int_hw_reset;
 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
 		slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
@@ -579,6 +593,19 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
 	return 0;
 }
 
+static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+	int ret = byt_emmc_probe_slot(slot);
+
+	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
+		slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
+		slot->host->mmc_host_ops.hs400_enhanced_strobe =
+						intel_hs400_enhanced_strobe;
+	}
+
+	return ret;
+}
+
 #ifdef CONFIG_ACPI
 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
 {
@@ -631,7 +658,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 {
 	byt_read_dsm(slot);
 	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
-				 MMC_CAP_AGGRESSIVE_PM;
+				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
 	slot->cd_idx = 0;
 	slot->cd_override_level = true;
 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
@@ -654,6 +681,17 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
 	.priv_size	= sizeof(struct intel_host),
 };
 
+static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
+	.allow_runtime_pm	= true,
+	.probe_slot		= glk_emmc_probe_slot,
+	.quirks			= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2		= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+				  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
+				  SDHCI_QUIRK2_STOP_WITH_TC,
+	.ops			= &sdhci_intel_byt_ops,
+	.priv_size		= sizeof(struct intel_host),
+};
+
 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
@@ -1171,554 +1209,79 @@ static const struct sdhci_pci_fixes sdhci_amd = {
 };
 
 static const struct pci_device_id pci_ids[] = {
-	{
-		.vendor		= PCI_VENDOR_ID_RICOH,
-		.device		= PCI_DEVICE_ID_RICOH_R5C822,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_ricoh,
-	},
-
-	{
-		.vendor         = PCI_VENDOR_ID_RICOH,
-		.device         = 0x843,
-		.subvendor      = PCI_ANY_ID,
-		.subdevice      = PCI_ANY_ID,
-		.driver_data    = (kernel_ulong_t)&sdhci_ricoh_mmc,
-	},
-
-	{
-		.vendor         = PCI_VENDOR_ID_RICOH,
-		.device         = 0xe822,
-		.subvendor      = PCI_ANY_ID,
-		.subdevice      = PCI_ANY_ID,
-		.driver_data    = (kernel_ulong_t)&sdhci_ricoh_mmc,
-	},
-
-	{
-		.vendor         = PCI_VENDOR_ID_RICOH,
-		.device         = 0xe823,
-		.subvendor      = PCI_ANY_ID,
-		.subdevice      = PCI_ANY_ID,
-		.driver_data    = (kernel_ulong_t)&sdhci_ricoh_mmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_ENE,
-		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_ene_712,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_ENE,
-		.device		= PCI_DEVICE_ID_ENE_CB712_SD_2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_ene_712,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_ENE,
-		.device		= PCI_DEVICE_ID_ENE_CB714_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_ene_714,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_ENE,
-		.device		= PCI_DEVICE_ID_ENE_CB714_SD_2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_ene_714,
-	},
-
-	{
-		.vendor         = PCI_VENDOR_ID_MARVELL,
-		.device         = PCI_DEVICE_ID_MARVELL_88ALP01_SD,
-		.subvendor      = PCI_ANY_ID,
-		.subdevice      = PCI_ANY_ID,
-		.driver_data    = (kernel_ulong_t)&sdhci_cafe,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_JMICRON,
-		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_JMICRON,
-		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_MMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_JMICRON,
-		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_JMICRON,
-		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
-		.device		= 0x8000,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_syskt,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_VIA,
-		.device		= 0x95d0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_via,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_REALTEK,
-		.device		= 0x5250,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_rtsx,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_QRK_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_qrk,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MRST_SD0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc0,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MRST_SD1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MRST_SD2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MFD_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_PCH_SDIO0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_pch_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_PCH_SDIO1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_pch_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SDIO,
-		.subvendor	= PCI_VENDOR_ID_NI,
-		.subdevice	= 0x7884,
-		.driver_data	= (kernel_ulong_t)&sdhci_ni_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BSW_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BSW_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BSW_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_MRFLD_MMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfld_mmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_SPT_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_SPT_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_SPT_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_DNV_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXT_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXT_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXT_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXTM_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXTM_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_BXTM_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_APL_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_APL_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_APL_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_GLK_EMMC,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_GLK_SDIO,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_INTEL,
-		.device		= PCI_DEVICE_ID_INTEL_GLK_SD,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_8120,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_8220,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_8221,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_8320,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_8321,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_FUJIN2,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_SDS0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_SDS1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_SEABIRD0,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-
-	{
-		.vendor		= PCI_VENDOR_ID_O2,
-		.device		= PCI_DEVICE_ID_O2_SEABIRD1,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_o2,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_ANY_ID,
-		.class		= PCI_CLASS_SYSTEM_SDHCI << 8,
-		.class_mask	= 0xFFFF00,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.driver_data	= (kernel_ulong_t)&sdhci_amd,
-	},
-	{	/* Generic SD host controller */
-		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
-	},
-
+	SDHCI_PCI_DEVICE(RICOH, R5C822,  ricoh),
+	SDHCI_PCI_DEVICE(RICOH, R5C843,  ricoh_mmc),
+	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
+	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
+	SDHCI_PCI_DEVICE(ENE, CB712_SD,   ene_712),
+	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
+	SDHCI_PCI_DEVICE(ENE, CB714_SD,   ene_714),
+	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
+	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
+	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD,  jmicron),
+	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
+	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD,  jmicron),
+	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
+	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
+	SDHCI_PCI_DEVICE(VIA, 95D0, via),
+	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
+	SDHCI_PCI_DEVICE(INTEL, QRK_SD,    intel_qrk),
+	SDHCI_PCI_DEVICE(INTEL, MRST_SD0,  intel_mrst_hc0),
+	SDHCI_PCI_DEVICE(INTEL, MRST_SD1,  intel_mrst_hc1_hc2),
+	SDHCI_PCI_DEVICE(INTEL, MRST_SD2,  intel_mrst_hc1_hc2),
+	SDHCI_PCI_DEVICE(INTEL, MFD_SD,    intel_mfd_sd),
+	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
+	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
+	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
+	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
+	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
+	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BYT_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BSW_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
+	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
+	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
+	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
+	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
+	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
+	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, SPT_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BXT_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, BXTM_SD,   intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, APL_EMMC,  intel_byt_emmc),
+	SDHCI_PCI_DEVICE(INTEL, APL_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, APL_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC,  intel_glk_emmc),
+	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO,  intel_byt_sdio),
+	SDHCI_PCI_DEVICE(INTEL, GLK_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC,  intel_glk_emmc),
+	SDHCI_PCI_DEVICE(INTEL, CNP_SD,    intel_byt_sd),
+	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
+	SDHCI_PCI_DEVICE(O2, 8120,     o2),
+	SDHCI_PCI_DEVICE(O2, 8220,     o2),
+	SDHCI_PCI_DEVICE(O2, 8221,     o2),
+	SDHCI_PCI_DEVICE(O2, 8320,     o2),
+	SDHCI_PCI_DEVICE(O2, 8321,     o2),
+	SDHCI_PCI_DEVICE(O2, FUJIN2,   o2),
+	SDHCI_PCI_DEVICE(O2, SDS0,     o2),
+	SDHCI_PCI_DEVICE(O2, SDS1,     o2),
+	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
+	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
+	/* Generic SD host controller */
+	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
 	{ /* end: all zeroes */ },
 };
 
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 37766d20..75196a2 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -2,7 +2,7 @@
 #define __SDHCI_PCI_H
 
 /*
- * PCI device IDs
+ * PCI device IDs, sub IDs
  */
 
 #define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809
@@ -37,6 +37,50 @@
 #define PCI_DEVICE_ID_INTEL_GLK_SD	0x31ca
 #define PCI_DEVICE_ID_INTEL_GLK_EMMC	0x31cc
 #define PCI_DEVICE_ID_INTEL_GLK_SDIO	0x31d0
+#define PCI_DEVICE_ID_INTEL_CNP_EMMC	0x9dc4
+#define PCI_DEVICE_ID_INTEL_CNP_SD	0x9df5
+#define PCI_DEVICE_ID_INTEL_CNPH_SD	0xa375
+
+#define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
+#define PCI_DEVICE_ID_VIA_95D0		0x95d0
+#define PCI_DEVICE_ID_REALTEK_5250	0x5250
+
+#define PCI_SUBDEVICE_ID_NI_7884	0x7884
+
+/*
+ * PCI device class and mask
+ */
+
+#define SYSTEM_SDHCI			(PCI_CLASS_SYSTEM_SDHCI << 8)
+#define PCI_CLASS_MASK			0xFFFF00
+
+/*
+ * Macros for PCI device-description
+ */
+
+#define _PCI_VEND(vend) PCI_VENDOR_ID_##vend
+#define _PCI_DEV(vend, dev) PCI_DEVICE_ID_##vend##_##dev
+#define _PCI_SUBDEV(subvend, subdev) PCI_SUBDEVICE_ID_##subvend##_##subdev
+
+#define SDHCI_PCI_DEVICE(vend, dev, cfg) { \
+	.vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
+}
+
+#define SDHCI_PCI_SUBDEVICE(vend, dev, subvend, subdev, cfg) { \
+	.vendor = _PCI_VEND(vend), .device = _PCI_DEV(vend, dev), \
+	.subvendor = _PCI_VEND(subvend), \
+	.subdevice = _PCI_SUBDEV(subvend, subdev), \
+	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
+}
+
+#define SDHCI_PCI_DEVICE_CLASS(vend, cl, cl_msk, cfg) { \
+	.vendor = _PCI_VEND(vend), .device = PCI_ANY_ID, \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
+	.class = (cl), .class_mask = (cl_msk), \
+	.driver_data = (kernel_ulong_t)&(sdhci_##cfg) \
+}
 
 /*
  * PCI registers
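[Usage illustration: the new table macros simply token-paste the vendor and device names into the usual pci_device_id fields. Written out by hand, SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc) expands to roughly:]

	{
		.vendor      = PCI_VENDOR_ID_INTEL,
		.device      = PCI_DEVICE_ID_INTEL_GLK_EMMC,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = (kernel_ulong_t)&sdhci_intel_glk_emmc,
	},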
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index 5ff26ab..70cb00a 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -256,9 +256,6 @@ static int sdricoh_blockio(struct sdricoh_host *host, int read,
 		}
 	}
 
-	if (len)
-		return -EIO;
-
 	return 0;
 }
 
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e897e7f..64b7e9f 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -1,16 +1,16 @@
 /*
- * linux/drivers/mmc/host/tmio_mmc.c
+ * Driver for the MMC / SD / SDIO cell found in:
  *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ *
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ * Copyright (C) 2017 Horms Solutions, Simon Horman
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
  */
 
 #include <linux/device.h>
@@ -99,13 +99,13 @@ static int tmio_mmc_probe(struct platform_device *pdev)
 	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
 	host->bus_shift = resource_size(res) >> 10;
 
-	ret = tmio_mmc_host_probe(host, pdata);
+	ret = tmio_mmc_host_probe(host, pdata, NULL);
 	if (ret)
 		goto host_free;
 
 	ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq,
-				IRQF_TRIGGER_FALLING,
-				dev_name(&pdev->dev), host);
+			       IRQF_TRIGGER_FALLING,
+			       dev_name(&pdev->dev), host);
 	if (ret)
 		goto host_remove;
 
@@ -132,6 +132,7 @@ static int tmio_mmc_remove(struct platform_device *pdev)
 
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
+
 		tmio_mmc_host_remove(host);
 		if (cell->disable)
 			cell->disable(pdev);
@@ -145,8 +146,7 @@ static int tmio_mmc_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
 	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
-			tmio_mmc_host_runtime_resume,
-			NULL)
+			   tmio_mmc_host_runtime_resume, NULL)
 };
 
 static struct platform_driver tmio_mmc_driver = {
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index d0edb57..6ad6704 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,8 +1,11 @@
 /*
- * linux/drivers/mmc/host/tmio_mmc.h
+ * Driver for the MMC / SD / SDIO cell found in:
  *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ *
+ * Copyright (C) 2015-17 Renesas Electronics Corporation
+ * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2016-17 Horms Solutions, Simon Horman
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
  *
@@ -10,9 +13,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
  */
 
 #ifndef TMIO_MMC_H
@@ -115,6 +115,15 @@ struct tmio_mmc_dma {
 	void (*enable)(struct tmio_mmc_host *host, bool enable);
 };
 
+struct tmio_mmc_dma_ops {
+	void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
+	void (*enable)(struct tmio_mmc_host *host, bool enable);
+	void (*request)(struct tmio_mmc_host *host,
+			struct tmio_mmc_data *pdata);
+	void (*release)(struct tmio_mmc_host *host);
+	void (*abort)(struct tmio_mmc_host *host);
+};
+
 struct tmio_mmc_host {
 	void __iomem *ctl;
 	struct mmc_command      *cmd;
@@ -189,12 +198,15 @@ struct tmio_mmc_host {
 	/* Tuning values: 1 for success, 0 for failure */
 	DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
 	unsigned int tap_num;
+
+	const struct tmio_mmc_dma_ops *dma_ops;
 };
 
 struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
 void tmio_mmc_host_free(struct tmio_mmc_host *host);
 int tmio_mmc_host_probe(struct tmio_mmc_host *host,
-			struct tmio_mmc_data *pdata);
+			struct tmio_mmc_data *pdata,
+			const struct tmio_mmc_dma_ops *dma_ops);
 void tmio_mmc_host_remove(struct tmio_mmc_host *host);
 void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
 
@@ -216,38 +228,6 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
 	local_irq_restore(*flags);
 }
 
-#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
-void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
-void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
-void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
-void tmio_mmc_release_dma(struct tmio_mmc_host *host);
-void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
-#else
-static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
-			       struct mmc_data *data)
-{
-}
-
-static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
-{
-}
-
-static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
-				 struct tmio_mmc_data *pdata)
-{
-	host->chan_tx = NULL;
-	host->chan_rx = NULL;
-}
-
-static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-}
-
-static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
-{
-}
-#endif
-
 #ifdef CONFIG_PM
 int tmio_mmc_host_runtime_suspend(struct device *dev);
 int tmio_mmc_host_runtime_resume(struct device *dev);
@@ -259,24 +239,26 @@ static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
 }
 
 static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
+				      u16 *buf, int count)
 {
 	readsw(host->ctl + (addr << host->bus_shift), buf, count);
 }
 
-static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr)
+static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
+					      int addr)
 {
 	return readw(host->ctl + (addr << host->bus_shift)) |
 	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
 }
 
 static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
-		u32 *buf, int count)
+				      u32 *buf, int count)
 {
 	readsl(host->ctl + (addr << host->bus_shift), buf, count);
 }
 
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
+				   u16 val)
 {
 	/* If there is a hook and it returns non-zero then there
 	 * is an error and the write should be skipped
@@ -287,19 +269,20 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val
 }
 
 static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
+				       u16 *buf, int count)
 {
 	writesw(host->ctl + (addr << host->bus_shift), buf, count);
 }
 
-static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
+static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
+						int addr, u32 val)
 {
 	writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
 	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
-		const u32 *buf, int count)
+				       const u32 *buf, int count)
 {
 	writesl(host->ctl + (addr << host->bus_shift), buf, count);
 }
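The tmio_mmc.h changes above replace the build-time SDHI DMA hooks with a per-host tmio_mmc_dma_ops table that a backend hands to tmio_mmc_host_probe(). A minimal sketch of how a backend might wire this up; the my_dma_*() callbacks are hypothetical placeholders, only the struct fields and the probe signature come from the diff above:

	static const struct tmio_mmc_dma_ops my_dma_ops = {
		.start   = my_dma_start,	/* kick off a prepared transfer */
		.enable  = my_dma_enable,	/* switch DMA on/off in hardware */
		.request = my_dma_request,	/* acquire channels at probe time */
		.release = my_dma_release,
		.abort   = my_dma_abort,
	};

	/* Passing NULL instead leaves the core in PIO-only operation. */
	ret = tmio_mmc_host_probe(host, pdata, &my_dma_ops);
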
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_core.c
similarity index 89%
rename from drivers/mmc/host/tmio_mmc_pio.c
rename to drivers/mmc/host/tmio_mmc_core.c
index a2d92f1..82b80d4 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1,8 +1,11 @@
 /*
- * linux/drivers/mmc/host/tmio_mmc_pio.c
+ * Driver for the MMC / SD / SDIO IP found in:
  *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
+ * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
+ *
+ * Copyright (C) 2015-17 Renesas Electronics Corporation
+ * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2017 Horms Solutions, Simon Horman
  * Copyright (C) 2011 Guennadi Liakhovetski
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
@@ -11,10 +14,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * Driver for the MMC / SD / SDIO IP found in:
- *
- * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
- *
  * This driver draws mainly on scattered spec sheets, Reverse engineering
  * of the toshiba e800  SD driver and some parts of the 2.4 ASIC3 driver (4 bit
  * support). (Further 4 bit support from a later datasheet).
@@ -52,17 +51,55 @@
 
 #include "tmio_mmc.h"
 
+static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+				      struct mmc_data *data)
+{
+	if (host->dma_ops)
+		host->dma_ops->start(host, data);
+}
+
+static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+	if (host->dma_ops)
+		host->dma_ops->enable(host, enable);
+}
+
+static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+					struct tmio_mmc_data *pdata)
+{
+	if (host->dma_ops) {
+		host->dma_ops->request(host, pdata);
+	} else {
+		host->chan_tx = NULL;
+		host->chan_rx = NULL;
+	}
+}
+
+static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+	if (host->dma_ops)
+		host->dma_ops->release(host);
+}
+
+static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+	if (host->dma_ops)
+		host->dma_ops->abort(host);
+}
+
 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
 	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
 }
+EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);
 
 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
 	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
 }
+EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);
 
 static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
@@ -90,16 +127,17 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
 
 #define STATUS_TO_TEXT(a, status, i) \
 	do { \
-		if (status & TMIO_STAT_##a) { \
-			if (i++) \
-				printk(" | "); \
-			printk(#a); \
+		if ((status) & TMIO_STAT_##a) { \
+			if ((i)++) \
+				printk(KERN_DEBUG " | "); \
+			printk(KERN_DEBUG #a); \
 		} \
 	} while (0)
 
 static void pr_debug_status(u32 status)
 {
 	int i = 0;
+
 	pr_debug("status: %08x = ", status);
 	STATUS_TO_TEXT(CARD_REMOVE, status, i);
 	STATUS_TO_TEXT(CARD_INSERT, status, i);
@@ -140,8 +178,7 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 		pm_runtime_get_sync(mmc_dev(mmc));
 
 		host->sdio_irq_enabled = true;
-		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
-					~TMIO_SDIO_STAT_IOIRQ;
+		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;
 
 		/* Clear obsolete interrupts before enabling */
 		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
@@ -185,7 +222,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
 }
 
 static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
-				unsigned int new_clock)
+			       unsigned int new_clock)
 {
 	u32 clk = 0, clock;
 
@@ -229,6 +266,12 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
 	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
 		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
 	msleep(10);
+
+	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+	}
+
 }
 
 static void tmio_mmc_reset_work(struct work_struct *work)
@@ -246,16 +289,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
 	 * us, so, have to check for IS_ERR(host->mrq)
 	 */
-	if (IS_ERR_OR_NULL(mrq)
-	    || time_is_after_jiffies(host->last_req_ts +
-		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
+	if (IS_ERR_OR_NULL(mrq) ||
+	    time_is_after_jiffies(host->last_req_ts +
+				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
 		spin_unlock_irqrestore(&host->lock, flags);
 		return;
 	}
 
 	dev_warn(&host->pdev->dev,
-		"timeout waiting for hardware interrupt (CMD%u)\n",
-		mrq->cmd->opcode);
+		 "timeout waiting for hardware interrupt (CMD%u)\n",
+		 mrq->cmd->opcode);
 
 	if (host->data)
 		host->data->error = -ETIMEDOUT;
@@ -279,45 +322,6 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 	mmc_request_done(host->mmc, mrq);
 }
 
-/* called with host->lock held, interrupts disabled */
-static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
-{
-	struct mmc_request *mrq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&host->lock, flags);
-
-	mrq = host->mrq;
-	if (IS_ERR_OR_NULL(mrq)) {
-		spin_unlock_irqrestore(&host->lock, flags);
-		return;
-	}
-
-	host->cmd = NULL;
-	host->data = NULL;
-	host->force_pio = false;
-
-	cancel_delayed_work(&host->delayed_reset_work);
-
-	host->mrq = NULL;
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	if (mrq->cmd->error || (mrq->data && mrq->data->error))
-		tmio_mmc_abort_dma(host);
-
-	if (host->check_scc_error)
-		host->check_scc_error(host);
-
-	mmc_request_done(host->mmc, mrq);
-}
-
-static void tmio_mmc_done_work(struct work_struct *work)
-{
-	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
-						  done);
-	tmio_mmc_finish_request(host);
-}
-
 /* These are the bitmasks the tmio chip requires to implement the MMC response
  * types. Note that R1 and R6 are the same in this scheme. */
 #define APP_CMD        0x0040
@@ -332,7 +336,8 @@ static void tmio_mmc_done_work(struct work_struct *work)
 #define SECURITY_CMD   0x4000
 #define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
 
-static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
+static int tmio_mmc_start_command(struct tmio_mmc_host *host,
+				  struct mmc_command *cmd)
 {
 	struct mmc_data *data = host->data;
 	int c = cmd->opcode;
@@ -371,11 +376,11 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
 			c |= TRANSFER_MULTI;
 
 			/*
-			 * Disable auto CMD12 at IO_RW_EXTENDED when
-			 * multiple block transfer
+			 * Disable auto CMD12 at IO_RW_EXTENDED and
+			 * SET_BLOCK_COUNT when doing multiple block transfer
 			 */
 			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
-			    (cmd->opcode == SD_IO_RW_EXTENDED))
+			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
 				c |= NO_CMD12_ISSUE;
 		}
 		if (data->flags & MMC_DATA_READ)
@@ -497,8 +502,6 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
 	if (host->sg_off == host->sg_ptr->length)
 		tmio_mmc_next_sg(host);
-
-	return;
 }
 
 static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
@@ -506,6 +509,7 @@ static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
 	if (host->sg_ptr == &host->bounce_sg) {
 		unsigned long flags;
 		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+
 		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
 		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
 	}
@@ -552,7 +556,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 			host->mrq);
 	}
 
-	if (stop) {
+	if (stop && !host->mrq->sbc) {
 		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
 			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
 				stop->opcode, stop->arg);
@@ -565,10 +569,12 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 
 	schedule_work(&host->done);
 }
+EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 {
 	struct mmc_data *data;
+
 	spin_lock(&host->lock);
 	data = host->data;
 
@@ -613,8 +619,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 	spin_unlock(&host->lock);
 }
 
-static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
-	unsigned int stat)
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
 {
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
@@ -675,7 +680,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 }
 
 static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
-				      int ireg, int status)
+				       int ireg, int status)
 {
 	struct mmc_host *mmc = host->mmc;
 
@@ -693,14 +698,13 @@ static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
 	return false;
 }
 
-static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
-				 int ireg, int status)
+static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
+				  int status)
 {
 	/* Command completion */
 	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
-		tmio_mmc_ack_mmc_irqs(host,
-			     TMIO_STAT_CMDRESPEND |
-			     TMIO_STAT_CMDTIMEOUT);
+		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
+				      TMIO_STAT_CMDTIMEOUT);
 		tmio_mmc_cmd_irq(host, status);
 		return true;
 	}
@@ -768,10 +772,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
 
 	return IRQ_HANDLED;
 }
-EXPORT_SYMBOL(tmio_mmc_irq);
+EXPORT_SYMBOL_GPL(tmio_mmc_irq);
 
 static int tmio_mmc_start_data(struct tmio_mmc_host *host,
-	struct mmc_data *data)
+			       struct mmc_data *data)
 {
 	struct tmio_mmc_data *pdata = host->pdata;
 
@@ -826,7 +830,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
 	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
 		dev_warn_once(&host->pdev->dev,
-		      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
+			"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
 		goto out;
 	}
 
@@ -857,12 +861,43 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	return ret;
 }
 
+static void tmio_process_mrq(struct tmio_mmc_host *host,
+			     struct mmc_request *mrq)
+{
+	struct mmc_command *cmd;
+	int ret;
+
+	if (mrq->sbc && host->cmd != mrq->sbc) {
+		cmd = mrq->sbc;
+	} else {
+		cmd = mrq->cmd;
+		if (mrq->data) {
+			ret = tmio_mmc_start_data(host, mrq->data);
+			if (ret)
+				goto fail;
+		}
+	}
+
+	ret = tmio_mmc_start_command(host, cmd);
+	if (ret)
+		goto fail;
+
+	schedule_delayed_work(&host->delayed_reset_work,
+			      msecs_to_jiffies(CMDREQ_TIMEOUT));
+	return;
+
+fail:
+	host->force_pio = false;
+	host->mrq = NULL;
+	mrq->cmd->error = ret;
+	mmc_request_done(host->mmc, mrq);
+}
+
 /* Process requests from the MMC layer */
 static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 	unsigned long flags;
-	int ret;
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -882,24 +917,54 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	if (mrq->data) {
-		ret = tmio_mmc_start_data(host, mrq->data);
-		if (ret)
-			goto fail;
-	}
+	tmio_process_mrq(host, mrq);
+}
 
-	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret) {
-		schedule_delayed_work(&host->delayed_reset_work,
-				      msecs_to_jiffies(CMDREQ_TIMEOUT));
+static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
+{
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	mrq = host->mrq;
+	if (IS_ERR_OR_NULL(mrq)) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
 	}
 
-fail:
-	host->force_pio = false;
-	host->mrq = NULL;
-	mrq->cmd->error = ret;
-	mmc_request_done(mmc, mrq);
+	/* If not SET_BLOCK_COUNT, clear old data */
+	if (host->cmd != mrq->sbc) {
+		host->cmd = NULL;
+		host->data = NULL;
+		host->force_pio = false;
+		host->mrq = NULL;
+	}
+
+	cancel_delayed_work(&host->delayed_reset_work);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (mrq->cmd->error || (mrq->data && mrq->data->error))
+		tmio_mmc_abort_dma(host);
+
+	if (host->check_scc_error)
+		host->check_scc_error(host);
+
+	/* If SET_BLOCK_COUNT, continue with main command */
+	if (host->mrq) {
+		tmio_process_mrq(host, mrq);
+		return;
+	}
+
+	mmc_request_done(host->mmc, mrq);
+}
+
+static void tmio_mmc_done_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  done);
+	tmio_mmc_finish_request(host);
 }
 
 static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
@@ -965,7 +1030,7 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
 }
 
 static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
-				unsigned char bus_width)
+				   unsigned char bus_width)
 {
 	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
 				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
@@ -1005,7 +1070,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			dev_dbg(dev,
 				"%s.%d: CMD%u active since %lu, now %lu!\n",
 				current->comm, task_pid_nr(current),
-				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
+				host->mrq->cmd->opcode, host->last_req_ts,
+				jiffies);
 		}
 		spin_unlock_irqrestore(&host->lock, flags);
 
@@ -1052,6 +1118,7 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 	struct tmio_mmc_data *pdata = host->pdata;
 	int ret = mmc_gpio_get_ro(mmc);
+
 	if (ret >= 0)
 		return ret;
 
@@ -1108,6 +1175,7 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
 			      struct tmio_mmc_data *pdata)
 {
 	const struct device_node *np = pdev->dev.of_node;
+
 	if (!np)
 		return;
 
@@ -1131,16 +1199,17 @@ tmio_mmc_host_alloc(struct platform_device *pdev)
 
 	return host;
 }
-EXPORT_SYMBOL(tmio_mmc_host_alloc);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
 
 void tmio_mmc_host_free(struct tmio_mmc_host *host)
 {
 	mmc_free_host(host->mmc);
 }
-EXPORT_SYMBOL(tmio_mmc_host_free);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
 
 int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
-			struct tmio_mmc_data *pdata)
+			struct tmio_mmc_data *pdata,
+			const struct tmio_mmc_dma_ops *dma_ops)
 {
 	struct platform_device *pdev = _host->pdev;
 	struct mmc_host *mmc = _host->mmc;
@@ -1177,7 +1246,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 		return -ENOMEM;
 
 	tmio_mmc_ops.card_busy = _host->card_busy;
-	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
+	tmio_mmc_ops.start_signal_voltage_switch =
+		_host->start_signal_voltage_switch;
 	mmc->ops = &tmio_mmc_ops;
 
 	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
@@ -1221,6 +1291,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 	if (_host->native_hotplug)
 		pm_runtime_get_noresume(&pdev->dev);
 
+	_host->sdio_irq_enabled = false;
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
+
 	tmio_mmc_clk_stop(_host);
 	tmio_mmc_reset(_host);
 
@@ -1237,13 +1311,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 
 	_host->sdcard_irq_mask &= ~irq_mask;
 
-	_host->sdio_irq_enabled = false;
-	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
-		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
-		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
-		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0001);
-	}
-
 	spin_lock_init(&_host->lock);
 	mutex_init(&_host->ios_lock);
 
@@ -1252,6 +1319,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 	INIT_WORK(&_host->done, tmio_mmc_done_work);
 
 	/* See if we also get DMA */
+	_host->dma_ops = dma_ops;
 	tmio_mmc_request_dma(_host, pdata);
 
 	pm_runtime_set_active(&pdev->dev);
@@ -1278,7 +1346,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 
 	return 0;
 }
-EXPORT_SYMBOL(tmio_mmc_host_probe);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
 
 void tmio_mmc_host_remove(struct tmio_mmc_host *host)
 {
@@ -1303,7 +1371,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
 
 	tmio_mmc_clk_disable(host);
 }
-EXPORT_SYMBOL(tmio_mmc_host_remove);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
 
 #ifdef CONFIG_PM
 int tmio_mmc_host_runtime_suspend(struct device *dev)
@@ -1320,7 +1388,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
 
 	return 0;
 }
-EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
 
 static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
 {
@@ -1345,7 +1413,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
 
 	return 0;
 }
-EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
+EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
 #endif
 
 MODULE_LICENSE("GPL v2");
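With the request path now split into tmio_process_mrq() and the relocated tmio_mmc_finish_request(), requests that carry a SET_BLOCK_COUNT command are handled in two passes. A descriptive sketch of the ordering implemented above (not additional kernel code):

	/*
	 * tmio_mmc_request(mrq)
	 *   -> tmio_process_mrq(): mrq->sbc set and not yet issued,
	 *      so CMD23 goes out first; data setup is deferred.
	 * CMD23 completes -> done work -> tmio_mmc_finish_request():
	 *   host->mrq is still set, so tmio_process_mrq() runs again,
	 *   starts the data transfer and issues the main command with
	 *   auto CMD12 suppressed (NO_CMD12_ISSUE).
	 * Main command/data complete -> tmio_mmc_finish_request():
	 *   host->mrq is cleared and mmc_request_done() is called.
	 */
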
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
deleted file mode 100644
index e2093db..0000000
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * linux/drivers/mmc/tmio_mmc_dma.c
- *
- * Copyright (C) 2010-2011 Guennadi Liakhovetski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * DMA function for TMIO MMC implementations
- */
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-
-#include "tmio_mmc.h"
-
-#define TMIO_MMC_MIN_DMA_LEN 8
-
-void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
-{
-	if (!host->chan_tx || !host->chan_rx)
-		return;
-
-	if (host->dma->enable)
-		host->dma->enable(host, enable);
-}
-
-void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
-{
-	tmio_mmc_enable_dma(host, false);
-
-	if (host->chan_rx)
-		dmaengine_terminate_all(host->chan_rx);
-	if (host->chan_tx)
-		dmaengine_terminate_all(host->chan_tx);
-
-	tmio_mmc_enable_dma(host, true);
-}
-
-static void tmio_mmc_dma_callback(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	spin_lock_irq(&host->lock);
-
-	if (!host->data)
-		goto out;
-
-	if (host->data->flags & MMC_DATA_READ)
-		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_FROM_DEVICE);
-	else
-		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_TO_DEVICE);
-
-	spin_unlock_irq(&host->lock);
-
-	wait_for_completion(&host->dma_dataend);
-
-	spin_lock_irq(&host->lock);
-	tmio_mmc_do_data_irq(host);
-out:
-	spin_unlock_irq(&host->lock);
-}
-
-static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
-{
-	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
-	struct dma_async_tx_descriptor *desc = NULL;
-	struct dma_chan *chan = host->chan_rx;
-	dma_cookie_t cookie;
-	int ret, i;
-	bool aligned = true, multiple = true;
-	unsigned int align = (1 << host->pdata->alignment_shift) - 1;
-
-	for_each_sg(sg, sg_tmp, host->sg_len, i) {
-		if (sg_tmp->offset & align)
-			aligned = false;
-		if (sg_tmp->length & align) {
-			multiple = false;
-			break;
-		}
-	}
-
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
-			  (align & PAGE_MASK))) || !multiple) {
-		ret = -EINVAL;
-		goto pio;
-	}
-
-	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
-		host->force_pio = true;
-		return;
-	}
-
-	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
-
-	/* The only sg element can be unaligned, use our bounce buffer then */
-	if (!aligned) {
-		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
-		host->sg_ptr = &host->bounce_sg;
-		sg = host->sg_ptr;
-	}
-
-	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
-	if (ret > 0)
-		desc = dmaengine_prep_slave_sg(chan, sg, ret,
-			DMA_DEV_TO_MEM, DMA_CTRL_ACK);
-
-	if (desc) {
-		reinit_completion(&host->dma_dataend);
-		desc->callback = tmio_mmc_dma_callback;
-		desc->callback_param = host;
-
-		cookie = dmaengine_submit(desc);
-		if (cookie < 0) {
-			desc = NULL;
-			ret = cookie;
-		}
-	}
-pio:
-	if (!desc) {
-		/* DMA failed, fall back to PIO */
-		tmio_mmc_enable_dma(host, false);
-		if (ret >= 0)
-			ret = -EIO;
-		host->chan_rx = NULL;
-		dma_release_channel(chan);
-		/* Free the Tx channel too */
-		chan = host->chan_tx;
-		if (chan) {
-			host->chan_tx = NULL;
-			dma_release_channel(chan);
-		}
-		dev_warn(&host->pdev->dev,
-			 "DMA failed: %d, falling back to PIO\n", ret);
-	}
-}
-
-static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
-{
-	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
-	struct dma_async_tx_descriptor *desc = NULL;
-	struct dma_chan *chan = host->chan_tx;
-	dma_cookie_t cookie;
-	int ret, i;
-	bool aligned = true, multiple = true;
-	unsigned int align = (1 << host->pdata->alignment_shift) - 1;
-
-	for_each_sg(sg, sg_tmp, host->sg_len, i) {
-		if (sg_tmp->offset & align)
-			aligned = false;
-		if (sg_tmp->length & align) {
-			multiple = false;
-			break;
-		}
-	}
-
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
-			  (align & PAGE_MASK))) || !multiple) {
-		ret = -EINVAL;
-		goto pio;
-	}
-
-	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
-		host->force_pio = true;
-		return;
-	}
-
-	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
-
-	/* The only sg element can be unaligned, use our bounce buffer then */
-	if (!aligned) {
-		unsigned long flags;
-		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
-		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
-		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
-		host->sg_ptr = &host->bounce_sg;
-		sg = host->sg_ptr;
-	}
-
-	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
-	if (ret > 0)
-		desc = dmaengine_prep_slave_sg(chan, sg, ret,
-			DMA_MEM_TO_DEV, DMA_CTRL_ACK);
-
-	if (desc) {
-		reinit_completion(&host->dma_dataend);
-		desc->callback = tmio_mmc_dma_callback;
-		desc->callback_param = host;
-
-		cookie = dmaengine_submit(desc);
-		if (cookie < 0) {
-			desc = NULL;
-			ret = cookie;
-		}
-	}
-pio:
-	if (!desc) {
-		/* DMA failed, fall back to PIO */
-		tmio_mmc_enable_dma(host, false);
-		if (ret >= 0)
-			ret = -EIO;
-		host->chan_tx = NULL;
-		dma_release_channel(chan);
-		/* Free the Rx channel too */
-		chan = host->chan_rx;
-		if (chan) {
-			host->chan_rx = NULL;
-			dma_release_channel(chan);
-		}
-		dev_warn(&host->pdev->dev,
-			 "DMA failed: %d, falling back to PIO\n", ret);
-	}
-}
-
-void tmio_mmc_start_dma(struct tmio_mmc_host *host,
-			       struct mmc_data *data)
-{
-	if (data->flags & MMC_DATA_READ) {
-		if (host->chan_rx)
-			tmio_mmc_start_dma_rx(host);
-	} else {
-		if (host->chan_tx)
-			tmio_mmc_start_dma_tx(host);
-	}
-}
-
-static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
-{
-	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = NULL;
-
-	spin_lock_irq(&host->lock);
-
-	if (host && host->data) {
-		if (host->data->flags & MMC_DATA_READ)
-			chan = host->chan_rx;
-		else
-			chan = host->chan_tx;
-	}
-
-	spin_unlock_irq(&host->lock);
-
-	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-
-	if (chan)
-		dma_async_issue_pending(chan);
-}
-
-void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
-{
-	/* We can only either use DMA for both Tx and Rx or not use it at all */
-	if (!host->dma || (!host->pdev->dev.of_node &&
-		(!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
-		return;
-
-	if (!host->chan_tx && !host->chan_rx) {
-		struct resource *res = platform_get_resource(host->pdev,
-							     IORESOURCE_MEM, 0);
-		struct dma_slave_config cfg = {};
-		dma_cap_mask_t mask;
-		int ret;
-
-		if (!res)
-			return;
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-
-		host->chan_tx = dma_request_slave_channel_compat(mask,
-					host->dma->filter, pdata->chan_priv_tx,
-					&host->pdev->dev, "tx");
-		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
-			host->chan_tx);
-
-		if (!host->chan_tx)
-			return;
-
-		cfg.direction = DMA_MEM_TO_DEV;
-		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
-		cfg.dst_addr_width = host->dma->dma_buswidth;
-		if (!cfg.dst_addr_width)
-			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-		cfg.src_addr = 0;
-		ret = dmaengine_slave_config(host->chan_tx, &cfg);
-		if (ret < 0)
-			goto ecfgtx;
-
-		host->chan_rx = dma_request_slave_channel_compat(mask,
-					host->dma->filter, pdata->chan_priv_rx,
-					&host->pdev->dev, "rx");
-		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
-			host->chan_rx);
-
-		if (!host->chan_rx)
-			goto ereqrx;
-
-		cfg.direction = DMA_DEV_TO_MEM;
-		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
-		cfg.src_addr_width = host->dma->dma_buswidth;
-		if (!cfg.src_addr_width)
-			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-		cfg.dst_addr = 0;
-		ret = dmaengine_slave_config(host->chan_rx, &cfg);
-		if (ret < 0)
-			goto ecfgrx;
-
-		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (!host->bounce_buf)
-			goto ebouncebuf;
-
-		init_completion(&host->dma_dataend);
-		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
-	}
-
-	tmio_mmc_enable_dma(host, true);
-
-	return;
-
-ebouncebuf:
-ecfgrx:
-	dma_release_channel(host->chan_rx);
-	host->chan_rx = NULL;
-ereqrx:
-ecfgtx:
-	dma_release_channel(host->chan_tx);
-	host->chan_tx = NULL;
-}
-
-void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-	if (host->chan_tx) {
-		struct dma_chan *chan = host->chan_tx;
-		host->chan_tx = NULL;
-		dma_release_channel(chan);
-	}
-	if (host->chan_rx) {
-		struct dma_chan *chan = host->chan_rx;
-		host->chan_rx = NULL;
-		dma_release_channel(chan);
-	}
-	if (host->bounce_buf) {
-		free_pages((unsigned long)host->bounce_buf, 0);
-		host->bounce_buf = NULL;
-	}
-}
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index c061e7c..fbeea1a 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2107,7 +2107,8 @@ static int vub300_probe(struct usb_interface *interface,
 	usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
 		   sizeof(serial_number));
 	dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
-		 udev->descriptor.idVendor, udev->descriptor.idProduct,
+		 le16_to_cpu(udev->descriptor.idVendor),
+		 le16_to_cpu(udev->descriptor.idProduct),
 		 manufacturer, product, serial_number);
 	command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!command_out_urb) {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 6b8d5cd..f336a9b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -73,7 +73,7 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
 }
 
 
-static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
 			       struct mtd_blktrans_dev *dev,
 			       struct request *req)
 {
@@ -84,33 +84,38 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 	buf = bio_data(req->bio);
 
-	if (req_op(req) == REQ_OP_FLUSH)
-		return tr->flush(dev);
+	if (req_op(req) == REQ_OP_FLUSH) {
+		if (tr->flush(dev))
+			return BLK_STS_IOERR;
+		return BLK_STS_OK;
+	}
 
 	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
 	    get_capacity(req->rq_disk))
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	switch (req_op(req)) {
 	case REQ_OP_DISCARD:
-		return tr->discard(dev, block, nsect);
+		if (tr->discard(dev, block, nsect))
+			return BLK_STS_IOERR;
+		return BLK_STS_OK;
 	case REQ_OP_READ:
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->readsect(dev, block, buf))
-				return -EIO;
+				return BLK_STS_IOERR;
 		rq_flush_dcache_pages(req);
-		return 0;
+		return BLK_STS_OK;
 	case REQ_OP_WRITE:
 		if (!tr->writesect)
-			return -EIO;
+			return BLK_STS_IOERR;
 
 		rq_flush_dcache_pages(req);
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->writesect(dev, block, buf))
-				return -EIO;
-		return 0;
+				return BLK_STS_IOERR;
+		return BLK_STS_OK;
 	default:
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -132,7 +136,7 @@ static void mtd_blktrans_work(struct work_struct *work)
 	spin_lock_irq(rq->queue_lock);
 
 	while (1) {
-		int res;
+		blk_status_t res;
 
 		dev->bg_stop = false;
 		if (!req && !(req = blk_fetch_request(rq))) {
@@ -178,7 +182,7 @@ static void mtd_blktrans_request(struct request_queue *rq)
 
 	if (!dev)
 		while ((req = blk_fetch_request(rq)) != NULL)
-			__blk_end_request_all(req, -ENODEV);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 	else
 		queue_work(dev->wq, &dev->work);
 }
@@ -413,6 +417,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	new->rq->queuedata = new;
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 
+	blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
 
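do_blktrans_request() and its callers now speak blk_status_t rather than negative errnos, so each outcome is mapped onto a BLK_STS_* code. Where a lower layer still hands back an errno, errno_to_blk_status() does the mapping, as the ubiblock hunk further down uses. A minimal sketch of the pattern; my_dev and my_do_transfer() are hypothetical placeholders:

	static blk_status_t handle_rw(struct my_dev *dev, struct request *req)
	{
		int err = my_do_transfer(dev, req);	/* returns 0 or -Exxx */

		return err ? errno_to_blk_status(err) : BLK_STS_OK;
	}
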
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d474378..bf8486c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
 	return 0;
 }
 
-const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
 	.ecc = nand_ooblayout_ecc_lp_hamming,
 	.free = nand_ooblayout_free_lp_hamming,
 };
@@ -502,10 +502,12 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
  * specify how to write bad block markers to OOB (chip->block_markbad).
  *
  * We try operations in the following order:
+ *
  *  (1) erase the affected block, to allow OOB marker to be written cleanly
  *  (2) write bad block marker to OOB area of affected block (unless flag
  *      NAND_BBT_NO_OOB_BBM is present)
  *  (3) update the BBT
+ *
  * Note that we retain the first error encountered in (2) or (3), finish the
  * procedures, and dump the error in the end.
 */
@@ -1219,9 +1221,10 @@ int nand_reset(struct nand_chip *chip, int chipnr)
  * @mtd: mtd info
  * @ofs: offset to start unlock from
  * @len: length to unlock
- * @invert: when = 0, unlock the range of blocks within the lower and
+ * @invert:
+ *        - when = 0, unlock the range of blocks within the lower and
  *                    upper boundary address
- *          when = 1, unlock the range of blocks outside the boundaries
+ *        - when = 1, unlock the range of blocks outside the boundaries
  *                    of the lower and upper boundary address
  *
  * Returns unlock status.
@@ -4361,7 +4364,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 	/* Initialize the ->data_interface field. */
 	ret = nand_init_data_interface(chip);
 	if (ret)
-		return ret;
+		goto err_nand_init;
 
 	/*
 	 * Setup the data interface correctly on the chip and controller side.
@@ -4373,7 +4376,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 	 */
 	ret = nand_setup_data_interface(chip);
 	if (ret)
-		return ret;
+		goto err_nand_init;
 
 	nand_maf_id = chip->id.data[0];
 	nand_dev_id = chip->id.data[1];
@@ -4404,6 +4407,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 	mtd->size = i * chip->chipsize;
 
 	return 0;
+
+err_nand_init:
+	/* Free manufacturer priv data. */
+	nand_manufacturer_cleanup(chip);
+
+	return ret;
 }
 EXPORT_SYMBOL(nand_scan_ident);
 
@@ -4574,18 +4583,23 @@ int nand_scan_tail(struct mtd_info *mtd)
 
 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
 	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
-		return -EINVAL;
+		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+		ret = -EINVAL;
+		goto err_ident;
+	}
 
 	if (invalid_ecc_page_accessors(chip)) {
 		pr_err("Invalid ECC page accessors setup\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_ident;
 	}
 
 	if (!(chip->options & NAND_OWN_BUFFERS)) {
 		nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
-		if (!nbuf)
-			return -ENOMEM;
+		if (!nbuf) {
+			ret = -ENOMEM;
+			goto err_ident;
+		}
 
 		nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
 		if (!nbuf->ecccalc) {
@@ -4608,8 +4622,10 @@ int nand_scan_tail(struct mtd_info *mtd)
 
 		chip->buffers = nbuf;
 	} else {
-		if (!chip->buffers)
-			return -ENOMEM;
+		if (!chip->buffers) {
+			ret = -ENOMEM;
+			goto err_ident;
+		}
 	}
 
 	/* Set the internal oob buffer location, just after the page data */
@@ -4842,7 +4858,11 @@ int nand_scan_tail(struct mtd_info *mtd)
 		return 0;
 
 	/* Build bad block table */
-	return chip->scan_bbt(mtd);
+	ret = chip->scan_bbt(mtd);
+	if (ret)
+		goto err_free;
+	return 0;
+
 err_free:
 	if (nbuf) {
 		kfree(nbuf->databuf);
@@ -4850,6 +4870,13 @@ int nand_scan_tail(struct mtd_info *mtd)
 		kfree(nbuf->ecccalc);
 		kfree(nbuf);
 	}
+
+err_ident:
+	/* Clean up nand_scan_ident(). */
+
+	/* Free manufacturer priv data. */
+	nand_manufacturer_cleanup(chip);
+
 	return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 9d5ca0e..92e2cf8 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  *
  */
-#include <linux/module.h>
 #include <linux/mtd/nand.h>
 #include <linux/sizes.h>
 
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index 9cfc403..1e07559 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
 			case 7:
 				chip->ecc_strength_ds = 60;
 				break;
+			default:
+				WARN(1, "Could not decode ECC info");
+				chip->ecc_step_ds = 0;
 			}
 		}
 	} else {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 05b6e10..49b286c 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -55,10 +55,10 @@
  * byte 1 for other packets in the page (PKT_N, for N > 0)
  * ERR_COUNT_PKT_N is the max error count over all but the first packet.
  */
-#define DECODE_OK_PKT_0(v)	((v) & BIT(7))
-#define DECODE_OK_PKT_N(v)	((v) & BIT(15))
 #define ERR_COUNT_PKT_0(v)	(((v) >> 0) & 0x3f)
 #define ERR_COUNT_PKT_N(v)	(((v) >> 8) & 0x3f)
+#define DECODE_FAIL_PKT_0(v)	(((v) & BIT(7)) == 0)
+#define DECODE_FAIL_PKT_N(v)	(((v) & BIT(15)) == 0)
 
 /* Offsets relative to pbus_base */
 #define PBUS_CS_CTRL	0x83c
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
 						  chip->ecc.strength);
 		if (res < 0)
 			mtd->ecc_stats.failed++;
+		else
+			mtd->ecc_stats.corrected += res;
 
 		bitflips = max(res, bitflips);
 		buf += pkt_size;
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
 	return bitflips;
 }
 
-static int decode_error_report(struct tango_nfc *nfc)
+static int decode_error_report(struct nand_chip *chip)
 {
 	u32 status, res;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
 
 	status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
 	if (status & PAGE_IS_EMPTY)
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc)
 
 	res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
 
-	if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
-		return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+	if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res))
+		return -EBADMSG;
 
-	return -EBADMSG;
+	/* ERR_COUNT_PKT_N is max, not sum, but that's all we have */
+	mtd->ecc_stats.corrected +=
+		ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res);
+
+	return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
 }
 
 static void tango_dma_callback(void *arg)
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 	if (err)
 		return err;
 
-	res = decode_error_report(nfc);
+	res = decode_error_report(chip);
 	if (res < 0) {
 		chip->ecc.read_oob_raw(mtd, chip, page);
 		res = check_erased_page(chip, buf);
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = {
 	{ .compatible = "sigma,smp8758-nand" },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, tango_nand_ids);
 
 static struct platform_driver tango_nand_driver = {
 	.probe	= tango_nand_probe,
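A concrete illustration of the accounting caveat in the new comment above: if the error report reads ERR_COUNT_PKT_0 = 2 and ERR_COUNT_PKT_N = 5, decode_error_report() now adds 7 to ecc_stats.corrected and returns 5 as the page's maximum bitflip count. When several non-first packets each needed correction, the true corrected total is higher than 7, but the hardware only exposes the per-page maximum for those packets, so this is the closest available estimate.
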
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 5497e65..c3963f8 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -313,10 +313,10 @@ static void ubiblock_do_work(struct work_struct *work)
 	ret = ubiblock_read(pdu);
 	rq_flush_dcache_pages(req);
 
-	blk_mq_end_request(req, ret);
+	blk_mq_end_request(req, errno_to_blk_status(ret));
 }
 
-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 			     const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
@@ -327,9 +327,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 	case REQ_OP_READ:
 		ubi_sgl_init(&pdu->usgl);
 		queue_work(dev->wq, &pdu->work);
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	default:
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 }
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 93e5d25..d854521 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -104,23 +104,25 @@ DEFINE_MUTEX(ubi_devices_mutex);
 static DEFINE_SPINLOCK(ubi_devices_lock);
 
 /* "Show" method for files in '/<sysfs>/class/ubi/' */
-static ssize_t ubi_version_show(struct class *class,
-				struct class_attribute *attr, char *buf)
+/* UBI version attribute ('/<sysfs>/class/ubi/version') */
+static ssize_t version_show(struct class *class, struct class_attribute *attr,
+			    char *buf)
 {
 	return sprintf(buf, "%d\n", UBI_VERSION);
 }
+static CLASS_ATTR_RO(version);
 
-/* UBI version attribute ('/<sysfs>/class/ubi/version') */
-static struct class_attribute ubi_class_attrs[] = {
-	__ATTR(version, S_IRUGO, ubi_version_show, NULL),
-	__ATTR_NULL
+static struct attribute *ubi_class_attrs[] = {
+	&class_attr_version.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(ubi_class);
 
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class ubi_class = {
 	.name		= UBI_NAME_STR,
 	.owner		= THIS_MODULE,
-	.class_attrs	= ubi_class_attrs,
+	.class_groups	= ubi_class_groups,
 };
 
 static ssize_t dev_attribute_show(struct device *dev,
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
new file mode 100644
index 0000000..7c754a0
--- /dev/null
+++ b/drivers/mux/Kconfig
@@ -0,0 +1,59 @@
+#
+# Multiplexer devices
+#
+
+menuconfig MULTIPLEXER
+	tristate "Multiplexer subsystem"
+	help
+	  Multiplexer controller subsystem. Multiplexers are used in a
+	  variety of settings, and this subsystem abstracts their use
+	  so that the rest of the kernel sees a common interface. When
+	  multiple parallel multiplexers are controlled by one single
+	  multiplexer controller, this subsystem also coordinates the
+	  multiplexer accesses.
+
+	  To compile the subsystem as a module, choose M here: the module will
+	  be called mux-core.
+
+if MULTIPLEXER
+
+config MUX_ADG792A
+	tristate "Analog Devices ADG792A/ADG792G Multiplexers"
+	depends on I2C
+	help
+	  ADG792A and ADG792G Wide Bandwidth Triple 4:1 Multiplexers
+
+	  The driver supports both operating the three multiplexers in
+	  parallel and operating them independently.
+
+	  To compile the driver as a module, choose M here: the module will
+	  be called mux-adg792a.
+
+config MUX_GPIO
+	tristate "GPIO-controlled Multiplexer"
+	depends on GPIOLIB || COMPILE_TEST
+	help
+	  GPIO-controlled Multiplexer controller.
+
+	  The driver builds a single multiplexer controller using a number
+	  of gpio pins. For N pins, there will be 2^N possible multiplexer
+	  states. The GPIO pins can be connected (by the hardware) to several
+	  multiplexers, which in that case will be operated in parallel.
+
+	  To compile the driver as a module, choose M here: the module will
+	  be called mux-gpio.
+
+config MUX_MMIO
+	tristate "MMIO register bitfield-controlled Multiplexer"
+	depends on (OF && MFD_SYSCON) || COMPILE_TEST
+	help
+	  MMIO register bitfield-controlled Multiplexer controller.
+
+	  The driver builds multiplexer controllers for bitfields in a syscon
+	  register. For N bit wide bitfields, there will be 2^N possible
+	  multiplexer states.
+
+	  To compile the driver as a module, choose M here: the module will
+	  be called mux-mmio.
+
+endif
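The help texts above describe the subsystem from the provider side; consumers see a simple select/use/deselect interface in which the mux-control stays locked between the two calls. A minimal consumer-side sketch, assuming the API from <linux/mux/consumer.h> introduced together with this subsystem (not part of the Kconfig above):

	#include <linux/mux/consumer.h>

	struct mux_control *mux;
	int err;

	mux = devm_mux_control_get(dev, NULL);	/* single, unnamed mux-control */
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	err = mux_control_select(mux, 1);	/* blocks until state 1 is ours */
	if (err < 0)
		return err;

	/* ... access whatever sits behind mux state 1 ... */

	mux_control_deselect(mux);		/* unlock for other consumers */
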
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
new file mode 100644
index 0000000..6bac5b0
--- /dev/null
+++ b/drivers/mux/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for multiplexer devices.
+#
+
+obj-$(CONFIG_MULTIPLEXER)	+= mux-core.o
+obj-$(CONFIG_MUX_ADG792A)	+= mux-adg792a.o
+obj-$(CONFIG_MUX_GPIO)		+= mux-gpio.o
+obj-$(CONFIG_MUX_MMIO)		+= mux-mmio.o
diff --git a/drivers/mux/mux-adg792a.c b/drivers/mux/mux-adg792a.c
new file mode 100644
index 0000000..12aa221
--- /dev/null
+++ b/drivers/mux/mux-adg792a.c
@@ -0,0 +1,157 @@
+/*
+ * Multiplexer driver for Analog Devices ADG792A/G Triple 4:1 mux
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/property.h>
+
+#define ADG792A_LDSW		BIT(0)
+#define ADG792A_RESETB		BIT(1)
+#define ADG792A_DISABLE(mux)	(0x50 | (mux))
+#define ADG792A_DISABLE_ALL	(0x5f)
+#define ADG792A_MUX(mux, state)	(0xc0 | (((mux) + 1) << 2) | (state))
+#define ADG792A_MUX_ALL(state)	(0xc0 | (state))
+
+static int adg792a_write_cmd(struct i2c_client *i2c, u8 cmd, int reset)
+{
+	u8 data = ADG792A_RESETB | ADG792A_LDSW;
+
+	/* ADG792A_RESETB is active low, the chip resets when it is zero. */
+	if (reset)
+		data &= ~ADG792A_RESETB;
+
+	return i2c_smbus_write_byte_data(i2c, cmd, data);
+}
+
+static int adg792a_set(struct mux_control *mux, int state)
+{
+	struct i2c_client *i2c = to_i2c_client(mux->chip->dev.parent);
+	u8 cmd;
+
+	if (mux->chip->controllers == 1) {
+		/* parallel mux controller operation */
+		if (state == MUX_IDLE_DISCONNECT)
+			cmd = ADG792A_DISABLE_ALL;
+		else
+			cmd = ADG792A_MUX_ALL(state);
+	} else {
+		unsigned int controller = mux_control_get_index(mux);
+
+		if (state == MUX_IDLE_DISCONNECT)
+			cmd = ADG792A_DISABLE(controller);
+		else
+			cmd = ADG792A_MUX(controller, state);
+	}
+
+	return adg792a_write_cmd(i2c, cmd, 0);
+}
+
+static const struct mux_control_ops adg792a_ops = {
+	.set = adg792a_set,
+};
+
+static int adg792a_probe(struct i2c_client *i2c,
+			 const struct i2c_device_id *id)
+{
+	struct device *dev = &i2c->dev;
+	struct mux_chip *mux_chip;
+	s32 idle_state[3];
+	u32 cells;
+	int ret;
+	int i;
+
+	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	ret = device_property_read_u32(dev, "#mux-control-cells", &cells);
+	if (ret < 0)
+		return ret;
+	if (cells >= 2)
+		return -EINVAL;
+
+	mux_chip = devm_mux_chip_alloc(dev, cells ? 3 : 1, 0);
+	if (IS_ERR(mux_chip))
+		return PTR_ERR(mux_chip);
+
+	mux_chip->ops = &adg792a_ops;
+
+	ret = adg792a_write_cmd(i2c, ADG792A_DISABLE_ALL, 1);
+	if (ret < 0)
+		return ret;
+
+	ret = device_property_read_u32_array(dev, "idle-state",
+					     (u32 *)idle_state,
+					     mux_chip->controllers);
+	if (ret < 0) {
+		idle_state[0] = MUX_IDLE_AS_IS;
+		idle_state[1] = MUX_IDLE_AS_IS;
+		idle_state[2] = MUX_IDLE_AS_IS;
+	}
+
+	for (i = 0; i < mux_chip->controllers; ++i) {
+		struct mux_control *mux = &mux_chip->mux[i];
+
+		mux->states = 4;
+
+		switch (idle_state[i]) {
+		case MUX_IDLE_DISCONNECT:
+		case MUX_IDLE_AS_IS:
+		case 0 ... 4:
+			mux->idle_state = idle_state[i];
+			break;
+		default:
+			dev_err(dev, "invalid idle-state %d\n", idle_state[i]);
+			return -EINVAL;
+		}
+	}
+
+	ret = devm_mux_chip_register(dev, mux_chip);
+	if (ret < 0)
+		return ret;
+
+	if (cells)
+		dev_info(dev, "3x single pole quadruple throw muxes registered\n");
+	else
+		dev_info(dev, "triple pole quadruple throw mux registered\n");
+
+	return 0;
+}
+
+static const struct i2c_device_id adg792a_id[] = {
+	{ .name = "adg792a", },
+	{ .name = "adg792g", },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, adg792a_id);
+
+static const struct of_device_id adg792a_of_match[] = {
+	{ .compatible = "adi,adg792a", },
+	{ .compatible = "adi,adg792g", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, adg792a_of_match);
+
+static struct i2c_driver adg792a_driver = {
+	.driver		= {
+		.name		= "adg792a",
+		.of_match_table = of_match_ptr(adg792a_of_match),
+	},
+	.probe		= adg792a_probe,
+	.id_table	= adg792a_id,
+};
+module_i2c_driver(adg792a_driver);
+
+MODULE_DESCRIPTION("Analog Devices ADG792A/G Triple 4:1 mux driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
new file mode 100644
index 0000000..90b8995
--- /dev/null
+++ b/drivers/mux/mux-core.c
@@ -0,0 +1,547 @@
+/*
+ * Multiplexer subsystem
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "mux-core: " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/mux/driver.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+/*
+ * The idle-as-is "state" is not an actual state that may be selected, it
+ * only implies that the state should not be changed. So, use that state
+ * as indication that the cached state of the multiplexer is unknown.
+ */
+#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS
+
+static struct class mux_class = {
+	.name = "mux",
+	.owner = THIS_MODULE,
+};
+
+static DEFINE_IDA(mux_ida);
+
+static int __init mux_init(void)
+{
+	ida_init(&mux_ida);
+	return class_register(&mux_class);
+}
+
+static void __exit mux_exit(void)
+{
+	class_unregister(&mux_class);
+	ida_destroy(&mux_ida);
+}
+
+static void mux_chip_release(struct device *dev)
+{
+	struct mux_chip *mux_chip = to_mux_chip(dev);
+
+	ida_simple_remove(&mux_ida, mux_chip->id);
+	kfree(mux_chip);
+}
+
+static struct device_type mux_type = {
+	.name = "mux-chip",
+	.release = mux_chip_release,
+};
+
+/**
+ * mux_chip_alloc() - Allocate a mux-chip.
+ * @dev: The parent device implementing the mux interface.
+ * @controllers: The number of mux controllers to allocate for this chip.
+ * @sizeof_priv: Size of extra memory area for private use by the caller.
+ *
+ * After allocating the mux-chip with the desired number of mux controllers
+ * but before registering the chip, the mux driver is required to configure
+ * the number of valid mux states in the mux_chip->mux[N].states members and
+ * the desired idle state in the returned mux_chip->mux[N].idle_state members.
+ * The default idle state is MUX_IDLE_AS_IS. The mux driver also needs to
+ * provide a pointer to the operations struct in the mux_chip->ops member
+ * before registering the mux-chip with mux_chip_register.
+ *
+ * Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
+ */
+struct mux_chip *mux_chip_alloc(struct device *dev,
+				unsigned int controllers, size_t sizeof_priv)
+{
+	struct mux_chip *mux_chip;
+	int i;
+
+	if (WARN_ON(!dev || !controllers))
+		return ERR_PTR(-EINVAL);
+
+	mux_chip = kzalloc(sizeof(*mux_chip) +
+			   controllers * sizeof(*mux_chip->mux) +
+			   sizeof_priv, GFP_KERNEL);
+	if (!mux_chip)
+		return ERR_PTR(-ENOMEM);
+
+	mux_chip->mux = (struct mux_control *)(mux_chip + 1);
+	mux_chip->dev.class = &mux_class;
+	mux_chip->dev.type = &mux_type;
+	mux_chip->dev.parent = dev;
+	mux_chip->dev.of_node = dev->of_node;
+	dev_set_drvdata(&mux_chip->dev, mux_chip);
+
+	mux_chip->id = ida_simple_get(&mux_ida, 0, 0, GFP_KERNEL);
+	if (mux_chip->id < 0) {
+		int err = mux_chip->id;
+
+		pr_err("muxchipX failed to get a device id\n");
+		kfree(mux_chip);
+		return ERR_PTR(err);
+	}
+	dev_set_name(&mux_chip->dev, "muxchip%d", mux_chip->id);
+
+	mux_chip->controllers = controllers;
+	for (i = 0; i < controllers; ++i) {
+		struct mux_control *mux = &mux_chip->mux[i];
+
+		mux->chip = mux_chip;
+		sema_init(&mux->lock, 1);
+		mux->cached_state = MUX_CACHE_UNKNOWN;
+		mux->idle_state = MUX_IDLE_AS_IS;
+	}
+
+	device_initialize(&mux_chip->dev);
+
+	return mux_chip;
+}
+EXPORT_SYMBOL_GPL(mux_chip_alloc);
+
+static int mux_control_set(struct mux_control *mux, int state)
+{
+	int ret = mux->chip->ops->set(mux, state);
+
+	mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state;
+
+	return ret;
+}
+
+/**
+ * mux_chip_register() - Register a mux-chip, thus readying the controllers
+ *			 for use.
+ * @mux_chip: The mux-chip to register.
+ *
+ * Do not retry registration of the same mux-chip on failure. You should
+ * instead put it away with mux_chip_free() and allocate a new one, if you
+ * for some reason would like to retry registration.
+ *
+ * Return: Zero on success or a negative errno on error.
+ */
+int mux_chip_register(struct mux_chip *mux_chip)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < mux_chip->controllers; ++i) {
+		struct mux_control *mux = &mux_chip->mux[i];
+
+		if (mux->idle_state == mux->cached_state)
+			continue;
+
+		ret = mux_control_set(mux, mux->idle_state);
+		if (ret < 0) {
+			dev_err(&mux_chip->dev, "unable to set idle state\n");
+			return ret;
+		}
+	}
+
+	ret = device_add(&mux_chip->dev);
+	if (ret < 0)
+		dev_err(&mux_chip->dev,
+			"device_add failed in %s: %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mux_chip_register);
+
+/**
+ * mux_chip_unregister() - Take the mux-chip off-line.
+ * @mux_chip: The mux-chip to unregister.
+ *
+ * mux_chip_unregister() reverses the effects of mux_chip_register().
+ * But not completely, you should not try to call mux_chip_register()
+ * on a mux-chip that has been registered before.
+ */
+void mux_chip_unregister(struct mux_chip *mux_chip)
+{
+	device_del(&mux_chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_chip_unregister);
+
+/**
+ * mux_chip_free() - Free the mux-chip for good.
+ * @mux_chip: The mux-chip to free.
+ *
+ * mux_chip_free() reverses the effects of mux_chip_alloc().
+ */
+void mux_chip_free(struct mux_chip *mux_chip)
+{
+	if (!mux_chip)
+		return;
+
+	put_device(&mux_chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_chip_free);
+
+static void devm_mux_chip_release(struct device *dev, void *res)
+{
+	struct mux_chip *mux_chip = *(struct mux_chip **)res;
+
+	mux_chip_free(mux_chip);
+}
+
+/**
+ * devm_mux_chip_alloc() - Resource-managed version of mux_chip_alloc().
+ * @dev: The parent device implementing the mux interface.
+ * @controllers: The number of mux controllers to allocate for this chip.
+ * @sizeof_priv: Size of extra memory area for private use by the caller.
+ *
+ * See mux_chip_alloc() for more details.
+ *
+ * Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
+ */
+struct mux_chip *devm_mux_chip_alloc(struct device *dev,
+				     unsigned int controllers,
+				     size_t sizeof_priv)
+{
+	struct mux_chip **ptr, *mux_chip;
+
+	ptr = devres_alloc(devm_mux_chip_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	mux_chip = mux_chip_alloc(dev, controllers, sizeof_priv);
+	if (IS_ERR(mux_chip)) {
+		devres_free(ptr);
+		return mux_chip;
+	}
+
+	*ptr = mux_chip;
+	devres_add(dev, ptr);
+
+	return mux_chip;
+}
+EXPORT_SYMBOL_GPL(devm_mux_chip_alloc);
+
+static void devm_mux_chip_reg_release(struct device *dev, void *res)
+{
+	struct mux_chip *mux_chip = *(struct mux_chip **)res;
+
+	mux_chip_unregister(mux_chip);
+}
+
+/**
+ * devm_mux_chip_register() - Resource-managed version of mux_chip_register().
+ * @dev: The parent device implementing the mux interface.
+ * @mux_chip: The mux-chip to register.
+ *
+ * See mux_chip_register() for more details.
+ *
+ * Return: Zero on success or a negative errno on error.
+ */
+int devm_mux_chip_register(struct device *dev,
+			   struct mux_chip *mux_chip)
+{
+	struct mux_chip **ptr;
+	int res;
+
+	ptr = devres_alloc(devm_mux_chip_reg_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	res = mux_chip_register(mux_chip);
+	if (res) {
+		devres_free(ptr);
+		return res;
+	}
+
+	*ptr = mux_chip;
+	devres_add(dev, ptr);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(devm_mux_chip_register);
+
+/**
+ * mux_control_states() - Query the number of multiplexer states.
+ * @mux: The mux-control to query.
+ *
+ * Return: The number of multiplexer states.
+ */
+unsigned int mux_control_states(struct mux_control *mux)
+{
+	return mux->states;
+}
+EXPORT_SYMBOL_GPL(mux_control_states);
+
+/*
+ * The mux->lock must be down when calling this function.
+ */
+static int __mux_control_select(struct mux_control *mux, int state)
+{
+	int ret;
+
+	if (WARN_ON(state < 0 || state >= mux->states))
+		return -EINVAL;
+
+	if (mux->cached_state == state)
+		return 0;
+
+	ret = mux_control_set(mux, state);
+	if (ret >= 0)
+		return 0;
+
+	/* The mux update failed, try to revert if appropriate... */
+	if (mux->idle_state != MUX_IDLE_AS_IS)
+		mux_control_set(mux, mux->idle_state);
+
+	return ret;
+}
+
+/**
+ * mux_control_select() - Select the given multiplexer state.
+ * @mux: The mux-control to request a change of state from.
+ * @state: The new requested state.
+ *
+ * On successfully selecting the mux-control state, it will be locked until
+ * there is a call to mux_control_deselect(). If the mux-control is already
+ * selected when mux_control_select() is called, the caller will be blocked
+ * until mux_control_deselect() is called (by someone else).
+ *
+ * Therefore, make sure to call mux_control_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_control_deselect() if mux_control_select() fails.
+ *
+ * Return: 0 when the mux-control has been set to the requested state, or a
+ * negative errno on error.
+ */
+int mux_control_select(struct mux_control *mux, unsigned int state)
+{
+	int ret;
+
+	ret = down_killable(&mux->lock);
+	if (ret < 0)
+		return ret;
+
+	ret = __mux_control_select(mux, state);
+
+	if (ret < 0)
+		up(&mux->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_select);
+
+/**
+ * mux_control_try_select() - Try to select the given multiplexer state.
+ * @mux: The mux-control to request a change of state from.
+ * @state: The new requested state.
+ *
+ * On successfully selecting the mux-control state, it will be locked until
+ * mux_control_deselect() is called.
+ *
+ * Therefore, make sure to call mux_control_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_control_deselect() if mux_control_try_select() fails.
+ *
+ * Return: 0 when the mux-control has been set to the requested state, or a
+ * negative errno on error. Specifically -EBUSY if the mux-control is contended.
+ */
+int mux_control_try_select(struct mux_control *mux, unsigned int state)
+{
+	int ret;
+
+	if (down_trylock(&mux->lock))
+		return -EBUSY;
+
+	ret = __mux_control_select(mux, state);
+
+	if (ret < 0)
+		up(&mux->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_try_select);
+
+/**
+ * mux_control_deselect() - Deselect the previously selected multiplexer state.
+ * @mux: The mux-control to deselect.
+ *
+ * It is required that a single call is made to mux_control_deselect() for
+ * each and every successful call made to either of mux_control_select() or
+ * mux_control_try_select().
+ *
+ * Return: 0 on success and a negative errno on error. An error can only
+ * occur if the mux has an idle state. Note that even if an error occurs, the
+ * mux-control is unlocked and is thus free for the next access.
+ */
+int mux_control_deselect(struct mux_control *mux)
+{
+	int ret = 0;
+
+	if (mux->idle_state != MUX_IDLE_AS_IS &&
+	    mux->idle_state != mux->cached_state)
+		ret = mux_control_set(mux, mux->idle_state);
+
+	up(&mux->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_deselect);
+
+static int of_dev_node_match(struct device *dev, const void *data)
+{
+	return dev->of_node == data;
+}
+
+static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
+{
+	struct device *dev;
+
+	dev = class_find_device(&mux_class, NULL, np, of_dev_node_match);
+
+	return dev ? to_mux_chip(dev) : NULL;
+}
+
+/**
+ * mux_control_get() - Get the mux-control for a device.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+{
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	struct mux_chip *mux_chip;
+	unsigned int controller;
+	int index = 0;
+	int ret;
+
+	if (mux_name) {
+		index = of_property_match_string(np, "mux-control-names",
+						 mux_name);
+		if (index < 0) {
+			dev_err(dev, "mux controller '%s' not found\n",
+				mux_name);
+			return ERR_PTR(index);
+		}
+	}
+
+	ret = of_parse_phandle_with_args(np,
+					 "mux-controls", "#mux-control-cells",
+					 index, &args);
+	if (ret) {
+		dev_err(dev, "%s: failed to get mux-control %s(%i)\n",
+			np->full_name, mux_name ?: "", index);
+		return ERR_PTR(ret);
+	}
+
+	mux_chip = of_find_mux_chip_by_node(args.np);
+	of_node_put(args.np);
+	if (!mux_chip)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (args.args_count > 1 ||
+	    (!args.args_count && (mux_chip->controllers > 1))) {
+		dev_err(dev, "%s: wrong #mux-control-cells for %s\n",
+			np->full_name, args.np->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	controller = 0;
+	if (args.args_count)
+		controller = args.args[0];
+
+	if (controller >= mux_chip->controllers) {
+		dev_err(dev, "%s: bad mux controller %u specified in %s\n",
+			np->full_name, controller, args.np->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	get_device(&mux_chip->dev);
+	return &mux_chip->mux[controller];
+}
+EXPORT_SYMBOL_GPL(mux_control_get);
+
+/**
+ * mux_control_put() - Put away the mux-control for good.
+ * @mux: The mux-control to put away.
+ *
+ * mux_control_put() reverses the effects of mux_control_get().
+ */
+void mux_control_put(struct mux_control *mux)
+{
+	put_device(&mux->chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_control_put);
+
+static void devm_mux_control_release(struct device *dev, void *res)
+{
+	struct mux_control *mux = *(struct mux_control **)res;
+
+	mux_control_put(mux);
+}
+
+/**
+ * devm_mux_control_get() - Get the mux-control for a device, with resource
+ *			    management.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: Pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *devm_mux_control_get(struct device *dev,
+					 const char *mux_name)
+{
+	struct mux_control **ptr, *mux;
+
+	ptr = devres_alloc(devm_mux_control_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	mux = mux_control_get(dev, mux_name);
+	if (IS_ERR(mux)) {
+		devres_free(ptr);
+		return mux;
+	}
+
+	*ptr = mux;
+	devres_add(dev, ptr);
+
+	return mux;
+}
+EXPORT_SYMBOL_GPL(devm_mux_control_get);
+
+/*
+ * Using subsys_initcall instead of module_init here to try to ensure - for
+ * the non-modular case - that the subsystem is initialized when mux consumers
+ * and mux controllers start to use it.
+ * For the modular case, the ordering is ensured with module dependencies.
+ */
+subsys_initcall(mux_init);
+module_exit(mux_exit);
+
+MODULE_DESCRIPTION("Multiplexer subsystem");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
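
A minimal consumer sketch of the API added above, assuming the consumer header
exported by this series (referred to here as <linux/mux/consumer.h>); the
device, mux-control name and state value are hypothetical:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mux/consumer.h>

	static int example_use_mux(struct device *dev)
	{
		struct mux_control *mux;
		int ret;

		/* "adc" is a made-up mux-control-names entry */
		mux = devm_mux_control_get(dev, "adc");
		if (IS_ERR(mux))
			return PTR_ERR(mux);

		/* Blocks until the mux is free, then locks it for this caller */
		ret = mux_control_select(mux, 1);
		if (ret < 0)
			return ret;	/* do not deselect on failure */

		/* ... access whatever sits behind mux state 1 ... */

		return mux_control_deselect(mux);	/* free it for others */
	}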
diff --git a/drivers/mux/mux-gpio.c b/drivers/mux/mux-gpio.c
new file mode 100644
index 0000000..468bf17
--- /dev/null
+++ b/drivers/mux/mux-gpio.c
@@ -0,0 +1,114 @@
+/*
+ * GPIO-controlled multiplexer driver
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct mux_gpio {
+	struct gpio_descs *gpios;
+	int *val;
+};
+
+static int mux_gpio_set(struct mux_control *mux, int state)
+{
+	struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
+	int i;
+
+	for (i = 0; i < mux_gpio->gpios->ndescs; i++)
+		mux_gpio->val[i] = (state >> i) & 1;
+
+	gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
+				       mux_gpio->gpios->desc,
+				       mux_gpio->val);
+
+	return 0;
+}
+
+static const struct mux_control_ops mux_gpio_ops = {
+	.set = mux_gpio_set,
+};
+
+static const struct of_device_id mux_gpio_dt_ids[] = {
+	{ .compatible = "gpio-mux", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_gpio_dt_ids);
+
+static int mux_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mux_chip *mux_chip;
+	struct mux_gpio *mux_gpio;
+	int pins;
+	s32 idle_state;
+	int ret;
+
+	pins = gpiod_count(dev, "mux");
+	if (pins < 0)
+		return pins;
+
+	mux_chip = devm_mux_chip_alloc(dev, 1, sizeof(*mux_gpio) +
+				       pins * sizeof(*mux_gpio->val));
+	if (IS_ERR(mux_chip))
+		return PTR_ERR(mux_chip);
+
+	mux_gpio = mux_chip_priv(mux_chip);
+	mux_gpio->val = (int *)(mux_gpio + 1);
+	mux_chip->ops = &mux_gpio_ops;
+
+	mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
+	if (IS_ERR(mux_gpio->gpios)) {
+		ret = PTR_ERR(mux_gpio->gpios);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get gpios\n");
+		return ret;
+	}
+	WARN_ON(pins != mux_gpio->gpios->ndescs);
+	mux_chip->mux->states = 1 << pins;
+
+	ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
+	if (ret >= 0 && idle_state != MUX_IDLE_AS_IS) {
+		if (idle_state < 0 || idle_state >= mux_chip->mux->states) {
+			dev_err(dev, "invalid idle-state %d\n", idle_state);
+			return -EINVAL;
+		}
+
+		mux_chip->mux->idle_state = idle_state;
+	}
+
+	ret = devm_mux_chip_register(dev, mux_chip);
+	if (ret < 0)
+		return ret;
+
+	dev_info(dev, "%u-way mux-controller registered\n",
+		 mux_chip->mux->states);
+
+	return 0;
+}
+
+static struct platform_driver mux_gpio_driver = {
+	.driver = {
+		.name = "gpio-mux",
+		.of_match_table	= of_match_ptr(mux_gpio_dt_ids),
+	},
+	.probe = mux_gpio_probe,
+};
+module_platform_driver(mux_gpio_driver);
+
+MODULE_DESCRIPTION("GPIO-controlled multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
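
For reference, a tiny user-space sketch (hypothetical, not part of the driver)
of how mux_gpio_set() above fans a state out onto the GPIO lines; with three
mux GPIOs, state 5 (binary 101) drives lines 0 and 2 high and line 1 low:

	#include <stdio.h>

	int main(void)
	{
		int ndescs = 3;		/* number of mux GPIOs (assumed) */
		int state = 5;		/* requested mux state */
		int i;

		for (i = 0; i < ndescs; i++)
			printf("gpio %d -> %d\n", i, (state >> i) & 1);
		return 0;
	}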
diff --git a/drivers/mux/mux-mmio.c b/drivers/mux/mux-mmio.c
new file mode 100644
index 0000000..37c1de3
--- /dev/null
+++ b/drivers/mux/mux-mmio.c
@@ -0,0 +1,141 @@
+/*
+ * MMIO register bitfield-controlled multiplexer driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+static int mux_mmio_set(struct mux_control *mux, int state)
+{
+	struct regmap_field **fields = mux_chip_priv(mux->chip);
+
+	return regmap_field_write(fields[mux_control_get_index(mux)], state);
+}
+
+static const struct mux_control_ops mux_mmio_ops = {
+	.set = mux_mmio_set,
+};
+
+static const struct of_device_id mux_mmio_dt_ids[] = {
+	{ .compatible = "mmio-mux", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
+
+static int mux_mmio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct regmap_field **fields;
+	struct mux_chip *mux_chip;
+	struct regmap *regmap;
+	int num_fields;
+	int ret;
+	int i;
+
+	regmap = syscon_node_to_regmap(np->parent);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(dev, "failed to get regmap: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_count_u32_elems(np, "mux-reg-masks");
+	if (ret == 0 || ret % 2)
+		ret = -EINVAL;
+	if (ret < 0) {
+		dev_err(dev, "mux-reg-masks property missing or invalid: %d\n",
+			ret);
+		return ret;
+	}
+	num_fields = ret / 2;
+
+	mux_chip = devm_mux_chip_alloc(dev, num_fields, num_fields *
+				       sizeof(*fields));
+	if (IS_ERR(mux_chip))
+		return PTR_ERR(mux_chip);
+
+	fields = mux_chip_priv(mux_chip);
+
+	for (i = 0; i < num_fields; i++) {
+		struct mux_control *mux = &mux_chip->mux[i];
+		struct reg_field field;
+		s32 idle_state = MUX_IDLE_AS_IS;
+		u32 reg, mask;
+		int bits;
+
+		ret = of_property_read_u32_index(np, "mux-reg-masks",
+						 2 * i, &reg);
+		if (!ret)
+			ret = of_property_read_u32_index(np, "mux-reg-masks",
+							 2 * i + 1, &mask);
+		if (ret < 0) {
+			dev_err(dev, "bitfield %d: failed to read mux-reg-masks property: %d\n",
+				i, ret);
+			return ret;
+		}
+
+		field.reg = reg;
+		field.msb = fls(mask) - 1;
+		field.lsb = ffs(mask) - 1;
+
+		if (mask != GENMASK(field.msb, field.lsb)) {
+			dev_err(dev, "bitfield %d: invalid mask 0x%x\n",
+				i, mask);
+			return -EINVAL;
+		}
+
+		fields[i] = devm_regmap_field_alloc(dev, regmap, field);
+		if (IS_ERR(fields[i])) {
+			ret = PTR_ERR(fields[i]);
+			dev_err(dev, "bitfield %d: failed to allocate: %d\n",
+				i, ret);
+			return ret;
+		}
+
+		bits = 1 + field.msb - field.lsb;
+		mux->states = 1 << bits;
+
+		of_property_read_u32_index(np, "idle-states", i,
+					   (u32 *)&idle_state);
+		if (idle_state != MUX_IDLE_AS_IS) {
+			if (idle_state < 0 || idle_state >= mux->states) {
+				dev_err(dev, "bitfield %d: out of range idle state %d\n",
+					i, idle_state);
+				return -EINVAL;
+			}
+
+			mux->idle_state = idle_state;
+		}
+	}
+
+	mux_chip->ops = &mux_mmio_ops;
+
+	return devm_mux_chip_register(dev, mux_chip);
+}
+
+static struct platform_driver mux_mmio_driver = {
+	.driver = {
+		.name = "mmio-mux",
+		.of_match_table	= of_match_ptr(mux_mmio_dt_ids),
+	},
+	.probe = mux_mmio_probe,
+};
+module_platform_driver(mux_mmio_driver);
+
+MODULE_DESCRIPTION("MMIO register bitfield-controlled multiplexer driver");
+MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
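
As a worked illustration of the mux-reg-masks math in mux_mmio_probe() above
(a user-space sketch with hypothetical values; fls() is open-coded here since
it is a kernel helper, not standard C):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static int fls32(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		unsigned int mask = 0x30;	/* one mask from a reg/mask pair */
		int msb = fls32(mask) - 1;	/* 5 */
		int lsb = ffs(mask) - 1;	/* 4 */
		/* GENMASK(msb, lsb) equivalent: contiguous bits msb..lsb */
		unsigned int contiguous = (~0u >> (31 - msb)) & ~((1u << lsb) - 1);

		printf("msb=%d lsb=%d valid=%d states=%d\n",
		       msb, lsb, mask == contiguous, 1 << (1 + msb - lsb));
		return 0;
	}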
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 62ee439..53a1cb5 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -756,6 +756,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 	struct net_device *dev = dev_id;
 	struct arcnet_local *lp;
 	int recbuf, status, diagstatus, didsomething, boguscount;
+	unsigned long flags;
 	int retval = IRQ_NONE;
 
 	arc_printk(D_DURING, dev, "\n");
@@ -765,7 +766,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 	lp = netdev_priv(dev);
 	BUG_ON(!lp);
 
-	spin_lock(&lp->lock);
+	spin_lock_irqsave(&lp->lock, flags);
 
 	/* RESET flag was enabled - if device is not running, we must
 	 * clear it right away (but nothing else).
@@ -774,7 +775,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 		if (lp->hw.status(dev) & RESETflag)
 			lp->hw.command(dev, CFLAGScmd | RESETclear);
 		lp->hw.intmask(dev, 0);
-		spin_unlock(&lp->lock);
+		spin_unlock_irqrestore(&lp->lock, flags);
 		return retval;
 	}
 
@@ -998,7 +999,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 	udelay(1);
 	lp->hw.intmask(dev, lp->intmask);
 
-	spin_unlock(&lp->lock);
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return retval;
 }
 EXPORT_SYMBOL(arcnet_interrupt);
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 2056878..4fa2e46 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked)
 	ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */
 	ackpkt->soft.cap.mes.ack = acked;
 
-	arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n",
+	arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n",
 		   *((int *)&ackpkt->soft.cap.cookie[0]));
 
 	ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 239de38..47f80b8 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -135,6 +135,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
 	for (i = 0; i < ci->devcount; i++) {
 		struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
 		struct com20020_dev *card;
+		int dev_id_mask = 0xf;
 
 		dev = alloc_arcdev(device);
 		if (!dev) {
@@ -166,6 +167,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
 		arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND);
 		arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT);
 
+		SET_NETDEV_DEV(dev, &pdev->dev);
 		dev->base_addr = ioaddr;
 		dev->dev_addr[0] = node;
 		dev->irq = pdev->irq;
@@ -179,8 +181,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
 		/* Get the dev_id from the PLX rotary coder */
 		if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
-			dev->dev_id = 0xc;
-		dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+			dev_id_mask = 0x3;
+		dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
 
 		snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
 
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 13d9ad4..78043a9 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared)
 		return -ENODEV;
 	}
 
-	dev->base_addr = ioaddr;
-
 	arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
 		   lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b44a6ae..e5386ab 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -90,10 +90,13 @@ enum ad_link_speed_type {
 	AD_LINK_SPEED_100MBPS,
 	AD_LINK_SPEED_1000MBPS,
 	AD_LINK_SPEED_2500MBPS,
+	AD_LINK_SPEED_5000MBPS,
 	AD_LINK_SPEED_10000MBPS,
+	AD_LINK_SPEED_14000MBPS,
 	AD_LINK_SPEED_20000MBPS,
 	AD_LINK_SPEED_25000MBPS,
 	AD_LINK_SPEED_40000MBPS,
+	AD_LINK_SPEED_50000MBPS,
 	AD_LINK_SPEED_56000MBPS,
 	AD_LINK_SPEED_100000MBPS,
 };
@@ -259,10 +262,13 @@ static inline int __check_agg_selection_timer(struct port *port)
  *     %AD_LINK_SPEED_100MBPS,
  *     %AD_LINK_SPEED_1000MBPS,
  *     %AD_LINK_SPEED_2500MBPS,
+ *     %AD_LINK_SPEED_5000MBPS,
  *     %AD_LINK_SPEED_10000MBPS
+ *     %AD_LINK_SPEED_14000MBPS,
  *     %AD_LINK_SPEED_20000MBPS
  *     %AD_LINK_SPEED_25000MBPS
  *     %AD_LINK_SPEED_40000MBPS
+ *     %AD_LINK_SPEED_50000MBPS
  *     %AD_LINK_SPEED_56000MBPS
  *     %AD_LINK_SPEED_100000MBPS
  */
@@ -296,10 +302,18 @@ static u16 __get_link_speed(struct port *port)
 			speed = AD_LINK_SPEED_2500MBPS;
 			break;
 
+		case SPEED_5000:
+			speed = AD_LINK_SPEED_5000MBPS;
+			break;
+
 		case SPEED_10000:
 			speed = AD_LINK_SPEED_10000MBPS;
 			break;
 
+		case SPEED_14000:
+			speed = AD_LINK_SPEED_14000MBPS;
+			break;
+
 		case SPEED_20000:
 			speed = AD_LINK_SPEED_20000MBPS;
 			break;
@@ -312,6 +326,10 @@ static u16 __get_link_speed(struct port *port)
 			speed = AD_LINK_SPEED_40000MBPS;
 			break;
 
+		case SPEED_50000:
+			speed = AD_LINK_SPEED_50000MBPS;
+			break;
+
 		case SPEED_56000:
 			speed = AD_LINK_SPEED_56000MBPS;
 			break;
@@ -707,9 +725,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
 		case AD_LINK_SPEED_2500MBPS:
 			bandwidth = nports * 2500;
 			break;
+		case AD_LINK_SPEED_5000MBPS:
+			bandwidth = nports * 5000;
+			break;
 		case AD_LINK_SPEED_10000MBPS:
 			bandwidth = nports * 10000;
 			break;
+		case AD_LINK_SPEED_14000MBPS:
+			bandwidth = nports * 14000;
+			break;
 		case AD_LINK_SPEED_20000MBPS:
 			bandwidth = nports * 20000;
 			break;
@@ -719,6 +743,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
 		case AD_LINK_SPEED_40000MBPS:
 			bandwidth = nports * 40000;
 			break;
+		case AD_LINK_SPEED_50000MBPS:
+			bandwidth = nports * 50000;
+			break;
 		case AD_LINK_SPEED_56000MBPS:
 			bandwidth = nports * 56000;
 			break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2359478b..8ab6bdb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4192,7 +4192,6 @@ static void bond_destructor(struct net_device *bond_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 	if (bond->wq)
 		destroy_workqueue(bond->wq);
-	free_netdev(bond_dev);
 }
 
 void bond_setup(struct net_device *bond_dev)
@@ -4212,7 +4211,8 @@ void bond_setup(struct net_device *bond_dev)
 	bond_dev->netdev_ops = &bond_netdev_ops;
 	bond_dev->ethtool_ops = &bond_ethtool_ops;
 
-	bond_dev->destructor = bond_destructor;
+	bond_dev->needs_free_netdev = true;
+	bond_dev->priv_destructor = bond_destructor;
 
 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
@@ -4736,7 +4736,7 @@ int bond_create(struct net *net, const char *name)
 
 	rtnl_unlock();
 	if (res < 0)
-		bond_destructor(bond_dev);
+		free_netdev(bond_dev);
 	return res;
 }
 
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce7..71a7c3b 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1121,7 +1121,7 @@ static void cfhsi_setup(struct net_device *dev)
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
 	dev->priv_flags |= IFF_NO_QUEUE;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->netdev_ops = &cfhsi_netdevops;
 	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
 		skb_queue_head_init(&cfhsi->qhead[i]);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index c2dea49..76e1d35 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -428,7 +428,7 @@ static void caifdev_setup(struct net_device *dev)
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CAIF_MAX_MTU;
 	dev->priv_flags |= IFF_NO_QUEUE;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	skb_queue_head_init(&serdev->head);
 	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
 	serdev->common.use_frag = true;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 3a529fb..cb0dcc9 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -289,44 +289,44 @@ static LIST_HEAD(cfspi_list);
 static spinlock_t cfspi_list_lock;
 
 /* SPI uplink head alignment. */
-static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
+static ssize_t up_head_align_show(struct device_driver *driver, char *buf)
 {
 	return sprintf(buf, "%d\n", spi_up_head_align);
 }
 
-static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);
+static DRIVER_ATTR_RO(up_head_align);
 
 /* SPI uplink tail alignment. */
-static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
+static ssize_t up_tail_align_show(struct device_driver *driver, char *buf)
 {
 	return sprintf(buf, "%d\n", spi_up_tail_align);
 }
 
-static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);
+static DRIVER_ATTR_RO(up_tail_align);
 
 /* SPI downlink head alignment. */
-static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
+static ssize_t down_head_align_show(struct device_driver *driver, char *buf)
 {
 	return sprintf(buf, "%d\n", spi_down_head_align);
 }
 
-static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);
+static DRIVER_ATTR_RO(down_head_align);
 
 /* SPI downlink tail alignment. */
-static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
+static ssize_t down_tail_align_show(struct device_driver *driver, char *buf)
 {
 	return sprintf(buf, "%d\n", spi_down_tail_align);
 }
 
-static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);
+static DRIVER_ATTR_RO(down_tail_align);
 
 /* SPI frame alignment. */
-static ssize_t show_frame_align(struct device_driver *driver, char *buf)
+static ssize_t frame_align_show(struct device_driver *driver, char *buf)
 {
 	return sprintf(buf, "%d\n", spi_frm_align);
 }
 
-static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);
+static DRIVER_ATTR_RO(frame_align);
 
 int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 {
@@ -712,7 +712,7 @@ static void cfspi_setup(struct net_device *dev)
 	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	skb_queue_head_init(&cfspi->qhead);
 	skb_queue_head_init(&cfspi->chead);
 	cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 6122768..1794ea0 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -617,7 +617,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
 	netdev->tx_queue_len = 100;
 	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	netdev->mtu = CFV_DEF_MTU_SIZE;
-	netdev->destructor = free_netdev;
+	netdev->needs_free_netdev = true;
 }
 
 /* Create debugfs counters for the device */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 611d16a..ae4ed03 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -391,6 +391,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
 	can_update_state_error_stats(dev, new_state);
 	priv->state = new_state;
 
+	if (!cf)
+		return;
+
 	if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
 		cf->can_id |= CAN_ERR_BUSOFF;
 		return;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 0d57be5..85268be 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -489,7 +489,7 @@ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
 				struct pucan_rx_msg *msg_list, int msg_count)
 {
 	void *msg_ptr = msg_list;
-	int i, msg_size;
+	int i, msg_size = 0;
 
 	for (i = 0; i < msg_count; i++) {
 		msg_size = peak_canfd_handle_msg(priv, msg_ptr);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index eb71737..6a6e896 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -417,7 +417,7 @@ static int slc_open(struct net_device *dev)
 static void slc_free_netdev(struct net_device *dev)
 {
 	int i = dev->base_addr;
-	free_netdev(dev);
+
 	slcan_devs[i] = NULL;
 }
 
@@ -436,7 +436,8 @@ static const struct net_device_ops slc_netdev_ops = {
 static void slc_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &slc_netdev_ops;
-	dev->destructor		= slc_free_netdev;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= slc_free_netdev;
 
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
@@ -761,8 +762,6 @@ static void __exit slcan_exit(void)
 		if (sl->tty) {
 			printk(KERN_ERR "%s: tty discipline still running\n",
 			       dev->name);
-			/* Intentionally leak the control block. */
-			dev->destructor = NULL;
 		}
 
 		unregister_netdev(dev);
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index eecee7f..afcc131 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
 			     sizeof(*dm),
 			     1000);
 
+	kfree(dm);
+
 	return rc;
 }
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 57913db..1ca76e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -908,8 +908,6 @@ static int peak_usb_probe(struct usb_interface *intf,
 	const struct peak_usb_adapter *peak_usb_adapter = NULL;
 	int i, err = -ENOMEM;
 
-	usb_dev = interface_to_usbdev(intf);
-
 	/* get corresponding PCAN-USB adapter */
 	for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
 		if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
@@ -920,7 +918,7 @@ static int peak_usb_probe(struct usb_interface *intf,
 	if (!peak_usb_adapter) {
 		/* should never come except device_id bad usage in this file */
 		pr_err("%s: didn't find device id. 0x%x in devices list\n",
-			PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct);
+			PCAN_USB_DRIVER_NAME, usb_id_product);
 		return -ENODEV;
 	}
 
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index facca33..a8cb3326 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -152,7 +152,7 @@ static const struct net_device_ops vcan_netdev_ops = {
 static void vcan_setup(struct net_device *dev)
 {
 	dev->type		= ARPHRD_CAN;
-	dev->mtu		= CAN_MTU;
+	dev->mtu		= CANFD_MTU;
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 0;
@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
 		dev->flags |= IFF_ECHO;
 
 	dev->netdev_ops		= &vcan_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static struct rtnl_link_ops vcan_link_ops __read_mostly = {
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 7fbb247..cfe889e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -150,13 +150,13 @@ static const struct net_device_ops vxcan_netdev_ops = {
 static void vxcan_setup(struct net_device *dev)
 {
 	dev->type		= ARPHRD_CAN;
-	dev->mtu		= CAN_MTU;
+	dev->mtu		= CANFD_MTU;
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
 	dev->tx_queue_len	= 0;
 	dev->flags		= (IFF_NOARP|IFF_ECHO);
 	dev->netdev_ops		= &vxcan_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 /* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 96046bb..14c0be9 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
 	return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
-			   int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+					 int src_dev, int src_port, u16 data)
 {
 	return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 149244a..9905b52 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -328,7 +328,6 @@ static void dummy_free_netdev(struct net_device *dev)
 	struct dummy_priv *priv = netdev_priv(dev);
 
 	kfree(priv->vfinfo);
-	free_netdev(dev);
 }
 
 static void dummy_setup(struct net_device *dev)
@@ -338,7 +337,8 @@ static void dummy_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &dummy_netdev_ops;
 	dev->ethtool_ops = &dummy_ethtool_ops;
-	dev->destructor = dummy_free_netdev;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = dummy_free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
 	dev->flags |= IFF_NOARP;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 08d11ce..f5b237e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = admin_queue->sq.tail - admin_queue->sq.head;
+	cnt = atomic_read(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
-			 admin_queue->sq.tail, admin_queue->sq.head,
-			 admin_queue->q_depth);
+		pr_debug("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(-ENOSPC);
 	}
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
-	u32 start_time;
+	unsigned long flags, timeout;
 	int ret;
 
-	start_time = ((u32)jiffies_to_usecs(jiffies));
+	timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
 
-	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
-		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
-		    ADMIN_CMD_TIMEOUT_US) {
+	while (1) {
+		spin_lock_irqsave(&admin_queue->q_lock, flags);
+		ena_com_handle_admin_completion(admin_queue);
+		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+		if (comp_ctx->status != ENA_CMD_SUBMITTED)
+			break;
+
+		if (time_is_before_jiffies(timeout)) {
 			pr_err("Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 			goto err;
 		}
 
-		spin_lock_irqsave(&admin_queue->q_lock, flags);
-		ena_com_handle_admin_completion(admin_queue);
-		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
 		msleep(100);
 	}
 
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+	u32 mask_value = 0;
+
+	if (polling)
+		mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f..3ee55e2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(missing_tx_comp),
 	ENA_STAT_TX_ENTRY(bad_req_id),
 };
 
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(dma_mapping_err),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
 	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+	ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7c1214d..4f16ed3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 		rxr->sgl_size = adapter->max_rx_sgl_size;
 		rxr->smoothed_interval =
 			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+		rxr->empty_rx_queue = 0;
 	}
 }
 
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 	rx_ring->per_napi_bytes = 0;
 }
 
+static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+					struct ena_ring *rx_ring)
+{
+	struct ena_eth_io_intr_reg intr_reg;
+
+	/* Update intr register: rx intr delay,
+	 * tx intr delay and interrupt unmask
+	 */
+	ena_com_update_intr_reg(&intr_reg,
+				rx_ring->smoothed_interval,
+				tx_ring->smoothed_interval,
+				true);
+
+	/* It is a shared MSI-X.
+	 * Tx and Rx CQ have pointer to it.
+	 * So we use one of them to reach the intr reg
+	 */
+	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+}
+
 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 					     struct ena_ring *rx_ring)
 {
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 {
 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
 	struct ena_ring *tx_ring, *rx_ring;
-	struct ena_eth_io_intr_reg intr_reg;
 
 	u32 tx_work_done;
 	u32 rx_work_done;
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 				ena_adjust_intr_moderation(rx_ring, tx_ring);
 
-			/* Update intr register: rx intr delay,
-			 * tx intr delay and interrupt unmask
-			 */
-			ena_com_update_intr_reg(&intr_reg,
-						rx_ring->smoothed_interval,
-						tx_ring->smoothed_interval,
-						true);
-
-			/* It is a shared MSI-X.
-			 * Tx and Rx CQ have pointer to it.
-			 * So we use one of them to reach the intr reg
-			 */
-			ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+			ena_unmask_interrupt(tx_ring, rx_ring);
 		}
 
-
 		ena_update_ring_numa_node(tx_ring, rx_ring);
 
 		ret = rx_work_done;
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
 	ena_napi_enable_all(adapter);
 
+	/* Enable completion queues interrupt */
+	for (i = 0; i < adapter->num_queues; i++)
+		ena_unmask_interrupt(&adapter->tx_ring[i],
+				     &adapter->rx_ring[i]);
+
 	/* schedule napi in case we had pending packets
 	 * from the last time we disable napi
 	 */
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_info->tx_descs = nb_hw_desc;
 	tx_info->last_jiffies = jiffies;
+	tx_info->print_once = 0;
 
 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
 		tx_ring->ring_size);
@@ -2550,13 +2565,44 @@ static void ena_fw_reset_device(struct work_struct *work)
 		"Reset attempt failed. Can not reset the device\n");
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static int check_missing_comp_in_queue(struct ena_adapter *adapter,
+				       struct ena_ring *tx_ring)
 {
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
+	u32 missed_tx = 0;
+	int i;
+
+	for (i = 0; i < tx_ring->ring_size; i++) {
+		tx_buf = &tx_ring->tx_buffer_info[i];
+		last_jiffies = tx_buf->last_jiffies;
+		if (unlikely(last_jiffies &&
+			     time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+			if (!tx_buf->print_once)
+				netif_notice(adapter, tx_err, adapter->netdev,
+					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+					     tx_ring->qid, i);
+
+			tx_buf->print_once = 1;
+			missed_tx++;
+
+			if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+				netif_err(adapter, tx_err, adapter->netdev,
+					  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+					  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+				set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
 	struct ena_ring *tx_ring;
-	int i, j, budget;
-	u32 missed_tx;
+	int i, budget, rc;
 
 	/* Make sure the driver doesn't turn the device in other process */
 	smp_rmb();
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
 		tx_ring = &adapter->tx_ring[i];
 
-		for (j = 0; j < tx_ring->ring_size; j++) {
-			tx_buf = &tx_ring->tx_buffer_info[j];
-			last_jiffies = tx_buf->last_jiffies;
-			if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
-				netif_notice(adapter, tx_err, adapter->netdev,
-					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
-					     tx_ring->qid, j);
-
-				u64_stats_update_begin(&tx_ring->syncp);
-				missed_tx = tx_ring->tx_stats.missing_tx_comp++;
-				u64_stats_update_end(&tx_ring->syncp);
-
-				/* Clear last jiffies so the lost buffer won't
-				 * be counted twice.
-				 */
-				tx_buf->last_jiffies = 0;
-
-				if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
-					netif_err(adapter, tx_err, adapter->netdev,
-						  "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
-						  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
-					set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-				}
-			}
-		}
+		rc = check_missing_comp_in_queue(adapter, tx_ring);
+		if (unlikely(rc))
+			return;
 
 		budget--;
 		if (!budget)
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	adapter->last_monitored_tx_qid = i % adapter->num_queues;
 }
 
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * the device won't send interrupts since all the new Rx packets will be
+ * dropped, and the napi handler won't allocate new Rx descriptors, so the
+ * device won't be able to deliver new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for
+ * constrained environments.
+ *
+ * When such a situation is detected, reschedule napi.
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+	struct ena_ring *rx_ring;
+	int i, refill_required;
+
+	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+		return;
+
+	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		rx_ring = &adapter->rx_ring[i];
+
+		refill_required =
+			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+			rx_ring->empty_rx_queue++;
+
+			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+				u64_stats_update_begin(&rx_ring->syncp);
+				rx_ring->rx_stats.empty_rx_ring++;
+				u64_stats_update_end(&rx_ring->syncp);
+
+				netif_err(adapter, drv, adapter->netdev,
+					  "trigger refill for ring %d\n", i);
+
+				napi_schedule(rx_ring->napi);
+				rx_ring->empty_rx_queue = 0;
+			}
+		} else {
+			rx_ring->empty_rx_queue = 0;
+		}
+	}
+}
+
 /* Check for keep alive expiration */
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data)
 
 	check_for_missing_tx_completions(adapter);
 
+	check_for_empty_rx_ring(adapter);
+
 	if (debug_area)
 		ena_dump_stats_to_buf(adapter, debug_area);
 
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
 	int release_bars;
 
+	if (ena_dev->mem_bar)
+		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
 }
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_ena_dev;
 	}
 
-	ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
-				   pci_resource_len(pdev, ENA_REG_BAR));
+	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, ENA_REG_BAR),
+					pci_resource_len(pdev, ENA_REG_BAR));
 	if (!ena_dev->reg_bar) {
 		dev_err(&pdev->dev, "failed to remap regs bar\n");
 		rc = -EFAULT;
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
 
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
-					      pci_resource_len(pdev, ENA_MEM_BAR));
+		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+						   pci_resource_start(pdev, ENA_MEM_BAR),
+						   pci_resource_len(pdev, ENA_MEM_BAR));
 		if (!ena_dev->mem_bar) {
 			rc = -EFAULT;
 			goto err_device_destroy;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0e22bce..a4d3d5e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR	1
 #define DRV_MODULE_VER_MINOR	1
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 7
 
 #define DRV_MODULE_NAME		"ena"
 #ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@ struct ena_tx_buffer {
 	u32 tx_descs;
 	/* num of buffers used by this skb */
 	u32 num_of_bufs;
-	/* Save the last jiffies to detect missing tx packets */
+
+	/* Used to detect missing tx packets and limit the number of prints */
+	u32 print_once;
+	/* Save the last jiffies to detect missing tx packets.
+	 *
+	 * Set to a non-zero value in ena_start_xmit and set to zero in
+	 * napi and the timer service routine.
+	 *
+	 * While this value is not protected by a lock,
+	 * a given packet is not expected to be handled by ena_start_xmit
+	 * and by napi/timer_service at the same time.
+	 */
 	unsigned long last_jiffies;
 	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
 } ____cacheline_aligned;
@@ -170,7 +181,6 @@ struct ena_stats_tx {
 	u64 napi_comp;
 	u64 tx_poll;
 	u64 doorbells;
-	u64 missing_tx_comp;
 	u64 bad_req_id;
 };
 
@@ -184,6 +194,7 @@ struct ena_stats_rx {
 	u64 dma_mapping_err;
 	u64 bad_desc_num;
 	u64 rx_copybreak_pkt;
+	u64 empty_rx_ring;
 };
 
 struct ena_ring {
@@ -231,6 +242,7 @@ struct ena_ring {
 		struct ena_stats_tx tx_stats;
 		struct ena_stats_rx rx_stats;
 	};
+	int empty_rx_queue;
 } ____cacheline_aligned;
 
 struct ena_stats_dev {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b3bc87f..0a98c36 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 			      struct xgbe_ring *ring,
 			      struct xgbe_ring_data *rdata)
 {
-	int order, ret;
+	int ret;
 
 	if (!ring->rx_hdr_pa.pages) {
 		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
 	}
 
 	if (!ring->rx_buf_pa.pages) {
-		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
 		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-				       order);
+				       PAGE_ALLOC_COSTLY_ORDER);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b8e3d88..a66aee5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
 			     struct aq_hw_caps_s *aq_hw_caps,
 			     u32 *regs_buff);
 
-int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
-				 struct ethtool_cmd *cmd);
-
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
 			      unsigned int power_state);
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 099b374..5274501 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	priv->num_rx_desc_words = params->num_rx_desc_words;
 
 	priv->irq0 = platform_get_irq(pdev, 0);
-	if (!priv->is_lite)
+	if (!priv->is_lite) {
 		priv->irq1 = platform_get_irq(pdev, 1);
-	priv->wol_irq = platform_get_irq(pdev, 2);
+		priv->wol_irq = platform_get_irq(pdev, 2);
+	} else {
+		priv->wol_irq = platform_get_irq(pdev, 1);
+	}
 	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
 		dev_err(&pdev->dev, "invalid interrupts\n");
 		ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index eccb3d1..f619c4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* when transmitting in a vf, start bd must hold the ethertype
 		 * for fw to enforce it
 		 */
+		u16 vlan_tci = 0;
 #ifndef BNX2X_STOP_ON_ERROR
-		if (IS_VF(bp))
+		if (IS_VF(bp)) {
 #endif
-			tx_start_bd->vlan_or_ethertype =
-				cpu_to_le16(ntohs(eth->h_proto));
+			/* Still need to consider inband vlan for enforced */
+			if (__vlan_get_tag(skb, &vlan_tci)) {
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(ntohs(eth->h_proto));
+			} else {
+				tx_start_bd->bd_flags.as_bitfield |=
+					(X_ETH_INBAND_VLAN <<
+					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+				tx_start_bd->vlan_or_ethertype =
+					cpu_to_le16(vlan_tci);
+			}
 #ifndef BNX2X_STOP_ON_ERROR
-		else
+		} else {
 			/* used by FW for packet accounting */
 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+		}
 #endif
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a851f95..349a465 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
 	} else {
 		/* If no mc addresses are required, flush the configuration */
 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
-		if (rc)
+		if (rc < 0)
 			BNX2X_ERR("Failed to clear multicast configuration %d\n",
 				  rc);
 	}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index bdfd53b..9ca994d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	/* release VF resources */
 	bnx2x_vf_free_resc(bp, vf);
 
+	vf->malicious = false;
+
 	/* re-open the mailbox */
 	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 	return;
@@ -1822,9 +1824,11 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 		   vf->abs_vfid, qidx);
 		bnx2x_vf_handle_rss_update_eqe(bp, vf);
 	case EVENT_RING_OPCODE_VF_FLR:
-	case EVENT_RING_OPCODE_MALICIOUS_VF:
 		/* Do nothing for now */
 		return 0;
+	case EVENT_RING_OPCODE_MALICIOUS_VF:
+		vf->malicious = true;
+		return 0;
 	}
 
 	return 0;
@@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 			continue;
 		}
 
+		if (vf->malicious) {
+			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+			       "vf %d malicious so no stats for it\n",
+			       vf->abs_vfid);
+			continue;
+		}
+
 		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
 		       "add addresses for vf %d\n", vf->abs_vfid);
 		for_each_vfq(vf, j) {
@@ -3042,7 +3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
 {
 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
 		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
 		       sizeof(union pf_vf_bulletin));
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 888d0b6..53466f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -141,6 +141,7 @@ struct bnx2x_virtf {
 #define VF_RESET	3	/* VF FLR'd, pending cleanup */
 
 	bool flr_clnup_stage;	/* true during flr cleanup */
+	bool malicious;		/* true if FW indicated so, until FLR */
 
 	/* dma */
 	dma_addr_t fw_stat_map;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 03f55da..74e8e21 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1301,10 +1301,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		cp_cons = NEXT_CMP(cp_cons);
 	}
 
-	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
 		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
-		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
-			    agg_bufs, (int)MAX_SKB_FRAGS);
+		if (agg_bufs > MAX_SKB_FRAGS)
+			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+				    agg_bufs, (int)MAX_SKB_FRAGS);
 		return NULL;
 	}
 
@@ -1562,6 +1563,45 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	return rc;
 }
 
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+				 u32 *raw_cons, u8 *event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 tmp_raw_cons = *raw_cons;
+	struct rx_cmp_ext *rxcmp1;
+	struct rx_cmp *rxcmp;
+	u16 cp_cons;
+	u8 cmp_type;
+
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp = (struct rx_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp_ext *tpa_end1;
+
+		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+	}
+	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
 #define BNXT_GET_EVENT_PORT(data)	\
 	((data) &			\
 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1744,7 +1784,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
 				rx_pkts = budget;
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			if (likely(budget))
+				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			else
+				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+							   &event);
 			if (likely(rc >= 0))
 				rx_pkts += rc;
 			else if (rc == -EBUSY)	/* partial completion */
@@ -6663,12 +6707,11 @@ static void bnxt_poll_controller(struct net_device *dev)
 	struct bnxt *bp = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+	/* Only process tx rings/combined rings in netpoll mode. */
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 
-		disable_irq(irq->vector);
-		irq->handler(irq->vector, bp->bnapi[i]);
-		enable_irq(irq->vector);
+		napi_schedule(&txr->bnapi->napi);
 	}
 }
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3ef42db..d46a850 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext {
 
 	__le32 rx_tpa_end_cmp_errors_v2;
 	#define RX_TPA_END_CMP_V2				(0x1 << 0)
-	#define RX_TPA_END_CMP_ERRORS				(0x7fff << 1)
+	#define RX_TPA_END_CMP_ERRORS				(0x3 << 1)
 	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
 
 	u32 rx_tpa_end_cmp_start_opaque;
 };
 
+#define TPA_END_ERRORS(rx_tpa_end_ext)					\
+	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &			\
+	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
 #define DB_IDX_MASK						0xffffff
 #define DB_IDX_VALID						(0x1 << 26)
 #define DB_IRQ_DIS						(0x1 << 27)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index bed9ef1..7ccffbb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -144,7 +144,7 @@ static inline int
 sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 {
 	int errno = 0;
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
@@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
 		   int *condition,
 		   int timeout)
 {
-	wait_queue_t we;
+	wait_queue_entry_t we;
 
 	init_waitqueue_entry(&we, current);
 	add_wait_queue(wait_queue, &we);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c67..53309f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap)
 {
 	int err;
 
+	mutex_lock(&uld_mutex);
 	err = setup_sge_queues(adap);
 	if (err)
-		goto out;
+		goto rel_lock;
 	err = setup_rss(adap);
 	if (err)
 		goto freeq;
@@ -2196,23 +2197,28 @@ static int cxgb_up(struct adapter *adap)
 		if (err)
 			goto irq_err;
 	}
+
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
 	adap->flags |= FULL_INIT_DONE;
+	mutex_unlock(&uld_mutex);
+
 	notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
 #endif
 	/* Initialize hash mac addr list*/
 	INIT_LIST_HEAD(&adap->mac_hlist);
- out:
 	return err;
+
  irq_err:
 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
  freeq:
 	t4_free_sge_resources(adap);
-	goto out;
+ rel_lock:
+	mutex_unlock(&uld_mutex);
+	return err;
 }
 
 static void cxgb_down(struct adapter *adapter)
@@ -2771,6 +2777,9 @@ void t4_fatal_err(struct adapter *adap)
 {
 	int port;
 
+	if (pci_channel_offline(adap->pdev))
+		return;
+
 	/* Disable the SGE since ULDs are going to free resources that
 	 * could be exposed to the adapter.  RDMA MWs for example...
 	 */
@@ -3882,9 +3891,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
 	spin_lock(&adap->stats_lock);
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
-
-		netif_device_detach(dev);
-		netif_carrier_off(dev);
+		if (dev) {
+			netif_device_detach(dev);
+			netif_carrier_off(dev);
+		}
 	}
 	spin_unlock(&adap->stats_lock);
 	disable_interrupts(adap);
@@ -3963,12 +3973,13 @@ static void eeh_resume(struct pci_dev *pdev)
 	rtnl_lock();
 	for_each_port(adap, i) {
 		struct net_device *dev = adap->port[i];
-
-		if (netif_running(dev)) {
-			link_start(dev);
-			cxgb_set_rxmode(dev);
+		if (dev) {
+			if (netif_running(dev)) {
+				link_start(dev);
+				cxgb_set_rxmode(dev);
+			}
+			netif_device_attach(dev);
 		}
-		netif_device_attach(dev);
 	}
 	rtnl_unlock();
 }
@@ -4516,7 +4527,7 @@ static void dummy_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 }
 
 static int config_mgmt_dev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96..3a34aa6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
-	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+	u32 whoami, pf;
+
+	if (pci_channel_offline(adapter->pdev))
+		return;
+
+	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
 			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 3549d387..f2d623a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2B
+#define T4FW_VERSION_MICRO 0x2D
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2B
+#define T5FW_VERSION_MICRO 0x2D
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2B
+#define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e863ba7..8bb0db9 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;
 
+	napi_enable(&priv->napi);
+
 	ethoc_init_ring(priv, dev->mem_start);
 	ethoc_reset(priv);
 
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
 	priv->old_duplex = -1;
 
 	phy_start(dev->phydev);
-	napi_enable(&priv->napi);
 
 	if (netif_msg_ifup(priv)) {
 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 9a520e4..290ad05 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
 	/* device used for DMA mapping */
-	arch_setup_dma_ops(dev, 0, 0, NULL, false);
+	set_dma_ops(dev, get_dma_ops(&pdev->dev));
 	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
 	if (err) {
 		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index dc0850b..8870a9a 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -2,6 +2,7 @@
 	tristate "FMan support"
 	depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
 	select GENERIC_ALLOCATOR
+	depends on HAS_DMA
 	select PHYLIB
 	default n
 	help
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 0b31f85..6e67d22 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
 		goto no_mem;
 	}
 
+	set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
+
 	ret = platform_device_add_data(pdev, &data, sizeof(data));
 	if (ret)
 		goto err;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b3..a10de1e 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *id =
 		of_match_device(fsl_pq_mdio_match, &pdev->dev);
-	const struct fsl_pq_mdio_data *data = id->data;
+	const struct fsl_pq_mdio_data *data;
 	struct device_node *np = pdev->dev.of_node;
 	struct resource res;
 	struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 	struct mii_bus *new_bus;
 	int err;
 
+	if (!id) {
+		dev_err(&pdev->dev, "Failed to match device\n");
+		return -ENODEV;
+	}
+
+	data = id->data;
+
 	dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
 	new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index e13aa06..7a8addd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -29,10 +29,9 @@ enum _dsm_rst_type {
 	HNS_ROCE_RESET_FUNC     = 0x7,
 };
 
-const u8 hns_dsaf_acpi_dsm_uuid[] = {
-	0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
-	0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
-};
+static const guid_t hns_dsaf_acpi_dsm_guid =
+	GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
+		  0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
 
 static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
 {
@@ -151,7 +150,7 @@ static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
 	argv4.package.elements = obj_args;
 
 	obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
-				hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+				&hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
 	if (!obj) {
 		dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
 			 port_type, port);
@@ -434,7 +433,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
 	argv4.package.elements = &obj_args,
 
 	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-				hns_dsaf_acpi_dsm_uuid, 0,
+				&hns_dsaf_acpi_dsm_guid, 0,
 				HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
 
 	if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -474,7 +473,7 @@ int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
 	argv4.package.elements = &obj_args,
 
 	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-				hns_dsaf_acpi_dsm_uuid, 0,
+				&hns_dsaf_acpi_dsm_guid, 0,
 				HNS_OP_GET_SFP_STAT_FUNC, &argv4);
 
 	if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -565,7 +564,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
 	argv4.package.elements = obj_args;
 
 	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
-				hns_dsaf_acpi_dsm_uuid, 0,
+				&hns_dsaf_acpi_dsm_guid, 0,
 				HNS_OP_SERDES_LP_FUNC, &argv4);
 	if (!obj) {
 		dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b8fab14..e95795b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 
 		/* Force 1000M Link, Default is 0x0200 */
 		phy_write(phy_dev, 7, 0x20C);
-		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 
-		/* Enable PHY loop-back */
+		/* Powerup Fiber */
+		phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+		val = phy_read(phy_dev, COPPER_CONTROL_REG);
+		val &= ~PHY_POWER_DOWN;
+		phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
+		/* Enable Phy Loopback */
+		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 		val = phy_read(phy_dev, COPPER_CONTROL_REG);
 		val |= PHY_LOOP_BACK;
 		val &= ~PHY_POWER_DOWN;
@@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
 		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
 		phy_write(phy_dev, 1, 0x400);
 		phy_write(phy_dev, 7, 0x200);
+
+		phy_write(phy_dev, HNS_PHY_PAGE_REG, 1);
+		val = phy_read(phy_dev, COPPER_CONTROL_REG);
+		val |= PHY_POWER_DOWN;
+		phy_write(phy_dev, COPPER_CONTROL_REG, val);
+
 		phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
 		phy_write(phy_dev, 9, 0xF00);
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e53d7a..b9d310f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3553,14 +3553,12 @@ static int check_module_parm(void)
 	return ret;
 }
 
-static ssize_t ehea_show_capabilities(struct device_driver *drv,
-				      char *buf)
+static ssize_t capabilities_show(struct device_driver *drv, char *buf)
 {
 	return sprintf(buf, "%d", EHEA_CAPABILITIES);
 }
 
-static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
-		   ehea_show_capabilities, NULL);
+static DRIVER_ATTR_RO(capabilities);
 
 static int __init ehea_module_init(void)
 {
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 508923f..259e69a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev)
 {
 	struct emac_regs __iomem *p = dev->emacp;
 	int n = 20;
+	bool __maybe_unused try_internal_clock = false;
 
 	DBG(dev, "reset" NL);
 
@@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev)
 	}
 
 #ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
 	/*
 	 * PPC460EX/GT Embedded Processor Advanced User's Manual
 	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev)
 	 * of the EMAC. If none is present, select the internal clock
 	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
 	 * After a soft reset, select the external clock.
+	 *
+	 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
+	 * ethernet cable is not attached. This causes the reset to timeout
+	 * and the PHY detection code in emac_init_phy() is unable to
+	 * communicate and detect the AR8035-A PHY. As a result, the emac
+	 * driver bails out early and the user has no ethernet.
+	 * In order to stay compatible with existing configurations, the
+	 * driver will temporarily switch to the internal clock, after
+	 * the first reset fails.
 	 */
 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-		if (dev->phy_address == 0xffffffff &&
-		    dev->phy_map == 0xffffffff) {
+		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+					   dev->phy_map == 0xffffffff)) {
 			/* No PHY: select internal loop clock before reset */
 			dcri_clrset(SDR0, SDR0_ETH_CFG,
 				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev)
 
 #ifdef CONFIG_PPC_DCR_NATIVE
 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
-		if (dev->phy_address == 0xffffffff &&
-		    dev->phy_map == 0xffffffff) {
+		if (!n && !try_internal_clock) {
+			/* first attempt has timed out. */
+			n = 20;
+			try_internal_clock = true;
+			goto do_retry;
+		}
+
+		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+					   dev->phy_map == 0xffffffff)) {
 			/* No PHY: restore external clock source after reset */
 			dcri_clrset(SDR0, SDR0_ETH_CFG,
 				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
@@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus)
 	return emac_reset(dev);
 }
 
+static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
+				    struct phy_device *phy_dev)
+{
+	phy_dev->autoneg = phy->autoneg;
+	phy_dev->speed = phy->speed;
+	phy_dev->duplex = phy->duplex;
+	phy_dev->advertising = phy->advertising;
+	return phy_start_aneg(phy_dev);
+}
+
 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
 {
 	struct net_device *ndev = phy->dev;
 	struct emac_instance *dev = netdev_priv(ndev);
 
-	dev->phy.autoneg = AUTONEG_ENABLE;
-	dev->phy.speed = SPEED_1000;
-	dev->phy.duplex = DUPLEX_FULL;
-	dev->phy.advertising = advertise;
 	phy->autoneg = AUTONEG_ENABLE;
-	phy->speed = dev->phy.speed;
-	phy->duplex = dev->phy.duplex;
 	phy->advertising = advertise;
-	return phy_start_aneg(dev->phy_dev);
+	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
 }
 
 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
@@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
 	struct net_device *ndev = phy->dev;
 	struct emac_instance *dev = netdev_priv(ndev);
 
-	dev->phy.autoneg =  AUTONEG_DISABLE;
-	dev->phy.speed = speed;
-	dev->phy.duplex = fd;
 	phy->autoneg = AUTONEG_DISABLE;
 	phy->speed = speed;
 	phy->duplex = fd;
-	return phy_start_aneg(dev->phy_dev);
+	return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
 }
 
 static int emac_mdio_poll_link(struct mii_phy *phy)
@@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy)
 {
 	struct net_device *ndev = phy->dev;
 	struct emac_instance *dev = netdev_priv(ndev);
+	struct phy_device *phy_dev = dev->phy_dev;
 	int res;
 
-	res = phy_read_status(dev->phy_dev);
+	res = phy_read_status(phy_dev);
 	if (res)
 		return res;
 
-	dev->phy.speed = phy->speed;
-	dev->phy.duplex = phy->duplex;
-	dev->phy.pause = phy->pause;
-	dev->phy.asym_pause = phy->asym_pause;
+	phy->speed = phy_dev->speed;
+	phy->duplex = phy_dev->duplex;
+	phy->pause = phy_dev->pause;
+	phy->asym_pause = phy_dev->asym_pause;
 	return 0;
 }
 
@@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy)
 	struct emac_instance *dev = netdev_priv(ndev);
 
 	phy_start(dev->phy_dev);
-	dev->phy.autoneg = phy->autoneg;
-	dev->phy.speed = phy->speed;
-	dev->phy.duplex = phy->duplex;
-	dev->phy.advertising = phy->advertising;
-	dev->phy.pause = phy->pause;
-	dev->phy.asym_pause = phy->asym_pause;
-
 	return phy_init_hw(dev->phy_dev);
 }
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329..c0fbeb3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -81,7 +81,7 @@
 static const char ibmvnic_driver_name[] = "ibmvnic";
 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
 
-MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_AUTHOR("Santiago Leon");
 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
@@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
 }
 #endif
 
+static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	return -EOPNOTSUPP;
+}
+
 static const struct net_device_ops ibmvnic_netdev_ops = {
 	.ndo_open		= ibmvnic_open,
 	.ndo_stop		= ibmvnic_close,
@@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ibmvnic_netpoll_controller,
 #endif
+	.ndo_change_mtu		= ibmvnic_change_mtu,
 };
 
 /* ethtool functions */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index cdde3cc2..44d9610 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,7 @@ struct i40e_pf {
 #define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
 #define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
 #define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED		BIT_ULL(4)
 #define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
 #define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7a8eb48..894c8e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
 	I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
 	I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
 	I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
-	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
 	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
 };
 
@@ -4092,7 +4092,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 
 	/* Only allow ATR evict on hardware that is capable of handling it */
 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
-		pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+		pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
 
 	if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
 		u16 sw_flags = 0, valid_flags = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5c9c9e..a7a4b28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-	if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+	if (!test_bit(__I40E_DOWN, pf->state) &&
 	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		queue_work(i40e_wq, &pf->service_task);
 }
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		 * this is not a performance path and napi_schedule()
 		 * can deal with rescheduling.
 		 */
-		if (!test_bit(__I40E_VSI_DOWN, pf->state))
+		if (!test_bit(__I40E_DOWN, pf->state))
 			napi_schedule_irqoff(&q_vector->napi);
 	}
 
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
 	/* re-enable interrupt causes */
 	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
+	if (!test_bit(__I40E_DOWN, pf->state)) {
 		i40e_service_event_schedule(pf);
 		i40e_irq_dynamic_enable_icr0(pf, false);
 	}
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
 
 	/* if interface is down do nothing */
-	if (test_bit(__I40E_VSI_DOWN, pf->state))
+	if (test_bit(__I40E_DOWN, pf->state))
 		return;
 
 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
 	int i;
 
 	/* if interface is down do nothing */
-	if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+	if (test_bit(__I40E_DOWN, pf->state) ||
 	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return;
 
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 	}
-	if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
-		reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
-		clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
+	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
 	}
 
 	/* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
 	/* If we're already down or resetting, just bail */
 	if (reset_flags &&
-	    !test_bit(__I40E_VSI_DOWN, pf->state) &&
+	    !test_bit(__I40E_DOWN, pf->state) &&
 	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
 		rtnl_lock();
 		i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 	u32 val;
 	int v;
 
-	if (test_bit(__I40E_VSI_DOWN, pf->state))
+	if (test_bit(__I40E_DOWN, pf->state))
 		goto clear_recovery;
 	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
@@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
 		    (pf->hw.aq.api_min_ver > 4))) {
 		/* Supported in FW API version higher than 1.4 */
 		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
-		pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
-	} else {
-		pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
 	}
 
+	/* Enable HW ATR eviction if possible */
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
+
 	pf->eeprom_version = 0xDEAD;
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
@@ -9767,7 +9768,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 		return -ENODEV;
 	}
 	if (vsi == pf->vsi[pf->lan_vsi] &&
-	    !test_bit(__I40E_VSI_DOWN, pf->state)) {
+	    !test_bit(__I40E_DOWN, pf->state)) {
 		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
 		return -ENODEV;
 	}
@@ -11003,7 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	pf->next_vsi = 0;
 	pf->pdev = pdev;
-	set_bit(__I40E_VSI_DOWN, pf->state);
+	set_bit(__I40E_DOWN, pf->state);
 
 	hw = &pf->hw;
 	hw->back = pf;
@@ -11293,7 +11294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * before setting up the misc vector or we get a race and the vector
 	 * ends up disabled forever.
 	 */
-	clear_bit(__I40E_VSI_DOWN, pf->state);
+	clear_bit(__I40E_DOWN, pf->state);
 
 	/* In case of MSIX we are going to setup the misc vector right here
 	 * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11449,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* Unwind what we've done if something failed in the setup */
 err_vsis:
-	set_bit(__I40E_VSI_DOWN, pf->state);
+	set_bit(__I40E_DOWN, pf->state);
 	i40e_clear_interrupt_scheme(pf);
 	kfree(pf->vsi);
 err_switch_setup:
@@ -11500,7 +11501,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, pf->state);
-	set_bit(__I40E_VSI_DOWN, pf->state);
+	set_bit(__I40E_DOWN, pf->state);
 	if (pf->service_timer.data)
 		del_timer_sync(&pf->service_timer);
 	if (pf->service_task.func)
@@ -11740,7 +11741,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
 	struct i40e_hw *hw = &pf->hw;
 
 	set_bit(__I40E_SUSPENDED, pf->state);
-	set_bit(__I40E_VSI_DOWN, pf->state);
+	set_bit(__I40E_DOWN, pf->state);
 	rtnl_lock();
 	i40e_prep_for_reset(pf, true);
 	rtnl_unlock();
@@ -11789,7 +11790,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 	int retval = 0;
 
 	set_bit(__I40E_SUSPENDED, pf->state);
-	set_bit(__I40E_VSI_DOWN, pf->state);
+	set_bit(__I40E_DOWN, pf->state);
 
 	if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
 		i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11842,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
 	/* handling the reset will rebuild the device state */
 	if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-		clear_bit(__I40E_VSI_DOWN, pf->state);
+		clear_bit(__I40E_DOWN, pf->state);
 		rtnl_lock();
 		i40e_reset_and_rebuild(pf, false, true);
 		rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 29321a6..77115c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
 	struct sk_buff *skb;
 
@@ -2340,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	/* Due to lack of space, no more new filters can be programmed */
 	if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
 		return;
-	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
 		/* HW ATR eviction will take care of removing filters on FIN
 		 * and RST packets.
 		 */
@@ -2402,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
-	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
 
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 95c23fb..0fb38ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
 					   VLAN_VID_MASK));
 	}
 
+	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 	if (vlan_id || qos)
 		ret = i40e_vsi_add_pvid(vsi, vlanprio);
 	else
 		i40e_vsi_remove_pvid(vsi);
+	spin_lock_bh(&vsi->mac_filter_hash_lock);
 
 	if (vlan_id) {
 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index dfe241a..12b02e5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
 	struct sk_buff *skb;
 
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9b875d7..33c9016 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
 				    dma_addr_t *dma_addr,
 				    phys_addr_t *phys_addr)
 {
-	int cpu = smp_processor_id();
+	int cpu = get_cpu();
 
 	*dma_addr = mvpp2_percpu_read(priv, cpu,
 				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
@@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
 		if (sizeof(phys_addr_t) == 8)
 			*phys_addr |= (u64)phys_addr_highbits << 32;
 	}
+
+	put_cpu();
 }
 
 /* Free all buffers from the pool */
@@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
 	return bm;
 }
 
-/* Get pool number from a BM cookie */
-static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
-{
-	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
-}
-
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
 				     dma_addr_t buf_dma_addr,
 				     phys_addr_t buf_phys_addr)
 {
-	int cpu = smp_processor_id();
+	int cpu = get_cpu();
 
 	if (port->priv->hw_version == MVPP22) {
 		u32 val = 0;
@@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
 			   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
 	mvpp2_percpu_write(port->priv, cpu,
 			   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+
+	put_cpu();
 }
 
 /* Refill BM pool */
-static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+static void mvpp2_pool_refill(struct mvpp2_port *port, int pool,
 			      dma_addr_t dma_addr,
 			      phys_addr_t phys_addr)
 {
-	int pool = mvpp2_bm_cookie_pool_get(bm);
-
 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
 }
 
@@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port)
 {
 	u32 val;
 
-	return;
-
 	/* Only GOP port 0 has an XLG MAC */
 	if (port->gop_id == 0) {
 		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
 }
 
-/* Obtain BM cookie information from descriptor */
-static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
-				 struct mvpp2_rx_desc *rx_desc)
-{
-	int cpu = smp_processor_id();
-	int pool;
-
-	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
-		MVPP2_RXD_BM_POOL_ID_MASK) >>
-		MVPP2_RXD_BM_POOL_ID_OFFS;
-
-	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
-	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
-}
-
 /* Tx descriptors helper methods */
 
 /* Get pointer to next Tx descriptor to be processed (send) by HW */
@@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
 				   struct mvpp2_rx_queue *rxq)
 {
-	int cpu = smp_processor_id();
+	int cpu = get_cpu();
 
 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
@@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
 			   rxq->pkts_coal);
+
+	put_cpu();
 }
 
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
 
 	/* Set Rx descriptors queue starting address - indirect access */
-	cpu = smp_processor_id();
+	cpu = get_cpu();
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 	if (port->priv->hw_version == MVPP21)
 		rxq_dma = rxq->descs_dma;
@@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+	put_cpu();
 
 	/* Set Offset */
 	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
@@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
 
 	for (i = 0; i < rx_received; i++) {
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
-		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
+		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
+		int pool;
 
-		mvpp2_pool_refill(port, bm,
+		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+			MVPP2_RXD_BM_POOL_ID_OFFS;
+
+		mvpp2_pool_refill(port, pool,
 				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
 				  mvpp2_rxdesc_cookie_get(port, rx_desc));
 	}
@@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
 	 * free descriptor number
 	 */
 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
-	cpu = smp_processor_id();
+	cpu = get_cpu();
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+	put_cpu();
 }
 
 /* Create and initialize a Tx queue */
@@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	txq->last_desc = txq->size - 1;
 
 	/* Set Tx descriptors queue starting address - indirect access */
-	cpu = smp_processor_id();
+	cpu = get_cpu();
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
 			   txq->descs_dma);
@@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+	put_cpu();
 
 	/* WRR / EJP configuration - indirect access */
 	tx_port_num = mvpp2_egress_port(port);
@@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
 
 	/* Set Tx descriptors queue starting address and size */
-	cpu = smp_processor_id();
+	cpu = get_cpu();
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+	put_cpu();
 }
 
 /* Cleanup Tx ports */
@@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 	int delay, pending, cpu;
 	u32 val;
 
-	cpu = smp_processor_id();
+	cpu = get_cpu();
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
@@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
 
 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
 	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+	put_cpu();
 
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
@@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
 
 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
 static int mvpp2_rx_refill(struct mvpp2_port *port,
-			   struct mvpp2_bm_pool *bm_pool, u32 bm)
+			   struct mvpp2_bm_pool *bm_pool, int pool)
 {
 	dma_addr_t dma_addr;
 	phys_addr_t phys_addr;
@@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port,
 	if (!buf)
 		return -ENOMEM;
 
-	mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+	mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
 
 	return 0;
 }
@@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		unsigned int frag_size;
 		dma_addr_t dma_addr;
 		phys_addr_t phys_addr;
-		u32 bm, rx_status;
+		u32 rx_status;
 		int pool, rx_bytes, err;
 		void *data;
 
@@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
 		data = (void *)phys_to_virt(phys_addr);
 
-		bm = mvpp2_bm_cookie_build(port, rx_desc);
-		pool = mvpp2_bm_cookie_pool_get(bm);
+		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+			MVPP2_RXD_BM_POOL_ID_OFFS;
 		bm_pool = &port->priv->bm_pools[pool];
 
 		/* In case of an error, release the requested buffer pointer
@@ -5516,7 +5506,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 			dev->stats.rx_errors++;
 			mvpp2_rx_error(port, rx_desc);
 			/* Return the buffer to the pool */
-			mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
+			mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
 			continue;
 		}
 
@@ -5531,7 +5521,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 			goto err_drop_frame;
 		}
 
-		err = mvpp2_rx_refill(port, bm_pool, bm);
+		err = mvpp2_rx_refill(port, bm_pool, pool);
 		if (err) {
 			netdev_err(port->dev, "failed to refill BM pools\n");
 			goto err_drop_frame;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae5fdc2..ffbcb27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		qpn = priv->drop_qp.qpn;
 	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
 		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
-		if (qpn < priv->rss_map.base_qpn ||
-		    qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
-			en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
-			return -EINVAL;
-		}
 	} else {
 		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
 			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b6..0710b36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
 #include <linux/etherdevice.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 #include <linux/export.h>
 
 #include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
+	if (!mlx4_qp_lookup(dev, rule->qpn)) {
+		mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 
 	list_for_each_entry(cur, &rule->list, list) {
 		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
-		if (ret < 0) {
-			mlx4_free_cmd_mailbox(dev, mailbox);
-			return ret;
-		}
+		if (ret < 0)
+			goto out;
+
 		size += ret;
 	}
 
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 		}
 	}
 
+out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
 	return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2d6abd4..5a310d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
 		__mlx4_qp_free_icm(dev, qpn);
 }
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+	struct mlx4_qp *qp;
+
+	spin_lock(&qp_table->lock);
+
+	qp = __mlx4_qp_lookup(dev, qpn);
+
+	spin_unlock(&qp_table->lock);
+	return qp;
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 	}
 
 	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+			mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+			err = -EOPNOTSUPP;
+			goto out;
+		}
+
 		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
 		cmd->qp_context.qos_vport = params->qos_vport;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 0751654..8127838 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
 
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+			   struct mlx4_vf_immed_vlan_work *work)
+{
+	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+	ctx->qp_context.qos_vport = work->qos_vport;
+}
+
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 {
 	struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 					qp->sched_queue & 0xC7;
 				upd_context->qp_context.pri_path.sched_queue |=
 					((work->qos & 0x7) << 3);
-				upd_context->qp_mask |=
-					cpu_to_be64(1ULL <<
-						    MLX4_UPD_QP_MASK_QOS_VPP);
-				upd_context->qp_context.qos_vport =
-					work->qos_vport;
+
+				if (dev->caps.flags2 &
+				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
+					update_qos_vpp(upd_context, work);
 			}
 
 			err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2fd044b..944fc17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -458,13 +458,15 @@ struct mlx5e_mpw_info {
 
 struct mlx5e_rx_am_stats {
 	int ppms; /* packets per msec */
+	int bpms; /* bytes per msec */
 	int epms; /* events per msec */
 };
 
 struct mlx5e_rx_am_sample {
-	ktime_t		time;
-	unsigned int	pkt_ctr;
-	u16		event_ctr;
+	ktime_t	time;
+	u32	pkt_ctr;
+	u32	byte_ctr;
+	u16	event_ctr;
 };
 
 struct mlx5e_rx_am { /* Adaptive Moderation */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8209aff..16486df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
 				 SOF_TIMESTAMPING_RX_HARDWARE |
 				 SOF_TIMESTAMPING_RAW_HARDWARE;
 
-	info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
-			 (BIT(1) << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+			 BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
-			   (BIT(1) << HWTSTAMP_FILTER_ALL);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_ALL);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41cd22a..277f4de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 	return netdev;
 
 err_cleanup_nic:
-	profile->cleanup(priv);
+	if (profile->cleanup)
+		profile->cleanup(priv);
 	free_netdev(netdev);
 
 	return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 79462c0..46984a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
 	params->num_tc                = 1;
 	params->lro_wqe_sz            = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 }
 
 static void mlx5e_build_rep_netdev(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 02dd3a9..acf32fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
 	mlx5e_am_step(am);
 }
 
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
 static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
 				  struct mlx5e_rx_am_stats *prev)
 {
-	int diff;
-
-	if (!prev->ppms)
-		return curr->ppms ? MLX5E_AM_STATS_BETTER :
+	if (!prev->bpms)
+		return curr->bpms ? MLX5E_AM_STATS_BETTER :
 				    MLX5E_AM_STATS_SAME;
 
-	diff = curr->ppms - prev->ppms;
-	if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
-		return (diff > 0) ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_WORSE;
+	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
+						   MLX5E_AM_STATS_WORSE;
 
-	if (!prev->epms)
-		return curr->epms ? MLX5E_AM_STATS_WORSE :
-				    MLX5E_AM_STATS_SAME;
+	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
+						   MLX5E_AM_STATS_WORSE;
 
-	diff = curr->epms - prev->epms;
-	if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
-		return (diff < 0) ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_WORSE;
+	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
+						   MLX5E_AM_STATS_WORSE;
 
 	return MLX5E_AM_STATS_SAME;
 }
@@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
 {
 	s->time	     = ktime_get();
 	s->pkt_ctr   = rq->stats.packets;
+	s->byte_ctr  = rq->stats.bytes;
 	s->event_ctr = rq->cq.event_ctr;
 }
 
 #define MLX5E_AM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
 
 static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
 				struct mlx5e_rx_am_sample *end,
@@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
 {
 	/* u32 holds up to 71 minutes, should be enough */
 	u32 delta_us = ktime_us_delta(end->time, start->time);
-	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+			     start->byte_ctr);
 
 	if (!delta_us)
 		return;
 
-	curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
-	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+	curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
+					delta_us);
 }
 
 void mlx5e_rx_am_work(struct work_struct *work)
@@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
 
 	switch (am->state) {
 	case MLX5E_AM_MEASURE_IN_PROGRESS:
-		nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+		nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
+				  am->start_sample.event_ctr);
 		if (nevents < MLX5E_AM_NEVENTS)
 			break;
 		mlx5e_am_sample(rq, &end_sample);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53e4992..f81c3aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -417,20 +417,13 @@ struct mlx5e_stats {
 };
 
 static const struct counter_desc mlx5e_pme_status_desc[] = {
-	{ "module_plug", 0 },
 	{ "module_unplug", 8 },
 };
 
 static const struct counter_desc mlx5e_pme_error_desc[] = {
-	{ "module_pwr_budget_exd", 0 },  /* power budget exceed */
-	{ "module_long_range", 8 },      /* long range for non MLNX cable */
-	{ "module_bus_stuck", 16 },      /* bus stuck (I2C or data shorted) */
-	{ "module_no_eeprom", 24 },      /* no eeprom/retry time out */
-	{ "module_enforce_part", 32 },   /* enforce part number list */
-	{ "module_unknown_id", 40 },     /* unknown identifier */
-	{ "module_high_temp", 48 },      /* high temperature */
+	{ "module_bus_stuck", 16 },       /* bus stuck (I2C or data shorted) */
+	{ "module_high_temp", 48 },       /* high temperature */
 	{ "module_bad_shorted", 56 },    /* bad or shorted cable/module */
-	{ "module_unknown_status", 64 },
 };
 
 #endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec63158..9df9fc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = {
 	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
 	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},
 
-	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
 	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
 	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
 	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f991f66..a53e982 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
 	return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 {
-	struct mlx5_core_dev *dev;
-	u16 cur_mlx5_mode, mlx5_mode = 0;
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
 
-	dev = devlink_priv(devlink);
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -EOPNOTSUPP;
 
 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
 
-	cur_mlx5_mode = dev->priv.eswitch->mode;
-
-	if (cur_mlx5_mode == SRIOV_NONE)
+	if (dev->priv.eswitch->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
 
+	return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	u16 cur_mlx5_mode, mlx5_mode = 0;
+	int err;
+
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
+
+	cur_mlx5_mode = dev->priv.eswitch->mode;
+
 	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
@@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-	struct mlx5_core_dev *dev;
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	int err;
 
-	dev = devlink_priv(devlink);
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (dev->priv.eswitch->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
@@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	int num_vports = esw->enabled_vports;
 	int err, vport;
 	u8 mlx5_mode;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (err)
 		goto out;
 
-	for (vport = 1; vport < num_vports; vport++) {
+	for (vport = 1; vport < esw->enabled_vports; vport++) {
 		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
 		if (err) {
 			esw_warn(dev, "Failed to set min inline on vport %d\n",
@@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
@@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
 	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
@@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
+	err = mlx5_devlink_eswitch_check(devlink);
+	if (err)
+		return err;
 
 	*encap = esw->offloads.encap;
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0e487e8..8f5125c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace
 	ft_attr.level   = level;
 	ft_attr.prio    = prio;
 
-	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0);
+	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
 }
 
 struct mlx5_flow_table*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 44f59b1..f27f84f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -275,10 +275,8 @@ static void poll_health(unsigned long data)
 	struct mlx5_core_health *health = &dev->priv.health;
 	u32 count;
 
-	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-		mod_timer(&health->timer, get_next_poll_jiffies());
-		return;
-	}
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		goto out;
 
 	count = ioread32be(health->health_counter);
 	if (count == health->prev)
@@ -290,8 +288,6 @@ static void poll_health(unsigned long data)
 	if (health->miss_counter == MAX_MISSES) {
 		dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
 		print_health_info(dev);
-	} else {
-		mod_timer(&health->timer, get_next_poll_jiffies());
 	}
 
 	if (in_fatal(dev) && !health->sick) {
@@ -305,6 +301,9 @@ static void poll_health(unsigned long data)
 				"new health works are not permitted at this stage\n");
 		spin_unlock(&health->wq_lock);
 	}
+
+out:
+	mod_timer(&health->timer, get_next_poll_jiffies());
 }
 
 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index fe5546b..13be264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = {
 	},
 };
 
-#define FW_INIT_TIMEOUT_MILI	2000
-#define FW_INIT_WAIT_MS		2
+#define FW_INIT_TIMEOUT_MILI		2000
+#define FW_INIT_WAIT_MS			2
+#define FW_PRE_INIT_TIMEOUT_MILI	10000
 
 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
 {
@@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	/* disable cmdif checksum */
 	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
-	/* If the HCA supports 4K UARs use it */
-	if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+	/* Enable 4K UAR only when HCA supports it and page size is bigger
+	 * than 4K.
+	 */
+	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
 
 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
@@ -621,10 +624,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
-#ifdef CONFIG_SMP
-	if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
 		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-#endif
 
 	return 0;
 }
@@ -1012,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	 */
 	dev->state = MLX5_DEVICE_STATE_UP;
 
+	/* wait for firmware to accept initialization segments configurations
+	 */
+	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+	if (err) {
+		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+			FW_PRE_INIT_TIMEOUT_MILI);
+		goto out;
+	}
+
 	err = mlx5_cmd_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
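
The mlx5 hunk above also swaps an #ifdef CONFIG_SMP block for an IS_ENABLED(CONFIG_SMP) && ... condition, which keeps the call visible to the compiler (and type-checked) in every configuration while dead-code elimination drops it when the option is off. Below is a minimal standalone model of that idiom; the IS_ENABLED() here is a simplified stand-in for the kernel's kconfig macro and the function names are invented for illustration only:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(): the "config option"
 * is just a plain 0/1 macro rather than kconfig's y/m/n handling.
 */
#define CONFIG_SMP_ENABLED 0
#define IS_ENABLED(opt) (opt)

static int set_affinity_hint(int irq)
{
	printf("affinity hint set for irq %d\n", irq);
	return 0;
}

int main(void)
{
	int irq = 42;

	/* The call stays in the source for every build, so it is always
	 * type-checked, but the optimizer removes it when the option
	 * evaluates to 0 -- the same effect as the old #ifdef.
	 */
	if (IS_ENABLED(CONFIG_SMP_ENABLED) && set_affinity_hint(irq))
		fprintf(stderr, "irq_set_affinity_hint failed, irq 0x%.4x\n", irq);

	return 0;
}
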
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9f89c41..0744452 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3334,6 +3334,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
 	u16 vid = vlan_dev_vlan_id(vlan_dev);
 
+	if (netif_is_bridge_port(vlan_dev))
+		return 0;
+
 	if (mlxsw_sp_port_dev_check(real_dev))
 		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
 						     vid);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 483241b..a672f6a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
 				qed_wr(p_hwfn,
 				       p_ptt,
 				       s_storm_defs[storm_id].cm_ctx_wr_addr,
-				       BIT(9) | lid);
+				       (i << 9) | lid);
 				*(dump_buf + offset) = qed_rd(p_hwfn,
 							      p_ptt,
 							      rd_reg_addr);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d123..715b3aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
 		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
 		break;
 	default:
-		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+		DP_VERBOSE(cdev, QED_MSG_SP,
+			   "Invalid protocol type = %d\n", type);
 		return;
 	}
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7245b10..8131292 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
 	u32 (*get_cap_size)(void *, int);
 	void (*set_sys_info)(void *, int, u32);
 	void (*store_cap_mask)(void *, u32);
+	bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+	bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
-static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
 {
 	return adapter->ahw->extra_capability[0] &
 	       QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
 }
 
-static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
 {
 	return adapter->ahw->extra_capability[0] &
 	       QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
 }
 
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+	return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+	return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+	return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+	return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
 	return adapter->nic_ops->start_firmware(adapter);
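
The qlcnic header change above replaces generation-specific inline capability checks with per-adapter hw_ops callbacks, so the 82xx family simply reports "no encapsulation offload" instead of reading 83xx-only capability bits. The following is a small standalone model of that dispatch pattern; the struct and function names are invented for illustration and are not the driver's real symbols:

#include <stdbool.h>
#include <stdio.h>

struct adapter;

/* Each hardware generation supplies its own answer; callers never look
 * at family-specific capability words directly.
 */
struct hw_ops {
	bool (*encap_rx_offload)(struct adapter *adapter);
	bool (*encap_tx_offload)(struct adapter *adapter);
};

struct adapter {
	const struct hw_ops *ops;
	unsigned int extra_capability;
};

#define CAP_ENCAP_RX 0x1
#define CAP_ENCAP_TX 0x2

/* 83xx-style: capability bits decide. */
static bool gen83_encap_rx(struct adapter *a) { return a->extra_capability & CAP_ENCAP_RX; }
static bool gen83_encap_tx(struct adapter *a) { return a->extra_capability & CAP_ENCAP_TX; }

/* 82xx-style: the feature is simply not present. */
static bool gen82_encap_rx(struct adapter *a) { (void)a; return false; }
static bool gen82_encap_tx(struct adapter *a) { (void)a; return false; }

static const struct hw_ops gen83_ops = { gen83_encap_rx, gen83_encap_tx };
static const struct hw_ops gen82_ops = { gen82_encap_rx, gen82_encap_tx };

/* Callers stay generation-agnostic. */
static bool encap_rx_offload(struct adapter *a) { return a->ops->encap_rx_offload(a); }

int main(void)
{
	struct adapter a83 = { &gen83_ops, CAP_ENCAP_RX };
	struct adapter a82 = { &gen82_ops, 0 };

	printf("83xx rx encap: %d, 82xx rx encap: %d\n",
	       encap_rx_offload(&a83), encap_rx_offload(&a82));
	return 0;
}
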
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4fb6879..f7080d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
 	.get_cap_size			= qlcnic_83xx_get_cap_size,
 	.set_sys_info			= qlcnic_83xx_set_sys_info,
 	.store_cap_mask			= qlcnic_83xx_store_cap_mask,
+	.encap_rx_offload		= qlcnic_83xx_encap_rx_offload,
+	.encap_tx_offload		= qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 838cc0c..7848cf0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 			}
 			return -EIO;
 		}
-		usleep_range(1000, 1500);
+		udelay(1200);
 	}
 
 	if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b6628aa..1b5f7d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 	.get_cap_size			= qlcnic_82xx_get_cap_size,
 	.set_sys_info			= qlcnic_82xx_set_sys_info,
 	.store_cap_mask			= qlcnic_82xx_store_cap_mask,
+	.encap_rx_offload               = qlcnic_82xx_encap_rx_offload,
+	.encap_tx_offload               = qlcnic_82xx_encap_tx_offload,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2f656f3..c58180f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
 	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
 	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
 	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
+	.encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+	.encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index cc065ff..bcd4708 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
 	emac_mac_config(adpt);
 	emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
 
-	adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+	adpt->phydev->irq = PHY_POLL;
 	ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
 				 PHY_INTERFACE_MODE_SGMII);
 	if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 441c1936..18461fc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -13,15 +13,11 @@
 /* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
  */
 
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 #include <linux/acpi.h>
 #include "emac.h"
-#include "emac-mac.h"
 
 /* EMAC base register offsets */
 #define EMAC_MDIO_CTRL                                        0x001414
@@ -52,62 +48,10 @@
 
 #define MDIO_WAIT_TIMES                                           1000
 
-#define EMAC_LINK_SPEED_DEFAULT (\
-		EMAC_LINK_SPEED_10_HALF  |\
-		EMAC_LINK_SPEED_10_FULL  |\
-		EMAC_LINK_SPEED_100_HALF |\
-		EMAC_LINK_SPEED_100_FULL |\
-		EMAC_LINK_SPEED_1GB_FULL)
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The autopoll feature takes over the MDIO bus.  In order for
- * the PHY driver to be able to talk to the PHY over the MDIO
- * bus, we need to temporarily disable the autopoll feature.
- */
-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
-{
-	u32 val;
-
-	/* disable autopoll */
-	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
-
-	/* wait for any mdio polling to complete */
-	if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
-				!(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
-		return 0;
-
-	/* failed to disable; ensure it is enabled before returning */
-	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-
-	return -EBUSY;
-}
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The EMAC has the ability to poll the external PHY on the MDIO
- * bus for link state changes.  This eliminates the need for the
- * driver to poll the phy.  If if the link state does change,
- * the EMAC issues an interrupt on behalf of the PHY.
- */
-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
-{
-	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-}
-
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
 	struct emac_adapter *adpt = bus->priv;
 	u32 reg;
-	int ret;
-
-	ret = emac_phy_mdio_autopoll_disable(adpt);
-	if (ret)
-		return ret;
 
 	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
 			  (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
 			       !(reg & (MDIO_START | MDIO_BUSY)),
 			       100, MDIO_WAIT_TIMES * 100))
-		ret = -EIO;
-	else
-		ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+		return -EIO;
 
-	emac_phy_mdio_autopoll_enable(adpt);
-
-	return ret;
+	return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
 }
 
 static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 {
 	struct emac_adapter *adpt = bus->priv;
 	u32 reg;
-	int ret;
-
-	ret = emac_phy_mdio_autopoll_disable(adpt);
-	if (ret)
-		return ret;
 
 	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
 			  (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
 			       !(reg & (MDIO_START | MDIO_BUSY)), 100,
 			       MDIO_WAIT_TIMES * 100))
-		ret = -EIO;
+		return -EIO;
 
-	emac_phy_mdio_autopoll_enable(adpt);
-
-	return ret;
+	return 0;
 }
 
 /* Configure the MDIO bus and connect the external PHY */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 28a8cdc..98a326f 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -50,19 +50,7 @@
 #define DMAR_DLY_CNT_DEF				    15
 #define DMAW_DLY_CNT_DEF				     4
 
-#define IMR_NORMAL_MASK         (\
-		ISR_ERROR       |\
-		ISR_GPHY_LINK   |\
-		ISR_TX_PKT      |\
-		GPHY_WAKEUP_INT)
-
-#define IMR_EXTENDED_MASK       (\
-		SW_MAN_INT      |\
-		ISR_OVER        |\
-		ISR_ERROR       |\
-		ISR_GPHY_LINK   |\
-		ISR_TX_PKT      |\
-		GPHY_WAKEUP_INT)
+#define IMR_NORMAL_MASK		(ISR_ERROR | ISR_OVER | ISR_TX_PKT)
 
 #define ISR_TX_PKT      (\
 	TX_PKT_INT      |\
@@ -70,10 +58,6 @@
 	TX_PKT_INT2     |\
 	TX_PKT_INT3)
 
-#define ISR_GPHY_LINK        (\
-	GPHY_LINK_UP_INT     |\
-	GPHY_LINK_DOWN_INT)
-
 #define ISR_OVER        (\
 	RFD0_UR_INT     |\
 	RFD1_UR_INT     |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
 	if (status & ISR_OVER)
 		net_warn_ratelimited("warning: TX/RX overflow\n");
 
-	/* link event */
-	if (status & ISR_GPHY_LINK)
-		phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
-
 exit:
 	/* enable the interrupt */
 	writel(irq->mask, adpt->base + EMAC_INT_MASK);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3cd7989..784782d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	int ring_size;
 	int i;
 
-	/* Free RX skb ringbuffer */
-	if (priv->rx_skb[q]) {
-		for (i = 0; i < priv->num_rx_ring[q]; i++)
-			dev_kfree_skb(priv->rx_skb[q][i]);
-	}
-	kfree(priv->rx_skb[q]);
-	priv->rx_skb[q] = NULL;
-
-	/* Free aligned TX buffers */
-	kfree(priv->tx_align[q]);
-	priv->tx_align[q] = NULL;
-
 	if (priv->rx_ring[q]) {
 		for (i = 0; i < priv->num_rx_ring[q]; i++) {
 			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 		priv->tx_ring[q] = NULL;
 	}
 
+	/* Free RX skb ringbuffer */
+	if (priv->rx_skb[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++)
+			dev_kfree_skb(priv->rx_skb[q][i]);
+	}
+	kfree(priv->rx_skb[q]);
+	priv->rx_skb[q] = NULL;
+
+	/* Free aligned TX buffers */
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
+
 	/* Free TX skb ringbuffer.
 	 * SKBs are freed by ravb_tx_free() call above.
 	 */
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 2ae8524..a9ce82d 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
 		*index = entry->index;
 		resolved = false;
 	} else if (removing) {
-		ofdpa_neigh_del(trans, found);
 		*index = found->index;
+		ofdpa_neigh_del(trans, found);
 	} else if (updating) {
 		ofdpa_neigh_update(found, trans, NULL, false);
 		resolved = !is_zero_ether_addr(found->eth_dst);
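
The rocker change above is a one-line reordering: found->index is read into *index before ofdpa_neigh_del() can free the entry, closing a use-after-free window. A tiny standalone illustration of the same rule, with invented names, assuming nothing about the real ofdpa structures:

#include <stdio.h>
#include <stdlib.h>

struct neigh_entry {
	unsigned int index;
};

static void neigh_del(struct neigh_entry *e)
{
	free(e);	/* after this, e must not be dereferenced */
}

int main(void)
{
	struct neigh_entry *found = malloc(sizeof(*found));
	unsigned int index;

	if (!found)
		return 1;
	found->index = 7;

	/* Correct order: capture everything still needed from the object,
	 * then release it.  Swapping these two lines reads freed memory.
	 */
	index = found->index;
	neigh_del(found);

	printf("removed entry %u\n", index);
	return 0;
}
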
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 78efb28..78f9e43 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4172,7 +4172,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
 	 * recipients
 	 */
 	if (is_mc_recip) {
-		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
 		unsigned int depth, i;
 
 		memset(inbuf, 0, sizeof(inbuf));
@@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
 		} else {
 			efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
-					       MC_CMD_FILTER_OP_IN_LEN,
+					       MC_CMD_FILTER_OP_EXT_IN_LEN,
 					       NULL, 0, rc);
 		}
 	}
@@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
 				      struct efx_filter_spec *spec)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
 	struct efx_filter_spec *saved_spec;
 	unsigned int hash, i, depth = 1;
 	bool replacing = false;
@@ -4940,7 +4940,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
 	struct efx_filter_spec *spec;
 	unsigned int filter_idx;
 	int rc;
@@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
 
 	/* Insert/renew filters */
 	for (i = 0; i < addr_count; i++) {
+		EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
 		efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
 				}
 				return rc;
 			} else {
-				/* mark as not inserted, and carry on */
-				rc = EFX_EF10_FILTER_ID_INVALID;
+				/* keep invalid ID, and carry on */
 			}
+		} else {
+			ids[i] = efx_ef10_filter_get_unsafe_id(rc);
 		}
-		ids[i] = efx_ef10_filter_get_unsafe_id(rc);
 	}
 
 	if (multicast && rollback) {
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index b7e4345..019cef1 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -661,8 +661,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
 		up_write(&vf->efx->filter_sem);
 		mutex_unlock(&vf->efx->mac_lock);
 
-		up_write(&vf->efx->filter_sem);
-
 		rc2 = efx_net_open(vf->efx->net_dev);
 		if (rc2)
 			goto reset_nic;
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 489ef14..6a9c9544 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -37,6 +37,7 @@
 #define TSE_PCS_CONTROL_AN_EN_MASK			BIT(12)
 #define TSE_PCS_CONTROL_REG				0x00
 #define TSE_PCS_CONTROL_RESTART_AN_MASK			BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII			0x1140
 #define TSE_PCS_IF_MODE_REG				0x28
 #define TSE_PCS_LINK_TIMER_0_REG			0x24
 #define TSE_PCS_LINK_TIMER_1_REG			0x26
@@ -65,6 +66,7 @@
 #define TSE_PCS_SW_RESET_TIMEOUT			100
 #define TSE_PCS_USE_SGMII_AN_MASK			BIT(1)
 #define TSE_PCS_USE_SGMII_ENA				BIT(0)
+#define TSE_PCS_IF_USE_SGMII				0x03
 
 #define SGMII_ADAPTER_CTRL_REG				0x00
 #define SGMII_ADAPTER_DISABLE				0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
 {
 	int ret = 0;
 
-	writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+	writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+	writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
 
 	writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
 	writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index aa64764..e0ef02f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
 {
 	/* Context type from W/B descriptor must be zero */
 	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
-		return -EINVAL;
+		return 0;
 
 	/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
 	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
-		return 0;
+		return 1;
 
-	return 1;
+	return 0;
 }
 
 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
@@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 		}
 	}
 exit:
-	return ret;
+	if (likely(ret == 0))
+		return 1;
+
+	return 0;
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a74c481..6e4cbc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 		return;
 
 	/* check tx tstamp status */
-	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+	if (priv->hw->desc->get_tx_timestamp_status(p)) {
 		/* get the valid tstamp */
 		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
 
-		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
 		/* pass tstamp to stack */
 		skb_tstamp_tx(skb, &shhwtstamp);
 	}
@@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 		return;
 
 	/* Check if timestamp is available */
-	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
 		/* For GMAC4, the valid timestamp is from CTX next desc. */
 		if (priv->plat->has_gmac4)
 			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
 		else
 			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 		shhwtstamp = skb_hwtstamps(skb);
 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
 	} else  {
-		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
 	}
 }
 
@@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 			/* PTP v1, UDP, any kind of event packet */
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 			ptp_v2 = PTP_TCR_TSVER2ENA;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 			ptp_v2 = PTP_TCR_TSVER2ENA;
 			/* take time stamp for all event messages */
-			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+			if (priv->plat->has_gmac4)
+				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
+			else
+				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 
 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -1208,7 +1217,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	unsigned int bfsize = 0;
 	int ret = -ENOMEM;
-	u32 queue;
+	int queue;
 	int i;
 
 	if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2733,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 
 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
 			0, 1,
-			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
 			0, 0);
 
 		tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2822,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[first_entry].buf = des;
 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-	tx_q->tx_skbuff[first_entry] = skb;
 
 	first->des0 = cpu_to_le32(des);
 
@@ -2856,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2947,7 +2963,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	int i, csum_insertion = 0, is_jumbo = 0;
 	u32 queue = skb_get_queue_mapping(skb);
 	int nfrags = skb_shinfo(skb)->nr_frags;
-	unsigned int entry, first_entry;
+	int entry;
+	unsigned int first_entry;
 	struct dma_desc *desc, *first;
 	struct stmmac_tx_queue *tx_q;
 	unsigned int enh_desc;
@@ -2988,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first = desc;
 
-	tx_q->tx_skbuff[first_entry] = skb;
-
 	enh_desc = priv->plat->enh_desc;
 	/* To program the descriptors according to the size of the frame */
 	if (enh_desc)
@@ -3037,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 						skb->len);
 	}
 
-	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[entry] = skb;
 
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
+	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
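
In both stmmac transmit paths above, the skb pointer is now stored only in the last descriptor used for the packet rather than the first, so stmmac_tx_clean() cannot free the skb while later fragments of the same packet are still owned by the DMA engine. Below is a minimal standalone model of that ownership rule, covering only the ring bookkeeping (no DMA); the names are invented for illustration:

#include <stdio.h>

#define RING_SIZE 8

struct packet { const char *name; };

/* One skb slot per descriptor; only the final descriptor of a packet
 * points at it, so cleanup frees the packet exactly once and only after
 * every descriptor it used has completed.
 */
static struct packet *tx_skbuff[RING_SIZE];
static unsigned int cur_tx;

static void xmit(struct packet *pkt, int ndesc)
{
	unsigned int entry = cur_tx;

	for (int i = 0; i < ndesc; i++) {
		tx_skbuff[entry] = (i == ndesc - 1) ? pkt : NULL;
		entry = (entry + 1) % RING_SIZE;
	}
	/* leave cur_tx on a fresh descriptor for the next caller */
	cur_tx = entry;
}

int main(void)
{
	struct packet p = { "tso-frame" };

	xmit(&p, 3);
	for (unsigned int i = 0; i < RING_SIZE; i++)
		printf("desc %u -> %s\n", i,
		       tx_skbuff[i] ? tx_skbuff[i]->name : "(none)");
	return 0;
}
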
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 48fb72f..f4b31d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -59,7 +59,8 @@
 /* Enable Snapshot for Messages Relevant to Master */
 #define	PTP_TCR_TSMSTRENA	BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define	PTP_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
+#define	PTP_TCR_SNAPTYPSEL_1	BIT(16)
+#define	PTP_GMAC4_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
 #define	PTP_TCR_TSENMACADDR	BIT(18)
 
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 1562ab4..56ba411 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
 	if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
 		return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
 
-	if (of_machine_is_compatible("ti,am4372"))
+	if (of_machine_is_compatible("ti,am43"))
 		return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
 
 	if (of_machine_is_compatible("ti,dra7"))
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 959fd12..199459b 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1007,7 +1007,7 @@ static void geneve_setup(struct net_device *dev)
 
 	dev->netdev_ops = &geneve_netdev_ops;
 	dev->ethtool_ops = &geneve_ethtool_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 
 	/* make enough headroom for basic scenario */
 	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
-	if (ip_tunnel_info_af(info) == AF_INET) {
+	if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
 		encap_len += sizeof(struct iphdr);
 		dev->max_mtu -= sizeof(struct iphdr);
 	} else {
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7b652bb..ca110cd 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -611,7 +611,7 @@ static const struct net_device_ops gtp_netdev_ops = {
 static void gtp_link_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &gtp_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 
 	dev->hard_header_len = 0;
 	dev->addr_len = 0;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 922bf44..021a8ec 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -311,7 +311,7 @@ static void sp_setup(struct net_device *dev)
 {
 	/* Finish setting up the DEVICE info. */
 	dev->netdev_ops		= &sp_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->mtu		= SIXP_MTU;
 	dev->hard_header_len	= AX25_MAX_HEADER_LEN;
 	dev->header_ops 	= &ax25_header_ops;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f62e7f3..78a6414 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -476,7 +476,7 @@ static const struct net_device_ops bpq_netdev_ops = {
 static void bpq_setup(struct net_device *dev)
 {
 	dev->netdev_ops	     = &bpq_netdev_ops;
-	dev->destructor	     = free_netdev;
+	dev->needs_free_netdev = true;
 
 	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
 	memcpy(dev->dev_addr,  &ax25_defaddr, AX25_ADDR_LEN);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8c3633c..97e3bc6 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case HDLCDRVCTL_CALIBRATE:
 		if(!capable(CAP_SYS_RAWIO))
 			return -EPERM;
+		if (s->par.bitrate <= 0)
+			return -EINVAL;
 		if (bi.data.calibrate > INT_MAX / s->par.bitrate)
 			return -EINVAL;
 		s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
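
The hdlcdrv fix above adds a bitrate <= 0 check ahead of the existing calibrate > INT_MAX / bitrate test, since dividing by a zero or negative bitrate is itself invalid. A small standalone sketch of the combined validation follows; the function and parameter names are invented and the extra negative-calibrate check is an assumption of this model, not part of the driver:

#include <limits.h>
#include <stdio.h>

/* Returns 0 and writes the scaled result, or -1 if the inputs are
 * invalid: non-positive rate, or a product that would overflow int.
 */
static int scale_calibrate(int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0)
		return -1;	/* guard the division below */
	if (calibrate < 0 || calibrate > INT_MAX / bitrate)
		return -1;	/* calibrate * bitrate would overflow */
	*out = calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	int out;

	printf("ok case: %d\n", scale_calibrate(100, 9600, &out) == 0 ? out : -1);
	printf("zero bitrate rejected: %s\n",
	       scale_calibrate(100, 0, &out) ? "yes" : "no");
	printf("overflow rejected: %s\n",
	       scale_calibrate(INT_MAX, 9600, &out) ? "yes" : "no");
	return 0;
}
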
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 262b2ea5..6066f1b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -171,6 +171,8 @@ struct rndis_device {
 	spinlock_t request_lock;
 	struct list_head req_list;
 
+	struct work_struct mcast_work;
+
 	u8 hw_mac_adr[ETH_ALEN];
 	u8 rss_key[NETVSC_HASH_KEYLEN];
 	u16 ind_table[ITAB_NUM];
@@ -201,6 +203,7 @@ int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 int rndis_filter_device_add(struct hv_device *dev,
 			    struct netvsc_device_info *info);
+void rndis_filter_update(struct netvsc_device *nvdev);
 void rndis_filter_device_remove(struct hv_device *dev,
 				struct netvsc_device *nvdev);
 int rndis_filter_set_rss_param(struct rndis_device *rdev,
@@ -211,7 +214,6 @@ int rndis_filter_receive(struct net_device *ndev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen);
 
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
 
 void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -696,7 +698,6 @@ struct net_device_context {
 	/* list protection */
 	spinlock_t lock;
 
-	struct work_struct work;
 	u32 msg_enable; /* debug level */
 
 	u32 tx_checksum_mask;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4421a6d..643c539 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -56,37 +56,12 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void do_set_multicast(struct work_struct *w)
-{
-	struct net_device_context *ndevctx =
-		container_of(w, struct net_device_context, work);
-	struct hv_device *device_obj = ndevctx->device_ctx;
-	struct net_device *ndev = hv_get_drvdata(device_obj);
-	struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
-	struct rndis_device *rdev;
-
-	if (!nvdev)
-		return;
-
-	rdev = nvdev->extension;
-	if (rdev == NULL)
-		return;
-
-	if (ndev->flags & IFF_PROMISC)
-		rndis_filter_set_packet_filter(rdev,
-			NDIS_PACKET_TYPE_PROMISCUOUS);
-	else
-		rndis_filter_set_packet_filter(rdev,
-			NDIS_PACKET_TYPE_BROADCAST |
-			NDIS_PACKET_TYPE_ALL_MULTICAST |
-			NDIS_PACKET_TYPE_DIRECTED);
-}
-
 static void netvsc_set_multicast_list(struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
+	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 
-	schedule_work(&net_device_ctx->work);
+	rndis_filter_update(nvdev);
 }
 
 static int netvsc_open(struct net_device *net)
@@ -123,8 +98,6 @@ static int netvsc_close(struct net_device *net)
 
 	netif_tx_disable(net);
 
-	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
-	cancel_work_sync(&net_device_ctx->work);
 	ret = rndis_filter_close(nvdev);
 	if (ret != 0) {
 		netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -803,7 +776,7 @@ static int netvsc_set_channels(struct net_device *net,
 	    channels->rx_count || channels->tx_count || channels->other_count)
 		return -EINVAL;
 
-	if (count > net->num_tx_queues || count > net->num_rx_queues)
+	if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
 		return -EINVAL;
 
 	if (!nvdev || nvdev->destroy)
@@ -1028,7 +1001,7 @@ static const struct {
 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
+	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
 
 	if (!nvdev)
 		return -ENODEV;
@@ -1158,11 +1131,22 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void netvsc_poll_controller(struct net_device *net)
+static void netvsc_poll_controller(struct net_device *dev)
 {
-	/* As netvsc_start_xmit() works synchronous we don't have to
-	 * trigger anything here.
-	 */
+	struct net_device_context *ndc = netdev_priv(dev);
+	struct netvsc_device *ndev;
+	int i;
+
+	rcu_read_lock();
+	ndev = rcu_dereference(ndc->nvdev);
+	if (ndev) {
+		for (i = 0; i < ndev->num_chn; i++) {
+			struct netvsc_channel *nvchan = &ndev->chan_table[i];
+
+			napi_schedule(&nvchan->napi);
+		}
+	}
+	rcu_read_unlock();
 }
 #endif
 
@@ -1219,7 +1203,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
 	rndis_dev = ndev->extension;
 	if (indir) {
 		for (i = 0; i < ITAB_NUM; i++)
-			if (indir[i] >= dev->num_rx_queues)
+			if (indir[i] >= VRSS_CHANNEL_MAX)
 				return -EINVAL;
 
 		for (i = 0; i < ITAB_NUM; i++)
@@ -1552,7 +1536,6 @@ static int netvsc_probe(struct hv_device *dev,
 	hv_set_drvdata(dev, net);
 
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
-	INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1622,7 +1605,6 @@ static int netvsc_remove(struct hv_device *dev)
 	netif_device_detach(net);
 
 	cancel_delayed_work_sync(&ndev_ctx->dwork);
-	cancel_work_sync(&ndev_ctx->work);
 
 	/*
 	 * Call to the vsc driver to let it know that the device is being
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f9d5b0b..cb79cd0 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,6 +31,7 @@
 
 #include "hyperv_net.h"
 
+static void rndis_set_multicast(struct work_struct *w);
 
 #define RNDIS_EXT_LEN PAGE_SIZE
 struct rndis_request {
@@ -76,6 +77,7 @@ static struct rndis_device *get_rndis_device(void)
 	spin_lock_init(&device->request_lock);
 
 	INIT_LIST_HEAD(&device->req_list);
+	INIT_WORK(&device->mcast_work, rndis_set_multicast);
 
 	device->state = RNDIS_DEV_UNINITIALIZED;
 
@@ -815,7 +817,8 @@ static int rndis_filter_query_link_speed(struct rndis_device *dev)
 	return ret;
 }
 
-int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+					  u32 new_filter)
 {
 	struct rndis_request *request;
 	struct rndis_set_request *set;
@@ -846,6 +849,28 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 	return ret;
 }
 
+static void rndis_set_multicast(struct work_struct *w)
+{
+	struct rndis_device *rdev
+		= container_of(w, struct rndis_device, mcast_work);
+
+	if (rdev->ndev->flags & IFF_PROMISC)
+		rndis_filter_set_packet_filter(rdev,
+					       NDIS_PACKET_TYPE_PROMISCUOUS);
+	else
+		rndis_filter_set_packet_filter(rdev,
+					       NDIS_PACKET_TYPE_BROADCAST |
+					       NDIS_PACKET_TYPE_ALL_MULTICAST |
+					       NDIS_PACKET_TYPE_DIRECTED);
+}
+
+void rndis_filter_update(struct netvsc_device *nvdev)
+{
+	struct rndis_device *rdev = nvdev->extension;
+
+	schedule_work(&rdev->mcast_work);
+}
+
 static int rndis_filter_init_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
@@ -973,6 +998,9 @@ static int rndis_filter_close_device(struct rndis_device *dev)
 	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
 		return 0;
 
+	/* Make sure rndis_set_multicast doesn't re-enable filter! */
+	cancel_work_sync(&dev->mcast_work);
+
 	ret = rndis_filter_set_packet_filter(dev, 0);
 	if (ret == -ENODEV)
 		ret = 0;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 312fce7..144ea5a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -207,7 +207,6 @@ static void ifb_dev_free(struct net_device *dev)
 		__skb_queue_purge(&txp->tq);
 	}
 	kfree(dp->tx_private);
-	free_netdev(dev);
 }
 
 static void ifb_setup(struct net_device *dev)
@@ -230,7 +229,8 @@ static void ifb_setup(struct net_device *dev)
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
 	eth_hw_addr_random(dev);
-	dev->destructor = ifb_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
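
ifb, like most of the drivers touched below (ipvlan, loopback, macsec, macvlan, slip, team, tun, ...), stops calling free_netdev() from its own destructor: it sets needs_free_netdev so the core frees the device, and keeps only driver-private cleanup in priv_destructor. The following is a standalone model of that split with the "core" reduced to a single unregister function; every name here is an invented stand-in, not the netdev API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Miniature of the pattern: the object owner (the "core") decides when
 * the memory goes away; the driver callback tears down only what the
 * driver itself allocated.
 */
struct mini_dev {
	void (*priv_destructor)(struct mini_dev *dev);
	bool needs_free;
	void *priv_stats;
};

static void core_unregister(struct mini_dev *dev)
{
	if (dev->priv_destructor)
		dev->priv_destructor(dev);	/* driver-private cleanup only */
	if (dev->needs_free)
		free(dev);			/* the core, not the driver, frees */
}

static void drv_destructor(struct mini_dev *dev)
{
	free(dev->priv_stats);
	/* note: no free(dev) here -- that is the core's job now */
}

int main(void)
{
	struct mini_dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->priv_stats = malloc(64);
	dev->priv_destructor = drv_destructor;
	dev->needs_free = true;

	core_unregister(dev);
	puts("device torn down without double free");
	return 0;
}
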
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 618ed88..7c7680c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -632,7 +632,7 @@ void ipvlan_link_setup(struct net_device *dev)
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	dev->netdev_ops = &ipvlan_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->header_ops = &ipvlan_header_ops;
 	dev->ethtool_ops = &ipvlan_ethtool_ops;
 }
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 224f65c..3061249 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -159,7 +159,6 @@ static void loopback_dev_free(struct net_device *dev)
 {
 	dev_net(dev)->loopback_dev = NULL;
 	free_percpu(dev->lstats);
-	free_netdev(dev);
 }
 
 static const struct net_device_ops loopback_ops = {
@@ -196,7 +195,8 @@ static void loopback_setup(struct net_device *dev)
 	dev->ethtool_ops	= &loopback_ethtool_ops;
 	dev->header_ops		= &eth_header_ops;
 	dev->netdev_ops		= &loopback_ops;
-	dev->destructor		= loopback_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= loopback_dev_free;
 }
 
 /* Setup and register the loopback device. */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cdc347b..7941167 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2996,7 +2996,6 @@ static void macsec_free_netdev(struct net_device *dev)
 	free_percpu(macsec->secy.tx_sc.stats);
 
 	dev_put(real_dev);
-	free_netdev(dev);
 }
 
 static void macsec_setup(struct net_device *dev)
@@ -3006,7 +3005,8 @@ static void macsec_setup(struct net_device *dev)
 	dev->max_mtu = ETH_MAX_MTU;
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->netdev_ops = &macsec_netdev_ops;
-	dev->destructor = macsec_free_netdev;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = macsec_free_netdev;
 	SET_NETDEV_DEVTYPE(dev, &macsec_type);
 
 	eth_zero_addr(dev->broadcast);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 346ad2f..72b8018 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,16 +39,20 @@
 #define MACVLAN_HASH_SIZE	(1<<MACVLAN_HASH_BITS)
 #define MACVLAN_BC_QUEUE_LEN	1000
 
+#define MACVLAN_F_PASSTHRU	1
+#define MACVLAN_F_ADDRCHANGE	2
+
 struct macvlan_port {
 	struct net_device	*dev;
 	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
 	struct list_head	vlans;
 	struct sk_buff_head	bc_queue;
 	struct work_struct	bc_work;
-	bool 			passthru;
+	u32			flags;
 	int			count;
 	struct hlist_head	vlan_source_hash[MACVLAN_HASH_SIZE];
 	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+	unsigned char           perm_addr[ETH_ALEN];
 };
 
 struct macvlan_source_entry {
@@ -66,6 +70,31 @@ struct macvlan_skb_cb {
 
 static void macvlan_port_destroy(struct net_device *dev);
 
+static inline bool macvlan_passthru(const struct macvlan_port *port)
+{
+	return port->flags & MACVLAN_F_PASSTHRU;
+}
+
+static inline void macvlan_set_passthru(struct macvlan_port *port)
+{
+	port->flags |= MACVLAN_F_PASSTHRU;
+}
+
+static inline bool macvlan_addr_change(const struct macvlan_port *port)
+{
+	return port->flags & MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_set_addr_change(struct macvlan_port *port)
+{
+	port->flags |= MACVLAN_F_ADDRCHANGE;
+}
+
+static inline void macvlan_clear_addr_change(struct macvlan_port *port)
+{
+	port->flags &= ~MACVLAN_F_ADDRCHANGE;
+}
+
 /* Hash Ethernet address */
 static u32 macvlan_eth_hash(const unsigned char *addr)
 {
@@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
 static bool macvlan_addr_busy(const struct macvlan_port *port,
 			      const unsigned char *addr)
 {
-	/* Test to see if the specified multicast address is
+	/* Test to see if the specified address is
 	 * currently in use by the underlying device or
 	 * another macvlan.
 	 */
-	if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
+	if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
+	    ether_addr_equal_64bits(port->dev->dev_addr, addr))
 		return true;
 
 	if (macvlan_hash_lookup(port, addr))
@@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	}
 
 	macvlan_forward_source(skb, port, eth->h_source);
-	if (port->passthru)
+	if (macvlan_passthru(port))
 		vlan = list_first_or_null_rcu(&port->vlans,
 					      struct macvlan_dev, list);
 	else
@@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev)
 	struct net_device *lowerdev = vlan->lowerdev;
 	int err;
 
-	if (vlan->port->passthru) {
+	if (macvlan_passthru(vlan->port)) {
 		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
 			err = dev_set_promiscuity(lowerdev, 1);
 			if (err < 0)
@@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev)
 	dev_uc_unsync(lowerdev, dev);
 	dev_mc_unsync(lowerdev, dev);
 
-	if (vlan->port->passthru) {
+	if (macvlan_passthru(vlan->port)) {
 		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
 			dev_set_promiscuity(lowerdev, -1);
 		goto hash_del;
@@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
+	struct macvlan_port *port = vlan->port;
 	int err;
 
 	if (!(dev->flags & IFF_UP)) {
@@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 		if (macvlan_addr_busy(vlan->port, addr))
 			return -EBUSY;
 
-		if (!vlan->port->passthru) {
+		if (!macvlan_passthru(port)) {
 			err = dev_uc_add(lowerdev, addr);
 			if (err)
 				return err;
@@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
 
 		macvlan_hash_change_addr(vlan, addr);
 	}
+	if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
+		/* Since addr_change isn't set, we are here due to lower
+		 * device change.  Save the lower-dev address so we can
+		 * restore it later.
+		 */
+		ether_addr_copy(vlan->port->perm_addr,
+				lowerdev->dev_addr);
+	}
+	macvlan_clear_addr_change(port);
 	return 0;
 }
 
@@ -703,7 +743,12 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
+	/* If the addresses are the same, this is a no-op */
+	if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+		return 0;
+
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+		macvlan_set_addr_change(vlan->port);
 		dev_set_mac_address(vlan->lowerdev, addr);
 		return 0;
 	}
@@ -928,7 +973,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 	/* Support unicast filter only on passthru devices.
 	 * Multicast filter should be allowed on all devices.
 	 */
-	if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+	if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
 		return -EOPNOTSUPP;
 
 	if (flags & NLM_F_REPLACE)
@@ -952,7 +997,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 	/* Support unicast filter only on passthru devices.
 	 * Multicast filter should be allowed on all devices.
 	 */
-	if (!vlan->port->passthru && is_unicast_ether_addr(addr))
+	if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
 		return -EOPNOTSUPP;
 
 	if (is_unicast_ether_addr(addr))
@@ -1092,7 +1137,7 @@ void macvlan_common_setup(struct net_device *dev)
 	netif_keep_dst(dev);
 	dev->priv_flags	       |= IFF_UNICAST_FLT;
 	dev->netdev_ops		= &macvlan_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->header_ops		= &macvlan_hard_header_ops;
 	dev->ethtool_ops	= &macvlan_ethtool_ops;
 }
@@ -1120,8 +1165,8 @@ static int macvlan_port_create(struct net_device *dev)
 	if (port == NULL)
 		return -ENOMEM;
 
-	port->passthru = false;
 	port->dev = dev;
+	ether_addr_copy(port->perm_addr, dev->dev_addr);
 	INIT_LIST_HEAD(&port->vlans);
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
 		INIT_HLIST_HEAD(&port->vlan_hash[i]);
@@ -1161,6 +1206,18 @@ static void macvlan_port_destroy(struct net_device *dev)
 		kfree_skb(skb);
 	}
 
+	/* If the lower device address has been changed by passthru
+	 * macvlan, put it back.
+	 */
+	if (macvlan_passthru(port) &&
+	    !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
+		struct sockaddr sa;
+
+		sa.sa_family = port->dev->type;
+		memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
+		dev_set_mac_address(port->dev, &sa);
+	}
+
 	kfree(port);
 }
 
@@ -1326,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	port = macvlan_port_get_rtnl(lowerdev);
 
 	/* Only 1 macvlan device can be created in passthru mode */
-	if (port->passthru) {
+	if (macvlan_passthru(port)) {
 		/* The macvlan port must be not created this time,
 		 * still goto destroy_macvlan_port for readability.
 		 */
@@ -1352,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 			err = -EINVAL;
 			goto destroy_macvlan_port;
 		}
-		port->passthru = true;
+		macvlan_set_passthru(port);
 		eth_hw_addr_inherit(dev, lowerdev);
 	}
 
@@ -1434,7 +1491,7 @@ static int macvlan_changelink(struct net_device *dev,
 	if (data && data[IFLA_MACVLAN_FLAGS]) {
 		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
-		if (vlan->port->passthru && promisc) {
+		if (macvlan_passthru(vlan->port) && promisc) {
 			int err;
 
 			if (flags & MACVLAN_FLAG_NOPROMISC)
@@ -1597,7 +1654,7 @@ static int macvlan_device_event(struct notifier_block *unused,
 		}
 		break;
 	case NETDEV_CHANGEADDR:
-		if (!port->passthru)
+		if (!macvlan_passthru(port))
 			return NOTIFY_DONE;
 
 		vlan = list_first_entry_or_null(&port->vlans,
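
The macvlan rework above replaces the single passthru bool with a flags word plus small set/clear/test helpers, which leaves room for the new ADDRCHANGE state without adding another struct member. Here is a standalone model of that helper style; the flag names are taken from the diff, while the surrounding struct and helper names are invented:

#include <stdbool.h>
#include <stdio.h>

#define MACVLAN_F_PASSTHRU	1
#define MACVLAN_F_ADDRCHANGE	2

struct port {
	unsigned int flags;
};

/* Accessors keep call sites readable and make it cheap to add states. */
static inline bool port_passthru(const struct port *p)    { return p->flags & MACVLAN_F_PASSTHRU; }
static inline void port_set_passthru(struct port *p)      { p->flags |= MACVLAN_F_PASSTHRU; }
static inline bool port_addr_change(const struct port *p) { return p->flags & MACVLAN_F_ADDRCHANGE; }
static inline void port_set_addr_change(struct port *p)   { p->flags |= MACVLAN_F_ADDRCHANGE; }
static inline void port_clear_addr_change(struct port *p) { p->flags &= ~MACVLAN_F_ADDRCHANGE; }

int main(void)
{
	struct port port = { 0 };

	port_set_passthru(&port);
	port_set_addr_change(&port);
	port_clear_addr_change(&port);

	printf("passthru=%d addr_change=%d\n",
	       port_passthru(&port), port_addr_change(&port));
	return 0;
}
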
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 06ee639..0e27920 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -358,7 +358,7 @@ static ssize_t enabled_store(struct config_item *item,
 		if (err)
 			goto out_unlock;
 
-		pr_info("netconsole: network logging started\n");
+		pr_info("network logging started\n");
 	} else {	/* false */
 		/* We need to disable the netconsole before cleaning it up
 		 * otherwise we might end up in write_msg() with
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index b916038..c4b3362 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -113,7 +113,7 @@ static void nlmon_setup(struct net_device *dev)
 
 	dev->netdev_ops	= &nlmon_ops;
 	dev->ethtool_ops = &nlmon_ethtool_ops;
-	dev->destructor	= free_netdev;
+	dev->needs_free_netdev = true;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
 			NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c360dd6..3ab6c58 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -127,6 +127,7 @@
 	tristate "ThunderX SOCs MDIO buses"
 	depends on 64BIT
 	depends on PCI
+	depends on !(MDIO_DEVICE=y && PHYLIB=m)
 	select MDIO_CAVIUM
 	help
 	  This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ed0d10f..c3065236 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640,
 	if (overflow) {
 		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
 		while (skb) {
-			skb_complete_tx_timestamp(skb, NULL);
+			kfree_skb(skb);
 			skb = skb_dequeue(&dp83640->tx_queue);
 		}
 		return;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9097e42..57297ba 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
 		if (adv < 0)
 			return adv;
 
-		lpa &= adv;
-
 		if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
 			phydev->duplex = DUPLEX_FULL;
 		else
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8e73f5f..f99c21f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 	return 0;
 }
 
+static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	int rc;
+
+	/* Some devices have extra OF data and an OF-style MODALIAS */
+	rc = of_device_uevent_modalias(dev, env);
+	if (rc != -ENODEV)
+		return rc;
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static int mdio_bus_suspend(struct device *dev)
 {
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
 struct bus_type mdio_bus_type = {
 	.name		= "mdio_bus",
 	.match		= mdio_bus_match,
+	.uevent		= mdio_uevent,
 	.pm		= MDIO_BUS_PM_OPS,
 };
 EXPORT_SYMBOL(mdio_bus_type);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18..8b20388 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,11 +268,31 @@ static int kszphy_nand_tree_disable(struct phy_device *phydev)
 	return ret;
 }
 
+/* Some config bits need to be set again on resume, handle them here. */
+static int kszphy_config_reset(struct phy_device *phydev)
+{
+	struct kszphy_priv *priv = phydev->priv;
+	int ret;
+
+	if (priv->rmii_ref_clk_sel) {
+		ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
+		if (ret) {
+			phydev_err(phydev,
+				   "failed to set rmii reference clock\n");
+			return ret;
+		}
+	}
+
+	if (priv->led_mode >= 0)
+		kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
+
+	return 0;
+}
+
 static int kszphy_config_init(struct phy_device *phydev)
 {
 	struct kszphy_priv *priv = phydev->priv;
 	const struct kszphy_type *type;
-	int ret;
 
 	if (!priv)
 		return 0;
@@ -285,19 +305,7 @@ static int kszphy_config_init(struct phy_device *phydev)
 	if (type->has_nand_tree_disable)
 		kszphy_nand_tree_disable(phydev);
 
-	if (priv->rmii_ref_clk_sel) {
-		ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
-		if (ret) {
-			phydev_err(phydev,
-				   "failed to set rmii reference clock\n");
-			return ret;
-		}
-	}
-
-	if (priv->led_mode >= 0)
-		kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
-
-	return 0;
+	return kszphy_config_reset(phydev);
 }
 
 static int ksz8041_config_init(struct phy_device *phydev)
@@ -611,6 +619,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
 	if ((regval & 0xFF) == 0xFF) {
 		phy_init_hw(phydev);
 		phydev->link = 0;
+		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+			phydev->drv->config_intr(phydev);
 	}
 
 	return 0;
@@ -700,8 +710,14 @@ static int kszphy_suspend(struct phy_device *phydev)
 
 static int kszphy_resume(struct phy_device *phydev)
 {
+	int ret;
+
 	genphy_resume(phydev);
 
+	ret = kszphy_config_reset(phydev);
+	if (ret)
+		return ret;
+
 	/* Enable PHY Interrupts */
 	if (phy_interrupt_is_valid(phydev)) {
 		phydev->interrupts = PHY_INTERRUPT_ENABLED;
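
The micrel change above gathers the settings the PHY loses across a suspend (RMII reference clock select, LED mode) into kszphy_config_reset(), which both config_init() and resume() now call, instead of programming them only once at init time. A standalone model of sharing one "reapply volatile settings" helper between the init and resume paths is sketched below; all names are invented:

#include <stdio.h>

struct phy {
	int led_mode;		/* -1 means "leave hardware default" */
	int rmii_clk_sel;	/* non-zero: needs to be programmed */
	int hw_led_mode;	/* pretend hardware register */
	int hw_clk_sel;		/* pretend hardware register */
};

/* Everything the device forgets while suspended lives here, so the init
 * and resume paths cannot drift apart.
 */
static int phy_config_reset(struct phy *phy)
{
	if (phy->rmii_clk_sel)
		phy->hw_clk_sel = phy->rmii_clk_sel;
	if (phy->led_mode >= 0)
		phy->hw_led_mode = phy->led_mode;
	return 0;
}

static int phy_config_init(struct phy *phy)
{
	/* one-time init work would go here */
	return phy_config_reset(phy);
}

static int phy_resume(struct phy *phy)
{
	/* registers were reset while suspended; reapply them */
	return phy_config_reset(phy);
}

int main(void)
{
	struct phy phy = { .led_mode = 1, .rmii_clk_sel = 1 };

	phy_config_init(&phy);
	phy.hw_led_mode = 0;	/* simulate state lost during suspend */
	phy.hw_clk_sel = 0;
	phy_resume(&phy);

	printf("after resume: led=%d clk=%d\n", phy.hw_led_mode, phy.hw_clk_sel);
	return 0;
}
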
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 82ab8fb..3e231a5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -54,6 +54,8 @@ static const char *phy_speed_to_str(int speed)
 		return "5Gbps";
 	case SPEED_10000:
 		return "10Gbps";
+	case SPEED_14000:
+		return "14Gbps";
 	case SPEED_20000:
 		return "20Gbps";
 	case SPEED_25000:
@@ -241,7 +243,7 @@ static const struct phy_setting settings[] = {
  * phy_lookup_setting - lookup a PHY setting
  * @speed: speed to match
  * @duplex: duplex to match
- * @feature: allowed link modes
+ * @features: allowed link modes
  * @exact: an exact match is required
  *
  * Search the settings array for a setting that matches the speed and
@@ -377,6 +379,7 @@ static void phy_sanitize_settings(struct phy_device *phydev)
  * @cmd: ethtool_cmd
  *
  * A few notes about parameter checking:
+ *
  * - We don't set port or transceiver, so we don't care what they
  *   were set to.
  * - phy_start_aneg() will make sure forced settings are sane, and
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 1da31dc..74b9072 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -629,7 +629,7 @@ static void sl_uninit(struct net_device *dev)
 static void sl_free_netdev(struct net_device *dev)
 {
 	int i = dev->base_addr;
-	free_netdev(dev);
+
 	slip_devs[i] = NULL;
 }
 
@@ -651,7 +651,8 @@ static const struct net_device_ops sl_netdev_ops = {
 static void sl_setup(struct net_device *dev)
 {
 	dev->netdev_ops		= &sl_netdev_ops;
-	dev->destructor		= sl_free_netdev;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= sl_free_netdev;
 
 	dev->hard_header_len	= 0;
 	dev->addr_len		= 0;
@@ -1369,8 +1370,6 @@ static void __exit slip_exit(void)
 		if (sl->tty) {
 			printk(KERN_ERR "%s: tty discipline still running\n",
 			       dev->name);
-			/* Intentionally leak the control block. */
-			dev->destructor = NULL;
 		}
 
 		unregister_netdev(dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6c5d5ef..fba8c13 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1643,7 +1643,6 @@ static void team_destructor(struct net_device *dev)
 	struct team *team = netdev_priv(dev);
 
 	free_percpu(team->pcpu_stats);
-	free_netdev(dev);
 }
 
 static int team_open(struct net_device *dev)
@@ -2079,7 +2078,8 @@ static void team_setup(struct net_device *dev)
 
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
-	dev->destructor	= team_destructor;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = team_destructor;
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->priv_flags |= IFF_TEAM;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bbd707b..9ee7d42 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1560,7 +1560,6 @@ static void tun_free_netdev(struct net_device *dev)
 	free_percpu(tun->pcpu_stats);
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
-	free_netdev(dev);
 }
 
 static void tun_setup(struct net_device *dev)
@@ -1571,7 +1570,8 @@ static void tun_setup(struct net_device *dev)
 	tun->group = INVALID_GID;
 
 	dev->ethtool_ops = &tun_ethtool_ops;
-	dev->destructor = tun_free_netdev;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = tun_free_netdev;
 	/* We prefer our own queue length */
 	dev->tx_queue_len = TUN_READQ_SIZE;
 }
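
A minimal sketch (hypothetical driver, not part of this series) of the destructor convention the slip, team and tun hunks above, and many hunks below, convert to: with needs_free_netdev set the core calls free_netdev() itself, and priv_destructor releases only driver-private state.

#include <linux/netdevice.h>
#include <linux/percpu.h>

struct demo_priv {
	u64 __percpu *stats;		/* assumed driver-private allocation */
};

static void demo_destructor(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	free_percpu(priv->stats);	/* note: no free_netdev() here anymore */
}

static void demo_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;	/* core frees the net_device for us */
	dev->priv_destructor = demo_destructor;
}
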
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 51cf600..4037ab2 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1722,6 +1722,18 @@ static const struct driver_info lenovo_info = {
 	.tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info belkin_info = {
+	.description = "Belkin USB Ethernet Adapter",
+	.bind	= ax88179_bind,
+	.unbind = ax88179_unbind,
+	.status = ax88179_status,
+	.link_reset = ax88179_link_reset,
+	.reset	= ax88179_reset,
+	.flags	= FLAG_ETHER | FLAG_FRAMING_AX,
+	.rx_fixup = ax88179_rx_fixup,
+	.tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
 	/* ASIX AX88179 10/100/1000 */
@@ -1751,6 +1763,10 @@ static const struct usb_device_id products[] = {
 	/* Lenovo OneLinkDock Gigabit LAN */
 	USB_DEVICE(0x17ef, 0x304b),
 	.driver_info = (unsigned long)&lenovo_info,
+}, {
+	/* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */
+	USB_DEVICE(0x050d, 0x0128),
+	.driver_info = (unsigned long)&belkin_info,
 },
 	{ },
 };
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index eb52de8..c7a350b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -298,7 +298,7 @@ static void usbpn_setup(struct net_device *dev)
 	dev->addr_len		= 1;
 	dev->tx_queue_len	= 3;
 
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 /*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8f923a1..32a22f4e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,7 +123,7 @@ static void qmimux_setup(struct net_device *dev)
 	dev->addr_len        = 0;
 	dev->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 	dev->netdev_ops      = &qmimux_netdev_ops;
-	dev->destructor      = free_netdev;
+	dev->needs_free_netdev = true;
 }
 
 static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id)
@@ -1192,6 +1192,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9063, 8)},	/* Sierra Wireless EM7305 */
+	{QMI_FIXED_INTF(0x1199, 0x9063, 10)},	/* Sierra Wireless EM7305 */
 	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
@@ -1206,6 +1208,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
+	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
+	{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},	/* Telewell TW-3G HSPA+ */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
 	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ddc62cb..1a419a4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4368,6 +4368,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
 		break;
 	}
 
+	dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
+
 	return version;
 }
 
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 38f0f03..364fa9d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -222,7 +222,6 @@ static int veth_dev_init(struct net_device *dev)
 static void veth_dev_free(struct net_device *dev)
 {
 	free_percpu(dev->vstats);
-	free_netdev(dev);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -317,7 +316,8 @@ static void veth_setup(struct net_device *dev)
 			       NETIF_F_HW_VLAN_STAG_TX |
 			       NETIF_F_HW_VLAN_CTAG_RX |
 			       NETIF_F_HW_VLAN_STAG_RX);
-	dev->destructor = veth_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = veth_dev_free;
 	dev->max_mtu = ETH_MAX_MTU;
 
 	dev->hw_features = VETH_FEATURES;
@@ -383,7 +383,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 		tbp = tb;
 	}
 
-	if (tbp[IFLA_IFNAME]) {
+	if (ifmp && tbp[IFLA_IFNAME]) {
 		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
 		name_assign_type = NET_NAME_USER;
 	} else {
@@ -402,7 +402,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 		return PTR_ERR(peer);
 	}
 
-	if (tbp[IFLA_ADDRESS] == NULL)
+	if (!ifmp || !tbp[IFLA_ADDRESS])
 		eth_hw_addr_random(peer);
 
 	if (ifmp && (dev->ifindex != 0))
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e9246c..143d8a9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
 	unsigned int len;
 
 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-				rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+				rq->min_buf_len, PAGE_SIZE - hdr_len);
 	return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -1797,6 +1797,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 	flush_work(&vi->config_work);
 
 	netif_device_detach(vi->dev);
+	netif_tx_disable(vi->dev);
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
@@ -2144,7 +2145,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
 	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
 	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
 
-	return max(min_buf_len, hdr_len);
+	return max(max(min_buf_len, hdr_len) - hdr_len,
+		   (unsigned int)GOOD_PACKET_LEN);
 }
 
 static int virtnet_find_vqs(struct virtnet_info *vi)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index db88249..022c0b5 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -36,12 +36,14 @@
 #include <net/addrconf.h>
 #include <net/l3mdev.h>
 #include <net/fib_rules.h>
+#include <net/netns/generic.h>
 
 #define DRV_NAME	"vrf"
 #define DRV_VERSION	"1.0"
 
 #define FIB_RULE_PREF  1000       /* default preference for FIB rules */
-static bool add_fib_rules = true;
+
+static unsigned int vrf_net_id;
 
 struct net_vrf {
 	struct rtable __rcu	*rth;
@@ -1348,7 +1350,7 @@ static void vrf_setup(struct net_device *dev)
 	dev->netdev_ops = &vrf_netdev_ops;
 	dev->l3mdev_ops = &vrf_l3mdev_ops;
 	dev->ethtool_ops = &vrf_ethtool_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	/* Fill in device structure with ethernet-generic values. */
 	eth_hw_addr_random(dev);
@@ -1394,6 +1396,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_vrf *vrf = netdev_priv(dev);
+	bool *add_fib_rules;
+	struct net *net;
 	int err;
 
 	if (!data || !data[IFLA_VRF_TABLE])
@@ -1409,13 +1413,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		goto out;
 
-	if (add_fib_rules) {
+	net = dev_net(dev);
+	add_fib_rules = net_generic(net, vrf_net_id);
+	if (*add_fib_rules) {
 		err = vrf_add_fib_rules(dev);
 		if (err) {
 			unregister_netdevice(dev);
 			goto out;
 		}
-		add_fib_rules = false;
+		*add_fib_rules = false;
 	}
 
 out:
@@ -1498,16 +1504,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
 	.notifier_call = vrf_device_event,
 };
 
+/* Initialize per network namespace state */
+static int __net_init vrf_netns_init(struct net *net)
+{
+	bool *add_fib_rules = net_generic(net, vrf_net_id);
+
+	*add_fib_rules = true;
+
+	return 0;
+}
+
+static struct pernet_operations vrf_net_ops __net_initdata = {
+	.init = vrf_netns_init,
+	.id   = &vrf_net_id,
+	.size = sizeof(bool),
+};
+
 static int __init vrf_init_module(void)
 {
 	int rc;
 
 	register_netdevice_notifier(&vrf_notifier_block);
 
-	rc = rtnl_link_register(&vrf_link_ops);
+	rc = register_pernet_subsys(&vrf_net_ops);
 	if (rc < 0)
 		goto error;
 
+	rc = rtnl_link_register(&vrf_link_ops);
+	if (rc < 0) {
+		unregister_pernet_subsys(&vrf_net_ops);
+		goto error;
+	}
+
 	return 0;
 
 error:
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index 7f0136f..c28bdce 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -135,7 +135,7 @@ static void vsockmon_setup(struct net_device *dev)
 
 	dev->netdev_ops	= &vsockmon_ops;
 	dev->ethtool_ops = &vsockmon_ethtool_ops;
-	dev->destructor	= free_netdev;
+	dev->needs_free_netdev = true;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
 			NETIF_F_HIGHDMA | NETIF_F_LLTX;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 328b471..5fa798a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head  vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 	call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+	dst_cache_destroy(&rd->dst_cache);
+	kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+				  struct vxlan_rdst *rd)
+{
+	list_del_rcu(&rd->list);
+	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+	call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
 			   __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
 	 * otherwise destroy the fdb entry
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
-		list_del_rcu(&rd->list);
-		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-		kfree_rcu(rd, rcu);
+		vxlan_fdb_dst_destroy(vxlan, f, rd);
 		goto out;
 	}
 
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
 	rcu_assign_pointer(vxlan->vn4_sock, NULL);
 	synchronize_net();
 
+	vxlan_vs_del_dev(vxlan);
+
 	if (__vxlan_sock_release_prep(sock4)) {
 		udp_tunnel_sock_release(sock4->sock);
 		kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
 	mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+	spin_lock(&vn->sock_lock);
+	hlist_del_init_rcu(&vxlan->hlist);
+	spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -2584,7 +2611,7 @@ static void vxlan_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
 	dev->features	|= NETIF_F_LLTX;
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
 	vxlan_flush(vxlan, true);
 
-	spin_lock(&vn->sock_lock);
-	if (!hlist_unhashed(&vxlan->hlist))
-		hlist_del_rcu(&vxlan->hlist);
-	spin_unlock(&vn->sock_lock);
-
 	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
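
A minimal sketch, with assumed struct names, of the RCU deferred-free pattern the vxlan hunks above adopt: the remote is unlinked with list_del_rcu() and freed only after a grace period via call_rcu(), so concurrent readers never see freed memory.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_remote {
	struct list_head list;
	struct rcu_head rcu;
};

static void demo_remote_free(struct rcu_head *head)
{
	struct demo_remote *rd = container_of(head, struct demo_remote, rcu);

	kfree(rd);			/* safe: a grace period has elapsed */
}

static void demo_remote_destroy(struct demo_remote *rd)
{
	list_del_rcu(&rd->list);	/* readers may still be walking the list */
	call_rcu(&rd->rcu, demo_remote_free);
}
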
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 65ee2a6..a0d76f7 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -475,7 +475,7 @@ static void dlci_setup(struct net_device *dev)
 	dev->flags		= 0;
 	dev->header_ops		= &dlci_header_ops;
 	dev->netdev_ops		= &dlci_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 
 	dlp->receive		= dlci_receive;
 
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index eb91528..78596e4 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1106,7 +1106,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
 		return -EIO;
 	}
 
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	*get_dev_p(pvc, type) = dev;
 	if (!used) {
 		state(hdlc)->dce_changed = 1;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 9df9ed6..63f7490 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -306,7 +306,7 @@ static const struct net_device_ops lapbeth_netdev_ops = {
 static void lapbeth_setup(struct net_device *dev)
 {
 	dev->netdev_ops	     = &lapbeth_netdev_ops;
-	dev->destructor	     = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->type            = ARPHRD_X25;
 	dev->hard_header_len = 3;
 	dev->mtu             = 1000;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 91ee542..b90c77e 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -1287,7 +1287,7 @@ void init_netdev(struct net_device *dev)
 	struct ath6kl *ar = ath6kl_priv(dev);
 
 	dev->netdev_ops = &ath6kl_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
 
 	dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index d5e993d..517a315 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
 	qcom_smem_state_put(wcn->tx_enable_state);
 	qcom_smem_state_put(wcn->tx_rings_empty_state);
 
+	rpmsg_destroy_ept(wcn->smd_channel);
+
 	iounmap(wcn->dxe_base);
 	iounmap(wcn->ccu_base);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index cd1d673..617199c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5225,7 +5225,6 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
 
 	if (vif)
 		brcmf_free_vif(vif);
-	free_netdev(ndev);
 }
 
 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index a3d8236..511d190 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -624,7 +624,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
 		if (!ndev)
 			return ERR_PTR(-ENOMEM);
 
-		ndev->destructor = brcmf_cfg80211_free_netdev;
+		ndev->needs_free_netdev = true;
+		ndev->priv_destructor = brcmf_cfg80211_free_netdev;
 		ifp = netdev_priv(ndev);
 		ifp->ndev = ndev;
 		/* store mapping ifidx to bsscfgidx */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index c7c1e99..d231042 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -442,7 +442,7 @@ struct brcmf_fw {
 	const char *nvram_name;
 	u16 domain_nr;
 	u16 bus_nr;
-	void (*done)(struct device *dev, const struct firmware *fw,
+	void (*done)(struct device *dev, int err, const struct firmware *fw,
 		     void *nvram_image, u32 nvram_len);
 };
 
@@ -477,52 +477,51 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
 	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 		goto fail;
 
-	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+	fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	release_firmware(fwctx->code);
-	device_release_driver(fwctx->dev);
+	fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0);
 	kfree(fwctx);
 }
 
 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
 {
 	struct brcmf_fw *fwctx = ctx;
-	int ret;
+	int ret = 0;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw)
+	if (!fw) {
+		ret = -ENOENT;
 		goto fail;
-
-	/* only requested code so done here */
-	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
 	}
+	/* only requested code so done here */
+	if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM))
+		goto done;
+
 	fwctx->code = fw;
 	ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
 				      fwctx->dev, GFP_KERNEL, fwctx,
 				      brcmf_fw_request_nvram_done);
 
-	if (!ret)
-		return;
-
-	brcmf_fw_request_nvram_done(NULL, fwctx);
+	/* pass NULL to nvram callback for bcm47xx fallback */
+	if (ret)
+		brcmf_fw_request_nvram_done(NULL, fwctx);
 	return;
 
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-	device_release_driver(fwctx->dev);
+done:
+	fwctx->done(fwctx->dev, ret, fw, NULL, 0);
 	kfree(fwctx);
 }
 
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr)
@@ -555,7 +554,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len))
 {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index d3c9f0d..8fa4b7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram);
  */
 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
 				const char *code, const char *nvram,
-				void (*fw_cb)(struct device *dev,
+				void (*fw_cb)(struct device *dev, int err,
 					      const struct firmware *fw,
 					      void *nvram_image, u32 nvram_len),
 				u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
 			   const char *code, const char *nvram,
-			   void (*fw_cb)(struct device *dev,
+			   void (*fw_cb)(struct device *dev, int err,
 					 const struct firmware *fw,
 					 void *nvram_image, u32 nvram_len));
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 72373e5..f59642b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2145,7 +2145,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
 	struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr);
 	struct brcmf_fws_mac_descriptor *entry;
 
-	if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE)
+	if (!ifp->ndev || !brcmf_fws_queue_skbs(fws))
 		return;
 
 	entry = &fws->desc.iface[ifp->ifidx];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index f36b96d..f878706 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1650,16 +1650,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
 	.write32 = brcmf_pcie_buscore_write32,
 };
 
-static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
+static void brcmf_pcie_setup(struct device *dev, int ret,
+			     const struct firmware *fw,
 			     void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie;
-	struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo;
+	struct brcmf_bus *bus;
+	struct brcmf_pciedev *pcie_bus_dev;
+	struct brcmf_pciedev_info *devinfo;
 	struct brcmf_commonring **flowrings;
-	int ret;
 	u32 i;
 
+	/* check firmware loading result */
+	if (ret)
+		goto fail;
+
+	bus = dev_get_drvdata(dev);
+	pcie_bus_dev = bus->bus_priv.pcie;
+	devinfo = pcie_bus_dev->devinfo;
 	brcmf_pcie_attach(devinfo);
 
 	/* Some of the firmwares have the size of the memory of the device
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fc64b89..5653d6d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 		/* otherwise, set txglomalign */
 		value = sdiodev->settings->bus.sdio.sd_sgentry_align;
 		/* SDIO ADMA requires at least 32 bit alignment */
-		value = max_t(u32, value, 4);
+		value = max_t(u32, value, ALIGNMENT);
 		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
 					   sizeof(u32));
 	}
@@ -3982,21 +3982,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
 	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
-static void brcmf_sdio_firmware_callback(struct device *dev,
+static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 					 const struct firmware *code,
 					 void *nvram, u32 nvram_len)
 {
-	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
-	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
-	struct brcmf_sdio *bus = sdiodev->bus;
-	int err = 0;
+	struct brcmf_bus *bus_if;
+	struct brcmf_sdio_dev *sdiodev;
+	struct brcmf_sdio *bus;
 	u8 saveclk;
 
-	brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+	brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
+	bus_if = dev_get_drvdata(dev);
+	sdiodev = bus_if->bus_priv.sdio;
+	if (err)
+		goto fail;
 
 	if (!bus_if->drvr)
 		return;
 
+	bus = sdiodev->bus;
+
 	/* try to download image and nvram to the dongle */
 	bus->alp_only = true;
 	err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4083,6 +4088,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
 	device_release_driver(dev);
+	device_release_driver(&sdiodev->func[2]->dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e4d545f..0eea48e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1159,17 +1159,18 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
 	return ret;
 }
 
-static void brcmf_usb_probe_phase2(struct device *dev,
+static void brcmf_usb_probe_phase2(struct device *dev, int ret,
 				   const struct firmware *fw,
 				   void *nvram, u32 nvlen)
 {
 	struct brcmf_bus *bus = dev_get_drvdata(dev);
-	struct brcmf_usbdev_info *devinfo;
-	int ret;
+	struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo;
+
+	if (ret)
+		goto error;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 1b7e125..6a13303 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3066,7 +3066,7 @@ static int airo_thread(void *data) {
 		if (ai->jobs) {
 			locked = down_interruptible(&ai->sem);
 		} else {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 
 			init_waitqueue_entry(&wait, current);
 			add_wait_queue(&ai->thr_wait, &wait);
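
The airo hunk here (and the hostap and libertas hunks further down) only track the rename of wait_queue_t to wait_queue_entry_t. A minimal sketch of the same open-coded wait, using a hypothetical wait-queue head:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

static void demo_wait(void)
{
	wait_queue_entry_t wait;	/* formerly wait_queue_t */

	init_waitqueue_entry(&wait, current);
	add_wait_queue(&demo_waitq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();			/* sleep until demo_waitq is woken */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&demo_waitq, &wait);
}
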
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index f922859..aaaca4d 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -4160,12 +4160,12 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
 static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL);
 
 #ifdef CONFIG_IPW2100_DEBUG
-static ssize_t show_debug_level(struct device_driver *d, char *buf)
+static ssize_t debug_level_show(struct device_driver *d, char *buf)
 {
 	return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
 }
 
-static ssize_t store_debug_level(struct device_driver *d,
+static ssize_t debug_level_store(struct device_driver *d,
 				 const char *buf, size_t count)
 {
 	u32 val;
@@ -4179,9 +4179,7 @@ static ssize_t store_debug_level(struct device_driver *d,
 
 	return strnlen(buf, count);
 }
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level,
-		   store_debug_level);
+static DRIVER_ATTR_RW(debug_level);
 #endif				/* CONFIG_IPW2100_DEBUG */
 
 static ssize_t show_fatal_error(struct device *d,
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index bbc579b..5b79e2e 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1195,12 +1195,12 @@ static void ipw_led_shutdown(struct ipw_priv *priv)
  *
  * See the level definitions in ipw for details.
  */
-static ssize_t show_debug_level(struct device_driver *d, char *buf)
+static ssize_t debug_level_show(struct device_driver *d, char *buf)
 {
 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
 }
 
-static ssize_t store_debug_level(struct device_driver *d, const char *buf,
+static ssize_t debug_level_store(struct device_driver *d, const char *buf,
 				 size_t count)
 {
 	char *p = (char *)buf;
@@ -1221,9 +1221,7 @@ static ssize_t store_debug_level(struct device_driver *d, const char *buf,
 
 	return strnlen(buf, count);
 }
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
-		   show_debug_level, store_debug_level);
+static DRIVER_ATTR_RW(debug_level);
 
 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
 {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 3b3e076..45e2efc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN	17
 #define IWL7265_UCODE_API_MIN	17
-#define IWL7265D_UCODE_API_MIN	17
-#define IWL3168_UCODE_API_MIN	20
+#define IWL7265D_UCODE_API_MIN	22
+#define IWL3168_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b9718c0..8913771 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX	30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	17
-#define IWL8265_UCODE_API_MIN	20
+#define IWL8000_UCODE_API_MIN	22
+#define IWL8265_UCODE_API_MIN	22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION		0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 306bc96..77efbb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -370,6 +370,7 @@
 #define MON_DMARB_RD_DATA_ADDR		(0xa03c5c)
 
 #define DBGC_IN_SAMPLE			(0xa03c00)
+#define DBGC_OUT_CTRL			(0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR			0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index 1b7d265..a10c6aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS               1
 #define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f)		(((_f) & LQ_FLAG_COLOR_MSK) >>\
+					 LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c)		((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+					 LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c)	((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 81b9891..1360ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+				       TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 7b86a4f..c8712e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 	return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-	else
-		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
 	u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 	/* EARLY START - firmware's configuration is hard coded */
 	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
 	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-	    conf_id == FW_DBG_START_FROM_ALIVE) {
-		iwl_mvm_restart_early_start(mvm);
+	    conf_id == FW_DBG_START_FROM_ALIVE)
 		return 0;
-	}
 
 	if (!mvm->fw->dbg_conf_tlv[conf_id])
 		return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 0f1831b..fd2fc46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
 		struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
 		struct iwl_mac_beacon_cmd_v7 beacon_cmd;
 	} u = {};
-	struct iwl_mac_beacon_cmd beacon_cmd;
+	struct iwl_mac_beacon_cmd beacon_cmd = {};
 	struct ieee80211_tx_info *info;
 	u32 beacon_skb_len;
 	u32 rate, tx_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4e74a6b..52f8d7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+	u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+		IWL_MVM_CMD_QUEUE;
+
 	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-		~BIT(IWL_MVM_CMD_QUEUE));
+		~BIT(cmd_queue));
 }
 
 static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 	if (!iwl_mvm_has_new_tx_api(mvm))
 		iwl_free_fw_paging(mvm);
 	mvm->ucode_loaded = false;
+	mvm->fw_dbg_conf = FW_DBG_INVALID;
 	iwl_trans_stop_device(mvm->trans);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 9ffff6e..3da5ec4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1149,22 +1149,38 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
 	mutex_lock(&mvm->mutex);
 
-	/* stop recording */
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
 		iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv)
+			iwl_clear_bits_prph(mvm->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
 	} else {
+		u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+		/* stop recording */
 		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-		/* wait before we collect the data till the DBGC stop */
 		udelay(100);
+		iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+		/* wait for the DBGC to stop before we collect the data */
+		/* wait for the DBGC to stop before we collect the data */
+		udelay(500);
+
+		iwl_mvm_fw_error_dump(mvm);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+		    mvm->fw->dbg_dest_tlv) {
+			iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+			iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+		}
 	}
 
-	iwl_mvm_fw_error_dump(mvm);
-
-	/* start recording again if the firmware is not crashed */
-	WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-		     mvm->fw->dbg_dest_tlv &&
-		     iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
-
 	mutex_unlock(&mvm->mutex);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7788eef..aa785cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
 		rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-				 struct rs_rate *b,
-				 bool allow_ant_mismatch)
-
-{
-	bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-		(a->bfer == b->bfer);
-
-	if (allow_ant_mismatch) {
-		if (a->stbc || a->bfer) {
-			WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  a->stbc, a->bfer, a->ant);
-			ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-		} else if (b->stbc || b->bfer) {
-			WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-				  b->stbc, b->bfer, b->ant);
-			ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-		}
-	}
-
-	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-		(a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
 					struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 lq_hwrate;
 	struct rs_rate lq_rate, tx_resp_rate;
 	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-	bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-					     IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
 	/* Here we actually compare this rate to the latest LQ command */
-	if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
 		IWL_DEBUG_RATE(mvm,
-			       "initial tx resp rate 0x%x does not match 0x%x\n",
-			       tx_resp_hwrate, lq_hwrate);
+			       "tx resp color 0x%x does not match 0x%x\n",
+			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
 		/*
 		 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 	u8 valid_tx_ant = 0;
 	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
 	bool toggle_ant = false;
+	u32 color;
 
 	memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 				 num_rates, num_retries, valid_tx_ant,
 				 toggle_ant);
 
+	/* update the color of the LQ command (as a counter at bits 1-3) */
+	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+	lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ee207f2..3abde1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
 	} pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note: this is the iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+				      RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+				  (((uintptr_t)_p) |\
+				   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  enum nl80211_band band, bool init);
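
A worked example (made-up values) of the packing helpers added above, which stash both the reduced tx power and the LQ color in status_driver_data[0]:

/* illustrative fragment */
u8 reduced_txp = 0x12;
u8 lq_color = 5;

void *packed = RS_DRV_DATA_PACK(lq_color, reduced_txp);	/* 0x512 as a pointer-sized value */

u32 tlc_info = (uintptr_t)packed;
u8 txp   = tlc_info & RS_DRV_DATA_TXP_MSK;		/* bits 0-7  -> 0x12 */
u8 color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);		/* bits 8-10 -> 5    */
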
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f5c786d..614d678 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (!iwl_mvm_is_dqa_supported(mvm))
 		return 0;
 
-	if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		    vif->type != NL80211_IFTYPE_ADHOC))
 		return -ENOTSUPP;
 
 	/*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE)) {
+		/*
+		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+		 * invalid, so make sure we use the queue we want.
+		 * Note that this is done here as we want to avoid making DQA
+		 * changes in mac80211 layer.
+		 */
+		if (vif->type == NL80211_IFTYPE_ADHOC) {
+			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+			mvmvif->cab_queue = vif->cab_queue;
+		}
 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
 				   &cfg, timeout);
 	}
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
 	/* Get the station from the mvm local station table */
 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-	if (!mvm_sta) {
-		IWL_ERR(mvm, "Failed to find station\n");
-		return -EINVAL;
-	}
-	sta_id = mvm_sta->sta_id;
+	if (mvm_sta)
+		sta_id = mvm_sta->sta_id;
 
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 2716cb5..ad62b67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *	This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
 	u32 rate_n_flags;
+	u8 lq_color;
 	bool amsdu_in_ampdu_allowed;
 	enum iwl_mvm_agg_state state;
 	u16 txq_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f9cbd19..506d581 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 	int ret;
 
-	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-		return -EIO;
-
 	mutex_lock(&mvm->mutex);
 
+	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+		ret = -EIO;
+		goto unlock;
+	}
+
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
 		ret = -EINVAL;
 		goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bcaceb64..f21901c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	struct iwl_mvm_sta *mvmsta;
 	struct sk_buff_head skbs;
 	u8 skb_freed = 0;
+	u8 lq_color;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		info->status.tx_time =
 			le16_to_cpu(tx_resp->wireless_media_time);
 		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
 		info->status.status_driver_data[0] =
-				(void *)(uintptr_t)tx_resp->reduced_tpc;
+			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
 		ieee80211_tx_status(mvm->hw, skb);
 	}
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
 			le32_to_cpu(tx_resp->initial_rate);
 		mvmsta->tid_data[tid].tx_time =
 			le16_to_cpu(tx_resp->wireless_media_time);
+		mvmsta->tid_data[tid].lq_color =
+			(tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+			TX_RES_RATE_TABLE_COLOR_POS;
 	}
 
 	rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 	iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
 	freed = 0;
+
+	/* pack the lq color from tid_data along with the reduced txp */
+	ba_info->status.status_driver_data[0] =
+		RS_DRV_DATA_PACK(tid_data->lq_color,
+				 ba_info->status.status_driver_data[0]);
 	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
 	skb_queue_walk(&reclaimed_skbs, skb) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 70acf85..93cbc7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		return iwl_pci_fw_enter_d0i3(trans);
 
 	return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
 		iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9fb46a6f..9c9bfbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
 	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
 		ret = -EINVAL;
-		goto error;
+		goto error_free_resp;
 	}
 
 	rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 	if (qid > ARRAY_SIZE(trans_pcie->txq)) {
 		WARN_ONCE(1, "queue index %d unsupported", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
 		WARN_ONCE(1, "queue %d already used", qid);
 		ret = -EIO;
-		goto error;
+		goto error_free_resp;
 	}
 
 	txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 			   (txq->write_ptr) | (qid << 16));
 	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+	iwl_free_resp(&hcmd);
 	return qid;
 
+error_free_resp:
+	iwl_free_resp(&hcmd);
 error:
 	iwl_pcie_gen2_txq_free_memory(trans, txq);
 	return ret;
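
A generic sketch (hypothetical function, unrelated to iwlwifi) of the cleanup-label ordering the tx-gen2 hunk above fixes: each error label releases exactly what was acquired before the failure point, so resources are freed on both success and failure paths without double-freeing.

#include <linux/slab.h>

static int demo_alloc_pair(void)
{
	void *a, *b;
	int ret;

	a = kmalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kmalloc(16, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* only 'a' exists at this point */
	}

	/* ... use a and b ... */

	kfree(b);
	kfree(a);
	return 0;

err_free_a:
	kfree(a);
	return ret;
}
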
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index b2c6b06..ff153ce 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
 			ret = -EINVAL;
 		}
 		if (local->iw_mode == IW_MODE_MASTER) {
-			wait_queue_t __wait;
+			wait_queue_entry_t __wait;
 			init_waitqueue_entry(&__wait, current);
 			add_wait_queue(&local->hostscan_wq, &__wait);
 			set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 544fc09..1372b20 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -73,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
 	dev->mem_end = mdev->mem_end;
 
 	hostap_setup_dev(dev, local, type);
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 
 	sprintf(dev->name, "%s%s", prefix, name);
 	if (!rtnl_locked)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 002b25c..c854a55 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2861,7 +2861,7 @@ static const struct net_device_ops hwsim_netdev_ops = {
 static void hwsim_mon_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &hwsim_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	ether_setup(dev);
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index e350020..dde065d 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -453,7 +453,7 @@ static int lbs_thread(void *data)
 {
 	struct net_device *dev = data;
 	struct lbs_private *priv = dev->ml_priv;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	lbs_deb_enter(LBS_DEB_THREAD);
 
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index dd87b9f..39b6b5e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1280,7 +1280,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
 			      struct net_device *dev)
 {
 	dev->netdev_ops = &mwifiex_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	/* Initialize private structure */
 	priv->current_key_index = 0;
 	priv->media_connected = false;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 530586b..5b1d2e8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned long   remaining_credit;
 	struct timer_list credit_timeout;
 	u64 credit_window_start;
+	bool rate_limited;
 
 	/* Statistics */
 	struct xenvif_stats stats;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8397f6c..e322a86 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
-		xenvif_napi_schedule_or_enable_events(queue);
+		/* If the queue is rate-limited, it shall be
+		 * rescheduled in the timer callback.
+		 */
+		if (likely(!queue->rate_limited))
+			xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 602d408..5042ff8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 
 	queue->remaining_credit = min(max_credit, max_burst);
+	queue->rate_limited = false;
 }
 
 void xenvif_tx_credit_callback(unsigned long data)
@@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		msecs_to_jiffies(queue->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
-	if (timer_pending(&queue->credit_timeout))
+	if (timer_pending(&queue->credit_timeout)) {
+		queue->rate_limited = true;
 		return true;
+	}
 
 	/* Passed the point where we can replenish credit? */
 	if (time_after_eq64(now, next_credit)) {
@@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 		mod_timer(&queue->credit_timeout,
 			  next_credit);
 		queue->credit_window_start = next_credit;
+		queue->rate_limited = true;
 
 		return true;
 	}
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index c002384..7b3b6fd 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -2878,7 +2878,7 @@ static const struct intel_ntb_reg skx_reg = {
 	.link_is_up		= xeon_link_is_up,
 	.db_ioread		= skx_db_ioread,
 	.db_iowrite		= skx_db_iowrite,
-	.db_size		= sizeof(u64),
+	.db_size		= sizeof(u32),
 	.ntb_ctl		= SKX_NTBCNTL_OFFSET,
 	.mw_bar			= {2, 4},
 };
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 02ca45f..10e5bf4 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -177,14 +177,12 @@ struct ntb_transport_qp {
 	u64 rx_err_ver;
 	u64 rx_memcpy;
 	u64 rx_async;
-	u64 dma_rx_prep_err;
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_ring_full;
 	u64 tx_err_no_buf;
 	u64 tx_memcpy;
 	u64 tx_async;
-	u64 dma_tx_prep_err;
 };
 
 struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
 #define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES	100
 #define NTB_LINK_DOWN_TIMEOUT	10
-#define DMA_RETRIES		20
-#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "free tx - \t%u\n",
 			       ntb_transport_tx_free_entry(qp));
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA tx prep err - \t%llu\n",
-			       qp->dma_tx_prep_err);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA rx prep err - \t%llu\n",
-			       qp->dma_rx_prep_err);
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 	if (!mw->virt_addr)
 		return -ENOMEM;
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
 	qp->tx_err_no_buf = 0;
 	qp->tx_memcpy = 0;
 	qp->tx_async = 0;
-	qp->dma_tx_prep_err = 0;
-	qp->dma_rx_prep_err = 0;
 }
 
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->event_handler = NULL;
 	ntb_qp_link_down_reset(qp);
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
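
A worked example (arbitrary counts) of the distribution rule used in both hunks above: the first qp_count % mw_count memory windows each carry one extra queue pair.

/* illustrative fragment */
unsigned int qp_count = 5, mw_count = 2;
unsigned int mw_num, num_qps_mw;

for (mw_num = 0; mw_num < mw_count; mw_num++) {
	if (mw_num < qp_count % mw_count)
		num_qps_mw = qp_count / mw_count + 1;	/* mw 0 -> 3 QPs */
	else
		num_qps_mw = qp_count / mw_count;	/* mw 1 -> 2 QPs */
}
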
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	qp_count = ilog2(qp_bitmap);
 	if (max_num_clients && max_num_clients < qp_count)
 		qp_count = max_num_clients;
-	else if (mw_count < qp_count)
-		qp_count = mw_count;
+	else if (nt->mw_count < qp_count)
+		qp_count = nt->mw_count;
 
 	qp_bitmap &= BIT_ULL(qp_count) - 1;
 
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
-	int retries = 0;
 
 	len = entry->len;
 	device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 
 	unmap->from_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan,
-						     unmap->addr[1],
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_rx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	struct dmaengine_unmap_data *unmap;
 	dma_addr_t dest;
 	dma_cookie_t cookie;
-	int retries = 0;
 
 	device = chan->device;
 	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 
 	unmap->to_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, dest,
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_tx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_tx_copy_callback;
 	txd->callback_param = entry;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 434e1d4..5cab283 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -90,11 +90,11 @@ MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
 
 static unsigned int seg_order = 19; /* 512K */
 module_param(seg_order, uint, 0644);
-MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
+MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");
 
 static unsigned int run_order = 32; /* 4G */
 module_param(run_order, uint, 0644);
-MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer");
+MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");
 
 static bool use_dma; /* default to 0 */
 module_param(use_dma, bool, 0644);
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 77a48a5..df431e8 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -13,7 +13,6 @@
 #include <linux/nubus.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <asm/setup.h>
@@ -34,14 +33,6 @@ extern void oss_nubus_init(void);
 
 #define NUBUS_TEST_PATTERN 0x5A932BC7
 
-/* Define this if you like to live dangerously - it is known not to
-   work on pretty much every machine except the Quadra 630 and the LC
-   III. */
-#undef I_WANT_TO_PROBE_SLOT_ZERO
-
-/* This sometimes helps combat failure to boot */
-#undef TRY_TO_DODGE_WSOD
-
 /* Globals */
 
 struct nubus_dev *nubus_devices;
@@ -101,9 +92,6 @@ static void nubus_rewind(unsigned char **ptr, int len, int map)
 {
 	unsigned char *p = *ptr;
 
-	/* Sanity check */
-	if (len > 65536)
-		pr_err("rewind of 0x%08x!\n", len);
 	while (len) {
 		do {
 			p--;
@@ -117,8 +105,6 @@ static void nubus_advance(unsigned char **ptr, int len, int map)
 {
 	unsigned char *p = *ptr;
 
-	if (len > 65536)
-		pr_err("advance of 0x%08x!\n", len);
 	while (len) {
 		while (not_useful(p, map))
 			p++;
@@ -130,10 +116,15 @@ static void nubus_advance(unsigned char **ptr, int len, int map)
 
 static void nubus_move(unsigned char **ptr, int len, int map)
 {
+	unsigned long slot_space = (unsigned long)*ptr & 0xFF000000;
+
 	if (len > 0)
 		nubus_advance(ptr, len, map);
 	else if (len < 0)
 		nubus_rewind(ptr, -len, map);
+
+	if (((unsigned long)*ptr & 0xFF000000) != slot_space)
+		pr_err("%s: moved out of slot address space!\n", __func__);
 }
 
 /* Now, functions to read the sResource tree */
@@ -454,10 +445,6 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
 	pr_info("  Function 0x%02x:\n", parent->type);
 	nubus_get_subdir(parent, &dir);
 
-	/* Apple seems to have botched the ROM on the IIx */
-	if (slot == 0 && (unsigned long)dir.base % 2)
-		dir.base += 1;
-
 	pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
 	         __func__, parent->base, dir.base);
 
@@ -691,83 +678,6 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
 	return 0;
 }
 
-/* Attempt to bypass the somewhat non-obvious arrangement of
-   sResources in the motherboard ROM */
-static void __init nubus_find_rom_dir(struct nubus_board* board)
-{
-	unsigned char *rp;
-	unsigned char *romdir;
-	struct nubus_dir dir;
-	struct nubus_dirent ent;
-
-	/* Check for the extra directory just under the format block */
-	rp = board->fblock;
-	nubus_rewind(&rp, 4, board->lanes);
-	if (nubus_get_rom(&rp, 4, board->lanes) != NUBUS_TEST_PATTERN) {
-		/* OK, the ROM was telling the truth */
-		board->directory = board->fblock;
-		nubus_move(&board->directory,
-			   nubus_expand32(board->doffset),
-			   board->lanes);
-		return;
-	}
-
-	/* On "slot zero", you have to walk down a few more
-	   directories to get to the equivalent of a real card's root
-	   directory.  We don't know what they were smoking when they
-	   came up with this. */
-	romdir = nubus_rom_addr(board->slot);
-	nubus_rewind(&romdir, ROM_DIR_OFFSET, board->lanes);
-	dir.base = dir.ptr = romdir;
-	dir.done = 0;
-	dir.mask = board->lanes;
-
-	/* This one points to an "Unknown Macintosh" directory */
-	if (nubus_readdir(&dir, &ent) == -1)
-		goto badrom;
-
-	if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
-		printk(KERN_INFO "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data);
-	/* This one takes us to where we want to go. */
-	if (nubus_readdir(&dir, &ent) == -1)
-		goto badrom;
-	if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
-		printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data);
-	nubus_get_subdir(&ent, &dir);
-
-	/* Resource ID 01, also an "Unknown Macintosh" */
-	if (nubus_readdir(&dir, &ent) == -1)
-		goto badrom;
-	if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
-		printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data);
-
-	/* FIXME: the first one is *not* always the right one.  We
-	   suspect this has something to do with the ROM revision.
-	   "The HORROR ROM" (LC-series) uses 0x7e, while "The HORROR
-	   Continues" (Q630) uses 0x7b.  The DAFB Macs evidently use
-	   something else.  Please run "Slots" on your Mac (see
-	   include/linux/nubus.h for where to get this program) and
-	   tell us where the 'SiDirPtr' for Slot 0 is.  If you feel
-	   brave, you should also use MacsBug to walk down the ROM
-	   directories like this function does and try to find the
-	   path to that address... */
-	if (nubus_readdir(&dir, &ent) == -1)
-		goto badrom;
-	if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
-		printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data);
-
-	/* Bwahahahaha... */
-	nubus_get_subdir(&ent, &dir);
-	board->directory = dir.base;
-	return;
-
-	/* Even more evil laughter... */
- badrom:
-	board->directory = board->fblock;
-	nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes);
-	printk(KERN_ERR "nubus_get_rom_dir: ROM weirdness!  Notify the developers...\n");
-}
-
 /* Add a board (might be many devices) to the list */
 static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
 {
@@ -828,8 +738,11 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
 	 * since the initial Macintosh ROM releases skipped the check.
 	 */
 
-	/* Attempt to work around slot zero weirdness */
-	nubus_find_rom_dir(board);
+	/* Set up the directory pointer */
+	board->directory = board->fblock;
+	nubus_move(&board->directory, nubus_expand32(board->doffset),
+	           board->lanes);
+
 	nubus_get_root_dir(board, &dir);
 
 	/* We're ready to rock */
@@ -849,9 +762,6 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
 		nubus_get_board_resource(board, slot, &ent);
 	}
 
-	/* Aaaarrrrgghh!  The LC III motherboard has *two* board
-	   resources.  I have no idea WTF to do about this. */
-
 	while (nubus_readdir(&dir, &ent) != -1) {
 		struct nubus_dev *dev;
 		struct nubus_dev **devp;
@@ -898,8 +808,6 @@ void __init nubus_probe_slot(int slot)
 			continue;
 
 		dp = *rp;
-		if(dp == 0)
-			continue;
 
 		/* The last byte of the format block consists of two
 		   nybbles which are "mirror images" of each other.
@@ -908,7 +816,7 @@ void __init nubus_probe_slot(int slot)
 			continue;
 		/* Check that this value is actually *on* one of the
 		   bytelanes it claims are valid! */
-		if ((dp & 0x0F) >= (1 << i))
+		if (not_useful(rp, dp))
 			continue;
 
 		/* Looks promising.  Let's put it on the list. */
@@ -922,10 +830,6 @@ void __init nubus_scan_bus(void)
 {
 	int slot;
 
-	/* This might not work on your machine */
-#ifdef I_WANT_TO_PROBE_SLOT_ZERO
-	nubus_probe_slot(0);
-#endif
 	for (slot = 9; slot < 15; slot++) {
 		nubus_probe_slot(slot);
 	}
@@ -943,13 +847,6 @@ static int __init nubus_init(void)
 		via_nubus_init();
 	}
 
-#ifdef TRY_TO_DODGE_WSOD
-	/* Rogue Ethernet interrupts can kill the machine if we don't
-	   do this.  Obviously this is bogus.  Hopefully the local VIA
-	   gurus can fix the real cause of the problem. */
-	mdelay(1000);
-#endif
-
 	/* And probe */
 	pr_info("NuBus: Scanning NuBus slots.\n");
 	nubus_devices = NULL;
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 822198a..f12d23c 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -186,7 +186,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 	 * another kernel subsystem, and we just pass it through.
 	 */
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
 
@@ -205,7 +205,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 					"io error in %s sector %lld, len %d,\n",
 					(rw == READ) ? "READ" : "WRITE",
 					(unsigned long long) iter.bi_sector, len);
-			bio->bi_error = err;
+			bio->bi_status = errno_to_blk_status(err);
 			break;
 		}
 	}
@@ -273,7 +273,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 
 	blk_queue_make_request(q, nd_blk_make_request);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	q->queuedata = nsblk;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 983718b..b6ba061 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1210,7 +1210,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 	 * another kernel subsystem, and we just pass it through.
 	 */
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
 
@@ -1232,7 +1232,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 					(op_is_write(bio_op(bio))) ? "WRITE" :
 					"READ",
 					(unsigned long long) iter.bi_sector, len);
-			bio->bi_error = err;
+			bio->bi_status = errno_to_blk_status(err);
 			break;
 		}
 	}
@@ -1297,7 +1297,6 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_make_request(btt->btt_queue, btt_make_request);
 	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
-	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index ae00dc0..4c989bb 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -222,13 +222,6 @@ struct device *nd_btt_create(struct nd_region *nd_region)
 	return dev;
 }
 
-static bool uuid_is_null(u8 *uuid)
-{
-	static const u8 null_uuid[16];
-
-	return (memcmp(uuid, null_uuid, 16) == 0);
-}
-
 /**
  * nd_btt_arena_is_valid - check if the metadata layout is valid
  * @nd_btt:	device with BTT geometry and backing device info
@@ -249,7 +242,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
 	if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
 		return false;
 
-	if (!uuid_is_null(super->parent_uuid))
+	if (!guid_is_null((guid_t *)&super->parent_uuid))
 		if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
 			return false;
 
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index c544d46..6b577af 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -49,19 +49,19 @@ static struct nd_region *to_region(struct pmem_device *pmem)
 	return to_nd_region(to_dev(pmem)->parent);
 }
 
-static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
-		unsigned int len)
+static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
+		phys_addr_t offset, unsigned int len)
 {
 	struct device *dev = to_dev(pmem);
 	sector_t sector;
 	long cleared;
-	int rc = 0;
+	blk_status_t rc = BLK_STS_OK;
 
 	sector = (offset - pmem->data_offset) / 512;
 
 	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
 	if (cleared < len)
-		rc = -EIO;
+		rc = BLK_STS_IOERR;
 	if (cleared > 0 && cleared / 512) {
 		cleared /= 512;
 		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
@@ -84,7 +84,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
 	kunmap_atomic(mem);
 }
 
-static int read_pmem(struct page *page, unsigned int off,
+static blk_status_t read_pmem(struct page *page, unsigned int off,
 		void *pmem_addr, unsigned int len)
 {
 	int rc;
@@ -93,15 +93,15 @@ static int read_pmem(struct page *page, unsigned int off,
 	rc = memcpy_mcsafe(mem + off, pmem_addr, len);
 	kunmap_atomic(mem);
 	if (rc)
-		return -EIO;
-	return 0;
+		return BLK_STS_IOERR;
+	return BLK_STS_OK;
 }
 
-static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
-	int rc = 0;
+	blk_status_t rc = BLK_STS_OK;
 	bool bad_pmem = false;
 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
 	void *pmem_addr = pmem->virt_addr + pmem_off;
@@ -111,7 +111,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
 	if (!is_write) {
 		if (unlikely(bad_pmem))
-			rc = -EIO;
+			rc = BLK_STS_IOERR;
 		else {
 			rc = read_pmem(page, off, pmem_addr, len);
 			flush_dcache_page(page);
@@ -149,7 +149,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
-	int rc = 0;
+	blk_status_t rc = 0;
 	bool do_acct;
 	unsigned long start;
 	struct bio_vec bvec;
@@ -166,7 +166,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 				bvec.bv_offset, op_is_write(bio_op(bio)),
 				iter.bi_sector);
 		if (rc) {
-			bio->bi_error = rc;
+			bio->bi_status = rc;
 			break;
 		}
 	}
@@ -184,7 +184,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 		       struct page *page, bool is_write)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
-	int rc;
+	blk_status_t rc;
 
 	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
@@ -197,7 +197,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	if (rc == 0)
 		page_endio(page, is_write, 0);
 
-	return rc;
+	return blk_status_to_errno(rc);
 }
 
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
@@ -343,7 +343,6 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_max_hw_sectors(q, UINT_MAX);
-	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
 	q->queuedata = pmem;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 90745a6..46d6cb1 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -13,18 +13,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called nvme.
 
-config BLK_DEV_NVME_SCSI
-	bool "SCSI emulation for NVMe device nodes"
-	depends on NVME_CORE
-	---help---
-	  This adds support for the SG_IO ioctl on the NVMe character
-	  and block devices nodes, as well as a translation for a small
-	  number of selected SCSI commands to NVMe commands to the NVMe
-	  driver.  If you don't know what this means you probably want
-	  to say N here, unless you run a distro that abuses the SCSI
-	  emulation to provide stable device names for mount by id, like
-	  some OpenSuSE and SLES versions.
-
 config NVME_FABRICS
 	tristate
 
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index f1a7d94..cc0aacb 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -5,7 +5,6 @@
 obj-$(CONFIG_NVME_FC)			+= nvme-fc.o
 
 nvme-core-y				:= core.o
-nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI)	+= scsi.o
 nvme-core-$(CONFIG_NVM)			+= lightnvm.o
 
 nvme-y					+= pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a609264..d70df1d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -27,7 +27,6 @@
 #include <linux/nvme_ioctl.h>
 #include <linux/t10-pi.h>
 #include <linux/pm_qos.h>
-#include <scsi/sg.h>
 #include <asm/unaligned.h>
 
 #include "nvme.h"
@@ -45,7 +44,7 @@ module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 EXPORT_SYMBOL_GPL(nvme_io_timeout);
 
-unsigned char shutdown_timeout = 5;
+static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
 
@@ -56,7 +55,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
 static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
-static unsigned long default_ps_max_latency_us = 25000;
+static unsigned long default_ps_max_latency_us = 100000;
 module_param(default_ps_max_latency_us, ulong, 0644);
 MODULE_PARM_DESC(default_ps_max_latency_us,
 		 "max power saving latency for new devices; use PM QOS to change per device");
@@ -65,34 +64,53 @@ static bool force_apst;
 module_param(force_apst, bool, 0644);
 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
 
+static bool streams;
+module_param(streams, bool, 0644);
+MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+
+struct workqueue_struct *nvme_wq;
+EXPORT_SYMBOL_GPL(nvme_wq);
+
 static LIST_HEAD(nvme_ctrl_list);
 static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
-static int nvme_error_status(struct request *req)
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+		return -EBUSY;
+	if (!queue_work(nvme_wq, &ctrl->reset_work))
+		return -EBUSY;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
+
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+	int ret;
+
+	ret = nvme_reset_ctrl(ctrl);
+	if (!ret)
+		flush_work(&ctrl->reset_work);
+	return ret;
+}
+
+static blk_status_t nvme_error_status(struct request *req)
 {
 	switch (nvme_req(req)->status & 0x7ff) {
 	case NVME_SC_SUCCESS:
-		return 0;
+		return BLK_STS_OK;
 	case NVME_SC_CAP_EXCEEDED:
-		return -ENOSPC;
-	default:
-		return -EIO;
-
-	/*
-	 * XXX: these errors are a nasty side-band protocol to
-	 * drivers/md/dm-mpath.c:noretry_error() that aren't documented
-	 * anywhere..
-	 */
-	case NVME_SC_CMD_SEQ_ERROR:
-		return -EILSEQ;
+		return BLK_STS_NOSPC;
 	case NVME_SC_ONCS_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
+		return BLK_STS_NOTSUPP;
 	case NVME_SC_WRITE_FAULT:
 	case NVME_SC_READ_ERROR:
 	case NVME_SC_UNWRITTEN_BLOCK:
-		return -ENODATA;
+		return BLK_STS_MEDIUM;
+	default:
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -165,7 +183,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_LIVE:
-		case NVME_CTRL_RECONNECTING:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -283,6 +300,105 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
+static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+
+	c.directive.opcode = nvme_admin_directive_send;
+	c.directive.nsid = cpu_to_le32(0xffffffff);
+	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
+	c.directive.dtype = NVME_DIR_IDENTIFY;
+	c.directive.tdtype = NVME_DIR_STREAMS;
+	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
+
+	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
+}
+
+static int nvme_disable_streams(struct nvme_ctrl *ctrl)
+{
+	return nvme_toggle_streams(ctrl, false);
+}
+
+static int nvme_enable_streams(struct nvme_ctrl *ctrl)
+{
+	return nvme_toggle_streams(ctrl, true);
+}
+
+static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
+				  struct streams_directive_params *s, u32 nsid)
+{
+	struct nvme_command c;
+
+	memset(&c, 0, sizeof(c));
+	memset(s, 0, sizeof(*s));
+
+	c.directive.opcode = nvme_admin_directive_recv;
+	c.directive.nsid = cpu_to_le32(nsid);
+	c.directive.numd = sizeof(*s);
+	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
+	c.directive.dtype = NVME_DIR_STREAMS;
+
+	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
+}
+
+static int nvme_configure_directives(struct nvme_ctrl *ctrl)
+{
+	struct streams_directive_params s;
+	int ret;
+
+	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
+		return 0;
+	if (!streams)
+		return 0;
+
+	ret = nvme_enable_streams(ctrl);
+	if (ret)
+		return ret;
+
+	ret = nvme_get_stream_params(ctrl, &s, 0xffffffff);
+	if (ret)
+		return ret;
+
+	ctrl->nssa = le16_to_cpu(s.nssa);
+	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
+		dev_info(ctrl->device, "too few streams (%u) available\n",
+					ctrl->nssa);
+		nvme_disable_streams(ctrl);
+		return 0;
+	}
+
+	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
+	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
+	return 0;
+}
+
+/*
+ * Check if 'req' has a write hint associated with it. If it does, assign
+ * a valid namespace stream to the write.
+ */
+static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
+				     struct request *req, u16 *control,
+				     u32 *dsmgmt)
+{
+	enum rw_hint streamid = req->write_hint;
+
+	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
+		streamid = 0;
+	else {
+		streamid--;
+		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
+			return;
+
+		*control |= NVME_RW_DTYPE_STREAMS;
+		*dsmgmt |= streamid << 16;
+	}
+
+	if (streamid < ARRAY_SIZE(req->q->write_hints))
+		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
+}
+
 static inline void nvme_setup_flush(struct nvme_ns *ns,
 		struct nvme_command *cmnd)
 {
@@ -291,7 +407,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
 	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd)
 {
 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -300,7 +416,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
 	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
 	if (!range)
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	__rq_for_each_bio(bio, req) {
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -314,7 +430,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
 	if (WARN_ON_ONCE(n != segments)) {
 		kfree(range);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	memset(cmnd, 0, sizeof(*cmnd));
@@ -328,15 +444,26 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	req->special_vec.bv_len = sizeof(*range) * segments;
 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
-		struct nvme_command *cmnd)
+static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd)
 {
+	struct nvme_ctrl *ctrl = ns->ctrl;
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
+	/*
+	 * If formatted with metadata, require the block layer to provide a
+	 * buffer unless this namespace is formatted such that the metadata
+	 * can be stripped/generated by the controller with PRACT=1.
+	 */
+	if (ns && ns->ms &&
+	    (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
+	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
+		return BLK_STS_NOTSUPP;
+
 	if (req->cmd_flags & REQ_FUA)
 		control |= NVME_RW_FUA;
 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -351,6 +478,9 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
 
+	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
+		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
+
 	if (ns->ms) {
 		switch (ns->pi_type) {
 		case NVME_NS_DPS_PI_TYPE3:
@@ -370,12 +500,13 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	return 0;
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
-	int ret = BLK_MQ_RQ_QUEUE_OK;
+	blk_status_t ret = BLK_STS_OK;
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		nvme_req(req)->retries = 0;
@@ -398,11 +529,11 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		break;
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
-		nvme_setup_rw(ns, req, cmd);
+		ret = nvme_setup_rw(ns, req, cmd);
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	cmd->common.command_id = req->tag;
@@ -555,15 +686,16 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 			result, timeout);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, int error)
+static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 
 	blk_mq_free_request(rq);
 
-	if (error) {
+	if (status) {
 		dev_err(ctrl->device,
-			"failed nvme_keep_alive_end_io error=%d\n", error);
+			"failed nvme_keep_alive_end_io error=%d\n",
+				status);
 		return;
 	}
 
@@ -599,7 +731,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	if (nvme_keep_alive(ctrl)) {
 		/* allocation failure, reset the controller */
 		dev_err(ctrl->device, "keep-alive failed\n");
-		ctrl->ops->reset_ctrl(ctrl);
+		nvme_reset_ctrl(ctrl);
 		return;
 	}
 }
@@ -623,7 +755,7 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
 
-int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 {
 	struct nvme_command c = { };
 	int error;
@@ -643,6 +775,77 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 	return error;
 }
 
+static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
+{
+	struct nvme_command c = { };
+	int status;
+	void *data;
+	int pos;
+	int len;
+
+	c.identify.opcode = nvme_admin_identify;
+	c.identify.nsid = cpu_to_le32(nsid);
+	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+
+	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
+				      NVME_IDENTIFY_DATA_SIZE);
+	if (status)
+		goto free_data;
+
+	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+		struct nvme_ns_id_desc *cur = data + pos;
+
+		if (cur->nidl == 0)
+			break;
+
+		switch (cur->nidt) {
+		case NVME_NIDT_EUI64:
+			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+				dev_warn(ns->ctrl->device,
+					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
+					 cur->nidl);
+				goto free_data;
+			}
+			len = NVME_NIDT_EUI64_LEN;
+			memcpy(ns->eui, data + pos + sizeof(*cur), len);
+			break;
+		case NVME_NIDT_NGUID:
+			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+				dev_warn(ns->ctrl->device,
+					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
+					 cur->nidl);
+				goto free_data;
+			}
+			len = NVME_NIDT_NGUID_LEN;
+			memcpy(ns->nguid, data + pos + sizeof(*cur), len);
+			break;
+		case NVME_NIDT_UUID:
+			if (cur->nidl != NVME_NIDT_UUID_LEN) {
+				dev_warn(ns->ctrl->device,
+					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
+					 cur->nidl);
+				goto free_data;
+			}
+			len = NVME_NIDT_UUID_LEN;
+			uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
+			break;
+		default:
+			/* Skip unknown types */
+			len = cur->nidl;
+			break;
+		}
+
+		len += sizeof(*cur);
+	}
+free_data:
+	kfree(data);
+	return status;
+}
+
 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
 {
 	struct nvme_command c = { };
@@ -653,7 +856,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
 }
 
-int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
+static int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 		struct nvme_id_ns **id)
 {
 	struct nvme_command c = { };
@@ -675,26 +878,7 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 	return error;
 }
 
-int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
-		      void *buffer, size_t buflen, u32 *result)
-{
-	struct nvme_command c;
-	union nvme_result res;
-	int ret;
-
-	memset(&c, 0, sizeof(c));
-	c.features.opcode = nvme_admin_get_features;
-	c.features.nsid = cpu_to_le32(nsid);
-	c.features.fid = cpu_to_le32(fid);
-
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
-			NVME_QID_ANY, 0, 0);
-	if (ret >= 0 && result)
-		*result = le32_to_cpu(res.u32);
-	return ret;
-}
-
-int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
+static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		      void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
@@ -713,28 +897,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	return ret;
 }
 
-int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
-{
-	struct nvme_command c = { };
-	int error;
-
-	c.common.opcode = nvme_admin_get_log_page,
-	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
-	c.common.cdw10[0] = cpu_to_le32(
-			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
-			 NVME_LOG_SMART),
-
-	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
-	if (!*log)
-		return -ENOMEM;
-
-	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
-			sizeof(struct nvme_smart_log));
-	if (error)
-		kfree(*log);
-	return error;
-}
-
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 {
 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
@@ -752,7 +914,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 	 * access to the admin queue, as that might be only way to fix them up.
 	 */
 	if (status > 0) {
-		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
 		*count = 0;
 	} else {
 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
@@ -870,12 +1032,6 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
 		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, (void __user *)arg);
-#ifdef CONFIG_BLK_DEV_NVME_SCSI
-	case SG_GET_VERSION_NUM:
-		return nvme_sg_get_version_num((void __user *)arg);
-	case SG_IO:
-		return nvme_sg_io(ns, (void __user *)arg);
-#endif
 	default:
 #ifdef CONFIG_NVM
 		if (ns->ndev)
@@ -892,10 +1048,6 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
-	switch (cmd) {
-	case SG_IO:
-		return -ENOIOCTLCMD;
-	}
 	return nvme_ioctl(bdev, mode, cmd, arg);
 }
 #else
@@ -983,6 +1135,12 @@ static void nvme_init_integrity(struct nvme_ns *ns)
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+static void nvme_set_chunk_size(struct nvme_ns *ns)
+{
+	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
+}
+
 static void nvme_config_discard(struct nvme_ns *ns)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
@@ -991,8 +1149,15 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
 			NVME_DSM_MAX_RANGES);
 
-	ns->queue->limits.discard_alignment = logical_block_size;
-	ns->queue->limits.discard_granularity = logical_block_size;
+	if (ctrl->nr_streams && ns->sws && ns->sgs) {
+		unsigned int sz = logical_block_size * ns->sws * ns->sgs;
+
+		ns->queue->limits.discard_alignment = sz;
+		ns->queue->limits.discard_granularity = sz;
+	} else {
+		ns->queue->limits.discard_alignment = logical_block_size;
+		ns->queue->limits.discard_granularity = logical_block_size;
+	}
 	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
 	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
@@ -1016,7 +1181,15 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
 		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
 	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
-		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+		memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
+	if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
+		 /* Don't treat error as fatal: we potentially
+		  * already have an NGUID or EUI-64
+		  */
+		if (nvme_identify_ns_descs(ns, ns->ns_id))
+			dev_warn(ns->ctrl->device,
+				 "%s: Identify Descriptors failed\n", __func__);
+	}
 
 	return 0;
 }
@@ -1024,6 +1197,7 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 {
 	struct nvme_ns *ns = disk->private_data;
+	struct nvme_ctrl *ctrl = ns->ctrl;
 	u16 bs;
 
 	/*
@@ -1034,12 +1208,15 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->lba_shift == 0)
 		ns->lba_shift = 9;
 	bs = 1 << ns->lba_shift;
+	ns->noiob = le16_to_cpu(id->noiob);
 
 	blk_mq_freeze_queue(disk->queue);
 
-	if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
+	if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
 		nvme_prep_integrity(disk, id, bs);
 	blk_queue_logical_block_size(ns->queue, bs);
+	if (ns->noiob)
+		nvme_set_chunk_size(ns);
 	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
 		nvme_init_integrity(ns);
 	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@@ -1047,7 +1224,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	else
 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
-	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
+	if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
 		nvme_config_discard(ns);
 	blk_mq_unfreeze_queue(disk->queue);
 }
@@ -1283,7 +1460,7 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
 
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 {
-	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
+	unsigned long timeout = jiffies + (shutdown_timeout * HZ);
 	u32 csts;
 	int ret;
 
@@ -1342,7 +1519,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 	 * transitioning between power states.  Therefore, when running
 	 * in any given state, we will enter the next lower-power
 	 * non-operational state after waiting 50 * (enlat + exlat)
-	 * microseconds, as long as that state's total latency is under
+	 * microseconds, as long as that state's exit latency is under
 	 * the requested maximum latency.
 	 *
 	 * We will not autonomously enter any non-operational state for
@@ -1372,7 +1549,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 	if (!table)
 		return;
 
-	if (ctrl->ps_max_latency_us == 0) {
+	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 		/* Turn off APST. */
 		apste = 0;
 		dev_dbg(ctrl->device, "APST disabled\n");
@@ -1387,7 +1564,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 		 * lowest-power state, not the number of states.
 		 */
 		for (state = (int)ctrl->npss; state >= 0; state--) {
-			u64 total_latency_us, transition_ms;
+			u64 total_latency_us, exit_latency_us, transition_ms;
 
 			if (target)
 				table->entries[state] = target;
@@ -1408,12 +1585,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 			      NVME_PS_FLAGS_NON_OP_STATE))
 				continue;
 
-			total_latency_us =
-				(u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
-				+ le32_to_cpu(ctrl->psd[state].exit_lat);
-			if (total_latency_us > ctrl->ps_max_latency_us)
+			exit_latency_us =
+				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+			if (exit_latency_us > ctrl->ps_max_latency_us)
 				continue;
 
+			total_latency_us =
+				exit_latency_us +
+				le32_to_cpu(ctrl->psd[state].entry_lat);
+
 			/*
 			 * This state is good.  Use it as the APST idle
 			 * target for higher power states.
@@ -1525,6 +1705,31 @@ static bool quirk_matches(const struct nvme_id_ctrl *id,
 		string_matches(id->fr, q->fr, sizeof(id->fr));
 }
 
+static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+	size_t nqnlen;
+	int off;
+
+	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
+	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
+		strcpy(ctrl->subnqn, id->subnqn);
+		return;
+	}
+
+	if (ctrl->vs >= NVME_VS(1, 2, 1))
+		dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
+
+	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
+	off = snprintf(ctrl->subnqn, NVMF_NQN_SIZE,
+			"nqn.2014.08.org.nvmexpress:%4x%4x",
+			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
+	memcpy(ctrl->subnqn + off, id->sn, sizeof(id->sn));
+	off += sizeof(id->sn);
+	memcpy(ctrl->subnqn + off, id->mn, sizeof(id->mn));
+	off += sizeof(id->mn);
+	memset(ctrl->subnqn + off, 0, sizeof(ctrl->subnqn) - off);
+}
+
 /*
  * Initialize the cached copies of the Identify data and various controller
  * register in our nvme_ctrl structure.  This should be called as soon as
@@ -1536,7 +1741,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	u64 cap;
 	int ret, page_shift;
 	u32 max_hw_sectors;
-	u8 prev_apsta;
+	bool prev_apst_enabled;
 
 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
 	if (ret) {
@@ -1560,6 +1765,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		return -EIO;
 	}
 
+	nvme_init_subnqn(ctrl, id);
+
 	if (!ctrl->identified) {
 		/*
 		 * Check for quirks.  Quirk can depend on firmware version,
@@ -1579,7 +1786,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	}
 
 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
-		dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
+		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
 		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
 	}
 
@@ -1604,16 +1811,17 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->kas = le16_to_cpu(id->kas);
 
 	ctrl->npss = id->npss;
-	prev_apsta = ctrl->apsta;
+	ctrl->apsta = id->apsta;
+	prev_apst_enabled = ctrl->apst_enabled;
 	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
 		if (force_apst && id->apsta) {
-			dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
-			ctrl->apsta = 1;
+			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
+			ctrl->apst_enabled = true;
 		} else {
-			ctrl->apsta = 0;
+			ctrl->apst_enabled = false;
 		}
 	} else {
-		ctrl->apsta = id->apsta;
+		ctrl->apst_enabled = id->apsta;
 	}
 	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
 
@@ -1631,22 +1839,25 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 			ret = -EINVAL;
 
 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
-			dev_err(ctrl->dev,
+			dev_err(ctrl->device,
 				"keep-alive support is mandatory for fabrics\n");
 			ret = -EINVAL;
 		}
 	} else {
 		ctrl->cntlid = le16_to_cpu(id->cntlid);
+		ctrl->hmpre = le32_to_cpu(id->hmpre);
+		ctrl->hmmin = le32_to_cpu(id->hmmin);
 	}
 
 	kfree(id);
 
-	if (ctrl->apsta && !prev_apsta)
+	if (ctrl->apst_enabled && !prev_apst_enabled)
 		dev_pm_qos_expose_latency_tolerance(ctrl->device);
-	else if (!ctrl->apsta && prev_apsta)
+	else if (!ctrl->apst_enabled && prev_apst_enabled)
 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
 	nvme_configure_apst(ctrl);
+	nvme_configure_directives(ctrl);
 
 	ctrl->identified = true;
 
@@ -1732,7 +1943,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
 		dev_warn(ctrl->device, "resetting controller\n");
-		return ctrl->ops->reset_ctrl(ctrl);
+		return nvme_reset_ctrl_sync(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
 		return nvme_reset_subsystem(ctrl);
 	case NVME_IOCTL_RESCAN:
@@ -1758,7 +1969,7 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 	int ret;
 
-	ret = ctrl->ops->reset_ctrl(ctrl);
+	ret = nvme_reset_ctrl_sync(ctrl);
 	if (ret < 0)
 		return ret;
 	return count;
@@ -1784,8 +1995,8 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 	int serial_len = sizeof(ctrl->serial);
 	int model_len = sizeof(ctrl->model);
 
-	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
-		return sprintf(buf, "eui.%16phN\n", ns->uuid);
+	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+		return sprintf(buf, "eui.%16phN\n", ns->nguid);
 
 	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
 		return sprintf(buf, "eui.%8phN\n", ns->eui);
@@ -1800,11 +2011,28 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
 
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+	return sprintf(buf, "%pU\n", ns->nguid);
+}
+static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-	return sprintf(buf, "%pU\n", ns->uuid);
+
+	/* For backward compatibility, expose the NGUID to userspace if
+	 * we have no UUID set
+	 */
+	if (uuid_is_null(&ns->uuid)) {
+		printk_ratelimited(KERN_WARNING
+				   "No UUID available, providing old NGUID\n");
+		return sprintf(buf, "%pU\n", ns->nguid);
+	}
+	return sprintf(buf, "%pU\n", &ns->uuid);
 }
 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
 
@@ -1827,6 +2055,7 @@ static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
 static struct attribute *nvme_ns_attrs[] = {
 	&dev_attr_wwid.attr,
 	&dev_attr_uuid.attr,
+	&dev_attr_nguid.attr,
 	&dev_attr_eui.attr,
 	&dev_attr_nsid.attr,
 	NULL,
@@ -1839,7 +2068,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
 	if (a == &dev_attr_uuid.attr) {
-		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+		if (uuid_is_null(&ns->uuid) ||
+		    !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+			return 0;
+	}
+	if (a == &dev_attr_nguid.attr) {
+		if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
 			return 0;
 	}
 	if (a == &dev_attr_eui.attr) {
@@ -1928,8 +2162,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
 {
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ctrl->ops->get_subsysnqn(ctrl));
+	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subnqn);
 }
 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
 
@@ -1958,24 +2191,16 @@ static struct attribute *nvme_dev_attrs[] = {
 	NULL
 };
 
-#define CHECK_ATTR(ctrl, a, name)		\
-	if ((a) == &dev_attr_##name.attr &&	\
-	    !(ctrl)->ops->get_##name)		\
-		return 0
-
 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
 		struct attribute *a, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
-	if (a == &dev_attr_delete_controller.attr) {
-		if (!ctrl->ops->delete_ctrl)
-			return 0;
-	}
-
-	CHECK_ATTR(ctrl, a, subsysnqn);
-	CHECK_ATTR(ctrl, a, address);
+	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
+		return 0;
+	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
+		return 0;
 
 	return a->mode;
 }
@@ -2016,6 +2241,32 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	return ret;
 }
 
+static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
+{
+	struct streams_directive_params s;
+	int ret;
+
+	if (!ctrl->nr_streams)
+		return 0;
+
+	ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
+	if (ret)
+		return ret;
+
+	ns->sws = le32_to_cpu(s.sws);
+	ns->sgs = le16_to_cpu(s.sgs);
+
+	if (ns->sws) {
+		unsigned int bs = 1 << ns->lba_shift;
+
+		blk_queue_io_min(ns->queue, bs * ns->sws);
+		if (ns->sgs)
+			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
+	}
+
+	return 0;
+}
+
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns;
@@ -2045,6 +2296,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	nvme_set_queue_limits(ctrl, ns->queue);
+	nvme_setup_streams_ns(ctrl, ns);
 
 	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
 
@@ -2053,7 +2305,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	if (nvme_nvm_ns_supported(ns, id) &&
 				nvme_nvm_register(ns, disk_name, node)) {
-		dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+		dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
 		goto out_free_id;
 	}
 
@@ -2228,7 +2480,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 	 * removal.
 	 */
 	if (ctrl->state == NVME_CTRL_LIVE)
-		schedule_work(&ctrl->scan_work);
+		queue_work(nvme_wq, &ctrl->scan_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
@@ -2283,7 +2535,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		/*FALLTHRU*/
 	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
-		schedule_work(&ctrl->async_event_work);
+		queue_work(nvme_wq, &ctrl->async_event_work);
 		break;
 	default:
 		break;
@@ -2306,7 +2558,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
 	ctrl->event_limit = NVME_NR_AERS;
-	schedule_work(&ctrl->async_event_work);
+	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
 
@@ -2438,6 +2690,13 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
+
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ctrl->admin_q);
+
+	/* Forcibly start all queues to avoid having stuck requests */
+	blk_mq_start_hw_queues(ctrl->admin_q);
+
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
 		 * Revalidating a dead namespace sets capacity to 0. This will
@@ -2448,6 +2707,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 		revalidate_disk(ns->disk);
 		blk_set_queue_dying(ns->queue);
 
+		/* Forcibly unquiesce queues to avoid blocking dispatch */
+		blk_mq_unquiesce_queue(ns->queue);
+
 		/*
 		 * Forcibly start all queues to avoid having stuck requests.
 		 * Note that we must ensure the queues are not stopped
@@ -2526,7 +2788,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		blk_mq_start_stopped_hw_queues(ns->queue, true);
+		blk_mq_unquiesce_queue(ns->queue);
 		blk_mq_kick_requeue_list(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
@@ -2537,10 +2799,15 @@ int __init nvme_core_init(void)
 {
 	int result;
 
+	nvme_wq = alloc_workqueue("nvme-wq",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!nvme_wq)
+		return -ENOMEM;
+
 	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
 							&nvme_dev_fops);
 	if (result < 0)
-		return result;
+		goto destroy_wq;
 	else if (result > 0)
 		nvme_char_major = result;
 
@@ -2552,8 +2819,10 @@ int __init nvme_core_init(void)
 
 	return 0;
 
- unregister_chrdev:
+unregister_chrdev:
 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+destroy_wq:
+	destroy_workqueue(nvme_wq);
 	return result;
 }
 
@@ -2561,6 +2830,7 @@ void nvme_core_exit(void)
 {
 	class_destroy(nvme_class);
 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+	destroy_workqueue(nvme_wq);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 990e6fb..2e582a2 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -58,7 +58,6 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	kref_init(&host->ref);
 	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-	uuid_be_gen(&host->id);
 
 	list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -75,7 +74,6 @@ static struct nvmf_host *nvmf_host_default(void)
 		return NULL;
 
 	kref_init(&host->ref);
-	uuid_be_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
 		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
@@ -128,16 +126,6 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
 EXPORT_SYMBOL_GPL(nvmf_get_address);
 
 /**
- * nvmf_get_subsysnqn() - Get subsystem NQN
- * @ctrl:	Host NVMe controller instance which we got the NQN
- */
-const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl)
-{
-	return ctrl->opts->subsysnqn;
-}
-EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
-
-/**
  * nvmf_reg_read32() -  NVMe Fabrics "Property Get" API function.
  * @ctrl:	Host NVMe controller instance maintaining the admin
  *		queue used to submit the property read command to
@@ -337,6 +325,24 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 			}
 		}
 		break;
+
+	case NVME_SC_CONNECT_INVALID_HOST:
+		dev_err(ctrl->device,
+			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
+			data->subsysnqn, data->hostnqn);
+		break;
+
+	case NVME_SC_CONNECT_CTRL_BUSY:
+		dev_err(ctrl->device,
+			"Connect command failed: controller is busy or not available\n");
+		break;
+
+	case NVME_SC_CONNECT_FORMAT:
+		dev_err(ctrl->device,
+			"Connect incompatible format: %d",
+			cmd->connect.recfmt);
+		break;
+
 	default:
 		dev_err(ctrl->device,
 			"Connect command failed, error wo/DNR bit: %d\n",
@@ -376,13 +382,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
 	cmd.connect.qid = 0;
-
-	/*
-	 * fabrics spec sets a minimum of depth 32 for admin queue,
-	 * so set the queue with this depth always until
-	 * justification otherwise.
-	 */
-	cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
 
 	/*
 	 * Set keep-alive timeout in seconds granularity (ms * 1000)
@@ -395,7 +395,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -454,7 +454,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
 {
 	if (ctrl->opts->max_reconnects != -1 &&
-	    ctrl->opts->nr_reconnects < ctrl->opts->max_reconnects)
+	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
 		return true;
 
 	return false;
@@ -547,6 +547,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
 	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
 	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
+	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -558,6 +559,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	int token, ret = 0;
 	size_t nqnlen  = 0;
 	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
+	uuid_t hostid;
 
 	/* Set defaults */
 	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
@@ -568,6 +570,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	if (!options)
 		return -ENOMEM;
 
+	uuid_gen(&hostid);
+
 	while ((p = strsep(&o, ",\n")) != NULL) {
 		if (!*p)
 			continue;
@@ -724,6 +728,17 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			opts->host_traddr = p;
 			break;
+		case NVMF_OPT_HOST_ID:
+			p = match_strdup(args);
+			if (!p) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (uuid_parse(p, &hostid)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
@@ -743,6 +758,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		opts->host = nvmf_default_host;
 	}
 
+	uuid_copy(&opts->host->id, &hostid);
+
 out:
 	if (!opts->discovery_nqn && !opts->kato)
 		opts->kato = NVME_DEFAULT_KATO;
@@ -803,7 +820,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
 
 #define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
 #define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
-				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN)
+				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
+				 NVMF_OPT_HOST_ID)
 
 static struct nvme_ctrl *
 nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
@@ -854,6 +872,15 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_unlock;
 	}
 
+	if (strcmp(ctrl->subnqn, opts->subsysnqn)) {
+		dev_warn(ctrl->device,
+			"controller returned incorrect NQN: \"%s\".\n",
+			ctrl->subnqn);
+		mutex_unlock(&nvmf_transports_mutex);
+		ctrl->ops->delete_ctrl(ctrl);
+		return ERR_PTR(-EINVAL);
+	}
+
 	mutex_unlock(&nvmf_transports_mutex);
 	return ctrl;
 
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index f5a9c1f..bf33663 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -36,7 +36,7 @@ struct nvmf_host {
 	struct kref		ref;
 	struct list_head	list;
 	char			nqn[NVMF_NQN_SIZE];
-	uuid_be			id;
+	uuid_t			id;
 };
 
 /**
@@ -56,6 +56,7 @@ enum {
 	NVMF_OPT_RECONNECT_DELAY = 1 << 9,
 	NVMF_OPT_HOST_TRADDR	= 1 << 10,
 	NVMF_OPT_CTRL_LOSS_TMO	= 1 << 11,
+	NVMF_OPT_HOST_ID	= 1 << 12,
 };
 
 /**
@@ -80,7 +81,6 @@ enum {
  * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
  * @kato:	Keep-alive timeout.
  * @host:	Virtual NVMe host, contains the NQN and Host ID.
- * @nr_reconnects: number of reconnect attempted since the last ctrl failure
  * @max_reconnects: maximum number of allowed reconnect attempts before removing
  *              the controller, (-1) means reconnect forever, zero means remove
  *              immediately;
@@ -98,7 +98,6 @@ struct nvmf_ctrl_options {
 	bool			discovery_nqn;
 	unsigned int		kato;
 	struct nvmf_host	*host;
-	int			nr_reconnects;
 	int			max_reconnects;
 };
 
@@ -140,7 +139,6 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
 int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
-const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5b14cbe..ed87214 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -36,7 +36,7 @@
  */
 #define NVME_FC_NR_AEN_COMMANDS	1
 #define NVME_FC_AQ_BLKMQ_DEPTH	\
-	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+	(NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
 #define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
 
 enum nvme_fc_queue_flags {
@@ -161,12 +161,12 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct work_struct	delete_work;
-	struct work_struct	reset_work;
 	struct delayed_work	connect_work;
 
 	struct kref		ref;
 	u32			flags;
 	u32			iocnt;
+	wait_queue_head_t	ioabort_wait;
 
 	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
 
@@ -214,7 +214,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-static struct workqueue_struct *nvme_fc_wq;
 
 
 
@@ -878,8 +877,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
-	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
-		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
 	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
 		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
 	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
@@ -1139,6 +1137,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 /* *********************** NVME Ctrl Routines **************************** */
 
 static void __nvme_fc_final_op_cleanup(struct request *rq);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
 nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1241,8 +1240,10 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
+		if (ctrl->flags & FCCTRL_TERMIO) {
+			if (!--ctrl->iocnt)
+				wake_up(&ctrl->ioabort_wait);
+		}
 	}
 	if (op->flags & FCOP_FLAGS_RELEASED)
 		complete_rq = true;
@@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	struct nvme_command *sqe = &op->cmd_iu.sqe;
 	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
 	union nvme_result result;
-	bool complete_rq;
+	bool complete_rq, terminate_assoc = true;
 
 	/*
 	 * WARNING:
@@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	 * fabricate a CQE, the following fields will not be set as they
 	 * are not referenced:
 	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
+	 *
+	 * Failure or error of an individual i/o, in a transport
+	 * detected fashion unrelated to the nvme completion status,
+	 * potentially cause the initiator and target sides to get out
+	 * can potentially cause the initiator and target sides to get out
+	 * Per FC-NVME spec, failure of an individual command requires
+	 * the connection to be terminated, which in turn requires the
+	 * association to be terminated.
 	 */
 
 	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		goto done;
 	}
 
+	terminate_assoc = false;
+
 done:
 	if (op->flags & FCOP_FLAGS_AEN) {
 		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		atomic_set(&op->state, FCPOP_STATE_IDLE);
 		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
 		nvme_fc_ctrl_put(ctrl);
-		return;
+		goto check_error;
 	}
 
 	complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		nvme_end_request(rq, status, result);
 	} else
 		__nvme_fc_final_op_cleanup(rq);
+
+check_error:
+	if (terminate_assoc)
+		nvme_fc_error_recovery(ctrl, "transport detected io error");
 }
 
 static int
@@ -1435,18 +1450,8 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
-
-	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
-}
-
-static int
-nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
-		unsigned int hctx_idx, unsigned int numa_node)
-{
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_queue *queue = &ctrl->queues[0];
+	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
 
 	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
 }
@@ -1744,16 +1749,16 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
+	/* only proceed if in LIVE state - e.g. on first error */
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+		return;
+
 	dev_warn(ctrl->ctrl.device,
 		"NVME-FC{%d}: transport association error detected: %s\n",
 		ctrl->cnum, errmsg);
 	dev_warn(ctrl->ctrl.device,
 		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
 
-	/* stop the queues on error, cleanup is in reset thread */
-	if (ctrl->queue_count > 1)
-		nvme_stop_queues(&ctrl->ctrl);
-
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
 		dev_err(ctrl->ctrl.device,
 			"NVME-FC{%d}: error_recovery: Couldn't change state "
@@ -1761,10 +1766,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 		return;
 	}
 
-	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
-		dev_err(ctrl->ctrl.device,
-			"NVME-FC{%d}: error_recovery: Failed to schedule "
-			"reset work\n", ctrl->cnum);
+	nvme_reset_ctrl(&ctrl->ctrl);
 }
 
 static enum blk_eh_timer_return
@@ -1873,7 +1875,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	struct nvme_fc_fcp_op *op, u32 data_len,
 	enum nvmefc_fcp_datadir	io_dir)
@@ -1888,10 +1890,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	 * the target device is present
 	 */
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	if (!nvme_fc_ctrl_get(ctrl))
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1939,8 +1941,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		if (ret < 0) {
 			nvme_cleanup_cmd(op->rq);
 			nvme_fc_ctrl_put(ctrl);
-			return (ret == -ENOMEM || ret == -EAGAIN) ?
-				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+			if (ret == -ENOMEM || ret == -EAGAIN)
+				return BLK_STS_RESOURCE;
+			return BLK_STS_IOERR;
 		}
 	}
 
@@ -1957,28 +1960,26 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
-		if (op->rq) {			/* normal request */
+		if (op->rq)			/* normal request */
 			nvme_fc_unmap_data(ctrl, op->rq, op);
-			nvme_cleanup_cmd(op->rq);
-		}
 		/* else - aen. no cleanup needed */
 
 		nvme_fc_ctrl_put(ctrl);
 
 		if (ret != -EBUSY)
-			return BLK_MQ_RQ_QUEUE_ERROR;
+			return BLK_STS_IOERR;
 
 		if (op->rq) {
 			blk_mq_stop_hw_queues(op->rq->q);
 			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
 		}
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
@@ -1991,7 +1992,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_command *sqe = &cmdiu->sqe;
 	enum nvmefc_fcp_datadir	io_dir;
 	u32 data_len;
-	int ret;
+	blk_status_t ret;
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
@@ -2046,7 +2047,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	struct nvme_fc_fcp_op *aen_op;
 	unsigned long flags;
 	bool terminating = false;
-	int ret;
+	blk_status_t ret;
 
 	if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
 		return;
@@ -2078,7 +2079,6 @@ __nvme_fc_final_op_cleanup(struct request *rq)
 	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
 			FCOP_FLAGS_COMPLETE);
 
-	nvme_cleanup_cmd(rq);
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
@@ -2296,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	int ret;
 	bool changed;
 
-	++ctrl->ctrl.opts->nr_reconnects;
+	++ctrl->ctrl.nr_reconnects;
 
 	/*
 	 * Create the admin queue
@@ -2393,7 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
 
-	ctrl->ctrl.opts->nr_reconnects = 0;
+	ctrl->ctrl.nr_reconnects = 0;
 
 	if (ctrl->queue_count > 1) {
 		nvme_start_queues(&ctrl->ctrl);
@@ -2479,11 +2479,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
 
 	/* wait for all io that had to be aborted */
 	spin_lock_irqsave(&ctrl->lock, flags);
-	while (ctrl->iocnt) {
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		msleep(1000);
-		spin_lock_irqsave(&ctrl->lock, flags);
-	}
+	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
 	ctrl->flags &= ~FCCTRL_TERMIO;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 
@@ -2513,7 +2509,7 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
 	struct nvme_fc_ctrl *ctrl =
 		container_of(work, struct nvme_fc_ctrl, delete_work);
 
-	cancel_work_sync(&ctrl->reset_work);
+	cancel_work_sync(&ctrl->ctrl.reset_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	/*
@@ -2540,7 +2536,7 @@ __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
 		return true;
 
-	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+	if (!queue_work(nvme_wq, &ctrl->delete_work))
 		return true;
 
 	return false;
@@ -2567,7 +2563,7 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
 	ret = __nvme_fc_del_ctrl(ctrl);
 
 	if (!ret)
-		flush_workqueue(nvme_fc_wq);
+		flush_workqueue(nvme_wq);
 
 	nvme_put_ctrl(&ctrl->ctrl);
 
@@ -2592,13 +2588,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		dev_info(ctrl->ctrl.device,
 			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
 			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
-		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+		queue_delayed_work(nvme_wq, &ctrl->connect_work,
 				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else {
 		dev_warn(ctrl->ctrl.device,
 				"NVME-FC{%d}: Max reconnect attempts (%d) "
 				"reached. Removing controller\n",
-				ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+				ctrl->cnum, ctrl->ctrl.nr_reconnects);
 		WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
 	}
 }
@@ -2607,7 +2603,7 @@ static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
 	struct nvme_fc_ctrl *ctrl =
-			container_of(work, struct nvme_fc_ctrl, reset_work);
+		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
 	int ret;
 
 	/* will block while waiting for io to terminate */
@@ -2621,29 +2617,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
 
-/*
- * called by the nvme core layer, for sysfs interface that requests
- * a reset of the nvme controller
- */
-static int
-nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
-	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-
-	dev_info(ctrl->ctrl.device,
-		"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
-
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-		return -EBUSY;
-
-	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
-		return -EBUSY;
-
-	flush_work(&ctrl->reset_work);
-
-	return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.name			= "fc",
 	.module			= THIS_MODULE,
@@ -2651,11 +2624,9 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
-	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
 	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
-	.get_subsysnqn		= nvmf_get_subsysnqn,
 	.get_address		= nvmf_get_address,
 };
 
@@ -2681,7 +2652,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
 	.queue_rq	= nvme_fc_queue_rq,
 	.complete	= nvme_fc_complete_rq,
-	.init_request	= nvme_fc_init_admin_request,
+	.init_request	= nvme_fc_init_request,
 	.exit_request	= nvme_fc_exit_request,
 	.reinit_request	= nvme_fc_reinit_request,
 	.init_hctx	= nvme_fc_init_admin_hctx,
@@ -2726,7 +2697,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	kref_init(&ctrl->ref);
 
 	INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
-	INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
+	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
 	spin_lock_init(&ctrl->lock);
 
@@ -2791,6 +2762,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		ctrl->ctrl.opts = NULL;
 		/* initiate nvme ctrl ref counting teardown */
 		nvme_uninit_ctrl(&ctrl->ctrl);
+		nvme_put_ctrl(&ctrl->ctrl);
+
+		/* Remove core ctrl ref. */
+		nvme_put_ctrl(&ctrl->ctrl);
 
 		/* as we're past the point where we transition to the ref
 		 * counting teardown path, if we return a bad pointer here,
@@ -2950,20 +2925,7 @@ static struct nvmf_transport_ops nvme_fc_transport = {
 
 static int __init nvme_fc_init_module(void)
 {
-	int ret;
-
-	nvme_fc_wq = create_workqueue("nvme_fc_wq");
-	if (!nvme_fc_wq)
-		return -ENOMEM;
-
-	ret = nvmf_register_transport(&nvme_fc_transport);
-	if (ret)
-		goto err;
-
-	return 0;
-err:
-	destroy_workqueue(nvme_fc_wq);
-	return ret;
+	return nvmf_register_transport(&nvme_fc_transport);
 }
 
 static void __exit nvme_fc_exit_module(void)
@@ -2974,8 +2936,6 @@ static void __exit nvme_fc_exit_module(void)
 
 	nvmf_unregister_transport(&nvme_fc_transport);
 
-	destroy_workqueue(nvme_fc_wq);
-
 	ida_destroy(&nvme_fc_local_port_cnt);
 	ida_destroy(&nvme_fc_ctrl_cnt);
 }
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index f5df78e..be85413 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -242,7 +242,7 @@ static inline void _nvme_nvm_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
-	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
 	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
 }
 
@@ -480,7 +480,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
 					rqd->bio->bi_iter.bi_sector));
 }
 
-static void nvme_nvm_end_io(struct request *rq, int error)
+static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
 
@@ -509,7 +509,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
 	if (IS_ERR(rq)) {
 		kfree(cmd);
-		return -ENOMEM;
+		return PTR_ERR(rq);
 	}
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
@@ -571,13 +571,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-static void nvme_nvm_end_user_vio(struct request *rq, int error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	complete(waiting);
-}
-
 static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 				struct nvme_ns *ns,
 				struct nvme_nvm_command *vcmd,
@@ -608,7 +601,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
 
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-	rq->end_io_data = &wait;
 
 	if (ppa_buf && ppa_len) {
 		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
@@ -662,9 +654,7 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
 	}
 
 submit:
-	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
-
-	wait_for_completion_io(&wait);
+	blk_execute_rq(q, NULL, rq, 0);
 
 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
 		ret = -EINTR;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9d6a070..d70ff0f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -27,12 +27,11 @@ extern unsigned char nvme_io_timeout;
 extern unsigned char admin_timeout;
 #define ADMIN_TIMEOUT	(admin_timeout * HZ)
 
-extern unsigned char shutdown_timeout;
-#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
-
 #define NVME_DEFAULT_KATO	5
 #define NVME_KATO_GRACE		10
 
+extern struct workqueue_struct *nvme_wq;
+
 enum {
 	NVME_NS_LBA		= 0,
 	NVME_NS_LIGHTNVM	= 1,
@@ -131,6 +130,7 @@ struct nvme_ctrl {
 	struct device *device;	/* char device */
 	struct list_head node;
 	struct ida ns_ida;
+	struct work_struct reset_work;
 
 	struct opal_dev *opal_dev;
 
@@ -138,6 +138,7 @@ struct nvme_ctrl {
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
+	char subnqn[NVMF_NQN_SIZE];
 	u16 cntlid;
 
 	u32 ctrl_config;
@@ -147,6 +148,8 @@ struct nvme_ctrl {
 	u16 oncs;
 	u16 vid;
 	u16 oacs;
+	u16 nssa;
+	u16 nr_streams;
 	atomic_t abort_limit;
 	u8 event_limit;
 	u8 vwc;
@@ -165,6 +168,10 @@ struct nvme_ctrl {
 
 	/* Power saving configuration */
 	u64 ps_max_latency_us;
+	bool apst_enabled;
+
+	u32 hmpre;
+	u32 hmmin;
 
 	/* Fabrics only */
 	u16 sqsize;
@@ -172,12 +179,10 @@ struct nvme_ctrl {
 	u32 iorcsz;
 	u16 icdoff;
 	u16 maxcmd;
+	int nr_reconnects;
 	struct nvmf_ctrl_options *opts;
 };
 
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
 struct nvme_ns {
 	struct list_head list;
 
@@ -189,14 +194,18 @@ struct nvme_ns {
 	int instance;
 
 	u8 eui[8];
-	u8 uuid[16];
+	u8 nguid[16];
+	uuid_t uuid;
 
 	unsigned ns_id;
 	int lba_shift;
 	u16 ms;
+	u16 sgs;
+	u32 sws;
 	bool ext;
 	u8 pi_type;
 	unsigned long flags;
+	u16 noiob;
 
 #define NVME_NS_REMOVING 0
 #define NVME_NS_DEAD     1
@@ -214,11 +223,9 @@ struct nvme_ctrl_ops {
 	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
-	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
 	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
-	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 };
 
@@ -296,7 +303,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
@@ -310,23 +317,10 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen,
 		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
 		u32 *result, unsigned timeout);
-int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
-int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
-		struct nvme_id_ns **id);
-int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
-int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
-		      void *buffer, size_t buflen, u32 *result);
-int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
-		      void *buffer, size_t buflen, u32 *result);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
-
-struct sg_io_hdr;
-
-int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
-int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
-int nvme_sg_get_version_num(int __user *ip);
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 
 #ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d52701df..5b1ac79 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -17,28 +17,15 @@
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-mq-pci.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
 #include <linux/dmi.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/idr.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/kdev_t.h>
-#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
-#include <linux/ptrace.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/t10-pi.h>
 #include <linux/timer.h>
 #include <linux/types.h>
@@ -49,7 +36,6 @@
 #include "nvme.h"
 
 #define NVME_Q_DEPTH		1024
-#define NVME_AQ_DEPTH		256
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 
@@ -66,12 +52,14 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
-static struct workqueue_struct *nvme_workq;
+static unsigned int max_host_mem_size_mb = 128;
+module_param(max_host_mem_size_mb, uint, 0444);
+MODULE_PARM_DESC(max_host_mem_size_mb,
+	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
 
 struct nvme_dev;
 struct nvme_queue;
 
-static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
@@ -92,9 +80,8 @@ struct nvme_dev {
 	int q_depth;
 	u32 db_stride;
 	void __iomem *bar;
-	struct work_struct reset_work;
+	unsigned long bar_mapped_size;
 	struct work_struct remove_work;
-	struct timer_list watchdog_timer;
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
@@ -104,10 +91,18 @@ struct nvme_dev {
 	u32 cmbloc;
 	struct nvme_ctrl ctrl;
 	struct completion ioq_wait;
+
+	/* shadow doorbell buffer support: */
 	u32 *dbbuf_dbs;
 	dma_addr_t dbbuf_dbs_dma_addr;
 	u32 *dbbuf_eis;
 	dma_addr_t dbbuf_eis_dma_addr;
+
+	/* host memory buffer support: */
+	u64 host_mem_size;
+	u32 nr_host_mem_descs;
+	struct nvme_host_mem_buf_desc *host_mem_descs;
+	void **host_mem_desc_bufs;
 };
 
 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -185,8 +180,8 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
-	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
-	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
+	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
@@ -350,19 +345,6 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
 	nvmeq->tags = NULL;
 }
 
-static int nvme_admin_init_request(struct blk_mq_tag_set *set,
-		struct request *req, unsigned int hctx_idx,
-		unsigned int numa_node)
-{
-	struct nvme_dev *dev = set->driver_data;
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = dev->queues[0];
-
-	BUG_ON(!nvmeq);
-	iod->nvmeq = nvmeq;
-	return 0;
-}
-
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int hctx_idx)
 {
@@ -382,7 +364,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 {
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
+	struct nvme_queue *nvmeq = dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -427,7 +410,7 @@ static __le64 **iod_list(struct request *req)
 	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
@@ -436,7 +419,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
 		if (!iod->sg)
-			return BLK_MQ_RQ_QUEUE_BUSY;
+			return BLK_STS_RESOURCE;
 	} else {
 		iod->sg = iod->inline_sg;
 	}
@@ -446,7 +429,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 	iod->nents = 0;
 	iod->length = size;
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -616,21 +599,21 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct request_queue *q = req->q;
 	enum dma_data_direction dma_dir = rq_data_dir(req) ?
 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
-	int ret = BLK_MQ_RQ_QUEUE_ERROR;
+	blk_status_t ret = BLK_STS_IOERR;
 
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
 	if (!iod->nents)
 		goto out;
 
-	ret = BLK_MQ_RQ_QUEUE_BUSY;
+	ret = BLK_STS_RESOURCE;
 	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
 				DMA_ATTR_NO_WARN))
 		goto out;
@@ -638,7 +621,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 	if (!nvme_setup_prps(dev, req))
 		goto out_unmap;
 
-	ret = BLK_MQ_RQ_QUEUE_ERROR;
+	ret = BLK_STS_IOERR;
 	if (blk_integrity_rq(req)) {
 		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
 			goto out_unmap;
@@ -658,7 +641,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
 	if (blk_integrity_rq(req))
 		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_unmap:
 	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
@@ -688,7 +671,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
@@ -696,47 +679,34 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_command cmnd;
-	int ret = BLK_MQ_RQ_QUEUE_OK;
-
-	/*
-	 * If formated with metadata, require the block layer provide a buffer
-	 * unless this namespace is formated such that the metadata can be
-	 * stripped/generated by the controller with PRACT=1.
-	 */
-	if (ns && ns->ms && !blk_integrity_rq(req)) {
-		if (!(ns->pi_type && ns->ms == 8) &&
-		    !blk_rq_is_passthrough(req)) {
-			blk_mq_end_request(req, -EFAULT);
-			return BLK_MQ_RQ_QUEUE_OK;
-		}
-	}
+	blk_status_t ret;
 
 	ret = nvme_setup_cmd(ns, req, &cmnd);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		return ret;
 
 	ret = nvme_init_iod(req, dev);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		goto out_free_cmd;
 
-	if (blk_rq_nr_phys_segments(req))
+	if (blk_rq_nr_phys_segments(req)) {
 		ret = nvme_map_data(dev, req, &cmnd);
-
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
-		goto out_cleanup_iod;
+		if (ret)
+			goto out_cleanup_iod;
+	}
 
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	if (unlikely(nvmeq->cq_vector < 0)) {
-		ret = BLK_MQ_RQ_QUEUE_ERROR;
+		ret = BLK_STS_IOERR;
 		spin_unlock_irq(&nvmeq->q_lock);
 		goto out_cleanup_iod;
 	}
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	nvme_process_cq(nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 out_cleanup_iod:
 	nvme_free_iod(dev, req);
 out_free_cmd:
@@ -759,65 +729,75 @@ static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
 	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
 }
 
-static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
+static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
 {
-	u16 head, phase;
+	u16 head = nvmeq->cq_head;
 
-	head = nvmeq->cq_head;
-	phase = nvmeq->cq_phase;
-
-	while (nvme_cqe_valid(nvmeq, head, phase)) {
-		struct nvme_completion cqe = nvmeq->cqes[head];
-		struct request *req;
-
-		if (++head == nvmeq->q_depth) {
-			head = 0;
-			phase = !phase;
-		}
-
-		if (tag && *tag == cqe.command_id)
-			*tag = -1;
-
-		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-			dev_warn(nvmeq->dev->ctrl.device,
-				"invalid id %d completed on queue %d\n",
-				cqe.command_id, le16_to_cpu(cqe.sq_id));
-			continue;
-		}
-
-		/*
-		 * AEN requests are special as they don't time out and can
-		 * survive any kind of queue freeze and often don't respond to
-		 * aborts.  We don't even bother to allocate a struct request
-		 * for them but rather special case them here.
-		 */
-		if (unlikely(nvmeq->qid == 0 &&
-				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl,
-					cqe.status, &cqe.result);
-			continue;
-		}
-
-		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		nvme_end_request(req, cqe.status, cqe.result);
-	}
-
-	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
-		return;
-
-	if (likely(nvmeq->cq_vector >= 0))
+	if (likely(nvmeq->cq_vector >= 0)) {
 		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
 						      nvmeq->dbbuf_cq_ei))
 			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
-	nvmeq->cq_head = head;
-	nvmeq->cq_phase = phase;
+	}
+}
 
-	nvmeq->cqe_seen = 1;
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+		struct nvme_completion *cqe)
+{
+	struct request *req;
+
+	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
+	/*
+	 * AEN requests are special as they don't time out and can
+	 * survive any kind of queue freeze and often don't respond to
+	 * aborts.  We don't even bother to allocate a struct request
+	 * for them but rather special case them here.
+	 */
+	if (unlikely(nvmeq->qid == 0 &&
+			cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+		nvme_complete_async_event(&nvmeq->dev->ctrl,
+				cqe->status, &cqe->result);
+		return;
+	}
+
+	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+	nvme_end_request(req, cqe->status, cqe->result);
+}
+
+static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
+		struct nvme_completion *cqe)
+{
+	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
+		*cqe = nvmeq->cqes[nvmeq->cq_head];
+
+		if (++nvmeq->cq_head == nvmeq->q_depth) {
+			nvmeq->cq_head = 0;
+			nvmeq->cq_phase = !nvmeq->cq_phase;
+		}
+		return true;
+	}
+	return false;
 }
 
 static void nvme_process_cq(struct nvme_queue *nvmeq)
 {
-	__nvme_process_cq(nvmeq, NULL);
+	struct nvme_completion cqe;
+	int consumed = 0;
+
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+	}
+
+	if (consumed) {
+		nvme_ring_cq_doorbell(nvmeq);
+		nvmeq->cqe_seen = 1;
+	}
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -842,16 +822,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
-		spin_lock_irq(&nvmeq->q_lock);
-		__nvme_process_cq(nvmeq, &tag);
-		spin_unlock_irq(&nvmeq->q_lock);
+	struct nvme_completion cqe;
+	int found = 0, consumed = 0;
 
-		if (tag == -1)
-			return 1;
-	}
+	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return 0;
 
-	return 0;
+	spin_lock_irq(&nvmeq->q_lock);
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+
+		if (tag == cqe.command_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (consumed)
+		nvme_ring_cq_doorbell(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return found;
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
@@ -939,7 +931,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static void abort_endio(struct request *req, int error)
+static void abort_endio(struct request *req, blk_status_t error)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = iod->nvmeq;
@@ -950,6 +942,51 @@ static void abort_endio(struct request *req, int error)
 	blk_mq_free_request(req);
 }
 
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+
+	/* If true, indicates loss of adapter communication, possibly by a
+	 * NVMe Subsystem reset.
+	 */
+	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+	/* If there is a reset ongoing, we shouldn't reset again. */
+	if (dev->ctrl.state == NVME_CTRL_RESETTING)
+		return false;
+
+	/* We shouldn't reset unless the controller is on fatal error state
+	 * _or_ if we lost the communication with it.
+	 */
+	if (!(csts & NVME_CSTS_CFS) && !nssro)
+		return false;
+
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return false;
+
+	return true;
+}
+
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+	/* Read a config register to help see what died. */
+	u16 pci_status;
+	int result;
+
+	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+				      &pci_status);
+	if (result == PCIBIOS_SUCCESSFUL)
+		dev_warn(dev->ctrl.device,
+			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+			 csts, pci_status);
+	else
+		dev_warn(dev->ctrl.device,
+			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+			 csts, result);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -957,6 +994,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
 	struct nvme_command cmd;
+	u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+	/*
+	 * Reset immediately if the controller has failed
+	 */
+	if (nvme_should_reset(dev, csts)) {
+		nvme_warn_reset(dev, csts);
+		nvme_dev_disable(dev, false);
+		nvme_reset_ctrl(&dev->ctrl);
+		return BLK_EH_HANDLED;
+	}
 
 	/*
 	 * Did we miss an interrupt?
@@ -993,7 +1041,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
-		nvme_reset(dev);
+		nvme_reset_ctrl(&dev->ctrl);
 
 		/*
 		 * Mark the request as handled, since the inline shutdown
@@ -1247,7 +1295,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.complete	= nvme_pci_complete_rq,
 	.init_hctx	= nvme_admin_init_hctx,
 	.exit_hctx      = nvme_admin_exit_hctx,
-	.init_request	= nvme_admin_init_request,
+	.init_request	= nvme_init_request,
 	.timeout	= nvme_timeout,
 };
 
@@ -1311,6 +1359,32 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 	return 0;
 }
 
+static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
+}
+
+static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
+{
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	if (size <= dev->bar_mapped_size)
+		return 0;
+	if (size > pci_resource_len(pdev, 0))
+		return -ENOMEM;
+	if (dev->bar)
+		iounmap(dev->bar);
+	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+	if (!dev->bar) {
+		dev->bar_mapped_size = 0;
+		return -ENOMEM;
+	}
+	dev->bar_mapped_size = size;
+	dev->dbs = dev->bar + NVME_REG_DBS;
+
+	return 0;
+}
+
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
 	int result;
@@ -1318,6 +1392,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 	struct nvme_queue *nvmeq;
 
+	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
+	if (result < 0)
+		return result;
+
 	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
 						NVME_CAP_NSSRC(cap) : 0;
 
@@ -1358,66 +1436,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	return result;
 }
 
-static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
-{
-
-	/* If true, indicates loss of adapter communication, possibly by a
-	 * NVMe Subsystem reset.
-	 */
-	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
-
-	/* If there is a reset ongoing, we shouldn't reset again. */
-	if (work_busy(&dev->reset_work))
-		return false;
-
-	/* We shouldn't reset unless the controller is on fatal error state
-	 * _or_ if we lost the communication with it.
-	 */
-	if (!(csts & NVME_CSTS_CFS) && !nssro)
-		return false;
-
-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
-	return true;
-}
-
-static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
-{
-	/* Read a config register to help see what died. */
-	u16 pci_status;
-	int result;
-
-	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
-				      &pci_status);
-	if (result == PCIBIOS_SUCCESSFUL)
-		dev_warn(dev->ctrl.device,
-			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
-			 csts, pci_status);
-	else
-		dev_warn(dev->ctrl.device,
-			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
-			 csts, result);
-}
-
-static void nvme_watchdog_timer(unsigned long data)
-{
-	struct nvme_dev *dev = (struct nvme_dev *)data;
-	u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
-	/* Skip controllers under certain specific conditions. */
-	if (nvme_should_reset(dev, csts)) {
-		if (!nvme_reset(dev))
-			nvme_warn_reset(dev, csts);
-		return;
-	}
-
-	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
-}
-
 static int nvme_create_io_queues(struct nvme_dev *dev)
 {
 	unsigned i, max;
@@ -1514,18 +1532,170 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	}
 }
 
-static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
+	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+	struct nvme_command c;
+	u64 dma_addr;
+	int ret;
+
+	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
+			DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, dma_addr))
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.features.opcode	= nvme_admin_set_features;
+	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
+	c.features.dword11	= cpu_to_le32(bits);
+	c.features.dword12	= cpu_to_le32(dev->host_mem_size >>
+					      ilog2(dev->ctrl.page_size));
+	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
+	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
+	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);
+
+	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+	if (ret) {
+		dev_warn(dev->ctrl.device,
+			 "failed to set host mem (err %d, flags %#x).\n",
+			 ret, bits);
+	}
+	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
+	return ret;
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->nr_host_mem_descs; i++) {
+		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
+		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+
+		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
+				le64_to_cpu(desc->addr));
+	}
+
+	kfree(dev->host_mem_desc_bufs);
+	dev->host_mem_desc_bufs = NULL;
+	kfree(dev->host_mem_descs);
+	dev->host_mem_descs = NULL;
+}
+
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+	struct nvme_host_mem_buf_desc *descs;
+	u32 chunk_size, max_entries, i = 0;
+	void **bufs;
+	u64 size, tmp;
+
+	/* start big and work our way down */
+	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
+retry:
+	tmp = (preferred + chunk_size - 1);
+	do_div(tmp, chunk_size);
+	max_entries = tmp;
+	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+	if (!descs)
+		goto out;
+
+	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
+	if (!bufs)
+		goto out_free_descs;
+
+	for (size = 0; size < preferred; size += chunk_size) {
+		u32 len = min_t(u64, chunk_size, preferred - size);
+		dma_addr_t dma_addr;
+
+		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
+				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+		if (!bufs[i])
+			break;
+
+		descs[i].addr = cpu_to_le64(dma_addr);
+		descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+		i++;
+	}
+
+	if (!size || (min && size < min)) {
+		dev_warn(dev->ctrl.device,
+			"failed to allocate host memory buffer.\n");
+		goto out_free_bufs;
+	}
+
+	dev_info(dev->ctrl.device,
+		"allocated %lld MiB host memory buffer.\n",
+		size >> ilog2(SZ_1M));
+	dev->nr_host_mem_descs = i;
+	dev->host_mem_size = size;
+	dev->host_mem_descs = descs;
+	dev->host_mem_desc_bufs = bufs;
+	return 0;
+
+out_free_bufs:
+	while (--i >= 0) {
+		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+
+		dma_free_coherent(dev->dev, size, bufs[i],
+				le64_to_cpu(descs[i].addr));
+	}
+
+	kfree(bufs);
+out_free_descs:
+	kfree(descs);
+out:
+	/* try a smaller chunk size if we failed early */
+	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
+		chunk_size /= 2;
+		goto retry;
+	}
+	dev->host_mem_descs = NULL;
+	return -ENOMEM;
+}
+
+static void nvme_setup_host_mem(struct nvme_dev *dev)
+{
+	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
+	u64 min = (u64)dev->ctrl.hmmin * 4096;
+	u32 enable_bits = NVME_HOST_MEM_ENABLE;
+
+	preferred = min(preferred, max);
+	if (min > max) {
+		dev_warn(dev->ctrl.device,
+			"min host memory (%lld MiB) above limit (%d MiB).\n",
+			min >> ilog2(SZ_1M), max_host_mem_size_mb);
+		nvme_free_host_mem(dev);
+		return;
+	}
+
+	/*
+	 * If we already have a buffer allocated, check if we can reuse it.
+	 */
+	if (dev->host_mem_descs) {
+		if (dev->host_mem_size >= min)
+			enable_bits |= NVME_HOST_MEM_RETURN;
+		else
+			nvme_free_host_mem(dev);
+	}
+
+	if (!dev->host_mem_descs) {
+		if (nvme_alloc_host_mem(dev, min, preferred))
+			return;
+	}
+
+	if (nvme_set_host_mem(dev, enable_bits))
+		nvme_free_host_mem(dev);
 }
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	int result, nr_io_queues, size;
+	int result, nr_io_queues;
+	unsigned long size;
 
-	nr_io_queues = num_online_cpus();
+	nr_io_queues = num_present_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1542,20 +1712,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 			nvme_release_cmb(dev);
 	}
 
-	size = db_bar_size(dev, nr_io_queues);
-	if (size > 8192) {
-		iounmap(dev->bar);
-		do {
-			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
-			if (dev->bar)
-				break;
-			if (!--nr_io_queues)
-				return -ENOMEM;
-			size = db_bar_size(dev, nr_io_queues);
-		} while (1);
-		dev->dbs = dev->bar + 4096;
-		adminq->q_db = dev->dbs;
-	}
+	do {
+		size = db_bar_size(dev, nr_io_queues);
+		result = nvme_remap_bar(dev, size);
+		if (!result)
+			break;
+		if (!--nr_io_queues)
+			return -ENOMEM;
+	} while (1);
+	adminq->q_db = dev->dbs;
 
 	/* Deregister the admin queue's interrupt */
 	pci_free_irq(pdev, 0, adminq);
@@ -1586,7 +1751,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	return nvme_create_io_queues(dev);
 }
 
-static void nvme_del_queue_end(struct request *req, int error)
+static void nvme_del_queue_end(struct request *req, blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1594,7 +1759,7 @@ static void nvme_del_queue_end(struct request *req, int error)
 	complete(&nvmeq->dev->ioq_wait);
 }
 
-static void nvme_del_cq_end(struct request *req, int error)
+static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1799,13 +1964,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	bool dead = true;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-	del_timer_sync(&dev->watchdog_timer);
-
 	mutex_lock(&dev->shutdown_lock);
 	if (pci_is_enabled(pdev)) {
 		u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
-		if (dev->ctrl.state == NVME_CTRL_LIVE)
+		if (dev->ctrl.state == NVME_CTRL_LIVE ||
+		    dev->ctrl.state == NVME_CTRL_RESETTING)
 			nvme_start_freeze(&dev->ctrl);
 		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
 			pdev->error_state  != pci_channel_io_normal);
@@ -1815,8 +1979,20 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	 * Give the controller a chance to complete all entered requests if
 	 * doing a safe shutdown.
 	 */
-	if (!dead && shutdown)
-		nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+	if (!dead) {
+		if (shutdown)
+			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+
+		/*
+		 * If the controller is still alive, tell it to stop using the
+		 * host memory buffer.  In theory the shutdown / reset should
+		 * make sure that it doesn't access the host memory anymore,
+		 * but I'd rather be safe than sorry.
+		 */
+		if (dev->host_mem_descs)
+			nvme_set_host_mem(dev, 0);
+
+	}
 	nvme_stop_queues(&dev->ctrl);
 
 	queues = dev->online_queues - 1;
@@ -1899,11 +2075,12 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
 static void nvme_reset_work(struct work_struct *work)
 {
-	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+	struct nvme_dev *dev =
+		container_of(work, struct nvme_dev, ctrl.reset_work);
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result = -ENODEV;
 
-	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
 
 	/*
@@ -1913,9 +2090,6 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-		goto out;
-
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out;
@@ -1951,6 +2125,9 @@ static void nvme_reset_work(struct work_struct *work)
 				 "unable to allocate dma for dbbuf\n");
 	}
 
+	if (dev->ctrl.hmpre)
+		nvme_setup_host_mem(dev);
+
 	result = nvme_setup_io_queues(dev);
 	if (result)
 		goto out;
@@ -1964,8 +2141,6 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->online_queues > 1)
 		nvme_queue_async_events(&dev->ctrl);
 
-	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
-
 	/*
 	 * Keep the controller around but remove all namespaces if we don't have
 	 * any working I/O queue.
@@ -2005,17 +2180,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 	nvme_put_ctrl(&dev->ctrl);
 }
 
-static int nvme_reset(struct nvme_dev *dev)
-{
-	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
-		return -ENODEV;
-	if (work_busy(&dev->reset_work))
-		return -ENODEV;
-	if (!queue_work(nvme_workq, &dev->reset_work))
-		return -EBUSY;
-	return 0;
-}
-
 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
 	*val = readl(to_nvme_dev(ctrl)->bar + off);
@@ -2034,16 +2198,6 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	return 0;
 }
 
-static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
-{
-	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	int ret = nvme_reset(dev);
-
-	if (!ret)
-		flush_work(&dev->reset_work);
-	return ret;
-}
-
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.name			= "pcie",
 	.module			= THIS_MODULE,
@@ -2051,7 +2205,6 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.reg_read32		= nvme_pci_reg_read32,
 	.reg_write32		= nvme_pci_reg_write32,
 	.reg_read64		= nvme_pci_reg_read64,
-	.reset_ctrl		= nvme_pci_reset_ctrl,
 	.free_ctrl		= nvme_pci_free_ctrl,
 	.submit_async_event	= nvme_pci_submit_async_event,
 };
@@ -2063,8 +2216,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	if (pci_request_mem_regions(pdev, "nvme"))
 		return -ENODEV;
 
-	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-	if (!dev->bar)
+	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
 		goto release;
 
 	return 0;
@@ -2118,10 +2270,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto free;
 
-	INIT_WORK(&dev->reset_work, nvme_reset_work);
+	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
-	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
-		(unsigned long)dev);
 	mutex_init(&dev->shutdown_lock);
 	init_completion(&dev->ioq_wait);
 
@@ -2136,9 +2286,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-	queue_work(nvme_workq, &dev->reset_work);
+	queue_work(nvme_wq, &dev->ctrl.reset_work);
 	return 0;
 
  release_pools:
@@ -2159,7 +2310,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_disable(dev, false);
 	else
-		nvme_reset(dev);
+		nvme_reset_ctrl(&dev->ctrl);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2179,6 +2330,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
 	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+	cancel_work_sync(&dev->ctrl.reset_work);
 	pci_set_drvdata(pdev, NULL);
 
 	if (!pci_device_is_present(pdev)) {
@@ -2186,9 +2338,10 @@ static void nvme_remove(struct pci_dev *pdev)
 		nvme_dev_disable(dev, false);
 	}
 
-	flush_work(&dev->reset_work);
+	flush_work(&dev->ctrl.reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
+	nvme_free_host_mem(dev);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
 	nvme_release_prp_pools(dev);
@@ -2229,7 +2382,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	nvme_reset(ndev);
+	nvme_reset_ctrl(&ndev->ctrl);
 	return 0;
 }
 #endif
@@ -2268,7 +2421,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
 	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
-	nvme_reset(dev);
+	nvme_reset_ctrl(&dev->ctrl);
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -2324,22 +2477,12 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
-	int result;
-
-	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
-	if (!nvme_workq)
-		return -ENOMEM;
-
-	result = pci_register_driver(&nvme_driver);
-	if (result)
-		destroy_workqueue(nvme_workq);
-	return result;
+	return pci_register_driver(&nvme_driver);
 }
 
 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
-	destroy_workqueue(nvme_workq);
 	_nvme_check_size();
 }
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 28bd255..6d4119d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -48,7 +48,7 @@
  */
 #define NVME_RDMA_NR_AEN_COMMANDS      1
 #define NVME_RDMA_AQ_BLKMQ_DEPTH       \
-	(NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
+	(NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
 
 struct nvme_rdma_device {
 	struct ib_device       *dev;
@@ -80,10 +80,8 @@ struct nvme_rdma_request {
 };
 
 enum nvme_rdma_queue_flags {
-	NVME_RDMA_Q_CONNECTED = (1 << 0),
-	NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
-	NVME_RDMA_Q_DELETING = (1 << 2),
-	NVME_RDMA_Q_LIVE = (1 << 3),
+	NVME_RDMA_Q_LIVE		= 0,
+	NVME_RDMA_Q_DELETING		= 1,
 };
 
 struct nvme_rdma_queue {
@@ -103,9 +101,6 @@ struct nvme_rdma_queue {
 };
 
 struct nvme_rdma_ctrl {
-	/* read and written in the hot path */
-	spinlock_t		lock;
-
 	/* read only in the hot path */
 	struct nvme_rdma_queue	*queues;
 	u32			queue_count;
@@ -113,7 +108,6 @@ struct nvme_rdma_ctrl {
 	/* other member variables */
 	struct blk_mq_tag_set	tag_set;
 	struct work_struct	delete_work;
-	struct work_struct	reset_work;
 	struct work_struct	err_work;
 
 	struct nvme_rdma_qe	async_event_sqe;
@@ -145,8 +139,6 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(nvme_rdma_ctrl_list);
 static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
 
-static struct workqueue_struct *nvme_rdma_wq;
-
 /*
  * Disabling this option makes small I/O go faster, but is fundamentally
  * unsafe.  With it turned off we will have to register a global rkey that
@@ -301,10 +293,12 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	return ret;
 }
 
-static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
-		struct request *rq, unsigned int queue_idx)
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx)
 {
+	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
 	struct nvme_rdma_device *dev = queue->device;
 
@@ -315,22 +309,13 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
 			DMA_TO_DEVICE);
 }
 
-static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
-		struct request *rq, unsigned int hctx_idx)
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+		struct request *rq, unsigned int hctx_idx,
+		unsigned int numa_node)
 {
-	return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
-}
-
-static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
-		struct request *rq, unsigned int hctx_idx)
-{
-	return __nvme_rdma_exit_request(set->driver_data, rq, 0);
-}
-
-static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
-		struct request *rq, unsigned int queue_idx)
-{
+	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
@@ -358,20 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	return -ENOMEM;
 }
 
-static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
-		struct request *rq, unsigned int hctx_idx,
-		unsigned int numa_node)
-{
-	return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
-}
-
-static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
-		struct request *rq, unsigned int hctx_idx,
-		unsigned int numa_node)
-{
-	return __nvme_rdma_init_request(set->driver_data, rq, 0);
-}
-
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
@@ -469,9 +440,6 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 	struct nvme_rdma_device *dev;
 	struct ib_device *ibdev;
 
-	if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
-		return;
-
 	dev = queue->device;
 	ibdev = dev->dev;
 	rdma_destroy_qp(queue->cm_id);
@@ -483,17 +451,21 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 	nvme_rdma_dev_put(dev);
 }
 
-static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
-		struct nvme_rdma_device *dev)
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
-	struct ib_device *ibdev = dev->dev;
+	struct ib_device *ibdev;
 	const int send_wr_factor = 3;			/* MR, SEND, INV */
 	const int cq_factor = send_wr_factor + 1;	/* + RECV */
 	int comp_vector, idx = nvme_rdma_queue_idx(queue);
-
 	int ret;
 
-	queue->device = dev;
+	queue->device = nvme_rdma_find_get_device(queue->cm_id);
+	if (!queue->device) {
+		dev_err(queue->cm_id->device->dev.parent,
+			"no client data found!\n");
+		return -ECONNREFUSED;
+	}
+	ibdev = queue->device->dev;
 
 	/*
 	 * The admin queue is barely used once the controller is live, so don't
@@ -506,12 +478,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
 
 
 	/* +1 for ib_stop_cq */
-	queue->ib_cq = ib_alloc_cq(dev->dev, queue,
-				cq_factor * queue->queue_size + 1, comp_vector,
-				IB_POLL_SOFTIRQ);
+	queue->ib_cq = ib_alloc_cq(ibdev, queue,
+				cq_factor * queue->queue_size + 1,
+				comp_vector, IB_POLL_SOFTIRQ);
 	if (IS_ERR(queue->ib_cq)) {
 		ret = PTR_ERR(queue->ib_cq);
-		goto out;
+		goto out_put_dev;
 	}
 
 	ret = nvme_rdma_create_qp(queue, send_wr_factor);
@@ -524,7 +496,6 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
 		ret = -ENOMEM;
 		goto out_destroy_qp;
 	}
-	set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
 	return 0;
 
@@ -532,7 +503,8 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
 	ib_destroy_qp(queue->qp);
 out_destroy_ib_cq:
 	ib_free_cq(queue->ib_cq);
-out:
+out_put_dev:
+	nvme_rdma_dev_put(queue->device);
 	return ret;
 }
 
@@ -583,12 +555,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 	}
 
 	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
-	set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
 	return 0;
 
 out_destroy_cm_id:
-	nvme_rdma_destroy_queue_ib(queue);
 	rdma_destroy_id(queue->cm_id);
 	return ret;
 }
@@ -718,11 +688,11 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 	if (nvmf_should_reconnect(&ctrl->ctrl)) {
 		dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
 			ctrl->ctrl.opts->reconnect_delay);
-		queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+		queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
 				ctrl->ctrl.opts->reconnect_delay * HZ);
 	} else {
 		dev_info(ctrl->ctrl.device, "Removing controller...\n");
-		queue_work(nvme_rdma_wq, &ctrl->delete_work);
+		queue_work(nvme_wq, &ctrl->delete_work);
 	}
 }
 
@@ -733,7 +703,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	bool changed;
 	int ret;
 
-	++ctrl->ctrl.opts->nr_reconnects;
+	++ctrl->ctrl.nr_reconnects;
 
 	if (ctrl->queue_count > 1) {
 		nvme_rdma_free_io_queues(ctrl);
@@ -749,40 +719,37 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto requeue;
 
-	ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+	ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
 	if (ret)
 		goto requeue;
 
-	blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
 	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 	if (ret)
-		goto stop_admin_q;
+		goto requeue;
 
 	set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (ret)
-		goto stop_admin_q;
+		goto requeue;
 
 	nvme_start_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		ret = nvme_rdma_init_io_queues(ctrl);
 		if (ret)
-			goto stop_admin_q;
+			goto requeue;
 
 		ret = nvme_rdma_connect_io_queues(ctrl);
 		if (ret)
-			goto stop_admin_q;
+			goto requeue;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
 	WARN_ON_ONCE(!changed);
-	ctrl->ctrl.opts->nr_reconnects = 0;
+	ctrl->ctrl.nr_reconnects = 0;
 
 	if (ctrl->queue_count > 1) {
-		nvme_start_queues(&ctrl->ctrl);
 		nvme_queue_scan(&ctrl->ctrl);
 		nvme_queue_async_events(&ctrl->ctrl);
 	}
@@ -791,11 +758,9 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
 	return;
 
-stop_admin_q:
-	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 requeue:
 	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
-			ctrl->ctrl.opts->nr_reconnects);
+			ctrl->ctrl.nr_reconnects);
 	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -807,10 +772,8 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
-	for (i = 0; i < ctrl->queue_count; i++) {
-		clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+	for (i = 0; i < ctrl->queue_count; i++)
 		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
-	}
 
 	if (ctrl->queue_count > 1)
 		nvme_stop_queues(&ctrl->ctrl);
@@ -823,6 +786,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
 				nvme_cancel_request, &ctrl->ctrl);
 
+	/*
+	 * queues are not alive anymore, so restart the queues to fail fast
+	 * new IO
+	 */
+	blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+	nvme_start_queues(&ctrl->ctrl);
+
 	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -831,7 +801,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
 		return;
 
-	queue_work(nvme_rdma_wq, &ctrl->err_work);
+	queue_work(nvme_wq, &ctrl->err_work);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
@@ -1276,21 +1246,11 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
 
 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 {
-	struct nvme_rdma_device *dev;
 	int ret;
 
-	dev = nvme_rdma_find_get_device(queue->cm_id);
-	if (!dev) {
-		dev_err(queue->cm_id->device->dev.parent,
-			"no client data found!\n");
-		return -ECONNREFUSED;
-	}
-
-	ret = nvme_rdma_create_queue_ib(queue, dev);
-	if (ret) {
-		nvme_rdma_dev_put(dev);
-		goto out;
-	}
+	ret = nvme_rdma_create_queue_ib(queue);
+	if (ret)
+		return ret;
 
 	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
 	if (ret) {
@@ -1304,7 +1264,6 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 
 out_destroy_queue:
 	nvme_rdma_destroy_queue_ib(queue);
-out:
 	return ret;
 }
 
@@ -1332,8 +1291,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 	 * specified by the Fabrics standard.
 	 */
 	if (priv.qid == 0) {
-		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
-		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+		priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
 	} else {
 		/*
 		 * current interpretation of the fabrics spec
@@ -1381,12 +1340,14 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		complete(&queue->cm_done);
 		return 0;
 	case RDMA_CM_EVENT_REJECTED:
+		nvme_rdma_destroy_queue_ib(queue);
 		cm_error = nvme_rdma_conn_rejected(queue, ev);
 		break;
-	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
+		nvme_rdma_destroy_queue_ib(queue);
+	case RDMA_CM_EVENT_ADDR_ERROR:
 		dev_dbg(queue->ctrl->ctrl.device,
 			"CM error event %d\n", ev->event);
 		cm_error = -ECONNRESET;
@@ -1433,22 +1394,32 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 /*
  * We cannot accept any other command until the Connect command has completed.
  */
-static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
-		struct request *rq)
+static inline blk_status_t
+nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
 	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
 		struct nvme_command *cmd = nvme_req(rq)->cmd;
 
 		if (!blk_rq_is_passthrough(rq) ||
 		    cmd->common.opcode != nvme_fabrics_command ||
-		    cmd->fabrics.fctype != nvme_fabrics_type_connect)
-			return false;
+		    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+			/*
+			 * reconnecting state means transport disruption, which
+			 * can take a long time and might even fail permanently,
+			 * so we can't let incoming I/O be requeued forever.
+			 * fail it fast to allow upper layers a chance to
+			 * failover.
+			 */
+			if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+				return BLK_STS_IOERR;
+			return BLK_STS_RESOURCE; /* try again later */
+		}
 	}
 
-	return true;
+	return 0;
 }
 
-static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
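
[Editor's note, sketch only: the hunk above converts the readiness check to
blk_status_t so that non-Connect commands on a not-yet-live queue are either
retried later or failed fast while the controller is reconnecting. The small
standalone model below mirrors that decision; only the enum value names are
taken from the diff, everything else is invented for illustration.]

#include <stdio.h>

enum blk_status { BLK_STS_OK = 0, BLK_STS_RESOURCE, BLK_STS_IOERR };
enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING };

/* Disposition for a non-Connect command on a queue that is not yet live. */
static enum blk_status not_live_disposition(enum ctrl_state state)
{
	/*
	 * While reconnecting the transport may stay down for a long time, so
	 * fail the request now and let upper layers fail over; otherwise ask
	 * the block layer to retry later.
	 */
	return state == CTRL_RECONNECTING ? BLK_STS_IOERR : BLK_STS_RESOURCE;
}

int main(void)
{
	printf("live: %d, reconnecting: %d\n",
	       not_live_disposition(CTRL_LIVE),
	       not_live_disposition(CTRL_RECONNECTING));
	return 0;
}
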
@@ -1459,27 +1430,29 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_command *c = sqe->data;
 	bool flush = false;
 	struct ib_device *dev;
-	int ret;
+	blk_status_t ret;
+	int err;
 
 	WARN_ON_ONCE(rq->tag < 0);
 
-	if (!nvme_rdma_queue_is_ready(queue, rq))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+	ret = nvme_rdma_queue_is_ready(queue, rq);
+	if (unlikely(ret))
+		return ret;
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
 	ret = nvme_setup_cmd(ns, rq, c);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		return ret;
 
 	blk_mq_start_request(rq);
 
-	ret = nvme_rdma_map_data(queue, rq, c);
-	if (ret < 0) {
+	err = nvme_rdma_map_data(queue, rq, c);
+	if (err < 0) {
 		dev_err(queue->ctrl->ctrl.device,
-			     "Failed to map data (%d)\n", ret);
+			     "Failed to map data (%d)\n", err);
 		nvme_cleanup_cmd(rq);
 		goto err;
 	}
@@ -1489,17 +1462,18 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
-	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
 			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-	if (ret) {
+	if (err) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 err:
-	return (ret == -ENOMEM || ret == -EAGAIN) ?
-		BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+	if (err == -ENOMEM || err == -EAGAIN)
+		return BLK_STS_RESOURCE;
+	return BLK_STS_IOERR;
 }
 
 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
@@ -1509,7 +1483,6 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	struct ib_wc wc;
 	int found = 0;
 
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		struct ib_cqe *cqe = wc.wr_cqe;
 
@@ -1546,8 +1519,8 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
 static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 	.queue_rq	= nvme_rdma_queue_rq,
 	.complete	= nvme_rdma_complete_rq,
-	.init_request	= nvme_rdma_init_admin_request,
-	.exit_request	= nvme_rdma_exit_admin_request,
+	.init_request	= nvme_rdma_init_request,
+	.exit_request	= nvme_rdma_exit_request,
 	.reinit_request	= nvme_rdma_reinit_request,
 	.init_hctx	= nvme_rdma_init_admin_hctx,
 	.timeout	= nvme_rdma_timeout,
@@ -1557,7 +1530,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
 	int error;
 
-	error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+	error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
 	if (error)
 		return error;
 
@@ -1658,7 +1631,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
 		nvme_rdma_free_io_queues(ctrl);
 	}
 
-	if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
+	if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags))
 		nvme_shutdown_ctrl(&ctrl->ctrl);
 
 	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1695,7 +1668,7 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
 		return -EBUSY;
 
-	if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
+	if (!queue_work(nvme_wq, &ctrl->delete_work))
 		return -EBUSY;
 
 	return 0;
@@ -1729,8 +1702,8 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
-	struct nvme_rdma_ctrl *ctrl = container_of(work,
-					struct nvme_rdma_ctrl, reset_work);
+	struct nvme_rdma_ctrl *ctrl =
+		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
 	int ret;
 	bool changed;
 
@@ -1771,22 +1744,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 del_dead_ctrl:
 	/* Deleting this dead controller... */
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-	WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
-}
-
-static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-		return -EBUSY;
-
-	if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
-		return -EBUSY;
-
-	flush_work(&ctrl->reset_work);
-
-	return 0;
+	WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work));
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1796,11 +1754,9 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
-	.reset_ctrl		= nvme_rdma_reset_ctrl,
 	.free_ctrl		= nvme_rdma_free_ctrl,
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_del_ctrl,
-	.get_subsysnqn		= nvmf_get_subsysnqn,
 	.get_address		= nvmf_get_address,
 };
 
@@ -1905,8 +1861,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 			nvme_rdma_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
 	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
-	INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
-	spin_lock_init(&ctrl->lock);
+	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
 	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
@@ -1925,12 +1880,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	/* sanity check icdoff */
 	if (ctrl->ctrl.icdoff) {
 		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+		ret = -EINVAL;
 		goto out_remove_admin_queue;
 	}
 
 	/* sanity check keyed sgls */
 	if (!(ctrl->ctrl.sgls & (1 << 20))) {
 		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
+		ret = -EINVAL;
 		goto out_remove_admin_queue;
 	}
 
@@ -2019,7 +1976,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
 	}
 	mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-	flush_workqueue(nvme_rdma_wq);
+	flush_workqueue(nvme_wq);
 }
 
 static struct ib_client nvme_rdma_ib_client = {
@@ -2032,13 +1989,9 @@ static int __init nvme_rdma_init_module(void)
 {
 	int ret;
 
-	nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
-	if (!nvme_rdma_wq)
-		return -ENOMEM;
-
 	ret = ib_register_client(&nvme_rdma_ib_client);
 	if (ret)
-		goto err_destroy_wq;
+		return ret;
 
 	ret = nvmf_register_transport(&nvme_rdma_transport);
 	if (ret)
@@ -2048,8 +2001,6 @@ static int __init nvme_rdma_init_module(void)
 
 err_unreg_client:
 	ib_unregister_client(&nvme_rdma_ib_client);
-err_destroy_wq:
-	destroy_workqueue(nvme_rdma_wq);
 	return ret;
 }
 
@@ -2057,7 +2008,6 @@ static void __exit nvme_rdma_cleanup_module(void)
 {
 	nvmf_unregister_transport(&nvme_rdma_transport);
 	ib_unregister_client(&nvme_rdma_ib_client);
-	destroy_workqueue(nvme_rdma_wq);
 }
 
 module_init(nvme_rdma_init_module);
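
[Editor's note, sketch only: with the driver-private workqueue replaced by the
shared nvme_wq, module init/exit reduce to registering the IB client and the
fabrics transport. The sketch below models that simplified error unwinding;
the functions are invented stand-ins, not kernel APIs.]

#include <stdio.h>

static int register_client(void)    { return 0; }
static void unregister_client(void) { }
static int register_transport(void) { return 0; }

static int init_sketch(void)
{
	int ret;

	ret = register_client();
	if (ret)
		return ret;		/* no private workqueue left to unwind */

	ret = register_transport();
	if (ret)
		unregister_client();
	return ret;
}

int main(void)
{
	printf("init: %d\n", init_sketch());
	return 0;
}
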
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
deleted file mode 100644
index 1f7671e..0000000
--- a/drivers/nvme/host/scsi.c
+++ /dev/null
@@ -1,2460 +0,0 @@
-/*
- * NVM Express device driver
- * Copyright (c) 2011-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-/*
- * Refer to the SCSI-NVMe Translation spec for details on how
- * each command is translated.
- */
-
-#include <linux/bio.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-#include <linux/compat.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kdev_t.h>
-#include <linux/kthread.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/poison.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <asm/unaligned.h>
-#include <scsi/sg.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_request.h>
-
-#include "nvme.h"
-
-static int sg_version_num = 30534;	/* 2 digits for each component */
-
-/* VPD Page Codes */
-#define VPD_SUPPORTED_PAGES				0x00
-#define VPD_SERIAL_NUMBER				0x80
-#define VPD_DEVICE_IDENTIFIERS				0x83
-#define VPD_EXTENDED_INQUIRY				0x86
-#define VPD_BLOCK_LIMITS				0xB0
-#define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1
-
-/* format unit paramter list offsets */
-#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
-#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
-#define FORMAT_UNIT_PROT_INT_OFFSET			3
-#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
-#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07
-
-/* Misc. defines */
-#define FIXED_SENSE_DATA				0x70
-#define DESC_FORMAT_SENSE_DATA				0x72
-#define FIXED_SENSE_DATA_ADD_LENGTH			10
-#define LUN_ENTRY_SIZE					8
-#define LUN_DATA_HEADER_SIZE				8
-#define ALL_LUNS_RETURNED				0x02
-#define ALL_WELL_KNOWN_LUNS_RETURNED			0x01
-#define RESTRICTED_LUNS_RETURNED			0x00
-#define DOWNLOAD_SAVE_ACTIVATE				0x05
-#define DOWNLOAD_SAVE_DEFER_ACTIVATE			0x0E
-#define ACTIVATE_DEFERRED_MICROCODE			0x0F
-#define FORMAT_UNIT_IMMED_MASK				0x2
-#define FORMAT_UNIT_IMMED_OFFSET			1
-#define KELVIN_TEMP_FACTOR				273
-#define FIXED_FMT_SENSE_DATA_SIZE			18
-#define DESC_FMT_SENSE_DATA_SIZE			8
-
-/* SCSI/NVMe defines and bit masks */
-#define INQ_STANDARD_INQUIRY_PAGE			0x00
-#define INQ_SUPPORTED_VPD_PAGES_PAGE			0x00
-#define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
-#define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
-#define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
-#define INQ_BDEV_LIMITS_PAGE				0xB0
-#define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
-#define INQ_SERIAL_NUMBER_LENGTH			0x14
-#define INQ_NUM_SUPPORTED_VPD_PAGES			6
-#define VERSION_SPC_4					0x06
-#define ACA_UNSUPPORTED					0
-#define STANDARD_INQUIRY_LENGTH				36
-#define ADDITIONAL_STD_INQ_LENGTH			31
-#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH		0x3C
-#define RESERVED_FIELD					0
-
-/* Mode Sense/Select defines */
-#define MODE_PAGE_INFO_EXCEP				0x1C
-#define MODE_PAGE_CACHING				0x08
-#define MODE_PAGE_CONTROL				0x0A
-#define MODE_PAGE_POWER_CONDITION			0x1A
-#define MODE_PAGE_RETURN_ALL				0x3F
-#define MODE_PAGE_BLK_DES_LEN				0x08
-#define MODE_PAGE_LLBAA_BLK_DES_LEN			0x10
-#define MODE_PAGE_CACHING_LEN				0x14
-#define MODE_PAGE_CONTROL_LEN				0x0C
-#define MODE_PAGE_POW_CND_LEN				0x28
-#define MODE_PAGE_INF_EXC_LEN				0x0C
-#define MODE_PAGE_ALL_LEN				0x54
-#define MODE_SENSE6_MPH_SIZE				4
-#define MODE_SENSE_PAGE_CONTROL_MASK			0xC0
-#define MODE_SENSE_PAGE_CODE_OFFSET			2
-#define MODE_SENSE_PAGE_CODE_MASK			0x3F
-#define MODE_SENSE_LLBAA_MASK				0x10
-#define MODE_SENSE_LLBAA_SHIFT				4
-#define MODE_SENSE_DBD_MASK				8
-#define MODE_SENSE_DBD_SHIFT				3
-#define MODE_SENSE10_MPH_SIZE				8
-#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
-#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
-#define MODE_SELECT_6_BD_OFFSET				3
-#define MODE_SELECT_10_BD_OFFSET			6
-#define MODE_SELECT_10_LLBAA_OFFSET			4
-#define MODE_SELECT_10_LLBAA_MASK			1
-#define MODE_SELECT_6_MPH_SIZE				4
-#define MODE_SELECT_10_MPH_SIZE				8
-#define CACHING_MODE_PAGE_WCE_MASK			0x04
-#define MODE_SENSE_BLK_DESC_ENABLED			0
-#define MODE_SENSE_BLK_DESC_COUNT			1
-#define MODE_SELECT_PAGE_CODE_MASK			0x3F
-#define SHORT_DESC_BLOCK				8
-#define LONG_DESC_BLOCK					16
-#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
-#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
-#define MODE_PAGE_CACHING_LEN_FIELD			0x12
-#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
-#define MODE_SENSE_PC_CURRENT_VALUES			0
-
-/* Log Sense defines */
-#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE		0x00
-#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH		0x07
-#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE		0x2F
-#define LOG_PAGE_TEMPERATURE_PAGE			0x0D
-#define LOG_SENSE_CDB_SP_NOT_ENABLED			0
-#define LOG_SENSE_CDB_PC_MASK				0xC0
-#define LOG_SENSE_CDB_PC_SHIFT				6
-#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES		1
-#define LOG_SENSE_CDB_PAGE_CODE_MASK			0x3F
-#define REMAINING_INFO_EXCP_PAGE_LENGTH			0x8
-#define LOG_INFO_EXCP_PAGE_LENGTH			0xC
-#define REMAINING_TEMP_PAGE_LENGTH			0xC
-#define LOG_TEMP_PAGE_LENGTH				0x10
-#define LOG_TEMP_UNKNOWN				0xFF
-#define SUPPORTED_LOG_PAGES_PAGE_LENGTH			0x3
-
-/* Read Capacity defines */
-#define READ_CAP_10_RESP_SIZE				8
-#define READ_CAP_16_RESP_SIZE				32
-
-/* NVMe Namespace and Command Defines */
-#define BYTES_TO_DWORDS					4
-#define NVME_MAX_FIRMWARE_SLOT				7
-
-/* Report LUNs defines */
-#define REPORT_LUNS_FIRST_LUN_OFFSET			8
-
-/* SCSI ADDITIONAL SENSE Codes */
-
-#define SCSI_ASC_NO_SENSE				0x00
-#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT		0x03
-#define SCSI_ASC_LUN_NOT_READY				0x04
-#define SCSI_ASC_WARNING				0x0B
-#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED		0x10
-#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED		0x10
-#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED		0x10
-#define SCSI_ASC_UNRECOVERED_READ_ERROR			0x11
-#define SCSI_ASC_MISCOMPARE_DURING_VERIFY		0x1D
-#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID		0x20
-#define SCSI_ASC_ILLEGAL_COMMAND			0x20
-#define SCSI_ASC_ILLEGAL_BLOCK				0x21
-#define SCSI_ASC_INVALID_CDB				0x24
-#define SCSI_ASC_INVALID_LUN				0x25
-#define SCSI_ASC_INVALID_PARAMETER			0x26
-#define SCSI_ASC_FORMAT_COMMAND_FAILED			0x31
-#define SCSI_ASC_INTERNAL_TARGET_FAILURE		0x44
-
-/* SCSI ADDITIONAL SENSE Code Qualifiers */
-
-#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE			0x00
-#define SCSI_ASCQ_FORMAT_COMMAND_FAILED			0x01
-#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED		0x01
-#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED		0x02
-#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED		0x03
-#define SCSI_ASCQ_FORMAT_IN_PROGRESS			0x04
-#define SCSI_ASCQ_POWER_LOSS_EXPECTED			0x08
-#define SCSI_ASCQ_INVALID_LUN_ID			0x09
-
-/* copied from drivers/usb/gadget/function/storage_common.h */
-static inline u32 get_unaligned_be24(u8 *buf)
-{
-	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
-/* Struct to gather data that needs to be extracted from a SCSI CDB.
-   Not conforming to any particular CDB variant, but compatible with all. */
-
-struct nvme_trans_io_cdb {
-	u8 fua;
-	u8 prot_info;
-	u64 lba;
-	u32 xfer_len;
-};
-
-
-/* Internal Helper Functions */
-
-
-/* Copy data to userspace memory */
-
-static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
-								unsigned long n)
-{
-	int i;
-	void *index = from;
-	size_t remaining = n;
-	size_t xfer_len;
-
-	if (hdr->iovec_count > 0) {
-		struct sg_iovec sgl;
-
-		for (i = 0; i < hdr->iovec_count; i++) {
-			if (copy_from_user(&sgl, hdr->dxferp +
-						i * sizeof(struct sg_iovec),
-						sizeof(struct sg_iovec)))
-				return -EFAULT;
-			xfer_len = min(remaining, sgl.iov_len);
-			if (copy_to_user(sgl.iov_base, index, xfer_len))
-				return -EFAULT;
-
-			index += xfer_len;
-			remaining -= xfer_len;
-			if (remaining == 0)
-				break;
-		}
-		return 0;
-	}
-
-	if (copy_to_user(hdr->dxferp, from, n))
-		return -EFAULT;
-	return 0;
-}
-
-/* Copy data from userspace memory */
-
-static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
-								unsigned long n)
-{
-	int i;
-	void *index = to;
-	size_t remaining = n;
-	size_t xfer_len;
-
-	if (hdr->iovec_count > 0) {
-		struct sg_iovec sgl;
-
-		for (i = 0; i < hdr->iovec_count; i++) {
-			if (copy_from_user(&sgl, hdr->dxferp +
-						i * sizeof(struct sg_iovec),
-						sizeof(struct sg_iovec)))
-				return -EFAULT;
-			xfer_len = min(remaining, sgl.iov_len);
-			if (copy_from_user(index, sgl.iov_base, xfer_len))
-				return -EFAULT;
-			index += xfer_len;
-			remaining -= xfer_len;
-			if (remaining == 0)
-				break;
-		}
-		return 0;
-	}
-
-	if (copy_from_user(to, hdr->dxferp, n))
-		return -EFAULT;
-	return 0;
-}
-
-/* Status/Sense Buffer Writeback */
-
-static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
-				 u8 asc, u8 ascq)
-{
-	u8 xfer_len;
-	u8 resp[DESC_FMT_SENSE_DATA_SIZE];
-
-	if (scsi_status_is_good(status)) {
-		hdr->status = SAM_STAT_GOOD;
-		hdr->masked_status = GOOD;
-		hdr->host_status = DID_OK;
-		hdr->driver_status = DRIVER_OK;
-		hdr->sb_len_wr = 0;
-	} else {
-		hdr->status = status;
-		hdr->masked_status = status >> 1;
-		hdr->host_status = DID_OK;
-		hdr->driver_status = DRIVER_OK;
-
-		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
-		resp[0] = DESC_FORMAT_SENSE_DATA;
-		resp[1] = sense_key;
-		resp[2] = asc;
-		resp[3] = ascq;
-
-		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
-		hdr->sb_len_wr = xfer_len;
-		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-/*
- * Take a status code from a lowlevel routine, and if it was a positive NVMe
- * error code update the sense data based on it.  In either case the passed
- * in value is returned again, unless an -EFAULT from copy_to_user overrides
- * it.
- */
-static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
-{
-	u8 status, sense_key, asc, ascq;
-	int res;
-
-	/* For non-nvme (Linux) errors, simply return the error code */
-	if (nvme_sc < 0)
-		return nvme_sc;
-
-	/* Mask DNR, More, and reserved fields */
-	switch (nvme_sc & 0x7FF) {
-	/* Generic Command Status */
-	case NVME_SC_SUCCESS:
-		status = SAM_STAT_GOOD;
-		sense_key = NO_SENSE;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_INVALID_OPCODE:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_ILLEGAL_COMMAND;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_INVALID_FIELD:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_INVALID_CDB;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_DATA_XFER_ERROR:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_POWER_LOSS:
-		status = SAM_STAT_TASK_ABORTED;
-		sense_key = ABORTED_COMMAND;
-		asc = SCSI_ASC_WARNING;
-		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
-		break;
-	case NVME_SC_INTERNAL:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = HARDWARE_ERROR;
-		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_ABORT_REQ:
-		status = SAM_STAT_TASK_ABORTED;
-		sense_key = ABORTED_COMMAND;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_ABORT_QUEUE:
-		status = SAM_STAT_TASK_ABORTED;
-		sense_key = ABORTED_COMMAND;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_FUSED_FAIL:
-		status = SAM_STAT_TASK_ABORTED;
-		sense_key = ABORTED_COMMAND;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_FUSED_MISSING:
-		status = SAM_STAT_TASK_ABORTED;
-		sense_key = ABORTED_COMMAND;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_INVALID_NS:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
-		ascq = SCSI_ASCQ_INVALID_LUN_ID;
-		break;
-	case NVME_SC_LBA_RANGE:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_ILLEGAL_BLOCK;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_CAP_EXCEEDED:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_NS_NOT_READY:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = NOT_READY;
-		asc = SCSI_ASC_LUN_NOT_READY;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-
-	/* Command Specific Status */
-	case NVME_SC_INVALID_FORMAT:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
-		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
-		break;
-	case NVME_SC_BAD_ATTRIBUTES:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_INVALID_CDB;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-
-	/* Media Errors */
-	case NVME_SC_WRITE_FAULT:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_READ_ERROR:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_GUARD_CHECK:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
-		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
-		break;
-	case NVME_SC_APPTAG_CHECK:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
-		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
-		break;
-	case NVME_SC_REFTAG_CHECK:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MEDIUM_ERROR;
-		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
-		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
-		break;
-	case NVME_SC_COMPARE_FAILED:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = MISCOMPARE;
-		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	case NVME_SC_ACCESS_DENIED:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
-		ascq = SCSI_ASCQ_INVALID_LUN_ID;
-		break;
-
-	/* Unspecified/Default */
-	case NVME_SC_CMDID_CONFLICT:
-	case NVME_SC_CMD_SEQ_ERROR:
-	case NVME_SC_CQ_INVALID:
-	case NVME_SC_QID_INVALID:
-	case NVME_SC_QUEUE_SIZE:
-	case NVME_SC_ABORT_LIMIT:
-	case NVME_SC_ABORT_MISSING:
-	case NVME_SC_ASYNC_LIMIT:
-	case NVME_SC_FIRMWARE_SLOT:
-	case NVME_SC_FIRMWARE_IMAGE:
-	case NVME_SC_INVALID_VECTOR:
-	case NVME_SC_INVALID_LOG_PAGE:
-	default:
-		status = SAM_STAT_CHECK_CONDITION;
-		sense_key = ILLEGAL_REQUEST;
-		asc = SCSI_ASC_NO_SENSE;
-		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		break;
-	}
-
-	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
-	return res ? res : nvme_sc;
-}
-
-/* INQUIRY Helper Functions */
-
-static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *inq_response,
-					int alloc_len)
-{
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	struct nvme_id_ns *id_ns;
-	int res;
-	int nvme_sc;
-	int xfer_len;
-	u8 resp_data_format = 0x02;
-	u8 protect;
-	u8 cmdque = 0x01 << 1;
-	u8 fw_offset = sizeof(ctrl->firmware_rev);
-
-	/* nvme ns identify - use DPS value for PROTECT field */
-	nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	if (id_ns->dps)
-		protect = 0x01;
-	else
-		protect = 0;
-	kfree(id_ns);
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
-	inq_response[2] = VERSION_SPC_4;
-	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
-	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
-	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
-	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
-	strncpy(&inq_response[8], "NVMe    ", 8);
-	strncpy(&inq_response[16], ctrl->model, 16);
-
-	while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
-		fw_offset--;
-	fw_offset -= 4;
-	strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);
-
-	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
-	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-}
-
-static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *inq_response,
-					int alloc_len)
-{
-	int xfer_len;
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
-	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
-	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
-	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
-	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
-	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
-	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
-	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
-	inq_response[9] = INQ_BDEV_LIMITS_PAGE;
-
-	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
-	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-}
-
-static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *inq_response,
-					int alloc_len)
-{
-	int xfer_len;
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
-	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
-	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
-	strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);
-
-	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
-	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-}
-
-static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-		u8 *inq_response, int alloc_len)
-{
-	struct nvme_id_ns *id_ns;
-	int nvme_sc, res;
-	size_t len;
-	void *eui;
-
-	nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	eui = id_ns->eui64;
-	len = sizeof(id_ns->eui64);
-
-	if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
-		if (bitmap_empty(eui, len * 8)) {
-			eui = id_ns->nguid;
-			len = sizeof(id_ns->nguid);
-		}
-	}
-
-	if (bitmap_empty(eui, len * 8)) {
-		res = -EOPNOTSUPP;
-		goto out_free_id;
-	}
-
-	memset(inq_response, 0, alloc_len);
-	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
-	inq_response[3] = 4 + len; /* Page Length */
-
-	/* Designation Descriptor start */
-	inq_response[4] = 0x01;	/* Proto ID=0h | Code set=1h */
-	inq_response[5] = 0x02;	/* PIV=0b | Asso=00b | Designator Type=2h */
-	inq_response[6] = 0x00;	/* Rsvd */
-	inq_response[7] = len;	/* Designator Length */
-	memcpy(&inq_response[8], eui, len);
-
-	res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
-out_free_id:
-	kfree(id_ns);
-	return res;
-}
-
-static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
-		struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
-{
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	struct nvme_id_ctrl *id_ctrl;
-	int nvme_sc, res;
-
-	if (alloc_len < 72) {
-		return nvme_trans_completion(hdr,
-				SAM_STAT_CHECK_CONDITION,
-				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	}
-
-	nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	memset(inq_response, 0, alloc_len);
-	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
-	inq_response[3] = 0x48;	/* Page Length */
-
-	/* Designation Descriptor start */
-	inq_response[4] = 0x03;	/* Proto ID=0h | Code set=3h */
-	inq_response[5] = 0x08;	/* PIV=0b | Asso=00b | Designator Type=8h */
-	inq_response[6] = 0x00;	/* Rsvd */
-	inq_response[7] = 0x44;	/* Designator Length */
-
-	sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
-	memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
-	sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
-	memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));
-
-	res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
-	kfree(id_ctrl);
-	return res;
-}
-
-static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 *resp, int alloc_len)
-{
-	int res;
-
-	if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
-		res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
-		if (res != -EOPNOTSUPP)
-			return res;
-	}
-
-	return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len);
-}
-
-static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					int alloc_len)
-{
-	u8 *inq_response;
-	int res;
-	int nvme_sc;
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	struct nvme_id_ctrl *id_ctrl;
-	struct nvme_id_ns *id_ns;
-	int xfer_len;
-	u8 microcode = 0x80;
-	u8 spt;
-	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
-	u8 grd_chk, app_chk, ref_chk, protect;
-	u8 uask_sup = 0x20;
-	u8 v_sup;
-	u8 luiclr = 0x01;
-
-	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
-	if (inq_response == NULL)
-		return -ENOMEM;
-
-	nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		goto out_free_inq;
-
-	spt = spt_lut[id_ns->dpc & 0x07] << 3;
-	if (id_ns->dps)
-		protect = 0x01;
-	else
-		protect = 0;
-	kfree(id_ns);
-
-	grd_chk = protect << 2;
-	app_chk = protect << 1;
-	ref_chk = protect;
-
-	nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		goto out_free_inq;
-
-	v_sup = id_ctrl->vwc;
-	kfree(id_ctrl);
-
-	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
-	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
-	inq_response[2] = 0x00;    /* Page Length MSB */
-	inq_response[3] = 0x3C;    /* Page Length LSB */
-	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
-	inq_response[5] = uask_sup;
-	inq_response[6] = v_sup;
-	inq_response[7] = luiclr;
-	inq_response[8] = 0;
-	inq_response[9] = 0;
-
-	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-
- out_free_inq:
-	kfree(inq_response);
-	return res;
-}
-
-static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 *inq_response, int alloc_len)
-{
-	__be32 max_sectors = cpu_to_be32(
-		nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
-	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
-	__be32 discard_desc_count = cpu_to_be32(0x100);
-
-	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
-	inq_response[1] = VPD_BLOCK_LIMITS;
-	inq_response[3] = 0x3c; /* Page Length */
-	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
-	memcpy(&inq_response[20], &max_discard, sizeof(u32));
-
-	if (max_discard)
-		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
-
-	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
-}
-
-static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					int alloc_len)
-{
-	u8 *inq_response;
-	int res;
-	int xfer_len;
-
-	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
-	if (inq_response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
-
-	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
-	inq_response[2] = 0x00;    /* Page Length MSB */
-	inq_response[3] = 0x3C;    /* Page Length LSB */
-	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
-	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
-	inq_response[6] = 0x00;    /* Form Factor */
-
-	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
-
-	kfree(inq_response);
- out_mem:
-	return res;
-}
-
-/* LOG SENSE Helper Functions */
-
-static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					int alloc_len)
-{
-	int res;
-	int xfer_len;
-	u8 *log_response;
-
-	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
-	if (log_response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
-
-	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
-	/* Subpage=0x00, Page Length MSB=0 */
-	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
-	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
-	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
-	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
-
-	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
-
-	kfree(log_response);
- out_mem:
-	return res;
-}
-
-static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, int alloc_len)
-{
-	int res;
-	int xfer_len;
-	u8 *log_response;
-	struct nvme_smart_log *smart_log;
-	u8 temp_c;
-	u16 temp_k;
-
-	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
-	if (log_response == NULL)
-		return -ENOMEM;
-
-	res = nvme_get_log_page(ns->ctrl, &smart_log);
-	if (res < 0)
-		goto out_free_response;
-
-	if (res != NVME_SC_SUCCESS) {
-		temp_c = LOG_TEMP_UNKNOWN;
-	} else {
-		temp_k = (smart_log->temperature[1] << 8) +
-				(smart_log->temperature[0]);
-		temp_c = temp_k - KELVIN_TEMP_FACTOR;
-	}
-	kfree(smart_log);
-
-	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
-	/* Subpage=0x00, Page Length MSB=0 */
-	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
-	/* Informational Exceptions Log Parameter 1 Start */
-	/* Parameter Code=0x0000 bytes 4,5 */
-	log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
-	log_response[7] = 0x04; /* PARAMETER LENGTH */
-	/* Add sense Code and qualifier = 0x00 each */
-	/* Use Temperature from NVMe Get Log Page, convert to C from K */
-	log_response[10] = temp_c;
-
-	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
-
- out_free_response:
-	kfree(log_response);
-	return res;
-}
-
-static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					int alloc_len)
-{
-	int res;
-	int xfer_len;
-	u8 *log_response;
-	struct nvme_smart_log *smart_log;
-	u32 feature_resp;
-	u8 temp_c_cur, temp_c_thresh;
-	u16 temp_k;
-
-	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
-	if (log_response == NULL)
-		return -ENOMEM;
-
-	res = nvme_get_log_page(ns->ctrl, &smart_log);
-	if (res < 0)
-		goto out_free_response;
-
-	if (res != NVME_SC_SUCCESS) {
-		temp_c_cur = LOG_TEMP_UNKNOWN;
-	} else {
-		temp_k = (smart_log->temperature[1] << 8) +
-				(smart_log->temperature[0]);
-		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
-	}
-	kfree(smart_log);
-
-	/* Get Features for Temp Threshold */
-	res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
-								&feature_resp);
-	if (res != NVME_SC_SUCCESS)
-		temp_c_thresh = LOG_TEMP_UNKNOWN;
-	else
-		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
-
-	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
-	/* Subpage=0x00, Page Length MSB=0 */
-	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
-	/* Temperature Log Parameter 1 (Temperature) Start */
-	/* Parameter Code = 0x0000 */
-	log_response[6] = 0x01;		/* Format and Linking = 01b */
-	log_response[7] = 0x02;		/* Parameter Length */
-	/* Use Temperature from NVMe Get Log Page, convert to C from K */
-	log_response[9] = temp_c_cur;
-	/* Temperature Log Parameter 2 (Reference Temperature) Start */
-	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
-	log_response[12] = 0x01;	/* Format and Linking = 01b */
-	log_response[13] = 0x02;	/* Parameter Length */
-	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
-	log_response[15] = temp_c_thresh;
-
-	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
-	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
-
- out_free_response:
-	kfree(log_response);
-	return res;
-}
-
-/* MODE SENSE Helper Functions */
-
-static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
-					u16 mode_data_length, u16 blk_desc_len)
-{
-	/* Quick check to make sure I don't stomp on my own memory... */
-	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
-		return -EINVAL;
-
-	if (cdb10) {
-		resp[0] = (mode_data_length & 0xFF00) >> 8;
-		resp[1] = (mode_data_length & 0x00FF);
-		resp[3] = 0x10 /* DPOFUA */;
-		resp[4] = llbaa;
-		resp[5] = RESERVED_FIELD;
-		resp[6] = (blk_desc_len & 0xFF00) >> 8;
-		resp[7] = (blk_desc_len & 0x00FF);
-	} else {
-		resp[0] = (mode_data_length & 0x00FF);
-		resp[2] = 0x10 /* DPOFUA */;
-		resp[3] = (blk_desc_len & 0x00FF);
-	}
-
-	return 0;
-}
-
-static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-				    u8 *resp, int len, u8 llbaa)
-{
-	int res;
-	int nvme_sc;
-	struct nvme_id_ns *id_ns;
-	u8 flbas;
-	u32 lba_length;
-
-	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
-		return -EINVAL;
-	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
-		return -EINVAL;
-
-	nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	flbas = (id_ns->flbas) & 0x0F;
-	lba_length = (1 << (id_ns->lbaf[flbas].ds));
-
-	if (llbaa == 0) {
-		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
-		/* Byte 4 is reserved */
-		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
-
-		memcpy(resp, &tmp_cap, sizeof(u32));
-		memcpy(&resp[4], &tmp_len, sizeof(u32));
-	} else {
-		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
-		__be32 tmp_len = cpu_to_be32(lba_length);
-
-		memcpy(resp, &tmp_cap, sizeof(u64));
-		/* Bytes 8, 9, 10, 11 are reserved */
-		memcpy(&resp[12], &tmp_len, sizeof(u32));
-	}
-
-	kfree(id_ns);
-	return res;
-}
-
-static int nvme_trans_fill_control_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *resp,
-					int len)
-{
-	if (len < MODE_PAGE_CONTROL_LEN)
-		return -EINVAL;
-
-	resp[0] = MODE_PAGE_CONTROL;
-	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
-	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
-				 * D_SENSE=1, GLTSD=1, RLEC=0 */
-	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
-	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
-	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
-	/* resp[6] and [7] are obsolete, thus zero */
-	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
-	resp[9] = 0xFF;
-	/* Bytes 10,11: Extended selftest completion time = 0x0000 */
-
-	return 0;
-}
-
-static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr,
-					u8 *resp, int len)
-{
-	int res = 0;
-	int nvme_sc;
-	u32 feature_resp;
-	u8 vwc;
-
-	if (len < MODE_PAGE_CACHING_LEN)
-		return -EINVAL;
-
-	nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
-								&feature_resp);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	vwc = feature_resp & 0x00000001;
-
-	resp[0] = MODE_PAGE_CACHING;
-	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
-	resp[2] = vwc << 2;
-	return 0;
-}
-
-static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *resp,
-					int len)
-{
-	if (len < MODE_PAGE_POW_CND_LEN)
-		return -EINVAL;
-
-	resp[0] = MODE_PAGE_POWER_CONDITION;
-	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
-	/* All other bytes are zero */
-
-	return 0;
-}
-
-static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *resp,
-					int len)
-{
-	if (len < MODE_PAGE_INF_EXC_LEN)
-		return -EINVAL;
-
-	resp[0] = MODE_PAGE_INFO_EXCEP;
-	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
-	resp[2] = 0x88;
-	/* All other bytes are zero */
-
-	return 0;
-}
-
-static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-				     u8 *resp, int len)
-{
-	int res;
-	u16 mode_pages_offset_1 = 0;
-	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
-
-	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
-	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
-	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
-
-	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
-					MODE_PAGE_CACHING_LEN);
-	if (res)
-		return res;
-	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
-					MODE_PAGE_CONTROL_LEN);
-	if (res)
-		return res;
-	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
-					MODE_PAGE_POW_CND_LEN);
-	if (res)
-		return res;
-	return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
-					MODE_PAGE_INF_EXC_LEN);
-}
-
-static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
-{
-	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
-		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
-		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
-	} else {
-		return 0;
-	}
-}
-
-static int nvme_trans_mode_page_create(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr, u8 *cmd,
-					u16 alloc_len, u8 cdb10,
-					int (*mode_page_fill_func)
-					(struct nvme_ns *,
-					struct sg_io_hdr *hdr, u8 *, int),
-					u16 mode_pages_tot_len)
-{
-	int res;
-	int xfer_len;
-	u8 *response;
-	u8 dbd, llbaa;
-	u16 resp_size;
-	int mph_size;
-	u16 mode_pages_offset_1;
-	u16 blk_desc_len, blk_desc_offset, mode_data_length;
-
-	dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT;
-	llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT;
-	mph_size = cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE;
-
-	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
-
-	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
-	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
-	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
-
-	blk_desc_offset = mph_size;
-	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
-
-	response = kzalloc(resp_size, GFP_KERNEL);
-	if (response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
-
-	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
-					llbaa, mode_data_length, blk_desc_len);
-	if (res)
-		goto out_free;
-	if (blk_desc_len > 0) {
-		res = nvme_trans_fill_blk_desc(ns, hdr,
-					       &response[blk_desc_offset],
-					       blk_desc_len, llbaa);
-		if (res)
-			goto out_free;
-	}
-	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
-					mode_pages_tot_len);
-	if (res)
-		goto out_free;
-
-	xfer_len = min(alloc_len, resp_size);
-	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
-
- out_free:
-	kfree(response);
- out_mem:
-	return res;
-}
-
-/* Read Capacity Helper Functions */
-
-static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
-								u8 cdb16)
-{
-	u8 flbas;
-	u32 lba_length;
-	u64 rlba;
-	u8 prot_en;
-	u8 p_type_lut[4] = {0, 0, 1, 2};
-	__be64 tmp_rlba;
-	__be32 tmp_rlba_32;
-	__be32 tmp_len;
-
-	flbas = (id_ns->flbas) & 0x0F;
-	lba_length = (1 << (id_ns->lbaf[flbas].ds));
-	rlba = le64_to_cpup(&id_ns->nsze) - 1;
-	(id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);
-
-	if (!cdb16) {
-		if (rlba > 0xFFFFFFFF)
-			rlba = 0xFFFFFFFF;
-		tmp_rlba_32 = cpu_to_be32(rlba);
-		tmp_len = cpu_to_be32(lba_length);
-		memcpy(response, &tmp_rlba_32, sizeof(u32));
-		memcpy(&response[4], &tmp_len, sizeof(u32));
-	} else {
-		tmp_rlba = cpu_to_be64(rlba);
-		tmp_len = cpu_to_be32(lba_length);
-		memcpy(response, &tmp_rlba, sizeof(u64));
-		memcpy(&response[8], &tmp_len, sizeof(u32));
-		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
-		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
-		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
-		/* Bytes 16-31 - Reserved */
-	}
-}
-
-/* Start Stop Unit Helper Functions */
-
-static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 buffer_id)
-{
-	struct nvme_command c;
-	int nvme_sc;
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_activate_fw;
-	c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);
-
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
-	return nvme_trans_status_code(hdr, nvme_sc);
-}
-
-static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 opcode, u32 tot_len, u32 offset,
-					u8 buffer_id)
-{
-	int nvme_sc;
-	struct nvme_command c;
-
-	if (hdr->iovec_count > 0) {
-		/* Assuming SGL is not allowed for this command */
-		return nvme_trans_completion(hdr,
-					SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST,
-					SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	}
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_download_fw;
-	c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
-	c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
-
-	nvme_sc = nvme_submit_user_cmd(ns->ctrl->admin_q, &c,
-			hdr->dxferp, tot_len, NULL, 0);
-	return nvme_trans_status_code(hdr, nvme_sc);
-}
-
-/* Mode Select Helper Functions */
-
-static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
-						u16 *bd_len, u8 *llbaa)
-{
-	if (cdb10) {
-		/* 10 Byte CDB */
-		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
-			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
-				MODE_SELECT_10_LLBAA_MASK;
-	} else {
-		/* 6 Byte CDB */
-		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
-	}
-}
-
-static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
-					u16 idx, u16 bd_len, u8 llbaa)
-{
-	/* Store block descriptor info if a FORMAT UNIT comes later */
-	/* TODO Saving 1st BD info; what to do if multiple BD received? */
-	if (llbaa == 0) {
-		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
-		ns->mode_select_num_blocks =
-				(parm_list[idx + 1] << 16) +
-				(parm_list[idx + 2] << 8) +
-				(parm_list[idx + 3]);
-
-		ns->mode_select_block_len =
-				(parm_list[idx + 5] << 16) +
-				(parm_list[idx + 6] << 8) +
-				(parm_list[idx + 7]);
-	} else {
-		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
-		ns->mode_select_num_blocks =
-				(((u64)parm_list[idx + 0]) << 56) +
-				(((u64)parm_list[idx + 1]) << 48) +
-				(((u64)parm_list[idx + 2]) << 40) +
-				(((u64)parm_list[idx + 3]) << 32) +
-				(((u64)parm_list[idx + 4]) << 24) +
-				(((u64)parm_list[idx + 5]) << 16) +
-				(((u64)parm_list[idx + 6]) << 8) +
-				((u64)parm_list[idx + 7]);
-
-		ns->mode_select_block_len =
-				(parm_list[idx + 12] << 24) +
-				(parm_list[idx + 13] << 16) +
-				(parm_list[idx + 14] << 8) +
-				(parm_list[idx + 15]);
-	}
-}
-
-static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 *mode_page, u8 page_code)
-{
-	int res = 0;
-	int nvme_sc;
-	unsigned dword11;
-
-	switch (page_code) {
-	case MODE_PAGE_CACHING:
-		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
-		nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
-					    dword11, NULL, 0, NULL);
-		res = nvme_trans_status_code(hdr, nvme_sc);
-		break;
-	case MODE_PAGE_CONTROL:
-		break;
-	case MODE_PAGE_POWER_CONDITION:
-		/* Verify the OS is not trying to set timers */
-		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
-			res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_PARAMETER,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			break;
-		}
-		break;
-	default:
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-
-	return res;
-}
-
-static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-					u8 *cmd, u16 parm_list_len, u8 pf,
-					u8 sp, u8 cdb10)
-{
-	int res;
-	u8 *parm_list;
-	u16 bd_len;
-	u8 llbaa = 0;
-	u16 index, saved_index;
-	u8 page_code;
-	u16 mp_size;
-
-	/* Get parm list from data-in/out buffer */
-	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
-	if (parm_list == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-
-	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
-	if (res)
-		goto out_mem;
-
-	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
-	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
-
-	if (bd_len != 0) {
-		/* Block Descriptors present, parse */
-		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
-		index += bd_len;
-	}
-	saved_index = index;
-
-	/* Multiple mode pages may be present; iterate through all */
-	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
-	do {
-		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
-		mp_size = parm_list[index + 1] + 2;
-		if ((page_code != MODE_PAGE_CACHING) &&
-		    (page_code != MODE_PAGE_CONTROL) &&
-		    (page_code != MODE_PAGE_POWER_CONDITION)) {
-			res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_CDB,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			goto out_mem;
-		}
-		index += mp_size;
-	} while (index < parm_list_len);
-
-	/* In 2nd Iteration, do the NVME Commands */
-	index = saved_index;
-	do {
-		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
-		mp_size = parm_list[index + 1] + 2;
-		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
-								page_code);
-		if (res)
-			break;
-		index += mp_size;
-	} while (index < parm_list_len);
-
- out_mem:
-	kfree(parm_list);
- out:
-	return res;
-}
-
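The parameter-list walk above is deliberately two-pass: the first loop only validates page codes, the second issues the NVMe commands, so a malformed MODE SELECT is rejected before any feature is changed. A standalone sketch of that pattern, with illustrative names and explicit bounds checks in place of the original do/while loops:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Page codes the translation accepted: caching, control, power condition. */
static bool page_supported(uint8_t code)
{
	return code == 0x08 || code == 0x0a || code == 0x1a;
}

static int apply_mode_pages(const uint8_t *list, size_t len)
{
	size_t off;
	int applied = 0;

	/* pass 1: validate every page before touching device state */
	for (off = 0; off + 2 <= len; off += list[off + 1] + 2)
		if (!page_supported(list[off] & 0x3f))
			return -1;

	/* pass 2: here each page would be turned into a device command */
	for (off = 0; off + 2 <= len; off += list[off + 1] + 2)
		applied++;

	return applied;
}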
-/* Format Unit Helper Functions */
-
-static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
-					     struct sg_io_hdr *hdr)
-{
-	int res = 0;
-	int nvme_sc;
-	u8 flbas;
-
-	/*
-	 * SCSI Expects a MODE SELECT would have been issued prior to
-	 * a FORMAT UNIT, and the block size and number would be used
-	 * from the block descriptor in it. If a MODE SELECT had not
-	 * been issued, FORMAT shall use the current values for both.
-	 */
-
-	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
-		struct nvme_id_ns *id_ns;
-
-		nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
-		res = nvme_trans_status_code(hdr, nvme_sc);
-		if (res)
-			return res;
-
-		if (ns->mode_select_num_blocks == 0)
-			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
-		if (ns->mode_select_block_len == 0) {
-			flbas = (id_ns->flbas) & 0x0F;
-			ns->mode_select_block_len =
-						(1 << (id_ns->lbaf[flbas].ds));
-		}
-
-		kfree(id_ns);
-	}
-
-	return 0;
-}
-
-static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
-					u8 format_prot_info, u8 *nvme_pf_code)
-{
-	int res;
-	u8 *parm_list;
-	u8 pf_usage, pf_code;
-
-	parm_list = kmalloc(len, GFP_KERNEL);
-	if (parm_list == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-	res = nvme_trans_copy_from_user(hdr, parm_list, len);
-	if (res)
-		goto out_mem;
-
-	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
-				FORMAT_UNIT_IMMED_MASK) != 0) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out_mem;
-	}
-
-	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
-	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out_mem;
-	}
-	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
-			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
-	pf_code = (pf_usage << 2) | format_prot_info;
-	switch (pf_code) {
-	case 0:
-		*nvme_pf_code = 0;
-		break;
-	case 2:
-		*nvme_pf_code = 1;
-		break;
-	case 3:
-		*nvme_pf_code = 2;
-		break;
-	case 7:
-		*nvme_pf_code = 3;
-		break;
-	default:
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-
- out_mem:
-	kfree(parm_list);
- out:
-	return res;
-}
-
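The switch above encodes the only FMTPINFO/protection-field-usage combinations the translation accepted, and the NVMe protection information setting each one maps to; every other combination was rejected as an invalid CDB. A compact sketch of the same mapping (the function name is illustrative):

/* (pf_usage << 2) | fmtpinfo  ->  NVMe protection information setting */
static int scsi_pf_to_nvme_pi(unsigned int pf_usage, unsigned int fmtpinfo)
{
	switch ((pf_usage << 2) | fmtpinfo) {
	case 0:	return 0;	/* no protection information */
	case 2:	return 1;	/* Type 1 */
	case 3:	return 2;	/* Type 2 */
	case 7:	return 3;	/* Type 3 */
	default: return -1;	/* rejected as an invalid CDB */
	}
}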
-static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-				   u8 prot_info)
-{
-	int res;
-	int nvme_sc;
-	struct nvme_id_ns *id_ns;
-	u8 i;
-	u8 nlbaf;
-	u8 selected_lbaf = 0xFF;
-	u32 cdw10 = 0;
-	struct nvme_command c;
-
-	/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
-	nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;
-
-	nlbaf = id_ns->nlbaf;
-
-	for (i = 0; i < nlbaf; i++) {
-		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
-			selected_lbaf = i;
-			break;
-		}
-	}
-	if (selected_lbaf > 0x0F) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
-				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	}
-	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
-				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	}
-
-	cdw10 |= prot_info << 5;
-	cdw10 |= selected_lbaf & 0x0F;
-	memset(&c, 0, sizeof(c));
-	c.format.opcode = nvme_admin_format_nvm;
-	c.format.nsid = cpu_to_le32(ns->ns_id);
-	c.format.cdw10 = cpu_to_le32(cdw10);
-
-	nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-
-	kfree(id_ns);
-	return res;
-}
-
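For reference, the cdw10 assembly just above packs the Format NVM fields this way: protection information in bits 7:5 and the LBA format index, chosen by matching the MODE SELECT block length against the namespace's LBA formats, in bits 3:0. A one-line standalone equivalent (helper name is illustrative):

#include <stdint.h>

static uint32_t format_nvm_cdw10(uint8_t pi, uint8_t lbaf)
{
	return ((uint32_t)pi << 5) | (lbaf & 0x0f);	/* SES and PIL left at 0 */
}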
-static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
-					struct nvme_trans_io_cdb *cdb_info,
-					u32 max_blocks)
-{
-	/* If using iovecs, send one nvme command per vector */
-	if (hdr->iovec_count > 0)
-		return hdr->iovec_count;
-	else if (cdb_info->xfer_len > max_blocks)
-		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
-	else
-		return 1;
-}
-
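The split count above is a plain ceiling division over the per-command block limit; for example a 1000-block transfer with a 256-block cap becomes ((1000 - 1) / 256) + 1 = 4 NVMe commands. A standalone equivalent:

#include <stdint.h>

static uint32_t num_split_cmds(uint64_t xfer_len, uint32_t max_blocks)
{
	return (uint32_t)((xfer_len - 1) / max_blocks) + 1;	/* ceil(xfer_len / max_blocks) */
}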
-static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
-					struct nvme_trans_io_cdb *cdb_info)
-{
-	u16 control = 0;
-
-	/* When Protection information support is added, implement here */
-
-	if (cdb_info->fua > 0)
-		control |= NVME_RW_FUA;
-
-	return control;
-}
-
-static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
-{
-	int nvme_sc = NVME_SC_SUCCESS;
-	u32 num_cmds;
-	u64 unit_len;
-	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
-	u32 retcode;
-	u32 i = 0;
-	u64 nvme_offset = 0;
-	void __user *next_mapping_addr;
-	struct nvme_command c;
-	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
-	u16 control;
-	u32 max_blocks = queue_max_hw_sectors(ns->queue) >> (ns->lba_shift - 9);
-
-	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
-
-	/*
-	 * This loop handles two cases.
-	 * First, when an SGL is used in the form of an iovec list:
-	 *   - Use iov_base as the next mapping address for the nvme command_id
-	 *   - Use iov_len as the data transfer length for the command.
-	 * Second, when we have a single buffer
-	 *   - If larger than max_blocks, split into chunks, offset
-	 *        each nvme command accordingly.
-	 */
-	for (i = 0; i < num_cmds; i++) {
-		memset(&c, 0, sizeof(c));
-		if (hdr->iovec_count > 0) {
-			struct sg_iovec sgl;
-
-			retcode = copy_from_user(&sgl, hdr->dxferp +
-					i * sizeof(struct sg_iovec),
-					sizeof(struct sg_iovec));
-			if (retcode)
-				return -EFAULT;
-			unit_len = sgl.iov_len;
-			unit_num_blocks = unit_len >> ns->lba_shift;
-			next_mapping_addr = sgl.iov_base;
-		} else {
-			unit_num_blocks = min((u64)max_blocks,
-					(cdb_info->xfer_len - nvme_offset));
-			unit_len = unit_num_blocks << ns->lba_shift;
-			next_mapping_addr = hdr->dxferp +
-					((1 << ns->lba_shift) * nvme_offset);
-		}
-
-		c.rw.opcode = opcode;
-		c.rw.nsid = cpu_to_le32(ns->ns_id);
-		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
-		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
-		control = nvme_trans_io_get_control(ns, cdb_info);
-		c.rw.control = cpu_to_le16(control);
-
-		if (get_capacity(ns->disk) - unit_num_blocks <
-				cdb_info->lba + nvme_offset) {
-			nvme_sc = NVME_SC_LBA_RANGE;
-			break;
-		}
-		nvme_sc = nvme_submit_user_cmd(ns->queue, &c,
-				next_mapping_addr, unit_len, NULL, 0);
-		if (nvme_sc)
-			break;
-
-		nvme_offset += unit_num_blocks;
-	}
-
-	return nvme_trans_status_code(hdr, nvme_sc);
-}
-
-
-/* SCSI Command Translation Functions */
-
-static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
-							u8 *cmd)
-{
-	int res = 0;
-	struct nvme_trans_io_cdb cdb_info = { 0, };
-	u8 opcode = cmd[0];
-	u64 xfer_bytes;
-	u64 sum_iov_len = 0;
-	struct sg_iovec sgl;
-	int i;
-	size_t not_copied;
-
-	/*
-	 * The FUA and WPROTECT fields are not supported in 6-byte CDBs,
-	 * but always in the same place for all others.
-	 */
-	switch (opcode) {
-	case WRITE_6:
-	case READ_6:
-		break;
-	default:
-		cdb_info.fua = cmd[1] & 0x8;
-		cdb_info.prot_info = (cmd[1] & 0xe0) >> 5;
-		if (cdb_info.prot_info && !ns->pi_type) {
-			return nvme_trans_completion(hdr,
-					SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST,
-					SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		}
-	}
-
-	switch (opcode) {
-	case WRITE_6:
-	case READ_6:
-		cdb_info.lba = get_unaligned_be24(&cmd[1]);
-		cdb_info.xfer_len = cmd[4];
-		if (cdb_info.xfer_len == 0)
-			cdb_info.xfer_len = 256;
-		break;
-	case WRITE_10:
-	case READ_10:
-		cdb_info.lba = get_unaligned_be32(&cmd[2]);
-		cdb_info.xfer_len = get_unaligned_be16(&cmd[7]);
-		break;
-	case WRITE_12:
-	case READ_12:
-		cdb_info.lba = get_unaligned_be32(&cmd[2]);
-		cdb_info.xfer_len = get_unaligned_be32(&cmd[6]);
-		break;
-	case WRITE_16:
-	case READ_16:
-		cdb_info.lba = get_unaligned_be64(&cmd[2]);
-		cdb_info.xfer_len = get_unaligned_be32(&cmd[10]);
-		break;
-	default:
-		/* Will never really reach here */
-		res = -EIO;
-		goto out;
-	}
-
-	/* Calculate total length of transfer (in bytes) */
-	if (hdr->iovec_count > 0) {
-		for (i = 0; i < hdr->iovec_count; i++) {
-			not_copied = copy_from_user(&sgl, hdr->dxferp +
-						i * sizeof(struct sg_iovec),
-						sizeof(struct sg_iovec));
-			if (not_copied)
-				return -EFAULT;
-			sum_iov_len += sgl.iov_len;
-			/* IO vector sizes should be multiples of block size */
-			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
-				res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_PARAMETER,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-				goto out;
-			}
-		}
-	} else {
-		sum_iov_len = hdr->dxfer_len;
-	}
-
-	/* As Per sg ioctl howto, if the lengths differ, use the lower one */
-	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
-
-	/* If block count and actual data buffer size don't match, error out */
-	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
-		res = -EINVAL;
-		goto out;
-	}
-
-	/* Check for 0 length transfer - it is not illegal */
-	if (cdb_info.xfer_len == 0)
-		goto out;
-
-	/* Send NVMe IO Command(s) */
-	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
-	if (res)
-		goto out;
-
- out:
-	return res;
-}
-
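Of the CDB formats parsed above, only the 6-byte READ/WRITE has a quirk worth calling out: a transfer length of zero means 256 blocks, and the LBA is read as a plain 24-bit big-endian field. A standalone sketch of that decode (struct and function names are illustrative):

#include <stdint.h>

struct rw6 {
	uint32_t lba;
	uint32_t nblocks;
};

static struct rw6 parse_rw6(const uint8_t *cdb)
{
	struct rw6 r;

	r.lba = ((uint32_t)cdb[1] << 16) | ((uint32_t)cdb[2] << 8) | cdb[3];
	r.nblocks = cdb[4] ? cdb[4] : 256;	/* 0 means 256 blocks */
	return r;
}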
-static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res = 0;
-	u8 evpd;
-	u8 page_code;
-	int alloc_len;
-	u8 *inq_response;
-
-	evpd = cmd[1] & 0x01;
-	page_code = cmd[2];
-	alloc_len = get_unaligned_be16(&cmd[3]);
-
-	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
-				GFP_KERNEL);
-	if (inq_response == NULL) {
-		res = -ENOMEM;
-		goto out_mem;
-	}
-
-	if (evpd == 0) {
-		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
-			res = nvme_trans_standard_inquiry_page(ns, hdr,
-						inq_response, alloc_len);
-		} else {
-			res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_CDB,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		}
-	} else {
-		switch (page_code) {
-		case VPD_SUPPORTED_PAGES:
-			res = nvme_trans_supported_vpd_pages(ns, hdr,
-						inq_response, alloc_len);
-			break;
-		case VPD_SERIAL_NUMBER:
-			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
-								alloc_len);
-			break;
-		case VPD_DEVICE_IDENTIFIERS:
-			res = nvme_trans_device_id_page(ns, hdr, inq_response,
-								alloc_len);
-			break;
-		case VPD_EXTENDED_INQUIRY:
-			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
-			break;
-		case VPD_BLOCK_LIMITS:
-			res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
-								alloc_len);
-			break;
-		case VPD_BLOCK_DEV_CHARACTERISTICS:
-			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
-			break;
-		default:
-			res = nvme_trans_completion(hdr,
-						SAM_STAT_CHECK_CONDITION,
-						ILLEGAL_REQUEST,
-						SCSI_ASC_INVALID_CDB,
-						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			break;
-		}
-	}
-	kfree(inq_response);
- out_mem:
-	return res;
-}
-
-static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res;
-	u16 alloc_len;
-	u8 pc;
-	u8 page_code;
-
-	if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-
-	page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK;
-	pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
-	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-	alloc_len = get_unaligned_be16(&cmd[7]);
-	switch (page_code) {
-	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
-		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
-		break;
-	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
-		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
-		break;
-	case LOG_PAGE_TEMPERATURE_PAGE:
-		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
-		break;
-	default:
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-
- out:
-	return res;
-}
-
-static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	u8 cdb10 = 0;
-	u16 parm_list_len;
-	u8 page_format;
-	u8 save_pages;
-
-	page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK;
-	save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK;
-
-	if (cmd[0] == MODE_SELECT) {
-		parm_list_len = cmd[4];
-	} else {
-		parm_list_len = cmd[7];
-		cdb10 = 1;
-	}
-
-	if (parm_list_len != 0) {
-		/*
-		 * According to SPC-4 r24, a parameter list length field of 0
-		 * shall not be considered an error
-		 */
-		return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
-						page_format, save_pages, cdb10);
-	}
-
-	return 0;
-}
-
-static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res = 0;
-	u16 alloc_len;
-	u8 cdb10 = 0;
-
-	if (cmd[0] == MODE_SENSE) {
-		alloc_len = cmd[4];
-	} else {
-		alloc_len = get_unaligned_be16(&cmd[7]);
-		cdb10 = 1;
-	}
-
-	if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) !=
-			MODE_SENSE_PC_CURRENT_VALUES) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-
-	switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) {
-	case MODE_PAGE_CACHING:
-		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
-						cdb10,
-						&nvme_trans_fill_caching_page,
-						MODE_PAGE_CACHING_LEN);
-		break;
-	case MODE_PAGE_CONTROL:
-		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
-						cdb10,
-						&nvme_trans_fill_control_page,
-						MODE_PAGE_CONTROL_LEN);
-		break;
-	case MODE_PAGE_POWER_CONDITION:
-		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
-						cdb10,
-						&nvme_trans_fill_pow_cnd_page,
-						MODE_PAGE_POW_CND_LEN);
-		break;
-	case MODE_PAGE_INFO_EXCEP:
-		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
-						cdb10,
-						&nvme_trans_fill_inf_exc_page,
-						MODE_PAGE_INF_EXC_LEN);
-		break;
-	case MODE_PAGE_RETURN_ALL:
-		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
-						cdb10,
-						&nvme_trans_fill_all_pages,
-						MODE_PAGE_ALL_LEN);
-		break;
-	default:
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-
- out:
-	return res;
-}
-
-static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd, u8 cdb16)
-{
-	int res;
-	int nvme_sc;
-	u32 alloc_len;
-	u32 resp_size;
-	u32 xfer_len;
-	struct nvme_id_ns *id_ns;
-	u8 *response;
-
-	if (cdb16) {
-		alloc_len = get_unaligned_be32(&cmd[10]);
-		resp_size = READ_CAP_16_RESP_SIZE;
-	} else {
-		alloc_len = READ_CAP_10_RESP_SIZE;
-		resp_size = READ_CAP_10_RESP_SIZE;
-	}
-
-	nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
-	res = nvme_trans_status_code(hdr, nvme_sc);
-	if (res)
-		return res;	
-
-	response = kzalloc(resp_size, GFP_KERNEL);
-	if (response == NULL) {
-		res = -ENOMEM;
-		goto out_free_id;
-	}
-	nvme_trans_fill_read_cap(response, id_ns, cdb16);
-
-	xfer_len = min(alloc_len, resp_size);
-	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
-
-	kfree(response);
- out_free_id:
-	kfree(id_ns);
-	return res;
-}
-
-static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res;
-	int nvme_sc;
-	u32 alloc_len, xfer_len, resp_size;
-	u8 *response;
-	struct nvme_id_ctrl *id_ctrl;
-	u32 ll_length, lun_id;
-	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
-	__be32 tmp_len;
-
-	switch (cmd[2]) {
-	default:
-		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	case ALL_LUNS_RETURNED:
-	case ALL_WELL_KNOWN_LUNS_RETURNED:
-	case RESTRICTED_LUNS_RETURNED:
-		nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
-		res = nvme_trans_status_code(hdr, nvme_sc);
-		if (res)
-			return res;
-
-		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
-		resp_size = ll_length + LUN_DATA_HEADER_SIZE;
-
-		alloc_len = get_unaligned_be32(&cmd[6]);
-		if (alloc_len < resp_size) {
-			res = nvme_trans_completion(hdr,
-					SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-			goto out_free_id;
-		}
-
-		response = kzalloc(resp_size, GFP_KERNEL);
-		if (response == NULL) {
-			res = -ENOMEM;
-			goto out_free_id;
-		}
-
-		/* The first LUN ID will always be 0 per the SAM spec */
-		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
-			/*
-			 * Set the LUN Id and then increment to the next LUN
-			 * location in the parameter data.
-			 */
-			__be64 tmp_id = cpu_to_be64(lun_id);
-			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
-			lun_id_offset += LUN_ENTRY_SIZE;
-		}
-		tmp_len = cpu_to_be32(ll_length);
-		memcpy(response, &tmp_len, sizeof(u32));
-	}
-
-	xfer_len = min(alloc_len, resp_size);
-	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
-
-	kfree(response);
- out_free_id:
-	kfree(id_ctrl);
-	return res;
-}
-
-static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res;
-	u8 alloc_len, xfer_len, resp_size;
-	u8 desc_format;
-	u8 *response;
-
-	desc_format = cmd[1] & 0x01;
-	alloc_len = cmd[4];
-
-	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
-					(FIXED_FMT_SENSE_DATA_SIZE));
-	response = kzalloc(resp_size, GFP_KERNEL);
-	if (response == NULL) {
-		res = -ENOMEM;
-		goto out;
-	}
-
-	if (desc_format) {
-		/* Descriptor Format Sense Data */
-		response[0] = DESC_FORMAT_SENSE_DATA;
-		response[1] = NO_SENSE;
-		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
-		response[2] = SCSI_ASC_NO_SENSE;
-		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
-	} else {
-		/* Fixed Format Sense Data */
-		response[0] = FIXED_SENSE_DATA;
-		/* Byte 1 = Obsolete */
-		response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
-		/* Bytes 3-6 - Information - set to zero */
-		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
-		/* Bytes 8-11 - Cmd Specific Information - set to zero */
-		response[12] = SCSI_ASC_NO_SENSE;
-		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
-		/* Byte 14 = Field Replaceable Unit Code = 0 */
-		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
-	}
-
-	xfer_len = min(alloc_len, resp_size);
-	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
-
-	kfree(response);
- out:
-	return res;
-}
-
-static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr)
-{
-	int nvme_sc;
-	struct nvme_command c;
-
-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_cmd_flush;
-	c.common.nsid = cpu_to_le32(ns->ns_id);
-
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
-	return nvme_trans_status_code(hdr, nvme_sc);
-}
-
-static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res;
-	u8 parm_hdr_len = 0;
-	u8 nvme_pf_code = 0;
-	u8 format_prot_info, long_list, format_data;
-
-	format_prot_info = (cmd[1] & 0xc0) >> 6;
-	long_list = cmd[1] & 0x20;
-	format_data = cmd[1] & 0x10;
-
-	if (format_data != 0) {
-		if (format_prot_info != 0) {
-			if (long_list == 0)
-				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
-			else
-				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
-		}
-	} else if (format_data == 0 && format_prot_info != 0) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-
-	/* Get parm header from data-in/out buffer */
-	/*
-	 * According to the translation spec, the only fields in the parameter
-	 * list we are concerned with are in the header. So allocate only that.
-	 */
-	if (parm_hdr_len > 0) {
-		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
-					format_prot_info, &nvme_pf_code);
-		if (res)
-			goto out;
-	}
-
-	/* Attempt to activate any previously downloaded firmware image */
-	res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);
-
-	/* Determine Block size and count and send format command */
-	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
-	if (res)
-		goto out;
-
-	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
-
- out:
-	return res;
-}
-
-static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
-					struct sg_io_hdr *hdr,
-					u8 *cmd)
-{
-	if (nvme_ctrl_ready(ns->ctrl))
-		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
-					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	else
-		return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
-}
-
-static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	int res = 0;
-	u32 buffer_offset, parm_list_length;
-	u8 buffer_id, mode;
-
-	parm_list_length = get_unaligned_be24(&cmd[6]);
-	if (parm_list_length % BYTES_TO_DWORDS != 0) {
-		/* NVMe expects Firmware file to be a whole number of DWORDS */
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-	buffer_id = cmd[2];
-	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		goto out;
-	}
-	mode = cmd[1] & 0x1f;
-	buffer_offset = get_unaligned_be24(&cmd[3]);
-
-	switch (mode) {
-	case DOWNLOAD_SAVE_ACTIVATE:
-		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
-						parm_list_length, buffer_offset,
-						buffer_id);
-		if (res)
-			goto out;
-		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
-		break;
-	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
-		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
-						parm_list_length, buffer_offset,
-						buffer_id);
-		break;
-	case ACTIVATE_DEFERRED_MICROCODE:
-		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
-		break;
-	default:
-		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-
- out:
-	return res;
-}
-
-struct scsi_unmap_blk_desc {
-	__be64	slba;
-	__be32	nlb;
-	u32	resv;
-};
-
-struct scsi_unmap_parm_list {
-	__be16	unmap_data_len;
-	__be16	unmap_blk_desc_data_len;
-	u32	resv;
-	struct scsi_unmap_blk_desc desc[0];
-};
-
-static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	struct scsi_unmap_parm_list *plist;
-	struct nvme_dsm_range *range;
-	struct nvme_command c;
-	int i, nvme_sc, res;
-	u16 ndesc, list_len;
-
-	list_len = get_unaligned_be16(&cmd[7]);
-	if (!list_len)
-		return -EINVAL;
-
-	plist = kmalloc(list_len, GFP_KERNEL);
-	if (!plist)
-		return -ENOMEM;
-
-	res = nvme_trans_copy_from_user(hdr, plist, list_len);
-	if (res)
-		goto out;
-
-	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
-	if (!ndesc || ndesc > 256) {
-		res = -EINVAL;
-		goto out;
-	}
-
-	range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
-	if (!range) {
-		res = -ENOMEM;
-		goto out;
-	}
-
-	for (i = 0; i < ndesc; i++) {
-		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
-		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
-		range[i].cattr = 0;
-	}
-
-	memset(&c, 0, sizeof(c));
-	c.dsm.opcode = nvme_cmd_dsm;
-	c.dsm.nsid = cpu_to_le32(ns->ns_id);
-	c.dsm.nr = cpu_to_le32(ndesc - 1);
-	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
-
-	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
-			ndesc * sizeof(*range));
-	res = nvme_trans_status_code(hdr, nvme_sc);
-
-	kfree(range);
- out:
-	kfree(plist);
-	return res;
-}
-
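Each 16-byte UNMAP block descriptor copied in above carries a big-endian 8-byte LBA and a 4-byte block count, and the ">> 4" is the block-descriptor data length divided by that size. A standalone sketch of the per-descriptor conversion into an NVMe DSM range, using the glibc byte-order helpers in place of the kernel's (struct layouts mirror the ones above and the NVMe range format):

#include <endian.h>
#include <stdint.h>

struct unmap_blk_desc {		/* 16 bytes on the wire, big endian */
	uint64_t slba;
	uint32_t nlb;
	uint32_t resv;
};

struct dsm_range {		/* little endian inside the NVMe command */
	uint32_t cattr;
	uint32_t nlb;
	uint64_t slba;
};

static struct dsm_range unmap_desc_to_range(const struct unmap_blk_desc *d)
{
	struct dsm_range r = {
		.cattr = 0,
		.nlb   = htole32(be32toh(d->nlb)),
		.slba  = htole64(be64toh(d->slba)),
	};
	return r;
}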
-static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
-{
-	u8 cmd[16];
-	int retcode;
-	unsigned int opcode;
-
-	if (hdr->cmdp == NULL)
-		return -EMSGSIZE;
-	if (hdr->cmd_len > sizeof(cmd))
-		return -EINVAL;
-	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
-		return -EFAULT;
-
-	/*
-	 * Prime the hdr with good status for scsi commands that don't require
-	 * an nvme command for translation.
-	 */
-	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
-	if (retcode)
-		return retcode;
-
-	opcode = cmd[0];
-
-	switch (opcode) {
-	case READ_6:
-	case READ_10:
-	case READ_12:
-	case READ_16:
-		retcode = nvme_trans_io(ns, hdr, 0, cmd);
-		break;
-	case WRITE_6:
-	case WRITE_10:
-	case WRITE_12:
-	case WRITE_16:
-		retcode = nvme_trans_io(ns, hdr, 1, cmd);
-		break;
-	case INQUIRY:
-		retcode = nvme_trans_inquiry(ns, hdr, cmd);
-		break;
-	case LOG_SENSE:
-		retcode = nvme_trans_log_sense(ns, hdr, cmd);
-		break;
-	case MODE_SELECT:
-	case MODE_SELECT_10:
-		retcode = nvme_trans_mode_select(ns, hdr, cmd);
-		break;
-	case MODE_SENSE:
-	case MODE_SENSE_10:
-		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
-		break;
-	case READ_CAPACITY:
-		retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0);
-		break;
-	case SERVICE_ACTION_IN_16:
-		switch (cmd[1]) {
-		case SAI_READ_CAPACITY_16:
-			retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1);
-			break;
-		default:
-			goto out;
-		}
-		break;
-	case REPORT_LUNS:
-		retcode = nvme_trans_report_luns(ns, hdr, cmd);
-		break;
-	case REQUEST_SENSE:
-		retcode = nvme_trans_request_sense(ns, hdr, cmd);
-		break;
-	case SYNCHRONIZE_CACHE:
-		retcode = nvme_trans_synchronize_cache(ns, hdr);
-		break;
-	case FORMAT_UNIT:
-		retcode = nvme_trans_format_unit(ns, hdr, cmd);
-		break;
-	case TEST_UNIT_READY:
-		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
-		break;
-	case WRITE_BUFFER:
-		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
-		break;
-	case UNMAP:
-		retcode = nvme_trans_unmap(ns, hdr, cmd);
-		break;
-	default:
- out:
-		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
-				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-		break;
-	}
-	return retcode;
-}
-
-int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
-{
-	struct sg_io_hdr hdr;
-	int retcode;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
-		return -EFAULT;
-	if (hdr.interface_id != 'S')
-		return -EINVAL;
-
-	/*
-	 * A positive return code means a NVMe status, which has been
-	 * translated to sense data.
-	 */
-	retcode = nvme_scsi_translate(ns, &hdr);
-	if (retcode < 0)
-		return retcode;
-	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
-		return -EFAULT;
-	return 0;
-}
-
-int nvme_sg_get_version_num(int __user *ip)
-{
-	return put_user(sg_version_num, ip);
-}
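With this file gone, the SG_IO translation it implemented goes away with it. For context, a minimal userspace sketch of how callers exercised it before this change: open the namespace block node (the device path here is hypothetical), fill an sg_io_hdr with interface_id 'S' and a SCSI CDB, and let the kernel translate it. CAP_SYS_ADMIN was required, exactly as the removed nvme_sg_io() checked.

#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/nvme0n1", O_RDONLY);		/* hypothetical device */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* checked by the removed nvme_sg_io() */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("vendor: %.8s\n", (char *)&buf[8]);

	close(fd);
	return 0;
}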
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ff1f970..35f930d 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -336,7 +336,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 {
-	static const int buf_size = 4096;
+	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmet_ns *ns;
 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
@@ -367,6 +367,64 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
+static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
+				    void *id, off_t *off)
+{
+	struct nvme_ns_id_desc desc = {
+		.nidt = type,
+		.nidl = len,
+	};
+	u16 status;
+
+	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
+	if (status)
+		return status;
+	*off += sizeof(desc);
+
+	status = nvmet_copy_to_sgl(req, *off, id, len);
+	if (status)
+		return status;
+	*off += len;
+
+	return 0;
+}
+
+static void nvmet_execute_identify_desclist(struct nvmet_req *req)
+{
+	struct nvmet_ns *ns;
+	u16 status = 0;
+	off_t off = 0;
+
+	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+	if (!ns) {
+		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		goto out;
+	}
+
+	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
+						  NVME_NIDT_UUID_LEN,
+						  &ns->uuid, &off);
+		if (status)
+			goto out_put_ns;
+	}
+	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
+						  NVME_NIDT_NGUID_LEN,
+						  &ns->nguid, &off);
+		if (status)
+			goto out_put_ns;
+	}
+
+	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
+			off) != NVME_IDENTIFY_DATA_SIZE - off)
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+out_put_ns:
+	nvmet_put_namespace(ns);
+out:
+	nvmet_req_complete(req, status);
+}
+
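The new handler implements the NVMe 1.3 Namespace Identification Descriptor list (Identify CNS 03h): a 4 KB buffer holding a sequence of descriptors, each a type byte, a length byte, two reserved bytes and the identifier itself, zero padded to the end of the buffer. A sketch of the on-wire header that struct nvme_ns_id_desc models; the two descriptors emitted here are the 16-byte NGUID (type 2) and the 16-byte UUID (type 3):

#include <stdint.h>

struct ns_id_desc_hdr {
	uint8_t nidt;		/* identifier type: 1 = EUI-64, 2 = NGUID, 3 = UUID */
	uint8_t nidl;		/* identifier length in bytes */
	uint8_t rsvd[2];
	/* followed by nidl bytes of identifier, then the next descriptor */
};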
 /*
  * A "mimimum viable" abort implementation: the command is mandatory in the
  * spec, but we are not required to do any useful work.  We couldn't really
@@ -504,7 +562,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 		}
 		break;
 	case nvme_admin_identify:
-		req->data_len = 4096;
+		req->data_len = NVME_IDENTIFY_DATA_SIZE;
 		switch (cmd->identify.cns) {
 		case NVME_ID_CNS_NS:
 			req->execute = nvmet_execute_identify_ns;
@@ -515,6 +573,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 		case NVME_ID_CNS_NS_ACTIVE_LIST:
 			req->execute = nvmet_execute_identify_nslist;
 			return 0;
+		case NVME_ID_CNS_NS_DESC_LIST:
+			req->execute = nvmet_execute_identify_desclist;
+			return 0;
 		}
 		break;
 	case nvme_admin_abort_cmd:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index be8c800..a358ecd 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -305,11 +305,41 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_ns_, device_path);
 
+static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
+}
+
+static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
+					  const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct nvmet_subsys *subsys = ns->subsys;
+	int ret = 0;
+
+
+	mutex_lock(&subsys->lock);
+	if (ns->enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+
+	if (uuid_parse(page, &ns->uuid))
+		ret = -EINVAL;
+
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret ? ret : count;
+}
+
 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
 {
 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
 }
 
+CONFIGFS_ATTR(nvmet_ns_, device_uuid);
+
 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
 		const char *page, size_t count)
 {
@@ -379,6 +409,7 @@ CONFIGFS_ATTR(nvmet_ns_, enable);
 static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_device_path,
 	&nvmet_ns_attr_device_nguid,
+	&nvmet_ns_attr_device_uuid,
 	&nvmet_ns_attr_enable,
 	NULL,
 };
@@ -619,8 +650,45 @@ static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
 
+static ssize_t nvmet_subsys_version_show(struct config_item *item,
+					      char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	if (NVME_TERTIARY(subsys->ver))
+		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
+				(int)NVME_MAJOR(subsys->ver),
+				(int)NVME_MINOR(subsys->ver),
+				(int)NVME_TERTIARY(subsys->ver));
+	else
+		return snprintf(page, PAGE_SIZE, "%d.%d\n",
+				(int)NVME_MAJOR(subsys->ver),
+				(int)NVME_MINOR(subsys->ver));
+}
+
+static ssize_t nvmet_subsys_version_store(struct config_item *item,
+					       const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	int major, minor, tertiary = 0;
+	int ret;
+
+
+	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
+	if (ret != 2 && ret != 3)
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	subsys->ver = NVME_VS(major, minor, tertiary);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, version);
+
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
+	&nvmet_subsys_attr_version,
 	NULL,
 };
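The new version attribute accepts either "major.minor" or "major.minor.tertiary" (the third field defaults to 0) and packs it with NVME_VS() into the 32-bit VS-register layout the target advertises: major in bits 31:16, minor in 15:8, tertiary in 7:0. A standalone sketch of that packing (function name is illustrative):

#include <stdint.h>

static uint32_t pack_nvme_version(unsigned int major, unsigned int minor,
				  unsigned int tertiary)
{
	return (major << 16) | (minor << 8) | tertiary;	/* 1.3.0 -> 0x00010300 */
}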
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index eb9399a..b5b4ac1 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -380,6 +380,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 	ns->nsid = nsid;
 	ns->subsys = subsys;
+	uuid_gen(&ns->uuid);
 
 	return ns;
 }
@@ -926,7 +927,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	if (!subsys)
 		return NULL;
 
-	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
+	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
 
 	switch (type) {
 	case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 1aaf597..8f3b57b 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -53,7 +53,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
 	e->portid = port->disc_addr.portid;
 	/* we support only dynamic controllers */
 	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
-	e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
+	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
 	e->subtype = type;
 	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
 	memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
@@ -185,7 +185,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 		}
 	case nvme_admin_identify:
-		req->data_len = 4096;
+		req->data_len = NVME_IDENTIFY_DATA_SIZE;
 		switch (cmd->identify.cns) {
 		case NVME_ID_CNS_CTRL:
 			req->execute =
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 2006fae..7692a96 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2096,20 +2096,22 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	/* clear any response payload */
 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
 
+	fod->data_sg = NULL;
+	fod->data_sg_cnt = 0;
+
 	ret = nvmet_req_init(&fod->req,
 				&fod->queue->nvme_cq,
 				&fod->queue->nvme_sq,
 				&nvmet_fc_tgt_fcp_ops);
-	if (!ret) {	/* bad SQE content or invalid ctrl state */
-		nvmet_fc_abort_op(tgtport, fod);
+	if (!ret) {
+		/* bad SQE content or invalid ctrl state */
+		/* nvmet layer has already called op done to send rsp. */
 		return;
 	}
 
 	/* keep a running counter of tail position */
 	atomic_inc(&fod->queue->sqtail);
 
-	fod->data_sg = NULL;
-	fod->data_sg_cnt = 0;
 	if (fod->total_length) {
 		ret = nvmet_fc_alloc_tgt_pgs(fod);
 		if (ret) {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 294a661..1bb9d5b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -569,7 +569,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
-	int active;
 
 	/*
 	 * mark aborted only in case there were 2 threads in transport
@@ -577,7 +576,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 	 * after the abort request
 	 */
 	spin_lock(&tfcp_req->reqlock);
-	active = tfcp_req->active;
 	tfcp_req->aborted = true;
 	spin_unlock(&tfcp_req->reqlock);
 
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index c77940d..4012879 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -21,7 +21,7 @@ static void nvmet_bio_done(struct bio *bio)
 	struct nvmet_req *req = bio->bi_private;
 
 	nvmet_req_complete(req,
-		bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
 
 	if (bio != &req->inline_bio)
 		bio_put(bio);
@@ -145,7 +145,7 @@ static void nvmet_execute_discard(struct nvmet_req *req)
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
 		if (status) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
 		} else {
 			submit_bio(bio);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index e503cff..5f55c68 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -21,8 +21,6 @@
 #include "../host/nvme.h"
 #include "../host/fabrics.h"
 
-#define NVME_LOOP_AQ_DEPTH		256
-
 #define NVME_LOOP_MAX_SEGMENTS		256
 
 /*
@@ -31,7 +29,7 @@
  */
 #define NVME_LOOP_NR_AEN_COMMANDS	1
 #define NVME_LOOP_AQ_BLKMQ_DEPTH	\
-	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
+	(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
 
 struct nvme_loop_iod {
 	struct nvme_request	nvme_req;
@@ -45,7 +43,6 @@ struct nvme_loop_iod {
 };
 
 struct nvme_loop_ctrl {
-	spinlock_t		lock;
 	struct nvme_loop_queue	*queues;
 	u32			queue_count;
 
@@ -59,7 +56,6 @@ struct nvme_loop_ctrl {
 
 	struct nvmet_ctrl	*target_ctrl;
 	struct work_struct	delete_work;
-	struct work_struct	reset_work;
 };
 
 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -151,7 +147,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
 
 	/* queue error recovery */
-	schedule_work(&iod->queue->ctrl->reset_work);
+	nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
 
 	/* fail with DNR on admin cmd timeout */
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
@@ -159,17 +155,17 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
 	struct nvme_loop_queue *queue = hctx->driver_data;
 	struct request *req = bd->rq;
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-	int ret;
+	blk_status_t ret;
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		return ret;
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,16 +175,15 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	}
 
 	if (blk_rq_bytes(req)) {
 		iod->sg_table.sgl = iod->first_sgl;
-		ret = sg_alloc_table_chained(&iod->sg_table,
+		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl);
-		if (ret)
-			return BLK_MQ_RQ_QUEUE_BUSY;
+				iod->sg_table.sgl))
+			return BLK_STS_RESOURCE;
 
 		iod->req.sg = iod->sg_table.sgl;
 		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
@@ -197,7 +192,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
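The hunks above track the 4.13 block-layer change where ->queue_rq() returns a blk_status_t instead of the old BLK_MQ_RQ_QUEUE_* integers. A rough summary of the correspondence as a kernel-context sketch; the helper is illustrative, an assumed summary rather than part of this patch:

static blk_status_t legacy_to_blk_status(int old)
{
	switch (old) {
	case 0:		/* BLK_MQ_RQ_QUEUE_OK */
		return BLK_STS_OK;
	case 1:		/* BLK_MQ_RQ_QUEUE_BUSY */
		return BLK_STS_RESOURCE;
	default:	/* BLK_MQ_RQ_QUEUE_ERROR */
		return BLK_STS_IOERR;
	}
}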
@@ -234,15 +229,10 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 		struct request *req, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
-			hctx_idx + 1);
-}
+	struct nvme_loop_ctrl *ctrl = set->driver_data;
 
-static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
-		struct request *req, unsigned int hctx_idx,
-		unsigned int numa_node)
-{
-	return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
+	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
+			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }
 
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -280,7 +270,7 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
 static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 	.queue_rq	= nvme_loop_queue_rq,
 	.complete	= nvme_loop_complete_rq,
-	.init_request	= nvme_loop_init_admin_request,
+	.init_request	= nvme_loop_init_request,
 	.init_hctx	= nvme_loop_init_admin_hctx,
 	.timeout	= nvme_loop_timeout,
 };
@@ -467,7 +457,7 @@ static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
 		return -EBUSY;
 
-	if (!schedule_work(&ctrl->delete_work))
+	if (!queue_work(nvme_wq, &ctrl->delete_work))
 		return -EBUSY;
 
 	return 0;
@@ -501,8 +491,8 @@ static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
 
 static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 {
-	struct nvme_loop_ctrl *ctrl = container_of(work,
-					struct nvme_loop_ctrl, reset_work);
+	struct nvme_loop_ctrl *ctrl =
+		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
 	bool changed;
 	int ret;
 
@@ -540,21 +530,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
-
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-		return -EBUSY;
-
-	if (!schedule_work(&ctrl->reset_work))
-		return -EBUSY;
-
-	flush_work(&ctrl->reset_work);
-
-	return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 	.name			= "loop",
 	.module			= THIS_MODULE,
@@ -562,11 +537,9 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 	.reg_read32		= nvmf_reg_read32,
 	.reg_read64		= nvmf_reg_read64,
 	.reg_write32		= nvmf_reg_write32,
-	.reset_ctrl		= nvme_loop_reset_ctrl,
 	.free_ctrl		= nvme_loop_free_ctrl,
 	.submit_async_event	= nvme_loop_submit_async_event,
 	.delete_ctrl		= nvme_loop_del_ctrl,
-	.get_subsysnqn		= nvmf_get_subsysnqn,
 };
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -629,15 +602,13 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	INIT_LIST_HEAD(&ctrl->list);
 
 	INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
-	INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
+	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
 
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
 				0 /* no quirks, we're perfect! */);
 	if (ret)
 		goto out_put_ctrl;
 
-	spin_lock_init(&ctrl->lock);
-
 	ret = -ENOMEM;
 
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
@@ -766,7 +737,7 @@ static void __exit nvme_loop_cleanup_module(void)
 		__nvme_loop_del_ctrl(ctrl);
 	mutex_unlock(&nvme_loop_ctrl_mutex);
 
-	flush_scheduled_work();
+	flush_workqueue(nvme_wq);
 }
 
 module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index cfc5c7f..747bbdb 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -21,6 +21,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/uuid.h>
 #include <linux/nvme.h>
 #include <linux/configfs.h>
 #include <linux/rcupdate.h>
@@ -46,6 +47,7 @@ struct nvmet_ns {
 	u32			blksize_shift;
 	loff_t			size;
 	u8			nguid[16];
+	uuid_t			uuid;
 
 	bool			enabled;
 	struct nvmet_subsys	*subsys;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9e45cde..56a4cba 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1027,7 +1027,7 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
 	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
-	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
+	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
 		return NVME_RDMA_CM_INVALID_HSQSIZE;
 
 	/* XXX: Should we enforce some kind of max for IO queues? */
@@ -1307,53 +1307,44 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 
 /**
  * nvme_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id:	rdma_cm id, used for nvmet port
  * @queue:      nvmet rdma queue (cm id qp_context)
- * @addr:	nvmet address (cm_id context)
  *
  * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
+ * to unplug. Note that this event can be generated on a normal
+ * queue cm_id and/or a device bound listener cm_id (where in this
+ * case queue will be null).
  *
- * Note that this event can be generated on a normal queue cm_id
- * and/or a device bound listener cm_id (where in this case
- * queue will be null).
- *
- * we claim ownership on destroying the cm_id. For queues we move
- * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for port
+ * We registered an ib_client to handle device removal for queues,
+ * so we only need to handle the listening port cm_ids. In this case
  * we nullify the priv to prevent double cm_id destruction and destroying
  * the cm_id implicitely by returning a non-zero rc to the callout.
  */
 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 		struct nvmet_rdma_queue *queue)
 {
-	unsigned long flags;
+	struct nvmet_port *port;
 
-	if (!queue) {
-		struct nvmet_port *port = cm_id->context;
-
+	if (queue) {
 		/*
-		 * This is a listener cm_id. Make sure that
-		 * future remove_port won't invoke a double
-		 * cm_id destroy. use atomic xchg to make sure
-		 * we don't compete with remove_port.
+		 * This is a queue cm_id. We have registered
+		 * an ib_client to handle queue removal,
+		 * so don't interfere and just return.
 		 */
-		if (xchg(&port->priv, NULL) != cm_id)
-			return 0;
-	} else {
-		/*
-		 * This is a queue cm_id. Make sure that
-		 * release queue will not destroy the cm_id
-		 * and schedule all ctrl queues removal (only
-		 * if the queue is not disconnecting already).
-		 */
-		spin_lock_irqsave(&queue->state_lock, flags);
-		if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
-			queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
-		spin_unlock_irqrestore(&queue->state_lock, flags);
-		nvmet_rdma_queue_disconnect(queue);
-		flush_scheduled_work();
+		return 0;
 	}
 
+	port = cm_id->context;
+
+	/*
+	 * This is a listener cm_id. Make sure that
+	 * future remove_port won't invoke a double
+	 * cm_id destroy. use atomic xchg to make sure
+	 * we don't compete with remove_port.
+	 */
+	if (xchg(&port->priv, NULL) != cm_id)
+		return 0;
+
 	/*
 	 * We need to return 1 so that the core will destroy
 	 * it's own ID.  What a great API design..
@@ -1519,9 +1510,51 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
 	.delete_ctrl		= nvmet_rdma_delete_ctrl,
 };
 
+static void nvmet_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+	struct nvmet_rdma_queue *queue;
+
+	/* Device is being removed, delete all queues using this device */
+	mutex_lock(&nvmet_rdma_queue_mutex);
+	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+		if (queue->dev->device != ib_device)
+			continue;
+
+		pr_info("Removing queue %d\n", queue->idx);
+		__nvmet_rdma_queue_disconnect(queue);
+	}
+	mutex_unlock(&nvmet_rdma_queue_mutex);
+
+	flush_scheduled_work();
+}
+
+static struct ib_client nvmet_rdma_ib_client = {
+	.name   = "nvmet_rdma",
+	.add = nvmet_rdma_add_one,
+	.remove = nvmet_rdma_remove_one
+};
+
 static int __init nvmet_rdma_init(void)
 {
-	return nvmet_register_transport(&nvmet_rdma_ops);
+	int ret;
+
+	ret = ib_register_client(&nvmet_rdma_ib_client);
+	if (ret)
+		return ret;
+
+	ret = nvmet_register_transport(&nvmet_rdma_ops);
+	if (ret)
+		goto err_ib_client;
+
+	return 0;
+
+err_ib_client:
+	ib_unregister_client(&nvmet_rdma_ib_client);
+	return ret;
 }
 
 static void __exit nvmet_rdma_exit(void)
@@ -1544,6 +1577,7 @@ static void __exit nvmet_rdma_exit(void)
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	flush_scheduled_work();
+	ib_unregister_client(&nvmet_rdma_ib_client);
 	ida_destroy(&nvmet_rdma_queue_ida);
 }
 
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 646cadb..3c56e3b 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -34,7 +34,7 @@
 #define OTPC_CMD_READ                0x0
 #define OTPC_CMD_OTP_PROG_ENABLE     0x2
 #define OTPC_CMD_OTP_PROG_DISABLE    0x3
-#define OTPC_CMD_PROGRAM             0xA
+#define OTPC_CMD_PROGRAM             0x8
 
 /* OTPC Status Bits */
 #define OTPC_STAT_CMD_DONE           BIT(1)
@@ -209,7 +209,7 @@ static int bcm_otpc_write(void *context, unsigned int offset, void *val,
 		set_command(priv->base, OTPC_CMD_PROGRAM);
 		set_cpu_address(priv->base, address++);
 		for (i = 0; i < priv->map->otpc_row_size; i++) {
-			writel(*buf, priv->base + priv->map->data_r_offset[i]);
+			writel(*buf, priv->base + priv->map->data_w_offset[i]);
 			buf++;
 			bytes_written += sizeof(*buf);
 		}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 8c830a8..4c49285 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -287,9 +287,15 @@ static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
 {
 	struct nvmem_cell *p;
 
+	mutex_lock(&nvmem_cells_mutex);
+
 	list_for_each_entry(p, &nvmem_cells, node)
-		if (p && !strcmp(p->name, cell_id))
+		if (p && !strcmp(p->name, cell_id)) {
+			mutex_unlock(&nvmem_cells_mutex);
 			return p;
+		}
+
+	mutex_unlock(&nvmem_cells_mutex);
 
 	return NULL;
 }
@@ -489,21 +495,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 
 	rval = device_add(&nvmem->dev);
 	if (rval)
-		goto out;
+		goto err_put_device;
 
 	if (config->compat) {
 		rval = nvmem_setup_compat(nvmem, config);
 		if (rval)
-			goto out;
+			goto err_device_del;
 	}
 
 	if (config->cells)
 		nvmem_add_cells(nvmem, config);
 
 	return nvmem;
-out:
-	ida_simple_remove(&nvmem_ida, nvmem->id);
-	kfree(nvmem);
+
+err_device_del:
+	device_del(&nvmem->dev);
+err_put_device:
+	put_device(&nvmem->dev);
+
 	return ERR_PTR(rval);
 }
 EXPORT_SYMBOL_GPL(nvmem_register);
@@ -529,6 +538,7 @@ int nvmem_unregister(struct nvmem_device *nvmem)
 
 	nvmem_device_remove_all_cells(nvmem);
 	device_del(&nvmem->dev);
+	put_device(&nvmem->dev);
 
 	return 0;
 }
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 423907b..a0d4ede 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,6 +170,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
 		.data = (void *)&rockchip_rk3288_efuse_read,
 	},
 	{
+		.compatible = "rockchip,rk322x-efuse",
+		.data = (void *)&rockchip_rk3288_efuse_read,
+	},
+	{
 		.compatible = "rockchip,rk3288-efuse",
 		.data = (void *)&rockchip_rk3288_efuse_read,
 	},
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 9416d05..28c38c7 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
 		coherent ? " " : " not ");
 
 	iommu = of_iommu_configure(dev, np);
-	if (IS_ERR(iommu))
-		return PTR_ERR(iommu);
+	if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
 
 	dev_dbg(dev, "device is%sbehind an iommu\n",
 		iommu ? " " : " not ");
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index e32ca2e..56c93f0 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 
 	BUG_ON(size <= 0);
 
@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 	
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 	DBG_RUN("%s() iovp 0x%lx/%x\n",
 		__func__, (long)iova, size);
@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 	
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
 		__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 1133b5c..5c63b92 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@ struct dino_device
 };
 
 /* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({				\
+	void *__pdata = d;			\
+	BUG_ON(!__pdata);			\
+	(struct dino_device *)__pdata; })
 
 
 /*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ec2aef..bc286cb 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@ static u32 lba_t32;
 
 
 /* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({				\
+	void *__pdata = d;			\
+	BUG_ON(!__pdata);			\
+	(struct lba_device *)__pdata; })
 
 /*
 ** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 33385e5..87ad5fd 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
 		return 0;
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 
 	/*
 	 * check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 	int pide;
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 
 	/* save offset bits */
 	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 	offset = iova & ~IOVP_MASK;
 	iova ^= offset;        /* clear offset bits */
 	size += offset;
@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 		__func__, nents, sg_virt(sglist), sglist->length);
 
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 #ifdef SBA_COLLECT_STATS
 	ioc->usg_calls++;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index e0cacb7..c32a77f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -86,6 +86,9 @@
 config PCI_ECAM
 	bool
 
+config PCI_LOCKLESS_CONFIG
+	bool
+
 config PCI_IOV
 	bool "PCI IOV support"
 	depends on PCI
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 74cf5ff..913d672 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -25,6 +25,14 @@ DEFINE_RAW_SPINLOCK(pci_lock);
 #define PCI_word_BAD (pos & 1)
 #define PCI_dword_BAD (pos & 3)
 
+#ifdef CONFIG_PCI_LOCKLESS_CONFIG
+# define pci_lock_config(f)	do { (void)(f); } while (0)
+# define pci_unlock_config(f)	do { (void)(f); } while (0)
+#else
+# define pci_lock_config(f)	raw_spin_lock_irqsave(&pci_lock, f)
+# define pci_unlock_config(f)	raw_spin_unlock_irqrestore(&pci_lock, f)
+#endif
+
 #define PCI_OP_READ(size, type, len) \
 int pci_bus_read_config_##size \
 	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
@@ -33,10 +41,10 @@ int pci_bus_read_config_##size \
 	unsigned long flags;						\
 	u32 data = 0;							\
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
-	raw_spin_lock_irqsave(&pci_lock, flags);			\
+	pci_lock_config(flags);						\
 	res = bus->ops->read(bus, devfn, pos, len, &data);		\
 	*value = (type)data;						\
-	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
+	pci_unlock_config(flags);					\
 	return res;							\
 }
 
@@ -47,9 +55,9 @@ int pci_bus_write_config_##size \
 	int res;							\
 	unsigned long flags;						\
 	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
-	raw_spin_lock_irqsave(&pci_lock, flags);			\
+	pci_lock_config(flags);						\
 	res = bus->ops->write(bus, devfn, pos, len, value);		\
-	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
+	pci_unlock_config(flags);					\
 	return res;							\
 }
 
@@ -896,7 +904,7 @@ int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
 }
@@ -906,7 +914,7 @@ int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
 }
@@ -917,7 +925,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
 {
 	if (pci_dev_is_disconnected(dev)) {
 		*val = ~0;
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	}
 	return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
 }
@@ -926,7 +934,7 @@ EXPORT_SYMBOL(pci_read_config_dword);
 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_byte);
@@ -934,7 +942,7 @@ EXPORT_SYMBOL(pci_write_config_byte);
 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_word);
@@ -943,7 +951,7 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
 					 u32 val)
 {
 	if (pci_dev_is_disconnected(dev))
-		return -ENODEV;
+		return PCIBIOS_DEVICE_NOT_FOUND;
 	return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
 }
 EXPORT_SYMBOL(pci_write_config_dword);
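
Two related changes above: on a host bridge that selects PCI_LOCKLESS_CONFIG, the config accessors compile the pci_lock spinlock out of the fast path (the (void)(f) form keeps the flags variable referenced so the lockless build stays warning-free), and accesses to a disconnected device now return PCIBIOS_DEVICE_NOT_FOUND, matching the PCIBIOS-style codes the accessors already use (e.g. PCIBIOS_BAD_REGISTER_NUMBER). The following is a minimal userspace sketch of the compile-time lock-selection pattern only; CONFIG_LOCKLESS, cfg_lock_acquire() and read_reg() are stand-ins invented for this sketch, not kernel interfaces.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

#ifdef CONFIG_LOCKLESS
# define cfg_lock_acquire()	do { } while (0)
# define cfg_lock_release()	do { } while (0)
#else
# define cfg_lock_acquire()	pthread_mutex_lock(&cfg_lock)
# define cfg_lock_release()	pthread_mutex_unlock(&cfg_lock)
#endif

static int shared_reg;

static int read_reg(void)
{
	int val;

	cfg_lock_acquire();
	val = shared_reg;	/* stand-in for bus->ops->read() */
	cfg_lock_release();
	return val;
}

int main(void)
{
	shared_reg = 0x1234;
	printf("reg = 0x%x\n", read_reg());
	return 0;
}
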
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 175edad..2942066 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -5,6 +5,7 @@
 config PCI_EPF_TEST
 	tristate "PCI Endpoint Test driver"
 	depends on PCI_ENDPOINT
+	select CRC32
 	help
 	   Enable this configuration option to enable the test driver
 	   for PCI Endpoint.
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index e27ad2a..31203d6 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -554,6 +554,7 @@ static int vmd_find_free_domain(void)
 static int vmd_enable_domain(struct vmd_dev *vmd)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
+	struct fwnode_handle *fn;
 	struct resource *res;
 	u32 upper_bits;
 	unsigned long flags;
@@ -617,8 +618,13 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
 
 	sd->node = pcibus_to_node(vmd->dev->bus);
 
-	vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
+	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+	if (!fn)
+		return -ENODEV;
+
+	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
 						    x86_vector_domain);
+	irq_domain_free_fwnode(fn);
 	if (!vmd->irq_domain)
 		return -ENODEV;
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ba44fdf..fbad5dc 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1463,7 +1463,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
 	if (!domain)
 		return NULL;
 
-	domain->bus_token = DOMAIN_BUS_PCI_MSI;
+	irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
 	return domain;
 }
 EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0018603..e70c1c7 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -21,13 +21,12 @@
 #include "pci.h"
 
 /*
- * The UUID is defined in the PCI Firmware Specification available here:
+ * The GUID is defined in the PCI Firmware Specification available here:
  * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
  */
-const u8 pci_acpi_dsm_uuid[] = {
-	0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
-	0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
-};
+const guid_t pci_acpi_dsm_guid =
+	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
+		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
 
 #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
 static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
@@ -395,29 +394,26 @@ bool pciehp_is_native(struct pci_dev *pdev)
 
 /**
  * pci_acpi_wake_bus - Root bus wakeup notification fork function.
- * @work: Work item to handle.
+ * @context: Device wakeup context.
  */
-static void pci_acpi_wake_bus(struct work_struct *work)
+static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
 {
 	struct acpi_device *adev;
 	struct acpi_pci_root *root;
 
-	adev = container_of(work, struct acpi_device, wakeup.context.work);
+	adev = container_of(context, struct acpi_device, wakeup.context);
 	root = acpi_driver_data(adev);
 	pci_pme_wakeup_bus(root->bus);
 }
 
 /**
  * pci_acpi_wake_dev - PCI device wakeup notification work function.
- * @handle: ACPI handle of a device the notification is for.
- * @work: Work item to handle.
+ * @context: Device wakeup context.
  */
-static void pci_acpi_wake_dev(struct work_struct *work)
+static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
 {
-	struct acpi_device_wakeup_context *context;
 	struct pci_dev *pci_dev;
 
-	context = container_of(work, struct acpi_device_wakeup_context, work);
 	pci_dev = to_pci_dev(context->dev);
 
 	if (pci_dev->pme_poll)
@@ -425,7 +421,7 @@ static void pci_acpi_wake_dev(struct work_struct *work)
 
 	if (pci_dev->current_state == PCI_D3cold) {
 		pci_wakeup_event(pci_dev);
-		pm_runtime_resume(&pci_dev->dev);
+		pm_request_resume(&pci_dev->dev);
 		return;
 	}
 
@@ -434,7 +430,7 @@ static void pci_acpi_wake_dev(struct work_struct *work)
 		pci_check_pme_status(pci_dev);
 
 	pci_wakeup_event(pci_dev);
-	pm_runtime_resume(&pci_dev->dev);
+	pm_request_resume(&pci_dev->dev);
 
 	pci_pme_wakeup_bus(pci_dev->subordinate);
 }
@@ -573,67 +569,29 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
 	return state_conv[state];
 }
 
-static bool acpi_pci_can_wakeup(struct pci_dev *dev)
-{
-	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
-	return adev ? acpi_device_can_wakeup(adev) : false;
-}
-
-static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
+static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
 {
 	while (bus->parent) {
-		if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
-			return;
+		if (acpi_pm_device_can_wakeup(&bus->self->dev))
+			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
+
 		bus = bus->parent;
 	}
 
 	/* We have reached the root bus. */
-	if (bus->bridge)
-		acpi_pm_device_sleep_wake(bus->bridge, enable);
-}
-
-static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
-{
-	if (acpi_pci_can_wakeup(dev))
-		return acpi_pm_device_sleep_wake(&dev->dev, enable);
-
-	acpi_pci_propagate_wakeup_enable(dev->bus, enable);
-	return 0;
-}
-
-static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
-{
-	while (bus->parent) {
-		struct pci_dev *bridge = bus->self;
-
-		if (bridge->pme_interrupt)
-			return;
-		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
-			return;
-		bus = bus->parent;
+	if (bus->bridge) {
+		if (acpi_pm_device_can_wakeup(bus->bridge))
+			return acpi_pm_set_device_wakeup(bus->bridge, enable);
 	}
-
-	/* We have reached the root bus. */
-	if (bus->bridge)
-		acpi_pm_device_run_wake(bus->bridge, enable);
+	return 0;
 }
 
-static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
+static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
 {
-	/*
-	 * Per PCI Express Base Specification Revision 2.0 section
-	 * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
-	 * waking up to power on the main link even if there is PME
-	 * support for D3cold
-	 */
-	if (dev->pme_interrupt && !dev->runtime_d3cold)
-		return 0;
+	if (acpi_pm_device_can_wakeup(&dev->dev))
+		return acpi_pm_set_device_wakeup(&dev->dev, enable);
 
-	if (!acpi_pm_device_run_wake(&dev->dev, enable))
-		return 0;
-
-	acpi_pci_propagate_run_wake(dev->bus, enable);
-	return 0;
+	return acpi_pci_propagate_wakeup(dev->bus, enable);
 }
 
 static bool acpi_pci_need_resume(struct pci_dev *dev)
@@ -657,8 +615,7 @@ static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
 	.set_state = acpi_pci_set_power_state,
 	.get_state = acpi_pci_get_power_state,
 	.choose_state = acpi_pci_choose_state,
-	.sleep_wake = acpi_pci_sleep_wake,
-	.run_wake = acpi_pci_run_wake,
+	.set_wakeup = acpi_pci_wakeup,
 	.need_resume = acpi_pci_need_resume,
 };
 
@@ -680,7 +637,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
 	if (!pci_is_root_bus(bus))
 		return;
 
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
 				RESET_DELAY_DSM, NULL);
 	if (!obj)
 		return;
@@ -745,7 +702,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
 	if (bridge->ignore_reset_delay)
 		pdev->d3cold_delay = 0;
 
-	obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
+	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
 				FUNCTION_DELAY_DSM, NULL);
 	if (!obj)
 		return;
@@ -781,9 +738,7 @@ static void pci_acpi_setup(struct device *dev)
 		return;
 
 	device_set_wakeup_capable(dev, true);
-	acpi_pci_sleep_wake(pci_dev, false);
-	if (adev->wakeup.flags.run_wake)
-		device_set_run_wake(dev, true);
+	acpi_pci_wakeup(pci_dev, false);
 }
 
 static void pci_acpi_cleanup(struct device *dev)
@@ -794,10 +749,8 @@ static void pci_acpi_cleanup(struct device *dev)
 		return;
 
 	pci_acpi_remove_pm_notifier(adev);
-	if (adev->wakeup.flags.valid) {
+	if (adev->wakeup.flags.valid)
 		device_set_wakeup_capable(dev, false);
-		device_set_run_wake(dev, false);
-	}
 }
 
 static bool pci_acpi_bus_match(struct device *dev)
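
One detail of the conversion above worth spelling out: the open-coded 16-byte _DSM UUID becomes a guid_t built with GUID_INIT(), and since the first three GUID fields are stored little-endian, the old byte sequence d0 37 c9 e5 / 53 35 / 7a 4d maps to 0xe5c937d0, 0x3553, 0x4d7a while the trailing eight bytes carry over unchanged. The short program below checks that correspondence on a little-endian machine; struct demo_guid is an illustrative stand-in for the kernel's guid_t, not the real type.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_guid {
	uint32_t a;	/* little-endian in memory */
	uint16_t b;
	uint16_t c;
	uint8_t  d[8];	/* stored as-is */
};

int main(void)
{
	const uint8_t old_bytes[16] = {
		0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
		0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
	};
	struct demo_guid g = {
		0xe5c937d0, 0x3553, 0x4d7a,
		{ 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d }
	};

	/* On a little-endian machine both encodings produce the same bytes. */
	printf("layouts match: %s\n",
	       memcmp(&g, old_bytes, sizeof(old_bytes)) == 0 ? "yes" : "no");
	return 0;
}
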
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 192e7b6..df4aead 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -96,7 +96,7 @@ static void pci_free_dynids(struct pci_driver *drv)
  *
  * Allow PCI IDs to be added to an existing driver via sysfs.
  */
-static ssize_t store_new_id(struct device_driver *driver, const char *buf,
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
 			    size_t count)
 {
 	struct pci_driver *pdrv = to_pci_driver(driver);
@@ -154,7 +154,7 @@ static ssize_t store_new_id(struct device_driver *driver, const char *buf,
 		return retval;
 	return count;
 }
-static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
+static DRIVER_ATTR_WO(new_id);
 
 /**
  * store_remove_id - remove a PCI device ID from this driver
@@ -164,7 +164,7 @@ static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
  *
  * Removes a dynamic pci device ID to this driver.
  */
-static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
 			       size_t count)
 {
 	struct pci_dynid *dynid, *n;
@@ -198,7 +198,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
 
 	return retval;
 }
-static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
+static DRIVER_ATTR_WO(remove_id);
 
 static struct attribute *pci_drv_attrs[] = {
 	&driver_attr_new_id.attr,
@@ -320,10 +320,19 @@ static long local_pci_probe(void *_ddi)
 	return 0;
 }
 
+static bool pci_physfn_is_probed(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+	return dev->is_virtfn && dev->physfn->is_probed;
+#else
+	return false;
+#endif
+}
+
 static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 			  const struct pci_device_id *id)
 {
-	int error, node;
+	int error, node, cpu;
 	struct drv_dev_and_id ddi = { drv, dev, id };
 
 	/*
@@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
 	 * on the right node.
 	 */
 	node = dev_to_node(&dev->dev);
+	dev->is_probed = 1;
+
+	cpu_hotplug_disable();
 
 	/*
-	 * On NUMA systems, we are likely to call a PF probe function using
-	 * work_on_cpu().  If that probe calls pci_enable_sriov() (which
-	 * adds the VF devices via pci_bus_add_device()), we may re-enter
-	 * this function to call the VF probe function.  Calling
-	 * work_on_cpu() again will cause a lockdep warning.  Since VFs are
-	 * always on the same node as the PF, we can work around this by
-	 * avoiding work_on_cpu() when we're already on the correct node.
-	 *
-	 * Preemption is enabled, so it's theoretically unsafe to use
-	 * numa_node_id(), but even if we run the probe function on the
-	 * wrong node, it should be functionally correct.
+	 * Prevent nesting work_on_cpu() for the case where a Virtual Function
+	 * device is probed from work_on_cpu() of the Physical device.
 	 */
-	if (node >= 0 && node != numa_node_id()) {
-		int cpu;
-
-		get_online_cpus();
+	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
+	    pci_physfn_is_probed(dev))
+		cpu = nr_cpu_ids;
+	else
 		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
-		if (cpu < nr_cpu_ids)
-			error = work_on_cpu(cpu, local_pci_probe, &ddi);
-		else
-			error = local_pci_probe(&ddi);
-		put_online_cpus();
-	} else
+
+	if (cpu < nr_cpu_ids)
+		error = work_on_cpu(cpu, local_pci_probe, &ddi);
+	else
 		error = local_pci_probe(&ddi);
 
+	dev->is_probed = 0;
+	cpu_hotplug_enable();
 	return error;
 }
 
@@ -1216,7 +1219,7 @@ static int pci_pm_runtime_resume(struct device *dev)
 
 	pci_restore_standard_config(pci_dev);
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
-	__pci_enable_wake(pci_dev, PCI_D0, true, false);
+	pci_enable_wake(pci_dev, PCI_D0, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
 	rc = pm->runtime_resume(dev);
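
In pci_call_probe() above, get_online_cpus() is replaced by cpu_hotplug_disable() and the CPU choice collapses into a single expression: if the device has no usable NUMA node, or a VF is being probed from its PF's own work_on_cpu() context (tracked by the new is_probed flag), cpu is forced to nr_cpu_ids and the probe runs locally, which avoids nesting work_on_cpu(). Here is a stripped-down sketch of that decision pulled out of the kernel context; nr_cpu_ids, node_online() and first_cpu_of_node() are simplified stand-ins, not the kernel implementations.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NUMNODES	4
static const int nr_cpu_ids = 8;

static bool node_online(int node)        { return node == 0; }
static int  first_cpu_of_node(int node)  { return node * 2; }

static int pick_probe_cpu(int node, bool physfn_is_probed)
{
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
	    physfn_is_probed)
		return nr_cpu_ids;		/* run the probe locally */

	return first_cpu_of_node(node);		/* run it on the device's node */
}

int main(void)
{
	printf("PF on node 0   -> cpu %d\n", pick_probe_cpu(0, false));
	printf("VF under probe -> cpu %d\n", pick_probe_cpu(0, true));
	printf("no NUMA node   -> cpu %d\n", pick_probe_cpu(-1, false));
	return 0;
}
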
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 5135737..2d8db3e 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -172,7 +172,7 @@ static int dsm_get_label(struct device *dev, char *buf,
 	if (!handle)
 		return -1;
 
-	obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
 				DEVICE_LABEL_DSM, NULL);
 	if (!obj)
 		return -1;
@@ -212,7 +212,7 @@ static bool device_has_dsm(struct device *dev)
 	if (!handle)
 		return false;
 
-	return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+	return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
 				1 << DEVICE_LABEL_DSM);
 }
 
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 1c4af72..a4ac940 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -39,12 +39,7 @@ static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
 	return PCI_D3hot;
 }
 
-static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
-{
-	return 0;
-}
-
-static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+static int mid_pci_wakeup(struct pci_dev *dev, bool enable)
 {
 	return 0;
 }
@@ -59,8 +54,7 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
 	.set_state	= mid_pci_set_power_state,
 	.get_state	= mid_pci_get_power_state,
 	.choose_state	= mid_pci_choose_state,
-	.sleep_wake	= mid_pci_sleep_wake,
-	.run_wake	= mid_pci_run_wake,
+	.set_wakeup	= mid_pci_wakeup,
 	.need_resume	= mid_pci_need_resume,
 };
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 563901c..0b5302a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -574,8 +574,7 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
 {
 	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
-	    !ops->choose_state  || !ops->sleep_wake || !ops->run_wake  ||
-	    !ops->need_resume)
+	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
 		return -EINVAL;
 	pci_platform_pm = ops;
 	return 0;
@@ -603,16 +602,10 @@ static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 }
 
-static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
+static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
 {
 	return pci_platform_pm ?
-			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
-}
-
-static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
-{
-	return pci_platform_pm ?
-			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
 }
 
 static inline bool platform_pci_need_resume(struct pci_dev *dev)
@@ -1805,6 +1798,23 @@ static void __pci_pme_active(struct pci_dev *dev, bool enable)
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 }
 
+static void pci_pme_restore(struct pci_dev *dev)
+{
+	u16 pmcsr;
+
+	if (!dev->pme_support)
+		return;
+
+	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+	if (dev->wakeup_prepared) {
+		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+	} else {
+		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+		pmcsr |= PCI_PM_CTRL_PME_STATUS;
+	}
+	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
 /**
  * pci_pme_active - enable or disable PCI device's PME# function
  * @dev: PCI device to handle.
@@ -1872,10 +1882,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 EXPORT_SYMBOL(pci_pme_active);
 
 /**
- * __pci_enable_wake - enable PCI device as wakeup event source
+ * pci_enable_wake - enable PCI device as wakeup event source
  * @dev: PCI device affected
  * @state: PCI state from which device will issue wakeup events
- * @runtime: True if the events are to be generated at run time
  * @enable: True to enable event generation; false to disable
  *
  * This enables the device as a wakeup event source, or disables it.
@@ -1891,17 +1900,18 @@ EXPORT_SYMBOL(pci_pme_active);
  * Error code depending on the platform is returned if both the platform and
  * the native mechanism fail to enable the generation of wake-up events
  */
-int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
-		      bool runtime, bool enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 {
 	int ret = 0;
 
-	if (enable && !runtime && !device_may_wakeup(&dev->dev))
-		return -EINVAL;
-
-	/* Don't do the same thing twice in a row for one device. */
-	if (!!enable == !!dev->wakeup_prepared)
+	/*
+	 * Don't do the same thing twice in a row for one device, but restore
+	 * PME Enable in case it has been updated by config space restoration.
+	 */
+	if (!!enable == !!dev->wakeup_prepared) {
+		pci_pme_restore(dev);
 		return 0;
+	}
 
 	/*
 	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
@@ -1916,24 +1926,20 @@ int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 			pci_pme_active(dev, true);
 		else
 			ret = 1;
-		error = runtime ? platform_pci_run_wake(dev, true) :
-					platform_pci_sleep_wake(dev, true);
+		error = platform_pci_set_wakeup(dev, true);
 		if (ret)
 			ret = error;
 		if (!ret)
 			dev->wakeup_prepared = true;
 	} else {
-		if (runtime)
-			platform_pci_run_wake(dev, false);
-		else
-			platform_pci_sleep_wake(dev, false);
+		platform_pci_set_wakeup(dev, false);
 		pci_pme_active(dev, false);
 		dev->wakeup_prepared = false;
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(__pci_enable_wake);
+EXPORT_SYMBOL(pci_enable_wake);
 
 /**
  * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@@ -2075,12 +2081,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
 
 	dev->runtime_d3cold = target_state == PCI_D3cold;
 
-	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+	pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
 
 	error = pci_set_power_state(dev, target_state);
 
 	if (error) {
-		__pci_enable_wake(dev, target_state, true, false);
+		pci_enable_wake(dev, target_state, false);
 		dev->runtime_d3cold = false;
 	}
 
@@ -2099,7 +2105,7 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 {
 	struct pci_bus *bus = dev->bus;
 
-	if (device_run_wake(&dev->dev))
+	if (device_can_wakeup(&dev->dev))
 		return true;
 
 	if (!dev->pme_support)
@@ -2112,7 +2118,7 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 	while (bus->parent) {
 		struct pci_dev *bridge = bus->self;
 
-		if (device_run_wake(&bridge->dev))
+		if (device_can_wakeup(&bridge->dev))
 			return true;
 
 		bus = bus->parent;
@@ -2120,7 +2126,7 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 
 	/* We have reached the root bus. */
 	if (bus->bridge)
-		return device_run_wake(bus->bridge);
+		return device_can_wakeup(bus->bridge);
 
 	return false;
 }
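
With sleep_wake and run_wake folded into a single set_wakeup callback, __pci_enable_wake() drops its runtime argument and is exported as pci_enable_wake(dev, state, enable), and the new pci_pme_restore() re-syncs the PME Enable bit even when a caller requests a wakeup state that is already programmed, since a config-space restore may have cleared the bit behind the driver's back. Below is a small userspace sketch of that guard-plus-restore flow; demo_dev, pme_restore() and enable_wake() are invented names for illustration only.

#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	bool wakeup_prepared;	/* software bookkeeping */
	bool pme_enable_bit;	/* what the "hardware" currently has */
};

static void pme_restore(struct demo_dev *dev)
{
	/* re-apply the bookkeeping state to the hardware bit */
	dev->pme_enable_bit = dev->wakeup_prepared;
}

static int enable_wake(struct demo_dev *dev, bool enable)
{
	if (enable == dev->wakeup_prepared) {
		pme_restore(dev);	/* nothing new, but re-sync hardware */
		return 0;
	}

	dev->wakeup_prepared = enable;
	dev->pme_enable_bit = enable;
	return 0;
}

int main(void)
{
	/* simulate a config-space restore having cleared the enable bit */
	struct demo_dev dev = { .wakeup_prepared = true, .pme_enable_bit = false };

	enable_wake(&dev, true);	/* already "enabled", but bit gets restored */
	printf("PME enable bit: %d\n", dev.pme_enable_bit);
	return 0;
}
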
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f8113e5..240b2c0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -47,11 +47,7 @@ int pci_probe_reset_function(struct pci_dev *dev);
  *                platform; to be used during system-wide transitions from a
  *                sleeping state to the working state and vice versa
  *
- * @sleep_wake: enables/disables the system wake up capability of given device
- *
- * @run_wake: enables/disables the platform to generate run-time wake-up events
- *		for given device (the device's wake-up capability has to be
- *		enabled by @sleep_wake for this feature to work)
+ * @set_wakeup: enables/disables wakeup capability for the device
  *
  * @need_resume: returns 'true' if the given device (which is currently
  *		suspended) needs to be resumed to be configured for system
@@ -65,8 +61,7 @@ struct pci_platform_pm_ops {
 	int (*set_state)(struct pci_dev *dev, pci_power_t state);
 	pci_power_t (*get_state)(struct pci_dev *dev);
 	pci_power_t (*choose_state)(struct pci_dev *dev);
-	int (*sleep_wake)(struct pci_dev *dev, bool enable);
-	int (*run_wake)(struct pci_dev *dev, bool enable);
+	int (*set_wakeup)(struct pci_dev *dev, bool enable);
 	bool (*need_resume)(struct pci_dev *dev);
 };
 
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2dd1c68..80e58d2 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -294,31 +294,29 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
 }
 
 /**
- * pcie_pme_set_native - Set the PME interrupt flag for given device.
+ * pcie_pme_can_wakeup - Set the wakeup capability flag.
  * @dev: PCI device to handle.
  * @ign: Ignored.
  */
-static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
+static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
 {
-	device_set_run_wake(&dev->dev, true);
-	dev->pme_interrupt = true;
+	device_set_wakeup_capable(&dev->dev, true);
 	return 0;
 }
 
 /**
- * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port.
+ * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
  * @port: PCIe root port or event collector to handle.
  *
  * For each device below given root port, including the port itself (or for each
  * root complex integrated endpoint if @port is a root complex event collector)
- * set the flag indicating that it can signal run-time wake-up events via PCIe
- * PME interrupts.
+ * set the flag indicating that it can signal run-time wake-up events.
  */
 static void pcie_pme_mark_devices(struct pci_dev *port)
 {
-	pcie_pme_set_native(port, NULL);
+	pcie_pme_can_wakeup(port, NULL);
 	if (port->subordinate)
-		pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
+		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
 }
 
 /**
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 69b5e81..a9258f6 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -95,7 +95,7 @@ struct pcmcia_dynid {
  * and causes the driver to probe for all devices again.
  */
 static ssize_t
-pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count)
+new_id_store(struct device_driver *driver, const char *buf, size_t count)
 {
 	struct pcmcia_dynid *dynid;
 	struct pcmcia_driver *pdrv = to_pcmcia_drv(driver);
@@ -133,7 +133,7 @@ pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count)
 		return retval;
 	return count;
 }
-static DRIVER_ATTR(new_id, S_IWUSR, NULL, pcmcia_store_new_id);
+static DRIVER_ATTR_WO(new_id);
 
 static void
 pcmcia_free_dynids(struct pcmcia_driver *drv)
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 34c862f..0a9b787 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu)
 		return -EINVAL;
 
 	gsi = gicc->performance_interrupt;
+
+	/*
+	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
+	 * have an interrupt. QEMU advertises this by using a GSI of zero,
+	 * which is not known to be valid on any hardware despite being
+	 * valid per the spec. Take the pragmatic approach and reject a
+	 * GSI of zero for now.
+	 */
+	if (!gsi)
+		return 0;
+
 	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
 		trigger = ACPI_EDGE_SENSITIVE;
 	else
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index afaf7b6..c1807d4 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -15,73 +15,6 @@
 	  phy users can obtain reference to the PHY. All the users of this
 	  framework should select this config.
 
-config PHY_BCM_NS_USB2
-	tristate "Broadcom Northstar USB 2.0 PHY Driver"
-	depends on ARCH_BCM_IPROC || COMPILE_TEST
-	depends on HAS_IOMEM && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support Broadcom USB 2.0 PHY connected to the USB
-	  controller on Northstar family.
-
-config PHY_BCM_NS_USB3
-	tristate "Broadcom Northstar USB 3.0 PHY Driver"
-	depends on ARCH_BCM_IPROC || COMPILE_TEST
-	depends on HAS_IOMEM && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support Broadcom USB 3.0 PHY connected to the USB
-	  controller on Northstar family.
-
-config PHY_BERLIN_USB
-	tristate "Marvell Berlin USB PHY Driver"
-	depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the USB PHY on Marvell Berlin SoCs.
-
-config PHY_BERLIN_SATA
-	tristate "Marvell Berlin SATA PHY driver"
-	depends on ARCH_BERLIN && HAS_IOMEM && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the SATA PHY on Marvell Berlin SoCs.
-
-config ARMADA375_USBCLUSTER_PHY
-	def_bool y
-	depends on MACH_ARMADA_375 || COMPILE_TEST
-	depends on OF && HAS_IOMEM
-	select GENERIC_PHY
-
-config PHY_DA8XX_USB
-	tristate "TI DA8xx USB PHY Driver"
-	depends on ARCH_DAVINCI_DA8XX
-	select GENERIC_PHY
-	select MFD_SYSCON
-	help
-	  Enable this to support the USB PHY on DA8xx SoCs.
-
-	  This driver controls both the USB 1.1 PHY and the USB 2.0 PHY.
-
-config PHY_DM816X_USB
-	tristate "TI dm816x USB PHY driver"
-	depends on ARCH_OMAP2PLUS
-	depends on USB_SUPPORT
-	select GENERIC_PHY
-	select USB_PHY
-	help
-	  Enable this for dm816x USB to work.
-
-config PHY_EXYNOS_MIPI_VIDEO
-	tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
-	depends on HAS_IOMEM
-	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
-	select GENERIC_PHY
-	default y if ARCH_S5PV210 || ARCH_EXYNOS
-	help
-	  Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
-	  and EXYNOS SoCs.
-
 config PHY_LPC18XX_USB_OTG
 	tristate "NXP LPC18xx/43xx SoC USB OTG PHY driver"
 	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
@@ -93,146 +26,6 @@
 	  This driver is need for USB0 support on LPC18xx/43xx and takes
 	  care of enabling and clock setup.
 
-config PHY_PXA_28NM_HSIC
-	tristate "Marvell USB HSIC 28nm PHY Driver"
-	depends on HAS_IOMEM
-	select GENERIC_PHY
-	help
-	  Enable this to support Marvell USB HSIC PHY driver for Marvell
-	  SoC. This driver will do the PHY initialization and shutdown.
-	  The PHY driver will be used by Marvell ehci driver.
-
-	  To compile this driver as a module, choose M here.
-
-config PHY_PXA_28NM_USB2
-	tristate "Marvell USB 2.0 28nm PHY Driver"
-	depends on HAS_IOMEM
-	select GENERIC_PHY
-	help
-	  Enable this to support Marvell USB 2.0 PHY driver for Marvell
-	  SoC. This driver will do the PHY initialization and shutdown.
-	  The PHY driver will be used by Marvell udc/ehci/otg driver.
-
-	  To compile this driver as a module, choose M here.
-
-config PHY_MVEBU_SATA
-	def_bool y
-	depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
-	depends on OF
-	select GENERIC_PHY
-
-config PHY_MIPHY28LP
-	tristate "STMicroelectronics MIPHY28LP PHY driver for STiH407"
-	depends on ARCH_STI
-	select GENERIC_PHY
-	help
-	  Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
-	  that is part of STMicroelectronics STiH407 SoC.
-
-config PHY_RCAR_GEN2
-	tristate "Renesas R-Car generation 2 USB PHY driver"
-	depends on ARCH_RENESAS
-	depends on GENERIC_PHY
-	help
-	  Support for USB PHY found on Renesas R-Car generation 2 SoCs.
-
-config PHY_RCAR_GEN3_USB2
-	tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
-	depends on ARCH_RENESAS
-	depends on EXTCON
-	select GENERIC_PHY
-	help
-	  Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
-
-config OMAP_CONTROL_PHY
-	tristate "OMAP CONTROL PHY Driver"
-	depends on ARCH_OMAP2PLUS || COMPILE_TEST
-	help
-	  Enable this to add support for the PHY part present in the control
-	  module. This driver has API to power on the USB2 PHY and to write to
-	  the mailbox. The mailbox is present only in omap4 and the register to
-	  power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
-	  additional register to power on USB3 PHY/SATA PHY/PCIE PHY
-	  (PIPE3 PHY).
-
-config OMAP_USB2
-	tristate "OMAP USB2 PHY Driver"
-	depends on ARCH_OMAP2PLUS
-	depends on USB_SUPPORT
-	select GENERIC_PHY
-	select USB_PHY
-	select OMAP_CONTROL_PHY
-	depends on OMAP_OCP2SCP
-	help
-	  Enable this to support the transceiver that is part of SOC. This
-	  driver takes care of all the PHY functionality apart from comparator.
-	  The USB OTG controller communicates with the comparator using this
-	  driver.
-
-config TI_PIPE3
-	tristate "TI PIPE3 PHY Driver"
-	depends on ARCH_OMAP2PLUS || COMPILE_TEST
-	select GENERIC_PHY
-	select OMAP_CONTROL_PHY
-	depends on OMAP_OCP2SCP
-	help
-	  Enable this to support the PIPE3 PHY that is part of TI SOCs. This
-	  driver takes care of all the PHY functionality apart from comparator.
-	  This driver interacts with the "OMAP Control PHY Driver" to power
-	  on/off the PHY.
-
-config TWL4030_USB
-	tristate "TWL4030 USB Transceiver Driver"
-	depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
-	depends on USB_SUPPORT
-	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't 'y'
-	select GENERIC_PHY
-	select USB_PHY
-	help
-	  Enable this to support the USB OTG transceiver on TWL4030
-	  family chips (including the TWL5030 and TPS659x0 devices).
-	  This transceiver supports high and full speed devices plus,
-	  in host mode, low speed.
-
-config PHY_EXYNOS_DP_VIDEO
-	tristate "EXYNOS SoC series Display Port PHY driver"
-	depends on OF
-	depends on ARCH_EXYNOS || COMPILE_TEST
-	default ARCH_EXYNOS
-	select GENERIC_PHY
-	help
-	  Support for Display Port PHY found on Samsung EXYNOS SoCs.
-
-config BCM_KONA_USB2_PHY
-	tristate "Broadcom Kona USB2 PHY Driver"
-	depends on HAS_IOMEM
-	select GENERIC_PHY
-	help
-	  Enable this to support the Broadcom Kona USB 2.0 PHY.
-
-config PHY_EXYNOS5250_SATA
-	tristate "Exynos5250 Sata SerDes/PHY driver"
-	depends on SOC_EXYNOS5250
-	depends on HAS_IOMEM
-	depends on OF
-	select GENERIC_PHY
-	select I2C
-	select I2C_S3C2410
-	select MFD_SYSCON
-	help
-	  Enable this to support SATA SerDes/Phy found on Samsung's
-	  Exynos5250 based SoCs.This SerDes/Phy supports SATA 1.5 Gb/s,
-	  SATA 3.0 Gb/s, SATA 6.0 Gb/s speeds. It supports one SATA host
-	  port to accept one SATA device.
-
-config PHY_HIX5HD2_SATA
-	tristate "HIX5HD2 SATA PHY Driver"
-	depends on ARCH_HIX5HD2 && OF && HAS_IOMEM
-	select GENERIC_PHY
-	select MFD_SYSCON
-	help
-	  Support for SATA PHY on Hisilicon hix5hd2 Soc.
-
 config PHY_MT65XX_USB3
 	tristate "Mediatek USB3.0 PHY Driver"
 	depends on ARCH_MEDIATEK && OF
@@ -241,104 +34,6 @@
 	  Say 'Y' here to add support for Mediatek USB3.0 PHY driver,
 	  it supports multiple usb2.0 and usb3.0 ports.
 
-config PHY_HI6220_USB
-	tristate "hi6220 USB PHY support"
-	depends on (ARCH_HISI && ARM64) || COMPILE_TEST
-	select GENERIC_PHY
-	select MFD_SYSCON
-	help
-	  Enable this to support the HISILICON HI6220 USB PHY.
-
-	  To compile this driver as a module, choose M here.
-
-config PHY_SUN4I_USB
-	tristate "Allwinner sunxi SoC USB PHY driver"
-	depends on ARCH_SUNXI && HAS_IOMEM && OF
-	depends on RESET_CONTROLLER
-	depends on EXTCON
-	depends on POWER_SUPPLY
-	depends on USB_SUPPORT
-	select GENERIC_PHY
-	select USB_COMMON
-	help
-	  Enable this to support the transceiver that is part of Allwinner
-	  sunxi SoCs.
-
-	  This driver controls the entire USB PHY block, both the USB OTG
-	  parts, as well as the 2 regular USB 2 host PHYs.
-
-config PHY_SUN9I_USB
-	tristate "Allwinner sun9i SoC USB PHY driver"
-	depends on ARCH_SUNXI && HAS_IOMEM && OF
-	depends on RESET_CONTROLLER
-	depends on USB_SUPPORT
-	select USB_COMMON
-	select GENERIC_PHY
-	help
-	  Enable this to support the transceiver that is part of Allwinner
-	  sun9i SoCs.
-
-	  This driver controls each individual USB 2 host PHY.
-
-config PHY_SAMSUNG_USB2
-	tristate "Samsung USB 2.0 PHY driver"
-	depends on HAS_IOMEM
-	depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
-	select GENERIC_PHY
-	select MFD_SYSCON
-	default ARCH_EXYNOS
-	help
-	  Enable this to support the Samsung USB 2.0 PHY driver for Samsung
-	  SoCs. This driver provides the interface for USB 2.0 PHY. Support
-	  for particular PHYs will be enabled based on the SoC type in addition
-	  to this driver.
-
-config PHY_S5PV210_USB2
-	bool "Support for S5PV210"
-	depends on PHY_SAMSUNG_USB2
-	depends on ARCH_S5PV210
-	help
-	  Enable USB PHY support for S5PV210. This option requires that Samsung
-	  USB 2.0 PHY driver is enabled and means that support for this
-	  particular SoC is compiled in the driver. In case of S5PV210 two phys
-	  are available - device and host.
-
-config PHY_EXYNOS4210_USB2
-	bool
-	depends on PHY_SAMSUNG_USB2
-	default CPU_EXYNOS4210
-
-config PHY_EXYNOS4X12_USB2
-	bool
-	depends on PHY_SAMSUNG_USB2
-	default SOC_EXYNOS3250 || SOC_EXYNOS4212 || SOC_EXYNOS4412
-
-config PHY_EXYNOS5250_USB2
-	bool
-	depends on PHY_SAMSUNG_USB2
-	default SOC_EXYNOS5250 || SOC_EXYNOS5420
-
-config PHY_EXYNOS5_USBDRD
-	tristate "Exynos5 SoC series USB DRD PHY driver"
-	depends on ARCH_EXYNOS && OF
-	depends on HAS_IOMEM
-	depends on USB_DWC3_EXYNOS
-	select GENERIC_PHY
-	select MFD_SYSCON
-	default y
-	help
-	  Enable USB DRD PHY support for Exynos 5 SoC series.
-	  This driver provides PHY interface for USB 3.0 DRD controller
-	  present on Exynos5 SoC series.
-
-config PHY_EXYNOS_PCIE
-	bool "Exynos PCIe PHY driver"
-	depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
-	select GENERIC_PHY
-	help
-	  Enable PCIe PHY support for Exynos SoC series.
-	  This driver provides PHY interface for Exynos PCIe controller.
-
 config PHY_PISTACHIO_USB
 	tristate "IMG Pistachio USB2.0 PHY driver"
 	depends on MACH_PISTACHIO
@@ -346,83 +41,6 @@
 	help
 	  Enable this to support the USB2.0 PHY on the IMG Pistachio SoC.
 
-config PHY_QCOM_APQ8064_SATA
-	tristate "Qualcomm APQ8064 SATA SerDes/PHY driver"
-	depends on ARCH_QCOM
-	depends on HAS_IOMEM
-	depends on OF
-	select GENERIC_PHY
-
-config PHY_QCOM_IPQ806X_SATA
-	tristate "Qualcomm IPQ806x SATA SerDes/PHY driver"
-	depends on ARCH_QCOM
-	depends on HAS_IOMEM
-	depends on OF
-	select GENERIC_PHY
-
-config PHY_ROCKCHIP_USB
-	tristate "Rockchip USB2 PHY Driver"
-	depends on ARCH_ROCKCHIP && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the Rockchip USB 2.0 PHY.
-
-config PHY_ROCKCHIP_INNO_USB2
-	tristate "Rockchip INNO USB2PHY Driver"
-	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
-	depends on COMMON_CLK
-	depends on EXTCON
-	depends on USB_SUPPORT
-	select GENERIC_PHY
-	select USB_COMMON
-	help
-	  Support for Rockchip USB2.0 PHY with Innosilicon IP block.
-
-config PHY_ROCKCHIP_EMMC
-	tristate "Rockchip EMMC PHY Driver"
-	depends on ARCH_ROCKCHIP && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the Rockchip EMMC PHY.
-
-config PHY_ROCKCHIP_DP
-	tristate "Rockchip Display Port PHY Driver"
-	depends on ARCH_ROCKCHIP && OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the Rockchip Display Port PHY.
-
-config PHY_ROCKCHIP_PCIE
-	tristate "Rockchip PCIe PHY Driver"
-	depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
-	select GENERIC_PHY
-	select MFD_SYSCON
-	help
-	  Enable this to support the Rockchip PCIe PHY.
-
-config PHY_ROCKCHIP_TYPEC
-	tristate "Rockchip TYPEC PHY Driver"
-	depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
-	select EXTCON
-	select GENERIC_PHY
-	select RESET_CONTROLLER
-	help
-	  Enable this to support the Rockchip USB TYPEC PHY.
-
-config PHY_ST_SPEAR1310_MIPHY
-	tristate "ST SPEAR1310-MIPHY driver"
-	select GENERIC_PHY
-	depends on MACH_SPEAR1310 || COMPILE_TEST
-	help
-	  Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
-
-config PHY_ST_SPEAR1340_MIPHY
-	tristate "ST SPEAR1340-MIPHY driver"
-	select GENERIC_PHY
-	depends on MACH_SPEAR1340 || COMPILE_TEST
-	help
-	  Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
-
 config PHY_XGENE
 	tristate "APM X-Gene 15Gbps PHY support"
 	depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
@@ -430,104 +48,18 @@
 	help
 	  This option enables support for APM X-Gene SoC multi-purpose PHY.
 
-config PHY_STIH407_USB
-	tristate "STMicroelectronics USB2 picoPHY driver for STiH407 family"
-	depends on RESET_CONTROLLER
-	depends on ARCH_STI || COMPILE_TEST
-	select GENERIC_PHY
-	help
-	  Enable this support to enable the picoPHY device used by USB2
-	  and USB3 controllers on STMicroelectronics STiH407 SoC families.
-
-config PHY_QCOM_QMP
-	tristate "Qualcomm QMP PHY Driver"
-	depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
-	select GENERIC_PHY
-	help
-	  Enable this to support the QMP PHY transceiver that is used
-	  with controllers such as PCIe, UFS, and USB on Qualcomm chips.
-
-config PHY_QCOM_QUSB2
-	tristate "Qualcomm QUSB2 PHY Driver"
-	depends on OF && (ARCH_QCOM || COMPILE_TEST)
-	depends on NVMEM || !NVMEM
-	select GENERIC_PHY
-	help
-	  Enable this to support the HighSpeed QUSB2 PHY transceiver for USB
-	  controllers on Qualcomm chips. This driver supports the high-speed
-	  PHY which is usually paired with either the ChipIdea or Synopsys DWC3
-	  USB IPs on MSM SOCs.
-
-config PHY_QCOM_UFS
-	tristate "Qualcomm UFS PHY driver"
-	depends on OF && ARCH_QCOM
-	select GENERIC_PHY
-	help
-	  Support for UFS PHY on QCOM chipsets.
-
-config PHY_QCOM_USB_HS
-	tristate "Qualcomm USB HS PHY module"
-	depends on USB_ULPI_BUS
-	depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
-	select GENERIC_PHY
-	help
-	  Support for the USB high-speed ULPI compliant phy on Qualcomm
-	  chipsets.
-
-config PHY_QCOM_USB_HSIC
-	tristate "Qualcomm USB HSIC ULPI PHY module"
-	depends on USB_ULPI_BUS
-	select GENERIC_PHY
-	help
-	  Support for the USB HSIC ULPI compliant PHY on QCOM chipsets.
-
-config PHY_TUSB1210
-	tristate "TI TUSB1210 ULPI PHY module"
-	depends on USB_ULPI_BUS
-	select GENERIC_PHY
-	help
-	  Support for TI TUSB1210 USB ULPI PHY.
-
-config PHY_BRCM_SATA
-	tristate "Broadcom SATA PHY driver"
-	depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
-	depends on OF
-	select GENERIC_PHY
-	default ARCH_BCM_IPROC
-	help
-	  Enable this to support the Broadcom SATA PHY.
-	  If unsure, say N.
-
-config PHY_CYGNUS_PCIE
-	tristate "Broadcom Cygnus PCIe PHY driver"
-	depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST)
-	select GENERIC_PHY
-	default ARCH_BCM_CYGNUS
-	help
-	  Enable this to support the Broadcom Cygnus PCIe PHY.
-	  If unsure, say N.
-
+source "drivers/phy/allwinner/Kconfig"
+source "drivers/phy/amlogic/Kconfig"
+source "drivers/phy/broadcom/Kconfig"
+source "drivers/phy/hisilicon/Kconfig"
+source "drivers/phy/marvell/Kconfig"
+source "drivers/phy/motorola/Kconfig"
+source "drivers/phy/qualcomm/Kconfig"
+source "drivers/phy/renesas/Kconfig"
+source "drivers/phy/rockchip/Kconfig"
+source "drivers/phy/samsung/Kconfig"
+source "drivers/phy/st/Kconfig"
 source "drivers/phy/tegra/Kconfig"
-
-config PHY_NS2_PCIE
-	tristate "Broadcom Northstar2 PCIe PHY driver"
-	depends on OF && MDIO_BUS_MUX_BCM_IPROC
-	select GENERIC_PHY
-	default ARCH_BCM_IPROC
-	help
-	  Enable this to support the Broadcom Northstar2 PCIe PHY.
-	  If unsure, say N.
-
-config PHY_MESON8B_USB2
-	tristate "Meson8b and GXBB USB2 PHY driver"
-	default ARCH_MESON
-	depends on OF && (ARCH_MESON || COMPILE_TEST)
-	depends on USB_SUPPORT
-	select USB_COMMON
-	select GENERIC_PHY
-	help
-	  Enable this to support the Meson USB2 PHYs found in Meson8b
-	  and GXBB SoCs.
-	  If unsure, say N.
+source "drivers/phy/ti/Kconfig"
 
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index f8047b4..f252201 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -3,64 +3,21 @@
 #
 
 obj-$(CONFIG_GENERIC_PHY)		+= phy-core.o
-obj-$(CONFIG_PHY_BCM_NS_USB2)		+= phy-bcm-ns-usb2.o
-obj-$(CONFIG_PHY_BCM_NS_USB3)		+= phy-bcm-ns-usb3.o
-obj-$(CONFIG_PHY_BERLIN_USB)		+= phy-berlin-usb.o
-obj-$(CONFIG_PHY_BERLIN_SATA)		+= phy-berlin-sata.o
-obj-$(CONFIG_PHY_DA8XX_USB)		+= phy-da8xx-usb.o
-obj-$(CONFIG_PHY_DM816X_USB)		+= phy-dm816x-usb.o
-obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY)	+= phy-armada375-usb2.o
-obj-$(CONFIG_BCM_KONA_USB2_PHY)		+= phy-bcm-kona-usb2.o
-obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO)	+= phy-exynos-dp-video.o
-obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO)	+= phy-exynos-mipi-video.o
 obj-$(CONFIG_PHY_LPC18XX_USB_OTG)	+= phy-lpc18xx-usb-otg.o
-obj-$(CONFIG_PHY_PXA_28NM_USB2)		+= phy-pxa-28nm-usb2.o
-obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
-obj-$(CONFIG_PHY_MVEBU_SATA)		+= phy-mvebu-sata.o
-obj-$(CONFIG_PHY_MIPHY28LP) 		+= phy-miphy28lp.o
-obj-$(CONFIG_PHY_RCAR_GEN2)		+= phy-rcar-gen2.o
-obj-$(CONFIG_PHY_RCAR_GEN3_USB2)	+= phy-rcar-gen3-usb2.o
-obj-$(CONFIG_OMAP_CONTROL_PHY)		+= phy-omap-control.o
-obj-$(CONFIG_OMAP_USB2)			+= phy-omap-usb2.o
-obj-$(CONFIG_TI_PIPE3)			+= phy-ti-pipe3.o
-obj-$(CONFIG_TWL4030_USB)		+= phy-twl4030-usb.o
-obj-$(CONFIG_PHY_EXYNOS5250_SATA)	+= phy-exynos5250-sata.o
-obj-$(CONFIG_PHY_HIX5HD2_SATA)		+= phy-hix5hd2-sata.o
-obj-$(CONFIG_PHY_HI6220_USB)		+= phy-hi6220-usb.o
 obj-$(CONFIG_PHY_MT65XX_USB3)		+= phy-mt65xx-usb3.o
-obj-$(CONFIG_PHY_SUN4I_USB)		+= phy-sun4i-usb.o
-obj-$(CONFIG_PHY_SUN9I_USB)		+= phy-sun9i-usb.o
-obj-$(CONFIG_PHY_SAMSUNG_USB2)		+= phy-exynos-usb2.o
-phy-exynos-usb2-y			+= phy-samsung-usb2.o
-phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2)	+= phy-exynos4210-usb2.o
-phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2)	+= phy-exynos4x12-usb2.o
-phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2)	+= phy-exynos5250-usb2.o
-phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2)	+= phy-s5pv210-usb2.o
-obj-$(CONFIG_PHY_EXYNOS5_USBDRD)	+= phy-exynos5-usbdrd.o
-obj-$(CONFIG_PHY_EXYNOS_PCIE)	+= phy-exynos-pcie.o
-obj-$(CONFIG_PHY_QCOM_APQ8064_SATA)	+= phy-qcom-apq8064-sata.o
-obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
-obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2)	+= phy-rockchip-inno-usb2.o
-obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
-obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
-obj-$(CONFIG_PHY_ROCKCHIP_DP)		+= phy-rockchip-dp.o
-obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
-obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA)	+= phy-qcom-ipq806x-sata.o
-obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY)	+= phy-spear1310-miphy.o
-obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY)	+= phy-spear1340-miphy.o
 obj-$(CONFIG_PHY_XGENE)			+= phy-xgene.o
-obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
-obj-$(CONFIG_PHY_QCOM_QMP)		+= phy-qcom-qmp.o
-obj-$(CONFIG_PHY_QCOM_QUSB2)		+= phy-qcom-qusb2.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-20nm.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_USB_HS) 		+= phy-qcom-usb-hs.o
-obj-$(CONFIG_PHY_QCOM_USB_HSIC) 	+= phy-qcom-usb-hsic.o
-obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
-obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
-obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PHY_NS2_PCIE)		+= phy-bcm-ns2-pcie.o
-obj-$(CONFIG_PHY_MESON8B_USB2)		+= phy-meson8b-usb2.o
+
+obj-$(CONFIG_ARCH_SUNXI)		+= allwinner/
+obj-$(CONFIG_ARCH_MESON)		+= amlogic/
+obj-$(CONFIG_ARCH_RENESAS)		+= renesas/
+obj-$(CONFIG_ARCH_ROCKCHIP)		+= rockchip/
+obj-$(CONFIG_ARCH_TEGRA)		+= tegra/
+obj-y					+= broadcom/	\
+					   hisilicon/	\
+					   marvell/	\
+					   motorola/	\
+					   qualcomm/	\
+					   samsung/	\
+					   st/		\
+					   ti/
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
new file mode 100644
index 0000000..cdc1e74
--- /dev/null
+++ b/drivers/phy/allwinner/Kconfig
@@ -0,0 +1,31 @@
+#
+# Phy drivers for Allwinner platforms
+#
+config PHY_SUN4I_USB
+	tristate "Allwinner sunxi SoC USB PHY driver"
+	depends on ARCH_SUNXI && HAS_IOMEM && OF
+	depends on RESET_CONTROLLER
+	depends on EXTCON
+	depends on POWER_SUPPLY
+	depends on USB_SUPPORT
+	select GENERIC_PHY
+	select USB_COMMON
+	help
+	  Enable this to support the transceiver that is part of Allwinner
+	  sunxi SoCs.
+
+	  This driver controls the entire USB PHY block, both the USB OTG
+	  parts, as well as the 2 regular USB 2 host PHYs.
+
+config PHY_SUN9I_USB
+	tristate "Allwinner sun9i SoC USB PHY driver"
+	depends on ARCH_SUNXI && HAS_IOMEM && OF
+	depends on RESET_CONTROLLER
+	depends on USB_SUPPORT
+	select USB_COMMON
+	select GENERIC_PHY
+	help
+	  Enable this to support the transceiver that is part of Allwinner
+	  sun9i SoCs.
+
+	  This driver controls each individual USB 2 host PHY.
diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile
new file mode 100644
index 0000000..8605529c
--- /dev/null
+++ b/drivers/phy/allwinner/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PHY_SUN4I_USB)		+= phy-sun4i-usb.o
+obj-$(CONFIG_PHY_SUN9I_USB)		+= phy-sun9i-usb.o
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
similarity index 100%
rename from drivers/phy/phy-sun4i-usb.c
rename to drivers/phy/allwinner/phy-sun4i-usb.c
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/allwinner/phy-sun9i-usb.c
similarity index 100%
rename from drivers/phy/phy-sun9i-usb.c
rename to drivers/phy/allwinner/phy-sun9i-usb.c
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
new file mode 100644
index 0000000..cb8f450
--- /dev/null
+++ b/drivers/phy/amlogic/Kconfig
@@ -0,0 +1,27 @@
+#
+# Phy drivers for Amlogic platforms
+#
+config PHY_MESON8B_USB2
+	tristate "Meson8, Meson8b and GXBB USB2 PHY driver"
+	default ARCH_MESON
+	depends on OF && (ARCH_MESON || COMPILE_TEST)
+	depends on USB_SUPPORT
+	select USB_COMMON
+	select GENERIC_PHY
+	help
+	  Enable this to support the Meson USB2 PHYs found in Meson8,
+	  Meson8b and GXBB SoCs.
+	  If unsure, say N.
+
+config PHY_MESON_GXL_USB2
+	tristate "Meson GXL and GXM USB2 PHY drivers"
+	default ARCH_MESON
+	depends on OF && (ARCH_MESON || COMPILE_TEST)
+	depends on USB_SUPPORT
+	select USB_COMMON
+	select GENERIC_PHY
+	select REGMAP_MMIO
+	help
+	  Enable this to support the Meson USB2 PHYs found in Meson
+	  GXL and GXM SoCs.
+	  If unsure, say N.
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
new file mode 100644
index 0000000..cfdc98715
--- /dev/null
+++ b/drivers/phy/amlogic/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PHY_MESON8B_USB2)		+= phy-meson8b-usb2.o
+obj-$(CONFIG_PHY_MESON_GXL_USB2)	+= phy-meson-gxl-usb2.o
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
new file mode 100644
index 0000000..e90c4ee
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -0,0 +1,273 @@
+/*
+ * Meson GXL and GXM USB2 PHY driver
+ *
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+/* bits [31:27] are read-only */
+#define U2P_R0							0x0
+	#define U2P_R0_BYPASS_SEL				BIT(0)
+	#define U2P_R0_BYPASS_DM_EN				BIT(1)
+	#define U2P_R0_BYPASS_DP_EN				BIT(2)
+	#define U2P_R0_TXBITSTUFF_ENH				BIT(3)
+	#define U2P_R0_TXBITSTUFF_EN				BIT(4)
+	#define U2P_R0_DM_PULLDOWN				BIT(5)
+	#define U2P_R0_DP_PULLDOWN				BIT(6)
+	#define U2P_R0_DP_VBUS_VLD_EXT_SEL			BIT(7)
+	#define U2P_R0_DP_VBUS_VLD_EXT				BIT(8)
+	#define U2P_R0_ADP_PRB_EN				BIT(9)
+	#define U2P_R0_ADP_DISCHARGE				BIT(10)
+	#define U2P_R0_ADP_CHARGE				BIT(11)
+	#define U2P_R0_DRV_VBUS					BIT(12)
+	#define U2P_R0_ID_PULLUP				BIT(13)
+	#define U2P_R0_LOOPBACK_EN_B				BIT(14)
+	#define U2P_R0_OTG_DISABLE				BIT(15)
+	#define U2P_R0_COMMON_ONN				BIT(16)
+	#define U2P_R0_FSEL_MASK				GENMASK(19, 17)
+	#define U2P_R0_REF_CLK_SEL_MASK				GENMASK(21, 20)
+	#define U2P_R0_POWER_ON_RESET				BIT(22)
+	#define U2P_R0_V_ATE_TEST_EN_B_MASK			GENMASK(24, 23)
+	#define U2P_R0_ID_SET_ID_DQ				BIT(25)
+	#define U2P_R0_ATE_RESET				BIT(26)
+	#define U2P_R0_FSV_MINUS				BIT(27)
+	#define U2P_R0_FSV_PLUS					BIT(28)
+	#define U2P_R0_BYPASS_DM_DATA				BIT(29)
+	#define U2P_R0_BYPASS_DP_DATA				BIT(30)
+
+#define U2P_R1							0x4
+	#define U2P_R1_BURN_IN_TEST				BIT(0)
+	#define U2P_R1_ACA_ENABLE				BIT(1)
+	#define U2P_R1_DCD_ENABLE				BIT(2)
+	#define U2P_R1_VDAT_SRC_EN_B				BIT(3)
+	#define U2P_R1_VDAT_DET_EN_B				BIT(4)
+	#define U2P_R1_CHARGES_SEL				BIT(5)
+	#define U2P_R1_TX_PREEMP_PULSE_TUNE			BIT(6)
+	#define U2P_R1_TX_PREEMP_AMP_TUNE_MASK			GENMASK(8, 7)
+	#define U2P_R1_TX_RES_TUNE_MASK				GENMASK(10, 9)
+	#define U2P_R1_TX_RISE_TUNE_MASK			GENMASK(12, 11)
+	#define U2P_R1_TX_VREF_TUNE_MASK			GENMASK(16, 13)
+	#define U2P_R1_TX_FSLS_TUNE_MASK			GENMASK(20, 17)
+	#define U2P_R1_TX_HSXV_TUNE_MASK			GENMASK(22, 21)
+	#define U2P_R1_OTG_TUNE_MASK				GENMASK(25, 23)
+	#define U2P_R1_SQRX_TUNE_MASK				GENMASK(28, 26)
+	#define U2P_R1_COMP_DIS_TUNE_MASK			GENMASK(31, 29)
+
+/* bits [31:14] are read-only */
+#define U2P_R2							0x8
+	#define U2P_R2_DATA_IN_MASK				GENMASK(3, 0)
+	#define U2P_R2_DATA_IN_EN_MASK				GENMASK(7, 4)
+	#define U2P_R2_ADDR_MASK				GENMASK(11, 8)
+	#define U2P_R2_DATA_OUT_SEL				BIT(12)
+	#define U2P_R2_CLK					BIT(13)
+	#define U2P_R2_DATA_OUT_MASK				GENMASK(17, 14)
+	#define U2P_R2_ACA_PIN_RANGE_C				BIT(18)
+	#define U2P_R2_ACA_PIN_RANGE_B				BIT(19)
+	#define U2P_R2_ACA_PIN_RANGE_A				BIT(20)
+	#define U2P_R2_ACA_PIN_GND				BIT(21)
+	#define U2P_R2_ACA_PIN_FLOAT				BIT(22)
+	#define U2P_R2_CHARGE_DETECT				BIT(23)
+	#define U2P_R2_DEVICE_SESSION_VALID			BIT(24)
+	#define U2P_R2_ADP_PROBE				BIT(25)
+	#define U2P_R2_ADP_SENSE				BIT(26)
+	#define U2P_R2_SESSION_END				BIT(27)
+	#define U2P_R2_VBUS_VALID				BIT(28)
+	#define U2P_R2_B_VALID					BIT(29)
+	#define U2P_R2_A_VALID					BIT(30)
+	#define U2P_R2_ID_DIG					BIT(31)
+
+#define U2P_R3							0xc
+
+#define RESET_COMPLETE_TIME				500
+
+struct phy_meson_gxl_usb2_priv {
+	struct regmap		*regmap;
+	enum phy_mode		mode;
+	int			is_enabled;
+};
+
+static const struct regmap_config phy_meson_gxl_usb2_regmap_conf = {
+	.reg_bits = 8,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = U2P_R3,
+};
+
+static int phy_meson_gxl_usb2_reset(struct phy *phy)
+{
+	struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy);
+
+	if (priv->is_enabled) {
+		/* reset the PHY and wait until settings are stabilized */
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_POWER_ON_RESET,
+				   U2P_R0_POWER_ON_RESET);
+		udelay(RESET_COMPLETE_TIME);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_POWER_ON_RESET,
+				   0);
+		udelay(RESET_COMPLETE_TIME);
+	}
+
+	return 0;
+}
+
+static int phy_meson_gxl_usb2_set_mode(struct phy *phy, enum phy_mode mode)
+{
+	struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy);
+
+	switch (mode) {
+	case PHY_MODE_USB_HOST:
+	case PHY_MODE_USB_OTG:
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DM_PULLDOWN,
+				   U2P_R0_DM_PULLDOWN);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DP_PULLDOWN,
+				   U2P_R0_DP_PULLDOWN);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP, 0);
+		break;
+
+	case PHY_MODE_USB_DEVICE:
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DM_PULLDOWN,
+				   0);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DP_PULLDOWN,
+				   0);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP,
+				   U2P_R0_ID_PULLUP);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	phy_meson_gxl_usb2_reset(phy);
+
+	priv->mode = mode;
+
+	return 0;
+}
+
+static int phy_meson_gxl_usb2_power_off(struct phy *phy)
+{
+	struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy);
+
+	priv->is_enabled = 0;
+
+	/* power off the PHY by putting it into reset mode */
+	regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_POWER_ON_RESET,
+			   U2P_R0_POWER_ON_RESET);
+
+	return 0;
+}
+
+static int phy_meson_gxl_usb2_power_on(struct phy *phy)
+{
+	struct phy_meson_gxl_usb2_priv *priv = phy_get_drvdata(phy);
+	int ret;
+
+	priv->is_enabled = 1;
+
+	/* power on the PHY by taking it out of reset mode */
+	regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_POWER_ON_RESET, 0);
+
+	ret = phy_meson_gxl_usb2_set_mode(phy, priv->mode);
+	if (ret) {
+		phy_meson_gxl_usb2_power_off(phy);
+
+		dev_err(&phy->dev, "Failed to initialize PHY with mode %d\n",
+			priv->mode);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct phy_ops phy_meson_gxl_usb2_ops = {
+	.power_on	= phy_meson_gxl_usb2_power_on,
+	.power_off	= phy_meson_gxl_usb2_power_off,
+	.set_mode	= phy_meson_gxl_usb2_set_mode,
+	.reset		= phy_meson_gxl_usb2_reset,
+	.owner		= THIS_MODULE,
+};
+
+static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy_provider *phy_provider;
+	struct resource *res;
+	struct phy_meson_gxl_usb2_priv *priv;
+	struct phy *phy;
+	void __iomem *base;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	switch (of_usb_get_dr_mode_by_phy(dev->of_node, -1)) {
+	case USB_DR_MODE_PERIPHERAL:
+		priv->mode = PHY_MODE_USB_DEVICE;
+		break;
+	case USB_DR_MODE_OTG:
+		priv->mode = PHY_MODE_USB_OTG;
+		break;
+	case USB_DR_MODE_HOST:
+	default:
+		priv->mode = PHY_MODE_USB_HOST;
+		break;
+	}
+
+	priv->regmap = devm_regmap_init_mmio(dev, base,
+					     &phy_meson_gxl_usb2_regmap_conf);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	phy = devm_phy_create(dev, NULL, &phy_meson_gxl_usb2_ops);
+	if (IS_ERR(phy)) {
+		dev_err(dev, "failed to create PHY\n");
+		return PTR_ERR(phy);
+	}
+
+	phy_set_drvdata(phy, priv);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson_gxl_usb2_of_match[] = {
+	{ .compatible = "amlogic,meson-gxl-usb2-phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, phy_meson_gxl_usb2_of_match);
+
+static struct platform_driver phy_meson_gxl_usb2_driver = {
+	.probe	= phy_meson_gxl_usb2_probe,
+	.driver	= {
+		.name		= "phy-meson-gxl-usb2",
+		.of_match_table	= phy_meson_gxl_usb2_of_match,
+	},
+};
+module_platform_driver(phy_meson_gxl_usb2_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson GXL and GXM USB2 PHY driver");
+MODULE_LICENSE("GPL v2");
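
The new GXL/GXM PHY drives its U2P registers exclusively through regmap_update_bits() read-modify-write calls, for example pulling D+/D- down and releasing the ID pull-up in host/OTG mode. The userspace sketch below shows the same update-bits pattern against a plain array standing in for the memory-mapped U2P block; only the U2P_R0 bit names are taken from the driver, everything else is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define U2P_R0			0x0
#define U2P_R0_DM_PULLDOWN	BIT(5)
#define U2P_R0_DP_PULLDOWN	BIT(6)
#define U2P_R0_ID_PULLUP	BIT(13)

static uint32_t regs[4];	/* stand-in for the U2P register block */

static void update_bits(unsigned int reg, uint32_t mask, uint32_t val)
{
	/* clear the field covered by mask, then set val within it */
	regs[reg / 4] = (regs[reg / 4] & ~mask) | (val & mask);
}

int main(void)
{
	/* host/OTG mode: pull D+/D- down, release the ID pull-up */
	update_bits(U2P_R0, U2P_R0_DM_PULLDOWN, U2P_R0_DM_PULLDOWN);
	update_bits(U2P_R0, U2P_R0_DP_PULLDOWN, U2P_R0_DP_PULLDOWN);
	update_bits(U2P_R0, U2P_R0_ID_PULLUP, 0);

	printf("U2P_R0 = 0x%08x\n", regs[0]);
	return 0;
}
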
diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
similarity index 97%
rename from drivers/phy/phy-meson8b-usb2.c
rename to drivers/phy/amlogic/phy-meson8b-usb2.c
index 30f56a6..9c01b7e 100644
--- a/drivers/phy/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -1,5 +1,5 @@
 /*
- * Meson8b and GXBB USB2 PHY driver
+ * Meson8, Meson8b and GXBB USB2 PHY driver
  *
  * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
  *
@@ -266,6 +266,7 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id phy_meson8b_usb2_of_match[] = {
+	{ .compatible = "amlogic,meson8-usb2-phy", },
 	{ .compatible = "amlogic,meson8b-usb2-phy", },
 	{ .compatible = "amlogic,meson-gxbb-usb2-phy", },
 	{ },
@@ -282,5 +283,5 @@ static struct platform_driver phy_meson8b_usb2_driver = {
 module_platform_driver(phy_meson8b_usb2_driver);
 
 MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Meson8b and GXBB USB2 PHY driver");
+MODULE_DESCRIPTION("Meson8, Meson8b and GXBB USB2 PHY driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
new file mode 100644
index 0000000..37371b8
--- /dev/null
+++ b/drivers/phy/broadcom/Kconfig
@@ -0,0 +1,69 @@
+#
+# Phy drivers for Broadcom platforms
+#
+config PHY_CYGNUS_PCIE
+	tristate "Broadcom Cygnus PCIe PHY driver"
+	depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST)
+	select GENERIC_PHY
+	default ARCH_BCM_CYGNUS
+	help
+	  Enable this to support the Broadcom Cygnus PCIe PHY.
+	  If unsure, say N.
+
+config BCM_KONA_USB2_PHY
+	tristate "Broadcom Kona USB2 PHY Driver"
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the Broadcom Kona USB 2.0 PHY.
+
+config PHY_BCM_NS_USB2
+	tristate "Broadcom Northstar USB 2.0 PHY Driver"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the Broadcom USB 2.0 PHY connected to the
+	  USB controller on the Northstar family.
+
+config PHY_BCM_NS_USB3
+	tristate "Broadcom Northstar USB 3.0 PHY Driver"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM && OF
+	select GENERIC_PHY
+	select MDIO_DEVICE
+	help
+	  Enable this to support the Broadcom USB 3.0 PHY connected to the
+	  USB controller on the Northstar family.
+
+config PHY_NS2_PCIE
+	tristate "Broadcom Northstar2 PCIe PHY driver"
+	depends on OF && MDIO_BUS_MUX_BCM_IPROC
+	select GENERIC_PHY
+	default ARCH_BCM_IPROC
+	help
+	  Enable this to support the Broadcom Northstar2 PCIe PHY.
+	  If unsure, say N.
+
+config PHY_NS2_USB_DRD
+	tristate "Broadcom Northstar2 USB DRD PHY support"
+	depends on OF && (ARCH_BCM_IPROC || COMPILE_TEST)
+	select GENERIC_PHY
+	select EXTCON
+	default ARCH_BCM_IPROC
+	help
+	  Enable this to support the Broadcom Northstar2 USB DRD PHY.
+	  This driver initializes the PHY in either HOST or DEVICE mode.
+	  The host or device configuration is read from the device tree.
+
+	  If unsure, say N.
+
+config PHY_BRCM_SATA
+	tristate "Broadcom SATA PHY driver"
+	depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
+	depends on OF
+	select GENERIC_PHY
+	default ARCH_BCM_IPROC
+	help
+	  Enable this to support the Broadcom SATA PHY.
+	  If unsure, say N.
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
new file mode 100644
index 0000000..4eb82ec
--- /dev/null
+++ b/drivers/phy/broadcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
+obj-$(CONFIG_BCM_KONA_USB2_PHY)		+= phy-bcm-kona-usb2.o
+obj-$(CONFIG_PHY_BCM_NS_USB2)		+= phy-bcm-ns-usb2.o
+obj-$(CONFIG_PHY_BCM_NS_USB3)		+= phy-bcm-ns-usb3.o
+obj-$(CONFIG_PHY_NS2_PCIE)		+= phy-bcm-ns2-pcie.o
+obj-$(CONFIG_PHY_NS2_USB_DRD)		+= phy-bcm-ns2-usbdrd.o
+obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
similarity index 100%
rename from drivers/phy/phy-bcm-cygnus-pcie.c
rename to drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/broadcom/phy-bcm-kona-usb2.c
similarity index 100%
rename from drivers/phy/phy-bcm-kona-usb2.c
rename to drivers/phy/broadcom/phy-bcm-kona-usb2.c
diff --git a/drivers/phy/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
similarity index 100%
rename from drivers/phy/phy-bcm-ns-usb2.c
rename to drivers/phy/broadcom/phy-bcm-ns-usb2.c
diff --git a/drivers/phy/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
similarity index 70%
rename from drivers/phy/phy-bcm-ns-usb3.c
rename to drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 22b5e70..a53ae12 100644
--- a/drivers/phy/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -16,7 +16,9 @@
 #include <linux/bcma/bcma.h>
 #include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/mdio.h>
 #include <linux/module.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
@@ -52,7 +54,10 @@ struct bcm_ns_usb3 {
 	enum bcm_ns_family family;
 	void __iomem *dmp;
 	void __iomem *ccb_mii;
+	struct mdio_device *mdiodev;
 	struct phy *phy;
+
+	int (*phy_write)(struct bcm_ns_usb3 *usb3, u16 reg, u16 value);
 };
 
 static const struct of_device_id bcm_ns_usb3_id_table[] = {
@@ -68,63 +73,16 @@ static const struct of_device_id bcm_ns_usb3_id_table[] = {
 };
 MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
 
-static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr,
-				u32 mask, u32 value, unsigned long timeout)
-{
-	unsigned long deadline = jiffies + timeout;
-	u32 val;
-
-	do {
-		val = readl(addr);
-		if ((val & mask) == value)
-			return 0;
-		cpu_relax();
-		udelay(10);
-	} while (!time_after_eq(jiffies, deadline));
-
-	dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
-
-	return -EBUSY;
-}
-
-static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
-{
-	return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL,
-				    0x0100, 0x0000,
-				    usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US));
-}
-
 static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
 				      u16 value)
 {
-	u32 tmp = 0;
-	int err;
-
-	err = bcm_ns_usb3_mii_mng_wait_idle(usb3);
-	if (err < 0) {
-		dev_err(usb3->dev, "Couldn't write 0x%08x value\n", value);
-		return err;
-	}
-
-	/* TODO: Use a proper MDIO bus layer */
-	tmp |= 0x58020000; /* Magic value for MDIO PHY write */
-	tmp |= reg << 18;
-	tmp |= value;
-	writel(tmp, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
-
-	return 0;
+	return usb3->phy_write(usb3, reg, value);
 }
 
 static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
 {
 	int err;
 
-	/* Enable MDIO. Setting MDCDIV as 26  */
-	writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
-
-	/* Wait for MDIO? */
-	udelay(2);
-
 	/* USB3 PLL Block */
 	err = bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
 					 BCM_NS_USB3_PHY_PLL30_BLOCK);
@@ -143,9 +101,6 @@ static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
 	/* Deasserting PLL Reset */
 	bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLLA_CONTROL1, 0x8000);
 
-	/* Waiting MII Mgt interface idle */
-	bcm_ns_usb3_mii_mng_wait_idle(usb3);
-
 	/* Deasserting USB3 system reset */
 	writel(0, usb3->dmp + BCMA_RESET_CTL);
 
@@ -169,9 +124,6 @@ static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
 	/* Enabling SSC */
 	bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_TX_PMD_CONTROL1, 0x1003);
 
-	/* Waiting MII Mgt interface idle */
-	bcm_ns_usb3_mii_mng_wait_idle(usb3);
-
 	return 0;
 }
 
@@ -179,12 +131,6 @@ static int bcm_ns_usb3_phy_init_ns_ax(struct bcm_ns_usb3 *usb3)
 {
 	int err;
 
-	/* Enable MDIO. Setting MDCDIV as 26  */
-	writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
-
-	/* Wait for MDIO? */
-	udelay(2);
-
 	/* PLL30 block */
 	err = bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
 					 BCM_NS_USB3_PHY_PLL30_BLOCK);
@@ -205,9 +151,6 @@ static int bcm_ns_usb3_phy_init_ns_ax(struct bcm_ns_usb3 *usb3)
 
 	bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_TX_PMD_CONTROL1, 0x1003);
 
-	/* Waiting MII Mgt interface idle */
-	bcm_ns_usb3_mii_mng_wait_idle(usb3);
-
 	/* Deasserting USB3 system reset */
 	writel(0, usb3->dmp + BCMA_RESET_CTL);
 
@@ -242,6 +185,128 @@ static const struct phy_ops ops = {
 	.owner		= THIS_MODULE,
 };
 
+/**************************************************
+ * MDIO driver code
+ **************************************************/
+
+static int bcm_ns_usb3_mdiodev_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
+					 u16 value)
+{
+	struct mdio_device *mdiodev = usb3->mdiodev;
+
+	return mdiobus_write(mdiodev->bus, mdiodev->addr, reg, value);
+}
+
+static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
+{
+	struct device *dev = &mdiodev->dev;
+	const struct of_device_id *of_id;
+	struct phy_provider *phy_provider;
+	struct device_node *syscon_np;
+	struct bcm_ns_usb3 *usb3;
+	struct resource res;
+	int err;
+
+	usb3 = devm_kzalloc(dev, sizeof(*usb3), GFP_KERNEL);
+	if (!usb3)
+		return -ENOMEM;
+
+	usb3->dev = dev;
+	usb3->mdiodev = mdiodev;
+
+	of_id = of_match_device(bcm_ns_usb3_id_table, dev);
+	if (!of_id)
+		return -EINVAL;
+	usb3->family = (enum bcm_ns_family)of_id->data;
+
+	syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
+	err = of_address_to_resource(syscon_np, 0, &res);
+	of_node_put(syscon_np);
+	if (err)
+		return err;
+
+	usb3->dmp = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(usb3->dmp)) {
+		dev_err(dev, "Failed to map DMP regs\n");
+		return PTR_ERR(usb3->dmp);
+	}
+
+	usb3->phy_write = bcm_ns_usb3_mdiodev_phy_write;
+
+	usb3->phy = devm_phy_create(dev, NULL, &ops);
+	if (IS_ERR(usb3->phy)) {
+		dev_err(dev, "Failed to create PHY\n");
+		return PTR_ERR(usb3->phy);
+	}
+
+	phy_set_drvdata(usb3->phy, usb3);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct mdio_driver bcm_ns_usb3_mdio_driver = {
+	.mdiodrv = {
+		.driver = {
+			.name = "bcm_ns_mdio_usb3",
+			.of_match_table = bcm_ns_usb3_id_table,
+		},
+	},
+	.probe = bcm_ns_usb3_mdio_probe,
+};
+
+/**************************************************
+ * Platform driver code
+ **************************************************/
+
+static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr,
+				u32 mask, u32 value, unsigned long timeout)
+{
+	unsigned long deadline = jiffies + timeout;
+	u32 val;
+
+	do {
+		val = readl(addr);
+		if ((val & mask) == value)
+			return 0;
+		cpu_relax();
+		udelay(10);
+	} while (!time_after_eq(jiffies, deadline));
+
+	dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
+
+	return -EBUSY;
+}
+
+static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
+{
+	return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL,
+				    0x0100, 0x0000,
+				    usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US));
+}
+
+static int bcm_ns_usb3_platform_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
+					  u16 value)
+{
+	u32 tmp = 0;
+	int err;
+
+	err = bcm_ns_usb3_mii_mng_wait_idle(usb3);
+	if (err < 0) {
+		dev_err(usb3->dev, "Couldn't write 0x%08x value\n", value);
+		return err;
+	}
+
+	/* TODO: Use a proper MDIO bus layer */
+	tmp |= 0x58020000; /* Magic value for MDIO PHY write */
+	tmp |= reg << 18;
+	tmp |= value;
+	writel(tmp, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
+
+	return bcm_ns_usb3_mii_mng_wait_idle(usb3);
+}
+
 static int bcm_ns_usb3_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -275,6 +340,14 @@ static int bcm_ns_usb3_probe(struct platform_device *pdev)
 		return PTR_ERR(usb3->ccb_mii);
 	}
 
+	/* Enable MDIO, setting MDCDIV to 26 */
+	writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
+
+	/* Wait for MDIO? */
+	udelay(2);
+
+	usb3->phy_write = bcm_ns_usb3_platform_phy_write;
+
 	usb3->phy = devm_phy_create(dev, NULL, &ops);
 	if (IS_ERR(usb3->phy)) {
 		dev_err(dev, "Failed to create PHY\n");
@@ -298,6 +371,35 @@ static struct platform_driver bcm_ns_usb3_driver = {
 		.of_match_table = bcm_ns_usb3_id_table,
 	},
 };
-module_platform_driver(bcm_ns_usb3_driver);
+
+static int __init bcm_ns_usb3_module_init(void)
+{
+	int err;
+
+	/*
+	 * For backward compatibility we register as both an MDIO and a
+	 * platform driver. Once the MDIO binding is in common use (e.g. all
+	 * DT files have been switched to it) we should deprecate the old
+	 * binding and eventually drop support for it.
+	 */
+
+	err = mdio_driver_register(&bcm_ns_usb3_mdio_driver);
+	if (err)
+		return err;
+
+	err = platform_driver_register(&bcm_ns_usb3_driver);
+	if (err)
+		mdio_driver_unregister(&bcm_ns_usb3_mdio_driver);
+
+	return err;
+}
+module_init(bcm_ns_usb3_module_init);
+
+static void __exit bcm_ns_usb3_module_exit(void)
+{
+	platform_driver_unregister(&bcm_ns_usb3_driver);
+	mdio_driver_unregister(&bcm_ns_usb3_mdio_driver);
+}
+module_exit(bcm_ns_usb3_module_exit)
 
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
similarity index 100%
rename from drivers/phy/phy-bcm-ns2-pcie.c
rename to drivers/phy/broadcom/phy-bcm-ns2-pcie.c
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
new file mode 100644
index 0000000..9ae59e2
--- /dev/null
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/extcon.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define ICFG_DRD_AFE		0x0
+#define ICFG_MISC_STAT		0x18
+#define ICFG_DRD_P0CTL		0x1C
+#define ICFG_STRAP_CTRL		0x20
+#define ICFG_FSM_CTRL		0x24
+
+#define ICFG_DEV_BIT		BIT(2)
+#define IDM_RST_BIT		BIT(0)
+#define AFE_CORERDY_VDDC	BIT(18)
+#define PHY_PLL_RESETB		BIT(15)
+#define PHY_RESETB		BIT(14)
+#define PHY_PLL_LOCK		BIT(0)
+
+#define DRD_DEV_MODE		BIT(20)
+#define OHCI_OVRCUR_POL		BIT(11)
+#define ICFG_OFF_MODE		BIT(6)
+#define PLL_LOCK_RETRY		1000
+
+#define EVT_DEVICE		0
+#define EVT_HOST		1
+
+#define DRD_HOST_MODE		(BIT(2) | BIT(3))
+#define DRD_DEVICE_MODE		(BIT(4) | BIT(5))
+#define DRD_HOST_VAL		0x803
+#define DRD_DEV_VAL		0x807
+#define GPIO_DELAY		20
+
+struct ns2_phy_data;
+struct ns2_phy_driver {
+	void __iomem *icfgdrd_regs;
+	void __iomem *idmdrd_rst_ctrl;
+	void __iomem *crmu_usb2_ctrl;
+	void __iomem *usb2h_strap_reg;
+	struct ns2_phy_data *data;
+	struct extcon_dev *edev;
+	struct gpio_desc *vbus_gpiod;
+	struct gpio_desc *id_gpiod;
+	int id_irq;
+	int vbus_irq;
+	unsigned long debounce_jiffies;
+	struct delayed_work wq_extcon;
+};
+
+struct ns2_phy_data {
+	struct ns2_phy_driver *driver;
+	struct phy *phy;
+	int new_state;
+};
+
+static const unsigned int usb_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
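+/*
+ * Poll @usb_reg until @reg_mask is set, retrying up to PLL_LOCK_RETRY
+ * times with a 1us delay between reads.
+ */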
+static inline int pll_lock_stat(u32 usb_reg, int reg_mask,
+				struct ns2_phy_driver *driver)
+{
+	int retry = PLL_LOCK_RETRY;
+	u32 val;
+
+	do {
+		udelay(1);
+		val = readl(driver->icfgdrd_regs + usb_reg);
+		if (val & reg_mask)
+			return 0;
+	} while (--retry > 0);
+
+	return -EBUSY;
+}
+
+static int ns2_drd_phy_init(struct phy *phy)
+{
+	struct ns2_phy_data *data = phy_get_drvdata(phy);
+	struct ns2_phy_driver *driver = data->driver;
+	u32 val;
+
+	val = readl(driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+	if (data->new_state == EVT_HOST) {
+		val &= ~DRD_DEVICE_MODE;
+		val |= DRD_HOST_MODE;
+	} else {
+		val &= ~DRD_HOST_MODE;
+		val |= DRD_DEVICE_MODE;
+	}
+	writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+	return 0;
+}
+
+static int ns2_drd_phy_poweroff(struct phy *phy)
+{
+	struct ns2_phy_data *data = phy_get_drvdata(phy);
+	struct ns2_phy_driver *driver = data->driver;
+	u32 val;
+
+	val = readl(driver->crmu_usb2_ctrl);
+	val &= ~AFE_CORERDY_VDDC;
+	writel(val, driver->crmu_usb2_ctrl);
+
+	val = readl(driver->crmu_usb2_ctrl);
+	val &= ~DRD_DEV_MODE;
+	writel(val, driver->crmu_usb2_ctrl);
+
+	/* Disable Host and Device Mode */
+	val = readl(driver->icfgdrd_regs + ICFG_FSM_CTRL);
+	val &= ~(DRD_HOST_MODE | DRD_DEVICE_MODE | ICFG_OFF_MODE);
+	writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+	return 0;
+}
+
+static int ns2_drd_phy_poweron(struct phy *phy)
+{
+	struct ns2_phy_data *data = phy_get_drvdata(phy);
+	struct ns2_phy_driver *driver = data->driver;
+	u32 extcon_event = data->new_state;
+	int ret;
+	u32 val;
+
+	if (extcon_event == EVT_DEVICE) {
+		writel(DRD_DEV_VAL, driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+
+		val = readl(driver->idmdrd_rst_ctrl);
+		val &= ~IDM_RST_BIT;
+		writel(val, driver->idmdrd_rst_ctrl);
+
+		val = readl(driver->crmu_usb2_ctrl);
+		val |= (AFE_CORERDY_VDDC | DRD_DEV_MODE);
+		writel(val, driver->crmu_usb2_ctrl);
+
+		/* Bring PHY and PHY_PLL out of Reset */
+		val = readl(driver->crmu_usb2_ctrl);
+		val |= (PHY_PLL_RESETB | PHY_RESETB);
+		writel(val, driver->crmu_usb2_ctrl);
+
+		ret = pll_lock_stat(ICFG_MISC_STAT, PHY_PLL_LOCK, driver);
+		if (ret < 0) {
+			dev_err(&phy->dev, "Phy PLL lock failed\n");
+			return ret;
+		}
+	} else {
+		writel(DRD_HOST_VAL, driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+
+		val = readl(driver->crmu_usb2_ctrl);
+		val |= AFE_CORERDY_VDDC;
+		writel(val, driver->crmu_usb2_ctrl);
+
+		ret = pll_lock_stat(ICFG_MISC_STAT, PHY_PLL_LOCK, driver);
+		if (ret < 0) {
+			dev_err(&phy->dev, "Phy PLL lock failed\n");
+			return ret;
+		}
+
+		val = readl(driver->idmdrd_rst_ctrl);
+		val &= ~IDM_RST_BIT;
+		writel(val, driver->idmdrd_rst_ctrl);
+
+		/* Set port over-current polarity */
+		val = readl(driver->usb2h_strap_reg);
+		val |= OHCI_OVRCUR_POL;
+		writel(val, driver->usb2h_strap_reg);
+	}
+
+	return 0;
+}
+
+static void connect_change(struct ns2_phy_driver *driver)
+{
+	u32 extcon_event;
+	u32 val;
+
+	extcon_event = driver->data->new_state;
+	val = readl(driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
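+	/* Clear both FSM mode bits, then set the one for the new state */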
+	switch (extcon_event) {
+	case EVT_DEVICE:
+		val &= ~(DRD_HOST_MODE | DRD_DEVICE_MODE);
+		writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+		val = (val & ~DRD_HOST_MODE) | DRD_DEVICE_MODE;
+		writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+		val = readl(driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+		val |= ICFG_DEV_BIT;
+		writel(val, driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+		break;
+
+	case EVT_HOST:
+		val &= ~(DRD_HOST_MODE | DRD_DEVICE_MODE);
+		writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+		val = (val & ~DRD_DEVICE_MODE) | DRD_HOST_MODE;
+		writel(val, driver->icfgdrd_regs + ICFG_FSM_CTRL);
+
+		val = readl(driver->usb2h_strap_reg);
+		val |= OHCI_OVRCUR_POL;
+		writel(val, driver->usb2h_strap_reg);
+
+		val = readl(driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+		val &= ~ICFG_DEV_BIT;
+		writel(val, driver->icfgdrd_regs + ICFG_DRD_P0CTL);
+		break;
+
+	default:
+		pr_err("Invalid extcon event\n");
+		break;
+	}
+}
+
+static void extcon_work(struct work_struct *work)
+{
+	struct ns2_phy_driver *driver;
+	int vbus;
+	int id;
+
+	driver  = container_of(to_delayed_work(work),
+			       struct ns2_phy_driver, wq_extcon);
+
+	id = gpiod_get_value_cansleep(driver->id_gpiod);
+	vbus = gpiod_get_value_cansleep(driver->vbus_gpiod);
+
+	if (!id && vbus) { /* Host connected */
+		extcon_set_cable_state_(driver->edev, EXTCON_USB_HOST, true);
+		pr_debug("Host cable connected\n");
+		driver->data->new_state = EVT_HOST;
+		connect_change(driver);
+	} else if (id && !vbus) { /* Disconnected */
+		extcon_set_cable_state_(driver->edev, EXTCON_USB_HOST, false);
+		extcon_set_cable_state_(driver->edev, EXTCON_USB, false);
+		pr_debug("Cable disconnected\n");
+	} else if (id && vbus) { /* Device connected */
+		extcon_set_cable_state_(driver->edev, EXTCON_USB, true);
+		pr_debug("Device cable connected\n");
+		driver->data->new_state = EVT_DEVICE;
+		connect_change(driver);
+	}
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+	struct ns2_phy_driver *driver = dev_id;
+
+	queue_delayed_work(system_power_efficient_wq, &driver->wq_extcon,
+			   driver->debounce_jiffies);
+
+	return IRQ_HANDLED;
+}
+
+static struct phy_ops ops = {
+	.init		= ns2_drd_phy_init,
+	.power_on	= ns2_drd_phy_poweron,
+	.power_off	= ns2_drd_phy_poweroff,
+	.owner		= THIS_MODULE,
+};
+
+static const struct of_device_id ns2_drd_phy_dt_ids[] = {
+	{ .compatible = "brcm,ns2-drd-phy", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ns2_drd_phy_dt_ids);
+
+static int ns2_drd_phy_probe(struct platform_device *pdev)
+{
+	struct phy_provider *phy_provider;
+	struct device *dev = &pdev->dev;
+	struct ns2_phy_driver *driver;
+	struct ns2_phy_data *data;
+	struct resource *res;
+	int ret;
+	u32 val;
+
+	driver = devm_kzalloc(dev, sizeof(struct ns2_phy_driver),
+			      GFP_KERNEL);
+	if (!driver)
+		return -ENOMEM;
+
+	driver->data = devm_kzalloc(dev, sizeof(struct ns2_phy_data),
+				  GFP_KERNEL);
+	if (!driver->data)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "icfg");
+	driver->icfgdrd_regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(driver->icfgdrd_regs))
+		return PTR_ERR(driver->icfgdrd_regs);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rst-ctrl");
+	driver->idmdrd_rst_ctrl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(driver->idmdrd_rst_ctrl))
+		return PTR_ERR(driver->idmdrd_rst_ctrl);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crmu-ctrl");
+	driver->crmu_usb2_ctrl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(driver->crmu_usb2_ctrl))
+		return PTR_ERR(driver->crmu_usb2_ctrl);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb2-strap");
+	driver->usb2h_strap_reg = devm_ioremap_resource(dev, res);
+	if (IS_ERR(driver->usb2h_strap_reg))
+		return PTR_ERR(driver->usb2h_strap_reg);
+
+	 /* create extcon */
+	driver->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
+	if (IS_ERR(driver->id_gpiod)) {
+		dev_err(dev, "failed to get ID GPIO\n");
+		return PTR_ERR(driver->id_gpiod);
+	}
+	driver->vbus_gpiod = devm_gpiod_get(&pdev->dev, "vbus", GPIOD_IN);
+	if (IS_ERR(driver->vbus_gpiod)) {
+		dev_err(dev, "failed to get VBUS GPIO\n");
+		return PTR_ERR(driver->vbus_gpiod);
+	}
+
+	driver->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
+	if (IS_ERR(driver->edev)) {
+		dev_err(dev, "failed to allocate extcon device\n");
+		return -ENOMEM;
+	}
+
+	ret = devm_extcon_dev_register(dev, driver->edev);
+	if (ret < 0) {
+		dev_err(dev, "failed to register extcon device\n");
+		return ret;
+	}
+
+	ret = gpiod_set_debounce(driver->id_gpiod, GPIO_DELAY * 1000);
+	if (ret < 0)
+		driver->debounce_jiffies = msecs_to_jiffies(GPIO_DELAY);
+
+	INIT_DELAYED_WORK(&driver->wq_extcon, extcon_work);
+
+	driver->id_irq = gpiod_to_irq(driver->id_gpiod);
+	if (driver->id_irq < 0) {
+		dev_err(dev, "failed to get ID IRQ\n");
+		return driver->id_irq;
+	}
+
+	driver->vbus_irq = gpiod_to_irq(driver->vbus_gpiod);
+	if (driver->vbus_irq < 0) {
+		dev_err(dev, "failed to get VBUS IRQ\n");
+		return driver->vbus_irq;
+	}
+
+	ret = devm_request_irq(dev, driver->id_irq, gpio_irq_handler,
+			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			       "usb_id", driver);
+	if (ret < 0) {
+		dev_err(dev, "failed to request handler for ID IRQ\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(dev, driver->vbus_irq, gpio_irq_handler,
+			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			       "usb_vbus", driver);
+	if (ret < 0) {
+		dev_err(dev, "failed to request handler for VBUS IRQ\n");
+		return ret;
+	}
+
+	dev_set_drvdata(dev, driver);
+
+	/* Shut down all ports; they can be powered up as required */
+	val = readl(driver->crmu_usb2_ctrl);
+	val &= ~(AFE_CORERDY_VDDC | PHY_RESETB);
+	writel(val, driver->crmu_usb2_ctrl);
+
+	data = driver->data;
+	data->phy = devm_phy_create(dev, dev->of_node, &ops);
+	if (IS_ERR(data->phy)) {
+		dev_err(dev, "Failed to create usb drd phy\n");
+		return PTR_ERR(data->phy);
+	}
+
+	data->driver = driver;
+	phy_set_drvdata(data->phy, data);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(phy_provider)) {
+		dev_err(dev, "Failed to register as phy provider\n");
+		return PTR_ERR(phy_provider);
+	}
+
+	platform_set_drvdata(pdev, driver);
+
+	dev_info(dev, "Registered NS2 DRD Phy device\n");
+	queue_delayed_work(system_power_efficient_wq, &driver->wq_extcon,
+			   driver->debounce_jiffies);
+
+	return 0;
+}
+
+static struct platform_driver ns2_drd_phy_driver = {
+	.probe = ns2_drd_phy_probe,
+	.driver = {
+		.name = "bcm-ns2-usbphy",
+		.of_match_table = of_match_ptr(ns2_drd_phy_dt_ids),
+	},
+};
+module_platform_driver(ns2_drd_phy_driver);
+
+MODULE_ALIAS("platform:bcm-ns2-drd-phy");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom NS2 USB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
similarity index 85%
rename from drivers/phy/phy-brcm-sata.c
rename to drivers/phy/broadcom/phy-brcm-sata.c
index ccbc3d9..e6544c8 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -46,6 +46,7 @@ enum brcm_sata_phy_version {
 	BRCM_SATA_PHY_STB_40NM,
 	BRCM_SATA_PHY_IPROC_NS2,
 	BRCM_SATA_PHY_IPROC_NSP,
+	BRCM_SATA_PHY_IPROC_SR,
 };
 
 struct brcm_sata_port {
@@ -81,12 +82,17 @@ enum sata_phy_regs {
 	PLL_ACTRL2				= 0x8b,
 	PLL_ACTRL2_SELDIV_MASK			= 0x1f,
 	PLL_ACTRL2_SELDIV_SHIFT			= 9,
+	PLL_ACTRL6				= 0x86,
 
 	PLL1_REG_BANK				= 0x060,
 	PLL1_ACTRL2				= 0x82,
 	PLL1_ACTRL3				= 0x83,
 	PLL1_ACTRL4				= 0x84,
 
+	TX_REG_BANK				= 0x070,
+	TX_ACTRL0				= 0x80,
+	TX_ACTRL0_TXPOL_FLIP			= BIT(6),
+
 	OOB_REG_BANK				= 0x150,
 	OOB1_REG_BANK				= 0x160,
 	OOB_CTRL1				= 0x80,
@@ -347,6 +353,68 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port)
 	return 0;
 }
 
+/* SR PHY PLL0 registers */
+#define SR_PLL0_ACTRL6_MAGIC			0xa
+
+/* SR PHY PLL1 registers */
+#define SR_PLL1_ACTRL2_MAGIC			0x32
+#define SR_PLL1_ACTRL3_MAGIC			0x2
+#define SR_PLL1_ACTRL4_MAGIC			0x3e8
+
+static int brcm_sr_sata_init(struct brcm_sata_port *port)
+{
+	struct brcm_sata_phy *priv = port->phy_priv;
+	struct device *dev = port->phy_priv->dev;
+	void __iomem *base = priv->phy_base;
+	unsigned int val, try;
+
+	/* Configure PHY PLL register bank 1 */
+	val = SR_PLL1_ACTRL2_MAGIC;
+	brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+	val = SR_PLL1_ACTRL3_MAGIC;
+	brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+	val = SR_PLL1_ACTRL4_MAGIC;
+	brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+
+	/* Configure PHY PLL register bank 0 */
+	val = SR_PLL0_ACTRL6_MAGIC;
+	brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val);
+
+	/* Wait for PHY PLL lock by polling pll_lock bit */
+	try = 50;
+	do {
+		val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+					BLOCK0_XGXSSTATUS);
+		if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
+			break;
+		msleep(20);
+		try--;
+	} while (try);
+
+	if ((val & BLOCK0_XGXSSTATUS_PLL_LOCK) == 0) {
+		/* PLL did not lock; give up */
+		dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+		return -ETIMEDOUT;
+	}
+
+	/* Invert Tx polarity */
+	brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL0,
+			 ~TX_ACTRL0_TXPOL_FLIP, TX_ACTRL0_TXPOL_FLIP);
+
+	/* Configure OOB control to handle 100MHz reference clock */
+	val = ((0xc << OOB_CTRL1_BURST_MAX_SHIFT) |
+		(0x4 << OOB_CTRL1_BURST_MIN_SHIFT) |
+		(0x8 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT) |
+		(0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT));
+	brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+	val = ((0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT) |
+		(0x2 << OOB_CTRL2_BURST_CNT_SHIFT) |
+		(0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT));
+	brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+
+	return 0;
+}
+
 static int brcm_sata_phy_init(struct phy *phy)
 {
 	int rc;
@@ -363,6 +431,9 @@ static int brcm_sata_phy_init(struct phy *phy)
 	case BRCM_SATA_PHY_IPROC_NSP:
 		rc = brcm_nsp_sata_init(port);
 		break;
+	case BRCM_SATA_PHY_IPROC_SR:
+		rc = brcm_sr_sata_init(port);
+		break;
 	default:
 		rc = -ENODEV;
 	}
@@ -384,6 +455,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
 	  .data = (void *)BRCM_SATA_PHY_IPROC_NS2 },
 	{ .compatible = "brcm,iproc-nsp-sata-phy",
 	  .data = (void *)BRCM_SATA_PHY_IPROC_NSP },
+	{ .compatible	= "brcm,iproc-sr-sata-phy",
+	  .data = (void *)BRCM_SATA_PHY_IPROC_SR },
 	{},
 };
 MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
diff --git a/drivers/phy/hisilicon/Kconfig b/drivers/phy/hisilicon/Kconfig
new file mode 100644
index 0000000..6164c4c
--- /dev/null
+++ b/drivers/phy/hisilicon/Kconfig
@@ -0,0 +1,20 @@
+#
+# Phy drivers for Hisilicon platforms
+#
+config PHY_HI6220_USB
+	tristate "hi6220 USB PHY support"
+	depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+	select GENERIC_PHY
+	select MFD_SYSCON
+	help
+	  Enable this to support the HISILICON HI6220 USB PHY.
+
+	  To compile this driver as a module, choose M here.
+
+config PHY_HIX5HD2_SATA
+	tristate "HIX5HD2 SATA PHY Driver"
+	depends on ARCH_HIX5HD2 && OF && HAS_IOMEM
+	select GENERIC_PHY
+	select MFD_SYSCON
+	help
+	  Support for the SATA PHY on the Hisilicon hix5hd2 SoC.
diff --git a/drivers/phy/hisilicon/Makefile b/drivers/phy/hisilicon/Makefile
new file mode 100644
index 0000000..541b348
--- /dev/null
+++ b/drivers/phy/hisilicon/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PHY_HI6220_USB)		+= phy-hi6220-usb.o
+obj-$(CONFIG_PHY_HIX5HD2_SATA)		+= phy-hix5hd2-sata.o
diff --git a/drivers/phy/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
similarity index 100%
rename from drivers/phy/phy-hi6220-usb.c
rename to drivers/phy/hisilicon/phy-hi6220-usb.c
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
similarity index 100%
rename from drivers/phy/phy-hix5hd2-sata.c
rename to drivers/phy/hisilicon/phy-hix5hd2-sata.c
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
new file mode 100644
index 0000000..048d889
--- /dev/null
+++ b/drivers/phy/marvell/Kconfig
@@ -0,0 +1,50 @@
+#
+# Phy drivers for Marvell platforms
+#
+config ARMADA375_USBCLUSTER_PHY
+	def_bool y
+	depends on MACH_ARMADA_375 || COMPILE_TEST
+	depends on OF && HAS_IOMEM
+	select GENERIC_PHY
+
+config PHY_BERLIN_SATA
+	tristate "Marvell Berlin SATA PHY driver"
+	depends on ARCH_BERLIN && HAS_IOMEM && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the SATA PHY on Marvell Berlin SoCs.
+
+config PHY_BERLIN_USB
+	tristate "Marvell Berlin USB PHY Driver"
+	depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the USB PHY on Marvell Berlin SoCs.
+
+config PHY_MVEBU_SATA
+	def_bool y
+	depends on ARCH_DOVE || MACH_DOVE || MACH_KIRKWOOD
+	depends on OF
+	select GENERIC_PHY
+
+config PHY_PXA_28NM_HSIC
+	tristate "Marvell USB HSIC 28nm PHY Driver"
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the Marvell USB HSIC PHY found on Marvell
+	  SoCs. This driver handles PHY initialization and shutdown and is
+	  used by the Marvell EHCI driver.
+
+	  To compile this driver as a module, choose M here.
+
+config PHY_PXA_28NM_USB2
+	tristate "Marvell USB 2.0 28nm PHY Driver"
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the Marvell USB 2.0 PHY found on Marvell
+	  SoCs. This driver handles PHY initialization and shutdown and is
+	  used by the Marvell udc/ehci/otg drivers.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
new file mode 100644
index 0000000..3fc188f
--- /dev/null
+++ b/drivers/phy/marvell/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY)	+= phy-armada375-usb2.o
+obj-$(CONFIG_PHY_BERLIN_SATA)		+= phy-berlin-sata.o
+obj-$(CONFIG_PHY_BERLIN_USB)		+= phy-berlin-usb.o
+obj-$(CONFIG_PHY_MVEBU_SATA)		+= phy-mvebu-sata.o
+obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
+obj-$(CONFIG_PHY_PXA_28NM_USB2)		+= phy-pxa-28nm-usb2.o
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c
similarity index 100%
rename from drivers/phy/phy-armada375-usb2.c
rename to drivers/phy/marvell/phy-armada375-usb2.c
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
similarity index 100%
rename from drivers/phy/phy-berlin-sata.c
rename to drivers/phy/marvell/phy-berlin-sata.c
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
similarity index 100%
rename from drivers/phy/phy-berlin-usb.c
rename to drivers/phy/marvell/phy-berlin-usb.c
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c
similarity index 100%
rename from drivers/phy/phy-mvebu-sata.c
rename to drivers/phy/marvell/phy-mvebu-sata.c
diff --git a/drivers/phy/phy-pxa-28nm-hsic.c b/drivers/phy/marvell/phy-pxa-28nm-hsic.c
similarity index 100%
rename from drivers/phy/phy-pxa-28nm-hsic.c
rename to drivers/phy/marvell/phy-pxa-28nm-hsic.c
diff --git a/drivers/phy/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
similarity index 100%
rename from drivers/phy/phy-pxa-28nm-usb2.c
rename to drivers/phy/marvell/phy-pxa-28nm-usb2.c
diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
new file mode 100644
index 0000000..6bb7d6b
--- /dev/null
+++ b/drivers/phy/motorola/Kconfig
@@ -0,0 +1,12 @@
+#
+# Phy drivers for Motorola devices
+#
+config PHY_CPCAP_USB
+	tristate "CPCAP PMIC USB PHY driver"
+	depends on USB_SUPPORT && IIO
+	depends on USB_MUSB_HDRC || USB_MUSB_HDRC=n
+	select GENERIC_PHY
+	select USB_PHY
+	help
+	  Enable this for USB to work on Motorola phones and tablets
+	  such as Droid 4.
diff --git a/drivers/phy/motorola/Makefile b/drivers/phy/motorola/Makefile
new file mode 100644
index 0000000..b6cd618
--- /dev/null
+++ b/drivers/phy/motorola/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the phy drivers.
+#
+
+obj-$(CONFIG_PHY_CPCAP_USB)		+= phy-cpcap-usb.o
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
new file mode 100644
index 0000000..9b63efa
--- /dev/null
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -0,0 +1,676 @@
+/*
+ * Motorola CPCAP PMIC USB PHY driver
+ * Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
+ *
+ * Some parts based on earlier Motorola Linux kernel tree code in
+ * board-mapphone-usb.c and cpcap-usb-det.c:
+ * Copyright (C) 2007 - 2011 Motorola, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/iio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/motorola-cpcap.h>
+#include <linux/phy/omap_usb.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/musb.h>
+
+/* CPCAP_REG_USBC1 register bits */
+#define CPCAP_BIT_IDPULSE		BIT(15)
+#define CPCAP_BIT_ID100KPU		BIT(14)
+#define CPCAP_BIT_IDPUCNTRL		BIT(13)
+#define CPCAP_BIT_IDPU			BIT(12)
+#define CPCAP_BIT_IDPD			BIT(11)
+#define CPCAP_BIT_VBUSCHRGTMR3		BIT(10)
+#define CPCAP_BIT_VBUSCHRGTMR2		BIT(9)
+#define CPCAP_BIT_VBUSCHRGTMR1		BIT(8)
+#define CPCAP_BIT_VBUSCHRGTMR0		BIT(7)
+#define CPCAP_BIT_VBUSPU		BIT(6)
+#define CPCAP_BIT_VBUSPD		BIT(5)
+#define CPCAP_BIT_DMPD			BIT(4)
+#define CPCAP_BIT_DPPD			BIT(3)
+#define CPCAP_BIT_DM1K5PU		BIT(2)
+#define CPCAP_BIT_DP1K5PU		BIT(1)
+#define CPCAP_BIT_DP150KPU		BIT(0)
+
+/* CPCAP_REG_USBC2 register bits */
+#define CPCAP_BIT_ZHSDRV1		BIT(15)
+#define CPCAP_BIT_ZHSDRV0		BIT(14)
+#define CPCAP_BIT_DPLLCLKREQ		BIT(13)
+#define CPCAP_BIT_SE0CONN		BIT(12)
+#define CPCAP_BIT_UARTTXTRI		BIT(11)
+#define CPCAP_BIT_UARTSWAP		BIT(10)
+#define CPCAP_BIT_UARTMUX1		BIT(9)
+#define CPCAP_BIT_UARTMUX0		BIT(8)
+#define CPCAP_BIT_ULPISTPLOW		BIT(7)
+#define CPCAP_BIT_TXENPOL		BIT(6)
+#define CPCAP_BIT_USBXCVREN		BIT(5)
+#define CPCAP_BIT_USBCNTRL		BIT(4)
+#define CPCAP_BIT_USBSUSPEND		BIT(3)
+#define CPCAP_BIT_EMUMODE2		BIT(2)
+#define CPCAP_BIT_EMUMODE1		BIT(1)
+#define CPCAP_BIT_EMUMODE0		BIT(0)
+
+/* CPCAP_REG_USBC3 register bits */
+#define CPCAP_BIT_SPARE_898_15		BIT(15)
+#define CPCAP_BIT_IHSTX03		BIT(14)
+#define CPCAP_BIT_IHSTX02		BIT(13)
+#define CPCAP_BIT_IHSTX01		BIT(12)
+#define CPCAP_BIT_IHSTX0		BIT(11)
+#define CPCAP_BIT_IDPU_SPI		BIT(10)
+#define CPCAP_BIT_UNUSED_898_9		BIT(9)
+#define CPCAP_BIT_VBUSSTBY_EN		BIT(8)
+#define CPCAP_BIT_VBUSEN_SPI		BIT(7)
+#define CPCAP_BIT_VBUSPU_SPI		BIT(6)
+#define CPCAP_BIT_VBUSPD_SPI		BIT(5)
+#define CPCAP_BIT_DMPD_SPI		BIT(4)
+#define CPCAP_BIT_DPPD_SPI		BIT(3)
+#define CPCAP_BIT_SUSPEND_SPI		BIT(2)
+#define CPCAP_BIT_PU_SPI		BIT(1)
+#define CPCAP_BIT_ULPI_SPI_SEL		BIT(0)
+
+struct cpcap_usb_ints_state {
+	bool id_ground;
+	bool id_float;
+	bool chrg_det;
+	bool rvrs_chrg;
+	bool vbusov;
+
+	bool chrg_se1b;
+	bool se0conn;
+	bool rvrs_mode;
+	bool chrgcurr1;
+	bool vbusvld;
+	bool sessvld;
+	bool sessend;
+	bool se1;
+
+	bool battdetb;
+	bool dm;
+	bool dp;
+};
+
+enum cpcap_gpio_mode {
+	CPCAP_DM_DP,
+	CPCAP_MDM_RX_TX,
+	CPCAP_UNKNOWN,
+	CPCAP_OTG_DM_DP,
+};
+
+struct cpcap_phy_ddata {
+	struct regmap *reg;
+	struct device *dev;
+	struct clk *refclk;
+	struct usb_phy phy;
+	struct delayed_work detect_work;
+	struct pinctrl *pins;
+	struct pinctrl_state *pins_ulpi;
+	struct pinctrl_state *pins_utmi;
+	struct pinctrl_state *pins_uart;
+	struct gpio_desc *gpio[2];
+	struct iio_channel *vbus;
+	struct iio_channel *id;
+	struct regulator *vusb;
+	atomic_t active;
+};
+
+static bool cpcap_usb_vbus_valid(struct cpcap_phy_ddata *ddata)
+{
+	int error, value = 0;
+
+	error = iio_read_channel_processed(ddata->vbus, &value);
+	if (error >= 0)
+		return value > 3900;
+
+	dev_err(ddata->dev, "error reading VBUS: %i\n", error);
+
+	return false;
+}
+
+static int cpcap_usb_phy_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+	otg->host = host;
+	if (!host)
+		otg->state = OTG_STATE_UNDEFINED;
+
+	return 0;
+}
+
+static int cpcap_usb_phy_set_peripheral(struct usb_otg *otg,
+					struct usb_gadget *gadget)
+{
+	otg->gadget = gadget;
+	if (!gadget)
+		otg->state = OTG_STATE_UNDEFINED;
+
+	return 0;
+}
+
+static const struct phy_ops ops = {
+	.owner		= THIS_MODULE,
+};
+
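+/*
+ * Read the CPCAP interrupt sense registers and decode the cable,
+ * VBUS and D+/D- status bits.
+ */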
+static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
+				    struct cpcap_usb_ints_state *s)
+{
+	int val, error;
+
+	error = regmap_read(ddata->reg, CPCAP_REG_INTS1, &val);
+	if (error)
+		return error;
+
+	s->id_ground = val & BIT(15);
+	s->id_float = val & BIT(14);
+	s->vbusov = val & BIT(11);
+
+	error = regmap_read(ddata->reg, CPCAP_REG_INTS2, &val);
+	if (error)
+		return error;
+
+	s->vbusvld = val & BIT(3);
+	s->sessvld = val & BIT(2);
+	s->sessend = val & BIT(1);
+	s->se1 = val & BIT(0);
+
+	error = regmap_read(ddata->reg, CPCAP_REG_INTS4, &val);
+	if (error)
+		return error;
+
+	s->dm = val & BIT(1);
+	s->dp = val & BIT(0);
+
+	return 0;
+}
+
+static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
+static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
+
+static void cpcap_usb_detect(struct work_struct *work)
+{
+	struct cpcap_phy_ddata *ddata;
+	struct cpcap_usb_ints_state s;
+	bool vbus = false;
+	int error;
+
+	ddata = container_of(work, struct cpcap_phy_ddata, detect_work.work);
+
+	error = cpcap_phy_get_ints_state(ddata, &s);
+	if (error)
+		return;
+
+	if (s.id_ground) {
+		dev_dbg(ddata->dev, "id ground, USB host mode\n");
+		error = cpcap_usb_set_usb_mode(ddata);
+		if (error)
+			goto out_err;
+
+		error = musb_mailbox(MUSB_ID_GROUND);
+		if (error)
+			goto out_err;
+
+		error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+					   CPCAP_BIT_VBUSSTBY_EN,
+					   CPCAP_BIT_VBUSSTBY_EN);
+		if (error)
+			goto out_err;
+
+		return;
+	}
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+				   CPCAP_BIT_VBUSSTBY_EN, 0);
+	if (error)
+		goto out_err;
+
+	vbus = cpcap_usb_vbus_valid(ddata);
+
+	if (vbus) {
+		/* Are we connected to a docking station with vbus? */
+		if (s.id_ground) {
+			dev_dbg(ddata->dev, "connected to a dock\n");
+
+			/* No VBUS needed with docks */
+			error = cpcap_usb_set_usb_mode(ddata);
+			if (error)
+				goto out_err;
+			error = musb_mailbox(MUSB_ID_GROUND);
+			if (error)
+				goto out_err;
+
+			return;
+		}
+
+		/* Otherwise assume we're connected to a USB host */
+		dev_dbg(ddata->dev, "connected to USB host\n");
+		error = cpcap_usb_set_usb_mode(ddata);
+		if (error)
+			goto out_err;
+		error = musb_mailbox(MUSB_VBUS_VALID);
+		if (error)
+			goto out_err;
+
+		return;
+	}
+
+	/* Default to debug UART mode */
+	error = cpcap_usb_set_uart_mode(ddata);
+	if (error)
+		goto out_err;
+
+	error = musb_mailbox(MUSB_VBUS_OFF);
+	if (error)
+		goto out_err;
+
+	dev_dbg(ddata->dev, "set UART mode\n");
+
+	return;
+
+out_err:
+	dev_err(ddata->dev, "error setting cable state: %i\n", error);
+}
+
+static irqreturn_t cpcap_phy_irq_thread(int irq, void *data)
+{
+	struct cpcap_phy_ddata *ddata = data;
+
+	if (!atomic_read(&ddata->active))
+		return IRQ_NONE;
+
+	schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
+
+	return IRQ_HANDLED;
+}
+
+static int cpcap_usb_init_irq(struct platform_device *pdev,
+			      struct cpcap_phy_ddata *ddata,
+			      const char *name)
+{
+	int irq, error;
+
+	irq = platform_get_irq_byname(pdev, name);
+	if (!irq)
+		return -ENODEV;
+
+	error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+					  cpcap_phy_irq_thread,
+					  IRQF_SHARED,
+					  name, ddata);
+	if (error) {
+		dev_err(ddata->dev, "could not get irq %s: %i\n",
+			name, error);
+
+		return error;
+	}
+
+	return 0;
+}
+
+static const char * const cpcap_phy_irqs[] = {
+	/* REG_INT_0 */
+	"id_ground", "id_float",
+
+	/* REG_INT_1 */
+	"se0conn", "vbusvld", "sessvld", "sessend", "se1",
+
+	/* REG_INT_3 */
+	"dm", "dp",
+};
+
+static int cpcap_usb_init_interrupts(struct platform_device *pdev,
+				     struct cpcap_phy_ddata *ddata)
+{
+	int i, error;
+
+	for (i = 0; i < ARRAY_SIZE(cpcap_phy_irqs); i++) {
+		error = cpcap_usb_init_irq(pdev, ddata, cpcap_phy_irqs[i]);
+		if (error)
+			return error;
+	}
+
+	return 0;
+}
+
+/*
+ * Optional pins and modes. At least Motorola mapphone devices
+ * use two GPIOs and dynamic pinctrl to multiplex the PHY pins
+ * to UART, ULPI or UTMI mode.
+ */
+
+static int cpcap_usb_gpio_set_mode(struct cpcap_phy_ddata *ddata,
+				   enum cpcap_gpio_mode mode)
+{
+	if (!ddata->gpio[0] || !ddata->gpio[1])
+		return 0;
+
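+	/* Drive the 2-bit mode value onto the two mux GPIOs (bit 0 and bit 1) */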
+	gpiod_set_value(ddata->gpio[0], mode & 1);
+	gpiod_set_value(ddata->gpio[1], mode >> 1);
+
+	return 0;
+}
+
+static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata)
+{
+	int error;
+
+	error = cpcap_usb_gpio_set_mode(ddata, CPCAP_DM_DP);
+	if (error)
+		goto out_err;
+
+	if (ddata->pins_uart) {
+		error = pinctrl_select_state(ddata->pins, ddata->pins_uart);
+		if (error)
+			goto out_err;
+	}
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC1,
+				   CPCAP_BIT_VBUSPD,
+				   CPCAP_BIT_VBUSPD);
+	if (error)
+		goto out_err;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
+				   0xffff, CPCAP_BIT_UARTMUX0 |
+				   CPCAP_BIT_EMUMODE0);
+	if (error)
+		goto out_err;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3, 0x7fff,
+				   CPCAP_BIT_IDPU_SPI);
+	if (error)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
+
+	return error;
+}
+
+static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata)
+{
+	int error;
+
+	error = cpcap_usb_gpio_set_mode(ddata, CPCAP_OTG_DM_DP);
+	if (error)
+		return error;
+
+	if (ddata->pins_utmi) {
+		error = pinctrl_select_state(ddata->pins, ddata->pins_utmi);
+		if (error) {
+			dev_err(ddata->dev, "could not set usb mode: %i\n",
+				error);
+
+			return error;
+		}
+	}
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC1,
+				   CPCAP_BIT_VBUSPD, 0);
+	if (error)
+		goto out_err;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
+				   CPCAP_BIT_USBXCVREN,
+				   CPCAP_BIT_USBXCVREN);
+	if (error)
+		goto out_err;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
+				   CPCAP_BIT_PU_SPI |
+				   CPCAP_BIT_DMPD_SPI |
+				   CPCAP_BIT_DPPD_SPI |
+				   CPCAP_BIT_SUSPEND_SPI |
+				   CPCAP_BIT_ULPI_SPI_SEL, 0);
+	if (error)
+		goto out_err;
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC2,
+				   CPCAP_BIT_USBXCVREN,
+				   CPCAP_BIT_USBXCVREN);
+	if (error)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	dev_err(ddata->dev, "%s failed with %i\n", __func__, error);
+
+	return error;
+}
+
+static int cpcap_usb_init_optional_pins(struct cpcap_phy_ddata *ddata)
+{
+	ddata->pins = devm_pinctrl_get(ddata->dev);
+	if (IS_ERR(ddata->pins)) {
+		dev_info(ddata->dev, "default pins not configured: %ld\n",
+			 PTR_ERR(ddata->pins));
+		ddata->pins = NULL;
+
+		return 0;
+	}
+
+	ddata->pins_ulpi = pinctrl_lookup_state(ddata->pins, "ulpi");
+	if (IS_ERR(ddata->pins_ulpi)) {
+		dev_info(ddata->dev, "ulpi pins not configured\n");
+		ddata->pins_ulpi = NULL;
+	}
+
+	ddata->pins_utmi = pinctrl_lookup_state(ddata->pins, "utmi");
+	if (IS_ERR(ddata->pins_utmi)) {
+		dev_info(ddata->dev, "utmi pins not configured\n");
+		ddata->pins_utmi = NULL;
+	}
+
+	ddata->pins_uart = pinctrl_lookup_state(ddata->pins, "uart");
+	if (IS_ERR(ddata->pins_uart)) {
+		dev_info(ddata->dev, "uart pins not configured\n");
+		ddata->pins_uart = NULL;
+	}
+
+	if (ddata->pins_uart)
+		return pinctrl_select_state(ddata->pins, ddata->pins_uart);
+
+	return 0;
+}
+
+static void cpcap_usb_init_optional_gpios(struct cpcap_phy_ddata *ddata)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		ddata->gpio[i] = devm_gpiod_get_index(ddata->dev, "mode",
+						      i, GPIOD_OUT_HIGH);
+		if (IS_ERR(ddata->gpio[i])) {
+			dev_info(ddata->dev, "no mode change GPIO%i: %li\n",
+				 i, PTR_ERR(ddata->gpio[i]));
+			ddata->gpio[i] = NULL;
+		}
+	}
+}
+
+static int cpcap_usb_init_iio(struct cpcap_phy_ddata *ddata)
+{
+	enum iio_chan_type type;
+	int error;
+
+	ddata->vbus = devm_iio_channel_get(ddata->dev, "vbus");
+	if (IS_ERR(ddata->vbus)) {
+		error = PTR_ERR(ddata->vbus);
+		goto out_err;
+	}
+
+	if (!ddata->vbus->indio_dev) {
+		error = -ENXIO;
+		goto out_err;
+	}
+
+	error = iio_get_channel_type(ddata->vbus, &type);
+	if (error < 0)
+		goto out_err;
+
+	if (type != IIO_VOLTAGE) {
+		error = -EINVAL;
+		goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+		error);
+
+	return error;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cpcap_usb_phy_id_table[] = {
+	{
+		.compatible = "motorola,cpcap-usb-phy",
+	},
+	{
+		.compatible = "motorola,mapphone-cpcap-usb-phy",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, cpcap_usb_phy_id_table);
+#endif
+
+static int cpcap_usb_phy_probe(struct platform_device *pdev)
+{
+	struct cpcap_phy_ddata *ddata;
+	struct phy *generic_phy;
+	struct phy_provider *phy_provider;
+	struct usb_otg *otg;
+	const struct of_device_id *of_id;
+	int error;
+
+	of_id = of_match_device(of_match_ptr(cpcap_usb_phy_id_table),
+				&pdev->dev);
+	if (!of_id)
+		return -EINVAL;
+
+	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	ddata->reg = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!ddata->reg)
+		return -ENODEV;
+
+	otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
+	if (!otg)
+		return -ENOMEM;
+
+	ddata->dev = &pdev->dev;
+	ddata->phy.dev = ddata->dev;
+	ddata->phy.label = "cpcap_usb_phy";
+	ddata->phy.otg = otg;
+	ddata->phy.type = USB_PHY_TYPE_USB2;
+	otg->set_host = cpcap_usb_phy_set_host;
+	otg->set_peripheral = cpcap_usb_phy_set_peripheral;
+	otg->usb_phy = &ddata->phy;
+	INIT_DELAYED_WORK(&ddata->detect_work, cpcap_usb_detect);
+	platform_set_drvdata(pdev, ddata);
+
+	ddata->vusb = devm_regulator_get(&pdev->dev, "vusb");
+	if (IS_ERR(ddata->vusb))
+		return PTR_ERR(ddata->vusb);
+
+	error = regulator_enable(ddata->vusb);
+	if (error)
+		return error;
+
+	generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
+	if (IS_ERR(generic_phy)) {
+		error = PTR_ERR(generic_phy);
+		return error;
+	}
+
+	phy_set_drvdata(generic_phy, ddata);
+
+	phy_provider = devm_of_phy_provider_register(ddata->dev,
+						     of_phy_simple_xlate);
+	if (IS_ERR(phy_provider))
+		return PTR_ERR(phy_provider);
+
+	error = cpcap_usb_init_optional_pins(ddata);
+	if (error)
+		return error;
+
+	cpcap_usb_init_optional_gpios(ddata);
+
+	error = cpcap_usb_init_iio(ddata);
+	if (error)
+		return error;
+
+	error = cpcap_usb_init_interrupts(pdev, ddata);
+	if (error)
+		return error;
+
+	usb_add_phy_dev(&ddata->phy);
+	atomic_set(&ddata->active, 1);
+	schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
+
+	return 0;
+}
+
+static int cpcap_usb_phy_remove(struct platform_device *pdev)
+{
+	struct cpcap_phy_ddata *ddata = platform_get_drvdata(pdev);
+	int error;
+
+	atomic_set(&ddata->active, 0);
+	error = cpcap_usb_set_uart_mode(ddata);
+	if (error)
+		dev_err(ddata->dev, "could not set UART mode\n");
+
+	error = musb_mailbox(MUSB_VBUS_OFF);
+	if (error)
+		dev_err(ddata->dev, "could not set mailbox\n");
+
+	usb_remove_phy(&ddata->phy);
+	cancel_delayed_work_sync(&ddata->detect_work);
+	clk_unprepare(ddata->refclk);
+	regulator_disable(ddata->vusb);
+
+	return 0;
+}
+
+static struct platform_driver cpcap_usb_phy_driver = {
+	.probe		= cpcap_usb_phy_probe,
+	.remove		= cpcap_usb_phy_remove,
+	.driver		= {
+		.name	= "cpcap-usb-phy",
+		.of_match_table = of_match_ptr(cpcap_usb_phy_id_table),
+	},
+};
+
+module_platform_driver(cpcap_usb_phy_driver);
+
+MODULE_ALIAS("platform:cpcap_usb");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("CPCAP usb phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
new file mode 100644
index 0000000..7bfa64b
--- /dev/null
+++ b/drivers/phy/qualcomm/Kconfig
@@ -0,0 +1,58 @@
+#
+# Phy drivers for Qualcomm platforms
+#
+config PHY_QCOM_APQ8064_SATA
+	tristate "Qualcomm APQ8064 SATA SerDes/PHY driver"
+	depends on ARCH_QCOM
+	depends on HAS_IOMEM
+	depends on OF
+	select GENERIC_PHY
+
+config PHY_QCOM_IPQ806X_SATA
+	tristate "Qualcomm IPQ806x SATA SerDes/PHY driver"
+	depends on ARCH_QCOM
+	depends on HAS_IOMEM
+	depends on OF
+	select GENERIC_PHY
+
+config PHY_QCOM_QMP
+	tristate "Qualcomm QMP PHY Driver"
+	depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+	select GENERIC_PHY
+	help
+	  Enable this to support the QMP PHY transceiver that is used
+	  with controllers such as PCIe, UFS, and USB on Qualcomm chips.
+
+config PHY_QCOM_QUSB2
+	tristate "Qualcomm QUSB2 PHY Driver"
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+	depends on NVMEM || !NVMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the high-speed QUSB2 PHY transceiver for USB
+	  controllers on Qualcomm chips. This driver supports the high-speed
+	  PHY, which is usually paired with either the ChipIdea or Synopsys
+	  DWC3 USB IPs on MSM SoCs.
+
+config PHY_QCOM_UFS
+	tristate "Qualcomm UFS PHY driver"
+	depends on OF && ARCH_QCOM
+	select GENERIC_PHY
+	help
+	  Support for UFS PHY on QCOM chipsets.
+
+config PHY_QCOM_USB_HS
+	tristate "Qualcomm USB HS PHY module"
+	depends on USB_ULPI_BUS
+	depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
+	select GENERIC_PHY
+	help
+	  Support for the USB high-speed ULPI compliant PHY on Qualcomm
+	  chipsets.
+
+config PHY_QCOM_USB_HSIC
+	tristate "Qualcomm USB HSIC ULPI PHY module"
+	depends on USB_ULPI_BUS
+	select GENERIC_PHY
+	help
+	  Support for the USB HSIC ULPI compliant PHY on QCOM chipsets.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
new file mode 100644
index 0000000..2e183d7
--- /dev/null
+++ b/drivers/phy/qualcomm/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_PHY_QCOM_APQ8064_SATA)	+= phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA)	+= phy-qcom-ipq806x-sata.o
+obj-$(CONFIG_PHY_QCOM_QMP)		+= phy-qcom-qmp.o
+obj-$(CONFIG_PHY_QCOM_QUSB2)		+= phy-qcom-qusb2.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_USB_HS) 		+= phy-qcom-usb-hs.o
+obj-$(CONFIG_PHY_QCOM_USB_HSIC) 	+= phy-qcom-usb-hsic.o
diff --git a/drivers/phy/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
similarity index 100%
rename from drivers/phy/phy-qcom-apq8064-sata.c
rename to drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
diff --git a/drivers/phy/phy-qcom-ipq806x-sata.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c
similarity index 100%
rename from drivers/phy/phy-qcom-ipq806x-sata.c
rename to drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
similarity index 98%
rename from drivers/phy/phy-qcom-qmp.c
rename to drivers/phy/qualcomm/phy-qcom-qmp.c
index 727e23b..78ca628 100644
--- a/drivers/phy/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -844,7 +844,7 @@ static int qcom_qmp_phy_vreg_init(struct device *dev)
 	int num = qmp->cfg->num_vregs;
 	int i;
 
-	qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL);
+	qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
 	if (!qmp->vregs)
 		return -ENOMEM;
 
@@ -983,16 +983,16 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
 	 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
 	 */
 	qphy->tx = of_iomap(np, 0);
-	if (IS_ERR(qphy->tx))
-		return PTR_ERR(qphy->tx);
+	if (!qphy->tx)
+		return -ENOMEM;
 
 	qphy->rx = of_iomap(np, 1);
-	if (IS_ERR(qphy->rx))
-		return PTR_ERR(qphy->rx);
+	if (!qphy->rx)
+		return -ENOMEM;
 
 	qphy->pcs = of_iomap(np, 2);
-	if (IS_ERR(qphy->pcs))
-		return PTR_ERR(qphy->pcs);
+	if (!qphy->pcs)
+		return -ENOMEM;
 
 	/*
 	 * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
diff --git a/drivers/phy/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
similarity index 100%
rename from drivers/phy/phy-qcom-qusb2.c
rename to drivers/phy/qualcomm/phy-qcom-qusb2.c
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
similarity index 100%
rename from drivers/phy/phy-qcom-ufs-i.h
rename to drivers/phy/qualcomm/phy-qcom-ufs-i.h
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
similarity index 100%
rename from drivers/phy/phy-qcom-ufs-qmp-14nm.c
rename to drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
similarity index 100%
rename from drivers/phy/phy-qcom-ufs-qmp-14nm.h
rename to drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
similarity index 100%
rename from drivers/phy/phy-qcom-ufs-qmp-20nm.c
rename to drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
similarity index 100%
rename from drivers/phy/phy-qcom-ufs-qmp-20nm.h
rename to drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
similarity index 100%
rename from drivers/phy/phy-qcom-ufs.c
rename to drivers/phy/qualcomm/phy-qcom-ufs.c
diff --git a/drivers/phy/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
similarity index 99%
rename from drivers/phy/phy-qcom-usb-hs.c
rename to drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 94dfbfd..4b20abc 100644
--- a/drivers/phy/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -11,12 +11,11 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of_device.h>
+#include <linux/phy/phy.h>
 #include <linux/reset.h>
 #include <linux/extcon.h>
 #include <linux/notifier.h>
 
-#include "ulpi_phy.h"
-
 #define ULPI_PWR_CLK_MNG_REG		0x88
 # define ULPI_PWR_OTG_COMP_DISABLE	BIT(0)
 
diff --git a/drivers/phy/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
similarity index 98%
rename from drivers/phy/phy-qcom-usb-hsic.c
rename to drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 47690f9..c110563 100644
--- a/drivers/phy/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -8,13 +8,12 @@
 #include <linux/module.h>
 #include <linux/ulpi/driver.h>
 #include <linux/ulpi/regs.h>
+#include <linux/phy/phy.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/pinctrl-state.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 
-#include "ulpi_phy.h"
-
 #define ULPI_HSIC_CFG		0x30
 #define ULPI_HSIC_IO_CAL	0x33
 
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
new file mode 100644
index 0000000..cb09245
--- /dev/null
+++ b/drivers/phy/renesas/Kconfig
@@ -0,0 +1,24 @@
+#
+# Phy drivers for Renesas platforms
+#
+config PHY_RCAR_GEN2
+	tristate "Renesas R-Car generation 2 USB PHY driver"
+	depends on ARCH_RENESAS
+	depends on GENERIC_PHY
+	help
+	  Support for USB PHY found on Renesas R-Car generation 2 SoCs.
+
+config PHY_RCAR_GEN3_USB2
+	tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
+	depends on ARCH_RENESAS
+	depends on EXTCON
+	select GENERIC_PHY
+	help
+	  Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
+
+config PHY_RCAR_GEN3_USB3
+	tristate "Renesas R-Car generation 3 USB 3.0 PHY driver"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	select GENERIC_PHY
+	help
+	  Support for USB 3.0 PHY found on Renesas R-Car generation 3 SoCs.
diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile
new file mode 100644
index 0000000..8b60259
--- /dev/null
+++ b/drivers/phy/renesas/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_PHY_RCAR_GEN2)		+= phy-rcar-gen2.o
+obj-$(CONFIG_PHY_RCAR_GEN3_USB2)	+= phy-rcar-gen3-usb2.o
+obj-$(CONFIG_PHY_RCAR_GEN3_USB3)	+= phy-rcar-gen3-usb3.o
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
similarity index 100%
rename from drivers/phy/phy-rcar-gen2.c
rename to drivers/phy/renesas/phy-rcar-gen2.c
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
similarity index 100%
rename from drivers/phy/phy-rcar-gen3-usb2.c
rename to drivers/phy/renesas/phy-rcar-gen3-usb2.c
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
new file mode 100644
index 0000000..88c83c9
--- /dev/null
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
@@ -0,0 +1,226 @@
+/*
+ * Renesas R-Car Gen3 USB 3.0 PHY driver
+ *
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define USB30_CLKSET0		0x034
+#define USB30_CLKSET1		0x036
+#define USB30_SSC_SET		0x038
+#define USB30_PHY_ENABLE	0x060
+#define USB30_VBUS_EN		0x064
+
+/* USB30_CLKSET0 */
+#define CLKSET0_PRIVATE			0x05c0
+#define CLKSET0_USB30_FSEL_USB_EXTAL	0x0002
+
+/* USB30_CLKSET1 */
+#define CLKSET1_USB30_PLL_MULTI_SHIFT		6
+#define CLKSET1_USB30_PLL_MULTI_USB_EXTAL	(0x64 << \
+						 CLKSET1_USB30_PLL_MULTI_SHIFT)
+#define CLKSET1_PHYRESET	BIT(4)	/* 1: reset */
+#define CLKSET1_REF_CLKDIV	BIT(3)	/* 1: USB_EXTAL */
+#define CLKSET1_PRIVATE_2_1	BIT(1)	/* Write B'01 */
+#define CLKSET1_REF_CLK_SEL	BIT(0)	/* 1: USB3S0_CLK_P */
+
+/* USB30_SSC_SET */
+#define SSC_SET_SSC_EN		BIT(12)
+#define SSC_SET_RANGE_SHIFT	9
+#define SSC_SET_RANGE_4980	(0x0 << SSC_SET_RANGE_SHIFT)
+#define SSC_SET_RANGE_4492	(0x1 << SSC_SET_RANGE_SHIFT)
+#define SSC_SET_RANGE_4003	(0x2 << SSC_SET_RANGE_SHIFT)
+
+/* USB30_PHY_ENABLE */
+#define PHY_ENABLE_RESET_EN	BIT(4)
+
+/* USB30_VBUS_EN */
+#define VBUS_EN_VBUS_EN		BIT(1)
+
+struct rcar_gen3_usb3 {
+	void __iomem *base;
+	struct phy *phy;
+	u32 ssc_range;
+	bool usb3s_clk;
+	bool usb_extal;
+};
+
+static void write_clkset1_for_usb_extal(struct rcar_gen3_usb3 *r, bool reset)
+{
+	u16 val = CLKSET1_USB30_PLL_MULTI_USB_EXTAL |
+		  CLKSET1_REF_CLKDIV | CLKSET1_PRIVATE_2_1;
+
+	if (reset)
+		val |= CLKSET1_PHYRESET;
+
+	writew(val, r->base + USB30_CLKSET1);
+}
+
+static void rcar_gen3_phy_usb3_enable_ssc(struct rcar_gen3_usb3 *r)
+{
+	u16 val = SSC_SET_SSC_EN;
+
+	switch (r->ssc_range) {
+	case 4980:
+		val |= SSC_SET_RANGE_4980;
+		break;
+	case 4492:
+		val |= SSC_SET_RANGE_4492;
+		break;
+	case 4003:
+		val |= SSC_SET_RANGE_4003;
+		break;
+	default:
+		dev_err(&r->phy->dev, "%s: unsupported range (%x)\n", __func__,
+			r->ssc_range);
+		return;
+	}
+
+	writew(val, r->base + USB30_SSC_SET);
+}
+
+static void rcar_gen3_phy_usb3_select_usb_extal(struct rcar_gen3_usb3 *r)
+{
+	write_clkset1_for_usb_extal(r, false);
+	if (r->ssc_range)
+		rcar_gen3_phy_usb3_enable_ssc(r);
+	writew(CLKSET0_PRIVATE | CLKSET0_USB30_FSEL_USB_EXTAL,
+	       r->base + USB30_CLKSET0);
+	writew(PHY_ENABLE_RESET_EN, r->base + USB30_PHY_ENABLE);
+	write_clkset1_for_usb_extal(r, true);
+	usleep_range(10, 20);
+	write_clkset1_for_usb_extal(r, false);
+}
+
+static int rcar_gen3_phy_usb3_init(struct phy *p)
+{
+	struct rcar_gen3_usb3 *r = phy_get_drvdata(p);
+
+	dev_vdbg(&r->phy->dev, "%s: enter (%d, %d, %d)\n", __func__,
+		 r->usb3s_clk, r->usb_extal, r->ssc_range);
+
+	if (!r->usb3s_clk && r->usb_extal)
+		rcar_gen3_phy_usb3_select_usb_extal(r);
+
+	/* Always enable VBUS detection */
+	writew(VBUS_EN_VBUS_EN, r->base + USB30_VBUS_EN);
+
+	return 0;
+}
+
+static const struct phy_ops rcar_gen3_phy_usb3_ops = {
+	.init		= rcar_gen3_phy_usb3_init,
+	.owner		= THIS_MODULE,
+};
+
+static const struct of_device_id rcar_gen3_phy_usb3_match_table[] = {
+	{ .compatible = "renesas,rcar-gen3-usb3-phy" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb3_match_table);
+
+static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rcar_gen3_usb3 *r;
+	struct phy_provider *provider;
+	struct resource *res;
+	int ret = 0;
+	struct clk *clk;
+
+	if (!dev->of_node) {
+		dev_err(dev, "This driver needs device tree\n");
+		return -EINVAL;
+	}
+
+	r = devm_kzalloc(dev, sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	r->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(r->base))
+		return PTR_ERR(r->base);
+
+	clk = devm_clk_get(dev, "usb3s_clk");
+	if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+		r->usb3s_clk = !!clk_get_rate(clk);
+		clk_disable_unprepare(clk);
+	}
+	clk = devm_clk_get(dev, "usb_extal");
+	if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
+		r->usb_extal = !!clk_get_rate(clk);
+		clk_disable_unprepare(clk);
+	}
+
+	if (!r->usb3s_clk && !r->usb_extal) {
+		dev_err(dev, "This driver needs usb3s_clk and/or usb_extal\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * devm_phy_create() will call pm_runtime_enable(&phy->dev);
+	 * And then, phy-core will manage runtime pm for this device.
+	 */
+	pm_runtime_enable(dev);
+
+	r->phy = devm_phy_create(dev, NULL, &rcar_gen3_phy_usb3_ops);
+	if (IS_ERR(r->phy)) {
+		dev_err(dev, "Failed to create USB3 PHY\n");
+		ret = PTR_ERR(r->phy);
+		goto error;
+	}
+
+	of_property_read_u32(dev->of_node, "renesas,ssc-range", &r->ssc_range);
+
+	platform_set_drvdata(pdev, r);
+	phy_set_drvdata(r->phy, r);
+
+	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(provider)) {
+		dev_err(dev, "Failed to register PHY provider\n");
+		ret = PTR_ERR(provider);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static int rcar_gen3_phy_usb3_remove(struct platform_device *pdev)
+{
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+};
+
+static struct platform_driver rcar_gen3_phy_usb3_driver = {
+	.driver = {
+		.name		= "phy_rcar_gen3_usb3",
+		.of_match_table	= rcar_gen3_phy_usb3_match_table,
+	},
+	.probe	= rcar_gen3_phy_usb3_probe,
+	.remove = rcar_gen3_phy_usb3_remove,
+};
+module_platform_driver(rcar_gen3_phy_usb3_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 USB 3.0 PHY");
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
new file mode 100644
index 0000000..f5325b2
--- /dev/null
+++ b/drivers/phy/rockchip/Kconfig
@@ -0,0 +1,51 @@
+#
+# Phy drivers for Rockchip platforms
+#
+config PHY_ROCKCHIP_DP
+	tristate "Rockchip Display Port PHY Driver"
+	depends on ARCH_ROCKCHIP && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the Rockchip Display Port PHY.
+
+config PHY_ROCKCHIP_EMMC
+	tristate "Rockchip EMMC PHY Driver"
+	depends on ARCH_ROCKCHIP && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the Rockchip EMMC PHY.
+
+config PHY_ROCKCHIP_INNO_USB2
+	tristate "Rockchip INNO USB2PHY Driver"
+	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+	depends on COMMON_CLK
+	depends on EXTCON
+	depends on USB_SUPPORT
+	select GENERIC_PHY
+	select USB_COMMON
+	help
+	  Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+
+config PHY_ROCKCHIP_PCIE
+	tristate "Rockchip PCIe PHY Driver"
+	depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
+	select GENERIC_PHY
+	select MFD_SYSCON
+	help
+	  Enable this to support the Rockchip PCIe PHY.
+
+config PHY_ROCKCHIP_TYPEC
+	tristate "Rockchip TYPEC PHY Driver"
+	depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
+	select EXTCON
+	select GENERIC_PHY
+	select RESET_CONTROLLER
+	help
+	  Enable this to support the Rockchip USB TYPEC PHY.
+
+config PHY_ROCKCHIP_USB
+	tristate "Rockchip USB2 PHY Driver"
+	depends on ARCH_ROCKCHIP && OF
+	select GENERIC_PHY
+	help
+	  Enable this to support the Rockchip USB 2.0 PHY.
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
new file mode 100644
index 0000000..bd0acdf
--- /dev/null
+++ b/drivers/phy/rockchip/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_PHY_ROCKCHIP_DP)		+= phy-rockchip-dp.o
+obj-$(CONFIG_PHY_ROCKCHIP_EMMC)		+= phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2)	+= phy-rockchip-inno-usb2.o
+obj-$(CONFIG_PHY_ROCKCHIP_PCIE)		+= phy-rockchip-pcie.o
+obj-$(CONFIG_PHY_ROCKCHIP_TYPEC)	+= phy-rockchip-typec.o
+obj-$(CONFIG_PHY_ROCKCHIP_USB)		+= phy-rockchip-usb.o
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/rockchip/phy-rockchip-dp.c
similarity index 100%
rename from drivers/phy/phy-rockchip-dp.c
rename to drivers/phy/rockchip/phy-rockchip-dp.c
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
similarity index 100%
rename from drivers/phy/phy-rockchip-emmc.c
rename to drivers/phy/rockchip/phy-rockchip-emmc.c
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
similarity index 94%
rename from drivers/phy/phy-rockchip-inno-usb2.c
rename to drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 8efe78a..626883d 100644
--- a/drivers/phy/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -406,7 +406,8 @@ static int rockchip_usb2phy_init(struct phy *phy)
 	mutex_lock(&rport->mutex);
 
 	if (rport->port_id == USB2PHY_PORT_OTG) {
-		if (rport->mode != USB_DR_MODE_HOST) {
+		if (rport->mode != USB_DR_MODE_HOST &&
+		    rport->mode != USB_DR_MODE_UNKNOWN) {
 			/* clear bvalid status and enable bvalid detect irq */
 			ret = property_enable(rphy,
 					      &rport->port_cfg->bvalid_det_clr,
@@ -421,7 +422,7 @@ static int rockchip_usb2phy_init(struct phy *phy)
 				goto out;
 
 			schedule_delayed_work(&rport->otg_sm_work,
-					      OTG_SCHEDULE_DELAY);
+					      OTG_SCHEDULE_DELAY * 3);
 		} else {
 			/* If OTG works in host only mode, do nothing. */
 			dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
@@ -463,6 +464,9 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
 	if (ret)
 		return ret;
 
+	/* waiting for the utmi_clk to become stable */
+	usleep_range(1500, 2000);
+
 	rport->suspended = false;
 	return 0;
 }
@@ -493,7 +497,8 @@ static int rockchip_usb2phy_exit(struct phy *phy)
 	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
 
 	if (rport->port_id == USB2PHY_PORT_OTG &&
-	    rport->mode != USB_DR_MODE_HOST) {
+	    rport->mode != USB_DR_MODE_HOST &&
+	    rport->mode != USB_DR_MODE_UNKNOWN) {
 		cancel_delayed_work_sync(&rport->otg_sm_work);
 		cancel_delayed_work_sync(&rport->chg_work);
 	} else if (rport->port_id == USB2PHY_PORT_HOST)
@@ -970,7 +975,8 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
 	mutex_init(&rport->mutex);
 
 	rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
-	if (rport->mode == USB_DR_MODE_HOST) {
+	if (rport->mode == USB_DR_MODE_HOST ||
+	    rport->mode == USB_DR_MODE_UNKNOWN) {
 		ret = 0;
 		goto out;
 	}
@@ -1138,6 +1144,65 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
 	return ret;
 }
 
+static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
+	{
+		.reg = 0x760,
+		.num_ports	= 2,
+		.clkout_ctl	= { 0x0768, 4, 4, 1, 0 },
+		.port_cfgs	= {
+			[USB2PHY_PORT_OTG] = {
+				.phy_sus	= { 0x0760, 15, 0, 0, 0x1d1 },
+				.bvalid_det_en	= { 0x0680, 3, 3, 0, 1 },
+				.bvalid_det_st	= { 0x0690, 3, 3, 0, 1 },
+				.bvalid_det_clr	= { 0x06a0, 3, 3, 0, 1 },
+				.ls_det_en	= { 0x0680, 2, 2, 0, 1 },
+				.ls_det_st	= { 0x0690, 2, 2, 0, 1 },
+				.ls_det_clr	= { 0x06a0, 2, 2, 0, 1 },
+				.utmi_bvalid	= { 0x0480, 4, 4, 0, 1 },
+				.utmi_ls	= { 0x0480, 3, 2, 0, 1 },
+			},
+			[USB2PHY_PORT_HOST] = {
+				.phy_sus	= { 0x0764, 15, 0, 0, 0x1d1 },
+				.ls_det_en	= { 0x0680, 4, 4, 0, 1 },
+				.ls_det_st	= { 0x0690, 4, 4, 0, 1 },
+				.ls_det_clr	= { 0x06a0, 4, 4, 0, 1 }
+			}
+		},
+		.chg_det = {
+			.opmode		= { 0x0760, 3, 0, 5, 1 },
+			.cp_det		= { 0x0884, 4, 4, 0, 1 },
+			.dcp_det	= { 0x0884, 3, 3, 0, 1 },
+			.dp_det		= { 0x0884, 5, 5, 0, 1 },
+			.idm_sink_en	= { 0x0768, 8, 8, 0, 1 },
+			.idp_sink_en	= { 0x0768, 7, 7, 0, 1 },
+			.idp_src_en	= { 0x0768, 9, 9, 0, 1 },
+			.rdm_pdwn_en	= { 0x0768, 10, 10, 0, 1 },
+			.vdm_src_en	= { 0x0768, 12, 12, 0, 1 },
+			.vdp_src_en	= { 0x0768, 11, 11, 0, 1 },
+		},
+	},
+	{
+		.reg = 0x800,
+		.num_ports	= 2,
+		.clkout_ctl	= { 0x0808, 4, 4, 1, 0 },
+		.port_cfgs	= {
+			[USB2PHY_PORT_OTG] = {
+				.phy_sus	= { 0x800, 15, 0, 0, 0x1d1 },
+				.ls_det_en	= { 0x0684, 0, 0, 0, 1 },
+				.ls_det_st	= { 0x0694, 0, 0, 0, 1 },
+				.ls_det_clr	= { 0x06a4, 0, 0, 0, 1 }
+			},
+			[USB2PHY_PORT_HOST] = {
+				.phy_sus	= { 0x804, 15, 0, 0, 0x1d1 },
+				.ls_det_en	= { 0x0684, 1, 1, 0, 1 },
+				.ls_det_st	= { 0x0694, 1, 1, 0, 1 },
+				.ls_det_clr	= { 0x06a4, 1, 1, 0, 1 }
+			}
+		},
+	},
+	{ /* sentinel */ }
+};
+
 static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
 	{
 		.reg = 0x100,
@@ -1263,6 +1328,7 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
 };
 
 static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+	{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
 	{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
 	{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
 	{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
similarity index 100%
rename from drivers/phy/phy-rockchip-pcie.c
rename to drivers/phy/rockchip/phy-rockchip-pcie.c
diff --git a/drivers/phy/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
similarity index 100%
rename from drivers/phy/phy-rockchip-typec.c
rename to drivers/phy/rockchip/phy-rockchip-typec.c
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
similarity index 100%
rename from drivers/phy/phy-rockchip-usb.c
rename to drivers/phy/rockchip/phy-rockchip-usb.c
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
new file mode 100644
index 0000000..b7e0645
--- /dev/null
+++ b/drivers/phy/samsung/Kconfig
@@ -0,0 +1,95 @@
+#
+# Phy drivers for Samsung platforms
+#
+config PHY_EXYNOS_DP_VIDEO
+	tristate "EXYNOS SoC series Display Port PHY driver"
+	depends on OF
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	default ARCH_EXYNOS
+	select GENERIC_PHY
+	help
+	  Support for Display Port PHY found on Samsung EXYNOS SoCs.
+
+config PHY_EXYNOS_MIPI_VIDEO
+	tristate "S5P/EXYNOS SoC series MIPI CSI-2/DSI PHY driver"
+	depends on HAS_IOMEM
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+	select GENERIC_PHY
+	default y if ARCH_S5PV210 || ARCH_EXYNOS
+	help
+	  Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
+	  and EXYNOS SoCs.
+
+config PHY_EXYNOS_PCIE
+	bool "Exynos PCIe PHY driver"
+	depends on OF && (ARCH_EXYNOS || COMPILE_TEST)
+	select GENERIC_PHY
+	help
+	  Enable PCIe PHY support for Exynos SoC series.
+	  This driver provides PHY interface for Exynos PCIe controller.
+
+config PHY_SAMSUNG_USB2
+	tristate "Samsung USB 2.0 PHY driver"
+	depends on HAS_IOMEM
+	depends on USB_EHCI_EXYNOS || USB_OHCI_EXYNOS || USB_DWC2
+	select GENERIC_PHY
+	select MFD_SYSCON
+	default ARCH_EXYNOS
+	help
+	  Enable this to support the Samsung USB 2.0 PHY driver for Samsung
+	  SoCs. This driver provides the interface for USB 2.0 PHY. Support
+	  for particular PHYs will be enabled based on the SoC type in addition
+	  to this driver.
+
+config PHY_EXYNOS4210_USB2
+	bool
+	depends on PHY_SAMSUNG_USB2
+	default CPU_EXYNOS4210
+
+config PHY_EXYNOS4X12_USB2
+	bool
+	depends on PHY_SAMSUNG_USB2
+	default SOC_EXYNOS3250 || SOC_EXYNOS4212 || SOC_EXYNOS4412
+
+config PHY_EXYNOS5250_USB2
+	bool
+	depends on PHY_SAMSUNG_USB2
+	default SOC_EXYNOS5250 || SOC_EXYNOS5420
+
+config PHY_S5PV210_USB2
+	bool "Support for S5PV210"
+	depends on PHY_SAMSUNG_USB2
+	depends on ARCH_S5PV210
+	help
+	  Enable USB PHY support for S5PV210. This option requires that the
+	  Samsung USB 2.0 PHY driver is enabled and means that support for this
+	  particular SoC is compiled into the driver. In the case of S5PV210,
+	  two PHYs are available - device and host.
+
+config PHY_EXYNOS5_USBDRD
+	tristate "Exynos5 SoC series USB DRD PHY driver"
+	depends on ARCH_EXYNOS && OF
+	depends on HAS_IOMEM
+	depends on USB_DWC3_EXYNOS
+	select GENERIC_PHY
+	select MFD_SYSCON
+	default y
+	help
+	  Enable USB DRD PHY support for Exynos 5 SoC series.
+	  This driver provides PHY interface for USB 3.0 DRD controller
+	  present on Exynos5 SoC series.
+
+config PHY_EXYNOS5250_SATA
+	tristate "Exynos5250 Sata SerDes/PHY driver"
+	depends on SOC_EXYNOS5250
+	depends on HAS_IOMEM
+	depends on OF
+	select GENERIC_PHY
+	select I2C
+	select I2C_S3C2410
+	select MFD_SYSCON
+	help
+	  Enable this to support SATA SerDes/Phy found on Samsung's
+	  Exynos5250 based SoCs. This SerDes/Phy supports SATA 1.5 Gb/s,
+	  SATA 3.0 Gb/s, SATA 6.0 Gb/s speeds. It supports one SATA host
+	  port to accept one SATA device.
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
new file mode 100644
index 0000000..20d7f24
--- /dev/null
+++ b/drivers/phy/samsung/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO)	+= phy-exynos-dp-video.o
+obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO)	+= phy-exynos-mipi-video.o
+obj-$(CONFIG_PHY_EXYNOS_PCIE)		+= phy-exynos-pcie.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2)		+= phy-exynos-usb2.o
+phy-exynos-usb2-y			+= phy-samsung-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2)	+= phy-exynos4210-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2)	+= phy-exynos4x12-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2)	+= phy-exynos5250-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2)	+= phy-s5pv210-usb2.o
+obj-$(CONFIG_PHY_EXYNOS5_USBDRD)	+= phy-exynos5-usbdrd.o
+obj-$(CONFIG_PHY_EXYNOS5250_SATA)	+= phy-exynos5250-sata.o
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
similarity index 100%
rename from drivers/phy/phy-exynos-dp-video.c
rename to drivers/phy/samsung/phy-exynos-dp-video.c
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
similarity index 100%
rename from drivers/phy/phy-exynos-mipi-video.c
rename to drivers/phy/samsung/phy-exynos-mipi-video.c
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
similarity index 100%
rename from drivers/phy/phy-exynos-pcie.c
rename to drivers/phy/samsung/phy-exynos-pcie.c
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/samsung/phy-exynos4210-usb2.c
similarity index 100%
rename from drivers/phy/phy-exynos4210-usb2.c
rename to drivers/phy/samsung/phy-exynos4210-usb2.c
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/samsung/phy-exynos4x12-usb2.c
similarity index 100%
rename from drivers/phy/phy-exynos4x12-usb2.c
rename to drivers/phy/samsung/phy-exynos4x12-usb2.c
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
similarity index 100%
rename from drivers/phy/phy-exynos5-usbdrd.c
rename to drivers/phy/samsung/phy-exynos5-usbdrd.c
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
similarity index 100%
rename from drivers/phy/phy-exynos5250-sata.c
rename to drivers/phy/samsung/phy-exynos5250-sata.c
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/samsung/phy-exynos5250-usb2.c
similarity index 100%
rename from drivers/phy/phy-exynos5250-usb2.c
rename to drivers/phy/samsung/phy-exynos5250-usb2.c
diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
similarity index 100%
rename from drivers/phy/phy-s5pv210-usb2.c
rename to drivers/phy/samsung/phy-s5pv210-usb2.c
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
similarity index 100%
rename from drivers/phy/phy-samsung-usb2.c
rename to drivers/phy/samsung/phy-samsung-usb2.c
diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/samsung/phy-samsung-usb2.h
similarity index 100%
rename from drivers/phy/phy-samsung-usb2.h
rename to drivers/phy/samsung/phy-samsung-usb2.h
diff --git a/drivers/phy/st/Kconfig b/drivers/phy/st/Kconfig
new file mode 100644
index 0000000..0814d3f
--- /dev/null
+++ b/drivers/phy/st/Kconfig
@@ -0,0 +1,33 @@
+#
+# Phy drivers for STMicro platforms
+#
+config PHY_MIPHY28LP
+	tristate "STMicroelectronics MIPHY28LP PHY driver for STiH407"
+	depends on ARCH_STI
+	select GENERIC_PHY
+	help
+	  Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
+	  that is part of STMicroelectronics STiH407 SoC.
+
+config PHY_ST_SPEAR1310_MIPHY
+	tristate "ST SPEAR1310-MIPHY driver"
+	select GENERIC_PHY
+	depends on MACH_SPEAR1310 || COMPILE_TEST
+	help
+	  Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
+
+config PHY_ST_SPEAR1340_MIPHY
+	tristate "ST SPEAR1340-MIPHY driver"
+	select GENERIC_PHY
+	depends on MACH_SPEAR1340 || COMPILE_TEST
+	help
+	  Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
+
+config PHY_STIH407_USB
+	tristate "STMicroelectronics USB2 picoPHY driver for STiH407 family"
+	depends on RESET_CONTROLLER
+	depends on ARCH_STI || COMPILE_TEST
+	select GENERIC_PHY
+	help
+	  Enable this to support the picoPHY device used by the USB2
+	  and USB3 controllers on STMicroelectronics STiH407 SoC families.
diff --git a/drivers/phy/st/Makefile b/drivers/phy/st/Makefile
new file mode 100644
index 0000000..e2adfe2
--- /dev/null
+++ b/drivers/phy/st/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_PHY_MIPHY28LP) 		+= phy-miphy28lp.o
+obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY)	+= phy-spear1310-miphy.o
+obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY)	+= phy-spear1340-miphy.o
+obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c
similarity index 100%
rename from drivers/phy/phy-miphy28lp.c
rename to drivers/phy/st/phy-miphy28lp.c
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/st/phy-spear1310-miphy.c
similarity index 100%
rename from drivers/phy/phy-spear1310-miphy.c
rename to drivers/phy/st/phy-spear1310-miphy.c
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/st/phy-spear1340-miphy.c
similarity index 100%
rename from drivers/phy/phy-spear1340-miphy.c
rename to drivers/phy/st/phy-spear1340-miphy.c
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/st/phy-stih407-usb.c
similarity index 100%
rename from drivers/phy/phy-stih407-usb.c
rename to drivers/phy/st/phy-stih407-usb.c
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
new file mode 100644
index 0000000..2050356
--- /dev/null
+++ b/drivers/phy/ti/Kconfig
@@ -0,0 +1,78 @@
+#
+# Phy drivers for TI platforms
+#
+config PHY_DA8XX_USB
+	tristate "TI DA8xx USB PHY Driver"
+	depends on ARCH_DAVINCI_DA8XX
+	select GENERIC_PHY
+	select MFD_SYSCON
+	help
+	  Enable this to support the USB PHY on DA8xx SoCs.
+
+	  This driver controls both the USB 1.1 PHY and the USB 2.0 PHY.
+
+config PHY_DM816X_USB
+	tristate "TI dm816x USB PHY driver"
+	depends on ARCH_OMAP2PLUS
+	depends on USB_SUPPORT
+	select GENERIC_PHY
+	select USB_PHY
+	help
+	  Enable this for dm816x USB to work.
+
+config OMAP_CONTROL_PHY
+	tristate "OMAP CONTROL PHY Driver"
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
+	help
+	  Enable this to add support for the PHY part present in the control
+	  module. This driver has API to power on the USB2 PHY and to write to
+	  the mailbox. The mailbox is present only in omap4 and the register to
+	  power on the USB2 PHY is present in OMAP4 and OMAP5. OMAP5 has an
+	  additional register to power on USB3 PHY/SATA PHY/PCIE PHY
+	  (PIPE3 PHY).
+
+config OMAP_USB2
+	tristate "OMAP USB2 PHY Driver"
+	depends on ARCH_OMAP2PLUS
+	depends on USB_SUPPORT
+	select GENERIC_PHY
+	select USB_PHY
+	select OMAP_CONTROL_PHY
+	depends on OMAP_OCP2SCP
+	help
+	  Enable this to support the transceiver that is part of SOC. This
+	  driver takes care of all the PHY functionality apart from comparator.
+	  The USB OTG controller communicates with the comparator using this
+	  driver.
+
+config TI_PIPE3
+	tristate "TI PIPE3 PHY Driver"
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
+	select GENERIC_PHY
+	select OMAP_CONTROL_PHY
+	depends on OMAP_OCP2SCP
+	help
+	  Enable this to support the PIPE3 PHY that is part of TI SOCs. This
+	  driver takes care of all the PHY functionality apart from comparator.
+	  This driver interacts with the "OMAP Control PHY Driver" to power
+	  on/off the PHY.
+
+config PHY_TUSB1210
+	tristate "TI TUSB1210 ULPI PHY module"
+	depends on USB_ULPI_BUS
+	select GENERIC_PHY
+	help
+	  Support for TI TUSB1210 USB ULPI PHY.
+
+config TWL4030_USB
+	tristate "TWL4030 USB Transceiver Driver"
+	depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+	depends on USB_SUPPORT
+	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
+	select GENERIC_PHY
+	select USB_PHY
+	help
+	  Enable this to support the USB OTG transceiver on TWL4030
+	  family chips (including the TWL5030 and TPS659x0 devices).
+	  This transceiver supports high and full speed devices plus,
+	  in host mode, low speed.
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
new file mode 100644
index 0000000..0cc3a1a
--- /dev/null
+++ b/drivers/phy/ti/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_PHY_DA8XX_USB)		+= phy-da8xx-usb.o
+obj-$(CONFIG_PHY_DM816X_USB)		+= phy-dm816x-usb.o
+obj-$(CONFIG_OMAP_CONTROL_PHY)		+= phy-omap-control.o
+obj-$(CONFIG_OMAP_USB2)			+= phy-omap-usb2.o
+obj-$(CONFIG_TI_PIPE3)			+= phy-ti-pipe3.o
+obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
+obj-$(CONFIG_TWL4030_USB)		+= phy-twl4030-usb.o
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c
similarity index 100%
rename from drivers/phy/phy-da8xx-usb.c
rename to drivers/phy/ti/phy-da8xx-usb.c
diff --git a/drivers/phy/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
similarity index 100%
rename from drivers/phy/phy-dm816x-usb.c
rename to drivers/phy/ti/phy-dm816x-usb.c
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
similarity index 100%
rename from drivers/phy/phy-omap-control.c
rename to drivers/phy/ti/phy-omap-control.c
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
similarity index 100%
rename from drivers/phy/phy-omap-usb2.c
rename to drivers/phy/ti/phy-omap-usb2.c
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
similarity index 100%
rename from drivers/phy/phy-ti-pipe3.c
rename to drivers/phy/ti/phy-ti-pipe3.c
diff --git a/drivers/phy/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
similarity index 78%
rename from drivers/phy/phy-tusb1210.c
rename to drivers/phy/ti/phy-tusb1210.c
index 4f6d5e7..b8ec39a 100644
--- a/drivers/phy/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -11,9 +11,9 @@
  */
 #include <linux/module.h>
 #include <linux/ulpi/driver.h>
+#include <linux/ulpi/regs.h>
 #include <linux/gpio/consumer.h>
-
-#include "ulpi_phy.h"
+#include <linux/phy/ulpi_phy.h>
 
 #define TUSB1210_VENDOR_SPECIFIC2		0x80
 #define TUSB1210_VENDOR_SPECIFIC2_IHSTX_SHIFT	0
@@ -53,9 +53,43 @@ static int tusb1210_power_off(struct phy *phy)
 	return 0;
 }
 
+static int tusb1210_set_mode(struct phy *phy, enum phy_mode mode)
+{
+	struct tusb1210 *tusb = phy_get_drvdata(phy);
+	int ret;
+
+	ret = ulpi_read(tusb->ulpi, ULPI_OTG_CTRL);
+	if (ret < 0)
+		return ret;
+
+	switch (mode) {
+	case PHY_MODE_USB_HOST:
+		ret |= (ULPI_OTG_CTRL_DRVVBUS_EXT
+			| ULPI_OTG_CTRL_ID_PULLUP
+			| ULPI_OTG_CTRL_DP_PULLDOWN
+			| ULPI_OTG_CTRL_DM_PULLDOWN);
+		ulpi_write(tusb->ulpi, ULPI_OTG_CTRL, ret);
+		ret |= ULPI_OTG_CTRL_DRVVBUS;
+		break;
+	case PHY_MODE_USB_DEVICE:
+		ret &= ~(ULPI_OTG_CTRL_DRVVBUS
+			 | ULPI_OTG_CTRL_DP_PULLDOWN
+			 | ULPI_OTG_CTRL_DM_PULLDOWN);
+		ulpi_write(tusb->ulpi, ULPI_OTG_CTRL, ret);
+		ret &= ~ULPI_OTG_CTRL_DRVVBUS_EXT;
+		break;
+	default:
+		/* nothing */
+		return 0;
+	}
+
+	return ulpi_write(tusb->ulpi, ULPI_OTG_CTRL, ret);
+}
+
 static const struct phy_ops phy_ops = {
 	.power_on = tusb1210_power_on,
 	.power_off = tusb1210_power_off,
+	.set_mode = tusb1210_set_mode,
 	.owner = THIS_MODULE,
 };
 
@@ -125,7 +159,8 @@ static void tusb1210_remove(struct ulpi *ulpi)
 #define TI_VENDOR_ID 0x0451
 
 static const struct ulpi_device_id tusb1210_ulpi_id[] = {
-	{ TI_VENDOR_ID, 0x1507, },
+	{ TI_VENDOR_ID, 0x1507, },  /* TUSB1210 */
+	{ TI_VENDOR_ID, 0x1508, },  /* TUSB1211 */
 	{ },
 };
 MODULE_DEVICE_TABLE(ulpi, tusb1210_ulpi_id);
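With .set_mode added to phy_ops, controller-side code can switch the TUSB1210/1211 OTG pull-up/pull-down configuration through the generic PHY API instead of writing ULPI_OTG_CTRL itself. A minimal sketch, assuming the caller already holds a struct phy * for this transceiver; example_switch_to_host() is an illustrative name:

	#include <linux/phy/phy.h>

	/* Illustrative helper, not part of this series */
	static int example_switch_to_host(struct phy *ulpi_phy)
	{
		/* Dispatches to tusb1210_set_mode(phy, PHY_MODE_USB_HOST) */
		return phy_set_mode(ulpi_phy, PHY_MODE_USB_HOST);
	}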
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
similarity index 100%
rename from drivers/phy/phy-twl4030-usb.c
rename to drivers/phy/ti/phy-twl4030-usb.c
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 1653cbd..bd459a9 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group);
  * pinctrl_generic_free_groups() - removes all pin groups
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl groups
+ * are allocated with devm_kzalloc() so no need to free them here.
  */
 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev)
 {
 	struct radix_tree_iter iter;
-	struct group_desc *group;
-	unsigned long *indices;
 	void **slot;
-	int i = 0;
-
-	indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-			       pctldev->num_groups, GFP_KERNEL);
-	if (!indices)
-		return;
 
 	radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
-		indices[i++] = iter.index;
-
-	for (i = 0; i < pctldev->num_groups; i++) {
-		group = radix_tree_lookup(&pctldev->pin_group_tree,
-					  indices[i]);
-		radix_tree_delete(&pctldev->pin_group_tree, indices[i]);
-		devm_kfree(pctldev->dev, group);
-	}
+		radix_tree_delete(&pctldev->pin_group_tree, iter.index);
 
 	pctldev->num_groups = 0;
 }
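As the updated comment notes, each group descriptor is allocated with devm_kzalloc() against pctldev->dev, so its memory is released by the driver core when that device goes away; teardown only has to drop the radix-tree slots. A condensed sketch of the add side of that lifetime (not the literal pinctrl_generic_add_group() body):

	/* Illustrative sketch of the devm-managed add path */
	static int example_add_group(struct pinctrl_dev *pctldev, void *data)
	{
		struct group_desc *group;

		/* Lifetime is tied to pctldev->dev; no matching kfree() needed */
		group = devm_kzalloc(pctldev->dev, sizeof(*group), GFP_KERNEL);
		if (!group)
			return -ENOMEM;

		group->data = data;
		return radix_tree_insert(&pctldev->pin_group_tree,
					 pctldev->num_groups++, group);
	}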
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 41b5b07..6852010 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
 	return 0;
 }
 
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+	u32 tmp;
+
+	tmp = readl(reg);
+	tmp &= ~(mask << shift);
+	tmp |= value << shift;
+	writel(tmp, reg);
+}
+
 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
 			       unsigned group)
 {
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
 		reg += bank * 0x20 + pin / 16 * 0x10;
 		shift = pin % 16 * 2;
 
-		writel(0x3 << shift, reg + CLR);
-		writel(g->muxsel[i] << shift, reg + SET);
+		mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
 	}
 
 	return 0;
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
 			/* mA */
 			if (config & MA_PRESENT) {
 				shift = pin % 8 * 4;
-				writel(0x3 << shift, reg + CLR);
-				writel(ma << shift, reg + SET);
+				mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
 			}
 
 			/* vol */
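The new mxs_pinctrl_rmwl() helper trades the SET/CLR register pair for one read-modify-write. A plausible reading of the change (the hunk itself does not spell out the motivation): the old two-write sequence leaves the field at all-ones between the CLR and the SET, while a single writel() of the merged value never exposes an intermediate state. Expanded, the helper call is equivalent to:

	/* Illustrative expansion of mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg) */
	u32 tmp = readl(reg);

	tmp &= ~(0x3 << shift);		/* drop the old 2-bit mux value */
	tmp |= g->muxsel[i] << shift;	/* install the new one */
	writel(tmp, reg);		/* one write, no transient 0x3 state */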
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2debba6..20f1b44 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
  * is not listed below.
  */
 static const struct dmi_system_id chv_no_valid_mask[] = {
+	/* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
 	{
-		/* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
-		.ident = "Acer Chromebook (CYAN)",
+		.ident = "Intel_Strago based Chromebooks (All models)",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
-			DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
 		},
-	}
+	},
+	{
+		.ident = "Acer Chromebook R11 (Cyan)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
+		},
+	},
+	{
+		.ident = "Samsung Chromebook 3 (Celes)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+		},
+	},
+	{}
 };
 
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
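The reshuffled table also gains the empty { } terminator that the single-entry version above lacked; dmi_check_system() walks the entries until it reaches that terminator and returns how many matched the running machine. A sketch of the typical consumption pattern - chv_needs_valid_mask() is illustrative, the driver performs an equivalent check at probe time:

	#include <linux/dmi.h>

	/* Illustrative wrapper, not part of this series */
	static bool chv_needs_valid_mask(void)
	{
		/* Non-zero return means at least one DMI entry matched */
		return !dmi_check_system(chv_no_valid_mask);
	}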
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0d6b7f4..720a19f 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = {
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
 				"input bias pull to pin specific state", NULL, false),
 	PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false),
-	PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false),
 	PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false),
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = {
 	{ "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 },
 	{ "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 },
 	{ "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 },
-	{ "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 },
 	{ "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 },
 	{ "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 },
 	{ "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 },
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = {
 	{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
 	{ "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
 	{ "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
-	{ "output-enable", PIN_CONFIG_OUTPUT, 1, },
 	{ "output-high", PIN_CONFIG_OUTPUT, 1, },
 	{ "output-low", PIN_CONFIG_OUTPUT, 0, },
 	{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1482d13..e432ec8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -495,64 +495,54 @@ static struct irq_chip amd_gpio_irqchip = {
 	.flags        = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void amd_gpio_irq_handler(struct irq_desc *desc)
+#define PIN_IRQ_PENDING	(BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
+
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 {
-	u32 i;
-	u32 off;
-	u32 reg;
-	u32 pin_reg;
-	u64 reg64;
-	int handled = 0;
-	unsigned int irq;
+	struct amd_gpio *gpio_dev = dev_id;
+	struct gpio_chip *gc = &gpio_dev->gc;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i, irqnr;
 	unsigned long flags;
-	struct irq_chip *chip = irq_desc_get_chip(desc);
-	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+	u32 *regs, regval;
+	u64 status, mask;
 
-	chained_irq_enter(chip, desc);
-	/*enable GPIO interrupt again*/
+	/* Read the wake status */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
-	reg64 = reg;
-	reg64 = reg64 << 32;
-
-	reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
-	reg64 |= reg;
+	status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1);
+	status <<= 32;
+	status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	/*
-	 * first 46 bits indicates interrupt status.
-	 * one bit represents four interrupt sources.
-	*/
-	for (off = 0; off < 46 ; off++) {
-		if (reg64 & BIT(off)) {
-			for (i = 0; i < 4; i++) {
-				pin_reg = readl(gpio_dev->base +
-						(off * 4 + i) * 4);
-				if ((pin_reg & BIT(INTERRUPT_STS_OFF)) ||
-					(pin_reg & BIT(WAKE_STS_OFF))) {
-					irq = irq_find_mapping(gc->irqdomain,
-								off * 4 + i);
-					generic_handle_irq(irq);
-					writel(pin_reg,
-						gpio_dev->base
-						+ (off * 4 + i) * 4);
-					handled++;
-				}
-			}
+	/* Bit 0-45 contain the relevant status bits */
+	status &= (1ULL << 46) - 1;
+	regs = gpio_dev->base;
+	for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) {
+		if (!(status & mask))
+			continue;
+		status &= ~mask;
+
+		/* Each status bit covers four pins */
+		for (i = 0; i < 4; i++) {
+			regval = readl(regs + i);
+			if (!(regval & PIN_IRQ_PENDING))
+				continue;
+			irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+			generic_handle_irq(irq);
+			/* Clear interrupt */
+			writel(regval, regs + i);
+			ret = IRQ_HANDLED;
 		}
 	}
 
-	if (handled == 0)
-		handle_bad_irq(desc);
-
+	/* Signal EOI to the GPIO unit */
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-	reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
-	reg |= EOI_MASK;
-	writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+	regval |= EOI_MASK;
+	writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
-	chained_irq_exit(chip, desc);
+	return ret;
 }
 
 static int amd_get_groups_count(struct pinctrl_dev *pctldev)
@@ -821,10 +811,11 @@ static int amd_gpio_probe(struct platform_device *pdev)
 		goto out2;
 	}
 
-	gpiochip_set_chained_irqchip(&gpio_dev->gc,
-				 &amd_gpio_irqchip,
-				 irq_base,
-				 amd_gpio_irq_handler);
+	ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
+			       KBUILD_MODNAME, gpio_dev);
+	if (ret)
+		goto out2;
+
 	platform_set_drvdata(pdev, gpio_dev);
 
 	dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
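Since the GPIO interrupt is now serviced by a plain handler registered with devm_request_irq() rather than a chained flow handler, its return value carries meaning: IRQ_HANDLED only when a pending pin was found and cleared, IRQ_NONE otherwise, which keeps the core's spurious-interrupt accounting working. The skeleton of that pattern, with illustrative helper and type names:

	/* Illustrative skeleton; example_* names are made up */
	static irqreturn_t example_irq_handler(int irq, void *dev_id)
	{
		struct example_dev *ed = dev_id;

		if (!example_irq_pending(ed))
			return IRQ_NONE;	/* not this device */

		example_clear_and_dispatch(ed);
		return IRQ_HANDLED;
	}

	/* at probe time: */
	ret = devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
			       KBUILD_MODNAME, ed);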
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index f141aa0..9dd981d 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -143,9 +143,6 @@ struct rockchip_drv {
  * @gpio_chip: gpiolib chip
  * @grange: gpio range
  * @slock: spinlock for the gpio bank
- * @irq_lock: bus lock for irq chip
- * @new_irqs: newly configured irqs which must be muxed as GPIOs in
- *	irq_bus_sync_unlock()
  */
 struct rockchip_pin_bank {
 	void __iomem			*reg_base;
@@ -168,8 +165,6 @@ struct rockchip_pin_bank {
 	struct pinctrl_gpio_range	grange;
 	raw_spinlock_t			slock;
 	u32				toggle_edge_mode;
-	struct mutex			irq_lock;
-	u32				new_irqs;
 };
 
 #define PIN_BANK(id, pins, label)			\
@@ -2134,12 +2129,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 	int ret;
 
 	/* make sure the pin is configured as gpio input */
-	ret = rockchip_verify_mux(bank, d->hwirq, RK_FUNC_GPIO);
+	ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
 	if (ret < 0)
 		return ret;
 
-	bank->new_irqs |= mask;
-
+	clk_enable(bank->clk);
 	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -2197,6 +2191,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 	default:
 		irq_gc_unlock(gc);
 		raw_spin_unlock_irqrestore(&bank->slock, flags);
+		clk_disable(bank->clk);
 		return -EINVAL;
 	}
 
@@ -2205,6 +2200,7 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
 
 	irq_gc_unlock(gc);
 	raw_spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -2248,34 +2244,6 @@ static void rockchip_irq_disable(struct irq_data *d)
 	clk_disable(bank->clk);
 }
 
-static void rockchip_irq_bus_lock(struct irq_data *d)
-{
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct rockchip_pin_bank *bank = gc->private;
-
-	clk_enable(bank->clk);
-	mutex_lock(&bank->irq_lock);
-}
-
-static void rockchip_irq_bus_sync_unlock(struct irq_data *d)
-{
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct rockchip_pin_bank *bank = gc->private;
-
-	while (bank->new_irqs) {
-		unsigned int irq = __ffs(bank->new_irqs);
-		int ret;
-
-		ret = rockchip_set_mux(bank, irq, RK_FUNC_GPIO);
-		WARN_ON(ret < 0);
-
-		bank->new_irqs &= ~BIT(irq);
-	}
-
-	mutex_unlock(&bank->irq_lock);
-	clk_disable(bank->clk);
-}
-
 static int rockchip_interrupts_register(struct platform_device *pdev,
 						struct rockchip_pinctrl *info)
 {
@@ -2342,9 +2310,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
 		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
 		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
-		gc->chip_types[0].chip.irq_bus_lock = rockchip_irq_bus_lock;
-		gc->chip_types[0].chip.irq_bus_sync_unlock =
-						rockchip_irq_bus_sync_unlock;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
 		irq_set_chained_handler_and_data(bank->irq,
@@ -2518,7 +2483,6 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
 		int bank_pins = 0;
 
 		raw_spin_lock_init(&bank->slock);
-		mutex_init(&bank->irq_lock);
 		bank->drvdata = d;
 		bank->pin_base = ctrl->nr_pins;
 		ctrl->nr_pins += bank->nr_pins;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 9fd6d90..16b3ae5 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function);
  * pinmux_generic_free_functions() - removes all functions
  * @pctldev: pin controller device
  *
- * Note that the caller must take care of locking.
+ * Note that the caller must take care of locking. The pinctrl
+ * functions are allocated with devm_kzalloc() so no need to free
+ * them here.
  */
 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
 {
 	struct radix_tree_iter iter;
-	struct function_desc *function;
-	unsigned long *indices;
 	void **slot;
-	int i = 0;
-
-	indices = devm_kzalloc(pctldev->dev, sizeof(*indices) *
-			       pctldev->num_functions, GFP_KERNEL);
-	if (!indices)
-		return;
 
 	radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
-		indices[i++] = iter.index;
-
-	for (i = 0; i < pctldev->num_functions; i++) {
-		function = radix_tree_lookup(&pctldev->pin_function_tree,
-					     indices[i]);
-		radix_tree_delete(&pctldev->pin_function_tree, indices[i]);
-		devm_kfree(pctldev->dev, function);
-	}
+		radix_tree_delete(&pctldev->pin_function_tree, iter.index);
 
 	pctldev->num_functions = 0;
 }
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index d3c5f5d..222b668 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -798,7 +798,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
 		break;
 	case PIN_CONFIG_OUTPUT:
 		__stm32_gpio_set(bank, offset, arg);
-		ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+		ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 9aec1d2..6624499 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x3, "owa")),		/* DOUT */
+		  SUNXI_FUNCTION(0x3, "spdif")),	/* DOUT */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2de1e60..0578d34 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -266,7 +266,7 @@ struct goldfish_pipe_dev {
 	unsigned char __iomem *base;
 };
 
-struct goldfish_pipe_dev pipe_dev[1] = {};
+static struct goldfish_pipe_dev pipe_dev[1] = {};
 
 static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
 {
@@ -704,7 +704,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
 		/* Reallocate the array */
 		u32 new_capacity = 2 * dev->pipes_capacity;
 		struct goldfish_pipe **pipes =
-			kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL);
+			kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
 		if (!pipes)
 			return -ENOMEM;
 		memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
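The GFP_KERNEL -> GFP_ATOMIC switch matters because, as the _locked suffix suggests, get_free_pipe_id_locked() runs with the device spinlock held, and GFP_KERNEL allocations may sleep. A minimal illustration of the rule, with made-up names:

	/* Illustrative helper, not part of this series */
	static void **example_grow_table_locked(u32 new_capacity)
	{
		/*
		 * Caller holds a spinlock: sleeping allocations are forbidden,
		 * so use GFP_ATOMIC, accepting that it fails more readily
		 * under memory pressure.
		 */
		return kcalloc(new_capacity, sizeof(void *), GFP_ATOMIC);
	}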
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 8489020..a3ccc3c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -794,6 +794,25 @@
 	  This driver instantiates i2c-clients for these, so that standard
 	  i2c drivers for these chips can bind to the them.
 
+config INTEL_INT0002_VGPIO
+	tristate "Intel ACPI INT0002 Virtual GPIO driver"
+	depends on GPIOLIB && ACPI
+	select GPIOLIB_IRQCHIP
+	---help---
+	  Some peripherals on Bay Trail and Cherry Trail platforms signal a
+	  Power Management Event (PME) to the Power Management Controller (PMC)
+	  to wake up the system. When this happens, software needs to explicitly
+	  clear the PME bus 0 status bit in the GPE0a_STS register to avoid an
+	  IRQ storm on IRQ 9.
+
+	  This is modelled in ACPI through the INT0002 ACPI device, which is
+	  called a "Virtual GPIO controller" in ACPI because it defines the
+	  event handler to call when the PME triggers through _AEI and _L02
+	  methods as would be done for a real GPIO interrupt in ACPI.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called intel_int0002_vgpio.
+
 config INTEL_HID_EVENT
 	tristate "INTEL HID Event"
 	depends on ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 182a3ed..ab22ce7 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -46,6 +46,7 @@
 obj-$(CONFIG_TOSHIBA_HAPS)	+= toshiba_haps.o
 obj-$(CONFIG_TOSHIBA_WMI)	+= toshiba-wmi.o
 obj-$(CONFIG_INTEL_CHT_INT33FE)	+= intel_cht_int33fe.o
+obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
 obj-$(CONFIG_INTEL_HID_EVENT)	+= intel-hid.o
 obj-$(CONFIG_INTEL_VBTN)	+= intel-vbtn.o
 obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 63ba2cb..8519e0f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/acpi.h>
+#include <linux/suspend.h>
 #include <acpi/acpi_bus.h>
 
 MODULE_LICENSE("GPL");
@@ -75,6 +76,7 @@ static const struct key_entry intel_array_keymap[] = {
 struct intel_hid_priv {
 	struct input_dev *input_dev;
 	struct input_dev *array;
+	bool wakeup_mode;
 };
 
 static int intel_hid_set_enable(struct device *device, bool enable)
@@ -116,23 +118,37 @@ static void intel_button_array_enable(struct device *device, bool enable)
 		dev_warn(device, "failed to set button capability\n");
 }
 
+static int intel_hid_pm_prepare(struct device *device)
+{
+	struct intel_hid_priv *priv = dev_get_drvdata(device);
+
+	priv->wakeup_mode = true;
+	return 0;
+}
+
 static int intel_hid_pl_suspend_handler(struct device *device)
 {
-	intel_hid_set_enable(device, false);
-	intel_button_array_enable(device, false);
-
+	if (pm_suspend_via_firmware()) {
+		intel_hid_set_enable(device, false);
+		intel_button_array_enable(device, false);
+	}
 	return 0;
 }
 
 static int intel_hid_pl_resume_handler(struct device *device)
 {
-	intel_hid_set_enable(device, true);
-	intel_button_array_enable(device, true);
+	struct intel_hid_priv *priv = dev_get_drvdata(device);
 
+	priv->wakeup_mode = false;
+	if (pm_resume_via_firmware()) {
+		intel_hid_set_enable(device, true);
+		intel_button_array_enable(device, true);
+	}
 	return 0;
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+	.prepare = intel_hid_pm_prepare,
 	.freeze  = intel_hid_pl_suspend_handler,
 	.thaw  = intel_hid_pl_resume_handler,
 	.restore  = intel_hid_pl_resume_handler,
@@ -186,6 +202,19 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	unsigned long long ev_index;
 	acpi_status status;
 
+	if (priv->wakeup_mode) {
+		/* Wake up on 5-button array events only. */
+		if (event == 0xc0 || !priv->array)
+			return;
+
+		if (sparse_keymap_entry_from_scancode(priv->array, event))
+			pm_wakeup_hard_event(&device->dev);
+		else
+			dev_info(&device->dev, "unknown event 0x%x\n", event);
+
+		return;
+	}
+
 	/* 0xC0 is for HID events, other values are for 5 button array */
 	if (event != 0xc0) {
 		if (!priv->array ||
@@ -270,6 +299,7 @@ static int intel_hid_probe(struct platform_device *device)
 				 "failed to enable HID power button\n");
 	}
 
+	device_init_wakeup(&device->dev, true);
 	return 0;
 
 err_remove_notify:
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index c2035e1..61f1063 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/acpi.h>
+#include <linux/suspend.h>
 #include <acpi/acpi_bus.h>
 
 MODULE_LICENSE("GPL");
@@ -46,6 +47,7 @@ static const struct key_entry intel_vbtn_keymap[] = {
 
 struct intel_vbtn_priv {
 	struct input_dev *input_dev;
+	bool wakeup_mode;
 };
 
 static int intel_vbtn_input_setup(struct platform_device *device)
@@ -73,9 +75,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	struct platform_device *device = context;
 	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
 
-	if (!sparse_keymap_report_event(priv->input_dev, event, 1, true))
-		dev_info(&device->dev, "unknown event index 0x%x\n",
-			 event);
+	if (priv->wakeup_mode) {
+		if (sparse_keymap_entry_from_scancode(priv->input_dev, event)) {
+			pm_wakeup_hard_event(&device->dev);
+			return;
+		}
+	} else if (sparse_keymap_report_event(priv->input_dev, event, 1, true)) {
+		return;
+	}
+	dev_info(&device->dev, "unknown event index 0x%x\n", event);
 }
 
 static int intel_vbtn_probe(struct platform_device *device)
@@ -109,6 +117,7 @@ static int intel_vbtn_probe(struct platform_device *device)
 	if (ACPI_FAILURE(status))
 		return -EBUSY;
 
+	device_init_wakeup(&device->dev, true);
 	return 0;
 }
 
@@ -125,10 +134,34 @@ static int intel_vbtn_remove(struct platform_device *device)
 	return 0;
 }
 
+static int intel_vbtn_pm_prepare(struct device *dev)
+{
+	struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+	priv->wakeup_mode = true;
+	return 0;
+}
+
+static int intel_vbtn_pm_resume(struct device *dev)
+{
+	struct intel_vbtn_priv *priv = dev_get_drvdata(dev);
+
+	priv->wakeup_mode = false;
+	return 0;
+}
+
+static const struct dev_pm_ops intel_vbtn_pm_ops = {
+	.prepare = intel_vbtn_pm_prepare,
+	.resume = intel_vbtn_pm_resume,
+	.restore = intel_vbtn_pm_resume,
+	.thaw = intel_vbtn_pm_resume,
+};
+
 static struct platform_driver intel_vbtn_pl_driver = {
 	.driver = {
 		.name = "intel-vbtn",
 		.acpi_match_table = intel_vbtn_ids,
+		.pm = &intel_vbtn_pm_ops,
 	},
 	.probe = intel_vbtn_probe,
 	.remove = intel_vbtn_remove,
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
new file mode 100644
index 0000000..92dc230
--- /dev/null
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -0,0 +1,219 @@
+/*
+ * Intel INT0002 "Virtual GPIO" driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Loosely based on android x86 kernel code which is:
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Author: Dyut Kumar Sil <dyut.k.sil@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Some peripherals on Bay Trail and Cherry Trail platforms signal a Power
+ * Management Event (PME) to the Power Management Controller (PMC) to wakeup
+ * the system. When this happens software needs to clear the PME bus 0 status
+ * bit in the GPE0a_STS register to avoid an IRQ storm on IRQ 9.
+ *
+ * This is modelled in ACPI through the INT0002 ACPI device, which is
+ * called a "Virtual GPIO controller" in ACPI because it defines the event
+ * handler to call when the PME triggers through _AEI and _L02 / _E02
+ * methods as would be done for a real GPIO interrupt in ACPI. Note this
+ * is a hack to define an AML event handler for the PME while using existing
+ * ACPI mechanisms, this is not a real GPIO at all.
+ *
+ * This driver will bind to the INT0002 device, and register as a GPIO
+ * controller, letting gpiolib-acpi.c call the _L02 handler as it would
+ * for a real GPIO controller.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define DRV_NAME			"INT0002 Virtual GPIO"
+
+/* For some reason the virtual GPIO pin tied to the GPE is numbered pin 2 */
+#define GPE0A_PME_B0_VIRT_GPIO_PIN	2
+
+#define GPE0A_PME_B0_STS_BIT		BIT(13)
+#define GPE0A_PME_B0_EN_BIT		BIT(13)
+#define GPE0A_STS_PORT			0x420
+#define GPE0A_EN_PORT			0x428
+
+#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id int0002_cpu_ids[] = {
+/*
+ * Limit ourselves to Cherry Trail for now, until testing shows we
+ * need to handle the INT0002 device on Baytrail too.
+ *	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	 * Valleyview, Bay Trail *
+ */
+	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
+	{}
+};
+
+/*
+ * As this is not a real GPIO at all, but just a hack to model an event in
+ * ACPI the get / set functions are dummy functions.
+ */
+
+static int int0002_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	return 0;
+}
+
+static void int0002_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			     int value)
+{
+}
+
+static int int0002_gpio_direction_output(struct gpio_chip *chip,
+					 unsigned int offset, int value)
+{
+	return 0;
+}
+
+static void int0002_irq_ack(struct irq_data *data)
+{
+	outl(GPE0A_PME_B0_STS_BIT, GPE0A_STS_PORT);
+}
+
+static void int0002_irq_unmask(struct irq_data *data)
+{
+	u32 gpe_en_reg;
+
+	gpe_en_reg = inl(GPE0A_EN_PORT);
+	gpe_en_reg |= GPE0A_PME_B0_EN_BIT;
+	outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static void int0002_irq_mask(struct irq_data *data)
+{
+	u32 gpe_en_reg;
+
+	gpe_en_reg = inl(GPE0A_EN_PORT);
+	gpe_en_reg &= ~GPE0A_PME_B0_EN_BIT;
+	outl(gpe_en_reg, GPE0A_EN_PORT);
+}
+
+static irqreturn_t int0002_irq(int irq, void *data)
+{
+	struct gpio_chip *chip = data;
+	u32 gpe_sts_reg;
+
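+	/* The IRQ line is shared; only claim it when our PME status bit is set */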
+	gpe_sts_reg = inl(GPE0A_STS_PORT);
+	if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
+		return IRQ_NONE;
+
+	generic_handle_irq(irq_find_mapping(chip->irqdomain,
+					    GPE0A_PME_B0_VIRT_GPIO_PIN));
+
+	pm_system_wakeup();
+
+	return IRQ_HANDLED;
+}
+
+static struct irq_chip int0002_irqchip = {
+	.name			= DRV_NAME,
+	.irq_ack		= int0002_irq_ack,
+	.irq_mask		= int0002_irq_mask,
+	.irq_unmask		= int0002_irq_unmask,
+};
+
+static int int0002_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct x86_cpu_id *cpu_id;
+	struct gpio_chip *chip;
+	int irq, ret;
+
+	/* Menlow has a different INT0002 device? <sigh> */
+	cpu_id = x86_match_cpu(int0002_cpu_ids);
+	if (!cpu_id)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "Error getting IRQ: %d\n", irq);
+		return irq;
+	}
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->label = DRV_NAME;
+	chip->parent = dev;
+	chip->owner = THIS_MODULE;
+	chip->get = int0002_gpio_get;
+	chip->set = int0002_gpio_set;
+	chip->direction_input = int0002_gpio_get;
+	chip->direction_output = int0002_gpio_direction_output;
+	chip->base = -1;
+	chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
+	chip->irq_need_valid_mask = true;
+
+	ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+	if (ret) {
+		dev_err(dev, "Error adding gpio chip: %d\n", ret);
+		return ret;
+	}
+
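+	/* Only the PME pin (2) can generate an IRQ, mask out pins 0 and 1 */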
+	bitmap_clear(chip->irq_valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+
+	/*
+	 * We manually request the irq here instead of passing a flow-handler
+	 * to gpiochip_set_chained_irqchip, because the irq is shared.
+	 */
+	ret = devm_request_irq(dev, irq, int0002_irq,
+			       IRQF_SHARED | IRQF_NO_THREAD, "INT0002", chip);
+	if (ret) {
+		dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
+		return ret;
+	}
+
+	ret = gpiochip_irqchip_add(chip, &int0002_irqchip, 0, handle_edge_irq,
+				   IRQ_TYPE_NONE);
+	if (ret) {
+		dev_err(dev, "Error adding irqchip: %d\n", ret);
+		return ret;
+	}
+
+	gpiochip_set_chained_irqchip(chip, &int0002_irqchip, irq, NULL);
+
+	return 0;
+}
+
+static const struct acpi_device_id int0002_acpi_ids[] = {
+	{ "INT0002", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, int0002_acpi_ids);
+
+static struct platform_driver int0002_driver = {
+	.driver = {
+		.name			= DRV_NAME,
+		.acpi_match_table	= int0002_acpi_ids,
+	},
+	.probe	= int0002_probe,
+};
+
+module_platform_driver(int0002_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Intel INT0002 Virtual GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ef29f18..4cc2f4e 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -97,11 +97,9 @@
 	} \
 }
 
-#ifdef CONFIG_PM_SLEEP
 static u8 suspend_prep_ok;
 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
 static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
-#endif
 
 struct telemetry_susp_stats {
 	u32 shlw_swake_ctr;
@@ -807,7 +805,6 @@ static const struct file_operations telem_ioss_trc_verb_ops = {
 	.release	= single_release,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int pm_suspend_prep_cb(void)
 {
 	struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS];
@@ -937,7 +934,6 @@ static int pm_notification(struct notifier_block *this,
 static struct notifier_block pm_notifier = {
 	.notifier_call = pm_notification,
 };
-#endif /* CONFIG_PM_SLEEP */
 
 static int __init telemetry_debugfs_init(void)
 {
@@ -960,14 +956,13 @@ static int __init telemetry_debugfs_init(void)
 	if (err < 0)
 		return -EINVAL;
 
-
-#ifdef CONFIG_PM_SLEEP
 	register_pm_notifier(&pm_notifier);
-#endif /* CONFIG_PM_SLEEP */
 
 	debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL);
-	if (!debugfs_conf->telemetry_dbg_dir)
-		return -ENOMEM;
+	if (!debugfs_conf->telemetry_dbg_dir) {
+		err = -ENOMEM;
+		goto out_pm;
+	}
 
 	f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
 				debugfs_conf->telemetry_dbg_dir, NULL,
@@ -1014,6 +1009,8 @@ static int __init telemetry_debugfs_init(void)
 out:
 	debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
 	debugfs_conf->telemetry_dbg_dir = NULL;
+out_pm:
+	unregister_pm_notifier(&pm_notifier);
 
 	return err;
 }
@@ -1022,6 +1019,7 @@ static void __exit telemetry_debugfs_exit(void)
 {
 	debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir);
 	debugfs_conf->telemetry_dbg_dir = NULL;
+	unregister_pm_notifier(&pm_notifier);
 }
 
 late_initcall(telemetry_debugfs_init);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b6cb0c..f6861b5 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1438,25 +1438,20 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
  */
 
 /* interface_version --------------------------------------------------- */
-static ssize_t tpacpi_driver_interface_version_show(
-				struct device_driver *drv,
-				char *buf)
+static ssize_t interface_version_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
 }
-
-static DRIVER_ATTR(interface_version, S_IRUGO,
-		tpacpi_driver_interface_version_show, NULL);
+static DRIVER_ATTR_RO(interface_version);
 
 /* debug_level --------------------------------------------------------- */
-static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
-						char *buf)
+static ssize_t debug_level_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
 }
 
-static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
-						const char *buf, size_t count)
+static ssize_t debug_level_store(struct device_driver *drv, const char *buf,
+				 size_t count)
 {
 	unsigned long t;
 
@@ -1467,34 +1462,28 @@ static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
 
 	return count;
 }
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
-		tpacpi_driver_debug_show, tpacpi_driver_debug_store);
+static DRIVER_ATTR_RW(debug_level);
 
 /* version ------------------------------------------------------------- */
-static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
-						char *buf)
+static ssize_t version_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%s v%s\n",
 			TPACPI_DESC, TPACPI_VERSION);
 }
-
-static DRIVER_ATTR(version, S_IRUGO,
-		tpacpi_driver_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
 /* --------------------------------------------------------------------- */
 
 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
 
 /* wlsw_emulstate ------------------------------------------------------ */
-static ssize_t tpacpi_driver_wlsw_emulstate_show(struct device_driver *drv,
-						char *buf)
+static ssize_t wlsw_emulstate_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wlsw_emulstate);
 }
 
-static ssize_t tpacpi_driver_wlsw_emulstate_store(struct device_driver *drv,
-						const char *buf, size_t count)
+static ssize_t wlsw_emulstate_store(struct device_driver *drv, const char *buf,
+				    size_t count)
 {
 	unsigned long t;
 
@@ -1508,22 +1497,16 @@ static ssize_t tpacpi_driver_wlsw_emulstate_store(struct device_driver *drv,
 
 	return count;
 }
-
-static DRIVER_ATTR(wlsw_emulstate, S_IWUSR | S_IRUGO,
-		tpacpi_driver_wlsw_emulstate_show,
-		tpacpi_driver_wlsw_emulstate_store);
+static DRIVER_ATTR_RW(wlsw_emulstate);
 
 /* bluetooth_emulstate ------------------------------------------------- */
-static ssize_t tpacpi_driver_bluetooth_emulstate_show(
-					struct device_driver *drv,
-					char *buf)
+static ssize_t bluetooth_emulstate_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_bluetooth_emulstate);
 }
 
-static ssize_t tpacpi_driver_bluetooth_emulstate_store(
-					struct device_driver *drv,
-					const char *buf, size_t count)
+static ssize_t bluetooth_emulstate_store(struct device_driver *drv,
+					 const char *buf, size_t count)
 {
 	unsigned long t;
 
@@ -1534,22 +1517,16 @@ static ssize_t tpacpi_driver_bluetooth_emulstate_store(
 
 	return count;
 }
-
-static DRIVER_ATTR(bluetooth_emulstate, S_IWUSR | S_IRUGO,
-		tpacpi_driver_bluetooth_emulstate_show,
-		tpacpi_driver_bluetooth_emulstate_store);
+static DRIVER_ATTR_RW(bluetooth_emulstate);
 
 /* wwan_emulstate ------------------------------------------------- */
-static ssize_t tpacpi_driver_wwan_emulstate_show(
-					struct device_driver *drv,
-					char *buf)
+static ssize_t wwan_emulstate_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wwan_emulstate);
 }
 
-static ssize_t tpacpi_driver_wwan_emulstate_store(
-					struct device_driver *drv,
-					const char *buf, size_t count)
+static ssize_t wwan_emulstate_store(struct device_driver *drv, const char *buf,
+				    size_t count)
 {
 	unsigned long t;
 
@@ -1560,22 +1537,16 @@ static ssize_t tpacpi_driver_wwan_emulstate_store(
 
 	return count;
 }
-
-static DRIVER_ATTR(wwan_emulstate, S_IWUSR | S_IRUGO,
-		tpacpi_driver_wwan_emulstate_show,
-		tpacpi_driver_wwan_emulstate_store);
+static DRIVER_ATTR_RW(wwan_emulstate);
 
 /* uwb_emulstate ------------------------------------------------- */
-static ssize_t tpacpi_driver_uwb_emulstate_show(
-					struct device_driver *drv,
-					char *buf)
+static ssize_t uwb_emulstate_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_uwb_emulstate);
 }
 
-static ssize_t tpacpi_driver_uwb_emulstate_store(
-					struct device_driver *drv,
-					const char *buf, size_t count)
+static ssize_t uwb_emulstate_store(struct device_driver *drv, const char *buf,
+				   size_t count)
 {
 	unsigned long t;
 
@@ -1586,10 +1557,7 @@ static ssize_t tpacpi_driver_uwb_emulstate_store(
 
 	return count;
 }
-
-static DRIVER_ATTR(uwb_emulstate, S_IWUSR | S_IRUGO,
-		tpacpi_driver_uwb_emulstate_show,
-		tpacpi_driver_uwb_emulstate_store);
+static DRIVER_ATTR_RW(uwb_emulstate);
 #endif
 
 /* --------------------------------------------------------------------- */
@@ -8606,14 +8574,13 @@ static ssize_t fan_fan2_input_show(struct device *dev,
 static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
 
 /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
-static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
-				     char *buf)
+static ssize_t fan_watchdog_show(struct device_driver *drv, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u\n", fan_watchdog_maxinterval);
 }
 
-static ssize_t fan_fan_watchdog_store(struct device_driver *drv,
-				      const char *buf, size_t count)
+static ssize_t fan_watchdog_store(struct device_driver *drv, const char *buf,
+				  size_t count)
 {
 	unsigned long t;
 
@@ -8630,9 +8597,7 @@ static ssize_t fan_fan_watchdog_store(struct device_driver *drv,
 
 	return count;
 }
-
-static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
-		fan_fan_watchdog_show, fan_fan_watchdog_store);
+static DRIVER_ATTR_RW(fan_watchdog);
 
 /* --------------------------------------------------------------------- */
 static struct attribute *fan_attributes[] = {
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9113876..3a4c1aa 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -149,8 +149,8 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 	}
 
 	if (device_can_wakeup(&dev->dev)) {
-		error = acpi_pm_device_sleep_wake(&dev->dev,
-				device_may_wakeup(&dev->dev));
+		error = acpi_pm_set_device_wakeup(&dev->dev,
+					      device_may_wakeup(&dev->dev));
 		if (error)
 			return error;
 	}
@@ -185,7 +185,7 @@ static int pnpacpi_resume(struct pnp_dev *dev)
 	}
 
 	if (device_may_wakeup(&dev->dev))
-		acpi_pm_device_sleep_wake(&dev->dev, false);
+		acpi_pm_set_device_wakeup(&dev->dev, false);
 
 	if (acpi_device_power_manageable(acpi_dev))
 		error = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 8581252..031a343 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -253,6 +253,16 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
 	},
 };
 
+static const struct rockchip_iodomain_soc_data soc_data_rk3228 = {
+	.grf_offset = 0x418,
+	.supply_names = {
+		"vccio1",
+		"vccio2",
+		"vccio3",
+		"vccio4",
+	},
+};
+
 static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
 	.grf_offset = 0x380,
 	.supply_names = {
@@ -345,6 +355,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
 		.data = (void *)&soc_data_rk3188
 	},
 	{
+		.compatible = "rockchip,rk3228-io-voltage-domain",
+		.data = (void *)&soc_data_rk3228
+	},
+	{
 		.compatible = "rockchip,rk3288-io-voltage-domain",
 		.data = (void *)&soc_data_rk3288
 	},
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 13f1714..ca0de1a 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -58,9 +58,9 @@
 
 config POWER_RESET_BRCMSTB
 	bool "Broadcom STB reset driver"
-	depends on ARM || MIPS || COMPILE_TEST
+	depends on ARM || ARM64 || MIPS || COMPILE_TEST
 	depends on MFD_SYSCON
-	default ARCH_BRCMSTB
+	default ARCH_BRCMSTB || BMIPS_GENERIC
 	help
 	  This driver provides restart support for Broadcom STB boards.
 
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index c6c3bee..c30c401 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -97,7 +97,7 @@ static void at91_lpddr_poweroff(void)
 		  "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
 		  "r" (at91_shdwc_base),
 		  "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
-		: "r0");
+		: "r6");
 }
 
 static int at91_poweroff_get_wakeup_mode(struct device_node *np)
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 90b0b5a..55fce8b 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -132,7 +132,7 @@ static void at91_lpddr_poweroff(void)
 		  "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
 		  "r" (at91_shdwc->at91_shdwc_base),
 		  "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
-		: "r0");
+		: "r6");
 }
 
 static u32 at91_shdwc_debouncer_value(struct platform_device *pdev,
diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
index fb51218..8f975ca 100644
--- a/drivers/power/reset/reboot-mode.c
+++ b/drivers/power/reset/reboot-mode.c
@@ -13,7 +13,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/reboot.h>
-#include "reboot-mode.h"
+#include <linux/reboot-mode.h>
 
 #define PREFIX "mode-"
 
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index c8c371b..563a97d 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -15,7 +15,7 @@
 #include <linux/reboot.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
-#include "reboot-mode.h"
+#include <linux/reboot-mode.h>
 
 struct syscon_reboot_mode {
 	struct regmap *map;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 86f40bf..969f500 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -82,6 +82,14 @@
 	  Say Y here to enable support for power supply provided by
 	  Active-semi ActivePath ACT8945A charger.
 
+config BATTERY_CPCAP
+	tristate "Motorola CPCAP PMIC battery driver"
+	depends on MFD_CPCAP && IIO
+	default MFD_CPCAP
+	help
+	  Say Y here to enable support for the battery on Motorola
+	  phones and tablets such as the Droid 4.
+
 config BATTERY_DS2760
 	tristate "DS2760 battery driver (HP iPAQ & others)"
 	depends on W1 && W1_SLAVE_DS2760
@@ -190,6 +198,17 @@
 	  Say Y here to enable support for batteries with BQ27xxx chips
 	  connected over an I2C bus.
 
+config BATTERY_BQ27XXX_DT_UPDATES_NVM
+	bool "BQ27xxx support for update of NVM/flash data memory"
+	depends on BATTERY_BQ27XXX_I2C
+	help
+	  Say Y here to enable devicetree monitored-battery config to update
+	  NVM/flash data memory. Only enable this option for devices with a
+	  fuel gauge mounted on the circuit board, and a battery that cannot
+	  easily be replaced with one of a different type. Not for
+	  general-purpose kernels, as this can cause misconfiguration of a
+	  smart battery with embedded NVM/flash.
+
 config BATTERY_DA9030
 	tristate "DA9030 battery driver"
 	depends on PMIC_DA903X
@@ -408,6 +427,13 @@
           runtime and in suspend-to-RAM by waking up the system periodically
           with help of suspend_again support.
 
+config CHARGER_LTC3651
+	tristate "LTC3651 charger"
+	depends on GPIOLIB
+	help
+	  Say Y to include support for the LTC3651 battery charger which reports
+	  its status via GPIO lines.
+
 config CHARGER_MAX14577
 	tristate "Maxim MAX14577/77836 battery charger driver"
 	depends on MFD_MAX14577
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a39126d..a41f409 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_BATTERY_ACT8945A)	+= act8945a_charger.o
 obj-$(CONFIG_BATTERY_AXP20X)	+= axp20x_battery.o
 obj-$(CONFIG_CHARGER_AXP20X)	+= axp20x_ac_power.o
+obj-$(CONFIG_BATTERY_CPCAP)	+= cpcap-battery.o
 obj-$(CONFIG_BATTERY_DS2760)	+= ds2760_battery.o
 obj-$(CONFIG_BATTERY_DS2780)	+= ds2780_battery.o
 obj-$(CONFIG_BATTERY_DS2781)	+= ds2781_battery.o
@@ -61,6 +62,7 @@
 obj-$(CONFIG_CHARGER_LP8788)	+= lp8788-charger.o
 obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
 obj-$(CONFIG_CHARGER_MANAGER)	+= charger-manager.o
+obj-$(CONFIG_CHARGER_LTC3651)	+= ltc3651-charger.o
 obj-$(CONFIG_CHARGER_MAX14577)	+= max14577_charger.o
 obj-$(CONFIG_CHARGER_DETECTOR_MAX14656)	+= max14656_charger_detector.o
 obj-$(CONFIG_CHARGER_MAX77693)	+= max77693_charger.o
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 5d29b2e..7494f0f 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -60,6 +60,8 @@ struct axp20x_batt_ps {
 	struct iio_channel *batt_chrg_i;
 	struct iio_channel *batt_dischrg_i;
 	struct iio_channel *batt_v;
+	/* Maximum constant charge current */
+	unsigned int max_ccc;
 	u8 axp_id;
 };
 
@@ -129,6 +131,14 @@ static void raw_to_constant_charge_current(struct axp20x_batt_ps *axp, int *val)
 		*val = *val * 150000 + 300000;
 }
 
+static void constant_charge_current_to_raw(struct axp20x_batt_ps *axp, int *val)
+{
+	if (axp->axp_id == AXP209_ID)
+		*val = (*val - 300000) / 100000;
+	else
+		*val = (*val - 300000) / 150000;
+}
+
 static int axp20x_get_constant_charge_current(struct axp20x_batt_ps *axp,
 					      int *val)
 {
@@ -221,9 +231,7 @@ static int axp20x_battery_get_prop(struct power_supply *psy,
 		break;
 
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-		val->intval = AXP20X_CHRG_CTRL1_TGT_CURR;
-		raw_to_constant_charge_current(axp20x_batt, &val->intval);
-
+		val->intval = axp20x_batt->max_ccc;
 		break;
 
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
@@ -340,10 +348,10 @@ static int axp20x_battery_set_max_voltage(struct axp20x_batt_ps *axp20x_batt,
 static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt,
 					      int charge_current)
 {
-	if (axp_batt->axp_id == AXP209_ID)
-		charge_current = (charge_current - 300000) / 100000;
-	else
-		charge_current = (charge_current - 300000) / 150000;
+	if (charge_current > axp_batt->max_ccc)
+		return -EINVAL;
+
+	constant_charge_current_to_raw(axp_batt, &charge_current);
 
 	if (charge_current > AXP20X_CHRG_CTRL1_TGT_CURR || charge_current < 0)
 		return -EINVAL;
@@ -352,6 +360,36 @@ static int axp20x_set_constant_charge_current(struct axp20x_batt_ps *axp_batt,
 				  AXP20X_CHRG_CTRL1_TGT_CURR, charge_current);
 }
 
+static int axp20x_set_max_constant_charge_current(struct axp20x_batt_ps *axp,
+						  int charge_current)
+{
+	bool lower_max = false;
+
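+	/*
+	 * Round-trip through the raw representation so the stored maximum is
+	 * both range-checked and quantized to a real register step.
+	 */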
+	constant_charge_current_to_raw(axp, &charge_current);
+
+	if (charge_current > AXP20X_CHRG_CTRL1_TGT_CURR || charge_current < 0)
+		return -EINVAL;
+
+	raw_to_constant_charge_current(axp, &charge_current);
+
+	if (charge_current > axp->max_ccc)
+		dev_warn(axp->dev,
+			 "Setting max constant charge current higher than previously defined. Note that increasing the constant charge current may damage your battery.\n");
+	else
+		lower_max = true;
+
+	axp->max_ccc = charge_current;
+
+	if (lower_max) {
+		int current_cc;
+
+		axp20x_get_constant_charge_current(axp, &current_cc);
+		if (current_cc > charge_current)
+			axp20x_set_constant_charge_current(axp, charge_current);
+	}
+
+	return 0;
+}
 static int axp20x_set_voltage_min_design(struct axp20x_batt_ps *axp_batt,
 					 int min_voltage)
 {
@@ -380,6 +418,9 @@ static int axp20x_battery_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
 		return axp20x_set_constant_charge_current(axp20x_batt,
 							  val->intval);
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		return axp20x_set_max_constant_charge_current(axp20x_batt,
+							      val->intval);
 
 	default:
 		return -EINVAL;
@@ -405,7 +446,8 @@ static int axp20x_battery_prop_writeable(struct power_supply *psy,
 {
 	return psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN ||
 	       psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN ||
-	       psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT;
+	       psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT ||
+	       psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
 }
 
 static const struct power_supply_desc axp20x_batt_ps_desc = {
@@ -433,6 +475,7 @@ static int axp20x_power_probe(struct platform_device *pdev)
 {
 	struct axp20x_batt_ps *axp20x_batt;
 	struct power_supply_config psy_cfg = {};
+	struct power_supply_battery_info info;
 
 	if (!of_device_is_available(pdev->dev.of_node))
 		return -ENODEV;
@@ -484,6 +527,35 @@ static int axp20x_power_probe(struct platform_device *pdev)
 		return PTR_ERR(axp20x_batt->batt);
 	}
 
+	if (!power_supply_get_battery_info(axp20x_batt->batt, &info)) {
+		int vmin = info.voltage_min_design_uv;
+		int ccc = info.constant_charge_current_max_ua;
+
+		if (vmin > 0 && axp20x_set_voltage_min_design(axp20x_batt,
+							      vmin))
+			dev_err(&pdev->dev,
+				"couldn't set voltage_min_design\n");
+
+		/* Set max to unverified value to be able to set CCC */
+		axp20x_batt->max_ccc = ccc;
+
+		if (ccc <= 0 || axp20x_set_constant_charge_current(axp20x_batt,
+								   ccc)) {
+			dev_err(&pdev->dev,
+				"couldn't set constant charge current from DT: fallback to minimum value\n");
+			ccc = 300000;
+			axp20x_batt->max_ccc = ccc;
+			axp20x_set_constant_charge_current(axp20x_batt, ccc);
+		}
+	}
+
+	/*
+	 * Record the resulting max CCC: either the value just programmed from
+	 * the battery info above, or the current register (hardware) default.
+	 */
+	axp20x_get_constant_charge_current(axp20x_batt,
+					   &axp20x_batt->max_ccc);
+
 	return 0;
 }
 
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 2397c48..44f70dc 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -339,7 +339,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
 		"VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID", NULL };
 	static const char * const axp22x_irq_names[] = {
 		"VBUS_PLUGIN", "VBUS_REMOVAL", NULL };
-	static const char * const *irq_names;
+	const char * const *irq_names;
 	const struct power_supply_desc *usb_power_desc;
 	int i, irq, ret;
 
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index eb01453..6931e1d 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -81,14 +81,12 @@ static int bq24735_charger_property_is_writeable(struct power_supply *psy,
 static inline int bq24735_write_word(struct i2c_client *client, u8 reg,
 				     u16 value)
 {
-	return i2c_smbus_write_word_data(client, reg, le16_to_cpu(value));
+	return i2c_smbus_write_word_data(client, reg, value);
 }
 
 static inline int bq24735_read_word(struct i2c_client *client, u8 reg)
 {
-	s32 ret = i2c_smbus_read_word_data(client, reg);
-
-	return ret < 0 ? ret : le16_to_cpu(ret);
+	return i2c_smbus_read_word_data(client, reg);
 }
 
 static int bq24735_update_word(struct i2c_client *client, u8 reg,
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 398801a..ed44439 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
  * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
  * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2017 Liam Breck <kernel@networkimprov.net>
  *
  * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
  *
@@ -65,6 +66,7 @@
 #define BQ27XXX_FLAG_DSC	BIT(0)
 #define BQ27XXX_FLAG_SOCF	BIT(1) /* State-of-Charge threshold final */
 #define BQ27XXX_FLAG_SOC1	BIT(2) /* State-of-Charge threshold 1 */
+#define BQ27XXX_FLAG_CFGUP	BIT(4)
 #define BQ27XXX_FLAG_FC		BIT(9)
 #define BQ27XXX_FLAG_OTD	BIT(14)
 #define BQ27XXX_FLAG_OTC	BIT(15)
@@ -78,6 +80,12 @@
 #define BQ27000_FLAG_FC		BIT(5)
 #define BQ27000_FLAG_CHGS	BIT(7) /* Charge state flag */
 
+/* control register params */
+#define BQ27XXX_SEALED			0x20
+#define BQ27XXX_SET_CFGUPDATE		0x13
+#define BQ27XXX_SOFT_RESET		0x42
+#define BQ27XXX_RESET			0x41
+
 #define BQ27XXX_RS			(20) /* Resistor sense mOhm */
 #define BQ27XXX_POWER_CONSTANT		(29200) /* 29.2 µV^2 * 1000 */
 #define BQ27XXX_CURRENT_CONSTANT	(3570) /* 3.57 µV * 1000 */
@@ -108,9 +116,21 @@ enum bq27xxx_reg_index {
 	BQ27XXX_REG_SOC,	/* State-of-Charge */
 	BQ27XXX_REG_DCAP,	/* Design Capacity */
 	BQ27XXX_REG_AP,		/* Average Power */
+	BQ27XXX_DM_CTRL,	/* Block Data Control */
+	BQ27XXX_DM_CLASS,	/* Data Class */
+	BQ27XXX_DM_BLOCK,	/* Data Block */
+	BQ27XXX_DM_DATA,	/* Block Data */
+	BQ27XXX_DM_CKSUM,	/* Block Data Checksum */
 	BQ27XXX_REG_MAX,	/* sentinel */
 };
 
+#define BQ27XXX_DM_REG_ROWS \
+	[BQ27XXX_DM_CTRL] = 0x61,  \
+	[BQ27XXX_DM_CLASS] = 0x3e, \
+	[BQ27XXX_DM_BLOCK] = 0x3f, \
+	[BQ27XXX_DM_DATA] = 0x40,  \
+	[BQ27XXX_DM_CKSUM] = 0x60
+
 /* Register mappings */
 static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 	[BQ27000] = {
@@ -131,6 +151,11 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x0b,
 		[BQ27XXX_REG_DCAP] = 0x76,
 		[BQ27XXX_REG_AP] = 0x24,
+		[BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
 	},
 	[BQ27010] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -150,6 +175,11 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x0b,
 		[BQ27XXX_REG_DCAP] = 0x76,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+		[BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
 	},
 	[BQ2750X] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -169,6 +199,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ2751X] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -188,6 +219,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x20,
 		[BQ27XXX_REG_DCAP] = 0x2e,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27500] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -207,6 +239,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27510G1] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -226,6 +259,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27510G2] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -245,6 +279,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27510G3] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -264,6 +299,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x20,
 		[BQ27XXX_REG_DCAP] = 0x2e,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27520G1] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -283,6 +319,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27520G2] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -302,6 +339,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27520G3] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -321,6 +359,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27520G4] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -340,6 +379,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x20,
 		[BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27530] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -359,6 +399,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27541] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -378,6 +419,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27545] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -397,6 +439,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x2c,
 		[BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
 		[BQ27XXX_REG_AP] = 0x24,
+		BQ27XXX_DM_REG_ROWS,
 	},
 	[BQ27421] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
@@ -416,6 +459,7 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_SOC] = 0x1c,
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = 0x18,
+		BQ27XXX_DM_REG_ROWS,
 	},
 };
 
@@ -757,6 +801,73 @@ static struct {
 static DEFINE_MUTEX(bq27xxx_list_lock);
 static LIST_HEAD(bq27xxx_battery_devices);
 
+#define BQ27XXX_MSLEEP(i) usleep_range((i)*1000, (i)*1000+500)
+
+#define BQ27XXX_DM_SZ	32
+
+struct bq27xxx_dm_reg {
+	u8 subclass_id;
+	u8 offset;
+	u8 bytes;
+	u16 min, max;
+};
+
+/**
+ * struct bq27xxx_dm_buf - chip data memory buffer
+ * @class: data memory subclass_id
+ * @block: data memory block number
+ * @data: data from/for the block
+ * @has_data: true if data has been filled by read
+ * @dirty: true if data has changed since last read/write
+ *
+ * Encapsulates info required to manage chip data memory blocks.
+ */
+struct bq27xxx_dm_buf {
+	u8 class;
+	u8 block;
+	u8 data[BQ27XXX_DM_SZ];
+	bool has_data, dirty;
+};
+
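+/* Address the 32-byte data memory block holding dm_regs[i] */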
+#define BQ27XXX_DM_BUF(di, i) { \
+	.class = (di)->dm_regs[i].subclass_id, \
+	.block = (di)->dm_regs[i].offset / BQ27XXX_DM_SZ, \
+}
+
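+/* Point into buf->data for reg, or NULL if buf covers a different block */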
+static inline u16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf,
+				      struct bq27xxx_dm_reg *reg)
+{
+	if (buf->class == reg->subclass_id &&
+	    buf->block == reg->offset / BQ27XXX_DM_SZ)
+		return (u16 *) (buf->data + reg->offset % BQ27XXX_DM_SZ);
+
+	return NULL;
+}
+
+enum bq27xxx_dm_reg_id {
+	BQ27XXX_DM_DESIGN_CAPACITY = 0,
+	BQ27XXX_DM_DESIGN_ENERGY,
+	BQ27XXX_DM_TERMINATE_VOLTAGE,
+};
+
+static const char * const bq27xxx_dm_reg_name[] = {
+	[BQ27XXX_DM_DESIGN_CAPACITY] = "design-capacity",
+	[BQ27XXX_DM_DESIGN_ENERGY] = "design-energy",
+	[BQ27XXX_DM_TERMINATE_VOLTAGE] = "terminate-voltage",
+};
+
+
+static bool bq27xxx_dt_to_nvm = true;
+module_param_named(dt_monitored_battery_updates_nvm, bq27xxx_dt_to_nvm, bool, 0444);
+MODULE_PARM_DESC(dt_monitored_battery_updates_nvm,
+	"Devicetree monitored-battery config updates data memory on NVM/flash chips.\n"
+	"Users must set this =0 when installing a different type of battery!\n"
+	"Default is =1."
+#ifndef CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM
+	"\nSetting this affects future kernel updates, not the current configuration."
+#endif
+);
+
 static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
 {
 	struct bq27xxx_device_info *di;
@@ -794,11 +905,419 @@ MODULE_PARM_DESC(poll_interval,
 static inline int bq27xxx_read(struct bq27xxx_device_info *di, int reg_index,
 			       bool single)
 {
-	/* Reports EINVAL for invalid/missing registers */
+	int ret;
+
 	if (!di || di->regs[reg_index] == INVALID_REG_ADDR)
 		return -EINVAL;
 
-	return di->bus.read(di, di->regs[reg_index], single);
+	ret = di->bus.read(di, di->regs[reg_index], single);
+	if (ret < 0)
+		dev_dbg(di->dev, "failed to read register 0x%02x (index %d)\n",
+			di->regs[reg_index], reg_index);
+
+	return ret;
+}
+
+static inline int bq27xxx_write(struct bq27xxx_device_info *di, int reg_index,
+				u16 value, bool single)
+{
+	int ret;
+
+	if (!di || di->regs[reg_index] == INVALID_REG_ADDR)
+		return -EINVAL;
+
+	if (!di->bus.write)
+		return -EPERM;
+
+	ret = di->bus.write(di, di->regs[reg_index], value, single);
+	if (ret < 0)
+		dev_dbg(di->dev, "failed to write register 0x%02x (index %d)\n",
+			di->regs[reg_index], reg_index);
+
+	return ret;
+}
+
+static inline int bq27xxx_read_block(struct bq27xxx_device_info *di, int reg_index,
+				     u8 *data, int len)
+{
+	int ret;
+
+	if (!di || di->regs[reg_index] == INVALID_REG_ADDR)
+		return -EINVAL;
+
+	if (!di->bus.read_bulk)
+		return -EPERM;
+
+	ret = di->bus.read_bulk(di, di->regs[reg_index], data, len);
+	if (ret < 0)
+		dev_dbg(di->dev, "failed to read_bulk register 0x%02x (index %d)\n",
+			di->regs[reg_index], reg_index);
+
+	return ret;
+}
+
+static inline int bq27xxx_write_block(struct bq27xxx_device_info *di, int reg_index,
+				      u8 *data, int len)
+{
+	int ret;
+
+	if (!di || di->regs[reg_index] == INVALID_REG_ADDR)
+		return -EINVAL;
+
+	if (!di->bus.write_bulk)
+		return -EPERM;
+
+	ret = di->bus.write_bulk(di, di->regs[reg_index], data, len);
+	if (ret < 0)
+		dev_dbg(di->dev, "failed to write_bulk register 0x%02x (index %d)\n",
+			di->regs[reg_index], reg_index);
+
+	return ret;
+}
+
+static int bq27xxx_battery_seal(struct bq27xxx_device_info *di)
+{
+	int ret;
+
+	ret = bq27xxx_write(di, BQ27XXX_REG_CTRL, BQ27XXX_SEALED, false);
+	if (ret < 0) {
+		dev_err(di->dev, "bus error on seal: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int bq27xxx_battery_unseal(struct bq27xxx_device_info *di)
+{
+	int ret;
+
+	if (di->unseal_key == 0) {
+		dev_err(di->dev, "unseal failed due to missing key\n");
+		return -EINVAL;
+	}
+
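+	/* Send the 32-bit key as two 16-bit control writes, high word first */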
+	ret = bq27xxx_write(di, BQ27XXX_REG_CTRL, (u16)(di->unseal_key >> 16), false);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_write(di, BQ27XXX_REG_CTRL, (u16)di->unseal_key, false);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	dev_err(di->dev, "bus error on unseal: %d\n", ret);
+	return ret;
+}
+
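+/*
+ * Data memory blocks carry an 8-bit checksum: the one's complement of the
+ * sum of the 32 block bytes. It is computed both to verify reads and to
+ * finalize writes via BQ27XXX_DM_CKSUM.
+ */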
+static u8 bq27xxx_battery_checksum_dm_block(struct bq27xxx_dm_buf *buf)
+{
+	u16 sum = 0;
+	int i;
+
+	for (i = 0; i < BQ27XXX_DM_SZ; i++)
+		sum += buf->data[i];
+	sum &= 0xff;
+
+	return 0xff - sum;
+}
+
+static int bq27xxx_battery_read_dm_block(struct bq27xxx_device_info *di,
+					 struct bq27xxx_dm_buf *buf)
+{
+	int ret;
+
+	buf->has_data = false;
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_CLASS, buf->class, true);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_BLOCK, buf->block, true);
+	if (ret < 0)
+		goto out;
+
+	BQ27XXX_MSLEEP(1);
+
+	ret = bq27xxx_read_block(di, BQ27XXX_DM_DATA, buf->data, BQ27XXX_DM_SZ);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_read(di, BQ27XXX_DM_CKSUM, true);
+	if (ret < 0)
+		goto out;
+
+	if ((u8)ret != bq27xxx_battery_checksum_dm_block(buf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf->has_data = true;
+	buf->dirty = false;
+
+	return 0;
+
+out:
+	dev_err(di->dev, "bus error reading chip memory: %d\n", ret);
+	return ret;
+}
+
+static void bq27xxx_battery_update_dm_block(struct bq27xxx_device_info *di,
+					    struct bq27xxx_dm_buf *buf,
+					    enum bq27xxx_dm_reg_id reg_id,
+					    unsigned int val)
+{
+	struct bq27xxx_dm_reg *reg = &di->dm_regs[reg_id];
+	const char *str = bq27xxx_dm_reg_name[reg_id];
+	u16 *prev = bq27xxx_dm_reg_ptr(buf, reg);
+
+	if (prev == NULL) {
+		dev_warn(di->dev, "buffer does not match %s dm spec\n", str);
+		return;
+	}
+
+	if (reg->bytes != 2) {
+		dev_warn(di->dev, "%s dm spec has unsupported byte size\n", str);
+		return;
+	}
+
+	if (!buf->has_data)
+		return;
+
+	if (be16_to_cpup(prev) == val) {
+		dev_info(di->dev, "%s has %u\n", str, val);
+		return;
+	}
+
+#ifdef CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM
+	if (!di->ram_chip && !bq27xxx_dt_to_nvm) {
+#else
+	if (!di->ram_chip) {
+#endif
+		/* devicetree and NVM differ; defer to NVM */
+		dev_warn(di->dev, "%s has %u; update to %u disallowed "
+#ifdef CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM
+			 "by dt_monitored_battery_updates_nvm=0"
+#else
+			 "for flash/NVM data memory"
+#endif
+			 "\n", str, be16_to_cpup(prev), val);
+		return;
+	}
+
+	dev_info(di->dev, "update %s to %u\n", str, val);
+
+	*prev = cpu_to_be16(val);
+	buf->dirty = true;
+}
+
+static int bq27xxx_battery_cfgupdate_priv(struct bq27xxx_device_info *di, bool active)
+{
+	const int limit = 100;
+	u16 cmd = active ? BQ27XXX_SET_CFGUPDATE : BQ27XXX_SOFT_RESET;
+	int ret, try = limit;
+
+	ret = bq27xxx_write(di, BQ27XXX_REG_CTRL, cmd, false);
+	if (ret < 0)
+		return ret;
+
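+	/* Poll FLAGS until CFGUPDATE matches 'active' (at most 100 x 25 ms) */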
+	do {
+		BQ27XXX_MSLEEP(25);
+		ret = bq27xxx_read(di, BQ27XXX_REG_FLAGS, false);
+		if (ret < 0)
+			return ret;
+	} while (!!(ret & BQ27XXX_FLAG_CFGUP) != active && --try);
+
+	if (!try) {
+		dev_err(di->dev, "timed out waiting for cfgupdate flag %d\n", active);
+		return -EINVAL;
+	}
+
+	if (limit - try > 3)
+		dev_warn(di->dev, "cfgupdate %d, retries %d\n", active, limit - try);
+
+	return 0;
+}
+
+static inline int bq27xxx_battery_set_cfgupdate(struct bq27xxx_device_info *di)
+{
+	int ret = bq27xxx_battery_cfgupdate_priv(di, true);
+	if (ret < 0 && ret != -EINVAL)
+		dev_err(di->dev, "bus error on set_cfgupdate: %d\n", ret);
+
+	return ret;
+}
+
+static inline int bq27xxx_battery_soft_reset(struct bq27xxx_device_info *di)
+{
+	int ret = bq27xxx_battery_cfgupdate_priv(di, false);
+	if (ret < 0 && ret != -EINVAL)
+		dev_err(di->dev, "bus error on soft_reset: %d\n", ret);
+
+	return ret;
+}
+
+static int bq27xxx_battery_write_dm_block(struct bq27xxx_device_info *di,
+					  struct bq27xxx_dm_buf *buf)
+{
+	bool cfgup = di->chip == BQ27421; /* assume related chips need cfgupdate */
+	int ret;
+
+	if (!buf->dirty)
+		return 0;
+
+	if (cfgup) {
+		ret = bq27xxx_battery_set_cfgupdate(di);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_CTRL, 0, true);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_CLASS, buf->class, true);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_BLOCK, buf->block, true);
+	if (ret < 0)
+		goto out;
+
+	BQ27XXX_MSLEEP(1);
+
+	ret = bq27xxx_write_block(di, BQ27XXX_DM_DATA, buf->data, BQ27XXX_DM_SZ);
+	if (ret < 0)
+		goto out;
+
+	ret = bq27xxx_write(di, BQ27XXX_DM_CKSUM,
+			    bq27xxx_battery_checksum_dm_block(buf), true);
+	if (ret < 0)
+		goto out;
+
+	/* DO NOT read BQ27XXX_DM_CKSUM here to verify it! That may cause NVM
+	 * corruption on the '425 chip (and perhaps others), which can damage
+	 * the chip.
+	 */
+
+	if (cfgup) {
+		BQ27XXX_MSLEEP(1);
+		ret = bq27xxx_battery_soft_reset(di);
+		if (ret < 0)
+			return ret;
+	} else {
+		BQ27XXX_MSLEEP(100); /* flash DM updates in <100ms */
+	}
+
+	buf->dirty = false;
+
+	return 0;
+
+out:
+	if (cfgup)
+		bq27xxx_battery_soft_reset(di);
+
+	dev_err(di->dev, "bus error writing chip memory: %d\n", ret);
+	return ret;
+}
+
+static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di,
+				       struct power_supply_battery_info *info)
+{
+	struct bq27xxx_dm_buf bd = BQ27XXX_DM_BUF(di, BQ27XXX_DM_DESIGN_CAPACITY);
+	struct bq27xxx_dm_buf bt = BQ27XXX_DM_BUF(di, BQ27XXX_DM_TERMINATE_VOLTAGE);
+	bool updated;
+
+	if (bq27xxx_battery_unseal(di) < 0)
+		return;
+
+	if (info->charge_full_design_uah != -EINVAL &&
+	    info->energy_full_design_uwh != -EINVAL) {
+		bq27xxx_battery_read_dm_block(di, &bd);
+		/* assume design energy & capacity are in same block */
+		bq27xxx_battery_update_dm_block(di, &bd,
+					BQ27XXX_DM_DESIGN_CAPACITY,
+					info->charge_full_design_uah / 1000);
+		bq27xxx_battery_update_dm_block(di, &bd,
+					BQ27XXX_DM_DESIGN_ENERGY,
+					info->energy_full_design_uwh / 1000);
+	}
+
+	if (info->voltage_min_design_uv != -EINVAL) {
+		bool same = bd.class == bt.class && bd.block == bt.block;
+		if (!same)
+			bq27xxx_battery_read_dm_block(di, &bt);
+		bq27xxx_battery_update_dm_block(di, same ? &bd : &bt,
+					BQ27XXX_DM_TERMINATE_VOLTAGE,
+					info->voltage_min_design_uv / 1000);
+	}
+
+	updated = bd.dirty || bt.dirty;
+
+	bq27xxx_battery_write_dm_block(di, &bd);
+	bq27xxx_battery_write_dm_block(di, &bt);
+
+	bq27xxx_battery_seal(di);
+
+	if (updated && di->chip != BQ27421) { /* not a cfgupdate chip, so reset */
+		bq27xxx_write(di, BQ27XXX_REG_CTRL, BQ27XXX_RESET, false);
+		BQ27XXX_MSLEEP(300); /* reset time is not documented */
+	}
+	/* assume bq27xxx_battery_update() is called hereafter */
+}
+
+static void bq27xxx_battery_settings(struct bq27xxx_device_info *di)
+{
+	struct power_supply_battery_info info = {};
+	unsigned int min, max;
+
+	if (power_supply_get_battery_info(di->bat, &info) < 0)
+		return;
+
+	if (!di->dm_regs) {
+		dev_warn(di->dev, "data memory update not supported for chip\n");
+		return;
+	}
+
+	if (info.energy_full_design_uwh != info.charge_full_design_uah) {
+		if (info.energy_full_design_uwh == -EINVAL)
+			dev_warn(di->dev, "missing battery:energy-full-design-microwatt-hours\n");
+		else if (info.charge_full_design_uah == -EINVAL)
+			dev_warn(di->dev, "missing battery:charge-full-design-microamp-hours\n");
+	}
+
+	/* assume min == 0 */
+	max = di->dm_regs[BQ27XXX_DM_DESIGN_ENERGY].max;
+	if (info.energy_full_design_uwh > max * 1000) {
+		dev_err(di->dev, "invalid battery:energy-full-design-microwatt-hours %d\n",
+			info.energy_full_design_uwh);
+		info.energy_full_design_uwh = -EINVAL;
+	}
+
+	/* assume min == 0 */
+	max = di->dm_regs[BQ27XXX_DM_DESIGN_CAPACITY].max;
+	if (info.charge_full_design_uah > max * 1000) {
+		dev_err(di->dev, "invalid battery:charge-full-design-microamp-hours %d\n",
+			info.charge_full_design_uah);
+		info.charge_full_design_uah = -EINVAL;
+	}
+
+	min = di->dm_regs[BQ27XXX_DM_TERMINATE_VOLTAGE].min;
+	max = di->dm_regs[BQ27XXX_DM_TERMINATE_VOLTAGE].max;
+	if ((info.voltage_min_design_uv < min * 1000 ||
+	     info.voltage_min_design_uv > max * 1000) &&
+	     info.voltage_min_design_uv != -EINVAL) {
+		dev_err(di->dev, "invalid battery:voltage-min-design-microvolt %d\n",
+			info.voltage_min_design_uv);
+		info.voltage_min_design_uv = -EINVAL;
+	}
+
+	if ((info.energy_full_design_uwh != -EINVAL &&
+	     info.charge_full_design_uah != -EINVAL) ||
+	     info.voltage_min_design_uv  != -EINVAL)
+		bq27xxx_battery_set_config(di, &info);
 }
 
 /*
@@ -1318,6 +1837,13 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
 		ret = bq27xxx_simple_value(di->charge_design_full, val);
 		break;
+	/*
+	 * TODO: Implement these to make registers set from
+	 * power_supply_battery_info visible in sysfs.
+	 */
+	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		return -EINVAL;
 	case POWER_SUPPLY_PROP_CYCLE_COUNT:
 		ret = bq27xxx_simple_value(di->cache.cycle_count, val);
 		break;
@@ -1351,7 +1877,10 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
 int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
 {
 	struct power_supply_desc *psy_desc;
-	struct power_supply_config psy_cfg = { .drv_data = di, };
+	struct power_supply_config psy_cfg = {
+		.of_node = di->dev->of_node,
+		.drv_data = di,
+	};
 
 	INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
 	mutex_init(&di->lock);
@@ -1376,6 +1905,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
 
 	dev_info(di->dev, "support ver. %s enabled\n", DRIVER_VERSION);
 
+	bq27xxx_battery_settings(di);
 	bq27xxx_battery_update(di);
 
 	mutex_lock(&bq27xxx_list_lock);
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index c68fbc3..a597221 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -38,7 +38,7 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
 {
 	struct i2c_client *client = to_i2c_client(di->dev);
 	struct i2c_msg msg[2];
-	unsigned char data[2];
+	u8 data[2];
 	int ret;
 
 	if (!client->adapter)
@@ -68,6 +68,82 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
 	return ret;
 }
 
+static int bq27xxx_battery_i2c_write(struct bq27xxx_device_info *di, u8 reg,
+				     int value, bool single)
+{
+	struct i2c_client *client = to_i2c_client(di->dev);
+	struct i2c_msg msg;
+	u8 data[4];
+	int ret;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	data[0] = reg;
+	if (single) {
+		data[1] = (u8) value;
+		msg.len = 2;
+	} else {
+		put_unaligned_le16(value, &data[1]);
+		msg.len = 3;
+	}
+
+	msg.buf = data;
+	msg.addr = client->addr;
+	msg.flags = 0;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EINVAL;
+	return 0;
+}
+
+static int bq27xxx_battery_i2c_bulk_read(struct bq27xxx_device_info *di, u8 reg,
+					 u8 *data, int len)
+{
+	struct i2c_client *client = to_i2c_client(di->dev);
+	int ret;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	ret = i2c_smbus_read_i2c_block_data(client, reg, len, data);
+	if (ret < 0)
+		return ret;
+	if (ret != len)
+		return -EINVAL;
+	return 0;
+}
+
+static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di,
+					  u8 reg, u8 *data, int len)
+{
+	struct i2c_client *client = to_i2c_client(di->dev);
+	struct i2c_msg msg;
+	u8 buf[33];
+	int ret;
+
+	if (!client->adapter)
+		return -ENODEV;
+
+	buf[0] = reg;
+	memcpy(&buf[1], data, len);
+
+	msg.buf = buf;
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = len + 1;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+	if (ret < 0)
+		return ret;
+	if (ret != 1)
+		return -EINVAL;
+	return 0;
+}
+
 static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
 				     const struct i2c_device_id *id)
 {
@@ -95,7 +171,11 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
 	di->dev = &client->dev;
 	di->chip = id->driver_data;
 	di->name = name;
+
 	di->bus.read = bq27xxx_battery_i2c_read;
+	di->bus.write = bq27xxx_battery_i2c_write;
+	di->bus.read_bulk = bq27xxx_battery_i2c_bulk_read;
+	di->bus.write_bulk = bq27xxx_battery_i2c_bulk_write;
 
 	ret = bq27xxx_battery_setup(di);
 	if (ret)
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
new file mode 100644
index 0000000..ee71a2b
--- /dev/null
+++ b/drivers/power/supply/cpcap-battery.c
@@ -0,0 +1,808 @@
+/*
+ * Battery driver for CPCAP PMIC
+ *
+ * Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
+ *
+ * Some parts of the code are based on earlier Motorola mapphone Linux kernel
+ * drivers:
+ *
+ * Copyright (C) 2009-2010 Motorola, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/mfd/motorola-cpcap.h>
+
+#include <asm/div64.h>
+
+/*
+ * Register bit defines for CPCAP_REG_BPEOL. Some of these seem to
+ * map to MC13783UG.pdf "Table 5-19. Register 13, Power Control 0"
+ * to enable BATTDETEN, LOBAT and EOL features. We currently use
+ * LOBAT interrupts instead of EOL.
+ */
+#define CPCAP_REG_BPEOL_BIT_EOL9	BIT(9)	/* Set for EOL irq */
+#define CPCAP_REG_BPEOL_BIT_EOL8	BIT(8)	/* Set for EOL irq */
+#define CPCAP_REG_BPEOL_BIT_UNKNOWN7	BIT(7)
+#define CPCAP_REG_BPEOL_BIT_UNKNOWN6	BIT(6)
+#define CPCAP_REG_BPEOL_BIT_UNKNOWN5	BIT(5)
+#define CPCAP_REG_BPEOL_BIT_EOL_MULTI	BIT(4)	/* Set for multiple EOL irqs */
+#define CPCAP_REG_BPEOL_BIT_UNKNOWN3	BIT(3)
+#define CPCAP_REG_BPEOL_BIT_UNKNOWN2	BIT(2)
+#define CPCAP_REG_BPEOL_BIT_BATTDETEN	BIT(1)	/* Enable battery detect */
+#define CPCAP_REG_BPEOL_BIT_EOLSEL	BIT(0)	/* BPDET = 0, EOL = 1 */
+
+#define CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS	250
+
+enum {
+	CPCAP_BATTERY_IIO_BATTDET,
+	CPCAP_BATTERY_IIO_VOLTAGE,
+	CPCAP_BATTERY_IIO_CHRG_CURRENT,
+	CPCAP_BATTERY_IIO_BATT_CURRENT,
+	CPCAP_BATTERY_IIO_NR,
+};
+
+enum cpcap_battery_irq_action {
+	CPCAP_BATTERY_IRQ_ACTION_NONE,
+	CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW,
+	CPCAP_BATTERY_IRQ_ACTION_POWEROFF,
+};
+
+struct cpcap_interrupt_desc {
+	const char *name;
+	struct list_head node;
+	int irq;
+	enum cpcap_battery_irq_action action;
+};
+
+struct cpcap_battery_config {
+	int ccm;
+	int cd_factor;
+	struct power_supply_info info;
+};
+
+struct cpcap_coulomb_counter_data {
+	s32 sample;		/* 24-bits */
+	s32 accumulator;
+	s16 offset;		/* 10-bits */
+};
+
+enum cpcap_battery_state {
+	CPCAP_BATTERY_STATE_PREVIOUS,
+	CPCAP_BATTERY_STATE_LATEST,
+	CPCAP_BATTERY_STATE_NR,
+};
+
+struct cpcap_battery_state_data {
+	int voltage;
+	int current_ua;
+	int counter_uah;
+	int temperature;
+	ktime_t time;
+	struct cpcap_coulomb_counter_data cc;
+};
+
+struct cpcap_battery_ddata {
+	struct device *dev;
+	struct regmap *reg;
+	struct list_head irq_list;
+	struct iio_channel *channels[CPCAP_BATTERY_IIO_NR];
+	struct power_supply *psy;
+	struct cpcap_battery_config config;
+	struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
+	atomic_t active;
+	int status;
+	u16 vendor;
+};
+
+#define CPCAP_NO_BATTERY	-400
+
+static struct cpcap_battery_state_data *
+cpcap_battery_get_state(struct cpcap_battery_ddata *ddata,
+			enum cpcap_battery_state state)
+{
+	if (state >= CPCAP_BATTERY_STATE_NR)
+		return NULL;
+
+	return &ddata->state[state];
+}
+
+static struct cpcap_battery_state_data *
+cpcap_battery_latest(struct cpcap_battery_ddata *ddata)
+{
+	return cpcap_battery_get_state(ddata, CPCAP_BATTERY_STATE_LATEST);
+}
+
+static struct cpcap_battery_state_data *
+cpcap_battery_previous(struct cpcap_battery_ddata *ddata)
+{
+	return cpcap_battery_get_state(ddata, CPCAP_BATTERY_STATE_PREVIOUS);
+}
+
+static int cpcap_charger_battery_temperature(struct cpcap_battery_ddata *ddata,
+					     int *value)
+{
+	struct iio_channel *channel;
+	int error;
+
+	channel = ddata->channels[CPCAP_BATTERY_IIO_BATTDET];
+	error = iio_read_channel_processed(channel, value);
+	if (error < 0) {
+		dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
+		*value = CPCAP_NO_BATTERY;
+
+		return error;
+	}
+
+	*value /= 100;
+
+	return 0;
+}
+
+static int cpcap_battery_get_voltage(struct cpcap_battery_ddata *ddata)
+{
+	struct iio_channel *channel;
+	int error, value = 0;
+
+	channel = ddata->channels[CPCAP_BATTERY_IIO_VOLTAGE];
+	error = iio_read_channel_processed(channel, &value);
+	if (error < 0) {
+		dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
+
+		return 0;
+	}
+
+	return value * 1000;
+}
+
+static int cpcap_battery_get_current(struct cpcap_battery_ddata *ddata)
+{
+	struct iio_channel *channel;
+	int error, value = 0;
+
+	channel = ddata->channels[CPCAP_BATTERY_IIO_BATT_CURRENT];
+	error = iio_read_channel_processed(channel, &value);
+	if (error < 0) {
+		dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
+
+		return 0;
+	}
+
+	return value * 1000;
+}
+
+/**
+ * cpcap_battery_cc_raw_div - calculate and divide coulomb counter μAms values
+ * @ddata: device driver data
+ * @sample: coulomb counter sample value
+ * @accumulator: coulomb counter integrator value
+ * @offset: coulomb counter offset value
+ * @divider: conversion divider
+ *
+ * Note that cc_lsb and cc_dur values are from Motorola Linux kernel
+ * function data_get_avg_curr_ua() and seem to be based on measured test
+ * results. It also has the following comment:
+ *
+ * Adjustment factors are applied here as a temp solution per the test
+ * results. Need to work out a formal solution for this adjustment.
+ *
+ * A coulomb counter for similar hardware seems to be documented in
+ * "TWL6030 Gas Gauging Basics (Rev. A)" swca095a.pdf in chapter
+ * "10 Calculating Accumulated Current". We however follow what the
+ * Motorola mapphone Linux kernel is doing as there may be either a
+ * TI or ST coulomb counter in the PMIC.
+ */
+static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
+				    u32 sample, s32 accumulator,
+				    s16 offset, u32 divider)
+{
+	s64 acc;
+	u64 tmp;
+	int avg_current;
+	u32 cc_lsb;
+
+	sample &= 0xffffff;		/* 24-bits, unsigned */
+	offset &= 0x7ff;		/* 10-bits, signed */
+
+	switch (ddata->vendor) {
+	case CPCAP_VENDOR_ST:
+		cc_lsb = 95374;		/* μAms per LSB */
+		break;
+	case CPCAP_VENDOR_TI:
+		cc_lsb = 91501;		/* μAms per LSB */
+		break;
+	default:
+		return -EINVAL;
+	}
+
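+	/*
+	 * Correct the accumulator by sample * offset, scale by the adjusted
+	 * LSB weight and divide; the result's sign is the inverse of the
+	 * corrected accumulator's sign.
+	 */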
+	acc = accumulator;
+	acc = acc - ((s64)sample * offset);
+	cc_lsb = (cc_lsb * ddata->config.cd_factor) / 1000;
+
+	if (acc >=  0)
+		tmp = acc;
+	else
+		tmp = acc * -1;
+
+	tmp = tmp * cc_lsb;
+	do_div(tmp, divider);
+	avg_current = tmp;
+
+	if (acc >= 0)
+		return -avg_current;
+	else
+		return avg_current;
+}
+
+/* 3600000μAms = 1μAh */
+static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
+				   u32 sample, s32 accumulator,
+				   s16 offset)
+{
+	return cpcap_battery_cc_raw_div(ddata, sample,
+					accumulator, offset,
+					3600000);
+}
+
+static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata,
+				  u32 sample, s32 accumulator,
+				  s16 offset)
+{
+	return cpcap_battery_cc_raw_div(ddata, sample,
+					accumulator, offset,
+					sample *
+					CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS);
+}
+
+/**
+ * cpcap_battery_read_accumulated - reads cpcap coulomb counter
+ * @ddata: device driver data
+ * @regs: coulomb counter values
+ *
+ * Based on Motorola mapphone kernel function data_read_regs().
+ * Looking at the registers, the coulomb counter seems similar to
+ * the coulomb counter in TWL6030. See "TWL6030 Gas Gauging Basics
+ * (Rev. A)" swca095a.pdf for "10 Calculating Accumulated Current".
+ *
+ * Note that swca095a.pdf instructs to stop the coulomb counter
+ * before reading to avoid values changing. Motorola mapphone
+ * Linux kernel does not do it, so let's assume they've verified
+ * the data produced is correct.
+ */
+static int
+cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
+			       struct cpcap_coulomb_counter_data *ccd)
+{
+	u16 buf[7];	/* CPCAP_REG_CC1 to CCI */
+	int error;
+
+	ccd->sample = 0;
+	ccd->accumulator = 0;
+	ccd->offset = 0;
+
+	/* Read coulomb counter register range */
+	error = regmap_bulk_read(ddata->reg, CPCAP_REG_CCS1,
+				 buf, ARRAY_SIZE(buf));
+	if (error)
+		return 0;
+
+	/* Sample value CPCAP_REG_CCS1 & 2 */
+	ccd->sample = (buf[1] & 0x0fff) << 16;
+	ccd->sample |= buf[0];
+
+	/* Accumulator value CPCAP_REG_CCA1 & 2 */
+	ccd->accumulator = ((s16)buf[3]) << 16;
+	ccd->accumulator |= buf[2];
+
+	/* Offset value CPCAP_REG_CCO */
+	ccd->offset = buf[5];
+
+	/* Adjust offset based on mode value CPCAP_REG_CCM? */
+	if (buf[4] >= 0x200)
+		ccd->offset |= 0xfc00;
+
+	return cpcap_battery_cc_to_uah(ddata,
+				       ccd->sample,
+				       ccd->accumulator,
+				       ccd->offset);
+}
+
+/**
+ * cpcap_battery_cc_get_avg_current - read cpcap coulomb counter
+ * @ddata: cpcap battery driver device data
+ */
+static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
+{
+	int value, acc, error;
+	s32 sample = 1;
+	s16 offset;
+
+	if (ddata->vendor == CPCAP_VENDOR_ST)
+		sample = 4;
+
+	/* Coulomb counter integrator */
+	error = regmap_read(ddata->reg, CPCAP_REG_CCI, &value);
+	if (error)
+		return error;
+
+	if ((ddata->vendor == CPCAP_VENDOR_TI) && (value > 0x2000))
+		value = value | 0xc000;
+
+	acc = (s16)value;
+
+	/* Coulomb counter calibration offset */
+	error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+	if (error)
+		return error;
+
+	if (value < 0x200)
+		offset = value;
+	else
+		offset = value | 0xfc00;
+
+	return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
+}
+
+static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
+{
+	struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
+
+	/* Basically anything that measures above 4347000 is full */
+	if (state->voltage >= (ddata->config.info.voltage_max_design - 4000))
+		return true;
+
+	return false;
+}
+
+static int cpcap_battery_update_status(struct cpcap_battery_ddata *ddata)
+{
+	struct cpcap_battery_state_data state, *latest, *previous;
+	ktime_t now;
+	int error;
+
+	memset(&state, 0, sizeof(state));
+	now = ktime_get();
+
+	latest = cpcap_battery_latest(ddata);
+	if (latest) {
+		s64 delta_ms = ktime_to_ms(ktime_sub(now, latest->time));
+
+		if (delta_ms < CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS)
+			return delta_ms;
+	}
+
+	state.time = now;
+	state.voltage = cpcap_battery_get_voltage(ddata);
+	state.current_ua = cpcap_battery_get_current(ddata);
+	state.counter_uah = cpcap_battery_read_accumulated(ddata, &state.cc);
+
+	error = cpcap_charger_battery_temperature(ddata,
+						  &state.temperature);
+	if (error)
+		return error;
+
+	previous = cpcap_battery_previous(ddata);
+	memcpy(previous, latest, sizeof(*previous));
+	memcpy(latest, &state, sizeof(*latest));
+
+	return 0;
+}
+
+static enum power_supply_property cpcap_battery_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_POWER_NOW,
+	POWER_SUPPLY_PROP_POWER_AVG,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_TEMP,
+};
+
+static int cpcap_battery_get_property(struct power_supply *psy,
+				      enum power_supply_property psp,
+				      union power_supply_propval *val)
+{
+	struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
+	struct cpcap_battery_state_data *latest, *previous;
+	u32 sample;
+	s32 accumulator;
+	int cached;
+	s64 tmp;
+
+	cached = cpcap_battery_update_status(ddata);
+	if (cached < 0)
+		return cached;
+
+	latest = cpcap_battery_latest(ddata);
+	previous = cpcap_battery_previous(ddata);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		if (latest->temperature > CPCAP_NO_BATTERY)
+			val->intval = 1;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (cpcap_battery_full(ddata)) {
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+			break;
+		}
+		if (cpcap_battery_cc_get_avg_current(ddata) < 0)
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = ddata->config.info.technology;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = cpcap_battery_get_voltage(ddata);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		val->intval = ddata->config.info.voltage_max_design;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+		val->intval = ddata->config.info.voltage_min_design;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_AVG:
+		if (cached) {
+			val->intval = cpcap_battery_cc_get_avg_current(ddata);
+			break;
+		}
+		sample = latest->cc.sample - previous->cc.sample;
+		accumulator = latest->cc.accumulator - previous->cc.accumulator;
+		val->intval = cpcap_battery_cc_to_ua(ddata, sample,
+						     accumulator,
+						     latest->cc.offset);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		val->intval = latest->current_ua;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		val->intval = latest->counter_uah;
+		break;
+	case POWER_SUPPLY_PROP_POWER_NOW:
+		tmp = (latest->voltage / 10000) * latest->current_ua;
+		val->intval = div64_s64(tmp, 100);
+		break;
+	case POWER_SUPPLY_PROP_POWER_AVG:
+		if (cached) {
+			tmp = cpcap_battery_cc_get_avg_current(ddata);
+			tmp *= (latest->voltage / 10000);
+			val->intval = div64_s64(tmp, 100);
+			break;
+		}
+		sample = latest->cc.sample - previous->cc.sample;
+		accumulator = latest->cc.accumulator - previous->cc.accumulator;
+		tmp = cpcap_battery_cc_to_ua(ddata, sample, accumulator,
+					     latest->cc.offset);
+		tmp *= ((latest->voltage + previous->voltage) / 20000);
+		val->intval = div64_s64(tmp, 100);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+		if (cpcap_battery_full(ddata))
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+		else if (latest->voltage >= 3750000)
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
+		else if (latest->voltage >= 3300000)
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+		else if (latest->voltage > 3100000)
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+		else if (latest->voltage <= 3100000)
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+		else
+			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		val->intval = ddata->config.info.charge_full_design;
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = latest->temperature;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
+{
+	struct cpcap_battery_ddata *ddata = data;
+	struct cpcap_battery_state_data *latest;
+	struct cpcap_interrupt_desc *d;
+
+	if (!atomic_read(&ddata->active))
+		return IRQ_NONE;
+
+	list_for_each_entry(d, &ddata->irq_list, node) {
+		if (irq == d->irq)
+			break;
+	}
+
+	if (!d)
+		return IRQ_NONE;
+
+	latest = cpcap_battery_latest(ddata);
+
+	switch (d->action) {
+	case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
+		if (latest->counter_uah >= 0)
+			dev_warn(ddata->dev, "Battery low at 3.3V!\n");
+		break;
+	case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
+		if (latest->counter_uah >= 0) {
+			dev_emerg(ddata->dev,
+				  "Battery empty at 3.1V, powering off\n");
+			orderly_poweroff(true);
+		}
+		break;
+	default:
+		break;
+	}
+
+	power_supply_changed(ddata->psy);
+
+	return IRQ_HANDLED;
+}
+
+static int cpcap_battery_init_irq(struct platform_device *pdev,
+				  struct cpcap_battery_ddata *ddata,
+				  const char *name)
+{
+	struct cpcap_interrupt_desc *d;
+	int irq, error;
+
+	irq = platform_get_irq_byname(pdev, name);
+	if (!irq)
+		return -ENODEV;
+
+	error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+					  cpcap_battery_irq_thread,
+					  IRQF_SHARED,
+					  name, ddata);
+	if (error) {
+		dev_err(ddata->dev, "could not get irq %s: %i\n",
+			name, error);
+
+		return error;
+	}
+
+	d = devm_kzalloc(ddata->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->name = name;
+	d->irq = irq;
+
+	if (!strncmp(name, "lowbph", 6))
+		d->action = CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW;
+	else if (!strncmp(name, "lowbpl", 6))
+		d->action = CPCAP_BATTERY_IRQ_ACTION_POWEROFF;
+
+	list_add(&d->node, &ddata->irq_list);
+
+	return 0;
+}
+
+static int cpcap_battery_init_interrupts(struct platform_device *pdev,
+					 struct cpcap_battery_ddata *ddata)
+{
+	const char * const cpcap_battery_irqs[] = {
+		"eol", "lowbph", "lowbpl",
+		"chrgcurr1", "battdetb"
+	};
+	int i, error;
+
+	for (i = 0; i < ARRAY_SIZE(cpcap_battery_irqs); i++) {
+		error = cpcap_battery_init_irq(pdev, ddata,
+					       cpcap_battery_irqs[i]);
+		if (error)
+			return error;
+	}
+
+	/* Enable low battery interrupts for 3.3V high and 3.1V low */
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
+				   0xffff,
+				   CPCAP_REG_BPEOL_BIT_BATTDETEN);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+static int cpcap_battery_init_iio(struct cpcap_battery_ddata *ddata)
+{
+	const char * const names[CPCAP_BATTERY_IIO_NR] = {
+		"battdetb", "battp", "chg_isense", "batti",
+	};
+	int error, i;
+
+	for (i = 0; i < CPCAP_BATTERY_IIO_NR; i++) {
+		ddata->channels[i] = devm_iio_channel_get(ddata->dev,
+							  names[i]);
+		if (IS_ERR(ddata->channels[i])) {
+			error = PTR_ERR(ddata->channels[i]);
+			goto out_err;
+		}
+
+		if (!ddata->channels[i]->indio_dev) {
+			error = -ENXIO;
+			goto out_err;
+		}
+	}
+
+	return 0;
+
+out_err:
+	dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+		error);
+
+	return error;
+}
+
+/*
+ * Based on the values from the Motorola mapphone Linux kernel. In the
+ * Motorola mapphone Linux kernel tree the value for pm_cd_factor
+ * is passed to the kernel via device tree. If it turns out to be
+ * something device specific we can consider that too later.
+ *
+ * And looking at the battery full and shutdown voltages for the stock
+ * kernel on Droid 4, full is 4351000 uV and software initiates shutdown
+ * at 3078000 uV. The device will die at around 2743000 uV.
+ */
+static const struct cpcap_battery_config cpcap_battery_default_data = {
+	.ccm = 0x3ff,
+	.cd_factor = 0x3cc,
+	.info.technology = POWER_SUPPLY_TECHNOLOGY_LION,
+	.info.voltage_max_design = 4351000,
+	.info.voltage_min_design = 3100000,
+	.info.charge_full_design = 1740000,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id cpcap_battery_id_table[] = {
+	{
+		.compatible = "motorola,cpcap-battery",
+		.data = &cpcap_battery_default_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, cpcap_battery_id_table);
+#endif
+
+static int cpcap_battery_probe(struct platform_device *pdev)
+{
+	struct power_supply_desc *psy_desc;
+	struct cpcap_battery_ddata *ddata;
+	const struct of_device_id *match;
+	struct power_supply_config psy_cfg = {};
+	int error;
+
+	match = of_match_device(of_match_ptr(cpcap_battery_id_table),
+				&pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	if (!match->data) {
+		dev_err(&pdev->dev, "no configuration data found\n");
+
+		return -ENODEV;
+	}
+
+	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+	if (!ddata)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ddata->irq_list);
+	ddata->dev = &pdev->dev;
+	memcpy(&ddata->config, match->data, sizeof(ddata->config));
+
+	ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
+	if (!ddata->reg)
+		return -ENODEV;
+
+	error = cpcap_get_vendor(ddata->dev, ddata->reg, &ddata->vendor);
+	if (error)
+		return error;
+
+	platform_set_drvdata(pdev, ddata);
+
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_CCM,
+				   0xffff, ddata->config.ccm);
+	if (error)
+		return error;
+
+	error = cpcap_battery_init_interrupts(pdev, ddata);
+	if (error)
+		return error;
+
+	error = cpcap_battery_init_iio(ddata);
+	if (error)
+		return error;
+
+	psy_desc = devm_kzalloc(ddata->dev, sizeof(*psy_desc), GFP_KERNEL);
+	if (!psy_desc)
+		return -ENOMEM;
+
+	psy_desc->name = "battery";
+	psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+	psy_desc->properties = cpcap_battery_props;
+	psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
+	psy_desc->get_property = cpcap_battery_get_property;
+
+	psy_cfg.of_node = pdev->dev.of_node;
+	psy_cfg.drv_data = ddata;
+
+	ddata->psy = devm_power_supply_register(ddata->dev, psy_desc,
+						&psy_cfg);
+	error = PTR_ERR_OR_ZERO(ddata->psy);
+	if (error) {
+		dev_err(ddata->dev, "failed to register power supply\n");
+		return error;
+	}
+
+	atomic_set(&ddata->active, 1);
+
+	return 0;
+}
+
+static int cpcap_battery_remove(struct platform_device *pdev)
+{
+	struct cpcap_battery_ddata *ddata = platform_get_drvdata(pdev);
+	int error;
+
+	atomic_set(&ddata->active, 0);
+	error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
+				   0xffff, 0);
+	if (error)
+		dev_err(&pdev->dev, "could not disable: %i\n", error);
+
+	return 0;
+}
+
+static struct platform_driver cpcap_battery_driver = {
+	.driver	= {
+		.name		= "cpcap_battery",
+		.of_match_table = of_match_ptr(cpcap_battery_id_table),
+	},
+	.probe	= cpcap_battery_probe,
+	.remove = cpcap_battery_remove,
+};
+module_platform_driver(cpcap_battery_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("CPCAP PMIC Battery Driver");
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 26a2dc7..11a0763 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -38,20 +38,27 @@
 #include <linux/iio/consumer.h>
 #include <linux/mfd/motorola-cpcap.h>
 
-/* CPCAP_REG_CRM register bits */
+/*
+ * CPCAP_REG_CRM register bits. For documentation of somewhat similar hardware,
+ * see NXP "MC13783 Power Management and Audio Circuit User's Guide"
+ * MC13783UG.pdf chapter "8.5 Battery Interface Register Summary". The registers
+ * and values for CPCAP are different, but some of the internal components seem
+ * similar. Also see the CPCAP_REG_CHRGR_1 bits in the Motorola Linux kernel
+ * cpcap-regbits.h, which seem to describe the CRM register.
+ */
 #define CPCAP_REG_CRM_UNUSED_641_15	BIT(15)	/* 641 = register number */
 #define CPCAP_REG_CRM_UNUSED_641_14	BIT(14)	/* 641 = register number */
-#define CPCAP_REG_CRM_CHRG_LED_EN	BIT(13)
-#define CPCAP_REG_CRM_RVRSMODE		BIT(12)
-#define CPCAP_REG_CRM_ICHRG_TR1		BIT(11)
+#define CPCAP_REG_CRM_CHRG_LED_EN	BIT(13)	/* Charger LED */
+#define CPCAP_REG_CRM_RVRSMODE		BIT(12)	/* USB VBUS output enable */
+#define CPCAP_REG_CRM_ICHRG_TR1		BIT(11)	/* Trickle charge current */
 #define CPCAP_REG_CRM_ICHRG_TR0		BIT(10)
-#define CPCAP_REG_CRM_FET_OVRD		BIT(9)
-#define CPCAP_REG_CRM_FET_CTRL		BIT(8)
-#define CPCAP_REG_CRM_VCHRG3		BIT(7)
+#define CPCAP_REG_CRM_FET_OVRD		BIT(9)	/* 0 = hardware, 1 = FET_CTRL */
+#define CPCAP_REG_CRM_FET_CTRL		BIT(8)	/* BPFET 1 if FET_OVRD set */
+#define CPCAP_REG_CRM_VCHRG3		BIT(7)	/* Charge voltage bits */
 #define CPCAP_REG_CRM_VCHRG2		BIT(6)
 #define CPCAP_REG_CRM_VCHRG1		BIT(5)
 #define CPCAP_REG_CRM_VCHRG0		BIT(4)
-#define CPCAP_REG_CRM_ICHRG3		BIT(3)
+#define CPCAP_REG_CRM_ICHRG3		BIT(3)	/* Charge current bits */
 #define CPCAP_REG_CRM_ICHRG2		BIT(2)
 #define CPCAP_REG_CRM_ICHRG1		BIT(1)
 #define CPCAP_REG_CRM_ICHRG0		BIT(0)
@@ -63,42 +70,50 @@
 #define CPCAP_REG_CRM_TR_0A48		CPCAP_REG_CRM_TR(0x2)
 #define CPCAP_REG_CRM_TR_0A72		CPCAP_REG_CRM_TR(0x4)
 
-/* CPCAP_REG_CRM charge voltages */
+/*
+ * CPCAP_REG_CRM charge voltages based on the ADC channel 1 values.
+ * Note that these register bits don't match MC13783UG.pdf VCHRG
+ * register bits.
+ */
 #define CPCAP_REG_CRM_VCHRG(val)	(((val) & 0xf) << 4)
 #define CPCAP_REG_CRM_VCHRG_3V80	CPCAP_REG_CRM_VCHRG(0x0)
 #define CPCAP_REG_CRM_VCHRG_4V10	CPCAP_REG_CRM_VCHRG(0x1)
-#define CPCAP_REG_CRM_VCHRG_4V15	CPCAP_REG_CRM_VCHRG(0x2)
-#define CPCAP_REG_CRM_VCHRG_4V20	CPCAP_REG_CRM_VCHRG(0x3)
-#define CPCAP_REG_CRM_VCHRG_4V22	CPCAP_REG_CRM_VCHRG(0x4)
-#define CPCAP_REG_CRM_VCHRG_4V24	CPCAP_REG_CRM_VCHRG(0x5)
-#define CPCAP_REG_CRM_VCHRG_4V26	CPCAP_REG_CRM_VCHRG(0x6)
-#define CPCAP_REG_CRM_VCHRG_4V28	CPCAP_REG_CRM_VCHRG(0x7)
-#define CPCAP_REG_CRM_VCHRG_4V30	CPCAP_REG_CRM_VCHRG(0x8)
-#define CPCAP_REG_CRM_VCHRG_4V32	CPCAP_REG_CRM_VCHRG(0x9)
-#define CPCAP_REG_CRM_VCHRG_4V34	CPCAP_REG_CRM_VCHRG(0xa)
+#define CPCAP_REG_CRM_VCHRG_4V12	CPCAP_REG_CRM_VCHRG(0x2)
+#define CPCAP_REG_CRM_VCHRG_4V15	CPCAP_REG_CRM_VCHRG(0x3)
+#define CPCAP_REG_CRM_VCHRG_4V17	CPCAP_REG_CRM_VCHRG(0x4)
+#define CPCAP_REG_CRM_VCHRG_4V20	CPCAP_REG_CRM_VCHRG(0x5)
+#define CPCAP_REG_CRM_VCHRG_4V23	CPCAP_REG_CRM_VCHRG(0x6)
+#define CPCAP_REG_CRM_VCHRG_4V25	CPCAP_REG_CRM_VCHRG(0x7)
+#define CPCAP_REG_CRM_VCHRG_4V27	CPCAP_REG_CRM_VCHRG(0x8)
+#define CPCAP_REG_CRM_VCHRG_4V30	CPCAP_REG_CRM_VCHRG(0x9)
+#define CPCAP_REG_CRM_VCHRG_4V33	CPCAP_REG_CRM_VCHRG(0xa)
 #define CPCAP_REG_CRM_VCHRG_4V35	CPCAP_REG_CRM_VCHRG(0xb)
 #define CPCAP_REG_CRM_VCHRG_4V38	CPCAP_REG_CRM_VCHRG(0xc)
 #define CPCAP_REG_CRM_VCHRG_4V40	CPCAP_REG_CRM_VCHRG(0xd)
 #define CPCAP_REG_CRM_VCHRG_4V42	CPCAP_REG_CRM_VCHRG(0xe)
 #define CPCAP_REG_CRM_VCHRG_4V44	CPCAP_REG_CRM_VCHRG(0xf)
 
-/* CPCAP_REG_CRM charge currents */
+/*
+ * CPCAP_REG_CRM charge currents. These seem to match MC13783UG.pdf
+ * values in "Table 8-3. Charge Path Regulator Current Limit
+ * Characteristics" for the nominal values.
+ */
 #define CPCAP_REG_CRM_ICHRG(val)	(((val) & 0xf) << 0)
 #define CPCAP_REG_CRM_ICHRG_0A000	CPCAP_REG_CRM_ICHRG(0x0)
 #define CPCAP_REG_CRM_ICHRG_0A070	CPCAP_REG_CRM_ICHRG(0x1)
-#define CPCAP_REG_CRM_ICHRG_0A176	CPCAP_REG_CRM_ICHRG(0x2)
-#define CPCAP_REG_CRM_ICHRG_0A264	CPCAP_REG_CRM_ICHRG(0x3)
-#define CPCAP_REG_CRM_ICHRG_0A352	CPCAP_REG_CRM_ICHRG(0x4)
-#define CPCAP_REG_CRM_ICHRG_0A440	CPCAP_REG_CRM_ICHRG(0x5)
-#define CPCAP_REG_CRM_ICHRG_0A528	CPCAP_REG_CRM_ICHRG(0x6)
-#define CPCAP_REG_CRM_ICHRG_0A616	CPCAP_REG_CRM_ICHRG(0x7)
-#define CPCAP_REG_CRM_ICHRG_0A704	CPCAP_REG_CRM_ICHRG(0x8)
-#define CPCAP_REG_CRM_ICHRG_0A792	CPCAP_REG_CRM_ICHRG(0x9)
-#define CPCAP_REG_CRM_ICHRG_0A880	CPCAP_REG_CRM_ICHRG(0xa)
-#define CPCAP_REG_CRM_ICHRG_0A968	CPCAP_REG_CRM_ICHRG(0xb)
-#define CPCAP_REG_CRM_ICHRG_1A056	CPCAP_REG_CRM_ICHRG(0xc)
-#define CPCAP_REG_CRM_ICHRG_1A144	CPCAP_REG_CRM_ICHRG(0xd)
-#define CPCAP_REG_CRM_ICHRG_1A584	CPCAP_REG_CRM_ICHRG(0xe)
+#define CPCAP_REG_CRM_ICHRG_0A177	CPCAP_REG_CRM_ICHRG(0x2)
+#define CPCAP_REG_CRM_ICHRG_0A266	CPCAP_REG_CRM_ICHRG(0x3)
+#define CPCAP_REG_CRM_ICHRG_0A355	CPCAP_REG_CRM_ICHRG(0x4)
+#define CPCAP_REG_CRM_ICHRG_0A443	CPCAP_REG_CRM_ICHRG(0x5)
+#define CPCAP_REG_CRM_ICHRG_0A532	CPCAP_REG_CRM_ICHRG(0x6)
+#define CPCAP_REG_CRM_ICHRG_0A621	CPCAP_REG_CRM_ICHRG(0x7)
+#define CPCAP_REG_CRM_ICHRG_0A709	CPCAP_REG_CRM_ICHRG(0x8)
+#define CPCAP_REG_CRM_ICHRG_0A798	CPCAP_REG_CRM_ICHRG(0x9)
+#define CPCAP_REG_CRM_ICHRG_0A886	CPCAP_REG_CRM_ICHRG(0xa)
+#define CPCAP_REG_CRM_ICHRG_0A975	CPCAP_REG_CRM_ICHRG(0xb)
+#define CPCAP_REG_CRM_ICHRG_1A064	CPCAP_REG_CRM_ICHRG(0xc)
+#define CPCAP_REG_CRM_ICHRG_1A152	CPCAP_REG_CRM_ICHRG(0xd)
+#define CPCAP_REG_CRM_ICHRG_1A596	CPCAP_REG_CRM_ICHRG(0xe)
 #define CPCAP_REG_CRM_ICHRG_NO_LIMIT	CPCAP_REG_CRM_ICHRG(0xf)
 
 enum {
@@ -428,9 +443,9 @@ static void cpcap_usb_detect(struct work_struct *work)
 		int max_current;
 
 		if (cpcap_charger_battery_found(ddata))
-			max_current = CPCAP_REG_CRM_ICHRG_1A584;
+			max_current = CPCAP_REG_CRM_ICHRG_1A596;
 		else
-			max_current = CPCAP_REG_CRM_ICHRG_0A528;
+			max_current = CPCAP_REG_CRM_ICHRG_0A532;
 
 		error = cpcap_charger_set_state(ddata,
 						CPCAP_REG_CRM_VCHRG_4V35,
@@ -586,6 +601,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
 {
 	struct cpcap_charger_ddata *ddata;
 	const struct of_device_id *of_id;
+	struct power_supply_config psy_cfg = {};
 	int error;
 
 	of_id = of_match_device(of_match_ptr(cpcap_charger_id_table),
@@ -614,9 +630,12 @@ static int cpcap_charger_probe(struct platform_device *pdev)
 
 	atomic_set(&ddata->active, 1);
 
+	psy_cfg.of_node = pdev->dev.of_node;
+	psy_cfg.drv_data = ddata;
+
 	ddata->usb = devm_power_supply_register(ddata->dev,
 						&cpcap_charger_usb_desc,
-						NULL);
+						&psy_cfg);
 	if (IS_ERR(ddata->usb)) {
 		error = PTR_ERR(ddata->usb);
 		dev_err(ddata->dev, "failed to register USB charger: %i\n",
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 1722568..ae180dc 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 #include "../../w1/slaves/w1_ds2760.h"
 
 struct ds2760_device_info {
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 1b3b6fa..8edd4aa 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -21,7 +21,7 @@
 #include <linux/power_supply.h>
 #include <linux/idr.h>
 
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 #include "../../w1/slaves/w1_ds2780.h"
 
 /* Current unit measurement in uA for a 1 milli-ohm sense resistor */
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index cc01491..4400402 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -19,7 +19,7 @@
 #include <linux/power_supply.h>
 #include <linux/idr.h>
 
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 #include "../../w1/slaves/w1_ds2781.h"
 
 /* Current unit measurement in uA for a 1 milli-ohm sense resistor */
diff --git a/drivers/power/supply/ltc3651-charger.c b/drivers/power/supply/ltc3651-charger.c
new file mode 100644
index 0000000..eea63ff
--- /dev/null
+++ b/drivers/power/supply/ltc3651-charger.c
@@ -0,0 +1,210 @@
+/*
+ *  Copyright (C) 2017, Topic Embedded Products
+ *  Driver for LTC3651 charger IC.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+struct ltc3651_charger {
+	struct power_supply *charger;
+	struct power_supply_desc charger_desc;
+	struct gpio_desc *acpr_gpio;
+	struct gpio_desc *fault_gpio;
+	struct gpio_desc *chrg_gpio;
+};
+
+static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
+{
+	struct power_supply *charger = devid;
+
+	power_supply_changed(charger);
+
+	return IRQ_HANDLED;
+}
+
+static inline struct ltc3651_charger *psy_to_ltc3651_charger(
+	struct power_supply *psy)
+{
+	return power_supply_get_drvdata(psy);
+}
+
+static int ltc3651_charger_get_property(struct power_supply *psy,
+		enum power_supply_property psp, union power_supply_propval *val)
+{
+	struct ltc3651_charger *ltc3651_charger = psy_to_ltc3651_charger(psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!ltc3651_charger->chrg_gpio) {
+			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+			break;
+		}
+		if (gpiod_get_value(ltc3651_charger->chrg_gpio))
+			val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		else
+			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = gpiod_get_value(ltc3651_charger->acpr_gpio);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		if (!ltc3651_charger->fault_gpio) {
+			val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+			break;
+		}
+		if (!gpiod_get_value(ltc3651_charger->fault_gpio)) {
+			val->intval = POWER_SUPPLY_HEALTH_GOOD;
+			break;
+		}
+		/*
+		 * If the fault pin is active, the chrg pin explains the type
+		 * of failure.
+		 */
+		if (!ltc3651_charger->chrg_gpio) {
+			val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+			break;
+		}
+		val->intval = gpiod_get_value(ltc3651_charger->chrg_gpio) ?
+				POWER_SUPPLY_HEALTH_OVERHEAT :
+				POWER_SUPPLY_HEALTH_DEAD;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum power_supply_property ltc3651_charger_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int ltc3651_charger_probe(struct platform_device *pdev)
+{
+	struct power_supply_config psy_cfg = {};
+	struct ltc3651_charger *ltc3651_charger;
+	struct power_supply_desc *charger_desc;
+	int ret;
+
+	ltc3651_charger = devm_kzalloc(&pdev->dev, sizeof(*ltc3651_charger),
+					GFP_KERNEL);
+	if (!ltc3651_charger)
+		return -ENOMEM;
+
+	ltc3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
+					"lltc,acpr", GPIOD_IN);
+	if (IS_ERR(ltc3651_charger->acpr_gpio)) {
+		ret = PTR_ERR(ltc3651_charger->acpr_gpio);
+		dev_err(&pdev->dev, "Failed to acquire acpr GPIO: %d\n", ret);
+		return ret;
+	}
+	ltc3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
+					"lltc,fault", GPIOD_IN);
+	if (IS_ERR(ltc3651_charger->fault_gpio)) {
+		ret = PTR_ERR(ltc3651_charger->fault_gpio);
+		dev_err(&pdev->dev, "Failed to acquire fault GPIO: %d\n", ret);
+		return ret;
+	}
+	ltc3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
+					"lltc,chrg", GPIOD_IN);
+	if (IS_ERR(ltc3651_charger->chrg_gpio)) {
+		ret = PTR_ERR(ltc3651_charger->chrg_gpio);
+		dev_err(&pdev->dev, "Failed to acquire chrg GPIO: %d\n", ret);
+		return ret;
+	}
+
+	charger_desc = &ltc3651_charger->charger_desc;
+	charger_desc->name = pdev->dev.of_node->name;
+	charger_desc->type = POWER_SUPPLY_TYPE_MAINS;
+	charger_desc->properties = ltc3651_charger_properties;
+	charger_desc->num_properties = ARRAY_SIZE(ltc3651_charger_properties);
+	charger_desc->get_property = ltc3651_charger_get_property;
+	psy_cfg.of_node = pdev->dev.of_node;
+	psy_cfg.drv_data = ltc3651_charger;
+
+	ltc3651_charger->charger = devm_power_supply_register(&pdev->dev,
+						      charger_desc, &psy_cfg);
+	if (IS_ERR(ltc3651_charger->charger)) {
+		ret = PTR_ERR(ltc3651_charger->charger);
+		dev_err(&pdev->dev, "Failed to register power supply: %d\n",
+			ret);
+		return ret;
+	}
+
+	/*
+	 * Acquire IRQs for the GPIO pins if possible. If the system does not
+	 * support IRQs on these pins, userspace will have to poll the sysfs
+	 * files manually.
+	 */
+	if (ltc3651_charger->acpr_gpio) {
+		ret = gpiod_to_irq(ltc3651_charger->acpr_gpio);
+		if (ret >= 0)
+			ret = devm_request_any_context_irq(&pdev->dev, ret,
+				ltc3651_charger_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				dev_name(&pdev->dev), ltc3651_charger->charger);
+		if (ret < 0)
+			dev_warn(&pdev->dev, "Failed to request acpr irq\n");
+	}
+	if (ltc3651_charger->fault_gpio) {
+		ret = gpiod_to_irq(ltc3651_charger->fault_gpio);
+		if (ret >= 0)
+			ret = devm_request_any_context_irq(&pdev->dev, ret,
+				ltc3651_charger_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				dev_name(&pdev->dev), ltc3651_charger->charger);
+		if (ret < 0)
+			dev_warn(&pdev->dev, "Failed to request fault irq\n");
+	}
+	if (ltc3651_charger->chrg_gpio) {
+		ret = gpiod_to_irq(ltc3651_charger->chrg_gpio);
+		if (ret >= 0)
+			ret = devm_request_any_context_irq(&pdev->dev, ret,
+				ltc3651_charger_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				dev_name(&pdev->dev), ltc3651_charger->charger);
+		if (ret < 0)
+			dev_warn(&pdev->dev, "Failed to request chrg irq\n");
+	}
+
+	platform_set_drvdata(pdev, ltc3651_charger);
+
+	return 0;
+}
+
+static const struct of_device_id ltc3651_charger_match[] = {
+	{ .compatible = "lltc,ltc3651-charger" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ltc3651_charger_match);
+
+static struct platform_driver ltc3651_charger_driver = {
+	.probe = ltc3651_charger_probe,
+	.driver = {
+		.name = "ltc3651-charger",
+		.of_match_table = ltc3651_charger_match,
+	},
+};
+
+module_platform_driver(ltc3651_charger_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("Driver for LTC3651 charger");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ltc3651-charger");
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 7ec7c7c..540d3e0 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/power_supply.h>
 #include <linux/thermal.h>
 #include "power_supply.h"
@@ -274,8 +275,30 @@ static int power_supply_check_supplies(struct power_supply *psy)
 	return power_supply_populate_supplied_from(psy);
 }
 #else
-static inline int power_supply_check_supplies(struct power_supply *psy)
+static int power_supply_check_supplies(struct power_supply *psy)
 {
+	int nval, ret;
+
+	if (!psy->dev.parent)
+		return 0;
+
+	nval = device_property_read_string_array(psy->dev.parent,
+						 "supplied-from", NULL, 0);
+	if (nval <= 0)
+		return 0;
+
+	psy->supplied_from = devm_kmalloc_array(&psy->dev, nval,
+						sizeof(char *), GFP_KERNEL);
+	if (!psy->supplied_from)
+		return -ENOMEM;
+
+	ret = device_property_read_string_array(psy->dev.parent,
+		"supplied-from", (const char **)psy->supplied_from, nval);
+	if (ret < 0)
+		return ret;
+
+	psy->num_supplies = nval;
+
 	return 0;
 }
 #endif
@@ -497,6 +520,62 @@ struct power_supply *devm_power_supply_get_by_phandle(struct device *dev,
 EXPORT_SYMBOL_GPL(devm_power_supply_get_by_phandle);
 #endif /* CONFIG_OF */
 
+int power_supply_get_battery_info(struct power_supply *psy,
+				  struct power_supply_battery_info *info)
+{
+	struct device_node *battery_np;
+	const char *value;
+	int err;
+
+	info->energy_full_design_uwh         = -EINVAL;
+	info->charge_full_design_uah         = -EINVAL;
+	info->voltage_min_design_uv          = -EINVAL;
+	info->precharge_current_ua           = -EINVAL;
+	info->charge_term_current_ua         = -EINVAL;
+	info->constant_charge_current_max_ua = -EINVAL;
+	info->constant_charge_voltage_max_uv = -EINVAL;
+
+	if (!psy->of_node) {
+		dev_warn(&psy->dev, "%s currently only supports devicetree\n",
+			 __func__);
+		return -ENXIO;
+	}
+
+	battery_np = of_parse_phandle(psy->of_node, "monitored-battery", 0);
+	if (!battery_np)
+		return -ENODEV;
+
+	err = of_property_read_string(battery_np, "compatible", &value);
+	if (err)
+		return err;
+
+	if (strcmp("simple-battery", value))
+		return -ENODEV;
+
+	/* The property and field names below must correspond to elements
+	 * in enum power_supply_property. For reasoning, see
+	 * Documentation/power/power_supply_class.txt.
+	 */
+
+	of_property_read_u32(battery_np, "energy-full-design-microwatt-hours",
+			     &info->energy_full_design_uwh);
+	of_property_read_u32(battery_np, "charge-full-design-microamp-hours",
+			     &info->charge_full_design_uah);
+	of_property_read_u32(battery_np, "voltage-min-design-microvolt",
+			     &info->voltage_min_design_uv);
+	of_property_read_u32(battery_np, "precharge-current-microamp",
+			     &info->precharge_current_ua);
+	of_property_read_u32(battery_np, "charge-term-current-microamp",
+			     &info->charge_term_current_ua);
+	of_property_read_u32(battery_np, "constant_charge_current_max_microamp",
+			     &info->constant_charge_current_max_ua);
+	of_property_read_u32(battery_np, "constant_charge_voltage_max_microvolt",
+			     &info->constant_charge_voltage_max_uv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_battery_info);
+
 int power_supply_get_property(struct power_supply *psy,
 			    enum power_supply_property psp,
 			    union power_supply_propval *val)
@@ -669,7 +748,7 @@ static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
 	return ret;
 }
 
-static struct thermal_cooling_device_ops psy_tcd_ops = {
+static const struct thermal_cooling_device_ops psy_tcd_ops = {
 	.get_max_state = ps_get_max_charge_cntl_limit,
 	.get_cur_state = ps_get_cur_chrage_cntl_limit,
 	.set_cur_state = ps_set_cur_charge_cntl_limit,
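power_supply_get_battery_info() above is meant to be called by a charger or fuel-gauge driver whose devicetree node carries a monitored-battery phandle to a simple-battery node. A rough sketch of a caller follows; the foo_charger structure and its fields are hypothetical names used only for illustration and are not part of this series.

#include <linux/power_supply.h>

struct foo_charger {			/* hypothetical driver state */
	struct power_supply *psy;
	int cc_max_ua;
	int cv_max_uv;
};

static int foo_charger_apply_battery_info(struct foo_charger *chg)
{
	struct power_supply_battery_info info = {};
	int ret;

	ret = power_supply_get_battery_info(chg->psy, &info);
	if (ret)
		return ret;	/* no usable monitored-battery node */

	/* Properties missing from the devicetree are left at -EINVAL */
	if (info.constant_charge_current_max_ua > 0)
		chg->cc_max_ua = info.constant_charge_current_max_ua;
	if (info.constant_charge_voltage_max_uv > 0)
		chg->cv_max_uv = info.constant_charge_voltage_max_uv;

	return 0;
}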
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index bcde8d1..5204f11 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -40,35 +40,42 @@
 
 static struct device_attribute power_supply_attrs[];
 
+static const char * const power_supply_type_text[] = {
+	"Unknown", "Battery", "UPS", "Mains", "USB",
+	"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
+	"USB_PD", "USB_PD_DRP", "BrickID"
+};
+
+static const char * const power_supply_status_text[] = {
+	"Unknown", "Charging", "Discharging", "Not charging", "Full"
+};
+
+static const char * const power_supply_charge_type_text[] = {
+	"Unknown", "N/A", "Trickle", "Fast"
+};
+
+static const char * const power_supply_health_text[] = {
+	"Unknown", "Good", "Overheat", "Dead", "Over voltage",
+	"Unspecified failure", "Cold", "Watchdog timer expire",
+	"Safety timer expire"
+};
+
+static const char * const power_supply_technology_text[] = {
+	"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
+	"LiMn"
+};
+
+static const char * const power_supply_capacity_level_text[] = {
+	"Unknown", "Critical", "Low", "Normal", "High", "Full"
+};
+
+static const char * const power_supply_scope_text[] = {
+	"Unknown", "System", "Device"
+};
+
 static ssize_t power_supply_show_property(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf) {
-	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
-		"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
-		"USB_PD", "USB_PD_DRP"
-	};
-	static char *status_text[] = {
-		"Unknown", "Charging", "Discharging", "Not charging", "Full"
-	};
-	static char *charge_type[] = {
-		"Unknown", "N/A", "Trickle", "Fast"
-	};
-	static char *health_text[] = {
-		"Unknown", "Good", "Overheat", "Dead", "Over voltage",
-		"Unspecified failure", "Cold", "Watchdog timer expire",
-		"Safety timer expire"
-	};
-	static char *technology_text[] = {
-		"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
-		"LiMn"
-	};
-	static char *capacity_level_text[] = {
-		"Unknown", "Critical", "Low", "Normal", "High", "Full"
-	};
-	static char *scope_text[] = {
-		"Unknown", "System", "Device"
-	};
 	ssize_t ret = 0;
 	struct power_supply *psy = dev_get_drvdata(dev);
 	const ptrdiff_t off = attr - power_supply_attrs;
@@ -91,19 +98,26 @@ static ssize_t power_supply_show_property(struct device *dev,
 	}
 
 	if (off == POWER_SUPPLY_PROP_STATUS)
-		return sprintf(buf, "%s\n", status_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_status_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
-		return sprintf(buf, "%s\n", charge_type[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_charge_type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_HEALTH)
-		return sprintf(buf, "%s\n", health_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_health_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
-		return sprintf(buf, "%s\n", technology_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_technology_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
-		return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_capacity_level_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPE)
-		return sprintf(buf, "%s\n", type_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_SCOPE)
-		return sprintf(buf, "%s\n", scope_text[value.intval]);
+		return sprintf(buf, "%s\n",
+			       power_supply_scope_text[value.intval]);
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
 		return sprintf(buf, "%s\n", value.strval);
 
@@ -117,14 +131,46 @@ static ssize_t power_supply_store_property(struct device *dev,
 	struct power_supply *psy = dev_get_drvdata(dev);
 	const ptrdiff_t off = attr - power_supply_attrs;
 	union power_supply_propval value;
-	long long_val;
 
-	/* TODO: support other types than int */
-	ret = kstrtol(buf, 10, &long_val);
-	if (ret < 0)
-		return ret;
+	/* maybe it is an enum property? */
+	switch (off) {
+	case POWER_SUPPLY_PROP_STATUS:
+		ret = sysfs_match_string(power_supply_status_text, buf);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		ret = sysfs_match_string(power_supply_charge_type_text, buf);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		ret = sysfs_match_string(power_supply_health_text, buf);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		ret = sysfs_match_string(power_supply_technology_text, buf);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+		ret = sysfs_match_string(power_supply_capacity_level_text, buf);
+		break;
+	case POWER_SUPPLY_PROP_SCOPE:
+		ret = sysfs_match_string(power_supply_scope_text, buf);
+		break;
+	default:
+		ret = -EINVAL;
+	}
 
-	value.intval = long_val;
+	/*
+	 * If no match was found, then check to see if it is an integer.
+	 * Integer values are valid for enums in addition to the text value.
+	 */
+	if (ret < 0) {
+		long long_val;
+
+		ret = kstrtol(buf, 10, &long_val);
+		if (ret < 0)
+			return ret;
+
+		ret = long_val;
+	}
+
+	value.intval = ret;
 
 	ret = power_supply_set_property(psy, off, &value);
 	if (ret < 0)
@@ -196,6 +242,7 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(time_to_full_avg),
 	POWER_SUPPLY_ATTR(type),
 	POWER_SUPPLY_ATTR(scope),
+	POWER_SUPPLY_ATTR(precharge_current),
 	POWER_SUPPLY_ATTR(charge_term_current),
 	POWER_SUPPLY_ATTR(calibrate),
 	/* Properties of type `const char *' */
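With the power_supply_store_property() change above, an enum-typed property accepts either its text value or the integer index into the matching *_text[] table. A small user-space sketch follows; the sysfs path is an assumption (the supply name depends on the driver), and the write only succeeds if the driver implements set_property for that property.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/power_supply/battery/status";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fputs("Charging", f);	/* equivalent to writing "1" */
	return fclose(f) ? 1 : 0;
}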
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index e3a114e..f705945 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -171,6 +171,7 @@ struct sbs_info {
 	u32				i2c_retry_count;
 	u32				poll_retry_count;
 	struct delayed_work		work;
+	struct mutex			mode_lock;
 };
 
 static char model_name[I2C_SMBUS_BLOCK_MAX + 1];
@@ -199,7 +200,7 @@ static int sbs_read_word_data(struct i2c_client *client, u8 address)
 		return ret;
 	}
 
-	return le16_to_cpu(ret);
+	return ret;
 }
 
 static int sbs_read_string_data(struct i2c_client *client, u8 address,
@@ -265,7 +266,7 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address,
 	memcpy(values, block_buffer + 1, block_length);
 	values[block_length] = '\0';
 
-	return le16_to_cpu(ret);
+	return ret;
 }
 
 static int sbs_write_word_data(struct i2c_client *client, u8 address,
@@ -278,8 +279,7 @@ static int sbs_write_word_data(struct i2c_client *client, u8 address,
 	retries = chip->i2c_retry_count;
 
 	while (retries > 0) {
-		ret = i2c_smbus_write_word_data(client, address,
-			le16_to_cpu(value));
+		ret = i2c_smbus_write_word_data(client, address, value);
 		if (ret >= 0)
 			break;
 		retries--;
@@ -438,6 +438,11 @@ static int sbs_get_battery_property(struct i2c_client *client,
 	} else {
 		if (psp == POWER_SUPPLY_PROP_STATUS)
 			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+		else if (psp == POWER_SUPPLY_PROP_CAPACITY)
+			/* sbs spec says that this can be >100 %
+			 * even if max value is 100 %
+			 */
+			val->intval = min(ret, 100);
 		else
 			val->intval = 0;
 	}
@@ -548,12 +553,7 @@ static int sbs_get_battery_capacity(struct i2c_client *client,
 	if (ret < 0)
 		return ret;
 
-	if (psp == POWER_SUPPLY_PROP_CAPACITY) {
-		/* sbs spec says that this can be >100 %
-		* even if max value is 100 % */
-		val->intval = min(ret, 100);
-	} else
-		val->intval = ret;
+	val->intval = ret;
 
 	ret = sbs_set_battery_mode(client, mode);
 	if (ret < 0)
@@ -618,12 +618,17 @@ static int sbs_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_CHARGE_NOW:
 	case POWER_SUPPLY_PROP_CHARGE_FULL:
 	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
-	case POWER_SUPPLY_PROP_CAPACITY:
 		ret = sbs_get_property_index(client, psp);
 		if (ret < 0)
 			break;
 
+		/* sbs_get_battery_capacity() will change the battery mode
+		 * temporarily to read the requested attribute. Ensure we stay
+		 * in the desired mode for the duration of the attribute read.
+		 */
+		mutex_lock(&chip->mode_lock);
 		ret = sbs_get_battery_capacity(client, ret, psp, val);
+		mutex_unlock(&chip->mode_lock);
 		break;
 
 	case POWER_SUPPLY_PROP_SERIAL_NUMBER:
@@ -640,6 +645,7 @@ static int sbs_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+	case POWER_SUPPLY_PROP_CAPACITY:
 		ret = sbs_get_property_index(client, psp);
 		if (ret < 0)
 			break;
@@ -808,6 +814,7 @@ static int sbs_probe(struct i2c_client *client,
 	psy_cfg.of_node = client->dev.of_node;
 	psy_cfg.drv_data = chip;
 	chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+	mutex_init(&chip->mode_lock);
 
 	/* use pdata if available, fall back to DT properties,
 	 * or hardcoded defaults if not
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 2f82d0e..3de802f 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -153,7 +153,7 @@ struct twl4030_bci {
 };
 
 /* strings for 'usb_mode' values */
-static char *modes[] = { "off", "auto", "continuous" };
+static const char *modes[] = { "off", "auto", "continuous" };
 
 /*
  * clear and set bits on an given register on a given module
@@ -624,63 +624,6 @@ static irqreturn_t twl4030_bci_interrupt(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-/*
- * Provide "max_current" attribute in sysfs.
- */
-static ssize_t
-twl4030_bci_max_current_store(struct device *dev, struct device_attribute *attr,
-	const char *buf, size_t n)
-{
-	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
-	int cur = 0;
-	int status = 0;
-	status = kstrtoint(buf, 10, &cur);
-	if (status)
-		return status;
-	if (cur < 0)
-		return -EINVAL;
-	if (dev == &bci->ac->dev)
-		bci->ac_cur = cur;
-	else
-		bci->usb_cur_target = cur;
-
-	twl4030_charger_update_current(bci);
-	return n;
-}
-
-/*
- * sysfs max_current show
- */
-static ssize_t twl4030_bci_max_current_show(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	int status = 0;
-	int cur = -1;
-	u8 bcictl1;
-	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
-
-	if (dev == &bci->ac->dev) {
-		if (!bci->ac_is_active)
-			cur = bci->ac_cur;
-	} else {
-		if (bci->ac_is_active)
-			cur = bci->usb_cur_target;
-	}
-	if (cur < 0) {
-		cur = twl4030bci_read_adc_val(TWL4030_BCIIREF1);
-		if (cur < 0)
-			return cur;
-		status = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
-		if (status < 0)
-			return status;
-		cur = regval2ua(cur, bcictl1 & TWL4030_CGAIN);
-	}
-	return scnprintf(buf, PAGE_SIZE, "%u\n", cur);
-}
-
-static DEVICE_ATTR(max_current, 0644, twl4030_bci_max_current_show,
-			twl4030_bci_max_current_store);
-
 static void twl4030_bci_usb_work(struct work_struct *data)
 {
 	struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
@@ -726,14 +669,10 @@ twl4030_bci_mode_store(struct device *dev, struct device_attribute *attr,
 	int mode;
 	int status;
 
-	if (sysfs_streq(buf, modes[0]))
-		mode = 0;
-	else if (sysfs_streq(buf, modes[1]))
-		mode = 1;
-	else if (sysfs_streq(buf, modes[2]))
-		mode = 2;
-	else
-		return -EINVAL;
+	mode = sysfs_match_string(modes, buf);
+	if (mode < 0)
+		return mode;
+
 	if (dev == &bci->ac->dev) {
 		if (mode == 2)
 			return -EINVAL;
@@ -1041,6 +980,12 @@ static int twl4030_bci_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, bci);
 
+	bci->channel_vac = devm_iio_channel_get(&pdev->dev, "vac");
+	if (IS_ERR(bci->channel_vac)) {
+		bci->channel_vac = NULL;
+		dev_warn(&pdev->dev, "could not request vac iio channel");
+	}
+
 	bci->ac = devm_power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
 					     NULL);
 	if (IS_ERR(bci->ac)) {
@@ -1074,12 +1019,6 @@ static int twl4030_bci_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	bci->channel_vac = iio_channel_get(&pdev->dev, "vac");
-	if (IS_ERR(bci->channel_vac)) {
-		bci->channel_vac = NULL;
-		dev_warn(&pdev->dev, "could not request vac iio channel");
-	}
-
 	INIT_WORK(&bci->work, twl4030_bci_usb_work);
 	INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
 
@@ -1101,7 +1040,7 @@ static int twl4030_bci_probe(struct platform_device *pdev)
 			       TWL4030_INTERRUPTS_BCIIMR1A);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
-		goto fail;
+		return ret;
 	}
 
 	reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
@@ -1111,14 +1050,10 @@ static int twl4030_bci_probe(struct platform_device *pdev)
 		dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
 
 	twl4030_charger_update_current(bci);
-	if (device_create_file(&bci->usb->dev, &dev_attr_max_current))
-		dev_warn(&pdev->dev, "could not create sysfs file\n");
 	if (device_create_file(&bci->usb->dev, &dev_attr_mode))
 		dev_warn(&pdev->dev, "could not create sysfs file\n");
 	if (device_create_file(&bci->ac->dev, &dev_attr_mode))
 		dev_warn(&pdev->dev, "could not create sysfs file\n");
-	if (device_create_file(&bci->ac->dev, &dev_attr_max_current))
-		dev_warn(&pdev->dev, "could not create sysfs file\n");
 
 	twl4030_charger_enable_ac(bci, true);
 	if (!IS_ERR_OR_NULL(bci->transceiver))
@@ -1134,10 +1069,6 @@ static int twl4030_bci_probe(struct platform_device *pdev)
 		twl4030_charger_enable_backup(0, 0);
 
 	return 0;
-fail:
-	iio_channel_release(bci->channel_vac);
-
-	return ret;
 }
 
 static int twl4030_bci_remove(struct platform_device *pdev)
@@ -1148,11 +1079,7 @@ static int twl4030_bci_remove(struct platform_device *pdev)
 	twl4030_charger_enable_usb(bci, false);
 	twl4030_charger_enable_backup(0, 0);
 
-	iio_channel_release(bci->channel_vac);
-
-	device_remove_file(&bci->usb->dev, &dev_attr_max_current);
 	device_remove_file(&bci->usb->dev, &dev_attr_mode);
-	device_remove_file(&bci->ac->dev, &dev_attr_max_current);
 	device_remove_file(&bci->ac->dev, &dev_attr_mode);
 	/* mask interrupts */
 	twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 9ddad08..d1694f1 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -874,7 +874,9 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 
 	cpu = rd->rp->lead_cpu;
 	bits = rapl_unit_xlate(rd, rp->unit, value, 1);
-	bits |= bits << rp->shift;
+	bits <<= rp->shift;
+	bits &= rp->mask;
+
 	memset(&ma, 0, sizeof(ma));
 
 	ma.msr_no = rd->msrs[rp->id];
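The intel_rapl change above replaces bits |= bits << rp->shift, which left an unshifted copy of the value OR-ed into the low bits of the MSR word, with a plain shift masked to the target field. A tiny sketch with made-up shift and mask values shows the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t value = 0x5;		/* raw value to program, assumed */
	unsigned int shift = 8;		/* example field position */
	uint64_t mask = 0xff00;		/* example field mask */
	uint64_t old = value | (value << shift);	/* 0x505: low bits leak through */
	uint64_t fixed = (value << shift) & mask;	/* 0x500: only the field is set */

	printf("old: %#llx  fixed: %#llx\n",
	       (unsigned long long)old, (unsigned long long)fixed);
	return 0;
}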
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 564a51a..4b29a71 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -2,9 +2,7 @@
 # PPS support configuration
 #
 
-menu "PPS support"
-
-config PPS
+menuconfig PPS
 	tristate "PPS support"
 	---help---
 	  PPS (Pulse Per Second) is a special pulse provided by some GPS
@@ -20,10 +18,10 @@
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called pps_core.ko.
-if PPS
 
 config PPS_DEBUG
 	bool "PPS debugging messages"
+	depends on PPS
 	help
 	  Say Y here if you want the PPS support to produce a bunch of debug
 	  messages to the system log.  Select this if you are having a
@@ -31,17 +29,13 @@
 
 config NTP_PPS
 	bool "PPS kernel consumer support"
-	depends on !NO_HZ_COMMON
+	depends on PPS && !NO_HZ_COMMON
 	help
 	  This option adds support for direct in-kernel time
 	  synchronization using an external PPS signal.
 
 	  It doesn't work on tickless systems at the moment.
 
-endif
-
 source drivers/pps/clients/Kconfig
 
 source drivers/pps/generators/Kconfig
-
-endmenu
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 0c9f280..efec021 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -2,12 +2,12 @@
 # PPS clients configuration
 #
 
-if PPS
-
 comment "PPS clients support"
+	depends on PPS
 
 config PPS_CLIENT_KTIMER
 	tristate "Kernel timer client (Testing client, use for debug)"
+	depends on PPS
 	help
 	  If you say yes here you get support for a PPS debugging client
 	  which uses a kernel timer to generate the PPS signal.
@@ -37,5 +37,3 @@
 	  GPIO. To be useful you must also register a platform device
 	  specifying the GPIO pin and other options, usually in your board
 	  setup.
-
-endif
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index e4c4f3d..86b5937 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -3,10 +3,11 @@
 #
 
 comment "PPS generators support"
+	depends on PPS
 
 config PPS_GENERATOR_PARPORT
 	tristate "Parallel port PPS signal generator"
-	depends on PARPORT && BROKEN
+	depends on PPS && PARPORT && BROKEN
 	help
 	  If you say yes here you get support for a PPS signal generator which
 	  utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 6aab46d..d0e5d6e 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -481,7 +481,7 @@ static int __init create_debugfs_nodes(void)
 
 	count = debugfs_create_file("count_threshold", S_IRUSR | S_IWUSR, d,
 				    &count_threshold, &count_threshold_ops);
-	if (!decay) {
+	if (!count) {
 		pr_warn("Error creating count_threshold debugfs node!\n");
 		goto err;
 	}
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index 94f8038..ed4c343 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -29,7 +29,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(extlog_mem_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(mc_event);
 
 
-int __init parse_ras_param(char *str)
+static int __init parse_ras_param(char *str)
 {
 #ifdef CONFIG_RAS_CEC
 	parse_cec_param(str);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 48db87d..99b9362 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -214,11 +214,11 @@
 	  will be called da9055-regulator.
 
 config REGULATOR_DA9062
-	tristate "Dialog Semiconductor DA9062 regulators"
+	tristate "Dialog Semiconductor DA9061/62 regulators"
 	depends on MFD_DA9062
 	help
 	  Say y here to support the BUCKs and LDOs regulators found on
-	  DA9062 PMICs.
+	  DA9061 and DA9062 PMICs.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called da9062-regulator.
@@ -296,6 +296,16 @@
 	  21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All
 	  of them come with support to either ECO (idle) or sleep mode.
 
+config REGULATOR_HI6421V530
+	tristate "HiSilicon Hi6421v530 PMIC voltage regulator support"
+	depends on MFD_HI6421_PMIC && OF
+	help
+	  This driver provides support for the voltage regulators on
+	  HiSilicon Hi6421v530 PMU / Codec IC.
+	  Hi6421v530 is a multi-function device which, on its regulator part,
+	  provides 5 general purpose LDOs, and all of them come with support
+	  for either ECO (idle) or sleep mode.
+
 config REGULATOR_HI655X
 	tristate "Hisilicon HI655X PMIC regulators support"
 	depends on ARCH_HISI || COMPILE_TEST
@@ -365,6 +375,14 @@
 	  chip contains six step-down DC/DC converters which can support
 	  9 mode multiphase configuration.
 
+config REGULATOR_LP87565
+	tristate "TI LP87565 Power regulators"
+	depends on MFD_TI_LP87565 && OF
+	help
+	  This driver supports LP87565 voltage regulator chips. LP87565
+	  provides four step-down converters. It supports software-based
+	  voltage control for different voltage domains.
+
 config REGULATOR_LP8788
 	tristate "TI LP8788 Power Regulators"
 	depends on MFD_LP8788
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index dc3503f..95b1e86 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
 obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
 obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
+obj-$(CONFIG_REGULATOR_HI6421V530) += hi6421v530-regulator.o
 obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
 obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
 obj-$(CONFIG_REGULATOR_LP873X) += lp873x-regulator.o
+obj-$(CONFIG_REGULATOR_LP87565) += lp87565-regulator.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
 obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 0b9d4e3..e2608fe 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,6 +244,82 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
 	.ops		= &axp20x_ops_sw,
 };
 
+static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
+	REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
+	REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
+};
+
+static const struct regulator_linear_range axp803_dcdc5_ranges[] = {
+	REGULATOR_LINEAR_RANGE(800000, 0x0, 0x20, 10000),
+	REGULATOR_LINEAR_RANGE(1140000, 0x21, 0x44, 20000),
+};
+
+static const struct regulator_linear_range axp803_dcdc6_ranges[] = {
+	REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000),
+	REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000),
+};
+
+/* AXP803's DLDO2, AXP806's CLDO2 and AXP809's DLDO1 share the same range */
+static const struct regulator_linear_range axp803_dldo2_ranges[] = {
+	REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000),
+	REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000),
+};
+
+static const struct regulator_desc axp803_regulators[] = {
+	AXP_DESC(AXP803, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+		 AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+	AXP_DESC_RANGES(AXP803, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
+			76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+			BIT(1)),
+	AXP_DESC_RANGES(AXP803, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
+			76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+			BIT(2)),
+	AXP_DESC_RANGES(AXP803, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
+			76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+			BIT(3)),
+	AXP_DESC_RANGES(AXP803, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
+			68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+			BIT(4)),
+	AXP_DESC_RANGES(AXP803, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
+			72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+			BIT(5)),
+	/* secondary switchable output of DCDC1 */
+	AXP_DESC_SW(AXP803, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
+		    BIT(7)),
+	AXP_DESC(AXP803, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+		 AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+	AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+		 AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+	AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+		 AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+	AXP_DESC(AXP803, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
+		 AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+	AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
+			32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+			BIT(4)),
+	AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
+		 AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+	AXP_DESC(AXP803, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
+		 AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+	AXP_DESC(AXP803, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
+		 AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+	AXP_DESC(AXP803, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
+		 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+	AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
+		 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+	AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
+		 AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+	AXP_DESC(AXP803, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
+		 AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+	AXP_DESC_IO(AXP803, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
+		    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+	AXP_DESC_IO(AXP803, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
+		    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+	AXP_DESC_FIXED(AXP803, RTC_LDO, "rtc-ldo", "ips", 3000),
+};
+
 static const struct regulator_linear_range axp806_dcdca_ranges[] = {
 	REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000),
 	REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000),
@@ -254,11 +330,6 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
 	REGULATOR_LINEAR_RANGE(1600000, 0x2e, 0x3f, 100000),
 };
 
-static const struct regulator_linear_range axp806_cldo2_ranges[] = {
-	REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000),
-	REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000),
-};
-
 static const struct regulator_desc axp806_regulators[] = {
 	AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina", axp806_dcdca_ranges,
 			72, AXP806_DCDCA_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1,
@@ -289,7 +360,7 @@ static const struct regulator_desc axp806_regulators[] = {
 		 AXP806_BLDO4_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(3)),
 	AXP_DESC(AXP806, CLDO1, "cldo1", "cldoin", 700, 3300, 100,
 		 AXP806_CLDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(4)),
-	AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin", axp806_cldo2_ranges,
+	AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin", axp803_dldo2_ranges,
 			32, AXP806_CLDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2,
 			BIT(5)),
 	AXP_DESC(AXP806, CLDO3, "cldo3", "cldoin", 700, 3300, 100,
@@ -326,7 +397,7 @@ static const struct regulator_desc axp809_regulators[] = {
 		 AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)),
 	AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
 		 AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
-	AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp806_cldo2_ranges,
+	AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp803_dldo2_ranges,
 			32, AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
 			BIT(3)),
 	AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
@@ -369,14 +440,21 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
 		def = 1500;
 		step = 75;
 		break;
-	case AXP806_ID:
+	case AXP803_ID:
 		/*
-		 * AXP806 DCDC work frequency setting has the same range and
+		 * AXP803 DCDC work frequency setting has the same range and
 		 * step as AXP22X, but at a different register.
 		 * Fall through to the check below.
 		 * (See include/linux/mfd/axp20x.h)
 		 */
-		reg = AXP806_DCDC_FREQ_CTRL;
+		reg = AXP803_DCDC_FREQ_CTRL;
+	case AXP806_ID:
+		/*
+		 * The AXP806 also has its DCDC work frequency setting
+		 * register at a different position.
+		 */
+		if (axp20x->variant == AXP806_ID)
+			reg = AXP806_DCDC_FREQ_CTRL;
 	case AXP221_ID:
 	case AXP223_ID:
 	case AXP809_ID:
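The hunk above leans on deliberate switch fall-through: the AXP803 case selects its own frequency-control register and then continues into the shared AXP22X-style range check, while the AXP806 case only overrides the register when the variant really is an AXP806. A stand-alone sketch of that control flow follows; the variant and register names are invented purely for illustration.

enum variant { VARIANT_X, VARIANT_Y, VARIANT_COMMON };

#define REG_X		0x3b	/* invented register offsets */
#define REG_Y		0x3c
#define REG_COMMON	0x37

static unsigned int pick_freq_reg(enum variant v)
{
	unsigned int reg = REG_COMMON;

	switch (v) {
	case VARIANT_X:
		reg = REG_X;
		/* fall through: shares the common range check below */
	case VARIANT_Y:
		/* only override when this really is VARIANT_Y */
		if (v == VARIANT_Y)
			reg = REG_Y;
		/* fall through */
	case VARIANT_COMMON:
	default:
		break;
	}

	return reg;
}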
@@ -475,6 +553,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
 		workmode <<= id - AXP22X_DCDC1;
 		break;
 
+	case AXP803_ID:
+		if (id < AXP803_DCDC1 || id > AXP803_DCDC6)
+			return -EINVAL;
+
+		mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP803_DCDC1);
+		workmode <<= id - AXP803_DCDC1;
+		break;
+
 	default:
 		/* should not happen */
 		WARN_ON(1);
@@ -492,20 +578,38 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
 {
 	u32 reg = 0;
 
-	/* Only AXP806 has poly-phase outputs */
-	if (axp20x->variant != AXP806_ID)
+	/*
+	 * Among the AXP variants currently supported, only the AXP803 and
+	 * AXP806 have polyphase regulators.
+	 */
+	switch (axp20x->variant) {
+	case AXP803_ID:
+		regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, &reg);
+
+		switch (id) {
+		case AXP803_DCDC3:
+			return !!(reg & BIT(6));
+		case AXP803_DCDC6:
+			return !!(reg & BIT(7));
+		}
+		break;
+
+	case AXP806_ID:
+		regmap_read(axp20x->regmap, AXP806_DCDC_MODE_CTRL2, &reg);
+
+		switch (id) {
+		case AXP806_DCDCB:
+			return (((reg & GENMASK(7, 6)) == BIT(6)) ||
+				((reg & GENMASK(7, 6)) == BIT(7)));
+		case AXP806_DCDCC:
+			return ((reg & GENMASK(7, 6)) == BIT(7));
+		case AXP806_DCDCE:
+			return !!(reg & BIT(5));
+		}
+		break;
+
+	default:
 		return false;
-
-	regmap_read(axp20x->regmap, AXP806_DCDC_MODE_CTRL2, &reg);
-
-	switch (id) {
-	case AXP806_DCDCB:
-		return (((reg & GENMASK(7, 6)) == BIT(6)) ||
-			((reg & GENMASK(7, 6)) == BIT(7)));
-	case AXP806_DCDCC:
-		return ((reg & GENMASK(7, 6)) == BIT(7));
-	case AXP806_DCDCE:
-		return !!(reg & BIT(5));
 	}
 
 	return false;
@@ -540,6 +644,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
 		drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
 						  "x-powers,drive-vbus-en");
 		break;
+	case AXP803_ID:
+		regulators = axp803_regulators;
+		nregulators = AXP803_REG_ID_MAX;
+		break;
 	case AXP806_ID:
 		regulators = axp806_regulators;
 		nregulators = AXP806_REG_ID_MAX;
@@ -579,6 +687,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
 		 * name.
 		 */
 		if ((regulators == axp22x_regulators && i == AXP22X_DC1SW) ||
+		    (regulators == axp803_regulators && i == AXP803_DC1SW) ||
 		    (regulators == axp809_regulators && i == AXP809_DC1SW)) {
 			new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
 						GFP_KERNEL);
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index 8ba206f..c67a83d5 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -43,7 +43,7 @@ enum bd9571mwv_regulators { VD09, VD18, VD25, VD33, DVFS };
 		.linear_min_sel		= _lmin,		\
 	}
 
-int bd9571mwv_avs_get_moni_state(struct regulator_dev *rdev)
+static int bd9571mwv_avs_get_moni_state(struct regulator_dev *rdev)
 {
 	unsigned int val;
 	int ret;
@@ -55,8 +55,8 @@ int bd9571mwv_avs_get_moni_state(struct regulator_dev *rdev)
 	return val & BD9571MWV_AVS_SET_MONI_MASK;
 }
 
-int bd9571mwv_avs_set_voltage_sel_regmap(struct regulator_dev *rdev,
-					 unsigned int sel)
+static int bd9571mwv_avs_set_voltage_sel_regmap(struct regulator_dev *rdev,
+						unsigned int sel)
 {
 	int ret;
 
@@ -68,7 +68,7 @@ int bd9571mwv_avs_set_voltage_sel_regmap(struct regulator_dev *rdev,
 				 rdev->desc->vsel_mask, sel);
 }
 
-int bd9571mwv_avs_get_voltage_sel_regmap(struct regulator_dev *rdev)
+static int bd9571mwv_avs_get_voltage_sel_regmap(struct regulator_dev *rdev)
 {
 	unsigned int val;
 	int ret;
@@ -87,8 +87,8 @@ int bd9571mwv_avs_get_voltage_sel_regmap(struct regulator_dev *rdev)
 	return val;
 }
 
-int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev,
-					 unsigned int sel)
+static int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev,
+						unsigned int sel)
 {
 	return regmap_write_bits(rdev->regmap, BD9571MWV_DVFS_SETVID,
 				 rdev->desc->vsel_mask, sel);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c0d9ae8..e567fa5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1462,7 +1462,7 @@ static struct regulator_dev *regulator_lookup_by_name(const char *name)
 static struct regulator_dev *regulator_dev_lookup(struct device *dev,
 						  const char *supply)
 {
-	struct regulator_dev *r;
+	struct regulator_dev *r = NULL;
 	struct device_node *node;
 	struct regulator_map *map;
 	const char *devname = NULL;
@@ -1489,10 +1489,6 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
 	if (dev)
 		devname = dev_name(dev);
 
-	r = regulator_lookup_by_name(supply);
-	if (r)
-		return r;
-
 	mutex_lock(&regulator_list_mutex);
 	list_for_each_entry(map, &regulator_map_list, list) {
 		/* If the mapping has a device set up it must match */
@@ -1511,6 +1507,10 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
 	if (r)
 		return r;
 
+	r = regulator_lookup_by_name(supply);
+	if (r)
+		return r;
+
 	return ERR_PTR(-ENODEV);
 }
 
@@ -2767,6 +2767,12 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
 		ramp_delay = rdev->desc->ramp_delay;
 	else if (rdev->constraints->settling_time)
 		return rdev->constraints->settling_time;
+	else if (rdev->constraints->settling_time_up &&
+		 (new_uV > old_uV))
+		return rdev->constraints->settling_time_up;
+	else if (rdev->constraints->settling_time_down &&
+		 (new_uV < old_uV))
+		return rdev->constraints->settling_time_down;
 
 	if (ramp_delay == 0) {
 		rdev_dbg(rdev, "ramp_delay not set\n");
@@ -2938,7 +2944,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
 	if (rdev->supply &&
 	    regulator_ops_is_valid(rdev->supply->rdev,
 				   REGULATOR_CHANGE_VOLTAGE) &&
-	    (rdev->desc->min_dropout_uV || !rdev->desc->ops->get_voltage)) {
+	    (rdev->desc->min_dropout_uV || !(rdev->desc->ops->get_voltage ||
+					   rdev->desc->ops->get_voltage_sel))) {
 		int current_supply_uV;
 		int selector;
 
@@ -4311,41 +4318,31 @@ void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data)
 EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
 
 #ifdef CONFIG_DEBUG_FS
-static ssize_t supply_map_read_file(struct file *file, char __user *user_buf,
-				    size_t count, loff_t *ppos)
+static int supply_map_show(struct seq_file *sf, void *data)
 {
-	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	ssize_t len, ret = 0;
 	struct regulator_map *map;
 
-	if (!buf)
-		return -ENOMEM;
-
 	list_for_each_entry(map, &regulator_map_list, list) {
-		len = snprintf(buf + ret, PAGE_SIZE - ret,
-			       "%s -> %s.%s\n",
-			       rdev_get_name(map->regulator), map->dev_name,
-			       map->supply);
-		if (len >= 0)
-			ret += len;
-		if (ret > PAGE_SIZE) {
-			ret = PAGE_SIZE;
-			break;
-		}
+		seq_printf(sf, "%s -> %s.%s\n",
+				rdev_get_name(map->regulator), map->dev_name,
+				map->supply);
 	}
 
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	return 0;
+}
 
-	kfree(buf);
-
-	return ret;
+static int supply_map_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, supply_map_show, inode->i_private);
 }
 #endif
 
 static const struct file_operations supply_map_fops = {
 #ifdef CONFIG_DEBUG_FS
-	.read = supply_map_read_file,
-	.llseek = default_llseek,
+	.open = supply_map_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 #endif
 };
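The core.c hunk above replaces a hand-rolled PAGE_SIZE snprintf() buffer with the seq_file single_open() pattern, which takes care of buffering, offsets and partial reads. A minimal sketch of the same pattern for a hypothetical debugfs file (the example_* names are not part of the regulator core):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *sf, void *data)
{
	/* Emit as many lines as needed; seq_file grows the buffer itself. */
	seq_puts(sf, "hello from debugfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};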
 
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index 0638c8b..34a70d9 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -1,6 +1,6 @@
 /*
- * da9062-regulator.c - REGULATOR device driver for DA9062
- * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ * Regulator device driver for DA9061 and DA9062.
+ * Copyright (C) 2015-2017  Dialog Semiconductor
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -28,6 +28,17 @@
 
 /* Regulator IDs */
 enum {
+	DA9061_ID_BUCK1,
+	DA9061_ID_BUCK2,
+	DA9061_ID_BUCK3,
+	DA9061_ID_LDO1,
+	DA9061_ID_LDO2,
+	DA9061_ID_LDO3,
+	DA9061_ID_LDO4,
+	DA9061_MAX_REGULATORS,
+};
+
+enum {
 	DA9062_ID_BUCK1,
 	DA9062_ID_BUCK2,
 	DA9062_ID_BUCK3,
@@ -88,15 +99,21 @@ enum {
 
 /* Regulator operations */
 
-/* Current limits array (in uA) BUCK1 and BUCK3.
-   Entry indexes corresponds to register values. */
+/* Current limits array (in uA)
+ * - DA9061_ID_[BUCK1|BUCK3]
+ * - DA9062_ID_[BUCK1|BUCK2|BUCK4]
+ * Entry indexes correspond to register values.
+ */
 static const int da9062_buck_a_limits[] = {
 	 500000,  600000,  700000,  800000,  900000, 1000000, 1100000, 1200000,
 	1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
 };
 
-/* Current limits array (in uA) for BUCK2.
-   Entry indexes corresponds to register values. */
+/* Current limits array (in uA)
+ * - DA9061_ID_BUCK2
+ * - DA9062_ID_BUCK3
+ * Entry indexes correspond to register values.
+ */
 static const int da9062_buck_b_limits[] = {
 	1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
 	2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
@@ -405,8 +422,254 @@ static const struct regulator_ops da9062_ldo_ops = {
 	.set_suspend_mode	= da9062_ldo_set_suspend_mode,
 };
 
-/* Regulator information */
-static const struct da9062_regulator_info local_regulator_info[] = {
+/* DA9061 Regulator information */
+static const struct da9062_regulator_info local_da9061_regulator_info[] = {
+	{
+		.desc.id = DA9061_ID_BUCK1,
+		.desc.name = "DA9061 BUCK1",
+		.desc.of_match = of_match_ptr("buck1"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_buck_ops,
+		.desc.min_uV = (300) * 1000,
+		.desc.uV_step = (10) * 1000,
+		.desc.n_voltages = ((1570) - (300))/(10) + 1,
+		.current_limits = da9062_buck_a_limits,
+		.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+		.desc.enable_reg = DA9062AA_BUCK1_CONT,
+		.desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VBUCK1_A,
+		.desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VBUCK1_A,
+			__builtin_ffs((int)DA9062AA_BUCK1_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK1_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VBUCK1_B,
+			__builtin_ffs((int)DA9062AA_BUCK1_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK1_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VBUCK1_B,
+		.mode = REG_FIELD(DA9062AA_BUCK1_CFG,
+			__builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+		.ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_C,
+			__builtin_ffs((int)DA9062AA_BUCK1_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK1_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_BUCK2,
+		.desc.name = "DA9061 BUCK2",
+		.desc.of_match = of_match_ptr("buck2"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_buck_ops,
+		.desc.min_uV = (800) * 1000,
+		.desc.uV_step = (20) * 1000,
+		.desc.n_voltages = ((3340) - (800))/(20) + 1,
+		.current_limits = da9062_buck_b_limits,
+		.n_current_limits = ARRAY_SIZE(da9062_buck_b_limits),
+		.desc.enable_reg = DA9062AA_BUCK3_CONT,
+		.desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VBUCK3_A,
+		.desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VBUCK3_A,
+			__builtin_ffs((int)DA9062AA_BUCK3_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK3_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VBUCK3_B,
+			__builtin_ffs((int)DA9062AA_BUCK3_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK3_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VBUCK3_B,
+		.mode = REG_FIELD(DA9062AA_BUCK3_CFG,
+			__builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+		.ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_A,
+			__builtin_ffs((int)DA9062AA_BUCK3_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK3_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_BUCK3,
+		.desc.name = "DA9061 BUCK3",
+		.desc.of_match = of_match_ptr("buck3"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_buck_ops,
+		.desc.min_uV = (530) * 1000,
+		.desc.uV_step = (10) * 1000,
+		.desc.n_voltages = ((1800) - (530))/(10) + 1,
+		.current_limits = da9062_buck_a_limits,
+		.n_current_limits = ARRAY_SIZE(da9062_buck_a_limits),
+		.desc.enable_reg = DA9062AA_BUCK4_CONT,
+		.desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VBUCK4_A,
+		.desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VBUCK4_A,
+			__builtin_ffs((int)DA9062AA_BUCK4_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK4_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VBUCK4_B,
+			__builtin_ffs((int)DA9062AA_BUCK4_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK4_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VBUCK4_B,
+		.mode = REG_FIELD(DA9062AA_BUCK4_CFG,
+			__builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+		.ilimit = REG_FIELD(DA9062AA_BUCK_ILIM_B,
+			__builtin_ffs((int)DA9062AA_BUCK4_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_BUCK4_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_LDO1,
+		.desc.name = "DA9061 LDO1",
+		.desc.of_match = of_match_ptr("ldo1"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_ldo_ops,
+		.desc.min_uV = (900) * 1000,
+		.desc.uV_step = (50) * 1000,
+		.desc.n_voltages = ((3600) - (900))/(50) + 1,
+		.desc.enable_reg = DA9062AA_LDO1_CONT,
+		.desc.enable_mask = DA9062AA_LDO1_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VLDO1_A,
+		.desc.vsel_mask = DA9062AA_VLDO1_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VLDO1_A,
+			__builtin_ffs((int)DA9062AA_LDO1_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO1_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VLDO1_B,
+			__builtin_ffs((int)DA9062AA_LDO1_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VLDO1_B,
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+		.oc_event = REG_FIELD(DA9062AA_STATUS_D,
+			__builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO1_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_LDO2,
+		.desc.name = "DA9061 LDO2",
+		.desc.of_match = of_match_ptr("ldo2"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_ldo_ops,
+		.desc.min_uV = (900) * 1000,
+		.desc.uV_step = (50) * 1000,
+		.desc.n_voltages = ((3600) - (600))/(50) + 1,
+		.desc.enable_reg = DA9062AA_LDO2_CONT,
+		.desc.enable_mask = DA9062AA_LDO2_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VLDO2_A,
+		.desc.vsel_mask = DA9062AA_VLDO2_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VLDO2_A,
+			__builtin_ffs((int)DA9062AA_LDO2_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO2_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VLDO2_B,
+			__builtin_ffs((int)DA9062AA_LDO2_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VLDO2_B,
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+		.oc_event = REG_FIELD(DA9062AA_STATUS_D,
+			__builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO2_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_LDO3,
+		.desc.name = "DA9061 LDO3",
+		.desc.of_match = of_match_ptr("ldo3"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_ldo_ops,
+		.desc.min_uV = (900) * 1000,
+		.desc.uV_step = (50) * 1000,
+		.desc.n_voltages = ((3600) - (900))/(50) + 1,
+		.desc.enable_reg = DA9062AA_LDO3_CONT,
+		.desc.enable_mask = DA9062AA_LDO3_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VLDO3_A,
+		.desc.vsel_mask = DA9062AA_VLDO3_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VLDO3_A,
+			__builtin_ffs((int)DA9062AA_LDO3_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO3_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VLDO3_B,
+			__builtin_ffs((int)DA9062AA_LDO3_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VLDO3_B,
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+		.oc_event = REG_FIELD(DA9062AA_STATUS_D,
+			__builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO3_ILIM_MASK)) - 1),
+	},
+	{
+		.desc.id = DA9061_ID_LDO4,
+		.desc.name = "DA9061 LDO4",
+		.desc.of_match = of_match_ptr("ldo4"),
+		.desc.regulators_node = of_match_ptr("regulators"),
+		.desc.ops = &da9062_ldo_ops,
+		.desc.min_uV = (900) * 1000,
+		.desc.uV_step = (50) * 1000,
+		.desc.n_voltages = ((3600) - (900))/(50) + 1,
+		.desc.enable_reg = DA9062AA_LDO4_CONT,
+		.desc.enable_mask = DA9062AA_LDO4_EN_MASK,
+		.desc.vsel_reg = DA9062AA_VLDO4_A,
+		.desc.vsel_mask = DA9062AA_VLDO4_A_MASK,
+		.desc.linear_min_sel = 0,
+		.sleep = REG_FIELD(DA9062AA_VLDO4_A,
+			__builtin_ffs((int)DA9062AA_LDO4_SL_A_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO4_SL_A_MASK)) - 1),
+		.suspend_sleep = REG_FIELD(DA9062AA_VLDO4_B,
+			__builtin_ffs((int)DA9062AA_LDO4_SL_B_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
+		.suspend_vsel_reg = DA9062AA_VLDO4_B,
+		.suspend = REG_FIELD(DA9062AA_DVC_1,
+			__builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+		.oc_event = REG_FIELD(DA9062AA_STATUS_D,
+			__builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
+			sizeof(unsigned int) * 8 -
+			__builtin_clz((DA9062AA_LDO4_ILIM_MASK)) - 1),
+	},
+};
+
+/* DA9062 Regulator information */
+static const struct da9062_regulator_info local_da9062_regulator_info[] = {
 	{
 		.desc.id = DA9062_ID_BUCK1,
 		.desc.name = "DA9062 BUCK1",
@@ -727,17 +990,33 @@ static int da9062_regulator_probe(struct platform_device *pdev)
 	struct da9062_regulators *regulators;
 	struct da9062_regulator *regl;
 	struct regulator_config config = { };
+	const struct da9062_regulator_info *rinfo;
 	int irq, n, ret;
 	size_t size;
+	int max_regulators;
+
+	switch (chip->chip_type) {
+	case COMPAT_TYPE_DA9061:
+		max_regulators = DA9061_MAX_REGULATORS;
+		rinfo = local_da9061_regulator_info;
+		break;
+	case COMPAT_TYPE_DA9062:
+		max_regulators = DA9062_MAX_REGULATORS;
+		rinfo = local_da9062_regulator_info;
+		break;
+	default:
+		dev_err(chip->dev, "Unrecognised chip type\n");
+		return -ENODEV;
+	}
 
 	/* Allocate memory required by usable regulators */
 	size = sizeof(struct da9062_regulators) +
-		DA9062_MAX_REGULATORS * sizeof(struct da9062_regulator);
+		max_regulators * sizeof(struct da9062_regulator);
 	regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
 	if (!regulators)
 		return -ENOMEM;
 
-	regulators->n_regulators = DA9062_MAX_REGULATORS;
+	regulators->n_regulators = max_regulators;
 	platform_set_drvdata(pdev, regulators);
 
 	n = 0;
@@ -745,7 +1024,7 @@ static int da9062_regulator_probe(struct platform_device *pdev)
 		/* Initialise regulator structure */
 		regl = &regulators->regulator[n];
 		regl->hw = chip;
-		regl->info = &local_regulator_info[n];
+		regl->info = &rinfo[n];
 		regl->desc = regl->info->desc;
 		regl->desc.type = REGULATOR_VOLTAGE;
 		regl->desc.owner = THIS_MODULE;
@@ -836,6 +1115,6 @@ module_exit(da9062_regulator_cleanup);
 
 /* Module information */
 MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("REGULATOR device driver for Dialog DA9062");
+MODULE_DESCRIPTION("REGULATOR device driver for Dialog DA9062 and DA9061");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:da9062-regulators");
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 62c5f54..259c3a8 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -621,7 +621,14 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct platform_device_id hi6421_regulator_table[] = {
+	{ .name = "hi6421-regulator" },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, hi6421_regulator_table);
+
 static struct platform_driver hi6421_regulator_driver = {
+	.id_table = hi6421_regulator_table,
 	.driver = {
 		.name	= "hi6421-regulator",
 	},
diff --git a/drivers/regulator/hi6421v530-regulator.c b/drivers/regulator/hi6421v530-regulator.c
new file mode 100644
index 0000000..c09bc71
--- /dev/null
+++ b/drivers/regulator/hi6421v530-regulator.c
@@ -0,0 +1,214 @@
+/*
+ * Device driver for regulators in Hi6421V530 IC
+ *
+ * Copyright (c) <2017> HiSilicon Technologies Co., Ltd.
+ *              http://www.hisilicon.com
+ * Copyright (c) <2017> Linaro Ltd.
+ *              http://www.linaro.org
+ *
+ * Author: Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>
+ *         Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/hi6421-pmic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+/*
+ * struct hi6421v530_regulator_info - hi6421v530 regulator information
+ * @desc: regulator description
+ * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep
+ * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only
+ */
+struct hi6421v530_regulator_info {
+	struct regulator_desc rdesc;
+	u8 mode_mask;
+	u32 eco_microamp;
+};
+
+/* HI6421v530 regulators */
+enum hi6421v530_regulator_id {
+	HI6421V530_LDO3,
+	HI6421V530_LDO9,
+	HI6421V530_LDO11,
+	HI6421V530_LDO15,
+	HI6421V530_LDO16,
+};
+
+static const unsigned int ldo_3_voltages[] = {
+	1800000, 1825000, 1850000, 1875000,
+	1900000, 1925000, 1950000, 1975000,
+	2000000, 2025000, 2050000, 2075000,
+	2100000, 2125000, 2150000, 2200000,
+};
+
+static const unsigned int ldo_9_11_voltages[] = {
+	1750000, 1800000, 1825000, 2800000,
+	2850000, 2950000, 3000000, 3300000,
+};
+
+static const unsigned int ldo_15_16_voltages[] = {
+	1750000, 1800000, 2400000, 2600000,
+	2700000, 2850000, 2950000, 3000000,
+};
+
+static const struct regulator_ops hi6421v530_ldo_ops;
+
+#define HI6421V530_LDO_ENABLE_TIME (350)
+
+/*
+ * _ID - LDO id name string
+ * v_table - voltage table
+ * vreg - voltage select register
+ * vmask - voltage select mask
+ * ereg - enable register
+ * emask - enable mask
+ * odelay - off/on delay time in uS
+ * ecomask - eco mode mask
+ * ecoamp - eco mode load upper limit in uA
+ */
+#define HI6421V530_LDO(_ID, v_table, vreg, vmask, ereg, emask,		\
+		   odelay, ecomask, ecoamp) {				\
+	.rdesc = {							\
+		.name		 = #_ID,				\
+		.of_match        = of_match_ptr(#_ID),			\
+		.regulators_node = of_match_ptr("regulators"),		\
+		.ops		 = &hi6421v530_ldo_ops,			\
+		.type		 = REGULATOR_VOLTAGE,			\
+		.id		 = HI6421V530_##_ID,			\
+		.owner		 = THIS_MODULE,				\
+		.n_voltages	 = ARRAY_SIZE(v_table),			\
+		.volt_table	 = v_table,				\
+		.vsel_reg	 = HI6421_REG_TO_BUS_ADDR(vreg),	\
+		.vsel_mask	 = vmask,				\
+		.enable_reg	 = HI6421_REG_TO_BUS_ADDR(ereg),	\
+		.enable_mask	 = emask,				\
+		.enable_time	 = HI6421V530_LDO_ENABLE_TIME,		\
+		.off_on_delay	 = odelay,				\
+	},								\
+	.mode_mask	= ecomask,					\
+	.eco_microamp	= ecoamp,					\
+}
+
+/* HI6421V530 regulator information */
+
+static struct hi6421v530_regulator_info hi6421v530_regulator_info[] = {
+	HI6421V530_LDO(LDO3, ldo_3_voltages, 0x061, 0xf, 0x060, 0x2,
+		   20000, 0x6, 8000),
+	HI6421V530_LDO(LDO9, ldo_9_11_voltages, 0x06b, 0x7, 0x06a, 0x2,
+		   40000, 0x6, 8000),
+	HI6421V530_LDO(LDO11, ldo_9_11_voltages, 0x06f, 0x7, 0x06e, 0x2,
+		   40000, 0x6, 8000),
+	HI6421V530_LDO(LDO15, ldo_15_16_voltages, 0x077, 0x7, 0x076, 0x2,
+		   40000, 0x6, 8000),
+	HI6421V530_LDO(LDO16, ldo_15_16_voltages, 0x079, 0x7, 0x078, 0x2,
+		   40000, 0x6, 8000),
+};
+
+static unsigned int hi6421v530_regulator_ldo_get_mode(
+					struct regulator_dev *rdev)
+{
+	struct hi6421v530_regulator_info *info;
+	unsigned int reg_val;
+
+	info = rdev_get_drvdata(rdev);
+	regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
+
+	if (reg_val & (info->mode_mask))
+		return REGULATOR_MODE_IDLE;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int hi6421v530_regulator_ldo_set_mode(struct regulator_dev *rdev,
+						unsigned int mode)
+{
+	struct hi6421v530_regulator_info *info;
+	unsigned int new_mode;
+
+	info = rdev_get_drvdata(rdev);
+	switch (mode) {
+	case REGULATOR_MODE_NORMAL:
+		new_mode = 0;
+		break;
+	case REGULATOR_MODE_IDLE:
+		new_mode = info->mode_mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+			   info->mode_mask, new_mode);
+
+	return 0;
+}
+
+
+static const struct regulator_ops hi6421v530_ldo_ops = {
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.list_voltage = regulator_list_voltage_table,
+	.map_voltage = regulator_map_voltage_ascend,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_mode = hi6421v530_regulator_ldo_get_mode,
+	.set_mode = hi6421v530_regulator_ldo_set_mode,
+};
+
+static int hi6421v530_regulator_probe(struct platform_device *pdev)
+{
+	struct hi6421_pmic *pmic;
+	struct regulator_dev *rdev;
+	struct regulator_config config = { };
+	unsigned int i;
+
+	pmic = dev_get_drvdata(pdev->dev.parent);
+	if (!pmic) {
+		dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(hi6421v530_regulator_info); i++) {
+		config.dev = pdev->dev.parent;
+		config.regmap = pmic->regmap;
+		config.driver_data = &hi6421v530_regulator_info[i];
+
+		rdev = devm_regulator_register(&pdev->dev,
+				&hi6421v530_regulator_info[i].rdesc,
+				&config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "failed to register regulator %s\n",
+				hi6421v530_regulator_info[i].rdesc.name);
+			return PTR_ERR(rdev);
+		}
+	}
+	return 0;
+}
+
+static const struct platform_device_id hi6421v530_regulator_table[] = {
+	{ .name = "hi6421v530-regulator" },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, hi6421v530_regulator_table);
+
+static struct platform_driver hi6421v530_regulator_driver = {
+	.id_table = hi6421v530_regulator_table,
+	.driver = {
+		.name	= "hi6421v530-regulator",
+	},
+	.probe	= hi6421v530_regulator_probe,
+};
+module_platform_driver(hi6421v530_regulator_driver);
+
+MODULE_AUTHOR("Wang Xiaoyin <hw.wangxiaoyin@hisilicon.com>");
+MODULE_DESCRIPTION("Hi6421v530 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index db34e1d..244822b 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -99,7 +99,7 @@ static int lp8755_buck_enable_time(struct regulator_dev *rdev)
 
 	ret = lp8755_read(pchip, 0x12 + id, &regval);
 	if (ret < 0) {
-		dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+		dev_err(pchip->dev, "i2c access error %s\n", __func__);
 		return ret;
 	}
 	return (regval & 0xff) * 100;
@@ -144,7 +144,7 @@ static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
 		goto err_i2c;
 	return ret;
 err_i2c:
-	dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+	dev_err(pchip->dev, "i2c access error %s\n", __func__);
 	return ret;
 }
 
@@ -175,7 +175,7 @@ static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
 	return REGULATOR_MODE_NORMAL;
 
 err_i2c:
-	dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+	dev_err(pchip->dev, "i2c access error %s\n", __func__);
 	return 0;
 }
 
@@ -223,7 +223,7 @@ static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
 		goto err_i2c;
 	return ret;
 err_i2c:
-	dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+	dev_err(pchip->dev, "i2c access error %s\n", __func__);
 	return ret;
 }
 
@@ -295,7 +295,7 @@ static int lp8755_init_data(struct lp8755_chip *pchip)
 	return ret;
 
 out_i2c_error:
-	dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+	dev_err(pchip->dev, "i2c access error %s\n", __func__);
 	return ret;
 }
 
@@ -404,7 +404,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 
 err_i2c:
-	dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+	dev_err(pchip->dev, "i2c access error %s\n", __func__);
 	return IRQ_NONE;
 }
 
@@ -420,7 +420,7 @@ static int lp8755_int_config(struct lp8755_chip *pchip)
 
 	ret = lp8755_read(pchip, 0x0F, &regval);
 	if (ret < 0) {
-		dev_err(pchip->dev, "i2c acceess error %s\n", __func__);
+		dev_err(pchip->dev, "i2c access error %s\n", __func__);
 		return ret;
 	}
 
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
new file mode 100644
index 0000000..cfdbe29
--- /dev/null
+++ b/drivers/regulator/lp87565-regulator.c
@@ -0,0 +1,236 @@
+/*
+ * Regulator driver for LP87565 PMIC
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp87565.h>
+
+#define LP87565_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
+			 _delay, _lr, _cr)				\
+	[_id] = {							\
+		.desc = {						\
+			.name			= _name,		\
+			.supply_name		= _of "-in",		\
+			.id			= _id,			\
+			.of_match		= of_match_ptr(_of),	\
+			.regulators_node	= of_match_ptr("regulators"),\
+			.ops			= &_ops,		\
+			.n_voltages		= _n,			\
+			.type			= REGULATOR_VOLTAGE,	\
+			.owner			= THIS_MODULE,		\
+			.vsel_reg		= _vr,			\
+			.vsel_mask		= _vm,			\
+			.enable_reg		= _er,			\
+			.enable_mask		= _em,			\
+			.ramp_delay		= _delay,		\
+			.linear_ranges		= _lr,			\
+			.n_linear_ranges	= ARRAY_SIZE(_lr),	\
+		},							\
+		.ctrl2_reg = _cr,					\
+	}
+
+struct lp87565_regulator {
+	struct regulator_desc desc;
+	unsigned int ctrl2_reg;
+};
+
+static const struct lp87565_regulator regulators[];
+
+static const struct regulator_linear_range buck0_1_2_3_ranges[] = {
+	REGULATOR_LINEAR_RANGE(600000, 0xA, 0x17, 10000),
+	REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
+	REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
+};
+
+static unsigned int lp87565_buck_ramp_delay[] = {
+	30000, 15000, 10000, 7500, 3800, 1900, 940, 470
+};
+
+/* LP87565 BUCK current limit */
+static const unsigned int lp87565_buck_uA[] = {
+	1500000, 2000000, 2500000, 3000000, 3500000, 4000000, 4500000, 5000000,
+};
+
+static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
+				       int ramp_delay)
+{
+	int id = rdev_get_id(rdev);
+	struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
+	unsigned int reg;
+	int ret;
+
+	if (ramp_delay <= 470)
+		reg = 7;
+	else if (ramp_delay <= 940)
+		reg = 6;
+	else if (ramp_delay <= 1900)
+		reg = 5;
+	else if (ramp_delay <= 3800)
+		reg = 4;
+	else if (ramp_delay <= 7500)
+		reg = 3;
+	else if (ramp_delay <= 10000)
+		reg = 2;
+	else if (ramp_delay <= 15000)
+		reg = 1;
+	else
+		reg = 0;
+
+	ret = regmap_update_bits(lp87565->regmap, regulators[id].ctrl2_reg,
+				 LP87565_BUCK_CTRL_2_SLEW_RATE,
+				 reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
+	if (ret) {
+		dev_err(lp87565->dev, "SLEW RATE write failed: %d\n", ret);
+		return ret;
+	}
+
+	rdev->constraints->ramp_delay = lp87565_buck_ramp_delay[reg];
+
+	return 0;
+}
+
+static int lp87565_buck_set_current_limit(struct regulator_dev *rdev,
+					  int min_uA, int max_uA)
+{
+	int id = rdev_get_id(rdev);
+	struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
+	int i;
+
+	for (i = ARRAY_SIZE(lp87565_buck_uA) - 1; i >= 0; i--) {
+		if (lp87565_buck_uA[i] >= min_uA &&
+		    lp87565_buck_uA[i] <= max_uA)
+			return regmap_update_bits(lp87565->regmap,
+						  regulators[id].ctrl2_reg,
+						  LP87565_BUCK_CTRL_2_ILIM,
+						  i << __ffs(LP87565_BUCK_CTRL_2_ILIM));
+	}
+
+	return -EINVAL;
+}
+
+static int lp87565_buck_get_current_limit(struct regulator_dev *rdev)
+{
+	int id = rdev_get_id(rdev);
+	struct lp87565 *lp87565 = rdev_get_drvdata(rdev);
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(lp87565->regmap, regulators[id].ctrl2_reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val & LP87565_BUCK_CTRL_2_ILIM) >>
+	       __ffs(LP87565_BUCK_CTRL_2_ILIM);
+
+	return (val < ARRAY_SIZE(lp87565_buck_uA)) ?
+			lp87565_buck_uA[val] : -EINVAL;
+}
+
+/* Operations permitted on BUCK0, BUCK1 */
+static struct regulator_ops lp87565_buck_ops = {
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.list_voltage		= regulator_list_voltage_linear_range,
+	.map_voltage		= regulator_map_voltage_linear_range,
+	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
+	.set_ramp_delay		= lp87565_buck_set_ramp_delay,
+	.set_current_limit	= lp87565_buck_set_current_limit,
+	.get_current_limit	= lp87565_buck_get_current_limit,
+};
+
+static const struct lp87565_regulator regulators[] = {
+	LP87565_REGULATOR("BUCK0", LP87565_BUCK_0, "buck0", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK0_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
+	LP87565_REGULATOR("BUCK1", LP87565_BUCK_1, "buck1", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK1_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK1_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK1_CTRL_2),
+	LP87565_REGULATOR("BUCK2", LP87565_BUCK_2, "buck2", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK2_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
+	LP87565_REGULATOR("BUCK3", LP87565_BUCK_3, "buck3", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK3_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK3_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK3_CTRL_2),
+	LP87565_REGULATOR("BUCK10", LP87565_BUCK_10, "buck10", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK0_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK0_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK0_CTRL_2),
+	LP87565_REGULATOR("BUCK23", LP87565_BUCK_23, "buck23", lp87565_buck_ops,
+			  256, LP87565_REG_BUCK2_VOUT, LP87565_BUCK_VSET,
+			  LP87565_REG_BUCK2_CTRL_1,
+			  LP87565_BUCK_CTRL_1_EN, 3800,
+			  buck0_1_2_3_ranges, LP87565_REG_BUCK2_CTRL_2),
+};
+
+static int lp87565_regulator_probe(struct platform_device *pdev)
+{
+	struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	int i, min_idx = LP87565_BUCK_1, max_idx = LP87565_BUCK_3;
+
+	platform_set_drvdata(pdev, lp87565);
+
+	config.dev = &pdev->dev;
+	config.dev->of_node = lp87565->dev->of_node;
+	config.driver_data = lp87565;
+	config.regmap = lp87565->regmap;
+
+	if (lp87565->dev_type == LP87565_DEVICE_TYPE_LP87565_Q1) {
+		min_idx = LP87565_BUCK_10;
+		max_idx = LP87565_BUCK_23;
+	}
+
+	for (i = min_idx; i <= max_idx; i++) {
+		rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
+					       &config);
+		if (IS_ERR(rdev)) {
+			dev_err(lp87565->dev, "failed to register %s regulator\n",
+				pdev->name);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id lp87565_regulator_id_table[] = {
+	{ "lp87565-regulator", },
+	{ "lp87565-q1-regulator", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp87565_regulator_id_table);
+
+static struct platform_driver lp87565_regulator_driver = {
+	.driver = {
+		.name = "lp87565-pmic",
+	},
+	.probe = lp87565_regulator_probe,
+	.id_table = lp87565_regulator_id_table,
+};
+module_platform_driver(lp87565_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP87565 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index efabc0e..559b9ac 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -428,12 +428,9 @@ static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
 	if (max_uV < 4000000 || min_uV > 4350000)
 		return -EINVAL;
 
-	if (min_uV <= 4000000) {
-		if (max_uV >= 4000000)
-			return -EINVAL;
-		else
-			val = 0x1;
-	} else if (min_uV <= 4200000 && max_uV >= 4200000)
+	if (min_uV <= 4000000)
+		val = 0x1;
+	else if (min_uV <= 4200000 && max_uV >= 4200000)
 		val = 0x0;
 	else {
 		lb = (min_uV - 4000001) / 20000 + 2;
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 09d677d..96bf754 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -90,6 +90,25 @@ static void of_get_regulation_constraints(struct device_node *np,
 	if (!ret)
 		constraints->settling_time = pval;
 
+	ret = of_property_read_u32(np, "regulator-settling-time-up-us", &pval);
+	if (!ret)
+		constraints->settling_time_up = pval;
+	if (constraints->settling_time_up && constraints->settling_time) {
+		pr_warn("%s: ambiguous configuration for settling time, ignoring 'regulator-settling-time-up-us'\n",
+			np->name);
+		constraints->settling_time_up = 0;
+	}
+
+	ret = of_property_read_u32(np, "regulator-settling-time-down-us",
+				   &pval);
+	if (!ret)
+		constraints->settling_time_down = pval;
+	if (constraints->settling_time_down && constraints->settling_time) {
+		pr_warn("%s: ambiguous configuration for settling time, ignoring 'regulator-settling-time-down-us'\n",
+			np->name);
+		constraints->settling_time_down = 0;
+	}
+
 	ret = of_property_read_u32(np, "regulator-enable-ramp-delay", &pval);
 	if (!ret)
 		constraints->enable_time = pval;
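These two new properties feed the asymmetric settling-time support added to _regulator_set_voltage_time() earlier in the series: the symmetric regulator-settling-time-us always takes precedence (the parser above zeroes the directional values and warns when both are given), and each per-direction value is only consulted for a voltage change in the matching direction. A condensed sketch of that precedence, using a simplified stand-in for the constraints structure:

/* Simplified stand-in, not the real struct regulation_constraints. */
struct settle_times {
	unsigned int time;	/* regulator-settling-time-us */
	unsigned int up;	/* regulator-settling-time-up-us */
	unsigned int down;	/* regulator-settling-time-down-us */
};

static int settle_delay_us(const struct settle_times *c, int old_uV, int new_uV)
{
	if (c->time)
		return c->time;
	if (c->up && new_uV > old_uV)
		return c->up;
	if (c->down && new_uV < old_uV)
		return c->down;
	return 0;	/* fall back to the ramp_delay-based estimate */
}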
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 31ae5ee..bb5ab7d 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -264,6 +264,13 @@ static struct palmas_regs_info tps65917_regs_info[] = {
 		.sleep_id	= TPS65917_EXTERNAL_REQSTR_ID_SMPS5,
 	},
 	{
+		.name		= "SMPS12",
+		.sname		= "smps1-in",
+		.vsel_addr	= TPS65917_SMPS1_VOLTAGE,
+		.ctrl_addr	= TPS65917_SMPS1_CTRL,
+		.sleep_id	= TPS65917_EXTERNAL_REQSTR_ID_SMPS12,
+	},
+	{
 		.name		= "LDO1",
 		.sname		= "ldo1-in",
 		.vsel_addr	= TPS65917_LDO1_VOLTAGE,
@@ -367,6 +374,7 @@ static struct palmas_sleep_requestor_info tps65917_sleep_req_info[] = {
 	EXTERNAL_REQUESTOR_TPS65917(SMPS3, 1, 2),
 	EXTERNAL_REQUESTOR_TPS65917(SMPS4, 1, 3),
 	EXTERNAL_REQUESTOR_TPS65917(SMPS5, 1, 4),
+	EXTERNAL_REQUESTOR_TPS65917(SMPS12, 1, 5),
 	EXTERNAL_REQUESTOR_TPS65917(LDO1, 2, 0),
 	EXTERNAL_REQUESTOR_TPS65917(LDO2, 2, 1),
 	EXTERNAL_REQUESTOR_TPS65917(LDO3, 2, 2),
@@ -1305,7 +1313,8 @@ static int tps65917_smps_registration(struct palmas_pmic *pmic,
 		 */
 		desc = &pmic->desc[id];
 		desc->n_linear_ranges = 3;
-		if ((id == TPS65917_REG_SMPS2) && pmic->smps12)
+		if ((id == TPS65917_REG_SMPS2 || id == TPS65917_REG_SMPS1) &&
+		    pmic->smps12)
 			continue;
 
 		/* Initialise sleep/init values from platform data */
@@ -1427,6 +1436,7 @@ static struct of_regulator_match tps65917_matches[] = {
 	{ .name = "smps3", },
 	{ .name = "smps4", },
 	{ .name = "smps5", },
+	{ .name = "smps12",},
 	{ .name = "ldo1", },
 	{ .name = "ldo2", },
 	{ .name = "ldo3", },
@@ -1455,7 +1465,7 @@ static struct palmas_pmic_driver_data palmas_ddata = {
 
 static struct palmas_pmic_driver_data tps65917_ddata = {
 	.smps_start = TPS65917_REG_SMPS1,
-	.smps_end = TPS65917_REG_SMPS5,
+	.smps_end = TPS65917_REG_SMPS12,
 	.ldo_begin = TPS65917_REG_LDO1,
 	.ldo_end = TPS65917_REG_LDO5,
 	.max_reg = TPS65917_NUM_REGS,
@@ -1491,7 +1501,7 @@ static int palmas_dt_to_pdata(struct device *dev,
 	}
 
 	for (idx = 0; idx < ddata->max_reg; idx++) {
-		static struct of_regulator_match *match;
+		struct of_regulator_match *match;
 		struct palmas_reg_init *rinit;
 		struct device_node *np;
 
@@ -1643,8 +1653,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)
+	if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN) {
 		pmic->smps123 = 1;
+		pmic->smps12 = 1;
+	}
 
 	if (reg & PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN)
 		pmic->smps457 = 1;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 696116e..81672a5 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1107,6 +1107,7 @@ static int tps65910_probe(struct platform_device *pdev)
 
 	switch (tps65910_chip_id(tps65910)) {
 	case TPS65910:
+		BUILD_BUG_ON(TPS65910_NUM_REGS < ARRAY_SIZE(tps65910_regs));
 		pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
 		pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
 		pmic->ext_sleep_control = tps65910_ext_sleep_control;
@@ -1119,6 +1120,7 @@ static int tps65910_probe(struct platform_device *pdev)
 					DCDCCTRL_DCDCCKSYNC_MASK);
 		break;
 	case TPS65911:
+		BUILD_BUG_ON(TPS65910_NUM_REGS < ARRAY_SIZE(tps65911_regs));
 		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
 		pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
 		pmic->ext_sleep_control = tps65911_ext_sleep_control;
@@ -1144,8 +1146,7 @@ static int tps65910_probe(struct platform_device *pdev)
 	if (!pmic->rdev)
 		return -ENOMEM;
 
-	for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
-			i++, info++) {
+	for (i = 0; i < pmic->num_regulators; i++, info++) {
 		/* Register the regulators */
 		pmic->info[i] = info;
 
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index d21c07cc..608c071 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -34,6 +34,13 @@
 	help
 	  This enables the reset controller driver for Marvell Berlin SoCs.
 
+config RESET_GEMINI
+	bool "Gemini Reset Driver" if COMPILE_TEST
+	default ARCH_GEMINI
+	select MFD_SYSCON
+	help
+	  This enables the reset controller driver for Cortina Systems Gemini.
+
 config RESET_IMX7
 	bool "i.MX7 Reset Driver" if COMPILE_TEST
 	default SOC_IMX7D
@@ -80,7 +87,15 @@
 	help
 	  This enables the reset driver for Allwinner SoCs.
 
-config TI_SYSCON_RESET
+config RESET_TI_SCI
+	tristate "TI System Control Interface (TI-SCI) reset driver"
+	depends on TI_SCI_PROTOCOL
+	help
+	  This enables reset driver support over the TI System Control Interface
+	  available on some newer TI SoCs. If you wish to use reset resources
+	  managed by the TI System Controller, say Y here. Otherwise, say N.
+
+config RESET_TI_SYSCON
 	tristate "TI SYSCON Reset Driver"
 	depends on HAS_IOMEM
 	select MFD_SYSCON
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 02a74db..7081f9d 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
 obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_GEMINI) += reset-gemini.o
 obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
 obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
 obj-$(CONFIG_RESET_MESON) += reset-meson.o
@@ -13,7 +14,8 @@
 obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
 obj-$(CONFIG_RESET_STM32) += reset-stm32.o
 obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
-obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
+obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
 obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
 obj-$(CONFIG_RESET_ZX2967) += reset-zx2967.o
 obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index cd739d2..0090784 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/reset.h>
@@ -40,7 +41,7 @@ struct reset_control {
 	struct reset_controller_dev *rcdev;
 	struct list_head list;
 	unsigned int id;
-	unsigned int refcnt;
+	struct kref refcnt;
 	bool shared;
 	atomic_t deassert_count;
 	atomic_t triggered_count;
@@ -288,7 +289,7 @@ static struct reset_control *__reset_control_get_internal(
 			if (WARN_ON(!rstc->shared || !shared))
 				return ERR_PTR(-EBUSY);
 
-			rstc->refcnt++;
+			kref_get(&rstc->refcnt);
 			return rstc;
 		}
 	}
@@ -302,23 +303,30 @@ static struct reset_control *__reset_control_get_internal(
 	rstc->rcdev = rcdev;
 	list_add(&rstc->list, &rcdev->reset_control_head);
 	rstc->id = index;
-	rstc->refcnt = 1;
+	kref_init(&rstc->refcnt);
 	rstc->shared = shared;
 
 	return rstc;
 }
 
+static void __reset_control_release(struct kref *kref)
+{
+	struct reset_control *rstc = container_of(kref, struct reset_control,
+						  refcnt);
+
+	lockdep_assert_held(&reset_list_mutex);
+
+	module_put(rstc->rcdev->owner);
+
+	list_del(&rstc->list);
+	kfree(rstc);
+}
+
 static void __reset_control_put_internal(struct reset_control *rstc)
 {
 	lockdep_assert_held(&reset_list_mutex);
 
-	if (--rstc->refcnt)
-		return;
-
-	module_put(rstc->rcdev->owner);
-
-	list_del(&rstc->list);
-	kfree(rstc);
+	kref_put(&rstc->refcnt, __reset_control_release);
 }
 
 struct reset_control *__of_reset_control_get(struct device_node *node,
@@ -400,7 +408,6 @@ EXPORT_SYMBOL_GPL(__reset_control_get);
  * reset_control_put - free the reset controller
  * @rstc: reset controller
  */
-
 void reset_control_put(struct reset_control *rstc)
 {
 	if (IS_ERR_OR_NULL(rstc))
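The reset core now tracks reset_control lifetimes with a kref instead of an open-coded counter: kref_put() invokes the release callback only when the last reference goes away, which is why the module_put()/list_del()/kfree() sequence moved into __reset_control_release(). A minimal sketch of the idiom on a hypothetical object:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref refcnt;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->refcnt);	/* reference count starts at 1 */
	return f;
}

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, refcnt);

	kfree(f);			/* runs only on the final put */
}

static struct foo *foo_get(struct foo *f)
{
	kref_get(&f->refcnt);
	return f;
}

static void foo_put(struct foo *f)
{
	kref_put(&f->refcnt, foo_release);
}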
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 35ce53e..d5e5229 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-gemini.c b/drivers/reset/reset-gemini.c
new file mode 100644
index 0000000..a247899
--- /dev/null
+++ b/drivers/reset/reset-gemini.c
@@ -0,0 +1,110 @@
+/*
+ * Cortina Gemini Reset controller driver
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/reset/cortina,gemini-reset.h>
+
+/**
+ * struct gemini_reset - gemini reset controller
+ * @map: regmap to access the containing system controller
+ * @rcdev: reset controller device
+ */
+struct gemini_reset {
+	struct regmap *map;
+	struct reset_controller_dev rcdev;
+};
+
+#define GEMINI_GLOBAL_SOFT_RESET 0x0c
+
+#define to_gemini_reset(p) \
+	container_of((p), struct gemini_reset, rcdev)
+
+/*
+ * This is a self-deasserting reset controller.
+ */
+static int gemini_reset(struct reset_controller_dev *rcdev,
+			unsigned long id)
+{
+	struct gemini_reset *gr = to_gemini_reset(rcdev);
+
+	/* Manual says to always set BIT 30 (CPU1) to 1 */
+	return regmap_write(gr->map,
+			    GEMINI_GLOBAL_SOFT_RESET,
+			    BIT(GEMINI_RESET_CPU1) | BIT(id));
+}
+
+static int gemini_reset_status(struct reset_controller_dev *rcdev,
+			     unsigned long id)
+{
+	struct gemini_reset *gr = to_gemini_reset(rcdev);
+	u32 val;
+	int ret;
+
+	ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val);
+	if (ret)
+		return ret;
+
+	return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops gemini_reset_ops = {
+	.reset = gemini_reset,
+	.status = gemini_reset_status,
+};
+
+static int gemini_reset_probe(struct platform_device *pdev)
+{
+	struct gemini_reset *gr;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	gr = devm_kzalloc(dev, sizeof(*gr), GFP_KERNEL);
+	if (!gr)
+		return -ENOMEM;
+
+	gr->map = syscon_node_to_regmap(np);
+	if (IS_ERR(gr->map)) {
+		ret = PTR_ERR(gr->map);
+		dev_err(dev, "unable to get regmap (%d)\n", ret);
+		return ret;
+	}
+	gr->rcdev.owner = THIS_MODULE;
+	gr->rcdev.nr_resets = 32;
+	gr->rcdev.ops = &gemini_reset_ops;
+	gr->rcdev.of_node = pdev->dev.of_node;
+
+	ret = devm_reset_controller_register(&pdev->dev, &gr->rcdev);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "registered Gemini reset controller\n");
+	return 0;
+}
+
+static const struct of_device_id gemini_reset_dt_ids[] = {
+	{ .compatible = "cortina,gemini-syscon", },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver gemini_reset_driver = {
+	.probe	= gemini_reset_probe,
+	.driver = {
+		.name		= "gemini-reset",
+		.of_match_table	= gemini_reset_dt_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(gemini_reset_driver);
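Because the Gemini controller is self-deasserting, the driver only implements .reset and .status; consumers trigger a pulse rather than holding a reset line. A hypothetical consumer sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Only .reset is meaningful here; assert/deassert are not provided. */
	return reset_control_reset(rst);
}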
diff --git a/drivers/reset/reset-ti-sci.c b/drivers/reset/reset-ti-sci.c
new file mode 100644
index 0000000..bf68729
--- /dev/null
+++ b/drivers/reset/reset-ti-sci.c
@@ -0,0 +1,269 @@
+/*
+ * Texas Instrument's System Control Interface (TI-SCI) reset driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_reset_control - reset control structure
+ * @dev_id: SoC-specific device identifier
+ * @reset_mask: reset mask to use for toggling reset
+ * @lock: synchronize reset_mask read-modify-writes
+ */
+struct ti_sci_reset_control {
+	u32 dev_id;
+	u32 reset_mask;
+	struct mutex lock;
+};
+
+/**
+ * struct ti_sci_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @dev: reset controller device pointer
+ * @sci: TI SCI handle used for communication with system controller
+ * @idr: idr structure for mapping ids to reset control structures
+ */
+struct ti_sci_reset_data {
+	struct reset_controller_dev rcdev;
+	struct device *dev;
+	const struct ti_sci_handle *sci;
+	struct idr idr;
+};
+
+#define to_ti_sci_reset_data(p)	\
+	container_of((p), struct ti_sci_reset_data, rcdev)
+
+/**
+ * ti_sci_reset_set() - program a device's reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to toggle
+ * @assert: boolean flag to indicate assert or deassert
+ *
+ * This is a common internal function used to assert or deassert a device's
+ * reset using the TI SCI protocol. The device's reset is asserted if the
+ * @assert argument is true, or deasserted if @assert argument is false.
+ * The mechanism itself is a read-modify-write procedure: the current device
+ * reset register is read using a TI SCI device operation, the new value is
+ * set or cleared using the reset's mask, and the new reset value is written
+ * back using another TI SCI device operation.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_sci_reset_set(struct reset_controller_dev *rcdev,
+			    unsigned long id, bool assert)
+{
+	struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
+	const struct ti_sci_handle *sci = data->sci;
+	const struct ti_sci_dev_ops *dev_ops = &sci->ops.dev_ops;
+	struct ti_sci_reset_control *control;
+	u32 reset_state;
+	int ret;
+
+	control = idr_find(&data->idr, id);
+	if (!control)
+		return -EINVAL;
+
+	mutex_lock(&control->lock);
+
+	ret = dev_ops->get_device_resets(sci, control->dev_id, &reset_state);
+	if (ret)
+		goto out;
+
+	if (assert)
+		reset_state |= control->reset_mask;
+	else
+		reset_state &= ~control->reset_mask;
+
+	ret = dev_ops->set_device_resets(sci, control->dev_id, reset_state);
+out:
+	mutex_unlock(&control->lock);
+
+	return ret;
+}
+
+/**
+ * ti_sci_reset_assert() - assert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be asserted
+ *
+ * This function implements the reset driver op to assert a device's reset
+ * using the TI SCI protocol. This invokes the function ti_sci_reset_set()
+ * with the corresponding parameters as passed in, but with the @assert
+ * argument set to true for asserting the reset.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_sci_reset_assert(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	return ti_sci_reset_set(rcdev, id, true);
+}
+
+/**
+ * ti_sci_reset_deassert() - deassert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be deasserted
+ *
+ * This function implements the reset driver op to deassert a device's reset
+ * using the TI SCI protocol. This invokes the function ti_sci_reset_set()
+ * with the corresponding parameters as passed in, but with the @assert
+ * argument set to false for deasserting the reset.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_sci_reset_deassert(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	return ti_sci_reset_set(rcdev, id, false);
+}
+
+/**
+ * ti_sci_reset_status() - check device reset status
+ * @rcdev: reset controller entity
+ * @id: ID of reset to be checked
+ *
+ * This function implements the reset driver op to return the status of a
+ * device's reset using the TI SCI protocol. The reset register value is read
+ * by invoking the TI SCI device operation .get_device_resets(), and the
+ * status of the specific reset is extracted and returned using this reset's
+ * reset mask.
+ *
+ * Return: 0 if reset is deasserted, or a non-zero value if reset is asserted
+ */
+static int ti_sci_reset_status(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
+	const struct ti_sci_handle *sci = data->sci;
+	const struct ti_sci_dev_ops *dev_ops = &sci->ops.dev_ops;
+	struct ti_sci_reset_control *control;
+	u32 reset_state;
+	int ret;
+
+	control = idr_find(&data->idr, id);
+	if (!control)
+		return -EINVAL;
+
+	ret = dev_ops->get_device_resets(sci, control->dev_id, &reset_state);
+	if (ret)
+		return ret;
+
+	return reset_state & control->reset_mask;
+}
+
+static const struct reset_control_ops ti_sci_reset_ops = {
+	.assert		= ti_sci_reset_assert,
+	.deassert	= ti_sci_reset_deassert,
+	.status		= ti_sci_reset_status,
+};
+
+/**
+ * ti_sci_reset_of_xlate() - translate a set of OF arguments to a reset ID
+ * @rcdev: reset controller entity
+ * @reset_spec: OF reset argument specifier
+ *
+ * This function translates the reset argument specifier values defined in a
+ * reset consumer device node. It allocates a reset control structure for that
+ * device reset, which the driver then uses to perform any reset operations on
+ * that reset. An idr entry is allocated and mapped to the reset control
+ * structure; the driver uses this idr to look up resets.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_sci_reset_of_xlate(struct reset_controller_dev *rcdev,
+				 const struct of_phandle_args *reset_spec)
+{
+	struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
+	struct ti_sci_reset_control *control;
+
+	if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
+		return -EINVAL;
+
+	control = devm_kzalloc(data->dev, sizeof(*control), GFP_KERNEL);
+	if (!control)
+		return -ENOMEM;
+
+	control->dev_id = reset_spec->args[0];
+	control->reset_mask = reset_spec->args[1];
+	mutex_init(&control->lock);
+
+	return idr_alloc(&data->idr, control, 0, 0, GFP_KERNEL);
+}
+
+static const struct of_device_id ti_sci_reset_of_match[] = {
+	{ .compatible = "ti,sci-reset", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_reset_of_match);
+
+static int ti_sci_reset_probe(struct platform_device *pdev)
+{
+	struct ti_sci_reset_data *data;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->sci = devm_ti_sci_get_handle(&pdev->dev);
+	if (IS_ERR(data->sci))
+		return PTR_ERR(data->sci);
+
+	data->rcdev.ops = &ti_sci_reset_ops;
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.of_node = pdev->dev.of_node;
+	data->rcdev.of_reset_n_cells = 2;
+	data->rcdev.of_xlate = ti_sci_reset_of_xlate;
+	data->dev = &pdev->dev;
+	idr_init(&data->idr);
+
+	platform_set_drvdata(pdev, data);
+
+	return reset_controller_register(&data->rcdev);
+}
+
+static int ti_sci_reset_remove(struct platform_device *pdev)
+{
+	struct ti_sci_reset_data *data = platform_get_drvdata(pdev);
+
+	reset_controller_unregister(&data->rcdev);
+
+	idr_destroy(&data->idr);
+
+	return 0;
+}
+
+static struct platform_driver ti_sci_reset_driver = {
+	.probe = ti_sci_reset_probe,
+	.remove = ti_sci_reset_remove,
+	.driver = {
+		.name = "ti-sci-reset",
+		.of_match_table = ti_sci_reset_of_match,
+	},
+};
+module_platform_driver(ti_sci_reset_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI System Control Interface (TI SCI) Reset driver");
+MODULE_LICENSE("GPL v2");
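For context, the controller registered above is consumed through the generic reset framework using the two-cell specifier (device ID and reset mask) that ti_sci_reset_of_xlate() translates. A minimal, hypothetical consumer sketch follows; the phandle label, function names and error handling are illustrative, not part of this patch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/*
 * Hypothetical consumer: its node would carry something like
 *	resets = <&sci_reset <dev_id> <reset_mask>>;
 * which ti_sci_reset_of_xlate() maps to an idr-backed reset control.
 */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);	/* routed to ti_sci_reset_set(..., true) */
	if (ret)
		return ret;

	return reset_control_deassert(rst);	/* ti_sci_reset_set(..., false) */
}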
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index 9bd57a5..7e0f2aa 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -145,16 +145,14 @@ static int syscfg_reset_controller_register(struct device *dev,
 				const struct syscfg_reset_controller_data *data)
 {
 	struct syscfg_reset_controller *rc;
-	size_t size;
 	int i, err;
 
 	rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL);
 	if (!rc)
 		return -ENOMEM;
 
-	size = sizeof(struct syscfg_reset_channel) * data->nr_channels;
-
-	rc->channels = devm_kzalloc(dev, size, GFP_KERNEL);
+	rc->channels = devm_kcalloc(dev, data->nr_channels,
+				    sizeof(*rc->channels), GFP_KERNEL);
 	if (!rc->channels)
 		return -ENOMEM;
 
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 600f5f9..ad3d2a9 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -330,7 +330,8 @@ field##_show(struct device *dev,					\
 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);		\
 									\
 	return sprintf(buf, format_string, rpdev->path);		\
-}
+}									\
+static DEVICE_ATTR_RO(field);
 
 /* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
 rpmsg_show_attr(name, id.name, "%s\n");
@@ -345,15 +346,17 @@ static ssize_t modalias_show(struct device *dev,
 
 	return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
 }
+static DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute rpmsg_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(modalias),
-	__ATTR_RO(dst),
-	__ATTR_RO(src),
-	__ATTR_RO(announce),
-	__ATTR_NULL
+static struct attribute *rpmsg_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_dst.attr,
+	&dev_attr_src.attr,
+	&dev_attr_announce.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(rpmsg_dev);
 
 /* rpmsg devices and drivers are matched using the service name */
 static inline int rpmsg_id_match(const struct rpmsg_device *rpdev,
@@ -455,7 +458,7 @@ static int rpmsg_dev_remove(struct device *dev)
 static struct bus_type rpmsg_bus = {
 	.name		= "rpmsg",
 	.match		= rpmsg_dev_match,
-	.dev_attrs	= rpmsg_dev_attrs,
+	.dev_groups	= rpmsg_dev_groups,
 	.uevent		= rpmsg_uevent,
 	.probe		= rpmsg_dev_probe,
 	.remove		= rpmsg_dev_remove,
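The dev_attrs to dev_groups conversion above relies on the standard attribute-group helpers; a minimal sketch of that pattern under illustrative names (foo and example are not from this patch):

#include <linux/device.h>

/*
 * Illustrative only: DEVICE_ATTR_RO(foo) declares dev_attr_foo around a
 * foo_show() callback, and ATTRIBUTE_GROUPS(example) wraps example_attrs[]
 * into example_groups[], the form bus_type.dev_groups expects.
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "bar\n");
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);

static struct bus_type example_bus = {
	.name		= "example",
	.dev_groups	= example_groups,
};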
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 6b54f6c..8093111 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -709,7 +709,7 @@ static irqreturn_t dryice_irq(int irq, void *dev_id)
 		/*If the write wait queue is empty then there is no pending
 		  operations. It means the interrupt is for DryIce -Security.
 		  IRQ must be returned as none.*/
-		if (list_empty_careful(&imxdi->write_wait.task_list))
+		if (list_empty_careful(&imxdi->write_wait.head))
 			return rc;
 
 		/* DSR_WCF clears itself on DSR read */
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 0acb8c2..31f014b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -82,10 +82,3 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called scm_block.
-
-config SCM_BLOCK_CLUSTER_WRITE
-	def_bool y
-	prompt "SCM force cluster writes"
-	depends on SCM_BLOCK
-	help
-	  Force writes to Storage Class Memory (SCM) to be in done in clusters.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index c2f4e67..b64e2b3 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -19,7 +19,4 @@
 obj-$(CONFIG_DCSSBLK) += dcssblk.o
 
 scm_block-objs := scm_drv.o scm_blk.o
-ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-scm_block-objs += scm_blk_cluster.o
-endif
 obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 6fb3fd5..0f1fe4f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1965,8 +1965,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device,
 {
 	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
 
-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
-		/* dasd is being set offline. */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
+	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/*
+		 * dasd is being set offline, but this is not a safe
+		 * offline where we have to allow I/O
+		 */
 		return 1;
 	}
 	if (device->stopped) {
@@ -2672,7 +2676,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 	 */
 	if (basedev->state < DASD_STATE_READY) {
 		while ((req = blk_fetch_request(block->request_queue)))
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 		return;
 	}
 
@@ -2692,7 +2696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 				      "Rejecting write request %p",
 				      req);
 			blk_start_request(req);
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 			continue;
 		}
 		if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
@@ -2702,7 +2706,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 				      "Rejecting failfast request %p",
 				      req);
 			blk_start_request(req);
-			__blk_end_request_all(req, -ETIMEDOUT);
+			__blk_end_request_all(req, BLK_STS_TIMEOUT);
 			continue;
 		}
 		cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -2734,7 +2738,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 				      "on request %p",
 				      PTR_ERR(cqr), req);
 			blk_start_request(req);
-			__blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, BLK_STS_IOERR);
 			continue;
 		}
 		/*
@@ -2755,21 +2759,29 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
 	struct request *req;
 	int status;
-	int error = 0;
+	blk_status_t error = BLK_STS_OK;
 
 	req = (struct request *) cqr->callback_data;
 	dasd_profile_end(cqr->block, cqr, req);
+
 	status = cqr->block->base->discipline->free_cp(cqr, req);
 	if (status < 0)
-		error = status;
+		error = errno_to_blk_status(status);
 	else if (status == 0) {
-		if (cqr->intrc == -EPERM)
-			error = -EBADE;
-		else if (cqr->intrc == -ENOLINK ||
-			 cqr->intrc == -ETIMEDOUT)
-			error = cqr->intrc;
-		else
-			error = -EIO;
+		switch (cqr->intrc) {
+		case -EPERM:
+			error = BLK_STS_NEXUS;
+			break;
+		case -ENOLINK:
+			error = BLK_STS_TRANSPORT;
+			break;
+		case -ETIMEDOUT:
+			error = BLK_STS_TIMEOUT;
+			break;
+		default:
+			error = BLK_STS_IOERR;
+			break;
+		}
 	}
 	__blk_end_request_all(req, error);
 }
@@ -3190,7 +3202,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 
 	spin_lock_irq(&block->request_queue_lock);
 	while ((req = blk_fetch_request(block->request_queue)))
-		__blk_end_request_all(req, -EIO);
+		__blk_end_request_all(req, BLK_STS_IOERR);
 	spin_unlock_irq(&block->request_queue_lock);
 }
 
@@ -3562,57 +3574,69 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			else
 				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
 					dev_name(&cdev->dev));
-			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-			goto out_busy;
+			rc = -EBUSY;
+			goto out_err;
 		}
 	}
 
-	if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
-		/*
-		 * safe offline already running
-		 * could only be called by normal offline so safe_offline flag
-		 * needs to be removed to run normal offline and kill all I/O
-		 */
-		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
-			/* Already doing normal offline processing */
-			goto out_busy;
-		else
-			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-	} else {
-		if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
-			/* Already doing offline processing */
-			goto out_busy;
+	/*
+	 * Test if the offline processing is already running and exit if so.
+	 * If a safe offline is being processed, this can only be a normal
+	 * offline that should be allowed to overtake the safe offline and
+	 * cancel any I/O we no longer want to wait for.
+	 */
+	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
+				  &device->flags);
+		} else {
+			rc = -EBUSY;
+			goto out_err;
+		}
 	}
-
 	set_bit(DASD_FLAG_OFFLINE, &device->flags);
-	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	/*
-	 * if safe_offline called set safe_offline_running flag and
+	 * if safe_offline is called, set the safe_offline_running flag and
 	 * clear safe_offline so that a call to normal offline
 	 * can overrun safe_offline processing
 	 */
 	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
 	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+		/* need to unlock here to wait for outstanding I/O */
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		/*
 		 * If we want to set the device safe offline all IO operations
 		 * should be finished before continuing the offline process
 		 * so sync bdev first and then wait for our queues to become
 		 * empty
 		 */
-		/* sync blockdev and partitions */
 		if (device->block) {
 			rc = fsync_bdev(device->block->bdev);
 			if (rc != 0)
 				goto interrupted;
 		}
-		/* schedule device tasklet and wait for completion */
 		dasd_schedule_device_bh(device);
 		rc = wait_event_interruptible(shutdown_waitq,
 					      _wait_for_empty_queues(device));
 		if (rc != 0)
 			goto interrupted;
+
+		/*
+		 * Check if a normal offline process overtook the offline
+		 * processing; in this case simply do nothing besides returning
+		 * that we got interrupted. Otherwise mark safe offline as no
+		 * longer running and continue with normal offline.
+		 */
+		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+			rc = -ERESTARTSYS;
+			goto out_err;
+		}
+		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
 	}
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
@@ -3624,22 +3648,18 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	 */
 	if (block)
 		dasd_free_block(block);
+
 	return 0;
 
 interrupted:
 	/* interrupted by signal */
-	clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
 	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-	dasd_put_device(device);
-
-	return rc;
-
-out_busy:
+out_err:
 	dasd_put_device(device);
 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
-
-	return -EBUSY;
+	return rc;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
 
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 1164b51..7c73512 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -315,45 +315,58 @@ static int __init dasd_parse_range(const char *range)
 	char *features_str = NULL;
 	char *from_str = NULL;
 	char *to_str = NULL;
-	size_t len = strlen(range) + 1;
-	char tmp[len];
+	int rc = 0;
+	char *tmp;
 
-	strlcpy(tmp, range, len);
+	tmp = kstrdup(range, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
 
-	if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str))
-		goto out_err;
+	if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
+		rc = -EINVAL;
+		goto out;
+	}
 
-	if (dasd_busid(from_str, &from_id0, &from_id1, &from))
-		goto out_err;
+	if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
+		rc = -EINVAL;
+		goto out;
+	}
 
 	to = from;
 	to_id0 = from_id0;
 	to_id1 = from_id1;
 	if (to_str) {
-		if (dasd_busid(to_str, &to_id0, &to_id1, &to))
-			goto out_err;
+		if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
+			rc = -EINVAL;
+			goto out;
+		}
 		if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
 			pr_err("%s is not a valid device range\n", range);
-			goto out_err;
+			rc = -EINVAL;
+			goto out;
 		}
 	}
 
 	features = dasd_feature_list(features_str);
-	if (features < 0)
-		goto out_err;
+	if (features < 0) {
+		rc = -EINVAL;
+		goto out;
+	}
 	/* each device in dasd= parameter should be set initially online */
 	features |= DASD_FEATURE_INITIAL_ONLINE;
 	while (from <= to) {
 		sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
 		devmap = dasd_add_busid(bus_id, features);
-		if (IS_ERR(devmap))
-			return PTR_ERR(devmap);
+		if (IS_ERR(devmap)) {
+			rc = PTR_ERR(devmap);
+			goto out;
+		}
 	}
 
-	return 0;
+out:
+	kfree(tmp);
 
-out_err:
-	return -EINVAL;
+	return rc;
 }
 
 /*
@@ -735,13 +748,22 @@ static ssize_t
 dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct dasd_devmap *devmap;
-	int ro_flag;
+	struct dasd_device *device;
+	int ro_flag = 0;
 
 	devmap = dasd_find_busid(dev_name(dev));
-	if (!IS_ERR(devmap))
-		ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
-	else
-		ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
+	if (IS_ERR(devmap))
+		goto out;
+
+	ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
+
+	spin_lock(&dasd_devmap_lock);
+	device = devmap->device;
+	if (device)
+		ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	spin_unlock(&dasd_devmap_lock);
+
+out:
 	return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
 }
 
@@ -764,7 +786,7 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device))
-		return PTR_ERR(device);
+		return count;
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -928,11 +950,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct dasd_device *device;
+	unsigned long flags;
 	int rc;
 
-	device = dasd_device_from_cdev(cdev);
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device)) {
 		rc = PTR_ERR(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		goto out;
 	}
 
@@ -940,12 +965,14 @@ dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
 	    test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
 		/* Already doing offline processing */
 		dasd_put_device(device);
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		rc = -EBUSY;
 		goto out;
 	}
 
 	set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
 	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	rc = ccw_device_set_offline(cdev);
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 36e5280..06eb1de 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -845,7 +845,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long source_addr;
 	unsigned long bytes_done;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	bytes_done = 0;
 	dev_info = bio->bi_bdev->bd_disk->private_data;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 152de68..42018a2 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -13,6 +13,7 @@
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/genhd.h>
 #include <linux/slab.h>
 #include <linux/list.h>
@@ -42,7 +43,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 
 	free_page((unsigned long) scmrq->aob);
-	__scm_free_rq_cluster(scmrq);
 	kfree(scmrq->request);
 	kfree(aobrq);
 }
@@ -82,9 +82,6 @@ static int __scm_alloc_rq(void)
 	if (!scmrq->request)
 		goto free;
 
-	if (__scm_alloc_rq_cluster(scmrq))
-		goto free;
-
 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
 	list_add(&scmrq->list, &inactive_requests);
@@ -114,13 +111,13 @@ static struct scm_request *scm_request_fetch(void)
 {
 	struct scm_request *scmrq = NULL;
 
-	spin_lock(&list_lock);
+	spin_lock_irq(&list_lock);
 	if (list_empty(&inactive_requests))
 		goto out;
 	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
 	list_del(&scmrq->list);
 out:
-	spin_unlock(&list_lock);
+	spin_unlock_irq(&list_lock);
 	return scmrq;
 }
 
@@ -231,140 +228,133 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	aob->request.data = (u64) aobrq;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
-	scmrq->error = 0;
+	scmrq->error = BLK_STS_OK;
 	/* We don't use all msbs - place aidaws at the end of the aob page. */
 	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
-	scm_request_cluster_init(scmrq);
 }
 
-static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
+static void scm_request_requeue(struct scm_request *scmrq)
 {
-	if (atomic_read(&bdev->queued_reqs)) {
-		/* Queue restart is triggered by the next interrupt. */
-		return;
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_mq_requeue_request(scmrq->request[i], false);
+
+	atomic_dec(&bdev->queued_reqs);
+	scm_request_done(scmrq);
+	blk_mq_kick_requeue_list(bdev->rq);
+}
+
+static void scm_request_finish(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		if (scmrq->error)
+			blk_mq_end_request(scmrq->request[i], scmrq->error);
+		else
+			blk_mq_complete_request(scmrq->request[i]);
 	}
-	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
-}
-
-void scm_request_requeue(struct scm_request *scmrq)
-{
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	int i;
-
-	scm_release_cluster(scmrq);
-	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
-		blk_requeue_request(bdev->rq, scmrq->request[i]);
-
-	atomic_dec(&bdev->queued_reqs);
-	scm_request_done(scmrq);
-	scm_ensure_queue_restart(bdev);
-}
-
-void scm_request_finish(struct scm_request *scmrq)
-{
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	int i;
-
-	scm_release_cluster(scmrq);
-	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
-		blk_end_request_all(scmrq->request[i], scmrq->error);
 
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
-static int scm_request_start(struct scm_request *scmrq)
+static void scm_request_start(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
-	int ret;
 
 	atomic_inc(&bdev->queued_reqs);
-	if (!scmrq->aob->request.msb_count) {
-		scm_request_requeue(scmrq);
-		return -EINVAL;
-	}
-
-	ret = eadm_start_aob(scmrq->aob);
-	if (ret) {
+	if (eadm_start_aob(scmrq->aob)) {
 		SCM_LOG(5, "no subchannel");
 		scm_request_requeue(scmrq);
 	}
-	return ret;
 }
 
-static void scm_blk_request(struct request_queue *rq)
+struct scm_queue {
+	struct scm_request *scmrq;
+	spinlock_t lock;
+};
+
+static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *qd)
 {
-	struct scm_device *scmdev = rq->queuedata;
+	struct scm_device *scmdev = hctx->queue->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq = NULL;
-	struct request *req;
+	struct scm_queue *sq = hctx->driver_data;
+	struct request *req = qd->rq;
+	struct scm_request *scmrq;
 
-	while ((req = blk_peek_request(rq))) {
-		if (!scm_permit_request(bdev, req))
-			goto out;
-
-		if (!scmrq) {
-			scmrq = scm_request_fetch();
-			if (!scmrq) {
-				SCM_LOG(5, "no request");
-				goto out;
-			}
-			scm_request_init(bdev, scmrq);
-		}
-		scm_request_set(scmrq, req);
-
-		if (!scm_reserve_cluster(scmrq)) {
-			SCM_LOG(5, "cluster busy");
-			scm_request_set(scmrq, NULL);
-			if (scmrq->aob->request.msb_count)
-				goto out;
-
-			scm_request_done(scmrq);
-			return;
-		}
-
-		if (scm_need_cluster_request(scmrq)) {
-			if (scmrq->aob->request.msb_count) {
-				/* Start cluster requests separately. */
-				scm_request_set(scmrq, NULL);
-				if (scm_request_start(scmrq))
-					return;
-			} else {
-				atomic_inc(&bdev->queued_reqs);
-				blk_start_request(req);
-				scm_initiate_cluster_request(scmrq);
-			}
-			scmrq = NULL;
-			continue;
-		}
-
-		if (scm_request_prepare(scmrq)) {
-			SCM_LOG(5, "aidaw alloc failed");
-			scm_request_set(scmrq, NULL);
-			goto out;
-		}
-		blk_start_request(req);
-
-		if (scmrq->aob->request.msb_count < nr_requests_per_io)
-			continue;
-
-		if (scm_request_start(scmrq))
-			return;
-
-		scmrq = NULL;
+	spin_lock(&sq->lock);
+	if (!scm_permit_request(bdev, req)) {
+		spin_unlock(&sq->lock);
+		return BLK_MQ_RQ_QUEUE_BUSY;
 	}
-out:
-	if (scmrq)
+
+	scmrq = sq->scmrq;
+	if (!scmrq) {
+		scmrq = scm_request_fetch();
+		if (!scmrq) {
+			SCM_LOG(5, "no request");
+			spin_unlock(&sq->lock);
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		}
+		scm_request_init(bdev, scmrq);
+		sq->scmrq = scmrq;
+	}
+	scm_request_set(scmrq, req);
+
+	if (scm_request_prepare(scmrq)) {
+		SCM_LOG(5, "aidaw alloc failed");
+		scm_request_set(scmrq, NULL);
+
+		if (scmrq->aob->request.msb_count)
+			scm_request_start(scmrq);
+
+		sq->scmrq = NULL;
+		spin_unlock(&sq->lock);
+		return BLK_MQ_RQ_QUEUE_BUSY;
+	}
+	blk_mq_start_request(req);
+
+	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
 		scm_request_start(scmrq);
-	else
-		scm_ensure_queue_restart(bdev);
+		sq->scmrq = NULL;
+	}
+	spin_unlock(&sq->lock);
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			     unsigned int idx)
+{
+	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
+
+	if (!qd)
+		return -ENOMEM;
+
+	spin_lock_init(&qd->lock);
+	hctx->driver_data = qd;
+
+	return 0;
+}
+
+static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+	struct scm_queue *qd = hctx->driver_data;
+
+	WARN_ON(qd->scmrq);
+	kfree(hctx->driver_data);
+	hctx->driver_data = NULL;
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
 {
 	struct aob *aob = scmrq->aob;
 
-	if (scmrq->error == -ETIMEDOUT)
+	if (scmrq->error == BLK_STS_TIMEOUT)
 		SCM_LOG(1, "Request timeout");
 	else {
 		SCM_LOG(1, "Request error");
@@ -377,27 +367,12 @@ static void __scmrq_log_error(struct scm_request *scmrq)
 		       scmrq->error);
 }
 
-void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
-{
-	struct scm_request *scmrq = data;
-	struct scm_blk_dev *bdev = scmrq->bdev;
-
-	scmrq->error = error;
-	if (error)
-		__scmrq_log_error(scmrq);
-
-	spin_lock(&bdev->lock);
-	list_add_tail(&scmrq->list, &bdev->finished_requests);
-	spin_unlock(&bdev->lock);
-	tasklet_hi_schedule(&bdev->tasklet);
-}
-
 static void scm_blk_handle_error(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	unsigned long flags;
 
-	if (scmrq->error != -EIO)
+	if (scmrq->error != BLK_STS_IOERR)
 		goto restart;
 
 	/* For -EIO the response block is valid. */
@@ -419,54 +394,46 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
 		return;
 
 requeue:
-	spin_lock_irqsave(&bdev->rq_lock, flags);
 	scm_request_requeue(scmrq);
-	spin_unlock_irqrestore(&bdev->rq_lock, flags);
 }
 
-static void scm_blk_tasklet(struct scm_blk_dev *bdev)
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 {
-	struct scm_request *scmrq;
-	unsigned long flags;
+	struct scm_request *scmrq = data;
 
-	spin_lock_irqsave(&bdev->lock, flags);
-	while (!list_empty(&bdev->finished_requests)) {
-		scmrq = list_first_entry(&bdev->finished_requests,
-					 struct scm_request, list);
-		list_del(&scmrq->list);
-		spin_unlock_irqrestore(&bdev->lock, flags);
-
-		if (scmrq->error && scmrq->retries-- > 0) {
+	scmrq->error = error;
+	if (error) {
+		__scmrq_log_error(scmrq);
+		if (scmrq->retries-- > 0) {
 			scm_blk_handle_error(scmrq);
-
-			/* Request restarted or requeued, handle next. */
-			spin_lock_irqsave(&bdev->lock, flags);
-			continue;
+			return;
 		}
-
-		if (scm_test_cluster_request(scmrq)) {
-			scm_cluster_request_irq(scmrq);
-			spin_lock_irqsave(&bdev->lock, flags);
-			continue;
-		}
-
-		scm_request_finish(scmrq);
-		spin_lock_irqsave(&bdev->lock, flags);
 	}
-	spin_unlock_irqrestore(&bdev->lock, flags);
-	/* Look out for more requests. */
-	blk_run_queue(bdev->rq);
+
+	scm_request_finish(scmrq);
+}
+
+static void scm_blk_request_done(struct request *req)
+{
+	blk_mq_end_request(req, 0);
 }
 
 static const struct block_device_operations scm_blk_devops = {
 	.owner = THIS_MODULE,
 };
 
+static const struct blk_mq_ops scm_mq_ops = {
+	.queue_rq = scm_blk_request,
+	.complete = scm_blk_request_done,
+	.init_hctx = scm_blk_init_hctx,
+	.exit_hctx = scm_blk_exit_hctx,
+};
+
 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 {
-	struct request_queue *rq;
-	int len, ret = -ENOMEM;
 	unsigned int devindex, nr_max_blk;
+	struct request_queue *rq;
+	int len, ret;
 
 	devindex = atomic_inc_return(&nr_devices) - 1;
 	/* scma..scmz + scmaa..scmzz */
@@ -477,18 +444,23 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 
 	bdev->scmdev = scmdev;
 	bdev->state = SCM_OPER;
-	spin_lock_init(&bdev->rq_lock);
 	spin_lock_init(&bdev->lock);
-	INIT_LIST_HEAD(&bdev->finished_requests);
 	atomic_set(&bdev->queued_reqs, 0);
-	tasklet_init(&bdev->tasklet,
-		     (void (*)(unsigned long)) scm_blk_tasklet,
-		     (unsigned long) bdev);
 
-	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
-	if (!rq)
+	bdev->tag_set.ops = &scm_mq_ops;
+	bdev->tag_set.nr_hw_queues = nr_requests;
+	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
+	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+
+	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
+	if (ret)
 		goto out;
 
+	rq = blk_mq_init_queue(&bdev->tag_set);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto out_tag;
+	}
 	bdev->rq = rq;
 	nr_max_blk = min(scmdev->nr_max_block,
 			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
@@ -498,12 +470,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	blk_queue_max_segments(rq, nr_max_blk);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
-	scm_blk_dev_cluster_setup(bdev);
 
 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
-	if (!bdev->gendisk)
+	if (!bdev->gendisk) {
+		ret = -ENOMEM;
 		goto out_queue;
-
+	}
 	rq->queuedata = scmdev;
 	bdev->gendisk->private_data = scmdev;
 	bdev->gendisk->fops = &scm_blk_devops;
@@ -528,6 +500,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 
 out_queue:
 	blk_cleanup_queue(rq);
+out_tag:
+	blk_mq_free_tag_set(&bdev->tag_set);
 out:
 	atomic_dec(&nr_devices);
 	return ret;
@@ -535,9 +509,9 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 
 void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
 {
-	tasklet_kill(&bdev->tasklet);
 	del_gendisk(bdev->gendisk);
 	blk_cleanup_queue(bdev->gendisk->queue);
+	blk_mq_free_tag_set(&bdev->tag_set);
 	put_disk(bdev->gendisk);
 }
 
@@ -558,7 +532,7 @@ static bool __init scm_blk_params_valid(void)
 	if (!nr_requests_per_io || nr_requests_per_io > 64)
 		return false;
 
-	return scm_cluster_size_valid();
+	return true;
 }
 
 static int __init scm_blk_init(void)
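The blk-mq conversion above follows the usual tag-set lifecycle: allocate the tag set, create the queue from it, and free the tag set again on teardown or error. A minimal sketch of that ordering with illustrative names (the ops table and queue depth are placeholders, not taken from this patch):

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hypothetical helper showing the allocate/init/undo ordering. */
static int example_mq_setup(struct blk_mq_tag_set *set,
			    const struct blk_mq_ops *example_mq_ops,
			    struct request_queue **rq)
{
	int ret;

	set->ops = example_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	*rq = blk_mq_init_queue(set);
	if (IS_ERR(*rq)) {
		blk_mq_free_tag_set(set);	/* undo the tag set on failure */
		return PTR_ERR(*rq);
	}
	return 0;
}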
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 09218cd..71288dd 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -4,6 +4,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
 
@@ -14,18 +15,14 @@
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
-	struct tasklet_struct tasklet;
 	struct request_queue *rq;
 	struct gendisk *gendisk;
+	struct blk_mq_tag_set tag_set;
 	struct scm_device *scmdev;
-	spinlock_t rq_lock;	/* guard the request queue */
-	spinlock_t lock;	/* guard the rest of the blockdev */
+	spinlock_t lock;
 	atomic_t queued_reqs;
 	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
 	struct list_head finished_requests;
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-	struct list_head cluster_list;
-#endif
 };
 
 struct scm_request {
@@ -35,14 +32,7 @@ struct scm_request {
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
-	int error;
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-	struct {
-		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
-		struct list_head list;
-		void **buf;
-	} cluster;
-#endif
+	blk_status_t error;
 };
 
 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
@@ -50,57 +40,13 @@ struct scm_request {
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
 void scm_blk_set_available(struct scm_blk_dev *);
-void scm_blk_irq(struct scm_device *, void *, int);
-
-void scm_request_finish(struct scm_request *);
-void scm_request_requeue(struct scm_request *);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
 
 struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
 
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
 
-#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-void __scm_free_rq_cluster(struct scm_request *);
-int __scm_alloc_rq_cluster(struct scm_request *);
-void scm_request_cluster_init(struct scm_request *);
-bool scm_reserve_cluster(struct scm_request *);
-void scm_release_cluster(struct scm_request *);
-void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
-bool scm_need_cluster_request(struct scm_request *);
-void scm_initiate_cluster_request(struct scm_request *);
-void scm_cluster_request_irq(struct scm_request *);
-bool scm_test_cluster_request(struct scm_request *);
-bool scm_cluster_size_valid(void);
-#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
-static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
-static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
-{
-	return 0;
-}
-static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
-static inline bool scm_reserve_cluster(struct scm_request *scmrq)
-{
-	return true;
-}
-static inline void scm_release_cluster(struct scm_request *scmrq) {}
-static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
-static inline bool scm_need_cluster_request(struct scm_request *scmrq)
-{
-	return false;
-}
-static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
-static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
-static inline bool scm_test_cluster_request(struct scm_request *scmrq)
-{
-	return false;
-}
-static inline bool scm_cluster_size_valid(void)
-{
-	return true;
-}
-#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
-
 extern debug_info_t *scm_debug;
 
 #define SCM_LOG(imp, txt) do {					\
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
deleted file mode 100644
index 7497ddde..0000000
--- a/drivers/s390/block/scm_blk_cluster.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Block driver for s390 storage class memory.
- *
- * Copyright IBM Corp. 2012
- * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <asm/eadm.h>
-#include "scm_blk.h"
-
-static unsigned int write_cluster_size = 64;
-module_param(write_cluster_size, uint, S_IRUGO);
-MODULE_PARM_DESC(write_cluster_size,
-		 "Number of pages used for contiguous writes.");
-
-#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
-
-void __scm_free_rq_cluster(struct scm_request *scmrq)
-{
-	int i;
-
-	if (!scmrq->cluster.buf)
-		return;
-
-	for (i = 0; i < 2 * write_cluster_size; i++)
-		free_page((unsigned long) scmrq->cluster.buf[i]);
-
-	kfree(scmrq->cluster.buf);
-}
-
-int __scm_alloc_rq_cluster(struct scm_request *scmrq)
-{
-	int i;
-
-	scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
-				 GFP_KERNEL);
-	if (!scmrq->cluster.buf)
-		return -ENOMEM;
-
-	for (i = 0; i < 2 * write_cluster_size; i++) {
-		scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
-		if (!scmrq->cluster.buf[i])
-			return -ENOMEM;
-	}
-	INIT_LIST_HEAD(&scmrq->cluster.list);
-	return 0;
-}
-
-void scm_request_cluster_init(struct scm_request *scmrq)
-{
-	scmrq->cluster.state = CLUSTER_NONE;
-}
-
-static bool clusters_intersect(struct request *A, struct request *B)
-{
-	unsigned long firstA, lastA, firstB, lastB;
-
-	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
-	lastA = (((u64) blk_rq_pos(A) << 9) +
-		    blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
-
-	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
-	lastB = (((u64) blk_rq_pos(B) << 9) +
-		    blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
-
-	return (firstB <= lastA && firstA <= lastB);
-}
-
-bool scm_reserve_cluster(struct scm_request *scmrq)
-{
-	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	struct scm_request *iter;
-	int pos, add = 1;
-
-	if (write_cluster_size == 0)
-		return true;
-
-	spin_lock(&bdev->lock);
-	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-		if (iter == scmrq) {
-			/*
-			 * We don't have to use clusters_intersect here, since
-			 * cluster requests are always started separately.
-			 */
-			add = 0;
-			continue;
-		}
-		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
-			if (clusters_intersect(req, iter->request[pos]) &&
-			    (rq_data_dir(req) == WRITE ||
-			     rq_data_dir(iter->request[pos]) == WRITE)) {
-				spin_unlock(&bdev->lock);
-				return false;
-			}
-		}
-	}
-	if (add)
-		list_add(&scmrq->cluster.list, &bdev->cluster_list);
-	spin_unlock(&bdev->lock);
-
-	return true;
-}
-
-void scm_release_cluster(struct scm_request *scmrq)
-{
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	unsigned long flags;
-
-	if (write_cluster_size == 0)
-		return;
-
-	spin_lock_irqsave(&bdev->lock, flags);
-	list_del(&scmrq->cluster.list);
-	spin_unlock_irqrestore(&bdev->lock, flags);
-}
-
-void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
-{
-	INIT_LIST_HEAD(&bdev->cluster_list);
-	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
-}
-
-static int scm_prepare_cluster_request(struct scm_request *scmrq)
-{
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct request *req = scmrq->request[0];
-	struct msb *msb = &scmrq->aob->msb[0];
-	struct req_iterator iter;
-	struct aidaw *aidaw;
-	struct bio_vec bv;
-	int i = 0;
-	u64 addr;
-
-	switch (scmrq->cluster.state) {
-	case CLUSTER_NONE:
-		scmrq->cluster.state = CLUSTER_READ;
-		/* fall through */
-	case CLUSTER_READ:
-		msb->bs = MSB_BS_4K;
-		msb->oc = MSB_OC_READ;
-		msb->flags = MSB_FLAG_IDA;
-		msb->blk_count = write_cluster_size;
-
-		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
-		msb->scm_addr = round_down(addr, CLUSTER_SIZE);
-
-		if (msb->scm_addr !=
-		    round_down(addr + (u64) blk_rq_bytes(req) - 1,
-			       CLUSTER_SIZE))
-			msb->blk_count = 2 * write_cluster_size;
-
-		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
-		if (!aidaw)
-			return -ENOMEM;
-
-		scmrq->aob->request.msb_count = 1;
-		msb->data_addr = (u64) aidaw;
-		for (i = 0; i < msb->blk_count; i++) {
-			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
-			aidaw++;
-		}
-
-		break;
-	case CLUSTER_WRITE:
-		aidaw = (void *) msb->data_addr;
-		msb->oc = MSB_OC_WRITE;
-
-		for (addr = msb->scm_addr;
-		     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
-		     addr += PAGE_SIZE) {
-			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
-			aidaw++;
-			i++;
-		}
-		rq_for_each_segment(bv, req, iter) {
-			aidaw->data_addr = (u64) page_address(bv.bv_page);
-			aidaw++;
-			i++;
-		}
-		for (; i < msb->blk_count; i++) {
-			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
-			aidaw++;
-		}
-		break;
-	}
-	return 0;
-}
-
-bool scm_need_cluster_request(struct scm_request *scmrq)
-{
-	int pos = scmrq->aob->request.msb_count;
-
-	if (rq_data_dir(scmrq->request[pos]) == READ)
-		return false;
-
-	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
-}
-
-/* Called with queue lock held. */
-void scm_initiate_cluster_request(struct scm_request *scmrq)
-{
-	if (scm_prepare_cluster_request(scmrq))
-		goto requeue;
-	if (eadm_start_aob(scmrq->aob))
-		goto requeue;
-	return;
-requeue:
-	scm_request_requeue(scmrq);
-}
-
-bool scm_test_cluster_request(struct scm_request *scmrq)
-{
-	return scmrq->cluster.state != CLUSTER_NONE;
-}
-
-void scm_cluster_request_irq(struct scm_request *scmrq)
-{
-	struct scm_blk_dev *bdev = scmrq->bdev;
-	unsigned long flags;
-
-	switch (scmrq->cluster.state) {
-	case CLUSTER_NONE:
-		BUG();
-		break;
-	case CLUSTER_READ:
-		if (scmrq->error) {
-			scm_request_finish(scmrq);
-			break;
-		}
-		scmrq->cluster.state = CLUSTER_WRITE;
-		spin_lock_irqsave(&bdev->rq_lock, flags);
-		scm_initiate_cluster_request(scmrq);
-		spin_unlock_irqrestore(&bdev->rq_lock, flags);
-		break;
-	case CLUSTER_WRITE:
-		scm_request_finish(scmrq);
-		break;
-	}
-}
-
-bool scm_cluster_size_valid(void)
-{
-	if (write_cluster_size == 1 || write_cluster_size > 128)
-		return false;
-
-	return !(write_cluster_size & (write_cluster_size - 1));
-}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index b9d7e75..a48f0d4 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,7 +190,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long page_addr;
 	unsigned long bytes;
 
-	blk_queue_split(q, &bio, q->bio_split);
+	blk_queue_split(q, &bio);
 
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
 	    (bio->bi_iter.bi_size & 4095) != 0)
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 9c471ea..6111c1f 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1096,26 +1096,26 @@ static const struct dev_pm_ops sclp_pm_ops = {
 	.restore	= sclp_restore,
 };
 
-static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
+static ssize_t con_pages_show(struct device_driver *dev, char *buf)
 {
 	return sprintf(buf, "%i\n", sclp_console_pages);
 }
 
-static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);
+static DRIVER_ATTR_RO(con_pages);
 
-static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
+static ssize_t con_drop_show(struct device_driver *dev, char *buf)
 {
 	return sprintf(buf, "%i\n", sclp_console_drop);
 }
 
-static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);
+static DRIVER_ATTR_RO(con_drop);
 
-static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
+static ssize_t con_full_show(struct device_driver *dev, char *buf)
 {
 	return sprintf(buf, "%lu\n", sclp_console_full);
 }
 
-static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
+static DRIVER_ATTR_RO(con_full);
 
 static struct attribute *sclp_drv_attrs[] = {
 	&driver_attr_con_pages.attr,
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 57974a1..b19020b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -641,10 +641,8 @@ static ssize_t vmlogrdr_recording_store(struct device * dev,
 static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
 
 
-static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
-					      char *buf)
+static ssize_t recording_status_show(struct device_driver *driver, char *buf)
 {
-
 	static const char cp_command[] = "QUERY RECORDING ";
 	int len;
 
@@ -652,8 +650,7 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
 	len = strlen(buf);
 	return len;
 }
-static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
-		   NULL);
+static DRIVER_ATTR_RO(recording_status);
 static struct attribute *vmlogrdr_drv_attrs[] = {
 	&driver_attr_recording_status.attr,
 	NULL,
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index e2aa944..d3e504c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -296,6 +296,51 @@ static const struct attribute_group *default_subch_attr_groups[] = {
 	NULL,
 };
 
+static ssize_t chpids_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct chsc_ssd_info *ssd = &sch->ssd_info;
+	ssize_t ret = 0;
+	int mask;
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		mask = 0x80 >> chp;
+		if (ssd->path_mask & mask)
+			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
+		else
+			ret += sprintf(buf + ret, "00 ");
+	}
+	ret += sprintf(buf + ret, "\n");
+	return ret;
+}
+static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
+
+static ssize_t pimpampom_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	struct pmcw *pmcw = &sch->schib.pmcw;
+
+	return sprintf(buf, "%02x %02x %02x\n",
+		       pmcw->pim, pmcw->pam, pmcw->pom);
+}
+static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
+
+static struct attribute *io_subchannel_type_attrs[] = {
+	&dev_attr_chpids.attr,
+	&dev_attr_pimpampom.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(io_subchannel_type);
+
+static const struct device_type io_subchannel_type = {
+	.groups = io_subchannel_type_groups,
+};
+
 int css_register_subchannel(struct subchannel *sch)
 {
 	int ret;
@@ -304,6 +349,10 @@ int css_register_subchannel(struct subchannel *sch)
 	sch->dev.parent = &channel_subsystems[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.groups = default_subch_attr_groups;
+
+	if (sch->st == SUBCHANNEL_TYPE_IO)
+		sch->dev.type = &io_subchannel_type;
+
 	/*
 	 * We don't want to generate uevents for I/O subchannels that don't
 	 * have a working ccw device behind them since they will be
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b8006ea..7be01a5 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -208,44 +208,6 @@ int __init io_subchannel_init(void)
 
 /************************ device handling **************************/
 
-/*
- * A ccw_device has some interfaces in sysfs in addition to the
- * standard ones.
- * The following entries are designed to export the information which
- * resided in 2.4 in /proc/subchannels. Subchannel and device number
- * are obvious, so they don't have an entry :)
- * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
- */
-static ssize_t
-chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
-{
-	struct subchannel *sch = to_subchannel(dev);
-	struct chsc_ssd_info *ssd = &sch->ssd_info;
-	ssize_t ret = 0;
-	int chp;
-	int mask;
-
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (ssd->path_mask & mask)
-			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
-		else
-			ret += sprintf(buf + ret, "00 ");
-	}
-	ret += sprintf (buf+ret, "\n");
-	return min((ssize_t)PAGE_SIZE, ret);
-}
-
-static ssize_t
-pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
-{
-	struct subchannel *sch = to_subchannel(dev);
-	struct pmcw *pmcw = &sch->schib.pmcw;
-
-	return sprintf (buf, "%02x %02x %02x\n",
-			pmcw->pim, pmcw->pam, pmcw->pom);
-}
-
 static ssize_t
 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -636,8 +598,6 @@ static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%02x\n", sch->vpm);
 }
 
-static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
-static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
@@ -647,8 +607,6 @@ static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
 static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);
 
 static struct attribute *io_subchannel_attrs[] = {
-	&dev_attr_chpids.attr,
-	&dev_attr_pimpampom.attr,
 	&dev_attr_logging.attr,
 	&dev_attr_vpm.attr,
 	NULL,
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index b3f44bc..0f11f3b 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -135,7 +135,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 	struct eadm_private *private = get_eadm_private(sch);
 	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
 	struct irb *irb = this_cpu_ptr(&cio_irb);
-	int error = 0;
+	blk_status_t error = BLK_STS_OK;
 
 	EADM_LOG(6, "irq");
 	EADM_LOG_HEX(6, irb, sizeof(*irb));
@@ -144,10 +144,10 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 
 	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
 	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
-		error = -EIO;
+		error = BLK_STS_IOERR;
 
 	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
-		error = -ETIMEDOUT;
+		error = BLK_STS_TIMEOUT;
 
 	eadm_subchannel_set_timeout(sch, 0);
 
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 15268ed..1fa53ec 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -71,7 +71,7 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
 }
 EXPORT_SYMBOL_GPL(scm_driver_unregister);
 
-void scm_irq_handler(struct aob *aob, int error)
+void scm_irq_handler(struct aob *aob, blk_status_t error)
 {
 	struct aob_rq_header *aobrq = (void *) aob->request.data;
 	struct scm_device *scmdev = aobrq->scmdev;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index e90dd43..a25367e 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -90,54 +90,6 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 }
 
 /*
- * Sysfs interfaces
- */
-static ssize_t chpids_show(struct device *dev,
-			   struct device_attribute *attr,
-			   char *buf)
-{
-	struct subchannel *sch = to_subchannel(dev);
-	struct chsc_ssd_info *ssd = &sch->ssd_info;
-	ssize_t ret = 0;
-	int chp;
-	int mask;
-
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (ssd->path_mask & mask)
-			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
-		else
-			ret += sprintf(buf + ret, "00 ");
-	}
-	ret += sprintf(buf+ret, "\n");
-	return ret;
-}
-
-static ssize_t pimpampom_show(struct device *dev,
-			      struct device_attribute *attr,
-			      char *buf)
-{
-	struct subchannel *sch = to_subchannel(dev);
-	struct pmcw *pmcw = &sch->schib.pmcw;
-
-	return sprintf(buf, "%02x %02x %02x\n",
-		       pmcw->pim, pmcw->pam, pmcw->pom);
-}
-
-static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
-static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
-
-static struct attribute *vfio_subchannel_attrs[] = {
-	&dev_attr_chpids.attr,
-	&dev_attr_pimpampom.attr,
-	NULL,
-};
-
-static struct attribute_group vfio_subchannel_attr_group = {
-	.attrs = vfio_subchannel_attrs,
-};
-
-/*
  * Css driver callbacks
  */
 static void vfio_ccw_sch_irq(struct subchannel *sch)
@@ -174,13 +126,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 	if (ret)
 		goto out_free;
 
-	ret = sysfs_create_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
-	if (ret)
-		goto out_disable;
-
 	ret = vfio_ccw_mdev_reg(sch);
 	if (ret)
-		goto out_rm_group;
+		goto out_disable;
 
 	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
 	atomic_set(&private->avail, 1);
@@ -188,8 +136,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 
 	return 0;
 
-out_rm_group:
-	sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
 out_disable:
 	cio_disable_subchannel(sch);
 out_free:
@@ -206,8 +152,6 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
 
 	vfio_ccw_mdev_unreg(sch);
 
-	sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group);
-
 	dev_set_drvdata(&sch->dev, NULL);
 
 	kfree(private);
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc..a66a317 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
 {
 	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
 }
-MDEV_TYPE_ATTR_RO(name);
+static MDEV_TYPE_ATTR_RO(name);
 
 static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
 			       char *buf)
 {
 	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
 }
-MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(device_api);
 
 static ssize_t available_instances_show(struct kobject *kobj,
 					struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
 
 	return sprintf(buf, "%d\n", atomic_read(&private->avail));
 }
-MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(available_instances);
 
 static struct attribute *mdev_types_attrs[] = {
 	&mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
 	.attrs = mdev_types_attrs,
 };
 
-struct attribute_group *mdev_type_groups[] = {
+static struct attribute_group *mdev_type_groups[] = {
 	&mdev_type_group,
 	NULL,
 };
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
 				      &events, &private->nb);
 }
 
-void vfio_ccw_mdev_release(struct mdev_device *mdev)
+static void vfio_ccw_mdev_release(struct mdev_device *mdev)
 {
 	struct vfio_ccw_private *private =
 		dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
 	}
 }
 
-int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
+static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 {
 	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
 		return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596..6dee598 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
 	int rc;
 
+	/* Add queue/card to list of active queues/cards */
+	spin_lock_bh(&ap_list_lock);
+	if (is_card_dev(dev))
+		list_add(&to_ap_card(dev)->list, &ap_card_list);
+	else
+		list_add(&to_ap_queue(dev)->list,
+			 &to_ap_queue(dev)->card->queues);
+	spin_unlock_bh(&ap_list_lock);
+
 	ap_dev->drv = ap_drv;
 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-	if (rc)
+
+	if (rc) {
+		spin_lock_bh(&ap_list_lock);
+		if (is_card_dev(dev))
+			list_del_init(&to_ap_card(dev)->list);
+		else
+			list_del_init(&to_ap_queue(dev)->list);
+		spin_unlock_bh(&ap_list_lock);
 		ap_dev->drv = NULL;
+	}
+
 	return rc;
 }
 
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;
 
+	if (ap_drv->remove)
+		ap_drv->remove(ap_dev);
+
+	/* Remove queue/card from list of active queues/cards */
 	spin_lock_bh(&ap_list_lock);
 	if (is_card_dev(dev))
 		list_del_init(&to_ap_card(dev)->list);
 	else
 		list_del_init(&to_ap_queue(dev)->list);
 	spin_unlock_bh(&ap_list_lock);
-	if (ap_drv->remove)
-		ap_drv->remove(ap_dev);
+
 	return 0;
 }
 
@@ -745,7 +766,7 @@ static ssize_t ap_domain_store(struct bus_type *bus,
 	ap_domain_index = domain;
 	spin_unlock_bh(&ap_domain_lock);
 
-	AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
+	AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
 
 	return count;
 }
@@ -931,6 +952,7 @@ static int ap_select_domain(void)
 	}
 	if (best_domain >= 0){
 		ap_domain_index = best_domain;
+		AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
 		spin_unlock_bh(&ap_domain_lock);
 		return 0;
 	}
@@ -967,7 +989,7 @@ static void ap_scan_bus(struct work_struct *unused)
 	ap_qid_t qid;
 	int depth = 0, type = 0;
 	unsigned int functions = 0;
-	int rc, id, dom, borked, domains;
+	int rc, id, dom, borked, domains, defdomdevs = 0;
 
 	AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
 
@@ -1031,6 +1053,8 @@ static void ap_scan_bus(struct work_struct *unused)
 				put_device(dev);
 				if (!borked) {
 					domains++;
+					if (dom == ap_domain_index)
+						defdomdevs++;
 					continue;
 				}
 			}
@@ -1056,10 +1080,6 @@ static void ap_scan_bus(struct work_struct *unused)
 				}
 				/* get it and thus adjust reference counter */
 				get_device(&ac->ap_dev.device);
-				/* Add card device to card list */
-				spin_lock_bh(&ap_list_lock);
-				list_add(&ac->list, &ap_card_list);
-				spin_unlock_bh(&ap_list_lock);
 			}
 			/* now create the new queue device */
 			aq = ap_queue_create(qid, type);
@@ -1070,10 +1090,6 @@ static void ap_scan_bus(struct work_struct *unused)
 			aq->ap_dev.device.parent = &ac->ap_dev.device;
 			dev_set_name(&aq->ap_dev.device,
 				     "%02x.%04x", id, dom);
-			/* Add queue device to card queue list */
-			spin_lock_bh(&ap_list_lock);
-			list_add(&aq->list, &ac->queues);
-			spin_unlock_bh(&ap_list_lock);
 			/* Start with a device reset */
 			spin_lock_bh(&aq->lock);
 			ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,13 +1097,12 @@ static void ap_scan_bus(struct work_struct *unused)
 			/* Register device */
 			rc = device_register(&aq->ap_dev.device);
 			if (rc) {
-				spin_lock_bh(&ap_list_lock);
-				list_del_init(&aq->list);
-				spin_unlock_bh(&ap_list_lock);
 				put_device(&aq->ap_dev.device);
 				continue;
 			}
 			domains++;
+			if (dom == ap_domain_index)
+				defdomdevs++;
 		} /* end domain loop */
 		if (ac) {
 			/* remove card dev if there are no queue devices */
@@ -1096,6 +1111,11 @@ static void ap_scan_bus(struct work_struct *unused)
 			put_device(&ac->ap_dev.device);
 		}
 	} /* end device loop */
+
+	if (defdomdevs < 1)
+		AP_DBF(DBF_INFO, "no queue device with default domain %d available\n",
+		       ap_domain_index);
+
 out:
 	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
 }
@@ -1164,14 +1184,14 @@ int __init ap_module_init(void)
 	ap_init_configuration();
 
 	if (ap_configuration)
-		max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1);
+		max_domain_id =
+			ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
 	else
 		max_domain_id = 15;
 	if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
 		pr_warn("%d is not a valid cryptographic domain\n",
 			ap_domain_index);
-		rc = -EINVAL;
-		goto out_free;
+		ap_domain_index = -1;
 	}
 	/* In resume callback we need to know if the user had set the domain.
 	 * If so, we can not just reset it.
@@ -1244,7 +1264,6 @@ int __init ap_module_init(void)
 	unregister_reset_call(&ap_reset_call);
 	if (ap_using_interrupts())
 		unregister_adapter_interrupt(&ap_airq);
-out_free:
 	kfree(ap_configuration);
 	return rc;
 }
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161c..836efac9 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
 
 static void ap_card_device_release(struct device *dev)
 {
-	kfree(to_ap_card(dev));
+	struct ap_card *ac = to_ap_card(dev);
+
+	if (!list_empty(&ac->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&ac->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(ac);
 }
 
 struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a..0f1a5d0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
 
 static void ap_queue_device_release(struct device *dev)
 {
-	kfree(to_ap_queue(dev));
+	struct ap_queue *aq = to_ap_queue(dev);
+
+	if (!list_empty(&aq->list)) {
+		spin_lock_bh(&ap_list_lock);
+		list_del_init(&aq->list);
+		spin_unlock_bh(&ap_list_lock);
+	}
+	kfree(aq);
 }
 
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index ea86da8..f61fa47 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -178,9 +178,9 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
 	pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
 	pxcrb->request_control_blk_length =
 		preqcblk->cprb_len + preqcblk->req_parml;
-	pxcrb->request_control_blk_addr = (void *) preqcblk;
+	pxcrb->request_control_blk_addr = (void __user *) preqcblk;
 	pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
-	pxcrb->reply_control_blk_addr = (void *) prepcblk;
+	pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
 }
 
 /*
@@ -1194,7 +1194,7 @@ static struct miscdevice pkey_dev = {
 /*
  * Module init
  */
-int __init pkey_init(void)
+static int __init pkey_init(void)
 {
 	cpacf_mask_t pckmo_functions;
 
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 93015f8..b1c27e2 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -821,8 +821,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			do {
 				rc = zcrypt_rsa_modexpo(&mex);
 			} while (rc == -EAGAIN);
-		if (rc)
+		if (rc) {
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d", rc);
 			return rc;
+		}
 		return put_user(mex.outputdatalength, &umex->outputdatalength);
 	}
 	case ICARSACRT: {
@@ -838,8 +840,10 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			do {
 				rc = zcrypt_rsa_crt(&crt);
 			} while (rc == -EAGAIN);
-		if (rc)
+		if (rc) {
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d", rc);
 			return rc;
+		}
 		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
 	}
 	case ZSECSENDCPRB: {
@@ -855,6 +859,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			do {
 				rc = zcrypt_send_cprb(&xcRB);
 			} while (rc == -EAGAIN);
+		if (rc)
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d", rc);
 		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
 			return -EFAULT;
 		return rc;
@@ -872,6 +878,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			do {
 				rc = zcrypt_send_ep11_cprb(&xcrb);
 			} while (rc == -EAGAIN);
+		if (rc)
+			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d", rc);
 		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
 			return -EFAULT;
 		return rc;
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index ca0cdbe..12cff62 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -48,26 +48,6 @@ struct cca_token_hdr {
 
 #define CCA_TKN_HDR_ID_EXT 0x1E
 
-/**
- * mapping for the cca private ME section
- */
-struct cca_private_ext_ME_sec {
-	unsigned char  section_identifier;
-	unsigned char  version;
-	unsigned short section_length;
-	unsigned char  private_key_hash[20];
-	unsigned char  reserved1[4];
-	unsigned char  key_format;
-	unsigned char  reserved2;
-	unsigned char  key_name_hash[20];
-	unsigned char  key_use_flags[4];
-	unsigned char  reserved3[6];
-	unsigned char  reserved4[24];
-	unsigned char  confounder[24];
-	unsigned char  exponent[128];
-	unsigned char  modulus[128];
-} __attribute__((packed));
-
 #define CCA_PVT_USAGE_ALL 0x80
 
 /**
@@ -124,77 +104,6 @@ struct cca_pvt_ext_CRT_sec {
 #define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
 
 /**
- * Set up private key fields of a type6 MEX message.
- * Note that all numerics in the key token are big-endian,
- * while the entries in the key block header are little-endian.
- *
- * @mex: pointer to user input data
- * @p: pointer to memory area for the key
- *
- * Returns the size of the key area or -EFAULT
- */
-static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
-					  void *p, int big_endian)
-{
-	static struct cca_token_hdr static_pvt_me_hdr = {
-		.token_identifier	=  0x1E,
-		.token_length		=  0x0183,
-	};
-	static struct cca_private_ext_ME_sec static_pvt_me_sec = {
-		.section_identifier	=  0x02,
-		.section_length		=  0x016C,
-		.key_use_flags		= {0x80,0x00,0x00,0x00},
-	};
-	static struct cca_public_sec static_pub_me_sec = {
-		.section_identifier	=  0x04,
-		.section_length		=  0x000F,
-		.exponent_len		=  0x0003,
-	};
-	static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
-	struct {
-		struct T6_keyBlock_hdr t6_hdr;
-		struct cca_token_hdr pvtMeHdr;
-		struct cca_private_ext_ME_sec pvtMeSec;
-		struct cca_public_sec pubMeSec;
-		char exponent[3];
-	} __attribute__((packed)) *key = p;
-	unsigned char *temp;
-
-	memset(key, 0, sizeof(*key));
-
-	if (big_endian) {
-		key->t6_hdr.blen = cpu_to_be16(0x189);
-		key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
-	} else {
-		key->t6_hdr.blen = cpu_to_le16(0x189);
-		key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
-	}
-	key->pvtMeHdr = static_pvt_me_hdr;
-	key->pvtMeSec = static_pvt_me_sec;
-	key->pubMeSec = static_pub_me_sec;
-	/*
-	 * In a private key, the modulus doesn't appear in the public
-	 * section. So, an arbitrary public exponent of 0x010001 will be
-	 * used.
-	 */
-	memcpy(key->exponent, pk_exponent, 3);
-
-	/* key parameter block */
-	temp = key->pvtMeSec.exponent +
-		sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
-	if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
-		return -EFAULT;
-
-	/* modulus */
-	temp = key->pvtMeSec.modulus +
-		sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
-	if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
-		return -EFAULT;
-	key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
-	return sizeof(*key);
-}
-
-/**
  * Set up private key fields of a type6 MEX message. The _pad variant
  * strips leading zeroes from the b_key.
  * Note that all numerics in the key token are big-endian,
@@ -205,8 +114,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
  *
  * Returns the size of the key area or -EFAULT
  */
-static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
-					  void *p, int big_endian)
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
 {
 	static struct cca_token_hdr static_pub_hdr = {
 		.token_identifier	=  0x1E,
@@ -251,13 +159,8 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
 					2*mex->inputdatalength - i;
 	key->pubHdr.token_length =
 		key->pubSec.section_length + sizeof(key->pubHdr);
-	if (big_endian) {
-		key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
-		key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
-	} else {
-		key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
-		key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
-	}
+	key->t6_hdr.ulen = key->pubHdr.token_length + 4;
+	key->t6_hdr.blen = key->pubHdr.token_length + 6;
 	return sizeof(*key) + 2*mex->inputdatalength - i;
 }
 
@@ -271,8 +174,7 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
  *
  * Returns the size of the key area or -EFAULT
  */
-static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
-				       void *p, int big_endian)
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
 {
 	static struct cca_public_sec static_cca_pub_sec = {
 		.section_identifier = 4,
@@ -298,13 +200,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
 	size = sizeof(*key) + key_len + sizeof(*pub) + 3;
 
 	/* parameter block.key block */
-	if (big_endian) {
-		key->t6_hdr.blen = cpu_to_be16(size);
-		key->t6_hdr.ulen = cpu_to_be16(size - 2);
-	} else {
-		key->t6_hdr.blen = cpu_to_le16(size);
-		key->t6_hdr.ulen = cpu_to_le16(size - 2);
-	}
+	key->t6_hdr.blen = size;
+	key->t6_hdr.ulen = size - 2;
 
 	/* key token header */
 	key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index e5563ff..4fddb43 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -291,7 +291,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
 		return -EFAULT;
 
 	/* Set up key which is located after the variable length text. */
-	size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
+	size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
 	if (size < 0)
 		return size;
 	size += sizeof(*msg) + mex->inputdatalength;
@@ -353,7 +353,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
 		return -EFAULT;
 
 	/* Set up key which is located after the variable length text. */
-	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
+	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength);
 	if (size < 0)
 		return size;
 	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 198842c..b1fa38a 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1770,15 +1770,15 @@ static struct ccwgroup_driver ctcm_group_driver = {
 	.restore     = ctcm_pm_resume,
 };
 
-static ssize_t ctcm_driver_group_store(struct device_driver *ddrv,
-				       const char *buf,	size_t count)
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
 {
 	int err;
 
 	err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
 	return err ? err : count;
 }
-static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
+static DRIVER_ATTR_WO(group);
 
 static struct attribute *ctcm_drv_attrs[] = {
 	&driver_attr_group.attr,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 211b31d..589dba6 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2411,14 +2411,14 @@ static struct ccwgroup_driver lcs_group_driver = {
 	.restore     = lcs_restore,
 };
 
-static ssize_t lcs_driver_group_store(struct device_driver *ddrv,
-				      const char *buf, size_t count)
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
 {
 	int err;
 	err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
 	return err ? err : count;
 }
-static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
+static DRIVER_ATTR_WO(group);
 
 static struct attribute *lcs_drv_attrs[] = {
 	&driver_attr_group.attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index dba94b4..f1f6eb1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1954,7 +1954,6 @@ static void netiucv_free_netdevice(struct net_device *dev)
 		privptr->conn = NULL; privptr->fsm = NULL;
 		/* privptr gets freed by free_netdev() */
 	}
-	free_netdev(dev);
 }
 
 /**
@@ -1972,7 +1971,8 @@ static void netiucv_setup_netdevice(struct net_device *dev)
 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
 	dev->min_mtu		 = 576;
 	dev->max_mtu		 = NETIUCV_MTU_MAX;
-	dev->destructor          = netiucv_free_netdevice;
+	dev->needs_free_netdev   = true;
+	dev->priv_destructor     = netiucv_free_netdevice;
 	dev->hard_header_len     = NETIUCV_HDRLEN;
 	dev->addr_len            = 0;
 	dev->type                = ARPHRD_SLIP;
@@ -2020,8 +2020,8 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 	return NULL;
 }
 
-static ssize_t conn_write(struct device_driver *drv,
-			  const char *buf, size_t count)
+static ssize_t connection_store(struct device_driver *drv, const char *buf,
+				size_t count)
 {
 	char username[9];
 	char userdata[17];
@@ -2082,11 +2082,10 @@ static ssize_t conn_write(struct device_driver *drv,
 	netiucv_free_netdevice(dev);
 	return rc;
 }
+static DRIVER_ATTR_WO(connection);
 
-static DRIVER_ATTR(connection, 0200, NULL, conn_write);
-
-static ssize_t remove_write (struct device_driver *drv,
-			     const char *buf, size_t count)
+static ssize_t remove_store(struct device_driver *drv, const char *buf,
+			    size_t count)
 {
 	struct iucv_connection *cp;
         struct net_device *ndev;
@@ -2132,8 +2131,7 @@ static ssize_t remove_write (struct device_driver *drv,
 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
         return -EINVAL;
 }
-
-static DRIVER_ATTR(remove, 0200, NULL, remove_write);
+static DRIVER_ATTR_WO(remove);
 
 static struct attribute * netiucv_drv_attrs[] = {
 	&driver_attr_connection.attr,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fc6d85f..462b82e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5800,8 +5800,8 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
 	.restore = qeth_core_restore,
 };
 
-static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv,
-					    const char *buf, size_t count)
+static ssize_t group_store(struct device_driver *ddrv, const char *buf,
+			   size_t count)
 {
 	int err;
 
@@ -5810,7 +5810,7 @@ static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv,
 
 	return err ? err : count;
 }
-static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
+static DRIVER_ATTR_WO(group);
 
 static struct attribute *qeth_drv_attrs[] = {
 	&driver_attr_group.attr,
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 62fed9d..14f377a 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -214,7 +214,7 @@ static void jsfd_request(void)
 		struct jsfd_part *jdp = req->rq_disk->private_data;
 		unsigned long offset = blk_rq_pos(req) << 9;
 		size_t len = blk_rq_cur_bytes(req);
-		int err = -EIO;
+		blk_status_t err = BLK_STS_IOERR;
 
 		if ((offset + len) > jdp->dsize)
 			goto end;
@@ -230,7 +230,7 @@ static void jsfd_request(void)
 		}
 
 		jsfd_read(bio_data(req->bio), jdp->dbase + offset, len);
-		err = 0;
+		err = BLK_STS_OK;
 	end:
 		if (!__blk_end_request_cur(req, err))
 			req = jsfd_next_request();
@@ -592,6 +592,7 @@ static int jsfd_init(void)
 			put_disk(disk);
 			goto out;
 		}
+		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
 		jsfd_disk[i] = disk;
 	}
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 4fc8ed5..1f424e4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
 	struct bnx2fc_cmd_mgr *cmd_mgr;
 	spinlock_t hba_lock;
 	struct mutex hba_mutex;
+	struct mutex hba_stats_mutex;
 	unsigned long adapter_state;
 		#define ADAPTER_STATE_UP		0
 		#define ADAPTER_STATE_GOING_DOWN	1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 93b5a00..902722d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 	if (!fw_stats)
 		return NULL;
 
+	mutex_lock(&hba->hba_stats_mutex);
+
 	bnx2fc_stats = fc_get_host_stats(shost);
 
 	init_completion(&hba->stat_req_done);
 	if (bnx2fc_send_stat_req(hba))
-		return bnx2fc_stats;
+		goto unlock_stats_mutex;
 	rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
 	if (!rc) {
 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
-		return bnx2fc_stats;
+		goto unlock_stats_mutex;
 	}
 	BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
 	bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 
 	memcpy(&hba->prev_stats, hba->stats_buffer,
 	       sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+	mutex_unlock(&hba->hba_stats_mutex);
 	return bnx2fc_stats;
 }
 
@@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
 	}
 	spin_lock_init(&hba->hba_lock);
 	mutex_init(&hba->hba_mutex);
+	mutex_init(&hba->hba_stats_mutex);
 
 	hba->cnic = cnic;
 
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1076c15..0aae094 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk)
 		cxgbi_sock_put(csk);
 	}
 	csk->dst = NULL;
-	csk->cdev = NULL;
 }
 
 static int init_act_open(struct cxgbi_sock *csk)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index bd7d39e..e4c83b7 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk)
 	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
 		csk, (csk)->state, (csk)->flags, (csk)->tid);
 	spin_lock_bh(&csk->lock);
-	dst_confirm(csk->dst);
+	if (csk->dst)
+		dst_confirm(csk->dst);
 	data_lost = skb_queue_len(&csk->receive_queue);
 	__skb_queue_purge(&csk->receive_queue);
 
@@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk)
 	}
 
 	if (close_req) {
-		if (data_lost)
+		if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
+		    data_lost)
 			csk->cdev->csk_send_abort_req(csk);
 		else
 			csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
 				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
 		skb = next;
 	}
-done:
+
 	if (likely(skb_queue_len(&csk->write_queue)))
 		cdev->csk_push_tx_frames(csk, 1);
+done:
 	spin_unlock_bh(&csk->lock);
 	return copied;
 
@@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn,
 	}
 }
 
-static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
+static int
+skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
+		 struct sk_buff *skb)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	int err;
 
 	log_debug(1 << CXGBI_DBG_PDU_RX,
 		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
 		}
 	}
 
-	return read_pdu_skb(conn, skb, 0, 0);
+	err = read_pdu_skb(conn, skb, 0, 0);
+	if (likely(err >= 0)) {
+		struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
+		u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+		if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
+			cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
+	}
+
+	return err;
 }
 
 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
 			cxgbi_skcb_rx_pdulen(skb));
 
 		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
-			err = skb_read_pdu_bhs(conn, skb);
+			err = skb_read_pdu_bhs(csk, conn, skb);
 			if (err < 0) {
 				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
 					"f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
 					cxgbi_skcb_flags(skb),
 					cxgbi_skcb_rx_pdulen(skb));
 		} else {
-			err = skb_read_pdu_bhs(conn, skb);
+			err = skb_read_pdu_bhs(csk, conn, skb);
 			if (err < 0) {
 				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
 					"f 0x%lx, plen %u.\n",
@@ -1873,6 +1888,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 	tcp_task->dd_data = tdata;
 	task->hdr = NULL;
 
+	if (tdata->skb) {
+		kfree_skb(tdata->skb);
+		tdata->skb = NULL;
+	}
+
 	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
 	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
 	     (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1890,6 +1910,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 		return -ENOMEM;
 	}
 
+	skb_get(tdata->skb);
 	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
 	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
 	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
@@ -2035,9 +2056,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 	unsigned int datalen;
 	int err;
 
-	if (!skb) {
+	if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
 		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
-			"task 0x%p, skb NULL.\n", task);
+			"task 0x%p, skb 0x%p\n", task, skb);
 		return 0;
 	}
 
@@ -2050,7 +2071,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 	}
 
 	datalen = skb->data_len;
-	tdata->skb = NULL;
 
 	/* write ppod first if using ofldq to write ppod */
 	if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
@@ -2078,6 +2098,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 			pdulen += ISCSI_DIGEST_SIZE;
 
 		task->conn->txdata_octets += pdulen;
+		cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
 		return 0;
 	}
 
@@ -2086,7 +2107,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
 			task, skb, skb->len, skb->data_len, err);
 		/* reset skb to send when we are called again */
-		tdata->skb = skb;
 		return err;
 	}
 
@@ -2094,7 +2114,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
 		task->itt, skb, skb->len, skb->data_len, err);
 
-	kfree_skb(skb);
+	__kfree_skb(tdata->skb);
+	tdata->skb = NULL;
 
 	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
 	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2113,8 +2134,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
 
 	tcp_task->dd_data = NULL;
 	/*  never reached the xmit task callout */
-	if (tdata->skb)
-		__kfree_skb(tdata->skb);
+	if (tdata->skb) {
+		kfree_skb(tdata->skb);
+		tdata->skb = NULL;
+	}
 
 	task_release_itt(task, task->hdr_itt);
 	memset(tdata, 0, sizeof(*tdata));
@@ -2714,6 +2737,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
 static int __init libcxgbi_init_module(void)
 {
 	pr_info("%s", version);
+
+	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+		     sizeof(struct cxgbi_skb_cb));
 	return 0;
 }
 
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 18e0ea8..37f07aa 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -187,6 +187,7 @@ enum cxgbi_sock_flags {
 	CTPF_HAS_ATID,		/* reserved atid */
 	CTPF_HAS_TID,		/* reserved hw tid */
 	CTPF_OFFLOAD_DOWN,	/* offload function off */
+	CTPF_LOGOUT_RSP_RCVD,   /* received logout response */
 };
 
 struct cxgbi_skb_rx_cb {
@@ -195,7 +196,8 @@ struct cxgbi_skb_rx_cb {
 };
 
 struct cxgbi_skb_tx_cb {
-	void *l2t;
+	void *handle;
+	void *arp_err_handler;
 	struct sk_buff *wr_next;
 };
 
@@ -203,6 +205,7 @@ enum cxgbi_skcb_flags {
 	SKCBF_TX_NEED_HDR,	/* packet needs a header */
 	SKCBF_TX_MEM_WRITE,     /* memory write */
 	SKCBF_TX_FLAG_COMPL,    /* wr completion flag */
+	SKCBF_TX_DONE,		/* skb tx done */
 	SKCBF_RX_COALESCED,	/* received whole pdu */
 	SKCBF_RX_HDR,		/* received pdu header */
 	SKCBF_RX_DATA,		/* received pdu payload */
@@ -215,13 +218,13 @@ enum cxgbi_skcb_flags {
 };
 
 struct cxgbi_skb_cb {
-	unsigned char ulp_mode;
-	unsigned long flags;
-	unsigned int seq;
 	union {
 		struct cxgbi_skb_rx_cb rx;
 		struct cxgbi_skb_tx_cb tx;
 	};
+	unsigned char ulp_mode;
+	unsigned long flags;
+	unsigned int seq;
 };
 
 #define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
@@ -374,11 +377,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
 	cxgbi_skcb_tx_wr_next(skb) = NULL;
 	/*
 	 * We want to take an extra reference since both us and the driver
-	 * need to free the packet before it's really freed. We know there's
-	 * just one user currently so we use atomic_set rather than skb_get
-	 * to avoid the atomic op.
+	 * need to free the packet before it's really freed.
 	 */
-	atomic_set(&skb->users, 2);
+	skb_get(skb);
 
 	if (!csk->wr_pending_head)
 		csk->wr_pending_head = skb;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3cbab87..2ceff58 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
 				      struct list_head *list,
 				      unsigned char *cdb)
 {
-	struct scsi_device *sdev = ctlr->ms_sdev;
-	struct rdac_dh_data *h = sdev->handler_data;
 	struct rdac_mode_common *common;
 	unsigned data_size;
 	struct rdac_queue_data *qdata;
 	u8 *lun_table;
 
-	if (h->ctlr->use_ms10) {
+	if (ctlr->use_ms10) {
 		struct rdac_pg_expanded *rdac_pg;
 
 		data_size = sizeof(struct rdac_pg_expanded);
-		rdac_pg = &h->ctlr->mode_select.expanded;
+		rdac_pg = &ctlr->mode_select.expanded;
 		memset(rdac_pg, 0, data_size);
 		common = &rdac_pg->common;
 		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
 		struct rdac_pg_legacy *rdac_pg;
 
 		data_size = sizeof(struct rdac_pg_legacy);
-		rdac_pg = &h->ctlr->mode_select.legacy;
+		rdac_pg = &ctlr->mode_select.legacy;
 		memset(rdac_pg, 0, data_size);
 		common = &rdac_pg->common;
 		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
 	}
 
 	/* Prepare the command. */
-	if (h->ctlr->use_ms10) {
+	if (ctlr->use_ms10) {
 		cdb[0] = MODE_SELECT_10;
 		cdb[7] = data_size >> 8;
 		cdb[8] = data_size & 0xff;
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index bd9e31e..16fc380 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -48,7 +48,7 @@
 #include <linux/wait.h>
 typedef wait_queue_head_t adpt_wait_queue_head_t;
 #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_t adpt_wait_queue_t;
+typedef wait_queue_entry_t adpt_wait_queue_entry_t;
 
 /*
  * message structures
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index d390325..659ab48 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
 		cmd = list_first_entry_or_null(&vscsi->free_cmd,
 					       struct ibmvscsis_cmd, list);
 		if (cmd) {
+			if (cmd->abort_cmd)
+				cmd->abort_cmd = NULL;
 			cmd->flags &= ~(DELAY_SEND);
 			list_del(&cmd->list);
 			cmd->iue = iue;
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 				if (cmd->abort_cmd) {
 					retry = true;
 					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+					cmd->abort_cmd = NULL;
 				}
 
 				/*
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 					list_del(&cmd->list);
 					ibmvscsis_free_cmd_resources(vscsi,
 								     cmd);
+					/*
+					 * With an op successfully aborted
+					 * through LIO, we want to increment
+					 * the vscsi credit so that when we
+					 * don't send a rsp to the original
+					 * scsi abort op (h_send_crq), but the
+					 * tm rsp to the abort is sent, the
+					 * credit is correctly sent with the
+					 * abort tm rsp. We need 1 credit for
+					 * the abort tm rsp and 1 credit for
+					 * the aborted scsi op, so increment
+					 * here. We also want to increment
+					 * the credit here to make sure the
+					 * cmd is actually released first;
+					 * otherwise the client will think it
+					 * can send a new cmd, and we could
+					 * find ourselves short of cmd elements.
+					 */
+					vscsi->credit += 1;
 				} else {
 					iue = cmd->iue;
 
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi,
 
 	rsp->opcode = SRP_RSP;
 
-	if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-		rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-	else
-		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
 	rsp->tag = cmd->rsp.tag;
 	rsp->flags = 0;
 
@@ -3915,10 +3934,6 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
 
 static void ibmvscsis_dev_release(struct device *dev) {};
 
-static struct class_attribute ibmvscsis_class_attrs[] = {
-	__ATTR_NULL,
-};
-
 static struct device_attribute dev_attr_system_id =
 	__ATTR(system_id, S_IRUGO, system_id_show, NULL);
 
@@ -3938,7 +3953,6 @@ ATTRIBUTE_GROUPS(ibmvscsis_dev);
 static struct class ibmvscsis_class = {
 	.name		= "ibmvscsis",
 	.dev_release	= ibmvscsis_dev_release,
-	.class_attrs	= ibmvscsis_class_attrs,
 	.dev_groups	= ibmvscsis_dev_groups,
 };
 
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 3419e1b..6762130 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
 static uint32_t ips_statupd_morpheus(ips_ha_t *);
 static ips_scb_t *ips_getscb(ips_ha_t *);
 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
-static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
 static void ips_putq_copp_tail(ips_copp_queue_t *,
 				      ips_copp_wait_item_t *);
 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
 					  struct scsi_cmnd *);
 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
 						     ips_copp_wait_item_t *);
@@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
 {
 	METHOD_TRACE("ips_putq_wait_tail", 1);
 
@@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
 {
 	struct scsi_cmnd *item;
 
@@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
 /* ASSUMED to be called from within the HA lock                             */
 /*                                                                          */
 /****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
 					  struct scsi_cmnd *item)
 {
 	struct scsi_cmnd *p;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b782bb6..366be3b 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -989,7 +989,7 @@ typedef struct ips_wait_queue {
 	struct scsi_cmnd *head;
 	struct scsi_cmnd *tail;
 	int count;
-} ips_wait_queue_t;
+} ips_wait_queue_entry_t;
 
 typedef struct ips_copp_wait_item {
 	struct scsi_cmnd *scsi_cmd;
@@ -1035,7 +1035,7 @@ typedef struct ips_ha {
    ips_stat_t         sp;                 /* Status packer pointer      */
    struct ips_scb    *scbs;               /* Array of all CCBS          */
    struct ips_scb    *scb_freelist;       /* SCB free list              */
-   ips_wait_queue_t   scb_waitlist;       /* Pending SCB list           */
+   ips_wait_queue_entry_t   scb_waitlist;       /* Pending SCB list           */
    ips_copp_queue_t   copp_waitlist;      /* Pending PT list            */
    ips_scb_queue_t    scb_activelist;     /* Active SCB list            */
    IPS_IO_CMD        *dummy;              /* dummy command              */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8912767..da669dc 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
 void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
 int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
 		     struct serv_parm *, uint32_t, int);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
 void lpfc_more_plogi(struct lpfc_vport *);
 void lpfc_more_adisc(struct lpfc_vport *);
 void lpfc_end_rscn(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f2cd19c..24ce96d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 					 ndlp, did, ndlp->nlp_fc4_type,
 					 FC_TYPE_FCP, FC_TYPE_NVME);
 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+			lpfc_issue_els_prli(vport, ndlp, 0);
 		}
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-		lpfc_issue_els_prli(vport, ndlp, 0);
 	} else
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 				 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bff3de0..f74cb01 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * associated with a LPFC_NODELIST entry. This
  * routine effectively results in a "software abort".
  */
-int
+void
 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 
 	pring = lpfc_phba_elsring(phba);
 
+	/* In case of error recovery path, we might have a NULL pring here */
+	if (!pring)
+		return;
+
 	/* Abort outstanding I/O on NPort <nlp_DID> */
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
 			 "2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
-	return 0;
 }
 
 static int
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 074a6b5..518b15e 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -799,8 +799,8 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
 	}
 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
-	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
-			 ctxp->state, 0);
+	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
+			 ctxp->state, aborting);
 
 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
 
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 8a1b948..a4f28b7 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -446,7 +446,7 @@ static void _put_request(struct request *rq)
 	 *       code paths.
 	 */
 	if (unlikely(rq->bio))
-		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+		blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
 	else
 		blk_put_request(rq);
 }
@@ -474,10 +474,10 @@ void osd_end_request(struct osd_request *or)
 EXPORT_SYMBOL(osd_end_request);
 
 static void _set_error_resid(struct osd_request *or, struct request *req,
-			     int error)
+			     blk_status_t error)
 {
 	or->async_error = error;
-	or->req_errors = scsi_req(req)->result ? : error;
+	or->req_errors = scsi_req(req)->result;
 	or->sense_len = scsi_req(req)->sense_len;
 	if (or->sense_len)
 		memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
@@ -489,17 +489,19 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
 
 int osd_execute_request(struct osd_request *or)
 {
-	int error;
-
 	blk_execute_rq(or->request->q, NULL, or->request, 0);
-	error = scsi_req(or->request)->result ? -EIO : 0;
 
-	_set_error_resid(or, or->request, error);
-	return error;
+	if (scsi_req(or->request)->result) {
+		_set_error_resid(or, or->request, BLK_STS_IOERR);
+		return -EIO;
+	}
+
+	_set_error_resid(or, or->request, BLK_STS_OK);
+	return 0;
 }
 EXPORT_SYMBOL(osd_execute_request);
 
-static void osd_request_async_done(struct request *req, int error)
+static void osd_request_async_done(struct request *req, blk_status_t error)
 {
 	struct osd_request *or = req->end_io_data;
 
@@ -1572,13 +1574,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 			flags);
 	if (IS_ERR(req))
 		return req;
-	scsi_req_init(req);
 
 	for_each_bio(bio) {
-		struct bio *bounce_bio = bio;
-
-		blk_queue_bounce(req->q, &bounce_bio);
-		ret = blk_rq_append_bio(req, bounce_bio);
+		ret = blk_rq_append_bio(req, bio);
 		if (ret)
 			return ERR_PTR(ret);
 	}
@@ -1617,7 +1615,6 @@ static int _init_blk_request(struct osd_request *or,
 				ret = PTR_ERR(req);
 				goto out;
 			}
-			scsi_req_init(req);
 			or->in.req = or->request->next_rq = req;
 		}
 	} else if (has_in)
@@ -1914,7 +1911,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		/* scsi sense is Empty, the request was never issued to target
 		 * linux return code might tell us what happened.
 		 */
-		if (or->async_error == -ENOMEM)
+		if (or->async_error == BLK_STS_RESOURCE)
 			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
 		else
 			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 67cbed9..929ee7e 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -320,7 +320,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
 
 
 /* Wakeup from interrupt */
-static void osst_end_async(struct request *req, int update)
+static void osst_end_async(struct request *req, blk_status_t status)
 {
 	struct scsi_request *rq = scsi_req(req);
 	struct osst_request *SRpnt = req->end_io_data;
@@ -373,7 +373,6 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
 		return DRIVER_ERROR << 24;
 
 	rq = scsi_req(req);
-	scsi_req_init(req);
 	req->rq_flags |= RQF_QUIET;
 
 	SRpnt->bio = NULL;
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 5ca3e8c..32632c9 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -38,7 +38,7 @@ struct qedi_endpoint;
 #define QEDI_MAX_ISCSI_TASK		4096
 #define QEDI_MAX_TASK_NUM		0x0FFF
 #define QEDI_MAX_ISCSI_CONNS_PER_HBA	1024
-#define QEDI_ISCSI_MAX_BDS_PER_CMD	256	/* Firmware max BDs is 256 */
+#define QEDI_ISCSI_MAX_BDS_PER_CMD	255	/* Firmware max BDs is 255 */
 #define MAX_OUSTANDING_TASKS_PER_CON	1024
 
 #define QEDI_MAX_BD_LEN		0xffff
@@ -63,6 +63,7 @@ struct qedi_endpoint;
 #define QEDI_PAGE_MASK		(~((QEDI_PAGE_SIZE) - 1))
 
 #define QEDI_PAGE_SIZE		4096
+#define QEDI_HW_DMA_BOUNDARY	0xfff
 #define QEDI_PATH_HANDLE	0xFE0000000UL
 
 struct qedi_uio_ctrl {
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d6978cb..507512c 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
 			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
-		WARN_ON(1);
 	}
 }
 
@@ -1494,6 +1493,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
 	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
 	ep = qedi_conn->ep;
+	if (!ep)
+		return -ENODEV;
 
 	tid = qedi_get_task_idx(qedi);
 	if (tid == -1)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3548d46..87f0af3 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
 	.this_id = -1,
 	.sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
 	.max_sectors = 0xffff,
+	.dma_boundary = QEDI_HW_DMA_BOUNDARY,
 	.cmd_per_lun = 128,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = qedi_shost_attrs,
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 
 	iscsi_cid = (u32)path_data->handle;
 	qedi_ep = qedi->ep_tbl[iscsi_cid];
-	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+	if (!qedi_ep) {
+		ret = -EINVAL;
+		goto set_path_exit;
+	}
 
 	if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
 		QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92775a8..879d3b7 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
 
 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
 {
+	if (udev->uctrl) {
+		free_page((unsigned long)udev->uctrl);
+		udev->uctrl = NULL;
+	}
+
 	if (udev->ll2_ring) {
 		free_page((unsigned long)udev->ll2_ring);
 		udev->ll2_ring = NULL;
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev)
 	__qedi_free_uio_rings(udev);
 
 	pci_dev_put(udev->pdev);
-	kfree(udev->uctrl);
 	kfree(udev);
 }
 
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
 	if (udev->ll2_ring || udev->ll2_buf)
 		return rc;
 
+	/* Memory for control area.  */
+	udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!udev->uctrl)
+		return -ENOMEM;
+
 	/* Allocating memory for LL2 ring  */
 	udev->ll2_ring_size = QEDI_PAGE_SIZE;
 	udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
@@ -237,7 +246,6 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 {
 	struct qedi_uio_dev *udev = NULL;
-	struct qedi_uio_ctrl *uctrl = NULL;
 	int rc = 0;
 
 	list_for_each_entry(udev, &qedi_udev_list, list) {
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 		goto err_udev;
 	}
 
-	uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
-	if (!uctrl) {
-		rc = -ENOMEM;
-		goto err_uctrl;
-	}
-
 	udev->uio_dev = -1;
 
 	udev->qedi = qedi;
 	udev->pdev = qedi->pdev;
-	udev->uctrl = uctrl;
 
 	rc = __qedi_alloc_uio_rings(udev);
 	if (rc)
-		goto err_uio_rings;
+		goto err_uctrl;
 
 	list_add(&udev->list, &qedi_udev_list);
 
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
 	udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
 	return 0;
 
- err_uio_rings:
-	kfree(uctrl);
  err_uctrl:
 	kfree(udev);
  err_udev:
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 	qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
 	qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
 	qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+	qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
 
 	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
 		if ((1 << log_page_size) == PAGE_SIZE)
@@ -1498,11 +1499,9 @@ int qedi_get_task_idx(struct qedi_ctx *qedi)
 
 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
 {
-	if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+	if (!test_and_clear_bit(idx, qedi->task_idx_map))
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "FW task context, already cleared, tid=0x%x\n", idx);
-		WARN_ON(1);
-	}
 }
 
 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 634254a..8a29fb0 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3390,7 +3390,7 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
 	 *    On PCI bus, order reverses and write of 6 posts, then index 5,
 	 *       causing chip to issue full queue of stale commands
 	 * The mmiowb() prevents future writes from crossing the barrier.
-	 * See Documentation/DocBook/deviceiobook.tmpl for more information.
+	 * See Documentation/driver-api/device-io.rst for more information.
 	 */
 	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
 	mmiowb();
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 16d1cd5..ca3420d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
 		return -EIO;
 	}
 
+	memset(&elreq, 0, sizeof(elreq));
+
 	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
 		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
 		DMA_TO_DEVICE);
@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
-	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
-	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
-	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
-		elreq.options == EXTERNAL_LOOPBACK) {
+	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+	     req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+	    elreq.options == EXTERNAL_LOOPBACK) {
 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
 		ql_dbg(ql_dbg_user, vha, 0x701e,
 		    "BSG request type: %s.\n", type);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 51b4179..88748a6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	/* Mailbox registers. */
 	mbx_reg = &reg->mailbox0;
-	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
 	/* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	/* Mailbox registers. */
 	mbx_reg = &reg->mailbox0;
-	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
 	/* Transfer sequence registers. */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae11901..eddbc12 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3425,6 +3425,7 @@ struct qla_hw_data {
 	uint8_t 	max_req_queues;
 	uint8_t 	max_rsp_queues;
 	uint8_t		max_qpairs;
+	uint8_t		num_qpairs;
 	struct qla_qpair *base_qpair;
 	struct qla_npiv_entry *npiv_info;
 	uint16_t	nvram_npiv_size;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0347433..0391fc3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
 		/* Assign available que pair id */
 		mutex_lock(&ha->mq_lock);
 		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
-		if (qpair_id >= ha->max_qpairs) {
+		if (ha->num_qpairs >= ha->max_qpairs) {
 			mutex_unlock(&ha->mq_lock);
 			ql_log(ql_log_warn, vha, 0x0183,
 			    "No resources to create additional q pair.\n");
 			goto fail_qid_map;
 		}
+		ha->num_qpairs++;
 		set_bit(qpair_id, ha->qpair_qid_map);
 		ha->queue_pair_map[qpair_id] = qpair;
 		qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
 fail_msix:
 	ha->queue_pair_map[qpair_id] = NULL;
 	clear_bit(qpair_id, ha->qpair_qid_map);
+	ha->num_qpairs--;
 	mutex_unlock(&ha->mq_lock);
 fail_qid_map:
 	kfree(qpair);
@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 	mutex_lock(&ha->mq_lock);
 	ha->queue_pair_map[qpair->id] = NULL;
 	clear_bit(qpair->id, ha->qpair_qid_map);
+	ha->num_qpairs--;
 	list_del(&qpair->qp_list_elem);
 	if (list_empty(&vha->qp_list))
 		vha->flags.qpairs_available = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 66df6ce..c61a6a8 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
 }
 
 static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
-	struct qla_tgt_cmd *tc)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 {
-	struct dsd_dma *dsd_ptr, *tdsd_ptr;
-	struct crc_context *ctx;
-
-	if (sp)
-		ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
-	else if (tc)
-		ctx = (struct crc_context *)tc->ctx;
-	else {
-		BUG();
-		return;
-	}
+	struct dsd_dma *dsd, *tdsd;
 
 	/* clean up allocated prev pool */
-	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
-	    &ctx->dsd_list, list) {
-		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
-		    dsd_ptr->dsd_list_dma);
-		list_del(&dsd_ptr->list);
-		kfree(dsd_ptr);
+	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
+		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
+		    dsd->dsd_list_dma);
+		list_del(&dsd->list);
+		kfree(dsd);
 	}
 	INIT_LIST_HEAD(&ctx->dsd_list);
 }
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index aac0350..2572121 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3282,7 +3282,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	}
 
 	/* Enable MSI-X vector for response queue update for queue 0 */
-	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		if (ha->msixbase && ha->mqiobase &&
 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
 		     ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a113ab3..cba1fc5 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 				qlt_update_host_map(vha, id);
 			}
 
-			fc_host_port_name(vha->host) =
-			    wwn_to_u64(vha->port_name);
-
-			if (qla_ini_mode_enabled(vha))
-				ql_dbg(ql_dbg_mbx, vha, 0x1018,
-				    "FA-WWN portname %016llx (%x)\n",
-				    fc_host_port_name(vha->host),
-				    rptid_entry->vp_status);
-
 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 		} else {
@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
 
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
-	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
+	/* BIT_6 specifies 64bit address */
+	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
 	if (IS_CNA_CAPABLE(ha)) {
-		mcp->mb[1] |= BIT_15;
 		mcp->mb[2] = vha->fcoe_fcf_idx;
 	}
 	mcp->mb[16] = LSW(mreq->rcv_dma);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1c79579..79f0502 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr)
 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 	}
 
+	if (!ctx)
+		goto end;
+
 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 		/* List assured to be having elements */
-		qla2x00_clean_dsd_pool(ha, sp, NULL);
+		qla2x00_clean_dsd_pool(ha, ctx);
 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 	}
 
 	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-		dma_pool_free(ha->dl_dma_pool, ctx,
-		    ((struct crc_context *)ctx)->crc_ctx_dma);
+		struct crc_context *ctx0 = ctx;
+
+		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+		struct ct6_dsd *ctx1 = ctx;
 
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
-			ctx1->fcp_cmnd_dma);
+		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 		mempool_free(ctx1, ha->ctx_mempool);
 	}
 
+end:
 	CMD_SP(cmd) = NULL;
 	qla2x00_rel_sp(sp);
 }
@@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
 		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
 	}
 
+	if (!ctx)
+		goto end;
+
 	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 		/* List assured to be having elements */
-		qla2x00_clean_dsd_pool(ha, sp, NULL);
+		qla2x00_clean_dsd_pool(ha, ctx);
 		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
 	}
 
 	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-		dma_pool_free(ha->dl_dma_pool, ctx,
-		    ((struct crc_context *)ctx)->crc_ctx_dma);
+		struct crc_context *ctx0 = ctx;
+
+		dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
 		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
-
+		struct ct6_dsd *ctx1 = ctx;
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
 		mempool_free(ctx1, ha->ctx_mempool);
 	}
-
+end:
 	CMD_SP(cmd) = NULL;
 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
@@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 void
 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 {
-	int que, cnt;
+	int que, cnt, status;
 	unsigned long flags;
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 					 */
 					sp_get(sp);
 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
-					qla2xxx_eh_abort(GET_CMD_SP(sp));
+					status = qla2xxx_eh_abort(GET_CMD_SP(sp));
 					spin_lock_irqsave(&ha->hardware_lock, flags);
+					/* Get rid of extra reference if immediate exit
+					 * from qla2xxx_eh_abort */
+					if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
+						atomic_dec(&sp->ref_count);
 				}
 				req->outstanding_cmds[cnt] = NULL;
 				sp->done(sp, res);
@@ -2623,10 +2635,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	if (mem_only) {
 		if (pci_enable_device_mem(pdev))
-			goto probe_out;
+			return ret;
 	} else {
 		if (pci_enable_device(pdev))
-			goto probe_out;
+			return ret;
 	}
 
 	/* This may fail but that's ok */
@@ -2636,7 +2648,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!ha) {
 		ql_log_pci(ql_log_fatal, pdev, 0x0009,
 		    "Unable to allocate memory for ha.\n");
-		goto probe_out;
+		goto disable_device;
 	}
 	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
 	    "Memory allocated for ha=%p.\n", ha);
@@ -3254,7 +3266,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_release_selected_regions(ha->pdev, ha->bars);
 	kfree(ha);
 
-probe_out:
+disable_device:
 	pci_disable_device(pdev);
 	return ret;
 }
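
The probe_out -> disable_device relabelling above also straightens out the unwind order: a failure from pci_enable_device()/pci_enable_device_mem() now returns immediately instead of jumping to a label that would disable a device that was never enabled. A minimal sketch of the corrected shape, with illustrative names rather than the real qla2xxx ones:

#include <linux/pci.h>
#include <linux/slab.h>

struct example_hw { int dummy; };		/* stand-in private data */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct example_hw *hw;
	int ret = -ENOMEM;

	if (pci_enable_device(pdev))
		return -ENODEV;			/* nothing to unwind yet */

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto disable_device;		/* undo only what already succeeded */

	pci_set_drvdata(pdev, hw);
	return 0;

disable_device:
	pci_disable_device(pdev);
	return ret;
}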
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0e03ca2..e766d84 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2245,11 +2245,13 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
 			cmd->dma_data_direction);
 
-	if (cmd->ctx_dsd_alloced)
-		qla2x00_clean_dsd_pool(ha, NULL, cmd);
+	if (!cmd->ctx)
+		return;
 
-	if (cmd->ctx)
-		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
+	if (cmd->ctx_dsd_alloced)
+		qla2x00_clean_dsd_pool(ha, cmd->ctx);
+
+	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
 }
 
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8a58ef3..c197972 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
 		goto done;
 	}
 
-	if (end <= start || start == 0 || end == 0) {
+	if (end < start || start == 0 || end == 0) {
 		ql_dbg(ql_dbg_misc, vha, 0xd023,
 		    "%s: unusable range (start=%x end=%x)\n", __func__,
 		    ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 17249c3..3be980d 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -245,7 +245,7 @@ struct sdebug_dev_info {
 	unsigned int channel;
 	unsigned int target;
 	u64 lun;
-	uuid_be lu_name;
+	uuid_t lu_name;
 	struct sdebug_host_info *sdbg_host;
 	unsigned long uas_bm[1];
 	atomic_t num_in_q;
@@ -965,7 +965,7 @@ static const u64 naa3_comp_c = 0x3111111000000000ULL;
 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
 			  int target_dev_id, int dev_id_num,
 			  const char *dev_id_str, int dev_id_str_len,
-			  const uuid_be *lu_name)
+			  const uuid_t *lu_name)
 {
 	int num, port_a;
 	char b[32];
@@ -1404,7 +1404,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
 	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
 	if (sdebug_vpd_use_hostno == 0)
-		arr[5] = 0x10; /* claim: implicit TGPS */
+		arr[5] |= 0x10; /* claim: implicit TPGS */
 	arr[6] = 0x10; /* claim: MultiP */
 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
 	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
@@ -3568,7 +3568,7 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
 }
 
 static bool got_shared_uuid;
-static uuid_be shared_uuid;
+static uuid_t shared_uuid;
 
 static struct sdebug_dev_info *sdebug_device_create(
 			struct sdebug_host_info *sdbg_host, gfp_t flags)
@@ -3578,12 +3578,12 @@ static struct sdebug_dev_info *sdebug_device_create(
 	devip = kzalloc(sizeof(*devip), flags);
 	if (devip) {
 		if (sdebug_uuid_ctl == 1)
-			uuid_be_gen(&devip->lu_name);
+			uuid_gen(&devip->lu_name);
 		else if (sdebug_uuid_ctl == 2) {
 			if (got_shared_uuid)
 				devip->lu_name = shared_uuid;
 			else {
-				uuid_be_gen(&shared_uuid);
+				uuid_gen(&shared_uuid);
 				got_shared_uuid = true;
 				devip->lu_name = shared_uuid;
 			}
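
The scsi_debug conversion above follows the kernel-wide UUID API consolidation: the old uuid_be type and uuid_be_gen() are replaced by uuid_t and uuid_gen() from <linux/uuid.h>. A small sketch of the replacement API, with illustrative names (not code from this patch):

#include <linux/printk.h>
#include <linux/uuid.h>

static uuid_t example_lu_name;

static void example_assign_lu_name(bool shared, const uuid_t *shared_name)
{
	if (shared)
		example_lu_name = *shared_name;	/* uuid_t copies by plain assignment */
	else
		uuid_gen(&example_lu_name);	/* random (v4) UUID */

	pr_debug("lu_name %pUb\n", &example_lu_name);
}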
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ecc07da..304a715 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1874,7 +1874,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	}
 }
 
-static void eh_lock_door_done(struct request *req, int uptodate)
+static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
 	__blk_put_request(req->q, req);
 }
@@ -1903,7 +1903,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	if (IS_ERR(req))
 		return;
 	rq = scsi_req(req);
-	scsi_req_init(req);
 
 	rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
 	rq->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 99e16ac..550e29f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -250,7 +250,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	if (IS_ERR(req))
 		return ret;
 	rq = scsi_req(req);
-	scsi_req_init(req);
 
 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
 					buffer, bufflen, __GFP_RECLAIM))
@@ -635,7 +634,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
 	cmd->request->next_rq->special = NULL;
 }
 
-static bool scsi_end_request(struct request *req, int error,
+static bool scsi_end_request(struct request *req, blk_status_t error,
 		unsigned int bytes, unsigned int bidi_bytes)
 {
 	struct scsi_cmnd *cmd = req->special;
@@ -694,45 +693,28 @@ static bool scsi_end_request(struct request *req, int error,
  * @cmd:	SCSI command (unused)
  * @result:	scsi error code
  *
- * Translate SCSI error code into standard UNIX errno.
- * Return values:
- * -ENOLINK	temporary transport failure
- * -EREMOTEIO	permanent target failure, do not retry
- * -EBADE	permanent nexus failure, retry on other path
- * -ENOSPC	No write space available
- * -ENODATA	Medium error
- * -EIO		unspecified I/O error
+ * Translate SCSI error code into block errors.
  */
-static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
+		int result)
 {
-	int error = 0;
-
-	switch(host_byte(result)) {
+	switch (host_byte(result)) {
 	case DID_TRANSPORT_FAILFAST:
-		error = -ENOLINK;
-		break;
+		return BLK_STS_TRANSPORT;
 	case DID_TARGET_FAILURE:
 		set_host_byte(cmd, DID_OK);
-		error = -EREMOTEIO;
-		break;
+		return BLK_STS_TARGET;
 	case DID_NEXUS_FAILURE:
-		set_host_byte(cmd, DID_OK);
-		error = -EBADE;
-		break;
+		return BLK_STS_NEXUS;
 	case DID_ALLOC_FAILURE:
 		set_host_byte(cmd, DID_OK);
-		error = -ENOSPC;
-		break;
+		return BLK_STS_NOSPC;
 	case DID_MEDIUM_ERROR:
 		set_host_byte(cmd, DID_OK);
-		error = -ENODATA;
-		break;
+		return BLK_STS_MEDIUM;
 	default:
-		error = -EIO;
-		break;
+		return BLK_STS_IOERR;
 	}
-
-	return error;
 }
 
 /*
@@ -769,7 +751,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	int result = cmd->result;
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
-	int error = 0;
+	blk_status_t error = BLK_STS_OK;
 	struct scsi_sense_hdr sshdr;
 	bool sense_valid = false;
 	int sense_deferred = 0, level = 0;
@@ -808,7 +790,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			 * both sides at once.
 			 */
 			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
-			if (scsi_end_request(req, 0, blk_rq_bytes(req),
+			if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
 					blk_rq_bytes(req->next_rq)))
 				BUG();
 			return;
@@ -850,7 +832,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			scsi_print_sense(cmd);
 		result = 0;
 		/* for passthrough error may be set */
-		error = 0;
+		error = BLK_STS_OK;
 	}
 
 	/*
@@ -922,18 +904,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				action = ACTION_REPREP;
 			} else if (sshdr.asc == 0x10) /* DIX */ {
 				action = ACTION_FAIL;
-				error = -EILSEQ;
+				error = BLK_STS_PROTECTION;
 			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
 			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
 				action = ACTION_FAIL;
-				error = -EREMOTEIO;
+				error = BLK_STS_TARGET;
 			} else
 				action = ACTION_FAIL;
 			break;
 		case ABORTED_COMMAND:
 			action = ACTION_FAIL;
 			if (sshdr.asc == 0x10) /* DIF */
-				error = -EILSEQ;
+				error = BLK_STS_PROTECTION;
 			break;
 		case NOT_READY:
 			/* If the device is in the process of becoming
@@ -1134,6 +1116,20 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 }
 EXPORT_SYMBOL(scsi_init_io);
 
+/**
+ * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ *
+ * Called from inside blk_get_request().
+ */
+void scsi_initialize_rq(struct request *rq)
+{
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+	scsi_req_init(&cmd->req);
+}
+EXPORT_SYMBOL(scsi_initialize_rq);
+
+/* Called after a request has been started. */
 void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
 	void *buf = cmd->sense_buffer;
@@ -1829,15 +1825,15 @@ static void scsi_request_fn(struct request_queue *q)
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
 	switch (ret) {
 	case BLKPREP_OK:
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	case BLKPREP_DEFER:
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	default:
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -1909,7 +1905,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
 	blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
@@ -1917,14 +1913,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	int ret;
+	blk_status_t ret;
 	int reason;
 
 	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret != BLK_STS_OK)
 		goto out;
 
-	ret = BLK_MQ_RQ_QUEUE_BUSY;
+	ret = BLK_STS_RESOURCE;
 	if (!get_device(&sdev->sdev_gendev))
 		goto out;
 
@@ -1937,7 +1933,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = prep_to_mq(scsi_mq_prep_fn(req));
-		if (ret != BLK_MQ_RQ_QUEUE_OK)
+		if (ret != BLK_STS_OK)
 			goto out_dec_host_busy;
 		req->rq_flags |= RQF_DONTPREP;
 	} else {
@@ -1955,11 +1951,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	reason = scsi_dispatch_cmd(cmd);
 	if (reason) {
 		scsi_set_blocked(cmd, reason);
-		ret = BLK_MQ_RQ_QUEUE_BUSY;
+		ret = BLK_STS_RESOURCE;
 		goto out_dec_host_busy;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_dec_host_busy:
 	atomic_dec(&shost->host_busy);
@@ -1972,12 +1968,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	put_device(&sdev->sdev_gendev);
 out:
 	switch (ret) {
-	case BLK_MQ_RQ_QUEUE_BUSY:
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_RESOURCE:
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
 			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 		break;
-	case BLK_MQ_RQ_QUEUE_ERROR:
+	default:
 		/*
 		 * Make sure to release all allocated ressources when
 		 * we hit an error, as we will never see this command
@@ -1986,8 +1984,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 		if (req->rq_flags & RQF_DONTPREP)
 			scsi_mq_uninit_cmd(cmd);
 		break;
-	default:
-		break;
 	}
 	return ret;
 }
@@ -2057,6 +2053,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
 
+	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
@@ -2139,6 +2137,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	q->request_fn = scsi_request_fn;
 	q->init_rq_fn = scsi_init_rq;
 	q->exit_rq_fn = scsi_exit_rq;
+	q->initialize_rq_fn = scsi_initialize_rq;
 
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
@@ -2163,6 +2162,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
 #endif
 	.init_request	= scsi_init_request,
 	.exit_request	= scsi_exit_request,
+	.initialize_rq_fn = scsi_initialize_rq,
 	.map_queues	= scsi_map_queues,
 };
 
@@ -2977,7 +2977,7 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 		if (wait)
 			blk_mq_quiesce_queue(q);
 		else
-			blk_mq_stop_hw_queues(q);
+			blk_mq_quiesce_queue_nowait(q);
 	} else {
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
@@ -3031,7 +3031,7 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
 		return -EINVAL;
 
 	if (q->mq_ops) {
-		blk_mq_start_stopped_hw_queues(q, false);
+		blk_mq_unquiesce_queue(q);
 	} else {
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_start_queue(q);
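
The scsi_lib.c changes above are part of the tree-wide move from the BLK_MQ_RQ_QUEUE_* return codes and negative errnos to the typed blk_status_t. For reference, a minimal sketch of a ->queue_rq() handler under the new contract; the driver names and the busy flag are illustrative, not from this patch:

#include <linux/blk-mq.h>

struct example_hw_queue { bool busy; };		/* hypothetical per-hctx state */

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct example_hw_queue *hwq = hctx->driver_data;

	if (hwq->busy)
		return BLK_STS_RESOURCE;	/* the block layer retries later */

	blk_mq_start_request(bd->rq);
	/* hand bd->rq to the hardware here */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};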
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f7128f..6997957 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1051,10 +1051,11 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *     allocate and set it up by calling scsi_add_lun.
  *
  * Return:
- *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
- *     SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
+ *
+ *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
+ *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
  *         attached at the LUN
- *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
+ *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				  u64 lun, int *bflagsp,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d4cf32d..1df7745 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2914,16 +2914,18 @@ EXPORT_SYMBOL(fc_remote_port_add);
  * port is no longer part of the topology. Note: Although a port
  * may no longer be part of the topology, it may persist in the remote
  * ports displayed by the fc_host. We do this under 2 conditions:
+ *
  * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
- *   This allows the port to temporarily disappear, then reappear without
- *   disrupting the SCSI device tree attached to it. During the "blocked"
- *   period the port will still exist.
+ *    This allows the port to temporarily disappear, then reappear without
+ *    disrupting the SCSI device tree attached to it. During the "blocked"
+ *    period the port will still exist.
+ *
  * 2) If the port was a scsi target and disappears for longer than we
- *   expect, we'll delete the port and the tear down the SCSI device tree
- *   attached to it. However, we want to semi-persist the target id assigned
- *   to that port if it eventually does exist. The port structure will
- *   remain (although with minimal information) so that the target id
- *   bindings remails.
+ *    expect, we'll delete the port and tear down the SCSI device tree
+ *    attached to it. However, we want to semi-persist the target id assigned
+ *    to that port if it eventually does exist. The port structure will
+ *    remain (although with minimal information) so that the target id
+ *    bindings remain.
  *
  * If the remote port is not an FCP Target, it will be fully torn down
  * and deallocated, including the fc_remote_port class device.
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0ebe2f1..5006a65 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
 #include <linux/bsg.h>
 
 #include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_request.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
@@ -172,7 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
 			    struct sas_rphy *rphy)
 {
 	struct request *req;
-	int ret;
+	blk_status_t ret;
 	int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
 	while ((req = blk_fetch_request(q)) != NULL) {
@@ -230,6 +231,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return -ENOMEM;
+	q->initialize_rq_fn = scsi_initialize_rq;
 	q->cmd_size = sizeof(struct scsi_request);
 
 	if (rphy) {
@@ -249,6 +251,11 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	if (error)
 		goto out_cleanup_queue;
 
+	/*
+	 * by default assume old behaviour and bounce for any highmem page
+	 */
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
 	error = bsg_register_queue(q, dev, name, release);
 	if (error)
 		goto out_cleanup_queue;
@@ -264,6 +271,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 		q->queuedata = shost;
 
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	return 0;
 
 out_cleanup_queue:
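
With q->initialize_rq_fn pointed at scsi_initialize_rq() as above, blk_get_request() now returns a request whose embedded scsi_request is already initialized, which is why the explicit scsi_req_init() calls are dropped throughout this series. A hedged sketch of the resulting allocation pattern; the function name and the TEST UNIT READY payload are only for illustration:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_request.h>

static int example_send_tur(struct request_queue *q)
{
	struct request *rq;
	struct scsi_request *req;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	req = scsi_req(rq);			/* already set up by ->initialize_rq_fn */
	req->cmd[0] = TEST_UNIT_READY;
	req->cmd_len = 6;

	blk_execute_rq(q, NULL, rq, 0);
	ret = req->result ? -EIO : 0;
	blk_put_request(rq);

	return ret;
}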
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 910f4a7..3127346 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -116,8 +116,8 @@ EXPORT_SYMBOL(scsicam_bios_param);
  * @hds: put heads here
  * @secs: put sectors here
  *
- * Description: determine the BIOS mapping/geometry used to create the partition
- *      table, storing the results in *cyls, *hds, and *secs 
+ * Determine the BIOS mapping/geometry used to create the partition
+ * table, storing the results in @cyls, @hds, and @secs
  *
  * Returns: -1 on failure, 0 on success.
  */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 82c33a6..21225d6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, int uptodate);
+static void sg_rq_end_io(struct request *rq, blk_status_t status);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -808,7 +808,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 	if (atomic_read(&sdp->detaching)) {
 		if (srp->bio) {
 			scsi_req_free_cmd(scsi_req(srp->rq));
-			blk_end_request_all(srp->rq, -EIO);
+			blk_end_request_all(srp->rq, BLK_STS_IOERR);
 			srp->rq = NULL;
 		}
 
@@ -1300,7 +1300,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * level when a command is completed (or has failed).
  */
 static void
-sg_rq_end_io(struct request *rq, int uptodate)
+sg_rq_end_io(struct request *rq, blk_status_t status)
 {
 	struct sg_request *srp = rq->end_io_data;
 	struct scsi_request *req = scsi_req(rq);
@@ -1732,8 +1732,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 	}
 	req = scsi_req(rq);
 
-	scsi_req_init(rq);
-
 	if (hp->cmd_len > BLK_MAX_CDB)
 		req->cmd = long_cmdp;
 	memcpy(req->cmd, cmd, hp->cmd_len);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1ea34d6..8e5013d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -511,7 +511,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
 	atomic64_dec(&STp->stats->in_flight);
 }
 
-static void st_scsi_execute_end(struct request *req, int uptodate)
+static void st_scsi_execute_end(struct request *req, blk_status_t status)
 {
 	struct st_request *SRpnt = req->end_io_data;
 	struct scsi_request *rq = scsi_req(req);
@@ -549,7 +549,6 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	if (IS_ERR(req))
 		return DRIVER_ERROR << 24;
 	rq = scsi_req(req);
-	scsi_req_init(req);
 	req->rq_flags |= RQF_QUIET;
 
 	mdata->null_mapped = 1;
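
st_scsi_execute_end(), sg_rq_end_io() and eh_lock_door_done() above all adopt the new rq_end_io_fn prototype, which reports a blk_status_t instead of the old 'uptodate' int. A minimal sketch of a completion callback under the new signature (names are illustrative); such a callback is typically passed as the 'done' argument of blk_execute_rq_nowait():

#include <linux/blkdev.h>

static void example_end_io(struct request *rq, blk_status_t status)
{
	if (status != BLK_STS_OK)
		pr_warn("passthrough request failed, status %d\n", (int)status);

	__blk_put_request(rq->q, rq);
}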
diff --git a/drivers/sh/superhyway/superhyway-sysfs.c b/drivers/sh/superhyway/superhyway-sysfs.c
index 5543433..774f31b 100644
--- a/drivers/sh/superhyway/superhyway-sysfs.c
+++ b/drivers/sh/superhyway/superhyway-sysfs.c
@@ -19,7 +19,8 @@ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, ch
 {									\
 	struct superhyway_device *s = to_superhyway_device(dev);	\
 	return sprintf(buf, fmt, s->field);				\
-}
+}									\
+static DEVICE_ATTR_RO(name);
 
 /* VCR flags */
 superhyway_ro_attr(perr_flags, "0x%02x\n", vcr.perr_flags);
@@ -32,14 +33,22 @@ superhyway_ro_attr(top_mb, "0x%02x\n", vcr.top_mb);
 /* Misc */
 superhyway_ro_attr(resource, "0x%08lx\n", resource[0].start);
 
-struct device_attribute superhyway_dev_attrs[] = {
-	__ATTR_RO(perr_flags),
-	__ATTR_RO(merr_flags),
-	__ATTR_RO(mod_vers),
-	__ATTR_RO(mod_id),
-	__ATTR_RO(bot_mb),
-	__ATTR_RO(top_mb),
-	__ATTR_RO(resource),
-	__ATTR_NULL,
+static struct attribute *superhyway_dev_attrs[] = {
+	&dev_attr_perr_flags.attr,
+	&dev_attr_merr_flags.attr,
+	&dev_attr_mod_vers.attr,
+	&dev_attr_mod_id.attr,
+	&dev_attr_bot_mb.attr,
+	&dev_attr_top_mb.attr,
+	&dev_attr_resource.attr,
+	NULL,
 };
 
+static const struct attribute_group superhyway_dev_group = {
+	.attrs = superhyway_dev_attrs,
+};
+
+const struct attribute_group *superhyway_dev_groups[] = {
+	&superhyway_dev_group,
+	NULL,
+};
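
The superhyway_ro_attr() macro now also emits DEVICE_ATTR_RO(name), which is what lets the new attribute array reference &dev_attr_<name>.attr. For perr_flags, DEVICE_ATTR_RO() expands to roughly the following (a sketch of the generic sysfs helper from <linux/device.h>, not text from this patch; perr_flags_show() is the show routine generated by superhyway_ro_attr() above):

static struct device_attribute dev_attr_perr_flags =
	__ATTR(perr_flags, 0444, perr_flags_show, NULL);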
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c
index bb1fb771..348836b 100644
--- a/drivers/sh/superhyway/superhyway.c
+++ b/drivers/sh/superhyway/superhyway.c
@@ -209,7 +209,7 @@ struct bus_type superhyway_bus_type = {
 	.name		= "superhyway",
 	.match		= superhyway_bus_match,
 #ifdef CONFIG_SYSFS
-	.dev_attrs	= superhyway_dev_attrs,
+	.dev_groups	= superhyway_dev_groups,
 #endif
 	.probe		= superhyway_device_probe,
 	.remove		= superhyway_device_remove,
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 309643f..07fc0ac 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,11 +1,13 @@
 menu "SOC (System On Chip) specific Drivers"
 
+source "drivers/soc/actions/Kconfig"
 source "drivers/soc/atmel/Kconfig"
 source "drivers/soc/bcm/Kconfig"
 source "drivers/soc/fsl/Kconfig"
 source "drivers/soc/imx/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/renesas/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
 source "drivers/soc/samsung/Kconfig"
 source "drivers/soc/sunxi/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 824b442..9241125 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the Linux Kernel SOC specific device drivers.
 #
 
+obj-$(CONFIG_ARCH_ACTIONS)	+= actions/
 obj-$(CONFIG_ARCH_AT91)		+= atmel/
 obj-y				+= bcm/
 obj-$(CONFIG_ARCH_DOVE)		+= dove/
@@ -10,7 +11,7 @@
 obj-$(CONFIG_ARCH_MXC)		+= imx/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
-obj-$(CONFIG_ARCH_RENESAS)	+= renesas/
+obj-y				+= renesas/
 obj-$(CONFIG_ARCH_ROCKCHIP)	+= rockchip/
 obj-$(CONFIG_SOC_SAMSUNG)	+= samsung/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
diff --git a/drivers/soc/actions/Kconfig b/drivers/soc/actions/Kconfig
new file mode 100644
index 0000000..9d68b5a
--- /dev/null
+++ b/drivers/soc/actions/Kconfig
@@ -0,0 +1,16 @@
+if ARCH_ACTIONS || COMPILE_TEST
+
+config OWL_PM_DOMAINS_HELPER
+	bool
+
+config OWL_PM_DOMAINS
+	bool "Actions Semi SPS power domains"
+	depends on PM
+	select OWL_PM_DOMAINS_HELPER
+	select PM_GENERIC_DOMAINS
+	help
+	  Say 'y' here to enable support for Smart Power System (SPS)
+	  power-gating on Actions Semiconductor S500 SoC.
+	  If unsure, say 'n'.
+
+endif
diff --git a/drivers/soc/actions/Makefile b/drivers/soc/actions/Makefile
new file mode 100644
index 0000000..1e101b0
--- /dev/null
+++ b/drivers/soc/actions/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_OWL_PM_DOMAINS_HELPER) += owl-sps-helper.o
+obj-$(CONFIG_OWL_PM_DOMAINS) += owl-sps.o
diff --git a/drivers/soc/actions/owl-sps-helper.c b/drivers/soc/actions/owl-sps-helper.c
new file mode 100644
index 0000000..9d7a2c2b
--- /dev/null
+++ b/drivers/soc/actions/owl-sps-helper.c
@@ -0,0 +1,51 @@
+/*
+ * Actions Semi Owl Smart Power System (SPS) shared helpers
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#define OWL_SPS_PG_CTL	0x0
+
+int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable)
+{
+	u32 val;
+	bool ack;
+	int timeout;
+
+	val = readl(base + OWL_SPS_PG_CTL);
+	ack = val & ack_mask;
+	if (ack == enable)
+		return 0;
+
+	if (enable)
+		val |= pwr_mask;
+	else
+		val &= ~pwr_mask;
+
+	writel(val, base + OWL_SPS_PG_CTL);
+
+	for (timeout = 5000; timeout > 0; timeout -= 50) {
+		val = readl(base + OWL_SPS_PG_CTL);
+		if ((val & ack_mask) == (enable ? ack_mask : 0))
+			break;
+		udelay(50);
+	}
+	if (timeout <= 0)
+		return -ETIMEDOUT;
+
+	udelay(10);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(owl_sps_set_pg);
diff --git a/drivers/soc/actions/owl-sps.c b/drivers/soc/actions/owl-sps.c
new file mode 100644
index 0000000..875225bfa
--- /dev/null
+++ b/drivers/soc/actions/owl-sps.c
@@ -0,0 +1,224 @@
+/*
+ * Actions Semi Owl Smart Power System (SPS)
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/actions/owl-sps.h>
+#include <dt-bindings/power/owl-s500-powergate.h>
+
+struct owl_sps_domain_info {
+	const char *name;
+	int pwr_bit;
+	int ack_bit;
+	unsigned int genpd_flags;
+};
+
+struct owl_sps_info {
+	unsigned num_domains;
+	const struct owl_sps_domain_info *domains;
+};
+
+struct owl_sps {
+	struct device *dev;
+	const struct owl_sps_info *info;
+	void __iomem *base;
+	struct genpd_onecell_data genpd_data;
+	struct generic_pm_domain *domains[];
+};
+
+#define to_owl_pd(gpd) container_of(gpd, struct owl_sps_domain, genpd)
+
+struct owl_sps_domain {
+	struct generic_pm_domain genpd;
+	const struct owl_sps_domain_info *info;
+	struct owl_sps *sps;
+};
+
+static int owl_sps_set_power(struct owl_sps_domain *pd, bool enable)
+{
+	u32 pwr_mask, ack_mask;
+
+	ack_mask = BIT(pd->info->ack_bit);
+	pwr_mask = BIT(pd->info->pwr_bit);
+
+	return owl_sps_set_pg(pd->sps->base, pwr_mask, ack_mask, enable);
+}
+
+static int owl_sps_power_on(struct generic_pm_domain *domain)
+{
+	struct owl_sps_domain *pd = to_owl_pd(domain);
+
+	dev_dbg(pd->sps->dev, "%s power on", pd->info->name);
+
+	return owl_sps_set_power(pd, true);
+}
+
+static int owl_sps_power_off(struct generic_pm_domain *domain)
+{
+	struct owl_sps_domain *pd = to_owl_pd(domain);
+
+	dev_dbg(pd->sps->dev, "%s power off", pd->info->name);
+
+	return owl_sps_set_power(pd, false);
+}
+
+static int owl_sps_init_domain(struct owl_sps *sps, int index)
+{
+	struct owl_sps_domain *pd;
+
+	pd = devm_kzalloc(sps->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	pd->info = &sps->info->domains[index];
+	pd->sps = sps;
+
+	pd->genpd.name = pd->info->name;
+	pd->genpd.power_on = owl_sps_power_on;
+	pd->genpd.power_off = owl_sps_power_off;
+	pd->genpd.flags = pd->info->genpd_flags;
+	pm_genpd_init(&pd->genpd, NULL, false);
+
+	sps->genpd_data.domains[index] = &pd->genpd;
+
+	return 0;
+}
+
+static int owl_sps_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct owl_sps_info *sps_info;
+	struct owl_sps *sps;
+	int i, ret;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "no device node\n");
+		return -ENODEV;
+	}
+
+	match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
+	if (!match || !match->data) {
+		dev_err(&pdev->dev, "unknown compatible or missing data\n");
+		return -EINVAL;
+	}
+
+	sps_info = match->data;
+
+	sps = devm_kzalloc(&pdev->dev, sizeof(*sps) +
+			   sps_info->num_domains * sizeof(sps->domains[0]),
+			   GFP_KERNEL);
+	if (!sps)
+		return -ENOMEM;
+
+	sps->base = of_io_request_and_map(pdev->dev.of_node, 0, "owl-sps");
+	if (IS_ERR(sps->base)) {
+		dev_err(&pdev->dev, "failed to map sps registers\n");
+		return PTR_ERR(sps->base);
+	}
+
+	sps->dev = &pdev->dev;
+	sps->info = sps_info;
+	sps->genpd_data.domains = sps->domains;
+	sps->genpd_data.num_domains = sps_info->num_domains;
+
+	for (i = 0; i < sps_info->num_domains; i++) {
+		ret = owl_sps_init_domain(sps, i);
+		if (ret)
+			return ret;
+	}
+
+	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &sps->genpd_data);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add provider (%d)", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct owl_sps_domain_info s500_sps_domains[] = {
+	[S500_PD_VDE] = {
+		.name = "VDE",
+		.pwr_bit = 0,
+		.ack_bit = 16,
+	},
+	[S500_PD_VCE_SI] = {
+		.name = "VCE_SI",
+		.pwr_bit = 1,
+		.ack_bit = 17,
+	},
+	[S500_PD_USB2_1] = {
+		.name = "USB2_1",
+		.pwr_bit = 2,
+		.ack_bit = 18,
+	},
+	[S500_PD_CPU2] = {
+		.name = "CPU2",
+		.pwr_bit = 5,
+		.ack_bit = 21,
+		.genpd_flags = GENPD_FLAG_ALWAYS_ON,
+	},
+	[S500_PD_CPU3] = {
+		.name = "CPU3",
+		.pwr_bit = 6,
+		.ack_bit = 22,
+		.genpd_flags = GENPD_FLAG_ALWAYS_ON,
+	},
+	[S500_PD_DMA] = {
+		.name = "DMA",
+		.pwr_bit = 8,
+		.ack_bit = 12,
+	},
+	[S500_PD_DS] = {
+		.name = "DS",
+		.pwr_bit = 9,
+		.ack_bit = 13,
+	},
+	[S500_PD_USB3] = {
+		.name = "USB3",
+		.pwr_bit = 10,
+		.ack_bit = 14,
+	},
+	[S500_PD_USB2_0] = {
+		.name = "USB2_0",
+		.pwr_bit = 11,
+		.ack_bit = 15,
+	},
+};
+
+static const struct owl_sps_info s500_sps_info = {
+	.num_domains = ARRAY_SIZE(s500_sps_domains),
+	.domains = s500_sps_domains,
+};
+
+static const struct of_device_id owl_sps_of_matches[] = {
+	{ .compatible = "actions,s500-sps", .data = &s500_sps_info },
+	{ }
+};
+
+static struct platform_driver owl_sps_platform_driver = {
+	.probe = owl_sps_probe,
+	.driver = {
+		.name = "owl-sps",
+		.of_match_table = owl_sps_of_matches,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init owl_sps_init(void)
+{
+	return platform_driver_register(&owl_sps_platform_driver);
+}
+postcore_initcall(owl_sps_init);
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 4790094..c1363c8 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -107,6 +107,30 @@ static const struct at91_soc __initconst socs[] = {
 	AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
 		 "sama5d44", "sama5d4"),
 #endif
+#ifdef CONFIG_SOC_SAMV7
+	AT91_SOC(SAME70Q21_CIDR_MATCH, SAME70Q21_EXID_MATCH,
+		 "same70q21", "same7"),
+	AT91_SOC(SAME70Q20_CIDR_MATCH, SAME70Q20_EXID_MATCH,
+		 "same70q20", "same7"),
+	AT91_SOC(SAME70Q19_CIDR_MATCH, SAME70Q19_EXID_MATCH,
+		 "same70q19", "same7"),
+	AT91_SOC(SAMS70Q21_CIDR_MATCH, SAMS70Q21_EXID_MATCH,
+		 "sams70q21", "sams7"),
+	AT91_SOC(SAMS70Q20_CIDR_MATCH, SAMS70Q20_EXID_MATCH,
+		 "sams70q20", "sams7"),
+	AT91_SOC(SAMS70Q19_CIDR_MATCH, SAMS70Q19_EXID_MATCH,
+		 "sams70q19", "sams7"),
+	AT91_SOC(SAMV71Q21_CIDR_MATCH, SAMV71Q21_EXID_MATCH,
+		 "samv71q21", "samv7"),
+	AT91_SOC(SAMV71Q20_CIDR_MATCH, SAMV71Q20_EXID_MATCH,
+		 "samv71q20", "samv7"),
+	AT91_SOC(SAMV71Q19_CIDR_MATCH, SAMV71Q19_EXID_MATCH,
+		 "samv71q19", "samv7"),
+	AT91_SOC(SAMV70Q20_CIDR_MATCH, SAMV70Q20_EXID_MATCH,
+		 "samv70q20", "samv7"),
+	AT91_SOC(SAMV70Q19_CIDR_MATCH, SAMV70Q19_EXID_MATCH,
+		 "samv70q19", "samv7"),
+#endif
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 228efde..a90bd5b 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -88,4 +88,30 @@ at91_soc_init(const struct at91_soc *socs);
 #define SAMA5D43_EXID_MATCH		0x00000003
 #define SAMA5D44_EXID_MATCH		0x00000004
 
+#define SAME70Q21_CIDR_MATCH		0x21020e00
+#define SAME70Q21_EXID_MATCH		0x00000002
+#define SAME70Q20_CIDR_MATCH		0x21020c00
+#define SAME70Q20_EXID_MATCH		0x00000002
+#define SAME70Q19_CIDR_MATCH		0x210d0a00
+#define SAME70Q19_EXID_MATCH		0x00000002
+
+#define SAMS70Q21_CIDR_MATCH		0x21120e00
+#define SAMS70Q21_EXID_MATCH		0x00000002
+#define SAMS70Q20_CIDR_MATCH		0x21120c00
+#define SAMS70Q20_EXID_MATCH		0x00000002
+#define SAMS70Q19_CIDR_MATCH		0x211d0a00
+#define SAMS70Q19_EXID_MATCH		0x00000002
+
+#define SAMV71Q21_CIDR_MATCH		0x21220e00
+#define SAMV71Q21_EXID_MATCH		0x00000002
+#define SAMV71Q20_CIDR_MATCH		0x21220c00
+#define SAMV71Q20_EXID_MATCH		0x00000002
+#define SAMV71Q19_CIDR_MATCH		0x212d0a00
+#define SAMV71Q19_EXID_MATCH		0x00000002
+
+#define SAMV70Q20_CIDR_MATCH		0x21320c00
+#define SAMV70Q20_EXID_MATCH		0x00000002
+#define SAMV70Q19_CIDR_MATCH		0x213d0a00
+#define SAMV70Q19_EXID_MATCH		0x00000002
+
 #endif /* __AT91_SOC_H */
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index a39b0d5..49f1e2a 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -11,7 +11,7 @@
 
 config SOC_BRCMSTB
 	bool "Broadcom STB SoC drivers"
-	depends on ARM
+	depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
 	select SOC_BUS
 	help
 	  Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 5b6e396..aab41a5c 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,2 +1,2 @@
-obj-y += gpc.o
+obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
 obj-$(CONFIG_IMX7_PM_DOMAINS) += gpcv2.o
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a5f1093..c80a04e 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -1117,6 +1117,11 @@ static int pwrap_probe(struct platform_device *pdev)
 	const struct of_device_id *of_slave_id = NULL;
 	struct resource *res;
 
+	if (!of_id) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
 	if (pdev->dev.of_node->child)
 		of_slave_id = of_match_node(of_slave_match_tbl,
 					    pdev->dev.of_node->child);
@@ -1200,7 +1205,8 @@ static int pwrap_probe(struct platform_device *pdev)
 
 	if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_INIT_DONE0)) {
 		dev_dbg(wrp->dev, "initialization isn't finished\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_out2;
 	}
 
 	/* Initialize watchdog, may not be done by the bootloader */
@@ -1220,8 +1226,10 @@ static int pwrap_probe(struct platform_device *pdev)
 		goto err_out2;
 
 	wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, &pwrap_regmap_config);
-	if (IS_ERR(wrp->regmap))
-		return PTR_ERR(wrp->regmap);
+	if (IS_ERR(wrp->regmap)) {
+		ret = PTR_ERR(wrp->regmap);
+		goto err_out2;
+	}
 
 	ret = of_platform_populate(np, NULL, NULL, wrp->dev);
 	if (ret) {
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index beb7916..ceb2cc4 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -21,6 +21,7 @@
 #include <linux/soc/mediatek/infracfg.h>
 
 #include <dt-bindings/power/mt2701-power.h>
+#include <dt-bindings/power/mt6797-power.h>
 #include <dt-bindings/power/mt8173-power.h>
 
 #define SPM_VDE_PWR_CON			0x0210
@@ -71,6 +72,7 @@ enum clk_id {
 	CLK_VENC,
 	CLK_VENC_LT,
 	CLK_ETHIF,
+	CLK_VDEC,
 	CLK_MAX,
 };
 
@@ -81,6 +83,7 @@ static const char * const clk_names[] = {
 	"venc",
 	"venc_lt",
 	"ethif",
+	"vdec",
 	NULL,
 };
 
@@ -107,21 +110,28 @@ struct scp_domain {
 	struct regulator *supply;
 };
 
+struct scp_ctrl_reg {
+	int pwr_sta_offs;
+	int pwr_sta2nd_offs;
+};
+
 struct scp {
 	struct scp_domain *domains;
 	struct genpd_onecell_data pd_data;
 	struct device *dev;
 	void __iomem *base;
 	struct regmap *infracfg;
+	struct scp_ctrl_reg ctrl_reg;
 };
 
 static int scpsys_domain_is_on(struct scp_domain *scpd)
 {
 	struct scp *scp = scpd->scp;
 
-	u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->data->sta_mask;
-	u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) &
-				scpd->data->sta_mask;
+	u32 status = readl(scp->base + scp->ctrl_reg.pwr_sta_offs) &
+						scpd->data->sta_mask;
+	u32 status2 = readl(scp->base + scp->ctrl_reg.pwr_sta2nd_offs) &
+						scpd->data->sta_mask;
 
 	/*
 	 * A domain is on when both status bits are set. If only one is set
@@ -346,7 +356,8 @@ static void init_clks(struct platform_device *pdev, struct clk **clk)
 }
 
 static struct scp *init_scp(struct platform_device *pdev,
-			const struct scp_domain_data *scp_domain_data, int num)
+			const struct scp_domain_data *scp_domain_data, int num,
+			struct scp_ctrl_reg *scp_ctrl_reg)
 {
 	struct genpd_onecell_data *pd_data;
 	struct resource *res;
@@ -358,6 +369,9 @@ static struct scp *init_scp(struct platform_device *pdev,
 	if (!scp)
 		return ERR_PTR(-ENOMEM);
 
+	scp->ctrl_reg.pwr_sta_offs = scp_ctrl_reg->pwr_sta_offs;
+	scp->ctrl_reg.pwr_sta2nd_offs = scp_ctrl_reg->pwr_sta2nd_offs;
+
 	scp->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -556,8 +570,13 @@ static const struct scp_domain_data scp_domain_data_mt2701[] = {
 static int __init scpsys_probe_mt2701(struct platform_device *pdev)
 {
 	struct scp *scp;
+	struct scp_ctrl_reg scp_reg;
 
-	scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+	scp_reg.pwr_sta_offs = SPM_PWR_STATUS;
+	scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND;
+
+	scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701,
+		       &scp_reg);
 	if (IS_ERR(scp))
 		return PTR_ERR(scp);
 
@@ -567,6 +586,116 @@ static int __init scpsys_probe_mt2701(struct platform_device *pdev)
 }
 
 /*
+ * MT6797 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt6797[] = {
+	[MT6797_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = BIT(7),
+		.ctl_offs = 0x300,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_VDEC},
+	},
+	[MT6797_POWER_DOMAIN_VENC] = {
+		.name = "venc",
+		.sta_mask = BIT(21),
+		.ctl_offs = 0x304,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT6797_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = BIT(5),
+		.ctl_offs = 0x308,
+		.sram_pdn_bits = GENMASK(9, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT6797_POWER_DOMAIN_MM] = {
+		.name = "mm",
+		.sta_mask = BIT(3),
+		.ctl_offs = 0x30C,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MM},
+		.bus_prot_mask = (BIT(1) | BIT(2)),
+	},
+	[MT6797_POWER_DOMAIN_AUDIO] = {
+		.name = "audio",
+		.sta_mask = BIT(24),
+		.ctl_offs = 0x314,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT6797_POWER_DOMAIN_MFG_ASYNC] = {
+		.name = "mfg_async",
+		.sta_mask = BIT(13),
+		.ctl_offs = 0x334,
+		.sram_pdn_bits = 0,
+		.sram_pdn_ack_bits = 0,
+		.clk_id = {CLK_MFG},
+	},
+	[MT6797_POWER_DOMAIN_MJC] = {
+		.name = "mjc",
+		.sta_mask = BIT(20),
+		.ctl_offs = 0x310,
+		.sram_pdn_bits = GENMASK(8, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_NONE},
+	},
+};
+
+#define NUM_DOMAINS_MT6797	ARRAY_SIZE(scp_domain_data_mt6797)
+#define SPM_PWR_STATUS_MT6797		0x0180
+#define SPM_PWR_STATUS_2ND_MT6797	0x0184
+
+static int __init scpsys_probe_mt6797(struct platform_device *pdev)
+{
+	struct scp *scp;
+	struct genpd_onecell_data *pd_data;
+	int ret;
+	struct scp_ctrl_reg scp_reg;
+
+	scp_reg.pwr_sta_offs = SPM_PWR_STATUS_MT6797;
+	scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND_MT6797;
+
+	scp = init_scp(pdev, scp_domain_data_mt6797, NUM_DOMAINS_MT6797,
+		       &scp_reg);
+	if (IS_ERR(scp))
+		return PTR_ERR(scp);
+
+	mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT6797);
+
+	pd_data = &scp->pd_data;
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM],
+				     pd_data->domains[MT6797_POWER_DOMAIN_VDEC]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM],
+				     pd_data->domains[MT6797_POWER_DOMAIN_ISP]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM],
+				     pd_data->domains[MT6797_POWER_DOMAIN_VENC]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT6797_POWER_DOMAIN_MM],
+				     pd_data->domains[MT6797_POWER_DOMAIN_MJC]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	return 0;
+}
+
+/*
  * MT8173 power domain support
  */
 
@@ -667,8 +796,13 @@ static int __init scpsys_probe_mt8173(struct platform_device *pdev)
 	struct scp *scp;
 	struct genpd_onecell_data *pd_data;
 	int ret;
+	struct scp_ctrl_reg scp_reg;
 
-	scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+	scp_reg.pwr_sta_offs = SPM_PWR_STATUS;
+	scp_reg.pwr_sta2nd_offs = SPM_PWR_STATUS_2ND;
+
+	scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173,
+		       &scp_reg);
 	if (IS_ERR(scp))
 		return PTR_ERR(scp);
 
@@ -698,6 +832,9 @@ static const struct of_device_id of_scpsys_match_tbl[] = {
 		.compatible = "mediatek,mt2701-scpsys",
 		.data = scpsys_probe_mt2701,
 	}, {
+		.compatible = "mediatek,mt6797-scpsys",
+		.data = scpsys_probe_mt6797,
+	}, {
 		.compatible = "mediatek,mt8173-scpsys",
 		.data = scpsys_probe_mt8173,
 	}, {
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index d0337b2..dc540ea9 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -439,14 +439,15 @@ static int smsm_get_size_info(struct qcom_smsm *smsm)
 	} *info;
 
 	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
-	if (PTR_ERR(info) == -ENOENT || size != sizeof(*info)) {
+	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
+		if (PTR_ERR(info) != -EPROBE_DEFER)
+			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
+		return PTR_ERR(info);
+	} else if (IS_ERR(info) || size != sizeof(*info)) {
 		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
 		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
 		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
 		return 0;
-	} else if (IS_ERR(info)) {
-		dev_err(smsm->dev, "unable to retrieve smsm size info\n");
-		return PTR_ERR(info);
 	}
 
 	smsm->num_entries = info->num_entries;
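
The reordered smsm error handling above distinguishes a real SMEM lookup failure (propagated, and only logged when it is not -EPROBE_DEFER) from the -ENOENT/size-mismatch case that still falls back to defaults. A hedged sketch of that defer-aware pattern in isolation; example_lookup() is a hypothetical helper returning a pointer or ERR_PTR(), in the style of qcom_smem_get():

#include <linux/device.h>
#include <linux/err.h>

void *example_lookup(size_t *sizep);		/* hypothetical provider call */

static int example_get_info(struct device *dev, void **infop, size_t *sizep)
{
	void *info = example_lookup(sizep);

	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
		if (PTR_ERR(info) != -EPROBE_DEFER)
			dev_err(dev, "lookup failed: %ld\n", PTR_ERR(info));
		return PTR_ERR(info);		/* stay quiet on probe deferral */
	}

	if (IS_ERR(info)) {
		*infop = NULL;			/* -ENOENT: caller uses built-in defaults */
		return 0;
	}

	*infop = info;
	return 0;
}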
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
new file mode 100644
index 0000000..87a4be4
--- /dev/null
+++ b/drivers/soc/renesas/Kconfig
@@ -0,0 +1,63 @@
+config SOC_RENESAS
+	bool "Renesas SoC driver support" if COMPILE_TEST && !ARCH_RENESAS
+	default y if ARCH_RENESAS
+	select SOC_BUS
+	select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
+			   ARCH_R8A7795 || ARCH_R8A7796
+	select SYSC_R8A7743 if ARCH_R8A7743
+	select SYSC_R8A7745 if ARCH_R8A7745
+	select SYSC_R8A7779 if ARCH_R8A7779
+	select SYSC_R8A7790 if ARCH_R8A7790
+	select SYSC_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+	select SYSC_R8A7792 if ARCH_R8A7792
+	select SYSC_R8A7794 if ARCH_R8A7794
+	select SYSC_R8A7795 if ARCH_R8A7795
+	select SYSC_R8A7796 if ARCH_R8A7796
+
+if SOC_RENESAS
+
+# SoC
+config SYSC_R8A7743
+	bool "RZ/G1M System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7745
+	bool "RZ/G1E System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7779
+	bool "R-Car H1 System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7790
+	bool "R-Car H2 System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7791
+	bool "R-Car M2-W/N System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7792
+	bool "R-Car V2H System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7794
+	bool "R-Car E2 System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7795
+	bool "R-Car H3 System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+config SYSC_R8A7796
+	bool "R-Car M3-W System Controller support" if COMPILE_TEST
+	select SYSC_RCAR
+
+# Family
+config RST_RCAR
+	bool "R-Car Reset Controller support" if COMPILE_TEST
+
+config SYSC_RCAR
+	bool "R-Car System Controller support" if COMPILE_TEST
+
+endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index d9115cb..1a1a297 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,18 +1,17 @@
-obj-$(CONFIG_SOC_BUS)		+= renesas-soc.o
+# Generic, must be first because of soc_device_register()
+obj-$(CONFIG_SOC_RENESAS)	+= renesas-soc.o
 
-obj-$(CONFIG_ARCH_RCAR_GEN1)	+= rcar-rst.o
-obj-$(CONFIG_ARCH_RCAR_GEN2)	+= rcar-rst.o
-obj-$(CONFIG_ARCH_R8A7795)	+= rcar-rst.o
-obj-$(CONFIG_ARCH_R8A7796)	+= rcar-rst.o
+# SoC
+obj-$(CONFIG_SYSC_R8A7743)	+= r8a7743-sysc.o
+obj-$(CONFIG_SYSC_R8A7745)	+= r8a7745-sysc.o
+obj-$(CONFIG_SYSC_R8A7779)	+= r8a7779-sysc.o
+obj-$(CONFIG_SYSC_R8A7790)	+= r8a7790-sysc.o
+obj-$(CONFIG_SYSC_R8A7791)	+= r8a7791-sysc.o
+obj-$(CONFIG_SYSC_R8A7792)	+= r8a7792-sysc.o
+obj-$(CONFIG_SYSC_R8A7794)	+= r8a7794-sysc.o
+obj-$(CONFIG_SYSC_R8A7795)	+= r8a7795-sysc.o
+obj-$(CONFIG_SYSC_R8A7796)	+= r8a7796-sysc.o
 
-obj-$(CONFIG_ARCH_R8A7743)	+= rcar-sysc.o r8a7743-sysc.o
-obj-$(CONFIG_ARCH_R8A7745)	+= rcar-sysc.o r8a7745-sysc.o
-obj-$(CONFIG_ARCH_R8A7779)	+= rcar-sysc.o r8a7779-sysc.o
-obj-$(CONFIG_ARCH_R8A7790)	+= rcar-sysc.o r8a7790-sysc.o
-obj-$(CONFIG_ARCH_R8A7791)	+= rcar-sysc.o r8a7791-sysc.o
-obj-$(CONFIG_ARCH_R8A7792)	+= rcar-sysc.o r8a7792-sysc.o
-# R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
-obj-$(CONFIG_ARCH_R8A7793)	+= rcar-sysc.o r8a7791-sysc.o
-obj-$(CONFIG_ARCH_R8A7794)	+= rcar-sysc.o r8a7794-sysc.o
-obj-$(CONFIG_ARCH_R8A7795)	+= rcar-sysc.o r8a7795-sysc.o
-obj-$(CONFIG_ARCH_R8A7796)	+= rcar-sysc.o r8a7796-sysc.o
+# Family
+obj-$(CONFIG_RST_RCAR)		+= rcar-rst.o
+obj-$(CONFIG_SYSC_RCAR)		+= rcar-sysc.o
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 528a137..7c8da3c 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -181,17 +181,6 @@ static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd)
 	struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
 
 	pr_debug("%s: %s\n", __func__, genpd->name);
-
-	if (pd->flags & PD_NO_CR) {
-		pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
-		return -EBUSY;
-	}
-
-	if (pd->flags & PD_BUSY) {
-		pr_debug("%s: %s busy\n", __func__, genpd->name);
-		return -EBUSY;
-	}
-
 	return rcar_sysc_power_down(&pd->ch);
 }
 
@@ -200,12 +189,6 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
 	struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
 
 	pr_debug("%s: %s\n", __func__, genpd->name);
-
-	if (pd->flags & PD_NO_CR) {
-		pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
-		return 0;
-	}
-
 	return rcar_sysc_power_up(&pd->ch);
 }
 
@@ -223,8 +206,7 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 		 * only be turned off if the CPU is not in use.
 		 */
 		pr_debug("PM domain %s contains %s\n", name, "CPU");
-		pd->flags |= PD_BUSY;
-		gov = &pm_domain_always_on_gov;
+		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
 	} else if (pd->flags & PD_SCU) {
 		/*
 		 * This domain contains an SCU and cache-controller, and
@@ -232,19 +214,17 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 		 * not in use.
 		 */
 		pr_debug("PM domain %s contains %s\n", name, "SCU");
-		pd->flags |= PD_BUSY;
-		gov = &pm_domain_always_on_gov;
+		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
 	} else if (pd->flags & PD_NO_CR) {
 		/*
 		 * This domain cannot be turned off.
 		 */
-		pd->flags |= PD_BUSY;
-		gov = &pm_domain_always_on_gov;
+		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
 	}
 
 	if (!(pd->flags & (PD_CPU | PD_SCU))) {
 		/* Enable Clock Domain for I/O devices */
-		genpd->flags = GENPD_FLAG_PM_CLK;
+		genpd->flags |= GENPD_FLAG_PM_CLK;
 		if (has_cpg_mstp) {
 			genpd->attach_dev = cpg_mstp_attach_dev;
 			genpd->detach_dev = cpg_mstp_detach_dev;
@@ -275,35 +255,33 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 }
 
 static const struct of_device_id rcar_sysc_matches[] = {
-#ifdef CONFIG_ARCH_R8A7743
+#ifdef CONFIG_SYSC_R8A7743
 	{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7745
+#ifdef CONFIG_SYSC_R8A7745
 	{ .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7779
+#ifdef CONFIG_SYSC_R8A7779
 	{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7790
+#ifdef CONFIG_SYSC_R8A7790
 	{ .compatible = "renesas,r8a7790-sysc", .data = &r8a7790_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7791
+#ifdef CONFIG_SYSC_R8A7791
 	{ .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
-#endif
-#ifdef CONFIG_ARCH_R8A7792
-	{ .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
-#endif
-#ifdef CONFIG_ARCH_R8A7793
 	/* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
 	{ .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7794
+#ifdef CONFIG_SYSC_R8A7792
+	{ .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7794
 	{ .compatible = "renesas,r8a7794-sysc", .data = &r8a7794_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7795
+#ifdef CONFIG_SYSC_R8A7795
 	{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
 #endif
-#ifdef CONFIG_ARCH_R8A7796
+#ifdef CONFIG_SYSC_R8A7796
 	{ .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
 #endif
 	{ /* sentinel */ }
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 07edb04..1a5beba 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -20,8 +20,6 @@
 #define PD_SCU		BIT(1)	/* Area contains SCU and L2 cache */
 #define PD_NO_CR	BIT(2)	/* Area lacks PWR{ON,OFF}CR registers */
 
-#define PD_BUSY		BIT(3)	/* Busy, for internal use only */
-
 #define PD_CPU_CR	PD_CPU		  /* CPU area has CR (R-Car H1) */
 #define PD_CPU_NOCR	PD_CPU | PD_NO_CR /* CPU area lacks CR (R-Car Gen2/3) */
 #define PD_ALWAYS_ON	PD_NO_CR	  /* Always-on area */
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index dcf088d..1beb7c3 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -115,3 +115,8 @@
 
 config SOC_TEGRA_PMC_TEGRA186
 	bool
+
+config SOC_TEGRA_POWERGATE_BPMP
+	def_bool y
+	depends on PM_GENERIC_DOMAINS
+	depends on TEGRA_BPMP
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 4f81dd5..0e52b45 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
 obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
 obj-$(CONFIG_SOC_TEGRA_PMC_TEGRA186) += pmc-tegra186.o
+obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index 0e345c0..5433cc7 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -157,7 +157,7 @@ static int tegra_flowctrl_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	tegra_flowctrl_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(tegra_flowctrl_base))
-		return PTR_ERR(base);
+		return PTR_ERR(tegra_flowctrl_base);
 
 	iounmap(base);
 
diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c
new file mode 100644
index 0000000..8fc3560
--- /dev/null
+++ b/drivers/soc/tegra/powergate-bpmp.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_powergate_info {
+	unsigned int id;
+	char *name;
+};
+
+struct tegra_powergate {
+	struct generic_pm_domain genpd;
+	struct tegra_bpmp *bpmp;
+	unsigned int id;
+};
+
+static inline struct tegra_powergate *
+to_tegra_powergate(struct generic_pm_domain *genpd)
+{
+	return container_of(genpd, struct tegra_powergate, genpd);
+}
+
+static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
+					  unsigned int id, u32 state)
+{
+	struct mrq_pg_request request;
+	struct tegra_bpmp_message msg;
+
+	memset(&request, 0, sizeof(request));
+	request.cmd = CMD_PG_SET_STATE;
+	request.id = id;
+	request.set_state.state = state;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+
+	return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
+					  unsigned int id)
+{
+	struct mrq_pg_response response;
+	struct mrq_pg_request request;
+	struct tegra_bpmp_message msg;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.cmd = CMD_PG_GET_STATE;
+	request.id = id;
+
+	memset(&response, 0, sizeof(response));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err < 0)
+		return PG_STATE_OFF;
+
+	return response.get_state.state;
+}
+
+static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp)
+{
+	struct mrq_pg_response response;
+	struct mrq_pg_request request;
+	struct tegra_bpmp_message msg;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.cmd = CMD_PG_GET_MAX_ID;
+
+	memset(&response, 0, sizeof(response));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err < 0)
+		return err;
+
+	return response.get_max_id.max_id;
+}
+
+static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp,
+					   unsigned int id)
+{
+	struct mrq_pg_response response;
+	struct mrq_pg_request request;
+	struct tegra_bpmp_message msg;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.cmd = CMD_PG_GET_NAME;
+	request.id = id;
+
+	memset(&response, 0, sizeof(response));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err < 0)
+		return NULL;
+
+	return kstrdup(response.get_name.name, GFP_KERNEL);
+}
+
+static inline bool tegra_bpmp_powergate_is_powered(struct tegra_bpmp *bpmp,
+						   unsigned int id)
+{
+	return tegra_bpmp_powergate_get_state(bpmp, id) != PG_STATE_OFF;
+}
+
+static int tegra_powergate_power_on(struct generic_pm_domain *domain)
+{
+	struct tegra_powergate *powergate = to_tegra_powergate(domain);
+	struct tegra_bpmp *bpmp = powergate->bpmp;
+
+	return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
+					      PG_STATE_ON);
+}
+
+static int tegra_powergate_power_off(struct generic_pm_domain *domain)
+{
+	struct tegra_powergate *powergate = to_tegra_powergate(domain);
+	struct tegra_bpmp *bpmp = powergate->bpmp;
+
+	return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
+					      PG_STATE_OFF);
+}
+
+static struct tegra_powergate *
+tegra_powergate_add(struct tegra_bpmp *bpmp,
+		    const struct tegra_powergate_info *info)
+{
+	struct tegra_powergate *powergate;
+	bool off;
+	int err;
+
+	off = !tegra_bpmp_powergate_is_powered(bpmp, info->id);
+
+	powergate = devm_kzalloc(bpmp->dev, sizeof(*powergate), GFP_KERNEL);
+	if (!powergate)
+		return ERR_PTR(-ENOMEM);
+
+	powergate->id = info->id;
+	powergate->bpmp = bpmp;
+
+	powergate->genpd.name = kstrdup(info->name, GFP_KERNEL);
+	powergate->genpd.power_on = tegra_powergate_power_on;
+	powergate->genpd.power_off = tegra_powergate_power_off;
+
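+	/* Register with genpd, reporting whether the domain is currently off */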
+	err = pm_genpd_init(&powergate->genpd, NULL, off);
+	if (err < 0) {
+		kfree(powergate->genpd.name);
+		return ERR_PTR(err);
+	}
+
+	return powergate;
+}
+
+static void tegra_powergate_remove(struct tegra_powergate *powergate)
+{
+	struct generic_pm_domain *genpd = &powergate->genpd;
+	struct tegra_bpmp *bpmp = powergate->bpmp;
+	int err;
+
+	err = pm_genpd_remove(genpd);
+	if (err < 0)
+		dev_err(bpmp->dev, "failed to remove power domain %s: %d\n",
+			genpd->name, err);
+
+	kfree(genpd->name);
+}
+
+static int
+tegra_bpmp_probe_powergates(struct tegra_bpmp *bpmp,
+			    struct tegra_powergate_info **powergatesp)
+{
+	struct tegra_powergate_info *powergates;
+	unsigned int max_id, id, count = 0;
+	unsigned int num_holes = 0;
+	int err;
+
+	err = tegra_bpmp_powergate_get_max_id(bpmp);
+	if (err < 0)
+		return err;
+
+	max_id = err;
+
+	dev_dbg(bpmp->dev, "maximum powergate ID: %u\n", max_id);
+
+	powergates = kcalloc(max_id + 1, sizeof(*powergates), GFP_KERNEL);
+	if (!powergates)
+		return -ENOMEM;
+
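+	/*
+	 * Powergate IDs may be sparse; entries for which the BPMP reports
+	 * no name are counted as holes and skipped.
+	 */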
+	for (id = 0; id <= max_id; id++) {
+		struct tegra_powergate_info *info = &powergates[count];
+
+		info->name = tegra_bpmp_powergate_get_name(bpmp, id);
+		if (!info->name || info->name[0] == '\0') {
+			num_holes++;
+			continue;
+		}
+
+		info->id = id;
+		count++;
+	}
+
+	dev_dbg(bpmp->dev, "holes: %u\n", num_holes);
+
+	*powergatesp = powergates;
+
+	return count;
+}
+
+static int tegra_bpmp_add_powergates(struct tegra_bpmp *bpmp,
+				     struct tegra_powergate_info *powergates,
+				     unsigned int count)
+{
+	struct genpd_onecell_data *genpd = &bpmp->genpd;
+	struct generic_pm_domain **domains;
+	struct tegra_powergate *powergate;
+	unsigned int i;
+	int err;
+
+	domains = kcalloc(count, sizeof(*domains), GFP_KERNEL);
+	if (!domains)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		powergate = tegra_powergate_add(bpmp, &powergates[i]);
+		if (IS_ERR(powergate)) {
+			err = PTR_ERR(powergate);
+			goto remove;
+		}
+
+		dev_dbg(bpmp->dev, "added power domain %s\n",
+			powergate->genpd.name);
+		domains[i] = &powergate->genpd;
+	}
+
+	genpd->num_domains = count;
+	genpd->domains = domains;
+
+	return 0;
+
+remove:
+	while (i--) {
+		powergate = to_tegra_powergate(domains[i]);
+		tegra_powergate_remove(powergate);
+	}
+
+	kfree(genpd->domains);
+	return err;
+}
+
+static void tegra_bpmp_remove_powergates(struct tegra_bpmp *bpmp)
+{
+	struct genpd_onecell_data *genpd = &bpmp->genpd;
+	unsigned int i = genpd->num_domains;
+	struct tegra_powergate *powergate;
+
+	while (i--) {
+		dev_dbg(bpmp->dev, "removing power domain %s\n",
+			genpd->domains[i]->name);
+		powergate = to_tegra_powergate(genpd->domains[i]);
+		tegra_powergate_remove(powergate);
+	}
+}
+
+static struct generic_pm_domain *
+tegra_powergate_xlate(struct of_phandle_args *spec, void *data)
+{
+	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+	struct genpd_onecell_data *genpd = data;
+	unsigned int i;
+
+	for (i = 0; i < genpd->num_domains; i++) {
+		struct tegra_powergate *powergate;
+
+		powergate = to_tegra_powergate(genpd->domains[i]);
+		if (powergate->id == spec->args[0]) {
+			domain = &powergate->genpd;
+			break;
+		}
+	}
+
+	return domain;
+}
+
+int tegra_bpmp_init_powergates(struct tegra_bpmp *bpmp)
+{
+	struct device_node *np = bpmp->dev->of_node;
+	struct tegra_powergate_info *powergates;
+	struct device *dev = bpmp->dev;
+	unsigned int count, i;
+	int err;
+
+	err = tegra_bpmp_probe_powergates(bpmp, &powergates);
+	if (err < 0)
+		return err;
+
+	count = err;
+
+	dev_dbg(dev, "%u power domains probed\n", count);
+
+	err = tegra_bpmp_add_powergates(bpmp, powergates, count);
+	if (err < 0)
+		goto free;
+
+	bpmp->genpd.xlate = tegra_powergate_xlate;
+
+	err = of_genpd_add_provider_onecell(np, &bpmp->genpd);
+	if (err < 0) {
+		dev_err(dev, "failed to add power domain provider: %d\n", err);
+		tegra_bpmp_remove_powergates(bpmp);
+	}
+
+free:
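+	/*
+	 * tegra_powergate_add() duplicated each name for its genpd, so the
+	 * probe-time copies can always be freed here.
+	 */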
+	for (i = 0; i < count; i++)
+		kfree(powergates[i].name);
+
+	kfree(powergates);
+	return err;
+}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1761c90..9b31351 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -393,6 +393,13 @@
 	  From MPC8536, 85xx platform uses the controller, and all P10xx,
 	  P20xx, P30xx,P40xx, P50xx uses this controller.
 
+config SPI_MESON_SPICC
+	tristate "Amlogic Meson SPICC controller"
+	depends on ARCH_MESON || COMPILE_TEST
+	help
+	  This enables master mode support for the SPICC (SPI communication
+	  controller) available in Amlogic Meson SoCs.
+
 config SPI_MESON_SPIFC
 	tristate "Amlogic Meson SPIFC controller"
 	depends on ARCH_MESON || COMPILE_TEST
@@ -457,6 +464,7 @@
 
 config SPI_TI_QSPI
 	tristate "DRA7xxx QSPI controller support"
+	depends on HAS_DMA
 	depends on ARCH_OMAP2PLUS || COMPILE_TEST
 	help
 	  QSPI master controller for DRA7xxx used for flash devices.
@@ -619,6 +627,16 @@
 	help
 	  SPI driver for CSR SiRFprimaII SoCs
 
+config SPI_STM32
+	tristate "STMicroelectronics STM32 SPI controller"
+	depends on ARCH_STM32 || COMPILE_TEST
+	help
+	  SPI driver for STMicroelectronics STM32 SoCs.
+
+	  The STM32 SPI controller supports DMA and PIO modes. When DMA
+	  is not available, the driver automatically falls back to
+	  PIO mode.
+
 config SPI_ST_SSC4
 	tristate "STMicroelectronics SPI SSC-based driver"
 	depends on ARCH_STI || COMPILE_TEST
@@ -784,6 +802,30 @@
 
 endif # SPI_MASTER
 
-# (slave support would go here)
+#
+# SLAVE side ... listening to other SPI masters
+#
+
+config SPI_SLAVE
+	bool "SPI slave protocol handlers"
+	help
+	  If your system has a slave-capable SPI controller, you can enable
+	  slave protocol handlers.
+
+if SPI_SLAVE
+
+config SPI_SLAVE_TIME
+	tristate "SPI slave handler reporting boot up time"
+	help
+	  SPI slave handler responding with the time of reception of the last
+	  SPI message.
+
+config SPI_SLAVE_SYSTEM_CONTROL
+	tristate "SPI slave handler controlling system state"
+	help
+	  SPI slave handler to allow remote control of system reboot, power
+	  off, halt, and suspend.
+
+endif # SPI_SLAVE
 
 endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index b375a7a..a3ae2b7 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -53,6 +53,7 @@
 obj-$(CONFIG_SPI_JCORE)			+= spi-jcore.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi-lm70llp.o
 obj-$(CONFIG_SPI_LP8841_RTC)		+= spi-lp8841-rtc.o
+obj-$(CONFIG_SPI_MESON_SPICC)		+= spi-meson-spicc.o
 obj-$(CONFIG_SPI_MESON_SPIFC)		+= spi-meson-spifc.o
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
@@ -89,6 +90,7 @@
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)		+= spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)		+= spi-sirf.o
+obj-$(CONFIG_SPI_STM32) 		+= spi-stm32.o
 obj-$(CONFIG_SPI_ST_SSC4)		+= spi-st-ssc4.o
 obj-$(CONFIG_SPI_SUN4I)			+= spi-sun4i.o
 obj-$(CONFIG_SPI_SUN6I)			+= spi-sun6i.o
@@ -105,3 +107,7 @@
 obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
 obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
+
+# SPI slave protocol handlers
+obj-$(CONFIG_SPI_SLAVE_TIME)		+= spi-slave-time.o
+obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL)	+= spi-slave-system-control.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 1eb83c9..f95da36 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -269,6 +269,7 @@ struct atmel_spi_caps {
 	bool	is_spi2;
 	bool	has_wdrbt;
 	bool	has_dma_support;
+	bool	has_pdc_support;
 };
 
 /*
@@ -1422,11 +1423,31 @@ static void atmel_get_caps(struct atmel_spi *as)
 	unsigned int version;
 
 	version = atmel_get_version(as);
-	dev_info(&as->pdev->dev, "version: 0x%x\n", version);
 
 	as->caps.is_spi2 = version > 0x121;
 	as->caps.has_wdrbt = version >= 0x210;
+#ifdef CONFIG_SOC_SAM_V4_V5
+	/*
+	 * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
+	 * since the latter function tries to map buffers with dma_map_sg()
+	 * even if they have not been allocated inside DMA-safe areas.
+	 * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
+	 * those ARM cores, the data cache follows the PIPT model.
+	 * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
+	 * In case of PIPT caches, there cannot be cache aliases.
+	 * However on ARM9 cores, the data cache follows the VIVT model, hence
+	 * the cache aliases issue can occur when buffers are allocated from
+	 * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
+	 * not taken into account or at least not handled completely (cache
+	 * lines of aliases are not invalidated).
+	 * This is not a theoretical issue: it was reproduced when trying to mount
+	 * a UBI file-system on a at91sam9g35ek board.
+	 */
+	as->caps.has_dma_support = false;
+#else
 	as->caps.has_dma_support = version >= 0x212;
+#endif
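+	/* Revisions without DMA support transfer through the PDC instead */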
+	as->caps.has_pdc_support = version < 0x212;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1567,7 +1588,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		} else if (ret == -EPROBE_DEFER) {
 			return ret;
 		}
-	} else {
+	} else if (as->caps.has_pdc_support) {
 		as->use_pdc = true;
 	}
 
@@ -1609,8 +1630,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		goto out_free_dma;
 
 	/* go! */
-	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
-			(unsigned long)regs->start, irq);
+	dev_info(&pdev->dev, "Atmel SPI Controller version 0x%x at 0x%08lx (irq %d)\n",
+			atmel_get_version(as), (unsigned long)regs->start,
+			irq);
 
 	return 0;
 
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 5514cd0..4da2d4a 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -484,6 +484,7 @@ static const struct of_device_id bcm63xx_hsspi_of_match[] = {
 	{ .compatible = "brcm,bcm6328-hsspi", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
 
 static struct platform_driver bcm63xx_hsspi_driver = {
 	.driver = {
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 247f71b..84c7356 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -147,7 +147,7 @@ struct bcm63xx_spi {
 
 	/* Platform data */
 	const unsigned long	*reg_offsets;
-	unsigned		fifo_size;
+	unsigned int		fifo_size;
 	unsigned int		msg_type_shift;
 	unsigned int		msg_ctl_width;
 
@@ -191,7 +191,7 @@ static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
 #endif
 }
 
-static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
+static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
 	{ 20000000, SPI_CLK_20MHZ },
 	{ 12500000, SPI_CLK_12_50MHZ },
 	{  6250000, SPI_CLK_6_250MHZ },
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 595acdc..6ddb6ef 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -873,9 +873,8 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
 	return 0;
 }
 #else
-static struct davinci_spi_platform_data
-	*spi_davinci_get_pdata(struct platform_device *pdev,
-		struct davinci_spi *dspi)
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+			struct davinci_spi *dspi)
 {
 	return -ENODEV;
 }
@@ -965,7 +964,9 @@ static int davinci_spi_probe(struct platform_device *pdev)
 		ret = -ENODEV;
 		goto free_master;
 	}
-	clk_prepare_enable(dspi->clk);
+	ret = clk_prepare_enable(dspi->clk);
+	if (ret)
+		goto free_master;
 
 	master->dev.of_node = pdev->dev.of_node;
 	master->bus_num = pdev->id;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 1520164..d89127f 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1032,7 +1032,8 @@ static int dspi_probe(struct platform_device *pdev)
 		goto out_master_put;
 
 	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
-		if (dspi_request_dma(dspi, res->start)) {
+		ret = dspi_request_dma(dspi, res->start);
+		if (ret < 0) {
 			dev_err(&pdev->dev, "can't get dma channels\n");
 			goto out_clk_put;
 		}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index b402530..f9698b7 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,10 +56,6 @@
 
 /* The maximum  bytes that a sdma BD can transfer.*/
 #define MAX_SDMA_BD_BYTES  (1 << 15)
-struct spi_imx_config {
-	unsigned int speed_hz;
-	unsigned int bpw;
-};
 
 enum spi_imx_devtype {
 	IMX1_CSPI,
@@ -74,7 +70,7 @@ struct spi_imx_data;
 
 struct spi_imx_devtype_data {
 	void (*intctrl)(struct spi_imx_data *, int);
-	int (*config)(struct spi_device *, struct spi_imx_config *);
+	int (*config)(struct spi_device *);
 	void (*trigger)(struct spi_imx_data *);
 	int (*rx_available)(struct spi_imx_data *);
 	void (*reset)(struct spi_imx_data *);
@@ -94,7 +90,8 @@ struct spi_imx_data {
 	unsigned long spi_clk;
 	unsigned int spi_bus_clk;
 
-	unsigned int bytes_per_word;
+	unsigned int speed_hz;
+	unsigned int bits_per_word;
 	unsigned int spi_drctl;
 
 	unsigned int count;
@@ -203,34 +200,27 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
 	return i;
 }
 
-static int spi_imx_bytes_per_word(const int bpw)
+static int spi_imx_bytes_per_word(const int bits_per_word)
 {
-	return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+	return DIV_ROUND_UP(bits_per_word, BITS_PER_BYTE);
 }
 
 static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-	unsigned int bpw, i;
+	unsigned int bytes_per_word, i;
 
 	if (!master->dma_rx)
 		return false;
 
-	if (!transfer)
-		return false;
+	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
 
-	bpw = transfer->bits_per_word;
-	if (!bpw)
-		bpw = spi->bits_per_word;
-
-	bpw = spi_imx_bytes_per_word(bpw);
-
-	if (bpw != 1 && bpw != 2 && bpw != 4)
+	if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
 		return false;
 
 	for (i = spi_imx_get_fifosize(spi_imx) / 2; i > 0; i--) {
-		if (!(transfer->len % (i * bpw)))
+		if (!(transfer->len % (i * bytes_per_word)))
 			break;
 	}
 
@@ -340,12 +330,11 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
 }
 
-static int mx51_ecspi_config(struct spi_device *spi,
-			     struct spi_imx_config *config)
+static int mx51_ecspi_config(struct spi_device *spi)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
-	u32 clk = config->speed_hz, delay, reg;
+	u32 clk = spi_imx->speed_hz, delay, reg;
 	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
 	/*
@@ -364,13 +353,13 @@ static int mx51_ecspi_config(struct spi_device *spi,
 		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
 
 	/* set clock speed */
-	ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
+	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
 	spi_imx->spi_bus_clk = clk;
 
 	/* set chip select to use */
 	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
 
-	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+	ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
 
 	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
 
@@ -501,21 +490,21 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
 	writel(reg, spi_imx->base + MXC_CSPICTRL);
 }
 
-static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx31_config(struct spi_device *spi)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
 	unsigned int clk;
 
-	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
+	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
 		MX31_CSPICTRL_DR_SHIFT;
 	spi_imx->spi_bus_clk = clk;
 
 	if (is_imx35_cspi(spi_imx)) {
-		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
 		reg |= MX31_CSPICTRL_SSCTL;
 	} else {
-		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
 	}
 
 	if (spi->mode & SPI_CPHA)
@@ -597,18 +586,18 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
 	writel(reg, spi_imx->base + MXC_CSPICTRL);
 }
 
-static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx21_config(struct spi_device *spi)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
 	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
 	unsigned int clk;
 
-	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk)
+	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk)
 		<< MX21_CSPICTRL_DR_SHIFT;
 	spi_imx->spi_bus_clk = clk;
 
-	reg |= config->bpw - 1;
+	reg |= spi_imx->bits_per_word - 1;
 
 	if (spi->mode & SPI_CPHA)
 		reg |= MX21_CSPICTRL_PHA;
@@ -666,17 +655,17 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
 	writel(reg, spi_imx->base + MXC_CSPICTRL);
 }
 
-static int mx1_config(struct spi_device *spi, struct spi_imx_config *config)
+static int mx1_config(struct spi_device *spi)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
 	unsigned int clk;
 
-	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz, &clk) <<
+	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
 		MX1_CSPICTRL_DR_SHIFT;
 	spi_imx->spi_bus_clk = clk;
 
-	reg |= config->bpw - 1;
+	reg |= spi_imx->bits_per_word - 1;
 
 	if (spi->mode & SPI_CPHA)
 		reg |= MX1_CSPICTRL_PHA;
@@ -841,15 +830,14 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int spi_imx_dma_configure(struct spi_master *master,
-				 int bytes_per_word)
+static int spi_imx_dma_configure(struct spi_master *master)
 {
 	int ret;
 	enum dma_slave_buswidth buswidth;
 	struct dma_slave_config rx = {}, tx = {};
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-	switch (bytes_per_word) {
+	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
 	case 4:
 		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		break;
@@ -883,8 +871,6 @@ static int spi_imx_dma_configure(struct spi_master *master,
 		return ret;
 	}
 
-	spi_imx->bytes_per_word = bytes_per_word;
-
 	return 0;
 }
 
@@ -892,22 +878,19 @@ static int spi_imx_setupxfer(struct spi_device *spi,
 				 struct spi_transfer *t)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
-	struct spi_imx_config config;
 	int ret;
 
-	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
-	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
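+	/*
+	 * The SPI core validates transfers and fills in bits_per_word and
+	 * speed_hz before calling us, so a NULL transfer needs no setup.
+	 */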
+	if (!t)
+		return 0;
 
-	if (!config.speed_hz)
-		config.speed_hz = spi->max_speed_hz;
-	if (!config.bpw)
-		config.bpw = spi->bits_per_word;
+	spi_imx->bits_per_word = t->bits_per_word;
+	spi_imx->speed_hz  = t->speed_hz;
 
 	/* Initialize the functions for transfer */
-	if (config.bpw <= 8) {
+	if (spi_imx->bits_per_word <= 8) {
 		spi_imx->rx = spi_imx_buf_rx_u8;
 		spi_imx->tx = spi_imx_buf_tx_u8;
-	} else if (config.bpw <= 16) {
+	} else if (spi_imx->bits_per_word <= 16) {
 		spi_imx->rx = spi_imx_buf_rx_u16;
 		spi_imx->tx = spi_imx_buf_tx_u16;
 	} else {
@@ -921,13 +904,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
 		spi_imx->usedma = 0;
 
 	if (spi_imx->usedma) {
-		ret = spi_imx_dma_configure(spi->master,
-					    spi_imx_bytes_per_word(config.bpw));
+		ret = spi_imx_dma_configure(spi->master);
 		if (ret)
 			return ret;
 	}
 
-	spi_imx->devtype_data->config(spi, &config);
+	spi_imx->devtype_data->config(spi);
 
 	return 0;
 }
@@ -976,8 +958,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 		goto err;
 	}
 
-	spi_imx_dma_configure(master, 1);
-
 	init_completion(&spi_imx->dma_rx_completion);
 	init_completion(&spi_imx->dma_tx_completion);
 	master->can_dma = spi_imx_can_dma;
@@ -1189,15 +1169,15 @@ static int spi_imx_probe(struct platform_device *pdev)
 	}
 
 	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+	if (!master)
+		return -ENOMEM;
+
 	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
 	if ((ret < 0) || (spi_drctl >= 0x3)) {
 		/* '11' is reserved */
 		spi_drctl = 0;
 	}
 
-	if (!master)
-		return -ENOMEM;
-
 	platform_set_drvdata(pdev, master);
 
 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index f4875f1..3459965 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -894,7 +894,7 @@ int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
 		test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
 		if (ret == -ETIMEDOUT) {
 			dev_info(&spi->dev,
-				 "spi-message timed out - reruning...\n");
+				 "spi-message timed out - rerunning...\n");
 			/* rerun after a few explicit schedules */
 			for (i = 0; i < 16; i++)
 				schedule();
@@ -1021,10 +1021,9 @@ int spi_test_run_tests(struct spi_device *spi,
 		rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
 	else
 		rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
-	if (!rx) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!rx)
+		return -ENOMEM;
 
 	if (use_vmalloc)
 		tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
@@ -1032,7 +1031,7 @@ int spi_test_run_tests(struct spi_device *spi,
 		tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
 	if (!tx) {
 		ret = -ENOMEM;
-		goto out;
+		goto err_tx;
 	}
 
 	/* now run the individual tests in the table */
@@ -1057,8 +1056,9 @@ int spi_test_run_tests(struct spi_device *spi,
 	}
 
 out:
-	kvfree(rx);
 	kvfree(tx);
+err_tx:
+	kvfree(rx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(spi_test_run_tests);
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
new file mode 100644
index 0000000..7f84296
--- /dev/null
+++ b/drivers/spi/spi-meson-spicc.c
@@ -0,0 +1,619 @@
+/*
+ * Driver for Amlogic Meson SPI communication controller (SPICC)
+ *
+ * Copyright (C) BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/gpio.h>
+
+/*
+ * The Meson SPICC controller could support DMA-based transfers, but this is
+ * not implemented by the vendor code and, despite the available register
+ * documentation, it has never worked on GXL hardware.
+ * PIO is the only mode implemented, and due to badly designed hardware:
+ * - all transfers are cut into 16-word bursts because the FIFO hangs on
+ *   TX underflow and there is no TX "Half-Empty" interrupt, so we can only
+ *   go in FIFO-sized chunks
+ * - CS management is dumb and goes up between every burst, so it is really
+ *   a "Data Valid" signal rather than a Chip Select; a GPIO should be used
+ *   instead to keep CS low over the whole transfer
+ */
+
+#define SPICC_MAX_FREQ	30000000
+#define SPICC_MAX_BURST	128
+
+/* Register Map */
+#define SPICC_RXDATA	0x00
+
+#define SPICC_TXDATA	0x04
+
+#define SPICC_CONREG	0x08
+#define SPICC_ENABLE		BIT(0)
+#define SPICC_MODE_MASTER	BIT(1)
+#define SPICC_XCH		BIT(2)
+#define SPICC_SMC		BIT(3)
+#define SPICC_POL		BIT(4)
+#define SPICC_PHA		BIT(5)
+#define SPICC_SSCTL		BIT(6)
+#define SPICC_SSPOL		BIT(7)
+#define SPICC_DRCTL_MASK	GENMASK(9, 8)
+#define SPICC_DRCTL_IGNORE	0
+#define SPICC_DRCTL_FALLING	1
+#define SPICC_DRCTL_LOWLEVEL	2
+#define SPICC_CS_MASK		GENMASK(13, 12)
+#define SPICC_DATARATE_MASK	GENMASK(18, 16)
+#define SPICC_DATARATE_DIV4	0
+#define SPICC_DATARATE_DIV8	1
+#define SPICC_DATARATE_DIV16	2
+#define SPICC_DATARATE_DIV32	3
+#define SPICC_BITLENGTH_MASK	GENMASK(24, 19)
+#define SPICC_BURSTLENGTH_MASK	GENMASK(31, 25)
+
+#define SPICC_INTREG	0x0c
+#define SPICC_TE_EN	BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH_EN	BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF_EN	BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR_EN	BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH_EN	BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF_EN	BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO_EN	BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC_EN	BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_DMAREG	0x10
+#define SPICC_DMA_ENABLE		BIT(0)
+#define SPICC_TXFIFO_THRESHOLD_MASK	GENMASK(5, 1)
+#define SPICC_RXFIFO_THRESHOLD_MASK	GENMASK(10, 6)
+#define SPICC_READ_BURST_MASK		GENMASK(14, 11)
+#define SPICC_WRITE_BURST_MASK		GENMASK(18, 15)
+#define SPICC_DMA_URGENT		BIT(19)
+#define SPICC_DMA_THREADID_MASK		GENMASK(25, 20)
+#define SPICC_DMA_BURSTNUM_MASK		GENMASK(31, 26)
+
+#define SPICC_STATREG	0x14
+#define SPICC_TE	BIT(0) /* TX FIFO Empty Interrupt */
+#define SPICC_TH	BIT(1) /* TX FIFO Half-Full Interrupt */
+#define SPICC_TF	BIT(2) /* TX FIFO Full Interrupt */
+#define SPICC_RR	BIT(3) /* RX FIFO Ready Interrupt */
+#define SPICC_RH	BIT(4) /* RX FIFO Half-Full Interrupt */
+#define SPICC_RF	BIT(5) /* RX FIFO Full Interrupt */
+#define SPICC_RO	BIT(6) /* RX FIFO Overflow Interrupt */
+#define SPICC_TC	BIT(7) /* Transfer Complete Interrupt */
+
+#define SPICC_PERIODREG	0x18
+#define SPICC_PERIOD	GENMASK(14, 0)	/* Wait cycles */
+
+#define SPICC_TESTREG	0x1c
+#define SPICC_TXCNT_MASK	GENMASK(4, 0)	/* TX FIFO Counter */
+#define SPICC_RXCNT_MASK	GENMASK(9, 5)	/* RX FIFO Counter */
+#define SPICC_SMSTATUS_MASK	GENMASK(12, 10)	/* State Machine Status */
+#define SPICC_LBC_RO		BIT(13)	/* Loop Back Control Read-Only */
+#define SPICC_LBC_W1		BIT(14) /* Loop Back Control Write-Only */
+#define SPICC_SWAP_RO		BIT(14) /* RX FIFO Data Swap Read-Only */
+#define SPICC_SWAP_W1		BIT(15) /* RX FIFO Data Swap Write-Only */
+#define SPICC_DLYCTL_RO_MASK	GENMASK(20, 15) /* Delay Control Read-Only */
+#define SPICC_DLYCTL_W1_MASK	GENMASK(21, 16) /* Delay Control Write-Only */
+#define SPICC_FIFORST_RO_MASK	GENMASK(22, 21) /* FIFO Softreset Read-Only */
+#define SPICC_FIFORST_W1_MASK	GENMASK(23, 22) /* FIFO Softreset Write-Only */
+
+#define SPICC_DRADDR	0x20	/* Read Address of DMA */
+
+#define SPICC_DWADDR	0x24	/* Write Address of DMA */
+
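+/* Update only the bits in @mask of a register, using relaxed accessors */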
+#define writel_bits_relaxed(mask, val, addr) \
+	writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+
+#define SPICC_BURST_MAX	16
+#define SPICC_FIFO_HALF 10
+
+struct meson_spicc_device {
+	struct spi_master		*master;
+	struct platform_device		*pdev;
+	void __iomem			*base;
+	struct clk			*core;
+	struct spi_message		*message;
+	struct spi_transfer		*xfer;
+	u8				*tx_buf;
+	u8				*rx_buf;
+	unsigned int			bytes_per_word;
+	unsigned long			tx_remain;
+	unsigned long			txb_remain;
+	unsigned long			rx_remain;
+	unsigned long			rxb_remain;
+	unsigned long			xfer_remain;
+	bool				is_burst_end;
+	bool				is_last_burst;
+};
+
+static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
+{
+	return !!FIELD_GET(SPICC_TF,
+			   readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
+{
+	return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN,
+			 readl_relaxed(spicc->base + SPICC_STATREG));
+}
+
+static inline u32 meson_spicc_pull_data(struct meson_spicc_device *spicc)
+{
+	unsigned int bytes = spicc->bytes_per_word;
+	unsigned int byte_shift = 0;
+	u32 data = 0;
+	u8 byte;
+
+	while (bytes--) {
+		byte = *spicc->tx_buf++;
+		data |= (byte & 0xff) << byte_shift;
+		byte_shift += 8;
+	}
+
+	spicc->tx_remain--;
+	return data;
+}
+
+static inline void meson_spicc_push_data(struct meson_spicc_device *spicc,
+					 u32 data)
+{
+	unsigned int bytes = spicc->bytes_per_word;
+	unsigned int byte_shift = 0;
+	u8 byte;
+
+	while (bytes--) {
+		byte = (data >> byte_shift) & 0xff;
+		*spicc->rx_buf++ = byte;
+		byte_shift += 8;
+	}
+
+	spicc->rx_remain--;
+}
+
+static inline void meson_spicc_rx(struct meson_spicc_device *spicc)
+{
+	/* Empty RX FIFO */
+	while (spicc->rx_remain &&
+	       meson_spicc_rxready(spicc))
+		meson_spicc_push_data(spicc,
+				readl_relaxed(spicc->base + SPICC_RXDATA));
+}
+
+static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
+{
+	/* Fill Up TX FIFO */
+	while (spicc->tx_remain &&
+	       !meson_spicc_txfull(spicc))
+		writel_relaxed(meson_spicc_pull_data(spicc),
+			       spicc->base + SPICC_TXDATA);
+}
+
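+/*
+ * Pick the RX interrupt trigger: FIFO half-full while more than half a FIFO
+ * of words is still expected, RX-ready (every word) otherwise.
+ */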
+static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc,
+					   u32 irq_ctrl)
+{
+	if (spicc->rx_remain > SPICC_FIFO_HALF)
+		irq_ctrl |= SPICC_RH_EN;
+	else
+		irq_ctrl |= SPICC_RR_EN;
+
+	return irq_ctrl;
+}
+
+static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
+					   unsigned int burst_len)
+{
+	/* Setup Xfer variables */
+	spicc->tx_remain = burst_len;
+	spicc->rx_remain = burst_len;
+	spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
+	spicc->is_burst_end = false;
+	if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain)
+		spicc->is_last_burst = true;
+	else
+		spicc->is_last_burst = false;
+
+	/* Setup burst length */
+	writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
+			FIELD_PREP(SPICC_BURSTLENGTH_MASK,
+				burst_len),
+			spicc->base + SPICC_CONREG);
+
+	/* Fill TX FIFO */
+	meson_spicc_tx(spicc);
+}
+
+static irqreturn_t meson_spicc_irq(int irq, void *data)
+{
+	struct meson_spicc_device *spicc = (void *) data;
+	u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG);
+	u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
+
+	ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN);
+
+	/* Empty RX FIFO */
+	meson_spicc_rx(spicc);
+
+	/* Enable TC interrupt since we transferred everything */
+	if (!spicc->tx_remain && !spicc->rx_remain) {
+		spicc->is_burst_end = true;
+
+		/* Enable TC interrupt */
+		ctrl |= SPICC_TC_EN;
+
+		/* Reload IRQ status */
+		stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
+	}
+
+	/* Check transfer complete */
+	if ((stat & SPICC_TC) && spicc->is_burst_end) {
+		unsigned int burst_len;
+
+		/* Clear TC bit */
+		writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG);
+
+		/* Disable TC interrupt */
+		ctrl &= ~SPICC_TC_EN;
+
+		if (spicc->is_last_burst) {
+			/* Disable all IRQs */
+			writel(0, spicc->base + SPICC_INTREG);
+
+			spi_finalize_current_transfer(spicc->master);
+
+			return IRQ_HANDLED;
+		}
+
+		burst_len = min_t(unsigned int,
+				  spicc->xfer_remain / spicc->bytes_per_word,
+				  SPICC_BURST_MAX);
+
+		/* Setup burst */
+		meson_spicc_setup_burst(spicc, burst_len);
+
+		/* Restart burst */
+		writel_bits_relaxed(SPICC_XCH, SPICC_XCH,
+				    spicc->base + SPICC_CONREG);
+	}
+
+	/* Setup RX interrupt trigger */
+	ctrl = meson_spicc_setup_rx_irq(spicc, ctrl);
+
+	/* Reconfigure interrupts */
+	writel(ctrl, spicc->base + SPICC_INTREG);
+
+	return IRQ_HANDLED;
+}
+
+static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf,
+				   u32 speed)
+{
+	unsigned long parent, value;
+	unsigned int i, div;
+
+	parent = clk_get_rate(spicc->core);
+
+	/* Find closest inferior/equal possible speed */
+	for (i = 0 ; i < 7 ; ++i) {
+		/* 2^(data_rate+2) */
+		value = parent >> (i + 2);
+
+		if (value <= speed)
+			break;
+	}
+
+	/* If the requested speed is below the slowest rate, use the max divider */
+	if (i >= 7) {
+		div = 7;
+		dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n",
+			      speed);
+	} else
+		div = i;
+
+	dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n",
+		parent, speed, value, div);
+
+	conf &= ~SPICC_DATARATE_MASK;
+	conf |= FIELD_PREP(SPICC_DATARATE_MASK, div);
+
+	return conf;
+}
+
+static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
+				   struct spi_transfer *xfer)
+{
+	u32 conf, conf_orig;
+
+	/* Read original configuration */
+	conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
+
+	/* Select closest divider */
+	conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz);
+
+	/* Setup word width */
+	conf &= ~SPICC_BITLENGTH_MASK;
+	conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
+			   (spicc->bytes_per_word << 3) - 1);
+
+	/* Ignore if unchanged */
+	if (conf != conf_orig)
+		writel_relaxed(conf, spicc->base + SPICC_CONREG);
+}
+
+static int meson_spicc_transfer_one(struct spi_master *master,
+				    struct spi_device *spi,
+				    struct spi_transfer *xfer)
+{
+	struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+	unsigned int burst_len;
+	u32 irq = 0;
+
+	/* Store current transfer */
+	spicc->xfer = xfer;
+
+	/* Setup transfer parameters */
+	spicc->tx_buf = (u8 *)xfer->tx_buf;
+	spicc->rx_buf = (u8 *)xfer->rx_buf;
+	spicc->xfer_remain = xfer->len;
+
+	/* Pre-calculate word size */
+	spicc->bytes_per_word =
+	   DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
+
+	/* Setup transfer parameters */
+	meson_spicc_setup_xfer(spicc, xfer);
+
+	burst_len = min_t(unsigned int,
+			  spicc->xfer_remain / spicc->bytes_per_word,
+			  SPICC_BURST_MAX);
+
+	meson_spicc_setup_burst(spicc, burst_len);
+
+	irq = meson_spicc_setup_rx_irq(spicc, irq);
+
+	/* Start burst */
+	writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+	/* Enable interrupts */
+	writel_relaxed(irq, spicc->base + SPICC_INTREG);
+
+	return 1;
+}
+
+static int meson_spicc_prepare_message(struct spi_master *master,
+				       struct spi_message *message)
+{
+	struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+	struct spi_device *spi = message->spi;
+	u32 conf = 0;
+
+	/* Store current message */
+	spicc->message = message;
+
+	/* Enable Master */
+	conf |= SPICC_ENABLE;
+	conf |= SPICC_MODE_MASTER;
+
+	/* SMC = 0 */
+
+	/* Setup transfer mode */
+	if (spi->mode & SPI_CPOL)
+		conf |= SPICC_POL;
+	else
+		conf &= ~SPICC_POL;
+
+	if (spi->mode & SPI_CPHA)
+		conf |= SPICC_PHA;
+	else
+		conf &= ~SPICC_PHA;
+
+	/* SSCTL = 0 */
+
+	if (spi->mode & SPI_CS_HIGH)
+		conf |= SPICC_SSPOL;
+	else
+		conf &= ~SPICC_SSPOL;
+
+	if (spi->mode & SPI_READY)
+		conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_LOWLEVEL);
+	else
+		conf |= FIELD_PREP(SPICC_DRCTL_MASK, SPICC_DRCTL_IGNORE);
+
+	/* Select CS */
+	conf |= FIELD_PREP(SPICC_CS_MASK, spi->chip_select);
+
+	/* Default Clock rate core/4 */
+
+	/* Default 8bit word */
+	conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, 8 - 1);
+
+	writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+	/* Setup no wait cycles by default */
+	writel_relaxed(0, spicc->base + SPICC_PERIODREG);
+
+	writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG);
+
+	return 0;
+}
+
+static int meson_spicc_unprepare_transfer(struct spi_master *master)
+{
+	struct meson_spicc_device *spicc = spi_master_get_devdata(master);
+
+	/* Disable all IRQs */
+	writel(0, spicc->base + SPICC_INTREG);
+
+	/* Disable controller */
+	writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG);
+
+	device_reset_optional(&spicc->pdev->dev);
+
+	return 0;
+}
+
+static int meson_spicc_setup(struct spi_device *spi)
+{
+	int ret = 0;
+
+	if (!spi->controller_state)
+		spi->controller_state = spi_master_get_devdata(spi->master);
+	else if (gpio_is_valid(spi->cs_gpio))
+		goto out_gpio;
+	else if (spi->cs_gpio == -ENOENT)
+		return 0;
+
+	if (gpio_is_valid(spi->cs_gpio)) {
+		ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+		if (ret) {
+			dev_err(&spi->dev, "failed to request cs gpio\n");
+			return ret;
+		}
+	}
+
+out_gpio:
+	ret = gpio_direction_output(spi->cs_gpio,
+			!(spi->mode & SPI_CS_HIGH));
+
+	return ret;
+}
+
+static void meson_spicc_cleanup(struct spi_device *spi)
+{
+	if (gpio_is_valid(spi->cs_gpio))
+		gpio_free(spi->cs_gpio);
+
+	spi->controller_state = NULL;
+}
+
+static int meson_spicc_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct meson_spicc_device *spicc;
+	struct resource *res;
+	int ret, irq, rate;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
+	if (!master) {
+		dev_err(&pdev->dev, "master allocation failed\n");
+		return -ENOMEM;
+	}
+	spicc = spi_master_get_devdata(master);
+	spicc->master = master;
+
+	spicc->pdev = pdev;
+	platform_set_drvdata(pdev, spicc);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spicc->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spicc->base)) {
+		dev_err(&pdev->dev, "io resource mapping failed\n");
+		ret = PTR_ERR(spicc->base);
+		goto out_master;
+	}
+
+	/* Disable all IRQs */
+	writel_relaxed(0, spicc->base + SPICC_INTREG);
+
+	irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
+			       0, NULL, spicc);
+	if (ret) {
+		dev_err(&pdev->dev, "irq request failed\n");
+		goto out_master;
+	}
+
+	spicc->core = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(spicc->core)) {
+		dev_err(&pdev->dev, "core clock request failed\n");
+		ret = PTR_ERR(spicc->core);
+		goto out_master;
+	}
+
+	ret = clk_prepare_enable(spicc->core);
+	if (ret) {
+		dev_err(&pdev->dev, "core clock enable failed\n");
+		goto out_master;
+	}
+	rate = clk_get_rate(spicc->core);
+
+	device_reset_optional(&pdev->dev);
+
+	master->num_chipselect = 4;
+	master->dev.of_node = pdev->dev.of_node;
+	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
+	master->bits_per_word_mask = SPI_BPW_MASK(32) |
+				     SPI_BPW_MASK(24) |
+				     SPI_BPW_MASK(16) |
+				     SPI_BPW_MASK(8);
+	master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
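+	/*
+	 * The data rate divider selects core/2^(n + 2), so the slowest
+	 * possible rate is the core clock divided by 512.
+	 */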
+	master->min_speed_hz = rate >> 9;
+	master->setup = meson_spicc_setup;
+	master->cleanup = meson_spicc_cleanup;
+	master->prepare_message = meson_spicc_prepare_message;
+	master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer;
+	master->transfer_one = meson_spicc_transfer_one;
+
+	/* Setup max rate according to the Meson GX datasheet */
+	if ((rate >> 2) > SPICC_MAX_FREQ)
+		master->max_speed_hz = SPICC_MAX_FREQ;
+	else
+		master->max_speed_hz = rate >> 2;
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (!ret)
+		return 0;
+
+	dev_err(&pdev->dev, "spi master registration failed\n");
+
+out_master:
+	spi_master_put(master);
+
+	return ret;
+}
+
+static int meson_spicc_remove(struct platform_device *pdev)
+{
+	struct meson_spicc_device *spicc = platform_get_drvdata(pdev);
+
+	/* Disable SPI */
+	writel(0, spicc->base + SPICC_CONREG);
+
+	clk_disable_unprepare(spicc->core);
+
+	return 0;
+}
+
+static const struct of_device_id meson_spicc_of_match[] = {
+	{ .compatible = "amlogic,meson-gx-spicc", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
+
+static struct platform_driver meson_spicc_driver = {
+	.probe   = meson_spicc_probe,
+	.remove  = meson_spicc_remove,
+	.driver  = {
+		.name = "meson-spicc",
+		.of_match_table = of_match_ptr(meson_spicc_of_match),
+	},
+};
+
+module_platform_driver(meson_spicc_driver);
+
+MODULE_DESCRIPTION("Meson SPI Communication Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 278867a..86bf456 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -35,11 +35,15 @@
 #define SPI_CMD_REG                       0x0018
 #define SPI_STATUS0_REG                   0x001c
 #define SPI_PAD_SEL_REG                   0x0024
+#define SPI_CFG2_REG                      0x0028
 
 #define SPI_CFG0_SCK_HIGH_OFFSET          0
 #define SPI_CFG0_SCK_LOW_OFFSET           8
 #define SPI_CFG0_CS_HOLD_OFFSET           16
 #define SPI_CFG0_CS_SETUP_OFFSET          24
+#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET    16
+#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
+#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16
 
 #define SPI_CFG1_CS_IDLE_OFFSET           0
 #define SPI_CFG1_PACKET_LOOP_OFFSET       8
@@ -55,6 +59,8 @@
 #define SPI_CMD_RST                  BIT(2)
 #define SPI_CMD_PAUSE_EN             BIT(4)
 #define SPI_CMD_DEASSERT             BIT(5)
+#define SPI_CMD_SAMPLE_SEL           BIT(6)
+#define SPI_CMD_CS_POL               BIT(7)
 #define SPI_CMD_CPHA                 BIT(8)
 #define SPI_CMD_CPOL                 BIT(9)
 #define SPI_CMD_RX_DMA               BIT(10)
@@ -80,6 +86,8 @@ struct mtk_spi_compatible {
 	bool need_pad_sel;
 	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
 	bool must_tx;
+	/* some IC designs adjust the config registers to enhance timing accuracy */
+	bool enhance_timing;
 };
 
 struct mtk_spi {
@@ -96,6 +104,16 @@ struct mtk_spi {
 };
 
 static const struct mtk_spi_compatible mtk_common_compat;
+
+static const struct mtk_spi_compatible mt2712_compat = {
+	.must_tx = true,
+};
+
+static const struct mtk_spi_compatible mt7622_compat = {
+	.must_tx = true,
+	.enhance_timing = true,
+};
+
 static const struct mtk_spi_compatible mt8173_compat = {
 	.need_pad_sel = true,
 	.must_tx = true,
@@ -108,15 +126,23 @@ static const struct mtk_spi_compatible mt8173_compat = {
 static const struct mtk_chip_config mtk_default_chip_info = {
 	.rx_mlsb = 1,
 	.tx_mlsb = 1,
+	.cs_pol = 0,
+	.sample_sel = 0,
 };
 
 static const struct of_device_id mtk_spi_of_match[] = {
 	{ .compatible = "mediatek,mt2701-spi",
 		.data = (void *)&mtk_common_compat,
 	},
+	{ .compatible = "mediatek,mt2712-spi",
+		.data = (void *)&mt2712_compat,
+	},
 	{ .compatible = "mediatek,mt6589-spi",
 		.data = (void *)&mtk_common_compat,
 	},
+	{ .compatible = "mediatek,mt7622-spi",
+		.data = (void *)&mt7622_compat,
+	},
 	{ .compatible = "mediatek,mt8135-spi",
 		.data = (void *)&mtk_common_compat,
 	},
@@ -182,6 +208,17 @@ static int mtk_spi_prepare_message(struct spi_master *master,
 	reg_val |= SPI_CMD_RX_ENDIAN;
 #endif
 
+	if (mdata->dev_comp->enhance_timing) {
+		if (chip_config->cs_pol)
+			reg_val |= SPI_CMD_CS_POL;
+		else
+			reg_val &= ~SPI_CMD_CS_POL;
+		if (chip_config->sample_sel)
+			reg_val |= SPI_CMD_SAMPLE_SEL;
+		else
+			reg_val &= ~SPI_CMD_SAMPLE_SEL;
+	}
+
 	/* set finish and pause interrupt always enable */
 	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
 
@@ -233,11 +270,25 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
 	sck_time = (div + 1) / 2;
 	cs_time = sck_time * 2;
 
-	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
-	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
-	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
-	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
-	writel(reg_val, mdata->base + SPI_CFG0_REG);
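+	/*
+	 * Enhanced-timing ICs use 16-bit timing fields: SCK high/low times
+	 * go to SPI_CFG2, CS hold/setup times stay in SPI_CFG0 at adjusted
+	 * offsets.
+	 */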
+	if (mdata->dev_comp->enhance_timing) {
+		reg_val |= (((sck_time - 1) & 0xffff)
+			   << SPI_CFG0_SCK_HIGH_OFFSET);
+		reg_val |= (((sck_time - 1) & 0xffff)
+			   << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
+		writel(reg_val, mdata->base + SPI_CFG2_REG);
+		reg_val |= (((cs_time - 1) & 0xffff)
+			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xffff)
+			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+		writel(reg_val, mdata->base + SPI_CFG0_REG);
+	} else {
+		reg_val |= (((sck_time - 1) & 0xff)
+			   << SPI_CFG0_SCK_HIGH_OFFSET);
+		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+		reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
+		writel(reg_val, mdata->base + SPI_CFG0_REG);
+	}
 
 	reg_val = readl(mdata->base + SPI_CFG1_REG);
 	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7275223..e048268 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1412,9 +1412,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
 		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
 	}
 
-	if (status < 0)
-		goto free_master;
-
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
 	pm_runtime_enable(&pdev->dev);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index be2e87e..28fc9f1 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -22,6 +22,7 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/sizes.h>
+#include <linux/gpio.h>
 #include <asm/unaligned.h>
 
 #define DRIVER_NAME			"orion_spi"
@@ -320,12 +321,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
 	struct orion_spi *orion_spi;
+	int cs;
+
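+	/*
+	 * When a GPIO performs the actual device selection, always drive
+	 * the controller's native CS0 line.
+	 */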
+	if (gpio_is_valid(spi->cs_gpio))
+		cs = 0;
+	else
+		cs = spi->chip_select;
 
 	orion_spi = spi_master_get_devdata(spi->master);
 
 	orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
 	orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
-				ORION_SPI_CS(spi->chip_select));
+				ORION_SPI_CS(cs));
 
 	/* Chip select logic is inverted from spi_set_cs */
 	if (!enable)
@@ -606,6 +613,7 @@ static int orion_spi_probe(struct platform_device *pdev)
 	master->setup = orion_spi_setup;
 	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 	master->auto_runtime_pm = true;
+	master->flags = SPI_MASTER_GPIO_SS;
 
 	platform_set_drvdata(pdev, master);
 
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 47b65d7..38d0536 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -151,6 +151,18 @@ static const struct lpss_config lpss_platforms[] = {
 		.cs_sel_shift = 8,
 		.cs_sel_mask = 3 << 8,
 	},
+	{	/* LPSS_CNL_SSP */
+		.offset = 0x200,
+		.reg_general = -1,
+		.reg_ssp = 0x20,
+		.reg_cs_ctrl = 0x24,
+		.reg_capabilities = 0xfc,
+		.rx_threshold = 1,
+		.tx_threshold_lo = 32,
+		.tx_threshold_hi = 56,
+		.cs_sel_shift = 8,
+		.cs_sel_mask = 3 << 8,
+	},
 };
 
 static inline const struct lpss_config
@@ -167,6 +179,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
 	case LPSS_BSW_SSP:
 	case LPSS_SPT_SSP:
 	case LPSS_BXT_SSP:
+	case LPSS_CNL_SSP:
 		return true;
 	default:
 		return false;
@@ -1275,6 +1288,7 @@ static int setup(struct spi_device *spi)
 	case LPSS_BSW_SSP:
 	case LPSS_SPT_SSP:
 	case LPSS_BXT_SSP:
+	case LPSS_CNL_SSP:
 		config = lpss_get_config(drv_data);
 		tx_thres = config->tx_threshold_lo;
 		tx_hi_thres = config->tx_threshold_hi;
@@ -1470,6 +1484,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
 	{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
 	{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
 	{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
+	/* CNL-LP */
+	{ PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
+	{ PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
+	{ PCI_VDEVICE(INTEL, 0x9dfb), LPSS_CNL_SSP },
+	/* CNL-H */
+	{ PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
+	{ PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
+	{ PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
 	{ },
 };
 
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index acf31f3..0b4a52b 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -25,6 +25,11 @@
 
 #define DRIVER_NAME "rockchip-spi"
 
+#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
+		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
+#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
+		writel_relaxed(readl_relaxed(reg) | (bits), reg)
+
 /* SPI register offsets */
 #define ROCKCHIP_SPI_CTRLR0			0x0000
 #define ROCKCHIP_SPI_CTRLR1			0x0004
@@ -149,6 +154,8 @@
  */
 #define ROCKCHIP_SPI_MAX_TRANLEN		0xffff
 
+#define ROCKCHIP_SPI_MAX_CS_NUM			2
+
 enum rockchip_ssi_type {
 	SSI_MOTO_SPI = 0,
 	SSI_TI_SSP,
@@ -193,6 +200,8 @@ struct rockchip_spi {
 	/* protect state */
 	spinlock_t lock;
 
+	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+
 	u32 use_dma;
 	struct sg_table tx_sg;
 	struct sg_table rx_sg;
@@ -264,37 +273,29 @@ static inline u32 rx_max(struct rockchip_spi *rs)
 
 static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
 {
-	u32 ser;
 	struct spi_master *master = spi->master;
 	struct rockchip_spi *rs = spi_master_get_devdata(master);
+	bool cs_asserted = !enable;
 
-	pm_runtime_get_sync(rs->dev);
+	/* Return immediately for no-op */
+	if (cs_asserted == rs->cs_asserted[spi->chip_select])
+		return;
 
-	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+	if (cs_asserted) {
+		/* Keep things powered as long as CS is asserted */
+		pm_runtime_get_sync(rs->dev);
 
-	/*
-	 * drivers/spi/spi.c:
-	 * static void spi_set_cs(struct spi_device *spi, bool enable)
-	 * {
-	 *		if (spi->mode & SPI_CS_HIGH)
-	 *			enable = !enable;
-	 *
-	 *		if (spi->cs_gpio >= 0)
-	 *			gpio_set_value(spi->cs_gpio, !enable);
-	 *		else if (spi->master->set_cs)
-	 *		spi->master->set_cs(spi, !enable);
-	 * }
-	 *
-	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
-	 */
-	if (!enable)
-		ser |= 1 << spi->chip_select;
-	else
-		ser &= ~(1 << spi->chip_select);
+		ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
+				      BIT(spi->chip_select));
+	} else {
+		ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
+				      BIT(spi->chip_select));
 
-	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+		/* Drop reference from when we first asserted CS */
+		pm_runtime_put(rs->dev);
+	}
 
-	pm_runtime_put_sync(rs->dev);
+	rs->cs_asserted[spi->chip_select] = cs_asserted;
 }
 
 static int rockchip_spi_prepare_message(struct spi_master *master,
@@ -684,33 +685,33 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(rs->regs)) {
 		ret =  PTR_ERR(rs->regs);
-		goto err_ioremap_resource;
+		goto err_put_master;
 	}
 
 	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
 	if (IS_ERR(rs->apb_pclk)) {
 		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
 		ret = PTR_ERR(rs->apb_pclk);
-		goto err_ioremap_resource;
+		goto err_put_master;
 	}
 
 	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
 	if (IS_ERR(rs->spiclk)) {
 		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
 		ret = PTR_ERR(rs->spiclk);
-		goto err_ioremap_resource;
+		goto err_put_master;
 	}
 
 	ret = clk_prepare_enable(rs->apb_pclk);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
-		goto err_ioremap_resource;
+		goto err_put_master;
 	}
 
 	ret = clk_prepare_enable(rs->spiclk);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
-		goto err_spiclk_enable;
+		goto err_disable_apbclk;
 	}
 
 	spi_enable_chip(rs, 0);
@@ -728,7 +729,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	if (!rs->fifo_len) {
 		dev_err(&pdev->dev, "Failed to get fifo length\n");
 		ret = -EINVAL;
-		goto err_get_fifo_len;
+		goto err_disable_spiclk;
 	}
 
 	spin_lock_init(&rs->lock);
@@ -739,7 +740,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	master->auto_runtime_pm = true;
 	master->bus_num = pdev->id;
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
-	master->num_chipselect = 2;
+	master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
 	master->dev.of_node = pdev->dev.of_node;
 	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
 
@@ -749,13 +750,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	master->transfer_one = rockchip_spi_transfer_one;
 	master->max_transfer_size = rockchip_spi_max_transfer_size;
 	master->handle_err = rockchip_spi_handle_err;
+	master->flags = SPI_MASTER_GPIO_SS;
 
 	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
 	if (IS_ERR(rs->dma_tx.ch)) {
 		/* Check tx to see if we need defer probing driver */
 		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
 			ret = -EPROBE_DEFER;
-			goto err_get_fifo_len;
+			goto err_disable_pm_runtime;
 		}
 		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
 		rs->dma_tx.ch = NULL;
@@ -786,23 +788,24 @@ static int rockchip_spi_probe(struct platform_device *pdev)
 	ret = devm_spi_register_master(&pdev->dev, master);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register master\n");
-		goto err_register_master;
+		goto err_free_dma_rx;
 	}
 
 	return 0;
 
-err_register_master:
-	pm_runtime_disable(&pdev->dev);
+err_free_dma_rx:
 	if (rs->dma_rx.ch)
 		dma_release_channel(rs->dma_rx.ch);
 err_free_dma_tx:
 	if (rs->dma_tx.ch)
 		dma_release_channel(rs->dma_tx.ch);
-err_get_fifo_len:
+err_disable_pm_runtime:
+	pm_runtime_disable(&pdev->dev);
+err_disable_spiclk:
 	clk_disable_unprepare(rs->spiclk);
-err_spiclk_enable:
+err_disable_apbclk:
 	clk_disable_unprepare(rs->apb_pclk);
-err_ioremap_resource:
+err_put_master:
 	spi_master_put(master);
 
 	return ret;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2ce15ca..c304c71 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -2,7 +2,8 @@
  * SuperH MSIOF SPI Master Interface
  *
  * Copyright (c) 2009 Magnus Damm
- * Copyright (C) 2014 Glider bvba
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2017 Glider bvba
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -33,7 +34,6 @@
 
 #include <asm/unaligned.h>
 
-
 struct sh_msiof_chipdata {
 	u16 tx_fifo_size;
 	u16 rx_fifo_size;
@@ -53,6 +53,7 @@ struct sh_msiof_spi_priv {
 	void *rx_dma_page;
 	dma_addr_t tx_dma_addr;
 	dma_addr_t rx_dma_addr;
+	bool slave_aborted;
 };
 
 #define TMDR1	0x00	/* Transmit Mode Register 1 */
@@ -337,7 +338,10 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
 	tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
 	tmp |= lsb_first << MDR1_BITLSB_SHIFT;
 	tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
-	sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+	if (spi_controller_is_slave(p->master))
+		sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
+	else
+		sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
 	if (p->master->flags & SPI_MASTER_MUST_TX) {
 		/* These bits are reserved if RX needs TX */
 		tmp &= ~0x0000ffff;
@@ -564,17 +568,19 @@ static int sh_msiof_prepare_message(struct spi_master *master,
 
 static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 {
-	int ret;
+	bool slave = spi_controller_is_slave(p->master);
+	int ret = 0;
 
 	/* setup clock and rx/tx signals */
-	ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+	if (!slave)
+		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
 	if (rx_buf && !ret)
 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
 	if (!ret)
 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
 
 	/* start by setting frame bit */
-	if (!ret)
+	if (!ret && !slave)
 		ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
 
 	return ret;
@@ -582,20 +588,49 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
 
 static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
 {
-	int ret;
+	bool slave = spi_controller_is_slave(p->master);
+	int ret = 0;
 
 	/* shut down frame, rx/tx and clock signals */
-	ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+	if (!slave)
+		ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
 	if (!ret)
 		ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
 	if (rx_buf && !ret)
 		ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
-	if (!ret)
+	if (!ret && !slave)
 		ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
 
 	return ret;
 }
 
+static int sh_msiof_slave_abort(struct spi_master *master)
+{
+	struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+
+	p->slave_aborted = true;
+	complete(&p->done);
+	return 0;
+}
+
+static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p)
+{
+	if (spi_controller_is_slave(p->master)) {
+		if (wait_for_completion_interruptible(&p->done) ||
+		    p->slave_aborted) {
+			dev_dbg(&p->pdev->dev, "interrupted\n");
+			return -EINTR;
+		}
+	} else {
+		if (!wait_for_completion_timeout(&p->done, HZ)) {
+			dev_err(&p->pdev->dev, "timeout\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
 static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 				  void (*tx_fifo)(struct sh_msiof_spi_priv *,
 						  const void *, int, int),
@@ -628,6 +663,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 		tx_fifo(p, tx_buf, words, fifo_shift);
 
 	reinit_completion(&p->done);
+	p->slave_aborted = false;
 
 	ret = sh_msiof_spi_start(p, rx_buf);
 	if (ret) {
@@ -636,11 +672,9 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
 	}
 
 	/* wait for tx fifo to be emptied / rx fifo to be filled */
-	if (!wait_for_completion_timeout(&p->done, HZ)) {
-		dev_err(&p->pdev->dev, "PIO timeout\n");
-		ret = -ETIMEDOUT;
+	ret = sh_msiof_wait_for_completion(p);
+	if (ret)
 		goto stop_reset;
-	}
 
 	/* read rx fifo */
 	if (rx_buf)
@@ -732,6 +766,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 	sh_msiof_write(p, IER, ier_bits);
 
 	reinit_completion(&p->done);
+	p->slave_aborted = false;
 
 	/* Now start DMA */
 	if (rx)
@@ -746,11 +781,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
 	}
 
 	/* wait for tx fifo to be emptied / rx fifo to be filled */
-	if (!wait_for_completion_timeout(&p->done, HZ)) {
-		dev_err(&p->pdev->dev, "DMA timeout\n");
-		ret = -ETIMEDOUT;
+	ret = sh_msiof_wait_for_completion(p);
+	if (ret)
 		goto stop_reset;
-	}
 
 	/* clear status bits */
 	sh_msiof_reset_str(p);
@@ -843,7 +876,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
 	int ret;
 
 	/* setup clocks (clock already enabled in chipselect()) */
-	sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
+	if (!spi_controller_is_slave(p->master))
+		sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
 
 	while (master->dma_tx && len > 15) {
 		/*
@@ -998,8 +1032,12 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
 	if (!info)
 		return NULL;
 
+	info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
+							    : MSIOF_SPI_MASTER;
+
 	/* Parse the MSIOF properties */
-	of_property_read_u32(np, "num-cs", &num_cs);
+	if (info->mode == MSIOF_SPI_MASTER)
+		of_property_read_u32(np, "num-cs", &num_cs);
 	of_property_read_u32(np, "renesas,tx-fifo-size",
 					&info->tx_fifo_override);
 	of_property_read_u32(np, "renesas,rx-fifo-size",
@@ -1159,11 +1197,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
 	struct spi_master *master;
 	const struct sh_msiof_chipdata *chipdata;
 	const struct of_device_id *of_id;
+	struct sh_msiof_spi_info *info;
 	struct sh_msiof_spi_priv *p;
 	int i;
 	int ret;
 
-	master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
+	of_id = of_match_device(sh_msiof_match, &pdev->dev);
+	if (of_id) {
+		chipdata = of_id->data;
+		info = sh_msiof_spi_parse_dt(&pdev->dev);
+	} else {
+		chipdata = (const void *)pdev->id_entry->driver_data;
+		info = dev_get_platdata(&pdev->dev);
+	}
+
+	if (!info) {
+		dev_err(&pdev->dev, "failed to obtain device info\n");
+		return -ENXIO;
+	}
+
+	if (info->mode == MSIOF_SPI_SLAVE)
+		master = spi_alloc_slave(&pdev->dev,
+					 sizeof(struct sh_msiof_spi_priv));
+	else
+		master = spi_alloc_master(&pdev->dev,
+					  sizeof(struct sh_msiof_spi_priv));
 	if (master == NULL)
 		return -ENOMEM;
 
@@ -1171,21 +1229,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, p);
 	p->master = master;
-
-	of_id = of_match_device(sh_msiof_match, &pdev->dev);
-	if (of_id) {
-		chipdata = of_id->data;
-		p->info = sh_msiof_spi_parse_dt(&pdev->dev);
-	} else {
-		chipdata = (const void *)pdev->id_entry->driver_data;
-		p->info = dev_get_platdata(&pdev->dev);
-	}
-
-	if (!p->info) {
-		dev_err(&pdev->dev, "failed to obtain device info\n");
-		ret = -ENXIO;
-		goto err1;
-	}
+	p->info = info;
 
 	init_completion(&p->done);
 
@@ -1237,6 +1281,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
 	master->num_chipselect = p->info->num_chipselect;
 	master->setup = sh_msiof_spi_setup;
 	master->prepare_message = sh_msiof_prepare_message;
+	master->slave_abort = sh_msiof_slave_abort;
 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
 	master->auto_runtime_pm = true;
 	master->transfer_one = sh_msiof_transfer_one;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 7072276a..bbb1a27 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1158,7 +1158,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
 	ret = spi_bitbang_start(&sspi->bitbang);
 	if (ret)
 		goto free_clk;
-	dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
+	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
 
 	return 0;
 free_clk:
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
new file mode 100644
index 0000000..c0257e9
--- /dev/null
+++ b/drivers/spi/spi-slave-system-control.c
@@ -0,0 +1,154 @@
+/*
+ * SPI slave handler controlling system state
+ *
+ * This SPI slave handler allows remote control of system reboot, power off,
+ * halt, and suspend.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ *   # reboot='\x7c\x50'
+ *   # poweroff='\x71\x3f'
+ *   # halt='\x38\x76'
+ *   # suspend='\x1b\x1b'
+ *   # spidev_test -D /dev/spidev2.0 -p $suspend # or $reboot, $poweroff, $halt
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/spi/spi.h>
+
+/*
+ * The numbers are chosen to display something human-readable on two 7-segment
+ * displays connected to two 74HC595 shift registers
+ */
+#define CMD_REBOOT	0x7c50	/* rb */
+#define CMD_POWEROFF	0x713f	/* OF */
+#define CMD_HALT	0x3876	/* HL */
+#define CMD_SUSPEND	0x1b1b	/* ZZ */
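+
+/*
+ * Note: the byte strings in the usage example above are just the big-endian
+ * encodings of these command words, e.g. CMD_REBOOT 0x7c50 is sent as
+ * '\x7c\x50' (the handler reads the word with be16_to_cpu()).
+ */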
+
+struct spi_slave_system_control_priv {
+	struct spi_device *spi;
+	struct completion finished;
+	struct spi_transfer xfer;
+	struct spi_message msg;
+	__be16 cmd;
+};
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv);
+
+static void spi_slave_system_control_complete(void *arg)
+{
+	struct spi_slave_system_control_priv *priv = arg;
+	u16 cmd;
+	int ret;
+
+	if (priv->msg.status)
+		goto terminate;
+
+	cmd = be16_to_cpu(priv->cmd);
+	switch (cmd) {
+	case CMD_REBOOT:
+		dev_info(&priv->spi->dev, "Rebooting system...\n");
+		kernel_restart(NULL);
+
+	case CMD_POWEROFF:
+		dev_info(&priv->spi->dev, "Powering off system...\n");
+		kernel_power_off();
+		break;
+
+	case CMD_HALT:
+		dev_info(&priv->spi->dev, "Halting system...\n");
+		kernel_halt();
+		break;
+
+	case CMD_SUSPEND:
+		dev_info(&priv->spi->dev, "Suspending system...\n");
+		pm_suspend(PM_SUSPEND_MEM);
+		break;
+
+	default:
+		dev_warn(&priv->spi->dev, "Unknown command 0x%x\n", cmd);
+		break;
+	}
+
+	ret = spi_slave_system_control_submit(priv);
+	if (ret)
+		goto terminate;
+
+	return;
+
+terminate:
+	dev_info(&priv->spi->dev, "Terminating\n");
+	complete(&priv->finished);
+}
+
+static
+int spi_slave_system_control_submit(struct spi_slave_system_control_priv *priv)
+{
+	int ret;
+
+	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+	priv->msg.complete = spi_slave_system_control_complete;
+	priv->msg.context = priv;
+
+	ret = spi_async(priv->spi, &priv->msg);
+	if (ret)
+		dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+	return ret;
+}
+
+static int spi_slave_system_control_probe(struct spi_device *spi)
+{
+	struct spi_slave_system_control_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->spi = spi;
+	init_completion(&priv->finished);
+	priv->xfer.rx_buf = &priv->cmd;
+	priv->xfer.len = sizeof(priv->cmd);
+
+	ret = spi_slave_system_control_submit(priv);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, priv);
+	return 0;
+}
+
+static int spi_slave_system_control_remove(struct spi_device *spi)
+{
+	struct spi_slave_system_control_priv *priv = spi_get_drvdata(spi);
+
+	spi_slave_abort(spi);
+	wait_for_completion(&priv->finished);
+	return 0;
+}
+
+static struct spi_driver spi_slave_system_control_driver = {
+	.driver = {
+		.name	= "spi-slave-system-control",
+	},
+	.probe		= spi_slave_system_control_probe,
+	.remove		= spi_slave_system_control_remove,
+};
+module_spi_driver(spi_slave_system_control_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave handler controlling system state");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-slave-time.c b/drivers/spi/spi-slave-time.c
new file mode 100644
index 0000000..f2e07a3
--- /dev/null
+++ b/drivers/spi/spi-slave-time.c
@@ -0,0 +1,129 @@
+/*
+ * SPI slave handler reporting uptime at reception of previous SPI message
+ *
+ * This SPI slave handler sends the time of reception of the last SPI message
+ * as two 32-bit unsigned integers in binary format and in network byte order,
+ * representing the number of seconds and fractional seconds (in microseconds)
+ * since boot up.
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Usage (assuming /dev/spidev2.0 corresponds to the SPI master on the remote
+ * system):
+ *
+ *   # spidev_test -D /dev/spidev2.0 -p dummy-8B
+ *   spi mode: 0x0
+ *   bits per word: 8
+ *   max speed: 500000 Hz (500 KHz)
+ *   RX | 00 00 04 6D 00 09 5B BB ...
+ *		^^^^^    ^^^^^^^^
+ *		seconds  microseconds
+ */
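+
+/*
+ * For instance, the RX bytes shown above decode (big-endian) to
+ * 0x0000046d = 1133 seconds and 0x00095bbb = 613307 microseconds of uptime.
+ */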
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <linux/spi/spi.h>
+
+
+struct spi_slave_time_priv {
+	struct spi_device *spi;
+	struct completion finished;
+	struct spi_transfer xfer;
+	struct spi_message msg;
+	__be32 buf[2];
+};
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv);
+
+static void spi_slave_time_complete(void *arg)
+{
+	struct spi_slave_time_priv *priv = arg;
+	int ret;
+
+	ret = priv->msg.status;
+	if (ret)
+		goto terminate;
+
+	ret = spi_slave_time_submit(priv);
+	if (ret)
+		goto terminate;
+
+	return;
+
+terminate:
+	dev_info(&priv->spi->dev, "Terminating\n");
+	complete(&priv->finished);
+}
+
+static int spi_slave_time_submit(struct spi_slave_time_priv *priv)
+{
+	u32 rem_us;
+	int ret;
+	u64 ts;
+
+	ts = local_clock();
+	rem_us = do_div(ts, 1000000000) / 1000;
+
+	priv->buf[0] = cpu_to_be32(ts);
+	priv->buf[1] = cpu_to_be32(rem_us);
+
+	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
+
+	priv->msg.complete = spi_slave_time_complete;
+	priv->msg.context = priv;
+
+	ret = spi_async(priv->spi, &priv->msg);
+	if (ret)
+		dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
+
+	return ret;
+}
+
+static int spi_slave_time_probe(struct spi_device *spi)
+{
+	struct spi_slave_time_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->spi = spi;
+	init_completion(&priv->finished);
+	priv->xfer.tx_buf = priv->buf;
+	priv->xfer.len = sizeof(priv->buf);
+
+	ret = spi_slave_time_submit(priv);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, priv);
+	return 0;
+}
+
+static int spi_slave_time_remove(struct spi_device *spi)
+{
+	struct spi_slave_time_priv *priv = spi_get_drvdata(spi);
+
+	spi_slave_abort(spi);
+	wait_for_completion(&priv->finished);
+	return 0;
+}
+
+static struct spi_driver spi_slave_time_driver = {
+	.driver = {
+		.name	= "spi-slave-time",
+	},
+	.probe		= spi_slave_time_probe,
+	.remove		= spi_slave_time_remove,
+};
+module_spi_driver(spi_slave_time_driver);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("SPI slave reporting uptime at previous SPI message");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index e54b596..a4e43fc 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -229,42 +229,42 @@ static int spi_st_setup(struct spi_device *spi)
 		"setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n",
 		hz, spi_st->baud, sscbrg);
 
-	 /* Set SSC_CTL and enable SSC */
-	 var = readl_relaxed(spi_st->base + SSC_CTL);
-	 var |= SSC_CTL_MS;
+	/* Set SSC_CTL and enable SSC */
+	var = readl_relaxed(spi_st->base + SSC_CTL);
+	var |= SSC_CTL_MS;
 
-	 if (spi->mode & SPI_CPOL)
+	if (spi->mode & SPI_CPOL)
 		var |= SSC_CTL_PO;
-	 else
+	else
 		var &= ~SSC_CTL_PO;
 
-	 if (spi->mode & SPI_CPHA)
+	if (spi->mode & SPI_CPHA)
 		var |= SSC_CTL_PH;
-	 else
+	else
 		var &= ~SSC_CTL_PH;
 
-	 if ((spi->mode & SPI_LSB_FIRST) == 0)
+	if ((spi->mode & SPI_LSB_FIRST) == 0)
 		var |= SSC_CTL_HB;
-	 else
+	else
 		var &= ~SSC_CTL_HB;
 
-	 if (spi->mode & SPI_LOOP)
+	if (spi->mode & SPI_LOOP)
 		var |= SSC_CTL_LPB;
-	 else
+	else
 		var &= ~SSC_CTL_LPB;
 
-	 var &= ~SSC_CTL_DATA_WIDTH_MSK;
-	 var |= (spi->bits_per_word - 1);
+	var &= ~SSC_CTL_DATA_WIDTH_MSK;
+	var |= (spi->bits_per_word - 1);
 
-	 var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
-	 var |= SSC_CTL_EN;
+	var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+	var |= SSC_CTL_EN;
 
-	 writel_relaxed(var, spi_st->base + SSC_CTL);
+	writel_relaxed(var, spi_st->base + SSC_CTL);
 
-	 /* Clear the status register */
-	 readl_relaxed(spi_st->base + SSC_RBUF);
+	/* Clear the status register */
+	readl_relaxed(spi_st->base + SSC_RBUF);
 
-	 return 0;
+	return 0;
 
 out_free_gpio:
 	gpio_free(cs);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
new file mode 100644
index 0000000..75644bc
--- /dev/null
+++ b/drivers/spi/spi-stm32.c
@@ -0,0 +1,1322 @@
+/*
+ * STMicroelectronics STM32 SPI Controller driver (master mode only)
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * spi_stm32 driver is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * spi_stm32 driver is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/debugfs.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi_stm32"
+
+/* STM32 SPI registers */
+#define STM32_SPI_CR1		0x00
+#define STM32_SPI_CR2		0x04
+#define STM32_SPI_CFG1		0x08
+#define STM32_SPI_CFG2		0x0C
+#define STM32_SPI_IER		0x10
+#define STM32_SPI_SR		0x14
+#define STM32_SPI_IFCR		0x18
+#define STM32_SPI_TXDR		0x20
+#define STM32_SPI_RXDR		0x30
+#define STM32_SPI_I2SCFGR	0x50
+
+/* STM32_SPI_CR1 bit fields */
+#define SPI_CR1_SPE		BIT(0)
+#define SPI_CR1_MASRX		BIT(8)
+#define SPI_CR1_CSTART		BIT(9)
+#define SPI_CR1_CSUSP		BIT(10)
+#define SPI_CR1_HDDIR		BIT(11)
+#define SPI_CR1_SSI		BIT(12)
+
+/* STM32_SPI_CR2 bit fields */
+#define SPI_CR2_TSIZE_SHIFT	0
+#define SPI_CR2_TSIZE		GENMASK(15, 0)
+
+/* STM32_SPI_CFG1 bit fields */
+#define SPI_CFG1_DSIZE_SHIFT	0
+#define SPI_CFG1_DSIZE		GENMASK(4, 0)
+#define SPI_CFG1_FTHLV_SHIFT	5
+#define SPI_CFG1_FTHLV		GENMASK(8, 5)
+#define SPI_CFG1_RXDMAEN	BIT(14)
+#define SPI_CFG1_TXDMAEN	BIT(15)
+#define SPI_CFG1_MBR_SHIFT	28
+#define SPI_CFG1_MBR		GENMASK(30, 28)
+#define SPI_CFG1_MBR_MIN	0
+#define SPI_CFG1_MBR_MAX	(GENMASK(30, 28) >> 28)
+
+/* STM32_SPI_CFG2 bit fields */
+#define SPI_CFG2_MIDI_SHIFT	4
+#define SPI_CFG2_MIDI		GENMASK(7, 4)
+#define SPI_CFG2_COMM_SHIFT	17
+#define SPI_CFG2_COMM		GENMASK(18, 17)
+#define SPI_CFG2_SP_SHIFT	19
+#define SPI_CFG2_SP		GENMASK(21, 19)
+#define SPI_CFG2_MASTER		BIT(22)
+#define SPI_CFG2_LSBFRST	BIT(23)
+#define SPI_CFG2_CPHA		BIT(24)
+#define SPI_CFG2_CPOL		BIT(25)
+#define SPI_CFG2_SSM		BIT(26)
+#define SPI_CFG2_AFCNTR		BIT(31)
+
+/* STM32_SPI_IER bit fields */
+#define SPI_IER_RXPIE		BIT(0)
+#define SPI_IER_TXPIE		BIT(1)
+#define SPI_IER_DXPIE		BIT(2)
+#define SPI_IER_EOTIE		BIT(3)
+#define SPI_IER_TXTFIE		BIT(4)
+#define SPI_IER_OVRIE		BIT(6)
+#define SPI_IER_MODFIE		BIT(9)
+#define SPI_IER_ALL		GENMASK(10, 0)
+
+/* STM32_SPI_SR bit fields */
+#define SPI_SR_RXP		BIT(0)
+#define SPI_SR_TXP		BIT(1)
+#define SPI_SR_EOT		BIT(3)
+#define SPI_SR_OVR		BIT(6)
+#define SPI_SR_MODF		BIT(9)
+#define SPI_SR_SUSP		BIT(11)
+#define SPI_SR_RXPLVL_SHIFT	13
+#define SPI_SR_RXPLVL		GENMASK(14, 13)
+#define SPI_SR_RXWNE		BIT(15)
+
+/* STM32_SPI_IFCR bit fields */
+#define SPI_IFCR_ALL		GENMASK(11, 3)
+
+/* STM32_SPI_I2SCFGR bit fields */
+#define SPI_I2SCFGR_I2SMOD	BIT(0)
+
+/* SPI Master Baud Rate min/max divisor */
+#define SPI_MBR_DIV_MIN		(2 << SPI_CFG1_MBR_MIN)
+#define SPI_MBR_DIV_MAX		(2 << SPI_CFG1_MBR_MAX)
+
+/* SPI Communication mode */
+#define SPI_FULL_DUPLEX		0
+#define SPI_SIMPLEX_TX		1
+#define SPI_SIMPLEX_RX		2
+#define SPI_HALF_DUPLEX		3
+
+#define SPI_1HZ_NS		1000000000
+
+/**
+ * struct stm32_spi - private data of the SPI controller
+ * @dev: driver model representation of the controller
+ * @master: controller master interface
+ * @base: virtual memory area
+ * @clk: hw kernel clock feeding the SPI clock generator
+ * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
+ * @rst: SPI controller reset line
+ * @lock: prevent I/O concurrent access
+ * @irq: SPI controller interrupt line
+ * @fifo_size: size of the embedded fifo in bytes
+ * @cur_midi: master inter-data idleness in ns
+ * @cur_speed: speed configured in Hz
+ * @cur_bpw: number of bits in a single SPI data frame
+ * @cur_fthlv: fifo threshold level (data frames in a single data packet)
+ * @cur_comm: SPI communication mode
+ * @cur_xferlen: current transfer length in bytes
+ * @cur_usedma: boolean to know if dma is used in current transfer
+ * @tx_buf: data to be written, or NULL
+ * @rx_buf: data to be read, or NULL
+ * @tx_len: number of data to be written in bytes
+ * @rx_len: number of data to be read in bytes
+ * @dma_tx: dma channel for TX transfer
+ * @dma_rx: dma channel for RX transfer
+ * @phys_addr: SPI registers physical base address
+ */
+struct stm32_spi {
+	struct device *dev;
+	struct spi_master *master;
+	void __iomem *base;
+	struct clk *clk;
+	u32 clk_rate;
+	struct reset_control *rst;
+	spinlock_t lock; /* prevent I/O concurrent access */
+	int irq;
+	unsigned int fifo_size;
+
+	unsigned int cur_midi;
+	unsigned int cur_speed;
+	unsigned int cur_bpw;
+	unsigned int cur_fthlv;
+	unsigned int cur_comm;
+	unsigned int cur_xferlen;
+	bool cur_usedma;
+
+	const void *tx_buf;
+	void *rx_buf;
+	int tx_len;
+	int rx_len;
+	struct dma_chan *dma_tx;
+	struct dma_chan *dma_rx;
+	dma_addr_t phys_addr;
+};
+
+static inline void stm32_spi_set_bits(struct stm32_spi *spi,
+				      u32 offset, u32 bits)
+{
+	writel_relaxed(readl_relaxed(spi->base + offset) | bits,
+		       spi->base + offset);
+}
+
+static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
+				      u32 offset, u32 bits)
+{
+	writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
+		       spi->base + offset);
+}
+
+/**
+ * stm32_spi_get_fifo_size - Return fifo size
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
+{
+	unsigned long flags;
+	u32 count = 0;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+	while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
+		writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);
+
+	stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
+
+	return count;
+}
+
+/**
+ * stm32_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+	unsigned long flags;
+	u32 cfg1, max_bpw;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	/*
+	 * The most significant bit of the DSIZE bit field is reserved when the
+	 * maximum data size of the peripheral instance is limited to 16-bit
+	 */
+	stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
+
+	cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
+	max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
+	max_bpw += 1;
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
+
+	return SPI_BPW_RANGE_MASK(4, max_bpw);
+}
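+
+/*
+ * Illustration of the detection above (assumed register behaviour): writing
+ * all ones to DSIZE and reading it back yields 0x1f on a full-featured
+ * instance (max_bpw = 32), while an instance limited to 16-bit data keeps the
+ * reserved MSB clear and reads back 0x0f (max_bpw = 16).
+ */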
+
+/**
+ * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
+ * @spi: pointer to the spi controller data structure
+ * @speed_hz: requested speed
+ *
+ * Return SPI_CFG1.MBR value in case of success or -EINVAL
+ */
+static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
+{
+	u32 div, mbrdiv;
+
+	div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+
+	/*
+	 * SPI framework sets xfer->speed_hz to master->max_speed_hz if
+	 * xfer->speed_hz is greater than master->max_speed_hz, and it returns
+	 * an error when xfer->speed_hz is lower than master->min_speed_hz, so
+	 * there is no need to check those limits here.
+	 * However, the resulting divisor must still fit the MBR field range.
+	 */
+	if ((div < SPI_MBR_DIV_MIN) ||
+	    (div > SPI_MBR_DIV_MAX))
+		return -EINVAL;
+
+	/* Determine the first power of 2 greater than or equal to div */
+	if (div & (div - 1))
+		mbrdiv = fls(div);
+	else
+		mbrdiv = fls(div) - 1;
+
+	spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
+
+	return mbrdiv - 1;
+}
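+
+/*
+ * Worked example (hypothetical rates): with spi->clk_rate = 100 MHz and
+ * speed_hz = 10 MHz, div = 10, which is rounded up to the next power of two,
+ * 16 (mbrdiv = fls(10) = 4). cur_speed then becomes 100 MHz / 16 = 6.25 MHz
+ * and the function returns 3, i.e. an MBR value selecting divide-by-16.
+ */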
+
+/**
+ * stm32_spi_prepare_fthlv - Determine FIFO threshold level
+ * @spi: pointer to the spi controller data structure
+ */
+static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
+{
+	u32 fthlv, half_fifo;
+
+	/* data packet should not exceed 1/2 of fifo space */
+	half_fifo = (spi->fifo_size / 2);
+
+	if (spi->cur_bpw <= 8)
+		fthlv = half_fifo;
+	else if (spi->cur_bpw <= 16)
+		fthlv = half_fifo / 2;
+	else
+		fthlv = half_fifo / 4;
+
+	/* align packet size with data registers access */
+	if (spi->cur_bpw > 8)
+		fthlv -= (fthlv % 2); /* multiple of 2 */
+	else
+		fthlv -= (fthlv % 4); /* multiple of 4 */
+
+	return fthlv;
+}
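+
+/*
+ * Example (assuming a 16-byte fifo): with 16-bit frames, half_fifo = 8 bytes,
+ * so fthlv = 4 data frames (8 bytes), already a multiple of 2; with 8-bit
+ * frames, fthlv = 8 frames, already a multiple of 4.
+ */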
+
+/**
+ * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Reads from tx_buf are sized according to the number of remaining bytes so
+ * that the access never goes beyond the end of tx_buf.
+ */
+static void stm32_spi_write_txfifo(struct stm32_spi *spi)
+{
+	while ((spi->tx_len > 0) &&
+	       (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
+		u32 offs = spi->cur_xferlen - spi->tx_len;
+
+		if (spi->tx_len >= sizeof(u32)) {
+			const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
+
+			writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
+			spi->tx_len -= sizeof(u32);
+		} else if (spi->tx_len >= sizeof(u16)) {
+			const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+			writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
+			spi->tx_len -= sizeof(u16);
+		} else {
+			const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+			writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
+			spi->tx_len -= sizeof(u8);
+		}
+	}
+
+	dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Writes to rx_buf are sized according to the number of remaining bytes so
+ * that the access never goes beyond the end of rx_buf.
+ */
+static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
+{
+	u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
+	u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+
+	while ((spi->rx_len > 0) &&
+	       ((sr & SPI_SR_RXP) ||
+		(flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
+		u32 offs = spi->cur_xferlen - spi->rx_len;
+
+		if ((spi->rx_len >= sizeof(u32)) ||
+		    (flush && (sr & SPI_SR_RXWNE))) {
+			u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
+
+			*rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
+			spi->rx_len -= sizeof(u32);
+		} else if ((spi->rx_len >= sizeof(u16)) ||
+			   (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
+			u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+			*rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
+			spi->rx_len -= sizeof(u16);
+		} else {
+			u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+			*rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
+			spi->rx_len -= sizeof(u8);
+		}
+
+		sr = readl_relaxed(spi->base + STM32_SPI_SR);
+		rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+	}
+
+	dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
+		flush ? "(flush)" : "", spi->rx_len);
+}
+
+/**
+ * stm32_spi_enable - Enable SPI controller
+ * @spi: pointer to the spi controller data structure
+ *
+ * SPI data transfer is enabled but spi_ker_ck is idle.
+ * SPI_CFG1 and SPI_CFG2 are now write protected.
+ */
+static void stm32_spi_enable(struct stm32_spi *spi)
+{
+	dev_dbg(spi->dev, "enable controller\n");
+
+	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+}
+
+/**
+ * stm32_spi_disable - Disable SPI controller
+ * @spi: pointer to the spi controller data structure
+ *
+ * The RX-Fifo is flushed when the SPI controller is disabled. To prevent any
+ * data loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
+ * the RX-Fifo.
+ */
+static void stm32_spi_disable(struct stm32_spi *spi)
+{
+	unsigned long flags;
+	u32 cr1, sr;
+
+	dev_dbg(spi->dev, "disable controller\n");
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);
+
+	if (!(cr1 & SPI_CR1_SPE)) {
+		spin_unlock_irqrestore(&spi->lock, flags);
+		return;
+	}
+
+	/* Wait on EOT or suspend the flow */
+	if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
+					      sr, !(sr & SPI_SR_EOT),
+					      10, 100000) < 0) {
+		if (cr1 & SPI_CR1_CSTART) {
+			writel_relaxed(cr1 | SPI_CR1_CSUSP,
+				       spi->base + STM32_SPI_CR1);
+			if (readl_relaxed_poll_timeout_atomic(
+						spi->base + STM32_SPI_SR,
+						sr, !(sr & SPI_SR_SUSP),
+						10, 100000) < 0)
+				dev_warn(spi->dev,
+					 "Suspend request timeout\n");
+		}
+	}
+
+	if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
+		stm32_spi_read_rxfifo(spi, true);
+
+	if (spi->cur_usedma && spi->tx_buf)
+		dmaengine_terminate_all(spi->dma_tx);
+	if (spi->cur_usedma && spi->rx_buf)
+		dmaengine_terminate_all(spi->dma_rx);
+
+	stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
+
+	stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
+						SPI_CFG1_RXDMAEN);
+
+	/* Disable interrupts and clear status flags */
+	writel_relaxed(0, spi->base + STM32_SPI_IER);
+	writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+}
+
+/**
+ * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
+ *
+ * If the current transfer size is greater than fifo size, use DMA.
+ */
+static bool stm32_spi_can_dma(struct spi_master *master,
+			      struct spi_device *spi_dev,
+			      struct spi_transfer *transfer)
+{
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+
+	dev_dbg(spi->dev, "%s: %s\n", __func__,
+		(transfer->len > spi->fifo_size) ? "true" : "false");
+
+	return (transfer->len > spi->fifo_size);
+}
+
+/**
+ * stm32_spi_irq - Interrupt handler for SPI controller events
+ * @irq: interrupt line
+ * @dev_id: SPI controller master interface
+ */
+static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
+{
+	struct spi_master *master = dev_id;
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+	u32 sr, ier, mask;
+	unsigned long flags;
+	bool end = false;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	sr = readl_relaxed(spi->base + STM32_SPI_SR);
+	ier = readl_relaxed(spi->base + STM32_SPI_IER);
+
+	mask = ier;
+	/* EOTIE is triggered on EOT, SUSP and TXC events. */
+	mask |= SPI_SR_SUSP;
+	/*
+	 * When TXTF is set, DXPIE and TXPIE are cleared. So, in Full-Duplex
+	 * mode, the RXP event must still be polled to check for remaining
+	 * data before disabling the SPI controller.
+	 */
+	if (spi->rx_buf && !spi->cur_usedma)
+		mask |= SPI_SR_RXP;
+
+	if (!(sr & mask)) {
+		dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+			sr, ier);
+		spin_unlock_irqrestore(&spi->lock, flags);
+		return IRQ_NONE;
+	}
+
+	if (sr & SPI_SR_SUSP) {
+		dev_warn(spi->dev, "Communication suspended\n");
+		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+			stm32_spi_read_rxfifo(spi, false);
+		/*
+		 * If communication is suspended while using DMA, it means
+		 * that something went wrong, so stop the current transfer
+		 */
+		if (spi->cur_usedma)
+			end = true;
+	}
+
+	if (sr & SPI_SR_MODF) {
+		dev_warn(spi->dev, "Mode fault: transfer aborted\n");
+		end = true;
+	}
+
+	if (sr & SPI_SR_OVR) {
+		dev_warn(spi->dev, "Overrun: received value discarded\n");
+		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+			stm32_spi_read_rxfifo(spi, false);
+		/*
+		 * If overrun is detected while using DMA, it means that
+		 * something went wrong, so stop the current transfer
+		 */
+		if (spi->cur_usedma)
+			end = true;
+	}
+
+	if (sr & SPI_SR_EOT) {
+		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+			stm32_spi_read_rxfifo(spi, true);
+		end = true;
+	}
+
+	if (sr & SPI_SR_TXP)
+		if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
+			stm32_spi_write_txfifo(spi);
+
+	if (sr & SPI_SR_RXP)
+		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+			stm32_spi_read_rxfifo(spi, false);
+
+	writel_relaxed(mask, spi->base + STM32_SPI_IFCR);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	if (end) {
+		spi_finalize_current_transfer(master);
+		stm32_spi_disable(spi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * stm32_spi_setup - setup device chip select
+ */
+static int stm32_spi_setup(struct spi_device *spi_dev)
+{
+	int ret = 0;
+
+	if (!gpio_is_valid(spi_dev->cs_gpio)) {
+		dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
+			spi_dev->cs_gpio);
+		return -EINVAL;
+	}
+
+	dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
+		spi_dev->cs_gpio,
+		(spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
+
+	ret = gpio_direction_output(spi_dev->cs_gpio,
+				    !(spi_dev->mode & SPI_CS_HIGH));
+
+	return ret;
+}
+
+/**
+ * stm32_spi_prepare_msg - set up the controller to transfer a single message
+ */
+static int stm32_spi_prepare_msg(struct spi_master *master,
+				 struct spi_message *msg)
+{
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+	struct spi_device *spi_dev = msg->spi;
+	struct device_node *np = spi_dev->dev.of_node;
+	unsigned long flags;
+	u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+	/* SPI slave device may need time between data frames */
+	spi->cur_midi = 0;
+	if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
+		dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
+
+	if (spi_dev->mode & SPI_CPOL)
+		cfg2_setb |= SPI_CFG2_CPOL;
+	else
+		cfg2_clrb |= SPI_CFG2_CPOL;
+
+	if (spi_dev->mode & SPI_CPHA)
+		cfg2_setb |= SPI_CFG2_CPHA;
+	else
+		cfg2_clrb |= SPI_CFG2_CPHA;
+
+	if (spi_dev->mode & SPI_LSB_FIRST)
+		cfg2_setb |= SPI_CFG2_LSBFRST;
+	else
+		cfg2_clrb |= SPI_CFG2_LSBFRST;
+
+	dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
+		spi_dev->mode & SPI_CPOL,
+		spi_dev->mode & SPI_CPHA,
+		spi_dev->mode & SPI_LSB_FIRST,
+		spi_dev->mode & SPI_CS_HIGH);
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	if (cfg2_clrb || cfg2_setb)
+		writel_relaxed(
+			(readl_relaxed(spi->base + STM32_SPI_CFG2) &
+				~cfg2_clrb) | cfg2_setb,
+			       spi->base + STM32_SPI_CFG2);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	return 0;
+}
+
+/**
+ * stm32_spi_dma_cb - dma callback
+ *
+ * DMA callback is called when the transfer is complete or when an error
+ * occurs. If the transfer is complete, EOT flag is raised.
+ */
+static void stm32_spi_dma_cb(void *data)
+{
+	struct stm32_spi *spi = data;
+	unsigned long flags;
+	u32 sr;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	sr = readl_relaxed(spi->base + STM32_SPI_SR);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	if (!(sr & SPI_SR_EOT))
+		dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
+
+	/* Now wait for EOT, or SUSP or OVR in case of error */
+}
+
+/**
+ * stm32_spi_dma_config - configure dma slave channel depending on current
+ *			  transfer bits_per_word.
+ */
+static void stm32_spi_dma_config(struct stm32_spi *spi,
+				 struct dma_slave_config *dma_conf,
+				 enum dma_transfer_direction dir)
+{
+	enum dma_slave_buswidth buswidth;
+	u32 maxburst;
+
+	if (spi->cur_bpw <= 8)
+		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	else if (spi->cur_bpw <= 16)
+		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	else
+		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	/* Valid for DMA Half or Full Fifo threshold */
+	if (spi->cur_fthlv == 2)
+		maxburst = 1;
+	else
+		maxburst = spi->cur_fthlv;
+
+	memset(dma_conf, 0, sizeof(struct dma_slave_config));
+	dma_conf->direction = dir;
+	if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
+		dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
+		dma_conf->src_addr_width = buswidth;
+		dma_conf->src_maxburst = maxburst;
+
+		dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
+			buswidth, maxburst);
+	} else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
+		dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
+		dma_conf->dst_addr_width = buswidth;
+		dma_conf->dst_maxburst = maxburst;
+
+		dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
+			buswidth, maxburst);
+	}
+}
+
+/**
+ * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
+ *				interrupts
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
+{
+	unsigned long flags;
+	u32 ier = 0;
+
+	/* Enable the interrupts relative to the current communication mode */
+	if (spi->tx_buf && spi->rx_buf)	/* Full Duplex */
+		ier |= SPI_IER_DXPIE;
+	else if (spi->tx_buf)		/* Half-Duplex TX dir or Simplex TX */
+		ier |= SPI_IER_TXPIE;
+	else if (spi->rx_buf)		/* Half-Duplex RX dir or Simplex RX */
+		ier |= SPI_IER_RXPIE;
+
+	/* Enable the interrupts relative to the end of transfer */
+	ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE |	SPI_IER_OVRIE |	SPI_IER_MODFIE;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	stm32_spi_enable(spi);
+
+	/* Be sure to have data in fifo before starting data transfer */
+	if (spi->tx_buf)
+		stm32_spi_write_txfifo(spi);
+
+	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+
+	writel_relaxed(ier, spi->base + STM32_SPI_IER);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	return 1;
+}
+
+/**
+ * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
+				      struct spi_transfer *xfer)
+{
+	struct dma_slave_config tx_dma_conf, rx_dma_conf;
+	struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
+	unsigned long flags;
+	u32 ier = 0;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	rx_dma_desc = NULL;
+	if (spi->rx_buf) {
+		stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
+		dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
+
+		/* Enable Rx DMA request */
+		stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+
+		rx_dma_desc = dmaengine_prep_slave_sg(
+					spi->dma_rx, xfer->rx_sg.sgl,
+					xfer->rx_sg.nents,
+					rx_dma_conf.direction,
+					DMA_PREP_INTERRUPT);
+	}
+
+	tx_dma_desc = NULL;
+	if (spi->tx_buf) {
+		stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
+		dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
+
+		tx_dma_desc = dmaengine_prep_slave_sg(
+					spi->dma_tx, xfer->tx_sg.sgl,
+					xfer->tx_sg.nents,
+					tx_dma_conf.direction,
+					DMA_PREP_INTERRUPT);
+	}
+
+	if ((spi->tx_buf && !tx_dma_desc) ||
+	    (spi->rx_buf && !rx_dma_desc))
+		goto dma_desc_error;
+
+	if (rx_dma_desc) {
+		rx_dma_desc->callback = stm32_spi_dma_cb;
+		rx_dma_desc->callback_param = spi;
+
+		if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
+			dev_err(spi->dev, "Rx DMA submit failed\n");
+			goto dma_desc_error;
+		}
+		/* Enable Rx DMA channel */
+		dma_async_issue_pending(spi->dma_rx);
+	}
+
+	if (tx_dma_desc) {
+		if (spi->cur_comm == SPI_SIMPLEX_TX) {
+			tx_dma_desc->callback = stm32_spi_dma_cb;
+			tx_dma_desc->callback_param = spi;
+		}
+
+		if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
+			dev_err(spi->dev, "Tx DMA submit failed\n");
+			goto dma_submit_error;
+		}
+		/* Enable Tx DMA channel */
+		dma_async_issue_pending(spi->dma_tx);
+
+		/* Enable Tx DMA request */
+		stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
+	}
+
+	/* Enable the interrupts relative to the end of transfer */
+	ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE |	SPI_IER_OVRIE |	SPI_IER_MODFIE;
+	writel_relaxed(ier, spi->base + STM32_SPI_IER);
+
+	stm32_spi_enable(spi);
+
+	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	return 1;
+
+dma_submit_error:
+	if (spi->rx_buf)
+		dmaengine_terminate_all(spi->dma_rx);
+
+dma_desc_error:
+	stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
+
+	return stm32_spi_transfer_one_irq(spi);
+}
+
+/**
+ * stm32_spi_transfer_one_setup - common setup to transfer a single
+ *				  spi_transfer either using DMA or
+ *				  interrupts.
+ */
+static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+					struct spi_device *spi_dev,
+					struct spi_transfer *transfer)
+{
+	unsigned long flags;
+	u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
+	u32 mode, nb_words;
+	int ret = 0;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	if (spi->cur_bpw != transfer->bits_per_word) {
+		u32 bpw, fthlv;
+
+		spi->cur_bpw = transfer->bits_per_word;
+		bpw = spi->cur_bpw - 1;
+
+		cfg1_clrb |= SPI_CFG1_DSIZE;
+		cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;
+
+		spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
+		fthlv = spi->cur_fthlv - 1;
+
+		cfg1_clrb |= SPI_CFG1_FTHLV;
+		cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
+	}
+
+	if (spi->cur_speed != transfer->speed_hz) {
+		int mbr;
+
+		/* Update spi->cur_speed with real clock speed */
+		mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
+		if (mbr < 0) {
+			ret = mbr;
+			goto out;
+		}
+
+		transfer->speed_hz = spi->cur_speed;
+
+		cfg1_clrb |= SPI_CFG1_MBR;
+		cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
+	}
+
+	if (cfg1_clrb || cfg1_setb)
+		writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
+				~cfg1_clrb) | cfg1_setb,
+			       spi->base + STM32_SPI_CFG1);
+
+	mode = SPI_FULL_DUPLEX;
+	if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
+		/*
+		 * SPI_3WIRE with both xfer->tx_buf != NULL and
+		 * xfer->rx_buf != NULL is forbidden and rejected by the SPI
+		 * subsystem's validation, so the transfer direction can be
+		 * determined from whichever buffer is set.
+		 */
+		mode = SPI_HALF_DUPLEX;
+		if (!transfer->tx_buf)
+			stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+		else if (!transfer->rx_buf)
+			stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
+	} else {
+		if (!transfer->tx_buf)
+			mode = SPI_SIMPLEX_RX;
+		else if (!transfer->rx_buf)
+			mode = SPI_SIMPLEX_TX;
+	}
+	if (spi->cur_comm != mode) {
+		spi->cur_comm = mode;
+
+		cfg2_clrb |= SPI_CFG2_COMM;
+		cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
+	}
+
+	cfg2_clrb |= SPI_CFG2_MIDI;
+	if ((transfer->len > 1) && (spi->cur_midi > 0)) {
+		u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
+		u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
+			       (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);
+
+		dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
+			sck_period_ns, midi, midi * sck_period_ns);
+
+		cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
+	}
+
+	if (cfg2_clrb || cfg2_setb)
+		writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
+				~cfg2_clrb) | cfg2_setb,
+			       spi->base + STM32_SPI_CFG2);
+
+	if (spi->cur_bpw <= 8)
+		nb_words = transfer->len;
+	else if (spi->cur_bpw <= 16)
+		nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
+	else
+		nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
+	nb_words <<= SPI_CR2_TSIZE_SHIFT;
+
+	if (nb_words <= SPI_CR2_TSIZE) {
+		writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
+	} else {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	spi->cur_xferlen = transfer->len;
+
+	dev_dbg(spi->dev, "transfer communication mode set to %d\n",
+		spi->cur_comm);
+	dev_dbg(spi->dev,
+		"data frame of %d-bit, data packet of %d data frames\n",
+		spi->cur_bpw, spi->cur_fthlv);
+	dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
+	dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
+		spi->cur_xferlen, nb_words);
+	dev_dbg(spi->dev, "dma %s\n",
+		(spi->cur_usedma) ? "enabled" : "disabled");
+
+out:
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	return ret;
+}
+
+/**
+ * stm32_spi_transfer_one - transfer a single spi_transfer
+ *
+ * It must return 0 if the transfer is finished or 1 if the transfer is still
+ * in progress.
+ */
+static int stm32_spi_transfer_one(struct spi_master *master,
+				  struct spi_device *spi_dev,
+				  struct spi_transfer *transfer)
+{
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+	int ret;
+
+	spi->tx_buf = transfer->tx_buf;
+	spi->rx_buf = transfer->rx_buf;
+	spi->tx_len = spi->tx_buf ? transfer->len : 0;
+	spi->rx_len = spi->rx_buf ? transfer->len : 0;
+
+	spi->cur_usedma = (master->can_dma &&
+			   stm32_spi_can_dma(master, spi_dev, transfer));
+
+	ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
+	if (ret) {
+		dev_err(spi->dev, "SPI transfer setup failed\n");
+		return ret;
+	}
+
+	if (spi->cur_usedma)
+		return stm32_spi_transfer_one_dma(spi, transfer);
+	else
+		return stm32_spi_transfer_one_irq(spi);
+}
+
+/**
+ * stm32_spi_unprepare_msg - relax the hardware
+ *
+ * Normally, if TSIZE has been configured, we should relax the hardware at the
+ * reception of the EOT interrupt. But in case of error, EOT will not be
+ * raised. So the subsystem unprepare_message call allows us to properly
+ * complete the transfer from a hardware point of view.
+ */
+static int stm32_spi_unprepare_msg(struct spi_master *master,
+				   struct spi_message *msg)
+{
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+
+	stm32_spi_disable(spi);
+
+	return 0;
+}
+
+/**
+ * stm32_spi_config - Configure SPI controller as SPI master
+ */
+static int stm32_spi_config(struct stm32_spi *spi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&spi->lock, flags);
+
+	/* Ensure I2SMOD bit is kept cleared */
+	stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
+
+	/*
+	 * - SS input value high
+	 * - transmitter half duplex direction
+	 * - automatic communication suspend when RX-Fifo is full
+	 */
+	stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
+					       SPI_CR1_HDDIR |
+					       SPI_CR1_MASRX);
+
+	/*
+	 * - Set the master mode (default Motorola mode)
+	 * - Assume a 1 master / n slaves configuration, with the SS input
+	 *   value determined by the SSI bit
+	 * - keep control of all associated GPIOs
+	 */
+	stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
+						SPI_CFG2_SSM |
+						SPI_CFG2_AFCNTR);
+
+	spin_unlock_irqrestore(&spi->lock, flags);
+
+	return 0;
+}
+
+static const struct of_device_id stm32_spi_of_match[] = {
+	{ .compatible = "st,stm32h7-spi", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
+
+static int stm32_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct stm32_spi *spi;
+	struct resource *res;
+	int i, ret;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+	if (!master) {
+		dev_err(&pdev->dev, "spi master allocation failed\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, master);
+
+	spi = spi_master_get_devdata(master);
+	spi->dev = &pdev->dev;
+	spi->master = master;
+	spin_lock_init(&spi->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spi->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spi->base)) {
+		ret = PTR_ERR(spi->base);
+		goto err_master_put;
+	}
+	spi->phys_addr = (dma_addr_t)res->start;
+
+	spi->irq = platform_get_irq(pdev, 0);
+	if (spi->irq <= 0) {
+		dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
+		ret = -ENOENT;
+		goto err_master_put;
+	}
+	ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
+					stm32_spi_irq, IRQF_ONESHOT,
+					pdev->name, master);
+	if (ret) {
+		dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
+			ret);
+		goto err_master_put;
+	}
+
+	spi->clk = devm_clk_get(&pdev->dev, 0);
+	if (IS_ERR(spi->clk)) {
+		ret = PTR_ERR(spi->clk);
+		dev_err(&pdev->dev, "clk get failed: %d\n", ret);
+		goto err_master_put;
+	}
+
+	ret = clk_prepare_enable(spi->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
+		goto err_master_put;
+	}
+	spi->clk_rate = clk_get_rate(spi->clk);
+	if (!spi->clk_rate) {
+		dev_err(&pdev->dev, "clk rate = 0\n");
+		ret = -EINVAL;
+		goto err_master_put;
+	}
+
+	spi->rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (!IS_ERR(spi->rst)) {
+		reset_control_assert(spi->rst);
+		udelay(2);
+		reset_control_deassert(spi->rst);
+	}
+
+	spi->fifo_size = stm32_spi_get_fifo_size(spi);
+
+	ret = stm32_spi_config(spi);
+	if (ret) {
+		dev_err(&pdev->dev, "controller configuration failed: %d\n",
+			ret);
+		goto err_clk_disable;
+	}
+
+	master->dev.of_node = pdev->dev.of_node;
+	master->auto_runtime_pm = true;
+	master->bus_num = pdev->id;
+	master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
+			    SPI_3WIRE | SPI_LOOP;
+	master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
+	master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
+	master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
+	master->setup = stm32_spi_setup;
+	master->prepare_message = stm32_spi_prepare_msg;
+	master->transfer_one = stm32_spi_transfer_one;
+	master->unprepare_message = stm32_spi_unprepare_msg;
+
+	spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
+	if (!spi->dma_tx)
+		dev_warn(&pdev->dev, "failed to request tx dma channel\n");
+	else
+		master->dma_tx = spi->dma_tx;
+
+	spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
+	if (!spi->dma_rx)
+		dev_warn(&pdev->dev, "failed to request rx dma channel\n");
+	else
+		master->dma_rx = spi->dma_rx;
+
+	if (spi->dma_tx || spi->dma_rx)
+		master->can_dma = stm32_spi_can_dma;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&pdev->dev, "spi master registration failed: %d\n",
+			ret);
+		goto err_dma_release;
+	}
+
+	if (!master->cs_gpios) {
+		dev_err(&pdev->dev, "no CS gpios available\n");
+		ret = -EINVAL;
+		goto err_dma_release;
+	}
+
+	for (i = 0; i < master->num_chipselect; i++) {
+		if (!gpio_is_valid(master->cs_gpios[i])) {
+			dev_err(&pdev->dev, "%i is not a valid gpio\n",
+				master->cs_gpios[i]);
+			ret = -EINVAL;
+			goto err_dma_release;
+		}
+
+		ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+					DRIVER_NAME);
+		if (ret) {
+			dev_err(&pdev->dev, "can't get CS gpio %i\n",
+				master->cs_gpios[i]);
+			goto err_dma_release;
+		}
+	}
+
+	dev_info(&pdev->dev, "driver initialized\n");
+
+	return 0;
+
+err_dma_release:
+	if (spi->dma_tx)
+		dma_release_channel(spi->dma_tx);
+	if (spi->dma_rx)
+		dma_release_channel(spi->dma_rx);
+
+	pm_runtime_disable(&pdev->dev);
+err_clk_disable:
+	clk_disable_unprepare(spi->clk);
+err_master_put:
+	spi_master_put(master);
+
+	return ret;
+}
+
+static int stm32_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+
+	stm32_spi_disable(spi);
+
+	if (master->dma_tx)
+		dma_release_channel(master->dma_tx);
+	if (master->dma_rx)
+		dma_release_channel(master->dma_rx);
+
+	clk_disable_unprepare(spi->clk);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int stm32_spi_runtime_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(spi->clk);
+
+	return 0;
+}
+
+static int stm32_spi_runtime_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+
+	return clk_prepare_enable(spi->clk);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_spi_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	int ret;
+
+	ret = spi_master_suspend(master);
+	if (ret)
+		return ret;
+
+	return pm_runtime_force_suspend(dev);
+}
+
+static int stm32_spi_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct stm32_spi *spi = spi_master_get_devdata(master);
+	int ret;
+
+	ret = pm_runtime_force_resume(dev);
+	if (ret)
+		return ret;
+
+	ret = spi_master_resume(master);
+	if (ret)
+		clk_disable_unprepare(spi->clk);
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops stm32_spi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
+	SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
+			   stm32_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_spi_driver = {
+	.probe = stm32_spi_probe,
+	.remove = stm32_spi_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.pm = &stm32_spi_pm_ops,
+		.of_match_table = stm32_spi_of_match,
+	},
+};
+
+module_platform_driver(stm32_spi_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 89254a5..4fcbb0a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -48,11 +48,11 @@ static void spidev_release(struct device *dev)
 {
 	struct spi_device	*spi = to_spi_device(dev);
 
-	/* spi masters may cleanup for released devices */
-	if (spi->master->cleanup)
-		spi->master->cleanup(spi);
+	/* spi controllers may cleanup for released devices */
+	if (spi->controller->cleanup)
+		spi->controller->cleanup(spi);
 
-	spi_master_put(spi->master);
+	spi_controller_put(spi->controller);
 	kfree(spi);
 }
 
@@ -71,17 +71,17 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
 static DEVICE_ATTR_RO(modalias);
 
 #define SPI_STATISTICS_ATTRS(field, file)				\
-static ssize_t spi_master_##field##_show(struct device *dev,		\
-					 struct device_attribute *attr,	\
-					 char *buf)			\
+static ssize_t spi_controller_##field##_show(struct device *dev,	\
+					     struct device_attribute *attr, \
+					     char *buf)			\
 {									\
-	struct spi_master *master = container_of(dev,			\
-						 struct spi_master, dev); \
-	return spi_statistics_##field##_show(&master->statistics, buf);	\
+	struct spi_controller *ctlr = container_of(dev,			\
+					 struct spi_controller, dev);	\
+	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
 }									\
-static struct device_attribute dev_attr_spi_master_##field = {		\
-	.attr = { .name = file, .mode = S_IRUGO },			\
-	.show = spi_master_##field##_show,				\
+static struct device_attribute dev_attr_spi_controller_##field = {	\
+	.attr = { .name = file, .mode = 0444 },				\
+	.show = spi_controller_##field##_show,				\
 };									\
 static ssize_t spi_device_##field##_show(struct device *dev,		\
 					 struct device_attribute *attr,	\
@@ -91,7 +91,7 @@ static ssize_t spi_device_##field##_show(struct device *dev,		\
 	return spi_statistics_##field##_show(&spi->statistics, buf);	\
 }									\
 static struct device_attribute dev_attr_spi_device_##field = {		\
-	.attr = { .name = file, .mode = S_IRUGO },			\
+	.attr = { .name = file, .mode = 0444 },				\
 	.show = spi_device_##field##_show,				\
 }
 
@@ -201,51 +201,51 @@ static const struct attribute_group *spi_dev_groups[] = {
 	NULL,
 };
 
-static struct attribute *spi_master_statistics_attrs[] = {
-	&dev_attr_spi_master_messages.attr,
-	&dev_attr_spi_master_transfers.attr,
-	&dev_attr_spi_master_errors.attr,
-	&dev_attr_spi_master_timedout.attr,
-	&dev_attr_spi_master_spi_sync.attr,
-	&dev_attr_spi_master_spi_sync_immediate.attr,
-	&dev_attr_spi_master_spi_async.attr,
-	&dev_attr_spi_master_bytes.attr,
-	&dev_attr_spi_master_bytes_rx.attr,
-	&dev_attr_spi_master_bytes_tx.attr,
-	&dev_attr_spi_master_transfer_bytes_histo0.attr,
-	&dev_attr_spi_master_transfer_bytes_histo1.attr,
-	&dev_attr_spi_master_transfer_bytes_histo2.attr,
-	&dev_attr_spi_master_transfer_bytes_histo3.attr,
-	&dev_attr_spi_master_transfer_bytes_histo4.attr,
-	&dev_attr_spi_master_transfer_bytes_histo5.attr,
-	&dev_attr_spi_master_transfer_bytes_histo6.attr,
-	&dev_attr_spi_master_transfer_bytes_histo7.attr,
-	&dev_attr_spi_master_transfer_bytes_histo8.attr,
-	&dev_attr_spi_master_transfer_bytes_histo9.attr,
-	&dev_attr_spi_master_transfer_bytes_histo10.attr,
-	&dev_attr_spi_master_transfer_bytes_histo11.attr,
-	&dev_attr_spi_master_transfer_bytes_histo12.attr,
-	&dev_attr_spi_master_transfer_bytes_histo13.attr,
-	&dev_attr_spi_master_transfer_bytes_histo14.attr,
-	&dev_attr_spi_master_transfer_bytes_histo15.attr,
-	&dev_attr_spi_master_transfer_bytes_histo16.attr,
-	&dev_attr_spi_master_transfers_split_maxsize.attr,
+static struct attribute *spi_controller_statistics_attrs[] = {
+	&dev_attr_spi_controller_messages.attr,
+	&dev_attr_spi_controller_transfers.attr,
+	&dev_attr_spi_controller_errors.attr,
+	&dev_attr_spi_controller_timedout.attr,
+	&dev_attr_spi_controller_spi_sync.attr,
+	&dev_attr_spi_controller_spi_sync_immediate.attr,
+	&dev_attr_spi_controller_spi_async.attr,
+	&dev_attr_spi_controller_bytes.attr,
+	&dev_attr_spi_controller_bytes_rx.attr,
+	&dev_attr_spi_controller_bytes_tx.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
+	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
+	&dev_attr_spi_controller_transfers_split_maxsize.attr,
 	NULL,
 };
 
-static const struct attribute_group spi_master_statistics_group = {
+static const struct attribute_group spi_controller_statistics_group = {
 	.name  = "statistics",
-	.attrs  = spi_master_statistics_attrs,
+	.attrs  = spi_controller_statistics_attrs,
 };
 
 static const struct attribute_group *spi_master_groups[] = {
-	&spi_master_statistics_group,
+	&spi_controller_statistics_group,
 	NULL,
 };
 
 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 				       struct spi_transfer *xfer,
-				       struct spi_master *master)
+				       struct spi_controller *ctlr)
 {
 	unsigned long flags;
 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -260,10 +260,10 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 
 	stats->bytes += xfer->len;
 	if ((xfer->tx_buf) &&
-	    (xfer->tx_buf != master->dummy_tx))
+	    (xfer->tx_buf != ctlr->dummy_tx))
 		stats->bytes_tx += xfer->len;
 	if ((xfer->rx_buf) &&
-	    (xfer->rx_buf != master->dummy_rx))
+	    (xfer->rx_buf != ctlr->dummy_rx))
 		stats->bytes_rx += xfer->len;
 
 	spin_unlock_irqrestore(&stats->lock, flags);
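As an illustration (not part of this patch), the transfer_bytes_histo0..histo16 attributes above form a power-of-two histogram, and the l2len computation in spi_statistics_add_transfer_stats() selects the bucket. A self-contained userspace sketch of that mapping, assuming the 17-bucket size implied by the attribute list:

#include <stdio.h>

/* Mirror of the bucket selection: min(fls(len), 17) - 1, clamped at 0. */
static int histo_bucket(unsigned int len)
{
	int fls = len ? 32 - __builtin_clz(len) : 0;	/* kernel fls() equivalent */
	int l2len = (fls < 17 ? fls : 17) - 1;

	return l2len < 0 ? 0 : l2len;
}

int main(void)
{
	/* 1 byte -> bucket 0, 100 bytes -> bucket 6 (64..127), 64 KiB and up -> bucket 16 */
	printf("%d %d %d\n", histo_bucket(1), histo_bucket(100), histo_bucket(1 << 16));
	return 0;
}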
@@ -405,7 +405,7 @@ EXPORT_SYMBOL_GPL(__spi_register_driver);
 /*-------------------------------------------------------------------------*/
 
 /* SPI devices should normally not be created by SPI device drivers; that
- * would make them board-specific.  Similarly with SPI master drivers.
+ * would make them board-specific.  Similarly with SPI controller drivers.
  * Device registration normally goes into code like arch/.../mach.../board-YYY.c
  * with other readonly (flashable) information about mainboard devices.
  */
@@ -416,17 +416,17 @@ struct boardinfo {
 };
 
 static LIST_HEAD(board_list);
-static LIST_HEAD(spi_master_list);
+static LIST_HEAD(spi_controller_list);
 
 /*
  * Used to protect add/del operations for board_info list and
- * spi_master list, and their matching process
+ * spi_controller list, and their matching process
  */
 static DEFINE_MUTEX(board_lock);
 
 /**
  * spi_alloc_device - Allocate a new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
  * Context: can sleep
  *
  * Allows a driver to allocate and initialize a spi_device without
@@ -435,27 +435,27 @@ static DEFINE_MUTEX(board_lock);
  * spi_add_device() on it.
  *
  * The caller is responsible for calling spi_add_device() on the returned
- * spi_device structure to add it to the SPI master.  If the caller
+ * spi_device structure to add it to the SPI controller.  If the caller
  * needs to discard the spi_device without adding it, then it should
  * call spi_dev_put() on it.
  *
  * Return: a pointer to the new device, or NULL.
  */
-struct spi_device *spi_alloc_device(struct spi_master *master)
+struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 {
 	struct spi_device	*spi;
 
-	if (!spi_master_get(master))
+	if (!spi_controller_get(ctlr))
 		return NULL;
 
 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 	if (!spi) {
-		spi_master_put(master);
+		spi_controller_put(ctlr);
 		return NULL;
 	}
 
-	spi->master = master;
-	spi->dev.parent = &master->dev;
+	spi->master = spi->controller = ctlr;
+	spi->dev.parent = &ctlr->dev;
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
 	spi->cs_gpio = -ENOENT;
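Note that spi_alloc_device() now fills in both spi->master and spi->controller with the same pointer, so client code written against either field name reaches the same controller during the rename transition. For illustration only (demo_probe is hypothetical, not from this patch):

static int demo_probe(struct spi_device *spi)
{
	/* Both names alias the same spi_controller after this change. */
	struct device *via_legacy = &spi->master->dev;
	struct device *via_new    = &spi->controller->dev;

	WARN_ON(via_legacy != via_new);		/* never triggers */
	return 0;
}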
@@ -476,7 +476,7 @@ static void spi_dev_set_name(struct spi_device *spi)
 		return;
 	}
 
-	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
 		     spi->chip_select);
 }
 
@@ -485,7 +485,7 @@ static int spi_dev_check(struct device *dev, void *data)
 	struct spi_device *spi = to_spi_device(dev);
 	struct spi_device *new_spi = data;
 
-	if (spi->master == new_spi->master &&
+	if (spi->controller == new_spi->controller &&
 	    spi->chip_select == new_spi->chip_select)
 		return -EBUSY;
 	return 0;
@@ -503,15 +503,14 @@ static int spi_dev_check(struct device *dev, void *data)
 int spi_add_device(struct spi_device *spi)
 {
 	static DEFINE_MUTEX(spi_add_lock);
-	struct spi_master *master = spi->master;
-	struct device *dev = master->dev.parent;
+	struct spi_controller *ctlr = spi->controller;
+	struct device *dev = ctlr->dev.parent;
 	int status;
 
 	/* Chipselects are numbered 0..max; validate. */
-	if (spi->chip_select >= master->num_chipselect) {
-		dev_err(dev, "cs%d >= max %d\n",
-			spi->chip_select,
-			master->num_chipselect);
+	if (spi->chip_select >= ctlr->num_chipselect) {
+		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+			ctlr->num_chipselect);
 		return -EINVAL;
 	}
 
@@ -531,8 +530,8 @@ int spi_add_device(struct spi_device *spi)
 		goto done;
 	}
 
-	if (master->cs_gpios)
-		spi->cs_gpio = master->cs_gpios[spi->chip_select];
+	if (ctlr->cs_gpios)
+		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
 
 	/* Drivers may modify this initial i/o setup, but will
 	 * normally rely on the device being setup.  Devices
@@ -561,7 +560,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
 
 /**
  * spi_new_device - instantiate one new SPI device
- * @master: Controller to which device is connected
+ * @ctlr: Controller to which device is connected
  * @chip: Describes the SPI device
  * Context: can sleep
  *
@@ -573,7 +572,7 @@ EXPORT_SYMBOL_GPL(spi_add_device);
  *
  * Return: the new device, or NULL.
  */
-struct spi_device *spi_new_device(struct spi_master *master,
+struct spi_device *spi_new_device(struct spi_controller *ctlr,
 				  struct spi_board_info *chip)
 {
 	struct spi_device	*proxy;
@@ -586,7 +585,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
 	 * suggests syslogged diagnostics are best here (ugh).
 	 */
 
-	proxy = spi_alloc_device(master);
+	proxy = spi_alloc_device(ctlr);
 	if (!proxy)
 		return NULL;
 
@@ -604,7 +603,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
 	if (chip->properties) {
 		status = device_add_properties(&proxy->dev, chip->properties);
 		if (status) {
-			dev_err(&master->dev,
+			dev_err(&ctlr->dev,
 				"failed to add properties to '%s': %d\n",
 				chip->modalias, status);
 			goto err_dev_put;
@@ -631,7 +630,7 @@ EXPORT_SYMBOL_GPL(spi_new_device);
  * @spi: spi_device to unregister
  *
  * Start making the passed SPI device vanish. Normally this would be handled
- * by spi_unregister_master().
+ * by spi_unregister_controller().
  */
 void spi_unregister_device(struct spi_device *spi)
 {
@@ -648,17 +647,17 @@ void spi_unregister_device(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_unregister_device);
 
-static void spi_match_master_to_boardinfo(struct spi_master *master,
-				struct spi_board_info *bi)
+static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
+					      struct spi_board_info *bi)
 {
 	struct spi_device *dev;
 
-	if (master->bus_num != bi->bus_num)
+	if (ctlr->bus_num != bi->bus_num)
 		return;
 
-	dev = spi_new_device(master, bi);
+	dev = spi_new_device(ctlr, bi);
 	if (!dev)
-		dev_err(master->dev.parent, "can't create new device for %s\n",
+		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
 			bi->modalias);
 }
 
@@ -697,7 +696,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 		return -ENOMEM;
 
 	for (i = 0; i < n; i++, bi++, info++) {
-		struct spi_master *master;
+		struct spi_controller *ctlr;
 
 		memcpy(&bi->board_info, info, sizeof(*info));
 		if (info->properties) {
@@ -709,8 +708,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 		mutex_lock(&board_lock);
 		list_add_tail(&bi->list, &board_list);
-		list_for_each_entry(master, &spi_master_list, list)
-			spi_match_master_to_boardinfo(master, &bi->board_info);
+		list_for_each_entry(ctlr, &spi_controller_list, list)
+			spi_match_controller_to_boardinfo(ctlr,
+							  &bi->board_info);
 		mutex_unlock(&board_lock);
 	}
 
@@ -727,16 +727,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	if (gpio_is_valid(spi->cs_gpio)) {
 		gpio_set_value(spi->cs_gpio, !enable);
 		/* Some SPI masters need both GPIO CS & slave_select */
-		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
-		    spi->master->set_cs)
-			spi->master->set_cs(spi, !enable);
-	} else if (spi->master->set_cs) {
-		spi->master->set_cs(spi, !enable);
+		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+		    spi->controller->set_cs)
+			spi->controller->set_cs(spi, !enable);
+	} else if (spi->controller->set_cs) {
+		spi->controller->set_cs(spi, !enable);
 	}
 }
 
 #ifdef CONFIG_HAS_DMA
-static int spi_map_buf(struct spi_master *master, struct device *dev,
+static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		       struct sg_table *sgt, void *buf, size_t len,
 		       enum dma_data_direction dir)
 {
@@ -761,7 +761,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 	} else if (virt_addr_valid(buf)) {
-		desc_len = min_t(int, max_seg_size, master->max_dma_len);
+		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
 		sgs = DIV_ROUND_UP(len, desc_len);
 	} else {
 		return -EINVAL;
@@ -811,7 +811,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	return 0;
 }
 
-static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 			  struct sg_table *sgt, enum dma_data_direction dir)
 {
 	if (sgt->orig_nents) {
@@ -820,31 +820,31 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
 	}
 }
 
-static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
 	int ret;
 
-	if (!master->can_dma)
+	if (!ctlr->can_dma)
 		return 0;
 
-	if (master->dma_tx)
-		tx_dev = master->dma_tx->device->dev;
+	if (ctlr->dma_tx)
+		tx_dev = ctlr->dma_tx->device->dev;
 	else
-		tx_dev = master->dev.parent;
+		tx_dev = ctlr->dev.parent;
 
-	if (master->dma_rx)
-		rx_dev = master->dma_rx->device->dev;
+	if (ctlr->dma_rx)
+		rx_dev = ctlr->dma_rx->device->dev;
 	else
-		rx_dev = master->dev.parent;
+		rx_dev = ctlr->dev.parent;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		if (!master->can_dma(master, msg->spi, xfer))
+		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
 					  (void *)xfer->tx_buf, xfer->len,
 					  DMA_TO_DEVICE);
 			if (ret != 0)
@@ -852,79 +852,78 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 		}
 
 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
 					  xfer->rx_buf, xfer->len,
 					  DMA_FROM_DEVICE);
 			if (ret != 0) {
-				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
 					      DMA_TO_DEVICE);
 				return ret;
 			}
 		}
 	}
 
-	master->cur_msg_mapped = true;
+	ctlr->cur_msg_mapped = true;
 
 	return 0;
 }
 
-static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	struct device *tx_dev, *rx_dev;
 
-	if (!master->cur_msg_mapped || !master->can_dma)
+	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
 		return 0;
 
-	if (master->dma_tx)
-		tx_dev = master->dma_tx->device->dev;
+	if (ctlr->dma_tx)
+		tx_dev = ctlr->dma_tx->device->dev;
 	else
-		tx_dev = master->dev.parent;
+		tx_dev = ctlr->dev.parent;
 
-	if (master->dma_rx)
-		rx_dev = master->dma_rx->device->dev;
+	if (ctlr->dma_rx)
+		rx_dev = ctlr->dma_rx->device->dev;
 	else
-		rx_dev = master->dev.parent;
+		rx_dev = ctlr->dev.parent;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		if (!master->can_dma(master, msg->spi, xfer))
+		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
-		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}
 
 	return 0;
 }
 #else /* !CONFIG_HAS_DMA */
-static inline int spi_map_buf(struct spi_master *master,
-			      struct device *dev, struct sg_table *sgt,
-			      void *buf, size_t len,
+static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+			      struct sg_table *sgt, void *buf, size_t len,
 			      enum dma_data_direction dir)
 {
 	return -EINVAL;
 }
 
-static inline void spi_unmap_buf(struct spi_master *master,
+static inline void spi_unmap_buf(struct spi_controller *ctlr,
 				 struct device *dev, struct sg_table *sgt,
 				 enum dma_data_direction dir)
 {
 }
 
-static inline int __spi_map_msg(struct spi_master *master,
+static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
 {
 	return 0;
 }
 
-static inline int __spi_unmap_msg(struct spi_master *master,
+static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 				  struct spi_message *msg)
 {
 	return 0;
 }
 #endif /* !CONFIG_HAS_DMA */
 
-static inline int spi_unmap_msg(struct spi_master *master,
+static inline int spi_unmap_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
@@ -934,63 +933,63 @@ static inline int spi_unmap_msg(struct spi_master *master,
 		 * Restore the original value of tx_buf or rx_buf if they are
 		 * NULL.
 		 */
-		if (xfer->tx_buf == master->dummy_tx)
+		if (xfer->tx_buf == ctlr->dummy_tx)
 			xfer->tx_buf = NULL;
-		if (xfer->rx_buf == master->dummy_rx)
+		if (xfer->rx_buf == ctlr->dummy_rx)
 			xfer->rx_buf = NULL;
 	}
 
-	return __spi_unmap_msg(master, msg);
+	return __spi_unmap_msg(ctlr, msg);
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	void *tmp;
 	unsigned int max_tx, max_rx;
 
-	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+	if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
 		max_tx = 0;
 		max_rx = 0;
 
 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
 			    !xfer->tx_buf)
 				max_tx = max(xfer->len, max_tx);
-			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
 			    !xfer->rx_buf)
 				max_rx = max(xfer->len, max_rx);
 		}
 
 		if (max_tx) {
-			tmp = krealloc(master->dummy_tx, max_tx,
+			tmp = krealloc(ctlr->dummy_tx, max_tx,
 				       GFP_KERNEL | GFP_DMA);
 			if (!tmp)
 				return -ENOMEM;
-			master->dummy_tx = tmp;
+			ctlr->dummy_tx = tmp;
 			memset(tmp, 0, max_tx);
 		}
 
 		if (max_rx) {
-			tmp = krealloc(master->dummy_rx, max_rx,
+			tmp = krealloc(ctlr->dummy_rx, max_rx,
 				       GFP_KERNEL | GFP_DMA);
 			if (!tmp)
 				return -ENOMEM;
-			master->dummy_rx = tmp;
+			ctlr->dummy_rx = tmp;
 		}
 
 		if (max_tx || max_rx) {
 			list_for_each_entry(xfer, &msg->transfers,
 					    transfer_list) {
 				if (!xfer->tx_buf)
-					xfer->tx_buf = master->dummy_tx;
+					xfer->tx_buf = ctlr->dummy_tx;
 				if (!xfer->rx_buf)
-					xfer->rx_buf = master->dummy_rx;
+					xfer->rx_buf = ctlr->dummy_rx;
 			}
 		}
 	}
 
-	return __spi_map_msg(master, msg);
+	return __spi_map_msg(ctlr, msg);
 }
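spi_map_msg() is also where the SPI_CONTROLLER_MUST_TX/SPI_CONTROLLER_MUST_RX flags take effect: a controller whose hardware always shifts data in both directions can set them and let the core substitute its dummy buffers for one-directional transfers. A fragment of what a hypothetical driver's probe might do (illustrative, not from this patch):

	/* Hardware always clocks both directions: have the core hand
	 * transfer_one() a zeroed dummy_tx for RX-only transfers and a
	 * dummy_rx for TX-only ones, instead of NULL buffers.
	 * spi_unmap_msg() restores the NULL pointers before the message
	 * is finalized.
	 */
	ctlr->flags |= SPI_CONTROLLER_MUST_TX | SPI_CONTROLLER_MUST_RX;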
 
 /*
@@ -1000,14 +999,14 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
  * drivers which implement a transfer_one() operation.  It provides
  * standard handling of delays and chip select management.
  */
-static int spi_transfer_one_message(struct spi_master *master,
+static int spi_transfer_one_message(struct spi_controller *ctlr,
 				    struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
 	bool keep_cs = false;
 	int ret = 0;
 	unsigned long long ms = 1;
-	struct spi_statistics *statm = &master->statistics;
+	struct spi_statistics *statm = &ctlr->statistics;
 	struct spi_statistics *stats = &msg->spi->statistics;
 
 	spi_set_cs(msg->spi, true);
@@ -1018,13 +1017,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);
 
-		spi_statistics_add_transfer_stats(statm, xfer, master);
-		spi_statistics_add_transfer_stats(stats, xfer, master);
+		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
+		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
 
 		if (xfer->tx_buf || xfer->rx_buf) {
-			reinit_completion(&master->xfer_completion);
+			reinit_completion(&ctlr->xfer_completion);
 
-			ret = master->transfer_one(master, msg->spi, xfer);
+			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
 				SPI_STATISTICS_INCREMENT_FIELD(statm,
 							       errors);
@@ -1044,7 +1043,7 @@ static int spi_transfer_one_message(struct spi_master *master,
 				if (ms > UINT_MAX)
 					ms = UINT_MAX;
 
-				ms = wait_for_completion_timeout(&master->xfer_completion,
+				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
 								 msecs_to_jiffies(ms));
 			}
 
@@ -1099,33 +1098,33 @@ static int spi_transfer_one_message(struct spi_master *master,
 	if (msg->status == -EINPROGRESS)
 		msg->status = ret;
 
-	if (msg->status && master->handle_err)
-		master->handle_err(master, msg);
+	if (msg->status && ctlr->handle_err)
+		ctlr->handle_err(ctlr, msg);
 
-	spi_res_release(master, msg);
+	spi_res_release(ctlr, msg);
 
-	spi_finalize_current_message(master);
+	spi_finalize_current_message(ctlr);
 
 	return ret;
 }
 
 /**
  * spi_finalize_current_transfer - report completion of a transfer
- * @master: the master reporting completion
+ * @ctlr: the controller reporting completion
  *
  * Called by SPI drivers using the core transfer_one_message()
  * implementation to notify it that the current interrupt driven
  * transfer has finished and the next one may be scheduled.
  */
-void spi_finalize_current_transfer(struct spi_master *master)
+void spi_finalize_current_transfer(struct spi_controller *ctlr)
 {
-	complete(&master->xfer_completion);
+	complete(&ctlr->xfer_completion);
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
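For interrupt-driven controllers this pairs with transfer_one(): returning a positive value (conventionally 1) tells the core the transfer is still in flight, and spi_transfer_one_message() then waits on ctlr->xfer_completion until the driver's interrupt handler calls spi_finalize_current_transfer(). A hypothetical sketch, where demo_start_hw() is an assumed helper and nothing below is taken from this patch:

static int demo_transfer_one(struct spi_controller *ctlr,
			     struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	demo_start_hw(ctlr, xfer);	/* assumed helper: kick off DMA/FIFO */
	return 1;			/* in flight; core waits on xfer_completion */
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	/* hardware says the transfer is done: wake spi_transfer_one_message() */
	spi_finalize_current_transfer(ctlr);
	return IRQ_HANDLED;
}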
 
 /**
  * __spi_pump_messages - function which processes spi message queue
- * @master: master to process queue for
+ * @ctlr: controller to process queue for
  * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
@@ -1136,136 +1135,136 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
 	unsigned long flags;
 	bool was_busy = false;
 	int ret;
 
 	/* Lock queue */
-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/* Make sure we are not already running a message */
-	if (master->cur_msg) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->cur_msg) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
 
 	/* If another context is idling the device then defer */
-	if (master->idling) {
-		kthread_queue_work(&master->kworker, &master->pump_messages);
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->idling) {
+		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
 
 	/* Check if the queue is idle */
-	if (list_empty(&master->queue) || !master->running) {
-		if (!master->busy) {
-			spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (list_empty(&ctlr->queue) || !ctlr->running) {
+		if (!ctlr->busy) {
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&master->kworker,
-					   &master->pump_messages);
-			spin_unlock_irqrestore(&master->queue_lock, flags);
+			kthread_queue_work(&ctlr->kworker,
+					   &ctlr->pump_messages);
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}
 
-		master->busy = false;
-		master->idling = true;
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+		ctlr->busy = false;
+		ctlr->idling = true;
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-		kfree(master->dummy_rx);
-		master->dummy_rx = NULL;
-		kfree(master->dummy_tx);
-		master->dummy_tx = NULL;
-		if (master->unprepare_transfer_hardware &&
-		    master->unprepare_transfer_hardware(master))
-			dev_err(&master->dev,
+		kfree(ctlr->dummy_rx);
+		ctlr->dummy_rx = NULL;
+		kfree(ctlr->dummy_tx);
+		ctlr->dummy_tx = NULL;
+		if (ctlr->unprepare_transfer_hardware &&
+		    ctlr->unprepare_transfer_hardware(ctlr))
+			dev_err(&ctlr->dev,
 				"failed to unprepare transfer hardware\n");
-		if (master->auto_runtime_pm) {
-			pm_runtime_mark_last_busy(master->dev.parent);
-			pm_runtime_put_autosuspend(master->dev.parent);
+		if (ctlr->auto_runtime_pm) {
+			pm_runtime_mark_last_busy(ctlr->dev.parent);
+			pm_runtime_put_autosuspend(ctlr->dev.parent);
 		}
-		trace_spi_master_idle(master);
+		trace_spi_controller_idle(ctlr);
 
-		spin_lock_irqsave(&master->queue_lock, flags);
-		master->idling = false;
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		ctlr->idling = false;
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
 
 	/* Extract head of queue */
-	master->cur_msg =
-		list_first_entry(&master->queue, struct spi_message, queue);
+	ctlr->cur_msg =
+		list_first_entry(&ctlr->queue, struct spi_message, queue);
 
-	list_del_init(&master->cur_msg->queue);
-	if (master->busy)
+	list_del_init(&ctlr->cur_msg->queue);
+	if (ctlr->busy)
 		was_busy = true;
 	else
-		master->busy = true;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+		ctlr->busy = true;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	mutex_lock(&master->io_mutex);
+	mutex_lock(&ctlr->io_mutex);
 
-	if (!was_busy && master->auto_runtime_pm) {
-		ret = pm_runtime_get_sync(master->dev.parent);
+	if (!was_busy && ctlr->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(ctlr->dev.parent);
 		if (ret < 0) {
-			dev_err(&master->dev, "Failed to power device: %d\n",
+			dev_err(&ctlr->dev, "Failed to power device: %d\n",
 				ret);
-			mutex_unlock(&master->io_mutex);
+			mutex_unlock(&ctlr->io_mutex);
 			return;
 		}
 	}
 
 	if (!was_busy)
-		trace_spi_master_busy(master);
+		trace_spi_controller_busy(ctlr);
 
-	if (!was_busy && master->prepare_transfer_hardware) {
-		ret = master->prepare_transfer_hardware(master);
+	if (!was_busy && ctlr->prepare_transfer_hardware) {
+		ret = ctlr->prepare_transfer_hardware(ctlr);
 		if (ret) {
-			dev_err(&master->dev,
+			dev_err(&ctlr->dev,
 				"failed to prepare transfer hardware\n");
 
-			if (master->auto_runtime_pm)
-				pm_runtime_put(master->dev.parent);
-			mutex_unlock(&master->io_mutex);
+			if (ctlr->auto_runtime_pm)
+				pm_runtime_put(ctlr->dev.parent);
+			mutex_unlock(&ctlr->io_mutex);
 			return;
 		}
 	}
 
-	trace_spi_message_start(master->cur_msg);
+	trace_spi_message_start(ctlr->cur_msg);
 
-	if (master->prepare_message) {
-		ret = master->prepare_message(master, master->cur_msg);
+	if (ctlr->prepare_message) {
+		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
 		if (ret) {
-			dev_err(&master->dev,
-				"failed to prepare message: %d\n", ret);
-			master->cur_msg->status = ret;
-			spi_finalize_current_message(master);
+			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+				ret);
+			ctlr->cur_msg->status = ret;
+			spi_finalize_current_message(ctlr);
 			goto out;
 		}
-		master->cur_msg_prepared = true;
+		ctlr->cur_msg_prepared = true;
 	}
 
-	ret = spi_map_msg(master, master->cur_msg);
+	ret = spi_map_msg(ctlr, ctlr->cur_msg);
 	if (ret) {
-		master->cur_msg->status = ret;
-		spi_finalize_current_message(master);
+		ctlr->cur_msg->status = ret;
+		spi_finalize_current_message(ctlr);
 		goto out;
 	}
 
-	ret = master->transfer_one_message(master, master->cur_msg);
+	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
 	if (ret) {
-		dev_err(&master->dev,
+		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		goto out;
 	}
 
 out:
-	mutex_unlock(&master->io_mutex);
+	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
 	if (!ret)
@@ -1274,44 +1273,43 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
 /**
  * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * @work: pointer to kthread work struct contained in the controller struct
  */
 static void spi_pump_messages(struct kthread_work *work)
 {
-	struct spi_master *master =
-		container_of(work, struct spi_master, pump_messages);
+	struct spi_controller *ctlr =
+		container_of(work, struct spi_controller, pump_messages);
 
-	__spi_pump_messages(master, true);
+	__spi_pump_messages(ctlr, true);
 }
 
-static int spi_init_queue(struct spi_master *master)
+static int spi_init_queue(struct spi_controller *ctlr)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 
-	master->running = false;
-	master->busy = false;
+	ctlr->running = false;
+	ctlr->busy = false;
 
-	kthread_init_worker(&master->kworker);
-	master->kworker_task = kthread_run(kthread_worker_fn,
-					   &master->kworker, "%s",
-					   dev_name(&master->dev));
-	if (IS_ERR(master->kworker_task)) {
-		dev_err(&master->dev, "failed to create message pump task\n");
-		return PTR_ERR(master->kworker_task);
+	kthread_init_worker(&ctlr->kworker);
+	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
+					 "%s", dev_name(&ctlr->dev));
+	if (IS_ERR(ctlr->kworker_task)) {
+		dev_err(&ctlr->dev, "failed to create message pump task\n");
+		return PTR_ERR(ctlr->kworker_task);
 	}
-	kthread_init_work(&master->pump_messages, spi_pump_messages);
+	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
 
 	/*
-	 * Master config will indicate if this controller should run the
+	 * Controller config will indicate if this controller should run the
 	 * message pump with high (realtime) priority to reduce the transfer
 	 * latency on the bus by minimising the delay between a transfer
 	 * request and the scheduling of the message pump thread. Without this
 	 * setting the message pump thread will remain at default priority.
 	 */
-	if (master->rt) {
-		dev_info(&master->dev,
+	if (ctlr->rt) {
+		dev_info(&ctlr->dev,
 			"will run message pump with realtime priority\n");
-		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+		sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
 	}
 
 	return 0;
@@ -1320,23 +1318,23 @@ static int spi_init_queue(struct spi_master *master)
 /**
  * spi_get_next_queued_message() - called by driver to check for queued
  * messages
- * @master: the master to check for queued messages
+ * @ctlr: the controller to check for queued messages
  *
  * If there are more messages in the queue, the next message is returned from
  * this call.
  *
  * Return: the next message in the queue, else NULL if the queue is empty.
  */
-struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
 {
 	struct spi_message *next;
 	unsigned long flags;
 
 	/* get a pointer to the next message, if any */
-	spin_lock_irqsave(&master->queue_lock, flags);
-	next = list_first_entry_or_null(&master->queue, struct spi_message,
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
 					queue);
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	return next;
 }
@@ -1344,36 +1342,36 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
 
 /**
  * spi_finalize_current_message() - the current message is complete
- * @master: the master to return the message to
+ * @ctlr: the controller to return the message to
  *
  * Called by the driver to notify the core that the message in the front of the
  * queue is complete and can be removed from the queue.
  */
-void spi_finalize_current_message(struct spi_master *master)
+void spi_finalize_current_message(struct spi_controller *ctlr)
 {
 	struct spi_message *mesg;
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&master->queue_lock, flags);
-	mesg = master->cur_msg;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	mesg = ctlr->cur_msg;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	spi_unmap_msg(master, mesg);
+	spi_unmap_msg(ctlr, mesg);
 
-	if (master->cur_msg_prepared && master->unprepare_message) {
-		ret = master->unprepare_message(master, mesg);
+	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+		ret = ctlr->unprepare_message(ctlr, mesg);
 		if (ret) {
-			dev_err(&master->dev,
-				"failed to unprepare message: %d\n", ret);
+			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
+				ret);
 		}
 	}
 
-	spin_lock_irqsave(&master->queue_lock, flags);
-	master->cur_msg = NULL;
-	master->cur_msg_prepared = false;
-	kthread_queue_work(&master->kworker, &master->pump_messages);
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	ctlr->cur_msg = NULL;
+	ctlr->cur_msg_prepared = false;
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
 
@@ -1383,66 +1381,65 @@ void spi_finalize_current_message(struct spi_master *master)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
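Taken together with __spi_pump_messages() above, this gives the optional per-message hook pairing: prepare_message() runs before the transfers are mapped and executed, and unprepare_message() runs here when the message is finalized. Illustrative only (the demo_* names are assumptions, not from this patch):

static int demo_prepare_message(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	/* e.g. latch per-device mode/clock settings for msg->spi */
	return 0;
}

static int demo_unprepare_message(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	/* undo whatever prepare_message() set up */
	return 0;
}

	/* in the controller driver's probe: */
	ctlr->prepare_message = demo_prepare_message;
	ctlr->unprepare_message = demo_unprepare_message;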
 
-static int spi_start_queue(struct spi_master *master)
+static int spi_start_queue(struct spi_controller *ctlr)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
-	if (master->running || master->busy) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (ctlr->running || ctlr->busy) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return -EBUSY;
 	}
 
-	master->running = true;
-	master->cur_msg = NULL;
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	ctlr->running = true;
+	ctlr->cur_msg = NULL;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	kthread_queue_work(&master->kworker, &master->pump_messages);
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 
 	return 0;
 }
 
-static int spi_stop_queue(struct spi_master *master)
+static int spi_stop_queue(struct spi_controller *ctlr)
 {
 	unsigned long flags;
 	unsigned limit = 500;
 	int ret = 0;
 
-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/*
 	 * This is a bit lame, but is optimized for the common execution path.
-	 * A wait_queue on the master->busy could be used, but then the common
+	 * A wait_queue on the ctlr->busy could be used, but then the common
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead.
 	 */
-	while ((!list_empty(&master->queue) || master->busy) && limit--) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		usleep_range(10000, 11000);
-		spin_lock_irqsave(&master->queue_lock, flags);
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
 	}
 
-	if (!list_empty(&master->queue) || master->busy)
+	if (!list_empty(&ctlr->queue) || ctlr->busy)
 		ret = -EBUSY;
 	else
-		master->running = false;
+		ctlr->running = false;
 
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	if (ret) {
-		dev_warn(&master->dev,
-			 "could not stop message queue\n");
+		dev_warn(&ctlr->dev, "could not stop message queue\n");
 		return ret;
 	}
 	return ret;
 }
 
-static int spi_destroy_queue(struct spi_master *master)
+static int spi_destroy_queue(struct spi_controller *ctlr)
 {
 	int ret;
 
-	ret = spi_stop_queue(master);
+	ret = spi_stop_queue(ctlr);
 
 	/*
 	 * kthread_flush_worker will block until all work is done.
@@ -1451,12 +1448,12 @@ static int spi_destroy_queue(struct spi_master *master)
 	 * return anyway.
 	 */
 	if (ret) {
-		dev_err(&master->dev, "problem destroying queue\n");
+		dev_err(&ctlr->dev, "problem destroying queue\n");
 		return ret;
 	}
 
-	kthread_flush_worker(&master->kworker);
-	kthread_stop(master->kworker_task);
+	kthread_flush_worker(&ctlr->kworker);
+	kthread_stop(ctlr->kworker_task);
 
 	return 0;
 }
@@ -1465,23 +1462,23 @@ static int __spi_queued_transfer(struct spi_device *spi,
 				 struct spi_message *msg,
 				 bool need_pump)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	unsigned long flags;
 
-	spin_lock_irqsave(&master->queue_lock, flags);
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
-	if (!master->running) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
+	if (!ctlr->running) {
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return -ESHUTDOWN;
 	}
 	msg->actual_length = 0;
 	msg->status = -EINPROGRESS;
 
-	list_add_tail(&msg->queue, &master->queue);
-	if (!master->busy && need_pump)
-		kthread_queue_work(&master->kworker, &master->pump_messages);
+	list_add_tail(&msg->queue, &ctlr->queue);
+	if (!ctlr->busy && need_pump)
+		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 
-	spin_unlock_irqrestore(&master->queue_lock, flags);
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	return 0;
 }
 
@@ -1497,31 +1494,31 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	return __spi_queued_transfer(spi, msg, true);
 }
 
-static int spi_master_initialize_queue(struct spi_master *master)
+static int spi_controller_initialize_queue(struct spi_controller *ctlr)
 {
 	int ret;
 
-	master->transfer = spi_queued_transfer;
-	if (!master->transfer_one_message)
-		master->transfer_one_message = spi_transfer_one_message;
+	ctlr->transfer = spi_queued_transfer;
+	if (!ctlr->transfer_one_message)
+		ctlr->transfer_one_message = spi_transfer_one_message;
 
 	/* Initialize and start queue */
-	ret = spi_init_queue(master);
+	ret = spi_init_queue(ctlr);
 	if (ret) {
-		dev_err(&master->dev, "problem initializing queue\n");
+		dev_err(&ctlr->dev, "problem initializing queue\n");
 		goto err_init_queue;
 	}
-	master->queued = true;
-	ret = spi_start_queue(master);
+	ctlr->queued = true;
+	ret = spi_start_queue(ctlr);
 	if (ret) {
-		dev_err(&master->dev, "problem starting queue\n");
+		dev_err(&ctlr->dev, "problem starting queue\n");
 		goto err_start_queue;
 	}
 
 	return 0;
 
 err_start_queue:
-	spi_destroy_queue(master);
+	spi_destroy_queue(ctlr);
 err_init_queue:
 	return ret;
 }
@@ -1529,21 +1526,12 @@ static int spi_master_initialize_queue(struct spi_master *master)
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_OF)
-static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
+static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 			   struct device_node *nc)
 {
 	u32 value;
 	int rc;
 
-	/* Device address */
-	rc = of_property_read_u32(nc, "reg", &value);
-	if (rc) {
-		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
-			nc->full_name, rc);
-		return rc;
-	}
-	spi->chip_select = value;
-
 	/* Mode (clock phase/polarity/etc.) */
 	if (of_find_property(nc, "spi-cpha", NULL))
 		spi->mode |= SPI_CPHA;
@@ -1568,7 +1556,7 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 			spi->mode |= SPI_TX_QUAD;
 			break;
 		default:
-			dev_warn(&master->dev,
+			dev_warn(&ctlr->dev,
 				"spi-tx-bus-width %d not supported\n",
 				value);
 			break;
@@ -1586,17 +1574,36 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 			spi->mode |= SPI_RX_QUAD;
 			break;
 		default:
-			dev_warn(&master->dev,
+			dev_warn(&ctlr->dev,
 				"spi-rx-bus-width %d not supported\n",
 				value);
 			break;
 		}
 	}
 
+	if (spi_controller_is_slave(ctlr)) {
+		if (strcmp(nc->name, "slave")) {
+			dev_err(&ctlr->dev, "%s is not called 'slave'\n",
+				nc->full_name);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Device address */
+	rc = of_property_read_u32(nc, "reg", &value);
+	if (rc) {
+		dev_err(&ctlr->dev, "%s has no valid 'reg' property (%d)\n",
+			nc->full_name, rc);
+		return rc;
+	}
+	spi->chip_select = value;
+
 	/* Device speed */
 	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
 	if (rc) {
-		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+		dev_err(&ctlr->dev,
+			"%s has no valid 'spi-max-frequency' property (%d)\n",
 			nc->full_name, rc);
 		return rc;
 	}
@@ -1606,15 +1613,15 @@ static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi,
 }
 
 static struct spi_device *
-of_register_spi_device(struct spi_master *master, struct device_node *nc)
+of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
 {
 	struct spi_device *spi;
 	int rc;
 
 	/* Alloc an spi_device */
-	spi = spi_alloc_device(master);
+	spi = spi_alloc_device(ctlr);
 	if (!spi) {
-		dev_err(&master->dev, "spi_device alloc error for %s\n",
+		dev_err(&ctlr->dev, "spi_device alloc error for %s\n",
 			nc->full_name);
 		rc = -ENOMEM;
 		goto err_out;
@@ -1624,12 +1631,12 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 	rc = of_modalias_node(nc, spi->modalias,
 				sizeof(spi->modalias));
 	if (rc < 0) {
-		dev_err(&master->dev, "cannot find modalias for %s\n",
+		dev_err(&ctlr->dev, "cannot find modalias for %s\n",
 			nc->full_name);
 		goto err_out;
 	}
 
-	rc = of_spi_parse_dt(master, spi, nc);
+	rc = of_spi_parse_dt(ctlr, spi, nc);
 	if (rc)
 		goto err_out;
 
@@ -1640,7 +1647,7 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 	/* Register the new device */
 	rc = spi_add_device(spi);
 	if (rc) {
-		dev_err(&master->dev, "spi_device register error %s\n",
+		dev_err(&ctlr->dev, "spi_device register error %s\n",
 			nc->full_name);
 		goto err_of_node_put;
 	}
@@ -1656,39 +1663,40 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
 
 /**
  * of_register_spi_devices() - Register child devices onto the SPI bus
- * @master:	Pointer to spi_master device
+ * @ctlr:	Pointer to spi_controller device
  *
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
+ * Registers an spi_device for each child node of controller node which
+ * represents a valid SPI slave.
  */
-static void of_register_spi_devices(struct spi_master *master)
+static void of_register_spi_devices(struct spi_controller *ctlr)
 {
 	struct spi_device *spi;
 	struct device_node *nc;
 
-	if (!master->dev.of_node)
+	if (!ctlr->dev.of_node)
 		return;
 
-	for_each_available_child_of_node(master->dev.of_node, nc) {
+	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
 			continue;
-		spi = of_register_spi_device(master, nc);
+		spi = of_register_spi_device(ctlr, nc);
 		if (IS_ERR(spi)) {
-			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
-				nc->full_name);
+			dev_warn(&ctlr->dev,
+				 "Failed to create SPI device for %s\n",
+				 nc->full_name);
 			of_node_clear_flag(nc, OF_POPULATED);
 		}
 	}
 }
 #else
-static void of_register_spi_devices(struct spi_master *master) { }
+static void of_register_spi_devices(struct spi_controller *ctlr) { }
 #endif
 
 #ifdef CONFIG_ACPI
 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 {
 	struct spi_device *spi = data;
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 
 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
 		struct acpi_resource_spi_serialbus *sb;
@@ -1702,8 +1710,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 			 * 0 .. max - 1 so we need to ask the driver to
 			 * translate between the two schemes.
 			 */
-			if (master->fw_translate_cs) {
-				int cs = master->fw_translate_cs(master,
+			if (ctlr->fw_translate_cs) {
+				int cs = ctlr->fw_translate_cs(ctlr,
 						sb->device_selection);
 				if (cs < 0)
 					return cs;
@@ -1732,7 +1740,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 	return 1;
 }
 
-static acpi_status acpi_register_spi_device(struct spi_master *master,
+static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
 					    struct acpi_device *adev)
 {
 	struct list_head resource_list;
@@ -1743,9 +1751,9 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
 	    acpi_device_enumerated(adev))
 		return AE_OK;
 
-	spi = spi_alloc_device(master);
+	spi = spi_alloc_device(ctlr);
 	if (!spi) {
-		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
 			dev_name(&adev->dev));
 		return AE_NO_MEMORY;
 	}
@@ -1774,7 +1782,7 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
 	adev->power.flags.ignore_parent = true;
 	if (spi_add_device(spi)) {
 		adev->power.flags.ignore_parent = false;
-		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
+		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
 			dev_name(&adev->dev));
 		spi_dev_put(spi);
 	}
@@ -1785,104 +1793,211 @@ static acpi_status acpi_register_spi_device(struct spi_master *master,
 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
 				       void *data, void **return_value)
 {
-	struct spi_master *master = data;
+	struct spi_controller *ctlr = data;
 	struct acpi_device *adev;
 
 	if (acpi_bus_get_device(handle, &adev))
 		return AE_OK;
 
-	return acpi_register_spi_device(master, adev);
+	return acpi_register_spi_device(ctlr, adev);
 }
 
-static void acpi_register_spi_devices(struct spi_master *master)
+static void acpi_register_spi_devices(struct spi_controller *ctlr)
 {
 	acpi_status status;
 	acpi_handle handle;
 
-	handle = ACPI_HANDLE(master->dev.parent);
+	handle = ACPI_HANDLE(ctlr->dev.parent);
 	if (!handle)
 		return;
 
 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-				     acpi_spi_add_device, NULL,
-				     master, NULL);
+				     acpi_spi_add_device, NULL, ctlr, NULL);
 	if (ACPI_FAILURE(status))
-		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
+		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
 }
 #else
-static inline void acpi_register_spi_devices(struct spi_master *master) {}
+static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
 #endif /* CONFIG_ACPI */
 
-static void spi_master_release(struct device *dev)
+static void spi_controller_release(struct device *dev)
 {
-	struct spi_master *master;
+	struct spi_controller *ctlr;
 
-	master = container_of(dev, struct spi_master, dev);
-	kfree(master);
+	ctlr = container_of(dev, struct spi_controller, dev);
+	kfree(ctlr);
 }
 
 static struct class spi_master_class = {
 	.name		= "spi_master",
 	.owner		= THIS_MODULE,
-	.dev_release	= spi_master_release,
+	.dev_release	= spi_controller_release,
 	.dev_groups	= spi_master_groups,
 };
 
+#ifdef CONFIG_SPI_SLAVE
+/**
+ * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ *		     controller
+ * @spi: device used for the current transfer
+ */
+int spi_slave_abort(struct spi_device *spi)
+{
+	struct spi_controller *ctlr = spi->controller;
+
+	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
+		return ctlr->slave_abort(ctlr);
+
+	return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(spi_slave_abort);
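A slave protocol driver would typically call this when it must cancel a transfer queued towards the remote SPI master that may otherwise never complete, for instance on removal. A hypothetical sketch, not part of this patch:

static int demo_slave_remove(struct spi_device *spi)
{
	int ret = spi_slave_abort(spi);

	if (ret && ret != -ENOTSUPP)
		dev_warn(&spi->dev, "abort failed: %d\n", ret);
	return 0;
}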
+
+static int match_true(struct device *dev, void *data)
+{
+	return 1;
+}
+
+static ssize_t spi_slave_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+						   dev);
+	struct device *child;
+
+	child = device_find_child(&ctlr->dev, NULL, match_true);
+	return sprintf(buf, "%s\n",
+		       child ? to_spi_device(child)->modalias : NULL);
+}
+
+static ssize_t spi_slave_store(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
+						   dev);
+	struct spi_device *spi;
+	struct device *child;
+	char name[32];
+	int rc;
+
+	rc = sscanf(buf, "%31s", name);
+	if (rc != 1 || !name[0])
+		return -EINVAL;
+
+	child = device_find_child(&ctlr->dev, NULL, match_true);
+	if (child) {
+		/* Remove registered slave */
+		device_unregister(child);
+		put_device(child);
+	}
+
+	if (strcmp(name, "(null)")) {
+		/* Register new slave */
+		spi = spi_alloc_device(ctlr);
+		if (!spi)
+			return -ENOMEM;
+
+		strlcpy(spi->modalias, name, sizeof(spi->modalias));
+
+		rc = spi_add_device(spi);
+		if (rc) {
+			spi_dev_put(spi);
+			return rc;
+		}
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+
+static struct attribute *spi_slave_attrs[] = {
+	&dev_attr_slave.attr,
+	NULL,
+};
+
+static const struct attribute_group spi_slave_group = {
+	.attrs = spi_slave_attrs,
+};
+
+static const struct attribute_group *spi_slave_groups[] = {
+	&spi_controller_statistics_group,
+	&spi_slave_group,
+	NULL,
+};
+
+static struct class spi_slave_class = {
+	.name		= "spi_slave",
+	.owner		= THIS_MODULE,
+	.dev_release	= spi_controller_release,
+	.dev_groups	= spi_slave_groups,
+};
+#else
+extern struct class spi_slave_class;	/* dummy */
+#endif
 
 /**
- * spi_alloc_master - allocate SPI master controller
+ * __spi_alloc_controller - allocate an SPI master or slave controller
  * @dev: the controller, possibly using the platform_bus
  * @size: how much zeroed driver-private data to allocate; the pointer to this
  *	memory is in the driver_data field of the returned device,
- *	accessible with spi_master_get_devdata().
+ *	accessible with spi_controller_get_devdata().
+ * @slave: flag indicating whether to allocate an SPI master (false) or SPI
+ *	slave (true) controller
  * Context: can sleep
  *
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
  * only ones directly touching chip registers.  It's how they allocate
- * an spi_master structure, prior to calling spi_register_master().
+ * an spi_controller structure, prior to calling spi_register_controller().
  *
  * This must be called from context that can sleep.
  *
- * The caller is responsible for assigning the bus number and initializing
- * the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() to prevent a memory leak.
+ * The caller is responsible for assigning the bus number and initializing the
+ * controller's methods before calling spi_register_controller(); and (after
+ * errors adding the device) calling spi_controller_put() to prevent a memory
+ * leak.
  *
- * Return: the SPI master structure on success, else NULL.
+ * Return: the SPI controller structure on success, else NULL.
  */
-struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+struct spi_controller *__spi_alloc_controller(struct device *dev,
+					      unsigned int size, bool slave)
 {
-	struct spi_master	*master;
+	struct spi_controller	*ctlr;
 
 	if (!dev)
 		return NULL;
 
-	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
-	if (!master)
+	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+	if (!ctlr)
 		return NULL;
 
-	device_initialize(&master->dev);
-	master->bus_num = -1;
-	master->num_chipselect = 1;
-	master->dev.class = &spi_master_class;
-	master->dev.parent = dev;
-	pm_suspend_ignore_children(&master->dev, true);
-	spi_master_set_devdata(master, &master[1]);
+	device_initialize(&ctlr->dev);
+	ctlr->bus_num = -1;
+	ctlr->num_chipselect = 1;
+	ctlr->slave = slave;
+	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
+		ctlr->dev.class = &spi_slave_class;
+	else
+		ctlr->dev.class = &spi_master_class;
+	ctlr->dev.parent = dev;
+	pm_suspend_ignore_children(&ctlr->dev, true);
+	spi_controller_set_devdata(ctlr, &ctlr[1]);
 
-	return master;
+	return ctlr;
 }
-EXPORT_SYMBOL_GPL(spi_alloc_master);
+EXPORT_SYMBOL_GPL(__spi_alloc_controller);
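Drivers are expected to reach this through thin wrappers that fix the 'slave' argument; the kernel-doc below refers to them as spi_alloc_master() and spi_alloc_slave(), presumably provided by <linux/spi/spi.h>. For illustration only (pdev and demo_priv are assumptions, not from this patch):

	struct spi_controller *ctlr;

	/* master: shows up under the spi_master class, as before */
	ctlr = __spi_alloc_controller(&pdev->dev, sizeof(struct demo_priv), false);

	/* slave: only meaningful with CONFIG_SPI_SLAVE, uses the new spi_slave class */
	ctlr = __spi_alloc_controller(&pdev->dev, sizeof(struct demo_priv), true);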
 
 #ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_master *master)
+static int of_spi_register_master(struct spi_controller *ctlr)
 {
 	int nb, i, *cs;
-	struct device_node *np = master->dev.of_node;
+	struct device_node *np = ctlr->dev.of_node;
 
 	if (!np)
 		return 0;
 
 	nb = of_gpio_named_count(np, "cs-gpios");
-	master->num_chipselect = max_t(int, nb, master->num_chipselect);
+	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
 
 	/* Return error only for an incorrectly formed cs-gpios property */
 	if (nb == 0 || nb == -ENOENT)
@@ -1890,15 +2005,14 @@ static int of_spi_register_master(struct spi_master *master)
 	else if (nb < 0)
 		return nb;
 
-	cs = devm_kzalloc(&master->dev,
-			  sizeof(int) * master->num_chipselect,
+	cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
 			  GFP_KERNEL);
-	master->cs_gpios = cs;
+	ctlr->cs_gpios = cs;
 
-	if (!master->cs_gpios)
+	if (!ctlr->cs_gpios)
 		return -ENOMEM;
 
-	for (i = 0; i < master->num_chipselect; i++)
+	for (i = 0; i < ctlr->num_chipselect; i++)
 		cs[i] = -ENOENT;
 
 	for (i = 0; i < nb; i++)
@@ -1907,20 +2021,21 @@ static int of_spi_register_master(struct spi_master *master)
 	return 0;
 }
 #else
-static int of_spi_register_master(struct spi_master *master)
+static int of_spi_register_master(struct spi_controller *ctlr)
 {
 	return 0;
 }
 #endif
 
 /**
- * spi_register_master - register SPI master controller
- * @master: initialized master, originally from spi_alloc_master()
+ * spi_register_controller - register SPI master or slave controller
+ * @ctlr: initialized controller, originally from spi_alloc_master() or
+ *	spi_alloc_slave()
  * Context: can sleep
  *
- * SPI master controllers connect to their drivers using some non-SPI bus,
+ * SPI controllers connect to their drivers using some non-SPI bus,
  * such as the platform bus.  The final stage of probe() in that code
- * includes calling spi_register_master() to hook up to this SPI bus glue.
+ * includes calling spi_register_controller() to hook up to this SPI bus glue.
  *
  * SPI controllers use board specific (often SOC specific) bus numbers,
  * and board-specific addressing for SPI devices combines those numbers
@@ -1929,16 +2044,16 @@ static int of_spi_register_master(struct spi_master *master)
  * chip is at which address.
  *
  * This must be called from context that can sleep.  It returns zero on
- * success, else a negative error code (dropping the master's refcount).
+ * success, else a negative error code (dropping the controller's refcount).
  * After a successful return, the caller is responsible for calling
- * spi_unregister_master().
+ * spi_unregister_controller().
  *
  * Return: zero on success, else a negative error code.
  */
-int spi_register_master(struct spi_master *master)
+int spi_register_controller(struct spi_controller *ctlr)
 {
 	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
-	struct device		*dev = master->dev.parent;
+	struct device		*dev = ctlr->dev.parent;
 	struct boardinfo	*bi;
 	int			status = -ENODEV;
 	int			dynamic = 0;
@@ -1946,103 +2061,109 @@ int spi_register_master(struct spi_master *master)
 	if (!dev)
 		return -ENODEV;
 
-	status = of_spi_register_master(master);
-	if (status)
-		return status;
+	if (!spi_controller_is_slave(ctlr)) {
+		status = of_spi_register_master(ctlr);
+		if (status)
+			return status;
+	}
 
 	/* even if it's just one always-selected device, there must
 	 * be at least one chipselect
 	 */
-	if (master->num_chipselect == 0)
+	if (ctlr->num_chipselect == 0)
 		return -EINVAL;
 
-	if ((master->bus_num < 0) && master->dev.of_node)
-		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
+	if ((ctlr->bus_num < 0) && ctlr->dev.of_node)
+		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
 
 	/* convention:  dynamically assigned bus IDs count down from the max */
-	if (master->bus_num < 0) {
+	if (ctlr->bus_num < 0) {
 		/* FIXME switch to an IDR based scheme, something like
 		 * I2C now uses, so we can't run out of "dynamic" IDs
 		 */
-		master->bus_num = atomic_dec_return(&dyn_bus_id);
+		ctlr->bus_num = atomic_dec_return(&dyn_bus_id);
 		dynamic = 1;
 	}
 
-	INIT_LIST_HEAD(&master->queue);
-	spin_lock_init(&master->queue_lock);
-	spin_lock_init(&master->bus_lock_spinlock);
-	mutex_init(&master->bus_lock_mutex);
-	mutex_init(&master->io_mutex);
-	master->bus_lock_flag = 0;
-	init_completion(&master->xfer_completion);
-	if (!master->max_dma_len)
-		master->max_dma_len = INT_MAX;
+	INIT_LIST_HEAD(&ctlr->queue);
+	spin_lock_init(&ctlr->queue_lock);
+	spin_lock_init(&ctlr->bus_lock_spinlock);
+	mutex_init(&ctlr->bus_lock_mutex);
+	mutex_init(&ctlr->io_mutex);
+	ctlr->bus_lock_flag = 0;
+	init_completion(&ctlr->xfer_completion);
+	if (!ctlr->max_dma_len)
+		ctlr->max_dma_len = INT_MAX;
 
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
 	 */
-	dev_set_name(&master->dev, "spi%u", master->bus_num);
-	status = device_add(&master->dev);
+	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
+	status = device_add(&ctlr->dev);
 	if (status < 0)
 		goto done;
-	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
-			dynamic ? " (dynamic)" : "");
+	dev_dbg(dev, "registered %s %s%s\n",
+			spi_controller_is_slave(ctlr) ? "slave" : "master",
+			dev_name(&ctlr->dev), dynamic ? " (dynamic)" : "");
 
 	/* If we're using a queued driver, start the queue */
-	if (master->transfer)
-		dev_info(dev, "master is unqueued, this is deprecated\n");
+	if (ctlr->transfer)
+		dev_info(dev, "controller is unqueued, this is deprecated\n");
 	else {
-		status = spi_master_initialize_queue(master);
+		status = spi_controller_initialize_queue(ctlr);
 		if (status) {
-			device_del(&master->dev);
+			device_del(&ctlr->dev);
 			goto done;
 		}
 	}
 	/* add statistics */
-	spin_lock_init(&master->statistics.lock);
+	spin_lock_init(&ctlr->statistics.lock);
 
 	mutex_lock(&board_lock);
-	list_add_tail(&master->list, &spi_master_list);
+	list_add_tail(&ctlr->list, &spi_controller_list);
 	list_for_each_entry(bi, &board_list, list)
-		spi_match_master_to_boardinfo(master, &bi->board_info);
+		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
 	mutex_unlock(&board_lock);
 
 	/* Register devices from the device tree and ACPI */
-	of_register_spi_devices(master);
-	acpi_register_spi_devices(master);
+	of_register_spi_devices(ctlr);
+	acpi_register_spi_devices(ctlr);
 done:
 	return status;
 }
-EXPORT_SYMBOL_GPL(spi_register_master);
+EXPORT_SYMBOL_GPL(spi_register_controller);
 
 static void devm_spi_unregister(struct device *dev, void *res)
 {
-	spi_unregister_master(*(struct spi_master **)res);
+	spi_unregister_controller(*(struct spi_controller **)res);
 }
 
 /**
- * dev_spi_register_master - register managed SPI master controller
- * @dev:    device managing SPI master
- * @master: initialized master, originally from spi_alloc_master()
+ * devm_spi_register_controller - register managed SPI master or slave
+ *	controller
+ * @dev:    device managing SPI controller
+ * @ctlr: initialized controller, originally from spi_alloc_master() or
+ *	spi_alloc_slave()
  * Context: can sleep
  *
- * Register a SPI device as with spi_register_master() which will
+ * Register an SPI controller as with spi_register_controller(), which will
  * automatically be unregistered.
  *
  * Return: zero on success, else a negative error code.
  */
-int devm_spi_register_master(struct device *dev, struct spi_master *master)
+int devm_spi_register_controller(struct device *dev,
+				 struct spi_controller *ctlr)
 {
-	struct spi_master **ptr;
+	struct spi_controller **ptr;
 	int ret;
 
 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
 
-	ret = spi_register_master(master);
+	ret = spi_register_controller(ctlr);
 	if (!ret) {
-		*ptr = master;
+		*ptr = ctlr;
 		devres_add(dev, ptr);
 	} else {
 		devres_free(ptr);
@@ -2050,7 +2171,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(devm_spi_register_master);
+EXPORT_SYMBOL_GPL(devm_spi_register_controller);
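Putting the registration pieces together, a minimal controller probe under the renamed API might look like the following; everything demo_* (and the chipselect count) is illustrative, not taken from this patch:

static int demo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->dev.of_node = pdev->dev.of_node;	/* so the core registers DT children */
	ctlr->num_chipselect = 4;		/* must be non-zero */
	ctlr->transfer_one = demo_transfer_one;	/* as sketched earlier */

	/* unregistered automatically when &pdev->dev is released */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}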
 
 static int __unregister(struct device *dev, void *null)
 {
@@ -2059,71 +2180,71 @@ static int __unregister(struct device *dev, void *null)
 }
 
 /**
- * spi_unregister_master - unregister SPI master controller
- * @master: the master being unregistered
+ * spi_unregister_controller - unregister SPI master or slave controller
+ * @ctlr: the controller being unregistered
  * Context: can sleep
  *
- * This call is used only by SPI master controller drivers, which are the
+ * This call is used only by SPI controller drivers, which are the
  * only ones directly touching chip registers.
  *
  * This must be called from context that can sleep.
  */
-void spi_unregister_master(struct spi_master *master)
+void spi_unregister_controller(struct spi_controller *ctlr)
 {
 	int dummy;
 
-	if (master->queued) {
-		if (spi_destroy_queue(master))
-			dev_err(&master->dev, "queue remove failed\n");
+	if (ctlr->queued) {
+		if (spi_destroy_queue(ctlr))
+			dev_err(&ctlr->dev, "queue remove failed\n");
 	}
 
 	mutex_lock(&board_lock);
-	list_del(&master->list);
+	list_del(&ctlr->list);
 	mutex_unlock(&board_lock);
 
-	dummy = device_for_each_child(&master->dev, NULL, __unregister);
-	device_unregister(&master->dev);
+	dummy = device_for_each_child(&ctlr->dev, NULL, __unregister);
+	device_unregister(&ctlr->dev);
 }
-EXPORT_SYMBOL_GPL(spi_unregister_master);
+EXPORT_SYMBOL_GPL(spi_unregister_controller);
 
-int spi_master_suspend(struct spi_master *master)
+int spi_controller_suspend(struct spi_controller *ctlr)
 {
 	int ret;
 
-	/* Basically no-ops for non-queued masters */
-	if (!master->queued)
+	/* Basically no-ops for non-queued controllers */
+	if (!ctlr->queued)
 		return 0;
 
-	ret = spi_stop_queue(master);
+	ret = spi_stop_queue(ctlr);
 	if (ret)
-		dev_err(&master->dev, "queue stop failed\n");
+		dev_err(&ctlr->dev, "queue stop failed\n");
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(spi_master_suspend);
+EXPORT_SYMBOL_GPL(spi_controller_suspend);
 
-int spi_master_resume(struct spi_master *master)
+int spi_controller_resume(struct spi_controller *ctlr)
 {
 	int ret;
 
-	if (!master->queued)
+	if (!ctlr->queued)
 		return 0;
 
-	ret = spi_start_queue(master);
+	ret = spi_start_queue(ctlr);
 	if (ret)
-		dev_err(&master->dev, "queue restart failed\n");
+		dev_err(&ctlr->dev, "queue restart failed\n");
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(spi_master_resume);
+EXPORT_SYMBOL_GPL(spi_controller_resume);
 
-static int __spi_master_match(struct device *dev, const void *data)
+static int __spi_controller_match(struct device *dev, const void *data)
 {
-	struct spi_master *m;
+	struct spi_controller *ctlr;
 	const u16 *bus_num = data;
 
-	m = container_of(dev, struct spi_master, dev);
-	return m->bus_num == *bus_num;
+	ctlr = container_of(dev, struct spi_controller, dev);
+	return ctlr->bus_num == *bus_num;
 }
 
 /**
@@ -2133,22 +2254,22 @@ static int __spi_master_match(struct device *dev, const void *data)
  *
  * This call may be used with devices that are registered after
  * arch init time.  It returns a refcounted pointer to the relevant
- * spi_master (which the caller must release), or NULL if there is
+ * spi_controller (which the caller must release), or NULL if there is
  * no such master registered.
  *
  * Return: the SPI master structure on success, else NULL.
  */
-struct spi_master *spi_busnum_to_master(u16 bus_num)
+struct spi_controller *spi_busnum_to_master(u16 bus_num)
 {
 	struct device		*dev;
-	struct spi_master	*master = NULL;
+	struct spi_controller	*ctlr = NULL;
 
 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
-				__spi_master_match);
+				__spi_controller_match);
 	if (dev)
-		master = container_of(dev, struct spi_master, dev);
+		ctlr = container_of(dev, struct spi_controller, dev);
 	/* reference got in class_find_device */
-	return master;
+	return ctlr;
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
 
@@ -2168,7 +2289,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  * Return: the pointer to the allocated data
  *
  * This may get enhanced in the future to allocate from a memory pool
- * of the @spi_device or @spi_master to avoid repeated allocations.
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
  */
 void *spi_res_alloc(struct spi_device *spi,
 		    spi_res_release_t release,
@@ -2220,11 +2341,10 @@ EXPORT_SYMBOL_GPL(spi_res_add);
 
 /**
  * spi_res_release - release all spi resources for this message
- * @master:  the @spi_master
+ * @ctlr:  the @spi_controller
  * @message: the @spi_message
  */
-void spi_res_release(struct spi_master *master,
-		     struct spi_message *message)
+void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
 {
 	struct spi_res *res;
 
@@ -2233,7 +2353,7 @@ void spi_res_release(struct spi_master *master,
 				      struct spi_res, entry);
 
 		if (res->release)
-			res->release(master, message, res->data);
+			res->release(ctlr, message, res->data);
 
 		list_del(&res->entry);
 
@@ -2246,7 +2366,7 @@ EXPORT_SYMBOL_GPL(spi_res_release);
 
 /* Core methods for spi_message alterations */
 
-static void __spi_replace_transfers_release(struct spi_master *master,
+static void __spi_replace_transfers_release(struct spi_controller *ctlr,
 					    struct spi_message *msg,
 					    void *res)
 {
@@ -2255,7 +2375,7 @@ static void __spi_replace_transfers_release(struct spi_master *master,
 
 	/* call extra callback if requested */
 	if (rxfer->release)
-		rxfer->release(master, msg, res);
+		rxfer->release(ctlr, msg, res);
 
 	/* insert replaced transfers back into the message */
 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
@@ -2375,7 +2495,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
 }
 EXPORT_SYMBOL_GPL(spi_replace_transfers);
 
-static int __spi_split_transfer_maxsize(struct spi_master *master,
+static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 					struct spi_message *msg,
 					struct spi_transfer **xferp,
 					size_t maxsize,
@@ -2437,7 +2557,7 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
 	*xferp = &xfers[count - 1];
 
 	/* increment statistics counters */
-	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
 				       transfers_split_maxsize);
 	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
 				       transfers_split_maxsize);
@@ -2449,14 +2569,14 @@ static int __spi_split_transfer_maxsize(struct spi_master *master,
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
  *                              when an individual transfer exceeds a
  *                              certain size
- * @master:    the @spi_master for this transfer
+ * @ctlr:    the @spi_controller for this transfer
  * @msg:   the @spi_message to transform
 * @maxsize:  the maximum transfer length; transfers longer than this are split
  * @gfp: GFP allocation flags
  *
  * Return: status of transformation
  */
-int spi_split_transfers_maxsize(struct spi_master *master,
+int spi_split_transfers_maxsize(struct spi_controller *ctlr,
 				struct spi_message *msg,
 				size_t maxsize,
 				gfp_t gfp)
@@ -2472,8 +2592,8 @@ int spi_split_transfers_maxsize(struct spi_master *master,
 	 */
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (xfer->len > maxsize) {
-			ret = __spi_split_transfer_maxsize(
-				master, msg, &xfer, maxsize, gfp);
+			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
+							   maxsize, gfp);
 			if (ret)
 				return ret;
 		}
@@ -2485,18 +2605,18 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
 
 /*-------------------------------------------------------------------------*/
 
-/* Core methods for SPI master protocol drivers.  Some of the
+/* Core methods for SPI controller protocol drivers.  Some of the
  * other core methods are currently defined as inline functions.
  */
 
-static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
+static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
+					u8 bits_per_word)
 {
-	if (master->bits_per_word_mask) {
+	if (ctlr->bits_per_word_mask) {
 		/* Only 32 bits fit in the mask */
 		if (bits_per_word > 32)
 			return -EINVAL;
-		if (!(master->bits_per_word_mask &
-				SPI_BPW_MASK(bits_per_word)))
+		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
 			return -EINVAL;
 	}
 
@@ -2542,9 +2662,9 @@ int spi_setup(struct spi_device *spi)
 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
 		return -EINVAL;
 	/* help drivers fail *cleanly* when they need options
-	 * that aren't supported with their current master
+	 * that aren't supported with their current controller
 	 */
-	bad_bits = spi->mode & ~spi->master->mode_bits;
+	bad_bits = spi->mode & ~spi->controller->mode_bits;
 	ugly_bits = bad_bits &
 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
 	if (ugly_bits) {
@@ -2563,15 +2683,16 @@ int spi_setup(struct spi_device *spi)
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
-	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
+	status = __spi_validate_bits_per_word(spi->controller,
+					      spi->bits_per_word);
 	if (status)
 		return status;
 
 	if (!spi->max_speed_hz)
-		spi->max_speed_hz = spi->master->max_speed_hz;
+		spi->max_speed_hz = spi->controller->max_speed_hz;
 
-	if (spi->master->setup)
-		status = spi->master->setup(spi);
+	if (spi->controller->setup)
+		status = spi->controller->setup(spi);
 
 	spi_set_cs(spi, false);
 
@@ -2590,7 +2711,7 @@ EXPORT_SYMBOL_GPL(spi_setup);
 
 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	struct spi_transfer *xfer;
 	int w_size;
 
@@ -2602,16 +2723,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 	 * either MOSI or MISO is missing.  They can also be caused by
 	 * software limitations.
 	 */
-	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
-			|| (spi->mode & SPI_3WIRE)) {
-		unsigned flags = master->flags;
+	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
+	    (spi->mode & SPI_3WIRE)) {
+		unsigned flags = ctlr->flags;
 
 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
 			if (xfer->rx_buf && xfer->tx_buf)
 				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
 				return -EINVAL;
-			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
 				return -EINVAL;
 		}
 	}
@@ -2631,13 +2752,12 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 		if (!xfer->speed_hz)
 			xfer->speed_hz = spi->max_speed_hz;
 		if (!xfer->speed_hz)
-			xfer->speed_hz = master->max_speed_hz;
+			xfer->speed_hz = ctlr->max_speed_hz;
 
-		if (master->max_speed_hz &&
-		    xfer->speed_hz > master->max_speed_hz)
-			xfer->speed_hz = master->max_speed_hz;
+		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
+			xfer->speed_hz = ctlr->max_speed_hz;
 
-		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
+		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
 			return -EINVAL;
 
 		/*
@@ -2655,8 +2775,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 		if (xfer->len % w_size)
 			return -EINVAL;
 
-		if (xfer->speed_hz && master->min_speed_hz &&
-		    xfer->speed_hz < master->min_speed_hz)
+		if (xfer->speed_hz && ctlr->min_speed_hz &&
+		    xfer->speed_hz < ctlr->min_speed_hz)
 			return -EINVAL;
 
 		if (xfer->tx_buf && !xfer->tx_nbits)
@@ -2701,16 +2821,16 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 
 static int __spi_async(struct spi_device *spi, struct spi_message *message)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 
 	message->spi = spi;
 
-	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
+	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
 
 	trace_spi_message_submit(message);
 
-	return master->transfer(spi, message);
+	return ctlr->transfer(spi, message);
 }
 
 /**
@@ -2746,7 +2866,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
  */
 int spi_async(struct spi_device *spi, struct spi_message *message)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	int ret;
 	unsigned long flags;
 
@@ -2754,14 +2874,14 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
 	if (ret != 0)
 		return ret;
 
-	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 
-	if (master->bus_lock_flag)
+	if (ctlr->bus_lock_flag)
 		ret = -EBUSY;
 	else
 		ret = __spi_async(spi, message);
 
-	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 
 	return ret;
 }
@@ -2800,7 +2920,7 @@ EXPORT_SYMBOL_GPL(spi_async);
  */
 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	int ret;
 	unsigned long flags;
 
@@ -2808,11 +2928,11 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 	if (ret != 0)
 		return ret;
 
-	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 
 	ret = __spi_async(spi, message);
 
-	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 
 	return ret;
 
@@ -2824,7 +2944,7 @@ int spi_flash_read(struct spi_device *spi,
 		   struct spi_flash_read_message *msg)
 
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *master = spi->controller;
 	struct device *rx_dev = NULL;
 	int ret;
 
@@ -2878,7 +2998,7 @@ EXPORT_SYMBOL_GPL(spi_flash_read);
 
 /*-------------------------------------------------------------------------*/
 
-/* Utility methods for SPI master protocol drivers, layered on
+/* Utility methods for SPI protocol drivers, layered on
  * top of the core.  Some other utility methods are defined as
  * inline functions.
  */
@@ -2892,7 +3012,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	unsigned long flags;
 
 	status = __spi_validate(spi, message);
@@ -2903,7 +3023,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	message->context = &done;
 	message->spi = spi;
 
-	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
+	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
 
 	/* If we're not using the legacy transfer method then we will
@@ -2911,14 +3031,14 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	 * This code would be less tricky if we could remove the
 	 * support for driver implemented message queues.
 	 */
-	if (master->transfer == spi_queued_transfer) {
-		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+	if (ctlr->transfer == spi_queued_transfer) {
+		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 
 		trace_spi_message_submit(message);
 
 		status = __spi_queued_transfer(spi, message, false);
 
-		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 	} else {
 		status = spi_async_locked(spi, message);
 	}
@@ -2927,12 +3047,12 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 		/* Push out the messages in the calling context if we
 		 * can.
 		 */
-		if (master->transfer == spi_queued_transfer) {
-			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+		if (ctlr->transfer == spi_queued_transfer) {
+			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
 						       spi_sync_immediate);
 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
 						       spi_sync_immediate);
-			__spi_pump_messages(master, false);
+			__spi_pump_messages(ctlr, false);
 		}
 
 		wait_for_completion(&done);
@@ -2967,9 +3087,9 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
 	int ret;
 
-	mutex_lock(&spi->master->bus_lock_mutex);
+	mutex_lock(&spi->controller->bus_lock_mutex);
 	ret = __spi_sync(spi, message);
-	mutex_unlock(&spi->master->bus_lock_mutex);
+	mutex_unlock(&spi->controller->bus_lock_mutex);
 
 	return ret;
 }
@@ -2999,7 +3119,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
 
 /**
  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @master: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus master that should be locked for exclusive bus access
  * Context: can sleep
  *
  * This call may only be used from a context that may sleep.  The sleep
@@ -3012,15 +3132,15 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
  *
  * Return: always zero.
  */
-int spi_bus_lock(struct spi_master *master)
+int spi_bus_lock(struct spi_controller *ctlr)
 {
 	unsigned long flags;
 
-	mutex_lock(&master->bus_lock_mutex);
+	mutex_lock(&ctlr->bus_lock_mutex);
 
-	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
-	master->bus_lock_flag = 1;
-	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+	ctlr->bus_lock_flag = 1;
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 
 	/* mutex remains locked until spi_bus_unlock is called */
 
@@ -3030,7 +3150,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
 
 /**
  * spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @master: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus master that was locked for exclusive bus access
  * Context: can sleep
  *
  * This call may only be used from a context that may sleep.  The sleep
@@ -3041,11 +3161,11 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
  *
  * Return: always zero.
  */
-int spi_bus_unlock(struct spi_master *master)
+int spi_bus_unlock(struct spi_controller *ctlr)
 {
-	master->bus_lock_flag = 0;
+	ctlr->bus_lock_flag = 0;
 
-	mutex_unlock(&master->bus_lock_mutex);
+	mutex_unlock(&ctlr->bus_lock_mutex);
 
 	return 0;
 }
@@ -3147,45 +3267,48 @@ static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
 	return dev ? to_spi_device(dev) : NULL;
 }
 
-static int __spi_of_master_match(struct device *dev, const void *data)
+static int __spi_of_controller_match(struct device *dev, const void *data)
 {
 	return dev->of_node == data;
 }
 
-/* the spi masters are not using spi_bus, so we find it with another way */
-static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
+/* the spi controllers are not using spi_bus, so we find it another way */
+static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
 {
 	struct device *dev;
 
 	dev = class_find_device(&spi_master_class, NULL, node,
-				__spi_of_master_match);
+				__spi_of_controller_match);
+	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+		dev = class_find_device(&spi_slave_class, NULL, node,
+					__spi_of_controller_match);
 	if (!dev)
 		return NULL;
 
 	/* reference got in class_find_device */
-	return container_of(dev, struct spi_master, dev);
+	return container_of(dev, struct spi_controller, dev);
 }
 
 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
 			 void *arg)
 {
 	struct of_reconfig_data *rd = arg;
-	struct spi_master *master;
+	struct spi_controller *ctlr;
 	struct spi_device *spi;
 
 	switch (of_reconfig_get_state_change(action, arg)) {
 	case OF_RECONFIG_CHANGE_ADD:
-		master = of_find_spi_master_by_node(rd->dn->parent);
-		if (master == NULL)
+		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
+		if (ctlr == NULL)
 			return NOTIFY_OK;	/* not for us */
 
 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
-			put_device(&master->dev);
+			put_device(&ctlr->dev);
 			return NOTIFY_OK;
 		}
 
-		spi = of_register_spi_device(master, rd->dn);
-		put_device(&master->dev);
+		spi = of_register_spi_device(ctlr, rd->dn);
+		put_device(&ctlr->dev);
 
 		if (IS_ERR(spi)) {
 			pr_err("%s: failed to create for '%s'\n",
@@ -3224,7 +3347,7 @@ extern struct notifier_block spi_of_notifier;
 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
 
 #if IS_ENABLED(CONFIG_ACPI)
-static int spi_acpi_master_match(struct device *dev, const void *data)
+static int spi_acpi_controller_match(struct device *dev, const void *data)
 {
 	return ACPI_COMPANION(dev->parent) == data;
 }
@@ -3234,16 +3357,19 @@ static int spi_acpi_device_match(struct device *dev, void *data)
 	return ACPI_COMPANION(dev) == data;
 }
 
-static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
+static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;
 
 	dev = class_find_device(&spi_master_class, NULL, adev,
-				spi_acpi_master_match);
+				spi_acpi_controller_match);
+	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
+		dev = class_find_device(&spi_slave_class, NULL, adev,
+					spi_acpi_controller_match);
 	if (!dev)
 		return NULL;
 
-	return container_of(dev, struct spi_master, dev);
+	return container_of(dev, struct spi_controller, dev);
 }
 
 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
@@ -3259,17 +3385,17 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
 			   void *arg)
 {
 	struct acpi_device *adev = arg;
-	struct spi_master *master;
+	struct spi_controller *ctlr;
 	struct spi_device *spi;
 
 	switch (value) {
 	case ACPI_RECONFIG_DEVICE_ADD:
-		master = acpi_spi_find_master_by_adev(adev->parent);
-		if (!master)
+		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
+		if (!ctlr)
 			break;
 
-		acpi_register_spi_device(master, adev);
-		put_device(&master->dev);
+		acpi_register_spi_device(ctlr, adev);
+		put_device(&ctlr->dev);
 		break;
 	case ACPI_RECONFIG_DEVICE_REMOVE:
 		if (!acpi_device_enumerated(adev))
@@ -3312,6 +3438,12 @@ static int __init spi_init(void)
 	if (status < 0)
 		goto err2;
 
+	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
+		status = class_register(&spi_slave_class);
+		if (status < 0)
+			goto err3;
+	}
+
 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
 	if (IS_ENABLED(CONFIG_ACPI))
@@ -3319,6 +3451,8 @@ static int __init spi_init(void)
 
 	return 0;
 
+err3:
+	class_unregister(&spi_master_class);
 err2:
 	bus_unregister(&spi_bus_type);
 err1:
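
For reference, a minimal sketch of how a platform driver might register a controller through the renamed API above. This is not part of the patch: the foo_* names are hypothetical, and spi_controller_put() is assumed to be the renamed spi_master_put() from the same series.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_transfer_one(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* shift xfer->tx_buf out and capture xfer->rx_buf here */
	return 0;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	/* spi_alloc_master() still hands back a struct spi_controller */
	ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;			/* let the core pick a bus number */
	ctlr->num_chipselect = 1;
	ctlr->transfer_one = foo_transfer_one;	/* queued path, not deprecated ->transfer */

	/* devm_ variant: unregistered automatically when the device detaches */
	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);	/* assumed renamed spi_master_put() */
	return ret;
}
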
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9a2a79a8..d4d2d8d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -99,7 +99,6 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
 static ssize_t
 spidev_sync(struct spidev_data *spidev, struct spi_message *message)
 {
-	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_device *spi;
 
@@ -325,7 +324,6 @@ static struct spi_ioc_transfer *
 spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
 		unsigned *n_ioc)
 {
-	struct spi_ioc_transfer	*ioc;
 	u32	tmp;
 
 	/* Check type, command number and direction */
@@ -342,14 +340,7 @@ spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
 		return NULL;
 
 	/* copy into scratch area */
-	ioc = kmalloc(tmp, GFP_KERNEL);
-	if (!ioc)
-		return ERR_PTR(-ENOMEM);
-	if (__copy_from_user(ioc, u_ioc, tmp)) {
-		kfree(ioc);
-		return ERR_PTR(-EFAULT);
-	}
-	return ioc;
+	return memdup_user(u_ioc, tmp);
 }
 
 static long
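
The spidev change above follows a common pattern: memdup_user() folds the open-coded allocate-and-copy below into one call that returns either a kmalloc'd copy or an ERR_PTR(). The sketch is illustrative only and is not code from this patch.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* open-coded equivalent of what memdup_user() does internally */
static void *copy_ioc_open_coded(const void __user *u_ioc, size_t len)
{
	void *ioc;

	ioc = kmalloc(len, GFP_KERNEL);
	if (!ioc)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(ioc, u_ioc, len)) {
		kfree(ioc);
		return ERR_PTR(-EFAULT);
	}
	return ioc;
}

/* with memdup_user() the same thing collapses to a single call */
static void *copy_ioc_memdup(const void __user *u_ioc, size_t len)
{
	return memdup_user(u_ioc, len);	/* kmalloc'd buffer or ERR_PTR() */
}
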
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5ec3a59..2afe359 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -28,6 +28,7 @@
 /* PMIC Arbiter configuration registers */
 #define PMIC_ARB_VERSION		0x0000
 #define PMIC_ARB_VERSION_V2_MIN		0x20010000
+#define PMIC_ARB_VERSION_V3_MIN		0x30000000
 #define PMIC_ARB_INT_EN			0x0004
 
 /* PMIC Arbiter channel registers offsets */
@@ -58,10 +59,10 @@
 
 /* Channel Status fields */
 enum pmic_arb_chnl_status {
-	PMIC_ARB_STATUS_DONE	= (1 << 0),
-	PMIC_ARB_STATUS_FAILURE	= (1 << 1),
-	PMIC_ARB_STATUS_DENIED	= (1 << 2),
-	PMIC_ARB_STATUS_DROPPED	= (1 << 3),
+	PMIC_ARB_STATUS_DONE	= BIT(0),
+	PMIC_ARB_STATUS_FAILURE	= BIT(1),
+	PMIC_ARB_STATUS_DENIED	= BIT(2),
+	PMIC_ARB_STATUS_DROPPED	= BIT(3),
 };
 
 /* Command register fields */
@@ -96,10 +97,26 @@ enum pmic_arb_cmd_op_code {
 /* interrupt enable bit */
 #define SPMI_PIC_ACC_ENABLE_BIT		BIT(0)
 
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+	((((slave_id) & 0xF)   << 28) | \
+	(((periph_id) & 0xFF)  << 20) | \
+	(((irq_id)    & 0x7)   << 16) | \
+	(((apid)      & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq)  (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq)  (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq)  (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0)  & 0x1FF)
+
 struct pmic_arb_ver_ops;
 
+struct apid_data {
+	u16		ppid;
+	u8		owner;
+};
+
 /**
- * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ * spmi_pmic_arb - SPMI PMIC Arbiter object
  *
  * @rd_base:		on v1 "core", on v2 "observer" register base off DT.
  * @wr_base:		on v1 "core", on v2 "chnls"    register base off DT.
@@ -111,15 +128,15 @@ struct pmic_arb_ver_ops;
  * @ee:			the current Execution Environment
  * @min_apid:		minimum APID (used for bounding IRQ search)
  * @max_apid:		maximum APID
+ * @max_periph:		maximum number of PMIC peripherals supported by HW.
  * @mapping_table:	in-memory copy of PPID -> APID mapping table.
  * @domain:		irq domain object for PMIC IRQ domain
  * @spmic:		SPMI controller object
- * @apid_to_ppid:	in-memory copy of APID -> PPID mapping table.
  * @ver_ops:		version dependent operations.
- * @ppid_to_chan	in-memory copy of PPID -> channel (APID) mapping table.
+ * @ppid_to_apid:	in-memory copy of PPID -> channel (APID) mapping table.
  *			v2 only.
  */
-struct spmi_pmic_arb_dev {
+struct spmi_pmic_arb {
 	void __iomem		*rd_base;
 	void __iomem		*wr_base;
 	void __iomem		*intr;
@@ -132,19 +149,23 @@ struct spmi_pmic_arb_dev {
 	u8			ee;
 	u16			min_apid;
 	u16			max_apid;
+	u16			max_periph;
 	u32			*mapping_table;
 	DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
 	struct irq_domain	*domain;
 	struct spmi_controller	*spmic;
-	u16			*apid_to_ppid;
 	const struct pmic_arb_ver_ops *ver_ops;
-	u16			*ppid_to_chan;
-	u16			last_channel;
+	u16			*ppid_to_apid;
+	u16			last_apid;
+	struct apid_data	apid_data[PMIC_ARB_MAX_PERIPHS];
 };
 
 /**
  * pmic_arb_ver: version dependent functionality.
  *
+ * @ver_str:		version string.
+ * @ppid_to_apid:	finds the apid for a given ppid.
+ * @mode:		access rights to specified pmic peripheral.
  * @non_data_cmd:	on v1 issues an spmi non-data command.
  *			on v2 no HW support, returns -EOPNOTSUPP.
  * @offset:		on v1 offset of per-ee channel.
@@ -160,28 +181,33 @@ struct spmi_pmic_arb_dev {
  *			on v2 offset of SPMI_PIC_IRQ_CLEARn.
  */
 struct pmic_arb_ver_ops {
+	const char *ver_str;
+	int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+			u16 *apid);
+	int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+			mode_t *mode);
 	/* spmi commands (read_cmd, write_cmd, cmd) functionality */
-	int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+	int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
 		      u32 *offset);
 	u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
 	int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
 	/* Interrupts controller functionality (offset of PIC registers) */
-	u32 (*owner_acc_status)(u8 m, u8 n);
-	u32 (*acc_enable)(u8 n);
-	u32 (*irq_status)(u8 n);
-	u32 (*irq_clear)(u8 n);
+	u32 (*owner_acc_status)(u8 m, u16 n);
+	u32 (*acc_enable)(u16 n);
+	u32 (*irq_status)(u16 n);
+	u32 (*irq_clear)(u16 n);
 };
 
-static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->wr_base + offset);
+	writel_relaxed(val, pa->wr_base + offset);
 }
 
-static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
-	writel_relaxed(val, dev->rd_base + offset);
+	writel_relaxed(val, pa->rd_base + offset);
 }
 
 /**
@@ -190,9 +216,10 @@ static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
  * @reg:	register's address
  * @buf:	output parameter, length must be bc + 1
  */
-static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
 {
-	u32 data = __raw_readl(dev->rd_base + reg);
+	u32 data = __raw_readl(pa->rd_base + reg);
+
 	memcpy(buf, &data, (bc & 3) + 1);
 }
 
@@ -203,23 +230,24 @@ static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
  * @buf:	buffer to write. length must be bc + 1.
  */
 static void
-pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
 {
 	u32 data = 0;
+
 	memcpy(&data, buf, (bc & 3) + 1);
-	__raw_writel(data, dev->wr_base + reg);
+	pmic_arb_base_write(pa, reg, data);
 }
 
 static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
 				  void __iomem *base, u8 sid, u16 addr)
 {
-	struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	u32 status = 0;
 	u32 timeout = PMIC_ARB_TIMEOUT_US;
 	u32 offset;
 	int rc;
 
-	rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
 	if (rc)
 		return rc;
 
@@ -264,22 +292,22 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
 static int
 pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u32 cmd;
 	int rc;
 	u32 offset;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+	rc = pa->ver_ops->offset(pa, sid, 0, &offset);
 	if (rc)
 		return rc;
 
 	cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -293,7 +321,7 @@ pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
 /* Non-data command */
 static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 
 	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
 
@@ -301,23 +329,35 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
 	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
 		return -EINVAL;
 
-	return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
+	return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
 }
 
 static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			     u16 addr, u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
 	u32 offset;
+	mode_t mode;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
 	if (rc)
 		return rc;
 
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & S_IRUSR)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible read from peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -EPERM;
+	}
+
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
 			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -335,40 +375,51 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr);
 	if (rc)
 		goto done;
 
-	pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
+	pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
 		     min_t(u8, bc, 3));
 
 	if (bc > 3)
-		pa_read_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_RDATA1, bc - 4);
+		pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);
 
 done:
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 	return rc;
 }
 
 static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 			      u16 addr, const u8 *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	unsigned long flags;
 	u8 bc = len - 1;
 	u32 cmd;
 	int rc;
 	u32 offset;
+	mode_t mode;
 
-	rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+	rc = pa->ver_ops->offset(pa, sid, addr, &offset);
 	if (rc)
 		return rc;
 
+	rc = pa->ver_ops->mode(pa, sid, addr, &mode);
+	if (rc)
+		return rc;
+
+	if (!(mode & S_IWUSR)) {
+		dev_err(&pa->spmic->dev,
+			"error: impermissible write to peripheral sid:%d addr:0x%x\n",
+			sid, addr);
+		return -EPERM;
+	}
+
 	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
 		dev_err(&ctrl->dev,
 			"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -388,20 +439,18 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
 	else
 		return -EINVAL;
 
-	cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+	cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
 
 	/* Write data to FIFOs */
-	raw_spin_lock_irqsave(&pmic_arb->lock, flags);
-	pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
-		      min_t(u8, bc, 3));
+	raw_spin_lock_irqsave(&pa->lock, flags);
+	pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
 	if (bc > 3)
-		pa_write_data(pmic_arb, buf + 4,
-				offset + PMIC_ARB_WDATA1, bc - 4);
+		pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);
 
 	/* Start the transaction */
-	pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
-	rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
-	raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+	rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr);
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
 
 	return rc;
 }
@@ -427,9 +476,9 @@ struct spmi_pmic_arb_qpnpint_type {
 static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
 			       size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
 			       (per << 8) + reg, buf, len))
@@ -440,9 +489,9 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
 
 static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 sid = d->hwirq >> 24;
-	u8 per = d->hwirq >> 16;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 sid = HWIRQ_SID(d->hwirq);
+	u8 per = HWIRQ_PER(d->hwirq);
 
 	if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
 			      (per << 8) + reg, buf, len))
@@ -451,33 +500,58 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
 				    d->irq);
 }
 
-static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
+{
+	u16 ppid = pa->apid_data[apid].ppid;
+	u8 sid = ppid >> 8;
+	u8 per = ppid & 0xFF;
+	u8 irq_mask = BIT(id);
+
+	writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+
+	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+			(per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
+		dev_err_ratelimited(&pa->spmic->dev,
+				"failed to ack irq_mask = 0x%x for ppid = %x\n",
+				irq_mask, ppid);
+
+	if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+			       (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
+		dev_err_ratelimited(&pa->spmic->dev,
+				"failed to ack irq_mask = 0x%x for ppid = %x\n",
+				irq_mask, ppid);
+}
+
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
 {
 	unsigned int irq;
 	u32 status;
 	int id;
+	u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+	u8 per = pa->apid_data[apid].ppid & 0xFF;
 
 	status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
 	while (status) {
 		id = ffs(status) - 1;
-		status &= ~(1 << id);
-		irq = irq_find_mapping(pa->domain,
-				       pa->apid_to_ppid[apid] << 16
-				     | id << 8
-				     | apid);
+		status &= ~BIT(id);
+		irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+		if (irq == 0) {
+			cleanup_irq(pa, apid, id);
+			continue;
+		}
 		generic_handle_irq(irq);
 	}
 }
 
 static void pmic_arb_chained_irq(struct irq_desc *desc)
 {
-	struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+	struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	void __iomem *intr = pa->intr;
 	int first = pa->min_apid >> 5;
 	int last = pa->max_apid >> 5;
-	u32 status;
-	int i, id;
+	u32 status, enable;
+	int i, id, apid;
 
 	chained_irq_enter(chip, desc);
 
@@ -486,8 +560,12 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
 				      pa->ver_ops->owner_acc_status(pa->ee, i));
 		while (status) {
 			id = ffs(status) - 1;
-			status &= ~(1 << id);
-			periph_interrupt(pa, id + i * 32);
+			status &= ~BIT(id);
+			apid = id + i * 32;
+			enable = readl_relaxed(intr +
+					pa->ver_ops->acc_enable(apid));
+			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
+				periph_interrupt(pa, apid);
 		}
 	}
 
@@ -496,100 +574,81 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
 
 static void qpnpint_irq_ack(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
 	u8 data;
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
+	writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
 
-	data = 1 << irq;
+	data = BIT(irq);
 	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
 
 static void qpnpint_irq_mask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 data = BIT(irq);
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
-		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
-		writel_relaxed(status, pa->intr +
-			       pa->ver_ops->acc_enable(apid));
-	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
-
-	data = 1 << irq;
 	qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
 }
 
 static void qpnpint_irq_unmask(struct irq_data *d)
 {
-	struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
-	u8 irq  = d->hwirq >> 8;
-	u8 apid = d->hwirq;
-	unsigned long flags;
-	u32 status;
-	u8 data;
+	struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
+	u8 buf[2];
 
-	raw_spin_lock_irqsave(&pa->lock, flags);
-	status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
-	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
-		writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
-				pa->intr + pa->ver_ops->acc_enable(apid));
+	writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
+		pa->intr + pa->ver_ops->acc_enable(apid));
+
+	qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
+	if (!(buf[0] & BIT(irq))) {
+		/*
+		 * Since the interrupt is currently disabled, write to both the
+		 * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+		 * cannot be triggered when the interrupt is enabled
+		 */
+		buf[0] = BIT(irq);
+		buf[1] = BIT(irq);
+		qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
 	}
-	raw_spin_unlock_irqrestore(&pa->lock, flags);
-
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
-}
-
-static void qpnpint_irq_enable(struct irq_data *d)
-{
-	u8 irq  = d->hwirq >> 8;
-	u8 data;
-
-	qpnpint_irq_unmask(d);
-
-	data = 1 << irq;
-	qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
 }
 
 static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
 	struct spmi_pmic_arb_qpnpint_type type;
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
+	u8 bit_mask_irq = BIT(irq);
 
 	qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
 
 	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-		type.type |= 1 << irq;
+		type.type |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_RISING)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		if (flow_type & IRQF_TRIGGER_FALLING)
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	} else {
 		if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
 		    (flow_type & (IRQF_TRIGGER_LOW)))
 			return -EINVAL;
 
-		type.type &= ~(1 << irq); /* level trig */
+		type.type &= ~bit_mask_irq; /* level trig */
 		if (flow_type & IRQF_TRIGGER_HIGH)
-			type.polarity_high |= 1 << irq;
+			type.polarity_high |= bit_mask_irq;
 		else
-			type.polarity_low  |= 1 << irq;
+			type.polarity_low  |= bit_mask_irq;
 	}
 
 	qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+	if (flow_type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else
+		irq_set_handler_locked(d, handle_level_irq);
+
 	return 0;
 }
 
@@ -597,7 +656,7 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
 				     enum irqchip_irq_state which,
 				     bool *state)
 {
-	u8 irq = d->hwirq >> 8;
+	u8 irq = HWIRQ_IRQ(d->hwirq);
 	u8 status = 0;
 
 	if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -611,7 +670,6 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
 
 static struct irq_chip pmic_arb_irqchip = {
 	.name		= "pmic_arb",
-	.irq_enable	= qpnpint_irq_enable,
 	.irq_ack	= qpnpint_irq_ack,
 	.irq_mask	= qpnpint_irq_mask,
 	.irq_unmask	= qpnpint_irq_unmask,
@@ -621,48 +679,6 @@ static struct irq_chip pmic_arb_irqchip = {
 			| IRQCHIP_SKIP_SET_WAKE,
 };
 
-struct spmi_pmic_arb_irq_spec {
-	unsigned slave:4;
-	unsigned per:8;
-	unsigned irq:3;
-};
-
-static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
-				struct spmi_pmic_arb_irq_spec *spec,
-				u8 *apid)
-{
-	u16 ppid = spec->slave << 8 | spec->per;
-	u32 *mapping_table = pa->mapping_table;
-	int index = 0, i;
-	u32 data;
-
-	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
-		if (!test_and_set_bit(index, pa->mapping_table_valid))
-			mapping_table[index] = readl_relaxed(pa->cnfg +
-						SPMI_MAPPING_TABLE_REG(index));
-
-		data = mapping_table[index];
-
-		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
-			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
-				return 0;
-			}
-		} else {
-			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
-				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-			} else {
-				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
-				return 0;
-			}
-		}
-	}
-
-	return -ENODEV;
-}
-
 static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 					   struct device_node *controller,
 					   const u32 *intspec,
@@ -670,10 +686,9 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 					   unsigned long *out_hwirq,
 					   unsigned int *out_type)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
-	struct spmi_pmic_arb_irq_spec spec;
-	int err;
-	u8 apid;
+	struct spmi_pmic_arb *pa = d->host_data;
+	int rc;
+	u16 apid;
 
 	dev_dbg(&pa->spmic->dev,
 		"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -686,15 +701,14 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 	if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
 		return -EINVAL;
 
-	spec.slave = intspec[0];
-	spec.per   = intspec[1];
-	spec.irq   = intspec[2];
-
-	err = search_mapping_table(pa, &spec, &apid);
-	if (err)
-		return err;
-
-	pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+	rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
+			(intspec[1] << 8), &apid);
+	if (rc < 0) {
+		dev_err(&pa->spmic->dev,
+		"failed to xlate sid = 0x%x, periph = 0x%x, irq = %x rc = %d\n",
+		intspec[0], intspec[1], intspec[2], rc);
+		return rc;
+	}
 
 	/* Keep track of {max,min}_apid for bounding search during interrupt */
 	if (apid > pa->max_apid)
@@ -702,10 +716,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 	if (apid < pa->min_apid)
 		pa->min_apid = apid;
 
-	*out_hwirq = spec.slave << 24
-		   | spec.per   << 16
-		   | spec.irq   << 8
-		   | apid;
+	*out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
 	*out_type  = intspec[3] & IRQ_TYPE_SENSE_MASK;
 
 	dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -717,7 +728,7 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
 				  unsigned int virq,
 				  irq_hw_number_t hwirq)
 {
-	struct spmi_pmic_arb_dev *pa = d->host_data;
+	struct spmi_pmic_arb *pa = d->host_data;
 
 	dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
 
@@ -727,26 +738,85 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
 	return 0;
 }
 
+static int
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+	u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
+	u32 *mapping_table = pa->mapping_table;
+	int index = 0, i;
+	u16 apid_valid;
+	u32 data;
+
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (apid_valid & PMIC_ARB_CHAN_VALID) {
+		*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+		return 0;
+	}
+
+	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+		if (!test_and_set_bit(index, pa->mapping_table_valid))
+			mapping_table[index] = readl_relaxed(pa->cnfg +
+						SPMI_MAPPING_TABLE_REG(index));
+
+		data = mapping_table[index];
+
+		if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
+			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		} else {
+			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+			} else {
+				*apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+				pa->ppid_to_apid[ppid]
+					= *apid | PMIC_ARB_CHAN_VALID;
+				pa->apid_data[*apid].ppid = ppid;
+				return 0;
+			}
+		}
+	}
+
+	return -ENODEV;
+}
+
+static int
+pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	*mode = S_IRUSR | S_IWUSR;
+	return 0;
+}
+
 /* v1 offset per ee */
 static int
-pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
 {
 	*offset = 0x800 + 0x80 * pa->channel;
 	return 0;
 }
 
-static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
 {
 	u32 regval, offset;
-	u16 chan;
+	u16 apid;
 	u16 id;
 
 	/*
 	 * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-	 * ppid_to_chan is an in-memory invert of that table.
+	 * ppid_to_apid is an in-memory invert of that table.
 	 */
-	for (chan = pa->last_channel; ; chan++) {
-		offset = PMIC_ARB_REG_CHNL(chan);
+	for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
+		regval = readl_relaxed(pa->cnfg +
+				      SPMI_OWNERSHIP_TABLE_REG(apid));
+		pa->apid_data[apid].owner = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+
+		offset = PMIC_ARB_REG_CHNL(apid);
 		if (offset >= pa->core_size)
 			break;
 
@@ -755,33 +825,65 @@ static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
 			continue;
 
 		id = (regval >> 8) & PMIC_ARB_PPID_MASK;
-		pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+		pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
+		pa->apid_data[apid].ppid = id;
 		if (id == ppid) {
-			chan |= PMIC_ARB_CHAN_VALID;
+			apid |= PMIC_ARB_CHAN_VALID;
 			break;
 		}
 	}
-	pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+	pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
 
-	return chan;
+	return apid;
 }
 
 
-/* v2 offset per ppid (chan) and per ee */
 static int
-pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
 {
 	u16 ppid = (sid << 8) | (addr >> 8);
-	u16 chan;
+	u16 apid_valid;
 
-	chan = pa->ppid_to_chan[ppid];
-	if (!(chan & PMIC_ARB_CHAN_VALID))
-		chan = pmic_arb_find_chan(pa, ppid);
-	if (!(chan & PMIC_ARB_CHAN_VALID))
+	apid_valid = pa->ppid_to_apid[ppid];
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+		apid_valid = pmic_arb_find_apid(pa, ppid);
+	if (!(apid_valid & PMIC_ARB_CHAN_VALID))
 		return -ENODEV;
-	chan &= ~PMIC_ARB_CHAN_VALID;
 
-	*offset = 0x1000 * pa->ee + 0x8000 * chan;
+	*apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+	return 0;
+}
+
+static int
+pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+	u16 apid;
+	int rc;
+
+	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*mode = 0;
+	*mode |= S_IRUSR;
+
+	if (pa->ee == pa->apid_data[apid].owner)
+		*mode |= S_IWUSR;
+	return 0;
+}
+
+/* v2 offset per ppid and per ee */
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
+{
+	u16 apid;
+	int rc;
+
+	rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+	if (rc < 0)
+		return rc;
+
+	*offset = 0x1000 * pa->ee + 0x8000 * apid;
 	return 0;
 }
 
@@ -795,47 +897,55 @@ static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
 	return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
 }
 
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
 {
 	return 0x20 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
 {
 	return 0x100000 + 0x1000 * m + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
+{
+	return 0x200000 + 0x1000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v1(u16 n)
 {
 	return 0x200 + 0x4 * n;
 }
 
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
 {
 	return 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_irq_status_v1(u16 n)
 {
 	return 0x600 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
 {
 	return 0x4 + 0x1000 * n;
 }
 
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_clear_v1(u16 n)
 {
 	return 0xA00 + 0x4 * n;
 }
 
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
 {
 	return 0x8 + 0x1000 * n;
 }
 
 static const struct pmic_arb_ver_ops pmic_arb_v1 = {
+	.ver_str		= "v1",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v1,
+	.mode			= pmic_arb_mode_v1_v3,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
 	.offset			= pmic_arb_offset_v1,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
@@ -846,6 +956,9 @@ static const struct pmic_arb_ver_ops pmic_arb_v1 = {
 };
 
 static const struct pmic_arb_ver_ops pmic_arb_v2 = {
+	.ver_str		= "v2",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v2,
 	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
 	.offset			= pmic_arb_offset_v2,
 	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
@@ -855,6 +968,19 @@ static const struct pmic_arb_ver_ops pmic_arb_v2 = {
 	.irq_clear		= pmic_arb_irq_clear_v2,
 };
 
+static const struct pmic_arb_ver_ops pmic_arb_v3 = {
+	.ver_str		= "v3",
+	.ppid_to_apid		= pmic_arb_ppid_to_apid_v2,
+	.mode			= pmic_arb_mode_v1_v3,
+	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
+	.offset			= pmic_arb_offset_v2,
+	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
+	.owner_acc_status	= pmic_arb_owner_acc_status_v3,
+	.acc_enable		= pmic_arb_acc_enable_v2,
+	.irq_status		= pmic_arb_irq_status_v2,
+	.irq_clear		= pmic_arb_irq_clear_v2,
+};
+
 static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
 	.map	= qpnpint_irq_domain_map,
 	.xlate	= qpnpint_irq_domain_dt_translate,
@@ -862,13 +988,12 @@ static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
 
 static int spmi_pmic_arb_probe(struct platform_device *pdev)
 {
-	struct spmi_pmic_arb_dev *pa;
+	struct spmi_pmic_arb *pa;
 	struct spmi_controller *ctrl;
 	struct resource *res;
 	void __iomem *core;
 	u32 channel, ee, hw_ver;
 	int err;
-	bool is_v1;
 
 	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
 	if (!ctrl)
@@ -879,6 +1004,12 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
 	pa->core_size = resource_size(res);
+	if (pa->core_size <= 0x800) {
+		dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
+		err = -EINVAL;
+		goto err_put_ctrl;
+	}
+
 	core = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(core)) {
 		err = PTR_ERR(core);
@@ -886,18 +1017,21 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 	}
 
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
-	is_v1  = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
 
-	dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
-		hw_ver);
-
-	if (is_v1) {
+	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
 		pa->ver_ops = &pmic_arb_v1;
 		pa->wr_base = core;
 		pa->rd_base = core;
 	} else {
 		pa->core = core;
-		pa->ver_ops = &pmic_arb_v2;
+
+		if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
+			pa->ver_ops = &pmic_arb_v2;
+		else
+			pa->ver_ops = &pmic_arb_v3;
+
+		/* the apid to ppid table starts at PMIC_ARB_REG_CHNL(0) */
+		pa->max_periph = (pa->core_size - PMIC_ARB_REG_CHNL(0)) / 4;
 
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						   "obsrvr");
@@ -915,16 +1049,19 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 			goto err_put_ctrl;
 		}
 
-		pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
 						PMIC_ARB_MAX_PPID,
-						sizeof(*pa->ppid_to_chan),
+						sizeof(*pa->ppid_to_apid),
 						GFP_KERNEL);
-		if (!pa->ppid_to_chan) {
+		if (!pa->ppid_to_apid) {
 			err = -ENOMEM;
 			goto err_put_ctrl;
 		}
 	}
 
+	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
+		 pa->ver_ops->ver_str, hw_ver);
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
 	pa->intr = devm_ioremap_resource(&ctrl->dev, res);
 	if (IS_ERR(pa->intr)) {
@@ -974,14 +1111,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
 	pa->ee = ee;
 
-	pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
-					    sizeof(*pa->apid_to_ppid),
-					    GFP_KERNEL);
-	if (!pa->apid_to_ppid) {
-		err = -ENOMEM;
-		goto err_put_ctrl;
-	}
-
 	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
 					sizeof(*pa->mapping_table), GFP_KERNEL);
 	if (!pa->mapping_table) {
@@ -1011,6 +1140,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 	}
 
 	irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
+	enable_irq_wake(pa->irq);
 
 	err = spmi_controller_add(ctrl);
 	if (err)
@@ -1029,7 +1159,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 static int spmi_pmic_arb_remove(struct platform_device *pdev)
 {
 	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
-	struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+	struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
 	spmi_controller_remove(ctrl);
 	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
 	irq_domain_remove(pa->domain);
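
Note: the probe path above now selects the version-specific ops table straight from the version register instead of an is_v1 boolean. A condensed sketch of that selection (same constants and fields as the driver; the wr_base/rd_base/core assignments and the max_periph calculation are omitted here):

	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);

	if (hw_ver < PMIC_ARB_VERSION_V2_MIN)
		pa->ver_ops = &pmic_arb_v1;
	else if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
		pa->ver_ops = &pmic_arb_v2;
	else
		pa->ver_ops = &pmic_arb_v3;

	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
		 pa->ver_ops->ver_str, hw_ver);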
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index 76427e4..d9f8b14 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -83,8 +83,8 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		int fd;
 
 		fd = ion_alloc(data.allocation.len,
-						data.allocation.heap_id_mask,
-						data.allocation.flags);
+			       data.allocation.heap_id_mask,
+			       data.allocation.flags);
 		if (fd < 0)
 			return fd;
 
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 03d3a4f..93e2c90 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -103,7 +103,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 			goto err2;
 	}
 
-	if (buffer->sg_table == NULL) {
+	if (!buffer->sg_table) {
 		WARN_ONCE(1, "This heap needs to set the sgtable");
 		ret = -EINVAL;
 		goto err1;
@@ -115,7 +115,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 	buffer->dev = dev;
 	buffer->size = len;
-	INIT_LIST_HEAD(&buffer->vmas);
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	mutex_lock(&dev->buffer_lock);
@@ -135,7 +134,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
-	vfree(buffer->pages);
 	kfree(buffer);
 }
 
@@ -163,7 +161,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 		return buffer->vaddr;
 	}
 	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	if (WARN_ONCE(vaddr == NULL,
+	if (WARN_ONCE(!vaddr,
 		      "heap->ops->map_kernel should return ERR_PTR on error"))
 		return ERR_PTR(-EINVAL);
 	if (IS_ERR(vaddr))
@@ -221,7 +219,7 @@ struct ion_dma_buf_attachment {
 };
 
 static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
-				struct dma_buf_attachment *attachment)
+			      struct dma_buf_attachment *attachment)
 {
 	struct ion_dma_buf_attachment *a;
 	struct sg_table *table;
@@ -264,26 +262,19 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
 	kfree(a);
 }
 
-
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 					enum dma_data_direction direction)
 {
 	struct ion_dma_buf_attachment *a = attachment->priv;
 	struct sg_table *table;
-	int ret;
 
 	table = a->table;
 
 	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-			direction)){
-		ret = -ENOMEM;
-		goto err;
-	}
-	return table;
+			direction))
+		return ERR_PTR(-ENOMEM);
 
-err:
-	free_duped_table(table);
-	return ERR_PTR(ret);
+	return table;
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
@@ -354,11 +345,10 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		mutex_unlock(&buffer->lock);
 	}
 
-
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-					DMA_BIDIRECTIONAL);
+				    DMA_BIDIRECTIONAL);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -380,7 +370,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-					DMA_BIDIRECTIONAL);
+				       DMA_BIDIRECTIONAL);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -435,7 +425,7 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
 	}
 	up_read(&dev->lock);
 
-	if (buffer == NULL)
+	if (!buffer)
 		return -ENODEV;
 
 	if (IS_ERR(buffer))
@@ -596,7 +586,7 @@ void ion_device_add_heap(struct ion_heap *heap)
 }
 EXPORT_SYMBOL(ion_device_add_heap);
 
-int ion_device_create(void)
+static int ion_device_create(void)
 {
 	struct ion_device *idev;
 	int ret;
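
Note: after the ion_map_dma_buf() change above, a dma_map_sg() failure is reported as ERR_PTR(-ENOMEM) and the sg_table is left alone, since it is owned by the attachment (allocated in ion_dma_buf_attach() and freed in detach). A minimal importer-side sketch using the generic dma-buf API:

	struct sg_table *table;

	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(table))
		return PTR_ERR(table);	/* e.g. -ENOMEM when dma_map_sg() failed */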
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index ace8416..fa9ed81 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -68,14 +68,6 @@ struct ion_platform_heap {
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
  * @vaddr:		the kernel mapping if kmap_cnt is not zero
  * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
- * @pages:		flat array of pages in the buffer -- used by fault
- *			handler and only valid for buffers that are faulted in
- * @vmas:		list of vma's mapping this buffer
- * @handle_count:	count of handles referencing this buffer
- * @task_comm:		taskcomm of last client to reference this buffer in a
- *			handle, used for debugging
- * @pid:		pid of last client to reference this buffer in a
- *			handle, used for debugging
  */
 struct ion_buffer {
 	union {
@@ -92,13 +84,7 @@ struct ion_buffer {
 	int kmap_cnt;
 	void *vaddr;
 	struct sg_table *sg_table;
-	struct page **pages;
-	struct list_head vmas;
 	struct list_head attachments;
-	/* used to track orphaned buffers */
-	int handle_count;
-	char task_comm[TASK_COMM_LEN];
-	pid_t pid;
 };
 void ion_buffer_destroy(struct ion_buffer *buffer);
 
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 5fdc1f3..fee7650 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -33,7 +33,7 @@ struct ion_carveout_heap {
 };
 
 static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
-					     unsigned long size)
+					 unsigned long size)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index c50f2d9..5964bf2 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -153,7 +153,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		max_order = compound_order(page);
 		i++;
 	}
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
 		goto free_pages;
 
@@ -383,7 +383,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
 		__free_page(page + i);
 
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
 	if (!table) {
 		ret = -ENOMEM;
 		goto free_pages;
@@ -433,7 +433,7 @@ static struct ion_heap *__ion_system_contig_heap_create(void)
 {
 	struct ion_heap *heap;
 
-	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
 	heap->ops = &kmalloc_ops;
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index b76db1b2..9e21451 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -57,12 +57,6 @@ enum ion_heap_type {
  */
 #define ION_FLAG_CACHED 1
 
-/*
- * mappings of this buffer will created at mmap time, if this is set
- * caches must be managed manually
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2
-
 /**
  * DOC: Ion Userspace API
  *
@@ -131,24 +125,6 @@ struct ion_heap_query {
 				      struct ion_allocation_data)
 
 /**
- * DOC: ION_IOC_FREE - free memory
- *
- * Takes an ion_handle_data struct and frees the handle.
- */
-#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
-
-/**
- * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle.  Returns the struct with the fd field set to a file
- * descriptor open in the current address space.  This file descriptor
- * can then be passed to another process.  The corresponding opaque handle can
- * be retrieved via ION_IOC_IMPORT.
- */
-#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
-
-/**
  * DOC: ION_IOC_HEAP_QUERY - information about available heaps
  *
  * Takes an ion_heap_query structure and populates information about
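
Note: with ION_IOC_FREE and ION_IOC_SHARE removed, ION_IOC_ALLOC is the whole userspace allocation flow: it returns a dma-buf fd directly, and closing that fd releases the buffer. A minimal userspace sketch, assuming the staging layout of struct ion_allocation_data (len, heap_id_mask, flags, fd) and an already-open /dev/ion descriptor ion_fd:

	struct ion_allocation_data data = {
		.len = 4096,
		.heap_id_mask = 1 << 0,		/* heap id 0, for illustration */
		.flags = ION_FLAG_CACHED,
	};

	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) < 0)
		return -errno;

	/* data.fd is a dma-buf fd; close(data.fd) frees the buffer. */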
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index ae62704..36a87c6 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,6 +1,6 @@
 config CRYPTO_DEV_CCREE
 	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
-	depends on CRYPTO_HW && OF && HAS_DMA
+	depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
 	default n
 	select CRYPTO_HASH
 	select CRYPTO_BLKCIPHER
@@ -18,7 +18,7 @@
 	select CRYPTO_CTR
 	select CRYPTO_XTS
 	help
-	  Say 'Y' to enable a driver for the Arm TrustZone CryptoCell 
+	  Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
 	  C7xx. Currently only the CryptoCell 712 REE is supported.
 	  Choose this if you wish to use hardware acceleration of
 	  cryptographic operations on the system REE.
@@ -32,12 +32,3 @@
 	  Say 'Y' to enable support for FIPS compliant mode by the
 	  CCREE driver.
 	  If unsure say N.
-
-config CCREE_DISABLE_COHERENT_DMA_OPS
-	bool "Disable Coherent DMA operations for the CCREE driver"
-	depends on CRYPTO_DEV_CCREE
-	default n
-	help
-	  Say 'Y' to disable the use of coherent DMA operations by the
-	  CCREE driver for debugging purposes.  
-	  If unsure say N.
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index 44f3e3e..318c2b3 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o ssi_pm_ext.o
+ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o
 ccree-$(CCREE_FIPS_SUPPORT) += ssi_fips.o ssi_fips_ll.o ssi_fips_ext.o ssi_fips_local.o
diff --git a/drivers/staging/ccree/cc_bitops.h b/drivers/staging/ccree/cc_bitops.h
deleted file mode 100644
index 3a39565..0000000
--- a/drivers/staging/ccree/cc_bitops.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*!
- * \file cc_bitops.h
- * Bit fields operations macros.
- */
-#ifndef _CC_BITOPS_H_
-#define _CC_BITOPS_H_
-
-#define BITMASK(mask_size) (((mask_size) < 32) ?	\
-	((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
-#define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
-
-#define BITFIELD_GET(word, bit_offset, bit_size) \
-	(((word) >> (bit_offset)) & BITMASK(bit_size))
-#define BITFIELD_SET(word, bit_offset, bit_size, new_val)   do {    \
-	word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) |	    \
-		(((new_val) & BITMASK(bit_size)) << (bit_offset));  \
-} while (0)
-
-/* Is val aligned to "align" ("align" must be power of 2) */
-#ifndef IS_ALIGNED
-#define IS_ALIGNED(val, align)		\
-	(((uintptr_t)(val) & ((align) - 1)) == 0)
-#endif
-
-#define SWAP_ENDIAN(word)		\
-	(((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \
-	(((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24))
-
-#ifdef BIG__ENDIAN
-#define SWAP_TO_LE(word) SWAP_ENDIAN(word)
-#define SWAP_TO_BE(word) word
-#else
-#define SWAP_TO_LE(word) word
-#define SWAP_TO_BE(word) SWAP_ENDIAN(word)
-#endif
-
-
-
-/* Is val a multiple of "mult" ("mult" must be power of 2) */
-#define IS_MULT(val, mult)              \
-	(((val) & ((mult) - 1)) == 0)
-
-#define IS_NULL_ADDR(adr)		\
-	(!(adr))
-
-#endif /*_CC_BITOPS_H_*/
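
Note: the deleted helpers duplicate what GENMASK() and <linux/bitfield.h> already provide, which is what the cc_hw_queue_defs.h rework below switches to. A rough equivalence sketch with a hypothetical 8-bit field at bits 11:4:

	#include <linux/bitfield.h>
	#include <linux/bitops.h>

	#define EXAMPLE_FIELD	GENMASK(11, 4)	/* hypothetical field mask */

	static inline u32 example_set(u32 word, u32 val)
	{
		/* old: BITFIELD_SET(word, 4, 8, val) */
		return (word & ~EXAMPLE_FIELD) | FIELD_PREP(EXAMPLE_FIELD, val);
	}

	static inline u32 example_get(u32 word)
	{
		/* old: BITFIELD_GET(word, 4, 8) */
		return FIELD_GET(EXAMPLE_FIELD, word);
	}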
diff --git a/drivers/staging/ccree/cc_crypto_ctx.h b/drivers/staging/ccree/cc_crypto_ctx.h
index 9e10b26..591f6fd 100644
--- a/drivers/staging/ccree/cc_crypto_ctx.h
+++ b/drivers/staging/ccree/cc_crypto_ctx.h
@@ -1,35 +1,23 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-
 #ifndef _CC_CRYPTO_CTX_H_
 #define _CC_CRYPTO_CTX_H_
 
-#ifdef __KERNEL__
 #include <linux/types.h>
-#define INT32_MAX 0x7FFFFFFFL
-#else
-#include <stdint.h>
-#endif
-
-
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
 
 /* context size */
 #ifndef CC_CTX_SIZE_LOG2
@@ -39,7 +27,7 @@
 #define CC_CTX_SIZE_LOG2 7
 #endif
 #endif
-#define CC_CTX_SIZE (1<<CC_CTX_SIZE_LOG2)
+#define CC_CTX_SIZE BIT(CC_CTX_SIZE_LOG2)
 #define CC_DRV_CTX_SIZE_WORDS (CC_CTX_SIZE >> 2)
 
 #define CC_DRV_DES_IV_SIZE 8
@@ -65,13 +53,13 @@
 #define CC_AES_KEY_SIZE_MAX			CC_AES_256_BIT_KEY_SIZE
 #define CC_AES_KEY_SIZE_WORDS_MAX		(CC_AES_KEY_SIZE_MAX >> 2)
 
-#define CC_MD5_DIGEST_SIZE 	16
-#define CC_SHA1_DIGEST_SIZE 	20
-#define CC_SHA224_DIGEST_SIZE 	28
-#define CC_SHA256_DIGEST_SIZE 	32
+#define CC_MD5_DIGEST_SIZE	16
+#define CC_SHA1_DIGEST_SIZE	20
+#define CC_SHA224_DIGEST_SIZE	28
+#define CC_SHA256_DIGEST_SIZE	32
 #define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
-#define CC_SHA384_DIGEST_SIZE 	48
-#define CC_SHA512_DIGEST_SIZE 	64
+#define CC_SHA384_DIGEST_SIZE	48
+#define CC_SHA512_DIGEST_SIZE	64
 
 #define CC_SHA1_BLOCK_SIZE 64
 #define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
@@ -94,18 +82,17 @@
 
 #define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
 
-#define CC_MULTI2_SYSTEM_KEY_SIZE 		32
-#define CC_MULTI2_DATA_KEY_SIZE 		8
-#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE 	(CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
+#define CC_MULTI2_SYSTEM_KEY_SIZE		32
+#define CC_MULTI2_DATA_KEY_SIZE		8
+#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE \
+		(CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
 #define	CC_MULTI2_BLOCK_SIZE					8
 #define	CC_MULTI2_IV_SIZE					8
 #define	CC_MULTI2_MIN_NUM_ROUNDS				8
 #define	CC_MULTI2_MAX_NUM_ROUNDS				128
 
-
 #define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
 
-
 enum drv_engine_type {
 	DRV_ENGINE_NULL = 0,
 	DRV_ENGINE_AES = 1,
@@ -113,7 +100,7 @@ enum drv_engine_type {
 	DRV_ENGINE_HASH = 3,
 	DRV_ENGINE_RC4 = 4,
 	DRV_ENGINE_DOUT = 5,
-	DRV_ENGINE_RESERVE32B = INT32_MAX,
+	DRV_ENGINE_RESERVE32B = S32_MAX,
 };
 
 enum drv_crypto_alg {
@@ -126,7 +113,7 @@ enum drv_crypto_alg {
 	DRV_CRYPTO_ALG_AEAD = 5,
 	DRV_CRYPTO_ALG_BYPASS = 6,
 	DRV_CRYPTO_ALG_NUM = 7,
-	DRV_CRYPTO_ALG_RESERVE32B = INT32_MAX
+	DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
 };
 
 enum drv_crypto_direction {
@@ -134,7 +121,7 @@ enum drv_crypto_direction {
 	DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
 	DRV_CRYPTO_DIRECTION_DECRYPT = 1,
 	DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
-	DRV_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+	DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
 };
 
 enum drv_cipher_mode {
@@ -152,7 +139,7 @@ enum drv_cipher_mode {
 	DRV_CIPHER_GCTR = 12,
 	DRV_CIPHER_ESSIV = 13,
 	DRV_CIPHER_BITLOCKER = 14,
-	DRV_CIPHER_RESERVE32B = INT32_MAX
+	DRV_CIPHER_RESERVE32B = S32_MAX
 };
 
 enum drv_hash_mode {
@@ -163,11 +150,11 @@ enum drv_hash_mode {
 	DRV_HASH_SHA512 = 3,
 	DRV_HASH_SHA384 = 4,
 	DRV_HASH_MD5 = 5,
-	DRV_HASH_CBC_MAC = 6, 
+	DRV_HASH_CBC_MAC = 6,
 	DRV_HASH_XCBC_MAC = 7,
 	DRV_HASH_CMAC = 8,
 	DRV_HASH_MODE_NUM = 9,
-	DRV_HASH_RESERVE32B = INT32_MAX
+	DRV_HASH_RESERVE32B = S32_MAX
 };
 
 enum drv_hash_hw_mode {
@@ -178,7 +165,7 @@ enum drv_hash_hw_mode {
 	DRV_HASH_HW_SHA512 = 4,
 	DRV_HASH_HW_SHA384 = 12,
 	DRV_HASH_HW_GHASH = 6,
-	DRV_HASH_HW_RESERVE32B = INT32_MAX
+	DRV_HASH_HW_RESERVE32B = S32_MAX
 };
 
 enum drv_multi2_mode {
@@ -186,10 +173,9 @@ enum drv_multi2_mode {
 	DRV_MULTI2_ECB = 0,
 	DRV_MULTI2_CBC = 1,
 	DRV_MULTI2_OFB = 2,
-	DRV_MULTI2_RESERVE32B = INT32_MAX
+	DRV_MULTI2_RESERVE32B = S32_MAX
 };
 
-
 /* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
 /* drv_crypto_key_type[2] is mapped to cipher_config2 */
 enum drv_crypto_key_type {
@@ -201,99 +187,14 @@ enum drv_crypto_key_type {
 	DRV_APPLET_KEY = 4,		/* NA */
 	DRV_PLATFORM_KEY = 5,		/* 0x101 */
 	DRV_CUSTOMER_KEY = 6,		/* 0x110 */
-	DRV_END_OF_KEYS = INT32_MAX,
+	DRV_END_OF_KEYS = S32_MAX,
 };
 
 enum drv_crypto_padding_type {
 	DRV_PADDING_NONE = 0,
 	DRV_PADDING_PKCS7 = 1,
-	DRV_PADDING_RESERVE32B = INT32_MAX
+	DRV_PADDING_RESERVE32B = S32_MAX
 };
 
-/*******************************************************************/
-/***************** DESCRIPTOR BASED CONTEXTS ***********************/
-/*******************************************************************/
-
- /* Generic context ("super-class") */
-struct drv_ctx_generic {
-	enum drv_crypto_alg alg;
-} __attribute__((__may_alias__));
-
-
-struct drv_ctx_hash {
-	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HASH */
-	enum drv_hash_mode mode;
-	uint8_t digest[CC_DIGEST_SIZE_MAX];
-	/* reserve to end of allocated context size */
-	uint8_t reserved[CC_CTX_SIZE - 2 * sizeof(uint32_t) -
-			CC_DIGEST_SIZE_MAX];
-};
-
-/* !!!! drv_ctx_hmac should have the same structure as drv_ctx_hash except
-   k0, k0_size fields */
-struct drv_ctx_hmac {
-	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HMAC */
-	enum drv_hash_mode mode;
-	uint8_t digest[CC_DIGEST_SIZE_MAX];
-	uint32_t k0[CC_HMAC_BLOCK_SIZE_MAX/sizeof(uint32_t)];
-	uint32_t k0_size;
-	/* reserve to end of allocated context size */
-	uint8_t reserved[CC_CTX_SIZE - 3 * sizeof(uint32_t) -
-			CC_DIGEST_SIZE_MAX - CC_HMAC_BLOCK_SIZE_MAX];
-};
-
-struct drv_ctx_cipher {
-	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_AES */
-	enum drv_cipher_mode mode;
-	enum drv_crypto_direction direction;
-	enum drv_crypto_key_type crypto_key_type;
-	enum drv_crypto_padding_type padding_type;
-	uint32_t key_size; /* numeric value in bytes   */
-	uint32_t data_unit_size; /* required for XTS */
-	/* block_state is the AES engine block state.
-	*  It is used by the host to pass IV or counter at initialization.
-	*  It is used by SeP for intermediate block chaining state and for
-	*  returning MAC algorithms results.           */
-	uint8_t block_state[CC_AES_BLOCK_SIZE];
-	uint8_t key[CC_AES_KEY_SIZE_MAX];
-	uint8_t xex_key[CC_AES_KEY_SIZE_MAX];
-	/* reserve to end of allocated context size */
-	uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 7 -
-		CC_AES_BLOCK_SIZE/sizeof(uint32_t) - 2 *
-		(CC_AES_KEY_SIZE_MAX/sizeof(uint32_t))];
-};
-
-/* authentication and encryption with associated data class */
-struct drv_ctx_aead {
-	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_AES */
-	enum drv_cipher_mode mode;
-	enum drv_crypto_direction direction;
-	uint32_t key_size; /* numeric value in bytes   */
-	uint32_t nonce_size; /* nonce size (octets) */
-	uint32_t header_size; /* finit additional data size (octets) */
-	uint32_t text_size; /* finit text data size (octets) */
-	uint32_t tag_size; /* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
-	/* block_state1/2 is the AES engine block state */
-	uint8_t block_state[CC_AES_BLOCK_SIZE];
-	uint8_t mac_state[CC_AES_BLOCK_SIZE]; /* MAC result */
-	uint8_t nonce[CC_AES_BLOCK_SIZE]; /* nonce buffer */
-	uint8_t key[CC_AES_KEY_SIZE_MAX];
-	/* reserve to end of allocated context size */
-	uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 8 -
-		3 * (CC_AES_BLOCK_SIZE/sizeof(uint32_t)) -
-		CC_AES_KEY_SIZE_MAX/sizeof(uint32_t)];
-};
-
-/*******************************************************************/
-/***************** MESSAGE BASED CONTEXTS **************************/
-/*******************************************************************/
-
-
-/* Get the address of a @member within a given @ctx address
-   @ctx: The context address
-   @type: Type of context structure
-   @member: Associated context field */
-#define GET_CTX_FIELD_ADDR(ctx, type, member) (ctx + offsetof(type, member))
-
 #endif /* _CC_CRYPTO_CTX_H_ */
 
diff --git a/drivers/staging/ccree/cc_hal.h b/drivers/staging/ccree/cc_hal.h
index 75a0ce3..eecc866 100644
--- a/drivers/staging/ccree/cc_hal.h
+++ b/drivers/staging/ccree/cc_hal.h
@@ -1,20 +1,22 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-/* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from CC drivers) */
+/* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from
+ * CC drivers).
+ */
 
 #ifndef __CC_HAL_H__
 #define __CC_HAL_H__
@@ -24,7 +26,8 @@
 #define READ_REGISTER(_addr) ioread32((_addr))
 #define WRITE_REGISTER(_addr, _data)  iowrite32((_data), (_addr))
 
-#define CC_HAL_WRITE_REGISTER(offset, val) WRITE_REGISTER(cc_base + offset, val)
-#define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + offset)
+#define CC_HAL_WRITE_REGISTER(offset, val) \
+	WRITE_REGISTER(cc_base + (offset), val)
+#define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + (offset))
 
 #endif
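
Note: the added parentheses around offset matter once a caller passes an expression. A hypothetical example of the binding problem in the old macro:

	/* CC_HAL_READ_REGISTER(idx ? REG_A : REG_B) used to expand to
	 *   ioread32((cc_base + idx ? REG_A : REG_B))
	 * which C parses as (cc_base + idx) ? REG_A : REG_B.
	 * With (offset) it expands to ioread32((cc_base + (idx ? REG_A : REG_B))). */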
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
index fbaf1b6..e6b8cea 100644
--- a/drivers/staging/ccree/cc_hw_queue_defs.h
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,63 +17,95 @@
 #ifndef __CC_HW_QUEUE_DEFS_H__
 #define __CC_HW_QUEUE_DEFS_H__
 
-#include "cc_pal_log.h"
-#include "cc_regs.h"
-#include "dx_crys_kernel.h"
-
-#ifdef __KERNEL__
 #include <linux/types.h>
-#define UINT32_MAX 0xFFFFFFFFL
-#define INT32_MAX  0x7FFFFFFFL
-#define UINT16_MAX 0xFFFFL
-#else
-#include <stdint.h>
-#endif
+
+#include "dx_crys_kernel.h"
+#include <linux/bitfield.h>
 
 /******************************************************************************
-*                        	DEFINITIONS
-******************************************************************************/
-
-
-/* Dma AXI Secure bit */
-#define	AXI_SECURE	0
-#define AXI_NOT_SECURE	1
+ *				DEFINITIONS
+ ******************************************************************************/
 
 #define HW_DESC_SIZE_WORDS		6
 #define HW_QUEUE_SLOTS_MAX              15 /* Max. available slots in HW queue */
 
-#define _HW_DESC_MONITOR_KICK 0x7FFFC00
+#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name
+
+#define CC_REG_LOW(word, name)  \
+	(DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
+
+#define CC_REG_HIGH(word, name) \
+	(CC_REG_LOW(word, name) + \
+	 DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
+
+#define CC_GENMASK(word, name) \
+	GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
+
+#define WORD0_VALUE		CC_GENMASK(0, VALUE)
+#define WORD1_DIN_CONST_VALUE	CC_GENMASK(1, DIN_CONST_VALUE)
+#define WORD1_DIN_DMA_MODE	CC_GENMASK(1, DIN_DMA_MODE)
+#define WORD1_DIN_SIZE		CC_GENMASK(1, DIN_SIZE)
+#define WORD1_NOT_LAST		CC_GENMASK(1, NOT_LAST)
+#define WORD1_NS_BIT		CC_GENMASK(1, NS_BIT)
+#define WORD2_VALUE		CC_GENMASK(2, VALUE)
+#define WORD3_DOUT_DMA_MODE	CC_GENMASK(3, DOUT_DMA_MODE)
+#define WORD3_DOUT_LAST_IND	CC_GENMASK(3, DOUT_LAST_IND)
+#define WORD3_DOUT_SIZE		CC_GENMASK(3, DOUT_SIZE)
+#define WORD3_HASH_XOR_BIT	CC_GENMASK(3, HASH_XOR_BIT)
+#define WORD3_NS_BIT		CC_GENMASK(3, NS_BIT)
+#define WORD3_QUEUE_LAST_IND	CC_GENMASK(3, QUEUE_LAST_IND)
+#define WORD4_ACK_NEEDED	CC_GENMASK(4, ACK_NEEDED)
+#define WORD4_AES_SEL_N_HASH	CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_BYTES_SWAP	CC_GENMASK(4, BYTES_SWAP)
+#define WORD4_CIPHER_CONF0	CC_GENMASK(4, CIPHER_CONF0)
+#define WORD4_CIPHER_CONF1	CC_GENMASK(4, CIPHER_CONF1)
+#define WORD4_CIPHER_CONF2	CC_GENMASK(4, CIPHER_CONF2)
+#define WORD4_CIPHER_DO		CC_GENMASK(4, CIPHER_DO)
+#define WORD4_CIPHER_MODE	CC_GENMASK(4, CIPHER_MODE)
+#define WORD4_CMAC_SIZE0	CC_GENMASK(4, CMAC_SIZE0)
+#define WORD4_DATA_FLOW_MODE	CC_GENMASK(4, DATA_FLOW_MODE)
+#define WORD4_KEY_SIZE		CC_GENMASK(4, KEY_SIZE)
+#define WORD4_SETUP_OPERATION	CC_GENMASK(4, SETUP_OPERATION)
+#define WORD5_DIN_ADDR_HIGH	CC_GENMASK(5, DIN_ADDR_HIGH)
+#define WORD5_DOUT_ADDR_HIGH	CC_GENMASK(5, DOUT_ADDR_HIGH)
 
 /******************************************************************************
-*				TYPE DEFINITIONS
-******************************************************************************/
+ *				TYPE DEFINITIONS
+ ******************************************************************************/
 
-typedef struct HwDesc {
-	uint32_t word[HW_DESC_SIZE_WORDS];
-} HwDesc_s;
+struct cc_hw_desc {
+	union {
+		u32 word[HW_DESC_SIZE_WORDS];
+		u16 hword[HW_DESC_SIZE_WORDS * 2];
+	};
+};
 
-typedef enum DescDirection {
+enum cc_axi_sec {
+	AXI_SECURE = 0,
+	AXI_NOT_SECURE = 1
+};
+
+enum cc_desc_direction {
 	DESC_DIRECTION_ILLEGAL = -1,
 	DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
 	DESC_DIRECTION_DECRYPT_DECRYPT = 1,
 	DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
-	DESC_DIRECTION_END = INT32_MAX,
-}DescDirection_t;
+	DESC_DIRECTION_END = S32_MAX,
+};
 
-typedef enum DmaMode {
+enum cc_dma_mode {
 	DMA_MODE_NULL		= -1,
-	NO_DMA 			= 0,
+	NO_DMA			= 0,
 	DMA_SRAM		= 1,
 	DMA_DLLI		= 2,
 	DMA_MLLI		= 3,
-	DmaMode_OPTIONTS,
-	DmaMode_END 		= INT32_MAX,
-}DmaMode_t;
+	DMA_MODE_END		= S32_MAX,
+};
 
-typedef enum FlowMode {
+enum cc_flow_mode {
 	FLOW_MODE_NULL		= -1,
 	/* data flows */
- 	BYPASS			= 0,
+	BYPASS			= 0,
 	DIN_AES_DOUT		= 1,
 	AES_to_HASH		= 2,
 	AES_and_HASH		= 3,
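
Note: the CC_GENMASK() helpers above fold the generated *_BIT_SHIFT/*_BIT_SIZE register constants into one mask that FIELD_PREP() can fill, replacing the old CC_REG_FLD_SET() read-modify-write macros. A worked expansion, assuming a shift of 2 for DIN_SIZE (hypothetical) and the 24-bit width noted in cc_lli_defs.h:

	/* If DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT == 2 and
	 * DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE == 24 (0x18), then
	 *
	 *   WORD1_DIN_SIZE = CC_GENMASK(1, DIN_SIZE)
	 *                  = GENMASK(2 + 24 - 1, 2) = GENMASK(25, 2)
	 *
	 * and FIELD_PREP(WORD1_DIN_SIZE, size) shifts 'size' into bits 25:2 of
	 * word[1], which is exactly what set_din_type() further down does. */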
@@ -93,11 +125,11 @@ typedef enum FlowMode {
 	DIN_AES_AESMAC		= 17,
 	HASH_to_DOUT		= 18,
 	/* setup flows */
- 	S_DIN_to_AES 		= 32,
+	S_DIN_to_AES		= 32,
 	S_DIN_to_AES2		= 33,
 	S_DIN_to_DES		= 34,
 	S_DIN_to_RC4		= 35,
- 	S_DIN_to_MULTI2		= 36,
+	S_DIN_to_MULTI2		= 36,
 	S_DIN_to_HASH		= 37,
 	S_AES_to_DOUT		= 38,
 	S_AES2_to_DOUT		= 39,
@@ -105,47 +137,43 @@ typedef enum FlowMode {
 	S_DES_to_DOUT		= 42,
 	S_HASH_to_DOUT		= 43,
 	SET_FLOW_ID		= 44,
-	FlowMode_OPTIONTS,
-	FlowMode_END = INT32_MAX,
-}FlowMode_t;
+	FLOW_MODE_END = S32_MAX,
+};
 
-typedef enum TunnelOp {
+enum cc_tunnel_op {
 	TUNNEL_OP_INVALID = -1,
 	TUNNEL_OFF = 0,
 	TUNNEL_ON = 1,
-	TunnelOp_OPTIONS,
-	TunnelOp_END = INT32_MAX,
-} TunnelOp_t;
+	TUNNEL_OP_END = S32_MAX,
+};
 
-typedef enum SetupOp {
+enum cc_setup_op {
 	SETUP_LOAD_NOP		= 0,
 	SETUP_LOAD_STATE0	= 1,
 	SETUP_LOAD_STATE1	= 2,
 	SETUP_LOAD_STATE2	= 3,
 	SETUP_LOAD_KEY0		= 4,
 	SETUP_LOAD_XEX_KEY	= 5,
-	SETUP_WRITE_STATE0	= 8, 
+	SETUP_WRITE_STATE0	= 8,
 	SETUP_WRITE_STATE1	= 9,
 	SETUP_WRITE_STATE2	= 10,
 	SETUP_WRITE_STATE3	= 11,
-	setupOp_OPTIONTS,
-	setupOp_END = INT32_MAX,	
-}SetupOp_t;
+	SETUP_OP_END = S32_MAX,
+};
 
-enum AesMacSelector {
+enum cc_aes_mac_selector {
 	AES_SK = 1,
 	AES_CMAC_INIT = 2,
 	AES_CMAC_SIZE0 = 3,
-	AesMacEnd = INT32_MAX,
+	AES_MAC_END = S32_MAX,
 };
 
-#define HW_KEY_MASK_CIPHER_DO 	  0x3
+#define HW_KEY_MASK_CIPHER_DO	  0x3
 #define HW_KEY_SHIFT_CIPHER_CFG2  2
 
-
 /* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
 /* HwCryptoKey[2:3] is mapped to cipher_config2[1:0] */
-typedef enum HwCryptoKey {
+enum cc_hw_crypto_key {
 	USER_KEY = 0,			/* 0x0000 */
 	ROOT_KEY = 1,			/* 0x0001 */
 	PROVISIONING_KEY = 2,		/* 0x0010 */ /* ==KCP */
@@ -157,447 +185,409 @@ typedef enum HwCryptoKey {
 	KFDE1_KEY = 9,			/* 0x1001 */
 	KFDE2_KEY = 10,			/* 0x1010 */
 	KFDE3_KEY = 11,			/* 0x1011 */
-	END_OF_KEYS = INT32_MAX,
-}HwCryptoKey_t;
+	END_OF_KEYS = S32_MAX,
+};
 
-typedef enum HwAesKeySize {
+enum cc_hw_aes_key_size {
 	AES_128_KEY = 0,
 	AES_192_KEY = 1,
 	AES_256_KEY = 2,
-	END_OF_AES_KEYS = INT32_MAX,
-}HwAesKeySize_t;
+	END_OF_AES_KEYS = S32_MAX,
+};
 
-typedef enum HwDesKeySize {
+enum cc_hw_des_key_size {
 	DES_ONE_KEY = 0,
 	DES_TWO_KEYS = 1,
 	DES_THREE_KEYS = 2,
-	END_OF_DES_KEYS = INT32_MAX,
-}HwDesKeySize_t;
+	END_OF_DES_KEYS = S32_MAX,
+};
 
 /*****************************/
 /* Descriptor packing macros */
 /*****************************/
 
-#define GET_HW_Q_DESC_WORD_IDX(descWordIdx) (CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD ## descWordIdx) )
+/*
+ * Init a HW descriptor struct
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void hw_desc_init(struct cc_hw_desc *pdesc)
+{
+	memset(pdesc, 0, sizeof(struct cc_hw_desc));
+}
 
-#define HW_DESC_INIT(pDesc)  do { \
-	(pDesc)->word[0] = 0;     \
-	(pDesc)->word[1] = 0;     \
-	(pDesc)->word[2] = 0;     \
-	(pDesc)->word[3] = 0;     \
-	(pDesc)->word[4] = 0;     \
-	(pDesc)->word[5] = 0;     \
-} while (0)
+/*
+ * Indicates the end of the current HW descriptor flow and releases the HW engines.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_queue_last_ind(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
+}
 
-/* HW descriptor debug functions */
-int createDetailedDump(HwDesc_s *pDesc);
-void descriptor_log(HwDesc_s *desc);
-
-#if defined(HW_DESCRIPTOR_LOG) || defined(HW_DESC_DUMP_HOST_BUF)
-#define LOG_HW_DESC(pDesc) descriptor_log(pDesc)
-#else
-#define LOG_HW_DESC(pDesc) 
+/*
+ * Set the DIN field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DIN address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_din_type(struct cc_hw_desc *pdesc,
+				enum cc_dma_mode dma_mode, dma_addr_t addr,
+				u32 size, enum cc_axi_sec axi_sec)
+{
+	pdesc->word[0] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
 #endif
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
+				FIELD_PREP(WORD1_DIN_SIZE, size) |
+				FIELD_PREP(WORD1_NS_BIT, axi_sec);
+}
 
-#if (CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) || defined(OEMFW_LOG)
+/*
+ * Set the DIN field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+	pdesc->word[0] = addr;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
+}
 
-#ifdef UART_PRINTF
-#define CREATE_DETAILED_DUMP(pDesc) createDetailedDump(pDesc)
-#else
-#define CREATE_DETAILED_DUMP(pDesc) 
-#endif 
+/*
+ * Set the DIN field of a HW descriptor to SRAM mode.
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DIN address
+ * @size: Data size in bytes
+ */
+static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				u32 size)
+{
+	pdesc->word[0] = (u32)addr;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
+				FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
+}
 
-#define HW_DESC_DUMP(pDesc) do {            			\
-	CC_PAL_LOG_TRACE("\n---------------------------------------------------\n");	\
-	CREATE_DETAILED_DUMP(pDesc); 				\
-	CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[0]);  	\
-	CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[1]);  	\
-	CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[2]);  	\
-	CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[3]);  	\
-	CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[4]);  	\
-	CC_PAL_LOG_TRACE("0x%08X\n", (unsigned int)(pDesc)->word[5]);  	\
-	CC_PAL_LOG_TRACE("---------------------------------------------------\n\n");    \
-} while (0)
+/*
+ * Set the DIN field of a HW descriptor to CONST mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: DIN const value
+ * @size: Data size in bytes
+ */
+static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
+{
+	pdesc->word[0] = val;
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
+			FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
+			FIELD_PREP(WORD1_DIN_SIZE, size);
+}
 
-#else
-#define HW_DESC_DUMP(pDesc) do {} while (0)
+/*
+ * Set the DIN not last input data indicator
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_type(struct cc_hw_desc *pdesc,
+				 enum cc_dma_mode dma_mode, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec)
+{
+	pdesc->word[2] = (u32)addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
 #endif
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
+				FIELD_PREP(WORD3_DOUT_SIZE, size) |
+				FIELD_PREP(WORD3_NS_BIT, axi_sec);
+}
 
-
-/*!
- * This macro indicates the end of current HW descriptors flow and release the HW engines.
- * 
- * \param pDesc pointer HW descriptor struct
- */
-#define HW_DESC_SET_QUEUE_LAST_IND(pDesc) 								\
-	do {												\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1);	\
-	} while (0)
-
-/*!
- * This macro signs the end of HW descriptors flow by asking for completion ack, and release the HW engines
- * 
- * \param pDesc pointer HW descriptor struct 
- */
-#define HW_DESC_SET_ACK_LAST(pDesc) 									\
-	do {												\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1);	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, ACK_NEEDED, (pDesc)->word[4], 1);	\
-	} while (0)
-
-
-#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
-
-/*!
- * This macro sets the DIN field of a HW descriptors
- * 
- * \param pDesc pointer HW descriptor struct 
- * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
- * \param dinAdr DIN address
- * \param dinSize Data size in bytes 
- * \param axiNs AXI secure bit
- */
-#define HW_DESC_SET_DIN_TYPE(pDesc, dmaMode, dinAdr, dinSize, axiNs)								\
-	do {		                                                                                        		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (dinAdr)&UINT32_MAX );			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DIN_ADDR_HIGH, (pDesc)->word[5], MSB64(dinAdr) );		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], (dmaMode));			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));				\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NS_BIT, (pDesc)->word[1], (axiNs));				\
-	} while (0)
-
-
-/*!
- * This macro sets the DIN field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and 
- * other special modes 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param dinAdr DIN address
- * \param dinSize Data size in bytes 
- */
-#define HW_DESC_SET_DIN_NO_DMA(pDesc, dinAdr, dinSize)									\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));			\
-	} while (0)
-
-/*!
- * This macro sets the DIN field of a HW descriptors to SRAM mode. 
- * Note: No need to check SRAM alignment since host requests do not use SRAM and 
- * adaptor will enforce alignment check. 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param dinAdr DIN address
- * \param dinSize Data size in bytes 
- */
-#define HW_DESC_SET_DIN_SRAM(pDesc, dinAdr, dinSize)									\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM);		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));			\
-	} while (0)
-
-/*! This macro sets the DIN field of a HW descriptors to CONST mode 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param val DIN const value
- * \param dinSize Data size in bytes 
- */
-#define HW_DESC_SET_DIN_CONST(pDesc, val, dinSize)									\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(val));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_CONST_VALUE, (pDesc)->word[1], 1);			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM);		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize));			\
-	} while (0)
-
-/*!
- * This macro sets the DIN not last input data indicator
- * 
- * \param pDesc pointer HW descriptor struct
- */
-#define HW_DESC_SET_DIN_NOT_LAST_INDICATION(pDesc)									\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NOT_LAST, (pDesc)->word[1], 1);				\
-	} while (0)
-
-/*!
- * This macro sets the DOUT field of a HW descriptors 
- * 
- * \param pDesc pointer HW descriptor struct 
- * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
- * \param doutAdr DOUT address
- * \param doutSize Data size in bytes 
- * \param axiNs AXI secure bit
- */
-#define HW_DESC_SET_DOUT_TYPE(pDesc, dmaMode, doutAdr, doutSize, axiNs)							\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], (dmaMode));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs));			\
-	} while (0)
-
-/*!
- * This macro sets the DOUT field of a HW descriptors to DLLI type 
- * The LAST INDICATION is provided by the user 
- * 
- * \param pDesc pointer HW descriptor struct 
- * \param doutAdr DOUT address
- * \param doutSize Data size in bytes 
- * \param lastInd The last indication bit
- * \param axiNs AXI secure bit 
- */
-#define HW_DESC_SET_DOUT_DLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd)								\
-	do {		                                                                                        		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_DLLI);			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd);			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs));				\
-	} while (0)
-
-/*!
- * This macro sets the DOUT field of a HW descriptors to DLLI type 
- * The LAST INDICATION is provided by the user 
- * 
- * \param pDesc pointer HW descriptor struct 
- * \param doutAdr DOUT address
- * \param doutSize Data size in bytes 
- * \param lastInd The last indication bit
- * \param axiNs AXI secure bit 
- */
-#define HW_DESC_SET_DOUT_MLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd)								\
-	do {		                                                                                        		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX );		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) );	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_MLLI);			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd);			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs));				\
-	} while (0)
-
-/*!
- * This macro sets the DOUT field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and 
- * other special modes 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param doutAdr DOUT address
- * \param doutSize Data size in bytes  
- * \param registerWriteEnable Enables a write operation to a register
- */
-#define HW_DESC_SET_DOUT_NO_DMA(pDesc, doutAdr, doutSize, registerWriteEnable)							\
-	do {		                                                                                        		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr));			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));			\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], (registerWriteEnable));	\
-	} while (0)
-
-/*!
- * This macro sets the word for the XOR operation. 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param xorVal xor data value
- */
-#define HW_DESC_SET_XOR_VAL(pDesc, xorVal)										\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(xorVal));		\
-	} while (0)
-
-/*!
- * This macro sets the XOR indicator bit in the descriptor
- * 
- * \param pDesc pointer HW descriptor struct
- */
-#define HW_DESC_SET_XOR_ACTIVE(pDesc)											\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, HASH_XOR_BIT, (pDesc)->word[3], 1);			\
-	} while (0)
-
-/*!
- * This macro selects the AES engine instead of HASH engine when setting up combined mode with AES XCBC MAC
- * 
- * \param pDesc pointer HW descriptor struct
- */
-#define HW_DESC_SET_AES_NOT_HASH_MODE(pDesc)										\
-	do {		                                                                                       	 	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, AES_SEL_N_HASH, (pDesc)->word[4], 1);			\
-	} while (0)
-
-/*!
- * This macro sets the DOUT field of a HW descriptors to SRAM mode
- * Note: No need to check SRAM alignment since host requests do not use SRAM and 
- * adaptor will enforce alignment check. 
- * 
- * \param pDesc pointer HW descriptor struct
- * \param doutAdr DOUT address
- * \param doutSize Data size in bytes 
- */
-#define HW_DESC_SET_DOUT_SRAM(pDesc, doutAdr, doutSize)									\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr));		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_SRAM);		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize));		\
-	} while (0)
-
-
-/*!
- * This macro sets the data unit size for XEX mode in data_out_addr[15:0]
- * 
- * \param pDesc pointer HW descriptor struct
- * \param dataUnitSize data unit size for XEX mode
- */
-#define HW_DESC_SET_XEX_DATA_UNIT_SIZE(pDesc, dataUnitSize)								\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(dataUnitSize));	\
-	} while (0)
-
-/*!
- * This macro sets the number of rounds for Multi2 in data_out_addr[15:0]
+/*
+ * Set the DOUT field of a HW descriptor to DLLI type
+ * The LAST INDICATION is provided by the user
  *
- * \param pDesc pointer HW descriptor struct
- * \param numRounds number of rounds for Multi2
-*/
-#define HW_DESC_SET_MULTI2_NUM_ROUNDS(pDesc, numRounds)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(numRounds));	\
-	} while (0)
-
-/*!
- * This macro sets the flow mode.
- *
- * \param pDesc pointer HW descriptor struct
- * \param flowMode Any one of the modes defined in [CC7x-DESC]
-*/
-
-#define HW_DESC_SET_FLOW_MODE(pDesc, flowMode)										\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, DATA_FLOW_MODE, (pDesc)->word[4], (flowMode));		\
-	} while (0)
-
-/*!
- * This macro sets the cipher mode.
- *
- * \param pDesc pointer HW descriptor struct
- * \param cipherMode Any one of the modes defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_CIPHER_MODE(pDesc, cipherMode)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_MODE, (pDesc)->word[4], (cipherMode));		\
-	} while (0)
-
-/*!
- * This macro sets the cipher configuration fields.
- *
- * \param pDesc pointer HW descriptor struct
- * \param cipherConfig Any one of the modes defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_CIPHER_CONFIG0(pDesc, cipherConfig)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF0, (pDesc)->word[4], (cipherConfig));	\
-	} while (0)
-
-/*!
- * This macro sets the cipher configuration fields.
- *
- * \param pDesc pointer HW descriptor struct
- * \param cipherConfig Any one of the modes defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_CIPHER_CONFIG1(pDesc, cipherConfig)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF1, (pDesc)->word[4], (cipherConfig));	\
-	} while (0)
-
-/*!
- * This macro sets HW key configuration fields.
- *
- * \param pDesc pointer HW descriptor struct
- * \param hwKey The hw key number as in enun HwCryptoKey
-*/
-#define HW_DESC_SET_HW_CRYPTO_KEY(pDesc, hwKey)										\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (hwKey)&HW_KEY_MASK_CIPHER_DO);		\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF2, (pDesc)->word[4], (hwKey>>HW_KEY_SHIFT_CIPHER_CFG2));	\
-	} while (0)
-
-/*!
- * This macro changes the bytes order of all setup-finalize descriptosets.
- *
- * \param pDesc pointer HW descriptor struct
- * \param swapConfig Any one of the modes defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_BYTES_SWAP(pDesc, swapConfig)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, BYTES_SWAP, (pDesc)->word[4], (swapConfig));		\
-	} while (0)
-
-/*!
- * This macro sets the CMAC_SIZE0 mode.
- *
- * \param pDesc pointer HW descriptor struct
-*/
-#define HW_DESC_SET_CMAC_SIZE0_MODE(pDesc)										\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CMAC_SIZE0, (pDesc)->word[4], 0x1);			\
-	} while (0)
-
-/*!
- * This macro sets the key size for AES engine.
- *
- * \param pDesc pointer HW descriptor struct
- * \param keySize key size in bytes (NOT size code)
-*/
-#define HW_DESC_SET_KEY_SIZE_AES(pDesc, keySize)									\
-	do {													        \
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 2);	\
-	} while (0)
-
-/*!
- * This macro sets the key size for DES engine.
- *
- * \param pDesc pointer HW descriptor struct
- * \param keySize key size in bytes (NOT size code)
-*/
-#define HW_DESC_SET_KEY_SIZE_DES(pDesc, keySize)									\
-	do {													        \
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 1);	\
-	} while (0)
-
-/*!
- * This macro sets the descriptor's setup mode
- *
- * \param pDesc pointer HW descriptor struct
- * \param setupMode Any one of the setup modes defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_SETUP_MODE(pDesc, setupMode)									\
-	do {														\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, SETUP_OPERATION, (pDesc)->word[4], (setupMode));	\
-	} while (0)
-
-/*!
- * This macro sets the descriptor's cipher do
- *
- * \param pDesc pointer HW descriptor struct
- * \param cipherDo Any one of the cipher do defined in [CC7x-DESC]
-*/
-#define HW_DESC_SET_CIPHER_DO(pDesc, cipherDo)											\
-	do {															\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (cipherDo)&HW_KEY_MASK_CIPHER_DO);	\
-	} while (0)
-
-/*!
- * This macro sets the DIN field of a HW descriptors to star/stop monitor descriptor. 
- * Used for performance measurements and debug purposes.
- * 
- * \param pDesc pointer HW descriptor struct
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
  */
-#define HW_DESC_SET_DIN_MONITOR_CNTR(pDesc)										\
-	do {		                                                                                        	\
-		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR, VALUE, (pDesc)->word[1], _HW_DESC_MONITOR_KICK);	\
-	} while (0)
+static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec,
+				 u32 last_ind)
+{
+	set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
 
+/*
+ * Set the DOUT field of a HW descriptor to MLLI type
+ * The LAST INDICATION is provided by the user
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @last_ind: The last indication bit
+ * @axi_sec: AXI secure bit
+ */
+static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
+				 u32 size, enum cc_axi_sec axi_sec,
+				 bool last_ind)
+{
+	set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
+}
 
+/*
+ * Set the DOUT field of a HW descriptor to NO DMA mode.
+ * Used for NOP descriptor, register patches and other special modes.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ * @write_enable: Enables a write operation to a register
+ */
+static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
+				   u32 size, bool write_enable)
+{
+	pdesc->word[2] = addr;
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
+			FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
+}
+
+/*
+ * Set the word for the XOR operation.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @val: xor data value
+ */
+static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
+{
+	pdesc->word[2] = val;
+}
+
+/*
+ * Sets the XOR indicator bit in the descriptor
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_xor_active(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
+}
+
+/*
+ * Select the AES engine instead of HASH engine when setting up combined mode
+ * with AES XCBC MAC
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
+}
+
+/*
+ * Set the DOUT field of a HW descriptor to SRAM mode
+ * Note: No need to check SRAM alignment since host requests do not use SRAM
+ * and the adaptor will enforce the alignment check.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @addr: DOUT address
+ * @size: Data size in bytes
+ */
+static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
+{
+	pdesc->word[2] = addr;
+	pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
+			FIELD_PREP(WORD3_DOUT_SIZE, size);
+}
+
+/*
+ * Sets the data unit size for XEX mode in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: data unit size for XEX mode
+ */
+static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
+{
+	pdesc->word[2] = size;
+}
+
+/*
+ * Set the number of rounds for Multi2 in data_out_addr[15:0]
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @num: number of rounds for Multi2
+ */
+static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
+{
+	pdesc->word[2] = num;
+}
+
+/*
+ * Set the flow mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_flow_mode(struct cc_hw_desc *pdesc,
+				 enum cc_flow_mode mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
+}
+
+/*
+ * Set the cipher mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode:  Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
+				   enum drv_cipher_mode mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
+				      enum drv_crypto_direction mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
+}
+
+/*
+ * Set the cipher configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
+				      enum cc_hash_conf_pad config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
+}
+
+/*
+ * Set HW key configuration fields.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
+ */
+static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
+				     enum cc_hw_crypto_key hw_key)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+				     (hw_key & HW_KEY_MASK_CIPHER_DO)) |
+			FIELD_PREP(WORD4_CIPHER_CONF2,
+				   (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
+}
+
+/*
+ * Set byte order of all setup-finalize descriptors.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the modes defined in [CC7x-DESC]
+ */
+static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
+}
+
+/*
+ * Set CMAC_SIZE0 mode.
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
+}
+
+/*
+ * Set key size descriptor field.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
+}
+
+/*
+ * Set AES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
+{
+	set_key_size(pdesc, ((size >> 3) - 2));
+}
+
+/*
+ * Set DES key size.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @size: key size in bytes (NOT size code)
+ */
+static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
+{
+	set_key_size(pdesc, ((size >> 3) - 1));
+}
+
+/*
+ * Set the descriptor setup mode
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @mode: Any one of the setup modes defined in [CC7x-DESC]
+ */
+static inline void set_setup_mode(struct cc_hw_desc *pdesc,
+				  enum cc_setup_op mode)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
+}
+
+/*
+ * Set the descriptor cipher DO
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @config: Any one of the cipher do defined in [CC7x-DESC]
+ */
+static inline void set_cipher_do(struct cc_hw_desc *pdesc,
+				 enum cc_hash_cipher_pad config)
+{
+	pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
+				(config & HW_KEY_MASK_CIPHER_DO));
+}
 
 #endif /*__CC_HW_QUEUE_DEFS_H__*/
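For orientation, the setter helpers above are meant to be chained when composing a single descriptor. A minimal sketch follows; it assumes the hw_desc_init() helper and the DRV_CIPHER_ECB, DRV_CRYPTO_DIRECTION_ENCRYPT, S_DIN_to_AES and SETUP_LOAD_KEY0 enum values declared elsewhere in this header set, so it is illustrative rather than part of the patch:

	struct cc_hw_desc desc;

	hw_desc_init(&desc);				/* assumed helper that zeroes the descriptor words */
	set_cipher_mode(&desc, DRV_CIPHER_ECB);		/* assumed enum drv_cipher_mode value */
	set_cipher_config0(&desc, DRV_CRYPTO_DIRECTION_ENCRYPT); /* assumed direction value */
	set_key_size_aes(&desc, 32);			/* 32-byte (AES-256) key maps to HW size code 2 */
	set_flow_mode(&desc, S_DIN_to_AES);		/* assumed enum cc_flow_mode value */
	set_setup_mode(&desc, SETUP_LOAD_KEY0);		/* assumed enum cc_setup_op value */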
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index 697f1ed..851d390 100644
--- a/drivers/staging/ccree/cc_lli_defs.h
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -1,46 +1,42 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-
 #ifndef _CC_LLI_DEFS_H_
 #define _CC_LLI_DEFS_H_
-#ifdef __KERNEL__
+
 #include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-#include "cc_bitops.h"
 
-/* Max DLLI size */
-#define DLLI_SIZE_BIT_SIZE	0x18	// DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+/* Max DLLI size
+ *  AKA DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+ */
+#define DLLI_SIZE_BIT_SIZE	0x18
 
-#define CC_MAX_MLLI_ENTRY_SIZE 0x10000
+#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
 
-#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
-
-#define LLI_SET_ADDR(lli_p, addr) \
-		BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD0_OFFSET], LLI_LADDR_BIT_OFFSET, LLI_LADDR_BIT_SIZE, (addr & UINT32_MAX)); \
-		BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_HADDR_BIT_OFFSET, LLI_HADDR_BIT_SIZE, MSB64(addr));
-
-#define LLI_SET_SIZE(lli_p, size) \
-		BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_SIZE_BIT_OFFSET, LLI_SIZE_BIT_SIZE, size)
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
+		(2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+		 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
 
 /* Size of entry */
 #define LLI_ENTRY_WORD_SIZE 2
-#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(uint32_t))
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
 
 /* Word0[31:0] = ADDR[31:0] */
 #define LLI_WORD0_OFFSET 0
@@ -53,5 +49,24 @@
 #define LLI_HADDR_BIT_OFFSET 16
 #define LLI_HADDR_BIT_SIZE 16
 
+#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
+#define LLI_HADDR_MASK GENMASK( \
+			       (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
+				LLI_HADDR_BIT_OFFSET)
+
+static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+{
+	lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+}
+
+static inline void cc_lli_set_size(u32 *lli_p, u16 size)
+{
+	lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
+	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
+}
 
 #endif /*_CC_LLI_DEFS_H_*/
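The two inline helpers above replace the old LLI_SET_ADDR/LLI_SET_SIZE macros. As a usage illustration only, with lli, dma_addr and nbytes as hypothetical locals, one MLLI table entry would be filled roughly like this:

	u32 lli[LLI_ENTRY_WORD_SIZE] = { 0 };

	cc_lli_set_addr(lli, dma_addr);	/* word 0 = low 32 address bits, word 1 high half = upper address bits */
	cc_lli_set_size(lli, nbytes);	/* data size packed into the low half of word 1 */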
diff --git a/drivers/staging/ccree/cc_pal_log.h b/drivers/staging/ccree/cc_pal_log.h
deleted file mode 100644
index e5f5a87..0000000
--- a/drivers/staging/ccree/cc_pal_log.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CC_PAL_LOG_H_
-#define _CC_PAL_LOG_H_
-
-#include "cc_pal_types.h"
-#include "cc_pal_log_plat.h"
-
-/*!
-@file 
-@brief This file contains the PAL layer log definitions, by default the log is disabled. 
-@defgroup cc_pal_log CryptoCell PAL logging APIs and definitions
-@{
-@ingroup cc_pal
-*/
-
-/* PAL log levels (to be used in CC_PAL_logLevel) */
-/*! PAL log level - disabled. */
-#define CC_PAL_LOG_LEVEL_NULL      (-1) /*!< \internal Disable logging */
-/*! PAL log level - error. */
-#define CC_PAL_LOG_LEVEL_ERR       0
-/*! PAL log level - warning. */
-#define CC_PAL_LOG_LEVEL_WARN      1
-/*! PAL log level - info. */
-#define CC_PAL_LOG_LEVEL_INFO      2
-/*! PAL log level - debug. */
-#define CC_PAL_LOG_LEVEL_DEBUG     3
-/*! PAL log level - trace. */
-#define CC_PAL_LOG_LEVEL_TRACE     4
-/*! PAL log level - data. */
-#define CC_PAL_LOG_LEVEL_DATA      5
-
-#ifndef CC_PAL_LOG_CUR_COMPONENT
-/* Setting default component mask in case caller did not define */
-/* (a mask that is always on for every log mask value but full masking) */
-/*! Default log debugged component.*/
-#define CC_PAL_LOG_CUR_COMPONENT 0xFFFFFFFF
-#endif
-#ifndef CC_PAL_LOG_CUR_COMPONENT_NAME
-/*! Default log debugged component.*/
-#define CC_PAL_LOG_CUR_COMPONENT_NAME "CC"
-#endif
-
-/* Select compile time log level (default if not explicitly specified by caller) */
-#ifndef CC_PAL_MAX_LOG_LEVEL /* Can be overriden by external definition of this constant */
-#ifdef DEBUG
-/*! Default debug log level (when debug is set to on).*/
-#define CC_PAL_MAX_LOG_LEVEL  CC_PAL_LOG_LEVEL_ERR /*CC_PAL_LOG_LEVEL_DEBUG*/
-#else /* Disable logging */
-/*! Default debug log level (when debug is set to on).*/
-#define CC_PAL_MAX_LOG_LEVEL CC_PAL_LOG_LEVEL_NULL
-#endif
-#endif /*CC_PAL_MAX_LOG_LEVEL*/
-/*! Evaluate CC_PAL_MAX_LOG_LEVEL in case provided by caller */
-#define __CC_PAL_LOG_LEVEL_EVAL(level) level
-/*! Maximal log level defintion.*/
-#define _CC_PAL_MAX_LOG_LEVEL __CC_PAL_LOG_LEVEL_EVAL(CC_PAL_MAX_LOG_LEVEL)
-
-
-#ifdef ARM_DSM
-/*! Log init function. */
-#define CC_PalLogInit() do {} while (0)
-/*! Log set level function - sets the level of logging in case of debug. */
-#define CC_PalLogLevelSet(setLevel) do {} while (0)
-/*! Log set mask function - sets the component masking in case of debug. */
-#define CC_PalLogMaskSet(setMask) do {} while (0)
-#else
-#if _CC_PAL_MAX_LOG_LEVEL > CC_PAL_LOG_LEVEL_NULL
-/*! Log init function. */
-void CC_PalLogInit(void);
-/*! Log set level function - sets the level of logging in case of debug. */
-void CC_PalLogLevelSet(int setLevel);
-/*! Log set mask function - sets the component masking in case of debug. */
-void CC_PalLogMaskSet(uint32_t setMask);
-/*! Global variable for log level */
-extern int CC_PAL_logLevel;
-/*! Global variable for log mask */
-extern uint32_t CC_PAL_logMask;
-#else /* No log */
-/*! Log init function. */
-static inline void CC_PalLogInit(void) {}
-/*! Log set level function - sets the level of logging in case of debug. */
-static inline void CC_PalLogLevelSet(int setLevel) {CC_UNUSED_PARAM(setLevel);}
-/*! Log set mask function - sets the component masking in case of debug. */
-static inline void CC_PalLogMaskSet(uint32_t setMask) {CC_UNUSED_PARAM(setMask);}
-#endif
-#endif
-
-/*! Filter logging based on logMask and dispatch to platform specific logging mechanism. */
-#define _CC_PAL_LOG(level, format, ...)  \
-	if (CC_PAL_logMask & CC_PAL_LOG_CUR_COMPONENT) \
-		__CC_PAL_LOG_PLAT(CC_PAL_LOG_LEVEL_ ## level, "%s:%s: " format, CC_PAL_LOG_CUR_COMPONENT_NAME, __func__, ##__VA_ARGS__)
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_ERR)
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_ERR(format, ... ) \
-	_CC_PAL_LOG(ERR, format, ##__VA_ARGS__)
-#else
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_ERR( ... ) do {} while (0)
-#endif
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_WARN)
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_WARN(format, ... ) \
-	if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_WARN) \
-		_CC_PAL_LOG(WARN, format, ##__VA_ARGS__)
-#else
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_WARN( ... ) do {} while (0)
-#endif
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_INFO)
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_INFO(format, ... ) \
-	if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_INFO) \
-		_CC_PAL_LOG(INFO, format, ##__VA_ARGS__)
-#else
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_INFO( ... ) do {} while (0)
-#endif
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_DEBUG)
-/*! Log messages according to log level.*/
-#define CC_PAL_LOG_DEBUG(format, ... ) \
-	if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_DEBUG) \
-		_CC_PAL_LOG(DEBUG, format, ##__VA_ARGS__)
-
-/*! Log message buffer.*/
-#define CC_PAL_LOG_DUMP_BUF(msg, buf, size)		\
-	do {						\
-	int i;						\
-	uint8_t	*pData = (uint8_t*)buf;			\
-							\
-	PRINTF("%s (%d):\n", msg, size);		\
-	for (i = 0; i < size; i++) {			\
-		PRINTF("0x%02X ", pData[i]);		\
-		if ((i & 0xF) == 0xF) {			\
-			PRINTF("\n");			\
-		}					\
-	}						\
-	PRINTF("\n");					\
-	} while (0)
-#else
-/*! Log debug messages.*/
-#define CC_PAL_LOG_DEBUG( ... ) do {} while (0)
-/*! Log debug buffer.*/
-#define CC_PAL_LOG_DUMP_BUF(msg, buf, size)	do {} while (0)
-#endif
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE)
-/*! Log debug trace.*/
-#define CC_PAL_LOG_TRACE(format, ... ) \
-	if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \
-		_CC_PAL_LOG(TRACE, format, ##__VA_ARGS__)
-#else
-/*! Log debug trace.*/
-#define CC_PAL_LOG_TRACE(...) do {} while (0)
-#endif
-
-#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE)
-/*! Log debug data.*/
-#define CC_PAL_LOG_DATA(format, ...) \
-	if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \
-		_CC_PAL_LOG(DATA, format, ##__VA_ARGS__)
-#else
-/*! Log debug data.*/
-#define CC_PAL_LOG_DATA( ...) do {} while (0)
-#endif
-/** 
-@}
- */
-
-#endif /*_CC_PAL_LOG_H_*/
diff --git a/drivers/staging/ccree/cc_pal_log_plat.h b/drivers/staging/ccree/cc_pal_log_plat.h
deleted file mode 100644
index a05a200..0000000
--- a/drivers/staging/ccree/cc_pal_log_plat.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* Dummy pal_log_plat for test driver in kernel */
-
-#ifndef _SSI_PAL_LOG_PLAT_H_
-#define _SSI_PAL_LOG_PLAT_H_
-
-#if defined(DEBUG)
-
-#define __CC_PAL_LOG_PLAT(level, format, ...) printk(level "cc7x_test::" format , ##__VA_ARGS__)
-
-#else /* Disable all prints */
-
-#define __CC_PAL_LOG_PLAT(...)  do {} while (0)
-
-#endif
-
-#endif /*_SASI_PAL_LOG_PLAT_H_*/
-
diff --git a/drivers/staging/ccree/cc_pal_types.h b/drivers/staging/ccree/cc_pal_types.h
deleted file mode 100644
index 9b59bbb..0000000
--- a/drivers/staging/ccree/cc_pal_types.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef CC_PAL_TYPES_H
-#define CC_PAL_TYPES_H
-
-/*! 
-@file 
-@brief This file contains platform-dependent definitions and types. 
-@defgroup cc_pal_types CryptoCell PAL platform dependant types
-@{
-@ingroup cc_pal
-
-*/
- 
-#include "cc_pal_types_plat.h"
-
-/*! Boolean definition.*/
-typedef enum {
-	/*! Boolean false definition.*/
-	CC_FALSE = 0,
-	/*! Boolean true definition.*/
-	CC_TRUE = 1
-} CCBool;
-
-/*! Success definition. */
-#define CC_SUCCESS              0UL
-/*! Failure definition. */
-#define CC_FAIL		  	1UL
-
-/*! Defintion of 1KB in bytes. */
-#define CC_1K_SIZE_IN_BYTES	1024
-/*! Defintion of number of bits in a byte. */
-#define CC_BITS_IN_BYTE		8
-/*! Defintion of number of bits in a 32bits word. */
-#define CC_BITS_IN_32BIT_WORD	32
-/*! Defintion of number of bytes in a 32bits word. */
-#define CC_32BIT_WORD_SIZE	(sizeof(uint32_t))
-
-/*! Success (OK) defintion. */
-#define CC_OK   0
-
-/*! Macro that handles unused parameters in the code (to avoid compilation warnings).  */
-#define CC_UNUSED_PARAM(prm)  ((void)prm)
-
-/*! Maximal uint32 value.*/
-#define CC_MAX_UINT32_VAL 	(0xFFFFFFFF)
-
-
-/* Minimum and Maximum macros */
-#ifdef  min
-/*! Definition for minimum. */
-#define CC_MIN(a,b) min( a , b )
-#else
-/*! Definition for minimum. */
-#define CC_MIN( a , b ) ( ( (a) < (b) ) ? (a) : (b) )
-#endif
-
-#ifdef max    
-/*! Definition for maximum. */    
-#define CC_MAX(a,b) max( a , b )
-#else
-/*! Definition for maximum. */    
-#define CC_MAX( a , b ) ( ( (a) > (b) ) ? (a) : (b) )
-#endif
-
-/*! Macro that calculates number of full bytes from bits (i.e. 7 bits are 1 byte). */    
-#define CALC_FULL_BYTES(numBits) 		((numBits)/CC_BITS_IN_BYTE + (((numBits) & (CC_BITS_IN_BYTE-1)) > 0)) 
-/*! Macro that calculates number of full 32bits words from bits (i.e. 31 bits are 1 word). */    
-#define CALC_FULL_32BIT_WORDS(numBits) 		((numBits)/CC_BITS_IN_32BIT_WORD +  (((numBits) & (CC_BITS_IN_32BIT_WORD-1)) > 0))   
-/*! Macro that calculates number of full 32bits words from bytes (i.e. 3 bytes are 1 word). */    
-#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes)  ((sizeBytes)/CC_32BIT_WORD_SIZE + (((sizeBytes) & (CC_32BIT_WORD_SIZE-1)) > 0)) 
-/*! Macro that round up bits to 32bits words. */     
-#define ROUNDUP_BITS_TO_32BIT_WORD(numBits) 	(CALC_FULL_32BIT_WORDS(numBits) * CC_BITS_IN_32BIT_WORD)
-/*! Macro that round up bits to bytes. */    
-#define ROUNDUP_BITS_TO_BYTES(numBits) 		(CALC_FULL_BYTES(numBits) * CC_BITS_IN_BYTE)
-/*! Macro that round up bytes to 32bits words. */    
-#define ROUNDUP_BYTES_TO_32BIT_WORD(sizeBytes) 	(CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) * CC_32BIT_WORD_SIZE)     
-
-
-/** 
-@}
- */
-#endif
diff --git a/drivers/staging/ccree/cc_pal_types_plat.h b/drivers/staging/ccree/cc_pal_types_plat.h
deleted file mode 100644
index 6e42112..0000000
--- a/drivers/staging/ccree/cc_pal_types_plat.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
- 
-#ifndef SSI_PAL_TYPES_PLAT_H
-#define SSI_PAL_TYPES_PLAT_H
-/* Linux kernel types */
-
-#include <linux/types.h>
-
-#ifndef NULL /* Missing in Linux kernel */
-#define NULL (0x0L)
-#endif
-
-
-#endif /*SSI_PAL_TYPES_PLAT_H*/
diff --git a/drivers/staging/ccree/cc_regs.h b/drivers/staging/ccree/cc_regs.h
index 963f814..4a893a6 100644
--- a/drivers/staging/ccree/cc_regs.h
+++ b/drivers/staging/ccree/cc_regs.h
@@ -1,106 +1,42 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-
 /*!
- * @file 
- * @brief This file contains macro definitions for accessing ARM TrustZone CryptoCell register space.
+ * @file
+ * @brief This file contains macro definitions for accessing ARM TrustZone
+ *        CryptoCell register space.
  */
 
 #ifndef _CC_REGS_H_
 #define _CC_REGS_H_
 
-#include "cc_bitops.h"
+#include <linux/bitfield.h>
+
+#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
+#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+		DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+		DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
 
 /* Register Offset macro */
 #define CC_REG_OFFSET(unit_name, reg_name)               \
 	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
 
-#define CC_REG_BIT_SHIFT(reg_name, field_name)               \
-	(DX_ ## reg_name ## _ ## field_name ## _BIT_SHIFT)
-
-/* Register Offset macros (from registers base address in host) */
-#include "dx_reg_base_host.h"
-
-/* Read-Modify-Write a field of a register */
-#define MODIFY_REGISTER_FLD(unitName, regName, fldName, fldVal)         \
-do {								            \
-	uint32_t regVal;						    \
-	regVal = READ_REGISTER(CC_REG_ADDR(unitName, regName));       \
-	CC_REG_FLD_SET(unitName, regName, fldName, regVal, fldVal); \
-	WRITE_REGISTER(CC_REG_ADDR(unitName, regName), regVal);       \
-} while (0)
-
-/* Registers address macros for ENV registers (development FPGA only) */
-#ifdef DX_BASE_ENV_REGS
-
-/* This offset should be added to mapping address of DX_BASE_ENV_REGS */
-#define CC_ENV_REG_OFFSET(reg_name) (DX_ENV_ ## reg_name ## _REG_OFFSET)
-
-#endif /*DX_BASE_ENV_REGS*/
-
-/*! Bit fields get */
-#define CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val)	      \
-	(DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?	      \
-	reg_val /*!< \internal Optimization for 32b fields */ :			      \
-	BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
-		     DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
-
-/*! Bit fields access */
-#define CC_REG_FLD_GET2(unit_name, reg_name, fld_name, reg_val)	      \
-	(CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?	      \
-	reg_val /*!< \internal Optimization for 32b fields */ :			      \
-	BITFIELD_GET(reg_val, CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
-		     CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
-
-/* yael TBD !!! -       				      * 
-* all HW includes should start with CC_ and not DX_ !!	      */
-
-
-/*! Bit fields set */
-#define CC_REG_FLD_SET(                                               \
-	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)      \
-do {                                                                     \
-	if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)       \
-		reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
-	else                                                             \
-		BITFIELD_SET(reg_shadow_var,                             \
-			DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT,  \
-			DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,   \
-			new_fld_val);                                    \
-} while (0)
-
-/*! Bit fields set */
-#define CC_REG_FLD_SET2(                                               \
-	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)      \
-do {                                                                     \
-	if (CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)       \
-		reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
-	else                                                             \
-		BITFIELD_SET(reg_shadow_var,                             \
-			CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT,  \
-			CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,   \
-			new_fld_val);                                    \
-} while (0)
-
-/* Usage example:
-   uint32_t reg_shadow = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
-   CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
-   CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
-   WRITE_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
- */
-
 #endif /*_CC_REGS_H_*/
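With linux/bitfield.h pulled in, the new AXIM_MON_COMP_VALUE mask can be consumed with FIELD_GET(). A hypothetical read, assuming cc_base is the driver's ioremap()ed register base, might look like:

	u32 axim_mon = ioread32(cc_base + AXIM_MON_BASE_OFFSET);
	u32 completed = FIELD_GET(AXIM_MON_COMP_VALUE, axim_mon);	/* extract the completion counter field */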
diff --git a/drivers/staging/ccree/dx_crys_kernel.h b/drivers/staging/ccree/dx_crys_kernel.h
index 703469c4..2196030 100644
--- a/drivers/staging/ccree/dx_crys_kernel.h
+++ b/drivers/staging/ccree/dx_crys_kernel.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -20,161 +20,161 @@
 // --------------------------------------
 // BLOCK: DSCRPTR
 // --------------------------------------
-#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 	0xE00UL 
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 	0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 	0x6UL
-#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_SW_RESET_REG_OFFSET 	0xE40UL 
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 	0xE60UL 
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 	0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 	0xAUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 	0xCUL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 	0x16UL
-#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 	0x3UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 	0xE64UL 
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 	0xE68UL 
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 	0x20UL
-#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 	0xE80UL 
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 	0x20UL
-#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 	0xE84UL 
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 	0x18UL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 	0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 	0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 	0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 	0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 	0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 	0xE88UL 
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 	0x20UL
-#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 	0xE8CUL 
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 	0x18UL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 	0x1AUL
-#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 	0x1BUL
-#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 	0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 	0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 	0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 	0xE90UL 
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 	0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 	0x6UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 	0x7UL
-#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 	0x8UL
-#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 	0xAUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 	0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 	0xEUL
-#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 	0xFUL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 	0x11UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 	0x13UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 	0x14UL
-#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 	0x16UL
-#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 	0x2UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 	0x18UL
-#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 	0x4UL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 	0x1CUL
-#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 	0x1DUL
-#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 	0x1EUL
-#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 	0x1FUL
-#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 	0x1UL
-#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 	0xE94UL 
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 	0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 	0x10UL
-#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 	0x10UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 	0xE98UL 
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 	0xAUL
-#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 	0xE9CUL 
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 	0xAUL
+#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET	0xE00UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE	0x6UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT	0x6UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_SW_RESET_REG_OFFSET	0xE40UL
+#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET	0xE60UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE	0xAUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT	0xAUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE	0xCUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT	0x16UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE	0x3UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET	0xE64UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET	0xE68UL
+#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE	0x20UL
+#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET	0xE80UL
+#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE	0x20UL
+#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET	0xE84UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE	0x18UL
+#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT	0x1AUL
+#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT	0x1BUL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT	0x1CUL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT	0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT	0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET	0xE88UL
+#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE	0x20UL
+#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET	0xE8CUL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE	0x18UL
+#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT	0x1AUL
+#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT	0x1BUL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT	0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT	0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT	0x1FUL
+#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET	0xE90UL
+#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE	0x6UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT	0x6UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT	0x7UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT	0x8UL
+#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT	0xAUL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE	0x4UL
+#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT	0xEUL
+#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT	0xFUL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT	0x11UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT	0x13UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT	0x14UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT	0x16UL
+#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE	0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT	0x18UL
+#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE	0x4UL
+#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT	0x1CUL
+#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT	0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT	0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT	0x1FUL
+#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE	0x1UL
+#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET	0xE94UL
+#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE	0x10UL
+#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT	0x10UL
+#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE	0x10UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET	0xE98UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE	0xAUL
+#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET	0xE9CUL
+#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT	0x0UL
+#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE	0xAUL
 // --------------------------------------
 // BLOCK: AXI_P
 // --------------------------------------
-#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 	0xB00UL 
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 	0x8UL
-#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 	0xB40UL 
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 	0x0UL
-#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 	0x8UL
-#define DX_AXIM_MON_COMP_REG_OFFSET 	0xB80UL 
-#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 	0x0UL
-#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 	0x10UL
-#define DX_AXIM_MON_ERR_REG_OFFSET 	0xBC4UL 
-#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 	0x0UL
-#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 	0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 	0x2UL
-#define DX_AXIM_MON_ERR_BID_BIT_SIZE 	0x4UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT 	0x10UL
-#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 	0x2UL
-#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 	0x12UL
-#define DX_AXIM_MON_ERR_RID_BIT_SIZE 	0x4UL
-#define DX_AXIM_CFG_REG_OFFSET 	0xBE8UL 
-#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 	0x4UL
-#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 	0x1UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 	0x5UL
-#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE 	0x1UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT 	0x6UL
-#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 	0x1UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 	0x7UL
-#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 	0x1UL
-#define DX_AXIM_ACE_CONST_REG_OFFSET 	0xBECUL 
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 	0x0UL
-#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 	0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 	0x2UL
-#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 	0x2UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 	0x4UL
-#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE 	0x2UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 	0x6UL
-#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE 	0x2UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 	0x8UL
-#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 	0x4UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 	0xCUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 	0x3UL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 	0xFUL
-#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 	0x3UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 	0x12UL
-#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 	0x7UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 	0x19UL
-#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 	0x4UL
-#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 	0xBF0UL 
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 	0x0UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 	0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 	0x4UL
-#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 	0x4UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 	0x8UL
-#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 	0x4UL
+#define DX_AXIM_MON_INFLIGHT_REG_OFFSET	0xB00UL
+#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT	0x0UL
+#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE	0x8UL
+#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET	0xB40UL
+#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT	0x0UL
+#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE	0x8UL
+#define DX_AXIM_MON_COMP_REG_OFFSET	0xB80UL
+#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT	0x0UL
+#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE	0x10UL
+#define DX_AXIM_MON_ERR_REG_OFFSET	0xBC4UL
+#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT	0x0UL
+#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE	0x2UL
+#define DX_AXIM_MON_ERR_BID_BIT_SHIFT	0x2UL
+#define DX_AXIM_MON_ERR_BID_BIT_SIZE	0x4UL
+#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT	0x10UL
+#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE	0x2UL
+#define DX_AXIM_MON_ERR_RID_BIT_SHIFT	0x12UL
+#define DX_AXIM_MON_ERR_RID_BIT_SIZE	0x4UL
+#define DX_AXIM_CFG_REG_OFFSET	0xBE8UL
+#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT	0x4UL
+#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE	0x1UL
+#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT	0x5UL
+#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE	0x1UL
+#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT	0x6UL
+#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE	0x1UL
+#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT	0x7UL
+#define DX_AXIM_CFG_COMPMASK_BIT_SIZE	0x1UL
+#define DX_AXIM_ACE_CONST_REG_OFFSET	0xBECUL
+#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT	0x0UL
+#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE	0x2UL
+#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT	0x2UL
+#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE	0x2UL
+#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT	0x4UL
+#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE	0x2UL
+#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT	0x6UL
+#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE	0x2UL
+#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT	0x8UL
+#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE	0x4UL
+#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT	0xCUL
+#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE	0x3UL
+#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT	0xFUL
+#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE	0x3UL
+#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT	0x12UL
+#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE	0x7UL
+#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT	0x19UL
+#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE	0x4UL
+#define DX_AXIM_CACHE_PARAMS_REG_OFFSET	0xBF0UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT	0x0UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE	0x4UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT	0x4UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE	0x4UL
+#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT	0x8UL
+#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE	0x4UL
 #endif	// __DX_CRYS_KERNEL_H__
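These raw SHIFT/SIZE pairs are typically folded into GENMASK()-style masks usable with FIELD_PREP()/FIELD_GET(); as a hypothetical illustration (the WORD3_DOUT_SIZE name simply mirrors the mask used in cc_hw_queue_defs.h above):

	#define WORD3_DOUT_SIZE GENMASK(DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT + \
					DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE - 1, \
					DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT)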
diff --git a/drivers/staging/ccree/dx_env.h b/drivers/staging/ccree/dx_env.h
deleted file mode 100644
index 0804060..0000000
--- a/drivers/staging/ccree/dx_env.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_ENV_H__
-#define __DX_ENV_H__
-
-// --------------------------------------
-// BLOCK: FPGA_ENV_REGS
-// --------------------------------------
-#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET 	0x024UL 
-#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_SCAN_MODE_REG_OFFSET 	0x030UL 
-#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET 	0x034UL 
-#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_HOST_INT_REG_OFFSET 	0x0A0UL 
-#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_PUB_HOST_INT_REG_OFFSET 	0x0A4UL 
-#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_RST_N_REG_OFFSET 	0x0A8UL 
-#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_RST_OVERRIDE_REG_OFFSET 	0x0ACUL 
-#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET 	0x0E0UL 
-#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_COLD_RST_REG_OFFSET 	0x0FCUL 
-#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_DUMMY_ADDR_REG_OFFSET 	0x108UL 
-#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_COUNTER_CLR_REG_OFFSET 	0x118UL 
-#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_COUNTER_RD_REG_OFFSET 	0x11CUL 
-#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET 	0x430UL 
-#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_LCS_REG_OFFSET 	0x43CUL 
-#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_LCS_VALUE_BIT_SIZE 	0x8UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET 	0x440UL 
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT 	0x1UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT 	0x2UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT 	0x3UL
-#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE 	0x1UL
-#define DX_ENV_DCU_EN_REG_OFFSET 	0x444UL 
-#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_DCU_EN_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET 	0x448UL 
-#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_POWER_DOWN_REG_OFFSET 	0x478UL 
-#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_DCU_H_EN_REG_OFFSET 	0x484UL 
-#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_VERSION_REG_OFFSET 	0x488UL 
-#define DX_ENV_VERSION_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_VERSION_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_ROSC_WRITE_REG_OFFSET 	0x48CUL 
-#define DX_ENV_ROSC_WRITE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_ROSC_WRITE_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_ROSC_ADDR_REG_OFFSET 	0x490UL 
-#define DX_ENV_ROSC_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_ROSC_ADDR_VALUE_BIT_SIZE 	0x8UL
-#define DX_ENV_RESET_SESSION_KEY_REG_OFFSET 	0x494UL 
-#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_SESSION_KEY_0_REG_OFFSET 	0x4A0UL 
-#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_SESSION_KEY_1_REG_OFFSET 	0x4A4UL 
-#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_SESSION_KEY_2_REG_OFFSET 	0x4A8UL 
-#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_SESSION_KEY_3_REG_OFFSET 	0x4ACUL 
-#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_SESSION_KEY_VALID_REG_OFFSET 	0x4B0UL 
-#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_SPIDEN_REG_OFFSET 	0x4D0UL 
-#define DX_ENV_SPIDEN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_SPIDEN_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_AXIM_USER_PARAMS_REG_OFFSET 	0x600UL 
-#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SHIFT 	0x0UL
-#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SIZE 	0x5UL
-#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SHIFT 	0x5UL
-#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SIZE 	0x5UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_REG_OFFSET 	0x604UL 
-#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SHIFT 	0x0UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SIZE 	0x1UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SHIFT 	0x1UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SIZE 	0x1UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SHIFT 	0x2UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SIZE 	0x1UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SHIFT 	0x3UL
-#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SIZE 	0x1UL
-#define DX_ENV_AO_CC_KPLT_0_REG_OFFSET 	0x620UL 
-#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KPLT_1_REG_OFFSET 	0x624UL 
-#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KPLT_2_REG_OFFSET 	0x628UL 
-#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KPLT_3_REG_OFFSET 	0x62CUL 
-#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KCST_0_REG_OFFSET 	0x630UL 
-#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KCST_1_REG_OFFSET 	0x634UL 
-#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KCST_2_REG_OFFSET 	0x638UL 
-#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_AO_CC_KCST_3_REG_OFFSET 	0x63CUL 
-#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APB_FIPS_ADDR_REG_OFFSET 	0x650UL 
-#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SIZE 	0xCUL
-#define DX_ENV_APB_FIPS_VAL_REG_OFFSET 	0x654UL 
-#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APB_FIPS_MASK_REG_OFFSET 	0x658UL 
-#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APB_FIPS_CNT_REG_OFFSET 	0x65CUL 
-#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APB_FIPS_NEW_ADDR_REG_OFFSET 	0x660UL 
-#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SIZE 	0xCUL
-#define DX_ENV_APB_FIPS_NEW_VAL_REG_OFFSET 	0x664UL 
-#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APBP_FIPS_ADDR_REG_OFFSET 	0x670UL 
-#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SIZE 	0xCUL
-#define DX_ENV_APBP_FIPS_VAL_REG_OFFSET 	0x674UL 
-#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APBP_FIPS_MASK_REG_OFFSET 	0x678UL 
-#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APBP_FIPS_CNT_REG_OFFSET 	0x67CUL 
-#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_APBP_FIPS_NEW_ADDR_REG_OFFSET 	0x680UL 
-#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SIZE 	0xCUL
-#define DX_ENV_APBP_FIPS_NEW_VAL_REG_OFFSET 	0x684UL 
-#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_CC_POWERDOWN_EN_REG_OFFSET 	0x690UL 
-#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_CC_POWERDOWN_RST_EN_REG_OFFSET 	0x694UL 
-#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_POWERDOWN_RST_CNTR_REG_OFFSET 	0x698UL 
-#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SIZE 	0x20UL
-#define DX_ENV_POWERDOWN_EN_DEBUG_REG_OFFSET 	0x69CUL 
-#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SIZE 	0x1UL
-// --------------------------------------
-// BLOCK: ENV_CC_MEMORIES
-// --------------------------------------
-#define DX_ENV_FUSE_READY_REG_OFFSET 	0x000UL 
-#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET 	0x0ECUL 
-#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE 	0x1UL
-#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET 	0x0F0UL 
-#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE 	0x2UL
-#define DX_ENV_FUSES_RAM_REG_OFFSET 	0x3ECUL 
-#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE 	0x20UL
-// --------------------------------------
-// BLOCK: ENV_PERF_RAM_BASE
-// --------------------------------------
-#define DX_ENV_PERF_RAM_BASE_REG_OFFSET 	0x000UL 
-#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE 	0x20UL
-
-#endif /*__DX_ENV_H__*/
diff --git a/drivers/staging/ccree/dx_host.h b/drivers/staging/ccree/dx_host.h
index 4e42e74..863c267 100644
--- a/drivers/staging/ccree/dx_host.h
+++ b/drivers/staging/ccree/dx_host.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -20,136 +20,136 @@
 // --------------------------------------
 // BLOCK: HOST_P
 // --------------------------------------
-#define DX_HOST_IRR_REG_OFFSET 	0xA00UL 
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 	0x2UL
-#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 	0x1UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 	0x8UL
-#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE 	0x1UL
-#define DX_HOST_IRR_GPR0_BIT_SHIFT 	0xBUL
-#define DX_HOST_IRR_GPR0_BIT_SIZE 	0x1UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 	0x13UL
-#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 	0x1UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 	0x17UL
-#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_REG_OFFSET 	0xA04UL 
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 	0x1UL
-#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 	0x2UL
-#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 	0x8UL
-#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_GPR0_BIT_SHIFT 	0xBUL
-#define DX_HOST_IMR_GPR0_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 	0x13UL
-#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 	0x1UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 	0x17UL
-#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 	0x1UL
-#define DX_HOST_ICR_REG_OFFSET 	0xA08UL 
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 	0x2UL
-#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 	0x1UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 	0x8UL
-#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 	0x1UL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 	0xBUL
-#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 	0x1UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 	0x13UL
-#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 	0x1UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 	0x17UL
-#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 	0x1UL
-#define DX_HOST_SIGNATURE_REG_OFFSET 	0xA24UL 
-#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 	0x20UL
-#define DX_HOST_BOOT_REG_OFFSET 	0xA28UL 
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 	0x0UL
-#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 	0x1UL
-#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 	0x2UL
-#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 	0x3UL
-#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 	0x5UL
-#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 	0x6UL
-#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 	0x3UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 	0x9UL
-#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 	0xAUL
-#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 	0xBUL
-#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 	0xCUL
-#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 	0xDUL
-#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 	0xEUL
-#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 	0xFUL
-#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 	0x10UL
-#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 	0x11UL
-#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 	0x12UL
-#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 	0x13UL
-#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 	0x14UL
-#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 	0x15UL
-#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 	0x16UL
-#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 	0x17UL
-#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 	0x18UL
-#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 	0x19UL
-#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 	0x1AUL
-#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 	0x1BUL
-#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 	0x1CUL
-#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 	0x1DUL
-#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 	0x1EUL
-#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 	0x1UL
-#define DX_HOST_VERSION_REG_OFFSET 	0xA40UL 
-#define DX_HOST_VERSION_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_VERSION_VALUE_BIT_SIZE 	0x20UL
-#define DX_HOST_KFDE0_VALID_REG_OFFSET 	0xA60UL 
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_HOST_KFDE1_VALID_REG_OFFSET 	0xA64UL 
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_HOST_KFDE2_VALID_REG_OFFSET 	0xA68UL 
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_HOST_KFDE3_VALID_REG_OFFSET 	0xA6CUL 
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 	0x1UL
-#define DX_HOST_GPR0_REG_OFFSET 	0xA70UL 
-#define DX_HOST_GPR0_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_GPR0_VALUE_BIT_SIZE 	0x20UL
-#define DX_GPR_HOST_REG_OFFSET 	0xA74UL 
-#define DX_GPR_HOST_VALUE_BIT_SHIFT 	0x0UL
-#define DX_GPR_HOST_VALUE_BIT_SIZE 	0x20UL
-#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 	0xA78UL 
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 	0x0UL
-#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 	0x1UL
+#define DX_HOST_IRR_REG_OFFSET	0xA00UL
+#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT	0x2UL
+#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE	0x1UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT	0x8UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE	0x1UL
+#define DX_HOST_IRR_GPR0_BIT_SHIFT	0xBUL
+#define DX_HOST_IRR_GPR0_BIT_SIZE	0x1UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT	0x13UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE	0x1UL
+#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT	0x17UL
+#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_REG_OFFSET	0xA04UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT	0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT	0x2UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT	0x8UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_GPR0_BIT_SHIFT	0xBUL
+#define DX_HOST_IMR_GPR0_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT	0x13UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE	0x1UL
+#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT	0x17UL
+#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE	0x1UL
+#define DX_HOST_ICR_REG_OFFSET	0xA08UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT	0x2UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE	0x1UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT	0x8UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE	0x1UL
+#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT	0xBUL
+#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE	0x1UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT	0x13UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE	0x1UL
+#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT	0x17UL
+#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE	0x1UL
+#define DX_HOST_SIGNATURE_REG_OFFSET	0xA24UL
+#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE	0x20UL
+#define DX_HOST_BOOT_REG_OFFSET	0xA28UL
+#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT	0x0UL
+#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT	0x1UL
+#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT	0x2UL
+#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT	0x3UL
+#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT	0x5UL
+#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT	0x6UL
+#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE	0x3UL
+#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT	0x9UL
+#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT	0xAUL
+#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT	0xBUL
+#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT	0xCUL
+#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT	0xDUL
+#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT	0xEUL
+#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT	0xFUL
+#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT	0x10UL
+#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT	0x11UL
+#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT	0x12UL
+#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT	0x13UL
+#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT	0x14UL
+#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT	0x15UL
+#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT	0x16UL
+#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT	0x17UL
+#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT	0x18UL
+#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT	0x19UL
+#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT	0x1AUL
+#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT	0x1BUL
+#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT	0x1CUL
+#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT	0x1DUL
+#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT	0x1EUL
+#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE	0x1UL
+#define DX_HOST_VERSION_REG_OFFSET	0xA40UL
+#define DX_HOST_VERSION_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_VERSION_VALUE_BIT_SIZE	0x20UL
+#define DX_HOST_KFDE0_VALID_REG_OFFSET	0xA60UL
+#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE	0x1UL
+#define DX_HOST_KFDE1_VALID_REG_OFFSET	0xA64UL
+#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE	0x1UL
+#define DX_HOST_KFDE2_VALID_REG_OFFSET	0xA68UL
+#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE	0x1UL
+#define DX_HOST_KFDE3_VALID_REG_OFFSET	0xA6CUL
+#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE	0x1UL
+#define DX_HOST_GPR0_REG_OFFSET	0xA70UL
+#define DX_HOST_GPR0_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_GPR0_VALUE_BIT_SIZE	0x20UL
+#define DX_GPR_HOST_REG_OFFSET	0xA74UL
+#define DX_GPR_HOST_VALUE_BIT_SHIFT	0x0UL
+#define DX_GPR_HOST_VALUE_BIT_SIZE	0x20UL
+#define DX_HOST_POWER_DOWN_EN_REG_OFFSET	0xA78UL
+#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT	0x0UL
+#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE	0x1UL
 // --------------------------------------
 // BLOCK: HOST_SRAM
 // --------------------------------------
-#define DX_SRAM_DATA_REG_OFFSET 	0xF00UL 
-#define DX_SRAM_DATA_VALUE_BIT_SHIFT 	0x0UL
-#define DX_SRAM_DATA_VALUE_BIT_SIZE 	0x20UL
-#define DX_SRAM_ADDR_REG_OFFSET 	0xF04UL 
-#define DX_SRAM_ADDR_VALUE_BIT_SHIFT 	0x0UL
-#define DX_SRAM_ADDR_VALUE_BIT_SIZE 	0xFUL
-#define DX_SRAM_DATA_READY_REG_OFFSET 	0xF08UL 
-#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 	0x0UL
-#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 	0x1UL
+#define DX_SRAM_DATA_REG_OFFSET	0xF00UL
+#define DX_SRAM_DATA_VALUE_BIT_SHIFT	0x0UL
+#define DX_SRAM_DATA_VALUE_BIT_SIZE	0x20UL
+#define DX_SRAM_ADDR_REG_OFFSET	0xF04UL
+#define DX_SRAM_ADDR_VALUE_BIT_SHIFT	0x0UL
+#define DX_SRAM_ADDR_VALUE_BIT_SIZE	0xFUL
+#define DX_SRAM_DATA_READY_REG_OFFSET	0xF08UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT	0x0UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE	0x1UL
 
 #endif //__DX_HOST_H__
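
The SHIFT/SIZE pairs above describe register bit-fields rather than values to be
used directly. As a rough, kernel-style sketch only (the helper name below is an
assumption, not part of the ccree driver), a field is normally extracted like this:

static inline u32 dx_field_get(u32 reg_val, u32 shift, u32 size)
{
	/* Build a mask of 'size' bits; size == 0x20 covers the whole word. */
	u32 mask = (size >= 32) ? 0xFFFFFFFFUL : ((1UL << size) - 1);

	return (reg_val >> shift) & mask;
}

/* For example, testing the AXI error bit in a value read from DX_HOST_IRR:
 *	dx_field_get(irr, DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT,
 *		     DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE)
 */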
diff --git a/drivers/staging/ccree/dx_reg_base_host.h b/drivers/staging/ccree/dx_reg_base_host.h
index 58dafe0..47bbadb 100644
--- a/drivers/staging/ccree/dx_reg_base_host.h
+++ b/drivers/staging/ccree/dx_reg_base_host.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,16 +17,7 @@
 #ifndef __DX_REG_BASE_HOST_H__
 #define __DX_REG_BASE_HOST_H__
 
-/* Identify platform: Xilinx Zynq7000 ZC706 */
-#define DX_PLAT_ZYNQ7000 1
-#define DX_PLAT_ZYNQ7000_ZC706 1
-
 #define DX_BASE_CC 0x80000000
-
-#define DX_BASE_ENV_REGS 0x40008000
-#define DX_BASE_ENV_CC_MEMORIES 0x40008000
-#define DX_BASE_ENV_PERF_RAM 0x40009000
-
 #define DX_BASE_HOST_RGF 0x0UL
 #define DX_BASE_CRY_KERNEL     0x0UL
 #define DX_BASE_ROM     0x40000000
diff --git a/drivers/staging/ccree/dx_reg_common.h b/drivers/staging/ccree/dx_reg_common.h
index 4ffed38..d5132ff 100644
--- a/drivers/staging/ccree/dx_reg_common.h
+++ b/drivers/staging/ccree/dx_reg_common.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -19,7 +19,7 @@
 
 #define DX_DEV_SIGNATURE 0xDCC71200UL
 
-#define CC_HW_VERSION 0xef840015UL 
+#define CC_HW_VERSION 0xef840015UL
 
 #define DX_DEV_SHA_MAX 512
 
diff --git a/drivers/staging/ccree/hash_defs.h b/drivers/staging/ccree/hash_defs.h
index 5ab0861..f52656f 100644
--- a/drivers/staging/ccree/hash_defs.h
+++ b/drivers/staging/ccree/hash_defs.h
@@ -1,78 +1,36 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef  _HASH_DEFS_H__
-#define  _HASH_DEFS_H__
+#ifndef _HASH_DEFS_H_
+#define _HASH_DEFS_H_
 
 #include "cc_crypto_ctx.h"
 
-/* this files provides definitions required for hash engine drivers */
-#ifndef CC_CONFIG_HASH_SHA_512_SUPPORTED
-#define SEP_HASH_LENGTH_WORDS		2
-#else
-#define SEP_HASH_LENGTH_WORDS		4
-#endif
-
-#ifdef BIG__ENDIAN
-#define OPAD_CURRENT_LENGTH 0x40000000, 0x00000000 , 0x00000000, 0x00000000
-#define HASH_LARVAL_MD5  0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
-#define HASH_LARVAL_SHA1 0xF0E1D2C3, 0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
-#define HASH_LARVAL_SHA224 0XA44FFABE, 0XA78FF964, 0X11155868, 0X310BC0FF, 0X39590EF7, 0X17DD7030, 0X07D57C36, 0XD89E05C1
-#define HASH_LARVAL_SHA256 0X19CDE05B, 0XABD9831F, 0X8C68059B, 0X7F520E51, 0X3AF54FA5, 0X72F36E3C, 0X85AE67BB, 0X67E6096A
-#define HASH_LARVAL_SHA384 0X1D48B547, 0XA44FFABE, 0X0D2E0CDB, 0XA78FF964, 0X874AB48E, 0X11155868, 0X67263367, 0X310BC0FF, 0XD8EC2F15, 0X39590EF7, 0X5A015991, 0X17DD7030, 0X2A299A62, 0X07D57C36, 0X5D9DBBCB, 0XD89E05C1
-#define HASH_LARVAL_SHA512 0X19CDE05B, 0X79217E13, 0XABD9831F, 0X6BBD41FB, 0X8C68059B, 0X1F6C3E2B, 0X7F520E51, 0XD182E6AD, 0X3AF54FA5, 0XF1361D5F, 0X72F36E3C, 0X2BF894FE, 0X85AE67BB, 0X3BA7CA84, 0X67E6096A, 0X08C9BCF3
-#else
-#define OPAD_CURRENT_LENGTH 0x00000040, 0x00000000, 0x00000000, 0x00000000
-#define HASH_LARVAL_MD5  0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
-#define HASH_LARVAL_SHA1 0xC3D2E1F0, 0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
-#define HASH_LARVAL_SHA224 0xbefa4fa4, 0x64f98fa7, 0x68581511, 0xffc00b31, 0xf70e5939, 0x3070dd17, 0x367cd507, 0xc1059ed8
-#define HASH_LARVAL_SHA256 0x5be0cd19, 0x1f83d9ab, 0x9b05688c, 0x510e527f, 0xa54ff53a, 0x3c6ef372, 0xbb67ae85, 0x6a09e667
-#define HASH_LARVAL_SHA384 0X47B5481D, 0XBEFA4FA4, 0XDB0C2E0D, 0X64F98FA7, 0X8EB44A87, 0X68581511, 0X67332667, 0XFFC00B31, 0X152FECD8, 0XF70E5939, 0X9159015A, 0X3070DD17, 0X629A292A, 0X367CD507, 0XCBBB9D5D, 0XC1059ED8
-#define HASH_LARVAL_SHA512 0x5be0cd19, 0x137e2179, 0x1f83d9ab, 0xfb41bd6b, 0x9b05688c, 0x2b3e6c1f, 0x510e527f, 0xade682d1, 0xa54ff53a, 0x5f1d36f1, 0x3c6ef372, 0xfe94f82b, 0xbb67ae85, 0x84caa73b, 0x6a09e667, 0xf3bcc908
-#endif
-
-enum HashConfig1Padding {
+enum cc_hash_conf_pad {
 	HASH_PADDING_DISABLED = 0,
 	HASH_PADDING_ENABLED = 1,
 	HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
-	HASH_CONFIG1_PADDING_RESERVE32 = INT32_MAX,
+	HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
 };
 
-enum HashCipherDoPadding {
+enum cc_hash_cipher_pad {
 	DO_NOT_PAD = 0,
 	DO_PAD = 1,
-	HASH_CIPHER_DO_PADDING_RESERVE32 = INT32_MAX,
+	HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
 };
 
-typedef struct SepHashPrivateContext {
-	/* The current length is placed at the end of the context buffer because the hash 
-	   context is used for all HMAC operations as well. HMAC context includes a 64 bytes 
-	   K0 field.  The size of struct drv_ctx_hash reserved field is  88/184 bytes depend if t
-	   he SHA512 is supported ( in this case teh context size is 256 bytes).
-	   The size of struct drv_ctx_hash reseved field is 20 or 52 depend if the SHA512 is supported.
-	   This means that this structure size (without the reserved field can be up to 20 bytes ,
-	   in case sha512 is not suppported it is 20 bytes (SEP_HASH_LENGTH_WORDS define to 2 ) and in the other
-	   case it is 28 (SEP_HASH_LENGTH_WORDS define to 4) */
-	uint32_t reserved[(sizeof(struct drv_ctx_hash)/sizeof(uint32_t)) - SEP_HASH_LENGTH_WORDS - 3];
-	uint32_t CurrentDigestedLength[SEP_HASH_LENGTH_WORDS];
-	uint32_t KeyType;
-	uint32_t dataCompleted;
-	uint32_t hmacFinalization;
-	/* no space left */
-} SepHashPrivateContext_s;
-
-#endif /*_HASH_DEFS_H__*/
+#endif /*_HASH_DEFS_H_*/
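
The *_RESERVE32 = S32_MAX enumerators kept above are a common idiom for forcing
the enum's range (and hence its storage) to at least 32 bits, so the values can be
packed into 32-bit hardware descriptor words. A minimal sketch of the idiom, with
hypothetical names:

enum example_hw_mode {
	EXAMPLE_MODE_OFF = 0,
	EXAMPLE_MODE_ON = 1,
	/* Largest signed 32-bit value: keeps the underlying type >= 32 bits. */
	EXAMPLE_MODE_RESERVE32 = 0x7FFFFFFF,
};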
 
diff --git a/drivers/staging/ccree/hw_queue_defs_plat.h b/drivers/staging/ccree/hw_queue_defs_plat.h
deleted file mode 100644
index aee02cc..0000000
--- a/drivers/staging/ccree/hw_queue_defs_plat.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __HW_QUEUE_DEFS_PLAT_H__
-#define __HW_QUEUE_DEFS_PLAT_H__
-
-
-/*****************************/
-/* Descriptor packing macros */
-/*****************************/
-
-#define HW_QUEUE_FREE_SLOTS_GET() (CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT)) & HW_QUEUE_SLOTS_MAX)
-
-#define HW_QUEUE_POLL_QUEUE_UNTIL_FREE_SLOTS(seqLen)						\
-	do {											\
-	} while (HW_QUEUE_FREE_SLOTS_GET() < (seqLen))
-
-#define HW_DESC_PUSH_TO_QUEUE(pDesc) do {        				  \
-	LOG_HW_DESC(pDesc);							  \
-	HW_DESC_DUMP(pDesc);							  \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(0), (pDesc)->word[0]); \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(1), (pDesc)->word[1]); \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(2), (pDesc)->word[2]); \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(3), (pDesc)->word[3]); \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(4), (pDesc)->word[4]); \
-	wmb();									   \
-	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(5), (pDesc)->word[5]); \
-} while (0)
-
-#endif /*__HW_QUEUE_DEFS_PLAT_H__*/
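
The deleted HW_DESC_PUSH_TO_QUEUE macro above issued wmb() before writing the final
descriptor word; presumably the write of word 5 is what hands the descriptor to the
engine, so the barrier keeps words 0-4 ordered ahead of that trigger. A simplified,
kernel-style sketch of the pattern (names and register layout are illustrative, not
the ccree API):

static void push_hw_desc(void __iomem *queue_base, const u32 word[6])
{
	int i;

	for (i = 0; i < 5; i++)
		writel_relaxed(word[i], queue_base + i * sizeof(u32));

	wmb();	/* words 0-4 must be visible before the triggering write */
	writel_relaxed(word[5], queue_base + 5 * sizeof(u32));
}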
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 0382917..1fc0b05 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -49,9 +49,8 @@
 #define AES_CCM_RFC4309_NONCE_SIZE 3
 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
 
-
 /* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01	
+#define ICV_VERIF_OK 0x01
 
 struct ssi_aead_handle {
 	ssi_sram_addr_t sram_workspace_addr;
@@ -60,18 +59,18 @@ struct ssi_aead_handle {
 
 struct ssi_aead_ctx {
 	struct ssi_drvdata *drvdata;
-	uint8_t ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
-	uint8_t *enckey;
+	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
+	u8 *enckey;
 	dma_addr_t enckey_dma_addr;
 	union {
 		struct {
-			uint8_t *padded_authkey;
-			uint8_t *ipad_opad; /* IPAD, OPAD*/
+			u8 *padded_authkey;
+			u8 *ipad_opad; /* IPAD, OPAD*/
 			dma_addr_t padded_authkey_dma_addr;
 			dma_addr_t ipad_opad_dma_addr;
 		} hmac;
 		struct {
-			uint8_t *xcbc_keys; /* K1,K2,K3 */
+			u8 *xcbc_keys; /* K1,K2,K3 */
 			dma_addr_t xcbc_keys_dma_addr;
 		} xcbc;
 	} auth_state;
@@ -79,7 +78,7 @@ struct ssi_aead_ctx {
 	unsigned int auth_keylen;
 	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
 	enum drv_cipher_mode cipher_mode;
-	enum FlowMode flow_mode;
+	enum cc_flow_mode flow_mode;
 	enum drv_hash_mode auth_mode;
 };
 
@@ -96,23 +95,20 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 	SSI_LOG_DEBUG("Clearing context @%p for %s\n",
 		crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
 
- 	dev = &ctx->drvdata->plat_dev->dev;
+	dev = &ctx->drvdata->plat_dev->dev;
 	/* Unmap enckey buffer */
-	if (ctx->enckey != NULL) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr);
+	if (ctx->enckey) {
 		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
 		SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
 			(unsigned long long)ctx->enckey_dma_addr);
 		ctx->enckey_dma_addr = 0;
 		ctx->enckey = NULL;
 	}
-	
+
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
-		if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(
-				ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+		if (ctx->auth_state.xcbc.xcbc_keys) {
 			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
-				ctx->auth_state.xcbc.xcbc_keys, 
+				ctx->auth_state.xcbc.xcbc_keys,
 				ctx->auth_state.xcbc.xcbc_keys_dma_addr);
 		}
 		SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
@@ -120,9 +116,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 		ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
 		ctx->auth_state.xcbc.xcbc_keys = NULL;
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
-		if (ctx->auth_state.hmac.ipad_opad != NULL) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(
-				ctx->auth_state.hmac.ipad_opad_dma_addr);
+		if (ctx->auth_state.hmac.ipad_opad) {
 			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
 				ctx->auth_state.hmac.ipad_opad,
 				ctx->auth_state.hmac.ipad_opad_dma_addr);
@@ -131,9 +125,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
 			ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
 			ctx->auth_state.hmac.ipad_opad = NULL;
 		}
-		if (ctx->auth_state.hmac.padded_authkey != NULL) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(
-				ctx->auth_state.hmac.padded_authkey_dma_addr);
+		if (ctx->auth_state.hmac.padded_authkey) {
 			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
 				ctx->auth_state.hmac.padded_authkey,
 				ctx->auth_state.hmac.padded_authkey_dma_addr);
@@ -162,16 +154,15 @@ static int ssi_aead_init(struct crypto_aead *tfm)
 	ctx->auth_mode = ssi_alg->auth_mode;
 	ctx->drvdata = ssi_alg->drvdata;
 	dev = &ctx->drvdata->plat_dev->dev;
-	crypto_aead_set_reqsize(tfm,sizeof(struct aead_req_ctx));
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
 
 	/* Allocate key buffer, cache line aligned */
 	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
 		&ctx->enckey_dma_addr, GFP_KERNEL);
-	if (ctx->enckey == NULL) {
+	if (!ctx->enckey) {
 		SSI_LOG_ERR("Failed allocating key buffer\n");
 		goto init_failed;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr, AES_MAX_KEY_SIZE);
 	SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
 
 	/* Set default authlen value */
@@ -182,38 +173,29 @@ static int ssi_aead_init(struct crypto_aead *tfm)
 		ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
 			CC_AES_128_BIT_KEY_SIZE * 3,
 			&ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
-		if (ctx->auth_state.xcbc.xcbc_keys == NULL) {
+		if (!ctx->auth_state.xcbc.xcbc_keys) {
 			SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
 			goto init_failed;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(
-			ctx->auth_state.xcbc.xcbc_keys_dma_addr,
-			CC_AES_128_BIT_KEY_SIZE * 3);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
 		/* Allocate dma-coherent buffer for IPAD + OPAD */
 		ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
 			2 * MAX_HMAC_DIGEST_SIZE,
 			&ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
-		if (ctx->auth_state.hmac.ipad_opad == NULL) {
+		if (!ctx->auth_state.hmac.ipad_opad) {
 			SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
 			goto init_failed;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(
-			ctx->auth_state.hmac.ipad_opad_dma_addr,
-			2 * MAX_HMAC_DIGEST_SIZE);
 		SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
 			ctx->auth_state.hmac.ipad_opad);
-	
+
 		ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
 			MAX_HMAC_BLOCK_SIZE,
 			&ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
-		if (ctx->auth_state.hmac.padded_authkey == NULL) {
+		if (!ctx->auth_state.hmac.padded_authkey) {
 			SSI_LOG_ERR("failed to allocate padded_authkey\n");
 			goto init_failed;
-		}	
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(
-			ctx->auth_state.hmac.padded_authkey_dma_addr,
-			MAX_HMAC_BLOCK_SIZE);
+		}
 	} else {
 		ctx->auth_state.hmac.ipad_opad = NULL;
 		ctx->auth_state.hmac.padded_authkey = NULL;
@@ -225,7 +207,6 @@ static int ssi_aead_init(struct crypto_aead *tfm)
 	ssi_aead_exit(tfm);
 	return -ENOMEM;
 }
- 
 
 static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
 {
@@ -234,9 +215,6 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
 	struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	int err = 0;
-	DECL_CYCLE_COUNT_RESOURCES;
-
-	START_CYCLE_COUNT();
 
 	ssi_buffer_mgr_unmap_aead_request(dev, areq);
 
@@ -250,72 +228,75 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
 				"(auth-size=%d, cipher=%d).\n",
 				ctx->authsize, ctx->cipher_mode);
 			/* In case of payload authentication failure, MUST NOT
-			   revealed the decrypted message --> zero its memory. */
+			 * reveal the decrypted message --> zero its memory.
+			 */
 			ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
 			err = -EBADMSG;
 		}
 	} else { /*ENCRYPT*/
-		if (unlikely(areq_ctx->is_icv_fragmented == true))
+		if (unlikely(areq_ctx->is_icv_fragmented))
 			ssi_buffer_mgr_copy_scatterlist_portion(
-				areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen+areq_ctx->dstOffset,
-				areq->cryptlen+areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+				areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
+				areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
 
 		/* If an IV was generated, copy it back to the user provided buffer. */
-		if (areq_ctx->backup_giv != NULL) {
-			if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+		if (areq_ctx->backup_giv) {
+			if (ctx->cipher_mode == DRV_CIPHER_CTR)
 				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
-			} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
 				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
-			}
 		}
 	}
 
-	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
 	aead_request_complete(areq, err);
 }
 
-static int xcbc_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
+static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 {
 	/* Load the AES key */
-	HW_DESC_INIT(&desc[0]);
+	hw_desc_init(&desc[0]);
 	/* We are using for the source/user key the same buffer as for the output keys,
-	   because after this key loading it is not needed anymore */
-	HW_DESC_SET_DIN_TYPE(&desc[0], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, NS_BIT);
-	HW_DESC_SET_CIPHER_MODE(&desc[0], DRV_CIPHER_ECB);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[0], ctx->auth_keylen);
-	HW_DESC_SET_FLOW_MODE(&desc[0], S_DIN_to_AES);
-	HW_DESC_SET_SETUP_MODE(&desc[0], SETUP_LOAD_KEY0);
+	 * because after this key loading it is not needed anymore
+	 */
+	set_din_type(&desc[0], DMA_DLLI,
+		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
+		     NS_BIT);
+	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_key_size_aes(&desc[0], ctx->auth_keylen);
+	set_flow_mode(&desc[0], S_DIN_to_AES);
+	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
 
-	HW_DESC_INIT(&desc[1]);
-	HW_DESC_SET_DIN_CONST(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[1], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT, 0);
+	hw_desc_init(&desc[1]);
+	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[1], DIN_AES_DOUT);
+	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+		      AES_KEYSIZE_128, NS_BIT, 0);
 
-	HW_DESC_INIT(&desc[2]);
-	HW_DESC_SET_DIN_CONST(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[2], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+	hw_desc_init(&desc[2]);
+	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[2], DIN_AES_DOUT);
+	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 					 + AES_KEYSIZE_128),
 			      AES_KEYSIZE_128, NS_BIT, 0);
 
-	HW_DESC_INIT(&desc[3]);
-	HW_DESC_SET_DIN_CONST(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[3], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+	hw_desc_init(&desc[3]);
+	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[3], DIN_AES_DOUT);
+	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 					  + 2 * AES_KEYSIZE_128),
 			      AES_KEYSIZE_128, NS_BIT, 0);
 
 	return 4;
 }
 
-static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
+static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
 {
 	unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 	unsigned int digest_ofs = 0;
-	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 
+	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 
 	int idx = 0;
@@ -324,52 +305,51 @@ static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
 	/* calc derived HMAC key */
 	for (i = 0; i < 2; i++) {
 		/* Load hash initial state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-		HW_DESC_SET_DIN_SRAM(&desc[idx],
-			ssi_ahash_get_larval_digest_sram_addr(
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_din_sram(&desc[idx],
+			     ssi_ahash_get_larval_digest_sram_addr(
 				ctx->drvdata, ctx->auth_mode),
-			digest_size);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+			     digest_size);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
 		/* Load the hash current length*/
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Prepare ipad key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+		hw_desc_init(&desc[idx]);
+		set_xor_val(&desc[idx], hmacPadConst[i]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 		idx++;
 
 		/* Perform HASH update */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				   ctx->auth_state.hmac.padded_authkey_dma_addr,
-				     SHA256_BLOCK_SIZE, NS_BIT);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-		HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     ctx->auth_state.hmac.padded_authkey_dma_addr,
+			     SHA256_BLOCK_SIZE, NS_BIT);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_xor_active(&desc[idx]);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 
 		/* Get the digest */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-				      (ctx->auth_state.hmac.ipad_opad_dma_addr +
-				       digest_ofs),
-				      digest_size, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-		HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hash_mode);
+		set_dout_dlli(&desc[idx],
+			      (ctx->auth_state.hmac.ipad_opad_dma_addr +
+			       digest_ofs), digest_size, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 		idx++;
 
 		digest_ofs += digest_size;
@@ -420,15 +400,17 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 
 	return 0; /* All tests of keys sizes passed */
 }
-/*This function prepers the user key so it can pass to the hmac processing 
-  (copy to intenral buffer or hash in case of key longer than block */
+
+/* This function prepares the user key so it can be passed to the hmac processing
+ * (copy to internal buffer, or hash it in case the key is longer than the block)
+ */
 static int
 ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 {
 	dma_addr_t key_dma_addr = 0;
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
-	uint32_t larval_addr = ssi_ahash_get_larval_digest_sram_addr(
+	u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
 					ctx->drvdata, ctx->auth_mode);
 	struct ssi_crypto_req ssi_req = {};
 	unsigned int blocksize;
@@ -436,8 +418,8 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
 	unsigned int hashmode;
 	unsigned int idx = 0;
 	int rc = 0;
-	HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
-	dma_addr_t padded_authkey_dma_addr = 
+	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+	dma_addr_t padded_authkey_dma_addr =
 		ctx->auth_state.hmac.padded_authkey_dma_addr;
 
 	switch (ctx->auth_mode) { /* auth_key required and >0 */
@@ -460,107 +442,89 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
 				   " DMA failed\n", key, keylen);
 			return -ENOMEM;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(key_dma_addr, keylen);
 		if (keylen > blocksize) {
 			/* Load hash initial state */
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
-			HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, digestsize);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_din_sram(&desc[idx], larval_addr, digestsize);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 			idx++;
-	
+
 			/* Load the hash current length*/
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
-			HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-			HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 			idx++;
-	
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-					     key_dma_addr, 
-					     keylen, NS_BIT);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     key_dma_addr, keylen, NS_BIT);
+			set_flow_mode(&desc[idx], DIN_HASH);
 			idx++;
-	
+
 			/* Get hashed key */
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode); 
-			HW_DESC_SET_DOUT_DLLI(&desc[idx],
-					 padded_authkey_dma_addr,
-					 digestsize,
-					 NS_BIT, 0);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-			HW_DESC_SET_CIPHER_CONFIG1(&desc[idx],
-							HASH_PADDING_DISABLED);
-			HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
-						   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hashmode);
+			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+				      digestsize, NS_BIT, 0);
+			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+			set_cipher_config0(&desc[idx],
+					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 			idx++;
-	
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
-			HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-			HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-					      (padded_authkey_dma_addr + digestsize),
-					      (blocksize - digestsize),
-					      NS_BIT, 0);
+
+			hw_desc_init(&desc[idx]);
+			set_din_const(&desc[idx], 0, (blocksize - digestsize));
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
+				      digestsize), (blocksize - digestsize),
+				      NS_BIT, 0);
 			idx++;
 		} else {
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-					     key_dma_addr, 
-					     keylen, NS_BIT);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-			HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-					      (padded_authkey_dma_addr),
-					      keylen, NS_BIT, 0);
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+				     keylen, NS_BIT);
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+				      keylen, NS_BIT, 0);
 			idx++;
-	
+
 			if ((blocksize - keylen) != 0) {
-				HW_DESC_INIT(&desc[idx]);
-				HW_DESC_SET_DIN_CONST(&desc[idx], 0,
-						      (blocksize - keylen));
-				HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-				HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-					(padded_authkey_dma_addr + keylen),
-					(blocksize - keylen),
-					NS_BIT, 0);
+				hw_desc_init(&desc[idx]);
+				set_din_const(&desc[idx], 0,
+					      (blocksize - keylen));
+				set_flow_mode(&desc[idx], BYPASS);
+				set_dout_dlli(&desc[idx],
+					      (padded_authkey_dma_addr +
+					       keylen),
+					      (blocksize - keylen), NS_BIT, 0);
 				idx++;
 			}
 		}
 	} else {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0,
-				      (blocksize - keylen));
-		HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-			padded_authkey_dma_addr,
-			blocksize,
-			NS_BIT, 0);
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0, (blocksize - keylen));
+		set_flow_mode(&desc[idx], BYPASS);
+		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
+			      blocksize, NS_BIT, 0);
 		idx++;
 	}
 
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = STAT_OP_TYPE_SETKEY;
-#endif
-
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
 	if (unlikely(rc != 0))
 		SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
 
-	if (likely(key_dma_addr != 0)) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(key_dma_addr);
+	if (likely(key_dma_addr != 0))
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
-	}
 
 	return rc;
 }
 
-
 static int
 ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 {
@@ -568,16 +532,14 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	struct rtattr *rta = (struct rtattr *)key;
 	struct ssi_crypto_req ssi_req = {};
 	struct crypto_authenc_key_param *param;
-	HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
+	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	int seq_len = 0, rc = -EINVAL;
-	DECL_CYCLE_COUNT_RESOURCES;
 
 	SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	/* STAT_PHASE_0: Init and sanity checks */
-	START_CYCLE_COUNT();
 
 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
 		if (!RTA_OK(rta, keylen))
@@ -600,7 +562,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
-			*  first 4 bytes in CTR IV */
+			 *  first 4 bytes in CTR IV
+			 */
 			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
 				CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
@@ -615,9 +578,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	if (unlikely(rc != 0))
 		goto badkey;
 
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
 	/* STAT_PHASE_1: Copy key to ctx */
-	START_CYCLE_COUNT();
 
 	/* Get key material */
 	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
@@ -631,10 +592,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 			goto badkey;
 	}
 
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
-	
 	/* STAT_PHASE_2: Create sequence */
-	START_CYCLE_COUNT();
 
 	switch (ctx->auth_mode) {
 	case DRV_HASH_SHA1:
@@ -652,15 +610,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 		goto badkey;
 	}
 
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_2);
-
 	/* STAT_PHASE_3: Submit sequence to HW */
-	START_CYCLE_COUNT();
-	
+
 	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
-#ifdef ENABLE_CYCLE_COUNT
-		ssi_req.op_type = STAT_OP_TYPE_SETKEY;
-#endif
 		rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
 		if (unlikely(rc != 0)) {
 			SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
@@ -669,7 +621,6 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
 	}
 
 	/* Update STAT_PHASE_3 */
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_3);
 	return rc;
 
 badkey:
@@ -684,7 +635,7 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 {
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	int rc = 0;
-	
+
 	if (keylen < 3)
 		return -EINVAL;
 
@@ -702,11 +653,11 @@ static int ssi_aead_setauthsize(
 	unsigned int authsize)
 {
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
-	
+
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	/* Unsupported auth. sizes */
 	if ((authsize == 0) ||
-	    (authsize >crypto_aead_maxauthsize(authenc))) {
+	    (authsize > crypto_aead_maxauthsize(authenc))) {
 		return -ENOTSUPP;
 	}
 
@@ -752,11 +703,11 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
 }
 #endif /*SSI_CC_HAS_AES_CCM*/
 
-static inline void 
+static inline void
 ssi_aead_create_assoc_desc(
-	struct aead_request *areq, 
+	struct aead_request *areq,
 	unsigned int flow_mode,
-	HwDesc_s desc[], 
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
@@ -768,24 +719,23 @@ ssi_aead_create_assoc_desc(
 	switch (assoc_dma_type) {
 	case SSI_DMA_BUF_DLLI:
 		SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-			sg_dma_address(areq->src),
-			areq->assoclen, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0) )
-			HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
+		hw_desc_init(&desc[idx]);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
+			     areq->assoclen, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
+		    (areq_ctx->cryptlen > 0))
+			set_din_not_last_indication(&desc[idx]);
 		break;
 	case SSI_DMA_BUF_MLLI:
 		SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
-				     areq_ctx->assoc.sram_addr,
-				     areq_ctx->assoc.mlli_nents,
-				     NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
-		if (ctx->auth_mode == DRV_HASH_XCBC_MAC && (areq_ctx->cryptlen > 0) )
-			HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
+			     areq_ctx->assoc.mlli_nents, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
+		if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
+		    (areq_ctx->cryptlen > 0))
+			set_din_not_last_indication(&desc[idx]);
 		break;
 	case SSI_DMA_BUF_NULL:
 	default:
@@ -797,9 +747,9 @@ ssi_aead_create_assoc_desc(
 
 static inline void
 ssi_aead_process_authenc_data_desc(
-	struct aead_request *areq, 
+	struct aead_request *areq,
 	unsigned int flow_mode,
-	HwDesc_s desc[], 
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size,
 	int direct)
 {
@@ -814,27 +764,28 @@ ssi_aead_process_authenc_data_desc(
 			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			areq_ctx->dstSgl : areq_ctx->srcSgl;
 
-		unsigned int offset = 
+		unsigned int offset =
 			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			areq_ctx->dstOffset : areq_ctx->srcOffset;
 		SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			(sg_dma_address(cipher)+ offset), areq_ctx->cryptlen,
-			NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (sg_dma_address(cipher) + offset),
+			     areq_ctx->cryptlen, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
 		break;
 	}
 	case SSI_DMA_BUF_MLLI:
 	{
 		/* DOUBLE-PASS flow (as default)
 		 * assoc. + iv + data -compact in one table
-		 * if assoclen is ZERO only IV perform */
+		 * if assoclen is ZERO only IV perform
+		 */
 		ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
-		uint32_t mlli_nents = areq_ctx->assoc.mlli_nents;
+		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 
-		if (likely(areq_ctx->is_single_pass == true)) {
-			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT){
+		if (likely(areq_ctx->is_single_pass)) {
+			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 				mlli_addr = areq_ctx->dst.sram_addr;
 				mlli_nents = areq_ctx->dst.mlli_nents;
 			} else {
@@ -844,10 +795,10 @@ ssi_aead_process_authenc_data_desc(
 		}
 
 		SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
-			mlli_addr, mlli_nents, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
+			     NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
 		break;
 	}
 	case SSI_DMA_BUF_NULL:
@@ -860,9 +811,9 @@ ssi_aead_process_authenc_data_desc(
 
 static inline void
 ssi_aead_process_cipher_data_desc(
-	struct aead_request *areq, 
+	struct aead_request *areq,
 	unsigned int flow_mode,
-	HwDesc_s desc[], 
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
@@ -875,25 +826,24 @@ ssi_aead_process_cipher_data_desc(
 	switch (data_dma_type) {
 	case SSI_DMA_BUF_DLLI:
 		SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			(sg_dma_address(areq_ctx->srcSgl)+areq_ctx->srcOffset),
-			areq_ctx->cryptlen, NS_BIT);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx],
-			(sg_dma_address(areq_ctx->dstSgl)+areq_ctx->dstOffset),
-			areq_ctx->cryptlen, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (sg_dma_address(areq_ctx->srcSgl) +
+			      areq_ctx->srcOffset), areq_ctx->cryptlen, NS_BIT);
+		set_dout_dlli(&desc[idx],
+			      (sg_dma_address(areq_ctx->dstSgl) +
+			       areq_ctx->dstOffset),
+			      areq_ctx->cryptlen, NS_BIT, 0);
+		set_flow_mode(&desc[idx], flow_mode);
 		break;
 	case SSI_DMA_BUF_MLLI:
 		SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
-			areq_ctx->src.sram_addr,
-			areq_ctx->src.mlli_nents, NS_BIT);
-		HW_DESC_SET_DOUT_MLLI(&desc[idx],
-			areq_ctx->dst.sram_addr,
-			areq_ctx->dst.mlli_nents, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
+			     areq_ctx->src.mlli_nents, NS_BIT);
+		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
+			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
+		set_flow_mode(&desc[idx], flow_mode);
 		break;
 	case SSI_DMA_BUF_NULL:
 	default:
@@ -905,7 +855,7 @@ ssi_aead_process_cipher_data_desc(
 
 static inline void ssi_aead_process_digest_result_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -918,35 +868,36 @@ static inline void ssi_aead_process_digest_result_desc(
 
 	/* Get final ICV result */
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->icv_dma_addr,
-			ctx->authsize, NS_BIT, 1);
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+		hw_desc_init(&desc[idx]);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
+			      NS_BIT, 1);
+		set_queue_last_ind(&desc[idx]);
 		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-			HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC); 
+			set_aes_not_hash_mode(&desc[idx]);
+			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 		} else {
-			HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
-				HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+			set_cipher_config0(&desc[idx],
+					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+			set_cipher_mode(&desc[idx], hash_mode);
 		}
 	} else { /*Decrypt*/
 		/* Get ICV out from hardware */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
-			ctx->authsize, NS_BIT, 1);
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-		HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+		hw_desc_init(&desc[idx]);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
+			      ctx->authsize, NS_BIT, 1);
+		set_queue_last_ind(&desc[idx]);
+		set_cipher_config0(&desc[idx],
+				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-			HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+			set_aes_not_hash_mode(&desc[idx]);
 		} else {
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+			set_cipher_mode(&desc[idx], hash_mode);
 		}
 	}
 
@@ -955,7 +906,7 @@ static inline void ssi_aead_process_digest_result_desc(
 
 static inline void ssi_aead_setup_cipher_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -966,35 +917,34 @@ static inline void ssi_aead_setup_cipher_desc(
 	int direct = req_ctx->gen_ctx.op_type;
 
 	/* Setup cipher state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-		req_ctx->gen_ctx.iv_dma_addr, hw_iv_size, NS_BIT);
-	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	} else {
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	}
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
+	hw_desc_init(&desc[idx]);
+	set_cipher_config0(&desc[idx], direct);
+	set_flow_mode(&desc[idx], ctx->flow_mode);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+		     hw_iv_size, NS_BIT);
+	if (ctx->cipher_mode == DRV_CIPHER_CTR)
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	else
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 	idx++;
 
 	/* Setup enc. key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
+	hw_desc_init(&desc[idx]);
+	set_cipher_config0(&desc[idx], direct);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], ctx->flow_mode);
 	if (ctx->flow_mode == S_DIN_to_AES) {
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
-			((ctx->enc_keylen == 24) ?
-			 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
+			      ctx->enc_keylen), NS_BIT);
+		set_key_size_aes(&desc[idx], ctx->enc_keylen);
 	} else {
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
-			ctx->enc_keylen, NS_BIT);
-		HW_DESC_SET_KEY_SIZE_DES(&desc[idx], ctx->enc_keylen);
+		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+			     ctx->enc_keylen, NS_BIT);
+		set_key_size_des(&desc[idx], ctx->enc_keylen);
 	}
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
+	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 	idx++;
 
 	*seq_size = idx;
@@ -1002,7 +952,7 @@ static inline void ssi_aead_setup_cipher_desc(
 
 static inline void ssi_aead_process_cipher(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size,
 	unsigned int data_flow_mode)
 {
@@ -1017,9 +967,9 @@ static inline void ssi_aead_process_cipher(
 	ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* We must wait for DMA to write all cipher */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-		HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
 		idx++;
 	}
 
@@ -1028,35 +978,36 @@ static inline void ssi_aead_process_cipher(
 
 static inline void ssi_aead_hmac_setup_digest_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 	unsigned int idx = *seq_size;
 
 	/* Loading hash ipad xor key state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-		ctx->auth_state.hmac.ipad_opad_dma_addr,
-		digest_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
+		     NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Load init. digest len (64 bytes) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-	HW_DESC_SET_DIN_SRAM(&desc[idx],
-		ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
-		HASH_LEN_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_sram(&desc[idx],
+		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
+								hash_mode),
+								HASH_LEN_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	*seq_size = idx;
@@ -1064,7 +1015,7 @@ static inline void ssi_aead_hmac_setup_digest_desc(
 
 static inline void ssi_aead_xcbc_setup_digest_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1072,55 +1023,53 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
 	unsigned int idx = *seq_size;
 
 	/* Loading MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0, CC_AES_BLOCK_SIZE);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* Setup XCBC MAC K1 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
-			     AES_KEYSIZE_128, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+		     AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* Setup XCBC MAC K2 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     (ctx->auth_state.xcbc.xcbc_keys_dma_addr + 
-			      AES_KEYSIZE_128),
-			     AES_KEYSIZE_128, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* Setup XCBC MAC K3 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
-			      2 * AES_KEYSIZE_128),
-			     AES_KEYSIZE_128, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	*seq_size = idx;
@@ -1128,7 +1077,7 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
 
 static inline void ssi_aead_process_digest_header_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
@@ -1142,7 +1091,7 @@ static inline void ssi_aead_process_digest_header_desc(
 
 static inline void ssi_aead_process_digest_scheme_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1150,55 +1099,56 @@ static inline void ssi_aead_process_digest_scheme_desc(
 	struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
 	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
-	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 
+	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 	unsigned int idx = *seq_size;
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-	HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
-			HASH_LEN_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
-	HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		      HASH_LEN_SIZE);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	set_cipher_do(&desc[idx], DO_PAD);
 	idx++;
 
 	/* Get final ICV result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
-			digest_size);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+	hw_desc_init(&desc[idx]);
+	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		      digest_size);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	set_cipher_mode(&desc[idx], hash_mode);
 	idx++;
 
 	/* Loading hash opad xor key state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-		(ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
-		digest_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+		     digest_size, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Load init. digest len (64 bytes) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
-	HW_DESC_SET_DIN_SRAM(&desc[idx],
-		ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
-		HASH_LEN_SIZE);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hash_mode);
+	set_din_sram(&desc[idx],
+		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
+								hash_mode),
+		     HASH_LEN_SIZE);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Perform HASH update */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
-			digest_size);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
+		     digest_size);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	*seq_size = idx;
@@ -1206,7 +1156,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
 
 static inline void ssi_aead_load_mlli_to_sram(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
@@ -1216,29 +1166,29 @@ static inline void ssi_aead_load_mlli_to_sram(
 	if (unlikely(
 		(req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
 		(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
-		(req_ctx->is_single_pass == false))) {
+		!req_ctx->is_single_pass)) {
 		SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
 			(unsigned int)ctx->drvdata->mlli_sram_addr,
 			req_ctx->mlli_params.mlli_len);
 		/* Copy MLLI table host-to-sram */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-			req_ctx->mlli_params.mlli_dma_addr,
-			req_ctx->mlli_params.mlli_len, NS_BIT);
-		HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
-			ctx->drvdata->mlli_sram_addr,
-			req_ctx->mlli_params.mlli_len);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
 		(*seq_size)++;
 	}
 }
 
-static inline enum FlowMode ssi_aead_get_data_flow_mode(
+static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
 	enum drv_crypto_direction direct,
-	enum FlowMode setup_flow_mode,
+	enum cc_flow_mode setup_flow_mode,
 	bool is_single_pass)
 {
-	enum FlowMode data_flow_mode;
+	enum cc_flow_mode data_flow_mode;
 
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		if (setup_flow_mode == S_DIN_to_AES)
@@ -1261,7 +1211,7 @@ static inline enum FlowMode ssi_aead_get_data_flow_mode(
 
 static inline void ssi_aead_hmac_authenc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1271,7 +1221,7 @@ static inline void ssi_aead_hmac_authenc(
 	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
 		direct, ctx->flow_mode, req_ctx->is_single_pass);
 
-	if (req_ctx->is_single_pass == true) {
+	if (req_ctx->is_single_pass) {
 		/**
 		 * Single-pass flow
 		 */
@@ -1284,10 +1234,11 @@ static inline void ssi_aead_hmac_authenc(
 		return;
 	}
 
-	/** 
+	/**
 	 * Double-pass flow
-	 * Fallback for unsupported single-pass modes, 
-	 * i.e. using assoc. data of non-word-multiple */
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. using assoc. data of non-word-multiple
+	 */
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* encrypt first.. */
 		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
@@ -1305,7 +1256,8 @@ static inline void ssi_aead_hmac_authenc(
 		/* decrypt after.. */
 		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
 		/* read the digest result with setting the completion bit
-		   must be after the cipher operation */
+		 * must be after the cipher operation
+		 */
 		ssi_aead_process_digest_result_desc(req, desc, seq_size);
 	}
 }
@@ -1313,7 +1265,7 @@ static inline void ssi_aead_hmac_authenc(
 static inline void
 ssi_aead_xcbc_authenc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1323,7 +1275,7 @@ ssi_aead_xcbc_authenc(
 	unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
 		direct, ctx->flow_mode, req_ctx->is_single_pass);
 
-	if (req_ctx->is_single_pass == true) {
+	if (req_ctx->is_single_pass) {
 		/**
 		 * Single-pass flow
 		 */
@@ -1335,10 +1287,11 @@ ssi_aead_xcbc_authenc(
 		return;
 	}
 
-	/** 
+	/**
 	 * Double-pass flow
-	 * Fallback for unsupported single-pass modes, 
-	 * i.e. using assoc. data of non-word-multiple */
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. using assoc. data of non-word-multiple
+	 */
 	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 		/* encrypt first.. */
 		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
@@ -1353,7 +1306,8 @@ ssi_aead_xcbc_authenc(
 		/* decrypt after..*/
 		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
 		/* read the digest result with setting the completion bit
-		   must be after the cipher operation */
+		 * must be after the cipher operation
+		 */
 		ssi_aead_process_digest_result_desc(req, desc, seq_size);
 	}
 }
@@ -1379,18 +1333,17 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 			goto data_size_err;
 		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 			break;
-		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
-		{
-			if (areq_ctx->plaintext_authenticate_only == true)
-				areq_ctx->is_single_pass = false; 
+		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+			if (areq_ctx->plaintext_authenticate_only)
+				areq_ctx->is_single_pass = false;
 			break;
 		}
 
-		if (!IS_ALIGNED(assoclen, sizeof(uint32_t)))
+		if (!IS_ALIGNED(assoclen, sizeof(u32)))
 			areq_ctx->is_single_pass = false;
 
 		if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
-		    !IS_ALIGNED(cipherlen, sizeof(uint32_t)))
+		    !IS_ALIGNED(cipherlen, sizeof(u32)))
 			areq_ctx->is_single_pass = false;
 
 		break;
@@ -1412,13 +1365,14 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
 }
 
 #if SSI_CC_HAS_AES_CCM
-static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
+static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
 {
 	unsigned int len = 0;
-	if ( headerSize == 0 ) {
+
+	if (headerSize == 0)
 		return 0;
-	} 
-	if ( headerSize < ((1UL << 16) - (1UL << 8) )) {
+
+	if (headerSize < ((1UL << 16) - (1UL << 8))) {
 		len = 2;
 
 		pA0Buff[0] = (headerSize >> 8) & 0xFF;
@@ -1457,7 +1411,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
 
 static inline int ssi_aead_ccm(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1467,7 +1421,6 @@ static inline int ssi_aead_ccm(
 	unsigned int cipher_flow_mode;
 	dma_addr_t mac_result;
 
-
 	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 		cipher_flow_mode = AES_to_HASH_and_DOUT;
 		mac_result = req_ctx->mac_buf_dma_addr;
@@ -1477,118 +1430,111 @@ static inline int ssi_aead_ccm(
 	}
 
 	/* load key */
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);	
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
-			((ctx->enc_keylen == 24) ? 
-			 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), 
-			 NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
+		      ctx->enc_keylen), NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* load ctr state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			req_ctx->gen_ctx.iv_dma_addr, 
-			     AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI,
+		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* load MAC key */
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);	
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
-			((ctx->enc_keylen == 24) ? 
-			 CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), 
-			 NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
+		      ctx->enc_keylen), NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* load MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			req_ctx->mac_buf_dma_addr, 
-			     AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
-
 	/* process assoc data */
 	if (req->assoclen > 0) {
 		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
 	} else {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-				      sg_dma_address(&req_ctx->ccm_adata_sg),
-				     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size,
-				     NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     sg_dma_address(&req_ctx->ccm_adata_sg),
+			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 	}
 
 	/* process the cipher */
-	if (req_ctx->cryptlen != 0) {
+	if (req_ctx->cryptlen != 0)
 		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
-	}
 
 	/* Read temporal MAC */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
-			      ctx->authsize, NS_BIT, 0);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
+		      NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* load AES-CTR state (for last MAC calculation)*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     req_ctx->ccm_iv0_dma_addr ,
-			     AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* encrypt the "T" value and store MAC in mac_state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			req_ctx->mac_buf_dma_addr , ctx->authsize, NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result , ctx->authsize, NS_BIT, 1);
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	idx++;	
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     ctx->authsize, NS_BIT);
+	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
 
 	*seq_size = idx;
 	return 0;
 }
 
-static int config_ccm_adata(struct aead_request *req) {
+static int config_ccm_adata(struct aead_request *req)
+{
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
@@ -1597,21 +1543,22 @@ static int config_ccm_adata(struct aead_request *req) {
 	/* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
 	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
 	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
-	uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
-	uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
-	uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
-	unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
-				req->cryptlen : 
+	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				req->cryptlen :
 				(req->cryptlen - ctx->authsize);
 	int rc;
+
 	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
-	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE*3);
+	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
 
 	/* taken from crypto/ccm.c */
 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
 	if (2 > l || l > 8) {
-		SSI_LOG_ERR("illegal iv value %X\n",req->iv[0]);
+		SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
 		return -EINVAL;
 	}
 	memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1622,20 +1569,19 @@ static int config_ccm_adata(struct aead_request *req) {
 	*b0 |= (8 * ((m - 2) / 2));
 	if (req->assoclen > 0)
 		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
-	
+
 	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
-	if (rc != 0) {
+	if (rc != 0)
 		return rc;
-	}
 	 /* END of "taken from crypto/ccm.c" */
-	
+
 	/* l(a) - size of associated data. */
-	req_ctx->ccm_hdr_size = format_ccm_a0 (a0, req->assoclen);
+	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
 
 	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
-	req->iv [15] = 1;
+	req->iv[15] = 1;
 
-	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE) ;
+	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
 	ctr_count_0[15] = 0;
 
 	return 0;
@@ -1654,7 +1600,7 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)
 	/* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET,    req->iv,        CCM_BLOCK_IV_SIZE);
-	req->iv = areq_ctx->ctr_iv;	
+	req->iv = areq_ctx->ctr_iv;
 	req->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 #endif /*SSI_CC_HAS_AES_CCM*/
@@ -1663,7 +1609,7 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)
 
 static inline void ssi_aead_gcm_setup_ghash_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1672,69 +1618,68 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
 	unsigned int idx = *seq_size;
 
 	/* load key to AES*/
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);	
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
-			ctx->enc_keylen, NS_BIT); 
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ctx->enc_keylen, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* process one zero block to generate hkey */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx],
-				  req_ctx->hkey_dma_addr,
-				  AES_BLOCK_SIZE,
-				  NS_BIT, 0); 
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
+		      NS_BIT, 0);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 	idx++;
 
 	/* Memory Barrier */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* Load GHASH subkey */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			req_ctx->hkey_dma_addr, 
-				 AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);	
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Configure Hash Engine to work with GHASH.
-	   Since it was not possible to extend HASH submodes to add GHASH,
-	   The following command is necessary in order to select GHASH (according to HW designers)*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);	
-	HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	 * Since it was not possible to extend HASH submodes to add GHASH,
+	 * The following command is necessary in order to
+	 * select GHASH (according to HW designers)
+	 */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	*seq_size = idx;
@@ -1742,7 +1687,7 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
 
 static inline void ssi_aead_gcm_setup_gctr_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1751,27 +1696,27 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
 	unsigned int idx = *seq_size;
 
 	/* load key to AES*/
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);	
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, 
-			ctx->enc_keylen, NS_BIT); 
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+		     ctx->enc_keylen, NS_BIT);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
-	if ((req_ctx->cryptlen != 0) && (req_ctx->plaintext_authenticate_only==false)){
+	if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
 		/* load AES/CTR initial CTR value inc by 2*/
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				req_ctx->gcm_iv_inc2_dma_addr, 
-					 AES_BLOCK_SIZE, NS_BIT);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);	
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+		set_key_size_aes(&desc[idx], ctx->enc_keylen);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
+			     NS_BIT);
+		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
 		idx++;
 	}
 
@@ -1780,13 +1725,13 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
 
 static inline void ssi_aead_process_gcm_result_desc(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	dma_addr_t mac_result; 
+	dma_addr_t mac_result;
 	unsigned int idx = *seq_size;
 
 	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
@@ -1796,60 +1741,57 @@ static inline void ssi_aead_process_gcm_result_desc(
 	}
 
 	/* process(ghash) gcm_block_len */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-		req_ctx->gcm_block_len_dma_addr,
-		AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
-				  AES_BLOCK_SIZE, NS_BIT, 0);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
+		      NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
 
-	idx++; 
+	idx++;
 
 	/* load AES/CTR initial CTR value inc by 1*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				 req_ctx->gcm_iv_inc1_dma_addr, 
-				 AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_key_size_aes(&desc[idx], ctx->enc_keylen);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Memory Barrier */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* process GCTR on stored GHASH and store MAC in mac_state*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-		req_ctx->mac_buf_dma_addr,
-		AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	idx++;	
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
+		     AES_BLOCK_SIZE, NS_BIT);
+	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
 
 	*seq_size = idx;
 }
 
 static inline int ssi_aead_gcm(
 	struct aead_request *req,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
@@ -1862,9 +1804,8 @@ static inline int ssi_aead_gcm(
 		cipher_flow_mode = AES_to_HASH_and_DOUT;
 	}
 
-
 	//in RFC4543 no data to encrypt. just copy data from src to dest.
-	if (req_ctx->plaintext_authenticate_only==true){     
+	if (req_ctx->plaintext_authenticate_only) {
 		ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
 		ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
 		/* process(ghash) assoc data */
@@ -1883,7 +1824,7 @@ static inline int ssi_aead_gcm(
 	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
 	/* process(gctr+ghash) */
 	if (req_ctx->cryptlen != 0)
-		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size); 
+		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
 	ssi_aead_process_gcm_result_desc(req, desc, seq_size);
 
 	idx = *seq_size;
@@ -1892,7 +1833,7 @@ static inline int ssi_aead_gcm(
 
 #ifdef CC_DEBUG
 static inline void ssi_aead_dump_gcm(
-	const char* title,
+	const char *title,
 	struct aead_request *req)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1902,52 +1843,50 @@ static inline void ssi_aead_dump_gcm(
 	if (ctx->cipher_mode != DRV_CIPHER_GCTR)
 		return;
 
-	if (title != NULL) {
+	if (title) {
 		SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
 		SSI_LOG_DEBUG("%s\n", title);
 	}
 
-	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d \n", \
-				 ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen );
+	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
+				 ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
 
-	if ( ctx->enckey != NULL ) {
-		dump_byte_array("mac key",ctx->enckey, 16);
-	}
+	if (ctx->enckey)
+		dump_byte_array("mac key", ctx->enckey, 16);
 
-	dump_byte_array("req->iv",req->iv, AES_BLOCK_SIZE);
+	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
 
-	dump_byte_array("gcm_iv_inc1",req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
 
-	dump_byte_array("gcm_iv_inc2",req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
 
-	dump_byte_array("hkey",req_ctx->hkey, AES_BLOCK_SIZE);
+	dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
 
-	dump_byte_array("mac_buf",req_ctx->mac_buf, AES_BLOCK_SIZE);
+	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
 
-	dump_byte_array("gcm_len_block",req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
 
-	if (req->src!=NULL && req->cryptlen) {
-		dump_byte_array("req->src",sg_virt(req->src), req->cryptlen+req->assoclen);
-	}
+	if (req->src && req->cryptlen)
+		dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
 
-	if (req->dst!=NULL) {
-		dump_byte_array("req->dst",sg_virt(req->dst), req->cryptlen+ctx->authsize+req->assoclen);
-    }
+	if (req->dst)
+		dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
 }
 #endif
 
-static int config_gcm_context(struct aead_request *req) {
+static int config_gcm_context(struct aead_request *req)
+{
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
-	
-	unsigned int cryptlen = (req_ctx->gen_ctx.op_type == 
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
-				req->cryptlen : 
+
+	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+				req->cryptlen :
 				(req->cryptlen - ctx->authsize);
 	__be32 counter = cpu_to_be32(2);
 
-	SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d \n", cryptlen, req->assoclen, ctx->authsize);
+	SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize);
 
 	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
 
@@ -1960,21 +1899,20 @@ static int config_gcm_context(struct aead_request *req) {
 	memcpy(req->iv + 12, &counter, 4);
 	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
 
+	if (!req_ctx->plaintext_authenticate_only) {
+		__be64 temp64;
 
-	if (req_ctx->plaintext_authenticate_only == false)
-	{
-		__be64 temp64;
 		temp64 = cpu_to_be64(req->assoclen * 8);
-		memcpy ( &req_ctx->gcm_len_block.lenA , &temp64, sizeof(temp64) );
+		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
 		temp64 = cpu_to_be64(cryptlen * 8);
-		memcpy ( &req_ctx->gcm_len_block.lenC , &temp64, 8 );
-	}
-	else { //rfc4543=>  all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
+		memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+	} else { //rfc4543=>  all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
 		__be64 temp64;
-		temp64 = cpu_to_be64((req->assoclen+GCM_BLOCK_RFC4_IV_SIZE+cryptlen) * 8);
-		memcpy ( &req_ctx->gcm_len_block.lenA , &temp64, sizeof(temp64) );
+
+		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
 		temp64 = 0;
-		memcpy ( &req_ctx->gcm_len_block.lenC , &temp64, 8 );
+		memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
 	}
 
 	return 0;
@@ -1988,34 +1926,30 @@ static void ssi_rfc4_gcm_process(struct aead_request *req)
 
 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET,    req->iv, GCM_BLOCK_RFC4_IV_SIZE);
-	req->iv = areq_ctx->ctr_iv;	
+	req->iv = areq_ctx->ctr_iv;
 	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 
-
 #endif /*SSI_CC_HAS_AES_GCM*/
 
 static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
 {
 	int rc = 0;
 	int seq_len = 0;
-	HwDesc_s desc[MAX_AEAD_PROCESS_SEQ]; 
+	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	struct ssi_crypto_req ssi_req = {};
 
-	DECL_CYCLE_COUNT_RESOURCES;
-
 	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
-		((direct==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"), ctx, req, req->iv,
+		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
 		sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
 	/* STAT_PHASE_0: Init and sanity checks */
-	START_CYCLE_COUNT();
-	
+
 	/* Check data length according to mode */
 	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
 		SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
@@ -2028,25 +1962,19 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	ssi_req.user_cb = (void *)ssi_aead_complete;
 	ssi_req.user_arg = (void *)req;
 
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
-		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
-#endif
 	/* Setup request context */
 	areq_ctx->gen_ctx.op_type = direct;
 	areq_ctx->req_authsize = ctx->authsize;
 	areq_ctx->cipher_mode = ctx->cipher_mode;
 
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
-
 	/* STAT_PHASE_1: Map buffers */
-	START_CYCLE_COUNT();
-	
+
 	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 		/* Build CTR IV - Copy nonce from last 4 bytes in
-		*  CTR key to first 4 bytes in CTR IV */
+		 * CTR key to first 4 bytes in CTR IV
+		 */
 		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
-		if (areq_ctx->backup_giv == NULL) /*User none-generated IV*/
+		if (!areq_ctx->backup_giv) /*User none-generated IV*/
 			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
 				req->iv, CTR_RFC3686_IV_SIZE);
 		/* Initialize counter portion of counter block */
@@ -2056,8 +1984,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		/* Replace with counter iv */
 		req->iv = areq_ctx->ctr_iv;
 		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
-	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) || 
-		   (ctx->cipher_mode == DRV_CIPHER_GCTR) ) {
+	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
 		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
 		if (areq_ctx->ctr_iv != req->iv) {
 			memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
@@ -2072,23 +2000,23 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		rc = config_ccm_adata(req);
 		if (unlikely(rc != 0)) {
 			SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
-			goto exit; 
+			goto exit;
 		}
 	} else {
-		areq_ctx->ccm_hdr_size = ccm_header_size_null;		
+		areq_ctx->ccm_hdr_size = ccm_header_size_null;
 	}
 #else
-	areq_ctx->ccm_hdr_size = ccm_header_size_null;		
+	areq_ctx->ccm_hdr_size = ccm_header_size_null;
 #endif /*SSI_CC_HAS_AES_CCM*/
 
-#if SSI_CC_HAS_AES_GCM 
+#if SSI_CC_HAS_AES_GCM
 	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
 		rc = config_gcm_context(req);
 		if (unlikely(rc != 0)) {
 			SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
-			goto exit; 
+			goto exit;
 		}
-	} 
+	}
 #endif /*SSI_CC_HAS_AES_GCM*/
 
 	rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
@@ -2098,17 +2026,17 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	}
 
 	/* do we need to generate IV? */
-	if (areq_ctx->backup_giv != NULL) {
-
+	if (areq_ctx->backup_giv) {
 		/* set the DMA mapped IV address*/
 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
 			ssi_req.ivgen_dma_addr_len = 1;
 		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
 			/* In ccm, the IV needs to exist both inside B0 and inside the counter.
-			   It is also copied to iv_dma_addr for other reasons (like returning
-			   it to the user).
-			   So, using 3 (identical) IV outputs. */
+			 * It is also copied to iv_dma_addr for other reasons (like returning
+			 * it to the user).
+			 * So, using 3 (identical) IV outputs.
+			 */
 			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
 			ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET          + CCM_BLOCK_IV_OFFSET;
 			ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
@@ -2122,10 +2050,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
 	}
 
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
-
 	/* STAT_PHASE_2: Create sequence */
-	START_CYCLE_COUNT();
 
 	/* Load MLLI tables to SRAM if necessary */
 	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
@@ -2139,31 +2064,26 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 	case DRV_HASH_XCBC_MAC:
 		ssi_aead_xcbc_authenc(req, desc, &seq_len);
 		break;
-#if ( SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM )
+#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
 	case DRV_HASH_NULL:
 #if SSI_CC_HAS_AES_CCM
-		if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+		if (ctx->cipher_mode == DRV_CIPHER_CCM)
 			ssi_aead_ccm(req, desc, &seq_len);
-		}
 #endif /*SSI_CC_HAS_AES_CCM*/
 #if SSI_CC_HAS_AES_GCM
-		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
 			ssi_aead_gcm(req, desc, &seq_len);
-		}
 #endif /*SSI_CC_HAS_AES_GCM*/
 			break;
 #endif
-	default:	
+	default:
 		SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
 		ssi_buffer_mgr_unmap_aead_request(dev, req);
 		rc = -ENOTSUPP;
 		goto exit;
 	}
 
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
-
 	/* STAT_PHASE_3: Lock HW and push sequence */
-	START_CYCLE_COUNT();
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
 
@@ -2172,8 +2092,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
 		ssi_buffer_mgr_unmap_aead_request(dev, req);
 	}
 
-	
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
 exit:
 	return rc;
 }
@@ -2206,7 +2124,7 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
 	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
-		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen );
+		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
 		goto out;
 	}
 
@@ -2214,9 +2132,9 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = true;
-	
+
 	ssi_rfc4309_ccm_process(req);
-	
+
 	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS)
 		req->iv = areq_ctx->backup_iv;
@@ -2242,7 +2160,6 @@ static int ssi_aead_decrypt(struct aead_request *req)
 		req->iv = areq_ctx->backup_iv;
 
 	return rc;
-
 }
 
 #if SSI_CC_HAS_AES_CCM
@@ -2261,10 +2178,10 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
-	
+
 	areq_ctx->is_gcm4543 = true;
 	ssi_rfc4309_ccm_process(req);
-	
+
 	rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS)
 		req->iv = areq_ctx->backup_iv;
@@ -2280,8 +2197,8 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 {
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	int rc = 0;
-	
-	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey()  keylen %d, key %p \n", keylen, key );
+
+	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey()  keylen %d, key %p\n", keylen, key);
 
 	if (keylen < 4)
 		return -EINVAL;
@@ -2298,8 +2215,8 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
 {
 	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	int rc = 0;
-	
-	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey()  keylen %d, key %p \n", keylen, key );
+
+	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey()  keylen %d, key %p\n", keylen, key);
 
 	if (keylen < 4)
 		return -EINVAL;
@@ -2334,24 +2251,24 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
 static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
 				      unsigned int authsize)
 {
-        SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize()  authsize %d \n", authsize );
+	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize()  authsize %d\n", authsize);
 
-        switch (authsize) {
-        case 8:
-        case 12:
-        case 16:
-                break;
-        default:
-                return -EINVAL;
-        }
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
 
-        return ssi_aead_setauthsize(authenc, authsize);
+	return ssi_aead_setauthsize(authenc, authsize);
 }
 
 static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
-				      unsigned int authsize)
+				       unsigned int authsize)
 {
-	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize()  authsize %d \n", authsize );
+	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize()  authsize %d\n", authsize);
 
 	if (authsize != 16)
 		return -EINVAL;
@@ -2364,7 +2281,7 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
 	/* Very similar to ssi_aead_encrypt() above. */
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-        int rc = -EINVAL;
+	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
 		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
@@ -2374,7 +2291,7 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
-	
+
 	areq_ctx->plaintext_authenticate_only = false;
 
 	ssi_rfc4_gcm_process(req);
@@ -2393,14 +2310,14 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
-	
+
 	//plaintext is not encryped with rfc4543
 	areq_ctx->plaintext_authenticate_only = true;
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
-	
+
 	ssi_rfc4_gcm_process(req);
 	areq_ctx->is_gcm4543 = true;
 
@@ -2416,7 +2333,7 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
 	/* Very similar to ssi_aead_decrypt() above. */
 
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-        int rc = -EINVAL;
+	int rc = -EINVAL;
 
 	if (!valid_assoclen(req)) {
 		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
@@ -2426,7 +2343,7 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
-	
+
 	areq_ctx->plaintext_authenticate_only = false;
 
 	ssi_rfc4_gcm_process(req);
@@ -2452,7 +2369,7 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->backup_giv = NULL;
-	
+
 	ssi_rfc4_gcm_process(req);
 	areq_ctx->is_gcm4543 = true;
 
@@ -2715,7 +2632,7 @@ static struct ssi_alg_template aead_algs[] = {
 		.cipher_mode = DRV_CIPHER_GCTR,
 		.flow_mode = S_DIN_to_AES,
 		.auth_mode = DRV_HASH_NULL,
-	}, 
+	},
 #endif /*SSI_CC_HAS_AES_GCM*/
 };
 
@@ -2758,7 +2675,7 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
 	struct ssi_aead_handle *aead_handle =
 		(struct ssi_aead_handle *)drvdata->aead_handle;
 
-	if (aead_handle != NULL) {
+	if (aead_handle) {
 		/* Remove registered algs */
 		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
 			crypto_unregister_aead(&t_alg->aead_alg);
@@ -2780,7 +2697,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 	int alg;
 
 	aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
-	if (aead_handle == NULL) {
+	if (!aead_handle) {
 		rc = -ENOMEM;
 		goto fail0;
 	}
@@ -2827,6 +2744,3 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
 fail0:
 	return rc;
 }
-
-
-
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index fe88c9e..39cc633 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_aead.h
-   ARM CryptoCell AEAD Crypto API
+ * ARM CryptoCell AEAD Crypto API
  */
 
 #ifndef __SSI_AEAD_H__
@@ -25,22 +25,18 @@
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 
-
 /* mac_cmp - HW writes 8 B but all bytes hold the same value */
 #define ICV_CMP_SIZE 8
-#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE*3)
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
 #define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
 
-
 /* defines for AES GCM configuration buffer */
 #define GCM_BLOCK_LEN_SIZE 8
 
-#define GCM_BLOCK_RFC4_IV_OFFSET    	4  
-#define GCM_BLOCK_RFC4_IV_SIZE  	    8  /* IV size for rfc's */
-#define GCM_BLOCK_RFC4_NONCE_OFFSET 	0  
-#define GCM_BLOCK_RFC4_NONCE_SIZE   	4  
-
-
+#define GCM_BLOCK_RFC4_IV_OFFSET	4
+#define GCM_BLOCK_RFC4_IV_SIZE		8  /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET	0
+#define GCM_BLOCK_RFC4_NONCE_SIZE	4
 
 /* Offsets into AES CCM configuration buffer */
 #define CCM_B0_OFFSET 0
@@ -57,48 +53,49 @@ enum aead_ccm_header_size {
 	ccm_header_size_zero = 0,
 	ccm_header_size_2 = 2,
 	ccm_header_size_6 = 6,
-	ccm_header_size_max = INT32_MAX
+	ccm_header_size_max = S32_MAX
 };
 
 struct aead_req_ctx {
 	/* Allocate cache line although only 4 bytes are needed to
-	*  assure next field falls @ cache line 
-	*  Used for both: digest HW compare and CCM/GCM MAC value */
-	uint8_t mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
-	uint8_t ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+	 *  assure next field falls @ cache line
+	 *  Used for both: digest HW compare and CCM/GCM MAC value
+	 */
+	u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+	u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
 
-	//used in gcm 
-	uint8_t gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
-	uint8_t gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
-	uint8_t hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+	//used in gcm
+	u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+	u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+	u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
 	struct {
-		uint8_t lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
-		uint8_t lenC[GCM_BLOCK_LEN_SIZE] ;
+		u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+		u8 lenC[GCM_BLOCK_LEN_SIZE];
 	} gcm_len_block;
 
-	uint8_t ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
 	unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
-	uint8_t backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
-	uint8_t *backup_iv; /*store iv for generated IV flow*/
-	uint8_t *backup_giv; /*store iv for rfc3686(ctr) flow*/
+	u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+	u8 *backup_iv; /*store iv for generated IV flow*/
+	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
 	dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
 	dma_addr_t icv_dma_addr; /* Phys. address of ICV */
 
-	//used in gcm 
+	//used in gcm
 	dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
 	dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
 	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
 	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
 	bool is_gcm4543;
 
-	uint8_t *icv_virt_addr; /* Virt. address of ICV */
+	u8 *icv_virt_addr; /* Virt. address of ICV */
 	struct async_gen_req_ctx gen_ctx;
 	struct ssi_mlli assoc;
 	struct ssi_mlli src;
 	struct ssi_mlli dst;
-	struct scatterlist* srcSgl;
-	struct scatterlist* dstSgl;
+	struct scatterlist *srcSgl;
+	struct scatterlist *dstSgl;
 	unsigned int srcOffset;
 	unsigned int dstOffset;
 	enum ssi_req_dma_buf_type assoc_buff_type;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 038e2ff..b35871e 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -33,42 +33,15 @@
 #include "ssi_hash.h"
 #include "ssi_aead.h"
 
-#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
-#define MLLI_TABLE_MIN_ALIGNMENT 4 /*Force the MLLI table to be align to uint32 */
-#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
-#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2*LLI_MAX_NUM_OF_DATA_ENTRIES + \
-					LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES )
-
 #ifdef CC_DEBUG
-#define DUMP_SGL(sg) \
-	while (sg) { \
-		SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \
-			     "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \
-			(sg)->length, sg_dma_len(sg), (sg)->dma_address); \
-		(sg) = sg_next(sg); \
-	}
-#define DUMP_MLLI_TABLE(mlli_p, nents) \
-	do { \
-		SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
-		while((nents)--) { \
-			SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
-			     (mlli_p)[LLI_WORD0_OFFSET], \
-			     (mlli_p)[LLI_WORD1_OFFSET]); \
-			(mlli_p) += LLI_ENTRY_WORD_SIZE; \
-		} \
-	} while (0)
 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
 	((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
 	((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
 	((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
 #else
-#define DX_BUFFER_MGR_DUMP_SGL(sg)
-#define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents)
 #define GET_DMA_BUFFER_TYPE(buff_type)
 #endif
 
-
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
 	DMA_SGL_TYPE = 1,
@@ -92,103 +65,55 @@ struct buffer_array {
 	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	uint32_t * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
 
-#ifdef CC_DMA_48BIT_SIM
-dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
-{
-	dma_addr_t tmp_dma_addr;
-#ifdef CC_DMA_48BIT_SIM_FULL
-	/* With this code all addresses will be switched to 48 bits. */
-	/* The if condition protects from double expention */
-	if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) && 
-		(data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
-#else
-	if((!(((orig_addr >> 16) & 0xFF) % 2)) && 
-		(data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
-#endif
-		tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 | 
-				(orig_addr & UINT16_MAX));
-			SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
-				    "dma_address=0x%llX\n",
-				     orig_addr, tmp_dma_addr);
-			return tmp_dma_addr;	
-	}
-	return orig_addr;
-}
-
-dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
-{
-	dma_addr_t tmp_dma_addr;
-#ifdef CC_DMA_48BIT_SIM_FULL
-	/* With this code all addresses will be restored from 48 bits. */
-	/* The if condition protects from double restoring */
-	if((orig_addr >> 32) & 0xFFFF ) {
-#else
-	if(((orig_addr >> 32) & 0xFFFF) && 
-		!(((orig_addr >> 32) & 0xFF) % 2) ) {
-#endif
-		/*return high 16 bits*/
-		tmp_dma_addr = ((orig_addr >> 16));
-		/*clean the 0xFFFF in the lower bits (set in the add expansion)*/
-		tmp_dma_addr &= 0xFFFF0000; 
-		/* Set the original 16 bits */
-		tmp_dma_addr |= (orig_addr & UINT16_MAX); 
-		SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
-			     "dma_address=0x%llX\n",
-			     orig_addr, tmp_dma_addr);
-			return tmp_dma_addr;	
-	}
-	return orig_addr;
-}
-#endif
 /**
  * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
- * 
+ *
  * @sg_list: SG list
  * @nbytes: [IN] Total SGL data bytes.
- * @lbytes: [OUT] Returns the amount of bytes at the last entry 
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
  */
 static unsigned int ssi_buffer_mgr_get_sgl_nents(
-	struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
+	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
 {
 	unsigned int nents = 0;
+
 	while (nbytes != 0) {
 		if (sg_is_chain(sg_list)) {
-			SSI_LOG_ERR("Unexpected chanined entry "
-				   "in sg (entry =0x%X) \n", nents);
+			SSI_LOG_ERR("Unexpected chained entry "
+				   "in sg (entry =0x%X)\n", nents);
 			BUG();
 		}
 		if (sg_list->length != 0) {
 			nents++;
 			/* get the number of bytes in the last entry */
 			*lbytes = nbytes;
-			nbytes -= ( sg_list->length > nbytes ) ? nbytes : sg_list->length;
+			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
 			sg_list = sg_next(sg_list);
 		} else {
 			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained != NULL) {
+			if (is_chained)
 				*is_chained = true;
-			}
 		}
 	}
-	SSI_LOG_DEBUG("nents %d last bytes %d\n",nents, *lbytes);
+	SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
 }
 
 /**
  * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
- * 
+ *
  * @sgl:
  */
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
 {
 	struct scatterlist *current_sg = sgl;
 	int sg_index = 0;
 
 	while (sg_index <= data_len) {
-		if (current_sg == NULL) {
+		if (!current_sg) {
 			/* reached the end of the sgl --> just return back */
 			return;
 		}
@@ -201,7 +126,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
 /**
  * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
  * from to_skip to end, to dest and vice versa
- * 
+ *
  * @dest:
  * @sg:
  * @to_skip:
@@ -210,47 +135,44 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
  */
 void ssi_buffer_mgr_copy_scatterlist_portion(
 	u8 *dest, struct scatterlist *sg,
-	uint32_t to_skip,  uint32_t end,
+	u32 to_skip,  u32 end,
 	enum ssi_sg_cpy_direct direct)
 {
-	uint32_t nents, lbytes;
+	u32 nents, lbytes;
 
 	nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
-	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
+	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+		       (direct == SSI_SG_TO_BUF));
 }
 
 static inline int ssi_buffer_mgr_render_buff_to_mlli(
-	dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
-	uint32_t **mlli_entry_pp)
+	dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
+	u32 **mlli_entry_pp)
 {
-	uint32_t *mlli_entry_p = *mlli_entry_pp;
-	uint32_t new_nents;;
+	u32 *mlli_entry_p = *mlli_entry_pp;
+	u32 new_nents;;
 
 	/* Verify there is no memory overflow*/
-	new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) {
+	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
 		return -ENOMEM;
-	}
 
 	/*handle buffer longer than 64 kbytes */
-	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) {
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE);
-		LLI_SET_ADDR(mlli_entry_p,buff_dma);
-		LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
-		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
+		cc_lli_set_addr(mlli_entry_p, buff_dma);
+		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+		SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
 			   mlli_entry_p[LLI_WORD0_OFFSET],
 			   mlli_entry_p[LLI_WORD1_OFFSET]);
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma);
 		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 		mlli_entry_p = mlli_entry_p + 2;
 		(*curr_nents)++;
 	}
 	/*Last entry */
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size);
-	LLI_SET_ADDR(mlli_entry_p,buff_dma);
-	LLI_SET_SIZE(mlli_entry_p, buff_size);
-	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+	cc_lli_set_addr(mlli_entry_p, buff_dma);
+	cc_lli_set_size(mlli_entry_p, buff_size);
+	SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
 		   mlli_entry_p[LLI_WORD0_OFFSET],
 		   mlli_entry_p[LLI_WORD1_OFFSET]);
 	mlli_entry_p = mlli_entry_p + 2;
@@ -259,28 +181,27 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
 	return 0;
 }
 
-
 static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
-	struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
-	uint32_t **mlli_entry_pp)
+	struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
+	u32 **mlli_entry_pp)
 {
 	struct scatterlist *curr_sgl = sgl;
-	uint32_t *mlli_entry_p = *mlli_entry_pp;
-	int32_t rc = 0;
+	u32 *mlli_entry_p = *mlli_entry_pp;
+	s32 rc = 0;
 
-	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
+	for ( ; (curr_sgl) && (sgl_data_len != 0);
 	      curr_sgl = sg_next(curr_sgl)) {
-		uint32_t entry_data_len =
+		u32 entry_data_len =
 			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
-				sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
+				sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
 		sgl_data_len -= entry_data_len;
 		rc = ssi_buffer_mgr_render_buff_to_mlli(
 			sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
 			&mlli_entry_p);
-		if(rc != 0) {
+		if (rc != 0)
 			return rc;
-		}
-		sglOffset=0;
+
+		sglOffset = 0;
 	}
 	*mlli_entry_pp = mlli_entry_p;
 	return 0;
@@ -291,8 +212,8 @@ static int ssi_buffer_mgr_generate_mlli(
 	struct buffer_array *sg_data,
 	struct mlli_params *mlli_params)
 {
-	uint32_t *mlli_p;
-	uint32_t total_nents = 0,prev_total_nents = 0;
+	u32 *mlli_p;
+	u32 total_nents = 0, prev_total_nents = 0;
 	int rc = 0, i;
 
 	SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
@@ -301,21 +222,18 @@ static int ssi_buffer_mgr_generate_mlli(
 	mlli_params->mlli_virt_addr = dma_pool_alloc(
 			mlli_params->curr_pool, GFP_KERNEL,
 			&(mlli_params->mlli_dma_addr));
-	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
+	if (unlikely(!mlli_params->mlli_virt_addr)) {
 		SSI_LOG_ERR("dma_pool_alloc() failed\n");
-		rc =-ENOMEM;
+		rc = -ENOMEM;
 		goto build_mlli_exit;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr, 
-						(MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
-						LLI_ENTRY_BYTE_SIZE));
 	/* Point to start of MLLI */
-	mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
+	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
 	/* go over all SG's and link it to one MLLI table */
 	for (i = 0; i < sg_data->num_of_buffers; i++) {
 		if (sg_data->type[i] == DMA_SGL_TYPE)
 			rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
-				sg_data->entry[i].sgl, 
+				sg_data->entry[i].sgl,
 				sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
 				&mlli_p);
 		else /*DMA_BUFF_TYPE*/
@@ -323,15 +241,15 @@ static int ssi_buffer_mgr_generate_mlli(
 				sg_data->entry[i].buffer_dma,
 				sg_data->total_data_len[i], &total_nents,
 				&mlli_p);
-		if(rc != 0) {
+		if (rc != 0)
 			return rc;
-		}
 
 		/* set last bit in the current table */
-		if (sg_data->mlli_nents[i] != NULL) {
-			/*Calculate the current MLLI table length for the 
-			length field in the descriptor*/
-			*(sg_data->mlli_nents[i]) += 
+		if (sg_data->mlli_nents[i]) {
+			/*Calculate the current MLLI table length for the
+			 *length field in the descriptor
+			 */
+			*(sg_data->mlli_nents[i]) +=
 				(total_nents - prev_total_nents);
 			prev_total_nents = total_nents;
 		}
@@ -353,7 +271,7 @@ static int ssi_buffer_mgr_generate_mlli(
 static inline void ssi_buffer_mgr_add_buffer_entry(
 	struct buffer_array *sgl_data,
 	dma_addr_t buffer_dma, unsigned int buffer_len,
-	bool is_last_entry, uint32_t *mlli_nents)
+	bool is_last_entry, u32 *mlli_nents)
 {
 	unsigned int index = sgl_data->num_of_buffers;
 
@@ -367,7 +285,7 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
 	sgl_data->type[index] = DMA_BUFF_TYPE;
 	sgl_data->is_last[index] = is_last_entry;
 	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index] != NULL)
+	if (sgl_data->mlli_nents[index])
 		*sgl_data->mlli_nents[index] = 0;
 	sgl_data->num_of_buffers++;
 }
@@ -379,7 +297,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
 	unsigned int data_len,
 	unsigned int data_offset,
 	bool is_last_table,
-	uint32_t *mlli_nents)
+	u32 *mlli_nents)
 {
 	unsigned int index = sgl_data->num_of_buffers;
 
@@ -392,22 +310,22 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
 	sgl_data->type[index] = DMA_SGL_TYPE;
 	sgl_data->is_last[index] = is_last_table;
 	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index] != NULL)
+	if (sgl_data->mlli_nents[index])
 		*sgl_data->mlli_nents[index] = 0;
 	sgl_data->num_of_buffers++;
 }
 
 static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
+ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
 			 enum dma_data_direction direction)
 {
-	uint32_t i , j;
+	u32 i, j;
 	struct scatterlist *l_sg = sg;
+
 	for (i = 0; i < nents; i++) {
-		if (l_sg == NULL) {
+		if (!l_sg)
 			break;
-		}
-		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){
+		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
 			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
 			goto err;
 		}
@@ -418,10 +336,9 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t n
 err:
 	/* Restore mapped parts */
 	for (j = 0; j < i; j++) {
-		if (sg == NULL) {
+		if (!sg)
 			break;
-		}
-		dma_unmap_sg(dev,sg,1,direction);
+		dma_unmap_sg(dev, sg, 1, direction);
 		sg = sg_next(sg);
 	}
 	return 0;
@@ -430,8 +347,8 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t n
 static int ssi_buffer_mgr_map_scatterlist(
 	struct device *dev, struct scatterlist *sg,
 	unsigned int nbytes, int direction,
-	uint32_t *nents, uint32_t max_sg_nents,
-	uint32_t *lbytes, uint32_t *mapped_nents)
+	u32 *nents, u32 max_sg_nents,
+	u32 *lbytes, u32 *mapped_nents)
 {
 	bool is_chained = false;
 
@@ -440,20 +357,19 @@ static int ssi_buffer_mgr_map_scatterlist(
 		if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
 			SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
 			return -ENOMEM;
-		} 
+		}
 		SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
-			     "page_link=0x%08lX addr=%pK offset=%u "
+			     "page=%p addr=%pK offset=%u "
 			     "length=%u\n",
-			     (unsigned long long)sg_dma_address(sg), 
-			     sg->page_link, 
-			     sg_virt(sg), 
+			     (unsigned long long)sg_dma_address(sg),
+			     sg_page(sg),
+			     sg_virt(sg),
 			     sg->offset, sg->length);
 		*lbytes = nbytes;
 		*nents = 1;
 		*mapped_nents = 1;
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
 	} else {  /*sg_is_last*/
-		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes, 
+		*nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
 						     &is_chained);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
@@ -463,21 +379,23 @@ static int ssi_buffer_mgr_map_scatterlist(
 		}
 		if (!is_chained) {
 			/* In case of mmu the number of mapped nents might
-			be changed from the original sgl nents */
+			 * be changed from the original sgl nents
+			 */
 			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (unlikely(*mapped_nents == 0)){
+			if (unlikely(*mapped_nents == 0)) {
 				*nents = 0;
 				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
 				return -ENOMEM;
 			}
 		} else {
 			/*In this case the driver maps entry by entry so it
-			must have the same nents before and after map */
+			 * must have the same nents before and after map
+			 */
 			*mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
 								 sg,
 								 *nents,
 								 direction);
-			if (unlikely(*mapped_nents != *nents)){
+			if (unlikely(*mapped_nents != *nents)) {
 				*nents = *mapped_nents;
 				SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
 				return -ENOMEM;
@@ -491,48 +409,47 @@ static int ssi_buffer_mgr_map_scatterlist(
 static inline int
 ssi_aead_handle_config_buf(struct device *dev,
 	struct aead_req_ctx *areq_ctx,
-	uint8_t* config_data,
+	u8 *config_data,
 	struct buffer_array *sg_data,
 	unsigned int assoclen)
 {
-	SSI_LOG_DEBUG(" handle additional data config set to   DLLI \n");
+	SSI_LOG_DEBUG(" handle additional data config set to   DLLI\n");
 	/* create sg for the current buffer */
 	sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
-	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, 
+	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
 				DMA_TO_DEVICE) != 1)) {
 			SSI_LOG_ERR("dma_map_sg() "
 			   "config buffer failed\n");
 			return -ENOMEM;
 	}
 	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
-		     "page_link=0x%08lX addr=%pK "
+		     "page=%p addr=%pK "
 		     "offset=%u length=%u\n",
-		     (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg), 
-		     areq_ctx->ccm_adata_sg.page_link, 
+		     (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
+		     sg_page(&areq_ctx->ccm_adata_sg),
 		     sg_virt(&areq_ctx->ccm_adata_sg),
-		     areq_ctx->ccm_adata_sg.offset, 
+		     areq_ctx->ccm_adata_sg.offset,
 		     areq_ctx->ccm_adata_sg.length);
 	/* prepare for case of MLLI */
 	if (assoclen > 0) {
-		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, 
+		ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
 						    &areq_ctx->ccm_adata_sg,
-						    (AES_BLOCK_SIZE + 
+						    (AES_BLOCK_SIZE +
 						    areq_ctx->ccm_hdr_size), 0,
 						    false, NULL);
 	}
 	return 0;
 }
 
-
 static inline int ssi_ahash_handle_curr_buf(struct device *dev,
 					   struct ahash_req_ctx *areq_ctx,
-					   uint8_t* curr_buff,
-					   uint32_t curr_buff_cnt,
+					   u8 *curr_buff,
+					   u32 curr_buff_cnt,
 					   struct buffer_array *sg_data)
 {
-	SSI_LOG_DEBUG(" handle curr buff %x set to   DLLI \n", curr_buff_cnt);
+	SSI_LOG_DEBUG(" handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 	/* create sg for the current buffer */
-	sg_init_one(areq_ctx->buff_sg,curr_buff, curr_buff_cnt);
+	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
 				DMA_TO_DEVICE) != 1)) {
 			SSI_LOG_ERR("dma_map_sg() "
@@ -540,12 +457,12 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
 			return -ENOMEM;
 	}
 	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
-		     "page_link=0x%08lX addr=%pK "
+		     "page=%p addr=%pK "
 		     "offset=%u length=%u\n",
-		     (unsigned long long)sg_dma_address(areq_ctx->buff_sg), 
-		     areq_ctx->buff_sg->page_link, 
+		     (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+		     sg_page(areq_ctx->buff_sg),
 		     sg_virt(areq_ctx->buff_sg),
-		     areq_ctx->buff_sg->offset, 
+		     areq_ctx->buff_sg->offset,
 		     areq_ctx->buff_sg->length);
 	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
 	areq_ctx->curr_sg = areq_ctx->buff_sg;
@@ -566,32 +483,28 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
 
 	if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
-		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n", 
+		SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
 			(unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
 			ivsize);
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
-		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, 
-				 ivsize, 
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize,
 				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 				 DMA_TO_DEVICE);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->mlli_params.mlli_dma_addr);
 		dma_pool_free(req_ctx->mlli_params.curr_pool,
 			      req_ctx->mlli_params.mlli_virt_addr,
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
 	dma_unmap_sg(dev, src, req_ctx->in_nents,
 		DMA_BIDIRECTIONAL);
-	SSI_LOG_DEBUG("Unmapped req->src=%pK\n", 
+	SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
 		     sg_virt(src));
 
 	if (src != dst) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, 
+		dma_unmap_sg(dev, dst, req_ctx->out_nents,
 			DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
 			sg_virt(dst));
@@ -608,40 +521,39 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	struct scatterlist *dst)
 {
 	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
-	struct mlli_params *mlli_params = &req_ctx->mlli_params;	
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	struct device *dev = &drvdata->plat_dev->dev;
 	struct buffer_array sg_data;
-	uint32_t dummy = 0;
+	u32 dummy = 0;
 	int rc = 0;
-	uint32_t mapped_nents = 0;
+	u32 mapped_nents = 0;
 
 	req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
 	sg_data.num_of_buffers = 0;
 
 	/* Map IV buffer */
-	if (likely(ivsize != 0) ) {
-		dump_byte_array("iv", (uint8_t *)info, ivsize);
-		req_ctx->gen_ctx.iv_dma_addr = 
-			dma_map_single(dev, (void *)info, 
-				       ivsize, 
-				       req_ctx->is_giv ? DMA_BIDIRECTIONAL:
+	if (likely(ivsize != 0)) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize,
+				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
 				       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, 
+		if (unlikely(dma_mapping_error(dev,
 					req_ctx->gen_ctx.iv_dma_addr))) {
 			SSI_LOG_ERR("Mapping iv %u B at va=%pK "
 				   "for DMA failed\n", ivsize, info);
 			return -ENOMEM;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr,
-								ivsize);
 		SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
 			ivsize, info,
 			(unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
-	} else
+	} else {
 		req_ctx->gen_ctx.iv_dma_addr = 0;
-	
+	}
+
 	/* Map the src SGL */
 	rc = ssi_buffer_mgr_map_scatterlist(dev, src,
 		nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
@@ -664,7 +576,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	} else {
 		/* Map the dst sg */
 		if (unlikely(ssi_buffer_mgr_map_scatterlist(
-			dev,dst, nbytes,
+			dev, dst, nbytes,
 			DMA_BIDIRECTIONAL, &req_ctx->out_nents,
 			LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
 			&mapped_nents))){
@@ -681,17 +593,16 @@ int ssi_buffer_mgr_map_blkcipher_request(
 				&req_ctx->in_mlli_nents);
 			ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
 				req_ctx->out_nents, dst,
-				nbytes, 0, true, 
+				nbytes, 0, true,
 				&req_ctx->out_mlli_nents);
 		}
 	}
-	
+
 	if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
-		if (unlikely(rc!= 0))
+		if (unlikely(rc != 0))
 			goto ablkcipher_exit;
-
 	}
 
 	SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
@@ -710,39 +621,35 @@ void ssi_buffer_mgr_unmap_aead_request(
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	uint32_t dummy;
+	struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+	u32 dummy;
 	bool chained;
-	uint32_t size_to_unmap = 0;
+	u32 size_to_unmap = 0;
 
 	if (areq_ctx->mac_buf_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
-		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, 
+		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 			MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 	}
 
 #if SSI_CC_HAS_AES_GCM
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 		if (areq_ctx->hkey_dma_addr != 0) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr);
 			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 		}
-	
+
 		if (areq_ctx->gcm_block_len_dma_addr != 0) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr);
 			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
-	
+
 		if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr);
-			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, 
+			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 				AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
-	
+
 		if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr);
-			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, 
+			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 				AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 	}
@@ -750,93 +657,87 @@ void ssi_buffer_mgr_unmap_aead_request(
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		if (areq_ctx->ccm_iv0_dma_addr != 0) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr);
-			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, 
+			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 				AES_BLOCK_SIZE, DMA_TO_DEVICE);
 		}
 
 		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 	}
 	if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr);
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
 	}
 
-	/*In case a pool was set, a table was 
-	  allocated and should be released */
-	if (areq_ctx->mlli_params.curr_pool != NULL) {
-		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n", 
+	/*In case a pool was set, a table was
+	 *allocated and should be released
+	 */
+	if (areq_ctx->mlli_params.curr_pool) {
+		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
 			(unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
 			areq_ctx->mlli_params.mlli_virt_addr);
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
 		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 			      areq_ctx->mlli_params.mlli_virt_addr,
 			      areq_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src),areq_ctx->src.nents,areq_ctx->assoc.nents,req->assoclen,req->cryptlen);
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->src));
-	size_to_unmap = req->assoclen+req->cryptlen;
-	if(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT){
+	SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
+	size_to_unmap = req->assoclen + req->cryptlen;
+	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 		size_to_unmap += areq_ctx->req_authsize;
-	}
 	if (areq_ctx->is_gcm4543)
 		size_to_unmap += crypto_aead_ivsize(tfm);
 
-	dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
 	if (unlikely(req->src != req->dst)) {
-		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n", 
+		SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->dst));
-		dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
+		dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
 			DMA_BIDIRECTIONAL);
 	}
-#if DX_HAS_ACP
-	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+	if (drvdata->coherent &&
+	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
 	    likely(req->src == req->dst))
 	{
-		uint32_t size_to_skip = req->assoclen;
-		if (areq_ctx->is_gcm4543) {
+		u32 size_to_skip = req->assoclen;
+
+		if (areq_ctx->is_gcm4543)
 			size_to_skip += crypto_aead_ivsize(tfm);
-		}
+
 		/* copy mac to a temporary location to deal with possible
-		  data memory overriding that caused by cache coherence problem. */
+		 * data memory overriding that caused by cache coherence problem.
+		 */
 		ssi_buffer_mgr_copy_scatterlist_portion(
 			areq_ctx->backup_mac, req->src,
-			size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
-			size_to_skip+ req->cryptlen, SSI_SG_FROM_BUF);
+			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+			size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
 	}
-#endif
 }
 
 static inline int ssi_buffer_mgr_get_aead_icv_nents(
 	struct scatterlist *sgl,
 	unsigned int sgl_nents,
 	unsigned int authsize,
-	uint32_t last_entry_data_size,
+	u32 last_entry_data_size,
 	bool *is_icv_fragmented)
 {
 	unsigned int icv_max_size = 0;
 	unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
 	unsigned int nents;
 	unsigned int i;
-	
+
 	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
 		*is_icv_fragmented = false;
 		return 0;
 	}
-	
-	for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (sgl == NULL) {
+
+	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+		if (!sgl)
 			break;
-		}
 		sgl = sg_next(sgl);
 	}
 
-	if (sgl != NULL) {
+	if (sgl)
 		icv_max_size = sgl->length;
-	}
 
 	if (last_entry_data_size > authsize) {
 		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
@@ -872,7 +773,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
 	struct device *dev = &drvdata->plat_dev->dev;
 	int rc = 0;
 
-	if (unlikely(req->iv == NULL)) {
+	if (unlikely(!req->iv)) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
 		goto chain_iv_exit;
 	}
@@ -883,14 +784,13 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
 		SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
 		rc = -ENOMEM;
-		goto chain_iv_exit; 
+		goto chain_iv_exit;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size);
 
 	SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
-		hw_iv_size, req->iv, 
+		hw_iv_size, req->iv,
 		(unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
-	if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){  // TODO: what about CTR?? ask Ron
+	if (do_chain && areq_ctx->plaintext_authenticate_only) {  // TODO: what about CTR?? ask Ron
 		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
@@ -914,17 +814,16 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
-	uint32_t mapped_nents = 0;
+	u32 mapped_nents = 0;
 	struct scatterlist *current_sg = req->src;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	unsigned int sg_index = 0;
-	uint32_t size_of_assoc = req->assoclen;
+	u32 size_of_assoc = req->assoclen;
 
-	if (areq_ctx->is_gcm4543) {
+	if (areq_ctx->is_gcm4543)
 		size_of_assoc += crypto_aead_ivsize(tfm);
-	}
 
-	if (sg_data == NULL) {
+	if (!sg_data) {
 		rc = -EINVAL;
 		goto chain_assoc_exit;
 	}
@@ -943,14 +842,13 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	//it is assumed that if we reach here , the sgl is already mapped
 	sg_index = current_sg->length;
 	if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
-		mapped_nents++;        
-	}
-	else{
+		mapped_nents++;
+	} else {
 		while (sg_index <= size_of_assoc) {
 			current_sg = sg_next(current_sg);
 			//if have reached the end of the sgl, then this is unexpected
-			if (current_sg == NULL) {
-				SSI_LOG_ERR("reached end of sg list. unexpected \n");
+			if (!current_sg) {
+				SSI_LOG_ERR("reached end of sg list. unexpected\n");
 				BUG();
 			}
 			sg_index += current_sg->length;
@@ -965,11 +863,11 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	areq_ctx->assoc.nents = mapped_nents;
 
 	/* in CCM case we have additional entry for
-	*  ccm header configurations */
+	 * ccm header configurations
+	 */
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		if (unlikely((mapped_nents + 1) >
 			LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
-
 			SSI_LOG_ERR("CCM case.Too many fragments. "
 				"Current %d max %d\n",
 				(areq_ctx->assoc.nents + 1),
@@ -985,9 +883,8 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 	else
 		areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
 
-	if (unlikely((do_chain == true) ||
+	if (unlikely((do_chain) ||
 		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
-
 		SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
 			GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
 			areq_ctx->assoc.nents);
@@ -1004,7 +901,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
 
 static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
 	struct aead_request *req,
-	uint32_t *src_last_bytes, uint32_t *dst_last_bytes)
+	u32 *src_last_bytes, u32 *dst_last_bytes)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
@@ -1014,7 +911,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
 	if (likely(req->src == req->dst)) {
 		/*INPLACE*/
 		areq_ctx->icv_dma_addr = sg_dma_address(
-			areq_ctx->srcSgl)+
+			areq_ctx->srcSgl) +
 			(*src_last_bytes - authsize);
 		areq_ctx->icv_virt_addr = sg_virt(
 			areq_ctx->srcSgl) +
@@ -1033,7 +930,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
 			areq_ctx->dstSgl) +
 			(*dst_last_bytes - authsize);
 		areq_ctx->icv_virt_addr = sg_virt(
-			areq_ctx->dstSgl)+
+			areq_ctx->dstSgl) +
 			(*dst_last_bytes - authsize);
 	}
 }
@@ -1042,7 +939,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 	struct ssi_drvdata *drvdata,
 	struct aead_request *req,
 	struct buffer_array *sg_data,
-	uint32_t *src_last_bytes, uint32_t *dst_last_bytes,
+	u32 *src_last_bytes, u32 *dst_last_bytes,
 	bool is_last_table)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1055,7 +952,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 		/*INPLACE*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
 			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
+			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
 			&areq_ctx->src.mlli_nents);
 
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1066,24 +963,30 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 			goto prepare_data_mlli_exit;
 		}
 
-		if (unlikely(areq_ctx->is_icv_fragmented == true)) {
+		if (unlikely(areq_ctx->is_icv_fragmented)) {
 			/* Backup happens only when ICV is fragmented, ICV
-			   verification is made by CPU compare in order to simplify
-			   MAC verification upon request completion */
+			 * verification is made by CPU compare in order to simplify
+			 * MAC verification upon request completion
+			 */
 			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-#if !DX_HAS_ACP
-				/* In ACP platform we already copying ICV
-				   for any INPLACE-DECRYPT operation, hence
-				   we must neglect this code. */
-				uint32_t size_to_skip = req->assoclen;
-				if (areq_ctx->is_gcm4543) {
-					size_to_skip += crypto_aead_ivsize(tfm);
+				if (!drvdata->coherent) {
+				/* In coherent platforms (e.g. ACP)
+				 * already copying ICV for any
+				 * INPLACE-DECRYPT operation, hence
+				 * we must neglect this code.
+				 */
+					u32 skip = req->assoclen;
+
+					if (areq_ctx->is_gcm4543)
+						skip += crypto_aead_ivsize(tfm);
+
+					ssi_buffer_mgr_copy_scatterlist_portion(
+						areq_ctx->backup_mac, req->src,
+						(skip + req->cryptlen -
+						 areq_ctx->req_authsize),
+						skip + req->cryptlen,
+						SSI_SG_TO_BUF);
 				}
-				ssi_buffer_mgr_copy_scatterlist_portion(
-					areq_ctx->backup_mac, req->src,
-					size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
-					size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
-#endif
 				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 			} else {
 				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
@@ -1095,7 +998,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
 				(*src_last_bytes - authsize);
 			areq_ctx->icv_virt_addr = sg_virt(
-				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + 
+				&areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
 				(*src_last_bytes - authsize);
 		}
 
@@ -1103,11 +1006,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 		/*NON-INPLACE and DECRYPT*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
 			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
 			&areq_ctx->src.mlli_nents);
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
 			areq_ctx->dst.nents, areq_ctx->dstSgl,
-			areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
 			&areq_ctx->dst.mlli_nents);
 
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1118,18 +1021,20 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 			goto prepare_data_mlli_exit;
 		}
 
-		if (unlikely(areq_ctx->is_icv_fragmented == true)) {
+		if (unlikely(areq_ctx->is_icv_fragmented)) {
 			/* Backup happens only when ICV is fragmented, ICV
-			   verification is made by CPU compare in order to simplify
-			   MAC verification upon request completion */
-			  uint32_t size_to_skip = req->assoclen;
-			  if (areq_ctx->is_gcm4543) {
+			 * verification is made by CPU compare in order to simplify
+			 * MAC verification upon request completion
+			 */
+			  u32 size_to_skip = req->assoclen;
+
+			  if (areq_ctx->is_gcm4543)
 				  size_to_skip += crypto_aead_ivsize(tfm);
-			  }
+
 			  ssi_buffer_mgr_copy_scatterlist_portion(
 				  areq_ctx->backup_mac, req->src,
-				  size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
-				  size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+				  size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+				  size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
 			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 		} else { /* Contig. ICV */
 			/*Should hanlde if the sg is not contig.*/
@@ -1145,11 +1050,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 		/*NON-INPLACE and ENCRYPT*/
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
 			areq_ctx->dst.nents, areq_ctx->dstSgl,
-			areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+			areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
 			&areq_ctx->dst.mlli_nents);
 		ssi_buffer_mgr_add_scatterlist_entry(sg_data,
 			areq_ctx->src.nents, areq_ctx->srcSgl,
-			areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+			areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
 			&areq_ctx->src.mlli_nents);
 
 		icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
@@ -1160,7 +1065,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
 			goto prepare_data_mlli_exit;
 		}
 
-		if (likely(areq_ctx->is_icv_fragmented == false)) {
+		if (likely(!areq_ctx->is_icv_fragmented)) {
 			/* Contig. ICV */
 			areq_ctx->icv_dma_addr = sg_dma_address(
 				&areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
@@ -1190,40 +1095,40 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 	unsigned int authsize = areq_ctx->req_authsize;
 	int src_last_bytes = 0, dst_last_bytes = 0;
 	int rc = 0;
-	uint32_t src_mapped_nents = 0, dst_mapped_nents = 0;
-	uint32_t offset = 0;
-	unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
+	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
+	u32 offset = 0;
+	unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	uint32_t sg_index = 0;
+	u32 sg_index = 0;
 	bool chained = false;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	uint32_t size_to_skip = req->assoclen;
-	if (is_gcm4543) {
+	u32 size_to_skip = req->assoclen;
+
+	if (is_gcm4543)
 		size_to_skip += crypto_aead_ivsize(tfm);
-	}
+
 	offset = size_to_skip;
 
-	if (sg_data == NULL) {
+	if (!sg_data) {
 		rc = -EINVAL;
 		goto chain_data_exit;
 	}
 	areq_ctx->srcSgl = req->src;
 	areq_ctx->dstSgl = req->dst;
 
-	if (is_gcm4543) {
+	if (is_gcm4543)
 		size_for_map += crypto_aead_ivsize(tfm);
-	}
 
-	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;	
-	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);  
+	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+	src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
 	sg_index = areq_ctx->srcSgl->length;
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
 		offset -= areq_ctx->srcSgl->length;
 		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
 		//if have reached the end of the sgl, then this is unexpected
-		if (areq_ctx->srcSgl == NULL) {
-			SSI_LOG_ERR("reached end of sg list. unexpected \n");
+		if (!areq_ctx->srcSgl) {
+			SSI_LOG_ERR("reached end of sg list. unexpected\n");
 			BUG();
 		}
 		sg_index += areq_ctx->srcSgl->length;
@@ -1238,14 +1143,13 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 
 	areq_ctx->src.nents = src_mapped_nents;
 
-	areq_ctx->srcOffset = offset;  
+	areq_ctx->srcOffset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = req->assoclen +req->cryptlen;
+		size_for_map = req->assoclen + req->cryptlen;
 		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
-		if (is_gcm4543) {
+		if (is_gcm4543)
 			size_for_map += crypto_aead_ivsize(tfm);
-		}
 
 		rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
 			 DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
@@ -1253,22 +1157,21 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 						   &dst_mapped_nents);
 		if (unlikely(rc != 0)) {
 			rc = -ENOMEM;
-			goto chain_data_exit; 
+			goto chain_data_exit;
 		}
 	}
 
-	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst,size_for_map,&dst_last_bytes, &chained);
+	dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
 	sg_index = areq_ctx->dstSgl->length;
 	offset = size_to_skip;
 
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
-
 		offset -= areq_ctx->dstSgl->length;
 		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
 		//if have reached the end of the sgl, then this is unexpected
-		if (areq_ctx->dstSgl == NULL) {
-			SSI_LOG_ERR("reached end of sg list. unexpected \n");
+		if (!areq_ctx->dstSgl) {
+			SSI_LOG_ERR("reached end of sg list. unexpected\n");
 			BUG();
 		}
 		sg_index += areq_ctx->dstSgl->length;
@@ -1284,7 +1187,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 	areq_ctx->dstOffset = offset;
 	if ((src_mapped_nents > 1) ||
 	    (dst_mapped_nents  > 1) ||
-	    (do_chain == true)) {
+	    do_chain) {
 		areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
 		rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
 			&src_last_bytes, &dst_last_bytes, is_last_table);
@@ -1298,15 +1201,15 @@ static inline int ssi_buffer_mgr_aead_chain_data(
 	return rc;
 }
 
-static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
+static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
 					   struct aead_request *req)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	uint32_t curr_mlli_size = 0;
-	
+	u32 curr_mlli_size = 0;
+
 	if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
 		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
-		curr_mlli_size = areq_ctx->assoc.mlli_nents * 
+		curr_mlli_size = areq_ctx->assoc.mlli_nents *
 						LLI_ENTRY_BYTE_SIZE;
 	}
 
@@ -1317,32 +1220,32 @@ static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
 			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 								curr_mlli_size;
 			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
-			if (areq_ctx->is_single_pass == false)
-				areq_ctx->assoc.mlli_nents += 
+			if (!areq_ctx->is_single_pass)
+				areq_ctx->assoc.mlli_nents +=
 					areq_ctx->src.mlli_nents;
 		} else {
-			if (areq_ctx->gen_ctx.op_type == 
+			if (areq_ctx->gen_ctx.op_type ==
 					DRV_CRYPTO_DIRECTION_DECRYPT) {
-				areq_ctx->src.sram_addr = 
+				areq_ctx->src.sram_addr =
 						drvdata->mlli_sram_addr +
 								curr_mlli_size;
-				areq_ctx->dst.sram_addr = 
-						areq_ctx->src.sram_addr + 
-						areq_ctx->src.mlli_nents * 
+				areq_ctx->dst.sram_addr =
+						areq_ctx->src.sram_addr +
+						areq_ctx->src.mlli_nents *
 						LLI_ENTRY_BYTE_SIZE;
-				if (areq_ctx->is_single_pass == false)
-					areq_ctx->assoc.mlli_nents += 
+				if (!areq_ctx->is_single_pass)
+					areq_ctx->assoc.mlli_nents +=
 						areq_ctx->src.mlli_nents;
 			} else {
-				areq_ctx->dst.sram_addr = 
+				areq_ctx->dst.sram_addr =
 						drvdata->mlli_sram_addr +
 								curr_mlli_size;
-				areq_ctx->src.sram_addr = 
+				areq_ctx->src.sram_addr =
 						areq_ctx->dst.sram_addr +
-						areq_ctx->dst.mlli_nents * 
+						areq_ctx->dst.mlli_nents *
 						LLI_ENTRY_BYTE_SIZE;
-				if (areq_ctx->is_single_pass == false)
-					areq_ctx->assoc.mlli_nents += 
+				if (!areq_ctx->is_single_pass)
+					areq_ctx->assoc.mlli_nents +=
 						areq_ctx->dst.mlli_nents;
 			}
 		}
@@ -1362,33 +1265,34 @@ int ssi_buffer_mgr_map_aead_request(
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
 
-	uint32_t mapped_nents = 0;
-	uint32_t dummy = 0; /*used for the assoc data fragments */
-	uint32_t size_to_map = 0;
+	u32 mapped_nents = 0;
+	u32 dummy = 0; /*used for the assoc data fragments */
+	u32 size_to_map = 0;
 
 	mlli_params->curr_pool = NULL;
 	sg_data.num_of_buffers = 0;
 
-#if DX_HAS_ACP
-	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+	if (drvdata->coherent &&
+	    (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
 	    likely(req->src == req->dst))
 	{
-		uint32_t size_to_skip = req->assoclen;
-		if (is_gcm4543) {
+		u32 size_to_skip = req->assoclen;
+
+		if (is_gcm4543)
 			size_to_skip += crypto_aead_ivsize(tfm);
-		}
+
 		/* copy mac to a temporary location to deal with possible
-		   data memory overriding that caused by cache coherence problem. */
+		 * data memory overriding that caused by cache coherence problem.
+		 */
 		ssi_buffer_mgr_copy_scatterlist_portion(
 			areq_ctx->backup_mac, req->src,
-			size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
-			size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+			size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+			size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
 	}
-#endif
 
 	/* cacluate the size for cipher remove ICV in decrypt*/
-	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == 
-				 DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
+	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 				req->cryptlen :
 				(req->cryptlen - authsize);
 
@@ -1400,7 +1304,6 @@ int ssi_buffer_mgr_map_aead_request(
 		rc = -ENOMEM;
 		goto aead_map_failure;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr, MAX_MAC_SIZE);
 
 	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
@@ -1415,8 +1318,6 @@ int ssi_buffer_mgr_map_aead_request(
 			rc = -ENOMEM;
 			goto aead_map_failure;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr,
-								AES_BLOCK_SIZE);
 		if (ssi_aead_handle_config_buf(dev, areq_ctx,
 			areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
 			rc = -ENOMEM;
@@ -1434,7 +1335,6 @@ int ssi_buffer_mgr_map_aead_request(
 			rc = -ENOMEM;
 			goto aead_map_failure;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr, AES_BLOCK_SIZE);
 
 		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
 			&areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
@@ -1444,7 +1344,6 @@ int ssi_buffer_mgr_map_aead_request(
 			rc = -ENOMEM;
 			goto aead_map_failure;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE);
 
 		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
 			areq_ctx->gcm_iv_inc1,
@@ -1458,8 +1357,6 @@ int ssi_buffer_mgr_map_aead_request(
 			rc = -ENOMEM;
 			goto aead_map_failure;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr,
-								AES_BLOCK_SIZE);
 
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
 			areq_ctx->gcm_iv_inc2,
@@ -1473,32 +1370,30 @@ int ssi_buffer_mgr_map_aead_request(
 			rc = -ENOMEM;
 			goto aead_map_failure;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr,
-								AES_BLOCK_SIZE);
 	}
 #endif /*SSI_CC_HAS_AES_GCM*/
 
 	size_to_map = req->cryptlen + req->assoclen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 		size_to_map += authsize;
-	}
+
 	if (is_gcm4543)
 		size_to_map += crypto_aead_ivsize(tfm);
 	rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
 					    size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
-					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+					    LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (unlikely(rc != 0)) {
 		rc = -ENOMEM;
-		goto aead_map_failure; 
+		goto aead_map_failure;
 	}
 
-	if (likely(areq_ctx->is_single_pass == true)) {
+	if (likely(areq_ctx->is_single_pass)) {
 		/*
-		* Create MLLI table for: 
-		*   (1) Assoc. data
-		*   (2) Src/Dst SGLs
-		*   Note: IV is contg. buffer (not an SGL) 
-		*/
+		 * Create MLLI table for:
+		 *   (1) Assoc. data
+		 *   (2) Src/Dst SGLs
+		 *   Note: IV is contg. buffer (not an SGL)
+		 */
 		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
 		if (unlikely(rc != 0))
 			goto aead_map_failure;
@@ -1510,25 +1405,25 @@ int ssi_buffer_mgr_map_aead_request(
 			goto aead_map_failure;
 	} else { /* DOUBLE-PASS flow */
 		/*
-		* Prepare MLLI table(s) in this order:
-		*  
-		* If ENCRYPT/DECRYPT (inplace):
-		*   (1) MLLI table for assoc
-		*   (2) IV entry (chained right after end of assoc)
-		*   (3) MLLI for src/dst (inplace operation)
-		*  
-		* If ENCRYPT (non-inplace) 
-		*   (1) MLLI table for assoc
-		*   (2) IV entry (chained right after end of assoc)
-		*   (3) MLLI for dst
-		*   (4) MLLI for src
-		*  
-		* If DECRYPT (non-inplace) 
-		*   (1) MLLI table for assoc
-		*   (2) IV entry (chained right after end of assoc)
-		*   (3) MLLI for src
-		*   (4) MLLI for dst
-		*/
+		 * Prepare MLLI table(s) in this order:
+		 *
+		 * If ENCRYPT/DECRYPT (inplace):
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for src/dst (inplace operation)
+		 *
+		 * If ENCRYPT (non-inplace)
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for dst
+		 *   (4) MLLI for src
+		 *
+		 * If DECRYPT (non-inplace)
+		 *   (1) MLLI table for assoc
+		 *   (2) IV entry (chained right after end of assoc)
+		 *   (3) MLLI for src
+		 *   (4) MLLI for dst
+		 */
 		rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
 		if (unlikely(rc != 0))
 			goto aead_map_failure;
@@ -1544,17 +1439,15 @@ int ssi_buffer_mgr_map_aead_request(
 	if (unlikely(
 		(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
 		(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
-
 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 		rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
-		if (unlikely(rc != 0)) {
+		if (unlikely(rc != 0))
 			goto aead_map_failure;
-		}
 
 		ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
-		SSI_LOG_DEBUG("assoc params mn %d\n",areq_ctx->assoc.mlli_nents);
-		SSI_LOG_DEBUG("src params mn %d\n",areq_ctx->src.mlli_nents);
-		SSI_LOG_DEBUG("dst params mn %d\n",areq_ctx->dst.mlli_nents);
+		SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
+		SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
+		SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
 	}
 	return 0;
 
@@ -1568,15 +1461,15 @@ int ssi_buffer_mgr_map_hash_request_final(
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct device *dev = &drvdata->plat_dev->dev;
-	uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
 			areq_ctx->buff0;
-	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
 			&areq_ctx->buff0_cnt;
-	struct mlli_params *mlli_params = &areq_ctx->mlli_params;	
+	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
-	uint32_t dummy = 0;
-	uint32_t mapped_nents = 0;
+	u32 dummy = 0;
+	u32 mapped_nents = 0;
 
 	SSI_LOG_DEBUG(" final params : curr_buff=%pK "
 		     "curr_buff_cnt=0x%X nbytes = 0x%X "
@@ -1593,10 +1486,10 @@ int ssi_buffer_mgr_map_hash_request_final(
 		/* nothing to do */
 		return 0;
 	}
-	
+
 	/*TODO: copy data in case that buffer is enough for operation */
 	/* map the previous buffer */
-	if (*curr_buff_cnt != 0 ) {
+	if (*curr_buff_cnt != 0) {
 		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
 					    *curr_buff_cnt, &sg_data) != 0) {
 			return -ENOMEM;
@@ -1604,7 +1497,7 @@ int ssi_buffer_mgr_map_hash_request_final(
 	}
 
 	if (src && (nbytes > 0) && do_update) {
-		if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
+		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
 					  nbytes,
 					  DMA_TO_DEVICE,
 					  &areq_ctx->in_nents,
@@ -1612,9 +1505,9 @@ int ssi_buffer_mgr_map_hash_request_final(
 					  &dummy, &mapped_nents))){
 			goto unmap_curr_buff;
 		}
-		if ( src && (mapped_nents == 1) 
-		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
-			memcpy(areq_ctx->buff_sg,src,
+		if (src && (mapped_nents == 1)
+		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+			memcpy(areq_ctx->buff_sg, src,
 			       sizeof(struct scatterlist));
 			areq_ctx->buff_sg->length = nbytes;
 			areq_ctx->curr_sg = areq_ctx->buff_sg;
@@ -1622,7 +1515,6 @@ int ssi_buffer_mgr_map_hash_request_final(
 		} else {
 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
 		}
-
 	}
 
 	/*build mlli */
@@ -1640,7 +1532,7 @@ int ssi_buffer_mgr_map_hash_request_final(
 		}
 	}
 	/* change the buffer index for the unmap function */
-	areq_ctx->buff_index = (areq_ctx->buff_index^1);
+	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
 	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
 		GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
 	return 0;
@@ -1649,9 +1541,9 @@ int ssi_buffer_mgr_map_hash_request_final(
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 
 unmap_curr_buff:
-	if (*curr_buff_cnt != 0 ) {
+	if (*curr_buff_cnt != 0)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-	}
+
 	return -ENOMEM;
 }
 
@@ -1660,26 +1552,26 @@ int ssi_buffer_mgr_map_hash_request_update(
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
 	struct device *dev = &drvdata->plat_dev->dev;
-	uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+	u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
 			areq_ctx->buff0;
-	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+	u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
 			&areq_ctx->buff0_cnt;
-	uint8_t* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
+	u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
 			areq_ctx->buff1;
-	uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+	u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
 			&areq_ctx->buff1_cnt;
-	struct mlli_params *mlli_params = &areq_ctx->mlli_params;	
+	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	unsigned int update_data_len;
-	uint32_t total_in_len = nbytes + *curr_buff_cnt;
+	u32 total_in_len = nbytes + *curr_buff_cnt;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
-	uint32_t dummy = 0;
-	uint32_t mapped_nents = 0;
-		
+	u32 dummy = 0;
+	u32 mapped_nents = 0;
+
 	SSI_LOG_DEBUG(" update params : curr_buff=%pK "
 		     "curr_buff_cnt=0x%X nbytes=0x%X "
-		     "src=%pK curr_index=%u \n",
+		     "src=%pK curr_index=%u\n",
 		     curr_buff, *curr_buff_cnt, nbytes,
 		     src, areq_ctx->buff_index);
 	/* Init the type of the dma buffer */
@@ -1694,12 +1586,12 @@ int ssi_buffer_mgr_map_hash_request_update(
 			     "*curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt,
 			&curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents = 
+		areq_ctx->in_nents =
 			ssi_buffer_mgr_get_sgl_nents(src,
 						    nbytes,
 						    &dummy, NULL);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
-				  &curr_buff[*curr_buff_cnt], nbytes); 
+				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
 		return 1;
 	}
@@ -1716,12 +1608,12 @@ int ssi_buffer_mgr_map_hash_request_update(
 	/* Copy the new residue to next buffer */
 	if (*next_buff_cnt != 0) {
 		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
-			     " residue %u \n", next_buff,
+			     " residue %u\n", next_buff,
 			     (update_data_len - *curr_buff_cnt),
 			     *next_buff_cnt);
 		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
-			     (update_data_len -*curr_buff_cnt),
-			     nbytes,SSI_SG_TO_BUF);
+			     (update_data_len - *curr_buff_cnt),
+			     nbytes, SSI_SG_TO_BUF);
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
@@ -1734,20 +1626,20 @@ int ssi_buffer_mgr_map_hash_request_update(
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
-	
-	if ( update_data_len > *curr_buff_cnt ) {
-		if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
-					  (update_data_len -*curr_buff_cnt),
+
+	if (update_data_len > *curr_buff_cnt) {
+		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
+					  (update_data_len - *curr_buff_cnt),
 					  DMA_TO_DEVICE,
 					  &areq_ctx->in_nents,
 					  LLI_MAX_NUM_OF_DATA_ENTRIES,
 					  &dummy, &mapped_nents))){
 			goto unmap_curr_buff;
 		}
-		if ( (mapped_nents == 1) 
-		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
+		if ((mapped_nents == 1)
+		     && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
 			/* only one entry in the SG and no previous data */
-			memcpy(areq_ctx->buff_sg,src,
+			memcpy(areq_ctx->buff_sg, src,
 			       sizeof(struct scatterlist));
 			areq_ctx->buff_sg->length = update_data_len;
 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
@@ -1769,9 +1661,8 @@ int ssi_buffer_mgr_map_hash_request_update(
 						  mlli_params) != 0)) {
 			goto fail_unmap_din;
 		}
-
 	}
-	areq_ctx->buff_index = (areq_ctx->buff_index^swap_index);
+	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
 
 	return 0;
 
@@ -1779,9 +1670,9 @@ int ssi_buffer_mgr_map_hash_request_update(
 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
 
 unmap_curr_buff:
-	if (*curr_buff_cnt != 0 ) {
+	if (*curr_buff_cnt != 0)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
-	}
+
 	return -ENOMEM;
 }
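
For readers following the buff0/buff1 logic being cleaned up above: the hash path staggers input between two small staging buffers, and the buff_index ^ 1 / buff_index ^ swap_index updates flip which buffer holds the unprocessed residue for the next call. Below is a minimal, self-contained userspace sketch of that toggle; the names and sizes are illustrative only, not the driver's structures.

#include <stdio.h>
#include <string.h>

#define BLK 64				/* pretend block size */

struct two_buf {
	unsigned char buf[2][BLK];	/* buff0 / buff1 */
	unsigned int cnt[2];		/* residue bytes held in each */
	unsigned int idx;		/* which buffer holds the current residue */
};

/* Append data; full blocks would be processed, only the tail is kept. */
static void update(struct two_buf *t, const unsigned char *in, unsigned int len)
{
	unsigned int cur = t->idx, next = cur ^ 1;
	unsigned int total = t->cnt[cur] + len;
	unsigned int residue = total % BLK;

	if (total < BLK) {
		/* not enough for a full block: just append to the current buffer */
		memcpy(t->buf[cur] + t->cnt[cur], in, len);
		t->cnt[cur] = total;
		return;
	}

	/* full blocks would be handed to the engine here; keep only the tail */
	memcpy(t->buf[next], in + len - residue, residue);
	t->cnt[next] = residue;
	t->cnt[cur] = 0;
	t->idx = next;			/* same idea as "buff_index ^ swap_index" above */
}

int main(void)
{
	struct two_buf t = { .idx = 0 };
	unsigned char data[100] = { 0 };

	update(&t, data, sizeof(data));
	printf("residue %u bytes in buff%u\n", t.cnt[t.idx], t.idx);
	return 0;
}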
 
@@ -1789,36 +1680,35 @@ void ssi_buffer_mgr_unmap_hash_request(
 	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
 {
 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
-	uint32_t *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
+	u32 *prev_len = areq_ctx->buff_index ?  &areq_ctx->buff0_cnt :
 						&areq_ctx->buff1_cnt;
 
-	/*In case a pool was set, a table was 
-	  allocated and should be released */
-	if (areq_ctx->mlli_params.curr_pool != NULL) {
-		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n", 
+	/*In case a pool was set, a table was
+	 *allocated and should be released
+	 */
+	if (areq_ctx->mlli_params.curr_pool) {
+		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
 			     (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
 			     areq_ctx->mlli_params.mlli_virt_addr);
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
 		dma_pool_free(areq_ctx->mlli_params.curr_pool,
 			      areq_ctx->mlli_params.mlli_virt_addr,
 			      areq_ctx->mlli_params.mlli_dma_addr);
 	}
-	
+
 	if ((src) && likely(areq_ctx->in_nents != 0)) {
 		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
 			     sg_virt(src),
-			     (unsigned long long)sg_dma_address(src), 
+			     (unsigned long long)sg_dma_address(src),
 			     sg_dma_len(src));
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
-		dma_unmap_sg(dev, src, 
+		dma_unmap_sg(dev, src,
 			     areq_ctx->in_nents, DMA_TO_DEVICE);
 	}
 
 	if (*prev_len != 0) {
 		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
-			     "dma=0x%llX len 0x%X\n", 
+			     " dma=0x%llX len 0x%X\n",
 				sg_virt(areq_ctx->buff_sg),
-				(unsigned long long)sg_dma_address(areq_ctx->buff_sg), 
+				(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
 				sg_dma_len(areq_ctx->buff_sg));
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 		if (!do_revert) {
@@ -1837,18 +1727,18 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
 
 	buff_mgr_handle = (struct buff_mgr_handle *)
 		kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
-	if (buff_mgr_handle == NULL)
+	if (!buff_mgr_handle)
 		return -ENOMEM;
 
 	drvdata->buff_mgr_handle = buff_mgr_handle;
 
 	buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
 				"dx_single_mlli_tables", dev,
-				MAX_NUM_OF_TOTAL_MLLI_ENTRIES * 
+				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
 				LLI_ENTRY_BYTE_SIZE,
 				MLLI_TABLE_MIN_ALIGNMENT, 0);
 
-	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
+	if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
 		goto error;
 
 	return 0;
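
The pool created here supplies the MLLI tables that dma_pool_free() releases earlier in this file. As a reminder of the dma_pool API shape only (generic usage with made-up sizes, not this driver's exact flow):

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Illustrative sizes; the driver uses MAX_NUM_OF_TOTAL_MLLI_ENTRIES * LLI_ENTRY_BYTE_SIZE. */
#define EXAMPLE_TABLE_SIZE	512
#define EXAMPLE_ALIGN		32

static int example_pool_use(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *virt;

	pool = dma_pool_create("example_tables", dev, EXAMPLE_TABLE_SIZE,
			       EXAMPLE_ALIGN, 0);
	if (!pool)
		return -ENOMEM;

	virt = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!virt) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... fill the table, hand "dma" to the hardware ... */

	dma_pool_free(pool, virt, dma);
	dma_pool_destroy(pool);
	return 0;
}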
@@ -1862,12 +1752,10 @@ int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
 {
 	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
 
-	if (buff_mgr_handle  != NULL) {
+	if (buff_mgr_handle) {
 		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
 		kfree(drvdata->buff_mgr_handle);
 		drvdata->buff_mgr_handle = NULL;
-
 	}
 	return 0;
 }
-
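
The error labels touched throughout this file (aead_map_failure, unmap_curr_buff, fail_unmap_din) follow the usual goto-unwind idiom: each allocation or mapping step gets a label that undoes only what already succeeded. A generic, self-contained sketch of the pattern, with illustrative names rather than the driver's:

#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

static int setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto err;

	c->b = malloc(64);
	if (!c->b)
		goto err_free_a;

	return 0;

err_free_a:
	free(c->a);		/* undo only what already succeeded */
err:
	return -1;		/* the driver returns -ENOMEM here */
}

int main(void)
{
	struct ctx c;

	if (setup(&c))
		return 1;
	free(c.b);
	free(c.a);
	return 0;
}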
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
index 5f4b032..41f5223 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.h
+++ b/drivers/staging/ccree/ssi_buffer_mgr.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file buffer_mgr.h
-   Buffer Manager
+ * Buffer Manager
  */
 
 #ifndef __SSI_BUFFER_MGR_H__
@@ -26,7 +26,6 @@
 #include "ssi_config.h"
 #include "ssi_driver.h"
 
-
 enum ssi_req_dma_buf_type {
 	SSI_DMA_BUF_NULL = 0,
 	SSI_DMA_BUF_DLLI,
@@ -46,9 +45,9 @@ struct ssi_mlli {
 
 struct mlli_params {
 	struct dma_pool *curr_pool;
-	uint8_t *mlli_virt_addr;
+	u8 *mlli_virt_addr;
 	dma_addr_t mlli_dma_addr;
-	uint32_t mlli_len;  
+	u32 mlli_len;
 };
 
 int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
@@ -65,7 +64,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
 	struct scatterlist *dst);
 
 void ssi_buffer_mgr_unmap_blkcipher_request(
-	struct device *dev, 
+	struct device *dev,
 	void *ctx,
 	unsigned int ivsize,
 	struct scatterlist *src,
@@ -81,25 +80,9 @@ int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ct
 
 void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
 
-void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, uint32_t to_skip, uint32_t end, enum ssi_sg_cpy_direct direct);
+void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
 
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len);
-
-
-#ifdef CC_DMA_48BIT_SIM
-dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len);
-dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr);
-
-#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = \
-					ssi_buff_mgr_update_dma_addr(addr,size)
-#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = \
-					ssi_buff_mgr_restore_dma_addr(addr)
-#else
-
-#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = addr
-#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = addr
-
-#endif
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
 
 #endif /*__BUFFER_MGR_H__*/
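
Outside the CC_DMA_48BIT_SIM build, the two macros deleted above expanded to a plain addr = addr, so dropping their call sites is behaviour-neutral; the 48-bit addressing capability itself is expressed through the DMA mask (DMA_BIT_MASK_LEN is 48 in ssi_config.h). A hedged sketch of how such a mask is normally declared, as generic kernel usage rather than this driver's probe code:

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* ssi_config.h defines DMA_BIT_MASK_LEN as 48; written out here for clarity. */
static int example_set_dma_mask(struct device *dev)
{
	int rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	if (rc)
		dev_warn(dev, "no suitable 48-bit DMA configuration available\n");
	return rc;
}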
 
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 664ed7e..cd2eafc 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -36,7 +36,6 @@
 #define MAX_ABLKCIPHER_SEQ_LEN 6
 
 #define template_ablkcipher	template_u.ablkcipher
-#define template_sblkcipher	template_u.blkcipher
 
 #define SSI_MIN_AES_XTS_SIZE 0x10
 #define SSI_MAX_AES_XTS_SIZE 0x2000
@@ -45,12 +44,13 @@ struct ssi_blkcipher_handle {
 };
 
 struct cc_user_key_info {
-	uint8_t *key;
+	u8 *key;
 	dma_addr_t key_dma_addr;
 };
+
 struct cc_hw_key_info {
-	enum HwCryptoKey key1_slot;
-	enum HwCryptoKey key2_slot;
+	enum cc_hw_crypto_key key1_slot;
+	enum cc_hw_crypto_key key2_slot;
 };
 
 struct ssi_ablkcipher_ctx {
@@ -68,11 +68,10 @@ struct ssi_ablkcipher_ctx {
 
 static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
 
-
-static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size) {
-	switch (ctx_p->flow_mode){
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
+	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
-		switch (size){
+		switch (size) {
 		case CC_AES_128_BIT_KEY_SIZE:
 		case CC_AES_192_BIT_KEY_SIZE:
 			if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
@@ -82,8 +81,8 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size)
 			break;
 		case CC_AES_256_BIT_KEY_SIZE:
 			return 0;
-		case (CC_AES_192_BIT_KEY_SIZE*2):
-		case (CC_AES_256_BIT_KEY_SIZE*2):
+		case (CC_AES_192_BIT_KEY_SIZE * 2):
+		case (CC_AES_256_BIT_KEY_SIZE * 2):
 			if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
 				   (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
 				   (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
@@ -105,19 +104,17 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size)
 #endif
 	default:
 		break;
-
 	}
 	return -EINVAL;
 }
 
-
 static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
-	switch (ctx_p->flow_mode){
+	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
-		switch (ctx_p->cipher_mode){
+		switch (ctx_p->cipher_mode) {
 		case DRV_CIPHER_XTS:
 			if ((size >= SSI_MIN_AES_XTS_SIZE) &&
-			    (size <= SSI_MAX_AES_XTS_SIZE) && 
+			    (size <= SSI_MAX_AES_XTS_SIZE) &&
 			    IS_ALIGNED(size, AES_BLOCK_SIZE))
 				return 0;
 			break;
@@ -159,7 +156,6 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int siz
 #endif /*SSI_CC_HAS_MULTI2*/
 	default:
 		break;
-
 	}
 	return -EINVAL;
 }
@@ -168,13 +164,11 @@ static unsigned int get_max_keysize(struct crypto_tfm *tfm)
 {
 	struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
 
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER) {
+	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
 		return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
-	}
 
-	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) {
+	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
 		return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
-	}
 
 	return 0;
 }
@@ -189,7 +183,7 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
 	int rc = 0;
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
 
-	SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p, 
+	SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
 						crypto_tfm_alg_name(tfm));
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
@@ -199,7 +193,7 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
 	dev = &ctx_p->drvdata->plat_dev->dev;
 
 	/* Allocate key buffer, cache line aligned */
-	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL|GFP_DMA);
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
 	if (!ctx_p->user.key) {
 		SSI_LOG_ERR("Allocating key buffer in context failed\n");
 		rc = -ENOMEM;
@@ -215,7 +209,6 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
 			max_key_buf_size, ctx_p->user.key);
 		return -ENOMEM;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr, max_key_buf_size);
 	SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
 		max_key_buf_size, ctx_p->user.key,
 		(unsigned long long)ctx_p->user.key_dma_addr);
@@ -248,10 +241,9 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
 	}
 
 	/* Unmap key buffer */
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
 	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
 								DMA_TO_DEVICE);
-	SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n", 
+	SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
 		(unsigned long long)ctx_p->user.key_dma_addr);
 
 	/* Free key buffer in context */
@@ -259,50 +251,48 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
 	SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
 }
 
+struct tdes_keys {
+	u8	key1[DES_KEY_SIZE];
+	u8	key2[DES_KEY_SIZE];
+	u8	key3[DES_KEY_SIZE];
+};
 
-typedef struct tdes_keys{
-        u8      key1[DES_KEY_SIZE];
-        u8      key2[DES_KEY_SIZE];
-        u8      key3[DES_KEY_SIZE];
-}tdes_keys_t;
-
-static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
-                               0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-                               0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
-                               0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+static const u8 zero_buff[] = {	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
 
 /* The function verifies that tdes keys are not weak.*/
 static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
 {
 #ifdef CCREE_FIPS_SUPPORT
-        tdes_keys_t *tdes_key = (tdes_keys_t*)key;
+	struct tdes_keys *tdes_key = (struct tdes_keys *)key;
 
 	/* verify key1 != key2 and key3 != key2*/
-        if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) || 
-		      (memcmp((u8*)tdes_key->key3, (u8*)tdes_key->key2, sizeof(tdes_key->key3)) == 0) )) {
-                return -ENOEXEC;
-        }
+	if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
+		      (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
+		return -ENOEXEC;
+	}
 #endif /* CCREE_FIPS_SUPPORT */
 
-        return 0;
+	return 0;
 }
 
 /* The function verifies that xts keys are not weak.*/
 static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen)
 {
 #ifdef CCREE_FIPS_SUPPORT
-        /* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */
-        int singleKeySize = keylen >> 1;
+	/* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */
+	int singleKeySize = keylen >> 1;
 
-	if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0)) {
+	if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0))
 		return -ENOEXEC;
-	}
 #endif /* CCREE_FIPS_SUPPORT */
 
-        return 0;
+	return 0;
 }
 
-static enum HwCryptoKey hw_key_to_cc_hw_key(int slot_num)
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
 {
 	switch (slot_num) {
 	case 0:
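
The FIPS helpers reworked above reject trivially weak keys: for XTS, a key whose two halves are identical; for 3DES, a key where key1 equals key2 or key3 equals key2 (which degenerates to single DES). A self-contained userspace sketch of the same checks, with illustrative names:

#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE 8

/* XTS: weak if the first half of the key equals the second half. */
static int xts_key_is_weak(const unsigned char *key, unsigned int keylen)
{
	unsigned int half = keylen / 2;

	return memcmp(key, key + half, half) == 0;
}

/* 3DES: weak if key1 == key2 or key3 == key2. */
static int tdes_key_is_weak(const unsigned char *key)
{
	const unsigned char *k1 = key;
	const unsigned char *k2 = key + DES_KEY_SIZE;
	const unsigned char *k3 = key + 2 * DES_KEY_SIZE;

	return memcmp(k1, k2, DES_KEY_SIZE) == 0 ||
	       memcmp(k3, k2, DES_KEY_SIZE) == 0;
}

int main(void)
{
	unsigned char xts_key[32] = { 0 };		/* both halves zero -> weak */
	unsigned char tdes_key[24] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	printf("xts weak: %d, 3des weak: %d\n",
	       xts_key_is_weak(xts_key, sizeof(xts_key)),
	       tdes_key_is_weak(tdes_key));
	return 0;
}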
@@ -317,35 +307,32 @@ static enum HwCryptoKey hw_key_to_cc_hw_key(int slot_num)
 	return END_OF_KEYS;
 }
 
-static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, 
-				const u8 *key, 
+static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
+				const u8 *key,
 				unsigned int keylen)
 {
 	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = &ctx_p->drvdata->plat_dev->dev;
 	u32 tmp[DES_EXPKEY_WORDS];
 	unsigned int max_key_buf_size = get_max_keysize(tfm);
-	DECL_CYCLE_COUNT_RESOURCES;
 
 	SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
 		ctx_p, crypto_tfm_alg_name(tfm), keylen);
-	dump_byte_array("key", (uint8_t *)key, keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
 	SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
-	
+
 	/* STAT_PHASE_0: Init and sanity checks */
-	START_CYCLE_COUNT();
 
 #if SSI_CC_HAS_MULTI2
 	/*last byte of key buffer is round number and should not be a part of key size*/
-	if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
-		keylen -=1;
-	}
+	if (ctx_p->flow_mode == S_DIN_to_MULTI2)
+		keylen -= 1;
 #endif /*SSI_CC_HAS_MULTI2*/
 
-	if (unlikely(validate_keys_sizes(ctx_p,keylen) != 0)) {
+	if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
 		SSI_LOG_ERR("Unsupported key size %d.\n", keylen);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
@@ -353,7 +340,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 
 	if (ssi_is_hw_key(tfm)) {
 		/* setting HW key slots */
-		struct arm_hw_key_info *hki = (struct arm_hw_key_info*)key;
+		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
 
 		if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
 			SSI_LOG_ERR("HW key not supported for non-AES flows\n");
@@ -381,7 +368,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 		}
 
 		ctx_p->keylen = keylen;
-		END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
 		SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");
 
 		return 0;
@@ -396,28 +382,24 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 			return -EINVAL;
 		}
 	}
-	if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) && 
+	if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
 	    ssi_fips_verify_xts_keys(key, keylen) != 0) {
 		SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
 		return -EINVAL;
 	}
-	if ((ctx_p->flow_mode == S_DIN_to_DES) && 
-	    (keylen == DES3_EDE_KEY_SIZE) && 
+	if ((ctx_p->flow_mode == S_DIN_to_DES) &&
+	    (keylen == DES3_EDE_KEY_SIZE) &&
 	    ssi_fips_verify_3des_keys(key, keylen) != 0) {
 		SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
 		return -EINVAL;
 	}
 
-
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
-
 	/* STAT_PHASE_1: Copy key to ctx */
-	START_CYCLE_COUNT();
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
-	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr, 
+	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
 					max_key_buf_size, DMA_TO_DEVICE);
-#if SSI_CC_HAS_MULTI2
+
 	if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
+#if SSI_CC_HAS_MULTI2
 		memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
 		ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
 		if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
@@ -425,10 +407,8 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 			SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
 			return -EINVAL;
-		}
-	} else 
 #endif /*SSI_CC_HAS_MULTI2*/
-	{
+	} else {
 		memcpy(ctx_p->user.key, key, keylen);
 		if (keylen == 24)
 			memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
@@ -438,6 +418,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 			int key_len = keylen >> 1;
 			int err;
 			SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
 			desc->tfm = ctx_p->shash_tfm;
 
 			err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
@@ -447,12 +428,9 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
 			}
 		}
 	}
-	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, 
+	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
 					max_key_buf_size, DMA_TO_DEVICE);
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr ,max_key_buf_size);
 	ctx_p->keylen = keylen;
-	
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
 
 	 SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
 	return 0;
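
Because the key lives in a buffer that was streaming-mapped with dma_map_single(), the CPU-side copy above is bracketed by dma_sync_single_for_cpu() and dma_sync_single_for_device(). The general shape of that pattern, as a hedged sketch rather than the driver's exact code:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * buf was mapped earlier with dma_map_single(dev, buf, buf_len, DMA_TO_DEVICE),
 * producing buf_dma; the CPU must reclaim the buffer before touching it.
 */
static void example_update_mapped_buffer(struct device *dev, u8 *buf,
					 dma_addr_t buf_dma, size_t buf_len,
					 const u8 *key, size_t keylen)
{
	dma_sync_single_for_cpu(dev, buf_dma, buf_len, DMA_TO_DEVICE);

	memcpy(buf, key, keylen);		/* CPU-side update */

	dma_sync_single_for_device(dev, buf_dma, buf_len, DMA_TO_DEVICE);
}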
@@ -464,7 +442,7 @@ ssi_blkcipher_create_setup_desc(
 	struct blkcipher_req_ctx *req_ctx,
 	unsigned int ivsize,
 	unsigned int nbytes,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
@@ -489,96 +467,92 @@ ssi_blkcipher_create_setup_desc(
 	case DRV_CIPHER_CTR:
 	case DRV_CIPHER_OFB:
 		/* Load cipher state */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-				     iv_dma_addr, ivsize,
-				     NS_BIT);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
-		HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
-		if ((cipher_mode == DRV_CIPHER_CTR) || 
-		    (cipher_mode == DRV_CIPHER_OFB) ) {
-			HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
-					       SETUP_LOAD_STATE1);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+			     NS_BIT);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if ((cipher_mode == DRV_CIPHER_CTR) ||
+		    (cipher_mode == DRV_CIPHER_OFB)) {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 		} else {
-			HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
-					       SETUP_LOAD_STATE0);
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
 		}
 		(*seq_size)++;
 		/*FALLTHROUGH*/
 	case DRV_CIPHER_ECB:
 		/* Load key */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
 		if (flow_mode == S_DIN_to_AES) {
-
 			if (ssi_is_hw_key(tfm)) {
-				HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+				set_hw_crypto_key(&desc[*seq_size],
+						  ctx_p->hw.key1_slot);
 			} else {
-				HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-						     key_dma_addr, 
-						     ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
-						     NS_BIT);
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, ((key_len == 24) ?
+							    AES_MAX_KEY_SIZE :
+							    key_len), NS_BIT);
 			}
-			HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len);
+			set_key_size_aes(&desc[*seq_size], key_len);
 		} else {
 			/*des*/
-			HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-					     key_dma_addr, key_len,
-					     NS_BIT);
-			HW_DESC_SET_KEY_SIZE_DES(&desc[*seq_size], key_len);
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[*seq_size], key_len);
 		}
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
-		HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
 		(*seq_size)++;
 		break;
 	case DRV_CIPHER_XTS:
 	case DRV_CIPHER_ESSIV:
 	case DRV_CIPHER_BITLOCKER:
 		/* Load AES key */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
 		if (ssi_is_hw_key(tfm)) {
-			HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
 		} else {
-			HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-					     key_dma_addr, key_len/2,
-					     NS_BIT);
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
 		}
-		HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
-		HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
 		(*seq_size)++;
 
 		/* load XEX key */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
 		if (ssi_is_hw_key(tfm)) {
-			HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key2_slot);
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key2_slot);
 		} else {
-			HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI, 
-					     (key_dma_addr+key_len/2), key_len/2,
-					     NS_BIT);
+			set_din_type(&desc[*seq_size], DMA_DLLI,
+				     (key_dma_addr + (key_len / 2)),
+				     (key_len / 2), NS_BIT);
 		}
-		HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[*seq_size], du_size);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], S_DIN_to_AES2);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
-		HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+		set_xex_data_unit_size(&desc[*seq_size], du_size);
+		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
 		(*seq_size)++;
-	
+
 		/* Set state */
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
-		HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-				     iv_dma_addr, CC_AES_BLOCK_SIZE,
-				     NS_BIT);
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
 		(*seq_size)++;
 		break;
 	default:
@@ -592,49 +566,43 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
 	struct crypto_tfm *tfm,
 	struct blkcipher_req_ctx *req_ctx,
 	unsigned int ivsize,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-	
+
 	int direction = req_ctx->gen_ctx.op_type;
 	/* Load system key */
-	HW_DESC_INIT(&desc[*seq_size]);
-	HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
-	HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
-						CC_MULTI2_SYSTEM_KEY_SIZE,
-						NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
-	HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[*seq_size]);
+	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+	set_cipher_config0(&desc[*seq_size], direction);
+	set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
+		     CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
+	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+	set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
 	(*seq_size)++;
 
 	/* load data key */
-	HW_DESC_INIT(&desc[*seq_size]);
-	HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI, 
-					(ctx_p->user.key_dma_addr + 
-						CC_MULTI2_SYSTEM_KEY_SIZE),
-				CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
-	HW_DESC_SET_MULTI2_NUM_ROUNDS(&desc[*seq_size],
-						ctx_p->key_round_number);
-	HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
-	HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
-	HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE0 );
+	hw_desc_init(&desc[*seq_size]);
+	set_din_type(&desc[*seq_size], DMA_DLLI,
+		     (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
+		     CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
+	set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
+	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+	set_cipher_config0(&desc[*seq_size], direction);
+	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
 	(*seq_size)++;
-	
-	
+
 	/* Set state */
-	HW_DESC_INIT(&desc[*seq_size]);
-	HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-			     req_ctx->gen_ctx.iv_dma_addr,
-			     ivsize, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
-	HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
-	HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
-	HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);	
+	hw_desc_init(&desc[*seq_size]);
+	set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
+		     ivsize, NS_BIT);
+	set_cipher_config0(&desc[*seq_size], direction);
+	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
+	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
+	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 	(*seq_size)++;
-	
 }
 #endif /*SSI_CC_HAS_MULTI2*/
 
@@ -645,7 +613,7 @@ ssi_blkcipher_create_data_desc(
 	struct scatterlist *dst, struct scatterlist *src,
 	unsigned int nbytes,
 	void *areq,
-	HwDesc_s desc[],
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size)
 {
 	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
@@ -668,25 +636,22 @@ ssi_blkcipher_create_data_desc(
 		return;
 	}
 	/* Process */
-	if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)){
+	if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
 		SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
 			     (unsigned long long)sg_dma_address(src),
 			     nbytes);
 		SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
 			     (unsigned long long)sg_dma_address(dst),
 			     nbytes);
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-				     sg_dma_address(src),
-				     nbytes, NS_BIT);
-		HW_DESC_SET_DOUT_DLLI(&desc[*seq_size],
-				      sg_dma_address(dst),
-				      nbytes,
-				      NS_BIT, (areq == NULL)? 0:1);
-		if (areq != NULL) {
-			HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
-		}
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!areq ? 0 : 1));
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
 		(*seq_size)++;
 	} else {
 		/* bypass */
@@ -695,77 +660,72 @@ ssi_blkcipher_create_data_desc(
 			(unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
 			req_ctx->mlli_params.mlli_len,
 			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
-				     req_ctx->mlli_params.mlli_dma_addr,
-				     req_ctx->mlli_params.mlli_len,
-				     NS_BIT);
-		HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
-				      ctx_p->drvdata->mlli_sram_addr,
-				      req_ctx->mlli_params.mlli_len);
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx_p->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
 		(*seq_size)++;
 
-		HW_DESC_INIT(&desc[*seq_size]);
-		HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_MLLI,
-			ctx_p->drvdata->mlli_sram_addr,
-				     req_ctx->in_mlli_nents, NS_BIT);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_MLLI,
+			     ctx_p->drvdata->mlli_sram_addr,
+			     req_ctx->in_mlli_nents, NS_BIT);
 		if (req_ctx->out_nents == 0) {
 			SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
 				     "addr 0x%08X\n",
 			(unsigned int)ctx_p->drvdata->mlli_sram_addr,
 			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
-			HW_DESC_SET_DOUT_MLLI(&desc[*seq_size], 
-			ctx_p->drvdata->mlli_sram_addr,
-					      req_ctx->in_mlli_nents,
-					      NS_BIT,(areq == NULL)? 0:1);
+			set_dout_mlli(&desc[*seq_size],
+				      ctx_p->drvdata->mlli_sram_addr,
+				      req_ctx->in_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
 		} else {
 			SSI_LOG_DEBUG(" din/dout params "
 				     "addr 0x%08X addr 0x%08X\n",
 				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
-				(unsigned int)ctx_p->drvdata->mlli_sram_addr + 
-				(uint32_t)LLI_ENTRY_BYTE_SIZE * 
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
+				(u32)LLI_ENTRY_BYTE_SIZE *
 							req_ctx->in_nents);
-			HW_DESC_SET_DOUT_MLLI(&desc[*seq_size], 
-				(ctx_p->drvdata->mlli_sram_addr +
-				LLI_ENTRY_BYTE_SIZE * 
-						req_ctx->in_mlli_nents), 
-				req_ctx->out_mlli_nents, NS_BIT,(areq == NULL)? 0:1);
+			set_dout_mlli(&desc[*seq_size],
+				      (ctx_p->drvdata->mlli_sram_addr +
+				       (LLI_ENTRY_BYTE_SIZE *
+					req_ctx->in_mlli_nents)),
+				      req_ctx->out_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
 		}
-		if (areq != NULL) {
-			HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
-		}
-		HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
 		(*seq_size)++;
 	}
 }
 
 static int ssi_blkcipher_complete(struct device *dev,
-                                  struct ssi_ablkcipher_ctx *ctx_p, 
-                                  struct blkcipher_req_ctx *req_ctx,
-                                  struct scatterlist *dst, struct scatterlist *src,
-                                  void *info, //req info
-                                  unsigned int ivsize,
-                                  void *areq,
-                                  void __iomem *cc_base)
+				struct ssi_ablkcipher_ctx *ctx_p,
+				struct blkcipher_req_ctx *req_ctx,
+				struct scatterlist *dst,
+				struct scatterlist *src,
+				unsigned int ivsize,
+				void *areq,
+				void __iomem *cc_base)
 {
 	int completion_error = 0;
-	uint32_t inflight_counter;
-	DECL_CYCLE_COUNT_RESOURCES;
+	u32 inflight_counter;
 
-	START_CYCLE_COUNT();
 	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-	info = req_ctx->backup_info;
-	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
-
 
 	/*Set the inflight couter value to local variable*/
 	inflight_counter =  ctx_p->drvdata->inflight_counter;
 	/*Decrease the inflight counter*/
-	if(ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
+	if (ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
 		ctx_p->drvdata->inflight_counter--;
 
-	if(areq){
+	if (areq) {
 		ablkcipher_request_complete(areq, completion_error);
 		return 0;
 	}
@@ -779,24 +739,22 @@ static int ssi_blkcipher_process(
 	unsigned int nbytes,
 	void *info, //req info
 	unsigned int ivsize,
-	void *areq, 
+	void *areq,
 	enum drv_crypto_direction direction)
 {
 	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = &ctx_p->drvdata->plat_dev->dev;
-	HwDesc_s desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
 	struct ssi_crypto_req ssi_req = {};
-	int rc, seq_len = 0,cts_restore_flag = 0;
-	DECL_CYCLE_COUNT_RESOURCES;
+	int rc, seq_len = 0, cts_restore_flag = 0;
 
 	SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
-		((direction==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"),
+		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
 		     areq, info, nbytes);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	/* STAT_PHASE_0: Init and sanity checks */
-	START_CYCLE_COUNT();
-	
+
 	/* TODO: check data length according to mode */
 	if (unlikely(validate_data_size(ctx_p, nbytes))) {
 		SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
@@ -807,9 +765,8 @@ static int ssi_blkcipher_process(
 		/* No data to process is valid */
 		return 0;
 	}
-        /*For CTS in case of data size aligned to 16 use CBC mode*/
-	if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)){
-
+	/*For CTS in case of data size aligned to 16 use CBC mode*/
+	if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
 		ctx_p->cipher_mode = DRV_CIPHER_CBC;
 		cts_restore_flag = 1;
 	}
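
The special case above drops from CBC-CTS to plain CBC when the request length is already a multiple of the AES block size, since ciphertext stealing only matters for a partial final block; cts_restore_flag puts the mode back afterwards. A trivial self-contained sketch of that decision:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

enum mode { MODE_CBC, MODE_CBC_CTS };

/* Block-aligned data needs no ciphertext stealing, so plain CBC suffices. */
static enum mode effective_mode(enum mode requested, unsigned int nbytes)
{
	if (requested == MODE_CBC_CTS && (nbytes % AES_BLOCK_SIZE) == 0)
		return MODE_CBC;
	return requested;
}

int main(void)
{
	printf("48 bytes -> %s\n",
	       effective_mode(MODE_CBC_CTS, 48) == MODE_CBC ? "CBC" : "CBC-CTS");
	printf("50 bytes -> %s\n",
	       effective_mode(MODE_CBC_CTS, 50) == MODE_CBC ? "CBC" : "CBC-CTS");
	return 0;
}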
@@ -826,83 +783,65 @@ static int ssi_blkcipher_process(
 
 	/* Setup request context */
 	req_ctx->gen_ctx.op_type = direction;
-	
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
 
 	/* STAT_PHASE_1: Map buffers */
-	START_CYCLE_COUNT();
-	
+
 	rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
 	if (unlikely(rc != 0)) {
 		SSI_LOG_ERR("map_request() failed\n");
 		goto exit_process;
 	}
 
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
-
 	/* STAT_PHASE_2: Create sequence */
-	START_CYCLE_COUNT();
 
 	/* Setup processing */
 #if SSI_CC_HAS_MULTI2
-	if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
-		ssi_blkcipher_create_multi2_setup_desc(tfm,
-						       req_ctx,
-						       ivsize,
-						       desc,
-						       &seq_len);
-	} else
+	if (ctx_p->flow_mode == S_DIN_to_MULTI2)
+		ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
+						       desc, &seq_len);
+	else
 #endif /*SSI_CC_HAS_MULTI2*/
-	{
-		ssi_blkcipher_create_setup_desc(tfm,
-						req_ctx,
-						ivsize,
-						nbytes,
-						desc,
-						&seq_len);
-	}
+		ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
+						desc, &seq_len);
 	/* Data processing */
 	ssi_blkcipher_create_data_desc(tfm,
-			      req_ctx, 
+			      req_ctx,
 			      dst, src,
 			      nbytes,
 			      areq,
 			      desc, &seq_len);
 
 	/* do we need to generate IV? */
-	if (req_ctx->is_giv == true) {
+	if (req_ctx->is_giv) {
 		ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
 		ssi_req.ivgen_dma_addr_len = 1;
 		/* set the IV size (8/16 B long)*/
 		ssi_req.ivgen_size = ivsize;
 	}
-	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
-	START_CYCLE_COUNT();
-	
-	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL)? 0:1);
-	if(areq != NULL) {
+
+	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
+	if (areq) {
 		if (unlikely(rc != -EINPROGRESS)) {
 			/* Failed to send the request or request completed synchronously */
 			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
 		}
 
-		END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
 	} else {
 		if (rc != 0) {
 			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-			END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);            
 		} else {
-			END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
-			rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst, src, info, ivsize, NULL, ctx_p->drvdata->cc_base);
-		} 
+			rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
+						    src, ivsize, NULL,
+						    ctx_p->drvdata->cc_base);
+		}
 	}
 
 exit_process:
 	if (cts_restore_flag != 0)
 		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
-	
+
 	return rc;
 }
 
@@ -916,86 +855,23 @@ static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __io
 
 	CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();
 
-	ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src, areq->info, ivsize, areq, cc_base);
+	ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
+			       ivsize, areq, cc_base);
 }
 
-
-
-static int ssi_sblkcipher_init(struct crypto_tfm *tfm)
-{
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-
-	/* Allocate sync ctx buffer */
-	ctx_p->sync_ctx = kmalloc(sizeof(struct blkcipher_req_ctx), GFP_KERNEL|GFP_DMA);
-	if (!ctx_p->sync_ctx) {
-		SSI_LOG_ERR("Allocating sync ctx buffer in context failed\n");
-		return -ENOMEM;
-	}
-	SSI_LOG_DEBUG("Allocated sync ctx buffer in context ctx_p->sync_ctx=@%p\n",
-								ctx_p->sync_ctx);
-
-	return ssi_blkcipher_init(tfm);
-}
-
-
-static void ssi_sblkcipher_exit(struct crypto_tfm *tfm)
-{
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-	
-	kfree(ctx_p->sync_ctx);
-	SSI_LOG_DEBUG("Free sync ctx buffer in context ctx_p->sync_ctx=@%p\n", ctx_p->sync_ctx);
-
-	ssi_blkcipher_exit(tfm);
-}
-
-#ifdef SYNC_ALGS
-static int ssi_sblkcipher_encrypt(struct blkcipher_desc *desc,
-                        struct scatterlist *dst, struct scatterlist *src,
-                        unsigned int nbytes)
-{
-	struct crypto_blkcipher *blk_tfm = desc->tfm;
-	struct crypto_tfm *tfm = crypto_blkcipher_tfm(blk_tfm);
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-	struct blkcipher_req_ctx *req_ctx = ctx_p->sync_ctx;
-	unsigned int ivsize = crypto_blkcipher_ivsize(blk_tfm);
-
-	req_ctx->backup_info = desc->info;
-	req_ctx->is_giv = false;
-
-	return ssi_blkcipher_process(tfm, req_ctx, dst, src, nbytes, desc->info, ivsize, NULL, DRV_CRYPTO_DIRECTION_ENCRYPT);
-}
-
-static int ssi_sblkcipher_decrypt(struct blkcipher_desc *desc,
-                        struct scatterlist *dst, struct scatterlist *src,
-                        unsigned int nbytes)
-{
-	struct crypto_blkcipher *blk_tfm = desc->tfm;
-	struct crypto_tfm *tfm = crypto_blkcipher_tfm(blk_tfm);
-	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-	struct blkcipher_req_ctx *req_ctx = ctx_p->sync_ctx;
-	unsigned int ivsize = crypto_blkcipher_ivsize(blk_tfm);
-
-	req_ctx->backup_info = desc->info;
-	req_ctx->is_giv = false;
-
-	return ssi_blkcipher_process(tfm, req_ctx, dst, src, nbytes, desc->info, ivsize, NULL, DRV_CRYPTO_DIRECTION_DECRYPT);
-}
-#endif
-
 /* Async wrap functions */
 
 static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
 {
 	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
-	
+
 	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
 
 	return ssi_blkcipher_init(tfm);
 }
 
-
-static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 
-				const u8 *key, 
+static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+				const u8 *key,
 				unsigned int keylen)
 {
 	return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
@@ -1026,7 +902,6 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
 	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }
 
-
 /* DX Block cipher alg */
 static struct ssi_alg_template blkcipher_algs[] = {
 /* Async template */
@@ -1047,7 +922,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 	{
 		.name = "xts(aes)",
@@ -1064,7 +938,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
-	.synchronous = false,
 	},
 	{
 		.name = "xts(aes)",
@@ -1081,7 +954,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
-	.synchronous = false,
 	},
 #endif /*SSI_CC_HAS_AES_XTS*/
 #if SSI_CC_HAS_AES_ESSIV
@@ -1100,7 +972,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ESSIV,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 	{
 		.name = "essiv(aes)",
@@ -1117,7 +988,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ESSIV,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 	{
 		.name = "essiv(aes)",
@@ -1134,7 +1004,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ESSIV,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 #endif /*SSI_CC_HAS_AES_ESSIV*/
 #if SSI_CC_HAS_AES_BITLOCKER
@@ -1153,7 +1022,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_BITLOCKER,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 	{
 		.name = "bitlocker(aes)",
@@ -1170,7 +1038,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_BITLOCKER,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 	{
 		.name = "bitlocker(aes)",
@@ -1187,7 +1054,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_BITLOCKER,
 		.flow_mode = S_DIN_to_AES,
-		.synchronous = false,
 	},
 #endif /*SSI_CC_HAS_AES_BITLOCKER*/
 	{
@@ -1205,7 +1071,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ECB,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 	{
 		.name = "cbc(aes)",
@@ -1219,10 +1084,9 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
-			},
+		},
 		.cipher_mode = DRV_CIPHER_CBC,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 	{
 		.name = "ofb(aes)",
@@ -1239,7 +1103,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_OFB,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 #if SSI_CC_HAS_AES_CTS
 	{
@@ -1257,7 +1120,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 #endif
 	{
@@ -1275,7 +1137,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_CTR,
 		.flow_mode = S_DIN_to_AES,
-        .synchronous = false,
 	},
 	{
 		.name = "cbc(des3_ede)",
@@ -1292,7 +1153,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_CBC,
 		.flow_mode = S_DIN_to_DES,
-        .synchronous = false,
 	},
 	{
 		.name = "ecb(des3_ede)",
@@ -1309,7 +1169,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ECB,
 		.flow_mode = S_DIN_to_DES,
-        .synchronous = false,
 	},
 	{
 		.name = "cbc(des)",
@@ -1326,7 +1185,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_CBC,
 		.flow_mode = S_DIN_to_DES,
-        .synchronous = false,
 	},
 	{
 		.name = "ecb(des)",
@@ -1343,7 +1201,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_CIPHER_ECB,
 		.flow_mode = S_DIN_to_DES,
-        .synchronous = false,
 	},
 #if SSI_CC_HAS_MULTI2
 	{
@@ -1361,7 +1218,6 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_MULTI2_CBC,
 		.flow_mode = S_DIN_to_MULTI2,
-        .synchronous = false,
 	},
 	{
 		.name = "ofb(multi2)",
@@ -1378,12 +1234,11 @@ static struct ssi_alg_template blkcipher_algs[] = {
 			},
 		.cipher_mode = DRV_MULTI2_OFB,
 		.flow_mode = S_DIN_to_MULTI2,
-        .synchronous = false,
 	},
 #endif /*SSI_CC_HAS_MULTI2*/
 };
 
-static 
+static
 struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
 {
 	struct ssi_crypto_alg *t_alg;
@@ -1405,19 +1260,13 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *templa
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
 	alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
-	
-	alg->cra_init = template->synchronous? ssi_sblkcipher_init:ssi_ablkcipher_init;
-	alg->cra_exit = template->synchronous? ssi_sblkcipher_exit:ssi_blkcipher_exit;
-	alg->cra_type = template->synchronous? &crypto_blkcipher_type:&crypto_ablkcipher_type;
-	if(template->synchronous) {
-		alg->cra_blkcipher = template->template_sblkcipher;
-		alg->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+
+	alg->cra_init = ssi_ablkcipher_init;
+	alg->cra_exit = ssi_blkcipher_exit;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_ablkcipher = template->template_ablkcipher;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 				template->type;
-	} else {
-		alg->cra_ablkcipher = template->template_ablkcipher;
-		alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-				template->type;
-	}
 
 	t_alg->cipher_mode = template->cipher_mode;
 	t_alg->flow_mode = template->flow_mode;
@@ -1428,12 +1277,13 @@ struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *templa
 int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
 {
 	struct ssi_crypto_alg *t_alg, *n;
-	struct ssi_blkcipher_handle *blkcipher_handle = 
+	struct ssi_blkcipher_handle *blkcipher_handle =
 						drvdata->blkcipher_handle;
 	struct device *dev;
+
 	dev = &drvdata->plat_dev->dev;
 
-	if (blkcipher_handle != NULL) {
+	if (blkcipher_handle) {
 		/* Remove registered algs */
 		list_for_each_entry_safe(t_alg, n,
 				&blkcipher_handle->blkcipher_alg_list,
@@ -1448,8 +1298,6 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
 	return 0;
 }
 
-
-
 int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 {
 	struct ssi_blkcipher_handle *ablkcipher_handle;
@@ -1459,7 +1307,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 
 	ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle),
 		GFP_KERNEL);
-	if (ablkcipher_handle == NULL)
+	if (!ablkcipher_handle)
 		return -ENOMEM;
 
 	drvdata->blkcipher_handle = ablkcipher_handle;
@@ -1489,9 +1337,9 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
 			kfree(t_alg);
 			goto fail0;
 		} else {
-			list_add_tail(&t_alg->entry, 
+			list_add_tail(&t_alg->entry,
 				      &ablkcipher_handle->blkcipher_alg_list);
-			SSI_LOG_DEBUG("Registered %s\n", 
+			SSI_LOG_DEBUG("Registered %s\n",
 					t_alg->crypto_alg.cra_driver_name);
 		}
 	}
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
index ba4eb7c..296b375 100644
--- a/drivers/staging/ccree/ssi_cipher.h
+++ b/drivers/staging/ccree/ssi_cipher.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_cipher.h
-   ARM CryptoCell Cipher Crypto API
+ * ARM CryptoCell Cipher Crypto API
  */
 
 #ifndef __SSI_CIPHER_H__
@@ -26,7 +26,6 @@
 #include "ssi_driver.h"
 #include "ssi_buffer_mgr.h"
 
-
 /* Crypto cipher flags */
 #define CC_CRYPTO_CIPHER_KEY_KFDE0    (1 << 0)
 #define CC_CRYPTO_CIPHER_KEY_KFDE1    (1 << 1)
@@ -36,21 +35,18 @@
 
 #define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
 
-
 struct blkcipher_req_ctx {
 	struct async_gen_req_ctx gen_ctx;
 	enum ssi_req_dma_buf_type dma_buf_type;
-	uint32_t in_nents;
-	uint32_t in_mlli_nents;
-	uint32_t out_nents;
-	uint32_t out_mlli_nents;
-	uint8_t *backup_info; /*store iv for generated IV flow*/
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /*store iv for generated IV flow*/
 	bool is_giv;
 	struct mlli_params mlli_params;
 };
 
-
-
 int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata);
 
 int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
@@ -63,7 +59,6 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
 				CRYPTO_ALG_BULK_DU_4096)
 #endif /* CRYPTO_ALG_BULK_MASK */
 
-
 #ifdef CRYPTO_TFM_REQ_HW_KEY
 
 static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
@@ -71,7 +66,7 @@ static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
 	return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
 }
 
-#else 
+#else
 
 struct arm_hw_key_info {
 	int hw_key1;
@@ -85,5 +80,4 @@ static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
 
 #endif /* CRYPTO_TFM_REQ_HW_KEY */
 
-
 #endif /*__SSI_CIPHER_H__*/
diff --git a/drivers/staging/ccree/ssi_config.h b/drivers/staging/ccree/ssi_config.h
index d96a543..ff7597c 100644
--- a/drivers/staging/ccree/ssi_config.h
+++ b/drivers/staging/ccree/ssi_config.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_config.h
-   Definitions for ARM CryptoCell Linux Crypto Driver
+ * Definitions for ARM CryptoCell Linux Crypto Driver
  */
 
 #ifndef __SSI_CONFIG_H__
@@ -23,39 +23,14 @@
 
 #include <linux/version.h>
 
-#define DISABLE_COHERENT_DMA_OPS
 //#define FLUSH_CACHE_ALL
 //#define COMPLETION_DELAY
 //#define DX_DUMP_DESCS
 // #define DX_DUMP_BYTES
 // #define CC_DEBUG
 #define ENABLE_CC_SYSFS		/* Enable sysfs interface for debugging REE driver */
-//#define ENABLE_CC_CYCLE_COUNT
 //#define DX_IRQ_DELAY 100000
 #define DMA_BIT_MASK_LEN	48	/* was 32 bit, but for juno's sake it was enlarged to 48 bit */
 
-#if defined ENABLE_CC_CYCLE_COUNT && defined ENABLE_CC_SYSFS
-#define CC_CYCLE_COUNT
-#endif
-
-
-#if defined (CONFIG_ARM64)	// TODO currently only this mode was test on Juno (which is ARM64), need to enable coherent also.
-#define DISABLE_COHERENT_DMA_OPS
-#endif
-
-/* Define the CryptoCell DMA cache coherency signals configuration */
-#if defined (DISABLE_COHERENT_DMA_OPS)
-	/* Software Controlled Cache Coherency (SCCC) */ 
-	#define SSI_CACHE_PARAMS (0x000)
-	/* CC attached to NONE-ACP such as HPP/ACE/AMBA4.
-	 * The customer is responsible to enable/disable this feature
-	 * according to his platform type. */
-	#define DX_HAS_ACP 0
-#else
-	#define SSI_CACHE_PARAMS (0xEEE)
-	/* CC attached to ACP */
-	#define DX_HAS_ACP 1
-#endif
-
 #endif /*__DX_CONFIG_H__*/
 
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index bc19adc..78709b92 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -57,6 +57,8 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 #include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
 
 #include "ssi_config.h"
 #include "ssi_driver.h"
@@ -71,15 +73,14 @@
 #include "ssi_pm.h"
 #include "ssi_fips_local.h"
 
-
 #ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size)
+void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
 {
-	int i , line_offset = 0, ret = 0;
-	const uint8_t *cur_byte;
+	int i, line_offset = 0, ret = 0;
+	const u8 *cur_byte;
 	char line_buf[80];
 
-	if (the_array == NULL) {
+	if (!the_array) {
 		SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
 		return;
 	}
@@ -87,17 +88,17 @@ void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long s
 	ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
 		name, size);
 	if (ret < 0) {
-		SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
+		SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
 		return;
 	}
 	line_offset = ret;
-	for (i = 0 , cur_byte = the_array;
+	for (i = 0, cur_byte = the_array;
 	     (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
 			ret = snprintf(line_buf + line_offset,
 					sizeof(line_buf) - line_offset,
 					"0x%02X ", *cur_byte);
 		if (ret < 0) {
-			SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
+			SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
 			return;
 		}
 		line_offset += ret;
@@ -116,12 +117,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 {
 	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
 	void __iomem *cc_base = drvdata->cc_base;
-	uint32_t irr;
-	uint32_t imr;
-	DECL_CYCLE_COUNT_RESOURCES;
+	u32 irr;
+	u32 imr;
 
 	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
-	START_CYCLE_COUNT();
 
 	/* read the interrupt status */
 	irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
@@ -154,12 +153,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 #endif
 	/* AXI error interrupt */
 	if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
-		uint32_t axi_err;
-		
+		u32 axi_err;
+
 		/* Read the AXI error ID */
 		axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
 		SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
-		
+
 		irr &= ~SSI_AXI_ERR_IRQ_MASK;
 	}
 
@@ -168,15 +167,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 		/* Just warning */
 	}
 
-	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
-	START_CYCLE_COUNT_AT(drvdata->isr_exit_cycles);
-
 	return IRQ_HANDLED;
 }
 
 int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
 {
-	unsigned int val;
+	unsigned int val, cache_params;
 	void __iomem *cc_base = drvdata->cc_base;
 
 	/* Unmask all AXI interrupt sources AXI_CFG1 register */
@@ -192,7 +188,7 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
 	/* Unmask relevant interrupt cause */
 	val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
 	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
-		
+
 #ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
 #ifdef DX_IRQ_DELAY
 	/* Set CC IRQ delay */
@@ -205,15 +201,20 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
 	}
 #endif
 
+	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
+
 	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
-	if (is_probe == true) {
+
+	if (is_probe)
 		SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
-	}
-	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS), SSI_CACHE_PARAMS);
+
+	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
+			      cache_params);
 	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
-	if (is_probe == true) {
-		SSI_LOG_INFO("Cache params current: 0x%08X  (expected: 0x%08X)\n", val, SSI_CACHE_PARAMS);
-	}
+
+	if (is_probe)
+		SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
+			     val, cache_params);
 
 	return 0;
 }
@@ -224,15 +225,20 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	void __iomem *cc_base = NULL;
 	bool irq_registered = false;
 	struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
-	uint32_t signature_val;
+	struct device *dev = &plat_dev->dev;
+	struct device_node *np = dev->of_node;
+	u32 signature_val;
 	int rc = 0;
 
-	if (unlikely(new_drvdata == NULL)) {
+	if (unlikely(!new_drvdata)) {
 		SSI_LOG_ERR("Failed to allocate drvdata");
 		rc = -ENOMEM;
 		goto init_cc_res_err;
 	}
 
+	new_drvdata->clk = of_clk_get(np, 0);
+	new_drvdata->coherent = of_dma_is_coherent(np);
+
 	/*Initialize inflight counter used in dx_ablkcipher_secure_complete used for count of BYSPASS blocks operations*/
 	new_drvdata->inflight_counter = 0;
 
@@ -240,7 +246,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	/* Get device resources */
 	/* First CC registers space */
 	new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
-	if (unlikely(new_drvdata->res_mem == NULL)) {
+	if (unlikely(!new_drvdata->res_mem)) {
 		SSI_LOG_ERR("Failed getting IO memory resource\n");
 		rc = -ENODEV;
 		goto init_cc_res_err;
@@ -251,14 +257,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		(unsigned long long)new_drvdata->res_mem->end);
 	/* Map registers space */
 	req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
-	if (unlikely(req_mem_cc_regs == NULL)) {
+	if (unlikely(!req_mem_cc_regs)) {
 		SSI_LOG_ERR("Couldn't allocate registers memory region at "
 			     "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
 		rc = -EBUSY;
 		goto init_cc_res_err;
 	}
 	cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
-	if (unlikely(cc_base == NULL)) {
+	if (unlikely(!cc_base)) {
 		SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
 			(unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
 		rc = -ENOMEM;
@@ -266,11 +272,10 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	}
 	SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
 	new_drvdata->cc_base = cc_base;
-	
 
 	/* Then IRQ */
 	new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
-	if (unlikely(new_drvdata->res_irq == NULL)) {
+	if (unlikely(!new_drvdata->res_irq)) {
 		SSI_LOG_ERR("Failed getting IRQ resource\n");
 		rc = -ENODEV;
 		goto init_cc_res_err;
@@ -291,9 +296,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 	new_drvdata->plat_dev = plat_dev;
 
-	if(new_drvdata->plat_dev->dev.dma_mask == NULL)
+	rc = cc_clk_on(new_drvdata);
+	if (rc)
+		goto init_cc_res_err;
+
+	if (!new_drvdata->plat_dev->dev.dma_mask)
 	{
-		new_drvdata->plat_dev->dev.dma_mask = & new_drvdata->plat_dev->dev.coherent_dma_mask;
+		new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
 	}
 	if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
 	{
@@ -304,7 +313,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
 	if (signature_val != DX_DEV_SIGNATURE) {
 		SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
-			signature_val, (uint32_t)DX_DEV_SIGNATURE);
+			signature_val, (u32)DX_DEV_SIGNATURE);
 		rc = -EINVAL;
 		goto init_cc_res_err;
 	}
@@ -396,8 +405,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
 
 init_cc_res_err:
 	SSI_LOG_ERR("Freeing CC HW resources!\n");
-	
-	if (new_drvdata != NULL) {
+
+	if (new_drvdata) {
 		ssi_aead_free(new_drvdata);
 		ssi_hash_free(new_drvdata);
 		ssi_ablkcipher_free(new_drvdata);
@@ -410,8 +419,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
 #ifdef ENABLE_CC_SYSFS
 		ssi_sysfs_fini();
 #endif
-	
-		if (req_mem_cc_regs != NULL) {
+
+		if (req_mem_cc_regs) {
 			if (irq_registered) {
 				free_irq(new_drvdata->res_irq->start, new_drvdata);
 				new_drvdata->res_irq = NULL;
@@ -432,9 +441,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
 void fini_cc_regs(struct ssi_drvdata *drvdata)
 {
 	/* Mask all interrupts */
-	WRITE_REGISTER(drvdata->cc_base + 
+	WRITE_REGISTER(drvdata->cc_base +
 		       CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
-
 }
 
 static void cleanup_cc_resources(struct platform_device *plat_dev)
@@ -442,9 +450,9 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	struct ssi_drvdata *drvdata =
 		(struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);
 
-        ssi_aead_free(drvdata);
-        ssi_hash_free(drvdata);
-        ssi_ablkcipher_free(drvdata);
+	ssi_aead_free(drvdata);
+	ssi_hash_free(drvdata);
+	ssi_ablkcipher_free(drvdata);
 	ssi_ivgen_fini(drvdata);
 	ssi_power_mgr_fini(drvdata);
 	ssi_buffer_mgr_fini(drvdata);
@@ -455,15 +463,12 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	ssi_sysfs_fini();
 #endif
 
-	/* Mask all interrupts */
-	WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_IMR),
-		0xFFFFFFFF);
+	fini_cc_regs(drvdata);
+	cc_clk_off(drvdata);
 	free_irq(drvdata->res_irq->start, drvdata);
 	drvdata->res_irq = NULL;
 
-	fini_cc_regs(drvdata);
-
-	if (drvdata->cc_base != NULL) {
+	if (drvdata->cc_base) {
 		iounmap(drvdata->cc_base);
 		release_mem_region(drvdata->res_mem->start,
 			resource_size(drvdata->res_mem));
@@ -475,11 +480,38 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	dev_set_drvdata(&plat_dev->dev, NULL);
 }
 
+int cc_clk_on(struct ssi_drvdata *drvdata)
+{
+	struct clk *clk = drvdata->clk;
+	int rc;
+
+	if (IS_ERR(clk))
+		/* Not all devices have a clock associated with CCREE  */
+		return 0;
+
+	rc = clk_prepare_enable(clk);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+void cc_clk_off(struct ssi_drvdata *drvdata)
+{
+	struct clk *clk = drvdata->clk;
+
+	if (IS_ERR(clk))
+		/* Not all devices have a clock associated with CCREE */
+		return;
+
+	clk_disable_unprepare(clk);
+}
+
 static int cc7x_probe(struct platform_device *plat_dev)
 {
 	int rc;
 #if defined(CONFIG_ARM) && defined(CC_DEBUG)
-	uint32_t ctr, cacheline_size;
+	u32 ctr, cacheline_size;
 
 	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
 	cacheline_size =  4 << ((ctr >> 16) & 0xf);
@@ -489,7 +521,7 @@ static int cc7x_probe(struct platform_device *plat_dev)
 	asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
 	SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
 		     " Part 0x%03X, Rev r%dp%d\n",
-		(ctr>>24), (ctr>>16)&0xF, (ctr>>4)&0xFFF, (ctr>>20)&0xF, ctr&0xF);
+		(ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF, (ctr >> 20) & 0xF, ctr & 0xF);
 #endif
 
 	/* Map registers space */
@@ -505,29 +537,26 @@ static int cc7x_probe(struct platform_device *plat_dev)
 static int cc7x_remove(struct platform_device *plat_dev)
 {
 	SSI_LOG_DEBUG("Releasing cc7x resources...\n");
-	
+
 	cleanup_cc_resources(plat_dev);
 
 	SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
-#ifdef ENABLE_CYCLE_COUNT
-	display_all_stat_db();
-#endif
-	
+
 	return 0;
 }
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 static struct dev_pm_ops arm_cc7x_driver_pm = {
 	SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
 };
 #endif
 
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 #define	DX_DRIVER_RUNTIME_PM	(&arm_cc7x_driver_pm)
 #else
 #define	DX_DRIVER_RUNTIME_PM	NULL
 #endif
 
-
 #ifdef CONFIG_OF
 static const struct of_device_id arm_cc7x_dev_of_match[] = {
 	{.compatible = "arm,cryptocell-712-ree"},
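The ssi_driver.c hunks above replace the old compile-time cache-coherency and cycle-count configuration with runtime discovery: probe now reads an optional clock with of_clk_get() and the DMA coherency flag with of_dma_is_coherent(), and init_cc_regs() programs AXIM_CACHE_PARAMS from drvdata->coherent instead of the removed SSI_CACHE_PARAMS constant. A minimal sketch of the optional-clock pattern that the new cc_clk_on()/cc_clk_off() helpers follow is shown below; the helper name is illustrative and not part of the patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

/*
 * Illustrative sketch only: mirrors the optional-clock handling added above.
 * of_clk_get() returns an ERR_PTR when the node has no "clocks" property;
 * the driver treats that as "no clock to manage", not as a probe failure.
 */
static int example_enable_optional_clk(struct device_node *np, struct clk **out)
{
	struct clk *clk = of_clk_get(np, 0);

	*out = clk;
	if (IS_ERR(clk))
		return 0;			/* clock is optional for this device */

	return clk_prepare_enable(clk);		/* gate the clock on before MMIO access */
}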
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index 891958b..c1ed61f 100644
--- a/drivers/staging/ccree/ssi_driver.h
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_driver.h
-   ARM CryptoCell Linux Crypto Driver
+ * ARM CryptoCell Linux Crypto Driver
  */
 
 #ifndef __SSI_DRIVER_H__
@@ -36,29 +36,27 @@
 #include <crypto/authenc.h>
 #include <crypto/hash.h>
 #include <linux/version.h>
-
-#ifndef INT32_MAX /* Missing in Linux kernel */
-#define INT32_MAX 0x7FFFFFFFL
-#endif
+#include <linux/clk.h>
 
 /* Registers definitions from shared/hw/ree_include */
 #include "dx_reg_base_host.h"
 #include "dx_host.h"
-#define DX_CC_HOST_VIRT /* must be defined before including dx_cc_regs.h */
-#include "cc_hw_queue_defs.h"
 #include "cc_regs.h"
 #include "dx_reg_common.h"
 #include "cc_hal.h"
-#include "ssi_sram_mgr.h"
 #define CC_SUPPORT_SHA DX_DEV_SHA_MAX
 #include "cc_crypto_ctx.h"
 #include "ssi_sysfs.h"
 #include "hash_defs.h"
 #include "ssi_fips_local.h"
+#include "cc_hw_queue_defs.h"
+#include "ssi_sram_mgr.h"
 
 #define DRV_MODULE_VERSION "3.0"
 
 #define SSI_DEV_NAME_STR "cc715ree"
+#define CC_COHERENT_CACHE_PARAMS 0xEEE
+
 #define SSI_CC_HAS_AES_CCM 1
 #define SSI_CC_HAS_AES_GCM 1
 #define SSI_CC_HAS_AES_XTS 1
@@ -89,12 +87,13 @@
 /* Definitions for HW descriptors DIN/DOUT fields */
 #define NS_BIT 1
 #define AXI_ID 0
-/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID 
-   field in the HW descriptor. The DMA engine +8 that value. */
+/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
+ * field in the HW descriptor. The DMA engine +8 that value.
+ */
 
 /* Logging macros */
 #define SSI_LOG(level, format, ...) \
-	printk(level "cc715ree::%s: " format , __func__, ##__VA_ARGS__)
+	printk(level "cc715ree::%s: " format, __func__, ##__VA_ARGS__)
 #define SSI_LOG_ERR(format, ...) SSI_LOG(KERN_ERR, format, ##__VA_ARGS__)
 #define SSI_LOG_WARNING(format, ...) SSI_LOG(KERN_WARNING, format, ##__VA_ARGS__)
 #define SSI_LOG_NOTICE(format, ...) SSI_LOG(KERN_NOTICE, format, ##__VA_ARGS__)
@@ -108,21 +107,18 @@
 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
 #define MAX(a, b) (((a) > (b)) ? (a) : (b))
 
-#define SSI_MAX_IVGEN_DMA_ADDRESSES 	3
+#define SSI_MAX_IVGEN_DMA_ADDRESSES	3
 struct ssi_crypto_req {
 	void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base);
 	void *user_arg;
-	dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES]; /* For the first 'ivgen_dma_addr_len' addresses of this array,
-					 generated IV would be placed in it by send_request().
-					 Same generated IV for all addresses! */
+	dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES];
+	/* For the first 'ivgen_dma_addr_len' addresses of this array,
+	 * generated IV would be placed in it by send_request().
+	 * Same generated IV for all addresses!
+	 */
 	unsigned int ivgen_dma_addr_len; /* Amount of 'ivgen_dma_addr' elements to be filled. */
 	unsigned int ivgen_size; /* The generated IV size required, 8/16 B allowed. */
 	struct completion seq_compl; /* request completion */
-#ifdef ENABLE_CYCLE_COUNT
-	enum stat_op op_type;
-	cycles_t submit_cycle;
-	bool is_monitored_p;
-#endif
 };
 
 /**
@@ -136,15 +132,13 @@ struct ssi_drvdata {
 	struct resource *res_mem;
 	struct resource *res_irq;
 	void __iomem *cc_base;
-#ifdef DX_BASE_ENV_REGS
-	void __iomem *env_base; /* ARM CryptoCell development FPGAs only */
-#endif
 	unsigned int irq;
-	uint32_t irq_mask;
-	uint32_t fw_ver;
+	u32 irq_mask;
+	u32 fw_ver;
 	/* Calibration time of start/stop
-	*  monitor descriptors */
-	uint32_t monitor_null_cycles;
+	 * monitor descriptors
+	 */
+	u32 monitor_null_cycles;
 	struct platform_device *plat_dev;
 	ssi_sram_addr_t mlli_sram_addr;
 	struct completion icache_setup_completion;
@@ -156,12 +150,9 @@ struct ssi_drvdata {
 	void *fips_handle;
 	void *ivgen_handle;
 	void *sram_mgr_handle;
-
-#ifdef ENABLE_CYCLE_COUNT
-	cycles_t isr_exit_cycles; /* Save for isr-to-tasklet latency */
-#endif
-	uint32_t inflight_counter;
-
+	u32 inflight_counter;
+	struct clk *clk;
+	bool coherent;
 };
 
 struct ssi_crypto_alg {
@@ -189,7 +180,6 @@ struct ssi_alg_template {
 	int cipher_mode;
 	int flow_mode; /* Note: currently, refers to the cipher mode only. */
 	int auth_mode;
-	bool synchronous;
 	struct ssi_drvdata *drvdata;
 };
 
@@ -199,30 +189,16 @@ struct async_gen_req_ctx {
 };
 
 #ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size);
+void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
 #else
 #define dump_byte_array(name, array, size) do {	\
 } while (0);
 #endif
 
-#ifdef ENABLE_CYCLE_COUNT
-#define DECL_CYCLE_COUNT_RESOURCES cycles_t _last_cycles_read
-#define START_CYCLE_COUNT() do { _last_cycles_read = get_cycles(); } while (0)
-#define END_CYCLE_COUNT(_stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _last_cycles_read)
-#define GET_START_CYCLE_COUNT() _last_cycles_read
-#define START_CYCLE_COUNT_AT(_var) do { _var = get_cycles(); } while(0)
-#define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _var)
-#else
-#define DECL_CYCLE_COUNT_RESOURCES 
-#define START_CYCLE_COUNT() do { } while (0)
-#define END_CYCLE_COUNT(_stat_op_type, _stat_phase) do { } while (0)
-#define GET_START_CYCLE_COUNT() 0
-#define START_CYCLE_COUNT_AT(_var) do { } while (0)
-#define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) do { } while (0)
-#endif /*ENABLE_CYCLE_COUNT*/
-
 int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
 void fini_cc_regs(struct ssi_drvdata *drvdata);
+int cc_clk_on(struct ssi_drvdata *drvdata);
+void cc_clk_off(struct ssi_drvdata *drvdata);
 
 #endif /*__SSI_DRIVER_H__*/
 
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
index 50f7485..fdc40f3 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -1,42 +1,39 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-
 /**************************************************************
-This file defines the driver FIPS APIs                                                             *
-***************************************************************/
+ * This file defines the driver FIPS APIs                     *
+ **************************************************************/
 
 #include <linux/module.h>
 #include "ssi_fips.h"
 
-
-extern int ssi_fips_ext_get_state(ssi_fips_state_t *p_state);
-extern int ssi_fips_ext_get_error(ssi_fips_error_t *p_err);
+extern int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state);
+extern int ssi_fips_ext_get_error(enum cc_fips_error *p_err);
 
 /*
-This function returns the REE FIPS state.  
-It should be called by kernel module. 
-*/
-int ssi_fips_get_state(ssi_fips_state_t *p_state)
+ * This function returns the REE FIPS state.
+ * It should be called by kernel module.
+ */
+int ssi_fips_get_state(enum cc_fips_state_t *p_state)
 {
-        int rc = 0;
+	int rc = 0;
 
-	if (p_state == NULL) {
+	if (!p_state)
 		return -EINVAL;
-	}
 
 	rc = ssi_fips_ext_get_state(p_state);
 
@@ -46,16 +43,15 @@ int ssi_fips_get_state(ssi_fips_state_t *p_state)
 EXPORT_SYMBOL(ssi_fips_get_state);
 
 /*
-This function returns the REE FIPS error.  
-It should be called by kernel module. 
-*/
-int ssi_fips_get_error(ssi_fips_error_t *p_err)
+ * This function returns the REE FIPS error.
+ * It should be called by kernel module.
+ */
+int ssi_fips_get_error(enum cc_fips_error *p_err)
 {
-        int rc = 0;
+	int rc = 0;
 
-	if (p_err == NULL) {
+	if (!p_err)
 		return -EINVAL;
-	}
 
 	rc = ssi_fips_ext_get_error(p_err);
 
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
index 19bcdeb..4f5c6a9 100644
--- a/drivers/staging/ccree/ssi_fips.h
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,26 +17,19 @@
 #ifndef __SSI_FIPS_H__
 #define __SSI_FIPS_H__
 
+/*!
+ * @file
+ * @brief This file contains FIPS related definitions and APIs.
+ */
 
-#ifndef INT32_MAX /* Missing in Linux kernel */
-#define INT32_MAX 0x7FFFFFFFL
-#endif
+enum cc_fips_state {
+	CC_FIPS_STATE_NOT_SUPPORTED = 0,
+	CC_FIPS_STATE_SUPPORTED,
+	CC_FIPS_STATE_ERROR,
+	CC_FIPS_STATE_RESERVE32B = S32_MAX
+};
 
-
-/*! 
-@file
-@brief This file contains FIPS related defintions and APIs.
-*/
-
-typedef enum ssi_fips_state {
-        CC_FIPS_STATE_NOT_SUPPORTED = 0,
-        CC_FIPS_STATE_SUPPORTED,
-        CC_FIPS_STATE_ERROR,
-        CC_FIPS_STATE_RESERVE32B = INT32_MAX
-} ssi_fips_state_t;
-
-
-typedef enum ssi_fips_error {
+enum cc_fips_error {
 	CC_REE_FIPS_ERROR_OK = 0,
 	CC_REE_FIPS_ERROR_GENERAL,
 	CC_REE_FIPS_ERROR_FROM_TEE,
@@ -58,13 +51,11 @@ typedef enum ssi_fips_error {
 	CC_REE_FIPS_ERROR_HMAC_SHA256_PUT,
 	CC_REE_FIPS_ERROR_HMAC_SHA512_PUT,
 	CC_REE_FIPS_ERROR_ROM_CHECKSUM,
-	CC_REE_FIPS_ERROR_RESERVE32B = INT32_MAX
-} ssi_fips_error_t;
+	CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX
+};
 
-
-
-int ssi_fips_get_state(ssi_fips_state_t *p_state);
-int ssi_fips_get_error(ssi_fips_error_t *p_err);
+int ssi_fips_get_state(enum cc_fips_state *p_state);
+int ssi_fips_get_error(enum cc_fips_error *p_err);
 
 #endif  /*__SSI_FIPS_H__*/
 
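The ssi_fips.h hunks above drop the driver-local INT32_MAX fallback and the ssi_fips_state_t/ssi_fips_error_t typedefs in favour of plain enums terminated with the kernel's S32_MAX. A hedged usage sketch of the reworked query API follows; the caller function is hypothetical and only illustrates the new enum types and kernel-style error handling.

#include <linux/errno.h>
#include "ssi_fips.h"

/* Hypothetical caller: checks the REE FIPS state via the new enum API. */
static int example_query_fips(void)
{
	enum cc_fips_state state;
	enum cc_fips_error err;
	int rc;

	rc = ssi_fips_get_state(&state);	/* returns -EINVAL for a NULL pointer */
	if (rc)
		return rc;

	rc = ssi_fips_get_error(&err);
	if (rc)
		return rc;

	return (state == CC_FIPS_STATE_ERROR) ? -EIO : 0;
}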
diff --git a/drivers/staging/ccree/ssi_fips_data.h b/drivers/staging/ccree/ssi_fips_data.h
index 3fddd8f..c41671d 100644
--- a/drivers/staging/ccree/ssi_fips_data.h
+++ b/drivers/staging/ccree/ssi_fips_data.h
@@ -1,67 +1,66 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
-The test vectors were taken from:
-
-* AES
-NIST Special Publication 800-38A 2001 Edition
-Recommendation for Block Cipher Modes of Operation
-http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
-Appendix F: Example Vectors for Modes of Operation of the AES
-
-* AES CTS
-Advanced Encryption Standard (AES) Encryption for Kerberos 5
-February 2005
-https://tools.ietf.org/html/rfc3962#appendix-B
-B.  Sample Test Vectors
-
-* AES XTS
-http://csrc.nist.gov/groups/STM/cavp/#08
-http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
-
-* AES CMAC
-http://csrc.nist.gov/groups/STM/cavp/index.html#07
-http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
- 
-* AES-CCM
-http://csrc.nist.gov/groups/STM/cavp/#07
-http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
-
-* AES-GCM
-http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
-
-* Triple-DES
-NIST Special Publication 800-67 January 2012
-Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher
-http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf
-APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS
-and
-http://csrc.nist.gov/groups/STM/cavp/#01
-http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
-
-* HASH
-http://csrc.nist.gov/groups/STM/cavp/#03
-http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip 
- 
-* HMAC 
-http://csrc.nist.gov/groups/STM/cavp/#07
-http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip 
- 
-*/
+ * The test vectors were taken from:
+ *
+ * * AES
+ * NIST Special Publication 800-38A 2001 Edition
+ * Recommendation for Block Cipher Modes of Operation
+ * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
+ * Appendix F: Example Vectors for Modes of Operation of the AES
+ *
+ * * AES CTS
+ * Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ * February 2005
+ * https://tools.ietf.org/html/rfc3962#appendix-B
+ * B.  Sample Test Vectors
+ *
+ * * AES XTS
+ * http://csrc.nist.gov/groups/STM/cavp/#08
+ * http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
+ *
+ * * AES CMAC
+ * http://csrc.nist.gov/groups/STM/cavp/index.html#07
+ * http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
+ *
+ * * AES-CCM
+ * http://csrc.nist.gov/groups/STM/cavp/#07
+ * http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
+ *
+ * * AES-GCM
+ * http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
+ *
+ * * Triple-DES
+ * NIST Special Publication 800-67 January 2012
+ * Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher
+ * http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf
+ * APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS
+ * and
+ * http://csrc.nist.gov/groups/STM/cavp/#01
+ * http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
+ *
+ * * HASH
+ * http://csrc.nist.gov/groups/STM/cavp/#03
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
+ *
+ * * HMAC
+ * http://csrc.nist.gov/groups/STM/cavp/#07
+ * http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
+ */
 
 /* NIST AES */
 #define AES_128_BIT_KEY_SIZE    16
@@ -86,19 +85,18 @@ and
 
 #define NIST_AES_CBC_IV         { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
 #define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d }
-#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 } 
-#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 } 
+#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
+#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
 
 #define NIST_AES_OFB_IV         { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
 #define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a }
-#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 } 
+#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
 #define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 }
 
 #define NIST_AES_CTR_IV         { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }
 #define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce }
-#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b } 
-#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 } 
-
+#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
+#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
 
 #define RFC3962_AES_128_KEY            { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
 #define RFC3962_AES_VECTOR_SIZE        17
@@ -106,13 +104,12 @@ and
 #define RFC3962_AES_CBC_CTS_IV         { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
 #define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 }
 
-
 #define NIST_AES_256_XTS_KEY            { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \
 					  0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
 #define NIST_AES_256_XTS_IV             { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
 #define NIST_AES_256_XTS_VECTOR_SIZE    16
-#define NIST_AES_256_XTS_PLAIN          { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c } 
-#define NIST_AES_256_XTS_CIPHER         { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 } 
+#define NIST_AES_256_XTS_PLAIN          { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
+#define NIST_AES_256_XTS_CIPHER         { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
 
 #define NIST_AES_512_XTS_KEY            { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \
 					  0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \
@@ -121,10 +118,9 @@ and
 #define NIST_AES_512_XTS_IV             { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64,  }
 #define NIST_AES_512_XTS_VECTOR_SIZE    32
 #define NIST_AES_512_XTS_PLAIN          { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \
-					  0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e } 
+					  0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
 #define NIST_AES_512_XTS_CIPHER         { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
-					  0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb } 
-
+					  0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
 
 /* NIST AES-CMAC */
 #define NIST_AES_128_CMAC_KEY           { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 }
@@ -148,27 +144,25 @@ and
 #define NIST_AES_256_CMAC_VECTOR_SIZE   16
 #define NIST_AES_256_CMAC_OUTPUT_SIZE   10
 
-
 /* NIST TDES */
 #define TDES_NUM_OF_KEYS                3
 #define NIST_TDES_VECTOR_SIZE           8
 #define NIST_TDES_IV_SIZE               8
 
-#define NIST_TDES_ECB_IV             	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define NIST_TDES_ECB_IV		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
 
 #define NIST_TDES_ECB3_KEY		{ 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, \
 					  0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, \
 					  0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23 }
-#define NIST_TDES_ECB3_PLAIN_DATA    	{ 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 }
-#define NIST_TDES_ECB3_CIPHER        	{ 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f }
+#define NIST_TDES_ECB3_PLAIN_DATA	{ 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 }
+#define NIST_TDES_ECB3_CIPHER		{ 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f }
 
-#define NIST_TDES_CBC3_IV            	{ 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 }
+#define NIST_TDES_CBC3_IV		{ 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 }
 #define NIST_TDES_CBC3_KEY		{ 0xe9, 0xda, 0x37, 0xf8, 0xdc, 0x97, 0x6d, 0x5b, \
 					  0xb6, 0x8c, 0x04, 0xe3, 0xec, 0x98, 0x20, 0x15, \
 					  0xf4, 0x0e, 0x08, 0xb5, 0x97, 0x29, 0xf2, 0x8f }
-#define NIST_TDES_CBC3_PLAIN_DATA    	{ 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
-#define NIST_TDES_CBC3_CIPHER        	{ 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
-
+#define NIST_TDES_CBC3_PLAIN_DATA	{ 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
+#define NIST_TDES_CBC3_CIPHER		{ 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
 
 /* NIST AES-CCM */
 #define NIST_AESCCM_128_BIT_KEY_SIZE    16
@@ -208,7 +202,6 @@ and
 #define NIST_AESCCM_256_CIPHER          { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a }
 #define NIST_AESCCM_256_MAC             { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 }
 
-
 /* NIST AES-GCM */
 #define NIST_AESGCM_128_BIT_KEY_SIZE    16
 #define NIST_AESGCM_192_BIT_KEY_SIZE    24
@@ -242,7 +235,6 @@ and
 #define NIST_AESGCM_256_CIPHER          { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d }
 #define NIST_AESGCM_256_MAC             { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 }
 
-
 /* NIST HASH */
 #define NIST_SHA_MSG_SIZE               16
 
@@ -260,7 +252,6 @@ and
 					  0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \
 					  0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 }
 
-
 /* NIST HMAC */
 #define NIST_HMAC_MSG_SIZE              128
 
diff --git a/drivers/staging/ccree/ssi_fips_ext.c b/drivers/staging/ccree/ssi_fips_ext.c
index 2ac432f..e7bf184 100644
--- a/drivers/staging/ccree/ssi_fips_ext.c
+++ b/drivers/staging/ccree/ssi_fips_ext.c
@@ -1,49 +1,47 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /**************************************************************
-This file defines the driver FIPS functions that should be 
-implemented by the driver user. Current implementation is sample code only.
-***************************************************************/
+ * This file defines the driver FIPS functions that should be
+ * implemented by the driver user. Current implementation is sample code only.
+ ***************************************************************/
 
 #include <linux/module.h>
 #include "ssi_fips_local.h"
 #include "ssi_driver.h"
 
-
 static bool tee_error;
 module_param(tee_error, bool, 0644);
 MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occured ");
 
-static ssi_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
-static ssi_fips_error_t fips_error = CC_REE_FIPS_ERROR_OK;
+static enum cc_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
+static enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
 
 /*
-This function returns the FIPS REE state. 
-The function should be implemented by the driver user, depends on where                          .
-the state value is stored. 
-The reference code uses global variable. 
-*/
-int ssi_fips_ext_get_state(ssi_fips_state_t *p_state)
+ * This function returns the FIPS REE state.
+ * The function should be implemented by the driver user, depending on where
+ * the state value is stored.
+ * The reference code uses global variable.
+ */
+int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state)
 {
-        int rc = 0;
+	int rc = 0;
 
-	if (p_state == NULL) {
+	if (!p_state)
 		return -EINVAL;
-	}
 
 	*p_state = fips_state;
 
@@ -51,18 +49,17 @@ int ssi_fips_ext_get_state(ssi_fips_state_t *p_state)
 }
 
 /*
-This function returns the FIPS REE error. 
-The function should be implemented by the driver user, depends on where                          .
-the error value is stored. 
-The reference code uses global variable. 
-*/
-int ssi_fips_ext_get_error(ssi_fips_error_t *p_err)
+ * This function returns the FIPS REE error.
+ * The function should be implemented by the driver user, depending on where
+ * the error value is stored.
+ * The reference code uses global variable.
+ */
+int ssi_fips_ext_get_error(enum cc_fips_error *p_err)
 {
-        int rc = 0;
+	int rc = 0;
 
-	if (p_err == NULL) {
+	if (!p_err)
 		return -EINVAL;
-	}
 
 	*p_err = fips_error;
 
@@ -70,27 +67,26 @@ int ssi_fips_ext_get_error(ssi_fips_error_t *p_err)
 }
 
 /*
-This function sets the FIPS REE state. 
-The function should be implemented by the driver user, depends on where                          .
-the state value is stored. 
-The reference code uses global variable. 
-*/
-int ssi_fips_ext_set_state(ssi_fips_state_t state)
+ * This function sets the FIPS REE state.
+ * The function should be implemented by the driver user, depending on where
+ * the state value is stored.
+ * The reference code uses global variable.
+ */
+int ssi_fips_ext_set_state(enum cc_fips_state_t state)
 {
 	fips_state = state;
 	return 0;
 }
 
 /*
-This function sets the FIPS REE error. 
-The function should be implemented by the driver user, depends on where                          .
-the error value is stored. 
-The reference code uses global variable. 
-*/
-int ssi_fips_ext_set_error(ssi_fips_error_t err)
+ * This function sets the FIPS REE error.
+ * The function should be implemented by the driver user, depending on where
+ * the error value is stored.
+ * The reference code uses global variable.
+ */
+int ssi_fips_ext_set_error(enum cc_fips_error err)
 {
 	fips_error = err;
 	return 0;
 }
 
-
diff --git a/drivers/staging/ccree/ssi_fips_ll.c b/drivers/staging/ccree/ssi_fips_ll.c
index d573574..3557e20 100644
--- a/drivers/staging/ccree/ssi_fips_ll.c
+++ b/drivers/staging/ccree/ssi_fips_ll.c
@@ -1,23 +1,23 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /**************************************************************
-This file defines the driver FIPS Low Level implmentaion functions,
-that executes the KAT.
-***************************************************************/
+ * This file defines the driver FIPS Low Level implementation functions
+ * that execute the KAT.
+ ***************************************************************/
 #include <linux/kernel.h>
 
 #include "ssi_driver.h"
@@ -27,151 +27,143 @@ that executes the KAT.
 #include "ssi_hash.h"
 #include "ssi_request_mgr.h"
 
-
-static const uint32_t digest_len_init[] = {
+static const u32 digest_len_init[] = {
 	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const uint32_t sha1_init[] = { 
+static const u32 sha1_init[] = {
 	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha256_init[] = {
+static const u32 sha256_init[] = {
 	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
 #if (CC_SUPPORT_SHA > 256)
-static const uint32_t digest_len_sha512_init[] = { 
+static const u32 digest_len_sha512_init[] = {
 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const uint64_t sha512_init[] = {
+static const u64 sha512_init[] = {
 	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
 	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 #endif
 
-
 #define NIST_CIPHER_AES_MAX_VECTOR_SIZE      32
 
 struct fips_cipher_ctx {
-	uint8_t iv[CC_AES_IV_SIZE];
-	uint8_t key[AES_512_BIT_KEY_SIZE];
-	uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-	uint8_t dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8 iv[CC_AES_IV_SIZE];
+	u8 key[AES_512_BIT_KEY_SIZE];
+	u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8 dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
 };
 
 typedef struct _FipsCipherData {
-	uint8_t                   isAes;
-	uint8_t                   key[AES_512_BIT_KEY_SIZE];
+	u8                   isAes;
+	u8                   key[AES_512_BIT_KEY_SIZE];
 	size_t                    keySize;
-	uint8_t                   iv[CC_AES_IV_SIZE];
+	u8                   iv[CC_AES_IV_SIZE];
 	enum drv_crypto_direction direction;
 	enum drv_cipher_mode      oprMode;
-	uint8_t                   dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-	uint8_t                   dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8                   dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8                   dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
 	size_t                    dataInSize;
 } FipsCipherData;
 
-
 struct fips_cmac_ctx {
-	uint8_t key[AES_256_BIT_KEY_SIZE];
-	uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
-	uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+	u8 key[AES_256_BIT_KEY_SIZE];
+	u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsCmacData {
 	enum drv_crypto_direction direction;
-	uint8_t                   key[AES_256_BIT_KEY_SIZE];
+	u8                   key[AES_256_BIT_KEY_SIZE];
 	size_t                    key_size;
-	uint8_t                   data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+	u8                   data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
 	size_t                    data_in_size;
-	uint8_t                   mac_res[CC_DIGEST_SIZE_MAX];
+	u8                   mac_res[CC_DIGEST_SIZE_MAX];
 	size_t                    mac_res_size;
 } FipsCmacData;
 
-
 struct fips_hash_ctx {
-	uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
-	uint8_t din[NIST_SHA_MSG_SIZE];
-	uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+	u8 initial_digest[CC_DIGEST_SIZE_MAX];
+	u8 din[NIST_SHA_MSG_SIZE];
+	u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsHashData {
 	enum drv_hash_mode    hash_mode;
-	uint8_t               data_in[NIST_SHA_MSG_SIZE];
+	u8               data_in[NIST_SHA_MSG_SIZE];
 	size_t                data_in_size;
-	uint8_t               mac_res[CC_DIGEST_SIZE_MAX];
+	u8               mac_res[CC_DIGEST_SIZE_MAX];
 } FipsHashData;
 
-
 /* note that the hmac key length must be equal or less than block size (block size is 64 up to sha256 and 128 for sha384/512) */
 struct fips_hmac_ctx {
-	uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
-	uint8_t key[CC_HMAC_BLOCK_SIZE_MAX];
-	uint8_t k0[CC_HMAC_BLOCK_SIZE_MAX];
-	uint8_t digest_bytes_len[HASH_LEN_SIZE];
-	uint8_t tmp_digest[CC_DIGEST_SIZE_MAX];
-	uint8_t din[NIST_HMAC_MSG_SIZE];
-	uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+	u8 initial_digest[CC_DIGEST_SIZE_MAX];
+	u8 key[CC_HMAC_BLOCK_SIZE_MAX];
+	u8 k0[CC_HMAC_BLOCK_SIZE_MAX];
+	u8 digest_bytes_len[HASH_LEN_SIZE];
+	u8 tmp_digest[CC_DIGEST_SIZE_MAX];
+	u8 din[NIST_HMAC_MSG_SIZE];
+	u8 mac_res[CC_DIGEST_SIZE_MAX];
 };
 
 typedef struct _FipsHmacData {
 	enum drv_hash_mode    hash_mode;
-	uint8_t               key[CC_HMAC_BLOCK_SIZE_MAX];
+	u8               key[CC_HMAC_BLOCK_SIZE_MAX];
 	size_t                key_size;
-	uint8_t               data_in[NIST_HMAC_MSG_SIZE];
+	u8               data_in[NIST_HMAC_MSG_SIZE];
 	size_t                data_in_size;
-	uint8_t               mac_res[CC_DIGEST_SIZE_MAX];
+	u8               mac_res[CC_DIGEST_SIZE_MAX];
 } FipsHmacData;
 
-
 #define FIPS_CCM_B0_A0_ADATA_SIZE   (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
 
 struct fips_ccm_ctx {
-	uint8_t b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
-	uint8_t iv[NIST_AESCCM_IV_SIZE];
-	uint8_t ctr_cnt_0[NIST_AESCCM_IV_SIZE];
-	uint8_t key[CC_AES_KEY_SIZE_MAX];
-	uint8_t din[NIST_AESCCM_TEXT_SIZE];
-	uint8_t dout[NIST_AESCCM_TEXT_SIZE];
-	uint8_t mac_res[NIST_AESCCM_TAG_SIZE];
+	u8 b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
+	u8 iv[NIST_AESCCM_IV_SIZE];
+	u8 ctr_cnt_0[NIST_AESCCM_IV_SIZE];
+	u8 key[CC_AES_KEY_SIZE_MAX];
+	u8 din[NIST_AESCCM_TEXT_SIZE];
+	u8 dout[NIST_AESCCM_TEXT_SIZE];
+	u8 mac_res[NIST_AESCCM_TAG_SIZE];
 };
 
 typedef struct _FipsCcmData {
 	enum drv_crypto_direction direction;
-	uint8_t                   key[CC_AES_KEY_SIZE_MAX];
+	u8                   key[CC_AES_KEY_SIZE_MAX];
 	size_t                    keySize;
-	uint8_t                   nonce[NIST_AESCCM_NONCE_SIZE];
-	uint8_t                   adata[NIST_AESCCM_ADATA_SIZE];
+	u8                   nonce[NIST_AESCCM_NONCE_SIZE];
+	u8                   adata[NIST_AESCCM_ADATA_SIZE];
 	size_t                    adataSize;
-	uint8_t                   dataIn[NIST_AESCCM_TEXT_SIZE];
+	u8                   dataIn[NIST_AESCCM_TEXT_SIZE];
 	size_t                    dataInSize;
-	uint8_t                   dataOut[NIST_AESCCM_TEXT_SIZE];
-	uint8_t                   tagSize;
-	uint8_t                   macResOut[NIST_AESCCM_TAG_SIZE];
+	u8                   dataOut[NIST_AESCCM_TEXT_SIZE];
+	u8                   tagSize;
+	u8                   macResOut[NIST_AESCCM_TAG_SIZE];
 } FipsCcmData;
 
-
 struct fips_gcm_ctx {
-	uint8_t adata[NIST_AESGCM_ADATA_SIZE];
-	uint8_t key[CC_AES_KEY_SIZE_MAX];
-	uint8_t hkey[CC_AES_KEY_SIZE_MAX];
-	uint8_t din[NIST_AESGCM_TEXT_SIZE];
-	uint8_t dout[NIST_AESGCM_TEXT_SIZE];
-	uint8_t mac_res[NIST_AESGCM_TAG_SIZE];
-	uint8_t len_block[AES_BLOCK_SIZE];
-	uint8_t iv_inc1[AES_BLOCK_SIZE];
-	uint8_t iv_inc2[AES_BLOCK_SIZE];
+	u8 adata[NIST_AESGCM_ADATA_SIZE];
+	u8 key[CC_AES_KEY_SIZE_MAX];
+	u8 hkey[CC_AES_KEY_SIZE_MAX];
+	u8 din[NIST_AESGCM_TEXT_SIZE];
+	u8 dout[NIST_AESGCM_TEXT_SIZE];
+	u8 mac_res[NIST_AESGCM_TAG_SIZE];
+	u8 len_block[AES_BLOCK_SIZE];
+	u8 iv_inc1[AES_BLOCK_SIZE];
+	u8 iv_inc2[AES_BLOCK_SIZE];
 };
 
 typedef struct _FipsGcmData {
 	enum drv_crypto_direction direction;
-	uint8_t                   key[CC_AES_KEY_SIZE_MAX];
+	u8                   key[CC_AES_KEY_SIZE_MAX];
 	size_t                    keySize;
-	uint8_t                   iv[NIST_AESGCM_IV_SIZE];
-	uint8_t                   adata[NIST_AESGCM_ADATA_SIZE];
+	u8                   iv[NIST_AESGCM_IV_SIZE];
+	u8                   adata[NIST_AESGCM_ADATA_SIZE];
 	size_t                    adataSize;
-	uint8_t                   dataIn[NIST_AESGCM_TEXT_SIZE];
+	u8                   dataIn[NIST_AESGCM_TEXT_SIZE];
 	size_t                    dataInSize;
-	uint8_t                   dataOut[NIST_AESGCM_TEXT_SIZE];
-	uint8_t                   tagSize;
-	uint8_t                   macResOut[NIST_AESGCM_TAG_SIZE];
+	u8                   dataOut[NIST_AESGCM_TEXT_SIZE];
+	u8                   tagSize;
+	u8                   macResOut[NIST_AESGCM_TAG_SIZE];
 } FipsGcmData;
 
-
 typedef union _fips_ctx {
 	struct fips_cipher_ctx cipher;
 	struct fips_cmac_ctx cmac;
@@ -181,7 +173,6 @@ typedef union _fips_ctx {
 	struct fips_gcm_ctx gcm;
 } fips_ctx;
 
-
 /* test data tables */
 static const FipsCipherData FipsCipherDataTable[] = {
 	/* AES */
@@ -214,8 +205,8 @@ static const FipsCipherData FipsCipherDataTable[] = {
 	{ 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE,   NIST_AES_256_XTS_IV,  DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS,     NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_VECTOR_SIZE },
 	{ 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE,   NIST_AES_256_XTS_IV,  DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS,     NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_VECTOR_SIZE },
 #if (CC_SUPPORT_SHA > 256)
-	{ 1, NIST_AES_512_XTS_KEY, 2*CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV,  DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS,     NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE },
-	{ 1, NIST_AES_512_XTS_KEY, 2*CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV,  DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS,     NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE },
+	{ 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV,  DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS,     NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE },
+	{ 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV,  DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS,     NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE },
 #endif
 	/* DES */
 	{ 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_ECB3_CIPHER, NIST_TDES_VECTOR_SIZE },
@@ -223,6 +214,7 @@ static const FipsCipherData FipsCipherDataTable[] = {
 	{ 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE },
 	{ 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
 };
+
 #define FIPS_CIPHER_NUM_OF_TESTS        (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData))
 
 static const FipsCmacData FipsCmacDataTable[] = {
@@ -230,56 +222,60 @@ static const FipsCmacData FipsCmacDataTable[] = {
 	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE },
 	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE },
 };
+
 #define FIPS_CMAC_NUM_OF_TESTS        (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData))
 
 static const FipsHashData FipsHashDataTable[] = {
-        { DRV_HASH_SHA1,   NIST_SHA_1_MSG,   NIST_SHA_MSG_SIZE, NIST_SHA_1_MD },
-        { DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD },
+	{ DRV_HASH_SHA1,   NIST_SHA_1_MSG,   NIST_SHA_MSG_SIZE, NIST_SHA_1_MD },
+	{ DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD },
 #if (CC_SUPPORT_SHA > 256)
 //        { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD },
 #endif
 };
+
 #define FIPS_HASH_NUM_OF_TESTS        (sizeof(FipsHashDataTable) / sizeof(FipsHashData))
 
 static const FipsHmacData FipsHmacDataTable[] = {
-        { DRV_HASH_SHA1,   NIST_HMAC_SHA1_KEY,   NIST_HMAC_SHA1_KEY_SIZE,   NIST_HMAC_SHA1_MSG,   NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD },
-        { DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD },
+	{ DRV_HASH_SHA1,   NIST_HMAC_SHA1_KEY,   NIST_HMAC_SHA1_KEY_SIZE,   NIST_HMAC_SHA1_MSG,   NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD },
+	{ DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD },
 #if (CC_SUPPORT_SHA > 256)
 //        { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD },
 #endif
 };
+
 #define FIPS_HMAC_NUM_OF_TESTS        (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData))
 
 static const FipsCcmData FipsCcmDataTable[] = {
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
 };
+
 #define FIPS_CCM_NUM_OF_TESTS        (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData))
 
 static const FipsGcmData FipsGcmDataTable[] = {
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
-        { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
-        { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
+	{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
+	{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
 };
+
 #define FIPS_GCM_NUM_OF_TESTS        (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
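The *_NUM_OF_TESTS macros above all open-code the element count as sizeof(table) / sizeof(element). A possible follow-up cleanup, not part of this diff, would be the kernel's ARRAY_SIZE() helper, which computes the same count and additionally fails the build if handed a pointer instead of an array; a minimal sketch:

	#include <linux/kernel.h>

	#define FIPS_GCM_NUM_OF_TESTS	ARRAY_SIZE(FipsGcmDataTable)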
 
-
-static inline ssi_fips_error_t 
+static inline enum cc_fips_error
 FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
 {
 	switch (mode)
 	{
 	case DRV_CIPHER_ECB:
-		return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT ;
+		return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT;
 	case DRV_CIPHER_CBC:
-		return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT ;
+		return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT;
 	case DRV_CIPHER_OFB:
 		return CC_REE_FIPS_ERROR_AES_OFB_PUT;
 	case DRV_CIPHER_CTR:
@@ -295,8 +291,7 @@ FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
 	return CC_REE_FIPS_ERROR_GENERAL;
 }
 
-
-static inline int 
+static inline int
 ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
 			 bool is_aes,
 			 int cipher_mode,
@@ -314,7 +309,7 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_CIPHER_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_CIPHER_MAX_SEQ_LEN];
 	int idx = 0;
 	int s_flow_mode = is_aes ? S_DIN_to_AES : S_DIN_to_DES;
 
@@ -325,74 +320,73 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
 	case DRV_CIPHER_CTR:
 	case DRV_CIPHER_OFB:
 		/* Load cipher state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				     iv_dma_addr, iv_len, NS_BIT);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
-		if ((cipher_mode == DRV_CIPHER_CTR) || 
-		    (cipher_mode == DRV_CIPHER_OFB) ) {
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     iv_dma_addr, iv_len, NS_BIT);
+		set_cipher_config0(&desc[idx], direction);
+		set_flow_mode(&desc[idx], s_flow_mode);
+		set_cipher_mode(&desc[idx], cipher_mode);
+		if ((cipher_mode == DRV_CIPHER_CTR) ||
+		    (cipher_mode == DRV_CIPHER_OFB)) {
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 		} else {
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		}
 		idx++;
 		/*FALLTHROUGH*/
 	case DRV_CIPHER_ECB:
 		/* Load key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], cipher_mode);
+		set_cipher_config0(&desc[idx], direction);
 		if (is_aes) {
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-					     key_dma_addr, 
-					     ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
-					     NS_BIT);
-			HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
+			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+				     ((key_len == 24) ? AES_MAX_KEY_SIZE :
+				      key_len), NS_BIT);
+			set_key_size_aes(&desc[idx], key_len);
 		} else {/*des*/
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-					     key_dma_addr, key_len,
-					     NS_BIT);
-			HW_DESC_SET_KEY_SIZE_DES(&desc[idx], key_len);
+			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[idx], key_len);
 		}
-		HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		set_flow_mode(&desc[idx], s_flow_mode);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 		break;
 	case DRV_CIPHER_XTS:
 		/* Load AES key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				     key_dma_addr, key_len/2, NS_BIT);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], cipher_mode);
+		set_cipher_config0(&desc[idx], direction);
+		set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, (key_len / 2),
+			     NS_BIT);
+		set_key_size_aes(&desc[idx], (key_len / 2));
+		set_flow_mode(&desc[idx], s_flow_mode);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* load XEX key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-				     (key_dma_addr+key_len/2), key_len/2, NS_BIT);
-		HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[idx], data_size);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_XEX_KEY);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], cipher_mode);
+		set_cipher_config0(&desc[idx], direction);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (key_dma_addr + (key_len / 2)),
+			     (key_len / 2), NS_BIT);
+		set_xex_data_unit_size(&desc[idx], data_size);
+		set_flow_mode(&desc[idx], s_flow_mode);
+		set_key_size_aes(&desc[idx], (key_len / 2));
+		set_setup_mode(&desc[idx], SETUP_LOAD_XEX_KEY);
 		idx++;
 
 		/* Set state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				     iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+		hw_desc_init(&desc[idx]);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[idx], cipher_mode);
+		set_cipher_config0(&desc[idx], direction);
+		set_key_size_aes(&desc[idx], (key_len / 2));
+		set_flow_mode(&desc[idx], s_flow_mode);
+		set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
 		idx++;
 		break;
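The XTS setup above treats the supplied key blob as two AES keys stored back to back, which is also why the 512-bit XTS vectors in FipsCipherDataTable pass 2 * CC_AES_256_BIT_KEY_SIZE; roughly:

	data key  (SETUP_LOAD_KEY0)     = key[0 .. key_len/2 - 1]
	tweak key (SETUP_LOAD_XEX_KEY)  = key[key_len/2 .. key_len - 1]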
 	default:
@@ -401,10 +395,10 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
 	}
 
 	/* create data descriptor */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT);
+	set_dout_dlli(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT);
 	idx++;
 
 	/* perform the operation - Lock HW and push sequence */
@@ -415,11 +409,10 @@ ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-
-ssi_fips_error_t
+enum cc_fips_error
 ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_cipher_ctx *virt_ctx = (struct fips_cipher_ctx *)cpu_addr_buffer;
 
@@ -431,9 +424,9 @@ ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffe
 
 	for (i = 0; i < FIPS_CIPHER_NUM_OF_TESTS; ++i)
 	{
-		FipsCipherData *cipherData = (FipsCipherData*)&FipsCipherDataTable[i];
+		FipsCipherData *cipherData = (FipsCipherData *)&FipsCipherDataTable[i];
 		int rc = 0;
-		size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE ;
+		size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE;
 
 		memset(cpu_addr_buffer, 0, sizeof(struct fips_cipher_ctx));
 
@@ -480,8 +473,7 @@ ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffe
 	return error;
 }
 
-
-static inline int 
+static inline int
 ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
 		       dma_addr_t key_dma_addr,
 		       size_t key_len,
@@ -495,46 +487,45 @@ ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_CMAC_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_CMAC_MAX_SEQ_LEN];
 	int idx = 0;
 
 	/* Setup CMAC Key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
-			     ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len), NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+		     ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len), NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], key_len);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Load MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], key_len);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
-
 	//ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-			     din_dma_addr, 
-			     din_len, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_len, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 	idx++;
-	
+
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC); 
+	hw_desc_init(&desc[idx]);
+	set_dout_dlli(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,
+		      0);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
 	idx++;
 
 	/* perform the operation - Lock HW and push sequence */
@@ -545,10 +536,10 @@ ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-ssi_fips_error_t
+enum cc_fips_error
 ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_cmac_ctx *virt_ctx = (struct fips_cmac_ctx *)cpu_addr_buffer;
 
@@ -559,7 +550,7 @@ ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 	for (i = 0; i < FIPS_CMAC_NUM_OF_TESTS; ++i)
 	{
-		FipsCmacData *cmac_data = (FipsCmacData*)&FipsCmacDataTable[i];
+		FipsCmacData *cmac_data = (FipsCmacData *)&FipsCmacDataTable[i];
 		int rc = 0;
 
 		memset(cpu_addr_buffer, 0, sizeof(struct fips_cmac_ctx));
@@ -604,8 +595,7 @@ ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 	return error;
 }
 
-
-static inline ssi_fips_error_t 
+static inline enum cc_fips_error
 FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
 {
 	switch (hash_mode) {
@@ -624,7 +614,7 @@ FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
 	return CC_REE_FIPS_ERROR_GENERAL;
 }
 
-static inline int 
+static inline int
 ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
 		       dma_addr_t initial_digest_dma_addr,
 		       dma_addr_t din_dma_addr,
@@ -640,45 +630,47 @@ ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_HASH_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_HASH_MAX_SEQ_LEN];
 	int idx = 0;
 
 	/* Load initial digest */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, initial_digest_dma_addr, inter_digestsize, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
+		     inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Load the hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* data descriptor */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 	if (unlikely((hash_mode == DRV_HASH_MD5) ||
 		     (hash_mode == DRV_HASH_SHA384) ||
 		     (hash_mode == DRV_HASH_SHA512))) {
-		HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+		set_bytes_swap(&desc[idx], 1);
 	} else {
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config0(&desc[idx],
+				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 	}
 	idx++;
 
@@ -689,10 +681,10 @@ ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-ssi_fips_error_t
+enum cc_fips_error
 ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_hash_ctx *virt_ctx = (struct fips_hash_ctx *)cpu_addr_buffer;
 
@@ -703,7 +695,7 @@ ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 	for (i = 0; i < FIPS_HASH_NUM_OF_TESTS; ++i)
 	{
-		FipsHashData *hash_data = (FipsHashData*)&FipsHashDataTable[i];
+		FipsHashData *hash_data = (FipsHashData *)&FipsHashDataTable[i];
 		int rc = 0;
 		enum drv_hash_hw_mode hw_mode = 0;
 		int digest_size = 0;
@@ -717,20 +709,20 @@ ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 			digest_size = CC_SHA1_DIGEST_SIZE;
 			inter_digestsize = CC_SHA1_DIGEST_SIZE;
 			/* copy the initial digest into the allocated cache coherent buffer */
-			memcpy(virt_ctx->initial_digest, (void*)sha1_init, CC_SHA1_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
 			break;
 		case DRV_HASH_SHA256:
 			hw_mode = DRV_HASH_HW_SHA256;
 			digest_size = CC_SHA256_DIGEST_SIZE;
 			inter_digestsize = CC_SHA256_DIGEST_SIZE;
-			memcpy(virt_ctx->initial_digest, (void*)sha256_init, CC_SHA256_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
 			break;
 #if (CC_SUPPORT_SHA > 256)
 		case DRV_HASH_SHA512:
 			hw_mode = DRV_HASH_HW_SHA512;
 			digest_size = CC_SHA512_DIGEST_SIZE;
 			inter_digestsize = CC_SHA512_DIGEST_SIZE;
-			memcpy(virt_ctx->initial_digest, (void*)sha512_init, CC_SHA512_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
 			break;
 #endif
 		default:
@@ -757,7 +749,7 @@ ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 			FIPS_LOG("ssi_hash_fips_run_test %d returned error - rc = %d \n", i, rc);
 			error = FIPS_HashToFipsError(hash_data->hash_mode);
 			break;
-                }
+		}
 
 		/* compare actual mac result to expected */
 		if (memcmp(virt_ctx->mac_res, hash_data->mac_res, digest_size) != 0)
@@ -772,14 +764,13 @@ ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 			error = FIPS_HashToFipsError(hash_data->hash_mode);
 			break;
-                }
+		}
 	}
 
 	return error;
 }
 
-
-static inline ssi_fips_error_t 
+static inline enum cc_fips_error
 FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
 {
 	switch (hash_mode) {
@@ -798,7 +789,7 @@ FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
 	return CC_REE_FIPS_ERROR_GENERAL;
 }
 
-static inline int 
+static inline int
 ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
 		       dma_addr_t initial_digest_dma_addr,
 		       dma_addr_t key_dma_addr,
@@ -816,34 +807,34 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
 		       dma_addr_t digest_bytes_len_dma_addr)
 {
 	/* The implemented flow is not the same as the one implemented in ssi_hash.c (setkey + digest flows).
-	   In this flow, there is no need to store and reload some of the intermidiate results. */
+	 * In this flow, there is no need to store and reload some of the intermediate results.
+	 */
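For reference, what this single descriptor sequence computes is the standard HMAC construction, with K0 the key zero-padded to the hash block size:

	HMAC(K, m) = H((K0 XOR opad) || H((K0 XOR ipad) || m))

The code below derives K0, absorbs the opad and ipad variants, and then the message, so the inner and outer hashes run back to back instead of as separate setkey/digest flows.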
 
 	/* max number of descriptors used for the flow */
 	#define FIPS_HMAC_MAX_SEQ_LEN 12
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_HMAC_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_HMAC_MAX_SEQ_LEN];
 	int idx = 0;
 	int i;
 	/* calc the hash opad first and ipad only afterwards (unlike the flow in ssi_hash.c) */
 	unsigned int hmacPadConst[2] = { HMAC_OPAD_CONST, HMAC_IPAD_CONST };
 
 	// assume (key_size <= block_size)
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
+	set_flow_mode(&desc[idx], BYPASS);
+	set_dout_dlli(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0);
 	idx++;
 
 	// if needed, append Key with zeros to create K0
 	if ((block_size - key_size) != 0) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, (block_size - key_size));
-		HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-				      (k0_dma_addr + key_size), (block_size - key_size),
-				      NS_BIT, 0);
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0, (block_size - key_size));
+		set_flow_mode(&desc[idx], BYPASS);
+		set_dout_dlli(&desc[idx], (k0_dma_addr + key_size),
+			      (block_size - key_size), NS_BIT, 0);
 		idx++;
 	}
 
@@ -858,50 +849,47 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
 	/* calc derived HMAC key */
 	for (i = 0; i < 2; i++) {
 		/* Load hash initial state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, initial_digest_dma_addr, inter_digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hw_mode);
+		set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr,
+			     inter_digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
-
 		/* Load the hash current length*/
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], hw_mode);
+		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Prepare opad/ipad key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+		hw_desc_init(&desc[idx]);
+		set_xor_val(&desc[idx], hmacPadConst[i]);
+		set_cipher_mode(&desc[idx], hw_mode);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 		idx++;
 
 		/* Perform HASH update */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				     k0_dma_addr,
-				     block_size, NS_BIT);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx],hw_mode);
-		HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, block_size,
+			     NS_BIT);
+		set_cipher_mode(&desc[idx], hw_mode);
+		set_xor_active(&desc[idx]);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 
 		if (i == 0) {
 			/* First iteration - calc H(K0^opad) into tmp_digest_dma_addr */
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-			HW_DESC_SET_DOUT_DLLI(&desc[idx],
-					      tmp_digest_dma_addr,
-					      inter_digestsize,
-					      NS_BIT, 0);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], hw_mode);
+			set_dout_dlli(&desc[idx], tmp_digest_dma_addr,
+				      inter_digestsize, NS_BIT, 0);
+			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 			idx++;
 
 			// is this needed?? or continue with current descriptors??
@@ -916,85 +904,86 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
 	}
 
 	/* data descriptor */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-			     din_dma_addr, data_in_size,
-			     NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	/* HW last hash block padding (aka. "DO_PAD") */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
-	HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_dout_dlli(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	set_cipher_do(&desc[idx], DO_PAD);
 	idx++;
 
 	/* store the hash digest result in the context */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_dout_dlli(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 	if (unlikely((hash_mode == DRV_HASH_MD5) ||
 		     (hash_mode == DRV_HASH_SHA384) ||
 		     (hash_mode == DRV_HASH_SHA512))) {
-		HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+		set_bytes_swap(&desc[idx], 1);
 	} else {
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config0(&desc[idx],
+				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 	}
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 	idx++;
 
 	/* at this point:
-	   tmp_digest = H(o_key_pad)
-	   k0 = H(i_key_pad || m)
-	   */
+	 * tmp_digest = H(o_key_pad)
+	 * k0 = H(i_key_pad || m)
+	 */
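Spelled out, the remaining descriptors restore the opad hash state and absorb k0, so the value written to mac_res is:

	mac_res = H(o_key_pad || k0)
	        = H(o_key_pad || H(i_key_pad || m))
	        = HMAC(K, m)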
 
 	/* Loading hash opad xor key state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, tmp_digest_dma_addr, inter_digestsize, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, tmp_digest_dma_addr,
+		     inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Load the hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr,
+		     HASH_LEN_SIZE, NS_BIT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* Perform HASH update */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
-
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], hw_mode);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 	if (unlikely((hash_mode == DRV_HASH_MD5) ||
 		     (hash_mode == DRV_HASH_SHA384) ||
 		     (hash_mode == DRV_HASH_SHA512))) {
-		HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+		set_bytes_swap(&desc[idx], 1);
 	} else {
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config0(&desc[idx],
+				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 	}
 	idx++;
 
@@ -1005,10 +994,10 @@ ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-ssi_fips_error_t
+enum cc_fips_error
 ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_hmac_ctx *virt_ctx = (struct fips_hmac_ctx *)cpu_addr_buffer;
 
@@ -1023,7 +1012,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 	for (i = 0; i < FIPS_HMAC_NUM_OF_TESTS; ++i)
 	{
-		FipsHmacData *hmac_data = (FipsHmacData*)&FipsHmacDataTable[i];
+		FipsHmacData *hmac_data = (FipsHmacData *)&FipsHmacDataTable[i];
 		int rc = 0;
 		enum drv_hash_hw_mode hw_mode = 0;
 		int digest_size = 0;
@@ -1038,7 +1027,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 			digest_size = CC_SHA1_DIGEST_SIZE;
 			block_size = CC_SHA1_BLOCK_SIZE;
 			inter_digestsize = CC_SHA1_DIGEST_SIZE;
-			memcpy(virt_ctx->initial_digest, (void*)sha1_init, CC_SHA1_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE);
 			memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
 			break;
 		case DRV_HASH_SHA256:
@@ -1046,7 +1035,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 			digest_size = CC_SHA256_DIGEST_SIZE;
 			block_size = CC_SHA256_BLOCK_SIZE;
 			inter_digestsize = CC_SHA256_DIGEST_SIZE;
-			memcpy(virt_ctx->initial_digest, (void*)sha256_init, CC_SHA256_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE);
 			memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
 			break;
 #if (CC_SUPPORT_SHA > 256)
@@ -1055,7 +1044,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 			digest_size = CC_SHA512_DIGEST_SIZE;
 			block_size = CC_SHA512_BLOCK_SIZE;
 			inter_digestsize = CC_SHA512_DIGEST_SIZE;
-			memcpy(virt_ctx->initial_digest, (void*)sha512_init, CC_SHA512_DIGEST_SIZE);
+			memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE);
 			memcpy(virt_ctx->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
 			break;
 #endif
@@ -1111,8 +1100,7 @@ ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 	return error;
 }
 
-
-static inline int 
+static inline int
 ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
 		      enum drv_crypto_direction direction,
 		      dma_addr_t key_dma_addr,
@@ -1131,7 +1119,7 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_CCM_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_CCM_MAX_SEQ_LEN];
 	unsigned int idx = 0;
 	unsigned int cipher_flow_mode;
 
@@ -1142,100 +1130,103 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
 	}
 
 	/* load key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
-			     ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? CC_AES_KEY_SIZE_MAX : key_size),
-			     NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+		     ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
+		     CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
+	set_key_size_aes(&desc[idx], key_size);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* load ctr state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     iv_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_key_size_aes(&desc[idx], key_size);
+	set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* load MAC key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
-			     ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? CC_AES_KEY_SIZE_MAX : key_size),
-			     NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
+		     ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ?
+		     CC_AES_KEY_SIZE_MAX : key_size), NS_BIT);
+	set_key_size_aes(&desc[idx], key_size);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* load MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_key_size_aes(&desc[idx], key_size);
+	set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
+		     NIST_AESCCM_TAG_SIZE, NS_BIT);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* prcess assoc data */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr, b0_a0_adata_size, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr,
+		     b0_a0_adata_size, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	/* process the cipher */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], cipher_flow_mode);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
+	set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], cipher_flow_mode);
 	idx++;
 
 	/* Read temporal MAC */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT, 0);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
+		      NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
 	idx++;
 
 	/* load AES-CTR state (for last MAC calculation)*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     ctr_cnt_0_dma_addr,
-			     AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, ctr_cnt_0_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_key_size_aes(&desc[idx], key_size);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Memory Barrier */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* encrypt the "T" value and store MAC inplace */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	idx++;	
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr,
+		     NIST_AESCCM_TAG_SIZE, NS_BIT);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE,
+		      NS_BIT, 0);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	idx++;
 
 	/* perform the operation - Lock HW and push sequence */
 	BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN);
@@ -1244,10 +1235,10 @@ ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-ssi_fips_error_t
+enum cc_fips_error
 ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_ccm_ctx *virt_ctx = (struct fips_ccm_ctx *)cpu_addr_buffer;
 
@@ -1262,7 +1253,7 @@ ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 	for (i = 0; i < FIPS_CCM_NUM_OF_TESTS; ++i)
 	{
-		FipsCcmData *ccmData = (FipsCcmData*)&FipsCcmDataTable[i];
+		FipsCcmData *ccmData = (FipsCcmData *)&FipsCcmDataTable[i];
 		int rc = 0;
 
 		memset(cpu_addr_buffer, 0, sizeof(struct fips_ccm_ctx));
@@ -1273,6 +1264,7 @@ ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 		{
 			/* build B0 -- B0, nonce, l(m) */
 			__be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE);
+
 			virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL;
 			memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
 			memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16));
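The offsets used above imply the usual CCM B0 layout (RFC 3610) for these vectors, assuming a 13-byte nonce and a 2-octet length field:

	byte  0       flags            (NIST_AESCCM_B0_VAL)
	bytes 1..13   nonce            (ccmData->nonce)
	bytes 14..15  l(m), big endian (cpu_to_be16(NIST_AESCCM_TEXT_SIZE))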
@@ -1313,9 +1305,9 @@ ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 		if (memcmp(virt_ctx->dout, ccmData->dataOut, ccmData->dataInSize) != 0)
 		{
 			FIPS_LOG("dout comparison error %d - size=%d \n", i, ccmData->dataInSize);
-                        error = CC_REE_FIPS_ERROR_AESCCM_PUT;
+			error = CC_REE_FIPS_ERROR_AESCCM_PUT;
 			break;
-                }
+		}
 
 		/* compare actual mac result to expected */
 		if (memcmp(virt_ctx->mac_res, ccmData->macResOut, ccmData->tagSize) != 0)
@@ -1336,7 +1328,6 @@ ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 	return error;
 }
 
-
 static inline int
 ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
 		      enum drv_crypto_direction direction,
@@ -1358,7 +1349,7 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
 
 	int rc;
 	struct ssi_crypto_req ssi_req = {0};
-	HwDesc_s desc[FIPS_GCM_MAX_SEQ_LEN];
+	struct cc_hw_desc desc[FIPS_GCM_MAX_SEQ_LEN];
 	unsigned int idx = 0;
 	unsigned int cipher_flow_mode;
 
@@ -1372,186 +1363,162 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
 //	ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
 /////////////////////////////////   1   ////////////////////////////////////
 
-	/* load key to AES*/
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);	
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx],
-			     DMA_DLLI, key_dma_addr, key_size,
-			     NS_BIT); 
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	/* load key to AES */
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
+	set_key_size_aes(&desc[idx], key_size);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* process one zero block to generate hkey */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx],
-			      hkey_dma_addr, AES_BLOCK_SIZE,
-			      NS_BIT, 0); 
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_dlli(&desc[idx], hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 	idx++;
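Encrypting a single all-zero block here is how GCM defines its hash subkey; as a formula:

	H = AES-ENC_K(0^128)

and the descriptors that follow load that value into the hash engine as the GHASH key.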
 
 	/* Memory Barrier */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* Load GHASH subkey */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     hkey_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);	
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, hkey_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Configure Hash Engine to work with GHASH.
-	   Since it was not possible to extend HASH submodes to add GHASH,
-	   The following command is necessary in order to select GHASH (according to HW designers)*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);	
-	HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	 * Since it was not possible to extend HASH submodes to add GHASH,
+	 * The following command is necessary in order to
+	 * the following command is necessary in order to
+	 */
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_do(&desc[idx], 1); /* 1=AES_SK RKEK */
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_aes_not_hash_mode(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
-
-
 /////////////////////////////////   2   ////////////////////////////////////
 	/* prcess(ghash) assoc data */
 //	if (req->assoclen > 0)
 //		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
 /////////////////////////////////   2   ////////////////////////////////////
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-			     adata_dma_addr, adata_size,
-			     NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, adata_dma_addr, adata_size, NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
-
 /////////////////////////////////   3   ////////////////////////////////////
 //	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
 /////////////////////////////////   3   ////////////////////////////////////
 
 	/* load key to AES*/
-	HW_DESC_INIT(&desc[idx]);	
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);	
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     key_dma_addr, key_size,
-			     NS_BIT); 
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
+	set_key_size_aes(&desc[idx], key_size);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* load AES/CTR initial CTR value inc by 2*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     iv_inc2_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_key_size_aes(&desc[idx], key_size);
+	set_din_type(&desc[idx], DMA_DLLI, iv_inc2_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
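For the 96-bit IVs used by these vectors, GCM forms the pre-counter block J0 = IV || 0^31 || 1, encrypts the payload with counter values starting at 2, and reserves counter value 1 for masking the final GHASH output. The iv_inc2 block loaded here and the iv_inc1 block loaded near the end of the sequence appear to be those two pre-incremented counters:

	ciphertext = GCTR(inc32(J0), plaintext)    /* counter 2, iv_inc2 */
	tag        = GCTR(J0, GHASH(A, C))         /* counter 1, iv_inc1 */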
 
-
 /////////////////////////////////   4   ////////////////////////////////////
 	/* process(gctr+ghash) */
 //	if (req_ctx->cryptlen != 0)
-//		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size); 
+//		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
 /////////////////////////////////   4   ////////////////////////////////////
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     din_dma_addr, din_size,
-			     NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx],
-			      dout_dma_addr, din_size,
-			      NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], cipher_flow_mode);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
+	set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
+	set_flow_mode(&desc[idx], cipher_flow_mode);
 	idx++;
 
-
 /////////////////////////////////   5   ////////////////////////////////////
 //	ssi_aead_process_gcm_result_desc(req, desc, seq_size);
 /////////////////////////////////   5   ////////////////////////////////////
 
 	/* process(ghash) gcm_block_len */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-			     block_len_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, block_len_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_flow_mode(&desc[idx], DIN_HASH);
 	idx++;
 
 	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx],
-			      mac_res_dma_addr, AES_BLOCK_SIZE,
-			      NS_BIT, 0);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
-	idx++; 
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_aes_not_hash_mode(&desc[idx]);
+	idx++;
 
 	/* load AES/CTR initial CTR value inc by 1*/
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     iv_inc1_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);	
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_key_size_aes(&desc[idx], key_size);
+	set_din_type(&desc[idx], DMA_DLLI, iv_inc1_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Memory Barrier */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+	hw_desc_init(&desc[idx]);
+	set_din_no_dma(&desc[idx], 0, 0xfffff0);
+	set_dout_no_dma(&desc[idx], 0, 0, 1);
 	idx++;
 
 	/* process GCTR on stored GHASH and store MAC inplace */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-			     mac_res_dma_addr, AES_BLOCK_SIZE,
-			     NS_BIT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx],
-			      mac_res_dma_addr, AES_BLOCK_SIZE,
-			      NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
+	set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, AES_BLOCK_SIZE,
+		     NS_BIT);
+	set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 	idx++;
 
 	/* perform the operation - Lock HW and push sequence */
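[Editor's note] The hunks above replace the HW_DESC_* macro family with lowercase helpers that operate on struct cc_hw_desc (hw_desc_init(), set_din_type(), set_dout_dlli(), set_flow_mode(), ...). As an illustration of the pattern only, here is a minimal sketch of such helpers; the structure layout, word indices and names are hypothetical stand-ins, not the real cc_hw_desc definition.

#include <linux/types.h>
#include <linux/string.h>

/* Illustrative sketch only: field layout and bit placement are invented. */
struct demo_hw_desc {
	u32 word[6];
};

static inline void demo_desc_init(struct demo_hw_desc *pdesc)
{
	memset(pdesc, 0, sizeof(*pdesc));
}

static inline void demo_set_flow_mode(struct demo_hw_desc *pdesc, u32 mode)
{
	pdesc->word[4] |= mode;		/* hypothetical placement */
}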
@@ -1561,10 +1528,10 @@ ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
 	return rc;
 }
 
-ssi_fips_error_t
+enum cc_fips_error
 ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
 {
-	ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error error = CC_REE_FIPS_ERROR_OK;
 	size_t i;
 	struct fips_gcm_ctx *virt_ctx = (struct fips_gcm_ctx *)cpu_addr_buffer;
 
@@ -1581,7 +1548,7 @@ ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 
 	for (i = 0; i < FIPS_GCM_NUM_OF_TESTS; ++i)
 	{
-		FipsGcmData *gcmData = (FipsGcmData*)&FipsGcmDataTable[i];
+		FipsGcmData *gcmData = (FipsGcmData *)&FipsGcmDataTable[i];
 		int rc = 0;
 
 		memset(cpu_addr_buffer, 0, sizeof(struct fips_gcm_ctx));
@@ -1594,6 +1561,7 @@ ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 		/* len_block */
 		{
 			__be64 len_bits;
+
 			len_bits = cpu_to_be64(gcmData->adataSize * 8);
 			memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits));
 			len_bits = cpu_to_be64(gcmData->dataInSize * 8);
@@ -1602,6 +1570,7 @@ ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 		/* iv_inc1, iv_inc2 */
 		{
 			__be32 counter = cpu_to_be32(1);
+
 			memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE);
 			memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
 			counter = cpu_to_be32(2);
@@ -1666,7 +1635,6 @@ ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer,
 	return error;
 }
 
-
 size_t ssi_fips_max_mem_alloc_size(void)
 {
 	FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx));
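[Editor's note] The GCM known-answer setup above builds the final GHASH length block from the bit lengths of the associated data and the plaintext (cpu_to_be64(adataSize * 8), cpu_to_be64(dataInSize * 8)), and builds iv_inc1/iv_inc2 as IV || counter with counter values 1 and 2. With a 96-bit IV this matches GCM's J0 = IV || 0^31 || 1: the tag GCTR uses counter 1 and payload encryption starts at counter 2 (SP 800-38D). A minimal sketch of the length-block layout, with illustrative names:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Sketch only: fill a 16-byte GCM length block with the big-endian bit
 * lengths of the AAD and of the ciphertext, as in the KAT setup above.
 */
static void demo_fill_gcm_len_block(u8 block[16], u64 aad_bytes, u64 text_bytes)
{
	__be64 bits;

	bits = cpu_to_be64(aad_bytes * 8);
	memcpy(block, &bits, sizeof(bits));
	bits = cpu_to_be64(text_bytes * 8);
	memcpy(block + sizeof(bits), &bits, sizeof(bits));
}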
diff --git a/drivers/staging/ccree/ssi_fips_local.c b/drivers/staging/ccree/ssi_fips_local.c
index 51b535a..aefb71d 100644
--- a/drivers/staging/ccree/ssi_fips_local.c
+++ b/drivers/staging/ccree/ssi_fips_local.c
@@ -1,22 +1,22 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /**************************************************************
-This file defines the driver FIPS internal function, used by the driver itself.
-***************************************************************/
+ * This file defines the driver FIPS internal function, used by the driver itself.
+ ***************************************************************/
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -26,7 +26,6 @@ This file defines the driver FIPS internal function, used by the driver itself.
 #include "ssi_driver.h"
 #include "cc_hal.h"
 
-
 #define FIPS_POWER_UP_TEST_CIPHER	1
 #define FIPS_POWER_UP_TEST_CMAC		1
 #define FIPS_POWER_UP_TEST_HASH		1
@@ -49,62 +48,57 @@ struct ssi_fips_handle {
 #endif
 };
 
-
-extern int ssi_fips_get_state(ssi_fips_state_t *p_state);
-extern int ssi_fips_get_error(ssi_fips_error_t *p_err);
-extern int ssi_fips_ext_set_state(ssi_fips_state_t state);
-extern int ssi_fips_ext_set_error(ssi_fips_error_t err);
+extern int ssi_fips_get_state(enum cc_fips_state_t *p_state);
+extern int ssi_fips_get_error(enum cc_fips_error *p_err);
+extern int ssi_fips_ext_set_state(enum cc_fips_state_t state);
+extern int ssi_fips_ext_set_error(enum cc_fips_error err);
 
 /* FIPS power-up tests */
-extern ssi_fips_error_t ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern ssi_fips_error_t ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern ssi_fips_error_t ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern ssi_fips_error_t ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern ssi_fips_error_t ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
-extern ssi_fips_error_t ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern enum cc_fips_error ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
 extern size_t ssi_fips_max_mem_alloc_size(void);
 
-
 /* The function called once at driver entry point to check whether a TEE FIPS error occurred.*/
 static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
 {
-	uint32_t regVal;
+	u32 regVal;
 	void __iomem *cc_base = drvdata->cc_base;
 
 	regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
-	if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+	if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
 		return CC_REE_FIPS_ERROR_OK;
-	} 
+
 	return CC_REE_FIPS_ERROR_FROM_TEE;
 }
 
-
-/* 
- This function should push the FIPS REE library status towards the TEE library.
- By writing the error state to HOST_GPR0 register. The function is called from  						.
- driver entry point so no need to protect by mutex.
-*/
-static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, ssi_fips_error_t err)
+/*
+ * This function pushes the FIPS REE library status towards the TEE library
+ * by writing the error state to the HOST_GPR0 register. It is called from the
+ * driver entry point, so there is no need to protect it with a mutex.
+ */
+static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, enum cc_fips_error err)
 {
 	void __iomem *cc_base = drvdata->cc_base;
-	if (err == CC_REE_FIPS_ERROR_OK) {
-		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS|CC_FIPS_SYNC_MODULE_OK));
-	} else {
-		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS|CC_FIPS_SYNC_MODULE_ERROR));
-	}
+
+	if (err == CC_REE_FIPS_ERROR_OK)
+		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK));
+	else
+		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR));
 }
 
-
-
 void ssi_fips_fini(struct ssi_drvdata *drvdata)
 {
 	struct ssi_fips_handle *fips_h = drvdata->fips_handle;
 
-	if (fips_h == NULL)
+	if (!fips_h)
 		return; /* Not allocated */
 
 #ifdef COMP_IN_WQ
-	if (fips_h->workq != NULL) {
+	if (fips_h->workq) {
 		flush_workqueue(fips_h->workq);
 		destroy_workqueue(fips_h->workq);
 	}
@@ -119,7 +113,7 @@ void ssi_fips_fini(struct ssi_drvdata *drvdata)
 
 void fips_handler(struct ssi_drvdata *drvdata)
 {
-	struct ssi_fips_handle *fips_handle_ptr = 
+	struct ssi_fips_handle *fips_handle_ptr =
 						drvdata->fips_handle;
 #ifdef COMP_IN_WQ
 	queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0);
@@ -128,8 +122,6 @@ void fips_handler(struct ssi_drvdata *drvdata)
 #endif
 }
 
-
-
 #ifdef COMP_IN_WQ
 static void fips_wq_handler(struct work_struct *work)
 {
@@ -145,29 +137,27 @@ static void fips_dsr(unsigned long devarg)
 {
 	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
 	void __iomem *cc_base = drvdata->cc_base;
-	uint32_t irq;
-	uint32_t teeFipsError = 0;
+	u32 irq;
+	u32 teeFipsError = 0;
 
 	irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
 
 	if (irq & SSI_GPR0_IRQ_MASK) {
 		teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
-		if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+		if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
 			ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
-		} 
 	}
 
 	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
-	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), 
+	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
 		CC_HAL_READ_REGISTER(
 		CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
 }
 
-
-ssi_fips_error_t cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
+enum cc_fips_error cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
 {
-	ssi_fips_error_t fips_error = CC_REE_FIPS_ERROR_OK;
-	void * cpu_addr_buffer = NULL;
+	enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
+	void *cpu_addr_buffer = NULL;
 	dma_addr_t dma_handle;
 	size_t alloc_buff_size = ssi_fips_max_mem_alloc_size();
 	struct device *dev = &drvdata->plat_dev->dev;
@@ -177,9 +167,9 @@ ssi_fips_error_t cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
 	// the dma_handle is the returned phy address - use it in the HW descriptor
 	FIPS_DBG("dma_alloc_coherent \n");
 	cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL);
-	if (cpu_addr_buffer == NULL) {
+	if (!cpu_addr_buffer)
 		return CC_REE_FIPS_ERROR_GENERAL;
-	}
+
 	FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size);
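[Editor's note] cc_fips_run_power_up_tests() allocates one coherent buffer, sized by ssi_fips_max_mem_alloc_size(), that every known-answer test reuses. A minimal sketch of that allocate/use/free pattern, with an illustrative function name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: allocate a coherent buffer, run the tests against the CPU
 * view (buf) and the device view (dma), then release it.
 */
static int demo_run_kats(struct device *dev, size_t len)
{
	dma_addr_t dma;
	void *buf;

	buf = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... run the known-answer tests here ... */
	dma_free_coherent(dev, len, buf, dma);
	return 0;
}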
 
 #if FIPS_POWER_UP_TEST_CIPHER
@@ -229,13 +219,12 @@ ssi_fips_error_t cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
 	return fips_error;
 }
 
-
-
-/* The function checks if FIPS supported and FIPS error exists.* 
-*  It should be used in every driver API.*/
+/* The function checks whether FIPS is supported and whether a FIPS error exists.
+ * It should be used in every driver API.
+ */
 int ssi_fips_check_fips_error(void)
 {
-	ssi_fips_state_t fips_state; 
+	enum cc_fips_state_t fips_state;
 
 	if (ssi_fips_get_state(&fips_state) != 0) {
 		FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n");
@@ -248,62 +237,61 @@ int ssi_fips_check_fips_error(void)
 	return 0;
 }
 
-
-/* The function sets the REE FIPS state.* 
-*  It should be used while driver is being loaded .*/
-int ssi_fips_set_state(ssi_fips_state_t state)
+/* The function sets the REE FIPS state.
+ * It should be used while the driver is being loaded.
+ */
+int ssi_fips_set_state(enum cc_fips_state_t state)
 {
 	return ssi_fips_ext_set_state(state);
 }
 
-/* The function sets the REE FIPS error, and pushes the error to TEE library. * 
-*  It should be used when any of the KAT tests fails .*/
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err)
+/* The function sets the REE FIPS error and pushes the error to the TEE library.
+ * It should be used when any of the KAT tests fails.
+ */
+int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err)
 {
 	int rc = 0;
-        ssi_fips_error_t current_err;
+	enum cc_fips_error current_err;
 
-        FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
+	FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
 
 	// setting no error is not allowed
-	if (err == CC_REE_FIPS_ERROR_OK) {
-                return -ENOEXEC;
-	} 
-        // If error exists, do not set new error
-        if (ssi_fips_get_error(&current_err) != 0) {
-                return -ENOEXEC;
-        }
-        if (current_err != CC_REE_FIPS_ERROR_OK) {
-                return -ENOEXEC;
-        }
-        // set REE internal error and state
-	rc = ssi_fips_ext_set_error(err);
-	if (rc != 0) {
-                return -ENOEXEC;
-	}
-	rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
-	if (rc != 0) {
-                return -ENOEXEC;
-	}
+	if (err == CC_REE_FIPS_ERROR_OK)
+		return -ENOEXEC;
 
-        // push error towards TEE libraray, if it's not TEE error
-	if (err != CC_REE_FIPS_ERROR_FROM_TEE) {
+	// If error exists, do not set new error
+	if (ssi_fips_get_error(&current_err) != 0)
+		return -ENOEXEC;
+
+	if (current_err != CC_REE_FIPS_ERROR_OK)
+		return -ENOEXEC;
+
+	// set REE internal error and state
+	rc = ssi_fips_ext_set_error(err);
+	if (rc != 0)
+		return -ENOEXEC;
+
+	rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
+	if (rc != 0)
+		return -ENOEXEC;
+
+	// push error towards the TEE library, if it's not a TEE error
+	if (err != CC_REE_FIPS_ERROR_FROM_TEE)
 		ssi_fips_update_tee_upon_ree_status(p_drvdata, err);
-	}
+
 	return rc;
 }
 
-
 /* The function called once at driver entry point. */
 int ssi_fips_init(struct ssi_drvdata *p_drvdata)
 {
-	ssi_fips_error_t rc = CC_REE_FIPS_ERROR_OK;
+	enum cc_fips_error rc = CC_REE_FIPS_ERROR_OK;
 	struct ssi_fips_handle *fips_h;
 
 	FIPS_DBG("CC FIPS code ..  (fips=%d) \n", ssi_fips_support);
 
-	fips_h = kzalloc(sizeof(struct ssi_fips_handle),GFP_KERNEL);
-	if (fips_h == NULL) {
+	fips_h = kzalloc(sizeof(struct ssi_fips_handle), GFP_KERNEL);
+	if (!fips_h) {
 		ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
 		return -ENOMEM;
 	}
@@ -313,7 +301,7 @@ int ssi_fips_init(struct ssi_drvdata *p_drvdata)
 #ifdef COMP_IN_WQ
 	SSI_LOG_DEBUG("Initializing fips workqueue\n");
 	fips_h->workq = create_singlethread_workqueue("arm_cc7x_fips_wq");
-	if (unlikely(fips_h->workq == NULL)) {
+	if (unlikely(!fips_h->workq)) {
 		SSI_LOG_ERR("Failed creating fips work queue\n");
 		ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
 		rc = -ENOMEM;
@@ -326,7 +314,7 @@ int ssi_fips_init(struct ssi_drvdata *p_drvdata)
 #endif
 
 	/* init fips driver data */
-	rc = ssi_fips_set_state((ssi_fips_support == 0)? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED);
+	rc = ssi_fips_set_state((ssi_fips_support == 0) ? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED);
 	if (unlikely(rc != 0)) {
 		ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
 		rc = -EAGAIN;
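[Editor's note] The ssi_fips_set_error() rework above replaces nested if/else blocks with early returns. A minimal sketch of that guard-clause shape, using a stand-in state variable rather than the driver's real accessors:

#include <linux/errno.h>

static int demo_current_error;	/* stand-in for the driver's stored FIPS error */

static int demo_set_error(int err)
{
	if (err == 0)			/* setting "no error" is rejected */
		return -ENOEXEC;
	if (demo_current_error != 0)	/* an earlier error sticks */
		return -ENOEXEC;

	demo_current_error = err;	/* record the new error state */
	return 0;
}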
diff --git a/drivers/staging/ccree/ssi_fips_local.h b/drivers/staging/ccree/ssi_fips_local.h
index 65997c1..8c7994f 100644
--- a/drivers/staging/ccree/ssi_fips_local.h
+++ b/drivers/staging/ccree/ssi_fips_local.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,32 +17,23 @@
 #ifndef __SSI_FIPS_LOCAL_H__
 #define __SSI_FIPS_LOCAL_H__
 
-
 #ifdef CONFIG_CCX7REE_FIPS_SUPPORT
 
 #include "ssi_fips.h"
 struct ssi_drvdata;
 
-// IG - how to make 1 file for TEE and REE
-typedef enum CC_FipsSyncStatus{
-	CC_FIPS_SYNC_MODULE_OK 		= 0x0,
-	CC_FIPS_SYNC_MODULE_ERROR 	= 0x1,
-	CC_FIPS_SYNC_REE_STATUS 	= 0x4,
-	CC_FIPS_SYNC_TEE_STATUS 	= 0x8,
-	CC_FIPS_SYNC_STATUS_RESERVE32B 	= INT32_MAX
-}CCFipsSyncStatus_t;
-
-
 #define CHECK_AND_RETURN_UPON_FIPS_ERROR() {\
-        if (ssi_fips_check_fips_error() != 0) {\
-                return -ENOEXEC;\
-        }\
+	if (ssi_fips_check_fips_error() != 0) {\
+		return -ENOEXEC;\
+	} \
 }
+
 #define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\
-        if (ssi_fips_check_fips_error() != 0) {\
-                return;\
-        }\
+	if (ssi_fips_check_fips_error() != 0) {\
+		return;\
+	} \
 }
+
 #define SSI_FIPS_INIT(p_drvData)  (ssi_fips_init(p_drvData))
 #define SSI_FIPS_FINI(p_drvData)  (ssi_fips_fini(p_drvData))
 
@@ -53,7 +44,7 @@ typedef enum CC_FipsSyncStatus{
 int ssi_fips_init(struct ssi_drvdata *p_drvdata);
 void ssi_fips_fini(struct ssi_drvdata *drvdata);
 int ssi_fips_check_fips_error(void);
-int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err);
+int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err);
 void fips_handler(struct ssi_drvdata *drvdata);
 
 #else  /* CONFIG_CC7XXREE_FIPS_SUPPORT */
@@ -72,6 +63,5 @@ void fips_handler(struct ssi_drvdata *drvdata);
 
 #endif  /* CONFIG_CC7XXREE_FIPS_SUPPORT */
 
-
 #endif  /*__SSI_FIPS_LOCAL_H__*/
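[Editor's note] The CHECK_AND_RETURN_UPON_FIPS_ERROR() macros above expand to an early return at the top of each driver API entry point. A usage sketch with hypothetical names; the stand-in macro below uses the common do { } while (0) form, whereas the driver's version uses plain braces:

#include <linux/errno.h>

static int demo_fips_error;	/* stand-in for ssi_fips_check_fips_error() state */

#define DEMO_CHECK_FIPS() do { \
	if (demo_fips_error != 0) \
		return -ENOEXEC; \
} while (0)

static int demo_cipher_entry(void)
{
	DEMO_CHECK_FIPS();	/* bail out early if a FIPS error is latched */
	/* ... normal request processing ... */
	return 0;
}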
 
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index f99d421..ae8f36a 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -42,64 +42,61 @@ struct ssi_hash_handle {
 	struct completion init_comp;
 };
 
-static const uint32_t digest_len_init[] = {
+static const u32 digest_len_init[] = {
 	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
-static const uint32_t md5_init[] = { 
+static const u32 md5_init[] = {
 	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha1_init[] = { 
+static const u32 sha1_init[] = {
 	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
-static const uint32_t sha224_init[] = { 
+static const u32 sha224_init[] = {
 	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
 	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
-static const uint32_t sha256_init[] = {
+static const u32 sha256_init[] = {
 	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
 #if (DX_DEV_SHA_MAX > 256)
-static const uint32_t digest_len_sha512_init[] = { 
+static const u32 digest_len_sha512_init[] = {
 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static const uint64_t sha384_init[] = {
+static const u64 sha384_init[] = {
 	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
 	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static const uint64_t sha512_init[] = {
+static const u64 sha512_init[] = {
 	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
 	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 #endif
 
 static void ssi_hash_create_xcbc_setup(
-	struct ahash_request *areq, 
-	HwDesc_s desc[],
+	struct ahash_request *areq,
+	struct cc_hw_desc desc[],
 	unsigned int *seq_size);
 
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq, 
-				  HwDesc_s desc[],
+static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
+				  struct cc_hw_desc desc[],
 				  unsigned int *seq_size);
 
 struct ssi_hash_alg {
 	struct list_head entry;
-	bool synchronize;
 	int hash_mode;
 	int hw_mode;
 	int inter_digestsize;
 	struct ssi_drvdata *drvdata;
-	union {
-		struct ahash_alg ahash_alg;
-		struct shash_alg shash_alg;
-	};
+	struct ahash_alg ahash_alg;
 };
 
-
 struct hash_key_req_ctx {
-	uint32_t keylen;
+	u32 keylen;
 	dma_addr_t key_dma_addr;
 };
 
 /* hash per-session context */
 struct ssi_hash_ctx {
 	struct ssi_drvdata *drvdata;
-	/* holds the origin digest; the digest after "setkey" if HMAC,* 
-	   the initial digest if HASH. */
-	uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
-	uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;
+	/* holds the original digest; the digest after "setkey" if HMAC,
+	 * the initial digest if HASH.
+	 */
+	u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
+	u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE]  ____cacheline_aligned;
+
 	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
 	dma_addr_t digest_buff_dma_addr;
 	/* used for hmac with a key larger than the mode block size */
@@ -111,31 +108,29 @@ struct ssi_hash_ctx {
 	bool is_hmac;
 };
 
-static const struct crypto_type crypto_shash_type;
-
 static void ssi_hash_create_data_desc(
 	struct ahash_req_ctx *areq_ctx,
-	struct ssi_hash_ctx *ctx, 
-	unsigned int flow_mode,HwDesc_s desc[],
+	struct ssi_hash_ctx *ctx,
+	unsigned int flow_mode, struct cc_hw_desc desc[],
 	bool is_not_last_data,
 	unsigned int *seq_size);
 
-static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
+static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
 {
 	if (unlikely((mode == DRV_HASH_MD5) ||
 		(mode == DRV_HASH_SHA384) ||
 		(mode == DRV_HASH_SHA512))) {
-		HW_DESC_SET_BYTES_SWAP(desc, 1);
+		set_bytes_swap(desc, 1);
 	} else {
-		HW_DESC_SET_CIPHER_CONFIG0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 	}
 }
 
-static int ssi_hash_map_result(struct device *dev, 
-			       struct ahash_req_ctx *state, 
+static int ssi_hash_map_result(struct device *dev,
+			       struct ahash_req_ctx *state,
 			       unsigned int digestsize)
 {
-	state->digest_result_dma_addr = 
+	state->digest_result_dma_addr =
 		dma_map_single(dev, (void *)state->digest_result_buff,
 			       digestsize,
 			       DMA_BIDIRECTIONAL);
@@ -144,8 +139,6 @@ static int ssi_hash_map_result(struct device *dev,
 			digestsize);
 		return -ENOMEM;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr,
-						digestsize);
 	SSI_LOG_DEBUG("Mapped digest result buffer %u B "
 		     "at va=%pK to dma=0x%llX\n",
 		digestsize, state->digest_result_buff,
@@ -154,33 +147,33 @@ static int ssi_hash_map_result(struct device *dev,
 	return 0;
 }
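[Editor's note] ssi_hash_map_result() above, and the request-mapping code that follows, use the plain dma_map_single()/dma_mapping_error() pair now that the 48-bit address-mangling macros (SSI_UPDATE_DMA_ADDR_TO_48BIT and friends) are removed. A minimal sketch of that pattern, with an illustrative helper name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: map a CPU buffer for device access and validate the handle. */
static int demo_map_buffer(struct device *dev, void *buf, size_t len,
			   dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}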
 
-static int ssi_hash_map_request(struct device *dev, 
-				struct ahash_req_ctx *state, 
+static int ssi_hash_map_request(struct device *dev,
+				struct ahash_req_ctx *state,
 				struct ssi_hash_ctx *ctx)
 {
 	bool is_hmac = ctx->is_hmac;
 	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
 					ctx->drvdata, ctx->hash_mode);
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc;
+	struct cc_hw_desc desc;
 	int rc = -ENOMEM;
 
-	state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
+	state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!state->buff0) {
 		SSI_LOG_ERR("Allocating buff0 in context failed\n");
 		goto fail0;
 	}
-	state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
+	state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!state->buff1) {
 		SSI_LOG_ERR("Allocating buff1 in context failed\n");
 		goto fail_buff0;
 	}
-	state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE ,GFP_KERNEL|GFP_DMA);
+	state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!state->digest_result_buff) {
 		SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
 		goto fail_buff1;
 	}
-	state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
+	state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
 	if (!state->digest_buff) {
 		SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
 		goto fail_digest_result_buff;
@@ -188,7 +181,7 @@ static int ssi_hash_map_request(struct device *dev,
 
 	SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
 	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
-		state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL|GFP_DMA);
+		state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
 		if (!state->digest_bytes_len) {
 			SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
 			goto fail1;
@@ -198,7 +191,7 @@ static int ssi_hash_map_request(struct device *dev,
 		state->digest_bytes_len = NULL;
 	}
 
-	state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
+	state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
 	if (!state->opad_digest_buff) {
 		SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
 		goto fail2;
@@ -211,50 +204,40 @@ static int ssi_hash_map_request(struct device *dev,
 		ctx->inter_digestsize, state->digest_buff);
 		goto fail3;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr, 
-							ctx->inter_digestsize);
 	SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
 		ctx->inter_digestsize, state->digest_buff,
 		(unsigned long long)state->digest_buff_dma_addr);
 
 	if (is_hmac) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
 		dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr, 
-							ctx->inter_digestsize);
 		if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
 			memset(state->digest_buff, 0, ctx->inter_digestsize);
 		} else { /*sha*/
 			memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
 #if (DX_DEV_SHA_MAX > 256)
-			if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) {
+			if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
 				memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
-			} else {
+			else
 				memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
-			}
 #else
 			memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
 #endif
 		}
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
 		dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr, 
-							ctx->inter_digestsize);
 
 		if (ctx->hash_mode != DRV_HASH_NULL) {
-			SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
 			dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 			memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
-			SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, 
-							ctx->inter_digestsize);
-		} 
+		}
 	} else { /*hash*/
 		/* Copy the initial digests if hash flow. The SRAM contains the
-		initial digests in the expected order for all SHA* */
-		HW_DESC_INIT(&desc);
-		HW_DESC_SET_DIN_SRAM(&desc, larval_digest_addr, ctx->inter_digestsize);
-		HW_DESC_SET_DOUT_DLLI(&desc, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc, BYPASS);
+		 * initial digests in the expected order for all SHA*
+		 */
+		hw_desc_init(&desc);
+		set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
+		set_dout_dlli(&desc, state->digest_buff_dma_addr,
+			      ctx->inter_digestsize, NS_BIT, 0);
+		set_flow_mode(&desc, BYPASS);
 
 		rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
 		if (unlikely(rc != 0)) {
@@ -270,8 +253,6 @@ static int ssi_hash_map_request(struct device *dev,
 			HASH_LEN_SIZE, state->digest_bytes_len);
 			goto fail4;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr,
-								HASH_LEN_SIZE);
 		SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
 			HASH_LEN_SIZE, state->digest_bytes_len,
 			(unsigned long long)state->digest_bytes_len_dma_addr);
@@ -286,8 +267,6 @@ static int ssi_hash_map_request(struct device *dev,
 			ctx->inter_digestsize, state->opad_digest_buff);
 			goto fail5;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr,
-							ctx->inter_digestsize);
 		SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
 			ctx->inter_digestsize, state->opad_digest_buff,
 			(unsigned long long)state->opad_digest_dma_addr);
@@ -303,13 +282,11 @@ static int ssi_hash_map_request(struct device *dev,
 
 fail5:
 	if (state->digest_bytes_len_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
 		state->digest_bytes_len_dma_addr = 0;
 	}
 fail4:
 	if (state->digest_buff_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
 		dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 		state->digest_buff_dma_addr = 0;
 	}
@@ -320,17 +297,17 @@ static int ssi_hash_map_request(struct device *dev,
 fail1:
 	 kfree(state->digest_buff);
 fail_digest_result_buff:
-	 if (state->digest_result_buff != NULL) {
+	 if (state->digest_result_buff) {
 		 kfree(state->digest_result_buff);
 	     state->digest_result_buff = NULL;
 	 }
 fail_buff1:
-	 if (state->buff1 != NULL) {
+	 if (state->buff1) {
 		 kfree(state->buff1);
 	     state->buff1 = NULL;
 	 }
 fail_buff0:
-	 if (state->buff0 != NULL) {
+	 if (state->buff0) {
 		 kfree(state->buff0);
 	     state->buff0 = NULL;
 	 }
@@ -338,12 +315,11 @@ static int ssi_hash_map_request(struct device *dev,
 	return rc;
 }
 
-static void ssi_hash_unmap_request(struct device *dev, 
-				   struct ahash_req_ctx *state, 
+static void ssi_hash_unmap_request(struct device *dev,
+				   struct ahash_req_ctx *state,
 				   struct ssi_hash_ctx *ctx)
 {
 	if (state->digest_buff_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
 		dma_unmap_single(dev, state->digest_buff_dma_addr,
 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
@@ -351,7 +327,6 @@ static void ssi_hash_unmap_request(struct device *dev,
 		state->digest_buff_dma_addr = 0;
 	}
 	if (state->digest_bytes_len_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
 		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 				 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
@@ -359,7 +334,6 @@ static void ssi_hash_unmap_request(struct device *dev,
 		state->digest_bytes_len_dma_addr = 0;
 	}
 	if (state->opad_digest_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr);
 		dma_unmap_single(dev, state->opad_digest_dma_addr,
 				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
@@ -375,19 +349,18 @@ static void ssi_hash_unmap_request(struct device *dev,
 	kfree(state->buff0);
 }
 
-static void ssi_hash_unmap_result(struct device *dev, 
-				  struct ahash_req_ctx *state, 
+static void ssi_hash_unmap_result(struct device *dev,
+				  struct ahash_req_ctx *state,
 				  unsigned int digestsize, u8 *result)
 {
 	if (state->digest_result_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr);
 		dma_unmap_single(dev,
 				 state->digest_result_dma_addr,
 				 digestsize,
-				  DMA_BIDIRECTIONAL);	
+				  DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("unmap digest result buffer "
 			     "va (%pK) pa (%llx) len %u\n",
-			     state->digest_result_buff, 
+			     state->digest_result_buff,
 			     (unsigned long long)state->digest_result_dma_addr,
 			     digestsize);
 		memcpy(result,
@@ -414,8 +387,8 @@ static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __i
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
-	
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
 	SSI_LOG_DEBUG("req=%pK\n", req);
 
 	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
@@ -430,8 +403,8 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
-	
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
 	SSI_LOG_DEBUG("req=%pK\n", req);
 
 	ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
@@ -440,24 +413,23 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
 	req->base.complete(&req->base, 0);
 }
 
-static int ssi_hash_digest(struct ahash_req_ctx *state, 
-			   struct ssi_hash_ctx *ctx, 
-			   unsigned int digestsize, 
-			   struct scatterlist *src, 
-			   unsigned int nbytes, u8 *result, 
+static int ssi_hash_digest(struct ahash_req_ctx *state,
+			   struct ssi_hash_ctx *ctx,
+			   unsigned int digestsize,
+			   struct scatterlist *src,
+			   unsigned int nbytes, u8 *result,
 			   void *async_req)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	bool is_hmac = ctx->is_hmac;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
 					ctx->drvdata, ctx->hash_mode);
 	int idx = 0;
 	int rc = 0;
 
-
-	SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+	SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
@@ -480,102 +452,109 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
 		/* Setup DX request structure */
 		ssi_req.user_cb = (void *)ssi_hash_digest_complete;
 		ssi_req.user_arg = (void *)async_req;
-#ifdef ENABLE_CYCLE_COUNT
-		ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 	}
 
 	/* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	if (is_hmac) {
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     ctx->inter_digestsize, NS_BIT);
 	} else {
-		HW_DESC_SET_DIN_SRAM(&desc[idx], larval_digest_addr, ctx->inter_digestsize);
+		set_din_sram(&desc[idx], larval_digest_addr,
+			     ctx->inter_digestsize);
 	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Load the hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 
 	if (is_hmac) {
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
+			     NS_BIT);
 	} else {
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-		if (likely(nbytes != 0)) {
-			HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-		} else {
-			HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
-		}
+		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+		if (likely(nbytes != 0))
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+		else
+			set_cipher_do(&desc[idx], DO_PAD);
 	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
 
 	if (is_hmac) {
 		/* HW last hash block padding (aka. "DO_PAD") */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
-		HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      HASH_LEN_SIZE, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+		set_cipher_do(&desc[idx], DO_PAD);
 		idx++;
 
 		/* store the hash digest result in the context */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      digestsize, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 		idx++;
 
 		/* Loading hash opad xor key state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+			     ctx->inter_digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
 		/* Load the hash current length */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
-		HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_sram(&desc[idx],
+			     ssi_ahash_get_initial_digest_len_sram_addr(
+ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-		HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
 		idx++;
 
 		/* Perform HASH update */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 	}
 
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0);   /*TODO*/
-	if (async_req) {
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+		      NS_BIT, (async_req ? 1 : 0));
+	if (async_req)
+		set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
 	idx++;
 
@@ -593,7 +572,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
 			SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
 			ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
 		} else {
-			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);			
+			ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
 		}
 		ssi_hash_unmap_result(dev, state, digestsize, result);
 		ssi_hash_unmap_request(dev, state, ctx);
@@ -601,21 +580,21 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
 	return rc;
 }
 
-static int ssi_hash_update(struct ahash_req_ctx *state, 
-			   struct ssi_hash_ctx *ctx, 
-			   unsigned int block_size, 
-			   struct scatterlist *src, 
-			   unsigned int nbytes, 
+static int ssi_hash_update(struct ahash_req_ctx *state,
+			   struct ssi_hash_ctx *ctx,
+			   unsigned int block_size,
+			   struct scatterlist *src,
+			   unsigned int nbytes,
 			   void *async_req)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
-	uint32_t idx = 0;
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+	u32 idx = 0;
 	int rc;
 
 	SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
-					"hmac":"hash", nbytes);
+					"hmac" : "hash", nbytes);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	if (nbytes == 0) {
@@ -638,45 +617,45 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
 		/* Setup DX request structure */
 		ssi_req.user_cb = (void *)ssi_hash_update_complete;
 		ssi_req.user_arg = async_req;
-#ifdef ENABLE_CYCLE_COUNT
-		ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 	}
 
 	/* Restore hash digest */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     ctx->inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 	/* Restore hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+		     HASH_LEN_SIZE, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
 
 	/* store the hash digest result in context */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+		      ctx->inter_digestsize, NS_BIT, 0);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 	idx++;
 
 	/* store current hash length in context */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, async_req? 1:0);
-	if (async_req) {
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+		      HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
+	if (async_req)
+		set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 	idx++;
 
 	if (async_req) {
@@ -697,26 +676,26 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
 	return rc;
 }
 
-static int ssi_hash_finup(struct ahash_req_ctx *state, 
-			  struct ssi_hash_ctx *ctx, 
-			  unsigned int digestsize, 
-			  struct scatterlist *src, 
-			  unsigned int nbytes, 
-			  u8 *result, 
+static int ssi_hash_finup(struct ahash_req_ctx *state,
+			  struct ssi_hash_ctx *ctx,
+			  unsigned int digestsize,
+			  struct scatterlist *src,
+			  unsigned int nbytes,
+			  u8 *result,
 			  void *async_req)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	bool is_hmac = ctx->is_hmac;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	int idx = 0;
 	int rc;
 
-	SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+	SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
-	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src , nbytes, 1) != 0)) {
+	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
 		SSI_LOG_ERR("map_ahash_request_final() failed\n");
 		return -ENOMEM;
 	}
@@ -729,81 +708,86 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
 		/* Setup DX request structure */
 		ssi_req.user_cb = (void *)ssi_hash_complete;
 		ssi_req.user_arg = async_req;
-#ifdef ENABLE_CYCLE_COUNT
-		ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 	}
 
 	/* Restore hash digest */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     ctx->inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Restore hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+		     HASH_LEN_SIZE, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
 
 	if (is_hmac) {
 		/* Store the hash digest result in the context */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
-		ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      digestsize, NS_BIT, 0);
+		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 		idx++;
 
 		/* Loading hash OPAD xor key state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+			     ctx->inter_digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
 		/* Load the hash current length */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
-		HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_sram(&desc[idx],
+			     ssi_ahash_get_initial_digest_len_sram_addr(
+ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-		HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
 		idx++;
 
 		/* Perform HASH update on last digest */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 	}
 
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
-	if (async_req) {
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
+	hw_desc_init(&desc[idx]);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+		      NS_BIT, (async_req ? 1 : 0));
+	if (async_req)
+		set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	idx++;
 
 	if (async_req) {
@@ -828,22 +812,22 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
 	return rc;
 }
 
-static int ssi_hash_final(struct ahash_req_ctx *state, 
-			  struct ssi_hash_ctx *ctx, 
-			  unsigned int digestsize, 
-			  struct scatterlist *src, 
-			  unsigned int nbytes, 
-			  u8 *result, 
+static int ssi_hash_final(struct ahash_req_ctx *state,
+			  struct ssi_hash_ctx *ctx,
+			  unsigned int digestsize,
+			  struct scatterlist *src,
+			  unsigned int nbytes,
+			  u8 *result,
 			  void *async_req)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	bool is_hmac = ctx->is_hmac;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	int idx = 0;
 	int rc;
 
-	SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+	SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
@@ -861,90 +845,95 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
 		/* Setup DX request structure */
 		ssi_req.user_cb = (void *)ssi_hash_complete;
 		ssi_req.user_arg = async_req;
-#ifdef ENABLE_CYCLE_COUNT
-		ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 	}
 
 	/* Restore hash digest */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     ctx->inter_digestsize, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 	idx++;
 
 	/* Restore hash current length */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
+		     HASH_LEN_SIZE, NS_BIT);
+	set_flow_mode(&desc[idx], S_DIN_to_HASH);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
 	ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
 
 	/* "DO-PAD" must be enabled only when writing current length to HW */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+	hw_desc_init(&desc[idx]);
+	set_cipher_do(&desc[idx], DO_PAD);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
+		      HASH_LEN_SIZE, NS_BIT, 0);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 	idx++;
 
 	if (is_hmac) {
 		/* Store the hash digest result in the context */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
-		ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      digestsize, NS_BIT, 0);
+		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 		idx++;
 
 		/* Loading hash OPAD xor key state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
+			     ctx->inter_digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
 		/* Load the hash current length */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
-		HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_sram(&desc[idx],
+			     ssi_ahash_get_initial_digest_len_sram_addr(
+ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+		set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-		HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
 		idx++;
 
 		/* Perform HASH update on last digest */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     digestsize, NS_BIT);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 	}
 
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0);
-	if (async_req) {
-		HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	}
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-	HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+	hw_desc_init(&desc[idx]);
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
+		      NS_BIT, (async_req ? 1 : 0));
+	if (async_req)
+		set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	idx++;
 
 	if (async_req) {
@@ -972,7 +961,8 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
 static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
-	state->xcbc_count = 0;	
+
+	state->xcbc_count = 0;
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	ssi_hash_map_request(dev, state, ctx);
@@ -980,25 +970,9 @@ static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
 	return 0;
 }
 
-#ifdef EXPORT_FIXED
-static int ssi_hash_export(struct ssi_hash_ctx *ctx, void *out)
-{
-	CHECK_AND_RETURN_UPON_FIPS_ERROR();
-	memcpy(out, ctx, sizeof(struct ssi_hash_ctx));
-	return 0;
-}
-
-static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in)
-{
-	CHECK_AND_RETURN_UPON_FIPS_ERROR();
-	memcpy(ctx, in, sizeof(struct ssi_hash_ctx));
-	return 0;
-}
-#endif
-
 static int ssi_hash_setkey(void *hash,
-			   const u8 *key, 
-			   unsigned int keylen, 
+			   const u8 *key,
+			   unsigned int keylen,
 			   bool synchronize)
 {
 	unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
@@ -1007,27 +981,22 @@ static int ssi_hash_setkey(void *hash,
 	int blocksize = 0;
 	int digestsize = 0;
 	int i, idx = 0, rc = 0;
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	ssi_sram_addr_t larval_addr;
 
 	 SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
-	
+
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
-	if (synchronize) {
-		ctx = crypto_shash_ctx(((struct crypto_shash *)hash));
-		blocksize = crypto_tfm_alg_blocksize(&((struct crypto_shash *)hash)->base);
-		digestsize = crypto_shash_digestsize(((struct crypto_shash *)hash));
-	} else {
-		ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
-		blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
-		digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
-	}
-	
+	ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+	blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
+	digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
+
 	larval_addr = ssi_ahash_get_larval_digest_sram_addr(
 					ctx->drvdata, ctx->hash_mode);
 
 	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
-	   any NON-ZERO value utilizes HMAC flow */
+	 * any NON-ZERO value utilizes HMAC flow
+	 */
 	ctx->key_params.keylen = keylen;
 	ctx->key_params.key_dma_addr = 0;
 	ctx->is_hmac = true;
@@ -1043,7 +1012,6 @@ static int ssi_hash_setkey(void *hash,
 				   " DMA failed\n", key, keylen);
 			return -ENOMEM;
 		}
-		SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
 		SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
 			     "keylen=%u\n",
 			     (unsigned long long)ctx->key_params.key_dma_addr,
@@ -1051,79 +1019,76 @@ static int ssi_hash_setkey(void *hash,
 
 		if (keylen > blocksize) {
 			/* Load hash initial state */
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-			HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
-					ctx->inter_digestsize);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_din_sram(&desc[idx], larval_addr,
+				     ctx->inter_digestsize);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 			idx++;
-	
+
 			/* Load the hash current length*/
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-			HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-			HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
+			set_flow_mode(&desc[idx], S_DIN_to_HASH);
+			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 			idx++;
-	
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-					     ctx->key_params.key_dma_addr, 
-					     keylen, NS_BIT);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     ctx->key_params.key_dma_addr, keylen,
+				     NS_BIT);
+			set_flow_mode(&desc[idx], DIN_HASH);
 			idx++;
-	
+
 			/* Get hashed key */
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
-			HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr,
-					      digestsize, NS_BIT, 0);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-			HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-			HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
-			ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]);
+			hw_desc_init(&desc[idx]);
+			set_cipher_mode(&desc[idx], ctx->hw_mode);
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      digestsize, NS_BIT, 0);
+			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
+			ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
 			idx++;
-	
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
-			HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-			HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-					      (ctx->opad_tmp_keys_dma_addr + digestsize),
-					      (blocksize - digestsize),
-					      NS_BIT, 0);
+
+			hw_desc_init(&desc[idx]);
+			set_din_const(&desc[idx], 0, (blocksize - digestsize));
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+						   digestsize),
+				      (blocksize - digestsize), NS_BIT, 0);
 			idx++;
 		} else {
-			HW_DESC_INIT(&desc[idx]);
-			HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-					     ctx->key_params.key_dma_addr, 
-					     keylen, NS_BIT);
-			HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-			HW_DESC_SET_DOUT_DLLI(&desc[idx],
-					(ctx->opad_tmp_keys_dma_addr),
-					keylen, NS_BIT, 0);
+			hw_desc_init(&desc[idx]);
+			set_din_type(&desc[idx], DMA_DLLI,
+				     ctx->key_params.key_dma_addr, keylen,
+				     NS_BIT);
+			set_flow_mode(&desc[idx], BYPASS);
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      keylen, NS_BIT, 0);
 			idx++;
 
 			if ((blocksize - keylen) != 0) {
-				HW_DESC_INIT(&desc[idx]);
-				HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen));
-				HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-				HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-						      (ctx->opad_tmp_keys_dma_addr + keylen),
-						      (blocksize - keylen),
-						      NS_BIT, 0);
+				hw_desc_init(&desc[idx]);
+				set_din_const(&desc[idx], 0,
+					      (blocksize - keylen));
+				set_flow_mode(&desc[idx], BYPASS);
+				set_dout_dlli(&desc[idx],
+					      (ctx->opad_tmp_keys_dma_addr +
+					       keylen), (blocksize - keylen),
+					      NS_BIT, 0);
 				idx++;
 			}
 		}
 	} else {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], 
-				      (ctx->opad_tmp_keys_dma_addr),
-				      blocksize,
-				      NS_BIT, 0);
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0, blocksize);
+		set_flow_mode(&desc[idx], BYPASS);
+		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
+			      blocksize, NS_BIT, 0);
 		idx++;
 	}
 
@@ -1136,71 +1101,59 @@ static int ssi_hash_setkey(void *hash,
 	/* calc derived HMAC key */
 	for (idx = 0, i = 0; i < 2; i++) {
 		/* Load hash initial state */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
-				ctx->inter_digestsize);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 		idx++;
 
 		/* Load the hash current length*/
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
 		/* Prepare ipad key */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+		hw_desc_init(&desc[idx]);
+		set_xor_val(&desc[idx], hmacPadConst[i]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_flow_mode(&desc[idx], S_DIN_to_HASH);
+		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 		idx++;
 
 		/* Perform HASH update */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
-				     ctx->opad_tmp_keys_dma_addr,
-				     blocksize, NS_BIT);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx],ctx->hw_mode);
-		HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+			     blocksize, NS_BIT);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_xor_active(&desc[idx]);
+		set_flow_mode(&desc[idx], DIN_HASH);
 		idx++;
 
 		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
 		if (i > 0) /* Not first iteration */
-			HW_DESC_SET_DOUT_DLLI(&desc[idx],
-					      ctx->opad_tmp_keys_dma_addr,
-					      ctx->inter_digestsize,
-					      NS_BIT, 0);
+			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+				      ctx->inter_digestsize, NS_BIT, 0);
 		else /* First iteration */
-			HW_DESC_SET_DOUT_DLLI(&desc[idx],
-					      ctx->digest_buff_dma_addr,
-					      ctx->inter_digestsize,
-					      NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
+				      ctx->inter_digestsize, NS_BIT, 0);
+		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
+		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 		idx++;
 	}
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
 
 out:
-	if (rc != 0) {
-		if (synchronize) {
-			crypto_shash_set_flags((struct crypto_shash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		} else {
-			crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		}
-	}
+	if (rc)
+		crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 
 	if (ctx->key_params.key_dma_addr) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
 		dma_unmap_single(&ctx->drvdata->plat_dev->dev,
 				ctx->key_params.key_dma_addr,
 				ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -1211,14 +1164,13 @@ static int ssi_hash_setkey(void *hash,
 	return rc;
 }
 
-
 static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
 {
 	struct ssi_crypto_req ssi_req = {};
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	int idx = 0, rc = 0;
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 
 	SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
@@ -1244,43 +1196,43 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 			   " DMA failed\n", key, keylen);
 		return -ENOMEM;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
 	SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
 		     "keylen=%u\n",
 		     (unsigned long long)ctx->key_params.key_dma_addr,
 		     ctx->key_params.keylen);
-	
+
 	ctx->is_hmac = true;
 	/* 1. Load the AES key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keylen);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
+		     keylen, NS_BIT);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+	set_key_size_aes(&desc[idx], keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 	idx++;
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
-					   XCBC_MAC_K1_OFFSET), 
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+					   XCBC_MAC_K1_OFFSET),
 			      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 	idx++;
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
-					   XCBC_MAC_K2_OFFSET), 
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+					   XCBC_MAC_K2_OFFSET),
 			      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 	idx++;
 
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 
+	hw_desc_init(&desc[idx]);
+	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], DIN_AES_DOUT);
+	set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
 					   XCBC_MAC_K3_OFFSET),
 			       CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 	idx++;
@@ -1290,7 +1242,6 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 	if (rc != 0)
 		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
 	dma_unmap_single(&ctx->drvdata->plat_dev->dev,
 			ctx->key_params.key_dma_addr,
 			ctx->key_params.keylen, DMA_TO_DEVICE);
@@ -1300,12 +1251,13 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
 
 	return rc;
 }
+
 #if SSI_CC_HAS_CMAC
 static int ssi_cmac_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
 {
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	DECL_CYCLE_COUNT_RESOURCES;
+
 	SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 
@@ -1323,25 +1275,20 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
 	ctx->key_params.keylen = keylen;
 
 	/* STAT_PHASE_1: Copy key to ctx */
-	START_CYCLE_COUNT();
-	
-	SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
+
 	dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
-				ctx->opad_tmp_keys_dma_addr, 
+				ctx->opad_tmp_keys_dma_addr,
 				keylen, DMA_TO_DEVICE);
 
 	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
 	if (keylen == 24)
 		memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
-	
+
 	dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
-				   ctx->opad_tmp_keys_dma_addr, 
+				   ctx->opad_tmp_keys_dma_addr,
 				   keylen, DMA_TO_DEVICE);
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen);
-		
+
 	ctx->key_params.keylen = keylen;
-	
-	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
 
 	return 0;
 }
@@ -1352,7 +1299,6 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 
 	if (ctx->digest_buff_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
 		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
 				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
 		SSI_LOG_DEBUG("Unmapped digest-buffer: "
@@ -1361,7 +1307,6 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
 		ctx->digest_buff_dma_addr = 0;
 	}
 	if (ctx->opad_tmp_keys_dma_addr != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
 		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
 				 sizeof(ctx->opad_tmp_keys_buff),
 				 DMA_BIDIRECTIONAL);
@@ -1372,10 +1317,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
 	}
 
 	ctx->key_params.keylen = 0;
-
 }
 
-
 static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 {
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
@@ -1388,8 +1331,6 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 			sizeof(ctx->digest_buff), ctx->digest_buff);
 		goto fail;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
-						sizeof(ctx->digest_buff));
 	SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
 		sizeof(ctx->digest_buff), ctx->digest_buff,
 		(unsigned long long)ctx->digest_buff_dma_addr);
@@ -1401,8 +1342,6 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 			ctx->opad_tmp_keys_buff);
 		goto fail;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
-					sizeof(ctx->opad_tmp_keys_buff));
 	SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
 		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
 		(unsigned long long)ctx->opad_tmp_keys_dma_addr);
@@ -1415,34 +1354,16 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
 	return -ENOMEM;
 }
 
-static int ssi_shash_cra_init(struct crypto_tfm *tfm)
-{		
-	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct shash_alg * shash_alg = 
-		container_of(tfm->__crt_alg, struct shash_alg, base);
-	struct ssi_hash_alg *ssi_alg =
-			container_of(shash_alg, struct ssi_hash_alg, shash_alg);
-        	
-	CHECK_AND_RETURN_UPON_FIPS_ERROR();
-	ctx->hash_mode = ssi_alg->hash_mode;
-	ctx->hw_mode = ssi_alg->hw_mode;
-	ctx->inter_digestsize = ssi_alg->inter_digestsize;
-	ctx->drvdata = ssi_alg->drvdata;
-
-	return ssi_hash_alloc_ctx(ctx);
-}
-
 static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
 {
 	struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct hash_alg_common * hash_alg_common = 
+	struct hash_alg_common *hash_alg_common =
 		container_of(tfm->__crt_alg, struct hash_alg_common, base);
-	struct ahash_alg *ahash_alg = 
+	struct ahash_alg *ahash_alg =
 		container_of(hash_alg_common, struct ahash_alg, halg);
 	struct ssi_hash_alg *ssi_alg =
 			container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
 
-
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				sizeof(struct ahash_req_ctx));
@@ -1471,9 +1392,9 @@ static int ssi_mac_update(struct ahash_request *req)
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	int rc;
-	uint32_t idx = 0;
+	u32 idx = 0;
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	if (req->nbytes == 0) {
@@ -1494,29 +1415,26 @@ static int ssi_mac_update(struct ahash_request *req)
 		return -ENOMEM;
 	}
 
-	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
 		ssi_hash_create_xcbc_setup(req, desc, &idx);
-	} else {
+	else
 		ssi_hash_create_cmac_setup(req, desc, &idx);
-	}
-	
+
 	ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
 
 	/* store the hash digest result in context */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1);
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+	hw_desc_init(&desc[idx]);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
+	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+		      ctx->inter_digestsize, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 	idx++;
 
 	/* Setup DX request structure */
 	ssi_req.user_cb = (void *)ssi_hash_update_complete;
 	ssi_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
 	if (unlikely(rc != -EINPROGRESS)) {
@@ -1533,15 +1451,14 @@ static int ssi_mac_final(struct ahash_request *req)
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	int idx = 0;
 	int rc = 0;
-	uint32_t keySize, keyLen;
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
+	u32 keySize, keyLen;
+	u32 digestsize = crypto_ahash_digestsize(tfm);
 
-	uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
+	u32 rem_cnt = state->buff_index ? state->buff1_cnt :
 			state->buff0_cnt;
-	
 
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
@@ -1567,68 +1484,66 @@ static int ssi_mac_final(struct ahash_request *req)
 	/* Setup DX request structure */
 	ssi_req.user_cb = (void *)ssi_hash_complete;
 	ssi_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 
 	if (state->xcbc_count && (rem_cnt == 0)) {
 		/* Load key for ECB decryption */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
-		HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-				     (ctx->opad_tmp_keys_dma_addr + 
-				      XCBC_MAC_K1_OFFSET),
-				    keySize, NS_BIT);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
-		HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
+		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     (ctx->opad_tmp_keys_dma_addr +
+			      XCBC_MAC_K1_OFFSET), keySize, NS_BIT);
+		set_key_size_aes(&desc[idx], keyLen);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
+		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 		idx++;
 
-
 		/* Initiate decryption of block state to previous block_state-XOR-M[n] */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
-		HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,0);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
+			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
+		set_flow_mode(&desc[idx], DIN_AES_DOUT);
 		idx++;
 
 		/* Memory Barrier: wait for axi write to complete */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
-		HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+		hw_desc_init(&desc[idx]);
+		set_din_no_dma(&desc[idx], 0, 0xfffff0);
+		set_dout_no_dma(&desc[idx], 0, 0, 1);
 		idx++;
 	}
-	
-	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+
+	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
 		ssi_hash_create_xcbc_setup(req, desc, &idx);
-	} else {
+	else
 		ssi_hash_create_cmac_setup(req, desc, &idx);
-	}
 
 	if (state->xcbc_count == 0) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
-		HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], keyLen);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
 		idx++;
 	} else if (rem_cnt > 0) {
 		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
 	} else {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_CONST(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+		hw_desc_init(&desc[idx]);
+		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+		set_flow_mode(&desc[idx], DIN_AES_DOUT);
 		idx++;
 	}
-	
+
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
+	hw_desc_init(&desc[idx]);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      digestsize, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	idx++;
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@@ -1647,11 +1562,11 @@ static int ssi_mac_finup(struct ahash_request *req)
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
 	int idx = 0;
 	int rc = 0;
-	uint32_t key_len = 0;
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
+	u32 key_len = 0;
+	u32 digestsize = crypto_ahash_digestsize(tfm);
 
 	SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
@@ -1659,7 +1574,7 @@ static int ssi_mac_finup(struct ahash_request *req)
 		SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
 		return ssi_mac_final(req);
 	}
-	
+
 	if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
 		SSI_LOG_ERR("map_ahash_request_final() failed\n");
 		return -ENOMEM;
@@ -1672,9 +1587,6 @@ static int ssi_mac_finup(struct ahash_request *req)
 	/* Setup DX request structure */
 	ssi_req.user_cb = (void *)ssi_hash_complete;
 	ssi_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 
 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
 		key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1685,23 +1597,25 @@ static int ssi_mac_finup(struct ahash_request *req)
 	}
 
 	if (req->nbytes == 0) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
-		HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], key_len);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
 		idx++;
 	} else {
 		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
 	}
-	
+
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
+	hw_desc_init(&desc[idx]);
+	/* TODO */
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      digestsize, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	idx++;
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@@ -1719,16 +1633,16 @@ static int ssi_mac_digest(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct device *dev = &ctx->drvdata->plat_dev->dev;
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
+	u32 digestsize = crypto_ahash_digestsize(tfm);
 	struct ssi_crypto_req ssi_req = {};
-	HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
-	uint32_t keyLen;
+	struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
+	u32 keyLen;
 	int idx = 0;
 	int rc;
 
 	SSI_LOG_DEBUG("===== -digest mac (%d) ====\n",  req->nbytes);
 	CHECK_AND_RETURN_UPON_FIPS_ERROR();
-	
+
 	if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
 		SSI_LOG_ERR("map_ahash_source() failed\n");
 		return -ENOMEM;
@@ -1742,15 +1656,11 @@ static int ssi_mac_digest(struct ahash_request *req)
 		SSI_LOG_ERR("map_ahash_request_final() failed\n");
 		return -ENOMEM;
 	}
-	
+
 	/* Setup DX request structure */
 	ssi_req.user_cb = (void *)ssi_hash_digest_complete;
 	ssi_req.user_arg = (void *)req;
-#ifdef ENABLE_CYCLE_COUNT
-	ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
-#endif
 
-	
 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
 		keyLen = CC_AES_128_BIT_KEY_SIZE;
 		ssi_hash_create_xcbc_setup(req, desc, &idx);
@@ -1760,24 +1670,25 @@ static int ssi_mac_digest(struct ahash_request *req)
 	}
 
 	if (req->nbytes == 0) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
-		HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
-		HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+		hw_desc_init(&desc[idx]);
+		set_cipher_mode(&desc[idx], ctx->hw_mode);
+		set_key_size_aes(&desc[idx], keyLen);
+		set_cmac_size0_mode(&desc[idx]);
+		set_flow_mode(&desc[idx], S_DIN_to_AES);
 		idx++;
 	} else {
 		ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
 	}
-	
+
 	/* Get final MAC result */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,1);
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 
+	hw_desc_init(&desc[idx]);
+	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
+		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
+	set_queue_last_ind(&desc[idx]);
+	set_flow_mode(&desc[idx], S_AES_to_DOUT);
+	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_cipher_mode(&desc[idx], ctx->hw_mode);
 	idx++;
 
 	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
@@ -1790,108 +1701,14 @@ static int ssi_mac_digest(struct ahash_request *req)
 	return rc;
 }
 
-//shash wrap functions
-#ifdef SYNC_ALGS
-static int ssi_shash_digest(struct shash_desc *desc, 
-			    const u8 *data, unsigned int len, u8 *out)
-{
-	struct ahash_req_ctx *state = shash_desc_ctx(desc);
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-	uint32_t digestsize = crypto_shash_digestsize(tfm);
-	struct scatterlist src;
-
-	if (len == 0) {
-		return ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL);
-	}
-	
-	/* sg_init_one may crash when len is 0 (depends on kernel configuration) */
-	sg_init_one(&src, (const void *)data, len);
-		
-	return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL);
-}
-
-static int ssi_shash_update(struct shash_desc *desc, 
-						const u8 *data, unsigned int len)
-{
-	struct ahash_req_ctx *state = shash_desc_ctx(desc);
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-	uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base);
-	struct scatterlist src;
-
-	sg_init_one(&src, (const void *)data, len);
-	
-	return ssi_hash_update(state, ctx, blocksize, &src, len, NULL);
-}
-
-static int ssi_shash_finup(struct shash_desc *desc, 
-			   const u8 *data, unsigned int len, u8 *out)
-{
-	struct ahash_req_ctx *state = shash_desc_ctx(desc);
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-	uint32_t digestsize = crypto_shash_digestsize(tfm);
-	struct scatterlist src;
-	
-	sg_init_one(&src, (const void *)data, len);
-	
-	return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL);
-}
-
-static int ssi_shash_final(struct shash_desc *desc, u8 *out)
-{
-	struct ahash_req_ctx *state = shash_desc_ctx(desc);
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-	uint32_t digestsize = crypto_shash_digestsize(tfm);
-		
-	return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
-}
-
-static int ssi_shash_init(struct shash_desc *desc)
-{
-	struct ahash_req_ctx *state = shash_desc_ctx(desc);
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-
-	return ssi_hash_init(state, ctx);
-}
-
-#ifdef EXPORT_FIXED
-static int ssi_shash_export(struct shash_desc *desc, void *out)
-{
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-
-	return ssi_hash_export(ctx, out);
-}
-
-static int ssi_shash_import(struct shash_desc *desc, const void *in)
-{
-	struct crypto_shash *tfm = desc->tfm;
-	struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
-	
-	return ssi_hash_import(ctx, in);
-}
-#endif
-
-static int ssi_shash_setkey(struct crypto_shash *tfm, 
-			    const u8 *key, unsigned int keylen)
-{
-	return ssi_hash_setkey((void *) tfm, key, keylen, true);
-}
-
-#endif /* SYNC_ALGS */
-
 //ahash wrap functions
 static int ssi_ahash_digest(struct ahash_request *req)
 {
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
-	
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
 	return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
 
@@ -1901,7 +1718,7 @@ static int ssi_ahash_update(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
-	
+
 	return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
 }
 
@@ -1910,8 +1727,8 @@ static int ssi_ahash_finup(struct ahash_request *req)
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
-	
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
 	return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
 
@@ -1920,8 +1737,8 @@ static int ssi_ahash_final(struct ahash_request *req)
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	uint32_t digestsize = crypto_ahash_digestsize(tfm);
-	
+	u32 digestsize = crypto_ahash_digestsize(tfm);
+
 	return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
 }
 
@@ -1929,81 +1746,161 @@ static int ssi_ahash_init(struct ahash_request *req)
 {
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);	
+	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 	SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
 
 	return ssi_hash_init(state, ctx);
 }
 
-#ifdef EXPORT_FIXED
 static int ssi_ahash_export(struct ahash_request *req, void *out)
 {
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	
-	return ssi_hash_export(ctx, out);
+	struct device *dev = &ctx->drvdata->plat_dev->dev;
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
+	u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
+				state->buff0_cnt;
+	const u32 tmp = CC_EXPORT_MAGIC;
+
+	CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+	memcpy(out, &tmp, sizeof(u32));
+	out += sizeof(u32);
+
+	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
+				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+	memcpy(out, state->digest_buff, ctx->inter_digestsize);
+	out += ctx->inter_digestsize;
+
+	if (state->digest_bytes_len_dma_addr) {
+		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
+					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+		memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
+	} else {
+		/* Poison the unused exported digest len field. */
+		memset(out, 0x5F, HASH_LEN_SIZE);
+	}
+	out += HASH_LEN_SIZE;
+
+	memcpy(out, &curr_buff_cnt, sizeof(u32));
+	out += sizeof(u32);
+
+	memcpy(out, curr_buff, curr_buff_cnt);
+
+	/* No sync for device is needed since we did not change the data,
+	 * we only copy it
+	 */
+
+	return 0;
 }
 
 static int ssi_ahash_import(struct ahash_request *req, const void *in)
 {
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	
-	return ssi_hash_import(ctx, in);
+	struct device *dev = &ctx->drvdata->plat_dev->dev;
+	struct ahash_req_ctx *state = ahash_request_ctx(req);
+	u32 tmp;
+	int rc;
+
+	CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+	memcpy(&tmp, in, sizeof(u32));
+	if (tmp != CC_EXPORT_MAGIC) {
+		rc = -EINVAL;
+		goto out;
+	}
+	in += sizeof(u32);
+
+	rc = ssi_hash_init(state, ctx);
+	if (rc)
+		goto out;
+
+	dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
+				ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+	memcpy(state->digest_buff, in, ctx->inter_digestsize);
+	in += ctx->inter_digestsize;
+
+	if (state->digest_bytes_len_dma_addr) {
+		dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
+					HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+		memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
+	}
+	in += HASH_LEN_SIZE;
+
+	dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
+				   ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+
+	if (state->digest_bytes_len_dma_addr)
+		dma_sync_single_for_device(dev,
+					   state->digest_bytes_len_dma_addr,
+					   HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+
+	state->buff_index = 0;
+
+	/* Sanity check the data as much as possible */
+	memcpy(&tmp, in, sizeof(u32));
+	if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
+		rc = -EINVAL;
+		goto out;
+	}
+	in += sizeof(u32);
+
+	state->buff0_cnt = tmp;
+	memcpy(state->buff0, in, state->buff0_cnt);
+
+out:
+	return rc;
 }
-#endif
 
 static int ssi_ahash_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
-{	
-	return ssi_hash_setkey((void *) ahash, key, keylen, false);
+{
+	return ssi_hash_setkey((void *)ahash, key, keylen, false);
 }
 
 struct ssi_hash_template {
 	char name[CRYPTO_MAX_ALG_NAME];
 	char driver_name[CRYPTO_MAX_ALG_NAME];
-	char hmac_name[CRYPTO_MAX_ALG_NAME];
-	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+	char mac_name[CRYPTO_MAX_ALG_NAME];
+	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int blocksize;
 	bool synchronize;
-	union {
-		struct ahash_alg template_ahash;
-		struct shash_alg template_shash;
-	};	
+	struct ahash_alg template_ahash;
 	int hash_mode;
 	int hw_mode;
 	int inter_digestsize;
 	struct ssi_drvdata *drvdata;
 };
 
+#define CC_STATE_SIZE(_x) \
+	((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
+
 /* hash descriptors */
 static struct ssi_hash_template driver_hash[] = {
 	//Asynchronize hash template
 	{
 		.name = "sha1",
 		.driver_name = "sha1-dx",
-		.hmac_name = "hmac(sha1)",
-		.hmac_driver_name = "hmac-sha1-dx",
+		.mac_name = "hmac(sha1)",
+		.mac_driver_name = "hmac-sha1-dx",
 		.blocksize = SHA1_BLOCK_SIZE,
 		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = SHA1_DIGEST_SIZE,
-					.statesize = sizeof(struct sha1_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_SHA1,
 		.hw_mode = DRV_HASH_HW_SHA1,
@@ -2012,27 +1909,22 @@ static struct ssi_hash_template driver_hash[] = {
 	{
 		.name = "sha256",
 		.driver_name = "sha256-dx",
-		.hmac_name = "hmac(sha256)",
-		.hmac_driver_name = "hmac-sha256-dx",
+		.mac_name = "hmac(sha256)",
+		.mac_driver_name = "hmac-sha256-dx",
 		.blocksize = SHA256_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = SHA256_DIGEST_SIZE,
-					.statesize = sizeof(struct sha256_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
+			},
 		},
 		.hash_mode = DRV_HASH_SHA256,
 		.hw_mode = DRV_HASH_HW_SHA256,
@@ -2041,27 +1933,22 @@ static struct ssi_hash_template driver_hash[] = {
 	{
 		.name = "sha224",
 		.driver_name = "sha224-dx",
-		.hmac_name = "hmac(sha224)",
-		.hmac_driver_name = "hmac-sha224-dx",
+		.mac_name = "hmac(sha224)",
+		.mac_driver_name = "hmac-sha224-dx",
 		.blocksize = SHA224_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = SHA224_DIGEST_SIZE,
-					.statesize = sizeof(struct sha256_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_SHA224,
 		.hw_mode = DRV_HASH_HW_SHA256,
@@ -2071,27 +1958,22 @@ static struct ssi_hash_template driver_hash[] = {
 	{
 		.name = "sha384",
 		.driver_name = "sha384-dx",
-		.hmac_name = "hmac(sha384)",
-		.hmac_driver_name = "hmac-sha384-dx",
+		.mac_name = "hmac(sha384)",
+		.mac_driver_name = "hmac-sha384-dx",
 		.blocksize = SHA384_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = SHA384_DIGEST_SIZE,
-					.statesize = sizeof(struct sha512_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_SHA384,
 		.hw_mode = DRV_HASH_HW_SHA512,
@@ -2100,27 +1982,22 @@ static struct ssi_hash_template driver_hash[] = {
 	{
 		.name = "sha512",
 		.driver_name = "sha512-dx",
-		.hmac_name = "hmac(sha512)",
-		.hmac_driver_name = "hmac-sha512-dx",
+		.mac_name = "hmac(sha512)",
+		.mac_driver_name = "hmac-sha512-dx",
 		.blocksize = SHA512_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = SHA512_DIGEST_SIZE,
-					.statesize = sizeof(struct sha512_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_SHA512,
 		.hw_mode = DRV_HASH_HW_SHA512,
@@ -2130,54 +2007,44 @@ static struct ssi_hash_template driver_hash[] = {
 	{
 		.name = "md5",
 		.driver_name = "md5-dx",
-		.hmac_name = "hmac(md5)",
-		.hmac_driver_name = "hmac-md5-dx",
+		.mac_name = "hmac(md5)",
+		.mac_driver_name = "hmac-md5-dx",
 		.blocksize = MD5_HMAC_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_ahash_update,
-				.final = ssi_ahash_final,
-				.finup = ssi_ahash_finup,
-				.digest = ssi_ahash_digest,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.setkey = ssi_ahash_setkey,
-				.halg = {
-					.digestsize = MD5_DIGEST_SIZE,
-					.statesize = sizeof(struct md5_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_ahash_update,
+			.final = ssi_ahash_final,
+			.finup = ssi_ahash_finup,
+			.digest = ssi_ahash_digest,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.setkey = ssi_ahash_setkey,
+			.halg = {
+				.digestsize = MD5_DIGEST_SIZE,
+				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_MD5,
 		.hw_mode = DRV_HASH_HW_MD5,
 		.inter_digestsize = MD5_DIGEST_SIZE,
 	},
 	{
-		.name = "xcbc(aes)",
-		.driver_name = "xcbc-aes-dx",
+		.mac_name = "xcbc(aes)",
+		.mac_driver_name = "xcbc-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_mac_update,
-				.final = ssi_mac_final,
-				.finup = ssi_mac_finup,
-				.digest = ssi_mac_digest,
-				.setkey = ssi_xcbc_setkey,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.halg = {
-					.digestsize = AES_BLOCK_SIZE,
-					.statesize = sizeof(struct aeshash_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_mac_update,
+			.final = ssi_mac_final,
+			.finup = ssi_mac_finup,
+			.digest = ssi_mac_digest,
+			.setkey = ssi_xcbc_setkey,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_NULL,
 		.hw_mode = DRV_CIPHER_XCBC_MAC,
@@ -2185,34 +2052,29 @@ static struct ssi_hash_template driver_hash[] = {
 	},
 #if SSI_CC_HAS_CMAC
 	{
-		.name = "cmac(aes)",
-		.driver_name = "cmac-aes-dx",
+		.mac_name = "cmac(aes)",
+		.mac_driver_name = "cmac-aes-dx",
 		.blocksize = AES_BLOCK_SIZE,
-		.synchronize = false,
-		{
-			.template_ahash = {
-				.init = ssi_ahash_init,
-				.update = ssi_mac_update,
-				.final = ssi_mac_final,
-				.finup = ssi_mac_finup,
-				.digest = ssi_mac_digest,
-				.setkey = ssi_cmac_setkey,
-#ifdef EXPORT_FIXED
-				.export = ssi_ahash_export,
-				.import = ssi_ahash_import,
-#endif
-				.halg = {
-					.digestsize = AES_BLOCK_SIZE,
-					.statesize = sizeof(struct aeshash_state),
-					},
-				},
+		.template_ahash = {
+			.init = ssi_ahash_init,
+			.update = ssi_mac_update,
+			.final = ssi_mac_final,
+			.finup = ssi_mac_finup,
+			.digest = ssi_mac_digest,
+			.setkey = ssi_cmac_setkey,
+			.export = ssi_ahash_export,
+			.import = ssi_ahash_import,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
+			},
 		},
 		.hash_mode = DRV_HASH_NULL,
 		.hw_mode = DRV_CIPHER_CMAC,
 		.inter_digestsize = AES_BLOCK_SIZE,
 	},
 #endif
-	
+
 };
 
 static struct ssi_hash_alg *
@@ -2220,6 +2082,7 @@ ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
 {
 	struct ssi_hash_alg *t_crypto_alg;
 	struct crypto_alg *alg;
+	struct ahash_alg *halg;
 
 	t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL);
 	if (!t_crypto_alg) {
@@ -2227,27 +2090,17 @@ ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	t_crypto_alg->synchronize = template->synchronize;
-	if (template->synchronize) {
-		struct shash_alg *halg;
-		t_crypto_alg->shash_alg = template->template_shash;
-		halg = &t_crypto_alg->shash_alg;
-		alg = &halg->base;
-		if (!keyed) halg->setkey = NULL;
-	} else {
-		struct ahash_alg *halg;
-		t_crypto_alg->ahash_alg = template->template_ahash;
-		halg = &t_crypto_alg->ahash_alg;
-		alg = &halg->halg.base;
-		if (!keyed) halg->setkey = NULL;
-	}
+	t_crypto_alg->ahash_alg = template->template_ahash;
+	halg = &t_crypto_alg->ahash_alg;
+	alg = &halg->halg.base;
 
 	if (keyed) {
 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
-			 template->hmac_name);
+			 template->mac_name);
 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-			 template->hmac_driver_name);
+			 template->mac_driver_name);
 	} else {
+		halg->setkey = NULL;
 		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
 			 template->name);
 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
@@ -2259,18 +2112,11 @@ ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
 	alg->cra_blocksize = template->blocksize;
 	alg->cra_alignmask = 0;
 	alg->cra_exit = ssi_hash_cra_exit;
-	
-	if (template->synchronize) {
-		alg->cra_init = ssi_shash_cra_init;		
-		alg->cra_flags = CRYPTO_ALG_TYPE_SHASH |
+
+	alg->cra_init = ssi_ahash_cra_init;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
 			CRYPTO_ALG_KERN_DRIVER_ONLY;
-		alg->cra_type = &crypto_shash_type;
-	} else {
-		alg->cra_init = ssi_ahash_cra_init;
-		alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
-			CRYPTO_ALG_KERN_DRIVER_ONLY;
-		alg->cra_type = &crypto_ahash_type;
-	}
+	alg->cra_type = &crypto_ahash_type;
 
 	t_crypto_alg->hash_mode = template->hash_mode;
 	t_crypto_alg->hw_mode = template->hw_mode;
@@ -2284,7 +2130,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
 	ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
 	unsigned int larval_seq_len = 0;
-	HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)];
+	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
 	int rc = 0;
 #if (DX_DEV_SHA_MAX > 256)
 	int i;
@@ -2351,15 +2197,15 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 #if (DX_DEV_SHA_MAX > 256)
 	/* We are forced to swap each double-word larval before copying to sram */
 	for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
-		const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[1];
-		const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[0];
+		const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
+		const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
 			larval_seq, &larval_seq_len);
-		sram_buff_ofs += sizeof(uint32_t);
+		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
 			larval_seq, &larval_seq_len);
-		sram_buff_ofs += sizeof(uint32_t);
+		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0)) {
@@ -2369,15 +2215,15 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
 	larval_seq_len = 0;
 
 	for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
-		const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[1];
-		const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[0];
+		const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
+		const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
 
 		ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
 			larval_seq, &larval_seq_len);
-		sram_buff_ofs += sizeof(uint32_t);
+		sram_buff_ofs += sizeof(u32);
 		ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
 			larval_seq, &larval_seq_len);
-		sram_buff_ofs += sizeof(uint32_t);
+		sram_buff_ofs += sizeof(u32);
 	}
 	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
 	if (unlikely(rc != 0)) {
@@ -2394,12 +2240,12 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 {
 	struct ssi_hash_handle *hash_handle;
 	ssi_sram_addr_t sram_buff;
-	uint32_t sram_size_to_alloc;
+	u32 sram_size_to_alloc;
 	int rc = 0;
 	int alg;
 
 	hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
-	if (hash_handle == NULL) {
+	if (!hash_handle) {
 		SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
 			sizeof(struct ssi_hash_handle));
 		rc = -ENOMEM;
@@ -2418,7 +2264,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 			sizeof(sha1_init) +
 			sizeof(sha224_init) +
 			sizeof(sha256_init);
-				
+
 	sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
 	if (sram_buff == NULL_SRAM_ADDR) {
 		SSI_LOG_ERR("SRAM pool exhausted\n");
@@ -2441,40 +2287,32 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 	/* ahash registration */
 	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
 		struct ssi_hash_alg *t_alg;
-		
-		/* register hmac version */
+		int hw_mode = driver_hash[alg].hw_mode;
 
-		if ((((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_XCBC_MAC) &&
-			(((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_CMAC)) {
-			t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
-			if (IS_ERR(t_alg)) {
-				rc = PTR_ERR(t_alg);
-				SSI_LOG_ERR("%s alg allocation failed\n",
-					 driver_hash[alg].driver_name);
-				goto fail;
-			}
-			t_alg->drvdata = drvdata;
-	
-			if (t_alg->synchronize) {
-				rc = crypto_register_shash(&t_alg->shash_alg);
-				if (unlikely(rc != 0)) {
-					SSI_LOG_ERR("%s alg registration failed\n",
-						t_alg->shash_alg.base.cra_driver_name);
-					kfree(t_alg);
-					goto fail;
-				} else
-					list_add_tail(&t_alg->entry, &hash_handle->hash_list);
-			} else {
-				rc = crypto_register_ahash(&t_alg->ahash_alg);
-				if (unlikely(rc != 0)) {
-					SSI_LOG_ERR("%s alg registration failed\n",
-						t_alg->ahash_alg.halg.base.cra_driver_name);
-					kfree(t_alg);
-					goto fail;
-				} else
-					list_add_tail(&t_alg->entry, &hash_handle->hash_list);
-			}
+		/* register hmac version */
+		t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			SSI_LOG_ERR("%s alg allocation failed\n",
+				    driver_hash[alg].driver_name);
+			goto fail;
 		}
+		t_alg->drvdata = drvdata;
+
+		rc = crypto_register_ahash(&t_alg->ahash_alg);
+		if (unlikely(rc)) {
+			SSI_LOG_ERR("%s alg registration failed\n",
+				    driver_hash[alg].driver_name);
+			kfree(t_alg);
+			goto fail;
+		} else {
+			list_add_tail(&t_alg->entry,
+				      &hash_handle->hash_list);
+		}
+
+		if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
+		    (hw_mode == DRV_CIPHER_CMAC))
+			continue;
 
 		/* register hash version */
 		t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
@@ -2485,26 +2323,15 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 			goto fail;
 		}
 		t_alg->drvdata = drvdata;
-		
-		if (t_alg->synchronize) {
-			rc = crypto_register_shash(&t_alg->shash_alg);
-			if (unlikely(rc != 0)) {
-				SSI_LOG_ERR("%s alg registration failed\n",
-					t_alg->shash_alg.base.cra_driver_name);
-				kfree(t_alg);
-				goto fail;
-			} else
-				list_add_tail(&t_alg->entry, &hash_handle->hash_list);	
-				
+
+		rc = crypto_register_ahash(&t_alg->ahash_alg);
+		if (unlikely(rc)) {
+			SSI_LOG_ERR("%s alg registration failed\n",
+				    driver_hash[alg].driver_name);
+			kfree(t_alg);
+			goto fail;
 		} else {
-			rc = crypto_register_ahash(&t_alg->ahash_alg);
-			if (unlikely(rc != 0)) {
-				SSI_LOG_ERR("%s alg registration failed\n",
-					t_alg->ahash_alg.halg.base.cra_driver_name);
-				kfree(t_alg);
-				goto fail;
-			} else
-				list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
 		}
 	}
 
@@ -2512,7 +2339,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
 
 fail:
 
-	if (drvdata->hash_handle != NULL) {
+	if (drvdata->hash_handle) {
 		kfree(drvdata->hash_handle);
 		drvdata->hash_handle = NULL;
 	}
@@ -2524,26 +2351,21 @@ int ssi_hash_free(struct ssi_drvdata *drvdata)
 	struct ssi_hash_alg *t_hash_alg, *hash_n;
 	struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
 
-	if (hash_handle != NULL) {
-
+	if (hash_handle) {
 		list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
-			if (t_hash_alg->synchronize) {
-				crypto_unregister_shash(&t_hash_alg->shash_alg);
-			} else {
-				crypto_unregister_ahash(&t_hash_alg->ahash_alg);
-			}
+			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
 			list_del(&t_hash_alg->entry);
 			kfree(t_hash_alg);
 		}
-		
+
 		kfree(hash_handle);
 		drvdata->hash_handle = NULL;
 	}
 	return 0;
 }
 
-static void ssi_hash_create_xcbc_setup(struct ahash_request *areq, 
-				  HwDesc_s desc[],
+static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
+				  struct cc_hw_desc desc[],
 				  unsigned int *seq_size) {
 	unsigned int idx = *seq_size;
 	struct ahash_req_ctx *state = ahash_request_ctx(areq);
@@ -2551,55 +2373,56 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 	/* Setup XCBC MAC K1 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr 
-						    + XCBC_MAC_K1_OFFSET),
-			     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+					    XCBC_MAC_K1_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Setup XCBC MAC K2 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr 
-						    + XCBC_MAC_K2_OFFSET),
-			      CC_AES_128_BIT_KEY_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+					    XCBC_MAC_K2_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Setup XCBC MAC K3 */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr 
-						    + XCBC_MAC_K3_OFFSET),
-			     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
+					    XCBC_MAC_K3_OFFSET),
+		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Loading MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     CC_AES_BLOCK_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 	*seq_size = idx;
 }
 
-static void ssi_hash_create_cmac_setup(struct ahash_request *areq, 
-				  HwDesc_s desc[],
+static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
+				  struct cc_hw_desc desc[],
 				  unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
@@ -2608,24 +2431,26 @@ static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
 	struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
 	/* Setup CMAC Key */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
-		((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
+		      ctx->key_params.keylen), NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 
 	/* Load MAC state */
-	HW_DESC_INIT(&desc[idx]);
-	HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
-	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
-	HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
-	HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
-	HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+	hw_desc_init(&desc[idx]);
+	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
+		     CC_AES_BLOCK_SIZE, NS_BIT);
+	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
+	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
+	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
+	set_flow_mode(&desc[idx], S_DIN_to_AES);
 	idx++;
 	*seq_size = idx;
 }
@@ -2633,18 +2458,18 @@ static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
 static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
 				      struct ssi_hash_ctx *ctx,
 				      unsigned int flow_mode,
-				      HwDesc_s desc[],
-				      bool is_not_last_data, 
+				      struct cc_hw_desc desc[],
+				      bool is_not_last_data,
 				      unsigned int *seq_size)
 {
 	unsigned int idx = *seq_size;
 
 	if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-				     sg_dma_address(areq_ctx->curr_sg), 
-				     areq_ctx->curr_sg->length, NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     sg_dma_address(areq_ctx->curr_sg),
+			     areq_ctx->curr_sg->length, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
 		idx++;
 	} else {
 		if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
@@ -2653,42 +2478,38 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
 			return;
 		}
 		/* bypass */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 
-				     areq_ctx->mlli_params.mlli_dma_addr, 
-				     areq_ctx->mlli_params.mlli_len, 
-				     NS_BIT);
-		HW_DESC_SET_DOUT_SRAM(&desc[idx], 
-				      ctx->drvdata->mlli_sram_addr, 
-				      areq_ctx->mlli_params.mlli_len);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_DLLI,
+			     areq_ctx->mlli_params.mlli_dma_addr,
+			     areq_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
+			      areq_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[idx], BYPASS);
 		idx++;
 		/* process */
-		HW_DESC_INIT(&desc[idx]);
-		HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI, 
-				     ctx->drvdata->mlli_sram_addr, 
-				     areq_ctx->mlli_nents,
-				     NS_BIT);
-		HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		hw_desc_init(&desc[idx]);
+		set_din_type(&desc[idx], DMA_MLLI,
+			     ctx->drvdata->mlli_sram_addr,
+			     areq_ctx->mlli_nents, NS_BIT);
+		set_flow_mode(&desc[idx], flow_mode);
 		idx++;
 	}
-	if (is_not_last_data) {
-		HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx-1]);
-	}
+	if (is_not_last_data)
+		set_din_not_last_indication(&desc[(idx - 1)]);
 	/* return updated desc sequence size */
 	*seq_size = idx;
 }
 
 /*!
- * Gets the address of the initial digest in SRAM 
+ * Gets the address of the initial digest in SRAM
  * according to the given hash mode
- * 
+ *
  * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
- * 
- * \return uint32_t The address of the inital digest in SRAM
+ *
+ * \return u32 The address of the initial digest in SRAM
  */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
 {
 	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
 	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
@@ -2734,7 +2555,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mo
 }
 
 ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
 {
 	struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
 	struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
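
Note on the pattern above: the hash descriptor-building code in this file now goes through the lower-case helpers (hw_desc_init(), set_din_type(), set_flow_mode(), ...) operating on struct cc_hw_desc instead of the old HW_DESC_* macros. A minimal sketch of the resulting shape, assuming the cc_hw_queue_defs.h helpers introduced elsewhere in this series; the function name and its arguments here are illustrative and not part of the patch:

	/*
	 * Illustrative only: build one "load key" setup descriptor the way the
	 * converted XCBC/CMAC paths above do, then report the grown sequence.
	 */
	static void example_load_key_desc(struct cc_hw_desc *desc,
					  dma_addr_t key_dma,
					  unsigned int *seq_size)
	{
		unsigned int idx = *seq_size;

		hw_desc_init(&desc[idx]);			/* clear descriptor words */
		set_din_type(&desc[idx], DMA_DLLI, key_dma,
			     CC_AES_128_BIT_KEY_SIZE, NS_BIT);	/* DMA-in: the key */
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);	/* target the KEY0 slot */
		set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
		set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
		set_flow_mode(&desc[idx], S_DIN_to_AES);	/* route DIN to the AES engine */
		idx++;

		*seq_size = idx;	/* caller sees the updated sequence length */
	}

Each helper writes one field of the six-word descriptor; the caller later hands the whole array to send_request() for enqueueing.
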
diff --git a/drivers/staging/ccree/ssi_hash.h b/drivers/staging/ccree/ssi_hash.h
index a2b076d3..2400e38 100644
--- a/drivers/staging/ccree/ssi_hash.h
+++ b/drivers/staging/ccree/ssi_hash.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_hash.h
-   ARM CryptoCell Hash Crypto API
+ * ARM CryptoCell Hash Crypto API
  */
 
 #ifndef __SSI_HASH_H__
@@ -39,6 +39,8 @@
 #define XCBC_MAC_K2_OFFSET 16
 #define XCBC_MAC_K3_OFFSET 32
 
+#define CC_EXPORT_MAGIC 0xC2EE1070U
+
 // this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used for xcbc/cmac statesize
 struct aeshash_state {
 	u8 state[AES_BLOCK_SIZE];
@@ -48,27 +50,27 @@ struct aeshash_state {
 
 /* ahash state */
 struct ahash_req_ctx {
-	uint8_t* buff0;
-	uint8_t* buff1;
-	uint8_t* digest_result_buff;
+	u8 *buff0;
+	u8 *buff1;
+	u8 *digest_result_buff;
 	struct async_gen_req_ctx gen_ctx;
 	enum ssi_req_dma_buf_type data_dma_buf_type;
-	uint8_t *digest_buff;
-	uint8_t *opad_digest_buff;
-	uint8_t *digest_bytes_len;
+	u8 *digest_buff;
+	u8 *opad_digest_buff;
+	u8 *digest_bytes_len;
 	dma_addr_t opad_digest_dma_addr;
 	dma_addr_t digest_buff_dma_addr;
 	dma_addr_t digest_bytes_len_dma_addr;
 	dma_addr_t digest_result_dma_addr;
-	uint32_t buff0_cnt;
-	uint32_t buff1_cnt;
-	uint32_t buff_index;
-	uint32_t xcbc_count; /* count xcbc update operatations */
+	u32 buff0_cnt;
+	u32 buff1_cnt;
+	u32 buff_index;
+	u32 xcbc_count; /* count xcbc update operations */
 	struct scatterlist buff_sg[2];
 	struct scatterlist *curr_sg;
-	uint32_t in_nents;
-	uint32_t mlli_nents;
-	struct mlli_params mlli_params;	
+	u32 in_nents;
+	u32 mlli_nents;
+	struct mlli_params mlli_params;
 };
 
 int ssi_hash_alloc(struct ssi_drvdata *drvdata);
@@ -77,25 +79,25 @@ int ssi_hash_free(struct ssi_drvdata *drvdata);
 
 /*!
  * Gets the initial digest length
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- * 
- * \return uint32_t returns the address of the initial digest length in SRAM
+ *
+ * \return u32 returns the address of the initial digest length in SRAM
  */
 ssi_sram_addr_t
-ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode);
 
 /*!
- * Gets the address of the initial digest in SRAM 
+ * Gets the address of the initial digest in SRAM
  * according to the given hash mode
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
- * 
- * \return uint32_t The address of the inital digest in SRAM
+ *
+ * \return u32 The address of the initial digest in SRAM
  */
-ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode);
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode);
 
 #endif /*__SSI_HASH_H__*/
 
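
The template entries earlier in this series now advertise .statesize = CC_STATE_SIZE(...) and always wire up .export/.import (the EXPORT_FIXED guards are gone), while this header gains CC_EXPORT_MAGIC for tagging exported state. From a caller's point of view the contract is the generic crypto API one; a hedged sketch, where the wrapper name and error handling are illustrative and not taken from this driver:

	/*
	 * Illustrative only: save and restore a partial hash through the
	 * generic ahash export/import interface exposed above.
	 * Requires <crypto/hash.h> and <linux/slab.h>.
	 */
	static int example_save_restore(struct crypto_ahash *tfm,
					struct ahash_request *req)
	{
		void *state;
		int rc;

		/* .statesize tells us how large the snapshot buffer must be */
		state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		rc = crypto_ahash_export(req, state);	/* snapshot the running hash */
		if (!rc)
			rc = crypto_ahash_import(req, state);	/* resume from it */

		kfree(state);
		return rc;
	}
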
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index f16f469..5ff3368 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -26,13 +26,14 @@
 /* The max. size of pool *MUST* be <= SRAM total size */
 #define SSI_IVPOOL_SIZE 1024
 /* The first 32B fraction of pool are dedicated to the
-   next encryption "key" & "IV" for pool regeneration */
+ * next encryption "key" & "IV" for pool regeneration
+ */
 #define SSI_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
 #define SSI_IVPOOL_GEN_SEQ_LEN	4
 
 /**
- * struct ssi_ivgen_ctx -IV pool generation context 
- * @pool:          the start address of the iv-pool resides in internal RAM 
+ * struct ssi_ivgen_ctx -IV pool generation context
+ * @pool:          the start address of the iv-pool resides in internal RAM
  * @ctr_key_dma:   address of pool's encryption key material in internal RAM
  * @ctr_iv_dma:    address of pool's counter iv in internal RAM
  * @next_iv_ofs:   the offset to the next available IV in pool
@@ -43,62 +44,62 @@ struct ssi_ivgen_ctx {
 	ssi_sram_addr_t pool;
 	ssi_sram_addr_t ctr_key;
 	ssi_sram_addr_t ctr_iv;
-	uint32_t next_iv_ofs;
-	uint8_t *pool_meta;
+	u32 next_iv_ofs;
+	u8 *pool_meta;
 	dma_addr_t pool_meta_dma;
 };
 
 /*!
- * Generates SSI_IVPOOL_SIZE of random bytes by 
+ * Generates SSI_IVPOOL_SIZE of random bytes by
  * encrypting 0's using AES128-CTR.
- * 
+ *
  * \param ivgen iv-pool context
  * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length 
+ * \param iv_seq_len IN/OUT pointer to the sequence length
  */
 static int ssi_ivgen_generate_pool(
 	struct ssi_ivgen_ctx *ivgen_ctx,
-	HwDesc_s iv_seq[],
+	struct cc_hw_desc iv_seq[],
 	unsigned int *iv_seq_len)
 {
 	unsigned int idx = *iv_seq_len;
 
-	if ( (*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
+	if ((*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
 		/* The sequence will be longer than allowed */
 		return -EINVAL;
 	}
 	/* Setup key */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
-	HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_KEY0);
-	HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
-	HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
 	idx++;
 
 	/* Setup cipher state */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
-	HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
-	HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_STATE1);
-	HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
-	HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
+	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
+	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
 	idx++;
 
 	/* Perform dummy encrypt to skip first block */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, CC_AES_IV_SIZE);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
 	idx++;
 
 	/* Generate IV pool */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+	hw_desc_init(&iv_seq[idx]);
+	set_din_const(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
+	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
+	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
 	idx++;
 
 	*iv_seq_len = idx; /* Update sequence length */
@@ -110,17 +111,17 @@ static int ssi_ivgen_generate_pool(
 }
 
 /*!
- * Generates the initial pool in SRAM. 
- * This function should be invoked when resuming DX driver. 
- * 
- * \param drvdata 
- *  
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming DX driver.
+ *
+ * \param drvdata
+ *
  * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
 {
 	struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
-	HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+	struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
 	unsigned int iv_seq_len = 0;
 	int rc;
 
@@ -132,40 +133,38 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
 	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
 
 	/* Copy initial enc. key and IV to SRAM at a single descriptor */
-	HW_DESC_INIT(&iv_seq[iv_seq_len]);
-	HW_DESC_SET_DIN_TYPE(&iv_seq[iv_seq_len], DMA_DLLI,
-		ivgen_ctx->pool_meta_dma, SSI_IVPOOL_META_SIZE,
-		NS_BIT);
-	HW_DESC_SET_DOUT_SRAM(&iv_seq[iv_seq_len], ivgen_ctx->pool,
-		SSI_IVPOOL_META_SIZE);
-	HW_DESC_SET_FLOW_MODE(&iv_seq[iv_seq_len], BYPASS);
+	hw_desc_init(&iv_seq[iv_seq_len]);
+	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
+		     SSI_IVPOOL_META_SIZE, NS_BIT);
+	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+		      SSI_IVPOOL_META_SIZE);
+	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
 	iv_seq_len++;
 
 	/* Generate initial pool */
 	rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
-	if (unlikely(rc != 0)) {
+	if (unlikely(rc != 0))
 		return rc;
-	}
+
 	/* Fire-and-forget */
 	return send_request_init(drvdata, iv_seq, iv_seq_len);
 }
 
 /*!
  * Free iv-pool and ivgen context.
- *  
- * \param drvdata 
+ *
+ * \param drvdata
  */
 void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
 {
 	struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
 	struct device *device = &(drvdata->plat_dev->dev);
 
-	if (ivgen_ctx == NULL)
+	if (!ivgen_ctx)
 		return;
 
-	if (ivgen_ctx->pool_meta != NULL) {
+	if (ivgen_ctx->pool_meta) {
 		memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma);
 		dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
 			ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma);
 	}
@@ -177,11 +176,11 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
 }
 
 /*!
- * Allocates iv-pool and maps resources. 
- * This function generates the first IV pool.  
- * 
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
  * \param drvdata Driver's private context
- * 
+ *
  * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_init(struct ssi_drvdata *drvdata)
@@ -209,8 +208,6 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
 		rc = -ENOMEM;
 		goto out;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma,
-							SSI_IVPOOL_META_SIZE);
 	/* Allocate IV pool in SRAM */
 	ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
 	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
@@ -228,22 +225,22 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
 
 /*!
  * Acquires 16 Bytes IV from the iv-pool
- * 
+ *
  * \param drvdata Driver private context
  * \param iv_out_dma Array of physical IV out addresses
  * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long 
+ * \param iv_out_size May be 8 or 16 bytes long
  * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length 
- *  
- * \return int Zero for success, negative value otherwise. 
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_getiv(
 	struct ssi_drvdata *drvdata,
 	dma_addr_t iv_out_dma[],
 	unsigned int iv_out_dma_len,
 	unsigned int iv_out_size,
-	HwDesc_s iv_seq[],
+	struct cc_hw_desc iv_seq[],
 	unsigned int *iv_seq_len)
 {
 	struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
@@ -254,34 +251,35 @@ int ssi_ivgen_getiv(
 	    (iv_out_size != CTR_RFC3686_IV_SIZE)) {
 		return -EINVAL;
 	}
-	if ( (iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
+	if ((iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
 		/* The sequence will be longer than allowed */
 		return -EINVAL;
 	}
 
 	//check that number of generated IV is limited to max dma address iv buffer size
-	if ( iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
+	if (iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
 		/* The sequence will be longer than allowed */
 		return -EINVAL;
 	}
 
 	for (t = 0; t < iv_out_dma_len; t++) {
 		/* Acquire IV from pool */
-		HW_DESC_INIT(&iv_seq[idx]);
-		HW_DESC_SET_DIN_SRAM(&iv_seq[idx],
-			ivgen_ctx->pool + ivgen_ctx->next_iv_ofs,
-			iv_out_size);
-		HW_DESC_SET_DOUT_DLLI(&iv_seq[idx], iv_out_dma[t],
-			iv_out_size, NS_BIT, 0);
-		HW_DESC_SET_FLOW_MODE(&iv_seq[idx], BYPASS);
+		hw_desc_init(&iv_seq[idx]);
+		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
+					    ivgen_ctx->next_iv_ofs),
+			     iv_out_size);
+		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
+			      NS_BIT, 0);
+		set_flow_mode(&iv_seq[idx], BYPASS);
 		idx++;
 	}
 
 	/* Bypass operation is proceeded by crypto sequence, hence must
-	*  assure bypass-write-transaction by a memory barrier */
-	HW_DESC_INIT(&iv_seq[idx]);
-	HW_DESC_SET_DIN_NO_DMA(&iv_seq[idx], 0, 0xfffff0);
-	HW_DESC_SET_DOUT_NO_DMA(&iv_seq[idx], 0, 0, 1);
+	 *  assure bypass-write-transaction by a memory barrier
+	 */
+	hw_desc_init(&iv_seq[idx]);
+	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
+	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
 	idx++;
 
 	*iv_seq_len = idx; /* update seq length */
@@ -298,4 +296,3 @@ int ssi_ivgen_getiv(
 	return 0;
 }
 
-
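
ssi_ivgen_getiv() above only appends BYPASS descriptors (plus a final no-DMA barrier descriptor) to the caller-supplied iv_seq; the caller is expected to splice that sequence ahead of its own crypto descriptors. A minimal caller-side sketch, assuming the signature shown in this file; the wrapper name and the single-IV case are illustrative:

	/*
	 * Illustrative only: fetch one CC_AES_IV_SIZE IV from the SRAM pool
	 * into a request-owned DMA buffer, collecting the needed descriptors.
	 */
	static int example_prepare_iv(struct ssi_drvdata *drvdata,
				      dma_addr_t req_iv_dma,
				      struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN],
				      unsigned int *iv_seq_len)
	{
		dma_addr_t iv_dma[1] = { req_iv_dma };

		*iv_seq_len = 0;	/* start an empty descriptor sequence */
		return ssi_ivgen_getiv(drvdata, iv_dma, 1, CC_AES_IV_SIZE,
				       iv_seq, iv_seq_len);
	}

On success, iv_seq[0 .. *iv_seq_len - 1] must run before the caller's own sequence; send_request() in ssi_request_mgr.c takes the same approach when the request carries IV-generation DMA addresses.
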
diff --git a/drivers/staging/ccree/ssi_ivgen.h b/drivers/staging/ccree/ssi_ivgen.h
index bc69cd8..961aea4 100644
--- a/drivers/staging/ccree/ssi_ivgen.h
+++ b/drivers/staging/ccree/ssi_ivgen.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -19,54 +19,53 @@
 
 #include "cc_hw_queue_defs.h"
 
-
 #define SSI_IVPOOL_SEQ_LEN 8
 
 /*!
- * Allocates iv-pool and maps resources. 
- * This function generates the first IV pool.  
- * 
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
  * \param drvdata Driver's private context
- * 
+ *
  * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_init(struct ssi_drvdata *drvdata);
 
 /*!
  * Free iv-pool and ivgen context.
- *  
- * \param drvdata 
+ *
+ * \param drvdata
  */
 void ssi_ivgen_fini(struct ssi_drvdata *drvdata);
 
 /*!
- * Generates the initial pool in SRAM. 
- * This function should be invoked when resuming DX driver. 
- * 
- * \param drvdata 
- *  
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming DX driver.
+ *
+ * \param drvdata
+ *
  * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata);
 
 /*!
  * Acquires 16 Bytes IV from the iv-pool
- * 
+ *
  * \param drvdata Driver private context
  * \param iv_out_dma Array of physical IV out addresses
  * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignore)
- * \param iv_out_size May be 8 or 16 bytes long 
+ * \param iv_out_size May be 8 or 16 bytes long
  * \param iv_seq IN/OUT array to the descriptors sequence
- * \param iv_seq_len IN/OUT pointer to the sequence length 
- *  
- * \return int Zero for success, negative value otherwise. 
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
  */
 int ssi_ivgen_getiv(
 	struct ssi_drvdata *drvdata,
 	dma_addr_t iv_out_dma[],
 	unsigned int iv_out_dma_len,
 	unsigned int iv_out_size,
-	HwDesc_s iv_seq[],
+	struct cc_hw_desc iv_seq[],
 	unsigned int *iv_seq_len);
 
 #endif /*__SSI_IVGEN_H__*/
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
index dd399f2..52a8ed5 100644
--- a/drivers/staging/ccree/ssi_pm.c
+++ b/drivers/staging/ccree/ssi_pm.c
@@ -1,20 +1,19 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-
 #include "ssi_config.h"
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -29,15 +28,12 @@
 #include "ssi_ivgen.h"
 #include "ssi_hash.h"
 #include "ssi_pm.h"
-#include "ssi_pm_ext.h"
 
-
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 
 #define POWER_DOWN_ENABLE 0x01
 #define POWER_DOWN_DISABLE 0x00
 
-
 int ssi_power_mgr_runtime_suspend(struct device *dev)
 {
 	struct ssi_drvdata *drvdata =
@@ -52,9 +48,7 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
 		return rc;
 	}
 	fini_cc_regs(drvdata);
-
-	/* Specific HW suspend code */
-	ssi_pm_ext_hw_suspend(dev);
+	cc_clk_off(drvdata);
 	return 0;
 }
 
@@ -66,24 +60,28 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
 
 	SSI_LOG_DEBUG("ssi_power_mgr_runtime_resume , unset HOST_POWER_DOWN_EN\n");
 	WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-	/* Specific HW resume code */
-	ssi_pm_ext_hw_resume(dev);
+
+	rc = cc_clk_on(drvdata);
+	if (rc) {
+		SSI_LOG_ERR("failed getting clock back on. We're toast.\n");
+		return rc;
+	}
 
 	rc = init_cc_regs(drvdata, false);
-	if (rc !=0) {
-		SSI_LOG_ERR("init_cc_regs (%x)\n",rc);
+	if (rc != 0) {
+		SSI_LOG_ERR("init_cc_regs (%x)\n", rc);
 		return rc;
 	}
 
 	rc = ssi_request_mgr_runtime_resume_queue(drvdata);
-	if (rc !=0) {
-		SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n",rc);
+	if (rc != 0) {
+		SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
 		return rc;
 	}
 
 	/* must be after the queue resuming as it uses the HW queue*/
 	ssi_hash_init_sram_digest_consts(drvdata);
-	
+
 	ssi_ivgen_init_sram_pool(drvdata);
 	return 0;
 }
@@ -109,26 +107,22 @@ int ssi_power_mgr_runtime_put_suspend(struct device *dev)
 				(struct ssi_drvdata *)dev_get_drvdata(dev))) {
 		pm_runtime_mark_last_busy(dev);
 		rc = pm_runtime_put_autosuspend(dev);
-	}
-	else {
+	} else {
 		/* Something wrong happens*/
 		BUG();
 	}
 	return rc;
-
 }
 
 #endif
 
-
-
 int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
 {
 	int rc = 0;
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 	struct platform_device *plat_dev = drvdata->plat_dev;
 	/* must be before the enabling to avoid resdundent suspending */
-	pm_runtime_set_autosuspend_delay(&plat_dev->dev,SSI_SUSPEND_TIMEOUT);
+	pm_runtime_set_autosuspend_delay(&plat_dev->dev, SSI_SUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&plat_dev->dev);
 	/* activate the PM module */
 	rc = pm_runtime_set_active(&plat_dev->dev);
@@ -142,7 +136,7 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
 
 void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
 {
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 	struct platform_device *plat_dev = drvdata->plat_dev;
 
 	pm_runtime_disable(&plat_dev->dev);
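
The suspend path above now powers down through fini_cc_regs() and cc_clk_off(), and resume re-gates the clock, reprograms the registers and rebuilds the SRAM constants and IV pool. How these callbacks get attached to the platform driver is not part of the hunks shown here; a hedged sketch of the conventional wiring, with made-up struct and driver names:

	/*
	 * Illustrative only: conventional runtime-PM wiring for the callbacks
	 * above. Needs <linux/pm.h> and <linux/platform_device.h>; only valid
	 * when CONFIG_PM_RUNTIME/CONFIG_PM_SLEEP build these callbacks in.
	 */
	static const struct dev_pm_ops example_ccree_pm_ops = {
		SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend,
				   ssi_power_mgr_runtime_resume,
				   NULL)
	};

	static struct platform_driver example_ccree_driver = {
		.driver = {
			.name = "example-ccree",
			.pm = &example_ccree_pm_ops,
		},
	};

Together with the pm_runtime_set_autosuspend_delay()/pm_runtime_use_autosuspend() calls in ssi_power_mgr_init(), this lets the device autosuspend SSI_SUSPEND_TIMEOUT milliseconds after the last request completes.
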
diff --git a/drivers/staging/ccree/ssi_pm.h b/drivers/staging/ccree/ssi_pm.h
index 516fc3f..63673f6 100644
--- a/drivers/staging/ccree/ssi_pm.h
+++ b/drivers/staging/ccree/ssi_pm.h
@@ -1,38 +1,35 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_pm.h
-    */
+ */
 
 #ifndef __SSI_POWER_MGR_H__
 #define __SSI_POWER_MGR_H__
 
-
 #include "ssi_config.h"
 #include "ssi_driver.h"
 
-
 #define SSI_SUSPEND_TIMEOUT 3000
 
-
 int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
 
 void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
 
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 int ssi_power_mgr_runtime_suspend(struct device *dev);
 
 int ssi_power_mgr_runtime_resume(struct device *dev);
diff --git a/drivers/staging/ccree/ssi_pm_ext.c b/drivers/staging/ccree/ssi_pm_ext.c
deleted file mode 100644
index f86bbab..0000000
--- a/drivers/staging/ccree/ssi_pm_ext.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-
-#include "ssi_config.h"
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <crypto/ctr.h>
-#include <linux/pm_runtime.h>
-#include "ssi_driver.h"
-#include "ssi_sram_mgr.h"
-#include "ssi_pm_ext.h"
-
-/*
-This function should suspend the HW (if possiable), It should be implemented by 
-the driver user. 
-The reference code clears the internal SRAM to imitate lose of state. 
-*/
-void ssi_pm_ext_hw_suspend(struct device *dev)
-{
-	struct ssi_drvdata *drvdata =
-		(struct ssi_drvdata *)dev_get_drvdata(dev);
-	unsigned int val;
-	void __iomem *cc_base = drvdata->cc_base;
-	unsigned int  sram_addr = 0;
-
-	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_ADDR), sram_addr);
-
-	for (;sram_addr < SSI_CC_SRAM_SIZE ; sram_addr+=4) {
-		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA), 0x0);
-
-		do {
-			val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA_READY));
-		} while (!(val &0x1));
-	}
-}
-
-/*
-This function should resume the HW (if possiable).It should be implemented by 
-the driver user. 
-*/
-void ssi_pm_ext_hw_resume(struct device *dev)
-{
-	return;
-}
-
diff --git a/drivers/staging/ccree/ssi_pm_ext.h b/drivers/staging/ccree/ssi_pm_ext.h
deleted file mode 100644
index b4e2795..0000000
--- a/drivers/staging/ccree/ssi_pm_ext.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * 
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- * 
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* \file ssi_pm_ext.h
-    */
-
-#ifndef __PM_EXT_H__
-#define __PM_EXT_H__
-
-
-#include "ssi_config.h"
-#include "ssi_driver.h"
-
-void ssi_pm_ext_hw_suspend(struct device *dev);
-
-void ssi_pm_ext_hw_resume(struct device *dev);
-
-
-#endif /*__POWER_MGR_H__*/
-
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 8611adf..46d9396 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -35,91 +35,22 @@
 
 #define SSI_MAX_POLL_ITER	10
 
-#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
-
-#ifdef CC_CYCLE_COUNT
-
-#define MONITOR_CNTR_BIT 0
-
-/**
- * Monitor descriptor. 
- * Used to measure CC performance. 
- */
-#define INIT_CC_MONITOR_DESC(desc_p) \
-do { \
-	HW_DESC_INIT(desc_p); \
-	HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
-} while (0)
-
-/** 
- * Try adding monitor descriptor BEFORE enqueuing sequence.
- */
-#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
-do { \
-	if (!test_and_set_bit(MONITOR_CNTR_BIT, (lock_p))) { \
-		enqueue_seq((cc_base_addr), (desc_p), 1); \
-		*(is_monitored_p) = true; \
-	} else { \
-		*(is_monitored_p) = false; \
-	} \
-} while (0)
-
-/**
- * If CC_CYCLE_DESC_HEAD was successfully added: 
- * 1. Add memory barrier descriptor to ensure last AXI transaction.  
- * 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
- */
-#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
-do { \
-	if ((is_monitored) == true) { \
-		HwDesc_s barrier_desc; \
-		HW_DESC_INIT(&barrier_desc); \
-		HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
-		HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
-		enqueue_seq((cc_base_addr), &barrier_desc, 1); \
-		enqueue_seq((cc_base_addr), (desc_p), 1); \
-	} \
-} while (0)
-
-/**
- * Try reading CC monitor counter value upon sequence complete. 
- * Can only succeed if the lock_p is taken by the owner of the given request.
- */
-#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
-do { \
-	uint32_t elapsed_cycles; \
-	if ((is_monitored) == true) { \
-		elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
-		clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
-		if (elapsed_cycles > 0) \
-			update_cc_stat(stat_op_type, stat_phase, (elapsed_cycles - monitor_null_cycles)); \
-	} \
-} while (0)
-
-#else /*CC_CYCLE_COUNT*/
-
-#define INIT_CC_MONITOR_DESC(desc_p) do { } while (0)
-#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) do { } while (0)
-#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) do { } while (0)
-#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) do { } while (0)
-#endif /*CC_CYCLE_COUNT*/
-
-
 struct ssi_request_mgr_handle {
 	/* Request manager resources */
 	unsigned int hw_queue_size; /* HW capability */
 	unsigned int min_free_hw_slots;
 	unsigned int max_used_sw_slots;
 	struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
-	uint32_t req_queue_head;
-	uint32_t req_queue_tail;
-	uint32_t axi_completed;
-	uint32_t q_free_slots;
+	u32 req_queue_head;
+	u32 req_queue_tail;
+	u32 axi_completed;
+	u32 q_free_slots;
 	spinlock_t hw_lock;
-	HwDesc_s compl_desc;
-	uint8_t *dummy_comp_buff;
+	struct cc_hw_desc compl_desc;
+	u8 *dummy_comp_buff;
 	dma_addr_t dummy_comp_buff_dma;
-	HwDesc_s monitor_desc;
+	struct cc_hw_desc monitor_desc;
+
 	volatile unsigned long monitor_lock;
 #ifdef COMP_IN_WQ
 	struct workqueue_struct *workq;
@@ -127,7 +58,7 @@ struct ssi_request_mgr_handle {
 #else
 	struct tasklet_struct comptask;
 #endif
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 	bool is_runtime_suspended;
 #endif
 };
@@ -141,18 +72,17 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
 {
 	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 
-	if (req_mgr_h == NULL)
+	if (!req_mgr_h)
 		return; /* Not allocated */
 
 	if (req_mgr_h->dummy_comp_buff_dma != 0) {
-		SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
 		dma_free_coherent(&drvdata->plat_dev->dev,
-				  sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
+				  sizeof(u32), req_mgr_h->dummy_comp_buff,
 				  req_mgr_h->dummy_comp_buff_dma);
 	}
 
 	SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
-						req_mgr_h->min_free_hw_slots) );
+						req_mgr_h->min_free_hw_slots));
 	SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
 
 #ifdef COMP_IN_WQ
@@ -169,15 +99,11 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
 
 int request_mgr_init(struct ssi_drvdata *drvdata)
 {
-#ifdef CC_CYCLE_COUNT
-	HwDesc_s monitor_desc[2];
-	struct ssi_crypto_req monitor_req = {0};
-#endif
 	struct ssi_request_mgr_handle *req_mgr_h;
 	int rc = 0;
 
-	req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle),GFP_KERNEL);
-	if (req_mgr_h == NULL) {
+	req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
+	if (!req_mgr_h) {
 		rc = -ENOMEM;
 		goto req_mgr_init_err;
 	}
@@ -188,7 +114,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 #ifdef COMP_IN_WQ
 	SSI_LOG_DEBUG("Initializing completion workqueue\n");
 	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
-	if (unlikely(req_mgr_h->workq == NULL)) {
+	if (unlikely(!req_mgr_h->workq)) {
 		SSI_LOG_ERR("Failed creating work queue\n");
 		rc = -ENOMEM;
 		goto req_mgr_init_err;
@@ -210,45 +136,23 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
 	req_mgr_h->max_used_sw_slots = 0;
 
-
 	/* Allocate DMA word for "dummy" completion descriptor use */
 	req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
-		sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+		sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
 	if (!req_mgr_h->dummy_comp_buff) {
 		SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
-			   "buffer\n", sizeof(uint32_t));
+			   "buffer\n", sizeof(u32));
 		rc = -ENOMEM;
 		goto req_mgr_init_err;
 	}
-	SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
-							     sizeof(uint32_t));
 
 	/* Init. "dummy" completion descriptor */
-	HW_DESC_INIT(&req_mgr_h->compl_desc);
-	HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
-	HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
-		req_mgr_h->dummy_comp_buff_dma,
-		sizeof(uint32_t), NS_BIT, 1);
-	HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
-	HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
-
-#ifdef CC_CYCLE_COUNT
-	/* For CC-HW cycle performance trace */
-	INIT_CC_MONITOR_DESC(&req_mgr_h->monitor_desc);
-	set_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
-	monitor_desc[0] = req_mgr_h->monitor_desc;
-	monitor_desc[1] = req_mgr_h->monitor_desc;
-
-	rc = send_request(drvdata, &monitor_req, monitor_desc, 2, 0);
-	if (unlikely(rc != 0))
-		goto req_mgr_init_err;
-
-	drvdata->monitor_null_cycles = READ_REGISTER(drvdata->cc_base +
-		CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR));
-	SSI_LOG_ERR("Calibration time=0x%08x\n", drvdata->monitor_null_cycles);
-
-	clear_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
-#endif
+	hw_desc_init(&req_mgr_h->compl_desc);
+	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
+	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
+		      sizeof(u32), NS_BIT, 1);
+	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
+	set_queue_last_ind(&req_mgr_h->compl_desc);
 
 	return 0;
 
@@ -259,18 +163,18 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
 
 static inline void enqueue_seq(
 	void __iomem *cc_base,
-	HwDesc_s seq[], unsigned int seq_len)
+	struct cc_hw_desc seq[], unsigned int seq_len)
 {
 	int i;
 
 	for (i = 0; i < seq_len; i++) {
-		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
-		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
-		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
-		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
-		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
 		wmb();
-		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+		writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
 #ifdef DX_DUMP_DESCS
 		SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
 			seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]);
@@ -279,62 +183,63 @@ static inline void enqueue_seq(
 }
 
 /*!
- * Completion will take place if and only if user requested completion 
- * by setting "is_dout = 0" in send_request().  
- * 
- * \param dev 
+ * Completion will take place if and only if user requested completion
+ * by setting "is_dout = 0" in send_request().
+ *
+ * \param dev
  * \param dx_compl_h The completion event to signal
  */
 static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
 {
 	struct completion *this_compl = dx_compl_h;
+
 	complete(this_compl);
 }
 
-
 static inline int request_mgr_queues_status_check(
 		struct ssi_request_mgr_handle *req_mgr_h,
 		void __iomem *cc_base,
 		unsigned int total_seq_len)
 {
 	unsigned long poll_queue;
-	
-	/* SW queue is checked only once as it will not 
-	   be chaned during the poll becasue the spinlock_bh 
-	   is held by the thread */
+
+	/* SW queue is checked only once as it will not
+	 * be changed during the poll because the spinlock_bh
+	 * is held by the thread
+	 */
 	if (unlikely(((req_mgr_h->req_queue_head + 1) &
-		      (MAX_REQUEST_QUEUE_SIZE - 1)) == 
+		      (MAX_REQUEST_QUEUE_SIZE - 1)) ==
 		     req_mgr_h->req_queue_tail)) {
-		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n", 
+		SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
 			   req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
 		return -EBUSY;
 	}
 
-	if ((likely(req_mgr_h->q_free_slots >= total_seq_len)) ) {
+	if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
 		return 0;
-	}
+
 	/* Wait for space in HW queue. Poll constant num of iterations. */
-	for (poll_queue =0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue ++) {
-		req_mgr_h->q_free_slots = 
+	for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
+		req_mgr_h->q_free_slots =
 			CC_HAL_READ_REGISTER(
 				CC_REG_OFFSET(CRY_KERNEL,
 						 DSCRPTR_QUEUE_CONTENT));
-		if (unlikely(req_mgr_h->q_free_slots < 
+		if (unlikely(req_mgr_h->q_free_slots <
 						req_mgr_h->min_free_hw_slots)) {
 			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
 		}
 
-		if (likely (req_mgr_h->q_free_slots >= total_seq_len)) {
+		if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
 			/* If there is enough place return */
 			return 0;
 		}
 
-		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n", 
+		SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
 			req_mgr_h->q_free_slots, total_seq_len);
 	}
 	/* No room in the HW queue try again later */
 	SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
-		   "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n", 
+		   "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
 		     req_mgr_h->req_queue_head,
 		   MAX_REQUEST_QUEUE_SIZE,
 		   req_mgr_h->q_free_slots,
@@ -344,38 +249,37 @@ static inline int request_mgr_queues_status_check(
 
 /*!
  * Enqueue caller request to crypto hardware.
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  * \param ssi_req The request to enqueue
  * \param desc The crypto sequence
  * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller 
- *      	  If "false": this function adds a dummy descriptor completion
- *      	  and waits upon completion signal.
- * 
+ * \param is_dout If "true": completion is handled by the caller
+ *	  If "false": this function adds a dummy descriptor completion
+ *	  and waits upon completion signal.
+ *
  * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
  */
 int send_request(
 	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
-	HwDesc_s *desc, unsigned int len, bool is_dout)
+	struct cc_hw_desc *desc, unsigned int len, bool is_dout)
 {
 	void __iomem *cc_base = drvdata->cc_base;
 	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 	unsigned int used_sw_slots;
 	unsigned int iv_seq_len = 0;
 	unsigned int total_seq_len = len; /*initial sequence length*/
-	HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+	struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
 	int rc;
 	unsigned int max_required_seq_len = (total_seq_len +
 					((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
-					SSI_IVPOOL_SEQ_LEN ) +
-					((is_dout == 0 )? 1 : 0));
-	DECL_CYCLE_COUNT_RESOURCES;
+					SSI_IVPOOL_SEQ_LEN) +
+					((is_dout == 0) ? 1 : 0));
 
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 	rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
 	if (rc != 0) {
-		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc);
+		SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
 		return rc;
 	}
 #endif
@@ -384,21 +288,23 @@ int send_request(
 		spin_lock_bh(&req_mgr_h->hw_lock);
 
 		/* Check if there is enough place in the SW/HW queues
-		in case iv gen add the max size and in case of no dout add 1 
-		for the internal completion descriptor */
+		 * in case of IV generation add the max size and in case of no DOUT
+		 * add 1 for the internal completion descriptor
+		 */
 		rc = request_mgr_queues_status_check(req_mgr_h,
 					       cc_base,
 					       max_required_seq_len);
-		if (likely(rc == 0 ))
+		if (likely(rc == 0))
 			/* There is enough place in the queue */
 			break;
 		/* something wrong release the spinlock*/
 		spin_unlock_bh(&req_mgr_h->hw_lock);
 
 		if (rc != -EAGAIN) {
-			/* Any error other than HW queue full 
-			   (SW queue is full) */
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+			/* Any error other than HW queue full
+			 * (SW queue is full)
+			 */
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
 #endif
 			return rc;
@@ -409,7 +315,8 @@ int send_request(
 	} while (1);
 
 	/* Additional completion descriptor is needed in case the caller did not
-	   enabled any DLLI/MLLI DOUT bit in the given sequence */
+	 * enable any DLLI/MLLI DOUT bit in the given sequence
+	 */
 	if (!is_dout) {
 		init_completion(&ssi_req->seq_compl);
 		ssi_req->user_cb = request_mgr_complete;
@@ -432,7 +339,7 @@ int send_request(
 		if (unlikely(rc != 0)) {
 			SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
 			spin_unlock_bh(&req_mgr_h->hw_lock);
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 			ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
 #endif
 			return rc;
@@ -440,18 +347,13 @@ int send_request(
 
 		total_seq_len += iv_seq_len;
 	}
-	
-	used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE-1));
-	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
+
+	used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
+	if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
 		req_mgr_h->max_used_sw_slots = used_sw_slots;
-	}
-	
-	CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
-			&req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);
 
 	/* Enqueue request - must be locked with HW lock*/
 	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
-	START_CYCLE_COUNT_AT(req_mgr_h->req_queue[req_mgr_h->req_queue_head].submit_cycle);
 	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 	/* TODO: Use circ_buf.h ? */
 
@@ -462,13 +364,9 @@ int send_request(
 #endif
 
 	/* STAT_PHASE_4: Push sequence */
-	START_CYCLE_COUNT();
 	enqueue_seq(cc_base, iv_seq, iv_seq_len);
 	enqueue_seq(cc_base, desc, len);
 	enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
-	END_CYCLE_COUNT(ssi_req->op_type, STAT_PHASE_4);
-
-	CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc, ssi_req->is_monitored_p);
 
 	if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
 		/*This means that there was a problem with the resume*/
@@ -481,28 +379,29 @@ int send_request(
 
 	if (!is_dout) {
 		/* Wait upon sequence completion.
-		*  Return "0" -Operation done successfully. */
-		return wait_for_completion_interruptible(&ssi_req->seq_compl);
+		 *  Return "0" -Operation done successfully.
+		 */
+		wait_for_completion(&ssi_req->seq_compl);
+		return 0;
 	} else {
 		/* Operation still in process */
 		return -EINPROGRESS;
 	}
 }
 
-
 /*!
  * Enqueue caller request to crypto hardware during init process.
  * assume this function is not called in middle of a flow,
  * since we set QUEUE_LAST_IND flag in the last descriptor.
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  * \param desc The crypto sequence
  * \param len The crypto sequence length
- * 
+ *
  * \return int Returns "0" upon success
  */
 int send_request_init(
-	struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len)
+	struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len)
 {
 	void __iomem *cc_base = drvdata->cc_base;
 	struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -511,10 +410,10 @@ int send_request_init(
 
 	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
 	rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
-	if (unlikely(rc != 0 )) {
+	if (unlikely(rc != 0))
 		return rc;
-	}
-	HW_DESC_SET_QUEUE_LAST_IND(&desc[len-1]);
+
+	set_queue_last_ind(&desc[(len - 1)]);
 
 	enqueue_seq(cc_base, desc, len);
 
@@ -526,10 +425,9 @@ int send_request_init(
 	return 0;
 }
 
-
 void complete_request(struct ssi_drvdata *drvdata)
 {
-	struct ssi_request_mgr_handle *request_mgr_handle = 
+	struct ssi_request_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
 #ifdef COMP_IN_WQ
 	queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
@@ -552,14 +450,13 @@ static void proc_completions(struct ssi_drvdata *drvdata)
 {
 	struct ssi_crypto_req *ssi_req;
 	struct platform_device *plat_dev = drvdata->plat_dev;
-	struct ssi_request_mgr_handle * request_mgr_handle = 
+	struct ssi_request_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 	int rc = 0;
 #endif
-	DECL_CYCLE_COUNT_RESOURCES;
 
-	while(request_mgr_handle->axi_completed) {
+	while (request_mgr_handle->axi_completed) {
 		request_mgr_handle->axi_completed--;
 
 		/* Dequeue request */
@@ -569,9 +466,6 @@ static void proc_completions(struct ssi_drvdata *drvdata)
 		}
 
 		ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
-		END_CYCLE_COUNT_AT(ssi_req->submit_cycle, ssi_req->op_type, STAT_PHASE_5); /* Seq. Comp. */
-		END_CC_MONITOR_COUNT(drvdata->cc_base, ssi_req->op_type, STAT_PHASE_6,
-			drvdata->monitor_null_cycles, &request_mgr_handle->monitor_lock, ssi_req->is_monitored_p);
 
 #ifdef FLUSH_CACHE_ALL
 		flush_cache_all();
@@ -580,116 +474,109 @@ static void proc_completions(struct ssi_drvdata *drvdata)
 #ifdef COMPLETION_DELAY
 		/* Delay */
 		{
-			uint32_t axi_err;
+			u32 axi_err;
 			int i;
+
 			SSI_LOG_INFO("Delay\n");
-			for (i=0;i<1000000;i++) {
+			for (i = 0; i < 1000000; i++)
 				axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
-			}
 		}
 #endif /* COMPLETION_DELAY */
 
-		if (likely(ssi_req->user_cb != NULL)) {
-			START_CYCLE_COUNT();
+		if (likely(ssi_req->user_cb))
 			ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
-			END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_3);
-		}
 		request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 		SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
 		SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 		rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
-		if (rc != 0) {
-			SSI_LOG_ERR("Failed to set runtime suspension %d\n",rc);
-		}
+		if (rc != 0)
+			SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
 #endif
 	}
 }
 
+static inline u32 cc_axi_comp_count(void __iomem *cc_base)
+{
+	/* The CC_HAL_READ_REGISTER macro implicitly requires and uses
+	 * a base MMIO register address variable named cc_base.
+	 */
+	return FIELD_GET(AXIM_MON_COMP_VALUE,
+			 CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+}
+
 /* Deferred service handler, run as interrupt-fired tasklet */
 static void comp_handler(unsigned long devarg)
 {
 	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
 	void __iomem *cc_base = drvdata->cc_base;
-	struct ssi_request_mgr_handle * request_mgr_handle = 
+	struct ssi_request_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
 
-	uint32_t irq;
-
-	DECL_CYCLE_COUNT_RESOURCES;
-
-	START_CYCLE_COUNT();
+	u32 irq;
 
 	irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
 
 	if (irq & SSI_COMP_IRQ_MASK) {
 		/* To avoid the interrupt from firing as we unmask it, we clear it now */
 		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
-	
+
 		/* Avoid race with above clear: Test completion counter once more */
-		request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 
-			CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
-	
-		/* ISR-to-Tasklet latency */
-		if (request_mgr_handle->axi_completed) {
-			/* Only if actually reflects ISR-to-completion-handling latency, i.e.,
-			   not duplicate as a result of interrupt after AXIM_MON_ERR clear, before end of loop */
-			END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
-		}
-	
+		request_mgr_handle->axi_completed +=
+				cc_axi_comp_count(cc_base);
+
 		while (request_mgr_handle->axi_completed) {
 			do {
 				proc_completions(drvdata);
-				/* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0.
-				   The following assignment was changed to = (previously was +=) to conform KW restrictions. */
-				request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 
-					CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+				/* At this point (after proc_completions()),
+				 * request_mgr_handle->axi_completed is 0.
+				 */
+				request_mgr_handle->axi_completed =
+						cc_axi_comp_count(cc_base);
 			} while (request_mgr_handle->axi_completed > 0);
-	
+
 			/* To avoid the interrupt from firing as we unmask it, we clear it now */
 			CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
-			
+
 			/* Avoid race with above clear: Test completion counter once more */
-			request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 
-				CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+			request_mgr_handle->axi_completed +=
+					cc_axi_comp_count(cc_base);
 		}
-	
 	}
 	/* after verifying that there is nothing to do, unmask the AXI completion interrupt */
-	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), 
+	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
 		CC_HAL_READ_REGISTER(
 		CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
-	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
 }
 
 /*
-resume the queue configuration - no need to take the lock as this happens inside
-the spin lock protection
-*/
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ * resume the queue configuration - no need to take the lock as this happens inside
+ * the spin lock protection
+ */
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
 {
-	struct ssi_request_mgr_handle * request_mgr_handle = drvdata->request_mgr_handle;
+	struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;
 
 	spin_lock_bh(&request_mgr_handle->hw_lock);
 	request_mgr_handle->is_runtime_suspended = false;
 	spin_unlock_bh(&request_mgr_handle->hw_lock);
 
-	return 0 ;
+	return 0;
 }
 
 /*
-suspend the queue configuration. Since it is used for the runtime suspend
-only verify that the queue can be suspended.
-*/
+ * suspend the queue configuration. Since it is used for the runtime suspend,
+ * only verify that the queue can be suspended.
+ */
 int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
 {
-	struct ssi_request_mgr_handle * request_mgr_handle = 
+	struct ssi_request_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
-	
+
 	/* lock the send_request */
 	spin_lock_bh(&request_mgr_handle->hw_lock);
-	if (request_mgr_handle->req_queue_head != 
+	if (request_mgr_handle->req_queue_head !=
 	    request_mgr_handle->req_queue_tail) {
 		spin_unlock_bh(&request_mgr_handle->hw_lock);
 		return -EBUSY;
@@ -702,10 +589,10 @@ int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
 
 bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
 {
-	struct ssi_request_mgr_handle * request_mgr_handle = 
+	struct ssi_request_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
 
-	return 	request_mgr_handle->is_runtime_suspended;
+	return	request_mgr_handle->is_runtime_suspended;
 }
 
 #endif
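
The cc_axi_comp_count() helper introduced above wraps the AXI completion-counter read in FIELD_GET() from <linux/bitfield.h>, which extracts the bits selected by a constant mask and shifts them down to bit 0. A minimal sketch of that pattern, using a hypothetical register layout rather than the driver's real AXIM_MON definitions:

#include <linux/bitfield.h>
#include <linux/io.h>

/* Assumed completion-count field in bits [15:0]; illustrative only. */
#define EXAMPLE_MON_COMP_VALUE	GENMASK(15, 0)

static u32 example_comp_count(void __iomem *base, unsigned long reg_offset)
{
	u32 reg = readl(base + reg_offset);

	/* Mask out the field and shift it down to bit 0. */
	return FIELD_GET(EXAMPLE_MON_COMP_VALUE, reg);
}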
diff --git a/drivers/staging/ccree/ssi_request_mgr.h b/drivers/staging/ccree/ssi_request_mgr.h
index c09339b..bdbbf89 100644
--- a/drivers/staging/ccree/ssi_request_mgr.h
+++ b/drivers/staging/ccree/ssi_request_mgr.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file request_mgr.h
-   Request Manager
+ * Request Manager
  */
 
 #ifndef __REQUEST_MGR_H__
@@ -27,29 +27,29 @@ int request_mgr_init(struct ssi_drvdata *drvdata);
 
 /*!
  * Enqueue caller request to crypto hardware.
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  * \param ssi_req The request to enqueue
  * \param desc The crypto sequence
  * \param len The crypto sequence length
- * \param is_dout If "true": completion is handled by the caller 
- *      	  If "false": this function adds a dummy descriptor completion
- *      	  and waits upon completion signal.
- * 
+ * \param is_dout If "true": completion is handled by the caller
+ *	  If "false": this function adds a dummy descriptor completion
+ *	  and waits upon completion signal.
+ *
  * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
  */
 int send_request(
 	struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
-	HwDesc_s *desc, unsigned int len, bool is_dout);
+	struct cc_hw_desc *desc, unsigned int len, bool is_dout);
 
 int send_request_init(
-	struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len);
+	struct ssi_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len);
 
 void complete_request(struct ssi_drvdata *drvdata);
 
 void request_mgr_fini(struct ssi_drvdata *drvdata);
 
-#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
 int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
 
 int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index 50066e1..e05c0c1 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,7 +17,6 @@
 #include "ssi_driver.h"
 #include "ssi_sram_mgr.h"
 
-
 /**
  * struct ssi_sram_mgr_ctx -Internal RAM context manager
  * @sram_free_offset:   the offset to the non-allocated area
@@ -26,10 +25,9 @@ struct ssi_sram_mgr_ctx {
 	ssi_sram_addr_t sram_free_offset;
 };
 
-
 /**
  * ssi_sram_mgr_fini() - Cleanup SRAM pool.
- * 
+ *
  * @drvdata: Associated device driver context
  */
 void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
@@ -37,17 +35,17 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
 	struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
 
 	/* Free "this" context */
-	if (smgr_ctx != NULL) {
+	if (smgr_ctx) {
 		memset(smgr_ctx, 0, sizeof(struct ssi_sram_mgr_ctx));
 		kfree(smgr_ctx);
 	}
 }
 
 /**
- * ssi_sram_mgr_init() - Initializes SRAM pool. 
+ * ssi_sram_mgr_init() - Initializes SRAM pool.
  *      The pool starts right at the beginning of SRAM.
  *      Returns zero for success, negative value otherwise.
- * 
+ *
  * @drvdata: Associated device driver context
  */
 int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
@@ -77,15 +75,15 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
 }
 
 /*!
- * Allocated buffer from SRAM pool. 
- * Note: Caller is responsible to free the LAST allocated buffer. 
- * This function does not taking care of any fragmentation may occur 
- * by the order of calls to alloc/free. 
- * 
- * \param drvdata 
+ * Allocate a buffer from the SRAM pool.
+ * Note: The caller is responsible for freeing the LAST allocated buffer.
+ * This function does not take care of any fragmentation that may occur
+ * due to the order of calls to alloc/free.
+ *
+ * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
 {
 	struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
 	ssi_sram_addr_t p;
@@ -100,7 +98,7 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
 			size, smgr_ctx->sram_free_offset);
 		return NULL_SRAM_ADDR;
 	}
-	
+
 	p = smgr_ctx->sram_free_offset;
 	smgr_ctx->sram_free_offset += size;
 	SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p);
@@ -109,9 +107,9 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
 
 /**
  * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
- *	set values in given array into SRAM. 
+ *	set the values of the given array into SRAM.
  * Note: each const value can't exceed word size.
- * 
+ *
  * @src:	  A pointer to array of words to set as consts.
  * @dst:	  The target SRAM buffer to set into
  * @nelements:	  The number of words in "src" array
@@ -119,18 +117,18 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
  * @seq_len:	  A pointer to the given IN/OUT sequence length
  */
 void ssi_sram_mgr_const2sram_desc(
-	const uint32_t *src, ssi_sram_addr_t dst,
+	const u32 *src, ssi_sram_addr_t dst,
 	unsigned int nelement,
-	HwDesc_s *seq, unsigned int *seq_len)
+	struct cc_hw_desc *seq, unsigned int *seq_len)
 {
-	uint32_t i;
+	u32 i;
 	unsigned int idx = *seq_len;
 
 	for (i = 0; i < nelement; i++, idx++) {
-		HW_DESC_INIT(&seq[idx]);
-		HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(uint32_t));
-		HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(uint32_t)), sizeof(uint32_t));
-		HW_DESC_SET_FLOW_MODE(&seq[idx], BYPASS);
+		hw_desc_init(&seq[idx]);
+		set_din_const(&seq[idx], src[i], sizeof(u32));
+		set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
+		set_flow_mode(&seq[idx], BYPASS);
 	}
 
 	*seq_len = idx;
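
ssi_sram_mgr_alloc() above is a simple bump allocator over the on-chip SRAM: a single free offset only ever advances and nothing is reclaimed, which is why the caller may only free the most recent allocation. A standalone sketch of that allocation policy, with illustrative names and pool size rather than the driver's struct ssi_sram_mgr_ctx:

#include <linux/types.h>

#define EXAMPLE_SRAM_SIZE	4096
#define EXAMPLE_NULL_ADDR	((u64)-1)

struct example_sram_pool {
	u64 free_offset;	/* offset of the first unallocated byte */
};

static u64 example_sram_alloc(struct example_sram_pool *pool, u32 size)
{
	u64 p;

	/* free_offset never exceeds the pool size, so this cannot underflow. */
	if (size > EXAMPLE_SRAM_SIZE - pool->free_offset)
		return EXAMPLE_NULL_ADDR;	/* pool exhausted */

	p = pool->free_offset;
	pool->free_offset += size;	/* bump, never rewound */
	return p;
}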
diff --git a/drivers/staging/ccree/ssi_sram_mgr.h b/drivers/staging/ccree/ssi_sram_mgr.h
index d71fbaf..9ba1d59 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.h
+++ b/drivers/staging/ccree/ssi_sram_mgr.h
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -17,7 +17,6 @@
 #ifndef __SSI_SRAM_MGR_H__
 #define __SSI_SRAM_MGR_H__
 
-
 #ifndef SSI_CC_SRAM_SIZE
 #define SSI_CC_SRAM_SIZE 4096
 #endif
@@ -28,44 +27,44 @@ struct ssi_drvdata;
  * Address (offset) within CC internal SRAM
  */
 
-typedef uint64_t ssi_sram_addr_t;
+typedef u64 ssi_sram_addr_t;
 
 #define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
 
 /*!
- * Initializes SRAM pool. 
- * The first X bytes of SRAM are reserved for ROM usage, hence, pool 
- * starts right after X bytes. 
- *  
- * \param drvdata 
- *  
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence, pool
+ * starts right after X bytes.
+ *
+ * \param drvdata
+ *
  * \return int Zero for success, negative value otherwise.
  */
 int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
 
 /*!
  * Uninits SRAM pool.
- * 
- * \param drvdata 
+ *
+ * \param drvdata
  */
 void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
 
 /*!
- * Allocated buffer from SRAM pool. 
- * Note: Caller is responsible to free the LAST allocated buffer. 
- * This function does not taking care of any fragmentation may occur 
- * by the order of calls to alloc/free. 
- * 
- * \param drvdata 
+ * Allocate a buffer from the SRAM pool.
+ * Note: The caller is responsible for freeing the LAST allocated buffer.
+ * This function does not take care of any fragmentation that may occur
+ * due to the order of calls to alloc/free.
+ *
+ * \param drvdata
  * \param size The requested bytes to allocate
  */
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
 
 /**
  * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
- *	set values in given array into SRAM. 
+ *	set the values of the given array into SRAM.
  * Note: each const value can't exceed word size.
- * 
+ *
  * @src:	  A pointer to array of words to set as consts.
  * @dst:	  The target SRAM buffer to set into
  * @nelements:	  The number of words in "src" array
@@ -73,8 +72,8 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
  * @seq_len:	  A pointer to the given IN/OUT sequence length
  */
 void ssi_sram_mgr_const2sram_desc(
-	const uint32_t *src, ssi_sram_addr_t dst,
+	const u32 *src, ssi_sram_addr_t dst,
 	unsigned int nelement,
-	HwDesc_s *seq, unsigned int *seq_len);
+	struct cc_hw_desc *seq, unsigned int *seq_len);
 
 #endif /*__SSI_SRAM_MGR_H__*/
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
index 7c514c1..dbcd163 100644
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -1,15 +1,15 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
@@ -40,7 +40,7 @@ struct stat_name {
 	const char *stat_phase_name[MAX_STAT_PHASES];
 };
 
-static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = 
+static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
 {
 	{
 		/* STAT_OP_TYPE_NULL */
@@ -50,8 +50,8 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
 	{
 		.op_type_name = "Encode",
 		.stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
-		.stat_phase_name[STAT_PHASE_1] = "Map buffers", 
-		.stat_phase_name[STAT_PHASE_2] = "Create sequence", 
+		.stat_phase_name[STAT_PHASE_1] = "Map buffers",
+		.stat_phase_name[STAT_PHASE_2] = "Create sequence",
 		.stat_phase_name[STAT_PHASE_3] = "Send Request",
 		.stat_phase_name[STAT_PHASE_4] = "HW-Q push",
 		.stat_phase_name[STAT_PHASE_5] = "Sequence completion",
@@ -59,14 +59,14 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
 	},
 	{	.op_type_name = "Decode",
 		.stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
-		.stat_phase_name[STAT_PHASE_1] = "Map buffers", 
-		.stat_phase_name[STAT_PHASE_2] = "Create sequence", 
+		.stat_phase_name[STAT_PHASE_1] = "Map buffers",
+		.stat_phase_name[STAT_PHASE_2] = "Create sequence",
 		.stat_phase_name[STAT_PHASE_3] = "Send Request",
 		.stat_phase_name[STAT_PHASE_4] = "HW-Q push",
 		.stat_phase_name[STAT_PHASE_5] = "Sequence completion",
 		.stat_phase_name[STAT_PHASE_6] = "HW cycles",
 	},
-	{ 	.op_type_name = "Setkey",
+	{	.op_type_name = "Setkey",
 		.stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
 		.stat_phase_name[STAT_PHASE_1] = "Copy key to ctx",
 		.stat_phase_name[STAT_PHASE_2] = "Create sequence",
@@ -88,14 +88,14 @@ static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
 };
 
 /*
- * Structure used to create a directory 
+ * Structure used to create a directory
  * and its attributes in sysfs.
  */
 struct sys_dir {
 	struct kobject *sys_dir_kobj;
 	struct attribute_group sys_dir_attr_group;
 	struct attribute **sys_dir_attr_list;
-	uint32_t num_of_attrs;
+	u32 num_of_attrs;
 	struct ssi_drvdata *drvdata; /* Associated driver context */
 };
 
@@ -108,14 +108,13 @@ static DEFINE_SPINLOCK(stat_lock);
 static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
 static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
 
-
 static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
 {
 	unsigned int i, j;
 
 	/* Clear db */
-	for (i=0; i<MAX_STAT_OP_TYPES; i++) {
-		for (j=0; j<MAX_STAT_PHASES; j++) {
+	for (i = 0; i < MAX_STAT_OP_TYPES; i++) {
+		for (j = 0; j < MAX_STAT_PHASES; j++) {
 			item[i][j].min = 0xFFFFFFFF;
 			item[i][j].max = 0;
 			item[i][j].sum = 0;
@@ -130,29 +129,28 @@ static void update_db(struct stat_item *item, unsigned int result)
 	item->sum += result;
 	if (result < item->min)
 		item->min = result;
-	if (result > item->max )
+	if (result > item->max)
 		item->max = result;
 }
 
 static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
 {
 	unsigned int i, j;
-	uint64_t avg;
+	u64 avg;
 
-	for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
-		for (j=0; j<MAX_STAT_PHASES; j++) {	
+	for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
+		for (j = 0; j < MAX_STAT_PHASES; j++) {
 			if (item[i][j].count > 0) {
-				avg = (uint64_t)item[i][j].sum;
+				avg = (u64)item[i][j].sum;
 				do_div(avg, item[i][j].count);
-				SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n", 
-					stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j], 
+				SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
+					stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
 					item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count);
 			}
 		}
 	}
 }
 
-
 /**************************************
  * Attributes show functions section  *
  **************************************/
@@ -174,38 +172,38 @@ static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
 static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
-	int i, j ;
+	int i, j;
 	char line[512];
-	uint32_t min_cyc, max_cyc;
-	uint64_t avg;
-	ssize_t buf_len, tmp_len=0;
+	u32 min_cyc, max_cyc;
+	u64 avg;
+	ssize_t buf_len, tmp_len = 0;
 
-	buf_len = scnprintf(buf,PAGE_SIZE,
+	buf_len = scnprintf(buf, PAGE_SIZE,
 		"phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
-	if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+	if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
 		return buf_len;
-	for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
-		for (j=0; j<MAX_STAT_PHASES-1; j++) {
+	for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
+		for (j = 0; j < MAX_STAT_PHASES - 1; j++) {
 			if (stat_host_db[i][j].count > 0) {
-				avg = (uint64_t)stat_host_db[i][j].sum;
+				avg = (u64)stat_host_db[i][j].sum;
 				do_div(avg, stat_host_db[i][j].count);
 				min_cyc = stat_host_db[i][j].min;
 				max_cyc = stat_host_db[i][j].max;
 			} else {
 				avg = min_cyc = max_cyc = 0;
 			}
-			tmp_len = scnprintf(line,512,
+			tmp_len = scnprintf(line, 512,
 				"%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
 				stat_name_db[i].op_type_name,
 				stat_name_db[i].stat_phase_name[j],
 				min_cyc, (unsigned int)avg, max_cyc,
 				stat_host_db[i][j].count);
-			if ( tmp_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+			if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
 				return buf_len;
-			if ( buf_len + tmp_len >= PAGE_SIZE)
+			if (buf_len + tmp_len >= PAGE_SIZE)
 				return buf_len;
 			buf_len += tmp_len;
-			strncat(buf, line,512);
+			strncat(buf, line, 512);
 		}
 	}
 	return buf_len;
@@ -216,24 +214,24 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
 {
 	int i;
 	char line[256];
-	uint32_t min_cyc, max_cyc;
-	uint64_t avg;
-	ssize_t buf_len,tmp_len=0;
+	u32 min_cyc, max_cyc;
+	u64 avg;
+	ssize_t buf_len, tmp_len = 0;
 
-	buf_len = scnprintf(buf,PAGE_SIZE,
+	buf_len = scnprintf(buf, PAGE_SIZE,
 		"phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
-	if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+	if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
 		return buf_len;
-	for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
+	for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
 		if (stat_cc_db[i][STAT_PHASE_6].count > 0) {
-			avg = (uint64_t)stat_cc_db[i][STAT_PHASE_6].sum;
+			avg = (u64)stat_cc_db[i][STAT_PHASE_6].sum;
 			do_div(avg, stat_cc_db[i][STAT_PHASE_6].count);
 			min_cyc = stat_cc_db[i][STAT_PHASE_6].min;
 			max_cyc = stat_cc_db[i][STAT_PHASE_6].max;
 		} else {
 			avg = min_cyc = max_cyc = 0;
 		}
-		tmp_len = scnprintf(line,256,
+		tmp_len = scnprintf(line, 256,
 			"%s\t%6u\t%6u\t%6u\t%7u\n",
 			stat_name_db[i].op_type_name,
 			min_cyc,
@@ -241,13 +239,13 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
 			max_cyc,
 			stat_cc_db[i][STAT_PHASE_6].count);
 
-		if ( tmp_len < 0 )/* scnprintf shouldn't return negative value according to its implementation*/
+		if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
 			return buf_len;
 
-		if ( buf_len + tmp_len >= PAGE_SIZE)
+		if (buf_len + tmp_len >= PAGE_SIZE)
 			return buf_len;
 		buf_len += tmp_len;
-		strncat(buf, line,256);
+		strncat(buf, line, 256);
 	}
 	return buf_len;
 }
@@ -271,21 +269,19 @@ void update_cc_stat(
 
 void display_all_stat_db(void)
 {
-	SSI_LOG_ERR("\n=======    CYCLE COUNT STATS    =======\n"); 
+	SSI_LOG_ERR("\n=======    CYCLE COUNT STATS    =======\n");
 	display_db(stat_host_db);
-	SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n"); 
+	SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
 	display_db(stat_cc_db);
 }
 #endif /*CC_CYCLE_COUNT*/
 
-
-
 static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
 	struct ssi_drvdata *drvdata = sys_get_drvdata();
-	uint32_t register_value;
-	void __iomem* cc_base = drvdata->cc_base;
+	u32 register_value;
+	void __iomem *cc_base = drvdata->cc_base;
 	int offset = 0;
 
 	register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
@@ -304,7 +300,7 @@ static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
 static ssize_t ssi_sys_help_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
-	char* help_str[]={
+	char *help_str[] = {
 				"cat reg_dump              ", "Print several of CC register values",
 		#if defined CC_CYCLE_COUNT
 				"cat stats_host            ", "Print host statistics",
@@ -313,12 +309,12 @@ static ssize_t ssi_sys_help_show(struct kobject *kobj,
 				"echo <number> > stats_cc  ", "Clear CC statistics database",
 		#endif
 				};
-	int i=0, offset = 0;
+	int i = 0, offset = 0;
 
 	offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n");
-	for ( i = 0; i < ARRAY_SIZE(help_str); i+=2) {
-	   offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i+1]);
-	}
+	for (i = 0; i < ARRAY_SIZE(help_str); i += 2)
+	   offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i + 1]);
+
 	return offset;
 }
 
@@ -333,7 +329,7 @@ struct sys_dir {
 	struct kobject *sys_dir_kobj;
 	struct attribute_group sys_dir_attr_group;
 	struct attribute **sys_dir_attr_list;
-	uint32_t num_of_attrs;
+	u32 num_of_attrs;
 	struct ssi_drvdata *drvdata; /* Associated driver context */
 };
 
@@ -355,13 +351,14 @@ static struct ssi_drvdata *sys_get_drvdata(void)
 {
 	/* TODO: supporting multiple SeP devices would require avoiding
 	 * global "top_dir" and finding associated "top_dir" by traversing
-	 * up the tree to the kobject which matches one of the top_dir's */
+	 * up the tree to the kobject which matches one of the top_dir's
+	 */
 	return sys_top_dir.drvdata;
 }
 
 static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
 		 struct kobject *parent_dir_kobj, const char *dir_name,
-		 struct kobj_attribute *attrs, uint32_t num_of_attrs)
+		 struct kobj_attribute *attrs, u32 num_of_attrs)
 {
 	int i;
 
@@ -407,7 +404,7 @@ static void sys_free_dir(struct sys_dir *sys_dir)
 
 	kfree(sys_dir->sys_dir_attr_list);
 
-	if (sys_dir->sys_dir_kobj != NULL)
+	if (sys_dir->sys_dir_kobj)
 		kobject_put(sys_dir->sys_dir_kobj);
 }
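
The sysfs show handlers in this file bound their output with scnprintf() against PAGE_SIZE, since sysfs hands a show callback at most one page of buffer. A minimal sketch of that shape, with a made-up attribute and value:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static unsigned int example_counter;	/* hypothetical statistic */

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	/* scnprintf() never writes more than PAGE_SIZE and returns the
	 * number of characters actually stored in buf.
	 */
	return scnprintf(buf, PAGE_SIZE, "counter=%u\n", example_counter);
}

static struct kobj_attribute example_attr = __ATTR_RO(example);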
 
diff --git a/drivers/staging/ccree/ssi_sysfs.h b/drivers/staging/ccree/ssi_sysfs.h
index baeac1d..44ae3d4 100644
--- a/drivers/staging/ccree/ssi_sysfs.h
+++ b/drivers/staging/ccree/ssi_sysfs.h
@@ -1,21 +1,21 @@
 /*
  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 /* \file ssi_sysfs.h
-   ARM CryptoCell sysfs APIs
+ * ARM CryptoCell sysfs APIs
  */
 
 #ifndef __SSI_SYSFS_H__
@@ -36,6 +36,7 @@ enum stat_phase {
 	STAT_PHASE_6,
 	MAX_STAT_PHASES,
 };
+
 enum stat_op {
 	STAT_OP_TYPE_NULL = 0,
 	STAT_OP_TYPE_ENCODE,
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index f191c2a..ca11be2 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2881,29 +2881,25 @@ static int __init comedi_init(void)
 	retval = register_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
 					COMEDI_NUM_MINORS, "comedi");
 	if (retval)
-		return -EIO;
+		return retval;
+
 	cdev_init(&comedi_cdev, &comedi_fops);
 	comedi_cdev.owner = THIS_MODULE;
 
 	retval = kobject_set_name(&comedi_cdev.kobj, "comedi");
-	if (retval) {
-		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
-					 COMEDI_NUM_MINORS);
-		return retval;
-	}
+	if (retval)
+		goto out_unregister_chrdev_region;
 
-	if (cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS)) {
-		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
-					 COMEDI_NUM_MINORS);
-		return -EIO;
-	}
+	retval = cdev_add(&comedi_cdev, MKDEV(COMEDI_MAJOR, 0),
+			  COMEDI_NUM_MINORS);
+	if (retval)
+		goto out_unregister_chrdev_region;
+
 	comedi_class = class_create(THIS_MODULE, "comedi");
 	if (IS_ERR(comedi_class)) {
+		retval = PTR_ERR(comedi_class);
 		pr_err("failed to create class\n");
-		cdev_del(&comedi_cdev);
-		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
-					 COMEDI_NUM_MINORS);
-		return PTR_ERR(comedi_class);
+		goto out_cdev_del;
 	}
 
 	comedi_class->dev_groups = comedi_dev_groups;
@@ -2914,11 +2910,8 @@ static int __init comedi_init(void)
 
 		dev = comedi_alloc_board_minor(NULL);
 		if (IS_ERR(dev)) {
-			comedi_cleanup_board_minors();
-			cdev_del(&comedi_cdev);
-			unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
-						 COMEDI_NUM_MINORS);
-			return PTR_ERR(dev);
+			retval = PTR_ERR(dev);
+			goto out_cleanup_board_minors;
 		}
 		/* comedi_alloc_board_minor() locked the mutex */
 		mutex_unlock(&dev->mutex);
@@ -2928,6 +2921,15 @@ static int __init comedi_init(void)
 	comedi_proc_init();
 
 	return 0;
+
+out_cleanup_board_minors:
+	comedi_cleanup_board_minors();
+	class_destroy(comedi_class);
+out_cdev_del:
+	cdev_del(&comedi_cdev);
+out_unregister_chrdev_region:
+	unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
+	return retval;
 }
 module_init(comedi_init);
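
The comedi_init() rework above folds the duplicated cleanup calls on each failure path into a single unwind ladder of goto labels, each label undoing one previously successful step in reverse order. A sketch of that pattern with placeholder steps (step_a/step_b are not real comedi functions):

#include <linux/init.h>

/* Placeholder setup/teardown steps, illustrative only. */
static int step_a_init(void)  { return 0; }
static void step_a_exit(void) { }
static int step_b_init(void)  { return 0; }

static int __init example_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = step_b_init();
	if (ret)
		goto out_undo_a;	/* undo only what already succeeded */

	return 0;

out_undo_a:
	step_a_exit();
	return ret;
}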
 
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
index b8a1b0e..e93f790 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
@@ -1,6 +1,6 @@
 /*
  * ni_labpc ISA DMA support.
-*/
+ */
 
 #ifndef _NI_LABPC_ISADMA_H
 #define _NI_LABPC_ISADMA_H
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 8c52179..6003e9d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -1,6 +1,6 @@
 /*
  * ni_labpc register definitions.
-*/
+ */
 
 #ifndef _NI_LABPC_REGS_H
 #define _NI_LABPC_REGS_H
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 4b9c226..c906c9a 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -580,11 +580,14 @@ static int s626_set_dac(struct comedi_device *dev,
 	 * running after the packet has been sent to the target DAC.
 	 */
 	val = 0x0F000000;	/* Continue clock after target DAC data
-				 * (write to non-existent trimdac). */
+				 * (write to non-existent trimdac).
+				 */
 	val |= 0x00004000;	/* Address the two main dual-DAC devices
-				 * (TSL's chip select enables target device). */
+				 * (TSL's chip select enables target device).
+				 */
 	val |= ((u32)(chan & 1) << 15);	/* Address the DAC channel
-						 * within the device. */
+					 * within the device.
+					 */
 	val |= (u32)dacdata;	/* Include DAC setpoint data. */
 	return s626_send_dac(dev, val);
 }
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 253f38b..c1b6079 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -96,7 +96,6 @@ static int dgnc_do_remap(struct dgnc_board *brd)
 	return 0;
 }
 
-
 /* A board has been found, initialize  it. */
 static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
 {
@@ -287,7 +286,6 @@ static void dgnc_free_irq(struct dgnc_board *brd)
 		free_irq(brd->irq, brd);
 }
 
-
  /*
   * As each timer expires, it determines (a) whether the "transmit"
   * waiter needs to be woken up, and (b) whether the poller needs to
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 980410f..764d6fe 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -52,19 +52,6 @@
 
 #define dgnc_jiffies_from_ms(a) (((a) * HZ) / 1000)
 
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.  This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define	DEFAULT_IFLAGS	(ICRNL | IXON)
-#define	DEFAULT_OFLAGS	(OPOST | ONLCR)
-#define	DEFAULT_CFLAGS	(B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define	DEFAULT_LFLAGS	(ISIG | ICANON | ECHO | ECHOE | ECHOK | \
-			ECHOCTL | ECHOKE | IEXTEN)
-
 #ifndef _POSIX_VDISABLE
 #define   _POSIX_VDISABLE '\0'
 #endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 9e98781..d3736da 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -51,22 +51,6 @@ static const struct digi_t dgnc_digi_init = {
 	.digi_term =	"ansi"		/* default terminal type */
 };
 
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-static const struct ktermios default_termios = {
-	.c_iflag =	(DEFAULT_IFLAGS),
-	.c_oflag =	(DEFAULT_OFLAGS),
-	.c_cflag =	(DEFAULT_CFLAGS),
-	.c_lflag =	(DEFAULT_LFLAGS),
-	.c_cc =		INIT_C_CC,
-	.c_line =	0,
-};
-
 static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
 static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
 static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file,
@@ -129,6 +113,49 @@ static const struct tty_operations dgnc_tty_ops = {
 
 /* TTY Initialization/Cleanup Functions */
 
+static struct tty_driver *dgnc_tty_create(char *serial_name, uint maxports,
+					  int major, int minor)
+{
+	int rc;
+	struct tty_driver *drv;
+
+	drv = tty_alloc_driver(maxports,
+			       TTY_DRIVER_REAL_RAW |
+			       TTY_DRIVER_DYNAMIC_DEV |
+			       TTY_DRIVER_HARDWARE_BREAK);
+	if (IS_ERR(drv))
+		return drv;
+
+	drv->name = serial_name;
+	drv->name_base = 0;
+	drv->major = major;
+	drv->minor_start = minor;
+	drv->type = TTY_DRIVER_TYPE_SERIAL;
+	drv->subtype = SERIAL_TYPE_NORMAL;
+	drv->init_termios = tty_std_termios;
+	drv->init_termios.c_cflag = (B9600 | CS8 | CREAD | HUPCL | CLOCAL);
+	drv->init_termios.c_ispeed = 9600;
+	drv->init_termios.c_ospeed = 9600;
+	drv->driver_name = DRVSTR;
+	/*
+	 * Entry points for driver.  Called by the kernel from
+	 * tty_io.c and n_tty.c.
+	 */
+	tty_set_operations(drv, &dgnc_tty_ops);
+	rc = tty_register_driver(drv);
+	if (rc < 0) {
+		put_tty_driver(drv);
+		return ERR_PTR(rc);
+	}
+	return drv;
+}
+
+static void dgnc_tty_free(struct tty_driver *drv)
+{
+	tty_unregister_driver(drv);
+	put_tty_driver(drv);
+}
+
 /**
  * dgnc_tty_register() - Init the tty subsystem for this board.
  */
@@ -136,95 +163,36 @@ int dgnc_tty_register(struct dgnc_board *brd)
 {
 	int rc;
 
-	brd->serial_driver = tty_alloc_driver(brd->maxports,
-					      TTY_DRIVER_REAL_RAW |
-					      TTY_DRIVER_DYNAMIC_DEV |
-					      TTY_DRIVER_HARDWARE_BREAK);
-	if (IS_ERR(brd->serial_driver))
-		return PTR_ERR(brd->serial_driver);
-
 	snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_",
 		 brd->boardnum);
 
-	brd->serial_driver->name = brd->serial_name;
-	brd->serial_driver->name_base = 0;
-	brd->serial_driver->major = 0;
-	brd->serial_driver->minor_start = 0;
-	brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->serial_driver->init_termios = default_termios;
-	brd->serial_driver->driver_name = DRVSTR;
-
-	/*
-	 * Entry points for driver.  Called by the kernel from
-	 * tty_io.c and n_tty.c.
-	 */
-	tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
-
-	rc = tty_register_driver(brd->serial_driver);
-	if (rc < 0) {
-		dev_dbg(&brd->pdev->dev,
-			"Can't register tty device (%d)\n", rc);
-		goto free_serial_driver;
-	}
-
-	/*
-	 * If we're doing transparent print, we have to do all of the above
-	 * again, separately so we don't get the LD confused about what major
-	 * we are when we get into the dgnc_tty_open() routine.
-	 */
-	brd->print_driver = tty_alloc_driver(brd->maxports,
-					     TTY_DRIVER_REAL_RAW |
-					     TTY_DRIVER_DYNAMIC_DEV |
-					     TTY_DRIVER_HARDWARE_BREAK);
-	if (IS_ERR(brd->print_driver)) {
-		rc = PTR_ERR(brd->print_driver);
-		goto unregister_serial_driver;
+	brd->serial_driver = dgnc_tty_create(brd->serial_name,
+					     brd->maxports, 0, 0);
+	if (IS_ERR(brd->serial_driver)) {
+		rc = PTR_ERR(brd->serial_driver);
+		dev_dbg(&brd->pdev->dev, "Can't register tty device (%d)\n",
+			rc);
+		return rc;
 	}
 
 	snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
-
-	brd->print_driver->name = brd->print_name;
-	brd->print_driver->name_base = 0;
-	brd->print_driver->major = brd->serial_driver->major;
-	brd->print_driver->minor_start = 0x80;
-	brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
-	brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->print_driver->init_termios = default_termios;
-	brd->print_driver->driver_name = DRVSTR;
-
-	/*
-	 * Entry points for driver.  Called by the kernel from
-	 * tty_io.c and n_tty.c.
-	 */
-	tty_set_operations(brd->print_driver, &dgnc_tty_ops);
-
-	rc = tty_register_driver(brd->print_driver);
-	if (rc < 0) {
+	brd->print_driver = dgnc_tty_create(brd->print_name, brd->maxports,
+					    0x80,
+					    brd->serial_driver->major);
+	if (IS_ERR(brd->print_driver)) {
+		rc = PTR_ERR(brd->print_driver);
 		dev_dbg(&brd->pdev->dev,
-			"Can't register Transparent Print device(%d)\n",
-			rc);
-		goto free_print_driver;
+			"Can't register Transparent Print device(%d)\n", rc);
+		dgnc_tty_free(brd->serial_driver);
+		return rc;
 	}
-
 	return 0;
-
-free_print_driver:
-	put_tty_driver(brd->print_driver);
-unregister_serial_driver:
-	tty_unregister_driver(brd->serial_driver);
-free_serial_driver:
-	put_tty_driver(brd->serial_driver);
-
-	return rc;
 }
 
 void dgnc_tty_unregister(struct dgnc_board *brd)
 {
-	tty_unregister_driver(brd->print_driver);
-	tty_unregister_driver(brd->serial_driver);
-	put_tty_driver(brd->print_driver);
-	put_tty_driver(brd->serial_driver);
+	dgnc_tty_free(brd->print_driver);
+	dgnc_tty_free(brd->serial_driver);
 }
 
 /**
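
dgnc_tty_create() above follows the kernel's ERR_PTR convention: a pointer-returning constructor encodes an errno value in the pointer itself, and callers decode it with IS_ERR()/PTR_ERR() rather than testing for NULL. A small sketch of that convention with an illustrative object type:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {
	int id;
};

static struct example_obj *example_create(int id)
{
	struct example_obj *obj;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode the errno in the pointer */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->id = id;
	return obj;
}

static int example_register(int id)
{
	struct example_obj *obj = example_create(id);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate the encoded errno */

	/* ... use obj ... */
	kfree(obj);
	return 0;
}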
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 77b242e..bb010cb 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -200,13 +200,13 @@ static u32 _nbu2ss_get_begin_ram_address(struct nbu2ss_udc *udc)
 	for (num = 0; num < NUM_ENDPOINTS - 1; num++) {
 		p_ep_regs = &udc->p_regs->EP_REGS[num];
 		data = _nbu2ss_readl(&p_ep_regs->EP_PCKT_ADRS);
-		buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPn_BUF_TYPE;
+		buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPN_BUF_TYPE;
 		if (buf_type == 0) {
 			/* Single Buffer */
-			use_ram_size += (data & EPn_MPKT) / sizeof(u32);
+			use_ram_size += (data & EPN_MPKT) / sizeof(u32);
 		} else {
 			/* Double Buffer */
-			use_ram_size += ((data & EPn_MPKT) / sizeof(u32)) * 2;
+			use_ram_size += ((data & EPN_MPKT) / sizeof(u32)) * 2;
 		}
 
 		if ((data >> 16) > last_ram_adr)
@@ -245,15 +245,15 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 	/*   Bulk, Interrupt, ISO */
 	switch (ep->ep_type) {
 	case USB_ENDPOINT_XFER_BULK:
-		data = EPn_BULK;
+		data = EPN_BULK;
 		break;
 
 	case USB_ENDPOINT_XFER_INT:
-		data = EPn_BUF_SINGLE | EPn_INTERRUPT;
+		data = EPN_BUF_SINGLE | EPN_INTERRUPT;
 		break;
 
 	case USB_ENDPOINT_XFER_ISOC:
-		data = EPn_ISO;
+		data = EPN_ISO;
 		break;
 
 	default:
@@ -267,24 +267,24 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 	if (ep->direct == USB_DIR_OUT) {
 		/*---------------------------------------------------------*/
 		/* OUT */
-		data = EPn_EN | EPn_BCLR | EPn_DIR0;
+		data = EPN_EN | EPN_BCLR | EPN_DIR0;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_ONAK | EPn_OSTL_EN | EPn_OSTL;
+		data = EPN_ONAK | EPN_OSTL_EN | EPN_OSTL;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_OUT_EN | EPn_OUT_END_EN;
+		data = EPN_OUT_EN | EPN_OUT_END_EN;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	} else {
 		/*---------------------------------------------------------*/
 		/* IN */
-		data = EPn_EN | EPn_BCLR | EPn_AUTO;
+		data = EPN_EN | EPN_BCLR | EPN_AUTO;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_ISTL;
+		data = EPN_ISTL;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_IN_EN | EPn_IN_END_EN;
+		data = EPN_IN_EN | EPN_IN_END_EN;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	}
 
@@ -315,24 +315,24 @@ static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 	if (ep->direct == USB_DIR_OUT) {
 		/*---------------------------------------------------------*/
 		/* OUT */
-		data = EPn_ONAK | EPn_BCLR;
+		data = EPN_ONAK | EPN_BCLR;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_EN | EPn_DIR0;
+		data = EPN_EN | EPN_DIR0;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_OUT_EN | EPn_OUT_END_EN;
+		data = EPN_OUT_EN | EPN_OUT_END_EN;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	} else {
 		/*---------------------------------------------------------*/
 		/* IN */
-		data = EPn_BCLR;
+		data = EPN_BCLR;
 		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_EN | EPn_AUTO;
+		data = EPN_EN | EPN_AUTO;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 
-		data = EPn_IN_EN | EPn_IN_END_EN;
+		data = EPN_IN_EN | EPN_IN_END_EN;
 		_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
 	}
 
@@ -360,21 +360,21 @@ static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 
 		/*---------------------------------------------------------*/
 		/* Transfer Direct */
-		data = DCR1_EPn_DIR0;
+		data = DCR1_EPN_DIR0;
 		_nbu2ss_bitset(&udc->p_regs->EP_DCR[num].EP_DCR1, data);
 
 		/*---------------------------------------------------------*/
 		/* DMA Mode etc. */
-		data = EPn_STOP_MODE | EPn_STOP_SET  | EPn_DMAMODE0;
+		data = EPN_STOP_MODE | EPN_STOP_SET  | EPN_DMAMODE0;
 		_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
 	} else {
 		/*---------------------------------------------------------*/
 		/* IN */
-		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPn_AUTO);
+		_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPN_AUTO);
 
 		/*---------------------------------------------------------*/
 		/* DMA Mode etc. */
-		data = EPn_BURST_SET | EPn_DMAMODE0;
+		data = EPN_BURST_SET | EPN_DMAMODE0;
 		_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
 	}
 }
@@ -402,12 +402,12 @@ static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 		/*---------------------------------------------------------*/
 		/* OUT */
 		_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, 0);
-		_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPn_DIR0);
+		_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_DIR0);
 		_nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
 	} else {
 		/*---------------------------------------------------------*/
 		/* IN */
-		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
+		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
 		_nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
 	}
 }
@@ -418,9 +418,9 @@ static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 {
 	struct fc_regs	*preg = udc->p_regs;
 
-	_nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPn_REQEN);
-	mdelay(DMA_DISABLE_TIME);	/* DCR1_EPn_REQEN Clear */
-	_nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPn_DMA_EN);
+	_nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPN_REQEN);
+	mdelay(DMA_DISABLE_TIME);	/* DCR1_EPN_REQEN Clear */
+	_nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPN_DMA_EN);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -453,16 +453,16 @@ static void _nbu2ss_ep_in_end(
 	} else {
 		num = epnum - 1;
 
-		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
+		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
 
 		/* Writing of 1-4 bytes */
 		if (length)
 			_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
 
-		data = (((length) << 5) & EPn_DW) | EPn_DEND;
+		data = (((length) << 5) & EPN_DW) | EPN_DEND;
 		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
 
-		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
+		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
 	}
 }
 
@@ -526,12 +526,13 @@ static void _nbu2ss_dma_unmap_single(
 			if (direct == USB_DIR_OUT)
 				memcpy(req->req.buf, ep->virt_buf,
 				       req->req.actual & 0xfffffffc);
-		} else
+		} else {
 			dma_unmap_single(udc->gadget.dev.parent,
 					 req->req.dma, req->req.length,
 				(direct == USB_DIR_IN)
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
+		}
 		req->req.dma = DMA_ADDR_INVALID;
 		req->mapped = 0;
 	} else {
@@ -573,65 +574,67 @@ static int ep0_out_pio(struct nbu2ss_udc *udc, u8 *buf, u32 length)
 
 /*-------------------------------------------------------------------------*/
 /* Endpoint 0 OUT Transfer (PIO, OverBytes) */
-static int EP0_out_OverBytes(struct nbu2ss_udc *udc, u8 *pBuf, u32 length)
+static int ep0_out_overbytes(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
 {
 	u32		i;
-	u32		iReadSize = 0;
-	union usb_reg_access  Temp32;
-	union usb_reg_access  *pBuf32 = (union usb_reg_access *)pBuf;
+	u32		i_read_size = 0;
+	union usb_reg_access  temp_32;
+	union usb_reg_access  *p_buf_32 = (union usb_reg_access *)p_buf;
 
 	if ((length > 0) && (length < sizeof(u32))) {
-		Temp32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
+		temp_32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
 		for (i = 0 ; i < length ; i++)
-			pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
-		iReadSize += length;
+			p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
+		i_read_size += length;
 	}
 
-	return iReadSize;
+	return i_read_size;
 }
 
 /*-------------------------------------------------------------------------*/
 /* Endpoint 0 IN Transfer (PIO) */
-static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *pBuf, u32 length)
+static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
 {
 	u32		i;
-	u32		iMaxLength   = EP0_PACKETSIZE;
-	u32		iWordLength  = 0;
-	u32		iWriteLength = 0;
-	union usb_reg_access  *pBuf32 = (union usb_reg_access *)pBuf;
+	u32		i_max_length   = EP0_PACKETSIZE;
+	u32		i_word_length  = 0;
+	u32		i_write_length = 0;
+	union usb_reg_access  *p_buf_32 = (union usb_reg_access *)p_buf;
 
 	/*------------------------------------------------------------*/
 	/* Transfer Length */
-	if (iMaxLength < length)
-		iWordLength = iMaxLength / sizeof(u32);
+	if (i_max_length < length)
+		i_word_length = i_max_length / sizeof(u32);
 	else
-		iWordLength = length / sizeof(u32);
+		i_word_length = length / sizeof(u32);
 
 	/*------------------------------------------------------------*/
 	/* PIO */
-	for (i = 0; i < iWordLength; i++) {
-		_nbu2ss_writel(&udc->p_regs->EP0_WRITE, pBuf32->dw);
-		pBuf32++;
-		iWriteLength += sizeof(u32);
+	for (i = 0; i < i_word_length; i++) {
+		_nbu2ss_writel(&udc->p_regs->EP0_WRITE, p_buf_32->dw);
+		p_buf_32++;
+		i_write_length += sizeof(u32);
 	}
 
-	return iWriteLength;
+	return i_write_length;
 }
 
 /*-------------------------------------------------------------------------*/
 /* Endpoint 0 IN Transfer (PIO, OverBytes) */
-static int EP0_in_OverBytes(struct nbu2ss_udc *udc, u8 *pBuf, u32 iRemainSize)
+static int ep0_in_overbytes(struct nbu2ss_udc *udc,
+			    u8 *p_buf,
+			    u32 i_remain_size)
 {
 	u32		i;
-	union usb_reg_access  Temp32;
-	union usb_reg_access  *pBuf32 = (union usb_reg_access *)pBuf;
+	union usb_reg_access  temp_32;
+	union usb_reg_access  *p_buf_32 = (union usb_reg_access *)p_buf;
 
-	if ((iRemainSize > 0) && (iRemainSize < sizeof(u32))) {
-		for (i = 0 ; i < iRemainSize ; i++)
-			Temp32.byte.DATA[i] = pBuf32->byte.DATA[i];
-		_nbu2ss_ep_in_end(udc, 0, Temp32.dw, iRemainSize);
+	if ((i_remain_size > 0) && (i_remain_size < sizeof(u32))) {
+		for (i = 0 ; i < i_remain_size ; i++)
+			temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
+		_nbu2ss_ep_in_end(udc, 0, temp_32.dw, i_remain_size);
 
-		return iRemainSize;
+		return i_remain_size;
 	}
 
 	return 0;
@@ -679,9 +682,9 @@ static int _nbu2ss_ep0_in_transfer(
 	struct nbu2ss_req *req
 )
 {
-	u8		*pBuffer;			/* IN Data Buffer */
+	u8		*p_buffer;			/* IN Data Buffer */
 	u32		data;
-	u32		iRemainSize = 0;
+	u32		i_remain_size = 0;
 	int		result = 0;
 
 	/*-------------------------------------------------------------*/
@@ -705,25 +708,25 @@ static int _nbu2ss_ep0_in_transfer(
 	data &= ~(u32)EP0_INAK;
 	_nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
 
-	iRemainSize = req->req.length - req->req.actual;
-	pBuffer = (u8 *)req->req.buf;
-	pBuffer += req->req.actual;
+	i_remain_size = req->req.length - req->req.actual;
+	p_buffer = (u8 *)req->req.buf;
+	p_buffer += req->req.actual;
 
 	/*-------------------------------------------------------------*/
 	/* Data transfer */
-	result = EP0_in_PIO(udc, pBuffer, iRemainSize);
+	result = EP0_in_PIO(udc, p_buffer, i_remain_size);
 
 	req->div_len = result;
-	iRemainSize -= result;
+	i_remain_size -= result;
 
-	if (iRemainSize == 0) {
+	if (i_remain_size == 0) {
 		EP0_send_NULL(udc, FALSE);
 		return result;
 	}
 
-	if ((iRemainSize < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
-		pBuffer += result;
-		result += EP0_in_OverBytes(udc, pBuffer, iRemainSize);
+	if ((i_remain_size < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
+		p_buffer += result;
+		result += ep0_in_overbytes(udc, p_buffer, i_remain_size);
 		req->div_len = result;
 	}
 
@@ -736,40 +739,40 @@ static int _nbu2ss_ep0_out_transfer(
 	struct nbu2ss_req *req
 )
 {
-	u8		*pBuffer;
-	u32		iRemainSize;
-	u32		iRecvLength;
+	u8		*p_buffer;
+	u32		i_remain_size;
+	u32		i_recv_length;
 	int		result = 0;
-	int		fRcvZero;
+	int		f_rcv_zero;
 
 	/*-------------------------------------------------------------*/
 	/* Receive data confirmation */
-	iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
-	if (iRecvLength != 0) {
-		fRcvZero = 0;
+	i_recv_length = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
+	if (i_recv_length != 0) {
+		f_rcv_zero = 0;
 
-		iRemainSize = req->req.length - req->req.actual;
-		pBuffer = (u8 *)req->req.buf;
-		pBuffer += req->req.actual;
+		i_remain_size = req->req.length - req->req.actual;
+		p_buffer = (u8 *)req->req.buf;
+		p_buffer += req->req.actual;
 
-		result = ep0_out_pio(udc, pBuffer
-					, min(iRemainSize, iRecvLength));
+		result = ep0_out_pio(udc, p_buffer
+					, min(i_remain_size, i_recv_length));
 		if (result < 0)
 			return result;
 
 		req->req.actual += result;
-		iRecvLength -= result;
+		i_recv_length -= result;
 
-		if ((iRecvLength > 0) && (iRecvLength < sizeof(u32))) {
-			pBuffer += result;
-			iRemainSize -= result;
+		if ((i_recv_length > 0) && (i_recv_length < sizeof(u32))) {
+			p_buffer += result;
+			i_remain_size -= result;
 
-			result = EP0_out_OverBytes(udc, pBuffer
-					, min(iRemainSize, iRecvLength));
+			result = ep0_out_overbytes(udc, p_buffer
+					, min(i_remain_size, i_recv_length));
 			req->req.actual += result;
 		}
 	} else {
-		fRcvZero = 1;
+		f_rcv_zero = 1;
 	}
 
 	/*-------------------------------------------------------------*/
@@ -794,9 +797,9 @@ static int _nbu2ss_ep0_out_transfer(
 		return -EOVERFLOW;
 	}
 
-	if (fRcvZero != 0) {
-		iRemainSize = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
-		if (iRemainSize & EP0_ONAK) {
+	if (f_rcv_zero != 0) {
+		i_remain_size = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
+		if (i_remain_size & EP0_ONAK) {
 			/*---------------------------------------------------*/
 			/* NACK release */
 			_nbu2ss_bitclr(&udc->p_regs->EP0_CONTROL, EP0_ONAK);
@@ -815,7 +818,7 @@ static int _nbu2ss_out_dma(
 	u32		length
 )
 {
-	dma_addr_t	pBuffer;
+	dma_addr_t	p_buffer;
 	u32		mpkt;
 	u32		lmpkt;
 	u32		dmacnt;
@@ -828,14 +831,14 @@ static int _nbu2ss_out_dma(
 		return 1;		/* DMA is forwarded */
 
 	req->dma_flag = TRUE;
-	pBuffer = req->req.dma;
-	pBuffer += req->req.actual;
+	p_buffer = req->req.dma;
+	p_buffer += req->req.actual;
 
 	/* DMA Address */
-	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)pBuffer);
+	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
 
 	/* Number of transfer packets */
-	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
+	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
 	dmacnt = length / mpkt;
 	lmpkt = (length % mpkt) & ~(u32)0x03;
 
@@ -851,18 +854,18 @@ static int _nbu2ss_out_dma(
 	data = mpkt | (lmpkt << 16);
 	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
 
-	data = ((dmacnt & 0xff) << 16) | DCR1_EPn_DIR0 | DCR1_EPn_REQEN;
+	data = ((dmacnt & 0xff) << 16) | DCR1_EPN_DIR0 | DCR1_EPN_REQEN;
 	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
 
 	if (burst == 0) {
 		_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, 0);
-		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_BURST_SET);
+		_nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
 	} else {
 		_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT
 				, (dmacnt << 16));
-		_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_BURST_SET);
+		_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
 	}
-	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_DMA_EN);
+	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
 
 	result = length & ~(u32)0x03;
 	req->div_len = result;
@@ -878,12 +881,12 @@ static int _nbu2ss_epn_out_pio(
 	u32		length
 )
 {
-	u8		*pBuffer;
+	u8		*p_buffer;
 	u32		i;
 	u32		data;
-	u32		iWordLength;
-	union usb_reg_access	Temp32;
-	union usb_reg_access	*pBuf32;
+	u32		i_word_length;
+	union usb_reg_access	temp_32;
+	union usb_reg_access	*p_buf_32;
 	int		result = 0;
 	struct fc_regs	*preg = udc->p_regs;
 
@@ -893,28 +896,29 @@ static int _nbu2ss_epn_out_pio(
 	if (length == 0)
 		return 0;
 
-	pBuffer = (u8 *)req->req.buf;
-	pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual);
+	p_buffer = (u8 *)req->req.buf;
+	p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
 
-	iWordLength = length / sizeof(u32);
-	if (iWordLength > 0) {
+	i_word_length = length / sizeof(u32);
+	if (i_word_length > 0) {
 		/*---------------------------------------------------------*/
 		/* Copy of every four bytes */
-		for (i = 0; i < iWordLength; i++) {
-			pBuf32->dw =
+		for (i = 0; i < i_word_length; i++) {
+			p_buf_32->dw =
 			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
-			pBuf32++;
+			p_buf_32++;
 		}
-		result = iWordLength * sizeof(u32);
+		result = i_word_length * sizeof(u32);
 	}
 
 	data = length - result;
 	if (data > 0) {
 		/*---------------------------------------------------------*/
 		/* Copy of fraction byte */
-		Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
+		temp_32.dw =
+			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
 		for (i = 0 ; i < data ; i++)
-			pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
+			p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
 		result += data;
 	}
 
@@ -937,7 +941,7 @@ static int _nbu2ss_epn_out_data(
 )
 {
 	u32		num;
-	u32		iBufSize;
+	u32		i_buf_size;
 	int		nret = 1;
 
 	if (ep->epnum == 0)
@@ -945,14 +949,14 @@ static int _nbu2ss_epn_out_data(
 
 	num = ep->epnum - 1;
 
-	iBufSize = min((req->req.length - req->req.actual), data_size);
+	i_buf_size = min((req->req.length - req->req.actual), data_size);
 
 	if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
-	    (iBufSize  >= sizeof(u32))) {
-		nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
+	    (i_buf_size  >= sizeof(u32))) {
+		nret = _nbu2ss_out_dma(udc, req, num, i_buf_size);
 	} else {
-		iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
-		nret = _nbu2ss_epn_out_pio(udc, ep, req, iBufSize);
+		i_buf_size = min_t(u32, i_buf_size, ep->ep.maxpacket);
+		nret = _nbu2ss_epn_out_pio(udc, ep, req, i_buf_size);
 	}
 
 	return nret;
@@ -966,7 +970,7 @@ static int _nbu2ss_epn_out_transfer(
 )
 {
 	u32		num;
-	u32		iRecvLength;
+	u32		i_recv_length;
 	int		result = 1;
 	struct fc_regs	*preg = udc->p_regs;
 
@@ -977,13 +981,13 @@ static int _nbu2ss_epn_out_transfer(
 
 	/*-------------------------------------------------------------*/
 	/* Receive Length */
-	iRecvLength
-		= _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPn_LDATA;
+	i_recv_length
+		= _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
 
-	if (iRecvLength != 0) {
-		result = _nbu2ss_epn_out_data(udc, ep, req, iRecvLength);
-		if (iRecvLength < ep->ep.maxpacket) {
-			if (iRecvLength == result) {
+	if (i_recv_length != 0) {
+		result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
+		if (i_recv_length < ep->ep.maxpacket) {
+			if (i_recv_length == result) {
 				req->req.actual += result;
 				result = 0;
 			}
@@ -1023,11 +1027,11 @@ static int _nbu2ss_in_dma(
 	u32		length
 )
 {
-	dma_addr_t	pBuffer;
+	dma_addr_t	p_buffer;
 	u32		mpkt;		/* MaxPacketSize */
 	u32		lmpkt;		/* Last Packet Data Size */
 	u32		dmacnt;		/* IN Data Size */
-	u32		iWriteLength;
+	u32		i_write_length;
 	u32		data;
 	int		result = -EINVAL;
 	struct fc_regs	*preg = udc->p_regs;
@@ -1042,18 +1046,18 @@ static int _nbu2ss_in_dma(
 	req->dma_flag = TRUE;
 
 	/* MAX Packet Size */
-	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
+	mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
 
 	if ((DMA_MAX_COUNT * mpkt) < length)
-		iWriteLength = DMA_MAX_COUNT * mpkt;
+		i_write_length = DMA_MAX_COUNT * mpkt;
 	else
-		iWriteLength = length;
+		i_write_length = length;
 
 	/*------------------------------------------------------------*/
 	/* Number of transmission packets */
-	if (mpkt < iWriteLength) {
-		dmacnt = iWriteLength / mpkt;
-		lmpkt  = (iWriteLength % mpkt) & ~(u32)0x3;
+	if (mpkt < i_write_length) {
+		dmacnt = i_write_length / mpkt;
+		lmpkt  = (i_write_length % mpkt) & ~(u32)0x3;
 		if (lmpkt != 0)
 			dmacnt++;
 		else
@@ -1061,7 +1065,7 @@ static int _nbu2ss_in_dma(
 
 	} else {
 		dmacnt = 1;
-		lmpkt  = iWriteLength & ~(u32)0x3;
+		lmpkt  = i_write_length & ~(u32)0x3;
 	}
 
 	/* Packet setting */
@@ -1069,12 +1073,12 @@ static int _nbu2ss_in_dma(
 	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
 
 	/* Address setting */
-	pBuffer = req->req.dma;
-	pBuffer += req->req.actual;
-	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)pBuffer);
+	p_buffer = req->req.dma;
+	p_buffer += req->req.actual;
+	_nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
 
 	/* Packet and DMA setting */
-	data = ((dmacnt & 0xff) << 16) | DCR1_EPn_REQEN;
+	data = ((dmacnt & 0xff) << 16) | DCR1_EPN_REQEN;
 	_nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
 
 	/* Packet setting of EPC */
@@ -1082,9 +1086,9 @@ static int _nbu2ss_in_dma(
 	_nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, data);
 
 	/*DMA setting of EPC */
-	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPn_DMA_EN);
+	_nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
 
-	result = iWriteLength & ~(u32)0x3;
+	result = i_write_length & ~(u32)0x3;
 	req->div_len = result;
 
 	return result;
@@ -1098,12 +1102,12 @@ static int _nbu2ss_epn_in_pio(
 	u32		length
 )
 {
-	u8		*pBuffer;
+	u8		*p_buffer;
 	u32		i;
 	u32		data;
-	u32		iWordLength;
-	union usb_reg_access	Temp32;
-	union usb_reg_access	*pBuf32 = NULL;
+	u32		i_word_length;
+	union usb_reg_access	temp_32;
+	union usb_reg_access	*p_buf_32 = NULL;
 	int		result = 0;
 	struct fc_regs	*preg = udc->p_regs;
 
@@ -1111,30 +1115,30 @@ static int _nbu2ss_epn_in_pio(
 		return 1;		/* DMA is forwarded */
 
 	if (length > 0) {
-		pBuffer = (u8 *)req->req.buf;
-		pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual);
+		p_buffer = (u8 *)req->req.buf;
+		p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
 
-		iWordLength = length / sizeof(u32);
-		if (iWordLength > 0) {
-			for (i = 0; i < iWordLength; i++) {
+		i_word_length = length / sizeof(u32);
+		if (i_word_length > 0) {
+			for (i = 0; i < i_word_length; i++) {
 				_nbu2ss_writel(
 					&preg->EP_REGS[ep->epnum - 1].EP_WRITE
-					, pBuf32->dw
+					, p_buf_32->dw
 				);
 
-				pBuf32++;
+				p_buf_32++;
 			}
-			result = iWordLength * sizeof(u32);
+			result = i_word_length * sizeof(u32);
 		}
 	}
 
 	if (result != ep->ep.maxpacket) {
 		data = length - result;
-		Temp32.dw = 0;
+		temp_32.dw = 0;
 		for (i = 0 ; i < data ; i++)
-			Temp32.byte.DATA[i] = pBuf32->byte.DATA[i];
+			temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
 
-		_nbu2ss_ep_in_end(udc, ep->epnum, Temp32.dw, data);
+		_nbu2ss_ep_in_end(udc, ep->epnum, temp_32.dw, data);
 		result += data;
 	}
 
@@ -1178,7 +1182,7 @@ static int _nbu2ss_epn_in_transfer(
 )
 {
 	u32		num;
-	u32		iBufSize;
+	u32		i_buf_size;
 	int		result = 0;
 	u32		status;
 
@@ -1192,19 +1196,19 @@ static int _nbu2ss_epn_in_transfer(
 	/*-------------------------------------------------------------*/
 	/* State confirmation of FIFO */
 	if (req->req.actual == 0) {
-		if ((status & EPn_IN_EMPTY) == 0)
+		if ((status & EPN_IN_EMPTY) == 0)
 			return 1;	/* Not Empty */
 
 	} else {
-		if ((status & EPn_IN_FULL) != 0)
+		if ((status & EPN_IN_FULL) != 0)
 			return 1;	/* Not Empty */
 	}
 
 	/*-------------------------------------------------------------*/
 	/* Start transfer */
-	iBufSize = req->req.length - req->req.actual;
-	if (iBufSize > 0)
-		result = _nbu2ss_epn_in_data(udc, ep, req, iBufSize);
+	i_buf_size = req->req.length - req->req.actual;
+	if (i_buf_size > 0)
+		result = _nbu2ss_epn_in_data(udc, ep, req, i_buf_size);
 	else if (req->req.length == 0)
 		_nbu2ss_zero_len_pkt(udc, ep->epnum);
 
@@ -1252,7 +1256,7 @@ static int _nbu2ss_start_transfer(
 		}
 
 	} else {
-		/* EPn */
+		/* EPN */
 		if (ep->direct == USB_DIR_OUT) {
 			/* OUT */
 			if (!bflag)
@@ -1281,7 +1285,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
 		length = _nbu2ss_readl(
 			&ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
 
-		length &= EPn_LDATA;
+		length &= EPN_LDATA;
 		if (length < ep->ep.maxpacket)
 			bflag = TRUE;
 	}
@@ -1304,9 +1308,9 @@ static void _nbu2ss_endpoint_toggle_reset(
 	num = (ep_adrs & 0x7F) - 1;
 
 	if (ep_adrs & USB_DIR_IN)
-		data = EPn_IPIDCLR;
+		data = EPN_IPIDCLR;
 	else
-		data = EPn_BCLR | EPn_OPIDCLR;
+		data = EPN_BCLR | EPN_OPIDCLR;
 
 	_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
 }
@@ -1341,9 +1345,9 @@ static void _nbu2ss_set_endpoint_stall(
 			ep->halted = TRUE;
 
 			if (ep_adrs & USB_DIR_IN)
-				data = EPn_BCLR | EPn_ISTL;
+				data = EPN_BCLR | EPN_ISTL;
 			else
-				data = EPn_OSTL_EN | EPn_OSTL;
+				data = EPN_OSTL_EN | EPN_OSTL;
 
 			_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
 		} else {
@@ -1351,13 +1355,13 @@ static void _nbu2ss_set_endpoint_stall(
 			ep->stalled = FALSE;
 			if (ep_adrs & USB_DIR_IN) {
 				_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
-						, EPn_ISTL);
+						, EPN_ISTL);
 			} else {
 				data =
 				_nbu2ss_readl(&preg->EP_REGS[num].EP_CONTROL);
 
-				data &= ~EPn_OSTL;
-				data |= EPn_OSTL_EN;
+				data &= ~EPN_OSTL;
+				data |= EPN_OSTL_EN;
 
 				_nbu2ss_writel(&preg->EP_REGS[num].EP_CONTROL
 						, data);
@@ -1453,13 +1457,13 @@ static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
 
 	} else {
 		data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
-		if ((data & EPn_EN) == 0)
+		if ((data & EPN_EN) == 0)
 			return -1;
 
 		if (ep_adrs & USB_ENDPOINT_DIR_MASK)
-			bit_data = EPn_ISTL;
+			bit_data = EPN_ISTL;
 		else
-			bit_data = EPn_OSTL;
+			bit_data = EPN_OSTL;
 	}
 
 	if ((data & bit_data) == 0)
@@ -1548,7 +1552,7 @@ static void _nbu2ss_epn_set_stall(
 			regdata = _nbu2ss_readl(
 				&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
 
-			if ((regdata & EPn_IN_DATA) == 0)
+			if ((regdata & EPN_IN_DATA) == 0)
 				break;
 
 			mdelay(1);
@@ -1651,7 +1655,7 @@ static int std_req_set_address(struct nbu2ss_udc *udc)
 /*-------------------------------------------------------------------------*/
 static int std_req_set_configuration(struct nbu2ss_udc *udc)
 {
-	u32 ConfigValue = (u32)(udc->ctrl.wValue & 0x00ff);
+	u32 config_value = (u32)(udc->ctrl.wValue & 0x00ff);
 
 	if ((udc->ctrl.wIndex != 0x0000)	||
 	    (udc->ctrl.wLength != 0x0000)	||
@@ -1659,9 +1663,9 @@ static int std_req_set_configuration(struct nbu2ss_udc *udc)
 		return -EINVAL;
 	}
 
-	udc->curr_config = ConfigValue;
+	udc->curr_config = config_value;
 
-	if (ConfigValue > 0) {
+	if (config_value > 0) {
 		_nbu2ss_bitset(&udc->p_regs->USB_CONTROL, CONF);
 		udc->devstate = USB_STATE_CONFIGURED;
 
@@ -1968,7 +1972,7 @@ static inline void _nbu2ss_epn_in_int(
 			status =
 			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
 
-			if ((status & EPn_IN_FULL) == 0) {
+			if ((status & EPN_IN_FULL) == 0) {
 				/*-----------------------------------------*/
 				/* 0 Length Packet */
 				req->zero = false;
@@ -2059,18 +2063,18 @@ static inline void _nbu2ss_epn_out_dma_int(
 	}
 
 	ep_dmacnt = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT)
-		 & EPn_DMACNT;
+		 & EPN_DMACNT;
 	ep_dmacnt >>= 16;
 
 	for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
 		dmacnt = _nbu2ss_readl(&preg->EP_DCR[num].EP_DCR1)
-			 & DCR1_EPn_DMACNT;
+			 & DCR1_EPN_DMACNT;
 		dmacnt >>= 16;
 		if (ep_dmacnt == dmacnt)
 			break;
 	}
 
-	_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPn_REQEN);
+	_nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_REQEN);
 
 	if (dmacnt != 0) {
 		mpkt = ep->ep.maxpacket;
@@ -2117,20 +2121,20 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
 		return;
 	}
 
-	if (status & EPn_OUT_END_INT) {
-		status &= ~EPn_OUT_INT;
+	if (status & EPN_OUT_END_INT) {
+		status &= ~EPN_OUT_INT;
 		_nbu2ss_epn_out_dma_int(udc, ep, req);
 	}
 
-	if (status & EPn_OUT_INT)
+	if (status & EPN_OUT_INT)
 		_nbu2ss_epn_out_int(udc, ep, req);
 
-	if (status & EPn_IN_END_INT) {
-		status &= ~EPn_IN_INT;
+	if (status & EPN_IN_END_INT) {
+		status &= ~EPN_IN_INT;
 		_nbu2ss_epn_in_dma_int(udc, ep, req);
 	}
 
-	if (status & EPn_IN_INT)
+	if (status & EPN_IN_INT)
 		_nbu2ss_epn_in_int(udc, ep, req);
 }
 
@@ -2231,9 +2235,9 @@ static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 		_nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
 
 	} else {
-		/* EPn */
+		/* EPN */
 		_nbu2ss_ep_dma_abort(udc, ep);
-		_nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPn_BCLR);
+		_nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPN_BCLR);
 	}
 }
 
@@ -2478,7 +2482,7 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
 			suspend_flag = 1;
 		}
 
-		if (status & EPn_INT) {
+		if (status & EPN_INT) {
 			/* EP INT */
 			int_bit = status >> 8;
 
@@ -2651,7 +2655,9 @@ static int nbu2ss_ep_queue(
 	}
 
 	req = container_of(_req, struct nbu2ss_req, req);
-	if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) {
+	if (unlikely(!_req->complete ||
+		     !_req->buf ||
+		     !list_empty(&req->queue))) {
 		if (!_req->complete)
 			pr_err("udc: %s --- !_req->complete\n", __func__);
 
@@ -2868,7 +2874,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
 
 	} else {
 		data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
-			& EPn_LDATA;
+			& EPN_LDATA;
 	}
 
 	spin_unlock_irqrestore(&udc->lock, flags);
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 78c08e1..928d531 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -144,7 +144,7 @@
 /*------- (0x001C) Setup Data 1 Register */
 
 /*------- (0x0020) USB Interrupt Status Register */
-#define EPn_INT				0x00FFFF00
+#define EPN_INT				0x00FFFF00
 #define EP15_INT			BIT23
 #define EP14_INT			BIT22
 #define EP13_INT			BIT21
@@ -264,102 +264,102 @@
 /*------- (0x0038) EP0 Read Register */
 /*------- (0x003C) EP0 Write Register */
 
-/*------- (0x0040:) EPn Control Register */
-#define EPn_EN				BIT31
-#define EPn_BUF_TYPE			BIT30
-#define EPn_BUF_SINGLE			BIT30
+/*------- (0x0040:) EPN Control Register */
+#define EPN_EN				BIT31
+#define EPN_BUF_TYPE			BIT30
+#define EPN_BUF_SINGLE			BIT30
 
-#define EPn_DIR0			BIT26
-#define EPn_MODE			(BIT25 + BIT24)
-#define EPn_BULK			0
-#define EPn_INTERRUPT			BIT24
-#define EPn_ISO				BIT25
+#define EPN_DIR0			BIT26
+#define EPN_MODE			(BIT25 + BIT24)
+#define EPN_BULK			0
+#define EPN_INTERRUPT			BIT24
+#define EPN_ISO				BIT25
 
-#define EPn_OVERSEL			BIT17
-#define EPn_AUTO			BIT16
+#define EPN_OVERSEL			BIT17
+#define EPN_AUTO			BIT16
 
-#define EPn_IPIDCLR			BIT11
-#define EPn_OPIDCLR			BIT10
-#define EPn_BCLR			BIT09
-#define EPn_CBCLR			BIT08
-#define EPn_DEND			BIT07
-#define EPn_DW				(BIT06 + BIT05)
-#define EPn_DW4				0
-#define EPn_DW3				(BIT06 + BIT05)
-#define EPn_DW2				BIT06
-#define EPn_DW1				BIT05
+#define EPN_IPIDCLR			BIT11
+#define EPN_OPIDCLR			BIT10
+#define EPN_BCLR			BIT09
+#define EPN_CBCLR			BIT08
+#define EPN_DEND			BIT07
+#define EPN_DW				(BIT06 + BIT05)
+#define EPN_DW4				0
+#define EPN_DW3				(BIT06 + BIT05)
+#define EPN_DW2				BIT06
+#define EPN_DW1				BIT05
 
-#define EPn_OSTL_EN			BIT04
-#define EPn_ISTL			BIT03
-#define EPn_OSTL			BIT02
+#define EPN_OSTL_EN			BIT04
+#define EPN_ISTL			BIT03
+#define EPN_OSTL			BIT02
 
-#define EPn_ONAK			BIT00
+#define EPN_ONAK			BIT00
 
-/*------- (0x0044:) EPn Status Register	*/
-#define EPn_ISO_PIDERR			BIT29		/* R */
-#define EPn_OPID			BIT28		/* R */
-#define EPn_OUT_NOTKN			BIT27		/* R */
-#define EPn_ISO_OR			BIT26		/* R */
+/*------- (0x0044:) EPN Status Register	*/
+#define EPN_ISO_PIDERR			BIT29		/* R */
+#define EPN_OPID			BIT28		/* R */
+#define EPN_OUT_NOTKN			BIT27		/* R */
+#define EPN_ISO_OR			BIT26		/* R */
 
-#define EPn_ISO_CRC			BIT24		/* R */
-#define EPn_OUT_END_INT			BIT23		/* RW */
-#define EPn_OUT_OR_INT			BIT22		/* RW */
-#define EPn_OUT_NAK_ERR_INT		BIT21		/* RW */
-#define EPn_OUT_STALL_INT		BIT20		/* RW */
-#define EPn_OUT_INT			BIT19		/* RW */
-#define EPn_OUT_NULL_INT		BIT18		/* RW */
-#define EPn_OUT_FULL			BIT17		/* R */
-#define EPn_OUT_EMPTY			BIT16		/* R */
+#define EPN_ISO_CRC			BIT24		/* R */
+#define EPN_OUT_END_INT			BIT23		/* RW */
+#define EPN_OUT_OR_INT			BIT22		/* RW */
+#define EPN_OUT_NAK_ERR_INT		BIT21		/* RW */
+#define EPN_OUT_STALL_INT		BIT20		/* RW */
+#define EPN_OUT_INT			BIT19		/* RW */
+#define EPN_OUT_NULL_INT		BIT18		/* RW */
+#define EPN_OUT_FULL			BIT17		/* R */
+#define EPN_OUT_EMPTY			BIT16		/* R */
 
-#define EPn_IPID			BIT10		/* R */
-#define EPn_IN_NOTKN			BIT09		/* R */
-#define EPn_ISO_UR			BIT08		/* R */
-#define EPn_IN_END_INT			BIT07		/* RW */
+#define EPN_IPID			BIT10		/* R */
+#define EPN_IN_NOTKN			BIT09		/* R */
+#define EPN_ISO_UR			BIT08		/* R */
+#define EPN_IN_END_INT			BIT07		/* RW */
 
-#define EPn_IN_NAK_ERR_INT		BIT05		/* RW */
-#define EPn_IN_STALL_INT		BIT04		/* RW */
-#define EPn_IN_INT			BIT03		/* RW */
-#define EPn_IN_DATA			BIT02		/* R */
-#define EPn_IN_FULL			BIT01		/* R */
-#define EPn_IN_EMPTY			BIT00		/* R */
+#define EPN_IN_NAK_ERR_INT		BIT05		/* RW */
+#define EPN_IN_STALL_INT		BIT04		/* RW */
+#define EPN_IN_INT			BIT03		/* RW */
+#define EPN_IN_DATA			BIT02		/* R */
+#define EPN_IN_FULL			BIT01		/* R */
+#define EPN_IN_EMPTY			BIT00		/* R */
 
-#define EPn_INT_EN	\
-	(EPn_OUT_END_INT | EPn_OUT_INT | EPn_IN_END_INT | EPn_IN_INT)
+#define EPN_INT_EN	\
+	(EPN_OUT_END_INT | EPN_OUT_INT | EPN_IN_END_INT | EPN_IN_INT)
 
-/*------- (0x0048:) EPn Interrupt Enable Register */
-#define EPn_OUT_END_EN			BIT23		/* RW */
-#define EPn_OUT_OR_EN			BIT22		/* RW */
-#define EPn_OUT_NAK_ERR_EN		BIT21		/* RW */
-#define EPn_OUT_STALL_EN		BIT20		/* RW */
-#define EPn_OUT_EN			BIT19		/* RW */
-#define EPn_OUT_NULL_EN			BIT18		/* RW */
+/*------- (0x0048:) EPN Interrupt Enable Register */
+#define EPN_OUT_END_EN			BIT23		/* RW */
+#define EPN_OUT_OR_EN			BIT22		/* RW */
+#define EPN_OUT_NAK_ERR_EN		BIT21		/* RW */
+#define EPN_OUT_STALL_EN		BIT20		/* RW */
+#define EPN_OUT_EN			BIT19		/* RW */
+#define EPN_OUT_NULL_EN			BIT18		/* RW */
 
-#define EPn_IN_END_EN			BIT07		/* RW */
+#define EPN_IN_END_EN			BIT07		/* RW */
 
-#define EPn_IN_NAK_ERR_EN		BIT05		/* RW */
-#define EPn_IN_STALL_EN			BIT04		/* RW */
-#define EPn_IN_EN			BIT03		/* RW */
+#define EPN_IN_NAK_ERR_EN		BIT05		/* RW */
+#define EPN_IN_STALL_EN			BIT04		/* RW */
+#define EPN_IN_EN			BIT03		/* RW */
 
-/*------- (0x004C:) EPn Interrupt Enable Register */
-#define EPn_STOP_MODE			BIT11
-#define EPn_DEND_SET			BIT10
-#define EPn_BURST_SET			BIT09
-#define EPn_STOP_SET			BIT08
+/*------- (0x004C:) EPN Interrupt Enable Register */
+#define EPN_STOP_MODE			BIT11
+#define EPN_DEND_SET			BIT10
+#define EPN_BURST_SET			BIT09
+#define EPN_STOP_SET			BIT08
 
-#define EPn_DMA_EN			BIT04
+#define EPN_DMA_EN			BIT04
 
-#define EPn_DMAMODE0			BIT00
+#define EPN_DMAMODE0			BIT00
 
-/*------- (0x0050:) EPn MaxPacket & BaseAddress Register */
-#define EPn_BASEAD			0x1FFF0000
-#define EPn_MPKT			0x000007FF
+/*------- (0x0050:) EPN MaxPacket & BaseAddress Register */
+#define EPN_BASEAD			0x1FFF0000
+#define EPN_MPKT			0x000007FF
 
-/*------- (0x0054:) EPn Length & DMA Count Register */
-#define EPn_DMACNT			0x01FF0000
-#define EPn_LDATA			0x000007FF
+/*------- (0x0054:) EPN Length & DMA Count Register */
+#define EPN_DMACNT			0x01FF0000
+#define EPN_LDATA			0x000007FF
 
-/*------- (0x0058:) EPn Read Register */
-/*------- (0x005C:) EPn Write Register */
+/*------- (0x0058:) EPN Read Register */
+/*------- (0x005C:) EPN Write Register */
 
 /*------- (0x1000) AHBSCTR Register */
 #define WAIT_MODE			BIT00
@@ -428,19 +428,19 @@
 #define EP_AVAILABLE			0xFFFF0000	/* R */
 #define DMA_AVAILABLE			0x0000FFFF	/* R */
 
-/*------- (0x1110:) EPnDCR1 Register */
-#define DCR1_EPn_DMACNT			0x00FF0000	/* RW */
+/*------- (0x1110:) EPNDCR1 Register */
+#define DCR1_EPN_DMACNT			0x00FF0000	/* RW */
 
-#define DCR1_EPn_DIR0			BIT01		/* RW */
-#define DCR1_EPn_REQEN			BIT00		/* RW */
+#define DCR1_EPN_DIR0			BIT01		/* RW */
+#define DCR1_EPN_REQEN			BIT00		/* RW */
 
-/*------- (0x1114:) EPnDCR2 Register */
-#define DCR2_EPn_LMPKT			0x07FF0000	/* RW */
+/*------- (0x1114:) EPNDCR2 Register */
+#define DCR2_EPN_LMPKT			0x07FF0000	/* RW */
 
-#define DCR2_EPn_MPKT			0x000007FF	/* RW */
+#define DCR2_EPN_MPKT			0x000007FF	/* RW */
 
-/*------- (0x1118:) EPnTADR Register */
-#define EPn_TADR			0xFFFFFFFF	/* RW */
+/*------- (0x1118:) EPNTADR Register */
+#define EPN_TADR			0xFFFFFFFF	/* RW */
 
 /*===========================================================================*/
 /* Struct */
@@ -471,7 +471,7 @@ struct fc_regs {
 	u32 USB_ADDRESS;		/* (0x0008) USB Address */
 	u32 UTMI_CHARACTER_1;		/* (0x000C) UTMI Setting */
 	u32 TEST_CONTROL;		/* (0x0010) TEST Control */
-	u32 Reserved_14;		/* (0x0014) Reserved */
+	u32 reserved_14;		/* (0x0014) Reserved */
 	u32 SETUP_DATA0;		/* (0x0018) Setup Data0 */
 	u32 SETUP_DATA1;		/* (0x001C) Setup Data1 */
 	u32 USB_INT_STA;		/* (0x0020) USB Interrupt Status */
@@ -485,7 +485,7 @@ struct fc_regs {
 
 	struct ep_regs EP_REGS[REG_EP_NUM];	/* Endpoint Register */
 
-	u8 Reserved220[0x1000 - 0x220];	/* (0x0220:0x0FFF) Reserved */
+	u8 reserved_220[0x1000 - 0x220];	/* (0x0220:0x0FFF) Reserved */
 
 	u32 AHBSCTR;			/* (0x1000) AHBSCTR */
 	u32 AHBMCTR;			/* (0x1004) AHBMCTR */
@@ -494,25 +494,25 @@ struct fc_regs {
 	u32 EPCTR;			/* (0x1010) EPCTR */
 	u32 USBF_EPTEST;		/* (0x1014) USBF_EPTEST */
 
-	u8 Reserved1018[0x20 - 0x18];	/* (0x1018:0x101F) Reserved */
+	u8 reserved_1018[0x20 - 0x18];	/* (0x1018:0x101F) Reserved */
 
 	u32 USBSSVER;			/* (0x1020) USBSSVER */
 	u32 USBSSCONF;			/* (0x1024) USBSSCONF */
 
-	u8 Reserved1028[0x110 - 0x28];	/* (0x1028:0x110F) Reserved */
+	u8 reserved_1028[0x110 - 0x28];	/* (0x1028:0x110F) Reserved */
 
 	struct ep_dcr EP_DCR[REG_EP_NUM];	/* */
 
-	u8 Reserved1200[0x1000 - 0x200];	/* Reserved */
+	u8 reserved_1200[0x1000 - 0x200];	/* Reserved */
 } __aligned(32);
 
 #define EP0_PACKETSIZE			64
 #define EP_PACKETSIZE			1024
 
-/* EPn RAM SIZE */
+/* EPN RAM SIZE */
 #define D_RAM_SIZE_CTRL			64
 
-/* EPn Bulk Endpoint Max Packet Size */
+/* EPN Bulk Endpoint Max Packet Size */
 #define D_FS_RAM_SIZE_BULK		64
 #define D_HS_RAM_SIZE_BULK		512
 
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 489151a..456a8dd 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -282,10 +282,10 @@ static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
 				continue;
 			write_pos = &convert_buf[(y + j) * xres + x + i];
 			coeff = diffusing_matrix[i][j];
-			if (-1 == coeff)
+			if (-1 == coeff) {
 				/* pixel itself */
 				*write_pos = pixel;
-			else {
+			} else {
 				signed short p = *write_pos + error * coeff;
 
 				if (p > WHITE)
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index 1ca1fcd..fbd5ef5 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -157,7 +157,7 @@ static int set_var(struct fbtft_par *par)
  *   OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
  *   ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX  GC
  */
-#define CURVE(num, idx)  curves[num * par->gamma.num_values + idx]
+#define CURVE(num, idx)  curves[(num) * par->gamma.num_values + (idx)]
 static int set_gamma(struct fbtft_par *par, u32 *curves)
 {
 	unsigned long mask[] = {
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index d868405..ffb9a3b 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -71,7 +71,7 @@ int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len)
 			src++;
 		}
 		tmp |= ((*src & 0x0100) ? 1 : 0);
-		*(u64 *)dst = cpu_to_be64(tmp);
+		*(__be64 *)dst = cpu_to_be64(tmp);
 		dst += 8;
 		*dst++ = (u8)(*src++ & 0x00FF);
 		added++;
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 6f9eed6..b9a0a31 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -37,9 +37,9 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <linux/kthread.h>
+#include <linux/iommu.h>
 
 #include "../../fsl-mc/include/mc.h"
-#include "../../fsl-mc/include/mc-sys.h"
 #include "dpaa2-eth.h"
 
 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
@@ -54,6 +54,16 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
 
 const char dpaa2_eth_drv_version[] = "0.1";
 
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+				dma_addr_t iova_addr)
+{
+	phys_addr_t phys_addr;
+
+	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+	return phys_to_virt(phys_addr);
+}
+
 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
 			     u32 fd_status,
 			     struct sk_buff *skb)
@@ -98,12 +108,11 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 	sgt = vaddr + dpaa2_fd_get_offset(fd);
 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
+		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
 				 DMA_FROM_DEVICE);
 
-		sg_vaddr = phys_to_virt(addr);
 		skb_free_frag(sg_vaddr);
-
 		if (dpaa2_sg_is_final(&sgt[i]))
 			break;
 	}
@@ -159,10 +168,10 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
+		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
 		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
 				 DMA_FROM_DEVICE);
 
-		sg_vaddr = phys_to_virt(sg_addr);
 		sg_length = dpaa2_sg_get_len(sge);
 
 		if (i == 0) {
@@ -217,16 +226,19 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpaa2_fas *fas;
+	void *buf_data;
 	u32 status = 0;
 
 	/* Tracing point */
 	trace_dpaa2_rx_fd(priv->net_dev, fd);
 
+	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
-	vaddr = phys_to_virt(addr);
 
-	prefetch(vaddr + priv->buf_layout.private_data_size);
-	prefetch(vaddr + dpaa2_fd_get_offset(fd));
+	fas = dpaa2_get_fas(vaddr);
+	prefetch(fas);
+	buf_data = vaddr + dpaa2_fd_get_offset(fd);
+	prefetch(buf_data);
 
 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
@@ -234,9 +246,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	if (fd_format == dpaa2_fd_single) {
 		skb = build_linear_skb(priv, ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
-		struct dpaa2_sg_entry *sgt =
-				vaddr + dpaa2_fd_get_offset(fd);
-		skb = build_frag_skb(priv, ch, sgt);
+		skb = build_frag_skb(priv, ch, buf_data);
 		skb_free_frag(vaddr);
 		percpu_extras->rx_sg_frames++;
 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
@@ -252,8 +262,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 
 	/* Check if we need to validate the L4 csum */
 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
-		fas = (struct dpaa2_fas *)
-				(vaddr + priv->buf_layout.private_data_size);
 		status = le32_to_cpu(fas->status);
 		validate_rx_csum(priv, status, skb);
 	}
@@ -263,10 +271,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 	percpu_stats->rx_packets++;
 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
 
-	if (priv->net_dev->features & NETIF_F_GRO)
-		napi_gro_receive(napi, skb);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(napi, skb);
 
 	return;
 
@@ -320,7 +325,6 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	void *sgt_buf = NULL;
-	void *hwa;
 	dma_addr_t addr;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	struct dpaa2_sg_entry *sgt;
@@ -330,6 +334,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	int num_sg;
 	int num_dma_bufs;
 	struct dpaa2_eth_swa *swa;
+	struct dpaa2_fas *fas;
 
 	/* Create and map scatterlist.
 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
@@ -345,7 +350,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 
 	sg_init_table(scl, nr_frags + 1);
 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
-	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 	if (unlikely(!num_dma_bufs)) {
 		err = -ENOMEM;
 		goto dma_map_sg_failed;
@@ -366,8 +371,8 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
 	 * field from the hardware annotation area
 	 */
-	hwa = sgt_buf + priv->buf_layout.private_data_size;
-	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+	fas = dpaa2_get_fas(sgt_buf);
+	memset(fas, 0, DPAA2_FAS_SIZE);
 
 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
 
@@ -396,7 +401,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	swa->num_dma_bufs = num_dma_bufs;
 
 	/* Separately map the SGT buffer */
-	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
+	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, addr))) {
 		err = -ENOMEM;
 		goto dma_map_single_failed;
@@ -413,7 +418,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 dma_map_single_failed:
 	kfree(sgt_buf);
 sgt_buf_alloc_failed:
-	dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 dma_map_sg_failed:
 	kfree(scl);
 	return err;
@@ -426,7 +431,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u8 *buffer_start;
-	void *hwa;
+	struct dpaa2_fas *fas;
 	struct sk_buff **skbh;
 	dma_addr_t addr;
 
@@ -439,8 +444,8 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
 	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
 	 * field from the hardware annotation area
 	 */
-	hwa = buffer_start + priv->buf_layout.private_data_size;
-	memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+	fas = dpaa2_get_fas(buffer_start);
+	memset(fas, 0, DPAA2_FAS_SIZE);
 
 	/* Store a backpointer to the skb at the beginning of the buffer
 	 * (in the private data area) such that we can release it
@@ -451,7 +456,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
 
 	addr = dma_map_single(dev, buffer_start,
 			      skb_tail_pointer(skb) - buffer_start,
-			      DMA_TO_DEVICE);
+			      DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(dev, addr)))
 		return -ENOMEM;
 
@@ -490,7 +495,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 	struct dpaa2_fas *fas;
 
 	fd_addr = dpaa2_fd_get_addr(fd);
-	skbh = phys_to_virt(fd_addr);
+	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
+	fas = dpaa2_get_fas(skbh);
 
 	if (fd_format == dpaa2_fd_single) {
 		skb = *skbh;
@@ -500,7 +506,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		 */
 		dma_unmap_single(dev, fd_addr,
 				 skb_tail_pointer(skb) - buffer_start,
-				 DMA_TO_DEVICE);
+				 DMA_BIDIRECTIONAL);
 	} else if (fd_format == dpaa2_fd_sg) {
 		swa = (struct dpaa2_eth_swa *)skbh;
 		skb = swa->skb;
@@ -509,13 +515,13 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		num_dma_bufs = swa->num_dma_bufs;
 
 		/* Unmap the scatterlist */
-		dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
 		kfree(scl);
 
 		/* Unmap the SGT buffer */
 		unmap_size = priv->tx_data_offset +
 		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
-		dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
+		dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
 	} else {
 		/* Unsupported format, mark it as errored and give up */
 		if (status)
@@ -527,11 +533,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 	 * buffer but before we free it. The caller function is responsible
 	 * for checking the status value.
 	 */
-	if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
-		fas = (struct dpaa2_fas *)
-			((void *)skbh + priv->buf_layout.private_data_size);
+	if (status)
 		*status = le32_to_cpu(fas->status);
-	}
 
 	/* Free SGT buffer kmalloc'ed on tx */
 	if (fd_format != dpaa2_fd_single)
@@ -541,7 +544,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 	dev_kfree_skb(skb);
 }
 
-static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 {
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	struct dpaa2_fd fd;
@@ -634,6 +637,8 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa2_eth_drv_stats *percpu_extras;
 	u32 status = 0;
+	u32 fd_errors;
+	bool has_fas_errors = false;
 
 	/* Tracing point */
 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
@@ -642,13 +647,31 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 	percpu_extras->tx_conf_frames++;
 	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
 
-	free_tx_fd(priv, fd, &status);
-
-	if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
-		percpu_stats = this_cpu_ptr(priv->percpu_stats);
-		/* Tx-conf logically pertains to the egress path. */
-		percpu_stats->tx_errors++;
+	/* Check frame errors in the FD field */
+	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
+	if (unlikely(fd_errors)) {
+		/* We only check error bits in the FAS field if corresponding
+		 * FAERR bit is set in FD and the FAS field is marked as valid
+		 */
+		has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
+				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
+		if (net_ratelimit())
+			netdev_dbg(priv->net_dev, "TX frame FD error: %x08\n",
+				   fd_errors);
 	}
+
+	free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
+
+	if (likely(!fd_errors))
+		return;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	/* Tx-conf logically pertains to the egress path. */
+	percpu_stats->tx_errors++;
+
+	if (has_fas_errors && net_ratelimit())
+		netdev_dbg(priv->net_dev, "TX frame FAS error: %x08\n",
+			   status & DPAA2_FAS_TX_ERR_MASK);
 }
 
 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
@@ -794,7 +817,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 	int ret, i;
 
 	do {
-		ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
+		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
 					       buf_array, count);
 		if (ret < 0) {
 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
@@ -802,10 +825,11 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 		}
 		for (i = 0; i < ret; i++) {
 			/* Same logic as on regular Rx path */
+			vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
+						   buf_array[i]);
 			dma_unmap_single(dev, buf_array[i],
 					 DPAA2_ETH_RX_BUF_SIZE,
 					 DMA_FROM_DEVICE);
-			vaddr = phys_to_virt(buf_array[i]);
 			skb_free_frag(vaddr);
 		}
 	} while (ret);
@@ -890,7 +914,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 			break;
 
 		/* Refill pool if appropriate */
-		refill_pool(priv, ch, priv->dpbp_attrs.bpid);
+		refill_pool(priv, ch, priv->bpid);
 
 		store_cleaned = consume_frames(ch);
 		cleaned += store_cleaned;
@@ -964,7 +988,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
 		netif_carrier_off(priv->net_dev);
 	}
 
-	netdev_info(priv->net_dev, "Link Event: state %s",
+	netdev_info(priv->net_dev, "Link Event: state %s\n",
 		    state.up ? "up" : "down");
 
 	return 0;
@@ -975,14 +999,14 @@ static int dpaa2_eth_open(struct net_device *net_dev)
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	int err;
 
-	err = seed_pool(priv, priv->dpbp_attrs.bpid);
+	err = seed_pool(priv, priv->bpid);
 	if (err) {
 		/* Not much to do; the buffer pool, though not filled up,
 		 * may still contain some buffers which would enable us
 		 * to limp on.
 		 */
 		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
-			   priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
+			   priv->dpbp_dev->obj_desc.id, priv->bpid);
 	}
 
 	/* We'll only start the txqs when the link is actually ready; make sure
@@ -1151,8 +1175,8 @@ static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
 /** Fill in counters maintained by the GPP driver. These may be different from
  * the hardware counters obtained by ethtool.
  */
-void dpaa2_eth_get_stats(struct net_device *net_dev,
-			 struct rtnl_link_stats64 *stats)
+static void dpaa2_eth_get_stats(struct net_device *net_dev,
+				struct rtnl_link_stats64 *stats)
 {
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	struct rtnl_link_stats64 *percpu_stats;
@@ -1502,6 +1526,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 		if (!channel) {
 			dev_info(dev,
 				 "No affine channel for cpu %d and above\n", i);
+			err = -ENODEV;
 			goto err_alloc_ch;
 		}
 
@@ -1516,10 +1541,13 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 		/* Register the new context */
 		err = dpaa2_io_service_register(NULL, nctx);
 		if (err) {
-			dev_info(dev, "No affine DPIO for cpu %d\n", i);
+			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
 			/* If no affine DPIO for this core, there's probably
-			 * none available for next cores either.
+			 * none available for next cores either. Signal we want
+			 * to retry later, in case the DPIO devices weren't
+			 * probed yet.
 			 */
+			err = -EPROBE_DEFER;
 			goto err_service_reg;
 		}
 
@@ -1557,7 +1585,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
 err_alloc_ch:
 	if (cpumask_empty(&priv->dpio_cpumask)) {
 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
-		return -ENODEV;
+		return err;
 	}
 
 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1662,6 +1690,7 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
 	int err;
 	struct fsl_mc_device *dpbp_dev;
 	struct device *dev = priv->net_dev->dev.parent;
+	struct dpbp_attr dpbp_attrs;
 
 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
 				     &dpbp_dev);
@@ -1679,6 +1708,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
 		goto err_open;
 	}
 
+	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
+	if (err) {
+		dev_err(dev, "dpbp_reset() failed\n");
+		goto err_reset;
+	}
+
 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
 	if (err) {
 		dev_err(dev, "dpbp_enable() failed\n");
@@ -1686,17 +1721,19 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
 	}
 
 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
-				  &priv->dpbp_attrs);
+				  &dpbp_attrs);
 	if (err) {
 		dev_err(dev, "dpbp_get_attributes() failed\n");
 		goto err_get_attr;
 	}
+	priv->bpid = dpbp_attrs.bpid;
 
 	return 0;
 
 err_get_attr:
 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
 err_enable:
+err_reset:
 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
 err_open:
 	fsl_mc_object_free(dpbp_dev);
@@ -1718,15 +1755,14 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 	struct device *dev = &ls_dev->dev;
 	struct dpaa2_eth_priv *priv;
 	struct net_device *net_dev;
+	struct dpni_buffer_layout buf_layout = {0};
 	int err;
 
 	net_dev = dev_get_drvdata(dev);
 	priv = netdev_priv(net_dev);
 
-	priv->dpni_id = ls_dev->obj_desc.id;
-
 	/* get a handle for the DPNI object */
-	err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
+	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
 	if (err) {
 		dev_err(dev, "dpni_open() failed\n");
 		goto err_open;
@@ -1750,35 +1786,35 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 
 	/* Configure buffer layouts */
 	/* rx buffer */
-	priv->buf_layout.pass_parser_result = true;
-	priv->buf_layout.pass_frame_status = true;
-	priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
-	priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
-	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
-				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
-				   DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
+	buf_layout.pass_parser_result = true;
+	buf_layout.pass_frame_status = true;
+	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+	buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			     DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-				     DPNI_QUEUE_RX, &priv->buf_layout);
+				     DPNI_QUEUE_RX, &buf_layout);
 	if (err) {
 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
 		goto err_buf_layout;
 	}
 
 	/* tx buffer */
-	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			     DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-				     DPNI_QUEUE_TX, &priv->buf_layout);
+				     DPNI_QUEUE_TX, &buf_layout);
 	if (err) {
 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
 		goto err_buf_layout;
 	}
 
 	/* tx-confirm buffer */
-	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
-				     DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
+				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
 	if (err) {
 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
 		goto err_buf_layout;
@@ -1795,7 +1831,7 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 	}
 
 	if ((priv->tx_data_offset % 64) != 0)
-		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
+		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
 			 priv->tx_data_offset);
 
 	/* Accommodate software annotation space (SWA) */
@@ -1947,7 +1983,7 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
 /* Set RX hash options
  * flags is a combination of RXH_ bits
  */
-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 {
 	struct device *dev = net_dev->dev.parent;
 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -1958,8 +1994,8 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 	int err = 0;
 
 	if (!dpaa2_eth_hash_enabled(priv)) {
-		dev_err(dev, "Hashing support is not enabled\n");
-		return -EOPNOTSUPP;
+		dev_dbg(dev, "Hashing support is not enabled\n");
+		return 0;
 	}
 
 	memset(&cls_cfg, 0, sizeof(cls_cfg));
@@ -1985,23 +2021,23 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 		priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
 	}
 
-	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
+	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
 	if (!dma_mem)
 		return -ENOMEM;
 
 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
 	if (err) {
-		dev_err(dev, "dpni_prepare_key_cfg error %d", err);
+		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
 		goto err_prep_key;
 	}
 
 	memset(&dist_cfg, 0, sizeof(dist_cfg));
 
 	/* Prepare for setting the rx dist */
-	dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
+	dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
 					       DPAA2_CLASSIFIER_DMA_SIZE,
 					       DMA_TO_DEVICE);
-	if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
+	if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
 		dev_err(dev, "DMA mapping failed\n");
 		err = -ENOMEM;
 		goto err_dma_map;
@@ -2011,7 +2047,7 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
 
 	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-	dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
+	dma_unmap_single(dev, dist_cfg.key_cfg_iova,
 			 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
 	if (err)
 		dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
@@ -2052,7 +2088,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
 		netdev_err(net_dev, "Failed to configure hashing\n");
 
 	/* Configure handling of error frames */
-	err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
+	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
 	err_cfg.set_frame_annotation = 1;
 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
@@ -2125,15 +2161,12 @@ static void free_rings(struct dpaa2_eth_priv *priv)
 		dpaa2_io_store_destroy(priv->channel[i]->store);
 }
 
-static int netdev_init(struct net_device *net_dev)
+static int set_mac_addr(struct dpaa2_eth_priv *priv)
 {
-	int err;
+	struct net_device *net_dev = priv->net_dev;
 	struct device *dev = net_dev->dev.parent;
-	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
-	u8 bcast_addr[ETH_ALEN];
-
-	net_dev->netdev_ops = &dpaa2_eth_ops;
+	int err;
 
 	/* Get firmware address, if any */
 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
@@ -2146,7 +2179,7 @@ static int netdev_init(struct net_device *net_dev)
 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
 					dpni_mac_addr);
 	if (err) {
-		dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
+		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
 		return err;
 	}
 
@@ -2164,18 +2197,19 @@ static int netdev_init(struct net_device *net_dev)
 		}
 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
-		/* Fills in net_dev->dev_addr, as required by
-		 * register_netdevice()
+		/* No MAC address configured, fill in net_dev->dev_addr
+		 * with a random one
 		 */
 		eth_hw_addr_random(net_dev);
-		/* Make the user aware, without cluttering the boot log */
-		dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
+		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
 						net_dev->dev_addr);
 		if (err) {
-			dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
+			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
 			return err;
 		}
+
 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
 		 * practical purposes, this will be our "permanent" mac address,
 		 * at least until the next reboot. This move will also permit
@@ -2189,14 +2223,29 @@ static int netdev_init(struct net_device *net_dev)
 		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
 	}
 
-	/* Explicitly add the broadcast address to the MAC filtering table;
-	 * the MC won't do that for us.
-	 */
+	return 0;
+}
+
+static int netdev_init(struct net_device *net_dev)
+{
+	struct device *dev = net_dev->dev.parent;
+	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+	u8 bcast_addr[ETH_ALEN];
+	u8 num_queues;
+	int err;
+
+	net_dev->netdev_ops = &dpaa2_eth_ops;
+
+	err = set_mac_addr(priv);
+	if (err)
+		return err;
+
+	/* Explicitly add the broadcast address to the MAC filtering table */
 	eth_broadcast_addr(bcast_addr);
 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
 	if (err) {
-		dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
-		/* Won't return an error; at least, we'd have egress traffic */
+		dev_err(dev, "dpni_add_mac_addr() failed\n");
+		return err;
 	}
 
 	/* Reserve enough space to align buffer as per hardware requirement;
@@ -2208,6 +2257,19 @@ static int netdev_init(struct net_device *net_dev)
 	net_dev->min_mtu = 68;
 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
 
+	/* Set actual number of queues in the net device */
+	num_queues = dpaa2_eth_queue_count(priv);
+	err = netif_set_real_num_tx_queues(net_dev, num_queues);
+	if (err) {
+		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
+		return err;
+	}
+	err = netif_set_real_num_rx_queues(net_dev, num_queues);
+	if (err) {
+		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
+		return err;
+	}
+
 	/* Our .ndo_init will be called herein */
 	err = register_netdev(net_dev);
 	if (err < 0) {
@@ -2241,7 +2303,7 @@ static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
 
 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
 {
-	u32 status, clear = 0;
+	u32 status = 0, clear = 0;
 	struct device *dev = (struct device *)arg;
 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
 	struct net_device *net_dev = dev_get_drvdata(dev);
@@ -2250,7 +2312,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
 				  DPNI_IRQ_INDEX, &status);
 	if (unlikely(err)) {
-		netdev_err(net_dev, "Can't get irq status (err %d)", err);
+		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
 		clear = 0xffffffff;
 		goto out;
 	}
@@ -2284,21 +2346,21 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					dev_name(&ls_dev->dev), &ls_dev->dev);
 	if (err < 0) {
-		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
+		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
 		goto free_mc_irq;
 	}
 
 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
 	if (err < 0) {
-		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
+		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
 		goto free_irq;
 	}
 
 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
 				  DPNI_IRQ_INDEX, 1);
 	if (err < 0) {
-		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
+		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
 		goto free_irq;
 	}
 
@@ -2358,6 +2420,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	priv = netdev_priv(net_dev);
 	priv->net_dev = net_dev;
 
+	priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
 	/* Obtain a MC portal */
 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
 				     &priv->mc_io);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index c67cced..e6d28a2 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -120,6 +120,19 @@ struct dpaa2_eth_swa {
 #define DPAA2_FD_FRC_FASWOV		0x0800
 #define DPAA2_FD_FRC_FAICFDV		0x0400
 
+/* Error bits in FD CTRL */
+#define DPAA2_FD_CTRL_UFD		0x00000004
+#define DPAA2_FD_CTRL_SBE		0x00000008
+#define DPAA2_FD_CTRL_FSE		0x00000010
+#define DPAA2_FD_CTRL_FAERR		0x00000020
+
+#define DPAA2_FD_RX_ERR_MASK		(DPAA2_FD_CTRL_SBE	| \
+					 DPAA2_FD_CTRL_FAERR)
+#define DPAA2_FD_TX_ERR_MASK		(DPAA2_FD_CTRL_UFD	| \
+					 DPAA2_FD_CTRL_SBE	| \
+					 DPAA2_FD_CTRL_FSE	| \
+					 DPAA2_FD_CTRL_FAERR)
+
 /* Annotation bits in FD CTRL */
 #define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128 */
 #define DPAA2_FD_CTRL_PTA		0x00800000
@@ -139,6 +152,12 @@ struct dpaa2_fas {
 #define DPAA2_FAS_OFFSET		0
 #define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))
 
+/* Accessors for the hardware annotation fields that we use */
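+/* The hardware annotation (HWA) area follows the software annotation */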
+#define dpaa2_get_hwa(buf_addr) \
+	((void *)(buf_addr) + DPAA2_ETH_SWA_SIZE)
+#define dpaa2_get_fas(buf_addr) \
+	(struct dpaa2_fas *)(dpaa2_get_hwa(buf_addr) + DPAA2_FAS_OFFSET)
+
 /* Error and status bits in the frame annotation status word */
 /* Debug frame, otherwise supposed to be discarded */
 #define DPAA2_FAS_DISC			0x80000000
@@ -171,7 +190,7 @@ struct dpaa2_fas {
 /* L4 csum error */
 #define DPAA2_FAS_L4CE			0x00000001
 /* Possible errors on the ingress path */
-#define DPAA2_ETH_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
+#define DPAA2_FAS_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
 					 DPAA2_FAS_EOFHE	| \
 					 DPAA2_FAS_MNLE		| \
 					 DPAA2_FAS_TIDE		| \
@@ -185,7 +204,7 @@ struct dpaa2_fas {
 					 DPAA2_FAS_L3CE		| \
 					 DPAA2_FAS_L4CE)
 /* Tx errors */
-#define DPAA2_ETH_TXCONF_ERR_MASK	(DPAA2_FAS_KSE		| \
+#define DPAA2_FAS_TX_ERR_MASK		(DPAA2_FAS_KSE		| \
 					 DPAA2_FAS_EOFHE	| \
 					 DPAA2_FAS_MNLE		| \
 					 DPAA2_FAS_TIDE)
@@ -291,16 +310,12 @@ struct dpaa2_eth_priv {
 	u8 num_channels;
 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
 
-	int dpni_id;
 	struct dpni_attr dpni_attrs;
-	/* Insofar as the MC is concerned, we're using one layout on all 3 types
-	 * of buffers (Rx, Tx, Tx-Conf).
-	 */
-	struct dpni_buffer_layout buf_layout;
 	u16 tx_data_offset;
 
 	struct fsl_mc_device *dpbp_dev;
-	struct dpbp_attr dpbp_attrs;
+	u16 bpid;
+	struct iommu_domain *iommu_domain;
 
 	u16 tx_qdid;
 	struct fsl_mc_io *mc_io;
@@ -338,8 +353,6 @@ struct dpaa2_eth_priv {
 extern const struct ethtool_ops dpaa2_ethtool_ops;
 extern const char dpaa2_eth_drv_version[];
 
-int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-
 static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
 {
 	return priv->dpni_attrs.num_queues;
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index dd0cffa..5312edc 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -34,41 +34,41 @@
 #include "dpaa2-eth.h"
 
 /* To be kept in sync with DPNI statistics */
-char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
-	"rx frames",
-	"rx bytes",
-	"rx mcast frames",
-	"rx mcast bytes",
-	"rx bcast frames",
-	"rx bcast bytes",
-	"tx frames",
-	"tx bytes",
-	"tx mcast frames",
-	"tx mcast bytes",
-	"tx bcast frames",
-	"tx bcast bytes",
-	"rx filtered frames",
-	"rx discarded frames",
-	"rx nobuffer discards",
-	"tx discarded frames",
-	"tx confirmed frames",
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+	"[hw] rx frames",
+	"[hw] rx bytes",
+	"[hw] rx mcast frames",
+	"[hw] rx mcast bytes",
+	"[hw] rx bcast frames",
+	"[hw] rx bcast bytes",
+	"[hw] tx frames",
+	"[hw] tx bytes",
+	"[hw] tx mcast frames",
+	"[hw] tx mcast bytes",
+	"[hw] tx bcast frames",
+	"[hw] tx bcast bytes",
+	"[hw] rx filtered frames",
+	"[hw] rx discarded frames",
+	"[hw] rx nobuffer discards",
+	"[hw] tx discarded frames",
+	"[hw] tx confirmed frames",
 };
 
 #define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)
 
-char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
 	/* per-cpu stats */
-	"tx conf frames",
-	"tx conf bytes",
-	"tx sg frames",
-	"tx sg bytes",
-	"rx sg frames",
-	"rx sg bytes",
-	"enqueue portal busy",
+	"[drv] tx conf frames",
+	"[drv] tx conf bytes",
+	"[drv] tx sg frames",
+	"[drv] tx sg bytes",
+	"[drv] rx sg frames",
+	"[drv] rx sg bytes",
+	"[drv] enqueue portal busy",
 	/* Channel stats */
-	"dequeue portal busy",
-	"channel pull errors",
-	"cdan",
+	"[drv] dequeue portal busy",
+	"[drv] channel pull errors",
+	"[drv] cdan",
 };
 
 #define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -94,7 +94,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
 
 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
 	if (err) {
-		netdev_err(net_dev, "ERROR %d getting link state", err);
+		netdev_err(net_dev, "ERROR %d getting link state\n", err);
 		goto out;
 	}
 
@@ -147,7 +147,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
 		/* ethtool will be loud enough if we return an error; no point
 		 * in putting our own error message on the console by default
 		 */
-		netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
+		netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
 
 	return err;
 }
@@ -206,7 +206,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
 					  j, &dpni_stats);
 		if (err != 0)
-			netdev_warn(net_dev, "dpni_get_stats(%d) failed", j);
+			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
 		switch (j) {
 		case 0:
 			num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
index cea46ed..5b9d442 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -30,8 +30,9 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../../fsl-mc/include/mc-sys.h"
-#include "../../fsl-mc/include/mc-cmd.h"
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include "../../fsl-mc/include/mc.h"
 #include "dpni.h"
 #include "dpni-cmd.h"
 
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
index 179536a..524eda1 100644
--- a/drivers/staging/fsl-mc/README.txt
+++ b/drivers/staging/fsl-mc/README.txt
@@ -131,9 +131,7 @@
 
     DPRCs can be defined statically and populated with objects
     via a config file passed to the MC when firmware starts
-    it.  There is also a Linux user space tool called "restool"
-    that can be used to create/destroy containers and objects
-    dynamically.
+    it.
 
 -DPAA2 Objects for an Ethernet Network Interface
 
@@ -322,6 +320,8 @@
        -creates an MSI IRQ domain
        -doing a 'device add' to expose the 'root' DPRC, in turn triggering
         a bind of the root DPRC to the DPRC driver
+    The binding for the MC-bus device-tree node can be consulted here:
+        Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
 
     DPRC driver
     -----------
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index 659eccf..6df407e 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -11,7 +11,6 @@
 		      mc-sys.o \
 		      mc-io.o \
 		      dprc.o \
-		      dpmng.o \
 		      dprc-driver.o \
 		      fsl-mc-allocator.o \
 		      fsl-mc-msi.o \
diff --git a/drivers/staging/fsl-mc/bus/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
index 8aa6545..5904836 100644
--- a/drivers/staging/fsl-mc/bus/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -40,7 +40,7 @@
 #define DPBP_CMD_BASE_VERSION			1
 #define DPBP_CMD_ID_OFFSET			4
 
-#define DPBP_CMD(id)	((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+#define DPBP_CMD(id)	(((id) << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
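+/* Each command ID below embeds the base version in its low 4 bits */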
 
 /* Command IDs */
 #define DPBP_CMDID_CLOSE		DPBP_CMD(0x800)
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index d9e450a..363730a 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -29,8 +29,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
+#include <linux/kernel.h>
+#include "../include/mc.h"
 #include "../include/dpbp.h"
 
 #include "dpbp-cmd.h"
diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c
index eb71357..ca1da85 100644
--- a/drivers/staging/fsl-mc/bus/dpcon.c
+++ b/drivers/staging/fsl-mc/bus/dpcon.c
@@ -29,8 +29,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
+#include <linux/kernel.h>
+#include "../include/mc.h"
 #include "../include/dpcon.h"
 
 #include "dpcon-cmd.h"
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
index e5d6674..f809682 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -260,9 +260,9 @@ int dpaa2_io_service_register(struct dpaa2_io *d,
 
 	/* Enable the generation of CDAN notifications */
 	if (ctx->is_cdan)
-		qbman_swp_CDAN_set_context_enable(d->swp,
-						  (u16)ctx->id,
-						  ctx->qman64);
+		return qbman_swp_CDAN_set_context_enable(d->swp,
+							 (u16)ctx->id,
+							 ctx->qman64);
 	return 0;
 }
 EXPORT_SYMBOL(dpaa2_io_service_register);
@@ -514,7 +514,7 @@ EXPORT_SYMBOL(dpaa2_io_service_acquire);
  * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
  * The 'dpaa2_io_store' returned is a DPIO service managed object.
  *
- * Return pointer to dpaa2_io_store struct for successfuly created storage
+ * Return pointer to dpaa2_io_store struct for successfully created storage
  * memory, or NULL on error.
  */
 struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c
index d81e0232..00eb221 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
@@ -30,8 +30,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../../include/mc-sys.h"
-#include "../../include/mc-cmd.h"
+#include <linux/kernel.h>
+#include "../../include/mc.h"
 
 #include "dpio.h"
 #include "dpio-cmd.h"
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index 384a13d..861b2a7 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -40,7 +40,7 @@
 #define DPMCP_CMD_BASE_VERSION		1
 #define DPMCP_CMD_ID_OFFSET		4
 
-#define DPMCP_CMD(id)	((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+#define DPMCP_CMD(id)	(((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
 
 /* Command IDs */
 #define DPMCP_CMDID_CLOSE		DPMCP_CMD(0x800)
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index ad4c8b4..eea42f6 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -29,8 +29,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
+#include <linux/kernel.h>
+#include "../include/mc.h"
 
 #include "dpmcp.h"
 #include "dpmcp-cmd.h"
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index cdddfb8..d1f04ac 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -44,7 +44,7 @@
 #define DPMNG_CMD_BASE_VERSION		1
 #define DPMNG_CMD_ID_OFFSET		4
 
-#define DPMNG_CMD(id)	((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+#define DPMNG_CMD(id)	(((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
 
 /* Command IDs */
 #define DPMNG_CMDID_GET_VERSION		DPMNG_CMD(0x831)
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
deleted file mode 100644
index ad5d5bb..0000000
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/dpmng.h"
-
-#include "dpmng-cmd.h"
-
-/**
- * mc_get_version() - Retrieves the Management Complex firmware
- *			version information
- * @mc_io:		Pointer to opaque I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @mc_ver_info:	Returned version information structure
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int mc_get_version(struct fsl_mc_io *mc_io,
-		   u32 cmd_flags,
-		   struct mc_version *mc_ver_info)
-{
-	struct mc_command cmd = { 0 };
-	struct dpmng_rsp_get_version *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
-					  cmd_flags,
-					  0);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
-	mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
-	mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
-	mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
-
-	return 0;
-}
-EXPORT_SYMBOL(mc_get_version);
-
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index e9fdca4..d9b2dcd 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -48,7 +48,7 @@
 #define DPRC_CMD_BASE_VERSION			1
 #define DPRC_CMD_ID_OFFSET			4
 
-#define DPRC_CMD(id)	((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD(id)	(((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
 
 /* Command IDs */
 #define DPRC_CMDID_CLOSE                        DPRC_CMD(0x800)
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index e4b0341..4cdd190 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -13,27 +13,30 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
-#include "../include/mc-bus.h"
-#include "../include/mc-sys.h"
+#include "../include/mc.h"
 
 #include "dprc-cmd.h"
 #include "fsl-mc-private.h"
 
 #define FSL_MC_DPRC_DRIVER_NAME    "fsl_mc_dprc"
 
-#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \
-	(strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \
-	 (_mc_dev)->obj_desc.id == (_obj_desc)->id)
-
-struct dprc_child_objs {
+struct fsl_mc_child_objs {
 	int child_count;
-	struct dprc_obj_desc *child_array;
+	struct fsl_mc_obj_desc *child_array;
 };
 
+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
+				struct fsl_mc_obj_desc *obj_desc)
+{
+	return mc_dev->obj_desc.id == obj_desc->id &&
+	       !strcmp(mc_dev->obj_desc.type, obj_desc->type);
+}
+
 static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
 {
 	int i;
-	struct dprc_child_objs *objs;
+	struct fsl_mc_child_objs *objs;
 	struct fsl_mc_device *mc_dev;
 
 	WARN_ON(!dev);
@@ -42,10 +45,10 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
 	objs = data;
 
 	for (i = 0; i < objs->child_count; i++) {
-		struct dprc_obj_desc *obj_desc = &objs->child_array[i];
+		struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
 
 		if (strlen(obj_desc->type) != 0 &&
-		    FSL_MC_DEVICE_MATCH(mc_dev, obj_desc))
+		    fsl_mc_device_match(mc_dev, obj_desc))
 			break;
 	}
 
@@ -76,7 +79,7 @@ static int __fsl_mc_device_remove(struct device *dev, void *data)
  * been dynamically removed in the physical DPRC.
  */
 static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
-				struct dprc_obj_desc *obj_desc_array,
+				struct fsl_mc_obj_desc *obj_desc_array,
 				int num_child_objects_in_mc)
 {
 	if (num_child_objects_in_mc != 0) {
@@ -84,7 +87,7 @@ static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
 		 * Remove child objects that are in the DPRC in Linux,
 		 * but not in the MC:
 		 */
-		struct dprc_child_objs objs;
+		struct fsl_mc_child_objs objs;
 
 		objs.child_count = num_child_objects_in_mc;
 		objs.child_array = obj_desc_array;
@@ -102,13 +105,13 @@ static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
 
 static int __fsl_mc_device_match(struct device *dev, void *data)
 {
-	struct dprc_obj_desc *obj_desc = data;
+	struct fsl_mc_obj_desc *obj_desc = data;
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
-	return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc);
+	return fsl_mc_device_match(mc_dev, obj_desc);
 }
 
-static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc
+static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
 								*obj_desc,
 						  struct fsl_mc_device
 								*mc_bus_dev)
@@ -133,16 +136,16 @@ static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc
  * device is unbound from the corresponding device driver.
  */
 static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
-				       struct dprc_obj_desc *obj_desc)
+				       struct fsl_mc_obj_desc *obj_desc)
 {
 	int error;
 	u32 plugged_flag_at_mc =
-			obj_desc->state & DPRC_OBJ_STATE_PLUGGED;
+			obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
 
 	if (plugged_flag_at_mc !=
-	    (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
+	    (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
 		if (plugged_flag_at_mc) {
-			mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED;
+			mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
 			error = device_attach(&mc_dev->dev);
 			if (error < 0) {
 				dev_err(&mc_dev->dev,
@@ -150,7 +153,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
 					error);
 			}
 		} else {
-			mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED;
+			mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
 			device_release_driver(&mc_dev->dev);
 		}
 	}
@@ -169,7 +172,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
  * in the physical DPRC.
  */
 static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
-				 struct dprc_obj_desc *obj_desc_array,
+				 struct fsl_mc_obj_desc *obj_desc_array,
 				 int num_child_objects_in_mc)
 {
 	int error;
@@ -177,7 +180,7 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
 
 	for (i = 0; i < num_child_objects_in_mc; i++) {
 		struct fsl_mc_device *child_dev;
-		struct dprc_obj_desc *obj_desc = &obj_desc_array[i];
+		struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
 
 		if (strlen(obj_desc->type) == 0)
 			continue;
@@ -217,14 +220,14 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
  * populated before they can get allocation requests from probe callbacks
  * of the device drivers for the non-allocatable devices.
  */
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-		      unsigned int *total_irq_count)
+static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+			     unsigned int *total_irq_count)
 {
 	int num_child_objects;
 	int dprc_get_obj_failures;
 	int error;
 	unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
-	struct dprc_obj_desc *child_obj_desc_array = NULL;
+	struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
 
 	error = dprc_get_obj_count(mc_bus_dev->mc_io,
 				   0,
@@ -251,7 +254,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
 		 */
 		dprc_get_obj_failures = 0;
 		for (i = 0; i < num_child_objects; i++) {
-			struct dprc_obj_desc *obj_desc =
+			struct fsl_mc_obj_desc *obj_desc =
 			    &child_obj_desc_array[i];
 
 			error = dprc_get_obj(mc_bus_dev->mc_io,
@@ -279,7 +282,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
 			if ((strcmp(obj_desc->type, "dpseci") == 0) &&
 			    (obj_desc->ver_major < 4))
 				obj_desc->flags |=
-					DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+					FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
 
 			irq_count += obj_desc->irq_count;
 			dev_dbg(&mc_bus_dev->dev,
@@ -306,7 +309,6 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dprc_scan_objects);
 
 /**
  * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
@@ -317,7 +319,7 @@ EXPORT_SYMBOL_GPL(dprc_scan_objects);
  * bus driver with the actual state of the MC by adding and removing
  * devices as appropriate.
  */
-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
 {
 	int error;
 	unsigned int irq_count;
@@ -353,7 +355,6 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
 	fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
 	return error;
 }
-EXPORT_SYMBOL_GPL(dprc_scan_container);
 
 /**
  * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
@@ -681,8 +682,8 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
 	}
 
 	if (major_ver < DPRC_MIN_VER_MAJOR ||
-	   (major_ver == DPRC_MIN_VER_MAJOR &&
-	    minor_ver < DPRC_MIN_VER_MINOR)) {
+	    (major_ver == DPRC_MIN_VER_MAJOR &&
+	     minor_ver < DPRC_MIN_VER_MINOR)) {
 		dev_err(&mc_dev->dev,
 			"ERROR: DPRC version %d.%d not supported\n",
 			major_ver, minor_ver);
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index fcf7b47..6f6c65a 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -29,9 +29,9 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
-#include "../include/dprc.h"
+#include <linux/kernel.h>
+#include "../include/mc.h"
+#include "dprc.h"
 
 #include "dprc-cmd.h"
 
@@ -496,7 +496,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
 		 u32 cmd_flags,
 		 u16 token,
 		 int obj_index,
-		 struct dprc_obj_desc *obj_desc)
+		 struct fsl_mc_obj_desc *obj_desc)
 {
 	struct mc_command cmd = { 0 };
 	struct dprc_cmd_get_obj *cmd_params;
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/bus/dprc.h
similarity index 84%
rename from drivers/staging/fsl-mc/include/dprc.h
rename to drivers/staging/fsl-mc/bus/dprc.h
index dc985cc..21295e4 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/bus/dprc.h
@@ -33,14 +33,13 @@
 #ifndef _FSL_DPRC_H
 #define _FSL_DPRC_H
 
-#include "mc-cmd.h"
-
 /*
  * Data Path Resource Container API
  * Contains DPRC API for managing and querying DPAA resources
  */
 
 struct fsl_mc_io;
+struct fsl_mc_obj_desc;
 
 int dprc_open(struct fsl_mc_io *mc_io,
 	      u32 cmd_flags,
@@ -51,7 +50,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
 	       u32 cmd_flags,
 	       u16 token);
 
-
 /* IRQ */
 
 /* IRQ index */
@@ -170,59 +168,18 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
 		       u16 token,
 		       int *obj_count);
 
-/* Objects Attributes Flags */
-
-/* Opened state - Indicates that an object is open by at least one owner */
-#define DPRC_OBJ_STATE_OPEN		0x00000001
-/* Plugged state - Indicates that the object is plugged */
-#define DPRC_OBJ_STATE_PLUGGED		0x00000002
-
-/**
- * Shareability flag - Object flag indicating no memory shareability.
- * the object generates memory accesses that are non coherent with other
- * masters;
- * user is responsible for proper memory handling through IOMMU configuration.
- */
-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY	0x0001
-
-/**
- * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
- * @type: Type of object: NULL terminated string
- * @id: ID of logical object resource
- * @vendor: Object vendor identifier
- * @ver_major: Major version number
- * @ver_minor:  Minor version number
- * @irq_count: Number of interrupts supported by the object
- * @region_count: Number of mappable regions supported by the object
- * @state: Object state: combination of DPRC_OBJ_STATE_ states
- * @label: Object label
- * @flags: Object's flags
- */
-struct dprc_obj_desc {
-	char type[16];
-	int id;
-	u16 vendor;
-	u16 ver_major;
-	u16 ver_minor;
-	u8 irq_count;
-	u8 region_count;
-	u32 state;
-	char label[16];
-	u16 flags;
-};
-
 int dprc_get_obj(struct fsl_mc_io *mc_io,
 		 u32 cmd_flags,
 		 u16 token,
 		 int obj_index,
-		 struct dprc_obj_desc *obj_desc);
+		 struct fsl_mc_obj_desc *obj_desc);
 
 int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
 		      u32 cmd_flags,
 		      u16 token,
 		      char *obj_type,
 		      int obj_id,
-		      struct dprc_obj_desc *obj_desc);
+		      struct fsl_mc_obj_desc *obj_desc);
 
 int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
 		     u32 cmd_flags,
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index ce07096c..b37a6f4 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -10,17 +10,16 @@
 
 #include <linux/module.h>
 #include <linux/msi.h>
-#include "../include/mc-bus.h"
-#include "../include/mc-sys.h"
+#include "../include/mc.h"
 
-#include "dpbp-cmd.h"
-#include "dpcon-cmd.h"
 #include "fsl-mc-private.h"
 
-#define FSL_MC_IS_ALLOCATABLE(_obj_type) \
-	(strcmp(_obj_type, "dpbp") == 0 || \
-	 strcmp(_obj_type, "dpmcp") == 0 || \
-	 strcmp(_obj_type, "dpcon") == 0)
+static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
+{
+	return !strcmp(obj_type, "dpbp") ||
+	       !strcmp(obj_type, "dpmcp") ||
+	       !strcmp(obj_type, "dpcon");
+}
 
 /**
  * fsl_mc_resource_pool_add_device - add allocatable object to a resource
@@ -44,7 +43,7 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
 
 	if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
 		goto out;
-	if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+	if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
 		goto out;
 	if (WARN_ON(mc_dev->resource))
 		goto out;
@@ -106,7 +105,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
 	struct fsl_mc_resource *resource;
 	int error = -EINVAL;
 
-	if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+	if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
 		goto out;
 
 	resource = mc_dev->resource;
@@ -586,7 +585,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
 	struct fsl_mc_bus *mc_bus;
 	int error;
 
-	if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+	if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
 		return -EINVAL;
 
 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -615,7 +614,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
 {
 	int error;
 
-	if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
+	if (WARN_ON(!fsl_mc_is_allocatable(mc_dev->obj_desc.type)))
 		return -EINVAL;
 
 	if (mc_dev->resource) {
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 3be5f25..19606e8 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -20,12 +20,10 @@
 #include <linux/bitops.h>
 #include <linux/msi.h>
 #include <linux/dma-mapping.h>
-#include "../include/mc-bus.h"
-#include "../include/dpmng.h"
-#include "../include/mc-sys.h"
 
 #include "fsl-mc-private.h"
 #include "dprc-cmd.h"
+#include "dpmng-cmd.h"
 
 /**
  * Default DMA mask for devices on a fsl-mc bus
@@ -61,6 +59,20 @@ struct fsl_mc_addr_translation_range {
 };
 
 /**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ *		backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ *		and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+	u32 major;
+	u32 minor;
+	u32 revision;
+};
+
+/**
  * fsl_mc_bus_match - device to driver matching callback
  * @dev: the fsl-mc device to match against
  * @drv: the device driver to search for matching fsl-mc object type
@@ -82,7 +94,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
 	 * If the object is not 'plugged' don't match.
 	 * Only exception is the root DPRC, which is a special case.
 	 */
-	if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 &&
+	if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
 	    !fsl_mc_is_root_dprc(&mc_dev->dev))
 		goto out;
 
@@ -239,10 +251,46 @@ void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
 EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
 
 /**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ *			version information
+ * @mc_io:		Pointer to opaque I/O object
+ * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info:	Returned version information structure
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+static int mc_get_version(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  struct mc_version *mc_ver_info)
+{
+	struct mc_command cmd = { 0 };
+	struct dpmng_rsp_get_version *rsp_params;
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+					  cmd_flags,
+					  0);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+	mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+	mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+	mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+	return 0;
+}
+
+/**
  * fsl_mc_get_root_dprc - function to traverse to the root dprc
  */
-void fsl_mc_get_root_dprc(struct device *dev,
-			  struct device **root_dprc_dev)
+static void fsl_mc_get_root_dprc(struct device *dev,
+				 struct device **root_dprc_dev)
 {
 	if (WARN_ON(!dev)) {
 		*root_dprc_dev = NULL;
@@ -254,7 +302,6 @@ void fsl_mc_get_root_dprc(struct device *dev,
 			*root_dprc_dev = (*root_dprc_dev)->parent;
 	}
 }
-EXPORT_SYMBOL_GPL(fsl_mc_get_root_dprc);
 
 static int get_dprc_attr(struct fsl_mc_io *mc_io,
 			 int container_id, struct dprc_attributes *attr)
@@ -339,7 +386,7 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
 	int i;
 	int error;
 	struct resource *regions;
-	struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc;
+	struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
 	struct device *parent_dev = mc_dev->dev.parent;
 	enum dprc_region_type mc_region_type;
 
@@ -420,15 +467,11 @@ bool fsl_mc_is_root_dprc(struct device *dev)
 static void fsl_mc_device_release(struct device *dev)
 {
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-	struct fsl_mc_bus *mc_bus = NULL;
 
 	kfree(mc_dev->regions);
 
 	if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
-		mc_bus = to_fsl_mc_bus(mc_dev);
-
-	if (mc_bus)
-		kfree(mc_bus);
+		kfree(to_fsl_mc_bus(mc_dev));
 	else
 		kfree(mc_dev);
 }
@@ -436,7 +479,7 @@ static void fsl_mc_device_release(struct device *dev)
 /**
  * Add a newly discovered fsl-mc device to be visible in Linux
  */
-int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
 		      struct fsl_mc_io *mc_io,
 		      struct device *parent_dev,
 		      struct fsl_mc_device **new_mc_dev)
@@ -538,7 +581,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 	}
 
 	/* Objects are coherent, unless 'no shareability' flag set. */
-	if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+	if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
 		arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
 
 	/*
@@ -559,10 +602,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 
 error_cleanup_dev:
 	kfree(mc_dev->regions);
-	if (mc_bus)
-		kfree(mc_bus);
-	else
-		kfree(mc_dev);
+	kfree(mc_bus);
+	kfree(mc_dev);
 
 	return error;
 }
@@ -644,10 +685,10 @@ static int get_mc_addr_translation_ranges(struct device *dev,
 	const __be32 *cell;
 
 	ret = parse_mc_ranges(dev,
-				&paddr_cells,
-				&mc_addr_cells,
-				&mc_size_cells,
-				&ranges_start);
+			      &paddr_cells,
+			      &mc_addr_cells,
+			      &mc_size_cells,
+			      &ranges_start);
 	if (ret < 0)
 		return ret;
 
@@ -693,7 +734,7 @@ static int get_mc_addr_translation_ranges(struct device *dev,
  */
 static int fsl_mc_bus_probe(struct platform_device *pdev)
 {
-	struct dprc_obj_desc obj_desc;
+	struct fsl_mc_obj_desc obj_desc;
 	int error;
 	struct fsl_mc *mc;
 	struct fsl_mc_device *mc_bus_dev = NULL;
@@ -752,7 +793,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 		goto error_cleanup_mc_io;
 	}
 
-	memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+	memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
 	error = dprc_get_api_version(mc_io, 0,
 				     &obj_desc.ver_major,
 				     &obj_desc.ver_minor);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index b8b2c86..c04a2f2 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -16,7 +16,6 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
-#include "../include/mc-bus.h"
 #include "fsl-mc-private.h"
 
 /*
@@ -170,7 +169,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
 
 	domain = msi_create_irq_domain(fwnode, info, parent);
 	if (domain)
-		domain->bus_token = DOMAIN_BUS_FSL_MC_MSI;
+		irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
 
 	return domain;
 }
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index 5c49c9d..62d3989 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -11,9 +11,56 @@
 #define _FSL_MC_PRIVATE_H_
 
 #include "../include/mc.h"
-#include "../include/mc-bus.h"
+#include "dprc.h"
+#include <linux/mutex.h>
 
-int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS	256
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+	enum fsl_mc_pool_type type;
+	int max_count;
+	int free_count;
+	struct mutex mutex;	/* serializes access to free_list */
+	struct list_head free_list;
+	struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ */
+struct fsl_mc_bus {
+	struct fsl_mc_device mc_dev;
+	struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+	struct fsl_mc_device_irq *irq_resources;
+	struct mutex scan_mutex;    /* serializes bus scanning */
+	struct dprc_attributes dprc_attr;
+};
+
+#define to_fsl_mc_bus(_mc_dev) \
+	container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
 				   struct fsl_mc_io *mc_io,
 				   struct device *parent_dev,
 				   struct fsl_mc_device **new_mc_dev);
@@ -28,6 +75,10 @@ int __init fsl_mc_allocator_driver_init(void);
 
 void fsl_mc_allocator_driver_exit(void);
 
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
 int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
 					  enum fsl_mc_pool_type pool_type,
 					  struct fsl_mc_resource
@@ -44,6 +95,14 @@ int __init its_fsl_mc_msi_init(void);
 
 void its_fsl_mc_msi_cleanup(void);
 
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+			   struct irq_domain **mc_msi_domain);
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+			     unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
 int __must_check fsl_create_mc_io(struct device *dev,
 				  phys_addr_t mc_portal_phys_addr,
 				  u32 mc_portal_size,
@@ -52,4 +111,6 @@ int __must_check fsl_create_mc_io(struct device *dev,
 
 void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
 
+bool fsl_mc_is_root_dprc(struct device *dev);
+
 #endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 49127ac..865d385 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -16,7 +16,6 @@
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include "../include/mc-bus.h"
 #include "fsl-mc-private.h"
 
 static struct irq_chip its_msi_irq_chip = {
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index d66b87f..35221a17 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -31,8 +31,7 @@
  */
 
 #include <linux/io.h>
-#include "../include/mc-bus.h"
-#include "../include/mc-sys.h"
+#include "../include/mc.h"
 
 #include "fsl-mc-private.h"
 #include "dpmcp.h"
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 4d82802..a1704c3 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -37,8 +37,6 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/io.h>
-#include "../include/mc-sys.h"
-#include "../include/mc-cmd.h"
 #include "../include/mc.h"
 
 #include "dpmcp.h"
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
deleted file mode 100644
index 170c07d..0000000
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_DPMNG_H
-#define __FSL_DPMNG_H
-
-/*
- * Management Complex General API
- * Contains general API for the Management Complex firmware
- */
-
-struct fsl_mc_io;
-
-/**
- * Management Complex firmware version information
- */
-#define MC_VER_MAJOR 8
-#define MC_VER_MINOR 0
-
-/**
- * struct mc_version
- * @major: Major version number: incremented on API compatibility changes
- * @minor: Minor version number: incremented on API additions (that are
- *		backward compatible); reset when major version is incremented
- * @revision: Internal revision number: incremented on implementation changes
- *		and/or bug fixes that have no impact on API
- */
-struct mc_version {
-	u32 major;
-	u32 minor;
-	u32 revision;
-};
-
-int mc_get_version(struct fsl_mc_io *mc_io,
-		   u32 cmd_flags,
-		   struct mc_version *mc_ver_info);
-
-#endif /* __FSL_DPMNG_H */
diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
deleted file mode 100644
index 42700de..0000000
--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Freescale Management Complex (MC) bus declarations
- *
- * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Author: German Rivera <German.Rivera@freescale.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#ifndef _FSL_MC_MCBUS_H_
-#define _FSL_MC_MCBUS_H_
-
-#include "../include/mc.h"
-#include <linux/mutex.h>
-
-struct irq_domain;
-struct msi_domain_info;
-
-/**
- * Maximum number of total IRQs that can be pre-allocated for an MC bus'
- * IRQ pool
- */
-#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS	256
-
-#ifdef CONFIG_FSL_MC_BUS
-#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
-#else
-/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */
-#define dev_is_fsl_mc(_dev) (0)
-#endif
-
-/**
- * struct fsl_mc_resource_pool - Pool of MC resources of a given
- * type
- * @type: type of resources in the pool
- * @max_count: maximum number of resources in the pool
- * @free_count: number of free resources in the pool
- * @mutex: mutex to serialize access to the pool's free list
- * @free_list: anchor node of list of free resources in the pool
- * @mc_bus: pointer to the MC bus that owns this resource pool
- */
-struct fsl_mc_resource_pool {
-	enum fsl_mc_pool_type type;
-	int max_count;
-	int free_count;
-	struct mutex mutex;	/* serializes access to free_list */
-	struct list_head free_list;
-	struct fsl_mc_bus *mc_bus;
-};
-
-/**
- * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
- * @mc_dev: fsl-mc device for the bus device itself.
- * @resource_pools: array of resource pools (one pool per resource type)
- * for this MC bus. These resources represent allocatable entities
- * from the physical DPRC.
- * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
- * @scan_mutex: Serializes bus scanning
- * @dprc_attr: DPRC attributes
- */
-struct fsl_mc_bus {
-	struct fsl_mc_device mc_dev;
-	struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
-	struct fsl_mc_device_irq *irq_resources;
-	struct mutex scan_mutex;    /* serializes bus scanning */
-	struct dprc_attributes dprc_attr;
-};
-
-#define to_fsl_mc_bus(_mc_dev) \
-	container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
-
-int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
-
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
-		      unsigned int *total_irq_count);
-
-int __init dprc_driver_init(void);
-
-void dprc_driver_exit(void);
-
-int __init fsl_mc_allocator_driver_init(void);
-
-void fsl_mc_allocator_driver_exit(void);
-
-struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
-						struct msi_domain_info *info,
-						struct irq_domain *parent);
-
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-			   struct irq_domain **mc_msi_domain);
-
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
-			     unsigned int irq_count);
-
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
-
-void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
-bool fsl_mc_bus_exists(void);
-
-void fsl_mc_get_root_dprc(struct device *dev,
-			  struct device **root_dprc_dev);
-
-bool fsl_mc_is_root_dprc(struct device *dev);
-
-extern struct bus_type fsl_mc_bus_type;
-
-#endif /* _FSL_MC_MCBUS_H_ */
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
deleted file mode 100644
index 2e08aa3..0000000
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __FSL_MC_CMD_H
-#define __FSL_MC_CMD_H
-
-#define MC_CMD_NUM_OF_PARAMS	7
-
-struct mc_cmd_header {
-	u8 src_id;
-	u8 flags_hw;
-	u8 status;
-	u8 flags_sw;
-	__le16 token;
-	__le16 cmd_id;
-};
-
-struct mc_command {
-	u64 header;
-	u64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
-struct mc_rsp_create {
-	__le32 object_id;
-};
-
-struct mc_rsp_api_ver {
-	__le16 major_ver;
-	__le16 minor_ver;
-};
-
-enum mc_cmd_status {
-	MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
-	MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
-	MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
-	MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
-	MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
-	MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
-	MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
-	MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
-	MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
-	MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
-	MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
-	MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
-};
-
-/*
- * MC command flags
- */
-
-/* High priority flag */
-#define MC_CMD_FLAG_PRI		0x80
-/* Command completion flag */
-#define MC_CMD_FLAG_INTR_DIS	0x01
-
-static inline u64 mc_encode_cmd_header(u16 cmd_id,
-				       u32 cmd_flags,
-				       u16 token)
-{
-	u64 header = 0;
-	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
-
-	hdr->cmd_id = cpu_to_le16(cmd_id);
-	hdr->token  = cpu_to_le16(token);
-	hdr->status = MC_CMD_STATUS_READY;
-	if (cmd_flags & MC_CMD_FLAG_PRI)
-		hdr->flags_hw = MC_CMD_FLAG_PRI;
-	if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
-		hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
-
-	return header;
-}
-
-static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
-{
-	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
-	u16 token = le16_to_cpu(hdr->token);
-
-	return token;
-}
-
-static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
-{
-	struct mc_rsp_create *rsp_params;
-
-	rsp_params = (struct mc_rsp_create *)cmd->params;
-	return le32_to_cpu(rsp_params->object_id);
-}
-
-static inline void mc_cmd_read_api_version(struct mc_command *cmd,
-					   u16 *major_ver,
-					   u16 *minor_ver)
-{
-	struct mc_rsp_api_ver *rsp_params;
-
-	rsp_params = (struct mc_rsp_api_ver *)cmd->params;
-	*major_ver = le16_to_cpu(rsp_params->major_ver);
-	*minor_ver = le16_to_cpu(rsp_params->minor_ver);
-}
-
-#endif /* __FSL_MC_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
deleted file mode 100644
index dca7f90..0000000
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Interface of the I/O services to send MC commands to the MC hardware
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of the above-listed copyright holders nor the
- *       names of any contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _FSL_MC_SYS_H
-#define _FSL_MC_SYS_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-
-/**
- * Bit masks for a MC I/O object (struct fsl_mc_io) flags
- */
-#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL	0x0001
-
-struct fsl_mc_resource;
-struct mc_command;
-
-/**
- * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
- * @dev: device associated with this Mc I/O object
- * @flags: flags for mc_send_command()
- * @portal_size: MC command portal size in bytes
- * @portal_phys_addr: MC command portal physical address
- * @portal_virt_addr: MC command portal virtual address
- * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
- *
- * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
- * set:
- * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
- * portal, if the fsl_mc_io object was created with the
- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
- * fsl_mc_io object must be made only from non-atomic context.
- *
- * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
- * set:
- * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
- * portal, if the fsl_mc_io object was created with the
- * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
- * fsl_mc_io object can be made from atomic or non-atomic context.
- */
-struct fsl_mc_io {
-	struct device *dev;
-	u16 flags;
-	u16 portal_size;
-	phys_addr_t portal_phys_addr;
-	void __iomem *portal_virt_addr;
-	struct fsl_mc_device *dpmcp_dev;
-	union {
-		/*
-		 * This field is only meaningful if the
-		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
-		 */
-		struct mutex mutex; /* serializes mc_send_command() */
-
-		/*
-		 * This field is only meaningful if the
-		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
-		 */
-		spinlock_t spinlock;	/* serializes mc_send_command() */
-	};
-};
-
-int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
-
-#endif /* _FSL_MC_SYS_H */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index 1c46c0c..aafe63a 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -14,10 +14,12 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/interrupt.h>
-#include "../include/dprc.h"
 
 #define FSL_MC_VENDOR_FREESCALE	0x1957
 
+struct irq_domain;
+struct msi_domain_info;
+
 struct fsl_mc_device;
 struct fsl_mc_io;
 
@@ -104,6 +106,45 @@ struct fsl_mc_device_irq {
 #define to_fsl_mc_irq(_mc_resource) \
 	container_of(_mc_resource, struct fsl_mc_device_irq, resource)
 
+/* Opened state - Indicates that an object is open by at least one owner */
+#define FSL_MC_OBJ_STATE_OPEN		0x00000001
+/* Plugged state - Indicates that the object is plugged */
+#define FSL_MC_OBJ_STATE_PLUGGED	0x00000002
+
+/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * the object generates memory accesses that are non coherent with other
+ * masters;
+ * user is responsible for proper memory handling through IOMMU configuration.
+ */
+#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY	0x0001
+
+/**
+ * struct fsl_mc_obj_desc - Object descriptor
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+ * @vendor: Object vendor identifier
+ * @ver_major: Major version number
+ * @ver_minor:  Minor version number
+ * @irq_count: Number of interrupts supported by the object
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
+ * @label: Object label: NULL terminated string
+ * @flags: Object's flags
+ */
+struct fsl_mc_obj_desc {
+	char type[16];
+	int id;
+	u16 vendor;
+	u16 ver_major;
+	u16 ver_minor;
+	u8 irq_count;
+	u8 region_count;
+	u32 state;
+	char label[16];
+	u16 flags;
+};
+
 /**
  * Bit masks for a MC object device (struct fsl_mc_device) flags
  */
@@ -150,7 +191,7 @@ struct fsl_mc_device {
 	u16 icid;
 	u16 mc_handle;
 	struct fsl_mc_io *mc_io;
-	struct dprc_obj_desc obj_desc;
+	struct fsl_mc_obj_desc obj_desc;
 	struct resource *regions;
 	struct fsl_mc_device_irq **irqs;
 	struct fsl_mc_resource *resource;
@@ -159,6 +200,159 @@ struct fsl_mc_device {
 #define to_fsl_mc_device(_dev) \
 	container_of(_dev, struct fsl_mc_device, dev)
 
+#define MC_CMD_NUM_OF_PARAMS	7
+
+struct mc_cmd_header {
+	u8 src_id;
+	u8 flags_hw;
+	u8 status;
+	u8 flags_sw;
+	__le16 token;
+	__le16 cmd_id;
+};
+
+struct mc_command {
+	u64 header;
+	u64 params[MC_CMD_NUM_OF_PARAMS];
+};
+
+enum mc_cmd_status {
+	MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+	MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+	MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+	MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+	MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+	MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+	MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+	MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+	MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+	MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+	MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+	MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI		0x80
+/* Command completion flag */
+#define MC_CMD_FLAG_INTR_DIS	0x01
+
+static inline u64 mc_encode_cmd_header(u16 cmd_id,
+				       u32 cmd_flags,
+				       u16 token)
+{
+	u64 header = 0;
+	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+	hdr->cmd_id = cpu_to_le16(cmd_id);
+	hdr->token  = cpu_to_le16(token);
+	hdr->status = MC_CMD_STATUS_READY;
+	if (cmd_flags & MC_CMD_FLAG_PRI)
+		hdr->flags_hw = MC_CMD_FLAG_PRI;
+	if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+		hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+	return header;
+}
+
+static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+	u16 token = le16_to_cpu(hdr->token);
+
+	return token;
+}
+
+struct mc_rsp_create {
+	__le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+	__le16 major_ver;
+	__le16 minor_ver;
+};
+
+static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
+{
+	struct mc_rsp_create *rsp_params;
+
+	rsp_params = (struct mc_rsp_create *)cmd->params;
+	return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct mc_command *cmd,
+					   u16 *major_ver,
+					   u16 *minor_ver)
+{
+	struct mc_rsp_api_ver *rsp_params;
+
+	rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+	*major_ver = le16_to_cpu(rsp_params->major_ver);
+	*minor_ver = le16_to_cpu(rsp_params->minor_ver);
+}
+
+/**
+ * Bit masks for a MC I/O object (struct fsl_mc_io) flags
+ */
+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL	0x0001
+
+/**
+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
+ * @dev: device associated with this Mc I/O object
+ * @flags: flags for mc_send_command()
+ * @portal_size: MC command portal size in bytes
+ * @portal_phys_addr: MC command portal physical address
+ * @portal_virt_addr: MC command portal virtual address
+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
+ * set:
+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
+ * fsl_mc_io object must be made only from non-atomic context.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
+ * set:
+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
+ * fsl_mc_io object can be made from atomic or non-atomic context.
+ */
+struct fsl_mc_io {
+	struct device *dev;
+	u16 flags;
+	u16 portal_size;
+	phys_addr_t portal_phys_addr;
+	void __iomem *portal_virt_addr;
+	struct fsl_mc_device *dpmcp_dev;
+	union {
+		/*
+		 * This field is only meaningful if the
+		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+		 */
+		struct mutex mutex; /* serializes mc_send_command() */
+
+		/*
+		 * This field is only meaningful if the
+		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+		 */
+		spinlock_t spinlock;	/* serializes mc_send_command() */
+	};
+};
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_BUS
+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+#else
+/* If fsl-mc bus is not present device cannot belong to fsl-mc bus */
+#define dev_is_fsl_mc(_dev) (0)
+#endif
+
 /*
  * module_fsl_mc_driver() - Helper macro for drivers that don't do
  * anything special in module init/exit.  This eliminates a lot of
@@ -194,8 +388,14 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
 
 void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
 
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+						struct msi_domain_info *info,
+						struct irq_domain *parent);
+
 int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
 
 void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
 
+extern struct bus_type fsl_mc_bus_type;
+
 #endif /* _FSL_MC_H_ */
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 15a7e81..87cd1f8 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -738,11 +738,11 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
 	sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
 	if (nic_type == NIC_TYPE_ARP) {
 		send_len = len + SDU_PARAM_LEN;
-	    memcpy(sdu->data, data, len);
+		memcpy(sdu->data, data, len);
 	} else {
-	    send_len = len - ETH_HLEN;
-	    send_len += SDU_PARAM_LEN;
-	    memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
+		send_len = len - ETH_HLEN;
+		send_len += SDU_PARAM_LEN;
+		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
 	}
 
 	sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
index 50de2d7..ab096bc 100644
--- a/drivers/staging/greybus/Kconfig
+++ b/drivers/staging/greybus/Kconfig
@@ -216,4 +216,14 @@
 	  will be called gb-usb.ko
 
 endif	# GREYBUS_BRIDGED_PHY
+
+config GREYBUS_ARCHE
+	tristate "Greybus Arche Platform driver"
+	depends on USB_HSIC_USB3613 || COMPILE_TEST
+	---help---
+	  Select this option if you have an Arche device.
+
+	  To compile this code as a module, chose M here: the module
+	  will be called gb-arche.ko
+
 endif	# GREYBUS
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
index b26b9a3..23e1cb7 100644
--- a/drivers/staging/greybus/Makefile
+++ b/drivers/staging/greybus/Makefile
@@ -91,4 +91,4 @@
 # Greybus Platform driver
 gb-arche-y	:= arche-platform.o arche-apb-ctrl.o
 
-obj-$(CONFIG_USB_HSIC_USB3613)	+= gb-arche.o
+obj-$(CONFIG_GREYBUS_ARCHE)	+= gb-arche.o
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 02243b4..0412f3d 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -22,6 +22,8 @@
 #include "arche_platform.h"
 
 
+static void apb_bootret_deassert(struct device *dev);
+
 struct arche_apb_ctrl_drvdata {
 	/* Control GPIO signals to and from AP <=> AP Bridges */
 	int resetn_gpio;
@@ -222,14 +224,7 @@ static void poweroff_seq(struct platform_device *pdev)
 	/* TODO: May have to send an event to SVC about this exit */
 }
 
-void apb_bootret_assert(struct device *dev)
-{
-	struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
-
-	gpio_set_value(apb->boot_ret_gpio, 1);
-}
-
-void apb_bootret_deassert(struct device *dev)
+static void apb_bootret_deassert(struct device *dev)
 {
 	struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
 
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index aac1145..eced2d2 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -24,7 +24,14 @@
 #include "arche_platform.h"
 #include "greybus.h"
 
+#if IS_ENABLED(CONFIG_USB_HSIC_USB3613)
 #include <linux/usb/usb3613.h>
+#else
+static inline int usb3613_hub_mode_ctrl(bool unused)
+{
+	return 0;
+}
+#endif
 
 #define WD_COLDBOOT_PULSE_WIDTH_MS	30
 
@@ -35,7 +42,6 @@ enum svc_wakedetect_state {
 	WD_STATE_STANDBYBOOT_TRIG,	/* As of now not used ?? */
 	WD_STATE_COLDBOOT_START,	/* Cold boot process started */
 	WD_STATE_STANDBYBOOT_START,	/* Not used */
-	WD_STATE_TIMESYNC,
 };
 
 struct arche_platform_drvdata {
@@ -59,26 +65,12 @@ struct arche_platform_drvdata {
 	int wake_detect_irq;
 	spinlock_t wake_lock;			/* Protect wake_detect_state */
 	struct mutex platform_state_mutex;	/* Protect state */
-	wait_queue_head_t wq;			/* WQ for arche_pdata->state */
 	unsigned long wake_detect_start;
 	struct notifier_block pm_notifier;
 
 	struct device *dev;
-	struct gb_timesync_svc *timesync_svc_pdata;
 };
 
-static int arche_apb_bootret_assert(struct device *dev, void *data)
-{
-	apb_bootret_assert(dev);
-	return 0;
-}
-
-static int arche_apb_bootret_deassert(struct device *dev, void *data)
-{
-	apb_bootret_deassert(dev);
-	return 0;
-}
-
 /* Requires calling context to hold arche_pdata->platform_state_mutex */
 static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
 				     enum arche_platform_state state)
@@ -86,112 +78,6 @@ static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
 	arche_pdata->state = state;
 }
 
-/*
- * arche_platform_change_state: Change the operational state
- *
- * This exported function allows external drivers to change the state
- * of the arche-platform driver.
- * Note that this function only supports transitions between two states
- * with limited functionality.
- *
- *  - ARCHE_PLATFORM_STATE_TIME_SYNC:
- *    Once set, allows timesync operations between SVC <=> AP and makes
- *    sure that arche-platform driver ignores any subsequent events/pulses
- *    from SVC over wake/detect.
- *
- *  - ARCHE_PLATFORM_STATE_ACTIVE:
- *    Puts back driver to active state, where any pulse from SVC on wake/detect
- *    line would trigger either cold/standby boot.
- *    Note: Transition request from this function does not trigger cold/standby
- *          boot. It just puts back driver book keeping variable back to ACTIVE
- *          state and restores the interrupt.
- *
- * Returns -ENODEV if device not found, -EAGAIN if the driver cannot currently
- * satisfy the requested state-transition or -EINVAL for all other
- * state-transition requests.
- */
-int arche_platform_change_state(enum arche_platform_state state,
-				struct gb_timesync_svc *timesync_svc_pdata)
-{
-	struct arche_platform_drvdata *arche_pdata;
-	struct platform_device *pdev;
-	struct device_node *np;
-	int ret = -EAGAIN;
-	unsigned long flags;
-
-	np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
-	if (!np) {
-		pr_err("google,arche-platform device node not found\n");
-		return -ENODEV;
-	}
-
-	pdev = of_find_device_by_node(np);
-	if (!pdev) {
-		pr_err("arche-platform device not found\n");
-		of_node_put(np);
-		return -ENODEV;
-	}
-
-	arche_pdata = platform_get_drvdata(pdev);
-
-	mutex_lock(&arche_pdata->platform_state_mutex);
-	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
-
-	if (arche_pdata->state == state) {
-		ret = 0;
-		goto exit;
-	}
-
-	switch (state) {
-	case ARCHE_PLATFORM_STATE_TIME_SYNC:
-		if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
-			ret = -EINVAL;
-			goto exit;
-		}
-		if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
-			dev_err(arche_pdata->dev,
-				"driver busy with wake/detect line ops\n");
-			goto  exit;
-		}
-		device_for_each_child(arche_pdata->dev, NULL,
-				      arche_apb_bootret_assert);
-		arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
-		break;
-	case ARCHE_PLATFORM_STATE_ACTIVE:
-		if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
-			ret = -EINVAL;
-			goto exit;
-		}
-		device_for_each_child(arche_pdata->dev, NULL,
-				      arche_apb_bootret_deassert);
-		arche_pdata->wake_detect_state = WD_STATE_IDLE;
-		break;
-	case ARCHE_PLATFORM_STATE_OFF:
-	case ARCHE_PLATFORM_STATE_STANDBY:
-	case ARCHE_PLATFORM_STATE_FW_FLASHING:
-		dev_err(arche_pdata->dev, "busy, request to retry later\n");
-		goto exit;
-	default:
-		ret = -EINVAL;
-		dev_err(arche_pdata->dev,
-			"invalid state transition request\n");
-		goto exit;
-	}
-	arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
-	arche_platform_set_state(arche_pdata, state);
-	if (state == ARCHE_PLATFORM_STATE_ACTIVE)
-		wake_up(&arche_pdata->wq);
-
-	ret = 0;
-exit:
-	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
-	mutex_unlock(&arche_pdata->platform_state_mutex);
-	put_device(&pdev->dev);
-	of_node_put(np);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(arche_platform_change_state);
-
 /* Requires arche_pdata->wake_lock is held by calling context */
 static void arche_platform_set_wake_detect_state(
 				struct arche_platform_drvdata *arche_pdata,
@@ -275,11 +161,6 @@ static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
 
 	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
 
-	if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
-		gb_timesync_irq(arche_pdata->timesync_svc_pdata);
-		goto exit;
-	}
-
 	if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
 		/* wake/detect rising */
 
@@ -323,7 +204,6 @@ static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
 		}
 	}
 
-exit:
 	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
 
 	return IRQ_HANDLED;
@@ -436,17 +316,7 @@ static ssize_t state_store(struct device *dev,
 	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
 	int ret = 0;
 
-retry:
 	mutex_lock(&arche_pdata->platform_state_mutex);
-	if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
-		mutex_unlock(&arche_pdata->platform_state_mutex);
-		ret = wait_event_interruptible(
-			arche_pdata->wq,
-			arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
-		if (ret)
-			return ret;
-		goto retry;
-	}
 
 	if (sysfs_streq(buf, "off")) {
 		if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
@@ -517,8 +387,6 @@ static ssize_t state_show(struct device *dev,
 		return sprintf(buf, "standby\n");
 	case ARCHE_PLATFORM_STATE_FW_FLASHING:
 		return sprintf(buf, "fw_flashing\n");
-	case ARCHE_PLATFORM_STATE_TIME_SYNC:
-		return sprintf(buf, "time_sync\n");
 	default:
 		return sprintf(buf, "unknown state\n");
 	}
@@ -665,7 +533,6 @@ static int arche_platform_probe(struct platform_device *pdev)
 
 	spin_lock_init(&arche_pdata->wake_lock);
 	mutex_init(&arche_pdata->platform_state_mutex);
-	init_waitqueue_head(&arche_pdata->wq);
 	arche_pdata->wake_detect_irq =
 		gpio_to_irq(arche_pdata->wake_detect_gpio);
 
@@ -701,9 +568,6 @@ static int arche_platform_probe(struct platform_device *pdev)
 		goto err_device_remove;
 	}
 
-	/* Register callback pointer */
-	arche_platform_change_state_cb = arche_platform_change_state;
-
 	/* Explicitly power off if requested */
 	if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
 		mutex_lock(&arche_pdata->platform_state_mutex);
@@ -751,7 +615,7 @@ static int arche_platform_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int arche_platform_suspend(struct device *dev)
+static __maybe_unused int arche_platform_suspend(struct device *dev)
 {
 	/*
 	 * If timing profile premits, we may shutdown bridge
@@ -765,7 +629,7 @@ static int arche_platform_suspend(struct device *dev)
 	return 0;
 }
 
-static int arche_platform_resume(struct device *dev)
+static __maybe_unused int arche_platform_resume(struct device *dev)
 {
 	/*
 	 * Atleast for ES2 we have to meet the delay requirement between
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
index c0591df..bcffc69 100644
--- a/drivers/staging/greybus/arche_platform.h
+++ b/drivers/staging/greybus/arche_platform.h
@@ -15,14 +15,8 @@ enum arche_platform_state {
 	ARCHE_PLATFORM_STATE_ACTIVE,
 	ARCHE_PLATFORM_STATE_STANDBY,
 	ARCHE_PLATFORM_STATE_FW_FLASHING,
-	ARCHE_PLATFORM_STATE_TIME_SYNC,
 };
 
-int arche_platform_change_state(enum arche_platform_state state,
-				struct gb_timesync_svc *pdata);
-
-extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
-					     struct gb_timesync_svc *pdata);
 int __init arche_apb_init(void);
 void __exit arche_apb_exit(void);
 
@@ -31,7 +25,5 @@ int apb_ctrl_coldboot(struct device *dev);
 int apb_ctrl_fw_flashing(struct device *dev);
 int apb_ctrl_standby_boot(struct device *dev);
 void apb_ctrl_poweroff(struct device *dev);
-void apb_bootret_assert(struct device *dev);
-void apb_bootret_deassert(struct device *dev);
 
 #endif	/* __ARCHE_PLATFORM_H */
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 1681362..861a249 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1030,7 +1030,7 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id)
 	light->channels_count = conf.channel_count;
 	light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
 
-	light->channels = kzalloc(light->channels_count *
+	light->channels = kcalloc(light->channels_count,
 				  sizeof(struct gb_channel), GFP_KERNEL);
 	if (!light->channels)
 		return -ENOMEM;
@@ -1167,7 +1167,7 @@ static int gb_lights_create_all(struct gb_lights *glights)
 	if (ret < 0)
 		goto out;
 
-	glights->lights = kzalloc(glights->lights_count *
+	glights->lights = kcalloc(glights->lights_count,
 				  sizeof(struct gb_light), GFP_KERNEL);
 	if (!glights->lights) {
 		ret = -ENOMEM;
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index e85c988..20cac20 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -944,7 +944,7 @@ static int gb_power_supplies_setup(struct gb_power_supplies *supplies)
 	if (ret < 0)
 		goto out;
 
-	supplies->supply = kzalloc(supplies->supplies_count *
+	supplies->supply = kcalloc(supplies->supplies_count,
 				     sizeof(struct gb_power_supply),
 				     GFP_KERNEL);
 
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index dc6ecd8..ff10d1f 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -231,16 +231,12 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
 	if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
 		i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
 
-	mutex_lock(&chip->state_lock);
 	ret = i2c_smbus_write_byte_data(chip->client,
 					AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
-	if (ret < 0) {
-		mutex_unlock(&chip->state_lock);
+	if (ret < 0)
 		return ret;
-	}
 
 	chip->filter_rate_setup = i;
-	mutex_unlock(&chip->state_lock);
 
 	return ret;
 }
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index af108e9..995acdd 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -292,7 +292,7 @@ ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
 	return sprintf(buf, "%s\n", str);
 }
 
-static IIO_DEVICE_ATTR(out_altvoltage0_out0_wavetype_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_altvoltage0_out0_wavetype_available, 0444,
 		       ad9834_show_out0_wavetype_available, NULL, 0);
 
 static
@@ -312,27 +312,27 @@ ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
 	return sprintf(buf, "%s\n", str);
 }
 
-static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_altvoltage0_out1_wavetype_available, 0444,
 		       ad9834_show_out1_wavetype_available, NULL, 0);
 
 /**
  * see dds.h for further information
  */
 
-static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
-static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
-static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
+static IIO_DEV_ATTR_FREQ(0, 0, 0200, NULL, ad9834_write, AD9834_REG_FREQ0);
+static IIO_DEV_ATTR_FREQ(0, 1, 0200, NULL, ad9834_write, AD9834_REG_FREQ1);
+static IIO_DEV_ATTR_FREQSYMBOL(0, 0200, NULL, ad9834_write, AD9834_FSEL);
 static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
 
-static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
-static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
-static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
+static IIO_DEV_ATTR_PHASE(0, 0, 0200, NULL, ad9834_write, AD9834_REG_PHASE0);
+static IIO_DEV_ATTR_PHASE(0, 1, 0200, NULL, ad9834_write, AD9834_REG_PHASE1);
+static IIO_DEV_ATTR_PHASESYMBOL(0, 0200, NULL, ad9834_write, AD9834_PSEL);
 static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
 
-static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL,
 	ad9834_write, AD9834_PIN_SW);
-static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
-static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
+static IIO_DEV_ATTR_OUT_ENABLE(0, 0200, NULL, ad9834_write, AD9834_RESET);
+static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, 0200, NULL,
 	ad9834_write, AD9834_OPBITEN);
 static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
 static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);
diff --git a/drivers/staging/iio/frequency/dds.h b/drivers/staging/iio/frequency/dds.h
index fe53e732..d6ccd99 100644
--- a/drivers/staging/iio/frequency/dds.h
+++ b/drivers/staging/iio/frequency/dds.h
@@ -101,7 +101,7 @@
 
 #define IIO_DEV_ATTR_OUT_WAVETYPE(_channel, _output, _store, _addr)	\
 	IIO_DEVICE_ATTR(out_altvoltage##_channel##_out##_output##_wavetype,\
-			S_IWUSR, NULL, _store, _addr)
+			0200, NULL, _store, _addr)
 
 /**
  * /sys/bus/iio/devices/.../out_altvoltageX_outY_wavetype_available
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 4fbf629..aacb0ae 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,16 +3,6 @@
 #
 menu "Light sensors"
 
-config SENSORS_ISL29028
-	tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
-	depends on I2C
-	select REGMAP_I2C
-	help
-	 Provides driver for the Intersil's ISL29028 device.
-	 This driver supports the sysfs interface to get the ALS, IR intensity,
-	 Proximity value via iio. The ISL29028 provides the concurrent sensing
-	 of ambient light and proximity.
-
 config TSL2x7x
 	tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
 	depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index f8693e9..ab8dc3a 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,5 +2,4 @@
 # Makefile for industrial I/O Light sensors
 #
 
-obj-$(CONFIG_SENSORS_ISL29028)	+= isl29028.o
-obj-$(CONFIG_TSL2x7x)	+= tsl2x7x_core.o
+obj-$(CONFIG_TSL2x7x)	+= tsl2x7x.o
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x.c
similarity index 95%
rename from drivers/staging/iio/light/tsl2x7x_core.c
rename to drivers/staging/iio/light/tsl2x7x.c
index af3910b..1467199 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x.c
@@ -13,10 +13,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA        02110-1301, USA.
  */
 
 #include <linux/kernel.h>
@@ -919,7 +915,7 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
 		tsl2x7x_chip_on(indio_dev);
 }
 
-static ssize_t tsl2x7x_power_state_show(struct device *dev,
+static ssize_t power_state_show(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
 {
@@ -928,7 +924,7 @@ static ssize_t tsl2x7x_power_state_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d\n", chip->tsl2x7x_chip_status);
 }
 
-static ssize_t tsl2x7x_power_state_store(struct device *dev,
+static ssize_t power_state_store(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t len)
 {
@@ -946,7 +942,7 @@ static ssize_t tsl2x7x_power_state_store(struct device *dev,
 	return len;
 }
 
-static ssize_t tsl2x7x_gain_available_show(struct device *dev,
+static ssize_t in_illuminance0_calibscale_available_show(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
 {
@@ -964,14 +960,14 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
 }
 
-static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
+static ssize_t in_proximity0_calibscale_available_show(struct device *dev,
 						struct device_attribute *attr,
 						char *buf)
 {
 		return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
 }
 
-static ssize_t tsl2x7x_als_time_show(struct device *dev,
+static ssize_t in_illuminance0_integration_time_show(struct device *dev,
 				     struct device_attribute *attr,
 				     char *buf)
 {
@@ -986,7 +982,7 @@ static ssize_t tsl2x7x_als_time_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
 }
 
-static ssize_t tsl2x7x_als_time_store(struct device *dev,
+static ssize_t in_illuminance0_integration_time_store(struct device *dev,
 				      struct device_attribute *attr,
 				      const char *buf, size_t len)
 {
@@ -1014,7 +1010,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
 static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
 		".00272 - .696");
 
-static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
+static ssize_t in_illuminance0_target_input_show(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
 {
@@ -1024,7 +1020,7 @@ static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
 			chip->tsl2x7x_settings.als_cal_target);
 }
 
-static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
+static ssize_t in_illuminance0_target_input_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t len)
 {
@@ -1044,7 +1040,7 @@ static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
 }
 
 /* persistence settings */
-static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
+static ssize_t in_intensity0_thresh_period_show(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
 {
@@ -1061,7 +1057,7 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
 }
 
-static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
+static ssize_t in_intensity0_thresh_period_store(struct device *dev,
 					     struct device_attribute *attr,
 					     const char *buf, size_t len)
 {
@@ -1092,7 +1088,7 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
 	return IIO_VAL_INT_PLUS_MICRO;
 }
 
-static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
+static ssize_t in_proximity0_thresh_period_show(struct device *dev,
 					     struct device_attribute *attr,
 					     char *buf)
 {
@@ -1109,7 +1105,7 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
 }
 
-static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
+static ssize_t in_proximity0_thresh_period_store(struct device *dev,
 					      struct device_attribute *attr,
 					      const char *buf, size_t len)
 {
@@ -1140,7 +1136,7 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
 	return IIO_VAL_INT_PLUS_MICRO;
 }
 
-static ssize_t tsl2x7x_do_calibrate(struct device *dev,
+static ssize_t in_illuminance0_calibrate_store(struct device *dev,
 				    struct device_attribute *attr,
 				    const char *buf, size_t len)
 {
@@ -1158,7 +1154,7 @@ static ssize_t tsl2x7x_do_calibrate(struct device *dev,
 	return len;
 }
 
-static ssize_t tsl2x7x_luxtable_show(struct device *dev,
+static ssize_t in_illuminance0_lux_table_show(struct device *dev,
 				     struct device_attribute *attr,
 				     char *buf)
 {
@@ -1186,7 +1182,7 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
 	return offset;
 }
 
-static ssize_t tsl2x7x_luxtable_store(struct device *dev,
+static ssize_t in_illuminance0_lux_table_store(struct device *dev,
 				      struct device_attribute *attr,
 				      const char *buf, size_t len)
 {
@@ -1226,7 +1222,7 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
 	return len;
 }
 
-static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
+static ssize_t in_proximity0_calibrate_store(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t len)
 {
@@ -1498,35 +1494,25 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
 	return 0;
 }
 
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
-		tsl2x7x_power_state_show, tsl2x7x_power_state_store);
+static DEVICE_ATTR_RW(power_state);
 
-static DEVICE_ATTR(in_proximity0_calibscale_available, S_IRUGO,
-		tsl2x7x_prox_gain_available_show, NULL);
+static DEVICE_ATTR_RO(in_proximity0_calibscale_available);
 
-static DEVICE_ATTR(in_illuminance0_calibscale_available, S_IRUGO,
-		tsl2x7x_gain_available_show, NULL);
+static DEVICE_ATTR_RO(in_illuminance0_calibscale_available);
 
-static DEVICE_ATTR(in_illuminance0_integration_time, S_IRUGO | S_IWUSR,
-		tsl2x7x_als_time_show, tsl2x7x_als_time_store);
+static DEVICE_ATTR_RW(in_illuminance0_integration_time);
 
-static DEVICE_ATTR(in_illuminance0_target_input, S_IRUGO | S_IWUSR,
-		tsl2x7x_als_cal_target_show, tsl2x7x_als_cal_target_store);
+static DEVICE_ATTR_RW(in_illuminance0_target_input);
 
-static DEVICE_ATTR(in_illuminance0_calibrate, S_IWUSR, NULL,
-		tsl2x7x_do_calibrate);
+static DEVICE_ATTR_WO(in_illuminance0_calibrate);
 
-static DEVICE_ATTR(in_proximity0_calibrate, S_IWUSR, NULL,
-		tsl2x7x_do_prox_calibrate);
+static DEVICE_ATTR_WO(in_proximity0_calibrate);
 
-static DEVICE_ATTR(in_illuminance0_lux_table, S_IRUGO | S_IWUSR,
-		tsl2x7x_luxtable_show, tsl2x7x_luxtable_store);
+static DEVICE_ATTR_RW(in_illuminance0_lux_table);
 
-static DEVICE_ATTR(in_intensity0_thresh_period, S_IRUGO | S_IWUSR,
-		tsl2x7x_als_persistence_show, tsl2x7x_als_persistence_store);
+static DEVICE_ATTR_RW(in_intensity0_thresh_period);
 
-static DEVICE_ATTR(in_proximity0_thresh_period, S_IRUGO | S_IWUSR,
-		tsl2x7x_prox_persistence_show, tsl2x7x_prox_persistence_store);
+static DEVICE_ATTR_RW(in_proximity0_thresh_period);
 
 /* Use the default register values to identify the Taos device */
 static int tsl2x7x_device_id(unsigned char *id, int target)
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index b71fbd3..ce26abdea 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -107,9 +107,8 @@ static int ade7753_spi_write_reg_8(struct device *dev,
 	return ret;
 }
 
-static int ade7753_spi_write_reg_16(struct device *dev,
-		u8 reg_address,
-		u16 value)
+static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
+				    u16 value)
 {
 	int ret;
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -126,8 +125,8 @@ static int ade7753_spi_write_reg_16(struct device *dev,
 }
 
 static int ade7753_spi_read_reg_8(struct device *dev,
-		u8 reg_address,
-		u8 *val)
+				  u8 reg_address,
+				  u8 *val)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct ade7753_state *st = iio_priv(indio_dev);
@@ -136,7 +135,7 @@ static int ade7753_spi_read_reg_8(struct device *dev,
 	ret = spi_w8r8(st->us, ADE7753_READ_REG(reg_address));
 	if (ret < 0) {
 		dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
-				reg_address);
+			reg_address);
 		return ret;
 	}
 	*val = ret;
@@ -145,8 +144,8 @@ static int ade7753_spi_read_reg_8(struct device *dev,
 }
 
 static int ade7753_spi_read_reg_16(struct device *dev,
-		u8 reg_address,
-		u16 *val)
+				   u8 reg_address,
+				   u16 *val)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct ade7753_state *st = iio_priv(indio_dev);
@@ -165,8 +164,8 @@ static int ade7753_spi_read_reg_16(struct device *dev,
 }
 
 static int ade7753_spi_read_reg_24(struct device *dev,
-		u8 reg_address,
-		u32 *val)
+				   u8 reg_address,
+				   u32 *val)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct ade7753_state *st = iio_priv(indio_dev);
@@ -189,7 +188,7 @@ static int ade7753_spi_read_reg_24(struct device *dev,
 	ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
 	if (ret) {
 		dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
-				reg_address);
+			reg_address);
 		goto error_ret;
 	}
 	*val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
@@ -200,8 +199,8 @@ static int ade7753_spi_read_reg_24(struct device *dev,
 }
 
 static ssize_t ade7753_read_8bit(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+				 struct device_attribute *attr,
+				 char *buf)
 {
 	int ret;
 	u8 val;
@@ -215,8 +214,8 @@ static ssize_t ade7753_read_8bit(struct device *dev,
 }
 
 static ssize_t ade7753_read_16bit(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+				  struct device_attribute *attr,
+				  char *buf)
 {
 	int ret;
 	u16 val;
@@ -230,8 +229,8 @@ static ssize_t ade7753_read_16bit(struct device *dev,
 }
 
 static ssize_t ade7753_read_24bit(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+				  struct device_attribute *attr,
+				  char *buf)
 {
 	int ret;
 	u32 val;
@@ -245,9 +244,9 @@ static ssize_t ade7753_read_24bit(struct device *dev,
 }
 
 static ssize_t ade7753_write_8bit(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t len)
 {
 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 	int ret;
@@ -263,9 +262,9 @@ static ssize_t ade7753_write_8bit(struct device *dev,
 }
 
 static ssize_t ade7753_write_16bit(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t len)
 {
 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
 	int ret;
@@ -298,92 +297,92 @@ static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
 static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
 static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
 static IIO_DEV_ATTR_LVAENERGY(ade7753_read_24bit, ADE7753_LVAENERGY);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFDEN(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_CFDEN);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFNUM(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_CFNUM);
 static IIO_DEV_ATTR_CHKSUM(ade7753_read_8bit, ADE7753_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_PHCAL(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_PHCAL);
-static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APOS(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_APOS);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAGCYC(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAGLVL(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_LINECYC(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_LINECYC);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_WDIV(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_WDIV);
-static IIO_DEV_ATTR_IRMS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IRMS(0644,
 		ade7753_read_24bit,
 		NULL,
 		ADE7753_IRMS);
-static IIO_DEV_ATTR_VRMS(S_IRUGO,
+static IIO_DEV_ATTR_VRMS(0444,
 		ade7753_read_24bit,
 		NULL,
 		ADE7753_VRMS);
-static IIO_DEV_ATTR_IRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IRMSOS(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_IRMSOS);
-static IIO_DEV_ATTR_VRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VRMSOS(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_VRMSOS);
-static IIO_DEV_ATTR_WGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_WGAIN(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_WGAIN);
-static IIO_DEV_ATTR_VAGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VAGAIN(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_VAGAIN);
-static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_PGA_GAIN(0644,
 		ade7753_read_16bit,
 		ade7753_write_16bit,
 		ADE7753_GAIN);
-static IIO_DEV_ATTR_IPKLVL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IPKLVL(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_IPKLVL);
-static IIO_DEV_ATTR_VPKLVL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VPKLVL(0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_VPKLVL);
-static IIO_DEV_ATTR_IPEAK(S_IRUGO,
+static IIO_DEV_ATTR_IPEAK(0444,
 		ade7753_read_24bit,
 		NULL,
 		ADE7753_IPEAK);
-static IIO_DEV_ATTR_VPEAK(S_IRUGO,
+static IIO_DEV_ATTR_VPEAK(0444,
 		ade7753_read_24bit,
 		NULL,
 		ADE7753_VPEAK);
-static IIO_DEV_ATTR_VPERIOD(S_IRUGO,
+static IIO_DEV_ATTR_VPERIOD(0444,
 		ade7753_read_16bit,
 		NULL,
 		ADE7753_PERIOD);
-static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CH_OFF(1, 0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_CH1OS);
-static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CH_OFF(2, 0644,
 		ade7753_read_8bit,
 		ade7753_write_8bit,
 		ADE7753_CH2OS);
@@ -450,8 +449,8 @@ static int ade7753_initial_setup(struct iio_dev *indio_dev)
 }
 
 static ssize_t ade7753_read_frequency(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+				      struct device_attribute *attr,
+				      char *buf)
 {
 	int ret;
 	u16 t;
@@ -468,9 +467,9 @@ static ssize_t ade7753_read_frequency(struct device *dev,
 }
 
 static ssize_t ade7753_write_frequency(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t len)
 {
 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 	struct ade7753_state *st = iio_priv(indio_dev);
@@ -514,7 +513,7 @@ static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
 static IIO_CONST_ATTR(in_temp_offset, "-25 C");
 static IIO_CONST_ATTR(in_temp_scale, "0.67 C");
 
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAMP_FREQ(0644,
 		ade7753_read_frequency,
 		ade7753_write_frequency);
 
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 32dc503..be0df3f 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -316,111 +316,111 @@ static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
 static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
 static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
 static IIO_DEV_ATTR_LVAENERGY(ade7754_read_24bit, ADE7754_LVAENERGY);
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VPEAK(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IPEAK(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_VPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APHCAL(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BPHCAL(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CPHCAL(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_CPHCAL);
-static IIO_DEV_ATTR_AAPOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AAPOS(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_AAPOS);
-static IIO_DEV_ATTR_BAPOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BAPOS(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_BAPOS);
-static IIO_DEV_ATTR_CAPOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CAPOS(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CAPOS);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_WDIV(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_WDIV);
-static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VADIV(0644,
 		ade7754_read_8bit,
 		ade7754_write_8bit,
 		ADE7754_VADIV);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFNUM(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CFNUM);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFDEN(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CFDEN);
-static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_AAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_BAPGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(0644,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CAPGAIN);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+static IIO_DEV_ATTR_AIRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+static IIO_DEV_ATTR_BIRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+static IIO_DEV_ATTR_CIRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_CIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+static IIO_DEV_ATTR_AVRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+static IIO_DEV_ATTR_BVRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+static IIO_DEV_ATTR_CVRMS(0444,
 		ade7754_read_24bit,
 		NULL,
 		ADE7754_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_AIRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_BIRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_CIRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_AVRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_BVRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_CVRMSOS(0444,
 		ade7754_read_16bit,
 		ade7754_write_16bit,
 		ADE7754_CVRMSOS);
@@ -549,7 +549,7 @@ static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
 static IIO_CONST_ATTR(in_temp_offset, "129 C");
 static IIO_CONST_ATTR(in_temp_scale, "4 C");
 
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAMP_FREQ(0644,
 		ade7754_read_frequency,
 		ade7754_write_frequency);
 
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 99c89e6..40498af 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -301,103 +301,103 @@ static int ade7758_reset(struct device *dev)
 	return ret;
 }
 
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VPEAK(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IPEAK(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_VPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APHCAL(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BPHCAL(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CPHCAL(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_CPHCAL);
-static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_WDIV(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_WDIV);
-static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VADIV(0644,
 		ade7758_read_8bit,
 		ade7758_write_8bit,
 		ADE7758_VADIV);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+static IIO_DEV_ATTR_AIRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+static IIO_DEV_ATTR_BIRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+static IIO_DEV_ATTR_CIRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_CIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+static IIO_DEV_ATTR_AVRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+static IIO_DEV_ATTR_BVRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+static IIO_DEV_ATTR_CVRMS(0444,
 		ade7758_read_24bit,
 		NULL,
 		ADE7758_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AIRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BIRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CIRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AVRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BVRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CVRMSOS(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_CVRMSOS);
-static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AIGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_AIGAIN);
-static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BIGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_BIGAIN);
-static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CIGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_CIGAIN);
-static IIO_DEV_ATTR_AVRMSGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AVRMSGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_AVRMSGAIN);
-static IIO_DEV_ATTR_BVRMSGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BVRMSGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_BVRMSGAIN);
-static IIO_DEV_ATTR_CVRMSGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CVRMSGAIN(0644,
 		ade7758_read_16bit,
 		ade7758_write_16bit,
 		ADE7758_CVRMSGAIN);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index c6cffc1..70612da 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -186,127 +186,127 @@ static int ade7854_reset(struct device *dev)
 	return st->write_reg_16(dev, ADE7854_CONFIG, val);
 }
 
-static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AIGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AIGAIN);
-static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BIGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BIGAIN);
-static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CIGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CIGAIN);
-static IIO_DEV_ATTR_NIGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_NIGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_NIGAIN);
-static IIO_DEV_ATTR_AVGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_AVGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AVGAIN);
-static IIO_DEV_ATTR_BVGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BVGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BVGAIN);
-static IIO_DEV_ATTR_CVGAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CVGAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CVGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AVAGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BVAGAIN);
-static IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CVAGAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AWATTOS);
-static IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BWATTOS);
-static IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CWATTOS);
-static IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CVARGAIN);
-static IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_AVAROS);
-static IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_BVAROS);
-static IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(0644,
 		ade7854_read_24bit,
 		ade7854_write_24bit,
 		ADE7854_CVAROS);
-static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_VPEAK(0644,
 		ade7854_read_32bit,
 		ade7854_write_32bit,
 		ADE7854_VPEAK);
-static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_IPEAK(0644,
 		ade7854_read_32bit,
 		ade7854_write_32bit,
 		ADE7854_VPEAK);
-static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APHCAL(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_APHCAL);
-static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_BPHCAL(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_BPHCAL);
-static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CPHCAL(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CPHCAL);
-static IIO_DEV_ATTR_CF1DEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CF1DEN(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CF1DEN);
-static IIO_DEV_ATTR_CF2DEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CF2DEN(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CF2DEN);
-static IIO_DEV_ATTR_CF3DEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CF3DEN(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CF3DEN);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_LINECYC(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_LINECYC);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAGCYC(0644,
 		ade7854_read_8bit,
 		ade7854_write_8bit,
 		ADE7854_SAGCYC);
-static IIO_DEV_ATTR_CFCYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFCYC(0644,
 		ade7854_read_8bit,
 		ade7854_write_8bit,
 		ADE7854_CFCYC);
-static IIO_DEV_ATTR_PEAKCYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_PEAKCYC(0644,
 		ade7854_read_8bit,
 		ade7854_write_8bit,
 		ADE7854_PEAKCYC);
@@ -318,55 +318,55 @@ static IIO_DEV_ATTR_ANGLE1(ade7854_read_24bit,
 		ADE7854_ANGLE1);
 static IIO_DEV_ATTR_ANGLE2(ade7854_read_24bit,
 		ADE7854_ANGLE2);
-static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+static IIO_DEV_ATTR_AIRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_AIRMS);
-static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+static IIO_DEV_ATTR_BIRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_BIRMS);
-static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+static IIO_DEV_ATTR_CIRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_CIRMS);
-static IIO_DEV_ATTR_NIRMS(S_IRUGO,
+static IIO_DEV_ATTR_NIRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_NIRMS);
-static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+static IIO_DEV_ATTR_AVRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_AVRMS);
-static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+static IIO_DEV_ATTR_BVRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_BVRMS);
-static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+static IIO_DEV_ATTR_CVRMS(0444,
 		ade7854_read_24bit,
 		NULL,
 		ADE7854_CVRMS);
-static IIO_DEV_ATTR_AIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_AIRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_AIRMSOS);
-static IIO_DEV_ATTR_BIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_BIRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_BIRMSOS);
-static IIO_DEV_ATTR_CIRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_CIRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CIRMSOS);
-static IIO_DEV_ATTR_AVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_AVRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_AVRMSOS);
-static IIO_DEV_ATTR_BVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_BVRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_BVRMSOS);
-static IIO_DEV_ATTR_CVRMSOS(S_IRUGO,
+static IIO_DEV_ATTR_CVRMSOS(0444,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
 		ADE7854_CVRMSOS);
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
index b2d25ef..ae03f74 100644
--- a/drivers/staging/ks7010/eap_packet.h
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -18,7 +18,7 @@ struct ether_hdr {
 	unsigned char h_source_snap;
 	unsigned char h_command;
 	unsigned char h_vendor_id[3];
-	unsigned short h_proto;	/* packet type ID field */
+	__be16 h_proto;	/* packet type ID field */
 #define ETHER_PROTOCOL_TYPE_EAP		0x888e
 #define ETHER_PROTOCOL_TYPE_IP		0x0800
 #define ETHER_PROTOCOL_TYPE_ARP		0x0806
@@ -91,7 +91,7 @@ struct ieee802_1x_eapol_key {
 
 struct wpa_eapol_key {
 	unsigned char type;
-	unsigned short key_info;
+	__be16 key_info;
 	unsigned short key_length;
 	unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
 	unsigned char key_nonce[WPA_NONCE_LEN];
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index c325f48..9b28ee1 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -269,7 +269,8 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
 	hdr = (struct hostif_hdr *)buffer;
 
 	DPRINTK(4, "size=%d\n", hdr->size);
-	if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+	if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
+	    le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
 		DPRINTK(1, "unknown event=%04X\n", hdr->event);
 		return 0;
 	}
@@ -327,13 +328,14 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
 
 	hdr = (struct hostif_hdr *)p;
 
-	if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+	if (le16_to_cpu(hdr->event) < HIF_DATA_REQ ||
+	    le16_to_cpu(hdr->event) > HIF_REQ_MAX) {
 		DPRINTK(1, "unknown event=%04X\n", hdr->event);
 		return 0;
 	}
 
 	/* add event to hostt buffer */
-	priv->hostt.buff[priv->hostt.qtail] = hdr->event;
+	priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
 	priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
 
 	DPRINTK(4, "event=%04X\n", hdr->event);
@@ -403,7 +405,7 @@ static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
 
 	hdr = (struct hostif_hdr *)&rx_buffer->data[0];
 	rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
-	event = hdr->event;
+	event = le16_to_cpu(hdr->event);
 	inc_rxqtail(priv);
 
 	ret = ks7010_sdio_writeb(priv, READ_STATUS, REG_STATUS_IDLE);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 49e9542..db01a48 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -147,7 +147,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
 	/* noise */
 	ap->noise = ap_info->noise;
 	/* capability */
-	ap->capability = ap_info->capability;
+	ap->capability = le16_to_cpu(ap_info->capability);
 	/* rsn */
 	if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
 	    (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
@@ -233,12 +233,12 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
 	/* noise */
 	ap->noise = ap_info->noise;
 	/* capability */
-	ap->capability = ap_info->capability;
+	ap->capability = le16_to_cpu(ap_info->capability);
 	/* channel */
 	ap->channel = ap_info->ch_info;
 
 	bp = ap_info->body;
-	bsize = ap_info->body_size;
+	bsize = le16_to_cpu(ap_info->body_size);
 	offset = 0;
 
 	while (bsize > offset) {
@@ -567,9 +567,9 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
 		break;
 	case LOCAL_GAIN:
 		memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
-		DPRINTK(3, "TxMode=%d, RxMode=%d, TxGain=%d, RxGain=%d\n",
-			priv->gain.TxMode, priv->gain.RxMode, priv->gain.TxGain,
-			priv->gain.RxGain);
+		DPRINTK(3, "tx_mode=%d, rx_mode=%d, tx_gain=%d, rx_gain=%d\n",
+			priv->gain.tx_mode, priv->gain.rx_mode,
+			priv->gain.tx_gain, priv->gain.rx_gain);
 		break;
 	case LOCAL_EEPROM_SUM:
 		memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
@@ -948,18 +948,18 @@ void hostif_associate_indication(struct ks_wlan_private *priv)
 	wrqu.data.length += sizeof(associnfo_leader0) - 1;
 	pbuf += sizeof(associnfo_leader0) - 1;
 
-	for (i = 0; i < assoc_req->reqIEs_size; i++)
+	for (i = 0; i < le16_to_cpu(assoc_req->req_ies_size); i++)
 		pbuf += sprintf(pbuf, "%02x", *(pb + i));
-	wrqu.data.length += (assoc_req->reqIEs_size) * 2;
+	wrqu.data.length += (le16_to_cpu(assoc_req->req_ies_size)) * 2;
 
 	memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
 	wrqu.data.length += sizeof(associnfo_leader1) - 1;
 	pbuf += sizeof(associnfo_leader1) - 1;
 
-	pb += assoc_req->reqIEs_size;
-	for (i = 0; i < assoc_resp->respIEs_size; i++)
+	pb += le16_to_cpu(assoc_req->req_ies_size);
+	for (i = 0; i < le16_to_cpu(assoc_resp->resp_ies_size); i++)
 		pbuf += sprintf(pbuf, "%02x", *(pb + i));
-	wrqu.data.length += (assoc_resp->respIEs_size) * 2;
+	wrqu.data.length += (le16_to_cpu(assoc_resp->resp_ies_size)) * 2;
 
 	pbuf += sprintf(pbuf, ")");
 	wrqu.data.length += 1;
@@ -994,22 +994,22 @@ void hostif_phy_information_confirm(struct ks_wlan_private *priv)
 {
 	struct iw_statistics *wstats = &priv->wstats;
 	unsigned char rssi, signal, noise;
-	unsigned char LinkSpeed;
-	unsigned int TransmittedFrameCount, ReceivedFragmentCount;
-	unsigned int FailedCount, FCSErrorCount;
+	unsigned char link_speed;
+	unsigned int transmitted_frame_count, received_fragment_count;
+	unsigned int failed_count, fcs_error_count;
 
 	DPRINTK(3, "\n");
 	rssi = get_BYTE(priv);
 	signal = get_BYTE(priv);
 	noise = get_BYTE(priv);
-	LinkSpeed = get_BYTE(priv);
-	TransmittedFrameCount = get_DWORD(priv);
-	ReceivedFragmentCount = get_DWORD(priv);
-	FailedCount = get_DWORD(priv);
-	FCSErrorCount = get_DWORD(priv);
+	link_speed = get_BYTE(priv);
+	transmitted_frame_count = get_DWORD(priv);
+	received_fragment_count = get_DWORD(priv);
+	failed_count = get_DWORD(priv);
+	fcs_error_count = get_DWORD(priv);
 
 	DPRINTK(4, "phyinfo confirm rssi=%d signal=%d\n", rssi, signal);
-	priv->current_rate = (LinkSpeed & RATE_MASK);
+	priv->current_rate = (link_speed & RATE_MASK);
 	wstats->qual.qual = signal;
 	wstats->qual.level = 256 - rssi;
 	wstats->qual.noise = 0;	/* invalid noise value */
@@ -1017,14 +1017,13 @@ void hostif_phy_information_confirm(struct ks_wlan_private *priv)
 
 	DPRINTK(3, "\n    rssi=%u\n"
 		   "    signal=%u\n"
-		   "    LinkSpeed=%ux500Kbps\n"
-		   "    TransmittedFrameCount=%u\n"
-		   "    ReceivedFragmentCount=%u\n"
-		   "    FailedCount=%u\n"
-		   "    FCSErrorCount=%u\n",
-		rssi, signal, LinkSpeed, TransmittedFrameCount,
-		ReceivedFragmentCount, FailedCount, FCSErrorCount);
-
+		   "    link_speed=%ux500Kbps\n"
+		   "    transmitted_frame_count=%u\n"
+		   "    received_fragment_count=%u\n"
+		   "    failed_count=%u\n"
+		   "    fcs_error_count=%u\n",
+		rssi, signal, link_speed, transmitted_frame_count,
+		received_fragment_count, failed_count, fcs_error_count);
 	/* wake_up_interruptible_all(&priv->confirm_wait); */
 	complete(&priv->confirm_wait);
 }
@@ -1660,13 +1659,13 @@ void hostif_phy_information_request(struct ks_wlan_private *priv)
 
 static
 void hostif_power_mgmt_request(struct ks_wlan_private *priv,
-			       unsigned long mode, unsigned long wake_up,
-			       unsigned long receiveDTIMs)
+				unsigned long mode, unsigned long wake_up,
+				unsigned long receive_dtims)
 {
 	struct hostif_power_mgmt_request_t *pp;
 
-	DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
-		receiveDTIMs);
+	DPRINTK(3, "mode=%lu wake_up=%lu receive_dtims=%lu\n", mode, wake_up,
+		receive_dtims);
 
 	pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
 	if (!pp)
@@ -1674,7 +1673,7 @@ void hostif_power_mgmt_request(struct ks_wlan_private *priv,
 
 	pp->mode = cpu_to_le32((uint32_t)mode);
 	pp->wake_up = cpu_to_le32((uint32_t)wake_up);
-	pp->receiveDTIMs = cpu_to_le32((uint32_t)receiveDTIMs);
+	pp->receive_dtims = cpu_to_le32((uint32_t)receive_dtims);
 
 	/* send to device request */
 	ps_confirm_wait_inc(priv);
@@ -1822,7 +1821,7 @@ void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
 static
 void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
 {
-	u32 val;
+	__le32 val;
 
 	switch (type) {
 	case SME_WEP_INDEX_REQUEST:
@@ -1871,13 +1870,13 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
 }
 
 struct wpa_suite_t {
-	unsigned short size;
+	__le16 size;
 	unsigned char suite[4][CIPHER_ID_LEN];
 } __packed;
 
 struct rsn_mode_t {
-	u32 rsn_mode;
-	u16 rsn_capability;
+	__le32 rsn_mode;
+	__le16 rsn_capability;
 } __packed;
 
 static
@@ -1885,7 +1884,7 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
 {
 	struct wpa_suite_t wpa_suite;
 	struct rsn_mode_t rsn_mode;
-	u32 val;
+	__le32 val;
 
 	memset(&wpa_suite, 0, sizeof(wpa_suite));
 
@@ -1937,7 +1936,8 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
 
 		hostif_mib_set_request(priv, DOT11_RSN_CONFIG_UNICAST_CIPHER,
 				       sizeof(wpa_suite.size) +
-				       CIPHER_ID_LEN * wpa_suite.size,
+				       CIPHER_ID_LEN *
+				       le16_to_cpu(wpa_suite.size),
 				       MIB_VALUE_TYPE_OSTRING, &wpa_suite);
 		break;
 	case SME_RSN_MCAST_REQUEST:
@@ -2029,7 +2029,8 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
 
 		hostif_mib_set_request(priv, DOT11_RSN_CONFIG_AUTH_SUITE,
 				       sizeof(wpa_suite.size) +
-				       KEY_MGMT_ID_LEN * wpa_suite.size,
+				       KEY_MGMT_ID_LEN *
+				       le16_to_cpu(wpa_suite.size),
 				       MIB_VALUE_TYPE_OSTRING, &wpa_suite);
 		break;
 	case SME_RSN_ENABLED_REQUEST:
@@ -2166,7 +2167,7 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 	int mc_count;
 	struct netdev_hw_addr *ha;
 	char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
-	unsigned long filter_type;
+	__le32 filter_type;
 	int i = 0;
 
 	DPRINTK(3, "\n");
@@ -2218,44 +2219,44 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 static
 void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
 {
-	unsigned long mode, wake_up, receiveDTIMs;
+	unsigned long mode, wake_up, receive_dtims;
 
 	DPRINTK(3, "\n");
 	switch (priv->reg.power_mgmt) {
 	case POWER_MGMT_ACTIVE:
 		mode = POWER_ACTIVE;
 		wake_up = 0;
-		receiveDTIMs = 0;
+		receive_dtims = 0;
 		break;
 	case POWER_MGMT_SAVE1:
 		if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
 			mode = POWER_SAVE;
 			wake_up = 0;
-			receiveDTIMs = 0;
+			receive_dtims = 0;
 		} else {
 			mode = POWER_ACTIVE;
 			wake_up = 0;
-			receiveDTIMs = 0;
+			receive_dtims = 0;
 		}
 		break;
 	case POWER_MGMT_SAVE2:
 		if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
 			mode = POWER_SAVE;
 			wake_up = 0;
-			receiveDTIMs = 1;
+			receive_dtims = 1;
 		} else {
 			mode = POWER_ACTIVE;
 			wake_up = 0;
-			receiveDTIMs = 0;
+			receive_dtims = 0;
 		}
 		break;
 	default:
 		mode = POWER_ACTIVE;
 		wake_up = 0;
-		receiveDTIMs = 0;
+		receive_dtims = 0;
 		break;
 	}
-	hostif_power_mgmt_request(priv, mode, wake_up, receiveDTIMs);
+	hostif_power_mgmt_request(priv, mode, wake_up, receive_dtims);
 }
 
 static
@@ -2277,7 +2278,7 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
 static
 void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
 {
-	u32 val;
+	__le32 val;
 
 	switch (type) {
 	case SME_SET_FLAG:
@@ -2336,7 +2337,7 @@ static
 void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
 {
 	struct pmk_cache_t {
-		u16 size;
+		__le16 size;
 		struct {
 			u8 bssid[ETH_ALEN];
 			u8 pmkid[IW_PMKID_LEN];
@@ -2367,7 +2368,7 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
 static
 void hostif_sme_execute(struct ks_wlan_private *priv, int event)
 {
-	u32 val;
+	__le32 val;
 
 	DPRINTK(3, "event=%d\n", event);
 	switch (event) {
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index d758076..5bae8d4 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -62,27 +62,27 @@
  */
 
 struct hostif_hdr {
-	u16 size;
-	u16 event;
+	__le16 size;
+	__le16 event;
 } __packed;
 
 struct hostif_data_request_t {
 	struct hostif_hdr header;
-	u16 auth_type;
+	__le16 auth_type;
 #define TYPE_DATA 0x0000
 #define TYPE_AUTH 0x0001
-	u16 reserved;
+	__le16 reserved;
 	u8 data[0];
 } __packed;
 
 struct hostif_data_indication_t {
 	struct hostif_hdr header;
-	u16 auth_type;
+	__le16 auth_type;
 /* #define TYPE_DATA 0x0000 */
 #define TYPE_PMK1 0x0001
 #define TYPE_GMK1 0x0002
 #define TYPE_GMK2 0x0003
-	u16 reserved;
+	__le16 reserved;
 	u8 data[0];
 } __packed;
 
@@ -143,12 +143,12 @@ struct channel_list_t {
 
 struct hostif_mib_get_request_t {
 	struct hostif_hdr header;
-	u32 mib_attribute;
+	__le32 mib_attribute;
 } __packed;
 
 struct hostif_mib_value_t {
-	u16 size;
-	u16 type;
+	__le16 size;
+	__le16 type;
 #define MIB_VALUE_TYPE_NULL     0
 #define MIB_VALUE_TYPE_INT      1
 #define MIB_VALUE_TYPE_BOOL     2
@@ -159,36 +159,36 @@ struct hostif_mib_value_t {
 
 struct hostif_mib_get_confirm_t {
 	struct hostif_hdr header;
-	u32 mib_status;
+	__le32 mib_status;
 #define MIB_SUCCESS    0
 #define MIB_INVALID    1
 #define MIB_READ_ONLY  2
 #define MIB_WRITE_ONLY 3
-	u32 mib_attribute;
+	__le32 mib_attribute;
 	struct hostif_mib_value_t mib_value;
 } __packed;
 
 struct hostif_mib_set_request_t {
 	struct hostif_hdr header;
-	u32 mib_attribute;
+	__le32 mib_attribute;
 	struct hostif_mib_value_t mib_value;
 } __packed;
 
 struct hostif_mib_set_confirm_t {
 	struct hostif_hdr header;
-	u32 mib_status;
-	u32 mib_attribute;
+	__le32 mib_status;
+	__le32 mib_attribute;
 } __packed;
 
 struct hostif_power_mgmt_request_t {
 	struct hostif_hdr header;
-	u32 mode;
+	__le32 mode;
 #define POWER_ACTIVE  1
 #define POWER_SAVE    2
-	u32 wake_up;
+	__le32 wake_up;
 #define SLEEP_FALSE 0
 #define SLEEP_TRUE  1	/* not used */
-	u32 receiveDTIMs;
+	__le32 receive_dtims;
 #define DTIM_FALSE 0
 #define DTIM_TRUE  1
 } __packed;
@@ -207,12 +207,12 @@ enum power_mgmt_mode_type {
 
 struct hostif_power_mgmt_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 struct hostif_start_request_t {
 	struct hostif_hdr header;
-	u16 mode;
+	__le16 mode;
 #define MODE_PSEUDO_ADHOC   0
 #define MODE_INFRASTRUCTURE 1
 #define MODE_AP             2	/* not used */
@@ -221,7 +221,7 @@ struct hostif_start_request_t {
 
 struct hostif_start_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 #define SSID_MAX_SIZE 32
@@ -238,26 +238,26 @@ struct rate_set8_t {
 	u8 rate_pad;
 } __packed;
 
-struct FhParms_t {
-	u16 dwellTime;
-	u8 hopSet;
-	u8 hopPattern;
-	u8 hopIndex;
+struct fh_parms_t {
+	__le16 dwell_time;
+	u8 hop_set;
+	u8 hop_pattern;
+	u8 hop_index;
 } __packed;
 
-struct DsParms_t {
+struct ds_parms_t {
 	u8 channel;
 } __packed;
 
-struct CfParms_t {
+struct cf_parms_t {
 	u8 count;
 	u8 period;
-	u16 maxDuration;
-	u16 durRemaining;
+	__le16 max_duration;
+	__le16 dur_remaining;
 } __packed;
 
-struct IbssParms_t {
-	u16 atimWindow;
+struct ibss_parms_t {
+	__le16 atim_window;
 } __packed;
 
 struct rsn_t {
@@ -266,7 +266,7 @@ struct rsn_t {
 	u8 body[RSN_BODY_SIZE];
 } __packed;
 
-struct ErpParams_t {
+struct erp_params_t {
 	u8 erp_info;
 } __packed;
 
@@ -282,8 +282,8 @@ struct ap_info_t {
 	u8 sq;	/* +07 */
 	u8 noise;	/* +08 */
 	u8 pad0;	/* +09 */
-	u16 beacon_period;	/* +10 */
-	u16 capability;	/* +12 */
+	__le16 beacon_period;	/* +10 */
+	__le16 capability;	/* +12 */
 #define BSS_CAP_ESS             BIT(0)
 #define BSS_CAP_IBSS            BIT(1)
 #define BSS_CAP_CF_POLABLE      BIT(2)
@@ -298,7 +298,7 @@ struct ap_info_t {
 	u8 ch_info;	/* +15 */
 #define FRAME_TYPE_BEACON	0x80
 #define FRAME_TYPE_PROBE_RESP	0x50
-	u16 body_size;	/* +16 */
+	__le16 body_size;	/* +16 */
 	u8 body[1024];	/* +18 */
 	/* +1032 */
 } __packed;
@@ -309,14 +309,14 @@ struct link_ap_info_t {
 	u8 sq;	/* +07 */
 	u8 noise;	/* +08 */
 	u8 pad0;	/* +09 */
-	u16 beacon_period;	/* +10 */
-	u16 capability;	/* +12 */
+	__le16 beacon_period;	/* +10 */
+	__le16 capability;	/* +12 */
 	struct rate_set8_t rate_set;	/* +14 */
-	struct FhParms_t fh_parameter;	/* +24 */
-	struct DsParms_t ds_parameter;	/* +29 */
-	struct CfParms_t cf_parameter;	/* +30 */
-	struct IbssParms_t ibss_parameter;	/* +36 */
-	struct ErpParams_t erp_parameter;	/* +38 */
+	struct fh_parms_t fh_parameter;	/* +24 */
+	struct ds_parms_t ds_parameter;	/* +29 */
+	struct cf_parms_t cf_parameter;	/* +30 */
+	struct ibss_parms_t ibss_parameter;	/* +36 */
+	struct erp_params_t erp_parameter;	/* +38 */
 	u8 pad1;	/* +39 */
 	struct rate_set8_t ext_rate_set;	/* +40 */
 	u8 DTIM_period;	/* +50 */
@@ -332,7 +332,7 @@ struct link_ap_info_t {
 
 struct hostif_connect_indication_t {
 	struct hostif_hdr header;
-	u16 connect_code;
+	__le16 connect_code;
 #define RESULT_CONNECT    0
 #define RESULT_DISCONNECT 1
 	struct link_ap_info_t link_ap_info;
@@ -344,7 +344,7 @@ struct hostif_stop_request_t {
 
 struct hostif_stop_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 /**
@@ -356,23 +356,23 @@ struct hostif_stop_confirm_t {
  */
 struct hostif_ps_adhoc_set_request_t {
 	struct hostif_hdr header;
-	u16 phy_type;
+	__le16 phy_type;
 #define D_11B_ONLY_MODE		0
 #define D_11G_ONLY_MODE		1
 #define D_11BG_COMPATIBLE_MODE	2
 #define D_11A_ONLY_MODE		3
-	u16 cts_mode;
+	__le16 cts_mode;
 #define CTS_MODE_FALSE	0
 #define CTS_MODE_TRUE	1
-	u16 channel;
+	__le16 channel;
 	struct rate_set16_t rate_set;
-	u16 capability;
-	u16 scan_type;
+	__le16 capability;
+	__le16 scan_type;
 } __packed;
 
 struct hostif_ps_adhoc_set_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 /**
@@ -384,17 +384,17 @@ struct hostif_ps_adhoc_set_confirm_t {
  */
 struct hostif_infrastructure_set_request_t {
 	struct hostif_hdr header;
-	u16 phy_type;
-	u16 cts_mode;
+	__le16 phy_type;
+	__le16 cts_mode;
 	struct rate_set16_t rate_set;
 	struct ssid_t ssid;
-	u16 capability;
-	u16 beacon_lost_count;
-	u16 auth_type;
+	__le16 capability;
+	__le16 beacon_lost_count;
+	__le16 auth_type;
 #define AUTH_TYPE_OPEN_SYSTEM 0
 #define AUTH_TYPE_SHARED_KEY  1
 	struct channel_list_t channel_list;
-	u16 scan_type;
+	__le16 scan_type;
 } __packed;
 
 /**
@@ -406,23 +406,23 @@ struct hostif_infrastructure_set_request_t {
  */
 struct hostif_infrastructure_set2_request_t {
 	struct hostif_hdr header;
-	u16 phy_type;
-	u16 cts_mode;
+	__le16 phy_type;
+	__le16 cts_mode;
 	struct rate_set16_t rate_set;
 	struct ssid_t ssid;
-	u16 capability;
-	u16 beacon_lost_count;
-	u16 auth_type;
+	__le16 capability;
+	__le16 beacon_lost_count;
+	__le16 auth_type;
 #define AUTH_TYPE_OPEN_SYSTEM 0
 #define AUTH_TYPE_SHARED_KEY  1
 	struct channel_list_t channel_list;
-	u16 scan_type;
+	__le16 scan_type;
 	u8 bssid[ETH_ALEN];
 } __packed;
 
 struct hostif_infrastructure_set_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 /**
@@ -434,13 +434,13 @@ struct hostif_infrastructure_set_confirm_t {
  */
 struct hostif_adhoc_set_request_t {
 	struct hostif_hdr header;
-	u16 phy_type;
-	u16 cts_mode;
-	u16 channel;
+	__le16 phy_type;
+	__le16 cts_mode;
+	__le16 channel;
 	struct rate_set16_t rate_set;
 	struct ssid_t ssid;
-	u16 capability;
-	u16 scan_type;
+	__le16 capability;
+	__le16 scan_type;
 } __packed;
 
 /**
@@ -452,20 +452,20 @@ struct hostif_adhoc_set_request_t {
  */
 struct hostif_adhoc_set2_request_t {
 	struct hostif_hdr header;
-	u16 phy_type;
-	u16 cts_mode;
-	u16 reserved;
+	__le16 phy_type;
+	__le16 cts_mode;
+	__le16 reserved;
 	struct rate_set16_t rate_set;
 	struct ssid_t ssid;
-	u16 capability;
-	u16 scan_type;
+	__le16 capability;
+	__le16 scan_type;
 	struct channel_list_t channel_list;
 	u8 bssid[ETH_ALEN];
 } __packed;
 
 struct hostif_adhoc_set_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 struct last_associate_t {
@@ -478,10 +478,10 @@ struct association_request_t {
 #define FRAME_TYPE_ASSOC_REQ	0x00
 #define FRAME_TYPE_REASSOC_REQ	0x20
 	u8 pad;
-	u16 capability;
-	u16 listen_interval;
+	__le16 capability;
+	__le16 listen_interval;
 	u8 ap_address[6];
-	u16 reqIEs_size;
+	__le16 req_ies_size;
 } __packed;
 
 struct association_response_t {
@@ -489,17 +489,17 @@ struct association_response_t {
 #define FRAME_TYPE_ASSOC_RESP	0x10
 #define FRAME_TYPE_REASSOC_RESP	0x30
 	u8 pad;
-	u16 capability;
-	u16 status;
-	u16 association_id;
-	u16 respIEs_size;
+	__le16 capability;
+	__le16 status;
+	__le16 association_id;
+	__le16 resp_ies_size;
 } __packed;
 
 struct hostif_associate_indication_t {
 	struct hostif_hdr header;
 	struct association_request_t assoc_req;
 	struct association_response_t assoc_resp;
-	/* followed by (reqIEs_size + respIEs_size) octets of data */
+	/* followed by (req_ies_size + resp_ies_size) octets of data */
 	/* reqIEs data *//* respIEs data */
 } __packed;
 
@@ -509,24 +509,24 @@ struct hostif_bss_scan_request_t {
 #define ACTIVE_SCAN  0
 #define PASSIVE_SCAN 1
 	u8 pad[3];
-	u32 ch_time_min;
-	u32 ch_time_max;
+	__le32 ch_time_min;
+	__le32 ch_time_max;
 	struct channel_list_t channel_list;
 	struct ssid_t ssid;
 } __packed;
 
 struct hostif_bss_scan_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
-	u16 reserved;
+	__le16 result_code;
+	__le16 reserved;
 } __packed;
 
 struct hostif_phy_information_request_t {
 	struct hostif_hdr header;
-	u16 type;
+	__le16 type;
 #define NORMAL_TYPE	0
 #define TIME_TYPE	1
-	u16 time;	/* unit 100ms */
+	__le16 time;	/* unit 100ms */
 } __packed;
 
 struct hostif_phy_information_confirm_t {
@@ -535,10 +535,10 @@ struct hostif_phy_information_confirm_t {
 	u8 sq;
 	u8 noise;
 	u8 link_speed;
-	u32 tx_frame;
-	u32 rx_frame;
-	u32 tx_error;
-	u32 rx_error;
+	__le32 tx_frame;
+	__le32 rx_frame;
+	__le32 tx_error;
+	__le32 rx_error;
 } __packed;
 
 enum sleep_mode_type {
@@ -552,18 +552,18 @@ struct hostif_sleep_request_t {
 
 struct hostif_sleep_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 struct hostif_mic_failure_request_t {
 	struct hostif_hdr header;
-	u16 failure_count;
-	u16 timer;
+	__le16 failure_count;
+	__le16 timer;
 } __packed;
 
 struct hostif_mic_failure_confirm_t {
 	struct hostif_hdr header;
-	u16 result_code;
+	__le16 result_code;
 } __packed;
 
 #define BASIC_RATE	0x80
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index cd4f56d..3767079 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -264,10 +264,10 @@ struct local_aplist_t {
 };
 
 struct local_gain_t {
-	u8 TxMode;
-	u8 RxMode;
-	u8 TxGain;
-	u8 RxGain;
+	u8 tx_mode;
+	u8 rx_mode;
+	u8 tx_gain;
+	u8 rx_gain;
 };
 
 struct local_eeprom_sum_t {
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 5a43f19..8aa12e8 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -2273,7 +2273,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
 		netdev_info(dev, "SET_SLEEP_MODE %d\n", priv->sleep_mode);
 		hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
 	} else {
-		netdev_err(dev, "SET_SLEEP_MODE %d errror\n", *uwrq);
+		netdev_err(dev, "SET_SLEEP_MODE %d error\n", *uwrq);
 		return -EINVAL;
 	}
 
@@ -2378,14 +2378,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
 		return -EPERM;
 	/* for SLEEP MODE */
 	if (*uwrq >= 0 && *uwrq <= 0xFF)	/* 0-255 */
-		priv->gain.TxGain = (uint8_t)*uwrq;
+		priv->gain.tx_gain = (uint8_t)*uwrq;
 	else
 		return -EINVAL;
 
-	if (priv->gain.TxGain < 0xFF)
-		priv->gain.TxMode = 1;
+	if (priv->gain.tx_gain < 0xFF)
+		priv->gain.tx_mode = 1;
 	else
-		priv->gain.TxMode = 0;
+		priv->gain.tx_mode = 0;
 
 	hostif_sme_enqueue(priv, SME_SET_GAIN);
 	return 0;
@@ -2400,7 +2400,7 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
 	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
 	/* for SLEEP MODE */
-	*uwrq = priv->gain.TxGain;
+	*uwrq = priv->gain.tx_gain;
 	hostif_sme_enqueue(priv, SME_GET_GAIN);
 	return 0;
 }
@@ -2415,14 +2415,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
 		return -EPERM;
 	/* for SLEEP MODE */
 	if (*uwrq >= 0 && *uwrq <= 0xFF)	/* 0-255 */
-		priv->gain.RxGain = (uint8_t)*uwrq;
+		priv->gain.rx_gain = (uint8_t)*uwrq;
 	else
 		return -EINVAL;
 
-	if (priv->gain.RxGain < 0xFF)
-		priv->gain.RxMode = 1;
+	if (priv->gain.rx_gain < 0xFF)
+		priv->gain.rx_mode = 1;
 	else
-		priv->gain.RxMode = 0;
+		priv->gain.rx_mode = 0;
 
 	hostif_sme_enqueue(priv, SME_SET_GAIN);
 	return 0;
@@ -2437,7 +2437,7 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
 	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
 	/* for SLEEP MODE */
-	*uwrq = priv->gain.RxGain;
+	*uwrq = priv->gain.rx_gain;
 	hostif_sme_enqueue(priv, SME_GET_GAIN);
 	return 0;
 }
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 79321e4..0520f02 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -2306,7 +2306,7 @@ static int kiblnd_dev_need_failover(struct kib_dev *dev)
 
 	memset(&srcaddr, 0, sizeof(srcaddr));
 	srcaddr.sin_family = AF_INET;
-	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+	srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
 
 	memset(&dstaddr, 0, sizeof(dstaddr));
 	dstaddr.sin_family = AF_INET;
@@ -2378,7 +2378,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
 
 	memset(&addr, 0, sizeof(addr));
 	addr.sin_family      = AF_INET;
-	addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+	addr.sin_addr.s_addr = htonl(dev->ibd_ifip);
 	addr.sin_port	= htons(*kiblnd_tunables.kib_service);
 
 	/* Bind to failover device or port */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 0db662d..85b242e 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3267,7 +3267,7 @@ int
 kiblnd_connd(void *arg)
 {
 	spinlock_t *lock = &kiblnd_data.kib_connd_lock;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	struct kib_conn *conn;
 	int timeout;
@@ -3521,7 +3521,7 @@ kiblnd_scheduler(void *arg)
 	long id = (long)arg;
 	struct kib_sched_info *sched;
 	struct kib_conn *conn;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	struct ib_wc wc;
 	int did_something;
@@ -3656,7 +3656,7 @@ kiblnd_failover_thread(void *arg)
 {
 	rwlock_t *glock = &kiblnd_data.kib_global_lock;
 	struct kib_dev *dev;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned long flags;
 	int rc;
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 3ed3b08..6b38d5a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -2166,7 +2166,7 @@ ksocknal_connd(void *arg)
 {
 	spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
 	struct ksock_connreq *cr;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int nloops = 0;
 	int cons_retry = 0;
 
@@ -2554,7 +2554,7 @@ ksocknal_check_peer_timeouts(int idx)
 int
 ksocknal_reaper(void *arg)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct ksock_conn *conn;
 	struct ksock_sched *sched;
 	struct list_head enomem_conns;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c56e992..49deb44 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -361,7 +361,7 @@ static int libcfs_debug_dumplog_thread(void *arg)
 
 void libcfs_debug_dumplog(void)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct task_struct *dumper;
 
 	/* we're being careful to ensure that the kernel thread is
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 75eb84e..a5a9478 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -57,8 +57,9 @@ int cfs_tracefile_init_arch(void)
 	memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
 	for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
 		cfs_trace_data[i] =
-			kmalloc(sizeof(union cfs_trace_data_union) *
-				num_possible_cpus(), GFP_KERNEL);
+			kmalloc_array(num_possible_cpus(),
+				      sizeof(union cfs_trace_data_union),
+				      GFP_KERNEL);
 		if (!cfs_trace_data[i])
 			goto out;
 	}
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 9599b74..d1aa79b 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -191,10 +191,9 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
 		} else {
 			tage = cfs_tage_alloc(GFP_ATOMIC);
 			if (unlikely(!tage)) {
-				if ((!memory_pressure_get() ||
-				     in_interrupt()) && printk_ratelimit())
-					pr_warn("cannot allocate a tage (%ld)\n",
-						tcd->tcd_cur_pages);
+				if (!memory_pressure_get() || in_interrupt())
+					pr_warn_ratelimited("cannot allocate a tage (%ld)\n",
+							    tcd->tcd_cur_pages);
 				return NULL;
 			}
 		}
@@ -229,9 +228,8 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
 	 * from here: this will lead to infinite recursion.
 	 */
 
-	if (printk_ratelimit())
-		pr_warn("debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
-			pgcount + 1, tcd->tcd_cur_pages);
+	pr_warn_ratelimited("debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
+			    pgcount + 1, tcd->tcd_cur_pages);
 
 	INIT_LIST_HEAD(&pc.pc_pages);
 
@@ -990,7 +988,7 @@ static int tracefiled(void *arg)
 	complete(&tctl->tctl_start);
 
 	while (1) {
-		wait_queue_t __wait;
+		wait_queue_entry_t __wait;
 
 		pc.pc_want_daemon_pages = 0;
 		collect_pages(&pc);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index ce4b835..9ebba4e 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -312,7 +312,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
 {
 	int tms = *timeout_ms;
 	int wait;
-	wait_queue_t wl;
+	wait_queue_entry_t wl;
 	unsigned long now;
 
 	if (!tms)
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index a99c5c0..20ebe24 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -1274,9 +1274,9 @@ lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
 	int rc;
 
 	/* Convert put fields to host byte order */
-	hdr->msg.put.match_bits	= le64_to_cpu(hdr->msg.put.match_bits);
-	hdr->msg.put.ptl_index	= le32_to_cpu(hdr->msg.put.ptl_index);
-	hdr->msg.put.offset	= le32_to_cpu(hdr->msg.put.offset);
+	le64_to_cpus(&hdr->msg.put.match_bits);
+	le32_to_cpus(&hdr->msg.put.ptl_index);
+	le32_to_cpus(&hdr->msg.put.offset);
 
 	info.mi_id.nid	= hdr->src_nid;
 	info.mi_id.pid	= hdr->src_pid;
@@ -1332,10 +1332,10 @@ lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
 	int rc;
 
 	/* Convert get fields to host byte order */
-	hdr->msg.get.match_bits  = le64_to_cpu(hdr->msg.get.match_bits);
-	hdr->msg.get.ptl_index   = le32_to_cpu(hdr->msg.get.ptl_index);
-	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
-	hdr->msg.get.src_offset  = le32_to_cpu(hdr->msg.get.src_offset);
+	le64_to_cpus(&hdr->msg.get.match_bits);
+	le32_to_cpus(&hdr->msg.get.ptl_index);
+	le32_to_cpus(&hdr->msg.get.sink_length);
+	le32_to_cpus(&hdr->msg.get.src_offset);
 
 	info.mi_id.nid  = hdr->src_nid;
 	info.mi_id.pid  = hdr->src_pid;
@@ -1464,8 +1464,8 @@ lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
 	src.pid = hdr->src_pid;
 
 	/* Convert ack fields to host byte order */
-	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
-	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
+	le64_to_cpus(&hdr->msg.ack.match_bits);
+	le32_to_cpus(&hdr->msg.ack.mlength);
 
 	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
 	lnet_res_lock(cpt);
@@ -1798,7 +1798,7 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
 		/* convert common msg->hdr fields to host byteorder */
 		msg->msg_hdr.type	= type;
 		msg->msg_hdr.src_nid	= src_nid;
-		msg->msg_hdr.src_pid	= le32_to_cpu(msg->msg_hdr.src_pid);
+		le32_to_cpus(&msg->msg_hdr.src_pid);
 		msg->msg_hdr.dest_nid	= dest_nid;
 		msg->msg_hdr.dest_pid	= dest_pid;
 		msg->msg_hdr.payload_length = payload_length;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 9fca8d2..800f4f6 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -89,7 +89,7 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
 	struct ifreq ifr;
 	int nob;
 	int rc;
-	__u32 val;
+	__be32 val;
 
 	nob = strnlen(name, IFNAMSIZ);
 	if (nob == IFNAMSIZ) {
@@ -516,7 +516,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
 int
 lnet_sock_accept(struct socket **newsockp, struct socket *sock)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct socket *newsock;
 	int rc;
 
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 999f250..19895fa 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -192,7 +192,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 }
 
 static int seq_fid_alloc_prep(struct lu_client_seq *seq,
-			      wait_queue_t *link)
+			      wait_queue_entry_t *link)
 {
 	if (seq->lcs_update) {
 		add_wait_queue(&seq->lcs_waitq, link);
@@ -223,7 +223,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 int seq_client_alloc_fid(const struct lu_env *env,
 			 struct lu_client_seq *seq, struct lu_fid *fid)
 {
-	wait_queue_t link;
+	wait_queue_entry_t link;
 	int rc;
 
 	LASSERT(seq);
@@ -259,7 +259,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
 			return rc;
 		}
 
-		CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16Lx]\n",
+		CDEBUG(D_INFO, "%s: Switch to sequence [0x%16.16llx]\n",
 		       seq->lcs_name, seqnr);
 
 		seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
@@ -279,7 +279,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
 	*fid = seq->lcs_fid;
 	mutex_unlock(&seq->lcs_mutex);
 
-	CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name,  PFID(fid));
+	CDEBUG(D_INFO, "%s: Allocated FID " DFID "\n", seq->lcs_name,  PFID(fid));
 	return rc;
 }
 EXPORT_SYMBOL(seq_client_alloc_fid);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
  */
 void seq_client_flush(struct lu_client_seq *seq)
 {
-	wait_queue_t link;
+	wait_queue_entry_t link;
 
 	LASSERT(seq);
 	init_waitqueue_entry(&link, current);
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 11f6974..b852fed 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -151,7 +151,7 @@ static void fld_fix_new_list(struct fld_cache *cache)
 			continue;
 
 		LASSERTF(c_range->lsr_start <= n_range->lsr_start,
-			 "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
+			 "cur lsr_start " DRANGE " next lsr_start " DRANGE "\n",
 			 PRANGE(c_range), PRANGE(n_range));
 
 		/* check merge possibility with next range */
@@ -349,7 +349,7 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
 		f_curr->fce_range.lsr_end = new_start;
 		fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
 	} else
-		CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
+		CERROR("NEW range =" DRANGE " curr = " DRANGE "\n",
 		       PRANGE(range), PRANGE(&f_curr->fce_range));
 }
 
@@ -415,7 +415,7 @@ static int fld_cache_insert_nolock(struct fld_cache *cache,
 	if (!prev)
 		prev = head;
 
-	CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
+	CDEBUG(D_INFO, "insert range " DRANGE "\n", PRANGE(&f_new->fce_range));
 	/* Add new entry to cache and lru list. */
 	fld_cache_entry_add(cache, f_new, prev);
 out:
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 61ac420..b83d7eb 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -136,7 +136,7 @@ fld_debugfs_cache_flush_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static struct file_operations fld_debugfs_cache_flush_fops = {
+static const struct file_operations fld_debugfs_cache_flush_fops = {
 	.owner		= THIS_MODULE,
 	.open           = simple_open,
 	.write		= fld_debugfs_cache_flush_write,
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 2bc3ee5..90a0c50 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1287,7 +1287,7 @@ do {								    \
  * @{
  */
 struct cl_page_list {
-	unsigned	     pl_nr;
+	unsigned int		 pl_nr;
 	struct list_head	   pl_pages;
 	struct task_struct	*pl_owner;
 };
@@ -1842,7 +1842,7 @@ struct cl_io {
 	/**
 	 * Number of pages owned by this IO. For invariant checking.
 	 */
-	unsigned	     ci_owned_nr;
+	unsigned int	     ci_owned_nr;
 };
 
 /** @} cl_io */
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 242abb8..915283c 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -49,7 +49,7 @@
 
 struct lprocfs_vars {
 	const char		*name;
-	struct file_operations	*fops;
+	const struct file_operations	*fops;
 	void			*data;
 	/**
 	 * sysfs file mode.
@@ -449,7 +449,7 @@ int lprocfs_exp_cleanup(struct obd_export *exp);
 struct dentry *ldebugfs_add_simple(struct dentry *root,
 				   char *name,
 				   void *data,
-				   struct file_operations *fops);
+				   const struct file_operations *fops);
 
 int ldebugfs_register_stats(struct dentry *parent,
 			    const char *name,
@@ -523,8 +523,8 @@ unsigned long lprocfs_oh_sum(struct obd_histogram *oh);
 void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
 			   struct lprocfs_counter *cnt);
 
-int lprocfs_single_release(struct inode *, struct file *);
-int lprocfs_seq_release(struct inode *, struct file *);
+int lprocfs_single_release(struct inode *inode, struct file *file);
+int lprocfs_seq_release(struct inode *inode, struct file *file);
 
 /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
  * proc entries; otherwise, you will define name##_seq_write function also for
@@ -536,7 +536,7 @@ static int name##_single_open(struct inode *inode, struct file *file)	\
 {									\
 	return single_open(file, name##_seq_show, inode->i_private);	\
 }									\
-static struct file_operations name##_fops = {				\
+static const struct file_operations name##_fops = {			\
 	.owner   = THIS_MODULE,					    \
 	.open    = name##_single_open,				     \
 	.read    = seq_read,					       \
@@ -581,7 +581,7 @@ static struct file_operations name##_fops = {				\
 	{								\
 		return single_open(file, NULL, inode->i_private);	\
 	}								\
-	static struct file_operations name##_##type##_fops = {	\
+	static const struct file_operations name##_##type##_fops = {	\
 		.open	= name##_##type##_open,				\
 		.write	= name##_##type##_write,			\
 		.release = lprocfs_single_release,			\
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 73ecc23..2e70602 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -968,11 +968,11 @@ struct lu_context {
 	 * Version counter used to skip calls to lu_context_refill() when no
 	 * keys were registered.
 	 */
-	unsigned	       lc_version;
+	unsigned int		lc_version;
 	/**
 	 * Debugging cookie.
 	 */
-	unsigned	       lc_cookie;
+	unsigned int		lc_cookie;
 };
 
 /**
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index df48b8d..77995fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -546,7 +546,7 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
 static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
 {
 	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
-		CERROR("bad IGIF, "DFID"\n", PFID(fid));
+		CERROR("bad IGIF, " DFID "\n", PFID(fid));
 		return -EBADF;
 	}
 
@@ -585,7 +585,7 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
 	__u64 seq = ostid_seq(ostid);
 
 	if (ost_idx > 0xffff) {
-		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
+		CERROR("bad ost_idx, " DOSTID " ost_idx:%u\n", POSTID(ostid),
 		       ost_idx);
 		return -EBADF;
 	}
@@ -630,7 +630,7 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
 {
 	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
-		CERROR("bad IGIF, "DFID"\n", PFID(fid));
+		CERROR("bad IGIF, " DFID "\n", PFID(fid));
 		return -EBADF;
 	}
 
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 7d8628c..edff8dc 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -532,7 +532,7 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
 #define FID_NOBRACE_LEN 40
 #define FID_LEN (FID_NOBRACE_LEN + 2)
 #define DFID_NOBRACE "%#llx:0x%x:0x%x"
-#define DFID "["DFID_NOBRACE"]"
+#define DFID "[" DFID_NOBRACE "]"
 #define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
 
 /* scanf input parse format for fids in DFID_NOBRACE format
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index b5a1aad..6dc24a7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -606,7 +606,7 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
 static inline int lu_fid_diff(const struct lu_fid *fid1,
 			      const struct lu_fid *fid2)
 {
-	LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
+	LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:" DFID ", fid2:" DFID "\n",
 		 PFID(fid1), PFID(fid2));
 
 	if (fid_is_idif(fid1) && fid_is_idif(fid2))
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index b04d613..f24970d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -201,7 +201,7 @@ struct l_wait_info {
 			   sigmask(SIGALRM))
 
 /**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively
  * waiting threads, which is not always desirable because all threads will
  * be waken up again and again, even user only needs a few of them to be
  * active most time. This is not good for performance because cache can
@@ -228,7 +228,7 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait)		   \
 do {									   \
-	wait_queue_t __wait;						 \
+	wait_queue_entry_t __wait;						 \
 	long __timeout = info->lwi_timeout;			  \
 	sigset_t   __blocked;					      \
 	int   __allow_intr = info->lwi_allow_intr;			     \
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5d24b48..ec3b23c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -79,11 +79,11 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
 	return atomic_read(&ns->ns_bref) == 0;
 }
 
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
-					  enum ldlm_side);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
-					    enum ldlm_side);
-struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
+					  enum ldlm_side client);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
+					    enum ldlm_side client);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client);
 
 /* ldlm_request.c */
 /* Cancel lru flag, it indicates we cancel aged locks. */
@@ -130,16 +130,19 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
 		  enum req_location loc, void *data, int size);
 struct ldlm_lock *
-ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
+ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *id,
 		 enum ldlm_type type, enum ldlm_mode mode,
 		 const struct ldlm_callback_suite *cbs,
 		 void *data, __u32 lvb_len, enum lvb_type lvb_type);
-enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
-				  void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+				  struct ldlm_lock **lock, void *cookie,
+				  __u64 *flags);
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+				      enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+				      enum ldlm_mode mode);
 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
 		      enum ldlm_desc_ast_t ast_type);
 int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 3663c5c..4dc7bae 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -363,17 +363,16 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	 */
 	cli->cl_chunkbits = PAGE_SHIFT;
 
-	if (!strcmp(name, LUSTRE_MDC_NAME)) {
+	if (!strcmp(name, LUSTRE_MDC_NAME))
 		cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
-	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
+	else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */)
 		cli->cl_max_rpcs_in_flight = 2;
-	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
+	else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */)
 		cli->cl_max_rpcs_in_flight = 3;
-	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
+	else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */)
 		cli->cl_max_rpcs_in_flight = 4;
-	} else {
+	else
 		cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
-	}
 
 	spin_lock_init(&cli->cl_mod_rpcs_lock);
 	spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 84eeaa5..4028e11 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -445,8 +445,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
 
 		if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
 				 &lock->l_resource->lr_name)) {
-			CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
-				       " instead of "DLDLMRES"\n",
+			CDEBUG(D_INFO, "remote intent success, locking " DLDLMRES
+				       " instead of " DLDLMRES "\n",
 			       PLDLMRES(&reply->lock_desc.l_resource),
 			       PLDLMRES(lock->l_resource));
 
@@ -1677,7 +1677,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 					   0, flags | LCF_BL_AST, opaque);
 	rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
 	if (rc != ELDLM_OK)
-		CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
+		CERROR("canceling unused lock " DLDLMRES ": rc = %d\n",
 		       PLDLMRES(res), rc);
 
 	LDLM_RESOURCE_DELREF(res);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 633f65b0..c9ef247 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -78,7 +78,25 @@ lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
 
 LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
 
-LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
+static int ldlm_rw_uint_seq_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%u\n", *(unsigned int *)m->private);
+	return 0;
+}
+
+static ssize_t
+ldlm_rw_uint_seq_write(struct file *file, const char __user *buffer,
+		       size_t count, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+
+	if (count == 0)
+		return 0;
+	return kstrtouint_from_user(buffer, count, 0,
+				    (unsigned int *)seq->private);
+}
+
+LPROC_SEQ_FOPS(ldlm_rw_uint);
 
 static struct lprocfs_vars ldlm_debugfs_list[] = {
 	{ "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
@@ -831,7 +849,7 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 	struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
 
 	lock_res(res);
-	CERROR("%s: namespace resource "DLDLMRES
+	CERROR("%s: namespace resource " DLDLMRES
 	       " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
 	       ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
 	       atomic_read(&res->lr_refcount) - 1);
@@ -1373,7 +1391,7 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
 	if (!((libcfs_debug | D_ERROR) & level))
 		return;
 
-	CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
+	CDEBUG(level, "--- Resource: " DLDLMRES " (%p) refcount = %d\n",
 	       PLDLMRES(res), res, atomic_read(&res->lr_refcount));
 
 	if (!list_empty(&res->lr_granted)) {
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 38f8466..d20425f 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -180,7 +180,7 @@ void ll_invalidate_aliases(struct inode *inode)
 {
 	struct dentry *dentry;
 
-	CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
+	CDEBUG(D_INODE, "marking dentries for ino " DFID "(%p) invalid\n",
 	       PFID(ll_inode2fid(inode)), inode);
 
 	spin_lock(&inode->i_lock);
@@ -216,7 +216,7 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 	if (it->it_lock_mode && inode) {
 		struct ll_sb_info *sbi = ll_i2sbi(inode);
 
-		CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
+		CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "(%p)\n",
 		       PFID(ll_inode2fid(inode)), inode);
 		ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
 	}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 13b3592..03a72c0 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -303,7 +303,7 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
 	struct md_op_data *op_data;
 	int			rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos/size %lu/%llu 32bit_api %d\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
 	       PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
 	       i_size_read(inode), api32);
 
@@ -419,7 +419,7 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
 	if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
 		return -EINVAL;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) name %s stripe_offset %d, stripe_count: %u\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) name %s stripe_offset %d, stripe_count: %u\n",
 	       PFID(ll_inode2fid(parent)), parent, dirname,
 	       (int)lump->lum_stripe_offset, lump->lum_stripe_count);
 
@@ -606,7 +606,7 @@ int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
 	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 	ll_finish_md_op_data(op_data);
 	if (rc < 0) {
-		CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
+		CDEBUG(D_INFO, "md_getattr failed on inode " DFID ": rc %d\n",
 		       PFID(ll_inode2fid(inode)), rc);
 		goto out;
 	}
@@ -733,7 +733,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
 		iput(inode);
 		if (rc != 0) {
 			CDEBUG(D_HSM, "Could not read file data version of "
-				      DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
+				      DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
 				      PFID(&copy->hc_hai.hai_fid), rc,
 				      copy->hc_hai.hai_cookie);
 			hpk.hpk_flags |= HP_FLAG_RETRY;
@@ -833,7 +833,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
 		if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
 		    (copy->hc_data_version != data_version)) {
 			CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
-			       DFID", start:%#llx current:%#llx\n",
+			       DFID ", start:%#llx current:%#llx\n",
 			       PFID(&copy->hc_hai.hai_fid),
 			       copy->hc_data_version, data_version);
 			/* File was changed, send error to cdt. Do not ask for
@@ -1037,7 +1037,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct obd_ioctl_data *data;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), cmd=%#x\n",
 	       PFID(ll_inode2fid(inode)), inode, cmd);
 
 	/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
@@ -1141,7 +1141,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		}
 
 #if OBD_OCD_VERSION(2, 9, 50, 0) > LUSTRE_VERSION_CODE
-		mode = data->ioc_type != 0 ? data->ioc_type : S_IRWXUGO;
+		mode = data->ioc_type != 0 ? data->ioc_type : 0777;
 #else
 		mode = data->ioc_type;
 #endif
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 67c4b9c..ab1c85c 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -316,7 +316,7 @@ int ll_file_release(struct inode *inode, struct file *file)
 	struct ll_inode_info *lli = ll_i2info(inode);
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(ll_inode2fid(inode)), inode);
 
 	if (!is_root_inode(inode))
@@ -494,7 +494,7 @@ int ll_file_open(struct inode *inode, struct file *file)
 	struct ll_file_data *fd;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), flags %o\n",
 	       PFID(ll_inode2fid(inode)), inode, file->f_flags);
 
 	it = file->private_data; /* XXX: compat macro */
@@ -834,7 +834,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
 	}
 	rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
 	if (rc2 < 0)
-		CERROR("%s: error closing file "DFID": %d\n",
+		CERROR("%s: error closing file " DFID ": %d\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0),
 		       PFID(&ll_i2info(inode)->lli_fid), rc2);
 	och = NULL; /* och has been freed in ll_close_inode_openhandle() */
@@ -1665,7 +1665,7 @@ int ll_hsm_release(struct inode *inode)
 	int rc;
 	u16 refcheck;
 
-	CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
+	CDEBUG(D_INODE, "%s: Releasing file " DFID ".\n",
 	       ll_get_fsname(inode->i_sb, NULL, 0),
 	       PFID(&ll_i2info(inode)->lli_fid));
 
@@ -1928,7 +1928,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct ll_file_data	*fd = LUSTRE_FPRIVATE(file);
 	int			 flags, rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),cmd=%x\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),cmd=%x\n",
 	       PFID(ll_inode2fid(inode)), inode, cmd);
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
 
@@ -2263,7 +2263,7 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
 
 	retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
 			   (origin == SEEK_CUR) ? file->f_pos : 0);
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), to=%llu=%#llx(%d)\n",
 	       PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
 
@@ -2360,7 +2360,7 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 	struct ptlrpc_request *req;
 	int rc, err;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(ll_inode2fid(inode)), inode);
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
 
@@ -2422,7 +2422,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
 	int rc;
 	int rc2 = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID " file_lock=%p\n",
 	       PFID(ll_inode2fid(inode)), file_lock);
 
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
@@ -2507,7 +2507,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
 
-	CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
+	CDEBUG(D_DLMTRACE, "inode=" DFID ", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
 	       PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
 	       einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
 
@@ -2582,7 +2582,7 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	struct qstr qstr;
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "migrate %s under "DFID" to MDT%d\n",
+	CDEBUG(D_VFSTRACE, "migrate %s under " DFID " to MDT%d\n",
 	       name, PFID(ll_inode2fid(parent)), mdtidx);
 
 	op_data = ll_prep_md_op_data(NULL, parent, NULL, name, namelen,
@@ -2617,7 +2617,7 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	inode_lock(child_inode);
 	op_data->op_fid3 = *ll_inode2fid(child_inode);
 	if (!fid_is_sane(&op_data->op_fid3)) {
-		CERROR("%s: migrate %s, but fid "DFID" is insane\n",
+		CERROR("%s: migrate %s, but fid " DFID " is insane\n",
 		       ll_get_fsname(parent->i_sb, NULL, 0), name,
 		       PFID(&op_data->op_fid3));
 		rc = -EINVAL;
@@ -2629,7 +2629,7 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 		goto out_unlock;
 
 	if (rc == mdtidx) {
-		CDEBUG(D_INFO, "%s:"DFID" is already on MDT%d.\n", name,
+		CDEBUG(D_INFO, "%s: " DFID " is already on MDT%d.\n", name,
 		       PFID(&op_data->op_fid3), mdtidx);
 		rc = 0;
 		goto out_unlock;
@@ -2733,7 +2733,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
 		return 0;
 
 	fid = &ll_i2info(inode)->lli_fid;
-	CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+	CDEBUG(D_INFO, "trying to match res " DFID " mode %s\n", PFID(fid),
 	       ldlm_lockname[mode]);
 
 	flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
@@ -2767,7 +2767,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
 	struct lu_fid *fid;
 
 	fid = &ll_i2info(inode)->lli_fid;
-	CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+	CDEBUG(D_INFO, "trying to match res " DFID "\n", PFID(fid));
 
 	return md_lock_match(ll_i2mdexp(inode), flags | LDLM_FL_BLOCK_GRANTED,
 			     fid, LDLM_IBITS, &policy, mode, lockh);
@@ -2792,7 +2792,7 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
 			return 0;
 	} else if (rc != 0) {
 		CDEBUG_LIMIT((rc == -EACCES || rc == -EIDRM) ? D_INFO : D_ERROR,
-			     "%s: revalidate FID "DFID" error: rc = %d\n",
+			     "%s: revalidate FID " DFID " error: rc = %d\n",
 			     ll_get_fsname(inode->i_sb, NULL, 0),
 			     PFID(ll_inode2fid(inode)), rc);
 	}
@@ -2807,7 +2807,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 	struct obd_export *exp;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%pd\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p),name=%pd\n",
 	       PFID(ll_inode2fid(inode)), inode, dentry);
 
 	exp = ll_i2mdexp(inode);
@@ -3067,7 +3067,7 @@ int ll_inode_permission(struct inode *inode, int mask)
 			return rc;
 	}
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), inode mode %x mask %o\n",
 	       PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
 
 	/* squash fsuid/fsgid if needed */
@@ -3114,7 +3114,7 @@ int ll_inode_permission(struct inode *inode, int mask)
 }
 
 /* -o localflock - only provides locally consistent flock locks */
-struct file_operations ll_file_operations = {
+const struct file_operations ll_file_operations = {
 	.read_iter = ll_file_read_iter,
 	.write_iter = ll_file_write_iter,
 	.unlocked_ioctl = ll_file_ioctl,
@@ -3127,7 +3127,7 @@ struct file_operations ll_file_operations = {
 	.flush	  = ll_flush
 };
 
-struct file_operations ll_file_operations_flock = {
+const struct file_operations ll_file_operations_flock = {
 	.read_iter    = ll_file_read_iter,
 	.write_iter   = ll_file_write_iter,
 	.unlocked_ioctl = ll_file_ioctl,
@@ -3143,7 +3143,7 @@ struct file_operations ll_file_operations_flock = {
 };
 
 /* These are for -o noflock - to return ENOSYS on flock calls */
-struct file_operations ll_file_operations_noflock = {
+const struct file_operations ll_file_operations_noflock = {
 	.read_iter    = ll_file_read_iter,
 	.write_iter   = ll_file_write_iter,
 	.unlocked_ioctl = ll_file_ioctl,
@@ -3322,7 +3322,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
 	int lmmsize;
 	int rc;
 
-	CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
+	CDEBUG(D_INODE, DFID " LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
 	       PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
 	       lock->l_lvb_data, lock->l_lvb_len);
 
@@ -3447,7 +3447,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 
 	/* wait for IO to complete if it's still being used. */
 	if (wait_layout) {
-		CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
+		CDEBUG(D_INODE, "%s: " DFID "(%p) wait for layout reconf\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0),
 		       PFID(&lli->lli_fid), inode);
 
@@ -3458,7 +3458,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 		if (rc == 0)
 			rc = -EAGAIN;
 
-		CDEBUG(D_INODE, "%s: file="DFID" waiting layout return: %d.\n",
+		CDEBUG(D_INODE, "%s: file=" DFID " waiting layout return: %d.\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0),
 		       PFID(&lli->lli_fid), rc);
 	}
@@ -3504,7 +3504,7 @@ static int ll_layout_refresh_locked(struct inode *inode)
 	it.it_op = IT_LAYOUT;
 	lockh.cookie = 0ULL;
 
-	LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
+	LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file " DFID "(%p)",
 			  ll_get_fsname(inode->i_sb, NULL, 0),
 			  PFID(&lli->lli_fid), inode);
 
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 8af6110..96515b8 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -207,7 +207,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
 	struct lu_object_header *header = obj->co_lu.lo_header;
-	wait_queue_t	   waiter;
+	wait_queue_entry_t	   waiter;
 
 	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
 		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
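
The wait_queue_t to wait_queue_entry_t switch here (and in lov_cl_internal.h and lov_object.c further down) follows the 4.13 wait-queue rework, which renamed the per-waiter entry type; behaviour is unchanged. A minimal sketch of the renamed type in the classic wait loop, assuming an ordinary wait-queue user rather than Lustre's actual caller:

#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical helper for illustration: sleep on wq until *cond
 * becomes non-zero, using the renamed wait_queue_entry_t.
 */
static void demo_wait_for(wait_queue_head_t *wq, int *cond)
{
	wait_queue_entry_t waiter;

	init_waitqueue_entry(&waiter, current);
	add_wait_queue(wq, &waiter);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*cond))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &waiter);
}
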
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index d2a0fab..cd3311a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -470,7 +470,7 @@ struct ll_sb_info {
 
 	struct ll_ra_info	 ll_ra_info;
 	unsigned int	      ll_namelen;
-	struct file_operations   *ll_fop;
+	const struct file_operations	*ll_fop;
 
 	unsigned int		  ll_md_brw_pages; /* readdir pages per RPC */
 
@@ -718,14 +718,14 @@ extern const struct inode_operations ll_special_inode_operations;
 struct inode *ll_iget(struct super_block *sb, ino_t hash,
 		      struct lustre_md *lic);
 int ll_test_inode_by_fid(struct inode *inode, void *opaque);
-int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
+int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 		       void *data, int flag);
 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
 void ll_update_times(struct ptlrpc_request *request, struct inode *inode);
 
 /* llite/rw.c */
 int ll_writepage(struct page *page, struct writeback_control *wbc);
-int ll_writepages(struct address_space *, struct writeback_control *wbc);
+int ll_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int ll_readpage(struct file *file, struct page *page);
 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -736,9 +736,9 @@ void ll_cl_remove(struct file *file, const struct lu_env *env);
 extern const struct address_space_operations ll_aops;
 
 /* llite/file.c */
-extern struct file_operations ll_file_operations;
-extern struct file_operations ll_file_operations_flock;
-extern struct file_operations ll_file_operations_noflock;
+extern const struct file_operations ll_file_operations;
+extern const struct file_operations ll_file_operations_flock;
+extern const struct file_operations ll_file_operations_noflock;
 extern const struct inode_operations ll_file_inode_operations;
 int ll_have_md_lock(struct inode *inode, __u64 *bits,
 		    enum ldlm_mode l_req_mode);
@@ -747,7 +747,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
 			       enum ldlm_mode mode);
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
-int ll_release_openhandle(struct inode *, struct lookup_intent *);
+int ll_release_openhandle(struct inode *inode, struct lookup_intent *it);
 int ll_md_real_close(struct inode *inode, fmode_t fmode);
 int ll_getattr(const struct path *path, struct kstat *stat,
 	       u32 request_mask, unsigned int flags);
@@ -778,9 +778,9 @@ int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss);
 /* llite/dcache.c */
 
 extern const struct dentry_operations ll_d_ops;
-void ll_intent_drop_lock(struct lookup_intent *);
-void ll_intent_release(struct lookup_intent *);
-void ll_invalidate_aliases(struct inode *);
+void ll_intent_drop_lock(struct lookup_intent *it);
+void ll_intent_release(struct lookup_intent *it);
+void ll_invalidate_aliases(struct inode *inode);
 void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
 			    struct lookup_intent *it, struct inode *inode);
@@ -811,7 +811,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data);
 int ll_show_options(struct seq_file *seq, struct dentry *dentry);
 void ll_dirty_page_discard_warn(struct page *page, int ioret);
 int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
-		  struct super_block *, struct lookup_intent *);
+		  struct super_block *sb, struct lookup_intent *it);
 int ll_obd_statfs(struct inode *inode, void __user *arg);
 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
@@ -1253,7 +1253,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
 		 */
 		if (it->it_remote_lock_mode) {
 			handle.cookie = it->it_remote_lock_handle;
-			CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
+			CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for remote lock %#llx\n",
 			       PFID(ll_inode2fid(inode)), inode,
 			       handle.cookie);
 			md_set_lock_data(exp, &handle, inode, NULL);
@@ -1261,7 +1261,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
 
 		handle.cookie = it->it_lock_handle;
 
-		CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
+		CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for lock %#llx\n",
 		       PFID(ll_inode2fid(inode)), inode, handle.cookie);
 
 		md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
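
Constifying the three ll_file_operations tables (definitions in file.c above, extern declarations and the ll_sb_info::ll_fop pointer here) is the usual hardening step for method tables that are only ever read after build time: the structures move to .rodata and any accidental write becomes a compile error. A generic sketch of the idiom, with hypothetical demo_* names:

#include <linux/fs.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
	return 0;	/* nothing to set up in this sketch */
}

/* const: the ops table is read-only, so it can live in .rodata and be
 * referenced through a const pointer such as the ll_fop member above.
 */
static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= demo_open,
	.llseek	= noop_llseek,
};
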
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index ca5040c..974a05d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -427,13 +427,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 		goto out_lock_cn_cb;
 	}
 	if (!fid_is_sane(&sbi->ll_root_fid)) {
-		CERROR("%s: Invalid root fid "DFID" during mount\n",
+		CERROR("%s: Invalid root fid " DFID " during mount\n",
 		       sbi->ll_md_exp->exp_obd->obd_name,
 		       PFID(&sbi->ll_root_fid));
 		err = -EINVAL;
 		goto out_lock_cn_cb;
 	}
-	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
+	CDEBUG(D_SUPER, "rootfid " DFID "\n", PFID(&sbi->ll_root_fid));
 
 	sb->s_op = &lustre_super_operations;
 	sb->s_xattr = ll_xattr_handlers;
@@ -1079,7 +1079,7 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb,
 	ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);
 	inode = iget_locked(sb, ino);
 	if (!inode) {
-		CERROR("%s: failed get simple inode "DFID": rc = -ENOENT\n",
+		CERROR("%s: failed get simple inode " DFID ": rc = -ENOENT\n",
 		       ll_get_fsname(sb, NULL, 0), PFID(fid));
 		return ERR_PTR(-ENOENT);
 	}
@@ -1090,7 +1090,7 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb,
 
 		inode->i_mode = (inode->i_mode & ~S_IFMT) |
 				(body->mbo_mode & S_IFMT);
-		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode "DFID"\n",
+		LASSERTF(S_ISDIR(inode->i_mode), "Not slave inode " DFID "\n",
 			 PFID(fid));
 
 		LTIME_S(inode->i_mtime) = 0;
@@ -1106,7 +1106,7 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb,
 		LASSERT(lsm);
 		/* master object FID */
 		lli->lli_pfid = body->mbo_fid1;
-		CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
+		CDEBUG(D_INODE, "lli %p slave " DFID " master " DFID "\n",
 		       lli, PFID(fid), PFID(&lli->lli_pfid));
 		unlock_new_inode(inode);
 	}
@@ -1174,7 +1174,7 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 	int rc;
 
 	LASSERT(S_ISDIR(inode->i_mode));
-	CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,
+	CDEBUG(D_INODE, "update lsm %p of " DFID "\n", lli->lli_lsm_md,
 	       PFID(ll_inode2fid(inode)));
 
 	/* no striped information from request. */
@@ -1187,7 +1187,7 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 			 * migration is done, the temporary MIGRATE layout has
 			 * been removed
 			 */
-			CDEBUG(D_INODE, DFID" finish migration.\n",
+			CDEBUG(D_INODE, DFID " finish migration.\n",
 			       PFID(ll_inode2fid(inode)));
 			lmv_free_memmd(lli->lli_lsm_md);
 			lli->lli_lsm_md = NULL;
@@ -1241,7 +1241,7 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 
 		kfree(attr);
 
-		CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
+		CDEBUG(D_INODE, "Set lsm %p magic %x to " DFID "\n", lsm,
 		       lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
 		return 0;
 	}
@@ -1251,7 +1251,7 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 		struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
 		int idx;
 
-		CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
+		CERROR("%s: inode " DFID "(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
 		       inode, lsm, old_lsm,
 		       lsm->lsm_md_magic, old_lsm->lsm_md_magic,
@@ -1266,13 +1266,13 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 		       old_lsm->lsm_md_pool_name);
 
 		for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
-			CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
+			CERROR("%s: sub FIDs in old lsm idx %d, old: " DFID "\n",
 			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
 			       PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
 		}
 
 		for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
-			CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
+			CERROR("%s: sub FIDs in new lsm idx %d, new: " DFID "\n",
 			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
 			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
 		}
@@ -1288,7 +1288,7 @@ void ll_clear_inode(struct inode *inode)
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(ll_inode2fid(inode)), inode);
 
 	if (S_ISDIR(inode->i_mode)) {
@@ -1421,7 +1421,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 	struct md_op_data *op_data = NULL;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
+	CDEBUG(D_VFSTRACE, "%s: setattr inode " DFID "(%p) from %llu to %llu, valid %x, hsm_import %d\n",
 	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
 	       i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
 
@@ -1436,7 +1436,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 		 * needs another check in addition to the VFS check above.
 		 */
 		if (attr->ia_size > ll_file_maxbytes(inode)) {
-			CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
+			CDEBUG(D_INODE, "file " DFID " too large %llu > %llu\n",
 			       PFID(&lli->lli_fid), attr->ia_size,
 			       ll_file_maxbytes(inode));
 			return -EFBIG;
@@ -1785,7 +1785,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
 		/* FID shouldn't be changed! */
 		if (fid_is_sane(&lli->lli_fid)) {
 			LASSERTF(lu_fid_eq(&lli->lli_fid, &body->mbo_fid1),
-				 "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
+				 "Trying to change FID " DFID " to the " DFID ", inode " DFID "(%p)\n",
 				 PFID(&lli->lli_fid), PFID(&body->mbo_fid1),
 				 PFID(ll_inode2fid(inode)), inode);
 		} else {
@@ -1820,7 +1820,7 @@ int ll_read_inode2(struct inode *inode, void *opaque)
 	struct ll_inode_info *lli = ll_i2info(inode);
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(&lli->lli_fid), inode);
 
 	/* Core attributes from the MDS first.  This is a new inode, and
@@ -1902,7 +1902,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
 		rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 		ll_finish_md_op_data(op_data);
 		if (rc) {
-			CERROR("%s: failure inode "DFID": rc = %d\n",
+			CERROR("%s: failure inode " DFID ": rc = %d\n",
 			       sbi->ll_md_exp->exp_obd->obd_name,
 			       PFID(ll_inode2fid(inode)), rc);
 			return -abs(rc);
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index cbbfdaf..ccc7ae1 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -378,7 +378,7 @@ static int ll_page_mkwrite(struct vm_fault *vmf)
 		if (!printed && ++count > 16) {
 			const struct dentry *de = vma->vm_file->f_path.dentry;
 
-			CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
+			CWARN("app(%s): the page %lu of file " DFID " is under heavy contention\n",
 			      current->comm, vmf->pgoff,
 			      PFID(ll_inode2fid(de->d_inode)));
 			printed = true;
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 49a930f..e50c637 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -84,7 +84,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
 	struct  md_op_data    *op_data;
 	int		   rc;
 
-	CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
+	CDEBUG(D_INFO, "searching inode for:(%lu," DFID ")\n", hash, PFID(fid));
 
 	inode = ilookup5(sb, hash, ll_test_inode_by_fid, (void *)fid);
 	if (inode)
@@ -109,7 +109,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
 	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
 	kfree(op_data);
 	if (rc) {
-		CDEBUG(D_INFO, "can't get object attrs, fid "DFID", rc %d\n",
+		CDEBUG(D_INFO, "can't get object attrs, fid " DFID ", rc %d\n",
 		       PFID(fid), rc);
 		return ERR_PTR(rc);
 	}
@@ -195,7 +195,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
 	int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
 	struct lustre_nfs_fid *nfs_fid = (void *)fh;
 
-	CDEBUG(D_INFO, "%s: encoding for ("DFID") maxlen=%d minlen=%d\n",
+	CDEBUG(D_INFO, "%s: encoding for (" DFID ") maxlen=%d minlen=%d\n",
 	       ll_get_fsname(inode->i_sb, NULL, 0),
 	       PFID(ll_inode2fid(inode)), *plen, fileid_len);
 
@@ -312,7 +312,7 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid)
 
 	sbi = ll_s2sbi(dir->i_sb);
 
-	CDEBUG(D_INFO, "%s: getting parent for ("DFID")\n",
+	CDEBUG(D_INFO, "%s: getting parent for (" DFID ")\n",
 	       ll_get_fsname(dir->i_sb, NULL, 0),
 	       PFID(ll_inode2fid(dir)));
 
@@ -329,7 +329,7 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid)
 	rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
 	ll_finish_md_op_data(op_data);
 	if (rc) {
-		CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
+		CERROR("%s: failure inode " DFID " get parent: rc = %d\n",
 		       ll_get_fsname(dir->i_sb, NULL, 0),
 		       PFID(ll_inode2fid(dir)), rc);
 		return rc;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index c742cba..aeae667 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -39,9 +39,9 @@
 #include "vvp_internal.h"
 
 /* debugfs llite mount point registration */
-static struct file_operations ll_rw_extents_stats_fops;
-static struct file_operations ll_rw_extents_stats_pp_fops;
-static struct file_operations ll_rw_offset_stats_fops;
+static const struct file_operations ll_rw_extents_stats_fops;
+static const struct file_operations ll_rw_extents_stats_pp_fops;
+static const struct file_operations ll_rw_offset_stats_fops;
 
 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
 			      char *buf)
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index d583696..a208a8b 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -86,7 +86,7 @@ static int ll_set_inode(struct inode *inode, void *opaque)
 
 	inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mbo_mode & S_IFMT);
 	if (unlikely(inode->i_mode == 0)) {
-		CERROR("Invalid inode "DFID" type\n", PFID(&lli->lli_fid));
+		CERROR("Invalid inode " DFID " type\n", PFID(&lli->lli_fid));
 		return -EINVAL;
 	}
 
@@ -134,7 +134,7 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
 		}
 	} else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
 		rc = ll_update_inode(inode, md);
-		CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
+		CDEBUG(D_VFSTRACE, "got inode: " DFID "(%p): rc = %d\n",
 		       PFID(&md->body->mbo_fid1), inode, rc);
 		if (rc) {
 			if (S_ISDIR(inode->i_mode))
@@ -205,7 +205,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
 		if (!fid_res_name_eq(ll_inode2fid(inode),
 				     &lock->l_resource->lr_name)) {
-			LDLM_ERROR(lock, "data mismatch with object "DFID"(%p)",
+			LDLM_ERROR(lock, "data mismatch with object " DFID "(%p)",
 				   PFID(ll_inode2fid(inode)), inode);
 			LBUG();
 		}
@@ -257,7 +257,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 			rc = ll_layout_conf(inode, &conf);
 			if (rc < 0)
 				CDEBUG(D_INODE, "cannot invalidate layout of "
-				       DFID": rc = %d\n",
+				       DFID ": rc = %d\n",
 				       PFID(ll_inode2fid(inode)), rc);
 		}
 
@@ -274,7 +274,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 		if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
 			struct ll_inode_info *lli = ll_i2info(inode);
 
-			CDEBUG(D_INODE, "invalidating inode "DFID" lli = %p, pfid  = "DFID"\n",
+			CDEBUG(D_INODE, "invalidating inode " DFID " lli = %p, pfid  = " DFID "\n",
 			       PFID(ll_inode2fid(inode)), lli,
 			       PFID(&lli->lli_pfid));
 
@@ -290,7 +290,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 				 * we have to invalidate the negative children
 				 * on master inode
 				 */
-				CDEBUG(D_INODE, "Invalidate s"DFID" m"DFID"\n",
+				CDEBUG(D_INODE, "Invalidate s" DFID " m" DFID "\n",
 				       PFID(ll_inode2fid(inode)),
 				       PFID(&lli->lli_pfid));
 
@@ -542,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 	if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),intent=%s\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),intent=%s\n",
 	       dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
 
 	if (d_mountpoint(dentry))
@@ -650,7 +650,7 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
 	struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
 	struct dentry *de;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),flags=%u\n",
 	       dentry, PFID(ll_inode2fid(parent)), parent, flags);
 
 	/* Optimize away (CREATE && !OPEN). Let .create handle the race.
@@ -678,14 +678,14 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
  * together.
  */
 static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
-			  struct file *file, unsigned open_flags,
+			  struct file *file, unsigned int open_flags,
 			  umode_t mode, int *opened)
 {
 	struct lookup_intent *it;
 	struct dentry *de;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),file %p,open_flags %x,mode %x opened %d\n",
 	       dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
 	       *opened);
 
@@ -792,7 +792,7 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
 	 * lock on the inode.  Since we finally have an inode pointer,
 	 * stuff it in the lock.
 	 */
-	CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
+	CDEBUG(D_DLMTRACE, "setting l_ast_data to inode " DFID "(%p)\n",
 	       PFID(ll_inode2fid(dir)), inode);
 	ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
  out:
@@ -820,7 +820,7 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry,
 	struct inode *inode;
 	int rc = 0;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), intent=%s\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), intent=%s\n",
 	       dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
 
 	rc = it_open_error(DISP_OPEN_CREATE, it);
@@ -844,7 +844,7 @@ void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
 	LASSERT(body);
 	if (body->mbo_valid & OBD_MD_FLMTIME &&
 	    body->mbo_mtime > LTIME_S(inode->i_mtime)) {
-		CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
+		CDEBUG(D_INODE, "setting fid " DFID " mtime from %lu to %llu\n",
 		       PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
 		       body->mbo_mtime);
 		LTIME_S(inode->i_mtime) = body->mbo_mtime;
@@ -942,7 +942,7 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
 {
 	int err;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p) mode %o dev %x\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p) mode %o dev %x\n",
 	       dchild, PFID(ll_inode2fid(dir)), dir, mode,
 	       old_encode_dev(rdev));
 
@@ -982,7 +982,7 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
 {
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), flags=%u, excl=%d\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
 	       dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
 
 	rc = ll_mknod(dir, dentry, mode, 0);
@@ -1032,7 +1032,7 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int err;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir"DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir" DFID "(%p)\n",
 	       dentry, PFID(ll_inode2fid(dir)), dir);
 
 	if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
@@ -1052,7 +1052,7 @@ static int ll_rmdir(struct inode *dir, struct dentry *dchild)
 	struct md_op_data *op_data;
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p)\n",
 	       dchild, PFID(ll_inode2fid(dir)), dir);
 
 	op_data = ll_prep_md_op_data(NULL, dir, NULL,
@@ -1082,7 +1082,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
 {
 	int err;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),target=%.*s\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p),target=%.*s\n",
 	       dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
 
 	err = ll_new_node(dir, dentry, oldname, S_IFLNK | 0777,
@@ -1103,7 +1103,7 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
 	struct md_op_data *op_data;
 	int err;
 
-	CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), target=%pd\n",
+	CDEBUG(D_VFSTRACE, "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
 	       PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
 	       new_dentry);
 
@@ -1138,7 +1138,7 @@ static int ll_rename(struct inode *src, struct dentry *src_dchild,
 		return -EINVAL;
 
 	CDEBUG(D_VFSTRACE,
-	       "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
+	       "VFS Op:oldname=%pd, src_dir=" DFID "(%p), newname=%pd, tgt_dir=" DFID "(%p)\n",
 	       src_dchild, PFID(ll_inode2fid(src)), src,
 	       tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
 
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 420f296..3619cd8 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -327,7 +327,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
 	if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
 		return -EINVAL;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
 	       PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
 	       file_offset, file_offset, count >> PAGE_SHIFT,
 	       MAX_DIO_SIZE >> PAGE_SHIFT);
@@ -547,7 +547,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 }
 
 static int ll_write_end(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned copied,
+			loff_t pos, unsigned int len, unsigned int copied,
 			struct page *vmpage, void *fsdata)
 {
 	struct ll_cl_context *lcc = fsdata;
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index fb7c315..9bbca01 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -528,7 +528,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
 	}
 
 	CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
-	       DFID", idx = %llu\n", PFID(&lli->lli_fid), index);
+	       DFID ", idx = %llu\n", PFID(&lli->lli_fid), index);
 
 	cl_agl(inode);
 	lli->lli_agl_index = 0;
@@ -536,7 +536,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
 	up_write(&lli->lli_glimpse_sem);
 
 	CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
-	       DFID", idx = %llu, rc = %d\n",
+	       DFID ", idx = %llu, rc = %d\n",
 	       PFID(&lli->lli_fid), index, rc);
 
 	iput(inode);
@@ -1008,7 +1008,7 @@ static int ll_statahead_thread(void *arg)
 		sai->sai_in_readpage = 0;
 		if (IS_ERR(page)) {
 			rc = PTR_ERR(page);
-			CDEBUG(D_READA, "error reading dir "DFID" at %llu/%llu: opendir_pid = %u: rc = %d\n",
+			CDEBUG(D_READA, "error reading dir " DFID " at %llu/%llu: opendir_pid = %u: rc = %d\n",
 			       PFID(ll_inode2fid(dir)), pos, sai->sai_index,
 			       lli->lli_opendir_pid, rc);
 			break;
@@ -1105,7 +1105,7 @@ static int ll_statahead_thread(void *arg)
 		if (sa_low_hit(sai)) {
 			rc = -EFAULT;
 			atomic_inc(&sbi->ll_sa_wrong);
-			CDEBUG(D_READA, "Statahead for dir "DFID" hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
+			CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread: pid %d\n",
 			       PFID(&lli->lli_fid), sai->sai_hit,
 			       sai->sai_miss, sai->sai_sent,
 			       sai->sai_replied, current_pid());
@@ -1211,7 +1211,7 @@ void ll_deauthorize_statahead(struct inode *dir, void *key)
 	LASSERT(lli->lli_opendir_key == key);
 	LASSERT(lli->lli_opendir_pid);
 
-	CDEBUG(D_READA, "deauthorize statahead for "DFID"\n",
+	CDEBUG(D_READA, "deauthorize statahead for " DFID "\n",
 	       PFID(&lli->lli_fid));
 
 	spin_lock(&lli->lli_sa_lock);
@@ -1274,7 +1274,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
 			struct ll_inode_info *lli = ll_i2info(dir);
 
 			rc = PTR_ERR(page);
-			CERROR("%s: error reading dir "DFID" at %llu: opendir_pid = %u : rc = %d\n",
+			CERROR("%s: error reading dir " DFID " at %llu: opendir_pid = %u : rc = %d\n",
 			       ll_get_fsname(dir->i_sb, NULL, 0),
 			       PFID(ll_inode2fid(dir)), pos,
 			       lli->lli_opendir_pid, rc);
@@ -1471,7 +1471,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
 			} else if ((*dentryp)->d_inode != inode) {
 				/* revalidate, but inode is recreated */
 				CDEBUG(D_READA,
-				       "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
+				       "%s: stale dentry %pd inode " DFID ", statahead inode " DFID "\n",
 				       ll_get_fsname((*dentryp)->d_inode->i_sb,
 						     NULL, 0),
 				       *dentryp,
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 60aac42..3cd3348 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -72,7 +72,7 @@ static int ll_readlink_internal(struct inode *inode,
 	ll_finish_md_op_data(op_data);
 	if (rc) {
 		if (rc != -ENOENT)
-			CERROR("%s: inode "DFID": rc = %d\n",
+			CERROR("%s: inode " DFID ": rc = %d\n",
 			       ll_get_fsname(inode->i_sb, NULL, 0),
 			       PFID(ll_inode2fid(inode)), rc);
 		goto failed;
@@ -87,7 +87,7 @@ static int ll_readlink_internal(struct inode *inode,
 
 	LASSERT(symlen != 0);
 	if (body->mbo_eadatasize != symlen) {
-		CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
+		CERROR("%s: inode " DFID ": symlink length %d not expected %d\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0),
 		       PFID(ll_inode2fid(inode)), body->mbo_eadatasize - 1,
 		       symlen - 1);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 6cb2db2..8e45672 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -381,11 +381,11 @@ int cl_sb_fini(struct super_block *sb)
 #define PGC_DEPTH_SHIFT (32)
 
 struct vvp_pgcache_id {
-	unsigned		 vpi_bucket;
-	unsigned		 vpi_depth;
+	unsigned int		 vpi_bucket;
+	unsigned int		 vpi_depth;
 	uint32_t		 vpi_index;
 
-	unsigned		 vpi_curdep;
+	unsigned int		 vpi_curdep;
 	struct lu_object_header *vpi_obj;
 };
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index aa31bc0..c5ba265 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -325,7 +325,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
 		io->ci_need_restart = vio->vui_layout_gen != gen;
 		if (io->ci_need_restart) {
 			CDEBUG(D_VFSTRACE,
-			       DFID" layout changed from %d to %d.\n",
+			       DFID " layout changed from %d to %d.\n",
 			       PFID(lu_object_fid(&obj->co_lu)),
 			       vio->vui_layout_gen, gen);
 			/* today successful restore is the only possible case */
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 8e18cf8..9bfd72e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -70,7 +70,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
 	     atomic_read(&obj->vob_mmap_cnt), inode);
 	if (inode) {
 		lli = ll_i2info(inode);
-		(*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
+		(*p)(env, cookie, "%lu/%u %o %u %d %p " DFID,
 		     inode->i_ino, inode->i_generation, inode->i_mode,
 		     inode->i_nlink, atomic_read(&inode->i_count),
 		     lli->lli_clob, PFID(&lli->lli_fid));
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 6187bff..bd30abd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -195,7 +195,7 @@ static int ll_xattr_set(const struct xattr_handler *handler,
 	LASSERT(inode);
 	LASSERT(name);
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
 	       PFID(ll_inode2fid(inode)), inode, name);
 
 	if (!strcmp(name, "lov")) {
@@ -370,7 +370,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
 #endif
 	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(ll_inode2fid(inode)), inode);
 
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
@@ -523,7 +523,7 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
 	LASSERT(inode);
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p)\n",
 	       PFID(ll_inode2fid(inode)), inode);
 
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 38f75f6a..82cf421 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -311,7 +311,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
 
 	if (rc < 0) {
 		CDEBUG(D_CACHE,
-		       "md_intent_lock failed with %d for fid "DFID"\n",
+		       "md_intent_lock failed with %d for fid " DFID "\n",
 		       rc, PFID(ll_inode2fid(inode)));
 		mutex_unlock(&lli->lli_xattrs_enq_lock);
 		return rc;
@@ -365,7 +365,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
 	}
 
 	if (oit->it_status < 0) {
-		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
+		CDEBUG(D_CACHE, "getxattr intent returned %d for fid " DFID "\n",
 		       oit->it_status, PFID(ll_inode2fid(inode)));
 		rc = oit->it_status;
 		/* xattr data is so large that we don't want to cache it */
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index a5265f9..6f8070f 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -70,7 +70,7 @@ int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds)
 		return rc;
 	}
 
-	CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
+	CDEBUG(D_INODE, "FLD lookup got mds #%x for fid=" DFID "\n",
 	       *mds, PFID(fid));
 
 	if (*mds >= lmv->desc.ld_tgt_count) {
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index aa42066..f49db6c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -213,7 +213,7 @@ int lmv_revalidate_slaves(struct obd_export *exp,
 		lockh = (struct lustre_handle *)&it.it_lock_handle;
 		if (rc > 0 && !req) {
 			/* slave inode is still valid */
-			CDEBUG(D_INODE, "slave "DFID" is still valid.\n",
+			CDEBUG(D_INODE, "slave " DFID " is still valid.\n",
 			       PFID(&fid));
 			rc = 0;
 		} else {
@@ -435,7 +435,7 @@ static int lmv_intent_lookup(struct obd_export *exp,
 			if (IS_ERR(tgt))
 				return PTR_ERR(tgt);
 
-			CDEBUG(D_INODE, "Try other stripes " DFID"\n",
+			CDEBUG(D_INODE, "Try other stripes " DFID "\n",
 			       PFID(&oinfo->lmo_fid));
 
 			op_data->op_fid1 = oinfo->lmo_fid;
@@ -479,7 +479,7 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
 
 	LASSERT(fid_is_sane(&op_data->op_fid1));
 
-	CDEBUG(D_INODE, "INTENT LOCK '%s' for "DFID" '%*s' on "DFID"\n",
+	CDEBUG(D_INODE, "INTENT LOCK '%s' for " DFID " '%*s' on " DFID "\n",
 	       LL_IT2STR(it), PFID(&op_data->op_fid2),
 	       (int)op_data->op_namelen, op_data->op_name,
 	       PFID(&op_data->op_fid1));
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7325951..64fcaef 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -75,7 +75,7 @@ static void lmv_activate_target(struct lmv_obd *lmv,
 static int lmv_set_mdc_active(struct lmv_obd *lmv, const struct obd_uuid *uuid,
 			      int activate)
 {
-	struct lmv_tgt_desc    *uninitialized_var(tgt);
+	struct lmv_tgt_desc *tgt = NULL;
 	struct obd_device      *obd;
 	u32		     i;
 	int		     rc = 0;
@@ -673,7 +673,7 @@ static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
 		*ptr = '/';
 	}
 
-	CDEBUG(D_INFO, "%s: get path %s "DFID" rec: %llu ln: %u\n",
+	CDEBUG(D_INFO, "%s: get path %s " DFID " rec: %llu ln: %u\n",
 	       tgt->ltd_exp->exp_obd->obd_name,
 	       gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno,
 	       gf->gf_linkno);
@@ -693,7 +693,7 @@ static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
 	}
 
 	if (!fid_is_sane(&gf->gf_fid)) {
-		CERROR("%s: invalid FID "DFID": rc = %d\n",
+		CERROR("%s: invalid FID " DFID ": rc = %d\n",
 		       tgt->ltd_exp->exp_obd->obd_name,
 		       PFID(&gf->gf_fid), -EINVAL);
 		rc = -EINVAL;
@@ -1508,7 +1508,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
 	if (rc)
 		return rc;
 
-	CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+	CDEBUG(D_INODE, "CBDATA for " DFID "\n", PFID(fid));
 
 	/*
 	 * With DNE every object can have two locks in different namespaces:
@@ -1540,7 +1540,7 @@ static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
 	if (IS_ERR(tgt))
 		return PTR_ERR(tgt);
 
-	CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
+	CDEBUG(D_INODE, "CLOSE " DFID "\n", PFID(&op_data->op_fid1));
 	return md_close(tgt->ltd_exp, op_data, mod, request);
 }
 
@@ -1672,7 +1672,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
 	if (IS_ERR(tgt))
 		return PTR_ERR(tgt);
 
-	CDEBUG(D_INODE, "CREATE name '%.*s' on "DFID" -> mds #%x\n",
+	CDEBUG(D_INODE, "CREATE name '%.*s' on " DFID " -> mds #%x\n",
 	       (int)op_data->op_namelen, op_data->op_name,
 	       PFID(&op_data->op_fid1), op_data->op_mds);
 
@@ -1694,7 +1694,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
 		CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n");
 	}
 
-	CDEBUG(D_INODE, "CREATE obj "DFID" -> mds #%x\n",
+	CDEBUG(D_INODE, "CREATE obj " DFID " -> mds #%x\n",
 	       PFID(&op_data->op_fid1), op_data->op_mds);
 
 	op_data->op_flags |= MF_MDC_CANCEL_FID1;
@@ -1704,7 +1704,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc == 0) {
 		if (!*request)
 			return rc;
-		CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
+		CDEBUG(D_INODE, "Created - " DFID "\n", PFID(&op_data->op_fid2));
 	}
 	return rc;
 }
@@ -1724,7 +1724,7 @@ lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	if (rc)
 		return rc;
 
-	CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
+	CDEBUG(D_INODE, "ENQUEUE '%s' on " DFID "\n",
 	       LL_IT2STR(it), PFID(&op_data->op_fid1));
 
 	tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
@@ -1769,7 +1769,7 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
 	if (body->mbo_valid & OBD_MD_MDS) {
 		struct lu_fid rid = body->mbo_fid1;
 
-		CDEBUG(D_INODE, "Request attrs for "DFID"\n",
+		CDEBUG(D_INODE, "Request attrs for " DFID "\n",
 		       PFID(&rid));
 
 		tgt = lmv_find_target(lmv, &rid);
@@ -1818,13 +1818,13 @@ static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
 	}
 
 	if (tgt->ltd_idx != op_tgt) {
-		CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
+		CDEBUG(D_INODE, "EARLY_CANCEL on " DFID "\n", PFID(fid));
 		policy.l_inodebits.bits = bits;
 		rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
 				      mode, LCF_ASYNC, NULL);
 	} else {
 		CDEBUG(D_INODE,
-		       "EARLY_CANCEL skip operation target %d on "DFID"\n",
+		       "EARLY_CANCEL skip operation target %d on " DFID "\n",
 		       op_tgt, PFID(fid));
 		op_data->op_flags |= flag;
 		rc = 0;
@@ -1851,7 +1851,7 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
 
 	LASSERT(op_data->op_namelen != 0);
 
-	CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
+	CDEBUG(D_INODE, "LINK " DFID ":%*s to " DFID "\n",
 	       PFID(&op_data->op_fid2), (int)op_data->op_namelen,
 	       op_data->op_name, PFID(&op_data->op_fid1));
 
@@ -1901,7 +1901,7 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 
 	LASSERT(oldlen != 0);
 
-	CDEBUG(D_INODE, "RENAME %.*s in "DFID":%d to %.*s in "DFID":%d\n",
+	CDEBUG(D_INODE, "RENAME %.*s in " DFID ":%d to %.*s in " DFID ":%d\n",
 	       (int)oldlen, old, PFID(&op_data->op_fid1),
 	       op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0,
 	       (int)newlen, new, PFID(&op_data->op_fid2),
@@ -1916,7 +1916,7 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 	op_data->op_cap = cfs_curproc_cap_pack();
 
 	if (op_data->op_cli_flags & CLI_MIGRATE) {
-		LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n",
+		LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID " DFID "\n",
 			 PFID(&op_data->op_fid3));
 
 		if (op_data->op_mea1) {
@@ -2069,7 +2069,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc)
 		return rc;
 
-	CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n",
+	CDEBUG(D_INODE, "SETATTR for " DFID ", valid 0x%x\n",
 	       PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
 
 	op_data->op_flags |= MF_MDC_CANCEL_FID1;
@@ -2274,6 +2274,7 @@ static int lmv_read_striped_page(struct obd_export *exp,
 	struct lu_fid master_fid = op_data->op_fid1;
 	struct obd_device *obd = exp->exp_obd;
 	__u64 hash_offset = offset;
+	__u32 ldp_flags;
 	struct page *min_ent_page = NULL;
 	struct page *ent_page = NULL;
 	struct lu_dirent *min_ent = NULL;
@@ -2301,7 +2302,7 @@ static int lmv_read_striped_page(struct obd_export *exp,
 	dp = kmap(ent_page);
 	memset(dp, 0, sizeof(*dp));
 	dp->ldp_hash_start = cpu_to_le64(offset);
-	dp->ldp_flags |= LDF_COLLIDE;
+	ldp_flags = LDF_COLLIDE;
 
 	area = dp + 1;
 	left_bytes = PAGE_SIZE - sizeof(*dp);
@@ -2379,8 +2380,8 @@ static int lmv_read_striped_page(struct obd_export *exp,
 		ent_page = NULL;
 	} else {
 		if (ent == area)
-			dp->ldp_flags |= LDF_EMPTY;
-		dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
+			ldp_flags |= LDF_EMPTY;
+		dp->ldp_flags |= cpu_to_le32(ldp_flags);
 		dp->ldp_hash_end = cpu_to_le64(hash_offset);
 	}
 
@@ -2413,9 +2414,8 @@ static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
 	if (rc)
 		return rc;
 
-	if (unlikely(lsm)) {
+	if (unlikely(lsm))
 		return lmv_read_striped_page(exp, op_data, cb_op, offset, ppage);
-	}
 
 	tgt = lmv_find_target(lmv, &op_data->op_fid1);
 	if (IS_ERR(tgt))
@@ -2577,7 +2577,7 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
 	if (likely(!(body->mbo_valid & OBD_MD_MDS)))
 		return rc;
 
-	CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
+	CDEBUG(D_INODE, "%s: try unlink to another MDT for " DFID "\n",
 	       exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
 
 	/* This is a remote object, try remote MDT, Note: it may
@@ -2781,7 +2781,7 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
 				    &lsm->lsm_md_oinfo[i].lmo_mds);
 		if (rc)
 			return rc;
-		CDEBUG(D_INFO, "unpack fid #%d "DFID"\n", i,
+		CDEBUG(D_INFO, "unpack fid #%d " DFID "\n", i,
 		       PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
 	}
 
@@ -2925,7 +2925,7 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
 	int tgt;
 	u32 i;
 
-	CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
+	CDEBUG(D_INODE, "Lock match for " DFID "\n", PFID(fid));
 
 	/*
 	 * With DNE every object can have two locks in different namespaces:
@@ -2937,7 +2937,7 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
 	     i < lmv->desc.ld_tgt_count;
 	     i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
 		if (tgt < 0) {
-			CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
+			CDEBUG(D_HA, "%s: " DFID " is inaccessible: rc = %d\n",
 			       obd->obd_name, PFID(fid), tgt);
 			tgt = 0;
 		}
@@ -3107,9 +3107,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
 		return -EIO;
 	}
 
-	if (oqctl->qc_cmd != Q_GETOQUOTA) {
+	if (oqctl->qc_cmd != Q_GETOQUOTA)
 		return obd_quotactl(tgt->ltd_exp, oqctl);
-	}
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
 		int err;
@@ -3149,7 +3148,7 @@ static int lmv_merge_attr(struct obd_export *exp,
 	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
 		struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root;
 
-		CDEBUG(D_INFO, ""DFID" size %llu, blocks %llu nlink %u, atime %lu ctime %lu, mtime %lu.\n",
+		CDEBUG(D_INFO, "" DFID " size %llu, blocks %llu nlink %u, atime %lu ctime %lu, mtime %lu.\n",
 		       PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
 		       i_size_read(inode), (unsigned long long)inode->i_blocks,
 		       inode->i_nlink, LTIME_S(inode->i_atime),
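
The lmv_read_striped_page() change above is more than spacing: ldp_flags is now accumulated in CPU byte order in a local variable and folded into the little-endian ldp_flags field with a single cpu_to_le32(), instead of OR-ing CPU-order constants into the __le32 field and converting the whole field again afterwards. A small sketch of that pattern, using a made-up demo structure rather than the real lu_dirpage:

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_LDF_COLLIDE	0x1
#define DEMO_LDF_EMPTY		0x2

struct demo_dirpage {
	__le32 flags;	/* stored little-endian, as on the wire */
};

static void demo_set_flags(struct demo_dirpage *dp, bool empty)
{
	u32 flags = DEMO_LDF_COLLIDE;	/* build bits in CPU order */

	if (empty)
		flags |= DEMO_LDF_EMPTY;

	dp->flags |= cpu_to_le32(flags);	/* convert exactly once */
}
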
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 391c632..e889d3a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -370,7 +370,7 @@ struct lov_thread_info {
 	struct ost_lvb	  lti_lvb;
 	struct cl_2queue	lti_cl2q;
 	struct cl_page_list     lti_plist;
-	wait_queue_t	  lti_waiter;
+	wait_queue_entry_t	  lti_waiter;
 	struct cl_attr          lti_attr;
 };
 
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index df77b25..babf39a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -1021,7 +1021,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
 		break;
 	case CIT_FAULT:
 		result = -EFAULT;
-		CERROR("Page fault on a file without stripes: "DFID"\n",
+		CERROR("Page fault on a file without stripes: " DFID "\n",
 		       PFID(lu_object_fid(&obj->co_lu)));
 		break;
 	}
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 391dfd2..034b4fc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -57,7 +57,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
 	assert_spin_locked(&lsm->lsm_lock);
 	LASSERT(lsm->lsm_lock_owner == current_pid());
 
-	CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
+	CDEBUG(D_INODE, "MDT ID " DOSTID " initial value: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
 	       POSTID(&lsm->lsm_oi), lvb->lvb_size, lvb->lvb_mtime,
 	       lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
 	for (i = 0; i < lsm->lsm_stripe_count; i++) {
@@ -89,7 +89,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
 		if (loi->loi_lvb.lvb_ctime > current_ctime)
 			current_ctime = loi->loi_lvb.lvb_ctime;
 
-		CDEBUG(D_INODE, "MDT ID "DOSTID" on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
+		CDEBUG(D_INODE, "MDT ID " DOSTID " on OST[%u]: s=%llu m=%llu a=%llu c=%llu b=%llu\n",
 		       POSTID(&lsm->lsm_oi), loi->loi_ost_idx,
 		       loi->loi_lvb.lvb_size, loi->loi_lvb.lvb_mtime,
 		       loi->loi_lvb.lvb_atime, loi->loi_lvb.lvb_ctime,
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index ab3ecfe..14f3826 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -153,8 +153,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
 	subhdr = cl_object_header(stripe);
 
 	oinfo = lov->lo_lsm->lsm_oinfo[idx];
-	CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
-	       " idx: %d gen: %d\n",
+	CDEBUG(D_INODE, DFID "@%p[%d] -> " DFID "@%p: ostid: " DOSTID " idx: %d gen: %d\n",
 	       PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
 	       PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
 	       oinfo->loi_ost_idx, oinfo->loi_ost_gen);
@@ -371,7 +370,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
 	struct lov_layout_raid0 *r0;
 	struct lu_site	  *site;
 	struct lu_site_bkt_data *bkt;
-	wait_queue_t	  *waiter;
+	wait_queue_entry_t	  *waiter;
 
 	r0  = &lov->u.raid0;
 	LASSERT(r0->lo_sub[idx] == los);
@@ -757,7 +756,7 @@ static int lov_layout_change(const struct lu_env *unused,
 
 	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
 
-	CDEBUG(D_INODE, DFID" from %s to %s\n",
+	CDEBUG(D_INODE, DFID " from %s to %s\n",
 	       PFID(lu_object_fid(lov2lu(lov))),
 	       llt2str(lov->lo_type), llt2str(llt));
 
@@ -904,7 +903,7 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
 out:
 	lov_conf_unlock(lov);
 	lov_lsm_put(lsm);
-	CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
+	CDEBUG(D_INODE, DFID " lo_layout_invalid=%d\n",
 	       PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
 	return result;
 }
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2e1bd47..638b764 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -56,7 +56,7 @@ void lov_dump_lmm_common(int level, void *lmmp)
 	struct ost_id	oi;
 
 	lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
-	CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
+	CDEBUG(level, "objid " DOSTID ", magic 0x%08x, pattern %#x\n",
 	       POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
 	       le32_to_cpu(lmm->lmm_pattern));
 	CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
@@ -80,7 +80,7 @@ static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
 		struct ost_id	oi;
 
 		ostid_le_to_cpu(&lod->l_ost_oi, &oi);
-		CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
+		CDEBUG(level, "stripe %u idx %u subobj " DOSTID "\n", i,
 		       le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
 	}
 }
@@ -95,7 +95,7 @@ void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
 void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
 {
 	lov_dump_lmm_common(level, lmm);
-	CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
+	CDEBUG(level, "pool_name " LOV_POOLNAMEF "\n", lmm->lmm_pool_name);
 	lov_dump_lmm_objects(level, lmm->lmm_objects,
 			     le16_to_cpu(lmm->lmm_stripe_count));
 }
@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 	size_t lmmk_size;
 	size_t lum_size;
 	int rc;
-	mm_segment_t seg;
 
 	if (!lsm)
 		return -ENODATA;
 
-	/*
-	 * "Switch to kernel segment" to allow copying from kernel space by
-	 * copy_{to,from}_user().
-	 */
-	seg = get_fs();
-	set_fs(KERNEL_DS);
-
 	if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
 		CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
 		       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 out_free:
 	kvfree(lmmk);
 out:
-	set_fs(seg);
 	return rc;
 }
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 7daa867..39daa17 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -286,7 +286,7 @@ static int pool_proc_open(struct inode *inode, struct file *file)
 	return rc;
 }
 
-static struct file_operations pool_proc_operations = {
+static const struct file_operations pool_proc_operations = {
 	.open	   = pool_proc_open,
 	.read	   = seq_read,
 	.llseek	 = seq_lseek,
@@ -429,7 +429,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
 						poolname, new_pool,
 						&pool_proc_operations);
 	if (IS_ERR_OR_NULL(new_pool->pool_debugfs_entry)) {
-		CWARN("Cannot add debugfs pool entry "LOV_POOLNAMEF"\n",
+		CWARN("Cannot add debugfs pool entry " LOV_POOLNAMEF "\n",
 		      poolname);
 		new_pool->pool_debugfs_entry = NULL;
 		lov_pool_putref(new_pool);
@@ -450,7 +450,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
 		goto out_err;
 	}
 
-	CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
+	CDEBUG(D_CONFIG, LOV_POOLNAMEF " is pool #%d\n",
 	       poolname, lov->lov_pool_count);
 
 	return 0;
@@ -531,7 +531,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
 	if (rc)
 		goto out;
 
-	CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
+	CDEBUG(D_CONFIG, "Added %s to " LOV_POOLNAMEF " as member %d\n",
 	       ostname, poolname,  pool_tgt_count(pool));
 
 out:
@@ -575,7 +575,7 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
 
 	lov_ost_pool_remove(&pool->pool_obds, lov_idx);
 
-	CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
+	CDEBUG(D_CONFIG, "%s removed from " LOV_POOLNAMEF "\n", ostname,
 	       poolname);
 
 out:
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 392b0e3..3eb66ce 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -830,7 +830,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 		ptlrpc_req_finished(req);
 		resends++;
 
-		CDEBUG(D_HA, "%s: resend:%d op:%d "DFID"/"DFID"\n",
+		CDEBUG(D_HA, "%s: resend:%d op:%d " DFID "/" DFID "\n",
 		       obddev->obd_name, resends, it->it_op,
 		       PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
 
@@ -911,13 +911,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
 		OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
 	}
 
-	if (it->it_op & IT_CREAT) {
+	if (it->it_op & IT_CREAT)
 		/* XXX this belongs in ll_create_it */
-	} else if (it->it_op == IT_OPEN) {
+		;
+	else if (it->it_op == IT_OPEN)
 		LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
-	} else {
+	else
 		LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT));
-	}
 
 	/* If we already have a matching lock, then cancel the new
 	 * one.  We have to set the data here instead of in
@@ -933,7 +933,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
 
 		LASSERTF(fid_res_name_eq(&mdt_body->mbo_fid1,
 					 &lock->l_resource->lr_name),
-			 "Lock res_id: "DLDLMRES", fid: "DFID"\n",
+			 "Lock res_id: " DLDLMRES ", fid: " DFID "\n",
 			 PLDLMRES(lock->l_resource), PFID(&mdt_body->mbo_fid1));
 		LDLM_LOCK_PUT(lock);
 
@@ -1063,7 +1063,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
 
 	LASSERT(it);
 
-	CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
+	CDEBUG(D_DLMTRACE, "(name: %.*s," DFID ") in obj " DFID
 		", intent: %s flags %#Lo\n", (int)op_data->op_namelen,
 		op_data->op_name, PFID(&op_data->op_fid2),
 		PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 07b1684..2287bd4 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -227,7 +227,7 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
 		ptlrpc_req_finished(req);
 		resends++;
 
-		CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
+		CDEBUG(D_HA, "%s: resend:%d create on " DFID "/" DFID "\n",
 		       exp->exp_obd->obd_name, resends,
 		       PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
 
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 6bc2fb8..1a3fa1b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -105,7 +105,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
 
 	*rootfid = body->mbo_fid1;
 	CDEBUG(D_NET,
-	       "root fid="DFID", last_committed=%llu\n",
+	       "root fid=" DFID ", last_committed=%llu\n",
 	       PFID(rootfid),
 	       lustre_msg_get_last_committed(req->rq_repmsg));
 out:
@@ -713,7 +713,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 		/* allocate a FID for volatile file */
 		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 		if (rc < 0) {
-			CERROR("%s: "DFID" failed to allocate FID: %d\n",
+			CERROR("%s: " DFID " failed to allocate FID: %d\n",
 			       obd->obd_name, PFID(&op_data->op_fid1), rc);
 			/* save the errcode and proceed to close */
 			saved_rc = rc;
@@ -753,7 +753,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 		/*
 		 * TODO: repeat close after errors
 		 */
-		CWARN("%s: close of FID "DFID" failed, file reference will be dropped when this client unmounts or is evicted\n",
+		CWARN("%s: close of FID " DFID " failed, file reference will be dropped when this client unmounts or is evicted\n",
 		      obd->obd_name, PFID(&op_data->op_fid1));
 		rc = -ENOMEM;
 		goto out;
@@ -1254,7 +1254,7 @@ static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
 		ptlrpc_req_finished(enq_req);
 
 	if (rc < 0) {
-		CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
+		CERROR("%s: " DFID " lock enqueue fails: rc = %d\n",
 		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
 		return rc;
 	}
@@ -1298,7 +1298,7 @@ static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
 					    rp_param.rp_hash64),
 			       mdc_read_page_remote, &rp_param);
 	if (IS_ERR(page)) {
-		CERROR("%s: read cache page: "DFID" at %llu: rc %ld\n",
+		CERROR("%s: read cache page: " DFID " at %llu: rc %ld\n",
 		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
 		       rp_param.rp_off, PTR_ERR(page));
 		rc = PTR_ERR(page);
@@ -1308,7 +1308,7 @@ static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
 	wait_on_page_locked(page);
 	(void)kmap(page);
 	if (!PageUptodate(page)) {
-		CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
+		CERROR("%s: page not updated: " DFID " at %llu: rc %d\n",
 		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
 		       rp_param.rp_off, -5);
 		goto fail;
@@ -1316,7 +1316,7 @@ static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
 	if (!PageChecked(page))
 		SetPageChecked(page);
 	if (PageError(page)) {
-		CERROR("%s: page error: "DFID" at %llu: rc %d\n",
+		CERROR("%s: page error: " DFID " at %llu: rc %d\n",
 		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
 		       rp_param.rp_off, -5);
 		goto fail;
@@ -1436,7 +1436,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
 	memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
 	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
 
-	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
+	CDEBUG(D_IOCTL, "path get " DFID " from %llu #%d\n",
 	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
 
 	if (!fid_is_sane(&gf->gf_fid)) {
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 6a76605..eee0b66 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -800,7 +800,7 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 		/* We've given up the lock, prepare ourselves to update. */
 		LDLM_DEBUG(lock, "MGC cancel CB");
 
-		CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
+		CDEBUG(D_MGC, "Lock res " DLDLMRES " (%.8s)\n",
 		       PLDLMRES(lock->l_resource),
 		       (char *)&lock->l_resource->lr_name.name[0]);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 9d7b593..a343e3a 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -246,7 +246,7 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
 	const struct lu_fid  *fid;
 
 	fid = lu_object_fid(&descr->cld_obj->co_lu);
-	(*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
+	(*printer)(env, cookie, DDESCR "@" DFID, PDESCR(descr), PFID(fid));
 }
 EXPORT_SYMBOL(cl_lock_descr_print);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 71fcc4c..6b8c41b 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -193,7 +193,7 @@ struct cl_page *cl_page_find(const struct lu_env *env,
 
 	hdr = cl_object_header(o);
 
-	CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
+	CDEBUG(D_PAGE, "%lu@" DFID " %p %lx %d\n",
 	       idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
 	/* fast path. */
 	if (type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index ce8e2f6..8f1533c 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -78,7 +78,7 @@ static int llog_cat_id2handle(const struct lu_env *env,
 		if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
 		    ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
 			if (cgl->lgl_ogen != logid->lgl_ogen) {
-				CERROR("%s: log "DOSTID" generation %x != %x\n",
+				CERROR("%s: log " DOSTID " generation %x != %x\n",
 				       loghandle->lgh_ctxt->loc_obd->obd_name,
 				       POSTID(&logid->lgl_oi), cgl->lgl_ogen,
 				       logid->lgl_ogen);
@@ -95,7 +95,7 @@ static int llog_cat_id2handle(const struct lu_env *env,
 	rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
 		       LLOG_OPEN_EXISTS);
 	if (rc < 0) {
-		CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
+		CERROR("%s: error opening log id " DOSTID ":%x: rc = %d\n",
 		       cathandle->lgh_ctxt->loc_obd->obd_name,
 		       POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
 		return rc;
@@ -152,13 +152,13 @@ static int llog_cat_process_cb(const struct lu_env *env,
 		CERROR("invalid record in catalog\n");
 		return -EINVAL;
 	}
-	CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
-	       DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
+	CDEBUG(D_HA, "processing log " DOSTID ":%x at index %u of catalog "
+	       DOSTID "\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
 	       rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
 
 	rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
 	if (rc) {
-		CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
+		CERROR("%s: cannot find handle for llog " DOSTID ": %d\n",
 		       cat_llh->lgh_ctxt->loc_obd->obd_name,
 		       POSTID(&lir->lid_id.lgl_oi), rc);
 		return rc;
@@ -204,7 +204,7 @@ static int llog_cat_process_or_fork(const struct lu_env *env,
 	if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
 		struct llog_process_cat_data cd;
 
-		CWARN("catlog "DOSTID" crosses index zero\n",
+		CWARN("catlog " DOSTID " crosses index zero\n",
 		      POSTID(&cat_llh->lgh_id.lgl_oi));
 
 		cd.lpcd_first_idx = llh->llh_cat_idx;
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 723c212..016046d 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -44,7 +44,7 @@
 static void print_llogd_body(struct llogd_body *d)
 {
 	CDEBUG(D_OTHER, "llogd body: %p\n", d);
-	CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: "DOSTID"\n",
+	CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: " DOSTID "\n",
 	       POSTID(&d->lgd_logid.lgl_oi));
 	CDEBUG(D_OTHER, "\tlgd_logid.lgl_ogen: %#x\n", d->lgd_logid.lgl_ogen);
 	CDEBUG(D_OTHER, "\tlgd_ctxt_idx: %#x\n", d->lgd_ctxt_idx);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 1ec6e37..bc19f19 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(lprocfs_seq_release);
 
 struct dentry *ldebugfs_add_simple(struct dentry *root,
 				   char *name, void *data,
-				   struct file_operations *fops)
+				   const struct file_operations *fops)
 {
 	struct dentry *entry;
 	umode_t mode = 0;
@@ -389,40 +389,6 @@ struct dentry *ldebugfs_register(const char *name,
 EXPORT_SYMBOL_GPL(ldebugfs_register);
 
 /* Generic callbacks */
-int lprocfs_rd_uint(struct seq_file *m, void *data)
-{
-	seq_printf(m, "%u\n", *(unsigned int *)data);
-	return 0;
-}
-EXPORT_SYMBOL(lprocfs_rd_uint);
-
-int lprocfs_wr_uint(struct file *file, const char __user *buffer,
-		    unsigned long count, void *data)
-{
-	unsigned *p = data;
-	char dummy[MAX_STRING_SIZE + 1], *end;
-	unsigned long tmp;
-
-	if (count >= sizeof(dummy))
-		return -EINVAL;
-
-	if (count == 0)
-		return 0;
-
-	if (copy_from_user(dummy, buffer, count))
-		return -EFAULT;
-
-	dummy[count] = '\0';
-
-	tmp = simple_strtoul(dummy, &end, 0);
-	if (dummy == end)
-		return -EINVAL;
-
-	*p = (unsigned int)tmp;
-	return count;
-}
-EXPORT_SYMBOL(lprocfs_wr_uint);
-
 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
 			 char *buf)
 {
@@ -736,7 +702,7 @@ static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
 	bool first = true;
 
 	if (imp->imp_obd->obd_no_recov) {
-		seq_printf(m, "no_recov");
+		seq_puts(m, "no_recov");
 		first = false;
 	}
 
@@ -802,15 +768,15 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
 		   imp->imp_connect_data.ocd_instance);
 	obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
 				  ", ");
-	seq_printf(m, " ]\n");
+	seq_puts(m, " ]\n");
 	obd_connect_data_seqprint(m, ocd);
-	seq_printf(m, "    import_flags: [ ");
+	seq_puts(m, "    import_flags: [ ");
 	obd_import_flags2str(imp, m);
 
-	seq_printf(m,
-		   " ]\n"
-		   "    connection:\n"
-		   "       failover_nids: [ ");
+	seq_puts(m,
+		 " ]\n"
+		 "    connection:\n"
+		 "       failover_nids: [ ");
 	spin_lock(&imp->imp_lock);
 	j = 0;
 	list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
@@ -943,7 +909,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data)
 
 	seq_printf(m, "current_state: %s\n",
 		   ptlrpc_import_state_name(imp->imp_state));
-	seq_printf(m, "state_history:\n");
+	seq_puts(m, "state_history:\n");
 	k = imp->imp_state_hist_idx;
 	for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
 		struct import_state_hist *ish =
@@ -965,7 +931,7 @@ int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at)
 
 	for (i = 0; i < AT_BINS; i++)
 		seq_printf(m, "%3u ", at->at_hist[i]);
-	seq_printf(m, "\n");
+	seq_puts(m, "\n");
 	return 0;
 }
 EXPORT_SYMBOL(lprocfs_at_hist_helper);
@@ -1033,7 +999,7 @@ int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
 	flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags;
 	seq_printf(m, "flags=%#llx\n", flags);
 	obd_connect_seq_flags2str(m, flags, "\n");
-	seq_printf(m, "\n");
+	seq_puts(m, "\n");
 	up_read(&obd->u.cli.cl_sem);
 	return 0;
 }
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index abcf951..bb9d514 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -512,7 +512,7 @@ void lu_object_header_print(const struct lu_env *env, void *cookie,
 			    lu_printer_t printer,
 			    const struct lu_object_header *hdr)
 {
-	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
+	(*printer)(env, cookie, "header@%p[%#lx, %d, " DFID "%s%s%s]",
 		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
 		   PFID(&hdr->loh_fid),
 		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
@@ -556,7 +556,7 @@ EXPORT_SYMBOL(lu_object_print);
 static struct lu_object *htable_lookup(struct lu_site *s,
 				       struct cfs_hash_bd *bd,
 				       const struct lu_fid *f,
-				       wait_queue_t *waiter,
+				       wait_queue_entry_t *waiter,
 				       __u64 *version)
 {
 	struct lu_site_bkt_data *bkt;
@@ -670,7 +670,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
 					    struct lu_device *dev,
 					    const struct lu_fid *f,
 					    const struct lu_object_conf *conf,
-					    wait_queue_t *waiter)
+					    wait_queue_entry_t *waiter)
 {
 	struct lu_object      *o;
 	struct lu_object      *shadow;
@@ -750,7 +750,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
 {
 	struct lu_site_bkt_data *bkt;
 	struct lu_object	*obj;
-	wait_queue_t	   wait;
+	wait_queue_entry_t	   wait;
 
 	while (1) {
 		obj = lu_object_find_try(env, dev, f, conf, &wait);
@@ -918,9 +918,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
 	cache_size = cache_size / 100 * lu_cache_percent *
 		(PAGE_SIZE / 1024);
 
-	for (bits = 1; (1 << bits) < cache_size; ++bits) {
+	for (bits = 1; (1 << bits) < cache_size; ++bits)
 		;
-	}
 	return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
 
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 77b4c55..1c4a8fe 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -277,7 +277,7 @@ static int echo_page_print(const struct lu_env *env,
 {
 	struct echo_page *ep = cl2echo_page(slice);
 
-	(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
+	(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME "-page@%p %d vm@%p\n",
 		   ep, mutex_is_locked(&ep->ep_lock),
 		   slice->cpl_page->cp_vmpage);
 	return 0;
@@ -1121,7 +1121,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
 	}
 	cl_echo_object_put(eco);
 
-	CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
+	CDEBUG(D_INFO, "oa oid " DOSTID "\n", POSTID(&oa->o_oi));
 
  failed:
 	if (created && rc)
@@ -1646,9 +1646,8 @@ static int echo_client_connect(const struct lu_env *env,
 	struct lustre_handle conn = { 0 };
 
 	rc = class_connect(&conn, src, cluuid);
-	if (rc == 0) {
+	if (rc == 0)
 		*exp = class_conn2export(&conn);
-	}
 
 	return rc;
 }
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index c5ccf56..d8a95f8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -472,7 +472,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
 		else if (ext->oe_start > tmp->oe_end)
 			n = &(*n)->rb_right;
 		else
-			EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext));
+			EASSERTF(0, tmp, EXTSTR "\n", EXTPARA(ext));
 	}
 	rb_link_node(&ext->oe_node, parent, n);
 	rb_insert_color(&ext->oe_node, &obj->oo_root);
@@ -690,7 +690,7 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
 	/* grants has been allocated by caller */
 	LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
 		 "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
-	LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n",
+	LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR "\n",
 		 EXTPARA(cur));
 
 restart:
@@ -709,7 +709,7 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
 		/* if covering by different locks, no chance to match */
 		if (olck->ols_dlmlock != ext->oe_dlmlock) {
 			EASSERTF(!overlapped(ext, cur), ext,
-				 EXTSTR"\n", EXTPARA(cur));
+				 EXTSTR "\n", EXTPARA(cur));
 
 			ext = next_extent(ext);
 			continue;
@@ -732,7 +732,7 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
 			 */
 			EASSERTF((ext->oe_start <= cur->oe_start &&
 				  ext->oe_end >= cur->oe_end),
-				 ext, EXTSTR"\n", EXTPARA(cur));
+				 ext, EXTSTR "\n", EXTPARA(cur));
 
 			if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
 				/* for simplicity, we wait for this extent to
@@ -1406,9 +1406,8 @@ static void osc_release_write_grant(struct client_obd *cli,
 				    struct brw_page *pga)
 {
 	assert_spin_locked(&cli->cl_loi_list_lock);
-	if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
+	if (!(pga->flag & OBD_BRW_FROM_GRANT))
 		return;
-	}
 
 	pga->flag &= ~OBD_BRW_FROM_GRANT;
 	atomic_long_dec(&obd_dirty_pages);
@@ -1890,10 +1889,10 @@ struct extent_rpc_data {
 	unsigned int		erd_max_chunks;
 };
 
-static inline unsigned osc_extent_chunks(const struct osc_extent *ext)
+static inline unsigned int osc_extent_chunks(const struct osc_extent *ext)
 {
 	struct client_obd *cli = osc_cli(ext->oe_obj);
-	unsigned ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
+	unsigned int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 
 	return (ext->oe_end >> ppc_bits) - (ext->oe_start >> ppc_bits) + 1;
 }
@@ -1951,7 +1950,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
 	return 1;
 }
 
-static inline unsigned osc_max_write_chunks(const struct client_obd *cli)
+static inline unsigned int osc_max_write_chunks(const struct client_obd *cli)
 {
 	/*
 	 * LU-8135:
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 845e795..13a40f6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -62,7 +62,7 @@ struct osc_async_page {
 	struct list_head	      oap_rpc_item;
 
 	u64		 oap_obj_off;
-	unsigned		oap_page_off;
+	unsigned int		oap_page_off;
 	enum async_flags	oap_async_flags;
 
 	struct brw_page	 oap_brw_page;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index d8aa3fb..922d0cb 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1227,8 +1227,7 @@ static int check_write_checksum(struct obdo *oa,
 		msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
 			;
 
-	LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
-			   " object "DOSTID" extent [%llu-%llu]\n",
+	LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
 			   msg, libcfs_nid2str(peer->nid),
 			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
 			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 6466974..1c77792 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -367,8 +367,8 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
 		 */
 		CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
 		       D_ADAPTTO : D_WARNING,
-		       "Reported service time %u > total measured time "
-		       CFS_DURATION_T"\n", service_time,
+		       "Reported service time %u > total measured time " CFS_DURATION_T "\n",
+		       service_time,
 		       (long)(now - req->rq_sent));
 		return;
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 93e172f..52cb1f0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1182,17 +1182,15 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 	}
 
 	/* Sanity checks for a reconnected import. */
-	if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
+	if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE))
 		CERROR("imp_replayable flag does not match server after reconnect. We should LBUG right here.\n");
-	}
 
 	if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
 	    lustre_msg_get_last_committed(request->rq_repmsg) <
-	    aa->pcaa_peer_committed) {
+	    aa->pcaa_peer_committed)
 		CERROR("%s went back in time (transno %lld was previously committed, server now claims %lld)!  See https://bugzilla.lustre.org/show_bug.cgi?id=9646\n",
 		       obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
 		       lustre_msg_get_last_committed(request->rq_repmsg));
-	}
 
 finish:
 	ptlrpc_prepare_replay(imp);
@@ -1437,20 +1435,17 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
 		rc = 0;
 	}
 
-	if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
+	if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS)
 		if (atomic_read(&imp->imp_replay_inflight) == 0) {
 			IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
 			rc = signal_completed_replay(imp);
 			if (rc)
 				goto out;
 		}
-	}
 
-	if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
-		if (atomic_read(&imp->imp_replay_inflight) == 0) {
+	if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT)
+		if (atomic_read(&imp->imp_replay_inflight) == 0)
 			IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
-		}
-	}
 
 	if (imp->imp_state == LUSTRE_IMP_RECOVER) {
 		CDEBUG(D_HA, "reconnected to %s@%s\n",
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 8177e1a..5810bba 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -1761,7 +1761,7 @@ static u32 __req_capsule_offset(const struct req_capsule *pill,
 		 field->rmf_name, offset, loc);
 	offset--;
 
-	LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR);
+	LASSERT(offset < REQ_MAX_FIELD_NR);
 	return offset;
 }
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 9456a18..55e8696e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -2089,7 +2089,7 @@ static void dump_obdo(struct obdo *oa)
 
 	CDEBUG(D_RPCTRACE, "obdo: o_valid = %08x\n", valid);
 	if (valid & OBD_MD_FLID)
-		CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
+		CDEBUG(D_RPCTRACE, "obdo: id = " DOSTID "\n", POSTID(&oa->o_oi));
 	if (valid & OBD_MD_FLFID)
 		CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = %#llx\n",
 		       oa->o_parent_seq);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index d2707a3..c38e166 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -68,7 +68,7 @@ void ptlrpc_init_xid(void);
 void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
 			    struct ptlrpc_request *req);
 int ptlrpc_expired_set(void *data);
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
+int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set);
 void ptlrpc_resend_req(struct ptlrpc_request *request);
 void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
 void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
@@ -79,7 +79,7 @@ void ptlrpc_add_unreplied(struct ptlrpc_request *req);
 int ptlrpc_init_portals(void);
 void ptlrpc_exit_portals(void);
 
-void ptlrpc_request_handle_notconn(struct ptlrpc_request *);
+void ptlrpc_request_handle_notconn(struct ptlrpc_request *req);
 void lustre_assert_wire_constants(void);
 int ptlrpc_import_in_recovery(struct obd_import *imp);
 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index b8091c1..759aa6c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1565,7 +1565,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
 
 	/* req_in handling should/must be fast */
 	if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
-		DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
+		DEBUG_REQ(D_WARNING, req, "Slow req_in handling " CFS_DURATION_T "s",
 			  (long)(ktime_get_real_seconds() -
 				 req->rq_arrival_time.tv_sec));
 
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8ea0190..466517c 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,5 +19,3 @@
 
 obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
 
-ccflags-y += -Werror
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 1d7f7ab..6b13a3a 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,5 +4,3 @@
 
 ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
 obj-$(CONFIG_VIDEO_OV8858)     += ov8858_driver.o
-
-ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index fceb9e9..c9c0e12 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,3 +1 @@
 obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
-
-ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 3fa7c1c..f126a89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -351,5 +351,5 @@
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
 
-ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
 
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index ce1764c..4c259c2 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -22,7 +22,6 @@
 #include <linux/wait.h>
 #include <linux/kobject.h>
 #include "mostcore.h"
-#include "networking.h"
 
 #define MEP_HDR_LEN 8
 #define MDP_HDR_LEN 16
@@ -65,17 +64,16 @@ struct net_dev_channel {
 
 struct net_dev_context {
 	struct most_interface *iface;
-	bool channels_opened;
 	bool is_mamac;
 	struct net_device *dev;
 	struct net_dev_channel rx;
 	struct net_dev_channel tx;
-	struct completion mac_compl;
 	struct list_head list;
 };
 
 static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
-static struct spinlock list_lock;
+static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
+static struct spinlock list_lock; /* list_head, ch->linked = false, dev_hold */
 static struct most_aim aim;
 
 static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
@@ -157,14 +155,12 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
 
 static int most_nd_set_mac_address(struct net_device *dev, void *p)
 {
-	struct net_dev_context *nd = dev->ml_priv;
+	struct net_dev_context *nd = netdev_priv(dev);
 	int err = eth_mac_addr(dev, p);
 
 	if (err)
 		return err;
 
-	BUG_ON(nd->dev != dev);
-
 	nd->is_mamac =
 		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
 		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
@@ -178,71 +174,52 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p)
 	return 0;
 }
 
+static void on_netinfo(struct most_interface *iface,
+		       unsigned char link_stat, unsigned char *mac_addr);
+
 static int most_nd_open(struct net_device *dev)
 {
-	struct net_dev_context *nd = dev->ml_priv;
-	long ret;
+	struct net_dev_context *nd = netdev_priv(dev);
+	int ret = 0;
 
-	netdev_info(dev, "open net device\n");
-
-	BUG_ON(nd->dev != dev);
-
-	if (nd->channels_opened)
-		return -EFAULT;
-
-	BUG_ON(!nd->tx.linked || !nd->rx.linked);
+	mutex_lock(&probe_disc_mt);
 
 	if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
 		netdev_err(dev, "most_start_channel() failed\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
 	if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
 		netdev_err(dev, "most_start_channel() failed\n");
 		most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
-		ret = wait_for_completion_interruptible_timeout(
-			      &nd->mac_compl, msecs_to_jiffies(5000));
-		if (!ret) {
-			netdev_err(dev, "mac timeout\n");
-			ret = -EBUSY;
-			goto err;
-		}
-
-		if (ret < 0) {
-			netdev_warn(dev, "mac waiting interrupted\n");
-			goto err;
-		}
-	}
-
-	nd->channels_opened = true;
+	netif_carrier_off(dev);
+	if (is_valid_ether_addr(dev->dev_addr))
+		netif_dormant_off(dev);
+	else
+		netif_dormant_on(dev);
 	netif_wake_queue(dev);
-	return 0;
+	if (nd->iface->request_netinfo)
+		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);
 
-err:
-	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
-	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+unlock:
+	mutex_unlock(&probe_disc_mt);
 	return ret;
 }
 
 static int most_nd_stop(struct net_device *dev)
 {
-	struct net_dev_context *nd = dev->ml_priv;
+	struct net_dev_context *nd = netdev_priv(dev);
 
-	netdev_info(dev, "stop net device\n");
-
-	BUG_ON(nd->dev != dev);
 	netif_stop_queue(dev);
-
-	if (nd->channels_opened) {
-		most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
-		most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
-		nd->channels_opened = false;
-	}
+	if (nd->iface->request_netinfo)
+		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
+	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
 
 	return 0;
 }
@@ -250,12 +227,10 @@ static int most_nd_stop(struct net_device *dev)
 static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
-	struct net_dev_context *nd = dev->ml_priv;
+	struct net_dev_context *nd = netdev_priv(dev);
 	struct mbo *mbo;
 	int ret;
 
-	BUG_ON(nd->dev != dev);
-
 	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
 
 	if (!mbo) {
@@ -296,33 +271,29 @@ static void most_nd_setup(struct net_device *dev)
 	dev->netdev_ops = &most_nd_ops;
 }
 
-static void most_net_rm_netdev_safe(struct net_dev_context *nd)
+static struct net_dev_context *get_net_dev(struct most_interface *iface)
 {
-	if (!nd->dev)
-		return;
+	struct net_dev_context *nd;
 
-	pr_info("remove net device %p\n", nd->dev);
-
-	unregister_netdev(nd->dev);
-	free_netdev(nd->dev);
-	nd->dev = NULL;
+	list_for_each_entry(nd, &net_devices, list)
+		if (nd->iface == iface)
+			return nd;
+	return NULL;
 }
 
-static struct net_dev_context *get_net_dev_context(
-	struct most_interface *iface)
+static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
 {
-	struct net_dev_context *nd, *tmp;
+	struct net_dev_context *nd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&list_lock, flags);
-	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
-		if (nd->iface == iface) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			return nd;
-		}
-	}
+	nd = get_net_dev(iface);
+	if (nd && nd->rx.linked && nd->tx.linked)
+		dev_hold(nd->dev);
+	else
+		nd = NULL;
 	spin_unlock_irqrestore(&list_lock, flags);
-	return NULL;
+	return nd;
 }
 
 static int aim_probe_channel(struct most_interface *iface, int channel_idx,
@@ -331,7 +302,9 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
 {
 	struct net_dev_context *nd;
 	struct net_dev_channel *ch;
+	struct net_device *dev;
 	unsigned long flags;
+	int ret = 0;
 
 	if (!iface)
 		return -EINVAL;
@@ -339,54 +312,45 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
 	if (ccfg->data_type != MOST_CH_ASYNC)
 		return -EINVAL;
 
-	nd = get_net_dev_context(iface);
-
+	mutex_lock(&probe_disc_mt);
+	nd = get_net_dev(iface);
 	if (!nd) {
-		nd = kzalloc(sizeof(*nd), GFP_KERNEL);
-		if (!nd)
-			return -ENOMEM;
+		dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
+				   NET_NAME_UNKNOWN, most_nd_setup);
+		if (!dev) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
 
-		init_completion(&nd->mac_compl);
+		nd = netdev_priv(dev);
 		nd->iface = iface;
+		nd->dev = dev;
 
 		spin_lock_irqsave(&list_lock, flags);
 		list_add(&nd->list, &net_devices);
 		spin_unlock_irqrestore(&list_lock, flags);
-	}
 
-	ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
-	if (ch->linked) {
-		pr_err("only one channel per instance & direction allowed\n");
-		return -EINVAL;
-	}
-
-	if (nd->tx.linked || nd->rx.linked) {
-		struct net_device *dev =
-			alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN,
-				     most_nd_setup);
-
-		if (!dev) {
-			pr_err("no memory for net_device\n");
-			return -ENOMEM;
+		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+	} else {
+		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+		if (ch->linked) {
+			pr_err("direction is allocated\n");
+			ret = -EINVAL;
+			goto unlock;
 		}
 
-		nd->dev = dev;
-		ch->ch_id = channel_idx;
-		ch->linked = true;
-
-		dev->ml_priv = nd;
-		if (register_netdev(dev)) {
-			pr_err("registering net device failed\n");
-			ch->linked = false;
-			free_netdev(dev);
-			return -EINVAL;
+		if (register_netdev(nd->dev)) {
+			pr_err("register_netdev() failed\n");
+			ret = -EINVAL;
+			goto unlock;
 		}
 	}
-
 	ch->ch_id = channel_idx;
 	ch->linked = true;
 
-	return 0;
+unlock:
+	mutex_unlock(&probe_disc_mt);
+	return ret;
 }
 
 static int aim_disconnect_channel(struct most_interface *iface,
@@ -395,34 +359,45 @@ static int aim_disconnect_channel(struct most_interface *iface,
 	struct net_dev_context *nd;
 	struct net_dev_channel *ch;
 	unsigned long flags;
+	int ret = 0;
 
-	nd = get_net_dev_context(iface);
-	if (!nd)
-		return -EINVAL;
+	mutex_lock(&probe_disc_mt);
+	nd = get_net_dev(iface);
+	if (!nd) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
-	if (nd->rx.linked && channel_idx == nd->rx.ch_id)
+	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
 		ch = &nd->rx;
-	else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
+	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
 		ch = &nd->tx;
-	else
-		return -EINVAL;
+	} else {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
-	ch->linked = false;
+	if (nd->rx.linked && nd->tx.linked) {
+		spin_lock_irqsave(&list_lock, flags);
+		ch->linked = false;
+		spin_unlock_irqrestore(&list_lock, flags);
 
-	/*
-	 * do not call most_stop_channel() here, because channels are
-	 * going to be closed in ndo_stop() after unregister_netdev()
-	 */
-	most_net_rm_netdev_safe(nd);
-
-	if (!nd->rx.linked && !nd->tx.linked) {
+		/*
+		 * do not call most_stop_channel() here, because channels are
+		 * going to be closed in ndo_stop() after unregister_netdev()
+		 */
+		unregister_netdev(nd->dev);
+	} else {
 		spin_lock_irqsave(&list_lock, flags);
 		list_del(&nd->list);
 		spin_unlock_irqrestore(&list_lock, flags);
-		kfree(nd);
+
+		free_netdev(nd->dev);
 	}
 
-	return 0;
+unlock:
+	mutex_unlock(&probe_disc_mt);
+	return ret;
 }
 
 static int aim_resume_tx_channel(struct most_interface *iface,
@@ -430,14 +405,17 @@ static int aim_resume_tx_channel(struct most_interface *iface,
 {
 	struct net_dev_context *nd;
 
-	nd = get_net_dev_context(iface);
-	if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
+	nd = get_net_dev_hold(iface);
+	if (!nd)
 		return 0;
 
-	if (!nd->dev)
-		return 0;
+	if (nd->tx.ch_id != channel_idx)
+		goto put_nd;
 
 	netif_wake_queue(nd->dev);
+
+put_nd:
+	dev_put(nd->dev);
 	return 0;
 }
 
@@ -450,25 +428,31 @@ static int aim_rx_data(struct mbo *mbo)
 	struct sk_buff *skb;
 	struct net_device *dev;
 	unsigned int skb_len;
+	int ret = 0;
 
-	nd = get_net_dev_context(mbo->ifp);
-	if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
+	nd = get_net_dev_hold(mbo->ifp);
+	if (!nd)
 		return -EIO;
 
-	dev = nd->dev;
-	if (!dev) {
-		pr_err_once("drop packet: missing net_device\n");
-		return -EIO;
+	if (nd->rx.ch_id != mbo->hdm_channel_id) {
+		ret = -EIO;
+		goto put_nd;
 	}
 
+	dev = nd->dev;
+
 	if (nd->is_mamac) {
-		if (!PMS_IS_MAMAC(buf, len))
-			return -EIO;
+		if (!PMS_IS_MAMAC(buf, len)) {
+			ret = -EIO;
+			goto put_nd;
+		}
 
 		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
 	} else {
-		if (!PMS_IS_MEP(buf, len))
-			return -EIO;
+		if (!PMS_IS_MEP(buf, len)) {
+			ret = -EIO;
+			goto put_nd;
+		}
 
 		skb = dev_alloc_skb(len - MEP_HDR_LEN);
 	}
@@ -511,7 +495,10 @@ static int aim_rx_data(struct mbo *mbo)
 
 out:
 	most_put_mbo(mbo);
-	return 0;
+
+put_nd:
+	dev_put(nd->dev);
+	return ret;
 }
 
 static struct most_aim aim = {
@@ -524,68 +511,54 @@ static struct most_aim aim = {
 
 static int __init most_net_init(void)
 {
-	pr_info("most_net_init()\n");
 	spin_lock_init(&list_lock);
+	mutex_init(&probe_disc_mt);
 	return most_register_aim(&aim);
 }
 
 static void __exit most_net_exit(void)
 {
-	struct net_dev_context *nd, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
-		list_del(&nd->list);
-		spin_unlock_irqrestore(&list_lock, flags);
-		/*
-		 * do not call most_stop_channel() here, because channels are
-		 * going to be closed in ndo_stop() after unregister_netdev()
-		 */
-		most_net_rm_netdev_safe(nd);
-		kfree(nd);
-		spin_lock_irqsave(&list_lock, flags);
-	}
-	spin_unlock_irqrestore(&list_lock, flags);
-
 	most_deregister_aim(&aim);
-	pr_info("most_net_exit()\n");
 }
 
 /**
- * most_deliver_netinfo - callback for HDM to be informed about HW's MAC
+ * on_netinfo - callback for HDM to be informed about HW's MAC
  * @param iface - most interface instance
  * @param link_stat - link status
  * @param mac_addr - MAC address
  */
-void most_deliver_netinfo(struct most_interface *iface,
-			  unsigned char link_stat, unsigned char *mac_addr)
+static void on_netinfo(struct most_interface *iface,
+		       unsigned char link_stat, unsigned char *mac_addr)
 {
 	struct net_dev_context *nd;
 	struct net_device *dev;
 	const u8 *m = mac_addr;
 
-	nd = get_net_dev_context(iface);
+	nd = get_net_dev_hold(iface);
 	if (!nd)
 		return;
 
 	dev = nd->dev;
-	if (!dev)
-		return;
+
+	if (link_stat)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 
 	if (m && is_valid_ether_addr(m)) {
 		if (!is_valid_ether_addr(dev->dev_addr)) {
 			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
 				    m[0], m[1], m[2], m[3], m[4], m[5]);
 			ether_addr_copy(dev->dev_addr, m);
-			complete(&nd->mac_compl);
+			netif_dormant_off(dev);
 		} else if (!ether_addr_equal(dev->dev_addr, m)) {
 			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
 				    m[0], m[1], m[2], m[3], m[4], m[5]);
 		}
 	}
+
+	dev_put(nd->dev);
 }
-EXPORT_SYMBOL(most_deliver_netinfo);
 
 module_init(most_net_init);
 module_exit(most_net_exit);
diff --git a/drivers/staging/most/aim-network/networking.h b/drivers/staging/most/aim-network/networking.h
deleted file mode 100644
index 6f346d4..0000000
--- a/drivers/staging/most/aim-network/networking.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Networking AIM - Networking Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-#ifndef _NETWORKING_H_
-#define _NETWORKING_H_
-
-#include "mostcore.h"
-
-void most_deliver_netinfo(struct most_interface *iface,
-			  unsigned char link_stat, unsigned char *mac_addr);
-
-#endif
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 28a0e17..663bfeb 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -4,7 +4,6 @@
 
 config HDM_DIM2
 	tristate "DIM2 HDM"
-	depends on AIM_NETWORK
 	depends on HAS_IOMEM
 
 	---help---
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index d604ec09..9148464 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -217,12 +217,15 @@ static inline void dim2_clear_ctr(u32 ctr_addr)
 }
 
 static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
-			       bool read_not_write, bool sync_mfe)
+			       bool read_not_write)
 {
+	bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
+	bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
 	u16 const cat =
 		(read_not_write << CAT_RNW_BIT) |
 		(ch_type << CAT_CT_SHIFT) |
 		(ch_addr << CAT_CL_SHIFT) |
+		(isoc_fce << CAT_FCE_BIT) |
 		(sync_mfe << CAT_MFE_BIT) |
 		(false << CAT_MT_BIT) |
 		(true << CAT_CE_BIT);
@@ -350,13 +353,13 @@ static void dim2_clear_ctram(void)
 
 static void dim2_configure_channel(
 	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
-	u16 packet_length, bool sync_mfe)
+	u16 packet_length)
 {
 	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
-	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);
+	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);
 
 	dim2_configure_adt(ch_addr);
-	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);
+	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
 
 	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
 	dimcb_io_write(&g.dim2->ACMR0,
@@ -771,7 +774,7 @@ static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
 	channel_init(ch, ch_address / 2);
 
 	dim2_configure_channel(ch->addr, type, is_tx,
-			       ch->dbr_addr, ch->dbr_size, 0, false);
+			       ch->dbr_addr, ch->dbr_size, 0);
 
 	return DIM_NO_ERROR;
 }
@@ -857,7 +860,7 @@ u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
 	isoc_init(ch, ch_address / 2, packet_length);
 
 	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
-			       ch->dbr_size, packet_length, false);
+			       ch->dbr_size, packet_length);
 
 	return DIM_NO_ERROR;
 }
@@ -885,7 +888,7 @@ u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
 
 	dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
 	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
-			       ch->dbr_addr, ch->dbr_size, 0, true);
+			       ch->dbr_addr, ch->dbr_size, 0);
 
 	return DIM_NO_ERROR;
 }
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 902824e..4607d03 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -26,7 +26,6 @@
 #include <linux/kthread.h>
 
 #include <mostcore.h>
-#include <networking.h>
 #include "dim2_hal.h"
 #include "dim2_hdm.h"
 #include "dim2_errors.h"
@@ -107,6 +106,8 @@ struct dim2_hdm {
 	unsigned char link_state;
 	int atx_idx;
 	struct medialb_bus bus;
+	void (*on_netinfo)(struct most_interface *,
+			   unsigned char, unsigned char *);
 };
 
 #define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
@@ -287,8 +288,11 @@ static int deliver_netinfo_thread(void *data)
 
 		if (dev->deliver_netinfo) {
 			dev->deliver_netinfo--;
-			most_deliver_netinfo(&dev->most_iface, dev->link_state,
-					     dev->mac_addrs);
+			if (dev->on_netinfo) {
+				dev->on_netinfo(&dev->most_iface,
+						dev->link_state,
+						dev->mac_addrs);
+			}
 		}
 	}
 
@@ -654,12 +658,18 @@ static int enqueue(struct most_interface *most_iface, int ch_idx,
  * Send a command to INIC which triggers retrieving of network info by means of
  * "Message exchange over MDP/MEP". Return 0 on success, negative on failure.
  */
-static void request_netinfo(struct most_interface *most_iface, int ch_idx)
+static void request_netinfo(struct most_interface *most_iface, int ch_idx,
+			    void (*on_netinfo)(struct most_interface *,
+					       unsigned char, unsigned char *))
 {
 	struct dim2_hdm *dev = iface_to_hdm(most_iface);
 	struct mbo *mbo;
 	u8 *data;
 
+	dev->on_netinfo = on_netinfo;
+	if (!on_netinfo)
+		return;
+
 	if (dev->atx_idx < 0) {
 		pr_err("Async Tx Not initialized\n");
 		return;
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index 01fe499..f7d9fbc 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -141,6 +141,7 @@ enum {
 	ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
 	ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
 
+	CAT_FCE_BIT = 14,
 	CAT_MFE_BIT = 14,
 
 	CAT_MT_BIT = 13,
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/hdm-i2c/hdm_i2c.c
index 1d5b229..2b4de40 100644
--- a/drivers/staging/most/hdm-i2c/hdm_i2c.c
+++ b/drivers/staging/most/hdm-i2c/hdm_i2c.c
@@ -185,12 +185,6 @@ static int poison_channel(struct most_interface *most_iface,
 	return 0;
 }
 
-static void request_netinfo(struct most_interface *most_iface,
-			    int ch_idx)
-{
-	pr_info("request_netinfo()\n");
-}
-
 static void do_rx_work(struct hdm_i2c *dev)
 {
 	struct mbo *mbo;
@@ -343,7 +337,6 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	dev->most_iface.configure = configure_channel;
 	dev->most_iface.enqueue = enqueue;
 	dev->most_iface.poison_channel = poison_channel;
-	dev->most_iface.request_netinfo = request_netinfo;
 
 	INIT_LIST_HEAD(&dev->rx.list);
 	mutex_init(&dev->rx.list_mutex);
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index ec15463..487f1f3 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -5,7 +5,7 @@
 config HDM_USB
 	tristate "USB HDM"
 	depends on USB && NET
-	select AIM_NETWORK
+
 	---help---
 	  Say Y here if you want to connect via USB to a network transceiver.
 	  This device driver depends on the networking AIM.
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index a95b591..d0f68cb 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -30,7 +30,6 @@
 #include <linux/etherdevice.h>
 #include <linux/uaccess.h>
 #include "mostcore.h"
-#include "networking.h"
 
 #define USB_MTU			512
 #define NO_ISOCHRONOUS_URB	0
@@ -126,6 +125,8 @@ struct most_dev {
 	struct mutex io_mutex;
 	struct timer_list link_stat_timer;
 	struct work_struct poll_work_obj;
+	void (*on_netinfo)(struct most_interface *, unsigned char,
+			   unsigned char *);
 };
 
 #define to_mdev(d) container_of(d, struct most_dev, iface)
@@ -719,12 +720,19 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
  * polls for the NI state of the INIC every 2 seconds.
  *
  */
-static void hdm_request_netinfo(struct most_interface *iface, int channel)
+static void hdm_request_netinfo(struct most_interface *iface, int channel,
+				void (*on_netinfo)(struct most_interface *,
+						   unsigned char,
+						   unsigned char *))
 {
 	struct most_dev *mdev;
 
 	BUG_ON(!iface);
 	mdev = to_mdev(iface);
+	mdev->on_netinfo = on_netinfo;
+	if (!on_netinfo)
+		return;
+
 	mdev->link_stat_timer.expires = jiffies + HZ;
 	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
 }
@@ -786,7 +794,8 @@ static void wq_netinfo(struct work_struct *wq_obj)
 	hw_addr[4] = lo >> 8;
 	hw_addr[5] = lo;
 
-	most_deliver_netinfo(&mdev->iface, link, hw_addr);
+	if (mdev->on_netinfo)
+		mdev->on_netinfo(&mdev->iface, link, hw_addr);
 }
 
 /**
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
index 5f8339b..915e515 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -233,6 +233,8 @@ struct mbo {
  *   The callback returns a negative value on error, otherwise 0.
  * @request_netinfo: triggers retrieving of network info from the HDM by
  *   means of "Message exchange over MDP/MEP"
+ *   Calling request_netinfo() with on_netinfo set to NULL invalidates the
+ *   previously registered callback pointer.
  * @priv Private field used by mostcore to store context information.
  */
 struct most_interface {
@@ -246,7 +248,10 @@ struct most_interface {
 	int (*enqueue)(struct most_interface *iface, int channel_idx,
 		       struct mbo *mbo);
 	int (*poison_channel)(struct most_interface *iface, int channel_idx);
-	void (*request_netinfo)(struct most_interface *iface, int channel_idx);
+	void (*request_netinfo)(struct most_interface *iface, int channel_idx,
+				void (*on_netinfo)(struct most_interface *iface,
+						   unsigned char link_stat,
+						   unsigned char *mac_addr));
 	void *priv;
 };
 
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 9a7858a..068aece 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -3659,14 +3659,14 @@ static int octeon_usb_probe(struct platform_device *pdev)
 	status = cvmx_usb_initialize(dev, usb);
 	if (status) {
 		dev_dbg(dev, "USB initialization failed with %d\n", status);
-		kfree(hcd);
+		usb_put_hcd(hcd);
 		return -1;
 	}
 
 	status = usb_add_hcd(hcd, irq, 0);
 	if (status) {
 		dev_dbg(dev, "USB add HCD failed with %d\n", status);
-		kfree(hcd);
+		usb_put_hcd(hcd);
 		return -1;
 	}
 	device_wakeup_enable(hcd->self.controller);
@@ -3691,7 +3691,7 @@ static int octeon_usb_remove(struct platform_device *pdev)
 	if (status)
 		dev_dbg(dev, "USB shutdown failed with %d\n", status);
 
-	kfree(hcd);
+	usb_put_hcd(hcd);
 
 	return 0;
 }
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 617da80..cb5540d 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -39,7 +39,7 @@ static inline int INTERFACE(int ipd_port)
 	interface = cvmx_helper_get_interface_num(ipd_port);
 	if (interface >= 0)
 		return interface;
-	panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
+	panic("Illegal ipd_port %d passed to %s\n", ipd_port, __func__);
 }
 
 /**
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 519b4d3..647a922 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -735,9 +735,11 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
 	cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
 
 
-	/* check if there is wps ie, */
-	/* if there is wpsie in beacon, the hostapd will update beacon twice when stating hostapd, */
-	/* and at first time the security ie (RSN/WPA IE) will not include in beacon. */
+	/* check if there is a wps ie;
+	 * if there is a wps ie in the beacon, hostapd will update the
+	 * beacon twice when starting, and the first time the
+	 * security ie (RSN/WPA IE) will not be included in the beacon.
+	 */
 	if (!rtw_get_wps_ie(pnetwork->IEs + _FIXED_IE_LENGTH_, pnetwork->IELength - _FIXED_IE_LENGTH_, NULL, NULL))
 		pmlmeext->bstart_bss = true;
 
@@ -751,8 +753,11 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
 		update_hw_ht_param(padapter);
 	}
 
-	if (pmlmepriv->cur_network.join_res != true) { /* setting only at  first time */
-		/* WEP Key will be set before this function, do not clear CAM. */
+	/* setting only at first time */
+	if (!pmlmepriv->cur_network.join_res) {
+		/* WEP Key will be set before this function, do not
+		 * clear CAM.
+		 */
 		if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
 		    (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
 			flush_all_cam_entry(padapter);	/* clear CAM */
@@ -809,7 +814,9 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
 			}
 		}
 	}
-	/* TODO: need to judge the phy parameters on concurrent mode for single phy */
+	/* TODO: need to judge the phy parameters on concurrent
+	 * mode for single phy
+	 */
 	set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
 
 	DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
@@ -991,7 +998,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
 			}
 			break;
 		}
-		if ((p == NULL) || (ie_len == 0))
+		if ((!p) || (ie_len == 0))
 			break;
 	}
 
@@ -1005,9 +1012,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
 			if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
 				pmlmepriv->qospriv.qos_option = 1;
 
-				*(p + 8) |= BIT(7);/* QoS Info, support U-APSD */
+				/* QoS Info, support U-APSD */
+				*(p + 8) |= BIT(7);
 
-				/* disable all ACM bits since the WMM admission control is not supported */
+				/* disable all ACM bits since the WMM
+				 * admission control is not supported
+				 */
 				*(p + 10) &= ~BIT(4); /* BE */
 				*(p + 14) &= ~BIT(4); /* BK */
 				*(p + 18) &= ~BIT(4); /* VI */
@@ -1015,7 +1025,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
 				break;
 			}
 
-			if ((p == NULL) || (ie_len == 0))
+			if ((!p) || (ie_len == 0))
 				break;
 		}
 	}
@@ -1097,7 +1107,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
 	psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->MacAddress);
 	if (!psta) {
 		psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->MacAddress);
-		if (psta == NULL)
+		if (!psta)
 			return _FAIL;
 	}
 
@@ -1268,12 +1278,12 @@ static void update_bcn_wps_ie(struct adapter *padapter)
 	DBG_88E("%s\n", __func__);
 
 	pwps_ie_src = pmlmepriv->wps_beacon_ie;
-	if (pwps_ie_src == NULL)
+	if (!pwps_ie_src)
 		return;
 
 	pwps_ie = rtw_get_wps_ie(ie+_FIXED_IE_LENGTH_, ielen-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
 
-	if (pwps_ie == NULL || wps_ielen == 0)
+	if (!pwps_ie || wps_ielen == 0)
 		return;
 
 	wps_offset = (uint)(pwps_ie-ie);
@@ -1834,7 +1844,9 @@ void stop_ap_mode(struct adapter *padapter)
 	pmlmepriv->update_bcn = false;
 	pmlmeext->bstart_bss = false;
 
-	/* reset and init security priv , this can refine with rtw_reset_securitypriv */
+	/* reset and init security priv , this can refine with
+	 * rtw_reset_securitypriv
+	 */
 	memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
 	padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
 	padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9754322..002d091 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -21,35 +21,33 @@
 #include <rtw_mlme_ext.h>
 
 /*
-Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
-No irqsave is necessary.
-*/
+ * Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
+ * No irqsave is necessary.
+ */
 
 int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
 {
 	init_completion(&pcmdpriv->cmd_queue_comp);
 	init_completion(&pcmdpriv->terminate_cmdthread_comp);
 
-	_rtw_init_queue(&(pcmdpriv->cmd_queue));
+	_rtw_init_queue(&pcmdpriv->cmd_queue);
 	return _SUCCESS;
 }
 
 /*
-Calling Context:
-
-rtw_enqueue_cmd can only be called between kernel thread,
-since only spin_lock is used.
-
-ISR/Call-Back functions can't call this sub-function.
-
-*/
+ * Calling Context:
+ *
+ * rtw_enqueue_cmd can only be called between kernel thread,
+ * since only spin_lock is used.
+ *
+ * ISR/Call-Back functions can't call this sub-function.
+ */
 
 static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
 {
 	unsigned long irqL;
 
-
-	if (obj == NULL)
+	if (!obj)
 		goto exit;
 
 	spin_lock_irqsave(&queue->lock, irqL);
@@ -60,7 +58,6 @@ static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
 
 exit:
 
-
 	return _SUCCESS;
 }
 
@@ -107,8 +104,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
 	int res = _FAIL;
 	struct adapter *padapter = pcmdpriv->padapter;
 
-
-	if (cmd_obj == NULL)
+	if (!cmd_obj)
 		goto exit;
 
 	cmd_obj->padapter = padapter;
@@ -126,19 +122,17 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
 
 exit:
 
-
 	return res;
 }
 
 void rtw_free_cmd_obj(struct cmd_obj *pcmd)
 {
-
 	if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
 		/* free parmbuf in cmd_obj */
 		kfree(pcmd->parmbuf);
 	}
 
-	if (pcmd->rsp != NULL) {
+	if (pcmd->rsp) {
 		if (pcmd->rspsz != 0) {
 			/* free rsp in cmd_obj */
 			kfree(pcmd->rsp);
@@ -147,7 +141,6 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
 
 	/* free cmd_obj */
 	kfree(pcmd);
-
 }
 
 int rtw_cmd_thread(void *context)
@@ -157,14 +150,15 @@ int rtw_cmd_thread(void *context)
 	u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
 	void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
 	struct adapter *padapter = context;
-	struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
 
 	allow_signal(SIGTERM);
 
 	pcmdpriv->cmdthd_running = true;
 	complete(&pcmdpriv->terminate_cmdthread_comp);
 
-	RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
+	RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+		 ("start r871x %s !!!!\n", __func__));
 
 	while (1) {
 		if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp))
@@ -208,7 +202,7 @@ int rtw_cmd_thread(void *context)
 		/* call callback function for post-processed */
 		if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
 			pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
-			if (pcmd_callback == NULL) {
+			if (!pcmd_callback) {
 				RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n", pcmd_callback, pcmd->cmdcode));
 				rtw_free_cmd_obj(pcmd);
 			} else {
@@ -236,15 +230,15 @@ int rtw_cmd_thread(void *context)
 
 	complete(&pcmdpriv->terminate_cmdthread_comp);
 
-
 	complete_and_exit(NULL, 0);
 }
 
 /*
-rtw_sitesurvey_cmd(~)
-	### NOTE:#### (!!!!)
-	MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
-*/
+ * rtw_sitesurvey_cmd(~)
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE
+ * LOCKED pmlmepriv->lock
+ */
 u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
 	struct rtw_ieee80211_channel *ch, int ch_num)
 {
@@ -315,13 +309,11 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
 		_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
 	}
 
-
 	return res;
 }
 
 void rtw_readtssi_cmdrsp_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
-
 	kfree(pcmd->parmbuf);
 	kfree(pcmd);
 }
@@ -334,7 +326,6 @@ u8 rtw_createbss_cmd(struct adapter  *padapter)
 	struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
 	u8	res = _SUCCESS;
 
-
 	LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
 
 	if (pmlmepriv->assoc_ssid.SsidLength == 0)
@@ -358,7 +349,6 @@ u8 rtw_createbss_cmd(struct adapter  *padapter)
 	res = rtw_enqueue_cmd(pcmdpriv, pcmd);
 exit:
 
-
 	return res;
 }
 
@@ -376,8 +366,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 	struct ht_priv		*phtpriv = &pmlmepriv->htpriv;
 	enum ndis_802_11_network_infra ndis_network_mode = pnetwork->network.InfrastructureMode;
 	struct mlme_ext_priv	*pmlmeext = &padapter->mlmeextpriv;
-	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
-
+	struct mlme_ext_info	*pmlmeinfo = &pmlmeext->mlmext_info;
 
 	LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
 
@@ -394,9 +383,8 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 	/* for IEs is fix buf size */
 	t_len = sizeof(struct wlan_bssid_ex);
 
-
 	/* for hidden ap to set fw_state here */
-	if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+	if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
 		switch (ndis_network_mode) {
 		case Ndis802_11IBSS:
 			set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
@@ -412,12 +400,13 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 	}
 
 	psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
-	if (psecnetwork == NULL) {
+	if (!psecnetwork) {
 		kfree(pcmd);
 
 		res = _FAIL;
 
-		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
+		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+			 ("%s :psecnetwork == NULL!!!\n", __func__));
 
 		goto exit;
 	}
@@ -444,7 +433,6 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 
 	psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
 
-
 	pqospriv->qos_option = 0;
 
 	if (pregistrypriv->wmm_enable) {
@@ -498,7 +486,6 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 
 exit:
 
-
 	return res;
 }
 
@@ -509,8 +496,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
 	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
 	u8 res = _SUCCESS;
 
-
-	RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
+	RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+%s\n", __func__));
 
 	/* prepare cmd parameter */
 	param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,7 +525,6 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
 
 exit:
 
-
 	return res;
 }
 
@@ -617,7 +602,6 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
 	struct sta_info *sta = (struct sta_info *)psta;
 	u8	res = _SUCCESS;
 
-
 	if (!enqueue) {
 		clear_cam_entry(padapter, entry);
 	} else {
@@ -656,7 +640,6 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
 	}
 exit:
 
-
 	return res;
 }
 
@@ -667,7 +650,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
 	struct addBaReq_parm *paddbareq_parm;
 	u8	res = _SUCCESS;
 
-
 	ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
 	if (!ph2c) {
 		res = _FAIL;
@@ -693,7 +675,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
 
 exit:
 
-
 	return res;
 }
 
@@ -704,7 +685,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
 	struct cmd_priv	*pcmdpriv = &padapter->cmdpriv;
 	u8	res = _SUCCESS;
 
-
 	ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
 	if (!ph2c) {
 		res = _FAIL;
@@ -724,7 +704,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
 
 	init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, _Set_Drv_Extra_CMD_);
 
-
 	/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
 	res = rtw_enqueue_cmd(pcmdpriv, ph2c);
 exit:
@@ -739,8 +718,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
 
 	u8	res = _SUCCESS;
 
-
-	RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
+	RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+%s\n", __func__));
 
 	/* check input parameter */
 	if (!rtw_is_channel_plan_valid(chplan)) {
@@ -781,7 +759,6 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
 
 exit:
 
-
 	return res;
 }
 
@@ -790,7 +767,7 @@ static void traffic_status_watchdog(struct adapter *padapter)
 	u8	bEnterPS;
 	u8	bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
 	u8	bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
-	struct mlme_priv		*pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv		*pmlmepriv = &padapter->mlmepriv;
 
 	/*  */
 	/*  Determine if our traffic is busy now */
@@ -849,7 +826,7 @@ static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
 	struct mlme_priv *pmlmepriv;
 
 	padapter = (struct adapter *)pbuf;
-	pmlmepriv = &(padapter->mlmepriv);
+	pmlmepriv = &padapter->mlmepriv;
 
 #ifdef CONFIG_88EU_AP_MODE
 	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
@@ -865,10 +842,9 @@ static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
 static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
 {
 	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	u8	mstatus;
 
-
 	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
 	    (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
 		return;
@@ -905,7 +881,6 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
 	default:
 		break;
 	}
-
 }
 
 u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
@@ -943,7 +918,6 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
 
 exit:
 
-
 	return res;
 }
 
@@ -980,7 +954,6 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
 	res = rtw_enqueue_cmd(pcmdpriv, ph2c);
 exit:
 
-
 	return res;
 }
 
@@ -1026,7 +999,6 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
 	}
 exit:
 
-
 	return res;
 }
 
@@ -1169,7 +1141,6 @@ void rtw_survey_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-
 	if (pcmd->res == H2C_DROPPED) {
 		/* TODO: cancel timer and do timeout handler directly... */
 		/* need to make timeout handlerOS independent */
@@ -1183,13 +1154,12 @@ void rtw_survey_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 
 	/*  free cmd */
 	rtw_free_cmd_obj(pcmd);
-
 }
+
 void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 {
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-
 	if (pcmd->res != H2C_SUCCESS) {
 		spin_lock_bh(&pmlmepriv->lock);
 		set_fwstate(pmlmepriv, _FW_LINKED);
@@ -1207,7 +1177,6 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 {
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
-
 	if (pcmd->res == H2C_DROPPED) {
 		/* TODO: cancel timer and do timeout handler directly... */
 		/* need to make timeout handlerOS independent */
@@ -1220,7 +1189,6 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
 	}
 
 	rtw_free_cmd_obj(pcmd);
-
 }
 
 void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
@@ -1229,11 +1197,11 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 	struct wlan_network *pwlan = NULL;
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
-	struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
-
+	struct wlan_network *tgt_network = &pmlmepriv->cur_network;
 
 	if (pcmd->res != H2C_SUCCESS) {
-		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback  Fail ************\n\n."));
+		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+			 ("\n **** Error: %s  Fail ****\n\n.", __func__));
 		mod_timer(&pmlmepriv->assoc_timer,
 			  jiffies + msecs_to_jiffies(1));
 	}
@@ -1246,7 +1214,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 		psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
 		if (!psta) {
 			psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
-			if (psta == NULL) {
+			if (!psta) {
 				RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
 				goto createbss_cmd_fail;
 			}
@@ -1255,28 +1223,31 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 		rtw_indicate_connect(padapter);
 	} else {
 		pwlan = _rtw_alloc_network(pmlmepriv);
-		spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
-		if (pwlan == NULL) {
+		spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+		if (!pwlan) {
 			pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
-			if (pwlan == NULL) {
+			if (!pwlan) {
 				RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error:  can't get pwlan in rtw_joinbss_event_callback\n"));
 				spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 				goto createbss_cmd_fail;
 			}
 			pwlan->last_scanned = jiffies;
 		} else {
-			list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
+			list_add_tail(&pwlan->list,
+				      &pmlmepriv->scanned_queue.queue);
 		}
 
 		pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
-		memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
+		memcpy(&pwlan->network, pnetwork, pnetwork->Length);
 
 		memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
 
 		_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
 
 		spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
-		/*  we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
+		/*  we will set _FW_LINKED when there is one more sta to
+		 *  join us (rtw_stassoc_event_callback)
+		 */
 	}
 
 createbss_cmd_fail:
@@ -1284,7 +1255,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
 	spin_unlock_bh(&pmlmepriv->lock);
 
 	rtw_free_cmd_obj(pcmd);
-
 }
 
 void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter,  struct cmd_obj *pcmd)
@@ -1293,8 +1263,7 @@ void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter,  struct cmd_obj *pc
 	struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
 	struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
 
-
-	if (psta == NULL) {
+	if (!psta) {
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
 		goto exit;
 	}
@@ -1310,8 +1279,7 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter,  struct cmd_obj *
 	struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
 	struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
 
-
-	if (psta == NULL) {
+	if (!psta) {
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
 		goto exit;
 	}
@@ -1326,5 +1294,4 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter,  struct cmd_obj *
 
 exit:
 	rtw_free_cmd_obj(pcmd);
-
 }
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index d1dafe0..bb867a9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -955,50 +955,6 @@ void rtw_macaddr_cfg(u8 *mac_addr)
 	DBG_88E("rtw_macaddr_cfg MAC Address  = %pM\n", (mac_addr));
 }
 
-/* Baron adds to avoid FreeBSD warning */
-int ieee80211_is_empty_essid(const char *essid, int essid_len)
-{
-	/* Single white space is for Linksys APs */
-	if (essid_len == 1 && essid[0] == ' ')
-		return 1;
-
-	/* Otherwise, if the entire essid is 0, we assume it is hidden */
-	while (essid_len) {
-		essid_len--;
-		if (essid[essid_len] != '\0')
-			return 0;
-	}
-
-	return 1;
-}
-
-int ieee80211_get_hdrlen(u16 fc)
-{
-	int hdrlen = 24;
-
-	switch (WLAN_FC_GET_TYPE(fc)) {
-	case RTW_IEEE80211_FTYPE_DATA:
-		if (fc & RTW_IEEE80211_STYPE_QOS_DATA)
-			hdrlen += 2;
-		if ((fc & RTW_IEEE80211_FCTL_FROMDS) && (fc & RTW_IEEE80211_FCTL_TODS))
-			hdrlen += 6; /* Addr4 */
-		break;
-	case RTW_IEEE80211_FTYPE_CTL:
-		switch (WLAN_FC_GET_STYPE(fc)) {
-		case RTW_IEEE80211_STYPE_CTS:
-		case RTW_IEEE80211_STYPE_ACK:
-			hdrlen = 10;
-			break;
-		default:
-			hdrlen = 16;
-			break;
-		}
-		break;
-	}
-
-	return hdrlen;
-}
-
 static int rtw_get_cipher_info(struct wlan_network *pnetwork)
 {
 	uint wpa_ielen;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 301085a..fde3060 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -370,7 +370,7 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
 		sq_final = padapter->recvpriv.signal_qual;
 		/* the rssi value here is undecorated, and will be used for antenna diversity */
 		if (sq_smp != 101) /* from the right channel */
-			rssi_final = (src->Rssi+dst->Rssi*4)/5;
+			rssi_final = (src->Rssi + dst->Rssi * 4) / 5;
 		else
 			rssi_final = rssi_ori;
 	} else {
@@ -1081,10 +1081,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
 					RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
 				}
 
-			/* s5. Cancle assoc_timer */
+			/* s5. Cancel assoc_timer */
 			del_timer_sync(&pmlmepriv->assoc_timer);
 
-			RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n"));
+			RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancel assoc_timer\n"));
 
 		} else {
 			RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
@@ -1926,7 +1926,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
 
 		if (pqospriv->qos_option == 0) {
 			out_len = *pout_len;
-			rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+			rtw_set_ie(out_ie + out_len, _VENDOR_SPECIFIC_IE_,
 				   _WMM_IE_Length_, WMM_IE, pout_len);
 
 			pqospriv->qos_option = 1;
@@ -2058,8 +2058,8 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
 	phtpriv = &psta->htpriv;
 
 	if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
-		issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
-		issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
+		issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1;
+		issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1;
 
 		if (issued == 0) {
 			DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index c6c4404..2c37bb5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -259,12 +259,10 @@ static int recvframe_chkmic(struct adapter *adapter,
 			}
 
 			/* icv_len included the mic code */
-			datalen = precvframe->pkt->len-prxattrib->hdrlen -
-				  prxattrib->iv_len-prxattrib->icv_len-8;
+			datalen = precvframe->pkt->len - prxattrib->hdrlen - 8;
 			pframe = precvframe->pkt->data;
-			payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+			payload = pframe + prxattrib->hdrlen;
 
-			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
 			rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
 					   (unsigned char)prxattrib->priority); /* care the length of the data */
 
@@ -409,9 +407,15 @@ static struct recv_frame *decryptor(struct adapter *padapter,
 		default:
 			break;
 		}
+		if (res != _FAIL) {
+			memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen);
+			skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len);
+			skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len);
+		}
 	} else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
-		   (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
-			psecuritypriv->hw_decrypted = true;
+		   (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) {
+		psecuritypriv->hw_decrypted = true;
+	}
 
 	if (res == _FAIL) {
 		rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
@@ -452,7 +456,7 @@ static struct recv_frame *portctrl(struct adapter *adapter,
 
 	if (auth_alg == 2) {
 		/* get ether_type */
-		ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
+		ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
 		memcpy(&be_tmp, ptr, 2);
 		ether_type = ntohs(be_tmp);
 
@@ -1134,6 +1138,8 @@ static int validate_recv_data_frame(struct adapter *adapter,
 	}
 
 	if (pattrib->privacy) {
+		struct sk_buff *skb = precv_frame->pkt;
+
 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
 
@@ -1142,6 +1148,13 @@ static int validate_recv_data_frame(struct adapter *adapter,
 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
 
 		SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+
+		if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) {
+			memmove(skb->data + pattrib->iv_len,
+				skb->data, pattrib->hdrlen);
+			skb_pull(skb, pattrib->iv_len);
+			skb_trim(skb, skb->len - pattrib->icv_len);
+		}
 	} else {
 		pattrib->encrypt = 0;
 		pattrib->iv_len = 0;
@@ -1261,6 +1274,7 @@ static int validate_recv_frame(struct adapter *adapter,
 	 * Hence forward the frame to the monitor anyway to preserve the order
 	 * in which frames were received.
 	 */
+
 	rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame);
 
 exit:
@@ -1282,11 +1296,8 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
 	u8 *ptr = precvframe->pkt->data;
 	struct rx_pkt_attrib *pattrib = &precvframe->attrib;
 
-	if (pattrib->encrypt)
-		skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len);
-
-	psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
-	psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
+	psnap = (struct ieee80211_snap_hdr *)(ptr + pattrib->hdrlen);
+	psnap_type = ptr + pattrib->hdrlen + SNAP_SIZE;
 	/* convert hdr + possible LLC headers into Ethernet header */
 	if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
 	     (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
@@ -1299,12 +1310,9 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
 		bsnaphdr = false;
 	}
 
-	rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
+	rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0);
 	len = precvframe->pkt->len - rmv_len;
 
-	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
-		 ("\n===pattrib->hdrlen: %x,  pattrib->iv_len:%x===\n\n", pattrib->hdrlen,  pattrib->iv_len));
-
 	memcpy(&be_tmp, ptr+rmv_len, 2);
 	eth_type = ntohs(be_tmp); /* pattrib->ether_type */
 	pattrib->eth_type = eth_type;
@@ -1329,7 +1337,6 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
 					   struct __queue *defrag_q)
 {
 	struct list_head *plist, *phead;
-	u8 wlanhdr_offset;
 	u8	curfragnum;
 	struct recv_frame *pfhdr, *pnfhdr;
 	struct recv_frame *prframe, *pnextrframe;
@@ -1378,12 +1385,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
 		/* copy the 2nd~n fragment frame's payload to the first fragment */
 		/* get the 2nd~last fragment frame's payload */
 
-		wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
-
-		skb_pull(pnextrframe->pkt, wlanhdr_offset);
-
-		/* append  to first fragment frame's tail (if privacy frame, pull the ICV) */
-		skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len);
+		skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen);
 
 		/* memcpy */
 		memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data,
@@ -1391,7 +1393,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
 
 		skb_put(prframe->pkt, pnfhdr->pkt->len);
 
-		pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+		pfhdr->attrib.icv_len = 0;
 		plist = plist->next;
 	}
 
@@ -1518,11 +1520,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
 	nr_subframes = 0;
 	pattrib = &prframe->attrib;
 
-	skb_pull(prframe->pkt, prframe->attrib.hdrlen);
-
-	if (prframe->attrib.iv_len > 0)
-		skb_pull(prframe->pkt, prframe->attrib.iv_len);
-
 	a_len = prframe->pkt->len;
 
 	pdata = prframe->pkt->data;
@@ -1892,24 +1889,6 @@ static int process_recv_indicatepkts(struct adapter *padapter,
 	return retval;
 }
 
-static int recv_func_prehandle(struct adapter *padapter,
-			       struct recv_frame *rframe)
-{
-	int ret = _SUCCESS;
-	struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
-
-	/* check the frame crtl field and decache */
-	ret = validate_recv_frame(padapter, rframe);
-	if (ret != _SUCCESS) {
-		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
-		rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
-		goto exit;
-	}
-
-exit:
-	return ret;
-}
-
 static int recv_func_posthandle(struct adapter *padapter,
 				struct recv_frame *prframe)
 {
@@ -1962,6 +1941,7 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
 	struct rx_pkt_attrib *prxattrib = &rframe->attrib;
 	struct security_priv *psecuritypriv = &padapter->securitypriv;
 	struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+	struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
 
 	/* check if need to handle uc_swdec_pending_queue*/
 	if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
@@ -1973,9 +1953,12 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
 		}
 	}
 
-	ret = recv_func_prehandle(padapter, rframe);
-
-	if (ret == _SUCCESS) {
+	/* check the frame ctrl field and decache */
+	ret = validate_recv_frame(padapter, rframe);
+	if (ret != _SUCCESS) {
+		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+		rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+	} else {
 		/* check if need to enqueue into uc_swdec_pending_queue*/
 		if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
 		    !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 2ecfb11..22cf362 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -61,7 +61,6 @@ static void _rtw_init_stainfo(struct sta_info *psta)
 	psta->keep_alive_trycnt = 0;
 
 #endif	/*  CONFIG_88EU_AP_MODE */
-
 }
 
 u32	_rtw_init_sta_priv(struct	sta_priv *pstapriv)
@@ -69,7 +68,6 @@ u32	_rtw_init_sta_priv(struct	sta_priv *pstapriv)
 	struct sta_info *psta;
 	s32 i;
 
-
 	pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA + 4);
 
 	if (!pstapriv->pallocated_stainfo_buf)
@@ -116,7 +114,6 @@ u32	_rtw_init_sta_priv(struct	sta_priv *pstapriv)
 	pstapriv->max_num_sta = NUM_STA;
 #endif
 
-
 	return _SUCCESS;
 }
 
@@ -263,7 +260,6 @@ u32	rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
 	struct	xmit_priv	*pxmitpriv = &padapter->xmitpriv;
 	struct	sta_priv *pstapriv = &padapter->stapriv;
 
-
 	if (!psta)
 		goto exit;
 
@@ -381,7 +377,6 @@ u32	rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
 
 exit:
 
-
 	return _SUCCESS;
 }
 
@@ -394,7 +389,6 @@ void rtw_free_all_stainfo(struct adapter *padapter)
 	struct	sta_priv *pstapriv = &padapter->stapriv;
 	struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
 
-
 	if (pstapriv->asoc_sta_count == 1)
 		return;
 
@@ -425,7 +419,6 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
 	u8 *addr;
 	u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
-
 	if (!hwaddr)
 		return NULL;
 
@@ -463,7 +456,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
 	unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct	sta_priv *pstapriv = &padapter->stapriv;
 
-
 	psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
 
 	if (!psta) {
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 054f599..3039bbe 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -1091,7 +1091,7 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
 		}
 	}
 
-	if (0x00 == path_a_ok) {
+	if (path_a_ok == 0x00) {
 		ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
 			     ("Path A IQK failed!!\n"));
 	}
@@ -1122,7 +1122,7 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
 			}
 		}
 
-		if (0x00 == path_b_ok) {
+		if (path_b_ok == 0x00) {
 			ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
 				     ("Path B IQK failed!!\n"));
 		}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index d9fa290..9f51f54 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -58,7 +58,7 @@ static void process_link_qual(struct adapter *padapter,
 }
 
 void rtl8188e_process_phy_info(struct adapter *padapter,
-		               struct recv_frame *precvframe)
+			       struct recv_frame *precvframe)
 {
 	/*  Check RSSI */
 	process_rssi(padapter, precvframe);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index 22ab0c4..284db7d 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -306,68 +306,6 @@ enum eap_type {
 #define MIN_FRAG_THRESHOLD     256U
 #define	MAX_FRAG_THRESHOLD     2346U
 
-/* Frame control field constants */
-#define RTW_IEEE80211_FCTL_VERS		0x0003
-#define RTW_IEEE80211_FCTL_FTYPE	0x000c
-#define RTW_IEEE80211_FCTL_STYPE	0x00f0
-#define RTW_IEEE80211_FCTL_TODS		0x0100
-#define RTW_IEEE80211_FCTL_FROMDS	0x0200
-#define RTW_IEEE80211_FCTL_MOREFRAGS	0x0400
-#define RTW_IEEE80211_FCTL_RETRY	0x0800
-#define RTW_IEEE80211_FCTL_PM		0x1000
-#define RTW_IEEE80211_FCTL_MOREDATA	0x2000
-#define RTW_IEEE80211_FCTL_PROTECTED	0x4000
-#define RTW_IEEE80211_FCTL_ORDER	0x8000
-#define RTW_IEEE80211_FCTL_CTL_EXT	0x0f00
-
-#define RTW_IEEE80211_FTYPE_MGMT	0x0000
-#define RTW_IEEE80211_FTYPE_CTL		0x0004
-#define RTW_IEEE80211_FTYPE_DATA	0x0008
-#define RTW_IEEE80211_FTYPE_EXT		0x000c
-
-/* management */
-#define RTW_IEEE80211_STYPE_ASSOC_REQ	0x0000
-#define RTW_IEEE80211_STYPE_ASSOC_RESP	0x0010
-#define RTW_IEEE80211_STYPE_REASSOC_REQ	0x0020
-#define RTW_IEEE80211_STYPE_REASSOC_RESP	0x0030
-#define RTW_IEEE80211_STYPE_PROBE_REQ	0x0040
-#define RTW_IEEE80211_STYPE_PROBE_RESP	0x0050
-#define RTW_IEEE80211_STYPE_BEACON	0x0080
-#define RTW_IEEE80211_STYPE_ATIM	0x0090
-#define RTW_IEEE80211_STYPE_DISASSOC	0x00A0
-#define RTW_IEEE80211_STYPE_AUTH	0x00B0
-#define RTW_IEEE80211_STYPE_DEAUTH	0x00C0
-#define RTW_IEEE80211_STYPE_ACTION	0x00D0
-
-/* control */
-#define RTW_IEEE80211_STYPE_CTL_EXT	0x0060
-#define RTW_IEEE80211_STYPE_BACK_REQ	0x0080
-#define RTW_IEEE80211_STYPE_BACK	0x0090
-#define RTW_IEEE80211_STYPE_PSPOLL	0x00A0
-#define RTW_IEEE80211_STYPE_RTS		0x00B0
-#define RTW_IEEE80211_STYPE_CTS		0x00C0
-#define RTW_IEEE80211_STYPE_ACK		0x00D0
-#define RTW_IEEE80211_STYPE_CFEND	0x00E0
-#define RTW_IEEE80211_STYPE_CFENDACK	0x00F0
-
-/* data */
-#define RTW_IEEE80211_STYPE_DATA	0x0000
-#define RTW_IEEE80211_STYPE_DATA_CFACK	0x0010
-#define RTW_IEEE80211_STYPE_DATA_CFPOLL	0x0020
-#define RTW_IEEE80211_STYPE_DATA_CFACKPOLL	0x0030
-#define RTW_IEEE80211_STYPE_NULLFUNC	0x0040
-#define RTW_IEEE80211_STYPE_CFACK	0x0050
-#define RTW_IEEE80211_STYPE_CFPOLL	0x0060
-#define RTW_IEEE80211_STYPE_CFACKPOLL	0x0070
-#define RTW_IEEE80211_STYPE_QOS_DATA	0x0080
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFACK	0x0090
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFPOLL	0x00A0
-#define RTW_IEEE80211_STYPE_QOS_DATA_CFACKPOLL	0x00B0
-#define RTW_IEEE80211_STYPE_QOS_NULLFUNC	0x00C0
-#define RTW_IEEE80211_STYPE_QOS_CFACK		0x00D0
-#define RTW_IEEE80211_STYPE_QOS_CFPOLL		0x00E0
-#define RTW_IEEE80211_STYPE_QOS_CFACKPOLL	0x00F0
-
 /* sequence control field */
 #define RTW_IEEE80211_SCTL_FRAG	0x000F
 #define RTW_IEEE80211_SCTL_SEQ	0xFFF0
@@ -408,9 +346,6 @@ struct ieee80211_snap_hdr {
 
 #define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
 
-#define WLAN_FC_GET_TYPE(fc) ((fc) & RTW_IEEE80211_FCTL_FTYPE)
-#define WLAN_FC_GET_STYPE(fc) ((fc) & RTW_IEEE80211_FCTL_STYPE)
-
 #define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
 
 #define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
@@ -423,14 +358,6 @@ struct ieee80211_snap_hdr {
 #define IEEE80211_DATA_HDR3_LEN 24
 #define IEEE80211_DATA_HDR4_LEN 30
 
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
-#define IEEE80211_STATMASK_WEMASK 0x7
-
-
 #define IEEE80211_CCK_MODULATION    (1<<0)
 #define IEEE80211_OFDM_MODULATION   (1<<1)
 
@@ -488,9 +415,6 @@ struct ieee80211_snap_hdr {
 	IEEE80211_OFDM_RATE_36MB_MASK |					\
 	IEEE80211_OFDM_RATE_48MB_MASK |					\
 	IEEE80211_OFDM_RATE_54MB_MASK)
-#define IEEE80211_DEFAULT_RATES_MASK					\
-	(IEEE80211_OFDM_DEFAULT_RATES_MASK |				\
-	 IEEE80211_CCK_DEFAULT_RATES_MASK)
 
 #define IEEE80211_NUM_OFDM_RATES	8
 #define IEEE80211_NUM_CCK_RATES		4
@@ -521,25 +445,6 @@ struct ieee80211_snap_hdr {
 #define WEP_KEYS 4
 #define WEP_KEY_LEN 13
 
-#define BEACON_PROBE_SSID_ID_POSITION 12
-
-/* Management Frame Information Element Types */
-#define MFIE_TYPE_SSID		0
-#define MFIE_TYPE_RATES		1
-#define MFIE_TYPE_FH_SET	2
-#define MFIE_TYPE_DS_SET	3
-#define MFIE_TYPE_CF_SET	4
-#define MFIE_TYPE_TIM		5
-#define MFIE_TYPE_IBSS_SET	6
-#define MFIE_TYPE_CHALLENGE	16
-#define MFIE_TYPE_ERP		42
-#define MFIE_TYPE_RSN		48
-#define MFIE_TYPE_RATES_EX	50
-#define MFIE_TYPE_GENERIC	221
-
-#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
-#define IEEE80211_DEFAULT_BASIC_RATE 10
-
 /* SWEEP TABLE ENTRIES NUMBER*/
 #define MAX_SWEEP_TAB_ENTRIES		  42
 #define MAX_SWEEP_TAB_ENTRIES_PER_PACKET  7
@@ -566,14 +471,6 @@ struct ieee80211_snap_hdr {
 #define NETWORK_HAS_OFDM    (1<<1)
 #define NETWORK_HAS_CCK     (1<<2)
 
-#define IEEE80211_DTIM_MBCAST 4
-#define IEEE80211_DTIM_UCAST 2
-#define IEEE80211_DTIM_VALID 1
-#define IEEE80211_DTIM_INVALID 0
-
-#define IEEE80211_PS_DISABLED 0
-#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
-#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
 #define IW_ESSID_MAX_SIZE 32
 /*
 join_res:
@@ -644,10 +541,6 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
 #define IEEE_G	    (1<<2)
 #define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
 
-/* Baron move to ieee80211.c */
-int ieee80211_is_empty_essid(const char *essid, int essid_len);
-int ieee80211_get_hdrlen(u16 fc);
-
 /* Action category code */
 enum rtw_ieee80211_category {
 	RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index cfe37eb..7326d9b 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -66,6 +66,34 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data,
 	netif_rx(skb);
 }
 
+static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data,
+				    int data_len)
+{
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	int hdr_len;
+
+	skb = netdev_alloc_skb(dev, data_len);
+	if (!skb)
+		return;
+	memcpy(skb_put(skb, data_len), data, data_len);
+
+	/*
+	 * Frame data is not encrypted. Strip off protection so
+	 * userspace doesn't think that it is.
+	 */
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (ieee80211_has_protected(hdr->frame_control))
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);
+}
+
 static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
 			       int data_len)
 {
@@ -82,7 +110,6 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
 void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
 {
 	struct rx_pkt_attrib *attr;
-	int iv_len, icv_len;
 	int data_len;
 	u8 *data;
 
@@ -95,11 +122,8 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
 	data = frame->pkt->data;
 	data_len = frame->pkt->len;
 
-	/* Broadcast and multicast frames don't have attr->{iv,icv}_len set */
-	SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt);
-
 	if (attr->bdecrypted)
-		mon_recv_decrypted(dev, data, data_len, iv_len, icv_len);
+		mon_recv_decrypted_recv(dev, data, data_len);
 	else
 		mon_recv_encrypted(dev, data, data_len);
 }
@@ -152,7 +176,7 @@ static const struct net_device_ops mon_netdev_ops = {
 static void mon_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &mon_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	ether_setup(dev);
 	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211;
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 623075c..7fa3c4d 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -30,8 +30,8 @@ enum dot11d_state {
 };
 
 /**
- * struct rt_dot11d_info * @CountryIeLen: value greater than 0 if @CountryIeBuf contains
- *		  valid country information element.
+ * struct rt_dot11d_info * @CountryIeLen: value greater than 0 if
+ *		  @CountryIeBuf contains a valid country information element.
  * @channel_map: holds channel values
  *		0 - invalid,
  *		1 - valid (active scan),
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index a4d1bac..aca5265 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2275,131 +2275,6 @@ static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
 	return 0;
 }
 
-/* based on ipw2200 driver */
-static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct r8192_priv *priv = rtllib_priv(dev);
-	struct iwreq *wrq = (struct iwreq *)rq;
-	int ret = -1;
-	struct rtllib_device *ieee = priv->rtllib;
-	u32 key[4];
-	const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-	struct iw_point *p = &wrq->u.data;
-	struct ieee_param *ipw = NULL;
-
-	mutex_lock(&priv->wx_mutex);
-
-	switch (cmd) {
-	case RTL_IOCTL_WPA_SUPPLICANT:
-		if (p->length < sizeof(struct ieee_param) || !p->pointer) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		ipw = memdup_user(p->pointer, p->length);
-		if (IS_ERR(ipw)) {
-			ret = PTR_ERR(ipw);
-			goto out;
-		}
-
-		if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION) {
-			if (ipw->u.crypt.set_tx) {
-				if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
-					ieee->pairwise_key_type = KEY_TYPE_CCMP;
-				else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
-					ieee->pairwise_key_type = KEY_TYPE_TKIP;
-				else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
-					if (ipw->u.crypt.key_len == 13)
-						ieee->pairwise_key_type =
-							 KEY_TYPE_WEP104;
-					else if (ipw->u.crypt.key_len == 5)
-						ieee->pairwise_key_type =
-							 KEY_TYPE_WEP40;
-				} else {
-					ieee->pairwise_key_type = KEY_TYPE_NA;
-				}
-
-				if (ieee->pairwise_key_type) {
-					if (is_zero_ether_addr(ieee->ap_mac_addr))
-						ieee->iw_mode = IW_MODE_ADHOC;
-					memcpy((u8 *)key, ipw->u.crypt.key, 16);
-					rtl92e_enable_hw_security_config(dev);
-					rtl92e_set_swcam(dev, 4,
-							 ipw->u.crypt.idx,
-							 ieee->pairwise_key_type,
-							 (u8 *)ieee->ap_mac_addr,
-							 0, key, 0);
-					rtl92e_set_key(dev, 4, ipw->u.crypt.idx,
-						       ieee->pairwise_key_type,
-						       (u8 *)ieee->ap_mac_addr,
-						       0, key);
-					if (ieee->iw_mode == IW_MODE_ADHOC) {
-						rtl92e_set_swcam(dev,
-								 ipw->u.crypt.idx,
-								 ipw->u.crypt.idx,
-								 ieee->pairwise_key_type,
-								 (u8 *)ieee->ap_mac_addr,
-								 0, key, 0);
-						rtl92e_set_key(dev,
-							       ipw->u.crypt.idx,
-							       ipw->u.crypt.idx,
-							       ieee->pairwise_key_type,
-							       (u8 *)ieee->ap_mac_addr,
-							       0, key);
-					}
-				}
-				if ((ieee->pairwise_key_type ==
-				     KEY_TYPE_CCMP) &&
-				     ieee->pHTInfo->bCurrentHTSupport) {
-					rtl92e_writeb(dev, 0x173, 1);
-				}
-
-			} else {
-				memcpy((u8 *)key, ipw->u.crypt.key, 16);
-				if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
-					ieee->group_key_type = KEY_TYPE_CCMP;
-				else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
-					ieee->group_key_type = KEY_TYPE_TKIP;
-				else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
-					if (ipw->u.crypt.key_len == 13)
-						ieee->group_key_type =
-							 KEY_TYPE_WEP104;
-					else if (ipw->u.crypt.key_len == 5)
-						ieee->group_key_type =
-							 KEY_TYPE_WEP40;
-				} else
-					ieee->group_key_type = KEY_TYPE_NA;
-
-				if (ieee->group_key_type) {
-					rtl92e_set_swcam(dev, ipw->u.crypt.idx,
-							 ipw->u.crypt.idx,
-							 ieee->group_key_type,
-							 broadcast_addr, 0, key,
-							 0);
-					rtl92e_set_key(dev, ipw->u.crypt.idx,
-						       ipw->u.crypt.idx,
-						       ieee->group_key_type,
-						       broadcast_addr, 0, key);
-				}
-			}
-		}
-
-		ret = rtllib_wpa_supplicant_ioctl(priv->rtllib, &wrq->u.data,
-						  0);
-		kfree(ipw);
-		break;
-	default:
-		ret = -EOPNOTSUPP;
-		break;
-	}
-
-out:
-	mutex_unlock(&priv->wx_mutex);
-
-	return ret;
-}
-
-
 static irqreturn_t _rtl92e_irq(int irq, void *netdev)
 {
 	struct net_device *dev = netdev;
@@ -2542,7 +2417,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
 	.ndo_open = _rtl92e_open,
 	.ndo_stop = _rtl92e_close,
 	.ndo_tx_timeout = _rtl92e_tx_timeout,
-	.ndo_do_ioctl = _rtl92e_ioctl,
 	.ndo_set_rx_mode = _rtl92e_set_multicast,
 	.ndo_set_mac_address = _rtl92e_set_mac_adr,
 	.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 0335823..9d3089c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -102,8 +102,6 @@
 
 #define	PHY_RSSI_SLID_WIN_MAX			100
 
-#define RTL_IOCTL_WPA_SUPPLICANT		(SIOCIWFIRSTPRIV + 30)
-
 #define TxBBGainTableLength			37
 #define CCKTxBBGainTableLength			23
 
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 1a43c68..b8205eba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -1693,22 +1693,6 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
 	if (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
 		goto dm_CheckEdcaTurbo_EXIT;
 
-	{
-		u8 *peername[11] = {
-			"unknown", "realtek_90", "realtek_92se", "broadcom",
-			"ralink", "atheros", "cisco", "marvell", "92u_softap",
-			"self_softap"
-		};
-		static int wb_tmp;
-
-		if (wb_tmp == 0) {
-			netdev_info(dev,
-				    "%s():iot peer is %s, bssid: %pM\n",
-				    __func__, peername[pHTInfo->IOTPeer],
-				    priv->rtllib->current_network.bssid);
-			wb_tmp = 1;
-		}
-	}
 	if (!priv->rtllib->bis_any_nonbepkts) {
 		curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
 		curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 4ae1d38..f0e1172 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -908,8 +908,8 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
 		pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
 	}
 
-	pr_info("%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
-	       pHTInfo->bCurBW40MHz);
+	netdev_dbg(ieee->dev, "%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
+		   pHTInfo->bCurBW40MHz);
 
 	pHTInfo->bSwBwInProgress = true;
 
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 827651b..0042a0f 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -335,60 +335,8 @@ enum rt_op_mode {
 
 #define MGMT_QUEUE_NUM 5
 
-#define IEEE_CMD_SET_WPA_PARAM			1
-#define	IEEE_CMD_SET_WPA_IE			2
-#define IEEE_CMD_SET_ENCRYPTION			3
-#define IEEE_CMD_MLME				4
-
-#define IEEE_PARAM_WPA_ENABLED			1
-#define IEEE_PARAM_TKIP_COUNTERMEASURES		2
-#define IEEE_PARAM_DROP_UNENCRYPTED		3
-#define IEEE_PARAM_PRIVACY_INVOKED		4
-#define IEEE_PARAM_AUTH_ALGS			5
-#define IEEE_PARAM_IEEE_802_1X			6
-#define IEEE_PARAM_WPAX_SELECT			7
-
-#define IEEE_MLME_STA_DEAUTH			1
-#define IEEE_MLME_STA_DISASSOC			2
-
-
-#define IEEE_CRYPT_ERR_UNKNOWN_ALG		2
-#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED	4
-#define IEEE_CRYPT_ERR_KEY_SET_FAILED		5
-#define IEEE_CRYPT_ERR_CARD_CONF_FAILED		7
-#define	IEEE_CRYPT_ALG_NAME_LEN			16
-
 #define MAX_IE_LEN  0xff
 
-struct ieee_param {
-	u32 cmd;
-	u8 sta_addr[ETH_ALEN];
-	union {
-		struct {
-			u8 name;
-			u32 value;
-		} wpa_param;
-		struct {
-			u32 len;
-			u8 reserved[32];
-			u8 data[0];
-		} wpa_ie;
-		struct {
-			int command;
-			int reason_code;
-		} mlme;
-		struct {
-			u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
-			u8 set_tx;
-			u32 err;
-			u8 idx;
-			u8 seq[8]; /* sequence counter (set: RX, get: TX) */
-			u16 key_len;
-			u8 key[0];
-		} crypt;
-	} u;
-};
-
 #define msleep_interruptible_rsl  msleep_interruptible
 
 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -2066,8 +2014,6 @@ void rtllib_stop_all_queues(struct rtllib_device *ieee);
 struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
 void rtllib_start_send_beacons(struct rtllib_device *ieee);
 void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
-				struct iw_point *p, u8 is_mesh);
 
 void notify_wx_assoc_event(struct rtllib_device *ieee);
 void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 43a7774..0033dc6 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1214,9 +1214,6 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
 		return -1;
 	}
 
-	if (rtllib_is_eapol_frame(ieee, skb, hdrlen))
-		netdev_warn(ieee->dev, "RX: IEEE802.1X EAPOL frame!\n");
-
 	return 0;
 }
 
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index eeda17d..64b0034 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1525,7 +1525,8 @@ static void rtllib_associate_complete_wq(void *data)
 				     associate_complete_wq);
 	struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
 
-	netdev_info(ieee->dev, "Associated successfully\n");
+	netdev_info(ieee->dev, "Associated successfully with %pM\n",
+		    ieee->current_network.bssid);
 	if (!ieee->is_silent_reset) {
 		netdev_info(ieee->dev, "normal associate\n");
 		notify_wx_assoc_event(ieee);
@@ -2309,7 +2310,8 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
 	if (errcode) {
 		ieee->softmac_stats.rx_auth_rs_err++;
 		netdev_info(ieee->dev,
-			    "Authentication respose status code 0x%x", errcode);
+			    "Authentication response status code 0x%x",
+			    errcode);
 		rtllib_associate_abort(ieee);
 		return;
 	}
@@ -3076,333 +3078,6 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
 	tasklet_kill(&ieee->ps_task);
 }
 
-/********************************************************
- * Start of WPA code.				        *
- * this is stolen from the ipw2200 driver	        *
- ********************************************************/
-
-
-static int rtllib_wpa_enable(struct rtllib_device *ieee, int value)
-{
-	/* This is called when wpa_supplicant loads and closes the driver
-	 * interface.
-	 */
-	netdev_info(ieee->dev, "%s WPA\n", value ? "enabling" : "disabling");
-	ieee->wpa_enabled = value;
-	eth_zero_addr(ieee->ap_mac_addr);
-	return 0;
-}
-
-
-static void rtllib_wpa_assoc_frame(struct rtllib_device *ieee, char *wpa_ie,
-				   int wpa_ie_len)
-{
-	/* make sure WPA is enabled */
-	rtllib_wpa_enable(ieee, 1);
-
-	rtllib_disassociate(ieee);
-}
-
-
-static int rtllib_wpa_mlme(struct rtllib_device *ieee, int command, int reason)
-{
-
-	int ret = 0;
-
-	switch (command) {
-	case IEEE_MLME_STA_DEAUTH:
-		break;
-
-	case IEEE_MLME_STA_DISASSOC:
-		rtllib_disassociate(ieee);
-		break;
-
-	default:
-		netdev_info(ieee->dev, "Unknown MLME request: %d\n", command);
-		ret = -EOPNOTSUPP;
-	}
-
-	return ret;
-}
-
-
-static int rtllib_wpa_set_wpa_ie(struct rtllib_device *ieee,
-			      struct ieee_param *param, int plen)
-{
-	u8 *buf;
-
-	if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
-	    (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
-		return -EINVAL;
-
-	if (param->u.wpa_ie.len) {
-		buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
-			      GFP_KERNEL);
-		if (buf == NULL)
-			return -ENOMEM;
-
-		kfree(ieee->wpa_ie);
-		ieee->wpa_ie = buf;
-		ieee->wpa_ie_len = param->u.wpa_ie.len;
-	} else {
-		kfree(ieee->wpa_ie);
-		ieee->wpa_ie = NULL;
-		ieee->wpa_ie_len = 0;
-	}
-
-	rtllib_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len);
-	return 0;
-}
-
-#define AUTH_ALG_OPEN_SYSTEM			0x1
-#define AUTH_ALG_SHARED_KEY			0x2
-#define AUTH_ALG_LEAP				0x4
-static int rtllib_wpa_set_auth_algs(struct rtllib_device *ieee, int value)
-{
-
-	struct rtllib_security sec = {
-		.flags = SEC_AUTH_MODE,
-	};
-
-	if (value & AUTH_ALG_SHARED_KEY) {
-		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
-		ieee->open_wep = 0;
-		ieee->auth_mode = 1;
-	} else if (value & AUTH_ALG_OPEN_SYSTEM) {
-		sec.auth_mode = WLAN_AUTH_OPEN;
-		ieee->open_wep = 1;
-		ieee->auth_mode = 0;
-	} else if (value & AUTH_ALG_LEAP) {
-		sec.auth_mode = WLAN_AUTH_LEAP  >> 6;
-		ieee->open_wep = 1;
-		ieee->auth_mode = 2;
-	}
-
-
-	if (ieee->set_security)
-		ieee->set_security(ieee->dev, &sec);
-
-	return 0;
-}
-
-static int rtllib_wpa_set_param(struct rtllib_device *ieee, u8 name, u32 value)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	switch (name) {
-	case IEEE_PARAM_WPA_ENABLED:
-		ret = rtllib_wpa_enable(ieee, value);
-		break;
-
-	case IEEE_PARAM_TKIP_COUNTERMEASURES:
-		ieee->tkip_countermeasures = value;
-		break;
-
-	case IEEE_PARAM_DROP_UNENCRYPTED:
-	{
-		/* HACK:
-		 *
-		 * wpa_supplicant calls set_wpa_enabled when the driver
-		 * is loaded and unloaded, regardless of if WPA is being
-		 * used.  No other calls are made which can be used to
-		 * determine if encryption will be used or not prior to
-		 * association being expected.  If encryption is not being
-		 * used, drop_unencrypted is set to false, else true -- we
-		 * can use this to determine if the CAP_PRIVACY_ON bit should
-		 * be set.
-		 */
-		struct rtllib_security sec = {
-			.flags = SEC_ENABLED,
-			.enabled = value,
-		};
-		ieee->drop_unencrypted = value;
-		/* We only change SEC_LEVEL for open mode. Others
-		 * are set by ipw_wpa_set_encryption.
-		 */
-		if (!value) {
-			sec.flags |= SEC_LEVEL;
-			sec.level = SEC_LEVEL_0;
-		} else {
-			sec.flags |= SEC_LEVEL;
-			sec.level = SEC_LEVEL_1;
-		}
-		if (ieee->set_security)
-			ieee->set_security(ieee->dev, &sec);
-		break;
-	}
-
-	case IEEE_PARAM_PRIVACY_INVOKED:
-		ieee->privacy_invoked = value;
-		break;
-
-	case IEEE_PARAM_AUTH_ALGS:
-		ret = rtllib_wpa_set_auth_algs(ieee, value);
-		break;
-
-	case IEEE_PARAM_IEEE_802_1X:
-		ieee->ieee802_1x = value;
-		break;
-	case IEEE_PARAM_WPAX_SELECT:
-		spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
-		spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
-		break;
-
-	default:
-		netdev_info(ieee->dev, "Unknown WPA param: %d\n", name);
-		ret = -EOPNOTSUPP;
-	}
-
-	return ret;
-}
-
-/* implementation borrowed from hostap driver */
-static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
-				  struct ieee_param *param, int param_len,
-				  u8 is_mesh)
-{
-	int ret = 0;
-	struct lib80211_crypto_ops *ops;
-	struct lib80211_crypt_data **crypt;
-
-	struct rtllib_security sec = {
-		.flags = 0,
-	};
-
-	param->u.crypt.err = 0;
-	param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
-	if (param_len !=
-	    (int) ((char *) param->u.crypt.key - (char *) param) +
-	    param->u.crypt.key_len) {
-		netdev_info(ieee->dev, "Len mismatch %d, %d\n", param_len,
-			    param->u.crypt.key_len);
-		return -EINVAL;
-	}
-	if (is_broadcast_ether_addr(param->sta_addr)) {
-		if (param->u.crypt.idx >= NUM_WEP_KEYS)
-			return -EINVAL;
-		crypt = &ieee->crypt_info.crypt[param->u.crypt.idx];
-	} else {
-		return -EINVAL;
-	}
-
-	if (strcmp(param->u.crypt.alg, "none") == 0) {
-		if (crypt) {
-			sec.enabled = 0;
-			sec.level = SEC_LEVEL_0;
-			sec.flags |= SEC_ENABLED | SEC_LEVEL;
-			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
-		}
-		goto done;
-	}
-	sec.enabled = 1;
-	sec.flags |= SEC_ENABLED;
-
-	/* IPW HW cannot build TKIP MIC, host decryption still needed. */
-	if (!(ieee->host_encrypt || ieee->host_decrypt) &&
-	    strcmp(param->u.crypt.alg, "R-TKIP"))
-		goto skip_host_crypt;
-
-	ops = lib80211_get_crypto_ops(param->u.crypt.alg);
-	if (ops == NULL && strcmp(param->u.crypt.alg, "R-WEP") == 0) {
-		request_module("rtllib_crypt_wep");
-		ops = lib80211_get_crypto_ops(param->u.crypt.alg);
-	} else if (ops == NULL && strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
-		request_module("rtllib_crypt_tkip");
-		ops = lib80211_get_crypto_ops(param->u.crypt.alg);
-	} else if (ops == NULL && strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
-		request_module("rtllib_crypt_ccmp");
-		ops = lib80211_get_crypto_ops(param->u.crypt.alg);
-	}
-	if (ops == NULL) {
-		netdev_info(ieee->dev, "unknown crypto alg '%s'\n",
-			    param->u.crypt.alg);
-		param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
-		ret = -EINVAL;
-		goto done;
-	}
-	if (*crypt == NULL || (*crypt)->ops != ops) {
-		struct lib80211_crypt_data *new_crypt;
-
-		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
-
-		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
-		if (new_crypt == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-		new_crypt->ops = ops;
-		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
-			new_crypt->priv =
-				new_crypt->ops->init(param->u.crypt.idx);
-
-		if (new_crypt->priv == NULL) {
-			kfree(new_crypt);
-			param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED;
-			ret = -EINVAL;
-			goto done;
-		}
-
-		*crypt = new_crypt;
-	}
-
-	if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
-	    (*crypt)->ops->set_key(param->u.crypt.key,
-	    param->u.crypt.key_len, param->u.crypt.seq,
-	    (*crypt)->priv) < 0) {
-		netdev_info(ieee->dev, "key setting failed\n");
-		param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED;
-		ret = -EINVAL;
-		goto done;
-	}
-
- skip_host_crypt:
-	if (param->u.crypt.set_tx) {
-		ieee->crypt_info.tx_keyidx = param->u.crypt.idx;
-		sec.active_key = param->u.crypt.idx;
-		sec.flags |= SEC_ACTIVE_KEY;
-	} else
-		sec.flags &= ~SEC_ACTIVE_KEY;
-
-	memcpy(sec.keys[param->u.crypt.idx],
-	       param->u.crypt.key,
-	       param->u.crypt.key_len);
-	sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
-	sec.flags |= (1 << param->u.crypt.idx);
-
-	if (strcmp(param->u.crypt.alg, "R-WEP") == 0) {
-		sec.flags |= SEC_LEVEL;
-		sec.level = SEC_LEVEL_1;
-	} else if (strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
-		sec.flags |= SEC_LEVEL;
-		sec.level = SEC_LEVEL_2;
-	} else if (strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
-		sec.flags |= SEC_LEVEL;
-		sec.level = SEC_LEVEL_3;
-	}
- done:
-	if (ieee->set_security)
-		ieee->set_security(ieee->dev, &sec);
-
-	/* Do not reset port if card is in Managed mode since resetting will
-	 * generate new IEEE 802.11 authentication which may end up in looping
-	 * with IEEE 802.1X.  If your hardware requires a reset after WEP
-	 * configuration (for example... Prism2), implement the reset_port in
-	 * the callbacks structures used to initialize the 802.11 stack.
-	 */
-	if (ieee->reset_on_keychange &&
-	    ieee->iw_mode != IW_MODE_INFRA &&
-	    ieee->reset_port &&
-	    ieee->reset_port(ieee->dev)) {
-		netdev_info(ieee->dev, "reset_port failed\n");
-		param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED;
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
 static inline struct sk_buff *
 rtllib_disauth_skb(struct rtllib_network *beacon,
 		   struct rtllib_device *ieee, u16 asRsn)
@@ -3501,62 +3176,6 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
 	}
 }
 
-int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee, struct iw_point *p,
-				u8 is_mesh)
-{
-	struct ieee_param *param;
-	int ret = 0;
-
-	mutex_lock(&ieee->wx_mutex);
-
-	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	param = memdup_user(p->pointer, p->length);
-	if (IS_ERR(param)) {
-		ret = PTR_ERR(param);
-		goto out;
-	}
-
-	switch (param->cmd) {
-	case IEEE_CMD_SET_WPA_PARAM:
-		ret = rtllib_wpa_set_param(ieee, param->u.wpa_param.name,
-					param->u.wpa_param.value);
-		break;
-
-	case IEEE_CMD_SET_WPA_IE:
-		ret = rtllib_wpa_set_wpa_ie(ieee, param, p->length);
-		break;
-
-	case IEEE_CMD_SET_ENCRYPTION:
-		ret = rtllib_wpa_set_encryption(ieee, param, p->length, 0);
-		break;
-
-	case IEEE_CMD_MLME:
-		ret = rtllib_wpa_mlme(ieee, param->u.mlme.command,
-				   param->u.mlme.reason_code);
-		break;
-
-	default:
-		netdev_info(ieee->dev, "Unknown WPA supplicant request: %d\n",
-			    param->cmd);
-		ret = -EOPNOTSUPP;
-		break;
-	}
-
-	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-		ret = -EFAULT;
-
-	kfree(param);
-out:
-	mutex_unlock(&ieee->wx_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
-
 static void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
 {
 	u8	OpMode;
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index c5c0d96..f7eba01 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -595,7 +595,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
 		ret = -EINVAL;
 		goto done;
 	}
-	netdev_info(dev, "alg name:%s\n", alg);
+	netdev_dbg(dev, "alg name:%s\n", alg);
 
 	ops = lib80211_get_crypto_ops(alg);
 	if (ops == NULL) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 899c77e..b062cad 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2187,7 +2187,7 @@ int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
 			       struct sk_buff *frag, int hdr_len);
 
 int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
-void ieee80211_txb_free(struct ieee80211_txb *);
+void ieee80211_txb_free(struct ieee80211_txb *txb);
 
 
 /* ieee80211_rx.c */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
index 005bf89..a0aa0f5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
@@ -82,8 +82,8 @@ struct ieee80211_crypt_data {
 int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
 int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
 struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
-void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
-void ieee80211_crypt_deinit_handler(unsigned long);
+void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force);
+void ieee80211_crypt_deinit_handler(unsigned long data);
 void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
 				    struct ieee80211_crypt_data **crypt);
 
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 5039172..60ecfec 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -171,13 +171,6 @@ static inline u16 Mk16(u8 hi, u8 lo)
 	return lo | (((u16) hi) << 8);
 }
 
-
-static inline u16 Mk16_le(u16 *v)
-{
-	return le16_to_cpu(*v);
-}
-
-
 static const u16 Sbox[256] = {
 	0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
 	0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
@@ -264,15 +257,15 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
 	PPK[5] = TTAK[4] + IV16;
 
 	/* Step 2 - 96-bit bijective mixing using S-box */
-	PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
-	PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
-	PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
-	PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
-	PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
-	PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
+	PPK[0] += _S_(PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[0])));
+	PPK[1] += _S_(PPK[0] ^ le16_to_cpu(*(__le16 *)(&TK[2])));
+	PPK[2] += _S_(PPK[1] ^ le16_to_cpu(*(__le16 *)(&TK[4])));
+	PPK[3] += _S_(PPK[2] ^ le16_to_cpu(*(__le16 *)(&TK[6])));
+	PPK[4] += _S_(PPK[3] ^ le16_to_cpu(*(__le16 *)(&TK[8])));
+	PPK[5] += _S_(PPK[4] ^ le16_to_cpu(*(__le16 *)(&TK[10])));
 
-	PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
-	PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+	PPK[0] += RotR1(PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[12])));
+	PPK[1] += RotR1(PPK[0] ^ le16_to_cpu(*(__le16 *)(&TK[14])));
 	PPK[2] += RotR1(PPK[1]);
 	PPK[3] += RotR1(PPK[2]);
 	PPK[4] += RotR1(PPK[3]);
@@ -285,7 +278,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
 	WEPSeed[0] = Hi8(IV16);
 	WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
 	WEPSeed[2] = Lo8(IV16);
-	WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+	WEPSeed[3] = Lo8((PPK[5] ^ le16_to_cpu(*(__le16 *)(&TK[0]))) >> 1);
 
 #ifdef __BIG_ENDIAN
 	{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index a791175..8f236b3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -157,8 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
 	ieee80211_softmac_init(ieee);
 
 	ieee->pHTInfo = kzalloc(sizeof(RT_HIGH_THROUGHPUT), GFP_KERNEL);
-	if (ieee->pHTInfo == NULL)
-	{
+	if (ieee->pHTInfo == NULL) {
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for HTInfo\n");
 		goto failed;
 	}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index b4c13ff..f98bb03 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -36,18 +36,15 @@ static void RxPktPendingTimeout(unsigned long data)
 
 	spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
 	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__func__);
-	if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
-	{
+	if(pRxTs->RxTimeoutIndicateSeq != 0xffff) {
 		// Indicate the pending packets sequentially according to SeqNum until meet the gap.
-		while(!list_empty(&pRxTs->RxPendingPktList))
-		{
+		while(!list_empty(&pRxTs->RxPendingPktList)) {
 			pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTs->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
 			if(index == 0)
 				pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
 
 			if( SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
-				SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)	)
-			{
+				SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)	) {
 				list_del_init(&pReorderEntry->List);
 
 				if(SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq))
@@ -58,22 +55,19 @@ static void RxPktPendingTimeout(unsigned long data)
 				index++;
 
 				list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List);
-			}
-			else
-			{
+			} else {
 				bPktInBuf = true;
 				break;
 			}
 		}
 	}
 
-	if(index>0)
-	{
+	if(index>0) {
 		// Set RxTimeoutIndicateSeq to 0xffff to indicate no pending packets in buffer now.
 		pRxTs->RxTimeoutIndicateSeq = 0xffff;
 
 		// Indicate packets
-		if(index > REORDER_WIN_SIZE){
+		if(index > REORDER_WIN_SIZE) {
 			IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
 			spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
 			return;
@@ -81,8 +75,7 @@ static void RxPktPendingTimeout(unsigned long data)
 		ieee80211_indicate_packets(ieee, ieee->stats_IndicateArray, index);
 	}
 
-	if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
-	{
+	if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff)) {
 		pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
 		mod_timer(&pRxTs->RxPktPendingTimer,
 			  jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
@@ -147,8 +140,7 @@ void TSInitialize(struct ieee80211_device *ieee)
 	INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
 	INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
 
-	for(count = 0; count < TOTAL_TS_NUM; count++)
-	{
+	for(count = 0; count < TOTAL_TS_NUM; count++) {
 		//
 		pTxTS->num = count;
 		// The timers for the operation of Traffic Stream and Block Ack.
@@ -172,8 +164,7 @@ void TSInitialize(struct ieee80211_device *ieee)
 	INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
 	INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
 	INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
-	for(count = 0; count < TOTAL_TS_NUM; count++)
-	{
+	for(count = 0; count < TOTAL_TS_NUM; count++) {
 		pRxTS->num = count;
 		INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
 		setup_timer(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
@@ -191,15 +182,13 @@ void TSInitialize(struct ieee80211_device *ieee)
 	// Initialize unused Rx Reorder List.
 	INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
 //#ifdef TO_DO_LIST
-	for(count = 0; count < REORDER_ENTRY_NUM; count++)
-	{
+	for(count = 0; count < REORDER_ENTRY_NUM; count++) {
 		list_add_tail( &pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
 		if(count == (REORDER_ENTRY_NUM-1))
 			break;
 		pRxReorderEntry = &ieee->RxReorderEntry[count+1];
 	}
 //#endif
-
 }
 
 static void AdmitTS(struct ieee80211_device *ieee,
@@ -223,36 +212,25 @@ static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
 	bool				search_dir[4] = {0};
 	struct list_head		*psearch_list; //FIXME
 	PTS_COMMON_INFO	pRet = NULL;
-	if(ieee->iw_mode == IW_MODE_MASTER) //ap mode
-	{
-		if(TxRxSelect == TX_DIR)
-		{
+	if(ieee->iw_mode == IW_MODE_MASTER) { //ap mode
+		if(TxRxSelect == TX_DIR) {
 			search_dir[DIR_DOWN] = true;
 			search_dir[DIR_BI_DIR]= true;
-		}
-		else
-		{
+		} else {
 			search_dir[DIR_UP]	= true;
 			search_dir[DIR_BI_DIR]= true;
 		}
-	}
-	else if(ieee->iw_mode == IW_MODE_ADHOC)
-	{
+	} else if(ieee->iw_mode == IW_MODE_ADHOC) {
 		if(TxRxSelect == TX_DIR)
 			search_dir[DIR_UP]	= true;
 		else
 			search_dir[DIR_DOWN] = true;
-	}
-	else
-	{
-		if(TxRxSelect == TX_DIR)
-		{
+	} else {
+		if(TxRxSelect == TX_DIR) {
 			search_dir[DIR_UP]	= true;
 			search_dir[DIR_BI_DIR]= true;
 			search_dir[DIR_DIRECT]= true;
-		}
-		else
-		{
+		} else {
 			search_dir[DIR_DOWN] = true;
 			search_dir[DIR_BI_DIR]= true;
 			search_dir[DIR_DIRECT]= true;
@@ -265,28 +243,24 @@ static PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee,
 		psearch_list = &ieee->Rx_TS_Admit_List;
 
 	//for(dir = DIR_UP; dir <= DIR_BI_DIR; dir++)
-	for(dir = 0; dir <= DIR_BI_DIR; dir++)
-	{
+	for(dir = 0; dir <= DIR_BI_DIR; dir++) {
 		if (!search_dir[dir])
 			continue;
 		list_for_each_entry(pRet, psearch_list, List){
 	//		IEEE80211_DEBUG(IEEE80211_DL_TS, "ADD:%pM, TID:%d, dir:%d\n", pRet->Addr, pRet->TSpec.f.TSInfo.field.ucTSID, pRet->TSpec.f.TSInfo.field.ucDirection);
 			if (memcmp(pRet->Addr, Addr, 6) == 0)
 				if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
-					if(pRet->TSpec.f.TSInfo.field.ucDirection == dir)
-					{
+					if(pRet->TSpec.f.TSInfo.field.ucDirection == dir) {
 	//					printk("Bingo! got it\n");
 						break;
 					}
-
 		}
 		if(&pRet->List  != psearch_list)
 			break;
 	}
 
-	if(&pRet->List  != psearch_list){
+	if(&pRet->List  != psearch_list)
 		return pRet ;
-	}
 	else
 		return NULL;
 }
@@ -327,25 +301,21 @@ bool GetTs(
 	// We do not build any TS for Broadcast or Multicast stream.
 	// So reject these kinds of search here.
 	//
-	if (is_multicast_ether_addr(Addr))
-	{
+	if (is_multicast_ether_addr(Addr)) {
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "get TS for Broadcast or Multicast\n");
 		return false;
 	}
 
-	if (ieee->current_network.qos_data.supported == 0)
+	if (ieee->current_network.qos_data.supported == 0) {
 		UP = 0;
-	else
-	{
+	} else {
 		// In WMM case: we use 4 TID only
-		if (!IsACValid(TID))
-		{
+		if (!IsACValid(TID)) {
 			IEEE80211_DEBUG(IEEE80211_DL_ERR, " in %s(), TID(%d) is not valid\n", __func__, TID);
 			return false;
 		}
 
-		switch (TID)
-		{
+		switch (TID) {
 		case 0:
 		case 3:
 			UP = 0;
@@ -373,18 +343,13 @@ bool GetTs(
 			Addr,
 			UP,
 			TxRxSelect);
-	if(*ppTS != NULL)
-	{
+	if(*ppTS != NULL) {
 		return true;
-	}
-	else
-	{
+	} else {
 		if (!bAddNewTs) {
 			IEEE80211_DEBUG(IEEE80211_DL_TS, "add new TS failed(tid:%d)\n", UP);
 			return false;
-		}
-		else
-		{
+		} else {
 			//
 			// Create a new Traffic stream for current Tx/Rx
 			// This is for EDCA and WMM to add a new TS.
@@ -406,16 +371,13 @@ bool GetTs(
 								((TxRxSelect==TX_DIR)?DIR_DOWN:DIR_UP):
 								((TxRxSelect==TX_DIR)?DIR_UP:DIR_DOWN);
 			IEEE80211_DEBUG(IEEE80211_DL_TS, "to add Ts\n");
-			if(!list_empty(pUnusedList))
-			{
+			if(!list_empty(pUnusedList)) {
 				(*ppTS) = list_entry(pUnusedList->next, TS_COMMON_INFO, List);
 				list_del_init(&(*ppTS)->List);
-				if(TxRxSelect==TX_DIR)
-				{
+				if(TxRxSelect==TX_DIR) {
 					PTX_TS_RECORD tmp = container_of(*ppTS, TX_TS_RECORD, TsCommonInfo);
 					ResetTxTsEntry(tmp);
-				}
-				else{
+				} else {
 					PRX_TS_RECORD tmp = container_of(*ppTS, RX_TS_RECORD, TsCommonInfo);
 					ResetRxTsEntry(tmp);
 				}
@@ -438,9 +400,7 @@ bool GetTs(
 				// if there is DirectLink, we need to do additional operation here!!
 
 				return true;
-			}
-			else
-			{
+			} else {
 				IEEE80211_DEBUG(IEEE80211_DL_ERR, "in function %s() There is not enough TS record to be used!!", __func__);
 				return false;
 			}
@@ -457,16 +417,14 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
 	del_timer_sync(&pTs->InactTimer);
 	TsInitDelBA(ieee, pTs, TxRxSelect);
 
-	if(TxRxSelect == RX_DIR)
-	{
+	if(TxRxSelect == RX_DIR) {
 //#ifdef TO_DO_LIST
 		PRX_REORDER_ENTRY	pRxReorderEntry;
 		PRX_TS_RECORD		pRxTS = (PRX_TS_RECORD)pTs;
 		if(timer_pending(&pRxTS->RxPktPendingTimer))
 			del_timer_sync(&pRxTS->RxPktPendingTimer);
 
-		while(!list_empty(&pRxTS->RxPendingPktList))
-		{
+		while(!list_empty(&pRxTS->RxPendingPktList)) {
 			spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
 			//pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
 			pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
@@ -474,14 +432,13 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
 			{
 				int i = 0;
 				struct ieee80211_rxb *prxb = pRxReorderEntry->prxb;
-				if (unlikely(!prxb))
-				{
+				if (unlikely(!prxb)) {
 					spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
 					return;
 				}
-				for(i =0; i < prxb->nr_subframes; i++) {
+				for(i =0; i < prxb->nr_subframes; i++)
 					dev_kfree_skb(prxb->subframes[i]);
-				}
+
 				kfree(prxb);
 				prxb = NULL;
 			}
@@ -490,9 +447,7 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
 		}
 
 //#endif
-	}
-	else
-	{
+	} else {
 		PTX_TS_RECORD pTxTS = (PTX_TS_RECORD)pTs;
 		del_timer_sync(&pTxTS->TsAddBaTimer);
 	}
@@ -503,20 +458,16 @@ void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr)
 	PTS_COMMON_INFO	pTS, pTmpTS;
 
 	printk("===========>RemovePeerTS,%pM\n", Addr);
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
-	{
-		if (memcmp(pTS->Addr, Addr, 6) == 0)
-		{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+		if (memcmp(pTS->Addr, Addr, 6) == 0) {
 			RemoveTsEntry(ieee, pTS, TX_DIR);
 			list_del_init(&pTS->List);
 			list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
 		}
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List)
-	{
-		if (memcmp(pTS->Addr, Addr, 6) == 0)
-		{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+		if (memcmp(pTS->Addr, Addr, 6) == 0) {
 			printk("====>remove Tx_TS_admin_list\n");
 			RemoveTsEntry(ieee, pTS, TX_DIR);
 			list_del_init(&pTS->List);
@@ -524,20 +475,16 @@ void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr)
 		}
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List)
-	{
-		if (memcmp(pTS->Addr, Addr, 6) == 0)
-		{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+		if (memcmp(pTS->Addr, Addr, 6) == 0) {
 			RemoveTsEntry(ieee, pTS, RX_DIR);
 			list_del_init(&pTS->List);
 			list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
 		}
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List)
-	{
-		if (memcmp(pTS->Addr, Addr, 6) == 0)
-		{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+		if (memcmp(pTS->Addr, Addr, 6) == 0) {
 			RemoveTsEntry(ieee, pTS, RX_DIR);
 			list_del_init(&pTS->List);
 			list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
@@ -549,29 +496,25 @@ void RemoveAllTS(struct ieee80211_device *ieee)
 {
 	PTS_COMMON_INFO pTS, pTmpTS;
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List)
-	{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
 		RemoveTsEntry(ieee, pTS, TX_DIR);
 		list_del_init(&pTS->List);
 		list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List)
-	{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
 		RemoveTsEntry(ieee, pTS, TX_DIR);
 		list_del_init(&pTS->List);
 		list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List)
-	{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
 		RemoveTsEntry(ieee, pTS, RX_DIR);
 		list_del_init(&pTS->List);
 		list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
 	}
 
-	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List)
-	{
+	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
 		RemoveTsEntry(ieee, pTS, RX_DIR);
 		list_del_init(&pTS->List);
 		list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
@@ -580,21 +523,17 @@ void RemoveAllTS(struct ieee80211_device *ieee)
 
 void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD	pTxTS)
 {
-	if(!pTxTS->bAddBaReqInProgress)
-	{
+	if(!pTxTS->bAddBaReqInProgress) {
 		pTxTS->bAddBaReqInProgress = true;
-		if(pTxTS->bAddBaReqDelayed)
-		{
+		if(pTxTS->bAddBaReqDelayed) {
 			IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
 			mod_timer(&pTxTS->TsAddBaTimer,
 				  jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
-		}
-		else
-		{
+		} else {
 			IEEE80211_DEBUG(IEEE80211_DL_BA,"TsStartAddBaProcess(): Immediately Start ADDBA now!!\n");
 			mod_timer(&pTxTS->TsAddBaTimer, jiffies+10); //set 10 ticks
 		}
-	}
-	else
+	} else {
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s()==>BA timer is already added\n", __func__);
+	}
 }
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index e702afb..51c150a 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -34,7 +34,7 @@
 #include <linux/proc_fs.h>
 #include <linux/if_arp.h>
 #include <linux/random.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "ieee80211/ieee80211.h"
 
 #define RTL8192U
@@ -1147,9 +1147,9 @@ int write_nic_word(struct net_device *dev, int x, u16 y);
 int write_nic_dword(struct net_device *dev, int x, u32 y);
 void force_pci_posting(struct net_device *dev);
 
-void rtl8192_rtx_disable(struct net_device *);
-void rtl8192_rx_enable(struct net_device *);
-void rtl8192_tx_enable(struct net_device *);
+void rtl8192_rtx_disable(struct net_device *dev);
+void rtl8192_rx_enable(struct net_device *dev);
+void rtl8192_tx_enable(struct net_device *dev);
 
 void rtl8192_disassociate(struct net_device *dev);
 void rtl8185_set_rf_pins_enable(struct net_device *dev, u32 a);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 9f370e8..779ecdb 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1797,8 +1797,8 @@ static void rtl8192_link_change(struct net_device *dev)
 		 * way, but there is no chance to set this as wep will not set
 		 * group key in wext.
 		 */
-		if (KEY_TYPE_WEP40 == ieee->pairwise_key_type ||
-		    KEY_TYPE_WEP104 == ieee->pairwise_key_type)
+		if (ieee->pairwise_key_type == KEY_TYPE_WEP40 ||
+		    ieee->pairwise_key_type == KEY_TYPE_WEP104)
 			EnableHWSecurityConfig8192(dev);
 	}
 	/*update timing params*/
@@ -2071,7 +2071,7 @@ static bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
 	 */
 	encrypt = (network->capability & WLAN_CAPABILITY_PRIVACY) ||
 		  (ieee->host_encrypt && crypt && crypt->ops &&
-		   (0 == strcmp(crypt->ops->name, "WEP")));
+		   (strcmp(crypt->ops->name, "WEP") == 0));
 
 	/* simply judge  */
 	if (encrypt && (wpa_ie_len == 0)) {
@@ -4498,7 +4498,7 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
 	praddr = hdr->addr1;
 
 	/* Check if the received packet is acceptable. */
-	bpacket_match_bssid = (IEEE80211_FTYPE_CTL != type) &&
+	bpacket_match_bssid = (type != IEEE80211_FTYPE_CTL) &&
 			       (eqMacAddr(priv->ieee80211->current_network.bssid,  (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
 			       && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV);
 	bpacket_toself =  bpacket_match_bssid &
@@ -5098,7 +5098,7 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
 	struct ieee80211_device *ieee = priv->ieee80211;
 
 	SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
-	if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2)) {
+	if (((ieee->pairwise_key_type == KEY_TYPE_WEP40) || (ieee->pairwise_key_type == KEY_TYPE_WEP104)) && (priv->ieee80211->auth_mode != 2)) {
 		SECR_value |= SCR_RxUseDK;
 		SECR_value |= SCR_TxUseDK;
 	} else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP))) {
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 975f707..e6f8d1d 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2300,43 +2300,52 @@ static void dm_check_edca_turbo(
 		 * Restore original EDCA according to the declaration of AP.
 		 */
 		if (priv->bcurrent_turbo_EDCA) {
+			u8	u1bAIFS;
+			u32	u4bAcParam, op_limit, cw_max, cw_min;
+
+			struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
+			u8 mode = priv->ieee80211->mode;
+
+			/*  For Each time updating EDCA parameter, reset EDCA turbo mode status. */
+			dm_init_edca_turbo(dev);
+
+			u1bAIFS = qos_parameters->aifs[0] * ((mode & (IEEE_G | IEEE_N_24G)) ? 9 : 20) + aSifsTime;
+
+			op_limit = (u32)le16_to_cpu(qos_parameters->tx_op_limit[0]);
+			cw_max   = (u32)le16_to_cpu(qos_parameters->cw_max[0]);
+			cw_min   = (u32)le16_to_cpu(qos_parameters->cw_min[0]);
+
+			op_limit <<= AC_PARAM_TXOP_LIMIT_OFFSET;
+			cw_max   <<= AC_PARAM_ECW_MAX_OFFSET;
+			cw_min   <<= AC_PARAM_ECW_MIN_OFFSET;
+			u1bAIFS  <<= AC_PARAM_AIFS_OFFSET;
+
+			u4bAcParam = op_limit | cw_max | cw_min | u1bAIFS;
+			cpu_to_le32s(&u4bAcParam);
+
+			write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
+
+
+			/*
+			 * Check ACM bit.
+			 * If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
+			 */
 			{
-				u8		u1bAIFS;
-				u32		u4bAcParam;
-				struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
-				u8 mode = priv->ieee80211->mode;
+				/*  TODO:  Modified this part and try to set acm control in only 1 IO processing!! */
 
-				/*  For Each time updating EDCA parameter, reset EDCA turbo mode status. */
-				dm_init_edca_turbo(dev);
-				u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
-				u4bAcParam = (((le16_to_cpu(qos_parameters->tx_op_limit[0])) << AC_PARAM_TXOP_LIMIT_OFFSET)|
-					((le16_to_cpu(qos_parameters->cw_max[0])) << AC_PARAM_ECW_MAX_OFFSET)|
-					((le16_to_cpu(qos_parameters->cw_min[0])) << AC_PARAM_ECW_MIN_OFFSET)|
-					((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
-				/*write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);*/
-				write_nic_dword(dev, EDCAPARA_BE,  u4bAcParam);
+				PACI_AIFSN	pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
+				u8		AcmCtrl;
 
-				/*
-				 * Check ACM bit.
-				 * If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
-				 */
-				{
-					/*  TODO:  Modified this part and try to set acm control in only 1 IO processing!! */
+				read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
 
-					PACI_AIFSN	pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
-					u8		AcmCtrl;
-
-					read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
-
-					if (pAciAifsn->f.ACM) { /*  ACM bit is 1. */
-						AcmCtrl |= AcmHw_BeqEn;
-					} else {	/* ACM bit is 0. */
-						AcmCtrl &= (~AcmHw_BeqEn);
-					}
-
-					RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
-					write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
+				if (pAciAifsn->f.ACM) { /*  ACM bit is 1. */
+					AcmCtrl |= AcmHw_BeqEn;
+				} else {	/* ACM bit is 0. */
+					AcmCtrl &= (~AcmHw_BeqEn);
 				}
+
+				RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
+				write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
 			}
 			priv->bcurrent_turbo_EDCA = false;
 		}
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index f35121e..33e82a9 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -166,7 +166,7 @@ static uint r8712_get_rateset_len(u8 *rateset)
 
 int r8712_generate_ie(struct registry_priv *pregistrypriv)
 {
-	int sz = 0, rateLen;
+	int sz = 0, rate_len;
 	struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
 	u8 *ie = pdev_network->IEs;
 
@@ -191,15 +191,16 @@ int r8712_generate_ie(struct registry_priv *pregistrypriv)
 			  pdev_network->Ssid.Ssid, &sz);
 	/*supported rates*/
 	set_supported_rate(pdev_network->rates, pregistrypriv->wireless_mode);
-	rateLen = r8712_get_rateset_len(pdev_network->rates);
-	if (rateLen > 8) {
+	rate_len = r8712_get_rateset_len(pdev_network->rates);
+	if (rate_len > 8) {
 		ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8,
 				  pdev_network->rates, &sz);
-		ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8),
+		ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8),
 				  (pdev_network->rates + 8), &sz);
-	} else
+	} else {
 		ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_,
-				  rateLen, pdev_network->rates, &sz);
+				  rate_len, pdev_network->rates, &sz);
+	}
 	/*DS parameter set*/
 	ie = r8712_set_ie(ie, _DSSET_IE_, 1,
 			  (u8 *)&pdev_network->Configuration.DSConfig, &sz);
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 8836b31..e698f6e 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -93,7 +93,7 @@ static char *initmac;
  */
 static int wifi_test;
 
-module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO | S_IWUSR);
+module_param_string(ifname, ifname, sizeof(ifname), 0644);
 module_param(wifi_test, int, 0644);
 module_param(initmac, charp, 0644);
 module_param(video_mode, int, 0644);
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 556367b..0ed2f44 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -170,8 +170,10 @@ enum WIFI_REG_DOMAIN {
 	*(__le16 *)(pbuf) &= (~cpu_to_le16(_FROM_DS_)); \
 })
 
-#define get_tofr_ds(pframe)	((GetToDs(pframe) << 1) | GetFrDs(pframe))
-
+static inline unsigned char get_tofr_ds(unsigned char *pframe)
+{
+	return ((GetToDs(pframe) << 1) | GetFrDs(pframe));
+}
 
 #define SetMFrag(pbuf) ({ \
 	*(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_); \
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 9e35573..d5ab123 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -249,7 +249,7 @@ void _rtw_free_network_nolock(struct	mlme_priv *pmlmepriv, struct wlan_network *
 /*
 	return the wlan_network with the matching addr
 
-	Shall be calle under atomic context... to avoid possible racing condition...
+	Shall be called under atomic context... to avoid possible racing condition...
 */
 struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
 {
@@ -412,7 +412,7 @@ void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
 /*
 	return the wlan_network with the matching addr
 
-	Shall be calle under atomic context... to avoid possible racing condition...
+	Shall be called under atomic context... to avoid possible racing condition...
 */
 struct	wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
 {
@@ -564,7 +564,7 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
 			sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
 			rssi_final = (src->Rssi+dst->Rssi*4)/5;
 		} else {
-			/* bss info not receving from the right channel, use the original RX signal infos */
+			/* bss info not receiving from the right channel, use the original RX signal infos */
 			ss_final = dst->PhyInfo.SignalStrength;
 			sq_final = dst->PhyInfo.SignalQuality;
 			rssi_final = dst->Rssi;
@@ -680,7 +680,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
 			pnetwork->aid = 0;
 			pnetwork->join_res = 0;
 
-			/* bss info not receving from the right channel */
+			/* bss info not receiving from the right channel */
 			if (pnetwork->network.PhyInfo.SignalQuality == 101)
 				pnetwork->network.PhyInfo.SignalQuality = 0;
 		} else {
@@ -699,7 +699,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
 
 			pnetwork->last_scanned = jiffies;
 
-			/* bss info not receving from the right channel */
+			/* bss info not receiving from the right channel */
 			if (pnetwork->network.PhyInfo.SignalQuality == 101)
 				pnetwork->network.PhyInfo.SignalQuality = 0;
 
@@ -715,7 +715,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
 
 		pnetwork->last_scanned = jiffies;
 
-		/* target.Reserved[0]== 1, means that scaned network is a bcn frame. */
+		/* target.Reserved[0]== 1, means that scanned network is a bcn frame. */
 		if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
 			update_ie = false;
 
@@ -1264,7 +1264,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
 
 		/* 	Commented by Albert 2012/07/21 */
 		/* 	When doing the WPS, the wps_ie_len won't equal to 0 */
-		/* 	And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */
+		/* 	And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
 		if (padapter->securitypriv.wps_ie_len != 0) {
 			psta->ieee8021x_blocked = true;
 			padapter->securitypriv.wps_ie_len = 0;
@@ -1272,7 +1272,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
 
 
 		/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
-		/* if A-MPDU Rx is enabled, reseting  rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
+		/* if A-MPDU Rx is enabled, resetting  rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
 		/* todo: check if AP can send A-MPDU packets */
 		for (i = 0; i < 16 ; i++) {
 			/* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
@@ -1374,7 +1374,7 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
 	rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength, (u8) cur_network->network.Configuration.DSConfig);
 }
 
-/* Notes: the fucntion could be > passive_level (the same context as Rx tasklet) */
+/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
 /* pnetwork : returns from rtw_joinbss_event_callback */
 /* ptarget_wlan: found from scanned_queue */
 /* if join_res > 0, for (fw_state ==WIFI_STATION_STATE), we check if  "ptarget_sta" & "ptarget_wlan" exist. */
@@ -1482,10 +1482,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
 			}
 
 
-			/* s5. Cancle assoc_timer */
+			/* s5. Cancel assoc_timer */
 			_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
 
-			RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n"));
+			RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancel assoc_timer\n"));
 
 		} else{
 			RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
@@ -1817,7 +1817,7 @@ void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
 }
 
 /*
-* _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
 * @adapter: pointer to struct adapter structure
 */
 void _rtw_join_timeout_handler (struct adapter *adapter)
@@ -1870,7 +1870,7 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
 }
 
 /*
-* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
 * @adapter: pointer to struct adapter structure
 */
 void rtw_scan_timeout_handler (struct adapter *adapter)
@@ -2622,7 +2622,7 @@ void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
 {
 }
 
-/* the fucntion is at passive_level */
+/* the function is at passive_level */
 void rtw_joinbss_reset(struct adapter *padapter)
 {
 	u8 threshold;
@@ -2727,7 +2727,7 @@ void rtw_build_wmm_ie_ht(struct adapter *padapter, u8 *out_ie, uint *pout_len)
 	}
 }
 
-/* the fucntion is >= passive_level */
+/* the function is >= passive_level */
 unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len, u8 channel)
 {
 	u32 ielen, out_len;
@@ -2879,7 +2879,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
 
 }
 
-/* the fucntion is > passive_level (in critical_section) */
+/* the function is > passive_level (in critical_section) */
 void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channel)
 {
 	u8 *p, max_ampdu_sz;
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
index 86040ad..37f42bf 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -404,7 +404,7 @@ static void halbtc8723b1ant_MonitorWiFiCtr(PBTC_COEXIST pBtCoexist)
 {
 	s32	wifiRssi = 0;
 	bool bWifiBusy = false, bWifiUnderBMode = false;
-	static u8 nCCKLockCounter = 0;
+	static u8 nCCKLockCounter;
 
 	pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
 	pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
@@ -488,7 +488,7 @@ static void halbtc8723b1ant_MonitorWiFiCtr(PBTC_COEXIST pBtCoexist)
 
 static bool halbtc8723b1ant_IsWifiStatusChanged(PBTC_COEXIST pBtCoexist)
 {
-	static bool	bPreWifiBusy = false, bPreUnder4way = false, bPreBtHsOn = false;
+	static bool	bPreWifiBusy, bPreUnder4way, bPreBtHsOn;
 	bool bWifiBusy = false, bUnder4way = false, bBtHsOn = false;
 	bool bWifiConnected = false;
 
@@ -2754,7 +2754,7 @@ void EXhalbtc8723b1ant_DisplayCoexInfo(PBTC_COEXIST pBtCoexist)
 	u32 wifiBw, wifiTrafficDir, faOfdm, faCck, wifiLinkStatus;
 	u8 wifiDot11Chnl, wifiHsChnl;
 	u32 fwVer = 0, btPatchVer = 0;
-	static u8 PopReportIn10s = 0;
+	static u8 PopReportIn10s;
 
 	CL_SPRINTF(
 		cliBuf,
@@ -3751,7 +3751,7 @@ void EXhalbtc8723b1ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
 
 void EXhalbtc8723b1ant_Periodical(PBTC_COEXIST pBtCoexist)
 {
-	static u8 disVerInfoCnt = 0;
+	static u8 disVerInfoCnt;
 	u32 fwVer = 0, btPatchVer = 0;
 
 	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
index f5bc511..33610d3 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -282,7 +282,7 @@ static void halbtc8723b2ant_QueryBtInfo(PBTC_COEXIST pBtCoexist)
 
 static bool halbtc8723b2ant_IsWifiStatusChanged(PBTC_COEXIST pBtCoexist)
 {
-	static bool	bPreWifiBusy = false, bPreUnder4way = false, bPreBtHsOn = false;
+	static bool	bPreWifiBusy, bPreUnder4way, bPreBtHsOn;
 	bool bWifiBusy = false, bUnder4way = false, bBtHsOn = false;
 	bool bWifiConnected = false;
 
@@ -3706,7 +3706,7 @@ void EXhalbtc8723b2ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
 
 void EXhalbtc8723b2ant_Periodical(PBTC_COEXIST pBtCoexist)
 {
-	static u8 disVerInfoCnt = 0;
+	static u8 disVerInfoCnt;
 	u32 fwVer = 0, btPatchVer = 0;
 
 	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index cc7e090..9e08a4d 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -385,7 +385,7 @@ static s32 halbtcoutsrc_GetWifiRssi(struct adapter *padapter)
 static u8 halbtcoutsrc_GetWifiScanAPNum(struct adapter *padapter)
 {
 	struct mlme_ext_priv *pmlmeext;
-	static u8 scan_AP_num = 0;
+	static u8 scan_AP_num;
 
 	pmlmeext = &padapter->mlmeextpriv;
 
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 1880d41..e3a9832 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -26,7 +26,7 @@ u8 rtw_hal_data_init(struct adapter *padapter)
 		padapter->hal_data_sz = sizeof(struct hal_com_data);
 		padapter->HalData = vzalloc(padapter->hal_data_sz);
 		if (padapter->HalData == NULL) {
-			DBG_8192C("cant not alloc memory for HAL DATA\n");
+			DBG_8192C("cannot alloc memory for HAL DATA\n");
 			return _FAIL;
 		}
 	}
@@ -1415,7 +1415,8 @@ bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove)
 
 	/*  Check input parameter. */
 	if (szStr == NULL || pu4bVal == NULL || pu4bMove == NULL) {
-		DBG_871X("GetHexValueFromString(): Invalid inpur argumetns! szStr: %p, pu4bVal: %p, pu4bMove: %p\n", szStr, pu4bVal, pu4bMove);
+		DBG_871X("GetHexValueFromString(): Invalid input arguments! szStr: %p, pu4bVal: %p, pu4bMove: %p\n",
+			 szStr, pu4bVal, pu4bMove);
 		return false;
 	}
 
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 0b3541a..87a76ba 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -209,7 +209,10 @@ typedef struct _ODM_RATE_ADAPTIVE {
 
 #define AVG_THERMAL_NUM		8
 #define IQK_Matrix_REG_NUM	8
-#define IQK_Matrix_Settings_NUM	14+24+21 /*  Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G */
+#define IQK_Matrix_Settings_NUM	(14 + 24 + 21) /*   Channels_2_4G_NUM
+						* + Channels_5G_20M_NUM
+						* + Channels_5G
+						*/
 
 #define		DM_Type_ByFW			0
 #define		DM_Type_ByDriver		1
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index ba8e8eb..0bde944 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -278,11 +278,11 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI)
 
 	if (!pDM_Odm->ForceEDCCA) {
 		if (pDM_Odm->RSSI_Min > pDM_Odm->AdapEn_RSSI)
-			EDCCA_State = 1;
+			EDCCA_State = true;
 		else if (pDM_Odm->RSSI_Min < (pDM_Odm->AdapEn_RSSI - 5))
-			EDCCA_State = 0;
+			EDCCA_State = false;
 	} else
-		EDCCA_State = 1;
+		EDCCA_State = true;
 
 	if (
 		pDM_Odm->bLinked &&
@@ -305,7 +305,7 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI)
 		)
 	);
 
-	if (EDCCA_State == 1) {
+	if (EDCCA_State) {
 		Diff = IGI_target-(s8)IGI;
 		TH_L2H_dmc = pDM_Odm->TH_L2H_ini + Diff;
 		if (TH_L2H_dmc > 10)
@@ -372,7 +372,7 @@ void odm_PauseDIG(
 {
 	PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
 	pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
-	static bool bPaused = false;
+	static bool bPaused;
 
 	ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG() =========>\n"));
 
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h
index 2ec4baf..ff13136 100644
--- a/drivers/staging/rtl8723bs/hal/odm_debug.h
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.h
@@ -105,51 +105,60 @@
 
 #if DBG
 #define ODM_RT_TRACE(pDM_Odm, comp, level, fmt)\
-	if (\
-		(comp & pDM_Odm->DebugComponents) &&\
-		(level <= pDM_Odm->DebugLevel || level == ODM_DBG_SERIOUS)\
-	) {\
-		RT_PRINTK fmt;\
-	}
+	do {\
+		if (\
+			(comp & pDM_Odm->DebugComponents) &&\
+			(level <= pDM_Odm->DebugLevel ||\
+			 level == ODM_DBG_SERIOUS)\
+		) {\
+			RT_PRINTK fmt;\
+		} \
+	} while (0)
 
 #define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt)\
-	if (\
-		(comp & pDM_Odm->DebugComponents) &&\
-		(level <= pDM_Odm->DebugLevel)\
-	) {\
-		RT_PRINTK fmt;\
-	}
+	do {\
+		if (\
+			(comp & pDM_Odm->DebugComponents) &&\
+			(level <= pDM_Odm->DebugLevel)\
+		) {\
+			RT_PRINTK fmt;\
+		} \
+	} while (0)
 
 #define ODM_RT_ASSERT(pDM_Odm, expr, fmt)\
-	if (!expr) {\
-		DbgPrint("Assertion failed! %s at ......\n", #expr);\
-		DbgPrint(\
-			"      ......%s,%s, line =%d\n",\
-			__FILE__,\
-			__func__,\
-			__LINE__\
-		);\
-		RT_PRINTK fmt;\
-		ASSERT(false);\
-	}
+	do {\
+		if (!expr) {\
+			DbgPrint("Assertion failed! %s at ......\n", #expr);\
+			DbgPrint(\
+				"      ......%s,%s, line =%d\n",\
+				__FILE__,\
+				__func__,\
+				__LINE__\
+			);\
+			RT_PRINTK fmt;\
+			ASSERT(false);\
+		} \
+	} while (0)
 #define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
 #define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
 #define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
 
 #define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr)\
-	if (\
-		(comp & pDM_Odm->DebugComponents) &&\
-		(level <= pDM_Odm->DebugLevel)\
-	) {\
-		int __i;\
-		u8 *__ptr = (u8 *)ptr;\
-		DbgPrint("[ODM] ");\
-		DbgPrint(title_str);\
-		DbgPrint(" ");\
-		for (__i = 0; __i < 6; __i++)\
-			DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-");\
-		DbgPrint("\n");\
-	}
+	do {\
+		if (\
+			(comp & pDM_Odm->DebugComponents) &&\
+			(level <= pDM_Odm->DebugLevel)\
+		) {\
+			int __i;\
+			u8 *__ptr = (u8 *)ptr;\
+			DbgPrint("[ODM] ");\
+			DbgPrint(title_str);\
+			DbgPrint(" ");\
+			for (__i = 0; __i < 6; __i++)\
+				DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-");\
+			DbgPrint("\n");\
+		} \
+	} while (0)
 #else
 #define ODM_RT_TRACE(pDM_Odm, comp, level, fmt)		no_printk fmt
 #define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt)	no_printk fmt
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 163537f..84a89ef 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -1741,7 +1741,8 @@ static u8 hal_EfusePgPacketWrite2ByteHeader(
 
 	efuse_addr = *pAddr;
 	if (efuse_addr >= efuse_max_available_len) {
-		DBG_8192C("%s: addr(%d) over avaliable(%d)!!\n", __func__, efuse_addr, efuse_max_available_len);
+		DBG_8192C("%s: addr(%d) over available (%d)!!\n", __func__,
+			  efuse_addr, efuse_max_available_len);
 		return false;
 	}
 
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 28d1a22..21ec890 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -385,8 +385,7 @@ s32 PHY_MACConfig8723B(struct adapter *Adapter)
 	/*  Config MAC */
 	/*  */
 	rtStatus = phy_ConfigMACWithParaFile(Adapter, pszMACRegFile);
-	if (rtStatus == _FAIL)
-	{
+	if (rtStatus == _FAIL) {
 		ODM_ConfigMACWithHeaderFile(&pHalData->odmpriv);
 		rtStatus = _SUCCESS;
 	}
@@ -459,8 +458,7 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
 		Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
 		(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
 	) {
-		if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL)
-		{
+		if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL) {
 			if (HAL_STATUS_SUCCESS != ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_TXPWR_LMT, (ODM_RF_RADIO_PATH_E)0))
 				rtStatus = _FAIL;
 		}
@@ -474,8 +472,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
 	/*  */
 	/*  1. Read PHY_REG.TXT BB INIT!! */
 	/*  */
-	if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) == _FAIL)
-	{
+	if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) ==
+		_FAIL) {
 		if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
 			rtStatus = _FAIL;
 	}
@@ -491,8 +489,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
 		Adapter->registrypriv.RegEnableTxPowerByRate == 1 ||
 		(Adapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory != 2)
 	) {
-		if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) == _FAIL)
-		{
+		if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) ==
+			_FAIL) {
 			if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
 				rtStatus = _FAIL;
 		}
@@ -514,8 +512,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
 	/*  */
 	/*  2. Read BB AGC table Initialization */
 	/*  */
-	if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile, CONFIG_BB_AGC_TAB) == _FAIL)
-	{
+	if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile,
+				     CONFIG_BB_AGC_TAB) == _FAIL) {
 		if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
 			rtStatus = _FAIL;
 	}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
index 3a85d0c..a71b552 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -144,15 +144,15 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
 		/*----Initialize RF fom connfiguration file----*/
 		switch (eRFPath) {
 		case RF_PATH_A:
-			if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile, eRFPath) == _FAIL)
-			{
+			if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile,
+						     eRFPath) == _FAIL) {
 				if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
 					rtStatus = _FAIL;
 			}
 			break;
 		case RF_PATH_B:
-			if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile, eRFPath) == _FAIL)
-			{
+			if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile,
+						     eRFPath) == _FAIL) {
 				if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
 					rtStatus = _FAIL;
 			}
@@ -186,8 +186,8 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
 	/* 3 Configuration of Tx Power Tracking */
 	/* 3 ----------------------------------------------------------------- */
 
-	if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) == _FAIL)
-	{
+	if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) ==
+		_FAIL) {
 		ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
 	}
 
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index ca6ad96..9bee2e4 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -23,8 +23,7 @@ static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
 	u32 n = 0;
 	struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
 
-	while (pHalData->SdioTxOQTFreeSpace < agg_num)
-	{
+	while (pHalData->SdioTxOQTFreeSpace < agg_num) {
 		if (
 			(padapter->bSurpriseRemoved == true) ||
 			(padapter->bDriverStopped == true)
@@ -59,7 +58,7 @@ static s32 rtl8723_dequeue_writeport(struct adapter *padapter)
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
 	struct xmit_buf *pxmitbuf;
-	struct adapter * pri_padapter = padapter;
+	struct adapter *pri_padapter = padapter;
 	s32 ret = 0;
 	u8 PageIdx = 0;
 	u32 deviceId;
@@ -231,7 +230,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
 	pxmitbuf = NULL;
 
 	if (padapter->registrypriv.wifi_spec == 1) {
-		for (idx = 0; idx<4; idx++)
+		for (idx = 0; idx < 4; idx++)
 			inx[idx] = pxmitpriv->wmm_para_seq[idx];
 	} else {
 		inx[0] = 0;
@@ -299,9 +298,10 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
 				) {
 					if (pxmitbuf) {
 						/* pxmitbuf->priv_data will be NULL, and will crash here */
-						if (pxmitbuf->len > 0 && pxmitbuf->priv_data) {
+						if (pxmitbuf->len > 0 &&
+						    pxmitbuf->priv_data) {
 							struct xmit_frame *pframe;
-							pframe = (struct xmit_frame*)pxmitbuf->priv_data;
+							pframe = (struct xmit_frame *)pxmitbuf->priv_data;
 							pframe->agg_num = k;
 							pxmitbuf->agg_num = k;
 							rtl8723b_update_txdesc(pframe, pframe->buf_addr);
@@ -392,7 +392,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
 
 			if (pxmitbuf->len > 0) {
 				struct xmit_frame *pframe;
-				pframe = (struct xmit_frame*)pxmitbuf->priv_data;
+				pframe = (struct xmit_frame *)pxmitbuf->priv_data;
 				pframe->agg_num = k;
 				pxmitbuf->agg_num = k;
 				rtl8723b_update_txdesc(pframe, pframe->buf_addr);
@@ -400,8 +400,7 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
 				pxmitbuf->priv_data = NULL;
 				enqueue_pending_xmitbuf(pxmitpriv, pxmitbuf);
 				yield();
-			}
-			else
+			} else
 				rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
 			pxmitbuf = NULL;
 		}
@@ -611,7 +610,8 @@ s32	rtl8723bs_hal_xmitframe_enqueue(
 	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
 	s32 err;
 
-	if ((err = rtw_xmitframe_enqueue(padapter, pxmitframe)) != _SUCCESS) {
+	err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+	if (err != _SUCCESS) {
 		rtw_free_xmitframe(pxmitpriv, pxmitframe);
 
 		pxmitpriv->tx_drop++;
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 6dc6dc7..73ce637 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -1003,10 +1003,10 @@ enum ieee80211_state {
 
 #define DEFAULT_MAX_SCAN_AGE (15 * HZ)
 #define DEFAULT_FTS 2346
-#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
-#define IP_FMT "%d.%d.%d.%d"
-#define IP_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3]
+#define MAC_FMT "%pM"
+#define MAC_ARG(x) (x)
+#define IP_FMT "%pI4"
+#define IP_ARG(x) (x)
 
 extern __inline int is_multicast_mac_addr(const u8 *addr)
 {
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index fdeabc1..ac9ffe0 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -169,10 +169,10 @@ __inline static u32 _RND8(u32 sz)
 }
 
 #ifndef MAC_FMT
-#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_FMT "%pM"
 #endif
 #ifndef MAC_ARG
-#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
+#define MAC_ARG(x) (x)
 #endif
 
 
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 486e818..0c9b4f6 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -26,10 +26,10 @@
 	/* include <linux/smp_lock.h> */
 	#include <linux/netdevice.h>
 	#include <linux/skbuff.h>
-	#include <asm/uaccess.h>
+	#include <linux/uaccess.h>
 	#include <asm/byteorder.h>
-	#include <asm/atomic.h>
-	#include <asm/io.h>
+	#include <linux/atomic.h>
+	#include <linux/io.h>
 	#include <linux/semaphore.h>
 	#include <linux/sem.h>
 	#include <linux/sched.h>
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
index 0dbee56..97900a3 100644
--- a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
+++ b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
@@ -19,19 +19,16 @@
 /*  */
 /*  RF RL6052 Series API */
 /*  */
-void 	rtl8192c_RF_ChangeTxPath(struct adapter *Adapter,
-				u16 	DataRate);
-void 	rtl8192c_PHY_RF6052SetBandwidth(
-				struct adapter *			Adapter,
-				enum CHANNEL_WIDTH		Bandwidth);
-void rtl8192c_PHY_RF6052SetCckTxPower(
-				struct adapter *Adapter,
-				u8*	pPowerlevel);
-void rtl8192c_PHY_RF6052SetOFDMTxPower(
-				struct adapter *Adapter,
-				u8*	pPowerLevel,
-				u8 Channel);
-int	PHY_RF6052_Config8192C(struct adapter *	Adapter	);
+void rtl8192c_RF_ChangeTxPath(struct adapter *Adapter,
+			      u16 DataRate);
+void rtl8192c_PHY_RF6052SetBandwidth(struct adapter *Adapter,
+				     enum CHANNEL_WIDTH Bandwidth);
+void rtl8192c_PHY_RF6052SetCckTxPower(struct adapter *Adapter,
+				      u8 *pPowerlevel);
+void rtl8192c_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter,
+				       u8 *pPowerLevel,
+				       u8 Channel);
+int PHY_RF6052_Config8192C(struct adapter *Adapter);
 
 /*--------------------------Exported Function prototype---------------------*/
 
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
index 8d78f4e..1906ff20 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
@@ -17,12 +17,11 @@
 
 #include <autoconf.h>
 
-
 #define HAL_NAV_UPPER_UNIT_8723B		128		/*  micro-second */
 
 /*  */
 /*  */
-/* 	0x0000h ~ 0x00FFh	System Configuration */
+/*	0x0000h ~ 0x00FFh	System Configuration */
 /*  */
 /*  */
 #define REG_RSV_CTRL_8723B				0x001C	/*  3 Byte */
@@ -42,7 +41,7 @@
 
 /*  */
 /*  */
-/* 	0x0100h ~ 0x01FFh	MACTOP General Configuration */
+/*	0x0100h ~ 0x01FFh	MACTOP General Configuration */
 /*  */
 /*  */
 #define REG_C2HEVT_CMD_ID_8723B	0x01A0
@@ -58,13 +57,13 @@
 
 /*  */
 /*  */
-/* 	0x0200h ~ 0x027Fh	TXDMA Configuration */
+/*	0x0200h ~ 0x027Fh	TXDMA Configuration */
 /*  */
 /*  */
 
 /*  */
 /*  */
-/* 	0x0280h ~ 0x02FFh	RXDMA Configuration */
+/*	0x0280h ~ 0x02FFh	RXDMA Configuration */
 /*  */
 /*  */
 #define REG_RXDMA_CONTROL_8723B		0x0286 /*  Control the RX DMA. */
@@ -72,7 +71,7 @@
 
 /*  */
 /*  */
-/* 	0x0300h ~ 0x03FFh	PCIe */
+/*	0x0300h ~ 0x03FFh	PCIe */
 /*  */
 /*  */
 #define	REG_PCIE_CTRL_REG_8723B		0x0300
@@ -99,7 +98,7 @@
 
 /*  */
 /*  */
-/* 	0x0400h ~ 0x047Fh	Protocol Configuration */
+/*	0x0400h ~ 0x047Fh	Protocol Configuration */
 /*  */
 /*  */
 #define REG_TXPKTBUF_BCNQ_BDNY_8723B	0x0424
@@ -113,18 +112,17 @@
 
 /*  */
 /*  */
-/* 	0x0500h ~ 0x05FFh	EDCA Configuration */
+/*	0x0500h ~ 0x05FFh	EDCA Configuration */
 /*  */
 /*  */
 #define REG_SECONDARY_CCA_CTRL_8723B	0x0577
 
 /*  */
 /*  */
-/* 	0x0600h ~ 0x07FFh	WMAC Configuration */
+/*	0x0600h ~ 0x07FFh	WMAC Configuration */
 /*  */
 /*  */
 
-
 /*  */
 /*  SDIO Bus Specification */
 /*  */
@@ -142,9 +140,8 @@
 /*  */
 #define SDIO_REG_HCPWM1_8723B	0x025 /*  HCI Current Power Mode 1 */
 
-
 /*  */
-/* 	8723 Regsiter Bit and Content definition */
+/*	8723 Register Bit and Content definition */
 /*  */
 
 /* 2 HSISR */
@@ -157,20 +154,19 @@
 
 /*  */
 /*  */
-/* 	0x0100h ~ 0x01FFh	MACTOP General Configuration */
-/*  */
-/*  */
-
-
-/*  */
-/*  */
-/* 	0x0200h ~ 0x027Fh	TXDMA Configuration */
+/*	0x0100h ~ 0x01FFh	MACTOP General Configuration */
 /*  */
 /*  */
 
 /*  */
 /*  */
-/* 	0x0280h ~ 0x02FFh	RXDMA Configuration */
+/*	0x0200h ~ 0x027Fh	TXDMA Configuration */
+/*  */
+/*  */
+
+/*  */
+/*  */
+/*	0x0280h ~ 0x02FFh	RXDMA Configuration */
 /*  */
 /*  */
 #define BIT_USB_RXDMA_AGG_EN	BIT(31)
@@ -184,7 +180,7 @@
 
 /*  */
 /*  */
-/* 	0x0400h ~ 0x047Fh	Protocol Configuration */
+/*	0x0400h ~ 0x047Fh	Protocol Configuration */
 /*  */
 /*  */
 
@@ -195,19 +191,18 @@
 
 /*  */
 /*  */
-/* 	0x0500h ~ 0x05FFh	EDCA Configuration */
+/*	0x0500h ~ 0x05FFh	EDCA Configuration */
 /*  */
 /*  */
 
 /*  */
 /*  */
-/* 	0x0600h ~ 0x07FFh	WMAC Configuration */
+/*	0x0600h ~ 0x07FFh	WMAC Configuration */
 /*  */
 /*  */
 #define EEPROM_RF_GAIN_OFFSET			0xC1
 #define EEPROM_RF_GAIN_VAL			0x1F6
 
-
 /*  */
 /*        8195 IMR/ISR bits						(offset 0xB0,  8bits) */
 /*  */
@@ -246,13 +241,13 @@
 #define	IMR_BCNDMAINT3_8723B				BIT23		/*  Beacon DMA Interrupt 3 */
 #define	IMR_BCNDMAINT2_8723B				BIT22		/*  Beacon DMA Interrupt 2 */
 #define	IMR_BCNDMAINT1_8723B				BIT21		/*  Beacon DMA Interrupt 1 */
-#define	IMR_BCNDOK7_8723B					BIT20		/*  Beacon Queue DMA OK Interrup 7 */
-#define	IMR_BCNDOK6_8723B					BIT19		/*  Beacon Queue DMA OK Interrup 6 */
-#define	IMR_BCNDOK5_8723B					BIT18		/*  Beacon Queue DMA OK Interrup 5 */
-#define	IMR_BCNDOK4_8723B					BIT17		/*  Beacon Queue DMA OK Interrup 4 */
-#define	IMR_BCNDOK3_8723B					BIT16		/*  Beacon Queue DMA OK Interrup 3 */
-#define	IMR_BCNDOK2_8723B					BIT15		/*  Beacon Queue DMA OK Interrup 2 */
-#define	IMR_BCNDOK1_8723B					BIT14		/*  Beacon Queue DMA OK Interrup 1 */
+#define	IMR_BCNDOK7_8723B					BIT20		/*  Beacon Queue DMA OK Interrupt 7 */
+#define	IMR_BCNDOK6_8723B					BIT19		/*  Beacon Queue DMA OK Interrupt 6 */
+#define	IMR_BCNDOK5_8723B					BIT18		/*  Beacon Queue DMA OK Interrupt 5 */
+#define	IMR_BCNDOK4_8723B					BIT17		/*  Beacon Queue DMA OK Interrupt 4 */
+#define	IMR_BCNDOK3_8723B					BIT16		/*  Beacon Queue DMA OK Interrupt 3 */
+#define	IMR_BCNDOK2_8723B					BIT15		/*  Beacon Queue DMA OK Interrupt 2 */
+#define	IMR_BCNDOK1_8723B					BIT14		/*  Beacon Queue DMA OK Interrupt 1 */
 #define	IMR_ATIMEND_E_8723B				BIT13		/*  ATIM Window End Extension for Win7 */
 #define	IMR_TXERR_8723B					BIT11		/*  Tx Error Flag Interrupt Status, write 1 clear. */
 #define	IMR_RXERR_8723B					BIT10		/*  Rx Error Flag INT Status, Write 1 clear */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 36c3189..bd4352f 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2667,7 +2667,8 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
 	mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
 	strncpy(mon_ndev->name, name, IFNAMSIZ);
 	mon_ndev->name[IFNAMSIZ - 1] = 0;
-	mon_ndev->destructor = rtw_ndev_destructor;
+	mon_ndev->needs_free_netdev = true;
+	mon_ndev->priv_destructor = rtw_ndev_destructor;
 
 	mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
 
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 91674137..79d8383 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -766,7 +766,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
 
 exit:
 
-	kfree((u8 *)pwep);
+	kfree(pwep);
 	return ret;
 }
 
@@ -2500,7 +2500,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
 	ret =  wpa_set_encryption(dev, param, param_len);
 
 exit:
-	kfree((u8 *)param);
+	kfree(param);
 
 	return ret;
 }
@@ -3767,7 +3767,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
 
 	if (copy_from_user(param, p->pointer, p->length))
 	{
-		kfree((u8 *)param);
+		kfree(param);
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3801,7 +3801,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
 	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
 		ret = -EFAULT;
 
-	kfree((u8 *)param);
+	kfree(param);
 
 out:
 
@@ -4130,7 +4130,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
 	}
 
 exit:
-	kfree((u8 *)pwep);
+	kfree(pwep);
 
 	return ret;
 
@@ -4713,7 +4713,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
 
 	if (copy_from_user(param, p->pointer, p->length))
 	{
-		kfree((u8 *)param);
+		kfree(param);
 		ret = -EFAULT;
 		goto out;
 	}
@@ -4817,7 +4817,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
 		ret = -EFAULT;
 
 
-	kfree((u8 *)param);
+	kfree(param);
 
 out:
 
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index f83cfc7..0215899 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1207,8 +1207,6 @@ void rtw_ndev_destructor(struct net_device *ndev)
 
 	if (ndev->ieee80211_ptr)
 		kfree((u8 *)ndev->ieee80211_ptr);
-
-	free_netdev(ndev);
 }
 
 void rtw_dev_unload(struct adapter *padapter)
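
This hunk, together with the ioctl_cfg80211.c change above, follows the 4.12 net_device destructor split: needs_free_netdev tells the core to call free_netdev() itself, while priv_destructor is left with only driver-private cleanup, which is why rtw_ndev_destructor no longer frees the netdev here. A minimal sketch of the pattern (names hypothetical):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Sketch: driver-private cleanup only; the core frees the netdev. */
static void example_priv_destructor(struct net_device *ndev)
{
	kfree(ndev->ieee80211_ptr);
}

static void example_setup(struct net_device *ndev)
{
	ndev->needs_free_netdev = true;		/* core calls free_netdev() */
	ndev->priv_destructor = example_priv_destructor;
}
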
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 02db59e..aa16d1a 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -160,7 +160,7 @@ static int isFileReadable(char *path)
 		oldfs = get_fs(); set_fs(get_ds());
 
 		if (1!=readFile(fp, &buf, 1))
-			ret = PTR_ERR(fp);
+			ret = -EINVAL;
 
 		set_fs(oldfs);
 		filp_close(fp, NULL);
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 33f0f83..3aa3e65 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -272,7 +272,7 @@ u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
 		DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x\n", __func__, *err, addr, v);
 
 		*err = 0;
-		for (i = 0; i<SD_IO_TRY_CNT; i++)
+		for (i = 0; i < SD_IO_TRY_CNT; i++)
 		{
 			if (claim_needed) sdio_claim_host(func);
 			v = sdio_readl(func, addr, err);
@@ -294,7 +294,7 @@ u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
 			}
 		}
 
-		if (i ==SD_IO_TRY_CNT)
+		if (i == SD_IO_TRY_CNT)
 			DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
 		else
 			DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
@@ -317,7 +317,7 @@ void sd_write8(struct intf_hdl *pintfhdl, u32 addr, u8 v, s32 *err)
 
 	if (padapter->bSurpriseRemoved) {
 		/* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
-		return ;
+		return;
 	}
 
 	func = psdio->func;
@@ -346,7 +346,7 @@ void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
 
 	if (padapter->bSurpriseRemoved) {
 		/* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
-		return ;
+		return;
 	}
 
 	func = psdio->func;
@@ -365,7 +365,7 @@ void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
 		DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x val = 0x%08x\n", __func__, *err, addr, v);
 
 		*err = 0;
-		for (i = 0; i<SD_IO_TRY_CNT; i++)
+		for (i = 0; i < SD_IO_TRY_CNT; i++)
 		{
 			if (claim_needed) sdio_claim_host(func);
 			sdio_writel(func, v, addr, err);
@@ -386,7 +386,7 @@ void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
 			}
 		}
 
-		if (i ==SD_IO_TRY_CNT)
+		if (i == SD_IO_TRY_CNT)
 			DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%08x, try_cnt =%d\n", __func__, *err, addr, v, i);
 		else
 			DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x val = 0x%08x, try_cnt =%d\n", __func__, *err, addr, v, i);
@@ -428,7 +428,7 @@ s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
 
 	func = psdio->func;
 
-	if (unlikely((cnt == 1) || (cnt ==2)))
+	if (unlikely((cnt == 1) || (cnt == 2)))
 	{
 		int i;
 		u8 *pbuf = (u8 *)pdata;
@@ -465,7 +465,7 @@ s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
  *0		Success
  *others	Fail
  */
-s32 sd_read(struct intf_hdl * pintfhdl, u32 addr, u32 cnt, void *pdata)
+s32 sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
 {
 	struct adapter *padapter;
 	struct dvobj_priv *psdiodev;
@@ -517,7 +517,7 @@ s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
 
 	struct sdio_func *func;
 	u32 size;
-	s32 err =-EPERM;
+	s32 err = -EPERM;
 
 	padapter = pintfhdl->padapter;
 	psdiodev = pintfhdl->pintf_dev;
@@ -529,9 +529,9 @@ s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
 	}
 
 	func = psdio->func;
-/* 	size = sdio_align_size(func, cnt); */
+/*	size = sdio_align_size(func, cnt); */
 
-	if (unlikely((cnt == 1) || (cnt ==2)))
+	if (unlikely((cnt == 1) || (cnt == 2)))
 	{
 		int i;
 		u8 *pbuf = (u8 *)pdata;
@@ -576,7 +576,7 @@ s32 sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
 	PSDIO_DATA psdio;
 	struct sdio_func *func;
 	bool claim_needed;
-	s32 err =-EPERM;
+	s32 err = -EPERM;
 
 	padapter = pintfhdl->padapter;
 	psdiodev = pintfhdl->pintf_dev;
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 9c61125..305e88a 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -14,42 +14,43 @@
  */
 
 /*
- *Only these channels all allow active
- *scan on all world regulatory domains
+ * Only these channels allow active
+ * scan on all world regulatory domains
  */
 
 /* 2G chan 01 - chan 11 */
 #define RTW_2GHZ_CH01_11	\
-	REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+	REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
 
 /*
- *We enable active scan on these a case
- *by case basis by regulatory domain
+ * We enable active scan on these a case
+ * by case basis by regulatory domain
  */
 
 /* 2G chan 12 - chan 13, PASSIV SCAN */
 #define RTW_2GHZ_CH12_13	\
-	REG_RULE(2467-10, 2472+10, 40, 0, 20,	\
+	REG_RULE(2467 - 10, 2472 + 10, 40, 0, 20,	\
 	NL80211_RRF_PASSIVE_SCAN)
 
 /* 2G chan 14, PASSIVS SCAN, NO OFDM (B only) */
 #define RTW_2GHZ_CH14	\
-	REG_RULE(2484-10, 2484+10, 40, 0, 20,	\
+	REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20,	\
 	NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
 
 static const struct ieee80211_regdomain rtw_regdom_rd = {
 	.n_reg_rules = 3,
 	.alpha2 = "99",
 	.reg_rules = {
-		      RTW_2GHZ_CH01_11,
-		      RTW_2GHZ_CH12_13,
-		      }
+		RTW_2GHZ_CH01_11,
+		RTW_2GHZ_CH12_13,
+	}
 };
 
 static int rtw_ieee80211_channel_to_frequency(int chan, int band)
 {
 	/* see 802.11 17.3.8.3.2 and Annex J
-	 * there are overlapping channel numbers in 5GHz and 2GHz bands */
+	 * there are overlapping channel numbers in 5GHz and 2GHz bands
+	 */
 
 	/* NL80211_BAND_2GHZ */
 	if (chan == 14)
@@ -73,7 +74,7 @@ static void _rtw_reg_apply_flags(struct wiphy *wiphy)
 	u16 channel;
 	u32 freq;
 
-	/*  all channels disable */
+	/* all channels disable */
 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		sband = wiphy->bands[i];
 
@@ -87,7 +88,7 @@ static void _rtw_reg_apply_flags(struct wiphy *wiphy)
 		}
 	}
 
-	/*  channels apply by channel plans. */
+	/* channels apply by channel plans. */
 	for (i = 0; i < max_chan_nums; i++) {
 		channel = channel_set[i].ChannelNum;
 		freq =
@@ -96,12 +97,10 @@ static void _rtw_reg_apply_flags(struct wiphy *wiphy)
 
 		ch = ieee80211_get_channel(wiphy, freq);
 		if (ch) {
-			if (channel_set[i].ScanType == SCAN_PASSIVE) {
+			if (channel_set[i].ScanType == SCAN_PASSIVE)
 				ch->flags = IEEE80211_CHAN_NO_IR;
-			}
-			else {
+			else
 				ch->flags = 0;
-			}
 		}
 	}
 }
@@ -123,10 +122,11 @@ static const struct ieee80211_regdomain *_rtw_regdomain_select(struct
 }
 
 static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
-				struct wiphy *wiphy,
-				void (*reg_notifier) (struct wiphy * wiphy,
-						     struct regulatory_request *
-						     request))
+				 struct wiphy *wiphy,
+				 void (*reg_notifier)(struct wiphy *wiphy,
+						      struct
+						      regulatory_request *
+						      request))
 {
 	const struct ieee80211_regdomain *regd;
 
@@ -144,11 +144,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
 }
 
 int rtw_regd_init(struct adapter *padapter,
-		  void (*reg_notifier) (struct wiphy * wiphy,
+		  void (*reg_notifier)(struct wiphy *wiphy,
 				       struct regulatory_request *request))
 {
-	/* struct registry_priv  *registrypriv = &padapter->registrypriv; */
 	struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
+
 	_rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
 
 	return 0;
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index a95c5de..36b5a11 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -536,7 +536,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (sendbytes > 8) {
 		memcpy(buf, inquiry_buf, 8);
-		memcpy(buf + 8, inquiry_string,	sendbytes - 8);
+		strncpy(buf + 8, inquiry_string, sendbytes - 8);
 		if (pro_formatter_flag) {
 			/* Additional Length */
 			buf[4] = 0x33;
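
The copy above is now bounded by the source string as well as by sendbytes - 8: strncpy() stops at inquiry_string's terminating NUL and zero-pads the remainder of the destination, whereas memcpy() would keep reading past the end of a shorter string. A small illustration (string and length are hypothetical):

#include <linux/string.h>
#include <linux/types.h>

/* Sketch: copy at most n bytes, stop at the source NUL, and
 * zero-fill the rest of the destination buffer.
 */
static void example_fill_inquiry(char *buf, size_t n)
{
	static const char id_string[] = "Example Reader 1.00";

	strncpy(buf, id_string, n);
}
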
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index bdd35b6..c2eb072 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1057,7 +1057,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 
 	rtsx_write_register(chip, SD_DCMPS_CTL, DCMPS_CHANGE, 0);
 	rtsx_write_register(chip, SD_VP_CTL, PHASE_CHANGE, 0);
-	wait_timeout(10);
+	mdelay(10);
 	sd_reset_dcm(chip, tune_dir);
 	return STATUS_FAIL;
 }
@@ -5231,7 +5231,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
 			return STATUS_FAIL;
 		}
 
-		wait_timeout(50);
+		mdelay(50);
 	}
 
 	if (chip->asic_code) {
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 85aba05..74d36f9 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -1268,7 +1268,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 			reg = 0;
 			rtsx_read_register(chip, XD_CTL, &reg);
 			if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
-				wait_timeout(100);
+				mdelay(100);
 
 				if (detect_card_cd(chip,
 						   XD_CARD) != STATUS_SUCCESS) {
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 5e4bfb6..944dd25 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -175,7 +175,7 @@ static void set_master_clock(unsigned int frequency)
 		}
 
 		sm750_set_current_gate(reg);
-		}
+	}
 }
 
 unsigned int ddk750_get_vm_size(void)
@@ -224,7 +224,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
 	sm750_set_current_gate(reg);
 
 	if (sm750_get_chip_type() != SM750LE) {
-		/*	set panel pll and graphic mode via mmio_88 */
+		/* set panel pll and graphic mode via mmio_88 */
 		reg = peek32(VGA_CONFIGURATION);
 		reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
 		poke32(VGA_CONFIGURATION, reg);
@@ -309,7 +309,8 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
  * M = {1,...,255}
  * N = {2,...,15}
  */
-unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll)
+unsigned int sm750_calc_pll_value(unsigned int request_orig,
+				  struct pll_value *pll)
 {
 	/*
 	 * as sm750 register definition,
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 171ae06..87a199d 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -29,26 +29,31 @@ static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
 #endif
 };
 
-int dviInit(
-	unsigned char edgeSelect,
-	unsigned char busSelect,
-	unsigned char dualEdgeClkSelect,
-	unsigned char hsyncEnable,
-	unsigned char vsyncEnable,
-	unsigned char deskewEnable,
-	unsigned char deskewSetting,
-	unsigned char continuousSyncEnable,
-	unsigned char pllFilterEnable,
-	unsigned char pllFilterValue
-			)
+int dviInit(unsigned char edgeSelect,
+	    unsigned char busSelect,
+	    unsigned char dualEdgeClkSelect,
+	    unsigned char hsyncEnable,
+	    unsigned char vsyncEnable,
+	    unsigned char deskewEnable,
+	    unsigned char deskewSetting,
+	    unsigned char continuousSyncEnable,
+	    unsigned char pllFilterEnable,
+	    unsigned char pllFilterValue)
 {
 	dvi_ctrl_device_t *pCurrentDviCtrl;
 
 	pCurrentDviCtrl = g_dcftSupportedDviController;
 	if (pCurrentDviCtrl->pfnInit) {
-		return pCurrentDviCtrl->pfnInit(edgeSelect, busSelect, dualEdgeClkSelect, hsyncEnable,
-						vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
-						pllFilterEnable, pllFilterValue);
+		return pCurrentDviCtrl->pfnInit(edgeSelect,
+						busSelect,
+						dualEdgeClkSelect,
+						hsyncEnable,
+						vsyncEnable,
+						deskewEnable,
+						deskewSetting,
+						continuousSyncEnable,
+						pllFilterEnable,
+						pllFilterValue);
 	}
 	return -1; /* error */
 }
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index 677939c..4a83945 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -3,17 +3,16 @@
 
 /* dvi chip stuffs structros */
 
-typedef long (*PFN_DVICTRL_INIT)(
-	unsigned char edgeSelect,
-	unsigned char busSelect,
-	unsigned char dualEdgeClkSelect,
-	unsigned char hsyncEnable,
-	unsigned char vsyncEnable,
-	unsigned char deskewEnable,
-	unsigned char deskewSetting,
-	unsigned char continuousSyncEnable,
-	unsigned char pllFilterEnable,
-	unsigned char pllFilterValue);
+typedef long (*PFN_DVICTRL_INIT)(unsigned char edgeSelect,
+				 unsigned char busSelect,
+				 unsigned char dualEdgeClkSelect,
+				 unsigned char hsyncEnable,
+				 unsigned char vsyncEnable,
+				 unsigned char deskewEnable,
+				 unsigned char deskewSetting,
+				 unsigned char continuousSyncEnable,
+				 unsigned char pllFilterEnable,
+				 unsigned char pllFilterValue);
 
 typedef void (*PFN_DVICTRL_RESETCHIP)(void);
 typedef char* (*PFN_DVICTRL_GETCHIPSTRING)(void);
@@ -42,18 +41,16 @@ typedef struct _dvi_ctrl_device_t {
 #define DVI_CTRL_SII164
 
 /* dvi functions prototype */
-int dviInit(
-	unsigned char edgeSelect,
-	unsigned char busSelect,
-	unsigned char dualEdgeClkSelect,
-	unsigned char hsyncEnable,
-	unsigned char vsyncEnable,
-	unsigned char deskewEnable,
-	unsigned char deskewSetting,
-	unsigned char continuousSyncEnable,
-	unsigned char pllFilterEnable,
-	unsigned char pllFilterValue
-);
+int dviInit(unsigned char edgeSelect,
+	    unsigned char busSelect,
+	    unsigned char dualEdgeClkSelect,
+	    unsigned char hsyncEnable,
+	    unsigned char vsyncEnable,
+	    unsigned char deskewEnable,
+	    unsigned char deskewSetting,
+	    unsigned char continuousSyncEnable,
+	    unsigned char pllFilterEnable,
+	    unsigned char pllFilterValue);
 
 #endif
 
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index fe814e4..ec556a9 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -8,9 +8,7 @@
 #define MAX_HWI2C_FIFO                  16
 #define HWI2C_WAIT_TIMEOUT              0xF0000
 
-int sm750_hw_i2c_init(
-unsigned char bus_speed_mode
-)
+int sm750_hw_i2c_init(unsigned char bus_speed_mode)
 {
 	unsigned int value;
 
@@ -81,11 +79,9 @@ static long hw_i2c_wait_tx_done(void)
  *  Return Value:
  *      Total number of bytes those are actually written.
  */
-static unsigned int hw_i2c_write_data(
-	unsigned char addr,
-	unsigned int length,
-	unsigned char *buf
-)
+static unsigned int hw_i2c_write_data(unsigned char addr,
+				      unsigned int length,
+				      unsigned char *buf)
 {
 	unsigned char count, i;
 	unsigned int total_bytes = 0;
@@ -148,11 +144,9 @@ static unsigned int hw_i2c_write_data(
  *  Return Value:
  *      Total number of actual bytes read from the slave device
  */
-static unsigned int hw_i2c_read_data(
-	unsigned char addr,
-	unsigned int length,
-	unsigned char *buf
-)
+static unsigned int hw_i2c_read_data(unsigned char addr,
+				     unsigned int length,
+				     unsigned char *buf)
 {
 	unsigned char count, i;
 	unsigned int total_bytes = 0;
@@ -212,10 +206,7 @@ static unsigned int hw_i2c_read_data(
  *  Return Value:
  *      Register value
  */
-unsigned char sm750_hw_i2c_read_reg(
-	unsigned char addr,
-	unsigned char reg
-)
+unsigned char sm750_hw_i2c_read_reg(unsigned char addr, unsigned char reg)
 {
 	unsigned char value = 0xFF;
 
@@ -238,11 +229,9 @@ unsigned char sm750_hw_i2c_read_reg(
  *          0   - Success
  *         -1   - Fail
  */
-int sm750_hw_i2c_write_reg(
-	unsigned char addr,
-	unsigned char reg,
-	unsigned char data
-)
+int sm750_hw_i2c_write_reg(unsigned char addr,
+			   unsigned char reg,
+			   unsigned char data)
 {
 	unsigned char value[2];
 
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 259006a..0431833 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -112,18 +112,16 @@ unsigned short sii164GetDeviceID(void)
  *      0   - Success
  *     -1   - Fail.
  */
-long sii164InitChip(
-	unsigned char edgeSelect,
-	unsigned char busSelect,
-	unsigned char dualEdgeClkSelect,
-	unsigned char hsyncEnable,
-	unsigned char vsyncEnable,
-	unsigned char deskewEnable,
-	unsigned char deskewSetting,
-	unsigned char continuousSyncEnable,
-	unsigned char pllFilterEnable,
-	unsigned char pllFilterValue
-)
+long sii164InitChip(unsigned char edgeSelect,
+		    unsigned char busSelect,
+		    unsigned char dualEdgeClkSelect,
+		    unsigned char hsyncEnable,
+		    unsigned char vsyncEnable,
+		    unsigned char deskewEnable,
+		    unsigned char deskewSetting,
+		    unsigned char continuousSyncEnable,
+		    unsigned char pllFilterEnable,
+		    unsigned char pllFilterValue)
 {
 	unsigned char config;
 
@@ -259,7 +257,6 @@ void sii164ResetChip(void)
 	sii164SetPower(1);
 }
 
-
 /*
  * sii164GetChipString
  *      This function returns a char string name of the current DVI Controller chip.
@@ -270,7 +267,6 @@ char *sii164GetChipString(void)
 	return gDviCtrlChipName;
 }
 
-
 /*
  *  sii164SetPower
  *      This function sets the power configuration of the DVI Controller Chip.
@@ -278,9 +274,7 @@ char *sii164GetChipString(void)
  *  Input:
  *      powerUp - Flag to set the power down or up
  */
-void sii164SetPower(
-	unsigned char powerUp
-)
+void sii164SetPower(unsigned char powerUp)
 {
 	unsigned char config;
 
@@ -298,18 +292,16 @@ void sii164SetPower(
 	}
 }
 
-
 /*
  *  sii164SelectHotPlugDetectionMode
  *      This function selects the mode of the hot plug detection.
  */
-static void sii164SelectHotPlugDetectionMode(
-	sii164_hot_plug_mode_t hotPlugMode
-)
+static void sii164SelectHotPlugDetectionMode(sii164_hot_plug_mode_t hotPlugMode)
 {
 	unsigned char detectReg;
 
-	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
+		    ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
 	switch (hotPlugMode) {
 	case SII164_HOTPLUG_DISABLE:
 		detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
@@ -336,9 +328,7 @@ static void sii164SelectHotPlugDetectionMode(
  *
  *  enableHotPlug   - Enable (=1) / disable (=0) Hot Plug detection
  */
-void sii164EnableHotPlugDetection(
-	unsigned char enableHotPlug
-)
+void sii164EnableHotPlugDetection(unsigned char enableHotPlug)
 {
 	unsigned char detectReg;
 
@@ -365,7 +355,8 @@ unsigned char sii164IsConnected(void)
 {
 	unsigned char hotPlugValue;
 
-	hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_HOT_PLUG_STATUS_MASK;
+	hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
+		       SII164_DETECT_HOT_PLUG_STATUS_MASK;
 	if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
 		return 1;
 	else
@@ -384,7 +375,8 @@ unsigned char sii164CheckInterrupt(void)
 {
 	unsigned char detectReg;
 
-	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_MONITOR_STATE_MASK;
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
+		    SII164_DETECT_MONITOR_STATE_MASK;
 	if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
 		return 1;
 	else
@@ -401,7 +393,8 @@ void sii164ClearInterrupt(void)
 
 	/* Clear the MDI interrupt */
 	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
-	i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
+	i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT,
+		    detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
 }
 
 #endif
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 664ad08..6968cf5 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -13,18 +13,16 @@ typedef enum _sii164_hot_plug_mode_t {
 
 
 /* Silicon Image SiI164 chip prototype */
-long sii164InitChip(
-	unsigned char edgeSelect,
-	unsigned char busSelect,
-	unsigned char dualEdgeClkSelect,
-	unsigned char hsyncEnable,
-	unsigned char vsyncEnable,
-	unsigned char deskewEnable,
-	unsigned char deskewSetting,
-	unsigned char continuousSyncEnable,
-	unsigned char pllFilterEnable,
-	unsigned char pllFilterValue
-);
+long sii164InitChip(unsigned char edgeSelect,
+		    unsigned char busSelect,
+		    unsigned char dualEdgeClkSelect,
+		    unsigned char hsyncEnable,
+		    unsigned char vsyncEnable,
+		    unsigned char deskewEnable,
+		    unsigned char deskewSetting,
+		    unsigned char continuousSyncEnable,
+		    unsigned char pllFilterEnable,
+		    unsigned char pllFilterValue);
 
 unsigned short sii164GetVendorID(void);
 unsigned short sii164GetDeviceID(void);
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index a4ac07c..19c5ffc 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -349,8 +349,7 @@ static unsigned char sw_i2c_read_byte(unsigned char ack)
  *      -1   - Fail to initialize the i2c
  *       0   - Success
  */
-static long sm750le_i2c_init(unsigned char clk_gpio,
-			     unsigned char data_gpio)
+static long sm750le_i2c_init(unsigned char clk_gpio, unsigned char data_gpio)
 {
 	int i;
 
@@ -388,10 +387,7 @@ static long sm750le_i2c_init(unsigned char clk_gpio,
  *      -1   - Fail to initialize the i2c
  *       0   - Success
  */
-long sm750_sw_i2c_init(
-	unsigned char clk_gpio,
-	unsigned char data_gpio
-)
+long sm750_sw_i2c_init(unsigned char clk_gpio, unsigned char data_gpio)
 {
 	int i;
 
@@ -448,10 +444,7 @@ long sm750_sw_i2c_init(
  *  Return Value:
  *      Register value
  */
-unsigned char sm750_sw_i2c_read_reg(
-	unsigned char addr,
-	unsigned char reg
-)
+unsigned char sm750_sw_i2c_read_reg(unsigned char addr, unsigned char reg)
 {
 	unsigned char data;
 
@@ -488,11 +481,9 @@ unsigned char sm750_sw_i2c_read_reg(
  *          0   - Success
  *         -1   - Fail
  */
-long sm750_sw_i2c_write_reg(
-	unsigned char addr,
-	unsigned char reg,
-	unsigned char data
-)
+long sm750_sw_i2c_write_reg(unsigned char addr,
+			    unsigned char reg,
+			    unsigned char data)
 {
 	long ret = 0;
 
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index 5a9466e..3b8a96d 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -28,10 +28,7 @@
  *      -1   - Fail to initialize the i2c
  *       0   - Success
  */
-long sm750_sw_i2c_init(
-	unsigned char clk_gpio,
-	unsigned char data_gpio
-);
+long sm750_sw_i2c_init(unsigned char clk_gpio, unsigned char data_gpio);
 
 /*
  *  This function reads the slave device's register
@@ -44,10 +41,7 @@ long sm750_sw_i2c_init(
  *  Return Value:
  *      Register value
  */
-unsigned char sm750_sw_i2c_read_reg(
-	unsigned char addr,
-	unsigned char reg
-);
+unsigned char sm750_sw_i2c_read_reg(unsigned char addr, unsigned char reg);
 
 /*
  *  This function writes a value to the slave device's register
@@ -62,10 +56,8 @@ unsigned char sm750_sw_i2c_read_reg(
  *          0   - Success
  *         -1   - Fail
  */
-long sm750_sw_i2c_write_reg(
-	unsigned char addr,
-	unsigned char reg,
-	unsigned char data
-);
+long sm750_sw_i2c_write_reg(unsigned char addr,
+			    unsigned char reg,
+			    unsigned char data);
 
 #endif  /* _SWI2C_H_ */
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 386d4ad..3aa4128 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -33,7 +33,7 @@ static int g_hwcursor = 1;
 static int g_noaccel;
 static int g_nomtrr;
 static const char *g_fbmode[] = {NULL, NULL};
-static const char *g_def_fbmode = "800x600-16@60";
+static const char *g_def_fbmode = "1024x768-32@60";
 static char *g_settings;
 static int g_dualview;
 static char *g_option;
@@ -112,42 +112,42 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
 	cursor = &crtc->cursor;
 
 	if (fbcursor->image.width > cursor->maxW ||
-	   fbcursor->image.height > cursor->maxH ||
-	   fbcursor->image.depth > 1) {
+	    fbcursor->image.height > cursor->maxH ||
+	    fbcursor->image.depth > 1) {
 		return -ENXIO;
 	}
 
 	sm750_hw_cursor_disable(cursor);
 	if (fbcursor->set & FB_CUR_SETSIZE)
 		sm750_hw_cursor_setSize(cursor,
-				  fbcursor->image.width,
-				  fbcursor->image.height);
+					fbcursor->image.width,
+					fbcursor->image.height);
 
 	if (fbcursor->set & FB_CUR_SETPOS)
 		sm750_hw_cursor_setPos(cursor,
-				 fbcursor->image.dx - info->var.xoffset,
-				 fbcursor->image.dy - info->var.yoffset);
+				       fbcursor->image.dx - info->var.xoffset,
+				       fbcursor->image.dy - info->var.yoffset);
 
 	if (fbcursor->set & FB_CUR_SETCMAP) {
 		/* get the 16bit color of kernel means */
 		u16 fg, bg;
 
 		fg = ((info->cmap.red[fbcursor->image.fg_color] & 0xf800)) |
-		      ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5) |
-		      ((info->cmap.blue[fbcursor->image.fg_color] & 0xf800) >> 11);
+		     ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5) |
+		     ((info->cmap.blue[fbcursor->image.fg_color] & 0xf800) >> 11);
 
 		bg = ((info->cmap.red[fbcursor->image.bg_color] & 0xf800)) |
-		      ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
-		      ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
+		     ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
+		     ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
 
 		sm750_hw_cursor_setColor(cursor, fg, bg);
 	}
 
 	if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
 		sm750_hw_cursor_setData(cursor,
-				  fbcursor->rop,
-				  fbcursor->image.data,
-				  fbcursor->mask);
+					fbcursor->rop,
+					fbcursor->image.data,
+					fbcursor->mask);
 	}
 
 	if (fbcursor->enable)
@@ -183,19 +183,19 @@ static void lynxfb_ops_fillrect(struct fb_info *info,
 	rop = (region->rop != ROP_COPY) ? HW_ROP2_XOR : HW_ROP2_COPY;
 
 	/*
-	 * If not use spin_lock,system will die if user load driver
+	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
+	 * since the fb_count could change during the lifetime of
+	 * this lock, we are holding it for all cases.
 	 */
-	if (sm750_dev->fb_count > 1)
-		spin_lock(&sm750_dev->slock);
+	spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_fillrect(&sm750_dev->accel,
 				     base, pitch, Bpp,
 				     region->dx, region->dy,
 				     region->width, region->height,
 				     color, rop);
-	if (sm750_dev->fb_count > 1)
-		spin_unlock(&sm750_dev->slock);
+	spin_unlock(&sm750_dev->slock);
 }
 
 static void lynxfb_ops_copyarea(struct fb_info *info,
@@ -219,17 +219,17 @@ static void lynxfb_ops_copyarea(struct fb_info *info,
 	/*
 	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
+	 * since the fb_count could change during the lifetime of
+	 * this lock, we are holding it for all cases.
 	 */
-	if (sm750_dev->fb_count > 1)
-		spin_lock(&sm750_dev->slock);
+	spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_copyarea(&sm750_dev->accel,
 				     base, pitch, region->sx, region->sy,
 				     base, pitch, Bpp, region->dx, region->dy,
 				     region->width, region->height,
 				     HW_ROP2_COPY);
-	if (sm750_dev->fb_count > 1)
-		spin_unlock(&sm750_dev->slock);
+	spin_unlock(&sm750_dev->slock);
 }
 
 static void lynxfb_ops_imageblit(struct fb_info *info,
@@ -268,9 +268,10 @@ static void lynxfb_ops_imageblit(struct fb_info *info,
 	/*
 	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
+	 * since the fb_count could change during the lifetime of
+	 * this lock, we are holding it for all cases.
 	 */
-	if (sm750_dev->fb_count > 1)
-		spin_lock(&sm750_dev->slock);
+	spin_lock(&sm750_dev->slock);
 
 	sm750_dev->accel.de_imageblit(&sm750_dev->accel,
 				      image->data, image->width >> 3, 0,
@@ -278,8 +279,7 @@ static void lynxfb_ops_imageblit(struct fb_info *info,
 				      image->dx, image->dy,
 				      image->width, image->height,
 				      fgcol, bgcol, HW_ROP2_COPY);
-	if (sm750_dev->fb_count > 1)
-		spin_unlock(&sm750_dev->slock);
+	spin_unlock(&sm750_dev->slock);
 }
 
 static int lynxfb_ops_pan_display(struct fb_var_screeninfo *var,
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index 5b186da..4386122 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -189,14 +189,22 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev);
 int hw_sm750_deWait(void);
 int hw_sm750le_deWait(void);
 
-int hw_sm750_output_setMode(struct lynxfb_output*, struct fb_var_screeninfo*,
-			    struct fb_fix_screeninfo*);
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc*, struct fb_var_screeninfo*);
-int hw_sm750_crtc_setMode(struct lynxfb_crtc*, struct fb_var_screeninfo*,
-			  struct fb_fix_screeninfo*);
-int hw_sm750_setColReg(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
-int hw_sm750_setBLANK(struct lynxfb_output*, int);
-int hw_sm750le_setBLANK(struct lynxfb_output*, int);
+int hw_sm750_output_setMode(struct lynxfb_output *output,
+			    struct fb_var_screeninfo *var,
+			    struct fb_fix_screeninfo *fix);
+
+int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc,
+			    struct fb_var_screeninfo *var);
+
+int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
+			  struct fb_var_screeninfo *var,
+			  struct fb_fix_screeninfo *fix);
+
+int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index,
+		       ushort red, ushort green, ushort blue);
+
+int hw_sm750_setBLANK(struct lynxfb_output *output, int blank);
+int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank);
 int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
 			 const struct fb_var_screeninfo *var,
 			 const struct fb_info *info);
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 6be86e4..4b720cf 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -42,10 +42,11 @@ void sm750_hw_de_init(struct lynx_accel *accel)
 	/* dpr1c */
 	reg =  0x3;
 
-	clr = DE_STRETCH_FORMAT_PATTERN_XY | DE_STRETCH_FORMAT_PATTERN_Y_MASK |
-		DE_STRETCH_FORMAT_PATTERN_X_MASK |
-		DE_STRETCH_FORMAT_ADDRESSING_MASK |
-		DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK;
+	clr = DE_STRETCH_FORMAT_PATTERN_XY |
+	      DE_STRETCH_FORMAT_PATTERN_Y_MASK |
+	      DE_STRETCH_FORMAT_PATTERN_X_MASK |
+	      DE_STRETCH_FORMAT_ADDRESSING_MASK |
+	      DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK;
 
 	/* DE_STRETCH bpp format need be initialized in setMode routine */
 	write_dpr(accel, DE_STRETCH_FORMAT,
@@ -84,9 +85,9 @@ void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt)
 }
 
 int sm750_hw_fillrect(struct lynx_accel *accel,
-				u32 base, u32 pitch, u32 Bpp,
-				u32 x, u32 y, u32 width, u32 height,
-				u32 color, u32 rop)
+		      u32 base, u32 pitch, u32 Bpp,
+		      u32 x, u32 y, u32 width, u32 height,
+		      u32 color, u32 rop)
 {
 	u32 deCtrl;
 
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index b64dc8a..aa47a16 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -60,15 +60,13 @@ void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
 	poke32(HWC_ADDRESS, 0);
 }
 
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
-						int w, int h)
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h)
 {
 	cursor->w = w;
 	cursor->h = h;
 }
 
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
-						int x, int y)
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y)
 {
 	u32 reg;
 
@@ -77,8 +75,7 @@ void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
 	poke32(HWC_LOCATION, reg);
 }
 
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
-						u32 fg, u32 bg)
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg)
 {
 	u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
 		HWC_COLOR_12_2_RGB565_MASK;
@@ -87,8 +84,8 @@ void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
 	poke32(HWC_COLOR_3, 0xffe0);
 }
 
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
-			u16 rop, const u8 *pcol, const u8 *pmsk)
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
+			     const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
 	u8 color, mask, opr;
@@ -138,8 +135,8 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
 }
 
 
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
-			u16 rop, const u8 *pcol, const u8 *pmsk)
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
+			      const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
 	u8 color, mask;
diff --git a/drivers/staging/speakup/Makefile b/drivers/staging/speakup/Makefile
index c5e43a5..c864ea6 100644
--- a/drivers/staging/speakup/Makefile
+++ b/drivers/staging/speakup/Makefile
@@ -25,6 +25,7 @@
 	kobjects.o \
 	selection.o \
 	serialio.o \
+	spk_ttyio.o \
 	synth.o \
 	thread.o \
 	varhandlers.o
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index d2ad596..82e5de2 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1945,6 +1945,7 @@ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
 		goto oops;
 	if (ch == 8) {
 		u16 wch;
+
 		if (num == 0)
 			return -1;
 		wch = goto_buf[--num];
@@ -2287,6 +2288,7 @@ static int vt_notifier_call(struct notifier_block *nb,
 			speakup_bs(vc);
 		} else {
 			u16 d = param->c;
+
 			speakup_con_write(vc, &d, 1);
 		}
 		break;
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index ba060d0..9cfc814 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -28,11 +28,17 @@ static int timeouts;
 static int spk_serial_out(struct spk_synth *in_synth, const char ch);
 static void spk_serial_send_xchar(char ch);
 static void spk_serial_tiocmset(unsigned int set, unsigned int clear);
+static unsigned char spk_serial_in(void);
+static unsigned char spk_serial_in_nowait(void);
+static void spk_serial_flush_buffer(void);
 
 struct spk_io_ops spk_serial_io_ops = {
 	.synth_out = spk_serial_out,
 	.send_xchar = spk_serial_send_xchar,
 	.tiocmset = spk_serial_tiocmset,
+	.synth_in = spk_serial_in,
+	.synth_in_nowait = spk_serial_in_nowait,
+	.flush_buffer = spk_serial_flush_buffer,
 };
 EXPORT_SYMBOL_GPL(spk_serial_io_ops);
 
@@ -132,8 +138,8 @@ static void start_serial_interrupt(int irq)
 	outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2,
 	     speakup_info.port_tts + UART_MCR);
 	/* Turn on Interrupts */
-	outb(UART_IER_MSI|UART_IER_RLSI|UART_IER_RDI,
-			speakup_info.port_tts + UART_IER);
+	outb(UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI,
+	     speakup_info.port_tts + UART_IER);
 	inb(speakup_info.port_tts + UART_LSR);
 	inb(speakup_info.port_tts + UART_RX);
 	inb(speakup_info.port_tts + UART_IIR);
@@ -156,6 +162,7 @@ static void spk_serial_send_xchar(char ch)
 static void spk_serial_tiocmset(unsigned int set, unsigned int clear)
 {
 	int old = inb(speakup_info.port_tts + UART_MCR);
+
 	outb((old & ~clear) | set, speakup_info.port_tts + UART_MCR);
 }
 
@@ -221,7 +228,8 @@ int spk_wait_for_xmitr(struct spk_synth *in_synth)
 	}
 	while (spk_serial_tx_busy()) {
 		if (--tmout == 0) {
-			pr_warn("%s: timed out (tx busy)\n", in_synth->long_name);
+			pr_warn("%s: timed out (tx busy)\n",
+				in_synth->long_name);
 			timeouts++;
 			return 0;
 		}
@@ -240,7 +248,7 @@ int spk_wait_for_xmitr(struct spk_synth *in_synth)
 	return 1;
 }
 
-unsigned char spk_serial_in(void)
+static unsigned char spk_serial_in(void)
 {
 	int tmout = SPK_SERIAL_TIMEOUT;
 
@@ -253,9 +261,8 @@ unsigned char spk_serial_in(void)
 	}
 	return inb_p(speakup_info.port_tts + UART_RX);
 }
-EXPORT_SYMBOL_GPL(spk_serial_in);
 
-unsigned char spk_serial_in_nowait(void)
+static unsigned char spk_serial_in_nowait(void)
 {
 	unsigned char lsr;
 
@@ -264,7 +271,11 @@ unsigned char spk_serial_in_nowait(void)
 		return 0;
 	return inb_p(speakup_info.port_tts + UART_RX);
 }
-EXPORT_SYMBOL_GPL(spk_serial_in_nowait);
+
+static void spk_serial_flush_buffer(void)
+{
+	/* TODO: flush the UART 16550 buffer */
+}
 
 static int spk_serial_out(struct spk_synth *in_synth, const char ch)
 {
@@ -275,7 +286,8 @@ static int spk_serial_out(struct spk_synth *in_synth, const char ch)
 	return 0;
 }
 
-const char *spk_serial_synth_immediate(struct spk_synth *synth, const char *buff)
+const char *spk_serial_synth_immediate(struct spk_synth *synth,
+				       const char *buff)
 {
 	u_char ch;
 
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 3ad7ff0..89de6ff 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -8,6 +8,8 @@
 #endif
 #include <linux/serial_core.h>
 
+#include "spk_priv.h"
+
 /*
  * this is cut&paste from 8250.h. Get rid of the structure, the definitions
  * and this whole broken driver.
@@ -21,7 +23,7 @@ struct old_serial_port {
 };
 
 /* countdown values for serial timeouts in us */
-#define SPK_SERIAL_TIMEOUT 100000
+#define SPK_SERIAL_TIMEOUT SPK_SYNTH_TIMEOUT
 /* countdown values transmitter/dsr timeouts in us */
 #define SPK_XMITR_TIMEOUT 100000
 /* countdown values cts timeouts in us */
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index de67ffd..0e10404 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -96,13 +96,14 @@ static struct spk_synth synth_acntsa = {
 	.trigger = 50,
 	.jiffies = 30,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
+	.io_ops = &spk_ttyio_ops,
 	.probe = synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -125,7 +126,7 @@ static int synth_probe(struct spk_synth *synth)
 {
 	int failed;
 
-	failed = spk_serial_synth_probe(synth);
+	failed = spk_ttyio_synth_probe(synth);
 	if (failed == 0) {
 		synth->synth_immediate(synth, "\033=R\r");
 		mdelay(100);
@@ -135,9 +136,11 @@ static int synth_probe(struct spk_synth *synth)
 }
 
 module_param_named(ser, synth_acntsa.ser, int, 0444);
+module_param_named(dev, synth_acntsa.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_acntsa.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device, e.g. ttyUSB0, for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_acntsa);
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index cead8b1..2edb56c 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -22,9 +22,9 @@
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/kthread.h>
+#include <linux/serial_reg.h>	/* for UART_MCR* constants */
 
 #include "spk_priv.h"
-#include "serialio.h"
 #include "speakup.h"
 
 #define DRV_VERSION "2.21"
@@ -105,13 +105,14 @@ static struct spk_synth synth_apollo = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -199,9 +200,11 @@ static void do_catch_up(struct spk_synth *synth)
 }
 
 module_param_named(ser, synth_apollo.ser, int, 0444);
+module_param_named(dev, synth_apollo.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_apollo.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device, e.g. ttyUSB0, for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_apollo);
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 6880352..8ae826e 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -20,7 +20,6 @@
  */
 #include "spk_priv.h"
 #include "speakup.h"
-#include "serialio.h"
 
 #define DRV_VERSION "2.11"
 #define SYNTH_CLEAR 0x18 /* flush synth buffer */
@@ -101,13 +100,14 @@ static struct spk_synth synth_audptr = {
 	.trigger = 50,
 	.jiffies = 30,
 	.full = 18000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
+	.io_ops = &spk_ttyio_ops,
 	.probe = synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -128,6 +128,7 @@ static struct spk_synth synth_audptr = {
 
 static void synth_flush(struct spk_synth *synth)
 {
+	synth->io_ops->flush_buffer();
 	synth->io_ops->send_xchar(SYNTH_CLEAR);
 	synth->io_ops->synth_out(synth, PROCSPEECH);
 }
@@ -138,11 +139,11 @@ static void synth_version(struct spk_synth *synth)
 	char synth_id[40] = "";
 
 	synth->synth_immediate(synth, "\x05[Q]");
-	synth_id[test] = spk_serial_in();
+	synth_id[test] = synth->io_ops->synth_in();
 	if (synth_id[test] == 'A') {
 		do {
 			/* read version string from synth */
-			synth_id[++test] = spk_serial_in();
+			synth_id[++test] = synth->io_ops->synth_in();
 		} while (synth_id[test] != '\n' && test < 32);
 		synth_id[++test] = 0x00;
 	}
@@ -154,7 +155,7 @@ static int synth_probe(struct spk_synth *synth)
 {
 	int failed;
 
-	failed = spk_serial_synth_probe(synth);
+	failed = spk_ttyio_synth_probe(synth);
 	if (failed == 0)
 		synth_version(synth);
 	synth->alive = !failed;
@@ -162,9 +163,11 @@ static int synth_probe(struct spk_synth *synth)
 }
 
 module_param_named(ser, synth_audptr.ser, int, 0444);
+module_param_named(dev, synth_audptr.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_audptr.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device, e.g. ttyUSB0, for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_audptr);
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index a972a51..60bcf0d 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -93,13 +93,14 @@ static struct spk_synth synth_bns = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -119,9 +120,11 @@ static struct spk_synth synth_bns = {
 };
 
 module_param_named(ser, synth_bns.ser, int, 0444);
+module_param_named(dev, synth_bns.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_bns.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device, e.g. ttyUSB0, for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_bns);
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index c564bf8..95f4b21 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -24,26 +24,22 @@
 #include <linux/kthread.h>
 
 #include "spk_priv.h"
-#include "serialio.h"
 #include "speakup.h"
 
 #define DRV_VERSION "2.14"
 #define SYNTH_CLEAR 0x03
 #define PROCSPEECH 0x0b
-static unsigned char last_char;
 
-static inline u_char get_last_char(void)
+static volatile unsigned char last_char;
+
+static void read_buff_add(u_char ch)
 {
-	u_char avail = inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR;
-
-	if (avail)
-		last_char = inb_p(speakup_info.port_tts + UART_RX);
-	return last_char;
+	last_char = ch;
 }
 
 static inline bool synth_full(void)
 {
-	return get_last_char() == 0x13;
+	return last_char == 0x13;
 }
 
 static void do_catch_up(struct spk_synth *synth);
@@ -124,18 +120,19 @@ static struct spk_synth synth_decext = {
 	.jiffies = 50,
 	.full = 40000,
 	.flags = SF_DEC,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = do_catch_up,
 	.flush = synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
 	.synth_adjust = NULL,
-	.read_buff_add = NULL,
+	.read_buff_add = read_buff_add,
 	.get_index = NULL,
 	.indexing = {
 		.command = NULL,
@@ -225,13 +222,16 @@ static void do_catch_up(struct spk_synth *synth)
 static void synth_flush(struct spk_synth *synth)
 {
 	in_escape = 0;
+	synth->io_ops->flush_buffer();
 	synth->synth_immediate(synth, "\033P;10z\033\\");
 }
 
 module_param_named(ser, synth_decext.ser, int, 0444);
+module_param_named(dev, synth_decext.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_decext.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device, e.g. ttyUSB0, for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_decext);
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 5d22c3b..7a8df7d 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -84,7 +84,7 @@
 #define	CTRL_last_index		0x0b00	/* get last index spoken */
 #define	CTRL_io_priority	0x0c00	/* change i/o priority */
 #define	CTRL_free_mem		0x0d00	/* get free paragraphs on module */
-#define	CTRL_get_lang		0x0e00	/* return bit mask of loaded languages */
+#define	CTRL_get_lang		0x0e00	/* return bitmask of loaded languages */
 #define	CMD_test		0x2000	/* self-test request */
 #define	TEST_mask		0x0F00	/* isolate test field */
 #define	TEST_null		0x0000	/* no test requested */
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 0cdbd5e..f069954 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -27,7 +27,6 @@
 #include <linux/kthread.h>
 #include "speakup.h"
 #include "spk_priv.h"
-#include "serialio.h"
 
 #define DRV_VERSION "2.20"
 #define SYNTH_CLEAR 0x03
@@ -42,7 +41,7 @@ static inline int synth_full(void)
 static void do_catch_up(struct spk_synth *synth);
 static void synth_flush(struct spk_synth *synth);
 static void read_buff_add(u_char c);
-static unsigned char get_index(void);
+static unsigned char get_index(struct spk_synth *synth);
 
 static int in_escape;
 static int is_flushing;
@@ -125,15 +124,16 @@ static struct spk_synth synth_dectlk = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
 	.default_pitch = ap_defaults,
 	.default_vol = g5_defaults,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = do_catch_up,
 	.flush = synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -163,7 +163,7 @@ static int is_indnum(u_char *ch)
 
 static u_char lastind;
 
-static unsigned char get_index(void)
+static unsigned char get_index(struct spk_synth *synth)
 {
 	u_char rv;
 
@@ -294,13 +294,16 @@ static void synth_flush(struct spk_synth *synth)
 		synth->io_ops->synth_out(synth, ']');
 	in_escape = 0;
 	is_flushing = 1;
+	synth->io_ops->flush_buffer();
 	synth->io_ops->synth_out(synth, SYNTH_CLEAR);
 }
 
 module_param_named(ser, synth_dectlk.ser, int, 0444);
+module_param_named(dev, synth_dectlk.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_dectlk.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device (e.g. ttyUSB0) for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_dectlk);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 3318093..8999e3e 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -138,7 +138,7 @@ static struct spk_synth synth_dtlk = {
 	.is_alive = spk_synth_is_alive_nop,
 	.synth_adjust = NULL,
 	.read_buff_add = NULL,
-	.get_index = spk_serial_in_nowait,
+	.get_index = spk_synth_get_index,
 	.indexing = {
 		.command = "\x01%di",
 		.lowindex = 1,
diff --git a/drivers/staging/speakup/speakup_dtlk.h b/drivers/staging/speakup/speakup_dtlk.h
index 46d885f..51ac0f2 100644
--- a/drivers/staging/speakup/speakup_dtlk.h
+++ b/drivers/staging/speakup/speakup_dtlk.h
@@ -24,11 +24,11 @@
 				 * usec later.
 				 */
 #define TTS_ALMOST_FULL	0x08	/* mask for AF bit: When set to 1,
-					 * indicates that less than 300 bytes
-					 * are available in the TTS input
-					 * buffer. AF is always 0 in the PCM,
-					 * TGN and CVSD modes.
-					 */
+				 * indicates that less than 300 bytes
+				 * are available in the TTS input
+				 * buffer. AF is always 0 in the PCM,
+				 * TGN and CVSD modes.
+				 */
 #define TTS_ALMOST_EMPTY 0x04	/* mask for AE bit: When set to 1,
 				 * indicates that less than 300 bytes
 				 * are remaining in DoubleTalk's input
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 8db7aa3..851953d 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -95,13 +95,14 @@ static struct spk_synth synth_dummy = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -121,9 +122,11 @@ static struct spk_synth synth_dummy = {
 };
 
 module_param_named(ser, synth_dummy.ser, int, 0444);
+module_param_named(dev, synth_dummy.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_dummy.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device (e.g. ttyUSB0) for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_dummy);
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 11275f4..423795f 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -20,7 +20,6 @@
  */
 #include "speakup.h"
 #include "spk_priv.h"
-#include "serialio.h"
 #include "speakup_dtlk.h" /* local header file for LiteTalk values */
 
 #define DRV_VERSION "2.11"
@@ -108,19 +107,20 @@ static struct spk_synth synth_ltlk = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
+	.io_ops = &spk_ttyio_ops,
 	.probe = synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
 	.synth_adjust = NULL,
 	.read_buff_add = NULL,
-	.get_index = spk_serial_in_nowait,
+	.get_index = spk_synth_get_index,
 	.indexing = {
 		.command = "\x01%di",
 		.lowindex = 1,
@@ -141,7 +141,7 @@ static void synth_interrogate(struct spk_synth *synth)
 
 	synth->synth_immediate(synth, "\x18\x01?");
 	for (i = 0; i < 50; i++) {
-		buf[i] = spk_serial_in();
+		buf[i] = synth->io_ops->synth_in();
 		if (i > 2 && buf[i] == 0x7f)
 			break;
 	}
@@ -159,7 +159,7 @@ static int synth_probe(struct spk_synth *synth)
 {
 	int failed = 0;
 
-	failed = spk_serial_synth_probe(synth);
+	failed = spk_ttyio_synth_probe(synth);
 	if (failed == 0)
 		synth_interrogate(synth);
 	synth->alive = !failed;
@@ -167,9 +167,11 @@ static int synth_probe(struct spk_synth *synth)
 }
 
 module_param_named(ser, synth_ltlk.ser, int, 0444);
+module_param_named(dev, synth_ltlk.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_ltlk.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device (e.g. ttyUSB0) for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_ltlk);
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index e454f56..d99daf6 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -36,7 +36,7 @@
 static int softsynth_probe(struct spk_synth *synth);
 static void softsynth_release(void);
 static int softsynth_is_alive(struct spk_synth *synth);
-static unsigned char get_index(void);
+static unsigned char get_index(struct spk_synth *synth);
 
 static struct miscdevice synth_device, synthu_device;
 static int init_pos;
@@ -340,7 +340,7 @@ static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wa
 	return ret;
 }
 
-static unsigned char get_index(void)
+static unsigned char get_index(struct spk_synth *synth)
 {
 	int rv;
 
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index d95c375..9ca21ed 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -20,7 +20,6 @@
  */
 #include "spk_priv.h"
 #include "speakup.h"
-#include "serialio.h"
 
 #define DRV_VERSION "2.11"
 #define SYNTH_CLEAR 0x18
@@ -99,19 +98,20 @@ static struct spk_synth synth_spkout = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
 	.synth_adjust = NULL,
 	.read_buff_add = NULL,
-	.get_index = spk_serial_in_nowait,
+	.get_index = spk_synth_get_index,
 	.indexing = {
 		.command = "\x05[%c",
 		.lowindex = 1,
@@ -126,13 +126,16 @@ static struct spk_synth synth_spkout = {
 
 static void synth_flush(struct spk_synth *synth)
 {
+	synth->io_ops->flush_buffer();
 	synth->io_ops->send_xchar(SYNTH_CLEAR);
 }
 
 module_param_named(ser, synth_spkout.ser, int, 0444);
+module_param_named(dev, synth_spkout.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_spkout.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device (e.g. ttyUSB0) for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_spkout);
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 3f531fb..831ee40 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -92,13 +92,14 @@ static struct spk_synth synth_txprt = {
 	.trigger = 50,
 	.jiffies = 50,
 	.full = 40000,
+	.dev_name = SYNTH_DEFAULT_DEV,
 	.startup = SYNTH_START,
 	.checkval = SYNTH_CHECK,
 	.vars = vars,
-	.io_ops = &spk_serial_io_ops,
-	.probe = spk_serial_synth_probe,
-	.release = spk_serial_release,
-	.synth_immediate = spk_serial_synth_immediate,
+	.io_ops = &spk_ttyio_ops,
+	.probe = spk_ttyio_synth_probe,
+	.release = spk_ttyio_release,
+	.synth_immediate = spk_ttyio_synth_immediate,
 	.catch_up = spk_do_catch_up,
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
@@ -118,9 +119,11 @@ static struct spk_synth synth_txprt = {
 };
 
 module_param_named(ser, synth_txprt.ser, int, 0444);
+module_param_named(dev, synth_txprt.dev_name, charp, S_IRUGO);
 module_param_named(start, synth_txprt.startup, short, 0444);
 
 MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
+MODULE_PARM_DESC(dev, "Set the device (e.g. ttyUSB0) for the synthesizer.");
 MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
 
 module_spk_synth(synth_txprt);
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 995f586..87b6a0a 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -39,13 +39,15 @@
 #endif
 
 #define KT_SPKUP 15
+#define SPK_SYNTH_TIMEOUT 100000 /* in microseconds */
+#define SYNTH_DEFAULT_DEV "ttyS0"
+#define SYNTH_DEFAULT_SER 0
 
 const struct old_serial_port *spk_serial_init(int index);
 void spk_stop_serial_interrupt(void);
 int spk_wait_for_xmitr(struct spk_synth *in_synth);
-unsigned char spk_serial_in(void);
-unsigned char spk_serial_in_nowait(void);
 void spk_serial_release(void);
+void spk_ttyio_release(void);
 
 void synth_buffer_skip_nonlatin1(void);
 u16 synth_buffer_getc(void);
@@ -58,9 +60,12 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
 		      const char *buf, size_t count);
 
 int spk_serial_synth_probe(struct spk_synth *synth);
+int spk_ttyio_synth_probe(struct spk_synth *synth);
 const char *spk_serial_synth_immediate(struct spk_synth *synth, const char *buff);
+const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff);
 void spk_do_catch_up(struct spk_synth *synth);
 void spk_synth_flush(struct spk_synth *synth);
+unsigned char spk_synth_get_index(struct spk_synth *synth);
 int spk_synth_is_alive_nop(struct spk_synth *synth);
 int spk_synth_is_alive_restart(struct spk_synth *synth);
 __printf(1, 2)
@@ -79,5 +84,6 @@ extern struct speakup_info_t speakup_info;
 extern struct var_t synth_time_vars[];
 
 extern struct spk_io_ops spk_serial_io_ops;
+extern struct spk_io_ops spk_ttyio_ops;
 
 #endif
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
new file mode 100644
index 0000000..ed8e96b
--- /dev/null
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -0,0 +1,320 @@
+#include <linux/types.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+
+#include "speakup.h"
+#include "spk_types.h"
+#include "spk_priv.h"
+
+#define DEV_PREFIX_LP "lp"
+
+static const char * const lp_supported[] = { "acntsa", "bns", "dummy",
+	"txprt" };
+
+struct spk_ldisc_data {
+	char buf;
+	struct semaphore sem;
+	bool buf_free;
+};
+
+static struct spk_synth *spk_ttyio_synth;
+static struct tty_struct *speakup_tty;
+
+static int ser_to_dev(int ser, dev_t *dev_no)
+{
+	if (ser < 0 || ser > (255 - 64)) {
+		pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n");
+		return -EINVAL;
+	}
+
+	*dev_no = MKDEV(4, (64 + ser));
+	return 0;
+}
+
+static int get_dev_to_use(struct spk_synth *synth, dev_t *dev_no)
+{
+	/* use ser only when dev is not specified */
+	if (strcmp(synth->dev_name, SYNTH_DEFAULT_DEV) ||
+	    synth->ser == SYNTH_DEFAULT_SER) {
+		/* for /dev/lp* check if synth is supported */
+		if (strncmp(synth->dev_name, DEV_PREFIX_LP,
+		    strlen(DEV_PREFIX_LP)) == 0)
+			if (match_string(lp_supported, ARRAY_SIZE(lp_supported),
+			    synth->name) < 0)  {
+				int i;
+
+				pr_err("speakup: lp* is only supported on:");
+				for (i = 0; i < ARRAY_SIZE(lp_supported); i++)
+					pr_cont(" %s", lp_supported[i]);
+				pr_cont("\n");
+
+				return -ENOTSUPP;
+			}
+
+		return tty_dev_name_to_number(synth->dev_name, dev_no);
+	}
+
+	return ser_to_dev(synth->ser, dev_no);
+}
+
+static int spk_ttyio_ldisc_open(struct tty_struct *tty)
+{
+	struct spk_ldisc_data *ldisc_data;
+
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+	speakup_tty = tty;
+
+	ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
+	if (!ldisc_data) {
+		pr_err("speakup: Failed to allocate ldisc_data.\n");
+		return -ENOMEM;
+	}
+
+	sema_init(&ldisc_data->sem, 0);
+	ldisc_data->buf_free = true;
+	speakup_tty->disc_data = ldisc_data;
+
+	return 0;
+}
+
+static void spk_ttyio_ldisc_close(struct tty_struct *tty)
+{
+	kfree(speakup_tty->disc_data);
+	speakup_tty = NULL;
+}
+
+static int spk_ttyio_receive_buf2(struct tty_struct *tty,
+		const unsigned char *cp, char *fp, int count)
+{
+	struct spk_ldisc_data *ldisc_data = tty->disc_data;
+
+	if (spk_ttyio_synth->read_buff_add) {
+		int i;
+
+		for (i = 0; i < count; i++)
+			spk_ttyio_synth->read_buff_add(cp[i]);
+
+		return count;
+	}
+
+	if (!ldisc_data->buf_free)
+		/* ttyio_in will tty_schedule_flip */
+		return 0;
+
+	/* Make sure the consumer has read buf before we observe
+	 * buf_free == true and overwrite buf */
+	mb();
+
+	ldisc_data->buf = cp[0];
+	ldisc_data->buf_free = false;
+	up(&ldisc_data->sem);
+
+	return 1;
+}
+
+static struct tty_ldisc_ops spk_ttyio_ldisc_ops = {
+	.owner          = THIS_MODULE,
+	.magic          = TTY_LDISC_MAGIC,
+	.name           = "speakup_ldisc",
+	.open           = spk_ttyio_ldisc_open,
+	.close          = spk_ttyio_ldisc_close,
+	.receive_buf2	= spk_ttyio_receive_buf2,
+};
+
+static int spk_ttyio_out(struct spk_synth *in_synth, const char ch);
+static void spk_ttyio_send_xchar(char ch);
+static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear);
+static unsigned char spk_ttyio_in(void);
+static unsigned char spk_ttyio_in_nowait(void);
+static void spk_ttyio_flush_buffer(void);
+
+struct spk_io_ops spk_ttyio_ops = {
+	.synth_out = spk_ttyio_out,
+	.send_xchar = spk_ttyio_send_xchar,
+	.tiocmset = spk_ttyio_tiocmset,
+	.synth_in = spk_ttyio_in,
+	.synth_in_nowait = spk_ttyio_in_nowait,
+	.flush_buffer = spk_ttyio_flush_buffer,
+};
+EXPORT_SYMBOL_GPL(spk_ttyio_ops);
+
+static inline void get_termios(struct tty_struct *tty, struct ktermios *out_termios)
+{
+	down_read(&tty->termios_rwsem);
+	*out_termios = tty->termios;
+	up_read(&tty->termios_rwsem);
+}
+
+static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
+{
+	int ret = 0;
+	struct tty_struct *tty;
+	struct ktermios tmp_termios;
+	dev_t dev;
+
+	ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
+	if (ret) {
+		pr_err("Error registering line discipline.\n");
+		return ret;
+	}
+
+	ret = get_dev_to_use(synth, &dev);
+	if (ret)
+		return ret;
+
+	tty = tty_open_by_driver(dev, NULL, NULL);
+	if (IS_ERR(tty))
+		return PTR_ERR(tty);
+
+	if (tty->ops->open)
+		ret = tty->ops->open(tty, NULL);
+	else
+		ret = -ENODEV;
+
+	if (ret) {
+		tty_unlock(tty);
+		return ret;
+	}
+
+	clear_bit(TTY_HUPPED, &tty->flags);
+	/* ensure hardware flow control is enabled */
+	get_termios(tty, &tmp_termios);
+	if (!(tmp_termios.c_cflag & CRTSCTS)) {
+		tmp_termios.c_cflag |= CRTSCTS;
+		tty_set_termios(tty, &tmp_termios);
+		/*
+		 * Check c_cflag to see if it was updated, as tty_set_termios() may not
+		 * return an error even when no tty bits are changed by the request.
+		 */
+		get_termios(tty, &tmp_termios);
+		if (!(tmp_termios.c_cflag & CRTSCTS))
+			pr_warn("speakup: Failed to set hardware flow control\n");
+	}
+
+	tty_unlock(tty);
+
+	ret = tty_set_ldisc(tty, N_SPEAKUP);
+
+	return ret;
+}
+
+static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
+{
+	if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
+		int ret = speakup_tty->ops->write(speakup_tty, &ch, 1);
+
+		if (ret == 0)
+			/* No room */
+			return 0;
+		if (ret < 0) {
+			pr_warn("%s: I/O error, deactivating speakup\n", in_synth->long_name);
+			/* No synth any more, so nobody will restart TTYs, and we thus
+			 * need to do it ourselves.  Now that there is no synth we can
+			 * let the application flood anyway.
+			 */
+			in_synth->alive = 0;
+			speakup_start_ttys();
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static void spk_ttyio_send_xchar(char ch)
+{
+	speakup_tty->ops->send_xchar(speakup_tty, ch);
+}
+
+static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
+{
+	speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+}
+
+static unsigned char ttyio_in(int timeout)
+{
+	struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
+	char rv;
+
+	if (down_timeout(&ldisc_data->sem, usecs_to_jiffies(timeout)) == -ETIME) {
+		if (timeout)
+			pr_warn("spk_ttyio: timeout (%d)  while waiting for input\n",
+				timeout);
+		return 0xff;
+	}
+
+	rv = ldisc_data->buf;
+	/* Make sure we have read buf before we set buf_free to let
+	 * the producer overwrite it */
+	mb();
+	ldisc_data->buf_free = true;
+	/* Let TTY push more characters */
+	tty_schedule_flip(speakup_tty->port);
+
+	return rv;
+}
+
+static unsigned char spk_ttyio_in(void)
+{
+	return ttyio_in(SPK_SYNTH_TIMEOUT);
+}
+
+static unsigned char spk_ttyio_in_nowait(void)
+{
+	u8 rv = ttyio_in(0);
+
+	return (rv == 0xff) ? 0 : rv;
+}
+
+static void spk_ttyio_flush_buffer(void)
+{
+	if (speakup_tty->ops->flush_buffer)
+		speakup_tty->ops->flush_buffer(speakup_tty);
+}
+
+int spk_ttyio_synth_probe(struct spk_synth *synth)
+{
+	int rv = spk_ttyio_initialise_ldisc(synth);
+
+	if (rv)
+		return rv;
+
+	synth->alive = 1;
+	spk_ttyio_synth = synth;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe);
+
+void spk_ttyio_release(void)
+{
+	if (!speakup_tty)
+		return;
+
+	tty_lock(speakup_tty);
+
+	if (speakup_tty->ops->close)
+		speakup_tty->ops->close(speakup_tty, NULL);
+
+	tty_ldisc_flush(speakup_tty);
+	tty_unlock(speakup_tty);
+	tty_ldisc_release(speakup_tty);
+}
+EXPORT_SYMBOL_GPL(spk_ttyio_release);
+
+const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff)
+{
+	u_char ch;
+
+	while ((ch = *buff)) {
+		if (ch == '\n')
+			ch = synth->procspeech;
+		if (tty_write_room(speakup_tty) < 1 || !synth->io_ops->synth_out(synth, ch))
+			return buff;
+		buff++;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spk_ttyio_synth_immediate);
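
The new spk_ttyio.c above hands single bytes from the line-discipline receive
path (spk_ttyio_receive_buf2()) to readers in ttyio_in() through a one-byte
slot guarded by the buf_free flag, a semaphore, and memory barriers; when the
slot is still occupied, the driver returns 0 and the data is re-delivered
later via tty_schedule_flip(). Below is a minimal, hypothetical user-space
sketch of that handshake, using C11 atomics in place of the kernel's mb()
barriers and a POSIX semaphore in place of the ldisc semaphore; the names and
the retry loop are illustrative only, not part of the driver.

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t data_ready;                /* counterpart of ldisc_data->sem */
static atomic_bool buf_free = true;     /* counterpart of ldisc_data->buf_free */
static char buf;                        /* the single-byte slot */

static bool produce(char ch)            /* role of spk_ttyio_receive_buf2() */
{
	if (!atomic_load(&buf_free))
		return false;           /* consumer has not read the slot yet */
	buf = ch;                       /* write the slot ... */
	atomic_store(&buf_free, false); /* ... then mark it as filled */
	sem_post(&data_ready);
	return true;
}

static char consume(void)               /* role of ttyio_in() */
{
	char ch;

	sem_wait(&data_ready);
	ch = buf;                       /* read the slot ... */
	atomic_store(&buf_free, true);  /* ... then let the producer overwrite it */
	return ch;
}

static void *consumer(void *arg)
{
	(void)arg;
	printf("consumer got '%c'\n", consume());
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&data_ready, 0, 0);
	pthread_create(&t, NULL, consumer, NULL);
	while (!produce('A'))
		;                       /* retry until the slot is free */
	pthread_join(&t, NULL);
	return 0;
}

The sequentially consistent atomic operations play the role of the explicit
mb() calls: the slot is always written before buf_free changes hands in
either direction.
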
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index c156975..22f657d 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -152,6 +152,9 @@ struct spk_io_ops {
 	int (*synth_out)(struct spk_synth *synth, const char ch);
 	void (*send_xchar)(char ch);
 	void (*tiocmset)(unsigned int set, unsigned int clear);
+	unsigned char (*synth_in)(void);
+	unsigned char (*synth_in_nowait)(void);
+	void (*flush_buffer)(void);
 };
 
 struct spk_synth {
@@ -166,6 +169,7 @@ struct spk_synth {
 	int jiffies;
 	int full;
 	int ser;
+	char *dev_name;
 	short flags;
 	short startup;
 	const int checkval; /* for validating a proper synth module */
@@ -182,7 +186,7 @@ struct spk_synth {
 	int (*is_alive)(struct spk_synth *synth);
 	int (*synth_adjust)(struct st_var_header *var);
 	void (*read_buff_add)(u_char);
-	unsigned char (*get_index)(void);
+	unsigned char (*get_index)(struct spk_synth *synth);
 	struct synth_indexing indexing;
 	int alive;
 	struct attribute_group attributes;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 352e9ee..a1ca68c 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -120,10 +120,17 @@ EXPORT_SYMBOL_GPL(spk_do_catch_up);
 
 void spk_synth_flush(struct spk_synth *synth)
 {
+	synth->io_ops->flush_buffer();
 	synth->io_ops->synth_out(synth, synth->clear);
 }
 EXPORT_SYMBOL_GPL(spk_synth_flush);
 
+unsigned char spk_synth_get_index(struct spk_synth *synth)
+{
+	return synth->io_ops->synth_in_nowait();
+}
+EXPORT_SYMBOL_GPL(spk_synth_get_index);
+
 int spk_synth_is_alive_nop(struct spk_synth *synth)
 {
 	synth->alive = 1;
@@ -249,7 +256,7 @@ void spk_reset_index_count(int sc)
 	if (first)
 		first = 0;
 	else
-		synth->get_index();
+		synth->get_index(synth);
 	index_count = 0;
 	sentence_count = sc;
 }
@@ -282,7 +289,7 @@ void synth_insert_next_index(int sent_num)
 
 void spk_get_index_count(int *linecount, int *sentcount)
 {
-	int ind = synth->get_index();
+	int ind = synth->get_index(synth);
 
 	if (ind) {
 		sentence_count = ind % 10;
@@ -438,10 +445,15 @@ int synth_add(struct spk_synth *in_synth)
 		mutex_unlock(&spk_mutex);
 		return -1;
 	}
-	synths[i++] = in_synth;
-	synths[i] = NULL;
+
 	if (in_synth->startup)
 		status = do_synth_init(in_synth);
+
+	if (!status) {
+		synths[i++] = in_synth;
+		synths[i] = NULL;
+	}
+
 	mutex_unlock(&spk_mutex);
 	return status;
 }
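
The synth_add() change above defers publishing the new entry into the
NULL-terminated synths[] array until do_synth_init() has succeeded, so a
failed startup no longer leaves a stale pointer in the list. A small,
hypothetical user-space sketch of the same publish-only-after-init pattern
(names are illustrative, not the driver's):

#include <stdio.h>

#define MAX_ITEMS 16

struct item { const char *name; int ready; };

static struct item *items[MAX_ITEMS + 1];   /* NULL-terminated, like synths[] */

static int init_item(struct item *it)
{
	/* pretend initialization can fail */
	it->ready = (it->name != NULL);
	return it->ready ? 0 : -1;
}

static int add_item(struct item *it)
{
	int i;

	for (i = 0; items[i]; i++)
		;
	if (i >= MAX_ITEMS)
		return -1;

	/* initialize first ... */
	if (init_item(it))
		return -1;

	/* ... and only publish the entry once that succeeded */
	items[i] = it;
	items[i + 1] = NULL;
	return 0;
}

int main(void)
{
	struct item good = { "decext", 0 }, bad = { NULL, 0 };

	printf("add good: %d\n", add_item(&good));
	printf("add bad:  %d\n", add_item(&bad));
	printf("first entry: %s\n", items[0] ? items[0]->name : "(none)");
	return 0;
}
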
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c
index 4a356e5..03a3809 100644
--- a/drivers/staging/typec/fusb302/fusb302.c
+++ b/drivers/staging/typec/fusb302/fusb302.c
@@ -1039,8 +1039,8 @@ static int fusb302_pd_send_message(struct fusb302_chip *chip,
 	}
 	/* packsym tells the FUSB302 chip that the next X bytes are payload */
 	buf[pos++] = FUSB302_TKN_PACKSYM | (len & 0x1F);
-	buf[pos++] = msg->header & 0xFF;
-	buf[pos++] = (msg->header >> 8) & 0xFF;
+	memcpy(&buf[pos], &msg->header, sizeof(msg->header));
+	pos += sizeof(msg->header);
 
 	len -= 2;
 	memcpy(&buf[pos], msg->payload, len);
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index 1146c1c..e0466bf 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -221,7 +221,7 @@
 
 The visorhba driver registers with visorbus as the function driver to
 handle virtual scsi disk devices, specified using the
-SPAR_VHBA_CHANNEL_PROTOCOL_UUID type in the visorbus_register_visor_driver()
+VISOR_VHBA_CHANNEL_UUID type in the visorbus_register_visor_driver()
 call. visorhba uses scsi_add_host() to expose a Linux block device
 (e.g., /sys/block/) in the guest environment for each s-Par virtual device.
 
@@ -240,7 +240,7 @@
 standard udev/systemd environments, as it includes the modules.alias
 definition:
 
-    "visorbus:"+SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR
+    "visorbus:"+VISOR_VHBA_CHANNEL_UUID_STR
 
 i.e.:
 
@@ -252,7 +252,7 @@
 
 The visornic driver registers with visorbus as the function driver to
 handle virtual network devices, specified using the
-SPAR_VNIC_CHANNEL_PROTOCOL_UUID type in the visorbus_register_visor_driver()
+VISOR_VNIC_CHANNEL_UUID type in the visorbus_register_visor_driver()
 call. visornic uses register_netdev() to expose a Linux device of class net
 (e.g., /sys/class/net/) in the guest environment for each s-Par virtual
 device.
@@ -270,7 +270,7 @@
 standard udev/systemd environments, as it includes the modules.alias
 definition:
 
-    "visorbus:"+SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR
+    "visorbus:"+VISOR_VNIC_CHANNEL_UUID_STR
 
 i.e.:
 
@@ -282,7 +282,7 @@
 
 The visorinput driver registers with visorbus as the function driver to
 handle human input devices, specified using the
-SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID and SPAR_MOUSE_CHANNEL_PROTOCOL_UUID
+VISOR_KEYBOARD_CHANNEL_UUID and VISOR_MOUSE_CHANNEL_UUID
 types in the visorbus_register_visor_driver() call. visorinput uses
 input_register_device() to expose devices of class input
 (e.g., /sys/class/input/) for virtual keyboard and virtual mouse devices.
@@ -307,8 +307,8 @@
 standard udev/systemd environments, as it includes the modules.alias
 definition:
 
-    "visorbus:"+SPAR_MOUSE_CHANNEL_PROTOCOL_UUID_STR
-    "visorbus:"+SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID_STR
+    "visorbus:"+VISOR_MOUSE_CHANNEL_UUID_STR
+    "visorbus:"+VISOR_KEYBOARD_CHANNEL_UUID_STR
 
 i.e.:
 
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 057421e..692efcb 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -32,7 +32,7 @@
 #define COVER(v, d) ((d) * DIV_ROUND_UP(v, d))
 #endif
 
-#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+#define VISOR_CHANNEL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
 
 enum channel_serverstate {
 	CHANNELSRV_UNINITIALIZED = 0,	/* channel is in an undefined state */
@@ -59,10 +59,10 @@ enum channel_clientstate {
 				/* access channel anytime */
 };
 
-#define SPAR_CHANNEL_SERVER_READY(ch) \
+#define VISOR_CHANNEL_SERVER_READY(ch) \
 	(readl(&(ch)->srv_state) == CHANNELSRV_READY)
 
-#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
+#define VISOR_VALID_CHANNELCLI_TRANSITION(o, n) \
 	(((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
 	  (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \
 	  (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \
@@ -80,33 +80,33 @@ enum channel_clientstate {
 	  (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \
 	 ? (1) : (0))
 
-/* Values for ULTRA_CHANNEL_PROTOCOL.CliErrorBoot: */
+/* Values for VISOR_CHANNEL_PROTOCOL.CliErrorBoot: */
 /* throttling invalid boot channel statetransition error due to client
  * disabled
  */
-#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
+#define VISOR_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
 
 /* throttling invalid boot channel statetransition error due to client
  * not attached
  */
-#define ULTRA_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
+#define VISOR_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
 
 /* throttling invalid boot channel statetransition error due to busy channel */
-#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
+#define VISOR_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
 
-/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so
+/* Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so
  * that windows guest can look at the FeatureFlags in the io channel,
  * and configure the windows driver to use interrupts or not based on
  * this setting.  This flag is set in uislib after the
- * ULTRA_VHBA_init_channel is called.  All feature bits for all
+ * VISOR_VHBA_init_channel is called.  All feature bits for all
  * channels should be defined here.  The io channel feature bits are
  * defined right here
  */
-#define ULTRA_IO_DRIVER_ENABLES_INTS (0x1ULL << 1)
-#define ULTRA_IO_CHANNEL_IS_POLLING (0x1ULL << 3)
-#define ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS (0x1ULL << 4)
-#define ULTRA_IO_DRIVER_DISABLES_INTS (0x1ULL << 5)
-#define ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
+#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
+#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
+#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
+#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
+#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
 
 /* Common Channel Header */
 struct channel_header {
@@ -156,7 +156,7 @@ struct channel_header {
 	u8 recover_channel;
 } __packed;
 
-#define ULTRA_CHANNEL_ENABLE_INTS (0x1ULL << 0)
+#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
 
 /* Subheader for the Signal Type variation of the Common Channel */
 struct signal_queue_header {
@@ -204,12 +204,12 @@ struct signal_queue_header {
  * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
  */
 static inline int
-spar_check_channel(struct channel_header *ch,
-		   uuid_le expected_uuid,
-		   char *chname,
-		   u64 expected_min_bytes,
-		   u32 expected_version,
-		   u64 expected_signature)
+visor_check_channel(struct channel_header *ch,
+		    uuid_le expected_uuid,
+		    char *chname,
+		    u64 expected_min_bytes,
+		    u32 expected_version,
+		    u64 expected_signature)
 {
 	if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
 		/* caller wants us to verify type GUID */
@@ -254,27 +254,25 @@ spar_check_channel(struct channel_header *ch,
  */
 
 /* {414815ed-c58c-11da-95a9-00e08161165f} */
-#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
+#define VISOR_VHBA_CHANNEL_UUID \
 	UUID_LE(0x414815ed, 0xc58c, 0x11da, \
 		0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le spar_vhba_channel_protocol_uuid =
-	SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
-#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
+static const uuid_le visor_vhba_channel_uuid = VISOR_VHBA_CHANNEL_UUID;
+#define VISOR_VHBA_CHANNEL_UUID_STR \
 	"414815ed-c58c-11da-95a9-00e08161165f"
 
 /* {8cd5994d-c58e-11da-95a9-00e08161165f} */
-#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
+#define VISOR_VNIC_CHANNEL_UUID \
 	UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
 		0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le spar_vnic_channel_protocol_uuid =
-	SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
-#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
+static const uuid_le visor_vnic_channel_uuid = VISOR_VNIC_CHANNEL_UUID;
+#define VISOR_VNIC_CHANNEL_UUID_STR \
 	"8cd5994d-c58e-11da-95a9-00e08161165f"
 
 /* {72120008-4AAB-11DC-8530-444553544200} */
-#define SPAR_SIOVM_UUID \
+#define VISOR_SIOVM_UUID \
 	UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
 		0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
-static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
+static const uuid_le visor_siovm_uuid = VISOR_SIOVM_UUID;
 
 #endif
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 9bde848..c7cb3fb 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -34,10 +34,9 @@
 #include <linux/dma-direction.h>
 #include "channel.h"
 
-#define ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
-#define ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
-#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
-	ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define VISOR_VHBA_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
+#define VISOR_VNIC_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
+#define VISOR_VSWITCH_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
 
 /*
  * Must increment these whenever you insert or delete fields within this channel
@@ -46,21 +45,21 @@
  * usually add fields to the END of the channel struct without needing to
  * increment this.
  */
-#define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
-#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
-#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
+#define VISOR_VHBA_CHANNEL_VERSIONID 2
+#define VISOR_VNIC_CHANNEL_VERSIONID 2
+#define VISOR_VSWITCH_CHANNEL_VERSIONID 1
 
-#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \
-	(spar_check_channel(ch, spar_vhba_channel_protocol_uuid, \
-			    "vhba", MIN_IO_CHANNEL_SIZE,	\
-			    ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
-			    ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE))
+#define VISOR_VHBA_CHANNEL_OK_CLIENT(ch) \
+	(visor_check_channel(ch, visor_vhba_channel_uuid, \
+			     "vhba", MIN_IO_CHANNEL_SIZE, \
+			     VISOR_VHBA_CHANNEL_VERSIONID, \
+			     VISOR_VHBA_CHANNEL_SIGNATURE))
 
-#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \
-	(spar_check_channel(ch, spar_vnic_channel_protocol_uuid, \
-			    "vnic", MIN_IO_CHANNEL_SIZE,	\
-			    ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
-			    ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE))
+#define VISOR_VNIC_CHANNEL_OK_CLIENT(ch) \
+	(visor_check_channel(ch, visor_vnic_channel_uuid, \
+			     "vnic", MIN_IO_CHANNEL_SIZE, \
+			     VISOR_VNIC_CHANNEL_VERSIONID, \
+			     VISOR_VNIC_CHANNEL_SIGNATURE))
 
 /*
  * Everything necessary to handle SCSI & NIC traffic between Guest Partition and
@@ -530,7 +529,7 @@ struct iochannel_vnic {
  * this header there is a large region of memory which contains the command and
  * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
  */
-struct spar_io_channel_protocol {
+struct visor_io_channel {
 	struct channel_header channel_header;
 	struct signal_queue_header cmd_q;
 	struct signal_queue_header rsp_q;
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index 274f724..ed045ef 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -19,12 +19,11 @@
 #include "channel.h"
 
 /* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
+#define VISOR_CONTROLVM_CHANNEL_UUID \
 	UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
 		0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
 
-#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
-	ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define VISOR_CONTROLVM_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
 #define CONTROLVM_MESSAGE_MAX 64
 
 /* Must increment this whenever you insert or delete fields within
@@ -33,15 +32,15 @@
  * software.  Note that you can usually add fields to the END of the
  * channel struct withOUT needing to increment this.
  */
-#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
+#define VISOR_CONTROLVM_CHANNEL_VERSIONID 1
 
-#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
-	(spar_check_channel(ch, \
-			    SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID, \
-			    "controlvm", \
-			    sizeof(struct spar_controlvm_channel_protocol), \
-			    ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
-			    ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE))
+#define VISOR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
+	(visor_check_channel(ch, \
+			     VISOR_CONTROLVM_CHANNEL_UUID, \
+			     "controlvm", \
+			     sizeof(struct visor_controlvm_channel), \
+			     VISOR_CONTROLVM_CHANNEL_VERSIONID, \
+			     VISOR_CONTROLVM_CHANNEL_SIGNATURE))
 
 /* Defines for various channel queues */
 #define CONTROLVM_QUEUE_REQUEST	 0
@@ -52,7 +51,7 @@
 /* Max num of messages stored during IOVM creation to be reused after crash */
 #define CONTROLVM_CRASHMSG_MAX 2
 
-struct spar_segment_state  {
+struct visor_segment_state  {
 	/* Bit 0: May enter other states */
 	u16 enabled:1;
 	/* Bit 1: Assigned to active partition */
@@ -76,15 +75,15 @@ struct spar_segment_state  {
  */
 } __packed;
 
-static const struct spar_segment_state segment_state_running = {
+static const struct visor_segment_state segment_state_running = {
 	1, 1, 1, 0, 1, 1, 1, 1
 };
 
-static const struct spar_segment_state segment_state_paused = {
+static const struct visor_segment_state segment_state_paused = {
 	1, 1, 1, 0, 1, 1, 1, 0
 };
 
-static const struct spar_segment_state segment_state_standby = {
+static const struct visor_segment_state segment_state_standby = {
 	1, 1, 0, 0, 1, 1, 1, 0
 };
 
@@ -149,7 +148,7 @@ struct irq_info {
 	u8 reserved[3];	/* Natural alignment purposes */
 } __packed;
 
-struct efi_spar_indication  {
+struct efi_visor_indication  {
 	u64 boot_to_fw_ui:1;		/* Bit 0: Stop in uefi ui */
 	u64 clear_nvram:1;		/* Bit 1: Clear NVRAM */
 	u64 clear_cmos:1;		/* Bit 2: Clear CMOS */
@@ -158,9 +157,9 @@ struct efi_spar_indication  {
 	u64 reserved:60;		/* Natural alignment */
 } __packed;
 
-enum ultra_chipset_feature {
-	ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
-	ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
+enum visor_chipset_feature {
+	VISOR_CHIPSET_FEATURE_REPLY = 0x00000001,
+	VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002,
 };
 
 /* This is the common structure that is at the beginning of every
@@ -298,13 +297,13 @@ struct controlvm_message_packet  {
 			/* for CONTROLVM_DEVICE_RECONFIGURE */
 		struct  {
 			u32 bus_no;
-			struct spar_segment_state state;
+			struct visor_segment_state state;
 			u8 reserved[2];	/* Natural alignment purposes */
 		} __packed bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
 		struct  {
 			u32 bus_no;
 			u32 dev_no;
-			struct spar_segment_state state;
+			struct visor_segment_state state;
 			struct  {
 				/* =1 if message is for a physical device */
 				u32 phys_device:1;
@@ -317,7 +316,7 @@ struct controlvm_message_packet  {
 		struct  {
 			u32 bus_no;
 			u32 dev_no;
-			struct spar_segment_state state;
+			struct visor_segment_state state;
 			u8 reserved[6];	/* Natural alignment purposes */
 		} __packed device_change_state_event;
 			/* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
@@ -326,7 +325,7 @@ struct controlvm_message_packet  {
 			u32 bus_count;
 			/* indicates the max number of switches */
 			u32 switch_count;
-			enum ultra_chipset_feature features;
+			enum visor_chipset_feature features;
 			u32 platform_number;	/* Platform Number */
 		} __packed init_chipset;	/* for CONTROLVM_CHIPSET_INIT */
 		struct  {
@@ -349,7 +348,7 @@ struct controlvm_message {
 	struct controlvm_message_packet cmd;
 } __packed;
 
-struct spar_controlvm_channel_protocol {
+struct visor_controlvm_channel {
 	struct channel_header header;
 	u64 gp_controlvm;	/* guest phys addr of this channel */
 	u64 gp_partition_tables;/* guest phys addr of partition tables */
@@ -371,7 +370,7 @@ struct spar_controlvm_channel_protocol {
 	u32 message_count;		/* CONTROLVM_MESSAGE_MAX */
 	u64 gp_smbios_table;		/* guest phys addr of SMBIOS tables */
 	u64 gp_physical_smbios_table;	/* guest phys addr of SMBIOS table  */
-	/* ULTRA_MAX_GUESTS_PER_SERVICE */
+	/* VISOR_MAX_GUESTS_PER_SERVICE */
 	char gp_reserved[2688];
 
 	/* guest physical address of EFI firmware image base  */
@@ -402,11 +401,10 @@ struct spar_controlvm_channel_protocol {
 	u32 installation_text_id;	/* Id of string to display */
 	/* Number of remaining installation  steps (for progress bars) */
 	u16 installation_remaining_steps;
-	/* ULTRA_TOOL_ACTIONS Installation Action field */
+	/* VISOR_TOOL_ACTIONS Installation Action field */
 	u8 tool_action;
 	u8 reserved;		/* alignment */
-	struct efi_spar_indication efi_spar_ind;
-	struct efi_spar_indication efi_spar_ind_supported;
+	struct efi_visor_indication efi_visor_ind;
 	u32 sp_reserved;
 	/* Force signals to begin on 128-byte cache line */
 	u8 reserved2[28];
@@ -444,7 +442,7 @@ struct spar_controlvm_channel_protocol {
  * of total_length should equal PayloadBytes. The format of the strings at
  * PayloadVmOffset will take different forms depending on the message.
  */
-struct spar_controlvm_parameters_header {
+struct visor_controlvm_parameters_header {
 	u32 total_length;
 	u32 header_length;
 	u32 connection_offset;
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index f0ef5ec..01d7d51 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -27,13 +27,12 @@
 #include "channel.h"
 
 /* {193b331b-c58f-11da-95a9-00e08161165f} */
-#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \
+#define VISOR_VBUS_CHANNEL_UUID \
 	UUID_LE(0x193b331b, 0xc58f, 0x11da, \
 		0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
-static const uuid_le spar_vbus_channel_protocol_uuid =
-	SPAR_VBUS_CHANNEL_PROTOCOL_UUID;
+static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID;
 
-#define SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE ULTRA_CHANNEL_PROTOCOL_SIGNATURE
+#define VISOR_VBUS_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE
 
 /* Must increment this whenever you insert or delete fields within this channel
  * struct.  Also increment whenever you change the meaning of fields within this
@@ -41,7 +40,7 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
  * usually add fields to the END of the channel struct withOUT needing to
  * increment this.
  */
-#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
+#define VISOR_VBUS_CHANNEL_VERSIONID 1
 
 /*
  * An array of this struct is present in the channel area for each vbus.
@@ -49,16 +48,16 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
  * It is filled in by the client side to provide info about the device
  * and driver from the client's perspective.
  */
-struct ultra_vbus_deviceinfo {
+struct visor_vbus_deviceinfo {
 	u8 devtype[16];		/* short string identifying the device type */
 	u8 drvname[16];		/* driver .sys file name */
 	u8 infostrs[96];	/* kernel version */
 	u8 reserved[128];	/* pad size to 256 bytes */
 } __packed;
 
-struct spar_vbus_headerinfo {
+struct visor_vbus_headerinfo {
 	u32 struct_bytes;	/* size of this struct in bytes */
-	u32 device_info_struct_bytes;	/* sizeof(ULTRA_VBUS_DEVICEINFO) */
+	u32 device_info_struct_bytes;	/* sizeof(VISOR_VBUS_DEVICEINFO) */
 	u32 dev_info_count;	/* num of items in DevInfo member */
 	/* (this is the allocated size) */
 	u32 chp_info_offset;	/* byte offset from beginning of this struct */
@@ -70,15 +69,15 @@ struct spar_vbus_headerinfo {
 	u8 reserved[104];
 } __packed;
 
-struct spar_vbus_channel_protocol {
+struct visor_vbus_channel {
 	struct channel_header channel_header;	/* initialized by server */
-	struct spar_vbus_headerinfo hdr_info;	/* initialized by server */
+	struct visor_vbus_headerinfo hdr_info;	/* initialized by server */
 	/* the remainder of this channel is filled in by the client */
-	struct ultra_vbus_deviceinfo chp_info;
+	struct visor_vbus_deviceinfo chp_info;
 	/* describes client chipset device and driver */
-	struct ultra_vbus_deviceinfo bus_info;
+	struct visor_vbus_deviceinfo bus_info;
 	/* describes client bus device and driver */
-	struct ultra_vbus_deviceinfo dev_info[0];
+	struct visor_vbus_deviceinfo dev_info[0];
 	/* describes client device and driver for each device on the bus */
 } __packed;
 
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index a692561..1c785dd 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -64,9 +64,9 @@ static const struct attribute_group *visorbus_dev_groups[] = {
 };
 
 /* filled in with info about parent chipset driver when we register with it */
-static struct ultra_vbus_deviceinfo chipset_driverinfo;
+static struct visor_vbus_deviceinfo chipset_driverinfo;
 /* filled in with info about this driver, wrt it servicing client busses */
-static struct ultra_vbus_deviceinfo clientbus_driverinfo;
+static struct visor_vbus_deviceinfo clientbus_driverinfo;
 
 /* list of visor_device structs, linked via .list_all */
 static LIST_HEAD(list_all_bus_instances);
@@ -356,9 +356,9 @@ static const struct attribute_group *visorbus_groups[] = {
  *  /sys/kernel/debug/visorbus/visorbus<n>.
  */
 /*
- * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
+ * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo
  *                               and write it to a seq_file
- * @devinfo: the struct ultra_vbus_deviceinfo to format
+ * @devinfo: the struct visor_vbus_deviceinfo to format
  * @seq: seq_file to write to
  * @devix: the device index to be included in the output data, or -1 if no
  *         device index is to be included
@@ -366,7 +366,7 @@ static const struct attribute_group *visorbus_groups[] = {
  * Reads @devInfo, and writes it in human-readable notation to @seq.
  */
 static void
-vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
+vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
 			  struct seq_file *seq, int devix)
 {
 	if (!isprint(devinfo->devtype[0]))
@@ -397,7 +397,7 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
 
 	int i;
 	unsigned long off;
-	struct ultra_vbus_deviceinfo dev_info;
+	struct visor_vbus_deviceinfo dev_info;
 
 	if (!channel)
 		return 0;
@@ -407,16 +407,14 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
 		   ((vdev->name) ? (char *)(vdev->name) : ""),
 		   vdev->chipset_bus_no);
 	if (visorchannel_read(channel,
-			      offsetof(struct spar_vbus_channel_protocol,
-				       chp_info),
+			      offsetof(struct visor_vbus_channel, chp_info),
 			      &dev_info, sizeof(dev_info)) >= 0)
 		vbuschannel_print_devinfo(&dev_info, seq, -1);
 	if (visorchannel_read(channel,
-			      offsetof(struct spar_vbus_channel_protocol,
-				       bus_info),
+			      offsetof(struct visor_vbus_channel, bus_info),
 			      &dev_info, sizeof(dev_info)) >= 0)
 		vbuschannel_print_devinfo(&dev_info, seq, -1);
-	off = offsetof(struct spar_vbus_channel_protocol, dev_info);
+	off = offsetof(struct visor_vbus_channel, dev_info);
 	i = 0;
 	while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
 		if (visorchannel_read(channel, off, &dev_info,
@@ -684,16 +682,16 @@ remove_visor_device(struct visor_device *dev)
 
 static int
 get_vbus_header_info(struct visorchannel *chan,
-		     struct spar_vbus_headerinfo *hdr_info)
+		     struct visor_vbus_headerinfo *hdr_info)
 {
 	int err;
 
-	if (!spar_check_channel(visorchannel_get_header(chan),
-				spar_vbus_channel_protocol_uuid,
-				"vbus",
-				sizeof(struct spar_vbus_channel_protocol),
-				SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID,
-				SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE))
+	if (!visor_check_channel(visorchannel_get_header(chan),
+				 visor_vbus_channel_uuid,
+				 "vbus",
+				 sizeof(struct visor_vbus_channel),
+				 VISOR_VBUS_CHANNEL_VERSIONID,
+				 VISOR_VBUS_CHANNEL_SIGNATURE))
 		return -EINVAL;
 
 	err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
@@ -701,11 +699,11 @@ get_vbus_header_info(struct visorchannel *chan,
 	if (err < 0)
 		return err;
 
-	if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo))
+	if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
 		return -EINVAL;
 
 	if (hdr_info->device_info_struct_bytes <
-	    sizeof(struct ultra_vbus_deviceinfo))
+	    sizeof(struct visor_vbus_deviceinfo))
 		return -EINVAL;
 
 	return 0;
@@ -713,7 +711,7 @@ get_vbus_header_info(struct visorchannel *chan,
 
 /*
  * write_vbus_chp_info() - write the contents of <info> to the struct
- *                         spar_vbus_channel_protocol.chp_info
+ *                         visor_vbus_channel.chp_info
  * @chan:     identifies the s-Par channel that will be updated
  * @hdr_info: used to find appropriate channel offset to write data
  * @info:     contains the information to write
@@ -726,8 +724,8 @@ get_vbus_header_info(struct visorchannel *chan,
  */
 static void
 write_vbus_chp_info(struct visorchannel *chan,
-		    struct spar_vbus_headerinfo *hdr_info,
-		    struct ultra_vbus_deviceinfo *info)
+		    struct visor_vbus_headerinfo *hdr_info,
+		    struct visor_vbus_deviceinfo *info)
 {
 	int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
 
@@ -739,7 +737,7 @@ write_vbus_chp_info(struct visorchannel *chan,
 
 /*
  * write_vbus_bus_info() - write the contents of <info> to the struct
- *                         spar_vbus_channel_protocol.bus_info
+ *                         visor_vbus_channel.bus_info
  * @chan:     identifies the s-Par channel that will be updated
  * @hdr_info: used to find appropriate channel offset to write data
  * @info:     contains the information to write
@@ -752,8 +750,8 @@ write_vbus_chp_info(struct visorchannel *chan,
  */
 static void
 write_vbus_bus_info(struct visorchannel *chan,
-		    struct spar_vbus_headerinfo *hdr_info,
-		    struct ultra_vbus_deviceinfo *info)
+		    struct visor_vbus_headerinfo *hdr_info,
+		    struct visor_vbus_deviceinfo *info)
 {
 	int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
 
@@ -765,7 +763,7 @@ write_vbus_bus_info(struct visorchannel *chan,
 
 /*
  * write_vbus_dev_info() - write the contents of <info> to the struct
- *                         spar_vbus_channel_protocol.dev_info[<devix>]
+ *                         visor_vbus_channel.dev_info[<devix>]
  * @chan:     identifies the s-Par channel that will be updated
  * @hdr_info: used to find appropriate channel offset to write data
  * @info:     contains the information to write
@@ -779,8 +777,8 @@ write_vbus_bus_info(struct visorchannel *chan,
  */
 static void
 write_vbus_dev_info(struct visorchannel *chan,
-		    struct spar_vbus_headerinfo *hdr_info,
-		    struct ultra_vbus_deviceinfo *info, unsigned int devix)
+		    struct visor_vbus_headerinfo *hdr_info,
+		    struct visor_vbus_deviceinfo *info, unsigned int devix)
 {
 	int off =
 	    (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
@@ -793,10 +791,10 @@ write_vbus_dev_info(struct visorchannel *chan,
 }
 
 static void bus_device_info_init(
-		struct ultra_vbus_deviceinfo *bus_device_info_ptr,
+		struct visor_vbus_deviceinfo *bus_device_info_ptr,
 		const char *dev_type, const char *drv_name)
 {
-	memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
+	memset(bus_device_info_ptr, 0, sizeof(struct visor_vbus_deviceinfo));
 	snprintf(bus_device_info_ptr->devtype,
 		 sizeof(bus_device_info_ptr->devtype),
 		 "%s", (dev_type) ? dev_type : "unknownType");
@@ -823,9 +821,9 @@ fix_vbus_dev_info(struct visor_device *visordev)
 	struct visor_driver *visordrv;
 	u32 bus_no = visordev->chipset_bus_no;
 	u32 dev_no = visordev->chipset_dev_no;
-	struct ultra_vbus_deviceinfo dev_info;
+	struct visor_vbus_deviceinfo dev_info;
 	const char *chan_type_name = NULL;
-	struct spar_vbus_headerinfo *hdr_info;
+	struct visor_vbus_headerinfo *hdr_info;
 
 	if (!visordev->device.driver)
 		return;
@@ -833,7 +831,7 @@ fix_vbus_dev_info(struct visor_device *visordev)
 	bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
 	if (!bdev)
 		return;
-	hdr_info = (struct spar_vbus_headerinfo *)bdev->vbus_hdr_info;
+	hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
 	if (!hdr_info)
 		return;
 	visordrv = to_visor_driver(visordev->device.driver);
@@ -981,18 +979,18 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
 EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
 
 /*
- * create_bus_instance() - create a device instance for the visor bus itself
+ * visorbus_create_instance() - create a device instance for the visorbus itself
  * @dev: struct visor_device indicating the bus instance
  *
  * Return: 0 for success, otherwise negative errno value indicating reason for
  *         failure
  */
 static int
-create_bus_instance(struct visor_device *dev)
+visorbus_create_instance(struct visor_device *dev)
 {
 	int id = dev->chipset_bus_no;
 	int err;
-	struct spar_vbus_headerinfo *hdr_info;
+	struct visor_vbus_headerinfo *hdr_info;
 
 	hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
 	if (!hdr_info)
@@ -1032,16 +1030,16 @@ create_bus_instance(struct visor_device *dev)
 err_debugfs_dir:
 	debugfs_remove_recursive(dev->debugfs_dir);
 	kfree(hdr_info);
-	dev_err(&dev->device, "create_bus_instance failed: %d\n", err);
+	dev_err(&dev->device, "visorbus_create_instance failed: %d\n", err);
 	return err;
 }
 
 /*
- * remove_bus_instance() - remove a device instance for the visor bus itself
+ * visorbus_remove_instance() - remove a device instance for the visorbus itself
  * @dev: struct visor_device identifying the bus to remove
  */
 static void
-remove_bus_instance(struct visor_device *dev)
+visorbus_remove_instance(struct visor_device *dev)
 {
 	/*
 	 * Note that this will result in the release method for
@@ -1061,7 +1059,7 @@ remove_bus_instance(struct visor_device *dev)
 }
 
 /*
- * remove_all_visor_devices() - remove all child visor bus device instances
+ * remove_all_visor_devices() - remove all child visorbus device instances
  */
 static void
 remove_all_visor_devices(void)
@@ -1077,29 +1075,29 @@ remove_all_visor_devices(void)
 }
 
 int
-chipset_bus_create(struct visor_device *dev)
+visorchipset_bus_create(struct visor_device *dev)
 {
 	int err;
 
-	err = create_bus_instance(dev);
+	err = visorbus_create_instance(dev);
 
 	if (err < 0)
 		return err;
 
-	bus_create_response(dev, err);
+	visorbus_create_response(dev, err);
 
 	return 0;
 }
 
 void
-chipset_bus_destroy(struct visor_device *dev)
+visorchipset_bus_destroy(struct visor_device *dev)
 {
-	remove_bus_instance(dev);
-	bus_destroy_response(dev, 0);
+	visorbus_remove_instance(dev);
+	visorbus_destroy_response(dev, 0);
 }
 
 int
-chipset_device_create(struct visor_device *dev_info)
+visorchipset_device_create(struct visor_device *dev_info)
 {
 	int err;
 
@@ -1107,17 +1105,17 @@ chipset_device_create(struct visor_device *dev_info)
 	if (err < 0)
 		return err;
 
-	device_create_response(dev_info, err);
+	visorbus_device_create_response(dev_info, err);
 
 	return 0;
 }
 
 void
-chipset_device_destroy(struct visor_device *dev_info)
+visorchipset_device_destroy(struct visor_device *dev_info)
 {
 	remove_visor_device(dev_info);
 
-	device_destroy_response(dev_info, 0);
+	visorbus_device_destroy_response(dev_info, 0);
 }
 
 /*
@@ -1137,7 +1135,7 @@ pause_state_change_complete(struct visor_device *dev, int status)
 
 	dev->pausing = false;
 
-	device_pause_response(dev, status);
+	visorbus_device_pause_response(dev, status);
 }
 
 /*
@@ -1162,12 +1160,12 @@ resume_state_change_complete(struct visor_device *dev, int status)
 	 * which will presumably want to send some sort of response to
 	 * the initiator.
 	 */
-	device_resume_response(dev, status);
+	visorbus_device_resume_response(dev, status);
 }
 
 /*
- * initiate_chipset_device_pause_resume() - start a pause or resume operation
- *                                          for a visor device
+ * visorchipset_initiate_device_pause_resume() - start a pause or resume
+ *                                               operation for a visor device
  * @dev: struct visor_device identifying the device being paused or resumed
  * @is_pause: true to indicate pause operation, false to indicate resume
  *
@@ -1177,7 +1175,8 @@ resume_state_change_complete(struct visor_device *dev, int status)
  * resume_state_change_complete().
  */
 static int
-initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
+visorchipset_initiate_device_pause_resume(struct visor_device *dev,
+					  bool is_pause)
 {
 	int err;
 	struct visor_driver *drv = NULL;
@@ -1211,7 +1210,7 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
 }
 
 /**
- * chipset_device_pause() - start a pause operation for a visor device
+ * visorchipset_device_pause() - start a pause operation for a visor device
  * @dev_info: struct visor_device identifying the device being paused
  *
  * Tell the subordinate function driver for a specific device to pause
@@ -1219,11 +1218,11 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
  * via a callback function; see pause_state_change_complete().
  */
 int
-chipset_device_pause(struct visor_device *dev_info)
+visorchipset_device_pause(struct visor_device *dev_info)
 {
 	int err;
 
-	err = initiate_chipset_device_pause_resume(dev_info, true);
+	err = visorchipset_initiate_device_pause_resume(dev_info, true);
 
 	if (err < 0) {
 		dev_info->pausing = false;
@@ -1234,7 +1233,7 @@ chipset_device_pause(struct visor_device *dev_info)
 }
 
 /**
- * chipset_device_resume() - start a resume operation for a visor device
+ * visorchipset_device_resume() - start a resume operation for a visor device
  * @dev_info: struct visor_device identifying the device being resumed
  *
  * Tell the subordinate function driver for a specific device to resume
@@ -1242,11 +1241,11 @@ chipset_device_pause(struct visor_device *dev_info)
  * via a callback function; see resume_state_change_complete().
  */
 int
-chipset_device_resume(struct visor_device *dev_info)
+visorchipset_device_resume(struct visor_device *dev_info)
 {
 	int err;
 
-	err = initiate_chipset_device_pause_resume(dev_info, false);
+	err = visorchipset_initiate_device_pause_resume(dev_info, false);
 
 	if (err < 0) {
 		dev_info->resuming = false;
@@ -1289,7 +1288,7 @@ visorbus_exit(void)
 		struct visor_device *dev = list_entry(listentry,
 						      struct visor_device,
 						      list_all);
-		remove_bus_instance(dev);
+		visorbus_remove_instance(dev);
 	}
 
 	bus_unregister(&visorbus_type);
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 9f030b1..98a5af1 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -27,19 +27,19 @@
  * command line
  */
 
-int chipset_bus_create(struct visor_device *bus_info);
-void chipset_bus_destroy(struct visor_device *bus_info);
-int chipset_device_create(struct visor_device *dev_info);
-void chipset_device_destroy(struct visor_device *dev_info);
-int chipset_device_pause(struct visor_device *dev_info);
-int chipset_device_resume(struct visor_device *dev_info);
+int visorchipset_bus_create(struct visor_device *bus_info);
+void visorchipset_bus_destroy(struct visor_device *bus_info);
+int visorchipset_device_create(struct visor_device *dev_info);
+void visorchipset_device_destroy(struct visor_device *dev_info);
+int visorchipset_device_pause(struct visor_device *dev_info);
+int visorchipset_device_resume(struct visor_device *dev_info);
 
-void bus_create_response(struct visor_device *p, int response);
-void bus_destroy_response(struct visor_device *p, int response);
-void device_create_response(struct visor_device *p, int response);
-void device_destroy_response(struct visor_device *p, int response);
-void device_resume_response(struct visor_device *p, int response);
-void device_pause_response(struct visor_device *p, int response);
+void visorbus_create_response(struct visor_device *p, int response);
+void visorbus_destroy_response(struct visor_device *p, int response);
+void visorbus_device_create_response(struct visor_device *p, int response);
+void visorbus_device_destroy_response(struct visor_device *p, int response);
+void visorbus_device_resume_response(struct visor_device *p, int response);
+void visorbus_device_pause_response(struct visor_device *p, int response);
 
 int visorbus_init(void);
 void visorbus_exit(void);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 9e1cea2..6885c2c 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -28,11 +28,11 @@
 
 #define MYDRVNAME "visorchannel"
 
-#define SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID \
+#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
 	UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
 		0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
 
-static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID;
+static const uuid_le visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;
 
 struct visorchannel {
 	u64 physaddr;
@@ -415,7 +415,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
 	 * release later on.
 	 */
 	channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
-	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
+	if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
 		/* we only care about errors if this is not the video channel */
 		goto err_destroy_channel;
 
@@ -445,7 +445,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
 	channel->mapped = NULL;
 	channel->requested = request_mem_region(channel->physaddr,
 						channel_bytes, MYDRVNAME);
-	if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
+	if (!channel->requested && uuid_le_cmp(guid, visor_video_guid))
 		/* we only care about errors if this is not the video channel */
 		goto err_destroy_channel;
 
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 4cfd0fa..2215056 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -34,12 +34,12 @@
 
 #define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
 
-#define UNISYS_SPAR_LEAF_ID 0x40000000
+#define UNISYS_VISOR_LEAF_ID 0x40000000
 
 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
-#define UNISYS_SPAR_ID_EBX 0x73696e55
-#define UNISYS_SPAR_ID_ECX 0x70537379
-#define UNISYS_SPAR_ID_EDX 0x34367261
+#define UNISYS_VISOR_ID_EBX 0x73696e55
+#define UNISYS_VISOR_ID_ECX 0x70537379
+#define UNISYS_VISOR_ID_EDX 0x34367261
 
 /*
  * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
@@ -101,7 +101,7 @@ static ssize_t toolaction_show(struct device *dev,
 	int err;
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
+				offsetof(struct visor_controlvm_channel,
 					 tool_action),
 				&tool_action, sizeof(u8));
 	if (err)
@@ -120,11 +120,10 @@ static ssize_t toolaction_store(struct device *dev,
 	if (kstrtou8(buf, 10, &tool_action))
 		return -EINVAL;
 
-	err = visorchannel_write
-		(chipset_dev->controlvm_channel,
-		 offsetof(struct spar_controlvm_channel_protocol,
-			  tool_action),
-		 &tool_action, sizeof(u8));
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  tool_action),
+				 &tool_action, sizeof(u8));
 
 	if (err)
 		return err;
@@ -136,18 +135,18 @@ static ssize_t boottotool_show(struct device *dev,
 			       struct device_attribute *attr,
 			       char *buf)
 {
-	struct efi_spar_indication efi_spar_indication;
+	struct efi_visor_indication efi_visor_indication;
 	int err;
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
-					 efi_spar_ind),
-				&efi_spar_indication,
-				sizeof(struct efi_spar_indication));
+				offsetof(struct visor_controlvm_channel,
+					 efi_visor_ind),
+				&efi_visor_indication,
+				sizeof(struct efi_visor_indication));
 
 	if (err)
 		return err;
-	return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
+	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
 }
 
 static ssize_t boottotool_store(struct device *dev,
@@ -155,17 +154,17 @@ static ssize_t boottotool_store(struct device *dev,
 				const char *buf, size_t count)
 {
 	int val, err;
-	struct efi_spar_indication efi_spar_indication;
+	struct efi_visor_indication efi_visor_indication;
 
 	if (kstrtoint(buf, 10, &val))
 		return -EINVAL;
 
-	efi_spar_indication.boot_to_tool = val;
-	err = visorchannel_write
-		(chipset_dev->controlvm_channel,
-		 offsetof(struct spar_controlvm_channel_protocol,
-			  efi_spar_ind), &(efi_spar_indication),
-		 sizeof(struct efi_spar_indication));
+	efi_visor_indication.boot_to_tool = val;
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  efi_visor_ind),
+				 &(efi_visor_indication),
+				 sizeof(struct efi_visor_indication));
 
 	if (err)
 		return err;
@@ -180,7 +179,7 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
 	int err;
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
+				offsetof(struct visor_controlvm_channel,
 					 installation_error),
 				&error, sizeof(u32));
 	if (err)
@@ -197,11 +196,10 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
 	if (kstrtou32(buf, 10, &error))
 		return -EINVAL;
 
-	err = visorchannel_write
-		(chipset_dev->controlvm_channel,
-		 offsetof(struct spar_controlvm_channel_protocol,
-			  installation_error),
-		 &error, sizeof(u32));
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_error),
+				 &error, sizeof(u32));
 	if (err)
 		return err;
 	return count;
@@ -214,11 +212,10 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
 	u32 text_id = 0;
 	int err;
 
-	err = visorchannel_read
-			(chipset_dev->controlvm_channel,
-			 offsetof(struct spar_controlvm_channel_protocol,
-				  installation_text_id),
-			 &text_id, sizeof(u32));
+	err = visorchannel_read(chipset_dev->controlvm_channel,
+				offsetof(struct visor_controlvm_channel,
+					 installation_text_id),
+				&text_id, sizeof(u32));
 	if (err)
 		return err;
 
@@ -234,11 +231,10 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
 	if (kstrtou32(buf, 10, &text_id))
 		return -EINVAL;
 
-	err = visorchannel_write
-		(chipset_dev->controlvm_channel,
-		 offsetof(struct spar_controlvm_channel_protocol,
-			  installation_text_id),
-		 &text_id, sizeof(u32));
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_text_id),
+				 &text_id, sizeof(u32));
 	if (err)
 		return err;
 	return count;
@@ -252,7 +248,7 @@ static ssize_t remaining_steps_show(struct device *dev,
 	int err;
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
+				offsetof(struct visor_controlvm_channel,
 					 installation_remaining_steps),
 				&remaining_steps, sizeof(u16));
 	if (err)
@@ -271,11 +267,10 @@ static ssize_t remaining_steps_store(struct device *dev,
 	if (kstrtou16(buf, 10, &remaining_steps))
 		return -EINVAL;
 
-	err = visorchannel_write
-		(chipset_dev->controlvm_channel,
-		 offsetof(struct spar_controlvm_channel_protocol,
-			  installation_remaining_steps),
-		 &remaining_steps, sizeof(u16));
+	err = visorchannel_write(chipset_dev->controlvm_channel,
+				 offsetof(struct visor_controlvm_channel,
+					  installation_remaining_steps),
+				 &remaining_steps, sizeof(u16));
 	if (err)
 		return err;
 	return count;
@@ -285,9 +280,9 @@ static DEVICE_ATTR_RW(remaining_steps);
 static uuid_le
 parser_id_get(struct parser_context *ctx)
 {
-	struct spar_controlvm_parameters_header *phdr = NULL;
+	struct visor_controlvm_parameters_header *phdr = NULL;
 
-	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+	phdr = (struct visor_controlvm_parameters_header *)(ctx->data);
 	return phdr->id;
 }
 
@@ -331,9 +326,9 @@ parser_string_get(struct parser_context *ctx)
 static void *
 parser_name_get(struct parser_context *ctx)
 {
-	struct spar_controlvm_parameters_header *phdr = NULL;
+	struct visor_controlvm_parameters_header *phdr = NULL;
 
-	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
+	phdr = (struct visor_controlvm_parameters_header *)(ctx->data);
 
 	if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
 		return NULL;
@@ -400,7 +395,7 @@ controlvm_init_response(struct controlvm_message *msg,
 static int
 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
 			       int response,
-			       enum ultra_chipset_feature features)
+			       enum visor_chipset_feature features)
 {
 	struct controlvm_message outmsg;
 
@@ -414,7 +409,7 @@ static int
 chipset_init(struct controlvm_message *inmsg)
 {
 	static int chipset_inited;
-	enum ultra_chipset_feature features = 0;
+	enum visor_chipset_feature features = 0;
 	int rc = CONTROLVM_RESP_SUCCESS;
 	int res = 0;
 
@@ -430,13 +425,13 @@ chipset_init(struct controlvm_message *inmsg)
 	 * also supports it).
 	 */
 	features = inmsg->cmd.init_chipset.features &
-		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
 
 	/*
 	 * Set the "reply" bit so Command knows this is a
 	 * features-aware driver.
 	 */
-	features |= ULTRA_CHIPSET_FEATURE_REPLY;
+	features |= VISOR_CHIPSET_FEATURE_REPLY;
 
 out_respond:
 	if (inmsg->hdr.flags.response_expected)
@@ -447,7 +442,7 @@ chipset_init(struct controlvm_message *inmsg)
 
 static int
 controlvm_respond(struct controlvm_message_header *msg_hdr, int response,
-		  struct spar_segment_state *state)
+		  struct visor_segment_state *state)
 {
 	struct controlvm_message outmsg;
 
@@ -470,14 +465,14 @@ enum crash_obj_type {
 };
 
 static int
-save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
+save_crash_message(struct controlvm_message *msg, enum crash_obj_type cr_type)
 {
 	u32 local_crash_msg_offset;
 	u16 local_crash_msg_count;
 	int err;
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
+				offsetof(struct visor_controlvm_channel,
 					 saved_crash_message_count),
 				&local_crash_msg_count, sizeof(u16));
 	if (err) {
@@ -493,7 +488,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
 	}
 
 	err = visorchannel_read(chipset_dev->controlvm_channel,
-				offsetof(struct spar_controlvm_channel_protocol,
+				offsetof(struct visor_controlvm_channel,
 					 saved_crash_message_offset),
 				&local_crash_msg_offset, sizeof(u32));
 	if (err) {
@@ -502,7 +497,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
 		return err;
 	}
 
-	switch (typ) {
+	switch (cr_type) {
 	case CRASH_DEV:
 		local_crash_msg_offset += sizeof(struct controlvm_message);
 		err = visorchannel_write(chipset_dev->controlvm_channel,
@@ -551,7 +546,7 @@ controlvm_responder(enum controlvm_id cmd_id,
 static int
 device_changestate_responder(enum controlvm_id cmd_id,
 			     struct visor_device *p, int response,
-			     struct spar_segment_state response_state)
+			     struct visor_segment_state response_state)
 {
 	struct controlvm_message outmsg;
 	u32 bus_no = p->chipset_bus_no;
@@ -573,7 +568,7 @@ device_changestate_responder(enum controlvm_id cmd_id,
 }
 
 static int
-bus_create(struct controlvm_message *inmsg)
+visorbus_create(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	struct controlvm_message_header *pmsg_hdr = NULL;
@@ -585,7 +580,7 @@ bus_create(struct controlvm_message *inmsg)
 	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
 	if (bus_info && (bus_info->state.created == 1)) {
 		dev_err(&chipset_dev->acpi_device->dev,
-			"failed bus_create: already exists\n");
+			"failed visorbus_create: already exists\n");
 		err = -EEXIST;
 		goto err_respond;
 	}
@@ -600,7 +595,7 @@ bus_create(struct controlvm_message *inmsg)
 	bus_info->chipset_bus_no = bus_no;
 	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
 
-	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
+	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, visor_siovm_uuid) == 0) {
 		err = save_crash_message(inmsg, CRASH_BUS);
 		if (err)
 			goto err_free_bus_info;
@@ -630,9 +625,9 @@ bus_create(struct controlvm_message *inmsg)
 	}
 	bus_info->visorchannel = visorchannel;
 
-	/* Response will be handled by chipset_bus_create */
-	err = chipset_bus_create(bus_info);
-	/* If error chipset_bus_create didn't respond, need to respond here */
+	/* Response will be handled by visorchipset_bus_create */
+	err = visorchipset_bus_create(bus_info);
+	/* If visorchipset_bus_create didn't respond, need to respond here */
 	if (err)
 		goto err_destroy_channel;
 
@@ -654,7 +649,7 @@ bus_create(struct controlvm_message *inmsg)
 }
 
 static int
-bus_destroy(struct controlvm_message *inmsg)
+visorbus_destroy(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	struct controlvm_message_header *pmsg_hdr = NULL;
@@ -688,8 +683,8 @@ bus_destroy(struct controlvm_message *inmsg)
 		bus_info->pending_msg_hdr = pmsg_hdr;
 	}
 
-	/* Response will be handled by chipset_bus_destroy */
-	chipset_bus_destroy(bus_info);
+	/* Response will be handled by visorchipset_bus_destroy */
+	visorchipset_bus_destroy(bus_info);
 	return 0;
 
 err_respond:
@@ -699,8 +694,8 @@ bus_destroy(struct controlvm_message *inmsg)
 }
 
 static int
-bus_configure(struct controlvm_message *inmsg,
-	      struct parser_context *parser_ctx)
+visorbus_configure(struct controlvm_message *inmsg,
+		   struct parser_context *parser_ctx)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	u32 bus_no;
@@ -737,14 +732,14 @@ bus_configure(struct controlvm_message *inmsg,
 
 err_respond:
 	dev_err(&chipset_dev->acpi_device->dev,
-		"bus_configured exited with err: %d\n", err);
+		"visorbus_configure exited with err: %d\n", err);
 	if (inmsg->hdr.flags.response_expected == 1)
 		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
 	return err;
 }
 
 static int
-my_device_create(struct controlvm_message *inmsg)
+visorbus_device_create(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	struct controlvm_message_header *pmsg_hdr = NULL;
@@ -807,7 +802,7 @@ my_device_create(struct controlvm_message *inmsg)
 	dev_info->visorchannel = visorchannel;
 	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
 	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
-			spar_vhba_channel_protocol_uuid) == 0) {
+			visor_vhba_channel_uuid) == 0) {
 		err = save_crash_message(inmsg, CRASH_DEV);
 		if (err)
 			goto err_destroy_visorchannel;
@@ -824,8 +819,8 @@ my_device_create(struct controlvm_message *inmsg)
 		       sizeof(struct controlvm_message_header));
 		dev_info->pending_msg_hdr = pmsg_hdr;
 	}
-	/* Chipset_device_create will send response */
-	err = chipset_device_create(dev_info);
+	/* visorchipset_device_create will send response */
+	err = visorchipset_device_create(dev_info);
 	if (err)
 		goto err_destroy_visorchannel;
 
@@ -844,13 +839,13 @@ my_device_create(struct controlvm_message *inmsg)
 }
 
 static int
-my_device_changestate(struct controlvm_message *inmsg)
+visorbus_device_changestate(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->device_change_state.bus_no;
 	u32 dev_no = cmd->device_change_state.dev_no;
-	struct spar_segment_state state = cmd->device_change_state.state;
+	struct visor_segment_state state = cmd->device_change_state.state;
 	struct visor_device *dev_info;
 	int err = 0;
 
@@ -882,16 +877,16 @@ my_device_changestate(struct controlvm_message *inmsg)
 
 	if (state.alive == segment_state_running.alive &&
 	    state.operating == segment_state_running.operating)
-		/* Response will be sent from chipset_device_resume */
-		err = chipset_device_resume(dev_info);
+		/* Response will be sent from visorchipset_device_resume */
+		err = visorchipset_device_resume(dev_info);
 	/* ServerNotReady / ServerLost / SegmentStateStandby */
 	else if (state.alive == segment_state_standby.alive &&
 		 state.operating == segment_state_standby.operating)
 		/*
 		 * technically this is standby case where server is lost.
-		 * Response will be sent from chipset_device_pause.
+		 * Response will be sent from visorchipset_device_pause.
 		 */
-		err = chipset_device_pause(dev_info);
+		err = visorchipset_device_pause(dev_info);
 	if (err)
 		goto err_respond;
 
@@ -905,7 +900,7 @@ my_device_changestate(struct controlvm_message *inmsg)
 }
 
 static int
-my_device_destroy(struct controlvm_message *inmsg)
+visorbus_device_destroy(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	struct controlvm_message_header *pmsg_hdr = NULL;
@@ -941,7 +936,7 @@ my_device_destroy(struct controlvm_message *inmsg)
 		dev_info->pending_msg_hdr = pmsg_hdr;
 	}
 
-	chipset_device_destroy(dev_info);
+	visorchipset_device_destroy(dev_info);
 	return 0;
 
 err_respond:
@@ -1179,15 +1174,15 @@ parahotplug_request_kickoff(struct parahotplug_request *req)
 		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
 	};
 
-	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
-	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
-	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
+	sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
+	sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
+	sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
 		cmd->device_change_state.state.active);
-	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
+	sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
 		cmd->device_change_state.bus_no);
-	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
+	sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
 		cmd->device_change_state.dev_no >> 3);
-	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
+	sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
 		cmd->device_change_state.dev_no & 0x7);
 
 	return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
@@ -1387,7 +1382,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
 
 	/* get saved message count */
 	if (visorchannel_read(chipset_dev->controlvm_channel,
-			      offsetof(struct spar_controlvm_channel_protocol,
+			      offsetof(struct visor_controlvm_channel,
 				       saved_crash_message_count),
 			      &local_crash_msg_count, sizeof(u16)) < 0) {
 		dev_err(&chipset_dev->acpi_device->dev,
@@ -1403,7 +1398,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
 
 	/* get saved crash message offset */
 	if (visorchannel_read(chipset_dev->controlvm_channel,
-			      offsetof(struct spar_controlvm_channel_protocol,
+			      offsetof(struct visor_controlvm_channel,
 				       saved_crash_message_offset),
 			      &local_crash_msg_offset, sizeof(u32)) < 0) {
 		dev_err(&chipset_dev->acpi_device->dev,
@@ -1438,7 +1433,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			"no valid create_bus message\n");
 		return;
 	}
-	bus_create(&local_crash_bus_msg);
+	visorbus_create(&local_crash_bus_msg);
 
 	/* reuse create device message for storage device */
 	if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
@@ -1446,11 +1441,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			"no valid create_device message\n");
 		return;
 	}
-	my_device_create(&local_crash_dev_msg);
+	visorbus_device_create(&local_crash_dev_msg);
 }
 
 void
-bus_create_response(struct visor_device *bus_info, int response)
+visorbus_create_response(struct visor_device *bus_info, int response)
 {
 	if (response >= 0)
 		bus_info->state.created = 1;
@@ -1463,7 +1458,7 @@ bus_create_response(struct visor_device *bus_info, int response)
 }
 
 void
-bus_destroy_response(struct visor_device *bus_info, int response)
+visorbus_destroy_response(struct visor_device *bus_info, int response)
 {
 	controlvm_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
 			    response);
@@ -1473,7 +1468,7 @@ bus_destroy_response(struct visor_device *bus_info, int response)
 }
 
 void
-device_create_response(struct visor_device *dev_info, int response)
+visorbus_device_create_response(struct visor_device *dev_info, int response)
 {
 	if (response >= 0)
 		dev_info->state.created = 1;
@@ -1486,7 +1481,7 @@ device_create_response(struct visor_device *dev_info, int response)
 }
 
 void
-device_destroy_response(struct visor_device *dev_info, int response)
+visorbus_device_destroy_response(struct visor_device *dev_info, int response)
 {
 	controlvm_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
 			    response);
@@ -1496,8 +1491,7 @@ device_destroy_response(struct visor_device *dev_info, int response)
 }
 
 void
-device_pause_response(struct visor_device *dev_info,
-		      int response)
+visorbus_device_pause_response(struct visor_device *dev_info, int response)
 {
 	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
 				     dev_info, response,
@@ -1508,7 +1502,7 @@ device_pause_response(struct visor_device *dev_info,
 }
 
 void
-device_resume_response(struct visor_device *dev_info, int response)
+visorbus_device_resume_response(struct visor_device *dev_info, int response)
 {
 	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
 				     dev_info, response,
@@ -1599,9 +1593,6 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
 
 	/* create parsing context if necessary */
 	local_addr = (inmsg.hdr.flags.test_message == 1);
-	if (channel_addr == 0)
-		return -EINVAL;
-
 	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
 	parm_bytes = inmsg.hdr.payload_bytes;
 
@@ -1634,16 +1625,16 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
 		err = chipset_init(&inmsg);
 		break;
 	case CONTROLVM_BUS_CREATE:
-		err = bus_create(&inmsg);
+		err = visorbus_create(&inmsg);
 		break;
 	case CONTROLVM_BUS_DESTROY:
-		err = bus_destroy(&inmsg);
+		err = visorbus_destroy(&inmsg);
 		break;
 	case CONTROLVM_BUS_CONFIGURE:
-		err = bus_configure(&inmsg, parser_ctx);
+		err = visorbus_configure(&inmsg, parser_ctx);
 		break;
 	case CONTROLVM_DEVICE_CREATE:
-		err = my_device_create(&inmsg);
+		err = visorbus_device_create(&inmsg);
 		break;
 	case CONTROLVM_DEVICE_CHANGESTATE:
 		if (cmd->device_change_state.flags.phys_device) {
@@ -1653,12 +1644,12 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
 			 * save the hdr and cmd structures for later use
 			 * when sending back the response to Command
 			 */
-			err = my_device_changestate(&inmsg);
+			err = visorbus_device_changestate(&inmsg);
 			break;
 		}
 		break;
 	case CONTROLVM_DEVICE_DESTROY:
-		err = my_device_destroy(&inmsg);
+		err = visorbus_device_destroy(&inmsg);
 		break;
 	case CONTROLVM_DEVICE_CONFIGURE:
 		/* no op just send a respond that we passed */
@@ -1793,6 +1784,11 @@ controlvm_periodic_work(struct work_struct *work)
 	/* parahotplug_worker */
 	parahotplug_process_list();
 
+/*
+ * The controlvm messages are sent in bulk. If we start receiving messages, we
+ * want the polling to be fast. If we do not receive any message for
+ * MIN_IDLE_SECONDS, we can slow down the polling.
+ */
 schedule_out:
 	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
 				(HZ * MIN_IDLE_SECONDS))) {
@@ -1821,7 +1817,7 @@ visorchipset_init(struct acpi_device *acpi_device)
 {
 	int err = -ENODEV;
 	u64 addr;
-	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+	uuid_le uuid = VISOR_CONTROLVM_CHANNEL_UUID;
 	struct visorchannel *controlvm_channel;
 
 	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
@@ -1849,7 +1845,7 @@ visorchipset_init(struct acpi_device *acpi_device)
 	if (err < 0)
 		goto error_destroy_channel;
 
-	if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+	if (!VISOR_CONTROLVM_CHANNEL_OK_CLIENT(
 				visorchannel_get_header(controlvm_channel)))
 		goto error_delete_groups;
 
@@ -1928,10 +1924,10 @@ static __init int visorutil_spar_detect(void)
 
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		/* check the ID */
-		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
-		return  (ebx == UNISYS_SPAR_ID_EBX) &&
-			(ecx == UNISYS_SPAR_ID_ECX) &&
-			(edx == UNISYS_SPAR_ID_EDX);
+		cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
+		return  (ebx == UNISYS_VISOR_ID_EBX) &&
+			(ecx == UNISYS_VISOR_ID_ECX) &&
+			(edx == UNISYS_VISOR_ID_EDX);
 	} else {
 		return 0;
 	}
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 6997b16..a6e7a6b 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -37,14 +37,14 @@ static struct dentry *visorhba_debugfs_dir;
 /* GUIDS for HBA channel type supported by this driver */
 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
 	/* Note that the only channel type we expect to be reported by the
-	 * bus driver is the SPAR_VHBA channel.
+	 * bus driver is the VISOR_VHBA channel.
 	 */
-	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
+	{ VISOR_VHBA_CHANNEL_UUID, "sparvhba" },
 	{ NULL_UUID_LE, NULL }
 };
 
 MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
-MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
+MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_UUID_STR);
 
 struct visordisk_info {
 	u32 valid;
@@ -657,7 +657,7 @@ static int info_debugfs_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
 			   phys_flags_addr);
 		seq_printf(seq, "FeatureFlags = %llu\n",
-			   (__le64)readq(devdata->flags_addr));
+			   (u64)readq(devdata->flags_addr));
 	}
 	seq_printf(seq, "acquire_failed_cnt = %llu\n",
 		   devdata->acquire_failed_cnt);
@@ -1060,8 +1060,7 @@ static int visorhba_probe(struct visor_device *dev)
 	if (!scsihost)
 		return -ENODEV;
 
-	channel_offset = offsetof(struct spar_io_channel_protocol,
-				  vhba.max);
+	channel_offset = offsetof(struct visor_io_channel, vhba.max);
 	err = visorbus_read_channel(dev, channel_offset, &max,
 				    sizeof(struct vhba_config_max));
 	if (err < 0)
@@ -1091,7 +1090,7 @@ static int visorhba_probe(struct visor_device *dev)
 		goto err_scsi_remove_host;
 	}
 	devdata->debugfs_info =
-		debugfs_create_file("info", S_IRUSR | S_IRGRP,
+		debugfs_create_file("info", 0440,
 				    devdata->debugfs_dir, devdata,
 				    &info_debugfs_fops);
 	if (!devdata->debugfs_info) {
@@ -1105,12 +1104,12 @@ static int visorhba_probe(struct visor_device *dev)
 	devdata->serverchangingstate = false;
 	devdata->scsihost = scsihost;
 
-	channel_offset = offsetof(struct spar_io_channel_protocol,
+	channel_offset = offsetof(struct visor_io_channel,
 				  channel_header.features);
 	err = visorbus_read_channel(dev, channel_offset, &features, 8);
 	if (err)
 		goto err_debugfs_info;
-	features |= ULTRA_IO_CHANNEL_IS_POLLING;
+	features |= VISOR_CHANNEL_IS_POLLING;
 	err = visorbus_write_channel(dev, channel_offset, &features, 8);
 	if (err)
 		goto err_debugfs_info;
@@ -1166,7 +1165,7 @@ static void visorhba_remove(struct visor_device *dev)
 	debugfs_remove_recursive(devdata->debugfs_dir);
 }
 
-/* This is used to tell the visor bus driver which types of visor devices
+/* This is used to tell the visorbus driver which types of visor devices
  * we support, and what functions to call when a visor device that we support
  * is attached or removed.
  */
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index 53dde7c..a4baea5 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -17,26 +17,23 @@
 
 #include <linux/types.h>
 
-/* Identifies mouse and keyboard activity which is specified by the firmware to
- *  the host using the cmsimpleinput protocol.  @ingroup coretypes
+/* These defines identify mouse and keyboard activity which is specified by the
+ * firmware to the host using the cmsimpleinput protocol.  @ingroup coretypes
  */
-enum ultra_inputaction {
-	inputaction_none = 0,
-	inputaction_xy_motion = 1,	/* only motion; arg1=x, arg2=y */
-	inputaction_mouse_button_down = 2, /* arg1: 1=left,2=center,3=right */
-	inputaction_mouse_button_up = 3, /* arg1: 1=left,2=center,3=right */
-	inputaction_mouse_button_click = 4, /* arg1: 1=left,2=center,3=right */
-	inputaction_mouse_button_dclick = 5, /* arg1: 1=left,2=center,
-					      * 3=right
-					      */
-	inputaction_wheel_rotate_away = 6, /* arg1: wheel rotation away from
-					    * user
-					    */
-	inputaction_wheel_rotate_toward = 7, /* arg1: wheel rotation toward
-					      * user
-					      */
-	inputaction_set_max_xy = 8,	/* set screen maxXY; arg1=x, arg2=y */
-	inputaction_key_down = 64,	/* arg1: scancode, as follows:
+#define INPUTACTION_XY_MOTION 1		/* only motion; arg1=x, arg2=y */
+#define INPUTACTION_MOUSE_BUTTON_DOWN 2 /* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_UP 3	/* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_CLICK 4 /* arg1: 1=left,2=center,3=right */
+#define INPUTACTION_MOUSE_BUTTON_DCLICK 5 /* arg1: 1=left,2=center,
+					   * 3=right
+					   */
+#define INPUTACTION_WHEEL_ROTATE_AWAY 6  /* arg1: wheel rotation away from
+					  * user
+					  */
+#define INPUTACTION_WHEEL_ROTATE_TOWARD 7 /* arg1: wheel rotation toward
+					   * user
+					   */
+#define INPUTACTION_KEY_DOWN 64		/* arg1: scancode, as follows:
 					 * If arg1 <= 0xff, it's a 1-byte
 					 * scancode and arg1 is that scancode.
 					 * If arg1 > 0xff, it's a 2-byte
@@ -45,10 +42,10 @@ enum ultra_inputaction {
 					 * high 8 bits.  E.g., the right ALT key
 					 * would appear as x'38e0'.
 					 */
-	inputaction_key_up = 65,	/* arg1: scancode (in same format as
+#define INPUTACTION_KEY_UP 65		/* arg1: scancode (in same format as
 					 * inputaction_keyDown)
 					 */
-	inputaction_set_locking_key_state = 66,
+#define INPUTACTION_SET_LOCKING_KEY_STATE 66
 					/* arg1: scancode (in same format
 					 *	 as inputaction_keyDown);
 					 *	 MUST refer to one of the
@@ -58,22 +55,20 @@ enum ultra_inputaction {
 					 *	 in the LOCKED position
 					 *	 (e.g., light is ON)
 					 */
-	inputaction_key_down_up = 67,	/* arg1: scancode (in same format
+#define INPUTACTION_KEY_DOWN_UP 67	/* arg1: scancode (in same format
 					 *	 as inputaction_keyDown)
 					 */
-	inputaction_last
-};
 
-struct ultra_inputactivity {
+struct visor_inputactivity {
 	u16 action;
 	u16 arg1;
 	u16 arg2;
 	u16 arg3;
 } __packed;
 
-struct ultra_inputreport {
+struct visor_inputreport {
 	u64 seq_no;
-	struct ultra_inputactivity activity;
+	struct visor_inputactivity activity;
 } __packed;
 
 #endif
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index cdd3543..45bc340 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -33,17 +33,16 @@
 #include "ultrainputreport.h"
 
 /* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
-#define SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID \
+#define VISOR_KEYBOARD_CHANNEL_UUID \
 	UUID_LE(0xc73416d0, 0xb0b8, 0x44af, \
 		0xb3, 0x4, 0x9d, 0x2a, 0xe9, 0x9f, 0x1b, 0x3d)
-#define SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
+#define VISOR_KEYBOARD_CHANNEL_UUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
 
 /* Mouse channel {addf07d4-94a9-46e2-81c3-61abcdbdbd87} */
-#define SPAR_MOUSE_CHANNEL_PROTOCOL_UUID \
+#define VISOR_MOUSE_CHANNEL_UUID \
 	UUID_LE(0xaddf07d4, 0x94a9, 0x46e2, \
 		0x81, 0xc3, 0x61, 0xab, 0xcd, 0xbd, 0xbd, 0x87)
-#define SPAR_MOUSE_CHANNEL_PROTOCOL_UUID_STR \
-	"addf07d4-94a9-46e2-81c3-61abcdbdbd87"
+#define VISOR_MOUSE_CHANNEL_UUID_STR "addf07d4-94a9-46e2-81c3-61abcdbdbd87"
 
 #define PIXELS_ACROSS_DEFAULT 800
 #define PIXELS_DOWN_DEFAULT   600
@@ -70,10 +69,8 @@ struct visorinput_devdata {
 	unsigned char keycode_table[0];
 };
 
-static const uuid_le spar_keyboard_channel_protocol_uuid =
-	SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID;
-static const uuid_le spar_mouse_channel_protocol_uuid =
-	SPAR_MOUSE_CHANNEL_PROTOCOL_UUID;
+static const uuid_le visor_keyboard_channel_uuid = VISOR_KEYBOARD_CHANNEL_UUID;
+static const uuid_le visor_mouse_channel_uuid = VISOR_MOUSE_CHANNEL_UUID;
 
 /*
  * Borrowed from drivers/input/keyboard/atakbd.c
@@ -456,9 +453,9 @@ visorinput_probe(struct visor_device *dev)
 	enum visorinput_device_type devtype;
 
 	guid = visorchannel_get_uuid(dev->visorchannel);
-	if (uuid_le_cmp(guid, spar_mouse_channel_protocol_uuid) == 0)
+	if (uuid_le_cmp(guid, visor_mouse_channel_uuid) == 0)
 		devtype = visorinput_mouse;
-	else if (uuid_le_cmp(guid, spar_keyboard_channel_protocol_uuid) == 0)
+	else if (uuid_le_cmp(guid, visor_keyboard_channel_uuid) == 0)
 		devtype = visorinput_keyboard;
 	else
 		return -ENODEV;
@@ -568,7 +565,7 @@ calc_button(int x)
 static void
 visorinput_channel_interrupt(struct visor_device *dev)
 {
-	struct ultra_inputreport r;
+	struct visor_inputreport r;
 	int scancode, keycode;
 	struct input_dev *visorinput_dev;
 	int xmotion, ymotion, button;
@@ -585,46 +582,46 @@ visorinput_channel_interrupt(struct visor_device *dev)
 		scancode = r.activity.arg1;
 		keycode = scancode_to_keycode(scancode);
 		switch (r.activity.action) {
-		case inputaction_key_down:
+		case INPUTACTION_KEY_DOWN:
 			input_report_key(visorinput_dev, keycode, 1);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_key_up:
+		case INPUTACTION_KEY_UP:
 			input_report_key(visorinput_dev, keycode, 0);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_key_down_up:
+		case INPUTACTION_KEY_DOWN_UP:
 			input_report_key(visorinput_dev, keycode, 1);
 			input_sync(visorinput_dev);
 			input_report_key(visorinput_dev, keycode, 0);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_set_locking_key_state:
+		case INPUTACTION_SET_LOCKING_KEY_STATE:
 			handle_locking_key(visorinput_dev, keycode,
 					   r.activity.arg2);
 			break;
-		case inputaction_xy_motion:
+		case INPUTACTION_XY_MOTION:
 			xmotion = r.activity.arg1;
 			ymotion = r.activity.arg2;
 			input_report_abs(visorinput_dev, ABS_X, xmotion);
 			input_report_abs(visorinput_dev, ABS_Y, ymotion);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_mouse_button_down:
+		case INPUTACTION_MOUSE_BUTTON_DOWN:
 			button = calc_button(r.activity.arg1);
 			if (button < 0)
 				break;
 			input_report_key(visorinput_dev, button, 1);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_mouse_button_up:
+		case INPUTACTION_MOUSE_BUTTON_UP:
 			button = calc_button(r.activity.arg1);
 			if (button < 0)
 				break;
 			input_report_key(visorinput_dev, button, 0);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_mouse_button_click:
+		case INPUTACTION_MOUSE_BUTTON_CLICK:
 			button = calc_button(r.activity.arg1);
 			if (button < 0)
 				break;
@@ -634,7 +631,7 @@ visorinput_channel_interrupt(struct visor_device *dev)
 			input_report_key(visorinput_dev, button, 0);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_mouse_button_dclick:
+		case INPUTACTION_MOUSE_BUTTON_DCLICK:
 			button = calc_button(r.activity.arg1);
 			if (button < 0)
 				break;
@@ -645,11 +642,11 @@ visorinput_channel_interrupt(struct visor_device *dev)
 				input_sync(visorinput_dev);
 			}
 			break;
-		case inputaction_wheel_rotate_away:
+		case INPUTACTION_WHEEL_ROTATE_AWAY:
 			input_report_rel(visorinput_dev, REL_WHEEL, 1);
 			input_sync(visorinput_dev);
 			break;
-		case inputaction_wheel_rotate_toward:
+		case INPUTACTION_WHEEL_ROTATE_TOWARD:
 			input_report_rel(visorinput_dev, REL_WHEEL, -1);
 			input_sync(visorinput_dev);
 			break;
@@ -730,8 +727,8 @@ visorinput_resume(struct visor_device *dev,
 
 /* GUIDS for all channel types supported by this driver. */
 static struct visor_channeltype_descriptor visorinput_channel_types[] = {
-	{ SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID, "keyboard"},
-	{ SPAR_MOUSE_CHANNEL_PROTOCOL_UUID, "mouse"},
+	{ VISOR_KEYBOARD_CHANNEL_UUID, "keyboard"},
+	{ VISOR_MOUSE_CHANNEL_UUID, "mouse"},
 	{ NULL_UUID_LE, NULL }
 };
 
@@ -767,5 +764,5 @@ MODULE_AUTHOR("Unisys");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("s-Par human input driver for virtual keyboard/mouse");
 
-MODULE_ALIAS("visorbus:" SPAR_MOUSE_CHANNEL_PROTOCOL_UUID_STR);
-MODULE_ALIAS("visorbus:" SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID_STR);
+MODULE_ALIAS("visorbus:" VISOR_MOUSE_CHANNEL_UUID_STR);
+MODULE_ALIAS("visorbus:" VISOR_KEYBOARD_CHANNEL_UUID_STR);
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index adebf22..2891622 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -39,9 +39,9 @@
 /* GUIDS for director channel type supported by this driver.  */
 static struct visor_channeltype_descriptor visornic_channel_types[] = {
 	/* Note that the only channel type we expect to be reported by the
-	 * bus driver is the SPAR_VNIC channel.
+	 * bus driver is the VISOR_VNIC channel.
 	 */
-	{ SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
+	{ VISOR_VNIC_CHANNEL_UUID, "ultravnic" },
 	{ NULL_UUID_LE, NULL }
 };
 MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
@@ -52,7 +52,7 @@ MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
  * must be added to scripts/mode/file2alias.c, etc., to get this working
  * properly.
  */
-MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
+MODULE_ALIAS("visorbus:" VISOR_VNIC_CHANNEL_UUID_STR);
 
 struct chanstat {
 	unsigned long got_rcv;
@@ -1807,8 +1807,7 @@ static int visornic_probe(struct visor_device *dev)
 
 	/* Get MAC address from channel and read it into the device. */
 	netdev->addr_len = ETH_ALEN;
-	channel_offset = offsetof(struct spar_io_channel_protocol,
-				  vnic.macaddr);
+	channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
 	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
 				    ETH_ALEN);
 	if (err < 0) {
@@ -1836,8 +1835,7 @@ static int visornic_probe(struct visor_device *dev)
 	atomic_set(&devdata->usage, 1);
 
 	/* Setup rcv bufs */
-	channel_offset = offsetof(struct spar_io_channel_protocol,
-				  vnic.num_rcv_bufs);
+	channel_offset = offsetof(struct visor_io_channel, vnic.num_rcv_bufs);
 	err = visorbus_read_channel(dev, channel_offset,
 				    &devdata->num_rcv_bufs, 4);
 	if (err) {
@@ -1884,8 +1882,7 @@ static int visornic_probe(struct visor_device *dev)
 	devdata->server_change_state = false;
 
 	/*set the default mtu */
-	channel_offset = offsetof(struct spar_io_channel_protocol,
-				  vnic.mtu);
+	channel_offset = offsetof(struct visor_io_channel, vnic.mtu);
 	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
 	if (err) {
 		dev_err(&dev->device,
@@ -1906,7 +1903,7 @@ static int visornic_probe(struct visor_device *dev)
 	 */
 	mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
 
-	channel_offset = offsetof(struct spar_io_channel_protocol,
+	channel_offset = offsetof(struct visor_io_channel,
 				  channel_header.features);
 	err = visorbus_read_channel(dev, channel_offset, &features, 8);
 	if (err) {
@@ -1916,8 +1913,8 @@ static int visornic_probe(struct visor_device *dev)
 		goto cleanup_napi_add;
 	}
 
-	features |= ULTRA_IO_CHANNEL_IS_POLLING;
-	features |= ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING;
+	features |= VISOR_CHANNEL_IS_POLLING;
+	features |= VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING;
 	err = visorbus_write_channel(dev, channel_offset, &features, 8);
 	if (err) {
 		dev_err(&dev->device,
@@ -2115,7 +2112,7 @@ static int visornic_resume(struct visor_device *dev,
 	return 0;
 }
 
-/* This is used to tell the visor bus driver which types of visor devices
+/* This is used to tell the visorbus driver which types of visor devices
  * we support, and what functions to call when a visor device that we support
  * is attached or removed.
  */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index d04db3f..0159ca4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -153,7 +153,6 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
 		MAX_FRAGMENTS;
 
 	g_fragments_base = (char *)slot_mem + slot_mem_size;
-	slot_mem_size += frag_mem_size;
 
 	g_free_fragments = g_fragments_base;
 	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
@@ -365,7 +364,7 @@ vchiq_doorbell_irq(int irq, void *dev_id)
 }
 
 static void
-cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
 {
 	if (pagelistinfo->scatterlist_mapped) {
 		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
@@ -460,6 +459,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 								 PAGE_SIZE));
 			size_t bytes = PAGE_SIZE - off;
 
+			if (!pg) {
+				cleanup_pagelistinfo(pagelistinfo);
+				return NULL;
+			}
+
 			if (bytes > length)
 				bytes = length;
 			pages[actual_pages] = pg;
@@ -470,7 +474,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 	} else {
 		down_read(&task->mm->mmap_sem);
 		actual_pages = get_user_pages(
-				          (unsigned long)buf & ~(PAGE_SIZE - 1),
+					  (unsigned long)buf & PAGE_MASK,
 					  num_pages,
 					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
 					  pages,
@@ -489,7 +493,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 				actual_pages--;
 				put_page(pages[actual_pages]);
 			}
-			cleaup_pagelistinfo(pagelistinfo);
+			cleanup_pagelistinfo(pagelistinfo);
 			return NULL;
 		}
 		 /* release user pages */
@@ -518,7 +522,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 				 pagelistinfo->dma_dir);
 
 	if (dma_buffers == 0) {
-		cleaup_pagelistinfo(pagelistinfo);
+		cleanup_pagelistinfo(pagelistinfo);
 		return NULL;
 	}
 
@@ -555,7 +559,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		char *fragments;
 
 		if (down_interruptible(&g_free_fragments_sema) != 0) {
-			cleaup_pagelistinfo(pagelistinfo);
+			cleanup_pagelistinfo(pagelistinfo);
 			return NULL;
 		}
 
@@ -577,7 +581,6 @@ static void
 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
 	      int actual)
 {
-	unsigned int i;
 	PAGELIST_T *pagelist   = pagelistinfo->pagelist;
 	struct page **pages    = pagelistinfo->pages;
 	unsigned int num_pages = pagelistinfo->num_pages;
@@ -633,9 +636,11 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
 	/* Need to mark all the pages dirty. */
 	if (pagelist->type != PAGELIST_WRITE &&
 	    pagelistinfo->pages_need_release) {
+		unsigned int i;
+
 		for (i = 0; i < num_pages; i++)
 			set_page_dirty(pages[i]);
 	}
 
-	cleaup_pagelistinfo(pagelistinfo);
+	cleanup_pagelistinfo(pagelistinfo);
 }
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e823f1d..030bec8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -2070,7 +2070,7 @@ dump_phys_mem(void *virt_addr, u32 num_bytes)
 	struct page  **pages;
 	u8            *kmapped_virt_ptr;
 
-	/* Align virtAddr and endVirtAddr to 16 byte boundaries. */
+	/* Align virt_addr and end_virt_addr to 16 byte boundaries. */
 
 	virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
 	end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
@@ -3276,12 +3276,12 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
 		if (only_nonzero && !service_ptr->service_use_count)
 			continue;
 
-		if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
-			service_data[j].fourcc = service_ptr->base.fourcc;
-			service_data[j].clientid = service_ptr->client_id;
-			service_data[j++].use_count = service_ptr->
-							service_use_count;
-		}
+		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
+			continue;
+
+		service_data[j].fourcc = service_ptr->base.fourcc;
+		service_data[j].clientid = service_ptr->client_id;
+		service_data[j++].use_count = service_ptr->service_use_count;
 	}
 
 	read_unlock_bh(&arm_state->susp_res_lock);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 4f9e738..486be99 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -175,7 +175,7 @@ find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
 	service = handle_to_service(handle);
 	if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
 		(service->handle == handle)) {
-		BUG_ON(service->ref_count == 0);
+		WARN_ON(service->ref_count == 0);
 		service->ref_count++;
 	} else
 		service = NULL;
@@ -197,7 +197,7 @@ find_service_by_port(VCHIQ_STATE_T *state, int localport)
 		spin_lock(&service_spinlock);
 		service = state->services[localport];
 		if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
-			BUG_ON(service->ref_count == 0);
+			WARN_ON(service->ref_count == 0);
 			service->ref_count++;
 		} else
 			service = NULL;
@@ -221,7 +221,7 @@ find_service_for_instance(VCHIQ_INSTANCE_T instance,
 	if (service && (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
 		(service->handle == handle) &&
 		(service->instance == instance)) {
-		BUG_ON(service->ref_count == 0);
+		WARN_ON(service->ref_count == 0);
 		service->ref_count++;
 	} else
 		service = NULL;
@@ -246,7 +246,7 @@ find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
 		 (service->srvstate == VCHIQ_SRVSTATE_CLOSED)) &&
 		(service->handle == handle) &&
 		(service->instance == instance)) {
-		BUG_ON(service->ref_count == 0);
+		WARN_ON(service->ref_count == 0);
 		service->ref_count++;
 	} else
 		service = NULL;
@@ -273,7 +273,7 @@ next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
 		if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
 			(srv->instance == instance)) {
 			service = srv;
-			BUG_ON(service->ref_count == 0);
+			WARN_ON(service->ref_count == 0);
 			service->ref_count++;
 			break;
 		}
@@ -289,9 +289,11 @@ void
 lock_service(VCHIQ_SERVICE_T *service)
 {
 	spin_lock(&service_spinlock);
-	BUG_ON(!service || (service->ref_count == 0));
-	if (service)
+	WARN_ON(!service);
+	if (service) {
+		WARN_ON(service->ref_count == 0);
 		service->ref_count++;
+	}
 	spin_unlock(&service_spinlock);
 }
 
@@ -299,17 +301,24 @@ void
 unlock_service(VCHIQ_SERVICE_T *service)
 {
 	spin_lock(&service_spinlock);
-	BUG_ON(!service || (service->ref_count == 0));
-	if (service && service->ref_count) {
-		service->ref_count--;
-		if (!service->ref_count) {
-			VCHIQ_STATE_T *state = service->state;
-
-			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
-			state->services[service->localport] = NULL;
-		} else
-			service = NULL;
+	if (!service) {
+		WARN(1, "%s: service is NULL\n", __func__);
+		goto unlock;
 	}
+	if (!service->ref_count) {
+		WARN(1, "%s: ref_count is zero\n", __func__);
+		goto unlock;
+	}
+	service->ref_count--;
+	if (!service->ref_count) {
+		VCHIQ_STATE_T *state = service->state;
+
+		WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
+		state->services[service->localport] = NULL;
+	} else {
+		service = NULL;
+	}
+unlock:
 	spin_unlock(&service_spinlock);
 
 	if (service && service->userdata_term)
@@ -591,8 +600,10 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
 				return NULL; /* No space available */
 		}
 
-		BUG_ON(tx_pos ==
-			(state->slot_queue_available * VCHIQ_SLOT_SIZE));
+		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
+			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
+			return NULL;
+		}
 
 		slot_index = local->slot_queue[
 			SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
@@ -822,9 +833,14 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 	if (type == VCHIQ_MSG_DATA) {
 		int tx_end_index;
 
-		BUG_ON(!service);
-		BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
-				 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
+		if (!service) {
+			WARN(1, "%s: service is NULL\n", __func__);
+			mutex_unlock(&state->slot_mutex);
+			return VCHIQ_ERROR;
+		}
+
+		WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
+				  QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
 
 		if (service->closing) {
 			/* The service has been closed */
@@ -923,9 +939,8 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 			header, size, VCHIQ_MSG_SRCPORT(msgid),
 			VCHIQ_MSG_DSTPORT(msgid));
 
-		BUG_ON(!service);
-		BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
-				 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
+		WARN_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
+				  QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
 
 		callback_result =
 			copy_message_data(copy_callback, context,
@@ -1376,7 +1391,6 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
 {
 	VCHIQ_STATE_T *state = service->state;
 	int resolved = 0;
-	int rc;
 
 	while ((queue->process != queue->local_insert) &&
 		(queue->process != queue->remote_insert)) {
@@ -1392,8 +1406,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
 		WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
 		WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
 
-		rc = mutex_lock_killable(&state->bulk_transfer_mutex);
-		if (rc != 0)
+		if (mutex_lock_killable(&state->bulk_transfer_mutex))
 			break;
 
 		vchiq_transfer_bulk(bulk);
@@ -1952,9 +1965,14 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 					mutex_unlock(&service->bulk_mutex);
 					break;
 				}
-
-				BUG_ON(queue->process == queue->local_insert);
-				BUG_ON(queue->process != queue->remote_insert);
+				if (queue->process != queue->remote_insert) {
+					pr_err("%s: p %x != ri %x\n",
+					       __func__,
+					       queue->process,
+					       queue->remote_insert);
+					mutex_unlock(&service->bulk_mutex);
+					goto bail_not_ready;
+				}
 
 				bulk = &queue->bulks[
 					BULK_INDEX(queue->remote_insert)];
@@ -2139,7 +2157,6 @@ slot_handler_func(void *v)
 					vchiq_log_error(vchiq_core_log_level,
 						"Failed to send RESUME "
 						"message");
-					BUG();
 				}
 				break;
 
@@ -2351,13 +2368,17 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	VCHIQ_SHARED_STATE_T *remote;
 	VCHIQ_STATUS_T status;
 	char threadname[16];
-	static int id;
 	int i;
 
 	vchiq_log_warning(vchiq_core_log_level,
 		"%s: slot_zero = %pK, is_master = %d",
 		__func__, slot_zero, is_master);
 
+	if (vchiq_states[0]) {
+		pr_err("%s: VCHIQ state already initialized\n", __func__);
+		return VCHIQ_ERROR;
+	}
+
 	/* Check the input configuration */
 
 	if (slot_zero->magic != VCHIQ_MAGIC) {
@@ -2439,7 +2460,6 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 
 	memset(state, 0, sizeof(VCHIQ_STATE_T));
 
-	state->id = id++;
 	state->is_master = is_master;
 
 	/*
@@ -2558,8 +2578,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	set_user_nice(state->sync_thread, -20);
 	wake_up_process(state->sync_thread);
 
-	BUG_ON(state->id >= VCHIQ_MAX_STATES);
-	vchiq_states[state->id] = state;
+	vchiq_states[0] = state;
 
 	/* Indicate readiness to the other side */
 	local->initialised = 1;
@@ -3185,7 +3204,7 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
 	if (current == service->state->slot_handler_thread) {
 		status = vchiq_close_service_internal(service,
 			0/*!close_recvd*/);
-		BUG_ON(status == VCHIQ_RETRY);
+		WARN_ON(status == VCHIQ_RETRY);
 	} else {
 	/* Mark the service for termination by the slot handler */
 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
@@ -3247,7 +3266,7 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
 
 		status = vchiq_close_service_internal(service,
 			0/*!close_recvd*/);
-		BUG_ON(status == VCHIQ_RETRY);
+		WARN_ON(status == VCHIQ_RETRY);
 	} else {
 		/* Mark the service for removal by the slot handler */
 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index 5577df3..ac4a4ba 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -68,38 +68,38 @@ static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
 					2, 2, 2, 2, 2, 2, 2, 2,
 					3, 3, 3, 3, 3, 3, 3, 3 };
 
-#define PIO2_CHANNEL0_BIT		(1 << 0)
-#define PIO2_CHANNEL1_BIT		(1 << 1)
-#define PIO2_CHANNEL2_BIT		(1 << 2)
-#define PIO2_CHANNEL3_BIT		(1 << 3)
-#define PIO2_CHANNEL4_BIT		(1 << 4)
-#define PIO2_CHANNEL5_BIT		(1 << 5)
-#define PIO2_CHANNEL6_BIT		(1 << 6)
-#define PIO2_CHANNEL7_BIT		(1 << 7)
-#define PIO2_CHANNEL8_BIT		(1 << 0)
-#define PIO2_CHANNEL9_BIT		(1 << 1)
-#define PIO2_CHANNEL10_BIT		(1 << 2)
-#define PIO2_CHANNEL11_BIT		(1 << 3)
-#define PIO2_CHANNEL12_BIT		(1 << 4)
-#define PIO2_CHANNEL13_BIT		(1 << 5)
-#define PIO2_CHANNEL14_BIT		(1 << 6)
-#define PIO2_CHANNEL15_BIT		(1 << 7)
-#define PIO2_CHANNEL16_BIT		(1 << 0)
-#define PIO2_CHANNEL17_BIT		(1 << 1)
-#define PIO2_CHANNEL18_BIT		(1 << 2)
-#define PIO2_CHANNEL19_BIT		(1 << 3)
-#define PIO2_CHANNEL20_BIT		(1 << 4)
-#define PIO2_CHANNEL21_BIT		(1 << 5)
-#define PIO2_CHANNEL22_BIT		(1 << 6)
-#define PIO2_CHANNEL23_BIT		(1 << 7)
-#define PIO2_CHANNEL24_BIT		(1 << 0)
-#define PIO2_CHANNEL25_BIT		(1 << 1)
-#define PIO2_CHANNEL26_BIT		(1 << 2)
-#define PIO2_CHANNEL27_BIT		(1 << 3)
-#define PIO2_CHANNEL28_BIT		(1 << 4)
-#define PIO2_CHANNEL29_BIT		(1 << 5)
-#define PIO2_CHANNEL30_BIT		(1 << 6)
-#define PIO2_CHANNEL31_BIT		(1 << 7)
+#define PIO2_CHANNEL0_BIT		BIT(0)
+#define PIO2_CHANNEL1_BIT		BIT(1)
+#define PIO2_CHANNEL2_BIT		BIT(2)
+#define PIO2_CHANNEL3_BIT		BIT(3)
+#define PIO2_CHANNEL4_BIT		BIT(4)
+#define PIO2_CHANNEL5_BIT		BIT(5)
+#define PIO2_CHANNEL6_BIT		BIT(6)
+#define PIO2_CHANNEL7_BIT		BIT(7)
+#define PIO2_CHANNEL8_BIT		BIT(0)
+#define PIO2_CHANNEL9_BIT		BIT(1)
+#define PIO2_CHANNEL10_BIT		BIT(2)
+#define PIO2_CHANNEL11_BIT		BIT(3)
+#define PIO2_CHANNEL12_BIT		BIT(4)
+#define PIO2_CHANNEL13_BIT		BIT(5)
+#define PIO2_CHANNEL14_BIT		BIT(6)
+#define PIO2_CHANNEL15_BIT		BIT(7)
+#define PIO2_CHANNEL16_BIT		BIT(0)
+#define PIO2_CHANNEL17_BIT		BIT(1)
+#define PIO2_CHANNEL18_BIT		BIT(2)
+#define PIO2_CHANNEL19_BIT		BIT(3)
+#define PIO2_CHANNEL20_BIT		BIT(4)
+#define PIO2_CHANNEL21_BIT		BIT(5)
+#define PIO2_CHANNEL22_BIT		BIT(6)
+#define PIO2_CHANNEL23_BIT		BIT(7)
+#define PIO2_CHANNEL24_BIT		BIT(0)
+#define PIO2_CHANNEL25_BIT		BIT(1)
+#define PIO2_CHANNEL26_BIT		BIT(2)
+#define PIO2_CHANNEL27_BIT		BIT(3)
+#define PIO2_CHANNEL28_BIT		BIT(4)
+#define PIO2_CHANNEL29_BIT		BIT(5)
+#define PIO2_CHANNEL30_BIT		BIT(6)
+#define PIO2_CHANNEL31_BIT		BIT(7)
 
 static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
 					PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
@@ -120,12 +120,12 @@ static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
 					};
 
 /* PIO2_REGS_INT_STAT_CNTR (0xc) */
-#define PIO2_COUNTER0			(1 << 0)
-#define PIO2_COUNTER1			(1 << 1)
-#define PIO2_COUNTER2			(1 << 2)
-#define PIO2_COUNTER3			(1 << 3)
-#define PIO2_COUNTER4			(1 << 4)
-#define PIO2_COUNTER5			(1 << 5)
+#define PIO2_COUNTER0			BIT(0)
+#define PIO2_COUNTER1			BIT(1)
+#define PIO2_COUNTER2			BIT(2)
+#define PIO2_COUNTER3			BIT(3)
+#define PIO2_COUNTER4			BIT(4)
+#define PIO2_COUNTER5			BIT(5)
 
 static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
 					PIO2_COUNTER2, PIO2_COUNTER3,
@@ -133,8 +133,8 @@ static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
 
 /* PIO2_REGS_CTRL (0x18) */
 #define PIO2_VME_INT_MASK		0x7
-#define PIO2_LED			(1 << 6)
-#define PIO2_LOOP			(1 << 7)
+#define PIO2_LED			BIT(6)
+#define PIO2_LOOP			BIT(7)
 
 /* PIO2_REGS_VME_VECTOR (0x19) */
 #define PIO2_VME_VECTOR_SPUR		0x0
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 5463cf8..f5db2b3 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -913,7 +913,7 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
 {
 	unsigned short wRxBcnTSFOffst;
 
-	wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE];
+	wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate % MAX_RATE];
 
 	qwTSF2 += (u64)wRxBcnTSFOffst;
 
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 44420b5..1a04dbb 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -63,26 +63,26 @@ typedef enum _CARD_STATUS_TYPE {
 
 struct vnt_private;
 
-void CARDvSetRSPINF(struct vnt_private *, u8);
-void CARDvUpdateBasicTopRate(struct vnt_private *);
-bool CARDbIsOFDMinBasicRate(struct vnt_private *);
-void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
-bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(struct vnt_private *,
+void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
+void CARDvUpdateBasicTopRate(struct vnt_private *priv);
+bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
+void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode);
+bool CARDbSoftwareReset(struct vnt_private *priv);
+void CARDvSetFirstNextTBTT(struct vnt_private *priv,
 			   unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF,
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
 			 unsigned short wBeaconInterval);
-bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
+bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF);
 u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
 u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
-unsigned char CARDbyGetPktType(struct vnt_private *);
-void CARDvSafeResetTx(struct vnt_private *);
-void CARDvSafeResetRx(struct vnt_private *);
-bool CARDbRadioPowerOff(struct vnt_private *);
-bool CARDbRadioPowerOn(struct vnt_private *);
-bool CARDbSetPhyParameter(struct vnt_private *, u8);
-bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate,
+unsigned char CARDbyGetPktType(struct vnt_private *priv);
+void CARDvSafeResetTx(struct vnt_private *priv);
+void CARDvSafeResetRx(struct vnt_private *priv);
+bool CARDbRadioPowerOff(struct vnt_private *priv);
+bool CARDbRadioPowerOn(struct vnt_private *priv);
+bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
+bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
 		    u64 qwBSSTimestamp);
-bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval);
+bool CARDbSetBeaconPeriod(struct vnt_private *priv, unsigned short wBeaconInterval);
 
 #endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 2621dfa..8fe7076 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -21,8 +21,8 @@
 
 #include "card.h"
 
-void vnt_init_bands(struct vnt_private *);
+void vnt_init_bands(struct vnt_private *priv);
 
-bool set_channel(struct vnt_private *, struct ieee80211_channel *);
+bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch);
 
 #endif /* _CHANNEL_H_ */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index da0f711..9fcf2e2 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -157,7 +157,7 @@ static void vt6655_remove(struct pci_dev *pcid)
 {
 	struct vnt_private *priv = pci_get_drvdata(pcid);
 
-	if (priv == NULL)
+	if (!priv)
 		return;
 	device_free_info(priv);
 }
@@ -453,7 +453,7 @@ static bool device_init_rings(struct vnt_private *priv)
 				       priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
 				       priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
 				       &priv->pool_dma, GFP_ATOMIC);
-	if (vir_pool == NULL) {
+	if (!vir_pool) {
 		dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
 		return false;
 	}
@@ -1018,7 +1018,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
 			}
 
 			/* TODO: adhoc PS mode */
-
 		}
 
 		if (isr & ISR_BNTX) {
@@ -1311,8 +1310,8 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
 }
 
 static void vnt_bss_info_changed(struct ieee80211_hw *hw,
-		struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf,
-		u32 changed)
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_bss_conf *conf, u32 changed)
 {
 	struct vnt_private *priv = hw->priv;
 
@@ -1402,7 +1401,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 }
 
 static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
-	struct netdev_hw_addr_list *mc_list)
+				 struct netdev_hw_addr_list *mc_list)
 {
 	struct vnt_private *priv = hw->priv;
 	struct netdev_hw_addr *ha;
@@ -1421,7 +1420,8 @@ static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
 }
 
 static void vnt_configure(struct ieee80211_hw *hw,
-	unsigned int changed_flags, unsigned int *total_flags, u64 multicast)
+			  unsigned int changed_flags,
+			  unsigned int *total_flags, u64 multicast)
 {
 	struct vnt_private *priv = hw->priv;
 	u8 rx_mode = 0;
@@ -1482,8 +1482,8 @@ static void vnt_configure(struct ieee80211_hw *hw,
 }
 
 static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-	struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-		struct ieee80211_key_conf *key)
+		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key)
 {
 	struct vnt_private *priv = hw->priv;
 
@@ -1702,7 +1702,6 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
 
 static int vt6655_resume(struct pci_dev *pcid)
 {
-
 	pci_set_power_state(pcid, PCI_D0);
 	pci_enable_wake(pcid, PCI_D0, 0);
 	pci_restore_state(pcid);
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index dad9e29..d7ede73 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -27,8 +27,8 @@
 #include "mac.h"
 
 static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
-	struct ieee80211_key_conf *key, u32 key_type, u32 mode,
-	bool onfly_latch)
+			   struct ieee80211_key_conf *key, u32 key_type,
+			   u32 mode, bool onfly_latch)
 {
 	struct vnt_private *priv = hw->priv;
 	u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 33b758c..db401e3 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -885,57 +885,57 @@ do {								\
 #define MACvSetRFLE_LatchBase(iobase)                                 \
 	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
 
-bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
+bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs,
 		     unsigned char byTestBits);
-bool MACbIsRegBitsOff(struct vnt_private *, unsigned char byRegOfs,
+bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
 		      unsigned char byTestBits);
 
-bool MACbIsIntDisable(struct vnt_private *);
+bool MACbIsIntDisable(struct vnt_private *priv);
 
-void MACvSetShortRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
+void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
 
-void MACvSetLongRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
-void MACvGetLongRetryLimit(struct vnt_private *,
+void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
+void MACvGetLongRetryLimit(struct vnt_private *priv,
 			   unsigned char *pbyRetryLimit);
 
-void MACvSetLoopbackMode(struct vnt_private *, unsigned char byLoopbackMode);
+void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode);
 
-void MACvSaveContext(struct vnt_private *, unsigned char *pbyCxtBuf);
-void MACvRestoreContext(struct vnt_private *, unsigned char *pbyCxtBuf);
+void MACvSaveContext(struct vnt_private *priv, unsigned char *pbyCxtBuf);
+void MACvRestoreContext(struct vnt_private *priv, unsigned char *pbyCxtBuf);
 
-bool MACbSoftwareReset(struct vnt_private *);
-bool MACbSafeSoftwareReset(struct vnt_private *);
-bool MACbSafeRxOff(struct vnt_private *);
-bool MACbSafeTxOff(struct vnt_private *);
-bool MACbSafeStop(struct vnt_private *);
-bool MACbShutdown(struct vnt_private *);
-void MACvInitialize(struct vnt_private *);
-void MACvSetCurrRx0DescAddr(struct vnt_private *,
+bool MACbSoftwareReset(struct vnt_private *priv);
+bool MACbSafeSoftwareReset(struct vnt_private *priv);
+bool MACbSafeRxOff(struct vnt_private *priv);
+bool MACbSafeTxOff(struct vnt_private *priv);
+bool MACbSafeStop(struct vnt_private *priv);
+bool MACbShutdown(struct vnt_private *priv);
+void MACvInitialize(struct vnt_private *priv);
+void MACvSetCurrRx0DescAddr(struct vnt_private *priv,
 			    u32 curr_desc_addr);
-void MACvSetCurrRx1DescAddr(struct vnt_private *,
+void MACvSetCurrRx1DescAddr(struct vnt_private *priv,
 			    u32 curr_desc_addr);
-void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *,
+void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
 			   u32 curr_desc_addr);
-void MACvSetCurrTx0DescAddrEx(struct vnt_private *,
+void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
 			      u32 curr_desc_addr);
-void MACvSetCurrAC0DescAddrEx(struct vnt_private *,
+void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
 			      u32 curr_desc_addr);
-void MACvSetCurrSyncDescAddrEx(struct vnt_private *,
+void MACvSetCurrSyncDescAddrEx(struct vnt_private *priv,
 			       u32 curr_desc_addr);
-void MACvSetCurrATIMDescAddrEx(struct vnt_private *,
+void MACvSetCurrATIMDescAddrEx(struct vnt_private *priv,
 			       u32 curr_desc_addr);
-void MACvTimer0MicroSDelay(struct vnt_private *, unsigned int uDelay);
-void MACvOneShotTimer1MicroSec(struct vnt_private *, unsigned int uDelayTime);
+void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay);
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime);
 
-void MACvSetMISCFifo(struct vnt_private *, unsigned short wOffset,
+void MACvSetMISCFifo(struct vnt_private *priv, unsigned short wOffset,
 		     u32 dwData);
 
-bool MACbPSWakeup(struct vnt_private *);
+bool MACbPSWakeup(struct vnt_private *priv);
 
-void MACvSetKeyEntry(struct vnt_private *, unsigned short wKeyCtl,
+void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
 		     unsigned int uEntryIdx, unsigned int uKeyIdx,
 		     unsigned char *pbyAddr, u32 *pdwKey,
 		     unsigned char byLocalID);
-void MACvDisableKeyEntry(struct vnt_private *, unsigned int uEntryIdx);
+void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx);
 
 #endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index dfcb0ca..f360c59 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -31,20 +31,10 @@
 #define PS_FAST_INTERVAL         1       /* Fast power saving listen interval */
 #define PS_MAX_INTERVAL          4       /* MAX power saving listen interval */
 
-void
-PSvDisablePowerSaving(
-	struct vnt_private *
-);
+void PSvDisablePowerSaving(struct vnt_private *priv);
 
-void
-PSvEnablePowerSaving(
-	struct vnt_private *,
-	unsigned short wListenInterval
-);
+void PSvEnablePowerSaving(struct vnt_private *priv, unsigned short wListenInterval);
 
-bool
-PSbIsNextTBTTWakeUp(
-	struct vnt_private *
-);
+bool PSbIsNextTBTTWakeUp(struct vnt_private *priv);
 
 #endif /* __POWER_H__ */
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 3760009..ba22230 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -68,28 +68,28 @@
 
 /*---------------------  Export Functions  --------------------------*/
 
-bool IFRFbWriteEmbedded(struct vnt_private *, unsigned long dwData);
-bool RFbSelectChannel(struct vnt_private *, unsigned char byRFType, u16);
+bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
+bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
 bool RFbInit(
-	struct vnt_private *
+	struct vnt_private *priv
 );
-bool RFvWriteWakeProgSyn(struct vnt_private *, unsigned char byRFType, u16);
-bool RFbSetPower(struct vnt_private *, unsigned int rate, u16);
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel);
+bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
 bool RFbRawSetPower(
-	struct vnt_private *,
+	struct vnt_private *priv,
 	unsigned char byPwr,
 	unsigned int rate
 );
 
 void
 RFvRSSITodBm(
-	struct vnt_private *,
+	struct vnt_private *priv,
 	unsigned char byCurrRSSI,
 	long    *pldBm
 );
 
 /* {{ RobertYu: 20050104 */
-bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, u16, u16);
+bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv, u16 byOldChannel, u16 byNewChannel);
 /* }} RobertYu */
 
 #endif /* __RF_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 0e5a993..c61422e 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -359,35 +359,18 @@ void vnt_update_ifs(struct vnt_private *priv)
 		priv->sifs = C_SIFS_A;
 		priv->difs = C_SIFS_A + 2 * C_SLOT_SHORT;
 		max_min = 4;
-	} else if (priv->packet_type == PK_TYPE_11B) {
-		priv->slot = C_SLOT_LONG;
-		priv->sifs = C_SIFS_BG;
-		priv->difs = C_SIFS_BG + 2 * C_SLOT_LONG;
-		max_min = 5;
-	} else {/* PK_TYPE_11GA & PK_TYPE_11GB */
-		bool ofdm_rate = false;
-		unsigned int ii = 0;
-
+	} else {
 		priv->sifs = C_SIFS_BG;
 
-		if (priv->short_slot_time)
+		if (priv->short_slot_time) {
 			priv->slot = C_SLOT_SHORT;
-		else
+			max_min = 4;
+		} else {
 			priv->slot = C_SLOT_LONG;
-
-		priv->difs = C_SIFS_BG + 2 * priv->slot;
-
-		for (ii = RATE_54M; ii >= RATE_6M; ii--) {
-			if (priv->basic_rates & ((u32)(0x1 << ii))) {
-				ofdm_rate = true;
-				break;
-			}
+			max_min = 5;
 		}
 
-		if (ofdm_rate)
-			max_min = 4;
-		else
-			max_min = 5;
+		priv->difs = C_SIFS_BG + 2 * priv->slot;
 	}
 
 	priv->eifs = C_EIFS;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 028f54b..095b855 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -513,6 +513,9 @@ static int vnt_start(struct ieee80211_hw *hw)
 		goto free_all;
 	}
 
+	if (vnt_key_init_table(priv))
+		goto free_all;
+
 	priv->int_interval = 1;  /* bInterval is set to 1 */
 
 	vnt_int_start_interrupt(priv);
@@ -634,7 +637,6 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct vnt_private *priv = hw->priv;
 	struct ieee80211_conf *conf = &hw->conf;
-	u8 bb_type;
 
 	if (changed & IEEE80211_CONF_CHANGE_PS) {
 		if (conf->flags & IEEE80211_CONF_PS)
@@ -648,15 +650,9 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
 		vnt_set_channel(priv, conf->chandef.chan->hw_value);
 
 		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
-			bb_type = BB_TYPE_11A;
+			priv->bb_type = BB_TYPE_11A;
 		else
-			bb_type = BB_TYPE_11G;
-
-		if (priv->bb_type != bb_type) {
-			priv->bb_type = bb_type;
-
-			vnt_set_bss_mode(priv);
-		}
+			priv->bb_type = BB_TYPE_11G;
 	}
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -687,6 +683,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 		priv->basic_rates = conf->basic_rates;
 
 		vnt_update_top_rates(priv);
+		vnt_set_bss_mode(priv);
 
 		dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
 	}
@@ -715,6 +712,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 			priv->short_slot_time = false;
 
 		vnt_set_short_slot_time(priv);
+		vnt_update_ifs(priv);
 		vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
 		vnt_update_pre_ed_threshold(priv, false);
 	}
@@ -846,7 +844,6 @@ static void vnt_sw_scan_start(struct ieee80211_hw *hw,
 {
 	struct vnt_private *priv = hw->priv;
 
-	vnt_set_bss_mode(priv);
 	/* Set max sensitivity*/
 	vnt_update_pre_ed_threshold(priv, true);
 }
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 6341349..a44abcc 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -114,7 +114,7 @@ static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
 }
 
 static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
-			    u32 frame_length, u16 rate, int need_ack)
+			   u32 frame_length, u16 rate, int need_ack)
 {
 	u32 data_time, ack_time;
 
@@ -135,30 +135,37 @@ static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
 }
 
 static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
-				     u32 frame_length, u16 rate, int need_ack)
+				    u32 frame_length, u16 rate, int need_ack)
 {
 	return cpu_to_le16((u16)vnt_get_rsvtime(priv, pkt_type,
 		frame_length, rate, need_ack));
 }
 
-static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv,
-					 u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate)
+static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv, u8 rsv_type,
+					u8 pkt_type, u32 frame_length,
+					u16 current_rate)
 {
 	u32 rrv_time, rts_time, cts_time, ack_time, data_time;
 
-	rrv_time = rts_time = cts_time = ack_time = data_time = 0;
+	rrv_time = 0;
+	rts_time = 0;
+	cts_time = 0;
+	ack_time = 0;
 
 	data_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
 				       frame_length, current_rate);
 
 	if (rsv_type == 0) {
-		rts_time = vnt_get_frame_time(priv->preamble_type,
-			pkt_type, 20, priv->top_cck_basic_rate);
-		cts_time = ack_time = vnt_get_frame_time(priv->preamble_type,
-			pkt_type, 14, priv->top_cck_basic_rate);
+		rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
+					      20, priv->top_cck_basic_rate);
+		ack_time = vnt_get_frame_time(priv->preamble_type,
+					      pkt_type, 14,
+					      priv->top_cck_basic_rate);
+		cts_time = ack_time;
+
 	} else if (rsv_type == 1) {
-		rts_time = vnt_get_frame_time(priv->preamble_type,
-			pkt_type, 20, priv->top_cck_basic_rate);
+		rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
+					      20, priv->top_cck_basic_rate);
 		cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
 					      14, priv->top_cck_basic_rate);
 		ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
@@ -166,8 +173,11 @@ static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv,
 	} else if (rsv_type == 2) {
 		rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
 					      20, priv->top_ofdm_basic_rate);
-		cts_time = ack_time = vnt_get_frame_time(priv->preamble_type,
-			pkt_type, 14, priv->top_ofdm_basic_rate);
+		ack_time = vnt_get_frame_time(priv->preamble_type,
+					      pkt_type, 14,
+					      priv->top_ofdm_basic_rate);
+		cts_time = ack_time;
+
 	} else if (rsv_type == 3) {
 		cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
 					      14, priv->top_cck_basic_rate);
@@ -184,18 +194,20 @@ static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv,
 	return cpu_to_le16((u16)rrv_time);
 }
 
-static __le16 vnt_get_duration_le(struct vnt_private *priv,
-				   u8 pkt_type, int need_ack)
+static __le16 vnt_get_duration_le(struct vnt_private *priv, u8 pkt_type,
+				  int need_ack)
 {
 	u32 ack_time = 0;
 
 	if (need_ack) {
 		if (pkt_type == PK_TYPE_11B)
 			ack_time = vnt_get_frame_time(priv->preamble_type,
-				pkt_type, 14, priv->top_cck_basic_rate);
+						      pkt_type, 14,
+						      priv->top_cck_basic_rate);
 		else
 			ack_time = vnt_get_frame_time(priv->preamble_type,
-				pkt_type, 14, priv->top_ofdm_basic_rate);
+						      pkt_type, 14,
+						      priv->top_ofdm_basic_rate);
 
 		return cpu_to_le16((u16)(priv->sifs + ack_time));
 	}
@@ -216,8 +228,8 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
 	case RTSDUR_BA:
 	case RTSDUR_BA_F0:
 	case RTSDUR_BA_F1:
-		cts_time = vnt_get_frame_time(priv->preamble_type,
-				pkt_type, 14, priv->top_cck_basic_rate);
+		cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
+					      14, priv->top_cck_basic_rate);
 		dur_time = cts_time + 2 * priv->sifs +
 			vnt_get_rsvtime(priv, pkt_type,
 					frame_length, rate, need_ack);
@@ -226,8 +238,8 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
 	case RTSDUR_AA:
 	case RTSDUR_AA_F0:
 	case RTSDUR_AA_F1:
-		cts_time = vnt_get_frame_time(priv->preamble_type,
-					      pkt_type, 14, priv->top_ofdm_basic_rate);
+		cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
+					      14, priv->top_ofdm_basic_rate);
 		dur_time = cts_time + 2 * priv->sifs +
 			vnt_get_rsvtime(priv, pkt_type,
 					frame_length, rate, need_ack);
@@ -248,7 +260,7 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
 }
 
 static u16 vnt_mac_hdr_pos(struct vnt_usb_send_context *tx_context,
-	struct ieee80211_hdr *hdr)
+			   struct ieee80211_hdr *hdr)
 {
 	u8 *head = tx_context->data + offsetof(struct vnt_tx_buffer, fifo_head);
 	u8 *hdr_pos = (u8 *)hdr;
@@ -263,7 +275,6 @@ static u16 vnt_mac_hdr_pos(struct vnt_usb_send_context *tx_context,
 static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
 			       struct vnt_tx_datahead_g *buf)
 {
-
 	struct vnt_private *priv = tx_context->priv;
 	struct ieee80211_hdr *hdr =
 				(struct ieee80211_hdr *)tx_context->skb->data;
@@ -274,7 +285,7 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
 	/* Get SignalField,ServiceField,Length */
 	vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a);
 	vnt_get_phy_field(priv, frame_len, priv->top_cck_basic_rate,
-							PK_TYPE_11B, &buf->b);
+			  PK_TYPE_11B, &buf->b);
 
 	/* Get Duration and TimeStamp */
 	if (ieee80211_is_pspoll(hdr->frame_control)) {
@@ -310,7 +321,7 @@ static u16 vnt_rxtx_datahead_g_fb(struct vnt_usb_send_context *tx_context,
 	vnt_get_phy_field(priv, frame_len, rate, tx_context->pkt_type, &buf->a);
 
 	vnt_get_phy_field(priv, frame_len, priv->top_cck_basic_rate,
-						PK_TYPE_11B, &buf->b);
+			  PK_TYPE_11B, &buf->b);
 
 	/* Get Duration and TimeStamp */
 	buf->duration_a = vnt_get_duration_le(priv, tx_context->pkt_type,
@@ -387,7 +398,7 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
 }
 
 static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
-	struct ieee80211_rts *rts, __le16 duration)
+				  struct ieee80211_rts *rts, __le16 duration)
 {
 	struct ieee80211_hdr *hdr =
 				(struct ieee80211_hdr *)tx_context->skb->data;
@@ -499,8 +510,8 @@ static u16 vnt_rxtx_rts_a_fb_head(struct vnt_usb_send_context *tx_context,
 	u16 current_rate = tx_context->tx_rate;
 	u16 rts_frame_len = 20;
 
-	vnt_get_phy_field(priv, rts_frame_len,
-		priv->top_ofdm_basic_rate, tx_context->pkt_type, &buf->a);
+	vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
+			  tx_context->pkt_type, &buf->a);
 
 	buf->duration = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
 						   tx_context->pkt_type,
@@ -683,11 +694,10 @@ static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
 }
 
 static u16 vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context,
-				      struct vnt_tx_buffer *tx_buffer,
-				      struct vnt_mic_hdr **mic_hdr, u32 need_mic,
-				      bool need_rts)
+				     struct vnt_tx_buffer *tx_buffer,
+				     struct vnt_mic_hdr **mic_hdr, u32 need_mic,
+				     bool need_rts)
 {
-
 	if (tx_context->pkt_type == PK_TYPE_11GB ||
 	    tx_context->pkt_type == PK_TYPE_11GA) {
 		if (need_rts) {
@@ -712,8 +722,9 @@ static u16 vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context,
 }
 
 static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
-	u8 *key_buffer, struct ieee80211_key_conf *tx_key, struct sk_buff *skb,
-	u16 payload_len, struct vnt_mic_hdr *mic_hdr)
+			   u8 *key_buffer, struct ieee80211_key_conf *tx_key,
+			   struct sk_buff *skb, u16 payload_len,
+			   struct vnt_mic_hdr *mic_hdr)
 {
 	struct ieee80211_hdr *hdr = tx_context->hdr;
 	u64 pn64;
@@ -774,14 +785,12 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
 		if (ieee80211_has_a4(hdr->frame_control))
 			ether_addr_copy(mic_hdr->addr4, hdr->addr4);
 
-
 		memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);
 
 		break;
 	default:
 		break;
 	}
-
 }
 
 int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
@@ -807,7 +816,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
 	current_rate = rate->hw_value;
 	if (priv->current_rate != current_rate &&
-			!(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+	    !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
 		priv->current_rate = current_rate;
 		vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
 	}
@@ -964,7 +973,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 		tx_key = info->control.hw_key;
 		if (tx_key->keylen > 0)
 			vnt_fill_txkey(tx_context, tx_buffer_head->tx_key,
-				tx_key, skb, tx_body_size, mic_hdr);
+				       tx_key, skb, tx_body_size, mic_hdr);
 	}
 
 	priv->seq_counter = (le16_to_cpu(hdr->seq_ctrl) &
@@ -991,8 +1000,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 	return 0;
 }
 
-static int vnt_beacon_xmit(struct vnt_private *priv,
-	struct sk_buff *skb)
+static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
 {
 	struct vnt_beacon_buffer *beacon_buffer;
 	struct vnt_tx_short_buf_head *short_head;
@@ -1101,7 +1109,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
 }
 
 int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
-		       struct ieee80211_bss_conf *conf)
+		      struct ieee80211_bss_conf *conf)
 {
 	vnt_mac_reg_bits_off(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
 
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index c3a8af0..2568dfc 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -329,25 +329,53 @@ static void handle_set_channel(struct wilc_vif *vif,
 		netdev_err(vif->ndev, "Failed to set channel\n");
 }
 
-static void handle_set_wfi_drv_handler(struct wilc_vif *vif,
-				       struct drv_handler *hif_drv_handler)
+static int handle_set_wfi_drv_handler(struct wilc_vif *vif,
+				      struct drv_handler *hif_drv_handler)
 {
 	int ret = 0;
 	struct wid wid;
+	u8 *currbyte, *buffer;
+	struct host_if_drv *hif_drv = NULL;
+
+	if (!vif->hif_drv)
+		return -EINVAL;
+
+	if (!hif_drv_handler)
+		return -EINVAL;
+
+	hif_drv	= vif->hif_drv;
+
+	buffer = kzalloc(DRV_HANDLER_SIZE, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	currbyte = buffer;
+	*currbyte = hif_drv->driver_handler_id & DRV_HANDLER_MASK;
+	currbyte++;
+	*currbyte = (u32)0 & DRV_HANDLER_MASK;
+	currbyte++;
+	*currbyte = (u32)0 & DRV_HANDLER_MASK;
+	currbyte++;
+	*currbyte = (u32)0 & DRV_HANDLER_MASK;
+	currbyte++;
+	*currbyte = (hif_drv_handler->name | (hif_drv_handler->mode << 1));
 
 	wid.id = (u16)WID_SET_DRV_HANDLER;
 	wid.type = WID_STR;
-	wid.val = (s8 *)hif_drv_handler;
-	wid.size = sizeof(*hif_drv_handler);
+	wid.val = (s8 *)buffer;
+	wid.size = DRV_HANDLER_SIZE;
 
 	ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
-				   hif_drv_handler->handler);
-
-	if (!hif_drv_handler->handler)
-		complete(&hif_driver_comp);
-
-	if (ret)
+				   hif_drv->driver_handler_id);
+	if (ret) {
 		netdev_err(vif->ndev, "Failed to set driver handler\n");
+		complete(&hif_driver_comp);
+		kfree(buffer);
+		return ret;
+	}
+	complete(&hif_driver_comp);
+	kfree(buffer);
+	return 0;
 }
 
 static void handle_set_operation_mode(struct wilc_vif *vif,
@@ -1188,7 +1216,7 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
 	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
 				      wilc_get_vif_idx(vif));
 	if (result)
-		netdev_err(vif->ndev, "Failed to send dissconect\n");
+		netdev_err(vif->ndev, "Failed to send disconnect\n");
 
 	hif_drv->usr_conn_req.ssid_len = 0;
 	kfree(hif_drv->usr_conn_req.ssid);
@@ -2052,23 +2080,9 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer,
 	pu8CurrByte += pstrStationParam->rates_len;
 
 	*pu8CurrByte++ = pstrStationParam->ht_supported;
-	*pu8CurrByte++ = pstrStationParam->ht_capa_info & 0xFF;
-	*pu8CurrByte++ = (pstrStationParam->ht_capa_info >> 8) & 0xFF;
-
-	*pu8CurrByte++ = pstrStationParam->ht_ampdu_params;
-	memcpy(pu8CurrByte, pstrStationParam->ht_supp_mcs_set,
-	       WILC_SUPP_MCS_SET_SIZE);
-	pu8CurrByte += WILC_SUPP_MCS_SET_SIZE;
-
-	*pu8CurrByte++ = pstrStationParam->ht_ext_params & 0xFF;
-	*pu8CurrByte++ = (pstrStationParam->ht_ext_params >> 8) & 0xFF;
-
-	*pu8CurrByte++ = pstrStationParam->ht_tx_bf_cap & 0xFF;
-	*pu8CurrByte++ = (pstrStationParam->ht_tx_bf_cap >> 8) & 0xFF;
-	*pu8CurrByte++ = (pstrStationParam->ht_tx_bf_cap >> 16) & 0xFF;
-	*pu8CurrByte++ = (pstrStationParam->ht_tx_bf_cap >> 24) & 0xFF;
-
-	*pu8CurrByte++ = pstrStationParam->ht_ante_sel;
+	memcpy(pu8CurrByte, &pstrStationParam->ht_capa,
+	       sizeof(struct ieee80211_ht_cap));
+	pu8CurrByte += sizeof(struct ieee80211_ht_cap);
 
 	*pu8CurrByte++ = pstrStationParam->flags_mask & 0xFF;
 	*pu8CurrByte++ = (pstrStationParam->flags_mask >> 8) & 0xFF;
@@ -2403,9 +2417,9 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
 
 	pu8CurrByte = wid.val;
 	*pu8CurrByte++ = (strHostIfSetMulti->enabled & 0xFF);
-	*pu8CurrByte++ = 0;
-	*pu8CurrByte++ = 0;
-	*pu8CurrByte++ = 0;
+	*pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 8) & 0xFF);
+	*pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 16) & 0xFF);
+	*pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 24) & 0xFF);
 
 	*pu8CurrByte++ = (strHostIfSetMulti->cnt & 0xFF);
 	*pu8CurrByte++ = ((strHostIfSetMulti->cnt >> 8) & 0xFF);
@@ -2463,6 +2477,7 @@ static void host_if_work(struct work_struct *work)
 {
 	struct host_if_msg *msg;
 	struct wilc *wilc;
+	int ret = 0;
 
 	msg = container_of(work, struct host_if_msg, work);
 	wilc = msg->vif->wilc;
@@ -2568,7 +2583,7 @@ static void host_if_work(struct work_struct *work)
 		break;
 
 	case HOST_IF_MSG_SET_WFIDRV_HANDLER:
-		handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
+		ret = handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
 		break;
 
 	case HOST_IF_MSG_SET_OPERATION_MODE:
@@ -2622,6 +2637,8 @@ static void host_if_work(struct work_struct *work)
 		break;
 	}
 free_msg:
+	if (ret)
+		netdev_err(msg->vif->ndev, "Host cmd %d failed\n", msg->id);
 	kfree(msg);
 	complete(&hif_thread_comp);
 }
@@ -3099,7 +3116,8 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
 	return 0;
 }
 
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
+			     u8 ifc_id)
 {
 	int result = 0;
 	struct host_if_msg msg;
@@ -3107,7 +3125,8 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
 	memset(&msg, 0, sizeof(struct host_if_msg));
 	msg.id = HOST_IF_MSG_SET_WFIDRV_HANDLER;
 	msg.body.drv.handler = index;
-	msg.body.drv.mac_idx = mac_idx;
+	msg.body.drv.mode = mode;
+	msg.body.drv.name = ifc_id;
 	msg.vif = vif;
 
 	result = wilc_enqueue_cmd(&msg);
@@ -3330,6 +3349,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 	for (i = 0; i < wilc->vif_num; i++)
 		if (dev == wilc->vif[i]->ndev) {
 			wilc->vif[i]->hif_drv = hif_drv;
+			hif_drv->driver_handler_id = i + 1;
 			break;
 		}
 
@@ -3403,7 +3423,7 @@ int wilc_deinit(struct wilc_vif *vif)
 	del_timer_sync(&periodic_rssi);
 	del_timer_sync(&hif_drv->remain_on_ch_timer);
 
-	wilc_set_wfi_drv_handler(vif, 0, 0);
+	wilc_set_wfi_drv_handler(vif, 0, 0, 0);
 	wait_for_completion(&hif_driver_comp);
 
 	if (hif_drv->usr_scan_req.scan_result) {
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index f36d3b5..1ce5ead 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -1,6 +1,6 @@
 #ifndef HOST_INT_H
 #define HOST_INT_H
-
+#include <linux/ieee80211.h>
 #include "coreconfigurator.h"
 
 #define IP_ALEN  4
@@ -47,10 +47,11 @@
 #define ETH_ALEN				6
 #define PMKID_LEN				16
 #define WILC_MAX_NUM_PMKIDS			16
-#define WILC_SUPP_MCS_SET_SIZE			16
 #define WILC_ADD_STA_LENGTH			40
 #define SCAN_EVENT_DONE_ABORTED
 #define NUM_CONCURRENT_IFC			2
+#define DRV_HANDLER_SIZE			5
+#define DRV_HANDLER_MASK			0x000000FF
 
 struct rf_info {
 	u8 link_speed;
@@ -217,7 +218,8 @@ struct user_conn_req {
 
 struct drv_handler {
 	u32 handler;
-	u8 mac_idx;
+	u8 mode;
+	u8 name;
 };
 
 struct op_mode {
@@ -281,6 +283,7 @@ struct host_if_drv {
 	struct timer_list remain_on_ch_timer;
 
 	bool IFC_UP;
+	int driver_handler_id;
 };
 
 struct add_sta_param {
@@ -289,12 +292,7 @@ struct add_sta_param {
 	u8 rates_len;
 	const u8 *rates;
 	bool ht_supported;
-	u16 ht_capa_info;
-	u8 ht_ampdu_params;
-	u8 ht_supp_mcs_set[16];
-	u16 ht_ext_params;
-	u32 ht_tx_bf_cap;
-	u8 ht_ante_sel;
+	struct ieee80211_ht_cap ht_capa;
 	u16 flags_mask;
 	u16 flags_set;
 };
@@ -354,7 +352,8 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
 			   void *user_arg);
 int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id);
 int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx);
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mode,
+			     u8 ifc_id);
 int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode);
 int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
 void wilc_resolve_disconnect_aberration(struct wilc_vif *vif);
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index d6d8034..3087084 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -858,34 +858,15 @@ static int wilc_mac_open(struct net_device *ndev)
 
 	for (i = 0; i < wl->vif_num; i++) {
 		if (ndev == wl->vif[i]->ndev) {
-			if (vif->iftype == AP_MODE) {
-				wilc_set_wfi_drv_handler(vif,
-							 wilc_get_vif_idx(vif),
-							 0);
-			} else if (!wilc_wlan_get_num_conn_ifcs(wl)) {
-				wilc_set_wfi_drv_handler(vif,
-							 wilc_get_vif_idx(vif),
-							 wl->open_ifcs);
-			} else {
-				if (memcmp(wl->vif[i ^ 1]->bssid,
-					   wl->vif[i ^ 1]->src_addr, 6))
-					wilc_set_wfi_drv_handler(vif,
-							 wilc_get_vif_idx(vif),
-							 0);
-				else
-					wilc_set_wfi_drv_handler(vif,
-							 wilc_get_vif_idx(vif),
-							 1);
-			}
+			wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
+						 vif->iftype, vif->ifc_id);
 			wilc_set_operation_mode(vif, vif->iftype);
-
-			wilc_get_mac_address(vif, mac_add);
-			netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
-			memcpy(wl->vif[i]->src_addr, mac_add, ETH_ALEN);
-
 			break;
 		}
 	}
+			wilc_get_mac_address(vif, mac_add);
+			netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
+			memcpy(wl->vif[i]->src_addr, mac_add, ETH_ALEN);
 
 	memcpy(ndev->dev_addr, wl->vif[i]->src_addr, ETH_ALEN);
 
@@ -1246,11 +1227,13 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 		vif = netdev_priv(ndev);
 		memset(vif, 0, sizeof(struct wilc_vif));
 
-		if (i == 0)
+		if (i == 0) {
 			strcpy(ndev->name, "wlan%d");
-		else
+			vif->ifc_id = 1;
+		} else {
 			strcpy(ndev->name, "p2p%d");
-
+			vif->ifc_id = 0;
+		}
 		vif->wilc = *wilc;
 		vif->ndev = ndev;
 		wl->vif[i] = vif;
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 7d32de9..ce54864 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -20,7 +20,7 @@
 static struct dentry *wilc_dir;
 
 /*
- * --------------------------------------------------------------------------------
+ * ----------------------------------------------------------------------------
  */
 #define DEBUG           BIT(0)
 #define INFO            BIT(1)
@@ -32,10 +32,11 @@ static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
 EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
 
 /*
- * --------------------------------------------------------------------------------
+ * ----------------------------------------------------------------------------
  */
 
-static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos)
+static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
+				     size_t count, loff_t *ppos)
 {
 	char buf[128];
 	int res = 0;
@@ -44,13 +45,15 @@ static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, si
 	if (*ppos > 0)
 		return 0;
 
-	res = scnprintf(buf, sizeof(buf), "Debug Level: %x\n", atomic_read(&WILC_DEBUG_LEVEL));
+	res = scnprintf(buf, sizeof(buf), "Debug Level: %x\n",
+			atomic_read(&WILC_DEBUG_LEVEL));
 
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
 
-static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
-				      size_t count, loff_t *ppos)
+static ssize_t wilc_debug_level_write(struct file *filp,
+				      const char __user *buf, size_t count,
+				      loff_t *ppos)
 {
 	int flag = 0;
 	int ret;
@@ -60,7 +63,8 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
 		return ret;
 
 	if (flag > DBG_LEVEL_ALL) {
-		pr_info("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
+		pr_info("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n",
+			__func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
 		return -EINVAL;
 	}
 
@@ -75,7 +79,7 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
 }
 
 /*
- * --------------------------------------------------------------------------------
+ * ----------------------------------------------------------------------------
  */
 
 #define FOPS(_open, _read, _write, _poll) { \
@@ -94,7 +98,12 @@ struct wilc_debugfs_info_t {
 };
 
 static struct wilc_debugfs_info_t debugfs_info[] = {
-	{ "wilc_debug_level",	0666,	(DEBUG | ERR), FOPS(NULL, wilc_debug_level_read, wilc_debug_level_write, NULL), },
+	{
+		"wilc_debug_level",
+		0666,
+		(DEBUG | ERR),
+		FOPS(NULL, wilc_debug_level_read, wilc_debug_level_write, NULL),
+	},
 };
 
 static int __init wilc_debugfs_init(void)
@@ -122,4 +131,3 @@ static void __exit wilc_debugfs_remove(void)
 module_exit(wilc_debugfs_remove);
 
 #endif
-
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 44a12bd..68fd5b3 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -84,7 +84,6 @@ static const struct wiphy_wowlan_support wowlan_support = {
 #define TCP_ACK_FILTER_LINK_SPEED_THRESH	54
 #define DEFAULT_LINK_SPEED			72
 
-
 #define IS_MANAGMEMENT				0x100
 #define IS_MANAGMEMENT_CALLBACK			0x080
 #define IS_MGMT_STATUS_SUCCES			0x040
@@ -163,12 +162,12 @@ static struct ieee80211_supported_band WILC_WFI_band_2ghz = {
 	.n_bitrates = ARRAY_SIZE(ieee80211_bitrates),
 };
 
-
 struct add_key_params {
 	u8 key_idx;
 	bool pairwise;
 	u8 *mac_addr;
 };
+
 static struct add_key_params g_add_gtk_key_params;
 static struct wilc_wfi_key g_key_gtk_params;
 static struct add_key_params g_add_ptk_key_params;
@@ -281,7 +280,6 @@ static void remove_network_from_shadow(unsigned long arg)
 	unsigned long now = jiffies;
 	int i, j;
 
-
 	for (i = 0; i < last_scanned_cnt; i++) {
 		if (time_after(now, last_scanned_shadow[i].time_scan +
 			       (unsigned long)(SCAN_RESULT_EXPIRE))) {
@@ -526,7 +524,6 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
 
 			memcpy(priv->au8AssociatedBss, pstrConnectInfo->bssid, ETH_ALEN);
 
-
 			for (i = 0; i < last_scanned_cnt; i++) {
 				if (memcmp(last_scanned_shadow[i].bssid,
 					   pstrConnectInfo->bssid,
@@ -626,7 +623,6 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 				return -ENOMEM;
 			strHiddenNetwork.n_ssids = request->n_ssids;
 
-
 			for (i = 0; i < request->n_ssids; i++) {
 				if (request->ssids[i].ssid_len != 0) {
 					strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
@@ -927,8 +923,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
 				priv->wilc_ptk[key_index]->seq = NULL;
 			}
 
-
-
 			if (!pairwise) {
 				if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
 					u8gmode = ENCRYPT_ENABLED | WPA | TKIP;
@@ -968,7 +962,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
 				else
 					u8pmode = priv->wilc_groupkey | AES;
 
-
 				if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
 					pu8TxMic = params->key + 24;
 					pu8RxMic = params->key + 16;
@@ -1153,7 +1146,6 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
 
 	priv = wiphy_priv(wiphy);
 
-
 	if (!pairwise) {
 		key_params.key = priv->wilc_gtk[key_index]->key;
 		key_params.cipher = priv->wilc_gtk[key_index]->cipher;
@@ -1298,7 +1290,6 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
 
 	vif = netdev_priv(priv->dev);
 
-
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
 		if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
 			    ETH_ALEN)) {
@@ -1511,7 +1502,6 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
 						}
 					}
 
-
 					if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP) && (wilc_ie))	{
 						cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
 						return;
@@ -1533,7 +1523,6 @@ static void WILC_WFI_mgmt_tx_complete(void *priv, int status)
 {
 	struct p2p_mgmt_data *pv_data = priv;
 
-
 	kfree(pv_data->buff);
 	kfree(pv_data);
 }
@@ -1653,7 +1642,6 @@ static int mgmt_tx(struct wiphy *wiphy,
 		memcpy(mgmt_tx->buff, buf, len);
 		mgmt_tx->size = len;
 
-
 		if (ieee80211_is_probe_resp(mgmt->frame_control)) {
 			wilc_set_mac_chnl_num(vif, chan->hw_value);
 			curr_channel = chan->hw_value;
@@ -1832,7 +1820,6 @@ static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
 	if (wilc_enable_ps)
 		wilc_set_power_mgmt(vif, enabled, timeout);
 
-
 	return 0;
 }
 
@@ -1887,7 +1874,7 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
 
 		if (wl->initialized) {
 			wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
-						 0);
+						 0, vif->ifc_id);
 			wilc_set_operation_mode(vif, AP_MODE);
 			wilc_set_power_mgmt(vif, 0, 0);
 		}
@@ -2003,14 +1990,7 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev,
 			strStaParams.ht_supported = false;
 		} else {
 			strStaParams.ht_supported = true;
-			strStaParams.ht_capa_info = params->ht_capa->cap_info;
-			strStaParams.ht_ampdu_params = params->ht_capa->ampdu_params_info;
-			memcpy(strStaParams.ht_supp_mcs_set,
-			       &params->ht_capa->mcs,
-			       WILC_SUPP_MCS_SET_SIZE);
-			strStaParams.ht_ext_params = params->ht_capa->extended_ht_cap_info;
-			strStaParams.ht_tx_bf_cap = params->ht_capa->tx_BF_cap_info;
-			strStaParams.ht_ante_sel = params->ht_capa->antenna_selection_info;
+			strStaParams.ht_capa = *params->ht_capa;
 		}
 
 		strStaParams.flags_mask = params->sta_flags_mask;
@@ -2075,14 +2055,7 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
 			strStaParams.ht_supported = false;
 		} else {
 			strStaParams.ht_supported = true;
-			strStaParams.ht_capa_info = params->ht_capa->cap_info;
-			strStaParams.ht_ampdu_params = params->ht_capa->ampdu_params_info;
-			memcpy(strStaParams.ht_supp_mcs_set,
-			       &params->ht_capa->mcs,
-			       WILC_SUPP_MCS_SET_SIZE);
-			strStaParams.ht_ext_params = params->ht_capa->extended_ht_cap_info;
-			strStaParams.ht_tx_bf_cap = params->ht_capa->tx_BF_cap_info;
-			strStaParams.ht_ante_sel = params->ht_capa->antenna_selection_info;
+			strStaParams.ht_capa = *params->ht_capa;
 		}
 
 		strStaParams.flags_mask = params->sta_flags_mask;
@@ -2108,7 +2081,6 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->wdev->netdev);
 
-
 	if (type == NL80211_IFTYPE_MONITOR) {
 		new_ifc = WILC_WFI_init_mon_interface(name, vif->ndev);
 		if (new_ifc) {
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index d431673..c89bf43 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -158,6 +158,7 @@ struct wilc_vif {
 	struct host_if_drv *hif_drv;
 	struct net_device *ndev;
 	u8 mode;
+	u8 ifc_id;
 };
 
 struct wilc {
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 439ac6f..f4d6005 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -845,7 +845,7 @@ typedef enum {
 	WID_MODEL_NAME			= 0x3027, /*Added for CAPI tool */
 	WID_MODEL_NUM			= 0x3028, /*Added for CAPI tool */
 	WID_DEVICE_NAME			= 0x3029, /*Added for CAPI tool */
-	WID_SET_DRV_HANDLER		= 0x3030,
+	WID_SET_DRV_HANDLER		= 0x3079,
 
 	/* NMAC String WID list */
 	WID_11N_P_ACTION_REQ		= 0x3080,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 310e2c4..018db22 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -353,12 +353,12 @@
 /*-------------------------------------------------------------*/
 /* Commonly used basic types */
 struct hfa384x_bytestr {
-	u16 len;
+	__le16 len;
 	u8 data[0];
 } __packed;
 
 struct hfa384x_bytestr32 {
-	u16 len;
+	__le16 len;
 	u8 data[32];
 } __packed;
 
@@ -399,8 +399,8 @@ struct hfa384x_caplevel {
 
 /*-- Configuration Record: HostScanRequest (data portion only) --*/
 struct hfa384x_host_scan_request_data {
-	u16 channel_list;
-	u16 tx_rate;
+	__le16 channel_list;
+	__le16 tx_rate;
 	struct hfa384x_bytestr32 ssid;
 } __packed;
 
@@ -419,7 +419,7 @@ struct hfa384x_authenticate_station_data {
 
 /*-- Configuration Record: WPAData       (data portion only) --*/
 struct hfa384x_wpa_data {
-	u16 datalen;
+	__le16 datalen;
 	u8 data[0];		/* max 80 */
 } __packed;
 
@@ -682,16 +682,16 @@ struct hfa384x_ch_info_result {
 
 /*--  Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
 struct hfa384x_hscan_result_sub {
-	u16 chid;
-	u16 anl;
-	u16 sl;
+	__le16 chid;
+	__le16 anl;
+	__le16 sl;
 	u8 bssid[WLAN_BSSID_LEN];
-	u16 bcnint;
-	u16 capinfo;
+	__le16 bcnint;
+	__le16 capinfo;
 	struct hfa384x_bytestr32 ssid;
 	u8 supprates[10];	/* 802.11 info element */
 	u16 proberesp_rate;
-	u16 atim;
+	__le16 atim;
 } __packed;
 
 struct hfa384x_hscan_result {
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index a812e55..d3d1958 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1840,15 +1840,15 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
 	if (result)
 		return result;
 
-	hw->bufinfo.page = le16_to_cpu(hw->bufinfo.page);
-	hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);
-	hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len);
+	le16_to_cpus(&hw->bufinfo.page);
+	le16_to_cpus(&hw->bufinfo.offset);
+	le16_to_cpus(&hw->bufinfo.len);
 	result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
 					  &hw->dltimeout);
 	if (result)
 		return result;
 
-	hw->dltimeout = le16_to_cpu(hw->dltimeout);
+	le16_to_cpus(&hw->dltimeout);
 
 	pr_debug("flashdl_enable\n");
 
@@ -2644,8 +2644,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
 	    HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) |
 	    HFA384x_TX_TXEX_SET(0) | HFA384x_TX_TXOK_SET(0);
 #endif
-	hw->txbuff.txfrm.desc.tx_control =
-	    cpu_to_le16(hw->txbuff.txfrm.desc.tx_control);
+	cpu_to_le16s(&hw->txbuff.txfrm.desc.tx_control);
 
 	/* copy the header over to the txdesc */
 	memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr,
@@ -3380,8 +3379,8 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
 	u16 fc;
 
 	/* Byte order convert once up front. */
-	usbin->rxfrm.desc.status = le16_to_cpu(usbin->rxfrm.desc.status);
-	usbin->rxfrm.desc.time = le32_to_cpu(usbin->rxfrm.desc.time);
+	le16_to_cpus(&usbin->rxfrm.desc.status);
+	le32_to_cpus(&usbin->rxfrm.desc.time);
 
 	/* Now handle frame based on port# */
 	switch (HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status)) {
@@ -3576,8 +3575,7 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
 static void hfa384x_usbin_info(struct wlandevice *wlandev,
 			       union hfa384x_usbin *usbin)
 {
-	usbin->infofrm.info.framelen =
-	    le16_to_cpu(usbin->infofrm.info.framelen);
+	le16_to_cpus(&usbin->infofrm.info.framelen);
 	prism2sta_ev_info(wlandev, &usbin->infofrm.info);
 }
 
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index afd877f..1a0c786 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -617,28 +617,28 @@ static int mkpdrlist(struct pda *pda)
 		    HFA384x_PDR_NICID) {
 			memcpy(&nicid, &pda->rec[pda->nrec]->data.nicid,
 			       sizeof(nicid));
-			nicid.id = le16_to_cpu(nicid.id);
-			nicid.variant = le16_to_cpu(nicid.variant);
-			nicid.major = le16_to_cpu(nicid.major);
-			nicid.minor = le16_to_cpu(nicid.minor);
+			le16_to_cpus(&nicid.id);
+			le16_to_cpus(&nicid.variant);
+			le16_to_cpus(&nicid.major);
+			le16_to_cpus(&nicid.minor);
 		}
 		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
 		    HFA384x_PDR_MFISUPRANGE) {
 			memcpy(&rfid, &pda->rec[pda->nrec]->data.mfisuprange,
 			       sizeof(rfid));
-			rfid.id = le16_to_cpu(rfid.id);
-			rfid.variant = le16_to_cpu(rfid.variant);
-			rfid.bottom = le16_to_cpu(rfid.bottom);
-			rfid.top = le16_to_cpu(rfid.top);
+			le16_to_cpus(&rfid.id);
+			le16_to_cpus(&rfid.variant);
+			le16_to_cpus(&rfid.bottom);
+			le16_to_cpus(&rfid.top);
 		}
 		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
 		    HFA384x_PDR_CFISUPRANGE) {
 			memcpy(&macid, &pda->rec[pda->nrec]->data.cfisuprange,
 			       sizeof(macid));
-			macid.id = le16_to_cpu(macid.id);
-			macid.variant = le16_to_cpu(macid.variant);
-			macid.bottom = le16_to_cpu(macid.bottom);
-			macid.top = le16_to_cpu(macid.top);
+			le16_to_cpus(&macid.id);
+			le16_to_cpus(&macid.variant);
+			le16_to_cpus(&macid.bottom);
+			le16_to_cpus(&macid.top);
 		}
 
 		(pda->nrec)++;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index e23a0d0..c4aa9e7 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -170,7 +170,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 				     hw->ident_sta_fw.variant) >
 	    HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
 		if (msg->scantype.data != P80211ENUM_scantype_active)
-			word = cpu_to_le16(msg->maxchanneltime.data);
+			word = msg->maxchanneltime.data;
 		else
 			word = 0;
 
@@ -213,7 +213,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 		goto exit;
 	}
 	if (word == HFA384x_PORTSTATUS_DISABLED) {
-		u16 wordbuf[17];
+		__le16 wordbuf[17];
 
 		result = hfa384x_drvr_setconfig16(hw,
 					HFA384x_RID_CNFROAMINGMODE,
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 28df1f3..e41207d 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -774,7 +774,7 @@ void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
 void prism2mgmt_bytestr2pstr(struct hfa384x_bytestr *bytestr,
 			     struct p80211pstrd *pstr)
 {
-	pstr->len = (u8)(le16_to_cpu((u16)(bytestr->len)));
+	pstr->len = (u8)(le16_to_cpu(bytestr->len));
 	memcpy(pstr->data, bytestr->data, pstr->len);
 }
 
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 9c2b4ef..e16da34 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -603,10 +603,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	}
 
 	/* get all the nic id fields in host byte order */
-	hw->ident_nic.id = le16_to_cpu(hw->ident_nic.id);
-	hw->ident_nic.variant = le16_to_cpu(hw->ident_nic.variant);
-	hw->ident_nic.major = le16_to_cpu(hw->ident_nic.major);
-	hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
+	le16_to_cpus(&hw->ident_nic.id);
+	le16_to_cpus(&hw->ident_nic.variant);
+	le16_to_cpus(&hw->ident_nic.major);
+	le16_to_cpus(&hw->ident_nic.minor);
 
 	netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
 		    hw->ident_nic.id, hw->ident_nic.major,
@@ -622,10 +622,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	}
 
 	/* get all the private fw id fields in host byte order */
-	hw->ident_pri_fw.id = le16_to_cpu(hw->ident_pri_fw.id);
-	hw->ident_pri_fw.variant = le16_to_cpu(hw->ident_pri_fw.variant);
-	hw->ident_pri_fw.major = le16_to_cpu(hw->ident_pri_fw.major);
-	hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
+	le16_to_cpus(&hw->ident_pri_fw.id);
+	le16_to_cpus(&hw->ident_pri_fw.variant);
+	le16_to_cpus(&hw->ident_pri_fw.major);
+	le16_to_cpus(&hw->ident_pri_fw.minor);
 
 	netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
 		    hw->ident_pri_fw.id, hw->ident_pri_fw.major,
@@ -648,10 +648,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	}
 
 	/* get all the station fw id fields in host byte order */
-	hw->ident_sta_fw.id = le16_to_cpu(hw->ident_sta_fw.id);
-	hw->ident_sta_fw.variant = le16_to_cpu(hw->ident_sta_fw.variant);
-	hw->ident_sta_fw.major = le16_to_cpu(hw->ident_sta_fw.major);
-	hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);
+	le16_to_cpus(&hw->ident_sta_fw.id);
+	le16_to_cpus(&hw->ident_sta_fw.variant);
+	le16_to_cpus(&hw->ident_sta_fw.major);
+	le16_to_cpus(&hw->ident_sta_fw.minor);
 
 	/* strip out the 'special' variant bits */
 	hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14);
@@ -683,11 +683,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, modem interface supplier
 	 * fields in byte order
 	 */
-	hw->cap_sup_mfi.role = le16_to_cpu(hw->cap_sup_mfi.role);
-	hw->cap_sup_mfi.id = le16_to_cpu(hw->cap_sup_mfi.id);
-	hw->cap_sup_mfi.variant = le16_to_cpu(hw->cap_sup_mfi.variant);
-	hw->cap_sup_mfi.bottom = le16_to_cpu(hw->cap_sup_mfi.bottom);
-	hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
+	le16_to_cpus(&hw->cap_sup_mfi.role);
+	le16_to_cpus(&hw->cap_sup_mfi.id);
+	le16_to_cpus(&hw->cap_sup_mfi.variant);
+	le16_to_cpus(&hw->cap_sup_mfi.bottom);
+	le16_to_cpus(&hw->cap_sup_mfi.top);
 
 	netdev_info(wlandev->netdev,
 		    "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -707,11 +707,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, controller interface supplier
 	 * fields in byte order
 	 */
-	hw->cap_sup_cfi.role = le16_to_cpu(hw->cap_sup_cfi.role);
-	hw->cap_sup_cfi.id = le16_to_cpu(hw->cap_sup_cfi.id);
-	hw->cap_sup_cfi.variant = le16_to_cpu(hw->cap_sup_cfi.variant);
-	hw->cap_sup_cfi.bottom = le16_to_cpu(hw->cap_sup_cfi.bottom);
-	hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
+	le16_to_cpus(&hw->cap_sup_cfi.role);
+	le16_to_cpus(&hw->cap_sup_cfi.id);
+	le16_to_cpus(&hw->cap_sup_cfi.variant);
+	le16_to_cpus(&hw->cap_sup_cfi.bottom);
+	le16_to_cpus(&hw->cap_sup_cfi.top);
 
 	netdev_info(wlandev->netdev,
 		    "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -731,11 +731,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, primary firmware supplier
 	 * fields in byte order
 	 */
-	hw->cap_sup_pri.role = le16_to_cpu(hw->cap_sup_pri.role);
-	hw->cap_sup_pri.id = le16_to_cpu(hw->cap_sup_pri.id);
-	hw->cap_sup_pri.variant = le16_to_cpu(hw->cap_sup_pri.variant);
-	hw->cap_sup_pri.bottom = le16_to_cpu(hw->cap_sup_pri.bottom);
-	hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
+	le16_to_cpus(&hw->cap_sup_pri.role);
+	le16_to_cpus(&hw->cap_sup_pri.id);
+	le16_to_cpus(&hw->cap_sup_pri.variant);
+	le16_to_cpus(&hw->cap_sup_pri.bottom);
+	le16_to_cpus(&hw->cap_sup_pri.top);
 
 	netdev_info(wlandev->netdev,
 		    "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -755,11 +755,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, station firmware supplier
 	 * fields in byte order
 	 */
-	hw->cap_sup_sta.role = le16_to_cpu(hw->cap_sup_sta.role);
-	hw->cap_sup_sta.id = le16_to_cpu(hw->cap_sup_sta.id);
-	hw->cap_sup_sta.variant = le16_to_cpu(hw->cap_sup_sta.variant);
-	hw->cap_sup_sta.bottom = le16_to_cpu(hw->cap_sup_sta.bottom);
-	hw->cap_sup_sta.top = le16_to_cpu(hw->cap_sup_sta.top);
+	le16_to_cpus(&hw->cap_sup_sta.role);
+	le16_to_cpus(&hw->cap_sup_sta.id);
+	le16_to_cpus(&hw->cap_sup_sta.variant);
+	le16_to_cpus(&hw->cap_sup_sta.bottom);
+	le16_to_cpus(&hw->cap_sup_sta.top);
 
 	if (hw->cap_sup_sta.id == 0x04) {
 		netdev_info(wlandev->netdev,
@@ -787,11 +787,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, primary f/w actor, CFI supplier
 	 * fields in byte order
 	 */
-	hw->cap_act_pri_cfi.role = le16_to_cpu(hw->cap_act_pri_cfi.role);
-	hw->cap_act_pri_cfi.id = le16_to_cpu(hw->cap_act_pri_cfi.id);
-	hw->cap_act_pri_cfi.variant = le16_to_cpu(hw->cap_act_pri_cfi.variant);
-	hw->cap_act_pri_cfi.bottom = le16_to_cpu(hw->cap_act_pri_cfi.bottom);
-	hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
+	le16_to_cpus(&hw->cap_act_pri_cfi.role);
+	le16_to_cpus(&hw->cap_act_pri_cfi.id);
+	le16_to_cpus(&hw->cap_act_pri_cfi.variant);
+	le16_to_cpus(&hw->cap_act_pri_cfi.bottom);
+	le16_to_cpus(&hw->cap_act_pri_cfi.top);
 
 	netdev_info(wlandev->netdev,
 		    "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -811,11 +811,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, station f/w actor, CFI supplier
 	 * fields in byte order
 	 */
-	hw->cap_act_sta_cfi.role = le16_to_cpu(hw->cap_act_sta_cfi.role);
-	hw->cap_act_sta_cfi.id = le16_to_cpu(hw->cap_act_sta_cfi.id);
-	hw->cap_act_sta_cfi.variant = le16_to_cpu(hw->cap_act_sta_cfi.variant);
-	hw->cap_act_sta_cfi.bottom = le16_to_cpu(hw->cap_act_sta_cfi.bottom);
-	hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
+	le16_to_cpus(&hw->cap_act_sta_cfi.role);
+	le16_to_cpus(&hw->cap_act_sta_cfi.id);
+	le16_to_cpus(&hw->cap_act_sta_cfi.variant);
+	le16_to_cpus(&hw->cap_act_sta_cfi.bottom);
+	le16_to_cpus(&hw->cap_act_sta_cfi.top);
 
 	netdev_info(wlandev->netdev,
 		    "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -835,11 +835,11 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	/* get all the Compatibility range, station f/w actor, MFI supplier
 	 * fields in byte order
 	 */
-	hw->cap_act_sta_mfi.role = le16_to_cpu(hw->cap_act_sta_mfi.role);
-	hw->cap_act_sta_mfi.id = le16_to_cpu(hw->cap_act_sta_mfi.id);
-	hw->cap_act_sta_mfi.variant = le16_to_cpu(hw->cap_act_sta_mfi.variant);
-	hw->cap_act_sta_mfi.bottom = le16_to_cpu(hw->cap_act_sta_mfi.bottom);
-	hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
+	le16_to_cpus(&hw->cap_act_sta_mfi.role);
+	le16_to_cpus(&hw->cap_act_sta_mfi.id);
+	le16_to_cpus(&hw->cap_act_sta_mfi.variant);
+	le16_to_cpus(&hw->cap_act_sta_mfi.bottom);
+	le16_to_cpus(&hw->cap_act_sta_mfi.top);
 
 	netdev_info(wlandev->netdev,
 		    "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
@@ -1478,8 +1478,8 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
 	int i;
 
 	memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
-	rec.assocstatus = le16_to_cpu(rec.assocstatus);
-	rec.reason = le16_to_cpu(rec.reason);
+	le16_to_cpus(&rec.assocstatus);
+	le16_to_cpus(&rec.reason);
 
 	/*
 	 * Find the address in the list of authenticated stations.
@@ -1748,7 +1748,7 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
 void prism2sta_ev_info(struct wlandevice *wlandev,
 		       struct hfa384x_inf_frame *inf)
 {
-	inf->infotype = le16_to_cpu(inf->infotype);
+	le16_to_cpus(&inf->infotype);
 	/* Dispatch */
 	switch (inf->infotype) {
 	case HFA384x_IT_HANDOVERADDR:
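A note on the conversion above: le16_to_cpus() performs the same little-endian
to host byte-order conversion as le16_to_cpu(), but in place through a pointer,
which is what makes the one-line replacements possible.  A minimal sketch of
the equivalence, using a stand-in struct rather than the driver's hfa384x
types:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only -- not part of the patch. */
struct example_ident {
	u16 id;
	u16 variant;
};

static void example_fixup(struct example_ident *ident)
{
	/* open-coded form removed by the patch */
	ident->id = le16_to_cpu(ident->id);

	/* in-place helper the patch switches to */
	le16_to_cpus(&ident->variant);
}
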
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index f9f98e0..31dd52c 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -2448,15 +2448,15 @@ static const struct XGI301C_Tap4TimingStruct PALTap4Timing[] = {
 		}
 	},
 	{0xFFFF, {
-		 0x04, 0x1A, 0x04, 0x7E, 0x02, 0x1B, 0x05, 0x7E, /* ; C0-C7 */
-		 0x01, 0x1A, 0x07, 0x7E, 0x00, 0x1A, 0x09, 0x7D, /* ; C8-CF */
-		 0x7F, 0x19, 0x0B, 0x7D, 0x7E, 0x18, 0x0D, 0x7D, /* ; D0-D7 */
-		 0x7D, 0x17, 0x10, 0x7C, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
-		 0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
-		 0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0D, 0x18, 0x7F, /* ; EA-EF */
-		 0x7D, 0x0B, 0x19, 0x7F, 0x7D, 0x09, 0x1A, 0x00, /* ; F0-F7 */
-		 0x7D, 0x07, 0x1A, 0x02, 0x7E, 0x05, 0x1B, 0x02  /* ; F8-FF */
-		 }
+		0x04, 0x1A, 0x04, 0x7E, 0x02, 0x1B, 0x05, 0x7E, /* ; C0-C7 */
+		0x01, 0x1A, 0x07, 0x7E, 0x00, 0x1A, 0x09, 0x7D, /* ; C8-CF */
+		0x7F, 0x19, 0x0B, 0x7D, 0x7E, 0x18, 0x0D, 0x7D, /* ; D0-D7 */
+		0x7D, 0x17, 0x10, 0x7C, 0x7D, 0x15, 0x12, 0x7C, /* ; D8-DF */
+		0x7C, 0x14, 0x14, 0x7C, 0x7C, 0x12, 0x15, 0x7D, /* ; E0-E7 */
+		0x7C, 0x10, 0x17, 0x7D, 0x7C, 0x0D, 0x18, 0x7F, /* ; EA-EF */
+		0x7D, 0x0B, 0x19, 0x7F, 0x7D, 0x09, 0x1A, 0x00, /* ; F0-F7 */
+		0x7D, 0x07, 0x1A, 0x02, 0x7E, 0x05, 0x1B, 0x02  /* ; F8-FF */
+		}
 	}
 };
 
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 26a9bcd..3fdca2c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 	 */
 	if (dump_payload)
 		goto after_immediate_data;
+	/*
+	 * Check for the underflow case where both EDTL and immediate data payload
+	 * exceed what is presented by the CDB's TRANSFER LENGTH, and what has
+	 * already been set in target_cmd_size_check() as se_cmd->data_length.
+	 *
+	 * For this special case, fail the command and dump the immediate data
+	 * payload.
+	 */
+	if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+		cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+		goto after_immediate_data;
+	}
 
 	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
 					cmd->first_burst_len);
@@ -3790,6 +3802,8 @@ int iscsi_target_tx_thread(void *arg)
 {
 	int ret = 0;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
@@ -3815,12 +3829,14 @@ int iscsi_target_tx_thread(void *arg)
 			goto transport_err;
 
 		ret = iscsit_handle_response_queue(conn);
-		if (ret == 1)
+		if (ret == 1) {
 			goto get_immediate;
-		else if (ret == -ECONNRESET)
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
 			goto out;
-		else if (ret < 0)
+		} else if (ret < 0) {
 			goto transport_err;
+		}
 	}
 
 transport_err:
@@ -3830,8 +3846,13 @@ int iscsi_target_tx_thread(void *arg)
 	 * responsible for cleaning up the early connection failure.
 	 */
 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-		iscsit_take_action_for_connection_exit(conn);
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }
 
@@ -4004,6 +4025,7 @@ int iscsi_target_rx_thread(void *arg)
 {
 	int rc;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
 
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -4016,7 +4038,7 @@ int iscsi_target_rx_thread(void *arg)
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
 	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
+		goto out;
 
 	if (!conn->conn_transport->iscsit_get_rx_pdu)
 		return 0;
@@ -4025,7 +4047,15 @@ int iscsi_target_rx_thread(void *arg)
 
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
-	iscsit_take_action_for_connection_exit(conn);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+
 	return 0;
 }
 
@@ -4405,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession(
 	 * always sleep waiting for RX/TX thread shutdown to complete
 	 * within iscsit_close_connection().
 	 */
-	if (!conn->conn_transport->rdma_shutdown)
+	if (!conn->conn_transport->rdma_shutdown) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
@@ -4422,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid(
 {
 	int sleep = 1;
 
-	if (!conn->conn_transport->rdma_shutdown)
+	if (!conn->conn_transport->rdma_shutdown) {
 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+		if (!sleep)
+			return;
+	}
 
 	atomic_set(&conn->conn_logout_remove, 0);
 	complete(&conn->conn_logout_comp);
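The conn_freed changes above share one idea: an iSCSI tx/rx kthread whose
connection may already have been torn down must not simply return, but has to
park until kthread_stop() is invoked by whoever still holds the task
reference.  A minimal sketch of that exit pattern (example_thread_fn and
do_connection_work are hypothetical names, not target-core API):

#include <linux/kthread.h>
#include <linux/delay.h>

/* hypothetical stand-in for the real connection work; sets *freed when it
 * released the connection itself */
static void do_connection_work(void *arg, bool *freed)
{
	*freed = false;
}

static int example_thread_fn(void *arg)
{
	bool freed = false;

	do_connection_work(arg, &freed);

	/*
	 * If this thread did not free the connection itself, wait here
	 * until the owner calls kthread_stop(), so the task cannot exit
	 * while someone may still signal or stop it.
	 */
	if (!freed) {
		while (!kthread_should_stop())
			msleep(100);
	}
	return 0;
}
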
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 9a96e17..7fe2aa7 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
 	}
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+	*conn_freed = false;
+
 	spin_lock_bh(&conn->state_lock);
 	if (atomic_read(&conn->connection_exit)) {
 		spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
 		iscsit_close_connection(conn);
+		*conn_freed = true;
 		return;
 	}
 
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 	spin_unlock_bh(&conn->state_lock);
 
 	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 60e69e2..3822d9c 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6623847..92b96b5 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg)
 			break;
 	}
 
+	while (!kthread_should_stop()) {
+		msleep(100);
+	}
+
 	return 0;
 }
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7ccc9c1..6f88b31 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
 	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-		pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+		pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
 			"returning FALSE\n");
-		return false;
+		return true;
 	}
-	return true;
+	return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = test_bit(flag, &conn->login_flags);
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		if (!state)
+			clear_bit(flag, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
 	pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
 			conn, current->comm, current->pid);
+	/*
+	 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+	 * before initial PDU processing in iscsi_target_start_negotiation()
+	 * has completed, go ahead and retry until it's cleared.
+	 *
+	 * Otherwise, if the TCP connection drops while this is occurring,
+	 * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and clean up the
+	 * remaining iscsi connection resources from iscsi_np process context.
+	 */
+	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+		return;
+	}
 
 	spin_lock(&tpg->tpg_state_lock);
 	state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
 	if (!state) {
 		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
+		goto err;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-			iscsi_target_restore_sock_callbacks(conn);
-			iscsi_target_login_drop(conn, login);
-			iscsit_deaccess_np(np, tpg, tpg_np);
-			return;
-		}
+	if (iscsi_target_sk_check_close(conn)) {
+		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+		goto err;
 	}
 
 	conn->login_kworker = current;
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 	flush_signals(current);
 	conn->login_kworker = NULL;
 
-	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
-	}
+	if (rc < 0)
+		goto err;
 
 	pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
 			conn, current->comm, current->pid);
 
 	rc = iscsi_target_do_login(conn, login);
 	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
+		goto err;
 	} else if (!rc) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
-
-			write_lock_bh(&sk->sk_callback_lock);
-			clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
+		if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+			goto err;
 	} else if (rc == 1) {
 		iscsi_target_nego_release(conn);
 		iscsi_post_login_handler(np, conn, zero_tsih);
 		iscsit_deaccess_np(np, tpg, tpg_np);
 	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
 		orig_state_change(sk);
 		return;
 	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
+
 	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
 			 " conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-	if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
 			 conn);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-
-	state = iscsi_target_sk_state_check(sk);
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
-
-	if (!state) {
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
 		pr_debug("iscsi_target_sk_state_change got failed state\n");
-		schedule_delayed_work(&conn->login_cleanup_work, 0);
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+
+		orig_state_change(sk);
+
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
 		return;
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
 	orig_state_change(sk);
 }
 
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 			if (iscsi_target_handle_csg_one(conn, login) < 0)
 				return -1;
 			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+				/*
+				 * Check to make sure the TCP connection has not
+				 * dropped asynchronously while session reinstatement
+				 * was occuring in this kthread context, before
+				 * was occurring in this kthread context, before
+				 */
+				if (iscsi_target_sk_check_close(conn))
+					return -1;
+
 				login->tsih = conn->sess->tsih;
 				login->login_complete = 1;
 				iscsi_target_restore_sock_callbacks(conn);
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
 		break;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-		bool state;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login() failed state for"
-				 " conn: %p\n", conn);
-			return -1;
-		}
-	}
-
 	return 0;
 }
 
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation(
 
 		write_lock_bh(&sk->sk_callback_lock);
 		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
-
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise, if the TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
 	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
+
 	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
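The negotiation rework above funnels every socket-state test through helpers
that inspect and clear conn->login_flags bits while holding sk_callback_lock,
so the login worker cannot race with ->sk_state_change().  A rough sketch of
that locking pattern (helper name and flag handling are illustrative only):

#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/bitops.h>

static bool example_check_and_clear(struct sock *sk, unsigned long *flags,
				    unsigned int bit)
{
	bool closed;

	write_lock_bh(&sk->sk_callback_lock);
	closed = sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE;
	if (!closed)
		clear_bit(bit, flags);
	write_unlock_bh(&sk->sk_callback_lock);

	return closed;
}
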
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bb069eb..c05d3801 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -93,7 +93,7 @@ static int iblock_configure_device(struct se_device *dev)
 		return -EINVAL;
 	}
 
-	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
+	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 	if (!ib_dev->ibd_bio_set) {
 		pr_err("IBLOCK: Unable to create bioset\n");
 		goto out;
@@ -296,8 +296,8 @@ static void iblock_bio_done(struct bio *bio)
 	struct se_cmd *cmd = bio->bi_private;
 	struct iblock_req *ibr = cmd->priv;
 
-	if (bio->bi_error) {
-		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
+	if (bio->bi_status) {
+		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
 		/*
 		 * Bump the ib_bio_err_cnt and release bio.
 		 */
@@ -354,11 +354,11 @@ static void iblock_end_io_flush(struct bio *bio)
 {
 	struct se_cmd *cmd = bio->bi_private;
 
-	if (bio->bi_error)
-		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
+	if (bio->bi_status)
+		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
 
 	if (cmd) {
-		if (bio->bi_error)
+		if (bio->bi_status)
 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 		else
 			target_complete_cmd(cmd, SAM_STAT_GOOD);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 9ab7090..0912de7 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -136,7 +136,7 @@ int	init_se_kmem_caches(void);
 void	release_se_kmem_caches(void);
 u32	scsi_get_new_index(scsi_index_t);
 void	transport_subsystem_check_init(void);
-void	transport_cmd_finish_abort(struct se_cmd *, int);
+int	transport_cmd_finish_abort(struct se_cmd *, int);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void	transport_dump_dev_state(struct se_device *, char *, int *);
 void	transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 3e4abb1..ceec021 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -55,7 +55,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, int);
+static void pscsi_req_done(struct request *, blk_status_t);
 
 /*	pscsi_attach_hba():
  *
@@ -992,8 +992,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 		goto fail;
 	}
 
-	scsi_req_init(req);
-
 	if (sgl) {
 		ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
 		if (ret)
@@ -1045,7 +1043,7 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 	return 0;
 }
 
-static void pscsi_req_done(struct request *req, int uptodate)
+static void pscsi_req_done(struct request *req, blk_status_t status)
 {
 	struct se_cmd *cmd = req->end_io_data;
 	struct pscsi_plugin_task *pt = cmd->priv;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index dce1e1b..13f47bf 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
 	kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
 	unsigned long flags;
 	bool remove = true, send_tas;
@@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 		transport_send_task_abort(cmd);
 	}
 
-	transport_cmd_finish_abort(cmd, remove);
+	return transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
@@ -184,8 +184,8 @@ void core_tmr_abort_task(
 		cancel_work_sync(&se_cmd->work);
 		transport_wait_for_tasks(se_cmd);
 
-		transport_cmd_finish_abort(se_cmd, true);
-		target_put_sess_cmd(se_cmd);
+		if (!transport_cmd_finish_abort(se_cmd, true))
+			target_put_sess_cmd(se_cmd);
 
 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
 				" ref_tag: %llu\n", ref_tag);
@@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list(
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		transport_cmd_finish_abort(cmd, 1);
-		target_put_sess_cmd(cmd);
+		if (!transport_cmd_finish_abort(cmd, 1))
+			target_put_sess_cmd(cmd);
 	}
 }
 
@@ -380,8 +380,8 @@ static void core_tmr_drain_state_list(
 		cancel_work_sync(&cmd->work);
 		transport_wait_for_tasks(cmd);
 
-		core_tmr_handle_tas_abort(cmd, tas);
-		target_put_sess_cmd(cmd);
+		if (!core_tmr_handle_tas_abort(cmd, tas))
+			target_put_sess_cmd(cmd);
 	}
 }
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 37f5735..f1b3a46 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 		percpu_ref_put(&lun->lun_ref);
 }
 
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
 	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+	int ret = 0;
 
 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
 		transport_lun_remove_cmd(cmd);
@@ -665,9 +666,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 		cmd->se_tfo->aborted_task(cmd);
 
 	if (transport_cmd_check_stop_to_fabric(cmd))
-		return;
+		return 1;
 	if (remove && ack_kref)
-		transport_put_cmd(cmd);
+		ret = transport_put_cmd(cmd);
+
+	return ret;
 }
 
 static void target_complete_failure_work(struct work_struct *work)
@@ -1160,15 +1163,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 	if (cmd->unknown_data_length) {
 		cmd->data_length = size;
 	} else if (size != cmd->data_length) {
-		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cmd->t_task_cdb[0]);
 
-		if (cmd->data_direction == DMA_TO_DEVICE &&
-		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-			pr_err("Rejecting underflow/overflow WRITE data\n");
-			return TCM_INVALID_CDB_FIELD;
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes.  Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
 		}
 		/*
 		 * Reject READ_* or WRITE_* with overflow/underflow for
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9045837..beb5f09 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -97,7 +97,7 @@ struct tcmu_hba {
 
 struct tcmu_dev {
 	struct list_head node;
-
+	struct kref kref;
 	struct se_device se_dev;
 
 	char *name;
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
 	if (!udev)
 		return NULL;
+	kref_init(&udev->kref);
 
 	udev->name = kstrdup(name, GFP_KERNEL);
 	if (!udev->name) {
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
 	return 0;
 }
 
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+	struct se_device *dev = container_of(p, struct se_device, rcu_head);
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	kfree(udev->uio_info.name);
+	kfree(udev->name);
+	kfree(udev);
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+	struct se_device *dev = &udev->se_dev;
+
+	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
 	pr_debug("close\n");
-
+	/* release ref from configure */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	return 0;
 }
 
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev)
 		dev->dev_attrib.hw_max_sectors = 128;
 	dev->dev_attrib.hw_queue_depth = 128;
 
+	/*
+	 * Get a ref in case userspace does a close on the uio device before
+	 * LIO has initiated tcmu_free_device.
+	 */
+	kref_get(&udev->kref);
+
 	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
 				 udev->uio_info.uio_dev->minor);
 	if (ret)
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev)
 	return 0;
 
 err_netlink:
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 	uio_unregister_device(&udev->uio_info);
 err_register:
 	vfree(udev->mb_addr);
 err_vzalloc:
 	kfree(info->name);
+	info->name = NULL;
 
 	return ret;
 }
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }
 
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
-	struct se_device *dev = container_of(p, struct se_device, rcu_head);
-	struct tcmu_dev *udev = TCMU_DEV(dev);
-
-	kfree(udev);
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
 	return udev->uio_info.uio_dev ? true : false;
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev)
 				   udev->uio_info.uio_dev->minor);
 
 		uio_unregister_device(&udev->uio_info);
-		kfree(udev->uio_info.name);
-		kfree(udev->name);
 	}
-	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+
+	/* release ref from init */
+	kref_put(&udev->kref, tcmu_dev_kref_release);
 }
 
 enum {
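The tcmu hunks above move the device lifetime to reference counting: one kref
reference is taken at allocation (dropped in tcmu_free_device()) and one at
configure time (dropped when userspace closes the uio node), with the final
free deferred through call_rcu().  A compressed sketch of the scheme, using
hypothetical names:

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_dev {
	struct kref kref;	/* init ref + configure ref */
	struct rcu_head rcu;
};

static void example_rcu_free(struct rcu_head *p)
{
	kfree(container_of(p, struct example_dev, rcu));
}

static void example_release(struct kref *kref)
{
	struct example_dev *d = container_of(kref, struct example_dev, kref);

	/* defer the actual free until concurrent readers are done */
	call_rcu(&d->rcu, example_rcu_free);
}

static void example_put(struct example_dev *d)
{
	/* called once from release/close and once from free_device */
	kref_put(&d->kref, example_release);
}
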
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 9413c4a..a9ec94e 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -23,7 +23,7 @@ enum int3400_thermal_uuid {
 	INT3400_THERMAL_MAXIMUM_UUID,
 };
 
-static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
+static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
 	"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
 	"3A95C389-E4B8-4629-A526-C52C88626BAE",
 	"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
@@ -141,10 +141,10 @@ static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
 		}
 
 		for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
-			u8 uuid[16];
+			guid_t guid;
 
-			acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
-			if (!strncmp(uuid, objb->buffer.pointer, 16)) {
+			guid_parse(int3400_thermal_uuids[j], &guid);
+			if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) {
 				priv->uuid_bitmap |= (1 << j);
 				break;
 			}
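The int3400 change above swaps the removed acpi_str_to_uuid()/strncmp() pair
for the guid API.  A minimal sketch of the new comparison (helper name is
hypothetical):

#include <linux/uuid.h>

static bool example_uuid_matches(const char *str, const void *raw)
{
	guid_t guid;

	/* parse the canonical string form once... */
	if (guid_parse(str, &guid))
		return false;

	/* ...then compare the 16-byte binary representations */
	return guid_equal((const guid_t *)raw, &guid);
}
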
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index e9a1fe3..159bbce 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -104,8 +104,6 @@ static int max77620_thermal_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	pdev->dev.of_node = pdev->dev.parent->of_node;
-
 	mtherm->dev = &pdev->dev;
 	mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
 	if (!mtherm->rmap) {
@@ -113,6 +111,12 @@ static int max77620_thermal_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
+	/*
+	 * A reference is taken to the parent's node; it will be balanced on
+	 * reprobe or on platform-device release.
+	 */
+	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
 	mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
 				mtherm, &max77620_thermal_ops);
 	if (IS_ERR(mtherm->tz_device)) {
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index d35db16..f4869c3 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,15 +1,16 @@
 menuconfig THUNDERBOLT
-	tristate "Thunderbolt support for Apple devices"
+	tristate "Thunderbolt support"
 	depends on PCI
 	depends on X86 || COMPILE_TEST
 	select APPLE_PROPERTIES if EFI_STUB && X86
 	select CRC32
+	select CRYPTO
+	select CRYPTO_HASH
+	select NVMEM
 	help
-	  Cactus Ridge Thunderbolt Controller driver
-	  This driver is required if you want to hotplug Thunderbolt devices on
-	  Apple hardware.
-
-	  Device chaining is currently not supported.
+	  Thunderbolt Controller driver. This driver is required if you
+	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
+	  with Intel Falcon Ridge or newer.
 
 	  To compile this driver a module, choose M here. The module will be
 	  called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 5d1053c..4900feb 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-
+thunderbolt-objs += domain.o dma_port.o icm.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index a7b47e7..38bc27a 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -9,6 +9,8 @@
 
 #include "tb.h"
 
+#define CAP_OFFSET_MAX		0xff
+#define VSE_CAP_OFFSET_MAX	0xffff
 
 struct tb_cap_any {
 	union {
@@ -18,99 +20,110 @@ struct tb_cap_any {
 	};
 } __packed;
 
-static bool tb_cap_is_basic(struct tb_cap_any *cap)
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
 {
-	/* basic.cap is u8. This checks only the lower 8 bit of cap. */
-	return cap->basic.cap != 5;
-}
+	u32 offset;
 
-static bool tb_cap_is_long(struct tb_cap_any *cap)
-{
-	return !tb_cap_is_basic(cap)
-	       && cap->extended_short.next == 0
-	       && cap->extended_short.length == 0;
-}
-
-static enum tb_cap tb_cap(struct tb_cap_any *cap)
-{
-	if (tb_cap_is_basic(cap))
-		return cap->basic.cap;
-	else
-		/* extended_short/long have cap at the same offset. */
-		return cap->extended_short.cap;
-}
-
-static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
-{
-	int next;
-	if (offset == 1) {
-		/*
-		 * The first pointer is part of the switch header and always
-		 * a simple pointer.
-		 */
-		next = cap->basic.next;
-	} else {
-		/*
-		 * Somehow Intel decided to use 3 different types of capability
-		 * headers. It is not like anyone could have predicted that
-		 * single byte offsets are not enough...
-		 */
-		if (tb_cap_is_basic(cap))
-			next = cap->basic.next;
-		else if (!tb_cap_is_long(cap))
-			next = cap->extended_short.next;
-		else
-			next = cap->extended_long.next;
-	}
 	/*
-	 * "Hey, we could terminate some capability lists with a null offset
-	 *  and others with a pointer to the last element." - "Great idea!"
+	 * DP out adapters claim to implement TMU capability but in
+	 * reality they do not, so we hard-code the adapter-specific
+	 * capability offset here.
 	 */
-	if (next == offset)
-		return 0;
-	return next;
+	if (port->config.type == TB_TYPE_DP_HDMI_OUT)
+		offset = 0x39;
+	else
+		offset = 0x1;
+
+	do {
+		struct tb_cap_any header;
+		int ret;
+
+		ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+		if (ret)
+			return ret;
+
+		if (header.basic.cap == cap)
+			return offset;
+
+		offset = header.basic.next;
+	} while (offset);
+
+	return -ENOENT;
+}
+
+static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+{
+	int offset = sw->config.first_cap_offset;
+
+	while (offset > 0 && offset < CAP_OFFSET_MAX) {
+		struct tb_cap_any header;
+		int ret;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
+		if (ret)
+			return ret;
+
+		if (header.basic.cap == cap)
+			return offset;
+
+		offset = header.basic.next;
+	}
+
+	return -ENOENT;
 }
 
 /**
- * tb_find_cap() - find a capability
+ * tb_switch_find_vse_cap() - Find switch vendor specific capability
+ * @sw: Switch to find the capability for
+ * @vsec: Vendor specific capability to look for
  *
- * Return: Returns a positive offset if the capability was found and 0 if not.
- * Returns an error code on failure.
+ * This function enumerates the vendor specific capabilities (VSEC) of a
+ * switch and returns the offset when a capability matching @vsec is found.
+ * If no such capability is found, %-ENOENT is returned. In case of error,
+ * a negative errno is returned.
  */
-int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
 {
-	u32 offset = 1;
 	struct tb_cap_any header;
-	int res;
-	int retries = 10;
-	while (retries--) {
-		res = tb_port_read(port, &header, space, offset, 1);
-		if (res) {
-			/* Intel needs some help with linked lists. */
-			if (space == TB_CFG_PORT && offset == 0xa
-			    && port->config.type == TB_TYPE_DP_HDMI_OUT) {
-				offset = 0x39;
-				continue;
-			}
-			return res;
-		}
-		if (offset != 1) {
-			if (tb_cap(&header) == cap)
+	int offset;
+
+	offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
+	if (offset < 0)
+		return offset;
+
+	while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+		int ret;
+
+		ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+		if (ret)
+			return ret;
+
+		/*
+		 * Extended vendor specific capabilities come in two
+		 * flavors: short and long. The latter is used when
+		 * offset is over 0xff.
+		 */
+		if (offset >= CAP_OFFSET_MAX) {
+			if (header.extended_long.vsec_id == vsec)
 				return offset;
-			if (tb_cap_is_long(&header)) {
-				/* tb_cap_extended_long is 2 dwords */
-				res = tb_port_read(port, &header, space,
-						   offset, 2);
-				if (res)
-					return res;
-			}
+			offset = header.extended_long.next;
+		} else {
+			if (header.extended_short.vsec_id == vsec)
+				return offset;
+			if (!header.extended_short.length)
+				return -ENOENT;
+			offset = header.extended_short.next;
 		}
-		offset = tb_cap_next(&header, offset);
-		if (!offset)
-			return 0;
 	}
-	tb_port_WARN(port,
-		     "run out of retries while looking for cap %#x in config space %d, last offset: %#x\n",
-		     cap, space, offset);
-	return -EIO;
+
+	return -ENOENT;
 }
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 1146ff4..69c0232 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -5,22 +5,17 @@
  */
 
 #include <linux/crc32.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/dmapool.h>
 #include <linux/workqueue.h>
-#include <linux/kfifo.h>
 
 #include "ctl.h"
 
 
-struct ctl_pkg {
-	struct tb_ctl *ctl;
-	void *buffer;
-	struct ring_frame frame;
-};
-
-#define TB_CTL_RX_PKG_COUNT 10
+#define TB_CTL_RX_PKG_COUNT	10
+#define TB_CTL_RETRIES		4
 
 /**
  * struct tb_cfg - thunderbolt control channel
@@ -32,10 +27,11 @@ struct tb_ctl {
 
 	struct dma_pool *frame_pool;
 	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
-	DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
-	struct completion response_ready;
+	struct mutex request_queue_lock;
+	struct list_head request_queue;
+	bool running;
 
-	hotplug_cb callback;
+	event_cb callback;
 	void *callback_data;
 };
 
@@ -52,102 +48,124 @@ struct tb_ctl {
 #define tb_ctl_info(ctl, format, arg...) \
 	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
 
+#define tb_ctl_dbg(ctl, format, arg...) \
+	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
 
-/* configuration packets definitions */
+static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
+/* Serializes access to request kref_get/put */
+static DEFINE_MUTEX(tb_cfg_request_lock);
 
-enum tb_cfg_pkg_type {
-	TB_CFG_PKG_READ = 1,
-	TB_CFG_PKG_WRITE = 2,
-	TB_CFG_PKG_ERROR = 3,
-	TB_CFG_PKG_NOTIFY_ACK = 4,
-	TB_CFG_PKG_EVENT = 5,
-	TB_CFG_PKG_XDOMAIN_REQ = 6,
-	TB_CFG_PKG_XDOMAIN_RESP = 7,
-	TB_CFG_PKG_OVERRIDE = 8,
-	TB_CFG_PKG_RESET = 9,
-	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
-};
+/**
+ * tb_cfg_request_alloc() - Allocates a new config request
+ *
+ * This is a refcounted object, so when you are done with it, call
+ * tb_cfg_request_put() on it.
+ */
+struct tb_cfg_request *tb_cfg_request_alloc(void)
+{
+	struct tb_cfg_request *req;
 
-/* common header */
-struct tb_cfg_header {
-	u32 route_hi:22;
-	u32 unknown:10; /* highest order bit is set on replies */
-	u32 route_lo;
-} __packed;
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return NULL;
 
-/* additional header for read/write packets */
-struct tb_cfg_address {
-	u32 offset:13; /* in dwords */
-	u32 length:6; /* in dwords */
-	u32 port:6;
-	enum tb_cfg_space space:2;
-	u32 seq:2; /* sequence number  */
-	u32 zero:3;
-} __packed;
+	kref_init(&req->kref);
 
-/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
-struct cfg_read_pkg {
-	struct tb_cfg_header header;
-	struct tb_cfg_address addr;
-} __packed;
+	return req;
+}
 
-/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
-struct cfg_write_pkg {
-	struct tb_cfg_header header;
-	struct tb_cfg_address addr;
-	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
-} __packed;
+/**
+ * tb_cfg_request_get() - Increase refcount of a request
+ * @req: Request whose refcount is increased
+ */
+void tb_cfg_request_get(struct tb_cfg_request *req)
+{
+	mutex_lock(&tb_cfg_request_lock);
+	kref_get(&req->kref);
+	mutex_unlock(&tb_cfg_request_lock);
+}
 
-/* TB_CFG_PKG_ERROR */
-struct cfg_error_pkg {
-	struct tb_cfg_header header;
-	enum tb_cfg_error error:4;
-	u32 zero1:4;
-	u32 port:6;
-	u32 zero2:2; /* Both should be zero, still they are different fields. */
-	u32 zero3:16;
-} __packed;
+static void tb_cfg_request_destroy(struct kref *kref)
+{
+	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
 
-/* TB_CFG_PKG_EVENT */
-struct cfg_event_pkg {
-	struct tb_cfg_header header;
-	u32 port:6;
-	u32 zero:25;
-	bool unplug:1;
-} __packed;
+	kfree(req);
+}
 
-/* TB_CFG_PKG_RESET */
-struct cfg_reset_pkg {
-	struct tb_cfg_header header;
-} __packed;
+/**
+ * tb_cfg_request_put() - Decrease refcount and possibly release the request
+ * @req: Request whose refcount is decreased
+ *
+ * Call this function when you are done with the request. When refcount
+ * goes to %0 the object is released.
+ */
+void tb_cfg_request_put(struct tb_cfg_request *req)
+{
+	mutex_lock(&tb_cfg_request_lock);
+	kref_put(&req->kref, tb_cfg_request_destroy);
+	mutex_unlock(&tb_cfg_request_lock);
+}
 
-/* TB_CFG_PKG_PREPARE_TO_SLEEP */
-struct cfg_pts_pkg {
-	struct tb_cfg_header header;
-	u32 data;
-} __packed;
+static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
+				  struct tb_cfg_request *req)
+{
+	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
+	WARN_ON(req->ctl);
 
+	mutex_lock(&ctl->request_queue_lock);
+	if (!ctl->running) {
+		mutex_unlock(&ctl->request_queue_lock);
+		return -ENOTCONN;
+	}
+	req->ctl = ctl;
+	list_add_tail(&req->list, &ctl->request_queue);
+	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+	mutex_unlock(&ctl->request_queue_lock);
+	return 0;
+}
+
+static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
+{
+	struct tb_ctl *ctl = req->ctl;
+
+	mutex_lock(&ctl->request_queue_lock);
+	list_del(&req->list);
+	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+		wake_up(&tb_cfg_request_cancel_queue);
+	mutex_unlock(&ctl->request_queue_lock);
+}
+
+static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
+{
+	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+}
+
+static struct tb_cfg_request *
+tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
+{
+	struct tb_cfg_request *req;
+	bool found = false;
+
+	mutex_lock(&pkg->ctl->request_queue_lock);
+	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
+		tb_cfg_request_get(req);
+		if (req->match(req, pkg)) {
+			found = true;
+			break;
+		}
+		tb_cfg_request_put(req);
+	}
+	mutex_unlock(&pkg->ctl->request_queue_lock);
+
+	return found ? req : NULL;
+}
 
 /* utility functions */
 
-static u64 get_route(struct tb_cfg_header header)
-{
-	return (u64) header.route_hi << 32 | header.route_lo;
-}
 
-static struct tb_cfg_header make_header(u64 route)
-{
-	struct tb_cfg_header header = {
-		.route_hi = route >> 32,
-		.route_lo = route,
-	};
-	/* check for overflow, route_hi is not 32 bits! */
-	WARN_ON(get_route(header) != route);
-	return header;
-}
-
-static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
-			u64 route)
+static int check_header(const struct ctl_pkg *pkg, u32 len,
+			enum tb_cfg_pkg_type type, u64 route)
 {
 	struct tb_cfg_header *header = pkg->buffer;
 
@@ -167,9 +185,9 @@ static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
 	if (WARN(header->unknown != 1 << 9,
 			"header->unknown is %#x\n", header->unknown))
 		return -EIO;
-	if (WARN(route != get_route(*header),
+	if (WARN(route != tb_cfg_get_route(header),
 			"wrong route (expected %llx, got %llx)",
-			route, get_route(*header)))
+			route, tb_cfg_get_route(header)))
 		return -EIO;
 	return 0;
 }
@@ -189,8 +207,6 @@ static int check_config_address(struct tb_cfg_address addr,
 	if (WARN(length != addr.length, "wrong space (expected %x, got %x\n)",
 			length, addr.length))
 		return -EIO;
-	if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
-		return -EIO;
 	/*
 	 * We cannot check addr->port as it is set to the upstream port of the
 	 * sender.
@@ -198,14 +214,14 @@ static int check_config_address(struct tb_cfg_address addr,
 	return 0;
 }
 
-static struct tb_cfg_result decode_error(struct ctl_pkg *response)
+static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
 {
 	struct cfg_error_pkg *pkg = response->buffer;
 	struct tb_cfg_result res = { 0 };
-	res.response_route = get_route(pkg->header);
+	res.response_route = tb_cfg_get_route(&pkg->header);
 	res.response_port = 0;
 	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
-			       get_route(pkg->header));
+			       tb_cfg_get_route(&pkg->header));
 	if (res.err)
 		return res;
 
@@ -219,7 +235,7 @@ static struct tb_cfg_result decode_error(struct ctl_pkg *response)
 
 }
 
-static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
+static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
 					 enum tb_cfg_pkg_type type, u64 route)
 {
 	struct tb_cfg_header *header = pkg->buffer;
@@ -229,7 +245,7 @@ static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
 		return decode_error(pkg);
 
 	res.response_port = 0; /* will be updated later for cfg_read/write */
-	res.response_route = get_route(*header);
+	res.response_route = tb_cfg_get_route(header);
 	res.err = check_header(pkg, len, type, route);
 	return res;
 }
@@ -273,7 +289,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
 	}
 }
 
-static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
+static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
 {
 	int i;
 	for (i = 0; i < len; i++)
@@ -287,7 +303,7 @@ static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
 		dst[i] = be32_to_cpu(src[i]);
 }
 
-static __be32 tb_crc(void *data, size_t len)
+static __be32 tb_crc(const void *data, size_t len)
 {
 	return cpu_to_be32(~__crc32c_le(~0, data, len));
 }
@@ -333,7 +349,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
  *
  * Return: Returns 0 on success or an error code on failure.
  */
-static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
+static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
 		     enum tb_cfg_pkg_type type)
 {
 	int res;
@@ -364,24 +380,12 @@ static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
 }
 
 /**
- * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
+ * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
  */
-static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
-				     struct ctl_pkg *response)
+static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+				struct ctl_pkg *pkg, size_t size)
 {
-	struct cfg_event_pkg *pkg = response->buffer;
-	u64 route = get_route(pkg->header);
-
-	if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
-		tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
-		return;
-	}
-
-	if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
-		tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
-			    route, pkg->port);
-	WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
-	ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
+	ctl->callback(ctl->callback_data, type, pkg->buffer, size);
 }
 
 static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
@@ -394,10 +398,30 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
 					     */
 }
 
+static int tb_async_error(const struct ctl_pkg *pkg)
+{
+	const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+
+	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
+		return false;
+
+	switch (error->error) {
+	case TB_CFG_ERROR_LINK_ERROR:
+	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+		return true;
+
+	default:
+		return false;
+	}
+}
+
 static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 			       bool canceled)
 {
 	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+	struct tb_cfg_request *req;
+	__be32 crc32;
 
 	if (canceled)
 		return; /*
@@ -412,55 +436,168 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 	}
 
 	frame->size -= 4; /* remove checksum */
-	if (*(__be32 *) (pkg->buffer + frame->size)
-			!= tb_crc(pkg->buffer, frame->size)) {
-		tb_ctl_err(pkg->ctl,
-			   "RX: checksum mismatch, dropping packet\n");
-		goto rx;
-	}
+	crc32 = tb_crc(pkg->buffer, frame->size);
 	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
 
-	if (frame->eof == TB_CFG_PKG_EVENT) {
-		tb_ctl_handle_plug_event(pkg->ctl, pkg);
+	switch (frame->eof) {
+	case TB_CFG_PKG_READ:
+	case TB_CFG_PKG_WRITE:
+	case TB_CFG_PKG_ERROR:
+	case TB_CFG_PKG_OVERRIDE:
+	case TB_CFG_PKG_RESET:
+		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+			tb_ctl_err(pkg->ctl,
+				   "RX: checksum mismatch, dropping packet\n");
+			goto rx;
+		}
+		if (tb_async_error(pkg)) {
+			tb_ctl_handle_event(pkg->ctl, frame->eof,
+					    pkg, frame->size);
+			goto rx;
+		}
+		break;
+
+	case TB_CFG_PKG_EVENT:
+		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+			tb_ctl_err(pkg->ctl,
+				   "RX: checksum mismatch, dropping packet\n");
+			goto rx;
+		}
+		/* Fall through */
+	case TB_CFG_PKG_ICM_EVENT:
+		tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
 		goto rx;
+
+	default:
+		break;
 	}
-	if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
-		tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
-		goto rx;
+
+	/*
+	 * The received packet will be processed only if there is an
+	 * active request and that the packet is what is expected. This
+	 * prevents packets such as replies coming after timeout has
+	 * triggered from messing with the active requests.
+	 */
+	req = tb_cfg_request_find(pkg->ctl, pkg);
+	if (req) {
+		if (req->copy(req, pkg))
+			schedule_work(&req->work);
+		tb_cfg_request_put(req);
 	}
-	complete(&pkg->ctl->response_ready);
-	return;
+
 rx:
 	tb_ctl_rx_submit(pkg);
 }
 
-/**
- * tb_ctl_rx() - receive a packet from the control channel
- */
-static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
-				      size_t length, int timeout_msec,
-				      u64 route, enum tb_cfg_pkg_type type)
+static void tb_cfg_request_work(struct work_struct *work)
 {
-	struct tb_cfg_result res;
-	struct ctl_pkg *pkg;
+	struct tb_cfg_request *req = container_of(work, typeof(*req), work);
 
-	if (!wait_for_completion_timeout(&ctl->response_ready,
-					 msecs_to_jiffies(timeout_msec))) {
-		tb_ctl_WARN(ctl, "RX: timeout\n");
-		return (struct tb_cfg_result) { .err = -ETIMEDOUT };
-	}
-	if (!kfifo_get(&ctl->response_fifo, &pkg)) {
-		tb_ctl_WARN(ctl, "empty kfifo\n");
-		return (struct tb_cfg_result) { .err = -EIO };
-	}
+	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+		req->callback(req->callback_data);
 
-	res = parse_header(pkg, length, type, route);
-	if (!res.err)
-		memcpy(buffer, pkg->buffer, length);
-	tb_ctl_rx_submit(pkg);
-	return res;
+	tb_cfg_request_dequeue(req);
+	tb_cfg_request_put(req);
 }
 
+/**
+ * tb_cfg_request() - Start control request not waiting for it to complete
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @callback: Callback called when the request is completed
+ * @callback_data: Data to be passed to @callback
+ *
+ * This queues @req on the given control channel without waiting for it
+ * to complete. When the request completes @callback is called.
+ */
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+		   void (*callback)(void *), void *callback_data)
+{
+	int ret;
+
+	req->flags = 0;
+	req->callback = callback;
+	req->callback_data = callback_data;
+	INIT_WORK(&req->work, tb_cfg_request_work);
+	INIT_LIST_HEAD(&req->list);
+
+	tb_cfg_request_get(req);
+	ret = tb_cfg_request_enqueue(ctl, req);
+	if (ret)
+		goto err_put;
+
+	ret = tb_ctl_tx(ctl, req->request, req->request_size,
+			req->request_type);
+	if (ret)
+		goto err_dequeue;
+
+	if (!req->response)
+		schedule_work(&req->work);
+
+	return 0;
+
+err_dequeue:
+	tb_cfg_request_dequeue(req);
+err_put:
+	tb_cfg_request_put(req);
+
+	return ret;
+}
+
+/**
+ * tb_cfg_request_cancel() - Cancel a control request
+ * @req: Request to cancel
+ * @err: Error to assign to the request
+ *
+ * This function can be used to cancel an ongoing request. It will wait
+ * until the request is not active anymore.
+ */
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
+{
+	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
+	schedule_work(&req->work);
+	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
+	req->result.err = err;
+}
+
+static void tb_cfg_request_complete(void *data)
+{
+	complete(data);
+}
+
+/**
+ * tb_cfg_request_sync() - Start control request and wait until it completes
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @timeout_msec: Timeout how long to wait @req to complete
+ *
+ * Starts a control request and waits until it completes. If the timeout
+ * triggers, the request is canceled before the function returns. Note that
+ * the caller needs to make sure only one message for a given switch is
+ * active at a time.
+ */
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+					 struct tb_cfg_request *req,
+					 int timeout_msec)
+{
+	unsigned long timeout = msecs_to_jiffies(timeout_msec);
+	struct tb_cfg_result res = { 0 };
+	DECLARE_COMPLETION_ONSTACK(done);
+	int ret;
+
+	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
+	if (ret) {
+		res.err = ret;
+		return res;
+	}
+
+	if (!wait_for_completion_timeout(&done, timeout))
+		tb_cfg_request_cancel(req, -ETIMEDOUT);
+
+	flush_work(&req->work);
+
+	return req->result;
+}
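+
+/*
+ * Illustrative sketch only (not a new API): a typical synchronous caller
+ * allocates a request, points it at a request/response buffer pair and
+ * releases it when done. tb_cfg_reset() below follows this pattern:
+ *
+ *	req = tb_cfg_request_alloc();
+ *	req->match = tb_cfg_match;
+ *	req->copy = tb_cfg_copy;
+ *	req->request = &request;
+ *	req->request_size = sizeof(request);
+ *	req->request_type = TB_CFG_PKG_RESET;
+ *	req->response = &reply;
+ *	req->response_size = sizeof(reply);
+ *	req->response_type = TB_CFG_PKG_RESET;
+ *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+ *	tb_cfg_request_put(req);
+ */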
 
 /* public interface, alloc/start/stop/free */
 
@@ -471,7 +608,7 @@ static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
  *
  * Return: Returns a pointer on success or NULL on failure.
  */
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
 {
 	int i;
 	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
@@ -481,18 +618,18 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
 	ctl->callback = cb;
 	ctl->callback_data = cb_data;
 
-	init_completion(&ctl->response_ready);
-	INIT_KFIFO(ctl->response_fifo);
+	mutex_init(&ctl->request_queue_lock);
+	INIT_LIST_HEAD(&ctl->request_queue);
 	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
 					 TB_FRAME_SIZE, 4, 0);
 	if (!ctl->frame_pool)
 		goto err;
 
-	ctl->tx = ring_alloc_tx(nhi, 0, 10);
+	ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
 	if (!ctl->tx)
 		goto err;
 
-	ctl->rx = ring_alloc_rx(nhi, 0, 10);
+	ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
 	if (!ctl->rx)
 		goto err;
 
@@ -520,6 +657,10 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
 void tb_ctl_free(struct tb_ctl *ctl)
 {
 	int i;
+
+	if (!ctl)
+		return;
+
 	if (ctl->rx)
 		ring_free(ctl->rx);
 	if (ctl->tx)
@@ -546,6 +687,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
 	ring_start(ctl->rx);
 	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
 		tb_ctl_rx_submit(ctl->rx_packets[i]);
+
+	ctl->running = true;
 }
 
 /**
@@ -558,12 +701,16 @@ void tb_ctl_start(struct tb_ctl *ctl)
  */
 void tb_ctl_stop(struct tb_ctl *ctl)
 {
+	mutex_lock(&ctl->request_queue_lock);
+	ctl->running = false;
+	mutex_unlock(&ctl->request_queue_lock);
+
 	ring_stop(ctl->rx);
 	ring_stop(ctl->tx);
 
-	if (!kfifo_is_empty(&ctl->response_fifo))
-		tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
-	kfifo_reset(&ctl->response_fifo);
+	if (!list_empty(&ctl->request_queue))
+		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
+	INIT_LIST_HEAD(&ctl->request_queue);
 	tb_ctl_info(ctl, "control channel stopped\n");
 }
 
@@ -578,7 +725,7 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
 		 enum tb_cfg_error error)
 {
 	struct cfg_error_pkg pkg = {
-		.header = make_header(route),
+		.header = tb_cfg_make_header(route),
 		.port = port,
 		.error = error,
 	};
@@ -586,6 +733,49 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
 	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
 }
 
+static bool tb_cfg_match(const struct tb_cfg_request *req,
+			 const struct ctl_pkg *pkg)
+{
+	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+		return true;
+
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (route != tb_cfg_get_route(req->request))
+		return false;
+	if (pkg->frame.size != req->response_size)
+		return false;
+
+	if (pkg->frame.eof == TB_CFG_PKG_READ ||
+	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
+		const struct cfg_read_pkg *req_hdr = req->request;
+		const struct cfg_read_pkg *res_hdr = pkg->buffer;
+
+		if (req_hdr->addr.seq != res_hdr->addr.seq)
+			return false;
+	}
+
+	return true;
+}
+
+static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	struct tb_cfg_result res;
+
+	/* Now make sure it is in expected format */
+	res = parse_header(pkg, req->response_size, req->response_type,
+			   tb_cfg_get_route(req->request));
+	if (!res.err)
+		memcpy(req->response, pkg->buffer, req->response_size);
+
+	req->result = res;
+
+	/* Always complete when first response is received */
+	return true;
+}
+
 /**
  * tb_cfg_reset() - send a reset packet and wait for a response
  *
@@ -596,16 +786,31 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
 				  int timeout_msec)
 {
-	int err;
-	struct cfg_reset_pkg request = { .header = make_header(route) };
+	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
+	struct tb_cfg_result res = { 0 };
 	struct tb_cfg_header reply;
+	struct tb_cfg_request *req;
 
-	err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
-	if (err)
-		return (struct tb_cfg_result) { .err = err };
+	req = tb_cfg_request_alloc();
+	if (!req) {
+		res.err = -ENOMEM;
+		return res;
+	}
 
-	return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
-			 TB_CFG_PKG_RESET);
+	req->match = tb_cfg_match;
+	req->copy = tb_cfg_copy;
+	req->request = &request;
+	req->request_size = sizeof(request);
+	req->request_type = TB_CFG_PKG_RESET;
+	req->response = &reply;
+	req->response_size = sizeof(reply);
+	req->response_type = TB_CFG_PKG_RESET;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res;
 }
 
 /**
@@ -619,7 +824,7 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
 {
 	struct tb_cfg_result res = { 0 };
 	struct cfg_read_pkg request = {
-		.header = make_header(route),
+		.header = tb_cfg_make_header(route),
 		.addr = {
 			.port = port,
 			.space = space,
@@ -628,13 +833,39 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
 		},
 	};
 	struct cfg_write_pkg reply;
+	int retries = 0;
 
-	res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
-	if (res.err)
-		return res;
+	while (retries < TB_CTL_RETRIES) {
+		struct tb_cfg_request *req;
 
-	res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
-			TB_CFG_PKG_READ);
+		req = tb_cfg_request_alloc();
+		if (!req) {
+			res.err = -ENOMEM;
+			return res;
+		}
+
+		request.addr.seq = retries++;
+
+		req->match = tb_cfg_match;
+		req->copy = tb_cfg_copy;
+		req->request = &request;
+		req->request_size = sizeof(request);
+		req->request_type = TB_CFG_PKG_READ;
+		req->response = &reply;
+		req->response_size = 12 + 4 * length;
+		req->response_type = TB_CFG_PKG_READ;
+
+		res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			break;
+
+		/* Wait a bit (arbitrary time) before sending a retry */
+		usleep_range(10, 100);
+	}
+
 	if (res.err)
 		return res;
 
@@ -650,13 +881,13 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
  *
  * Offset and length are in dwords.
  */
-struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
 		u64 route, u32 port, enum tb_cfg_space space,
 		u32 offset, u32 length, int timeout_msec)
 {
 	struct tb_cfg_result res = { 0 };
 	struct cfg_write_pkg request = {
-		.header = make_header(route),
+		.header = tb_cfg_make_header(route),
 		.addr = {
 			.port = port,
 			.space = space,
@@ -665,15 +896,41 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
 		},
 	};
 	struct cfg_read_pkg reply;
+	int retries = 0;
 
 	memcpy(&request.data, buffer, length * 4);
 
-	res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
-	if (res.err)
-		return res;
+	while (retries < TB_CTL_RETRIES) {
+		struct tb_cfg_request *req;
 
-	res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
-			TB_CFG_PKG_WRITE);
+		req = tb_cfg_request_alloc();
+		if (!req) {
+			res.err = -ENOMEM;
+			return res;
+		}
+
+		request.addr.seq = retries++;
+
+		req->match = tb_cfg_match;
+		req->copy = tb_cfg_copy;
+		req->request = &request;
+		req->request_size = 12 + 4 * length;
+		req->request_type = TB_CFG_PKG_WRITE;
+		req->response = &reply;
+		req->response_size = sizeof(reply);
+		req->response_type = TB_CFG_PKG_WRITE;
+
+		res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			break;
+
+		/* Wait a bit (arbitrary time) before sending a retry */
+		usleep_range(10, 100);
+	}
+
 	if (res.err)
 		return res;
 
@@ -687,24 +944,52 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 {
 	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
 			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
-	if (res.err == 1) {
+	switch (res.err) {
+	case 0:
+		/* Success */
+		break;
+
+	case 1:
+		/* Thunderbolt error, tb_error holds the actual number */
 		tb_cfg_print_error(ctl, &res);
 		return -EIO;
+
+	case -ETIMEDOUT:
+		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
+			    space, offset);
+		break;
+
+	default:
+		WARN(1, "tb_cfg_read: %d\n", res.err);
+		break;
 	}
-	WARN(res.err, "tb_cfg_read: %d\n", res.err);
 	return res.err;
 }
 
-int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
 		 enum tb_cfg_space space, u32 offset, u32 length)
 {
 	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
 			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
-	if (res.err == 1) {
+	switch (res.err) {
+	case 0:
+		/* Success */
+		break;
+
+	case 1:
+		/* Thunderbolt error, tb_error holds the actual number */
 		tb_cfg_print_error(ctl, &res);
 		return -EIO;
+
+	case -ETIMEDOUT:
+		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
+			    space, offset);
+		break;
+
+	default:
+		WARN(1, "tb_cfg_write: %d\n", res.err);
+		break;
 	}
-	WARN(res.err, "tb_cfg_write: %d\n", res.err);
 	return res.err;
 }
 
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index ba87d6e..36fd28b 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -7,14 +7,18 @@
 #ifndef _TB_CFG
 #define _TB_CFG
 
+#include <linux/kref.h>
+
 #include "nhi.h"
+#include "tb_msgs.h"
 
 /* control channel */
 struct tb_ctl;
 
-typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
+typedef void (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+			 const void *buf, size_t size);
 
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
 void tb_ctl_start(struct tb_ctl *ctl);
 void tb_ctl_stop(struct tb_ctl *ctl);
 void tb_ctl_free(struct tb_ctl *ctl);
@@ -23,21 +27,6 @@ void tb_ctl_free(struct tb_ctl *ctl);
 
 #define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
 
-enum tb_cfg_space {
-	TB_CFG_HOPS = 0,
-	TB_CFG_PORT = 1,
-	TB_CFG_SWITCH = 2,
-	TB_CFG_COUNTERS = 3,
-};
-
-enum tb_cfg_error {
-	TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
-	TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
-	TB_CFG_ERROR_NO_SUCH_PORT = 4,
-	TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
-	TB_CFG_ERROR_LOOP = 8,
-};
-
 struct tb_cfg_result {
 	u64 response_route;
 	u32 response_port; /*
@@ -52,6 +41,84 @@ struct tb_cfg_result {
 	enum tb_cfg_error tb_error; /* valid if err == 1 */
 };
 
+struct ctl_pkg {
+	struct tb_ctl *ctl;
+	void *buffer;
+	struct ring_frame frame;
+};
+
+/**
+ * struct tb_cfg_request - Control channel request
+ * @kref: Reference count
+ * @ctl: Pointer to the control channel structure. Only set when the
+ *	 request is queued.
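+ * @request: Request that is sent over the control channel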
+ * @request_size: Size of the request packet (in bytes)
+ * @request_type: Type of the request packet
+ * @response: Response is stored here
+ * @response_size: Maximum size of one response packet
+ * @response_type: Expected type of the response packet
+ * @npackets: Number of packets expected to be returned with this request
+ * @match: Function used to match the incoming packet
+ * @copy: Function used to copy the incoming packet to @response
+ * @callback: Callback called when the request is finished successfully
+ * @callback_data: Data to be passed to @callback
+ * @flags: Flags for the request
+ * @work: Work item used to complete the request
+ * @result: Result after the request has been completed
+ * @list: Requests are queued using this field
+ *
+ * An arbitrary request over the Thunderbolt control channel. For standard
+ * control channel messages, one should use tb_cfg_read/write() and
+ * friends if possible.
+ */
+struct tb_cfg_request {
+	struct kref kref;
+	struct tb_ctl *ctl;
+	const void *request;
+	size_t request_size;
+	enum tb_cfg_pkg_type request_type;
+	void *response;
+	size_t response_size;
+	enum tb_cfg_pkg_type response_type;
+	size_t npackets;
+	bool (*match)(const struct tb_cfg_request *req,
+		      const struct ctl_pkg *pkg);
+	bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
+	void (*callback)(void *callback_data);
+	void *callback_data;
+	unsigned long flags;
+	struct work_struct work;
+	struct tb_cfg_result result;
+	struct list_head list;
+};
+
+#define TB_CFG_REQUEST_ACTIVE		0
+#define TB_CFG_REQUEST_CANCELED		1
+
+struct tb_cfg_request *tb_cfg_request_alloc(void);
+void tb_cfg_request_get(struct tb_cfg_request *req);
+void tb_cfg_request_put(struct tb_cfg_request *req);
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+		   void (*callback)(void *), void *callback_data);
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+			struct tb_cfg_request *req, int timeout_msec);
+
+static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
+{
+	return (u64) header->route_hi << 32 | header->route_lo;
+}
+
+static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
+{
+	struct tb_cfg_header header = {
+		.route_hi = route >> 32,
+		.route_lo = route,
+	};
+	/* check for overflow, route_hi is not 32 bits! */
+	WARN_ON(tb_cfg_get_route(&header) != route);
+	return header;
+}
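+
+/*
+ * For illustration: route 0x0000000300000102 is stored as route_hi = 0x3
+ * and route_lo = 0x00000102, and tb_cfg_get_route() reassembles the same
+ * 64-bit value. The WARN_ON() above catches route values that do not fit
+ * in the narrower route_hi bit-field.
+ */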
 
 int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
 		 enum tb_cfg_error error);
@@ -61,13 +128,13 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
 				     u64 route, u32 port,
 				     enum tb_cfg_space space, u32 offset,
 				     u32 length, int timeout_msec);
-struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
 				      u64 route, u32 port,
 				      enum tb_cfg_space space, u32 offset,
 				      u32 length, int timeout_msec);
 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
 		enum tb_cfg_space space, u32 offset, u32 length);
-int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
 		 enum tb_cfg_space space, u32 offset, u32 length);
 int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
 
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
new file mode 100644
index 0000000..af6dde3
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.c
@@ -0,0 +1,524 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dma_port.h"
+#include "tb_regs.h"
+
+#define DMA_PORT_CAP			0x3e
+
+#define MAIL_DATA			1
+#define MAIL_DATA_DWORDS		16
+
+#define MAIL_IN				17
+#define MAIL_IN_CMD_SHIFT		28
+#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
+#define MAIL_IN_CMD_FLASH_WRITE		0x0
+#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
+#define MAIL_IN_CMD_FLASH_READ		0x2
+#define MAIL_IN_CMD_POWER_CYCLE		0x4
+#define MAIL_IN_DWORDS_SHIFT		24
+#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
+#define MAIL_IN_ADDRESS_SHIFT		2
+#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
+#define MAIL_IN_CSS			BIT(1)
+#define MAIL_IN_OP_REQUEST		BIT(0)
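+
+/*
+ * Summary of the MAIL_IN command word layout implied by the masks above:
+ *
+ *	[31:28] command		[27:24] dword count
+ *	[23:2]  dword address	[1] CSS header	[0] operation request
+ */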
+
+#define MAIL_OUT			18
+#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
+#define MAIL_OUT_STATUS_CMD_SHIFT	4
+#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
+#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
+#define MAIL_OUT_STATUS_COMPLETED	0
+#define MAIL_OUT_STATUS_ERR_AUTH	1
+#define MAIL_OUT_STATUS_ERR_ACCESS	2
+
+#define DMA_PORT_TIMEOUT		5000 /* ms */
+#define DMA_PORT_RETRIES		3
+
+/**
+ * struct tb_dma_port - DMA control port
+ * @sw: Switch the DMA port belongs to
+ * @port: Switch port number where DMA capability is found
+ * @base: Start offset of the mailbox registers
+ * @buf: Temporary buffer to store a single block
+ */
+struct tb_dma_port {
+	struct tb_switch *sw;
+	u8 port;
+	u32 base;
+	u8 *buf;
+};
+
+/*
+ * When the switch is in safe mode it supports very little functionality
+ * so we don't validate that much here.
+ */
+static bool dma_port_match(const struct tb_cfg_request *req,
+			   const struct ctl_pkg *pkg)
+{
+	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+		return true;
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (route != tb_cfg_get_route(req->request))
+		return false;
+	if (pkg->frame.size != req->response_size)
+		return false;
+
+	return true;
+}
+
+static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	memcpy(req->response, pkg->buffer, req->response_size);
+	return true;
+}
+
+static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
+			 u32 port, u32 offset, u32 length, int timeout_msec)
+{
+	struct cfg_read_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.seq = 1,
+			.port = port,
+			.space = TB_CFG_PORT,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct tb_cfg_request *req;
+	struct cfg_write_pkg reply;
+	struct tb_cfg_result res;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = dma_port_match;
+	req->copy = dma_port_copy;
+	req->request = &request;
+	req->request_size = sizeof(request);
+	req->request_type = TB_CFG_PKG_READ;
+	req->response = &reply;
+	req->response_size = 12 + 4 * length;
+	req->response_type = TB_CFG_PKG_READ;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	if (res.err)
+		return res.err;
+
+	memcpy(buffer, &reply.data, 4 * length);
+	return 0;
+}
+
+static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
+			  u32 port, u32 offset, u32 length, int timeout_msec)
+{
+	struct cfg_write_pkg request = {
+		.header = tb_cfg_make_header(route),
+		.addr = {
+			.seq = 1,
+			.port = port,
+			.space = TB_CFG_PORT,
+			.offset = offset,
+			.length = length,
+		},
+	};
+	struct tb_cfg_request *req;
+	struct cfg_read_pkg reply;
+	struct tb_cfg_result res;
+
+	memcpy(&request.data, buffer, length * 4);
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = dma_port_match;
+	req->copy = dma_port_copy;
+	req->request = &request;
+	req->request_size = 12 + 4 * length;
+	req->request_type = TB_CFG_PKG_WRITE;
+	req->response = &reply;
+	req->response_size = sizeof(reply);
+	req->response_type = TB_CFG_PKG_WRITE;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res.err;
+}
+
+static int dma_find_port(struct tb_switch *sw)
+{
+	int port, ret;
+	u32 type;
+
+	/*
+	 * The DMA (NHI) port is either 3 or 5 depending on the
+	 * controller. Try both starting from 5 which is more common.
+	 */
+	port = 5;
+	ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
+			    DMA_PORT_TIMEOUT);
+	if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+		return port;
+
+	port = 3;
+	ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
+			    DMA_PORT_TIMEOUT);
+	if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+		return port;
+
+	return -ENODEV;
+}
+
+/**
+ * dma_port_alloc() - Finds the DMA control port of a switch
+ * @sw: Switch from which to find the DMA port
+ *
+ * The function checks if the switch NHI port supports the DMA
+ * configuration based mailbox capability and if it does, allocates and
+ * initializes the DMA port structure. Returns %NULL if the capability
+ * was not found.
+ *
+ * The DMA control port is also functional when the switch is in safe
+ * mode.
+ */
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
+{
+	struct tb_dma_port *dma;
+	int port;
+
+	port = dma_find_port(sw);
+	if (port < 0)
+		return NULL;
+
+	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return NULL;
+
+	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
+	if (!dma->buf) {
+		kfree(dma);
+		return NULL;
+	}
+
+	dma->sw = sw;
+	dma->port = port;
+	dma->base = DMA_PORT_CAP;
+
+	return dma;
+}
+
+/**
+ * dma_port_free() - Release DMA control port structure
+ * @dma: DMA control port
+ */
+void dma_port_free(struct tb_dma_port *dma)
+{
+	if (dma) {
+		kfree(dma->buf);
+		kfree(dma);
+	}
+}
+
+static int dma_port_wait_for_completion(struct tb_dma_port *dma,
+					unsigned int timeout)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout);
+	struct tb_switch *sw = dma->sw;
+
+	do {
+		int ret;
+		u32 in;
+
+		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
+				    dma->base + MAIL_IN, 1, 50);
+		if (ret) {
+			if (ret != -ETIMEDOUT)
+				return ret;
+		} else if (!(in & MAIL_IN_OP_REQUEST)) {
+			return 0;
+		}
+
+		usleep_range(50, 100);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
+static int status_to_errno(u32 status)
+{
+	switch (status & MAIL_OUT_STATUS_MASK) {
+	case MAIL_OUT_STATUS_COMPLETED:
+		return 0;
+	case MAIL_OUT_STATUS_ERR_AUTH:
+		return -EINVAL;
+	case MAIL_OUT_STATUS_ERR_ACCESS:
+		return -EACCES;
+	}
+
+	return -EIO;
+}
+
+static int dma_port_request(struct tb_dma_port *dma, u32 in,
+			    unsigned int timeout)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 out;
+	int ret;
+
+	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
+			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = dma_port_wait_for_completion(dma, timeout);
+	if (ret)
+		return ret;
+
+	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return status_to_errno(out);
+}
+
+static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
+				     void *buf, u32 size)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 in, dwaddress, dwords;
+	int ret;
+
+	dwaddress = address / 4;
+	dwords = size / 4;
+
+	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
+	if (dwords < MAIL_DATA_DWORDS)
+		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+	in |= MAIL_IN_OP_REQUEST;
+
+	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
+			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+}
+
+static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
+				      const void *buf, u32 size)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 in, dwaddress, dwords;
+	int ret;
+
+	dwords = size / 4;
+
+	/* Write the block to MAIL_DATA registers */
+	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
+			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
+
+	/* CSS header write is always done to the same magic address */
+	if (address >= DMA_PORT_CSS_ADDRESS) {
+		dwaddress = DMA_PORT_CSS_ADDRESS;
+		in |= MAIL_IN_CSS;
+	} else {
+		dwaddress = address / 4;
+	}
+
+	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+}
+
+/**
+ * dma_port_flash_read() - Read from active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of active region
+ * @buf: Buffer where the data is read
+ * @size: Size of the buffer
+ */
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+			void *buf, size_t size)
+{
+	unsigned int retries = DMA_PORT_RETRIES;
+	unsigned int offset;
+
+	offset = address & 3;
+	address = address & ~3;
+
+	do {
+		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+		int ret;
+
+		ret = dma_port_flash_read_block(dma, address, dma->buf,
+						ALIGN(nbytes, 4));
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		memcpy(buf, dma->buf + offset, nbytes);
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
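+
+/*
+ * Illustrative sketch (hypothetical caller, not part of this patch):
+ *
+ *	u8 image[256];
+ *	int ret = dma_port_flash_read(dma, 0, image, sizeof(image));
+ *
+ * The helper splits the transfer into MAIL_DATA sized blocks and handles
+ * start addresses that are not dword aligned.
+ */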
+
+/**
+ * dma_port_flash_write() - Write to non-active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of non-active region
+ * @buf: Data to write
+ * @size: Size of the buffer
+ *
+ * Writes a block of data to the non-active flash region of the switch. If
+ * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
+ * using the CSS command.
+ */
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+			 const void *buf, size_t size)
+{
+	unsigned int retries = DMA_PORT_RETRIES;
+	unsigned int offset;
+
+	if (address >= DMA_PORT_CSS_ADDRESS) {
+		offset = 0;
+		if (size > DMA_PORT_CSS_MAX_SIZE)
+			return -E2BIG;
+	} else {
+		offset = address & 3;
+		address = address & ~3;
+	}
+
+	do {
+		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+		int ret;
+
+		memcpy(dma->buf + offset, buf, nbytes);
+
+		ret = dma_port_flash_write_block(dma, address, buf, nbytes);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+/**
+ * dma_port_flash_update_auth() - Starts flash authenticate cycle
+ * @dma: DMA control port
+ *
+ * Starts the flash update authentication cycle. If the image in the
+ * non-active area was valid, the switch starts the upgrade process where
+ * the active and non-active areas get swapped in the end. The caller
+ * should call dma_port_flash_update_auth_status() to get the status of
+ * this command, because if the switch in question is the root switch the
+ * Thunderbolt host controller gets reset as well.
+ */
+int dma_port_flash_update_auth(struct tb_dma_port *dma)
+{
+	u32 in;
+
+	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, 150);
+}
+
+/**
+ * dma_port_flash_update_auth_status() - Reads status of update auth command
+ * @dma: DMA control port
+ * @status: Status code of the operation
+ *
+ * The function checks if there is status available from the last update
+ * auth command. Returns %0 if there is no status and no further
+ * action is required. If there is status, %1 is returned instead and
+ * @status holds the failure code.
+ *
+ * Negative return means there was an error reading status from the
+ * switch.
+ */
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
+{
+	struct tb_switch *sw = dma->sw;
+	u32 out, cmd;
+	int ret;
+
+	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	/* Check if the status relates to flash update auth */
+	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
+	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
+		if (status)
+			*status = out & MAIL_OUT_STATUS_MASK;
+
+		/* Reset is needed in any case */
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * dma_port_power_cycle() - Power cycles the switch
+ * @dma: DMA control port
+ *
+ * Triggers power cycle to the switch.
+ */
+int dma_port_power_cycle(struct tb_dma_port *dma)
+{
+	u32 in;
+
+	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
+	in |= MAIL_IN_OP_REQUEST;
+
+	return dma_port_request(dma, in, 150);
+}
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
new file mode 100644
index 0000000..c4a69e0
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.h
@@ -0,0 +1,34 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DMA_PORT_H_
+#define DMA_PORT_H_
+
+#include "tb.h"
+
+struct tb_switch;
+struct tb_dma_port;
+
+#define DMA_PORT_CSS_ADDRESS		0x3fffff
+#define DMA_PORT_CSS_MAX_SIZE		SZ_128
+
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
+void dma_port_free(struct tb_dma_port *dma);
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+			void *buf, size_t size);
+int dma_port_flash_update_auth(struct tb_dma_port *dma);
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+			 const void *buf, size_t size);
+int dma_port_power_cycle(struct tb_dma_port *dma);
+
+#endif
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
new file mode 100644
index 0000000..9f2dcd4
--- /dev/null
+++ b/drivers/thunderbolt/domain.c
@@ -0,0 +1,456 @@
+/*
+ * Thunderbolt bus support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(tb_domain_ida);
+
+static const char * const tb_security_names[] = {
+	[TB_SECURITY_NONE] = "none",
+	[TB_SECURITY_USER] = "user",
+	[TB_SECURITY_SECURE] = "secure",
+	[TB_SECURITY_DPONLY] = "dponly",
+};
+
+static ssize_t security_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
+}
+static DEVICE_ATTR_RO(security);
+
+static struct attribute *domain_attrs[] = {
+	&dev_attr_security.attr,
+	NULL,
+};
+
+static struct attribute_group domain_attr_group = {
+	.attrs = domain_attrs,
+};
+
+static const struct attribute_group *domain_attr_groups[] = {
+	&domain_attr_group,
+	NULL,
+};
+
+struct bus_type tb_bus_type = {
+	.name = "thunderbolt",
+};
+
+static void tb_domain_release(struct device *dev)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	tb_ctl_free(tb->ctl);
+	destroy_workqueue(tb->wq);
+	ida_simple_remove(&tb_domain_ida, tb->index);
+	mutex_destroy(&tb->lock);
+	kfree(tb);
+}
+
+struct device_type tb_domain_type = {
+	.name = "thunderbolt_domain",
+	.release = tb_domain_release,
+};
+
+/**
+ * tb_domain_alloc() - Allocate a domain
+ * @nhi: Pointer to the host controller
+ * @privsize: Size of the connection manager private data
+ *
+ * Allocates and initializes a new Thunderbolt domain. Connection
+ * managers are expected to call this and then fill in @cm_ops
+ * accordingly.
+ *
+ * Call tb_domain_put() to release the domain before it has been added
+ * to the system.
+ *
+ * Return: allocated domain structure or %NULL in case of error
+ */
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+{
+	struct tb *tb;
+
+	/*
+	 * Make sure the structure sizes match what the hardware
+	 * expects because bit-fields are being used.
+	 */
+	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
+	if (!tb)
+		return NULL;
+
+	tb->nhi = nhi;
+	mutex_init(&tb->lock);
+
+	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+	if (tb->index < 0)
+		goto err_free;
+
+	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
+	if (!tb->wq)
+		goto err_remove_ida;
+
+	tb->dev.parent = &nhi->pdev->dev;
+	tb->dev.bus = &tb_bus_type;
+	tb->dev.type = &tb_domain_type;
+	tb->dev.groups = domain_attr_groups;
+	dev_set_name(&tb->dev, "domain%d", tb->index);
+	device_initialize(&tb->dev);
+
+	return tb;
+
+err_remove_ida:
+	ida_simple_remove(&tb_domain_ida, tb->index);
+err_free:
+	kfree(tb);
+
+	return NULL;
+}
+
+static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size)
+{
+	struct tb *tb = data;
+
+	if (!tb->cm_ops->handle_event) {
+		tb_warn(tb, "domain does not have event handler\n");
+		return;
+	}
+
+	tb->cm_ops->handle_event(tb, type, buf, size);
+}
+
+/**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this function has returned successfully. In order to remove
+ * and release the domain after this function has been called, call
+ * tb_domain_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_domain_add(struct tb *tb)
+{
+	int ret;
+
+	if (WARN_ON(!tb->cm_ops))
+		return -EINVAL;
+
+	mutex_lock(&tb->lock);
+
+	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
+	if (!tb->ctl) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	/*
+	 * The event callback (tb_domain_event_cb) may be called as soon
+	 * as the control channel is started. That's why we have to hold
+	 * the lock here.
+	 */
+	tb_ctl_start(tb->ctl);
+
+	if (tb->cm_ops->driver_ready) {
+		ret = tb->cm_ops->driver_ready(tb);
+		if (ret)
+			goto err_ctl_stop;
+	}
+
+	ret = device_add(&tb->dev);
+	if (ret)
+		goto err_ctl_stop;
+
+	/* Start the domain */
+	if (tb->cm_ops->start) {
+		ret = tb->cm_ops->start(tb);
+		if (ret)
+			goto err_domain_del;
+	}
+
+	/* This starts event processing */
+	mutex_unlock(&tb->lock);
+
+	return 0;
+
+err_domain_del:
+	device_del(&tb->dev);
+err_ctl_stop:
+	tb_ctl_stop(tb->ctl);
+err_unlock:
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_remove() - Removes and releases a domain
+ * @tb: Domain to remove
+ *
+ * Stops the domain, removes it from the system and releases all
+ * resources once the last reference has been released.
+ */
+void tb_domain_remove(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->stop)
+		tb->cm_ops->stop(tb);
+	/* Stop the domain control traffic */
+	tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	flush_workqueue(tb->wq);
+	device_unregister(&tb->dev);
+}
+
+/**
+ * tb_domain_suspend_noirq() - Suspend a domain
+ * @tb: Domain to suspend
+ *
+ * Suspends all devices in the domain and stops the control channel.
+ */
+int tb_domain_suspend_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	/*
+	 * The control channel interrupt is left enabled during suspend
+	 * and taking the lock here prevents any events happening before
+	 * we actually have stopped the domain and the control channel.
+	 */
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->suspend_noirq)
+		ret = tb->cm_ops->suspend_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_resume_noirq() - Resume a domain
+ * @tb: Domain to resume
+ *
+ * Re-starts the control channel, and resumes all devices connected to
+ * the domain.
+ */
+int tb_domain_resume_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->resume_noirq)
+		ret = tb->cm_ops->resume_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_suspend(struct tb *tb)
+{
+	int ret;
+
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->suspend) {
+		ret = tb->cm_ops->suspend(tb);
+		if (ret) {
+			mutex_unlock(&tb->lock);
+			return ret;
+		}
+	}
+	mutex_unlock(&tb->lock);
+	return 0;
+}
+
+void tb_domain_complete(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->complete)
+		tb->cm_ops->complete(tb);
+	mutex_unlock(&tb->lock);
+}
+
+/**
+ * tb_domain_approve_switch() - Approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * This will approve the switch by connection manager specific means. In
+ * case of success the connection manager will create tunnels for all
+ * supported protocols.
+ */
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	if (!tb->cm_ops->approve_switch)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_approve_switch_key() - Approve switch and add key
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function first adds the
+ * key to the switch NVM using connection manager specific means. If
+ * adding the key is successful, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	ret = tb->cm_ops->add_switch_key(tb, sw);
+	if (ret)
+		return ret;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_challenge_switch_key() - Challenge and approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function generates a
+ * random challenge and sends it to the switch. The switch responds to
+ * this and if the response matches the HMAC we compute over the
+ * challenge using the stored key, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	u8 challenge[TB_SWITCH_KEY_SIZE];
+	u8 response[TB_SWITCH_KEY_SIZE];
+	u8 hmac[TB_SWITCH_KEY_SIZE];
+	struct tb_switch *parent_sw;
+	struct crypto_shash *tfm;
+	struct shash_desc *shash;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	get_random_bytes(challenge, sizeof(challenge));
+	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
+	if (ret)
+		return ret;
+
+	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
+	if (ret)
+		goto err_free_tfm;
+
+	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+			GFP_KERNEL);
+	if (!shash) {
+		ret = -ENOMEM;
+		goto err_free_tfm;
+	}
+
+	shash->tfm = tfm;
+	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	memset(hmac, 0, sizeof(hmac));
+	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
+	if (ret)
+		goto err_free_shash;
+
+	/* The returned HMAC must match the one we calculated */
+	if (memcmp(response, hmac, sizeof(hmac))) {
+		ret = -EKEYREJECTED;
+		goto err_free_shash;
+	}
+
+	crypto_free_shash(tfm);
+	kfree(shash);
+
+	return tb->cm_ops->approve_switch(tb, sw);
+
+err_free_shash:
+	kfree(shash);
+err_free_tfm:
+	crypto_free_shash(tfm);
+
+	return ret;
+}
+
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return: %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+	if (!tb->cm_ops->disconnect_pcie_paths)
+		return -EPERM;
+
+	return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
+int tb_domain_init(void)
+{
+	return bus_register(&tb_bus_type);
+}
+
+void tb_domain_exit(void)
+{
+	bus_unregister(&tb_bus_type);
+	ida_destroy(&tb_domain_ida);
+	tb_switch_exit();
+}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 6392990..308b6e1 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -204,6 +204,11 @@ struct tb_drom_entry_header {
 	enum tb_drom_entry_type type:1;
 } __packed;
 
+struct tb_drom_entry_generic {
+	struct tb_drom_entry_header header;
+	u8 data[0];
+} __packed;
+
 struct tb_drom_entry_port {
 	/* BYTES 0-1 */
 	struct tb_drom_entry_header header;
@@ -276,6 +281,9 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
 	if (res)
 		return res;
 
+	if (drom_offset == 0)
+		return -ENODEV;
+
 	/* read uid */
 	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
 	if (res)
@@ -283,7 +291,7 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
 
 	crc = tb_crc8(data + 1, 8);
 	if (crc != data[0]) {
-		tb_sw_warn(sw, "uid crc8 missmatch (expected: %#x, got: %#x)\n",
+		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
 				data[0], crc);
 		return -EIO;
 	}
@@ -292,25 +300,39 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
 	return 0;
 }
 
-static void tb_drom_parse_port_entry(struct tb_port *port,
-		struct tb_drom_entry_port *entry)
+static int tb_drom_parse_entry_generic(struct tb_switch *sw,
+		struct tb_drom_entry_header *header)
 {
-	port->link_nr = entry->link_nr;
-	if (entry->has_dual_link_port)
-		port->dual_link_port =
-				&port->sw->ports[entry->dual_link_port_nr];
+	const struct tb_drom_entry_generic *entry =
+		(const struct tb_drom_entry_generic *)header;
+
+	switch (header->index) {
+	case 1:
+		/* Length includes 2 bytes header so remove it before copy */
+		sw->vendor_name = kstrndup(entry->data,
+			header->len - sizeof(*header), GFP_KERNEL);
+		if (!sw->vendor_name)
+			return -ENOMEM;
+		break;
+
+	case 2:
+		sw->device_name = kstrndup(entry->data,
+			header->len - sizeof(*header), GFP_KERNEL);
+		if (!sw->device_name)
+			return -ENOMEM;
+		break;
+	}
+
+	return 0;
 }
 
-static int tb_drom_parse_entry(struct tb_switch *sw,
-		struct tb_drom_entry_header *header)
+static int tb_drom_parse_entry_port(struct tb_switch *sw,
+				    struct tb_drom_entry_header *header)
 {
 	struct tb_port *port;
 	int res;
 	enum tb_port_type type;
 
-	if (header->type != TB_DROM_ENTRY_PORT)
-		return 0;
-
 	port = &sw->ports[header->index];
 	port->disabled = header->port_disabled;
 	if (port->disabled)
@@ -329,7 +351,10 @@ static int tb_drom_parse_entry(struct tb_switch *sw,
 				header->len, sizeof(struct tb_drom_entry_port));
 			return -EIO;
 		}
-		tb_drom_parse_port_entry(port, entry);
+		port->link_nr = entry->link_nr;
+		if (entry->has_dual_link_port)
+			port->dual_link_port =
+				&port->sw->ports[entry->dual_link_port_nr];
 	}
 	return 0;
 }
@@ -344,6 +369,7 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
 	struct tb_drom_header *header = (void *) sw->drom;
 	u16 pos = sizeof(*header);
 	u16 drom_size = header->data_len + TB_DROM_DATA_START;
+	int res;
 
 	while (pos < drom_size) {
 		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
@@ -353,7 +379,16 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
 			return -EIO;
 		}
 
-		tb_drom_parse_entry(sw, entry);
+		switch (entry->type) {
+		case TB_DROM_ENTRY_GENERIC:
+			res = tb_drom_parse_entry_generic(sw, entry);
+			break;
+		case TB_DROM_ENTRY_PORT:
+			res = tb_drom_parse_entry_port(sw, entry);
+			break;
+		}
+		if (res)
+			return res;
 
 		pos += entry->len;
 	}
@@ -394,6 +429,50 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
 	return -EINVAL;
 }
 
+static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
+{
+	u32 drom_offset;
+	int ret;
+
+	if (!sw->dma_port)
+		return -ENODEV;
+
+	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
+			 sw->cap_plug_events + 12, 1);
+	if (ret)
+		return ret;
+
+	if (!drom_offset)
+		return -ENODEV;
+
+	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
+				  sizeof(*size));
+	if (ret)
+		return ret;
+
+	/* Size includes CRC8 + UID + CRC32 */
+	*size += 1 + 8 + 4;
+	sw->drom = kzalloc(*size, GFP_KERNEL);
+	if (!sw->drom)
+		return -ENOMEM;
+
+	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
+	if (ret)
+		goto err_free;
+
+	/*
+	 * Read UID from the minimal DROM because the one in NVM is just
+	 * a placeholder.
+	 */
+	tb_drom_read_uid_only(sw, &sw->uid);
+	return 0;
+
+err_free:
+	kfree(sw->drom);
+	sw->drom = NULL;
+	return ret;
+}
+
 /**
  * tb_drom_read - copy drom to sw->drom and parse it
  */
@@ -415,6 +494,10 @@ int tb_drom_read(struct tb_switch *sw)
 		if (tb_drom_copy_efi(sw, &size) == 0)
 			goto parse;
 
+		/* Non-Apple hardware has the DROM as part of NVM */
+		if (tb_drom_copy_nvm(sw, &size) == 0)
+			goto parse;
+
 		/*
 		 * The root switch contains only a dummy drom (header only,
 		 * no entries). Hardcode the configuration here.
@@ -475,17 +558,19 @@ int tb_drom_read(struct tb_switch *sw)
 			header->uid_crc8, crc);
 		goto err;
 	}
-	sw->uid = header->uid;
+	if (!sw->uid)
+		sw->uid = header->uid;
+	sw->vendor = header->vendor_id;
+	sw->device = header->model_id;
 
 	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
 	if (crc != header->data_crc32) {
 		tb_sw_warn(sw,
-			"drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
+			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
 			header->data_crc32, crc);
-		goto err;
 	}
 
-	if (header->device_rom_revision > 1)
+	if (header->device_rom_revision > 2)
 		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
 			header->device_rom_revision);
 
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
new file mode 100644
index 0000000..8ee3402
--- /dev/null
+++ b/drivers/thunderbolt/icm.c
@@ -0,0 +1,1089 @@
+/*
+ * Internal Thunderbolt Connection Manager. This is firmware running on
+ * the Thunderbolt host controller that performs most of the low-level
+ * handling.
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define PCIE2CIO_CMD			0x30
+#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
+#define PCIE2CIO_CMD_START		BIT(30)
+#define PCIE2CIO_CMD_WRITE		BIT(21)
+#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
+#define PCIE2CIO_CMD_CS_SHIFT		19
+#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
+#define PCIE2CIO_CMD_PORT_SHIFT		13
+
+#define PCIE2CIO_WRDATA			0x34
+#define PCIE2CIO_RDDATA			0x38
+
+#define PHY_PORT_CS1			0x37
+#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
+#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
+#define PHY_PORT_CS1_LINK_STATE_SHIFT	26
+
+#define ICM_TIMEOUT			5000 /* ms */
+#define ICM_MAX_LINK			4
+#define ICM_MAX_DEPTH			6
+
+/**
+ * struct icm - Internal connection manager private data
+ * @request_lock: Makes sure only one message is sent to the ICM at a time
+ * @rescan_work: Work used to rescan the surviving switches after resume
+ * @upstream_port: Pointer to the PCIe upstream port this host
+ *		   controller is connected to. This is only set for systems
+ *		   where ICM needs to be started manually
+ * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
+ *	     (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
+ * @is_supported: Checks if we can support ICM on this controller
+ * @get_mode: Read and return the ICM firmware mode (optional)
+ * @get_route: Find a route string for given switch
+ * @device_connected: Handle device connected ICM message
+ * @device_disconnected: Handle device disconnected ICM message
+ */
+struct icm {
+	struct mutex request_lock;
+	struct delayed_work rescan_work;
+	struct pci_dev *upstream_port;
+	int vnd_cap;
+	bool safe_mode;
+	bool (*is_supported)(struct tb *tb);
+	int (*get_mode)(struct tb *tb);
+	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+	void (*device_connected)(struct tb *tb,
+				 const struct icm_pkg_header *hdr);
+	void (*device_disconnected)(struct tb *tb,
+				    const struct icm_pkg_header *hdr);
+};
+
+struct icm_notification {
+	struct work_struct work;
+	struct icm_pkg_header *pkg;
+	struct tb *tb;
+};
+
+static inline struct tb *icm_to_tb(struct icm *icm)
+{
+	return ((void *)icm - sizeof(struct tb));
+}
+
+static inline u8 phy_port_from_route(u64 route, u8 depth)
+{
+	return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
+}
+
+static inline u8 dual_link_from_link(u8 link)
+{
+	return link ? ((link - 1) ^ 0x01) + 1 : 0;
+}
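+
+/*
+ * Links come in pairs per physical port, so dual_link_from_link() returns
+ * the sibling link: 1 <-> 2, 3 <-> 4, and so on (0 means no link).
+ */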
+
+static inline u64 get_route(u32 route_hi, u32 route_lo)
+{
+	return (u64)route_hi << 32 | route_lo;
+}
+
+static inline bool is_apple(void)
+{
+	return dmi_match(DMI_BOARD_VENDOR, "Apple Inc.");
+}
+
+static bool icm_match(const struct tb_cfg_request *req,
+		      const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *res_hdr = pkg->buffer;
+	const struct icm_pkg_header *req_hdr = req->request;
+
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (res_hdr->code != req_hdr->code)
+		return false;
+
+	return true;
+}
+
+static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *hdr = pkg->buffer;
+
+	if (hdr->packet_id < req->npackets) {
+		size_t offset = hdr->packet_id * req->response_size;
+
+		memcpy(req->response + offset, pkg->buffer, req->response_size);
+	}
+
+	return hdr->packet_id == hdr->total_packets - 1;
+}
+
+static int icm_request(struct tb *tb, const void *request, size_t request_size,
+		       void *response, size_t response_size, size_t npackets,
+		       unsigned int timeout_msec)
+{
+	struct icm *icm = tb_priv(tb);
+	int retries = 3;
+
+	do {
+		struct tb_cfg_request *req;
+		struct tb_cfg_result res;
+
+		req = tb_cfg_request_alloc();
+		if (!req)
+			return -ENOMEM;
+
+		req->match = icm_match;
+		req->copy = icm_copy;
+		req->request = request;
+		req->request_size = request_size;
+		req->request_type = TB_CFG_PKG_ICM_CMD;
+		req->response = response;
+		req->npackets = npackets;
+		req->response_size = response_size;
+		req->response_type = TB_CFG_PKG_ICM_RESP;
+
+		mutex_lock(&icm->request_lock);
+		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
+		mutex_unlock(&icm->request_lock);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			return res.err == 1 ? -EIO : res.err;
+
+		usleep_range(20, 50);
+	} while (retries--);
+
+	return -ETIMEDOUT;
+}
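+
+/*
+ * Usage note: callers below fill a request packet with an ICM_* command
+ * code, zero the reply buffer and call icm_request(). Multi-packet
+ * responses such as ICM_GET_TOPOLOGY pass npackets > 1 so that icm_copy()
+ * can assemble the individual frames into one response buffer.
+ */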
+
+static bool icm_fr_is_supported(struct tb *tb)
+{
+	return !is_apple();
+}
+
+static inline int icm_fr_get_switch_index(u32 port)
+{
+	int index;
+
+	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
+		return 0;
+
+	index = port >> ICM_PORT_INDEX_SHIFT;
+	return index != 0xff ? index : 0;
+}
+
+static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_fr_pkg_get_topology_response *switches, *sw;
+	struct icm_fr_pkg_get_topology request = {
+		.hdr = { .code = ICM_GET_TOPOLOGY },
+	};
+	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
+	int ret, index;
+	u8 i;
+
+	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
+	if (!switches)
+		return -ENOMEM;
+
+	ret = icm_request(tb, &request, sizeof(request), switches,
+			  sizeof(*switches), npackets, ICM_TIMEOUT);
+	if (ret)
+		goto err_free;
+
+	sw = &switches[0];
+	index = icm_fr_get_switch_index(sw->ports[link]);
+	if (!index) {
+		ret = -ENODEV;
+		goto err_free;
+	}
+
+	sw = &switches[index];
+	for (i = 1; i < depth; i++) {
+		unsigned int j;
+
+		if (!(sw->first_data & ICM_SWITCH_USED)) {
+			ret = -ENODEV;
+			goto err_free;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
+			index = icm_fr_get_switch_index(sw->ports[j]);
+			if (index > sw->switch_index) {
+				sw = &switches[index];
+				break;
+			}
+		}
+	}
+
+	*route = get_route(sw->route_hi, sw->route_lo);
+
+err_free:
+	kfree(switches);
+	return ret;
+}
+
+static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_approve_device request;
+	struct icm_fr_pkg_approve_device reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_APPROVE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+
+	memset(&reply, 0, sizeof(reply));
+	/* Use larger timeout as establishing tunnels can take some time */
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, 10000);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "PCIe tunnel creation failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_add_device_key request;
+	struct icm_fr_pkg_add_device_key_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_ADD_DEVICE_KEY;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "Adding key to switch failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+				       const u8 *challenge, u8 *response)
+{
+	struct icm_fr_pkg_challenge_device request;
+	struct icm_fr_pkg_challenge_device_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_CHALLENGE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EKEYREJECTED;
+	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+		return -ENOKEY;
+
+	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+	return 0;
+}
+
+static void remove_switch(struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	parent_sw = tb_to_switch(sw->dev.parent);
+	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+	tb_switch_remove(sw);
+}
+
+static void
+icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_connected *pkg =
+		(const struct icm_fr_event_device_connected *)hdr;
+	struct tb_switch *sw, *parent_sw;
+	struct icm *icm = tb_priv(tb);
+	bool authorized = false;
+	u8 link, depth;
+	u64 route;
+	int ret;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+	ret = icm->get_route(tb, link, depth, &route);
+	if (ret) {
+		tb_err(tb, "failed to find route string for switch at %u.%u\n",
+		       link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+	if (sw) {
+		u8 phy_port, sw_phy_port;
+
+		parent_sw = tb_to_switch(sw->dev.parent);
+		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
+		phy_port = phy_port_from_route(route, depth);
+
+		/*
+		 * On resume ICM will send us connected events for the
+		 * devices that are still present. However, that
+		 * information might have changed, for example because a
+		 * switch on a dual-link connection might now have been
+		 * enumerated using the other link. Make sure our
+		 * bookkeeping matches that.
+		 */
+		if (sw->depth == depth && sw_phy_port == phy_port &&
+		    !!sw->authorized == authorized) {
+			tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+			tb_port_at(route, parent_sw)->remote =
+				   tb_upstream_port(sw);
+			sw->config.route_hi = upper_32_bits(route);
+			sw->config.route_lo = lower_32_bits(route);
+			sw->connection_id = pkg->connection_id;
+			sw->connection_key = pkg->connection_key;
+			sw->link = link;
+			sw->depth = depth;
+			sw->is_unplugged = false;
+			tb_switch_put(sw);
+			return;
+		}
+
+		/*
+		 * User connected the same switch to another physical
+		 * port or to another part of the topology. Remove the
+		 * existing switch now before adding the new one.
+		 */
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/*
+	 * If the switch was not found by UUID, look for a switch on the
+	 * same physical port (taking possible link aggregation into
+	 * account) and depth. If we found one it is definitely a stale
+	 * one so remove it first.
+	 */
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		u8 dual_link;
+
+		dual_link = dual_link_from_link(link);
+		if (dual_link)
+			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
+	}
+	if (sw) {
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+	if (!parent_sw) {
+		tb_err(tb, "failed to find parent switch for %u.%u\n",
+		       link, depth);
+		return;
+	}
+
+	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+	if (!sw) {
+		tb_switch_put(parent_sw);
+		return;
+	}
+
+	sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
+	sw->connection_id = pkg->connection_id;
+	sw->connection_key = pkg->connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->authorized = authorized;
+	sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+				ICM_FLAGS_SLEVEL_SHIFT;
+
+	/* Link the two switches now */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+	ret = tb_switch_add(sw);
+	if (ret) {
+		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+		tb_switch_put(sw);
+	}
+	tb_switch_put(parent_sw);
+}
+
+static void
+icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_disconnected *pkg =
+		(const struct icm_fr_event_device_disconnected *)hdr;
+	struct tb_switch *sw;
+	u8 link, depth;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+
+	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+			depth);
+		return;
+	}
+
+	remove_switch(sw);
+	tb_switch_put(sw);
+}
+
+static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
+{
+	struct pci_dev *parent;
+
+	parent = pci_upstream_bridge(pdev);
+	while (parent) {
+		if (!pci_is_pcie(parent))
+			return NULL;
+		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		parent = pci_upstream_bridge(parent);
+	}
+
+	if (!parent)
+		return NULL;
+
+	switch (parent->device) {
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+		return parent;
+	}
+
+	return NULL;
+}
+
+static bool icm_ar_is_supported(struct tb *tb)
+{
+	struct pci_dev *upstream_port;
+	struct icm *icm = tb_priv(tb);
+
+	/*
+	 * Starting from Alpine Ridge we can use ICM on Apple machines
+	 * as well. We just need to reset and re-enable it first.
+	 */
+	if (!is_apple())
+		return true;
+
+	/*
+	 * Find the upstream PCIe port in case we need to do a reset
+	 * through its vendor specific registers.
+	 */
+	upstream_port = get_upstream_port(tb->nhi->pdev);
+	if (upstream_port) {
+		int cap;
+
+		cap = pci_find_ext_capability(upstream_port,
+					      PCI_EXT_CAP_ID_VNDR);
+		if (cap > 0) {
+			icm->upstream_port = upstream_port;
+			icm->vnd_cap = cap;
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int icm_ar_get_mode(struct tb *tb)
+{
+	struct tb_nhi *nhi = tb->nhi;
+	int retries = 5;
+	u32 val;
+
+	do {
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			break;
+		msleep(30);
+	} while (--retries);
+
+	if (!retries) {
+		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
+		return -ENODEV;
+	}
+
+	return nhi_mailbox_mode(nhi);
+}
+
+static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_ar_pkg_get_route_response reply;
+	struct icm_ar_pkg_get_route request = {
+		.hdr = { .code = ICM_GET_ROUTE },
+		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	*route = get_route(reply.route_hi, reply.route_lo);
+	return 0;
+}
+
+static void icm_handle_notification(struct work_struct *work)
+{
+	struct icm_notification *n = container_of(work, typeof(*n), work);
+	struct tb *tb = n->tb;
+	struct icm *icm = tb_priv(tb);
+
+	mutex_lock(&tb->lock);
+
+	switch (n->pkg->code) {
+	case ICM_EVENT_DEVICE_CONNECTED:
+		icm->device_connected(tb, n->pkg);
+		break;
+	case ICM_EVENT_DEVICE_DISCONNECTED:
+		icm->device_disconnected(tb, n->pkg);
+		break;
+	}
+
+	mutex_unlock(&tb->lock);
+
+	kfree(n->pkg);
+	kfree(n);
+}
+
+static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+			     const void *buf, size_t size)
+{
+	struct icm_notification *n;
+
+	n = kmalloc(sizeof(*n), GFP_KERNEL);
+	if (!n)
+		return;
+
+	INIT_WORK(&n->work, icm_handle_notification);
+	n->pkg = kmemdup(buf, size, GFP_KERNEL);
+	if (!n->pkg) {
+		kfree(n);
+		return;
+	}
+	n->tb = tb;
+
+	queue_work(tb->wq, &n->work);
+}
+
+static int
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
+{
+	struct icm_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	unsigned int retries = 10;
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.security_level & 0xf;
+
+	/*
+	 * Hold on here until the switch config space is accessible so
+	 * that we can read root switch config successfully.
+	 */
+	do {
+		struct tb_cfg_result res;
+		u32 tmp;
+
+		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
+				      0, 1, 100);
+		if (!res.err)
+			return 0;
+
+		msleep(50);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+	u32 cmd;
+
+	do {
+		pci_read_config_dword(icm->upstream_port,
+				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+		if (!(cmd & PCIE2CIO_CMD_START)) {
+			if (cmd & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return 0;
+		}
+
+		msleep(50);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+			 unsigned int port, unsigned int index, u32 *data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int ret, vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
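+	/*
+	 * Encode the dword index, CIO port and config space into the
+	 * command register and start the operation. Once it completes
+	 * the result is read back from the data register.
+	 */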
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	ret = pci2cio_wait_completion(icm, 5000);
+	if (ret)
+		return ret;
+
+	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+	return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+			  unsigned int port, unsigned int index, u32 data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	return pci2cio_wait_completion(icm, 5000);
+}
+
+static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 val;
+
+	/* Put ARC to wait for CIO reset event to happen */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_CIO_RESET_REQ;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Re-start ARC */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_ICM_EN_INVERT;
+	val |= REG_FW_STS_ICM_EN_CPU;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Trigger CIO reset now */
+	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
+static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
+{
+	unsigned int retries = 10;
+	int ret;
+	u32 val;
+
+	/* Check if the ICM firmware is already running */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	if (val & REG_FW_STS_ICM_EN)
+		return 0;
+
+	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+
+	ret = icm_firmware_reset(tb, nhi);
+	if (ret)
+		return ret;
+
+	/* Wait until the ICM firmware tells us it is up and running */
+	do {
+		/* Check that the ICM firmware is running */
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			return 0;
+
+		msleep(300);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int icm_reset_phy_port(struct tb *tb, int phy_port)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 state0, state1;
+	int port0, port1;
+	u32 val0, val1;
+	int ret;
+
+	if (!icm->upstream_port)
+		return 0;
+
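+	/* Each physical port is backed by two CIO null ports (lanes) */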
+	if (phy_port) {
+		port0 = 3;
+		port1 = 4;
+	} else {
+		port0 = 1;
+		port1 = 2;
+	}
+
+	/*
+	 * Read link status of both null ports belonging to a single
+	 * physical port.
+	 */
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+
+	/* If they are both up we need to reset them now */
+	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
+		return 0;
+
+	val0 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+	if (ret)
+		return ret;
+
+	/* Wait a bit and then re-enable both ports */
+	usleep_range(10, 100);
+
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+}
+
+static int icm_firmware_init(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = icm_firmware_start(tb, nhi);
+	if (ret) {
+		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
+		return ret;
+	}
+
+	if (icm->get_mode) {
+		ret = icm->get_mode(tb);
+
+		switch (ret) {
+		case NHI_FW_SAFE_MODE:
+			icm->safe_mode = true;
+			break;
+
+		case NHI_FW_CM_MODE:
+			/* Ask ICM to accept all Thunderbolt devices */
+			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
+			break;
+
+		default:
+			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+			return -ENODEV;
+		}
+	}
+
+	/*
+	 * Reset both physical ports if there is anything connected to
+	 * them already.
+	 */
+	ret = icm_reset_phy_port(tb, 0);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
+	ret = icm_reset_phy_port(tb, 1);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
+
+	return 0;
+}
+
+static int icm_driver_ready(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	int ret;
+
+	ret = icm_firmware_init(tb);
+	if (ret)
+		return ret;
+
+	if (icm->safe_mode) {
+		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+		return 0;
+	}
+
+	return __icm_driver_ready(tb, &tb->security_level);
+}
+
+static int icm_suspend(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
+/*
+ * Mark all switches (except root switch) below this one unplugged. ICM
+ * firmware will send us an updated list of switches after we have sent
+ * it the driver ready command. If a switch is not in that list it will
+ * be removed when we perform the rescan.
+ */
+static void icm_unplug_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	if (tb_route(sw))
+		sw->is_unplugged = true;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+
+		icm_unplug_children(port->remote->sw);
+	}
+}
+
+static void icm_free_unplugged_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+
+		if (port->remote->sw->is_unplugged) {
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else {
+			icm_free_unplugged_children(port->remote->sw);
+		}
+	}
+}
+
+static void icm_rescan_work(struct work_struct *work)
+{
+	struct icm *icm = container_of(work, struct icm, rescan_work.work);
+	struct tb *tb = icm_to_tb(icm);
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch)
+		icm_free_unplugged_children(tb->root_switch);
+	mutex_unlock(&tb->lock);
+}
+
+static void icm_complete(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (tb->nhi->going_away)
+		return;
+
+	icm_unplug_children(tb->root_switch);
+
+	/*
+	 * Now all existing children should be resumed, start events
+	 * from ICM to get updated status.
+	 */
+	__icm_driver_ready(tb, NULL);
+
+	/*
+	 * We do not get notifications of devices that have been
+	 * unplugged during suspend, so schedule a rescan to clean them
+	 * up if there are any.
+	 */
+	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
+}
+
+static int icm_start(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	int ret;
+
+	if (icm->safe_mode)
+		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+	else
+		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (!tb->root_switch)
+		return -ENODEV;
+
+	/*
+	 * NVM upgrade has not been tested on Apple systems and they
+	 * don't provide images publicly either. To be on the safe side
+	 * prevent root switch NVM upgrade on Macs for now.
+	 */
+	tb->root_switch->no_nvm_upgrade = is_apple();
+
+	ret = tb_switch_add(tb->root_switch);
+	if (ret)
+		tb_switch_put(tb->root_switch);
+
+	return ret;
+}
+
+static void icm_stop(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	cancel_delayed_work(&icm->rescan_work);
+	tb_switch_remove(tb->root_switch);
+	tb->root_switch = NULL;
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+}
+
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
+/* Falcon Ridge and Alpine Ridge */
+static const struct tb_cm_ops icm_fr_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.handle_event = icm_handle_event,
+	.approve_switch = icm_fr_approve_switch,
+	.add_switch_key = icm_fr_add_switch_key,
+	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+};
+
+struct tb *icm_probe(struct tb_nhi *nhi)
+{
+	struct icm *icm;
+	struct tb *tb;
+
+	tb = tb_domain_alloc(nhi, sizeof(struct icm));
+	if (!tb)
+		return NULL;
+
+	icm = tb_priv(tb);
+	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
+	mutex_init(&icm->request_lock);
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		icm->is_supported = icm_fr_is_supported;
+		icm->get_route = icm_fr_get_route;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		tb->cm_ops = &icm_fr_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+		icm->is_supported = icm_ar_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->get_route = icm_ar_get_route;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		tb->cm_ops = &icm_fr_ops;
+		break;
+	}
+
+	if (!icm->is_supported || !icm->is_supported(tb)) {
+		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
+		tb_domain_put(tb);
+		return NULL;
+	}
+
+	return tb;
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a8c2041..05af126 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -13,7 +13,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/delay.h>
 
 #include "nhi.h"
 #include "nhi_regs.h"
@@ -21,6 +21,14 @@
 
 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 
+/*
+ * Minimum number of vectors when we use MSI-X. Two are for the control
+ * channel Rx/Tx and the remaining four are for cross domain DMA paths.
+ */
+#define MSIX_MIN_VECS		6
+#define MSIX_MAX_VECS		16
+
+#define NHI_MAILBOX_TIMEOUT	500 /* ms */
 
 static int ring_interrupt_index(struct tb_ring *ring)
 {
@@ -42,6 +50,37 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 	int bit = ring_interrupt_index(ring) & 31;
 	int mask = 1 << bit;
 	u32 old, new;
+
+	if (ring->irq > 0) {
+		u32 step, shift, ivr, misc;
+		void __iomem *ivr_base;
+		int index;
+
+		if (ring->is_tx)
+			index = ring->hop;
+		else
+			index = ring->hop + ring->nhi->hop_count;
+
+		/*
+		 * Ask the hardware to clear interrupt status bits automatically
+		 * since we already know which interrupt was triggered.
+		 */
+		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+		}
+
+		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
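+		/*
+		 * Each 32-bit interrupt vector allocation register packs
+		 * eight 4-bit entries, so compute the byte offset of the
+		 * register holding this ring's entry and the bit position
+		 * of the entry within it.
+		 */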
+		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+		ivr = ioread32(ivr_base + step);
+		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+		if (active)
+			ivr |= ring->vector << shift;
+		iowrite32(ivr, ivr_base + step);
+	}
+
 	old = ioread32(ring->nhi->iobase + reg);
 	if (active)
 		new = old | mask;
@@ -239,8 +278,50 @@ int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
 	return ret;
 }
 
+static irqreturn_t ring_msix(int irq, void *data)
+{
+	struct tb_ring *ring = data;
+
+	schedule_work(&ring->work);
+	return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+	struct tb_nhi *nhi = ring->nhi;
+	unsigned long irqflags;
+	int ret;
+
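+	/*
+	 * Without MSI-X there is no per-ring vector to set up; the ring
+	 * is then serviced through the shared interrupt work instead.
+	 */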
+	if (!nhi->pdev->msix_enabled)
+		return 0;
+
+	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	ring->vector = ret;
+
+	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+	if (ring->irq < 0)
+		return ring->irq;
+
+	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+	if (ring->irq <= 0)
+		return;
+
+	free_irq(ring->irq, ring);
+	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+	ring->vector = 0;
+	ring->irq = 0;
+}
+
 static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
-				  bool transmit)
+				  bool transmit, unsigned int flags)
 {
 	struct tb_ring *ring = NULL;
 	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
@@ -271,9 +352,14 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	ring->hop = hop;
 	ring->is_tx = transmit;
 	ring->size = size;
+	ring->flags = flags;
 	ring->head = 0;
 	ring->tail = 0;
 	ring->running = false;
+
+	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+		goto err;
+
 	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 			size * sizeof(*ring->descriptors),
 			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
@@ -295,14 +381,16 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	return NULL;
 }
 
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags)
 {
-	return ring_alloc(nhi, hop, size, true);
+	return ring_alloc(nhi, hop, size, true, flags);
 }
 
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags)
 {
-	return ring_alloc(nhi, hop, size, false);
+	return ring_alloc(nhi, hop, size, false, flags);
 }
 
 /**
@@ -314,6 +402,8 @@ void ring_start(struct tb_ring *ring)
 {
 	mutex_lock(&ring->nhi->lock);
 	mutex_lock(&ring->lock);
+	if (ring->nhi->going_away)
+		goto err;
 	if (ring->running) {
 		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
 		goto err;
@@ -360,6 +450,8 @@ void ring_stop(struct tb_ring *ring)
 	mutex_lock(&ring->lock);
 	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
 		 RING_TYPE(ring), ring->hop);
+	if (ring->nhi->going_away)
+		goto err;
 	if (!ring->running) {
 		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
 			 RING_TYPE(ring), ring->hop);
@@ -413,6 +505,8 @@ void ring_free(struct tb_ring *ring)
 			 RING_TYPE(ring), ring->hop);
 	}
 
+	ring_release_msix(ring);
+
 	dma_free_coherent(&ring->nhi->pdev->dev,
 			  ring->size * sizeof(*ring->descriptors),
 			  ring->descriptors, ring->descriptors_dma);
@@ -428,15 +522,70 @@ void ring_free(struct tb_ring *ring)
 
 	mutex_unlock(&ring->nhi->lock);
 	/**
-	 * ring->work can no longer be scheduled (it is scheduled only by
-	 * nhi_interrupt_work and ring_stop). Wait for it to finish before
-	 * freeing the ring.
+	 * ring->work can no longer be scheduled (it is scheduled only
+	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
+	 * to finish before freeing the ring.
 	 */
 	flush_work(&ring->work);
 	mutex_destroy(&ring->lock);
 	kfree(ring);
 }
 
+/**
+ * nhi_mailbox_cmd() - Send a command through NHI mailbox
+ * @nhi: Pointer to the NHI structure
+ * @cmd: Command to send
+ * @data: Data to be sent with the command
+ *
+ * Sends mailbox command to the firmware running on NHI. Returns %0 in
+ * case of success and negative errno in case of failure.
+ */
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
+{
+	ktime_t timeout;
+	u32 val;
+
+	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
+
+	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
+	val |= REG_INMAIL_OP_REQUEST | cmd;
+	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
+
+	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
+	do {
+		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+		if (!(val & REG_INMAIL_OP_REQUEST))
+			break;
+		usleep_range(10, 20);
+	} while (ktime_before(ktime_get(), timeout));
+
+	if (val & REG_INMAIL_OP_REQUEST)
+		return -ETIMEDOUT;
+	if (val & REG_INMAIL_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * nhi_mailbox_mode() - Return current firmware operation mode
+ * @nhi: Pointer to the NHI structure
+ *
+ * The function reads current firmware operation mode using NHI mailbox
+ * registers and returns it to the caller.
+ */
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
+{
+	u32 val;
+
+	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
+
+	return (enum nhi_fw_mode)val;
+}
+
 static void nhi_interrupt_work(struct work_struct *work)
 {
 	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
@@ -498,16 +647,40 @@ static int nhi_suspend_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
-	thunderbolt_suspend(tb);
-	return 0;
+
+	return tb_domain_suspend_noirq(tb);
 }
 
 static int nhi_resume_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
-	thunderbolt_resume(tb);
-	return 0;
+
+	/*
+	 * Check that the device is still there. It may be that the user
+	 * unplugged last device which causes the host controller to go
+	 * away on PCs.
+	 */
+	if (!pci_device_is_present(pdev))
+		tb->nhi->going_away = true;
+
+	return tb_domain_resume_noirq(tb);
+}
+
+static int nhi_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	tb_domain_complete(tb);
 }
 
 static void nhi_shutdown(struct tb_nhi *nhi)
@@ -528,9 +701,52 @@ static void nhi_shutdown(struct tb_nhi *nhi)
 	 * We have to release the irq before calling flush_work. Otherwise an
 	 * already executing IRQ handler could call schedule_work again.
 	 */
-	devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
-	flush_work(&nhi->interrupt_work);
+	if (!nhi->pdev->msix_enabled) {
+		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+		flush_work(&nhi->interrupt_work);
+	}
 	mutex_destroy(&nhi->lock);
+	ida_destroy(&nhi->msix_ida);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+	struct pci_dev *pdev = nhi->pdev;
+	int res, irq, nvec;
+
+	/* In case someone left them on. */
+	nhi_disable_interrupts(nhi);
+
+	ida_init(&nhi->msix_ida);
+
+	/*
+	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+	 * get all MSI-X vectors and if we succeed, each ring will have
+	 * one MSI-X vector. If for some reason that does not work out,
+	 * we fall back to a single MSI.
+	 */
+	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+				     PCI_IRQ_MSIX);
+	if (nvec < 0) {
+		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+		if (nvec < 0)
+			return nvec;
+
+		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+		irq = pci_irq_vector(nhi->pdev, 0);
+		if (irq < 0)
+			return irq;
+
+		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
+		if (res) {
+			dev_err(&pdev->dev, "request_irq failed, aborting\n");
+			return res;
+		}
+	}
+
+	return 0;
 }
 
 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -545,12 +761,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return res;
 	}
 
-	res = pci_enable_msi(pdev);
-	if (res) {
-		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
-		return res;
-	}
-
 	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
 	if (res) {
 		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
@@ -568,7 +778,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (nhi->hop_count != 12 && nhi->hop_count != 32)
 		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
 			 nhi->hop_count);
-	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
 
 	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
 				     sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -577,12 +786,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!nhi->tx_rings || !nhi->rx_rings)
 		return -ENOMEM;
 
-	nhi_disable_interrupts(nhi); /* In case someone left them on. */
-	res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
-			       IRQF_NO_SUSPEND, /* must work during _noirq */
-			       "thunderbolt", nhi);
+	res = nhi_init_msi(nhi);
 	if (res) {
-		dev_err(&pdev->dev, "request_irq failed, aborting\n");
+		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
 		return res;
 	}
 
@@ -593,13 +799,24 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* magic value - clock related? */
 	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
 
-	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
-	tb = thunderbolt_alloc_and_start(nhi);
+	tb = icm_probe(nhi);
+	if (!tb)
+		tb = tb_probe(nhi);
 	if (!tb) {
+		dev_err(&nhi->pdev->dev,
+			"failed to determine connection manager, aborting\n");
+		return -ENODEV;
+	}
+
+	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+
+	res = tb_domain_add(tb);
+	if (res) {
 		/*
 		 * At this point the RX/TX rings might already have been
 		 * activated. Do a proper shutdown.
 		 */
+		tb_domain_put(tb);
 		nhi_shutdown(nhi);
 		return -EIO;
 	}
@@ -612,7 +829,8 @@ static void nhi_remove(struct pci_dev *pdev)
 {
 	struct tb *tb = pci_get_drvdata(pdev);
 	struct tb_nhi *nhi = tb->nhi;
-	thunderbolt_shutdown_and_free(tb);
+
+	tb_domain_remove(tb);
 	nhi_shutdown(nhi);
 }
 
@@ -629,6 +847,10 @@ static const struct dev_pm_ops nhi_pm_ops = {
 					    * pci-tunnels stay alive.
 					    */
 	.restore_noirq = nhi_resume_noirq,
+	.suspend = nhi_suspend,
+	.freeze = nhi_suspend,
+	.poweroff = nhi_suspend,
+	.complete = nhi_complete,
 };
 
 static struct pci_device_id nhi_ids[] = {
@@ -660,6 +882,17 @@ static struct pci_device_id nhi_ids[] = {
 		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
+
+	/* Thunderbolt 3 */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+
 	{ 0,}
 };
 
@@ -676,14 +909,21 @@ static struct pci_driver nhi_driver = {
 
 static int __init nhi_init(void)
 {
-	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
-		return -ENOSYS;
-	return pci_register_driver(&nhi_driver);
+	int ret;
+
+	ret = tb_domain_init();
+	if (ret)
+		return ret;
+	ret = pci_register_driver(&nhi_driver);
+	if (ret)
+		tb_domain_exit();
+	return ret;
 }
 
 static void __exit nhi_unload(void)
 {
 	pci_unregister_driver(&nhi_driver);
+	tb_domain_exit();
 }
 
 module_init(nhi_init);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 3172429..5b5bb2c 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -7,45 +7,78 @@
 #ifndef DSL3510_H_
 #define DSL3510_H_
 
+#include <linux/idr.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 
 /**
  * struct tb_nhi - thunderbolt native host interface
+ * @lock: Must be held during ring creation/destruction. Is acquired by
+ *	  interrupt_work when dispatching interrupts to individual rings.
+ * @pdev: Pointer to the PCI device
+ * @iobase: MMIO space of the NHI
+ * @tx_rings: All Tx rings available on this host controller
+ * @rx_rings: All Rx rings available on this host controller
+ * @msix_ida: Used to allocate MSI-X vectors for rings
+ * @going_away: The host controller device is about to disappear so when
+ *		this flag is set, avoid touching the hardware anymore.
+ * @interrupt_work: Work scheduled to handle ring interrupt when no
+ *		    MSI-X is used.
+ * @hop_count: Number of rings (end point hops) supported by NHI.
  */
 struct tb_nhi {
-	struct mutex lock; /*
-			    * Must be held during ring creation/destruction.
-			    * Is acquired by interrupt_work when dispatching
-			    * interrupts to individual rings.
-			    **/
+	struct mutex lock;
 	struct pci_dev *pdev;
 	void __iomem *iobase;
 	struct tb_ring **tx_rings;
 	struct tb_ring **rx_rings;
+	struct ida msix_ida;
+	bool going_away;
 	struct work_struct interrupt_work;
-	u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
+	u32 hop_count;
 };
 
 /**
  * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
+ * @lock: Lock serializing actions to this ring. Must be acquired after
+ *	  nhi->lock.
+ * @nhi: Pointer to the native host controller interface
+ * @size: Size of the ring
+ * @hop: Hop (DMA channel) associated with this ring
+ * @head: Head of the ring (write next descriptor here)
+ * @tail: Tail of the ring (complete next descriptor here)
+ * @descriptors: Allocated descriptors for this ring
+ * @queue: Queue holding frames to be transferred over this ring
+ * @in_flight: Queue holding frames that are currently in flight
+ * @work: Interrupt work structure
+ * @is_tx: Is the ring Tx or Rx
+ * @running: Is the ring running
+ * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
+ * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
+ * @flags: Ring specific flags
  */
 struct tb_ring {
-	struct mutex lock; /* must be acquired after nhi->lock */
+	struct mutex lock;
 	struct tb_nhi *nhi;
 	int size;
 	int hop;
-	int head; /* write next descriptor here */
-	int tail; /* complete next descriptor here */
+	int head;
+	int tail;
 	struct ring_desc *descriptors;
 	dma_addr_t descriptors_dma;
 	struct list_head queue;
 	struct list_head in_flight;
 	struct work_struct work;
-	bool is_tx:1; /* rx otherwise */
+	bool is_tx:1;
 	bool running:1;
+	int irq;
+	u8 vector;
+	unsigned int flags;
 };
 
+/* Leave ring interrupt enabled on suspend */
+#define RING_FLAG_NO_SUSPEND	BIT(0)
+
 struct ring_frame;
 typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
 
@@ -64,8 +97,10 @@ struct ring_frame {
 
 #define TB_FRAME_SIZE 0x100    /* minimum size for ring_rx */
 
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags);
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+			      unsigned int flags);
 void ring_start(struct tb_ring *ring);
 void ring_stop(struct tb_ring *ring);
 void ring_free(struct tb_ring *ring);
@@ -111,4 +146,38 @@ static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
 	return __ring_enqueue(ring, frame);
 }
 
+enum nhi_fw_mode {
+	NHI_FW_SAFE_MODE,
+	NHI_FW_AUTH_MODE,
+	NHI_FW_EP_MODE,
+	NHI_FW_CM_MODE,
+};
+
+enum nhi_mailbox_cmd {
+	NHI_MAILBOX_SAVE_DEVS = 0x05,
+	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
+	NHI_MAILBOX_DRV_UNLOADS = 0x07,
+	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
+};
+
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+
+/*
+ * PCI IDs used in this driver from Win Ridge forward. There is no
+ * need for the PCI quirk anymore as we will use ICM also on Apple
+ * hardware.
+ */
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI            0x157d
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE         0x157e
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI		0x15bf
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE	0x15c0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI	0x15d2
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE	0x15d3
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI	0x15d9
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE	0x15da
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI	0x15dc
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI	0x15dd
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI	0x15de
+
 #endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 75cf069..09ed574 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -95,7 +95,34 @@ struct ring_desc {
 #define REG_RING_INTERRUPT_BASE	0x38200
 #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
 
+/* Interrupt Vector Allocation */
+#define REG_INT_VEC_ALLOC_BASE	0x38c40
+#define REG_INT_VEC_ALLOC_BITS	4
+#define REG_INT_VEC_ALLOC_MASK	GENMASK(3, 0)
+#define REG_INT_VEC_ALLOC_REGS	(32 / REG_INT_VEC_ALLOC_BITS)
+
 /* The last 11 bits contain the number of hops supported by the NHI port. */
 #define REG_HOP_COUNT		0x39640
 
+#define REG_DMA_MISC			0x39864
+#define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
+
+#define REG_INMAIL_DATA			0x39900
+
+#define REG_INMAIL_CMD			0x39904
+#define REG_INMAIL_CMD_MASK		GENMASK(7, 0)
+#define REG_INMAIL_ERROR		BIT(30)
+#define REG_INMAIL_OP_REQUEST		BIT(31)
+
+#define REG_OUTMAIL_CMD			0x3990c
+#define REG_OUTMAIL_CMD_OPMODE_SHIFT	8
+#define REG_OUTMAIL_CMD_OPMODE_MASK	GENMASK(11, 8)
+
+#define REG_FW_STS			0x39944
+#define REG_FW_STS_NVM_AUTH_DONE	BIT(31)
+#define REG_FW_STS_CIO_RESET_REQ	BIT(30)
+#define REG_FW_STS_ICM_EN_CPU		BIT(2)
+#define REG_FW_STS_ICM_EN_INVERT	BIT(1)
+#define REG_FW_STS_ICM_EN		BIT(0)
+
 #endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c6f30b1..ab3e8f4 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,10 +5,395 @@
  */
 
 #include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include "tb.h"
 
+/* Switch authorization from userspace is serialized by this lock */
+static DEFINE_MUTEX(switch_lock);
+
+/* Switch NVM support */
+
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_CSS			0x10
+#define NVM_FLASH_SIZE		0x45
+
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+	struct list_head list;
+	uuid_be uuid;
+	u32 status;
+};
+
+/*
+ * Holds the NVM authentication failure status per switch. This
+ * information needs to stay around even when the switch gets power
+ * cycled, so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	list_for_each_entry(st, &nvm_auth_status_cache, list) {
+		if (!uuid_be_cmp(st->uuid, *sw->uuid))
+			return st;
+	}
+
+	return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	mutex_unlock(&nvm_auth_status_lock);
+
+	*status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+	struct nvm_auth_status *st;
+
+	if (WARN_ON(!sw->uuid))
+		return;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+
+	if (!st) {
+		st = kzalloc(sizeof(*st), GFP_KERNEL);
+		if (!st)
+			goto unlock;
+
+		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+		INIT_LIST_HEAD(&st->list);
+		list_add_tail(&st->list, &nvm_auth_status_cache);
+	}
+
+	st->status = status;
+unlock:
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	if (st) {
+		list_del(&st->list);
+		kfree(st);
+	}
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = sw->nvm->buf;
+	u16 ds_size;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	image_size = sw->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	if (!sw->safe_mode) {
+		u16 device_id;
+
+		/*
+		 * Make sure the device ID in the image matches the one
+		 * we read from the switch config space.
+		 */
+		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+		if (device_id != sw->config.device_id)
+			return -EINVAL;
+
+		if (sw->generation < 3) {
+			/* Write CSS headers first */
+			ret = dma_port_flash_write(sw->dma_port,
+				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+				DMA_PORT_CSS_MAX_SIZE);
+			if (ret)
+				return ret;
+		}
+
+		/* Skip headers in the image */
+		buf += hdr_size;
+		image_size -= hdr_size;
+	}
+
+	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+	int ret;
+
+	/*
+	 * Root switch NVM upgrade requires that we disconnect the
+	 * existing PCIe paths first (in case it is not in safe mode
+	 * already).
+	 */
+	if (!sw->safe_mode) {
+		ret = tb_domain_disconnect_pcie_paths(sw->tb);
+		if (ret)
+			return ret;
+		/*
+		 * The host controller goes away pretty soon after this if
+		 * everything goes well, so getting a timeout is expected.
+		 */
+		ret = dma_port_flash_update_auth(sw->dma_port);
+		return ret == -ETIMEDOUT ? 0 : ret;
+	}
+
+	/*
+	 * From safe mode we can get out by just power cycling the
+	 * switch.
+	 */
+	dma_port_power_cycle(sw->dma_port);
+	return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+	int ret, retries = 10;
+
+	ret = dma_port_flash_update_auth(sw->dma_port);
+	if (ret && ret != -ETIMEDOUT)
+		return ret;
+
+	/*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get a timeout for a while). Once
+	 * we get a response the device needs to be power cycled in order
+	 * for the new NVM to be taken into use.
+	 */
+	do {
+		u32 status;
+
+		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+		if (ret < 0 && ret != -ETIMEDOUT)
+			return ret;
+		if (ret > 0) {
+			if (status) {
+				tb_sw_warn(sw, "failed to authenticate NVM\n");
+				nvm_set_auth_status(sw, status);
+			}
+
+			tb_sw_info(sw, "power cycling the switch now\n");
+			dma_port_power_cycle(sw->dma_port);
+			return 0;
+		}
+
+		msleep(500);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	struct tb_switch *sw = priv;
+
+	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/*
+	 * Since writing the NVM image might require some special steps,
+	 * for example when CSS headers are written, we cache the image
+	 * locally here and handle the special cases when the user asks
+	 * us to authenticate the image.
+	 */
+	if (!sw->nvm->buf) {
+		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!sw->nvm->buf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
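+	/*
+	 * Track the end of the latest write as the image size; the
+	 * image is assumed to be written in order from the start.
+	 */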
+	sw->nvm->buf_data_size = offset + bytes;
+	memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+					   size_t size, bool active)
+{
+	struct nvmem_config config;
+
+	memset(&config, 0, sizeof(config));
+
+	if (active) {
+		config.name = "nvm_active";
+		config.reg_read = tb_switch_nvm_read;
+	} else {
+		config.name = "nvm_non_active";
+		config.reg_write = tb_switch_nvm_write;
+	}
+
+	config.id = id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = &sw->dev;
+	config.owner = THIS_MODULE;
+	config.root_only = true;
+	config.priv = sw;
+
+	return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+	struct nvmem_device *nvm_dev;
+	struct tb_switch_nvm *nvm;
+	u32 val;
+	int ret;
+
+	if (!sw->dma_port)
+		return 0;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return -ENOMEM;
+
+	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+	/*
+	 * If the switch is in safe-mode the only accessible portion of
+	 * the NVM is the non-active one where userspace is expected to
+	 * write new functional NVM.
+	 */
+	if (!sw->safe_mode) {
+		u32 nvm_size, hdr_size;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
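+		/*
+		 * The low three bits of the flash size register select the
+		 * total flash size. After reserving the header area the
+		 * remaining space is presumably split evenly between the
+		 * active and non-active NVM regions.
+		 */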
+		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+		nvm_size = (SZ_1M << (val & 7)) / 8;
+		nvm_size = (nvm_size - hdr_size) / 2;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		nvm->major = val >> 16;
+		nvm->minor = val >> 8;
+
+		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_ida;
+		}
+		nvm->active = nvm_dev;
+	}
+
+	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+	if (IS_ERR(nvm_dev)) {
+		ret = PTR_ERR(nvm_dev);
+		goto err_nvm_active;
+	}
+	nvm->non_active = nvm_dev;
+
+	mutex_lock(&switch_lock);
+	sw->nvm = nvm;
+	mutex_unlock(&switch_lock);
+
+	return 0;
+
+err_nvm_active:
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+err_ida:
+	ida_simple_remove(&nvm_ida, nvm->id);
+	kfree(nvm);
+
+	return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+	struct tb_switch_nvm *nvm;
+
+	mutex_lock(&switch_lock);
+	nvm = sw->nvm;
+	sw->nvm = NULL;
+	mutex_unlock(&switch_lock);
+
+	if (!nvm)
+		return;
+
+	/* Remove authentication status in case the switch is unplugged */
+	if (!nvm->authenticating)
+		nvm_clear_auth_status(sw);
+
+	nvmem_unregister(nvm->non_active);
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+	ida_simple_remove(&nvm_ida, nvm->id);
+	vfree(nvm->buf);
+	kfree(nvm);
+}
+
 /* port utility functions */
 
 static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -192,7 +577,7 @@ static int tb_init_port(struct tb_port *port)
 
 	/* Port 0 is the switch itself and has no PHY. */
 	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
-		cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
+		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 
 		if (cap > 0)
 			port->cap_phy = cap;
@@ -281,6 +666,9 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
 	u32 data;
 	int res;
 
+	if (!sw->config.enabled)
+		return 0;
+
 	sw->config.plug_events_delay = 0xff;
 	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
 	if (res)
@@ -307,36 +695,361 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
 			   sw->cap_plug_events + 1, 1);
 }
 
-
-/**
- * tb_switch_free() - free a tb_switch and all downstream switches
- */
-void tb_switch_free(struct tb_switch *sw)
+static ssize_t authorized_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
 {
-	int i;
-	/* port 0 is the switch itself and never has a remote */
-	for (i = 1; i <= sw->config.max_port_number; i++) {
-		if (tb_is_upstream_port(&sw->ports[i]))
-			continue;
-		if (sw->ports[i].remote)
-			tb_switch_free(sw->ports[i].remote->sw);
-		sw->ports[i].remote = NULL;
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->authorized);
+}
+
+static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+{
+	int ret = -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized)
+		goto unlock;
+
+	switch (val) {
+	/* Approve switch */
+	case 1:
+		if (sw->key)
+			ret = tb_domain_approve_switch_key(sw->tb, sw);
+		else
+			ret = tb_domain_approve_switch(sw->tb, sw);
+		break;
+
+	/* Challenge switch */
+	case 2:
+		if (sw->key)
+			ret = tb_domain_challenge_switch_key(sw->tb, sw);
+		break;
+
+	default:
+		break;
 	}
 
-	if (!sw->is_unplugged)
-		tb_plug_events_active(sw, false);
+	if (!ret) {
+		sw->authorized = val;
+		/* Notify status change to the userspace */
+		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+	}
 
+unlock:
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t authorized_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	unsigned int val;
+	ssize_t ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+	if (val > 2)
+		return -EINVAL;
+
+	ret = tb_switch_set_authorized(sw, val);
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(authorized);
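+/*
+ * Usage sketch (path illustrative, assuming the standard sysfs layout
+ * for the thunderbolt bus): userspace approves a device by writing 1,
+ * or 2 to also challenge the stored key, to this attribute, e.g.
+ * "echo 1 > /sys/bus/thunderbolt/devices/<device>/authorized".
+ */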
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%#x\n", sw->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	ssize_t ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->key)
+		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+	else
+		ret = sprintf(buf, "\n");
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u8 key[TB_SWITCH_KEY_SIZE];
+	ssize_t ret = count;
+
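+	/* The key is supplied as at least 2 * TB_SWITCH_KEY_SIZE hex characters */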
+	if (count < 64)
+		return -EINVAL;
+
+	if (hex2bin(key, buf, sizeof(key)))
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized) {
+		ret = -EBUSY;
+	} else {
+		kfree(sw->key);
+		sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+		if (!sw->key)
+			ret = -ENOMEM;
+	}
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(key);
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u32 status;
+
+	nvm_get_auth_status(sw, &status);
+	return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	bool val;
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/* If NVMem devices are not yet added */
+	if (!sw->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear the authentication status */
+	nvm_clear_auth_status(sw);
+
+	if (val) {
+		ret = nvm_validate_and_write(sw);
+		if (ret)
+			goto exit_unlock;
+
+		sw->nvm->authenticating = true;
+
+		if (!tb_route(sw))
+			ret = nvm_authenticate_host(sw);
+		else
+			ret = nvm_authenticate_device(sw);
+	}
+
+exit_unlock:
+	mutex_unlock(&switch_lock);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->safe_mode)
+		ret = -ENODATA;
+	else if (!sw->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%#x\n", sw->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%pUb\n", sw->uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *switch_attrs[] = {
+	&dev_attr_authorized.attr,
+	&dev_attr_device.attr,
+	&dev_attr_device_name.attr,
+	&dev_attr_key.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
+	&dev_attr_vendor.attr,
+	&dev_attr_vendor_name.attr,
+	&dev_attr_unique_id.attr,
+	NULL,
+};
+
+static umode_t switch_attr_is_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	if (attr == &dev_attr_key.attr) {
+		if (tb_route(sw) &&
+		    sw->tb->security_level == TB_SECURITY_SECURE &&
+		    sw->security_level == TB_SECURITY_SECURE)
+			return attr->mode;
+		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate.attr ||
+		   attr == &dev_attr_nvm_version.attr) {
+		if (sw->dma_port)
+			return attr->mode;
+		return 0;
+	}
+
+	return sw->safe_mode ? 0 : attr->mode;
+}
+
+static struct attribute_group switch_group = {
+	.is_visible = switch_attr_is_visible,
+	.attrs = switch_attrs,
+};
+
+static const struct attribute_group *switch_groups[] = {
+	&switch_group,
+	NULL,
+};
+
+static void tb_switch_release(struct device *dev)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	dma_port_free(sw->dma_port);
+
+	kfree(sw->uuid);
+	kfree(sw->device_name);
+	kfree(sw->vendor_name);
 	kfree(sw->ports);
 	kfree(sw->drom);
+	kfree(sw->key);
 	kfree(sw);
 }
 
+struct device_type tb_switch_type = {
+	.name = "thunderbolt_device",
+	.release = tb_switch_release,
+};
+
+static int tb_switch_get_generation(struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
+		return 1;
+
+	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+		return 2;
+
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+		return 3;
+
+	default:
+		/*
+		 * For unknown switches assume generation to be 1 to be
+		 * on the safe side.
+		 */
+		tb_sw_warn(sw, "unsupported switch device id %#x\n",
+			   sw->config.device_id);
+		return 1;
+	}
+}
+
 /**
- * tb_switch_alloc() - allocate and initialize a switch
+ * tb_switch_alloc() - allocate a switch
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
  *
- * Return: Returns a NULL on failure.
+ * Allocates and initializes a switch. Will not upload configuration to
+ * the switch. For that you need to call tb_switch_configure()
+ * separately. The returned switch should be released by calling
+ * tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
  */
-struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+				  u64 route)
 {
 	int i;
 	int cap;
@@ -351,11 +1064,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 
 	sw->tb = tb;
 	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
-		goto err;
-	tb_info(tb,
-		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
-		route, tb_route_length(route), upstream_port);
-	tb_info(tb, "old switch config:\n");
+		goto err_free_sw_ports;
+
+	tb_info(tb, "current switch config:\n");
 	tb_dump_switch(tb, &sw->config);
 
 	/* configure switch */
@@ -363,30 +1074,13 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 	sw->config.depth = tb_route_length(route);
 	sw->config.route_lo = route;
 	sw->config.route_hi = route >> 32;
-	sw->config.enabled = 1;
-	/* from here on we may use the tb_sw_* functions & macros */
-
-	if (sw->config.vendor_id != 0x8086)
-		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
-			   sw->config.vendor_id);
-
-	if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
-	    sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-	    sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
-	    sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
-	    sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
-		tb_sw_warn(sw, "unsupported switch device id %#x\n",
-			   sw->config.device_id);
-
-	/* upload configuration */
-	if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
-		goto err;
+	sw->config.enabled = 0;
 
 	/* initialize ports */
 	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
 				GFP_KERNEL);
 	if (!sw->ports)
-		goto err;
+		goto err_free_sw_ports;
 
 	for (i = 0; i <= sw->config.max_port_number; i++) {
 		/* minimum setup for tb_find_cap and tb_drom_read to work */
@@ -394,41 +1088,286 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 		sw->ports[i].port = i;
 	}
 
-	cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
+	sw->generation = tb_switch_get_generation(sw);
+
+	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
 	if (cap < 0) {
-		tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS aborting\n");
-		goto err;
+		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+		goto err_free_sw_ports;
 	}
 	sw->cap_plug_events = cap;
 
-	/* read drom */
-	if (tb_drom_read(sw))
-		tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n");
-	tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+	/* Root switch is always authorized */
+	if (!route)
+		sw->authorized = true;
 
-	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (sw->ports[i].disabled) {
-			tb_port_info(&sw->ports[i], "disabled by eeprom\n");
-			continue;
-		}
-		if (tb_init_port(&sw->ports[i]))
-			goto err;
-	}
-
-	/* TODO: I2C, IECS, link controller */
-
-	if (tb_plug_events_active(sw, true))
-		goto err;
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
 
 	return sw;
-err:
+
+err_free_sw_ports:
 	kfree(sw->ports);
-	kfree(sw->drom);
 	kfree(sw);
+
 	return NULL;
 }
 
 /**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch lacks all
+ * capabilities except the DMA configuration port, until it is flashed
+ * with valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+	struct tb_switch *sw;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->safe_mode = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+}
+
+/**
+ * tb_switch_configure() - Uploads configuration to the switch
+ * @sw: Switch to configure
+ *
+ * Call this function before the switch is added to the system. It will
+ * upload the configuration to the switch and make it available for the
+ * connection manager to use.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_configure(struct tb_switch *sw)
+{
+	struct tb *tb = sw->tb;
+	u64 route;
+	int ret;
+
+	route = tb_route(sw);
+	tb_info(tb,
+		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
+		route, tb_route_length(route), sw->config.upstream_port_number);
+
+	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+			   sw->config.vendor_id);
+
+	sw->config.enabled = 1;
+
+	/* upload configuration */
+	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
+	if (ret)
+		return ret;
+
+	return tb_plug_events_active(sw, true);
+}
+
+static void tb_switch_set_uuid(struct tb_switch *sw)
+{
+	u32 uuid[4];
+	int cap;
+
+	if (sw->uuid)
+		return;
+
+	/*
+	 * The newer controllers include a fused UUID as part of the
+	 * link controller specific registers
+	 */
+	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+	if (cap > 0) {
+		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
+	} else {
+		/*
+		 * ICM generates the UUID based on the UID and fills the
+		 * upper two words with ones. This does not strictly
+		 * follow the UUID format but we want to be compatible
+		 * with it so we do the same here.
+		 */
+		uuid[0] = sw->uid & 0xffffffff;
+		uuid[1] = (sw->uid >> 32) & 0xffffffff;
+		uuid[2] = 0xffffffff;
+		uuid[3] = 0xffffffff;
+	}
+
+	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static int tb_switch_add_dma_port(struct tb_switch *sw)
+{
+	u32 status;
+	int ret;
+
+	switch (sw->generation) {
+	case 3:
+		break;
+
+	case 2:
+		/* Only root switch can be upgraded */
+		if (tb_route(sw))
+			return 0;
+		break;
+
+	default:
+		/*
+		 * DMA port is the only thing available when the switch
+		 * is in safe mode.
+		 */
+		if (!sw->safe_mode)
+			return 0;
+		break;
+	}
+
+	if (sw->no_nvm_upgrade)
+		return 0;
+
+	sw->dma_port = dma_port_alloc(sw);
+	if (!sw->dma_port)
+		return 0;
+
+	/*
+	 * Check the status of the previous flash authentication. If
+	 * there was one, we need to power cycle the switch in any case
+	 * to make it functional again.
+	 */
+	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+	if (ret <= 0)
+		return ret;
+
+	if (status) {
+		tb_sw_info(sw, "switch flash authentication failed\n");
+		tb_switch_set_uuid(sw);
+		nvm_set_auth_status(sw, status);
+	}
+
+	tb_sw_info(sw, "power cycling the switch now\n");
+	dma_port_power_cycle(sw->dma_port);
+
+	/*
+	 * We return an error here, which causes adding the switch to
+	 * fail. The switch should appear back once the power cycle is
+	 * complete.
+	 */
+	return -ESHUTDOWN;
+}
+
+/**
+ * tb_switch_add() - Add a switch to the domain
+ * @sw: Switch to add
+ *
+ * This is the last step in adding a switch to the domain. It reads
+ * identification information from the DROM and initializes ports so
+ * that they can be used to connect other switches. The switch is
+ * exposed to userspace when this function returns successfully. To
+ * remove and release the switch, call tb_switch_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_add(struct tb_switch *sw)
+{
+	int i, ret;
+
+	/*
+	 * Initialize DMA control port now before we read DROM. Recent
+	 * host controllers have a more complete DROM in NVM that
+	 * includes vendor and model identification strings which we
+	 * then expose to userspace. The NVM can be accessed through a
+	 * DMA configuration based mailbox.
+	 */
+	ret = tb_switch_add_dma_port(sw);
+	if (ret)
+		return ret;
+
+	if (!sw->safe_mode) {
+		/* read drom */
+		ret = tb_drom_read(sw);
+		if (ret) {
+			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			return ret;
+		}
+		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+
+		tb_switch_set_uuid(sw);
+
+		for (i = 0; i <= sw->config.max_port_number; i++) {
+			if (sw->ports[i].disabled) {
+				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				continue;
+			}
+			ret = tb_init_port(&sw->ports[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = device_add(&sw->dev);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_nvm_add(sw);
+	if (ret)
+		device_del(&sw->dev);
+
+	return ret;
+}
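+
+/*
+ * A minimal sketch of the intended call sequence, modelled on
+ * tb_scan_port() in tb.c (error handling abbreviated):
+ *
+ *	sw = tb_switch_alloc(tb, parent, route);
+ *	if (!sw)
+ *		return;
+ *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
+ *		tb_switch_put(sw);
+ *		return;
+ *	}
+ *	...
+ *	tb_switch_remove(sw);	(on unplug or domain stop)
+ */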
+
+/**
+ * tb_switch_remove() - Remove and release a switch
+ * @sw: Switch to remove
+ *
+ * This will remove the switch from the domain and release it once its
+ * reference count drops to zero. If there are switches connected below
+ * this switch, they will be removed as well.
+ */
+void tb_switch_remove(struct tb_switch *sw)
+{
+	int i;
+
+	/* port 0 is the switch itself and never has a remote */
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		if (tb_is_upstream_port(&sw->ports[i]))
+			continue;
+		if (sw->ports[i].remote)
+			tb_switch_remove(sw->ports[i].remote->sw);
+		sw->ports[i].remote = NULL;
+	}
+
+	if (!sw->is_unplugged)
+		tb_plug_events_active(sw, false);
+
+	tb_switch_nvm_remove(sw);
+	device_unregister(&sw->dev);
+}
+
+/**
  * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
  */
 void tb_sw_set_unplugged(struct tb_switch *sw)
@@ -452,19 +1391,26 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
 int tb_switch_resume(struct tb_switch *sw)
 {
 	int i, err;
-	u64 uid;
 	tb_sw_info(sw, "resuming switch\n");
 
-	err = tb_drom_read_uid_only(sw, &uid);
-	if (err) {
-		tb_sw_warn(sw, "uid read failed\n");
-		return err;
-	}
-	if (sw != sw->tb->root_switch && sw->uid != uid) {
-		tb_sw_info(sw,
-			"changed while suspended (uid %#llx -> %#llx)\n",
-			sw->uid, uid);
-		return -ENODEV;
+	/*
+	 * Check the UID of the connected switches except for the root
+	 * switch, which we assume cannot be removed.
+	 */
+	if (tb_route(sw)) {
+		u64 uid;
+
+		err = tb_drom_read_uid_only(sw, &uid);
+		if (err) {
+			tb_sw_warn(sw, "uid read failed\n");
+			return err;
+		}
+		if (sw->uid != uid) {
+			tb_sw_info(sw,
+				"changed while suspended (uid %#llx -> %#llx)\n",
+				sw->uid, uid);
+			return -ENODEV;
+		}
 	}
 
 	/* upload configuration */
@@ -509,3 +1455,85 @@ void tb_switch_suspend(struct tb_switch *sw)
 	 * effect?
 	 */
 }
+
+struct tb_sw_lookup {
+	struct tb *tb;
+	u8 link;
+	u8 depth;
+	const uuid_be *uuid;
+};
+
+static int tb_switch_match(struct device *dev, void *data)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	struct tb_sw_lookup *lookup = data;
+
+	if (!sw)
+		return 0;
+	if (sw->tb != lookup->tb)
+		return 0;
+
+	if (lookup->uuid)
+		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
+
+	/* Root switch is matched only by depth */
+	if (!lookup->depth)
+		return !sw->depth;
+
+	return sw->link == lookup->link && sw->depth == lookup->depth;
+}
+
+/**
+ * tb_switch_find_by_link_depth() - Find switch by link and depth
+ * @tb: Domain the switch belongs to
+ * @link: Link number the switch is connected to
+ * @depth: Depth of the switch in the link
+ *
+ * The returned switch has its reference count increased so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.link = link;
+	lookup.depth = depth;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_switch_find_by_uuid() - Find switch by UUID
+ * @tb: Domain the switch belongs to
+ * @uuid: UUID to look for
+ *
+ * The returned switch has its reference count increased so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.uuid = uuid;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
+void tb_switch_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 24b6d30..1b02ca0 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -7,11 +7,24 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 
 #include "tb.h"
 #include "tb_regs.h"
 #include "tunnel_pci.h"
 
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @hotplug_active: tb_handle_hotplug will stop progressing plug
+ *		    events and exit if this is not set (it needs to
+ *		    acquire the lock one more time). Used to drain wq
+ *		    after cfg has been paused.
+ */
+struct tb_cm {
+	struct list_head tunnel_list;
+	bool hotplug_active;
+};
 
 /* enumeration & hot plug handling */
 
@@ -49,9 +62,23 @@ static void tb_scan_port(struct tb_port *port)
 		tb_port_WARN(port, "port already has a remote!\n");
 		return;
 	}
-	sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
+	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+			     tb_downstream_route(port));
 	if (!sw)
 		return;
+
+	if (tb_switch_configure(sw)) {
+		tb_switch_put(sw);
+		return;
+	}
+
+	sw->authorized = true;
+
+	if (tb_switch_add(sw)) {
+		tb_switch_put(sw);
+		return;
+	}
+
 	port->remote = tb_upstream_port(sw);
 	tb_upstream_port(sw)->remote = port;
 	tb_scan_switch(sw);
@@ -62,12 +89,14 @@ static void tb_scan_port(struct tb_port *port)
  */
 static void tb_free_invalid_tunnels(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel;
 	struct tb_pci_tunnel *n;
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
-	{
+
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		if (tb_pci_is_invalid(tunnel)) {
 			tb_pci_deactivate(tunnel);
+			list_del(&tunnel->list);
 			tb_pci_free(tunnel);
 		}
 	}
@@ -86,7 +115,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
 		if (!port->remote)
 			continue;
 		if (port->remote->sw->is_unplugged) {
-			tb_switch_free(port->remote->sw);
+			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
 		} else {
 			tb_free_unplugged_children(port->remote->sw);
@@ -121,8 +150,8 @@ static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
 			continue;
 		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
 			continue;
-		cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
-		if (cap <= 0)
+		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
+		if (cap < 0)
 			continue;
 		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
 		if (res < 0)
@@ -149,6 +178,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
 	struct tb_port *up_port;
 	struct tb_port *down_port;
 	struct tb_pci_tunnel *tunnel;
+	struct tb_cm *tcm = tb_priv(tb);
+
 	/* scan for pcie devices at depth 1*/
 	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
 		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
@@ -165,8 +196,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
 		}
 
 		/* check whether port is already activated */
-		cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
-		if (cap <= 0)
+		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
+		if (cap < 0)
 			continue;
 		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
 			continue;
@@ -195,6 +226,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
 			tb_pci_free(tunnel);
 		}
 
+		list_add(&tunnel->list, &tcm->tunnel_list);
 	}
 }
 
@@ -217,10 +249,11 @@ static void tb_handle_hotplug(struct work_struct *work)
 {
 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
 	struct tb *tb = ev->tb;
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
 	struct tb_port *port;
 	mutex_lock(&tb->lock);
-	if (!tb->hotplug_active)
+	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
 
 	sw = get_switch_at_route(tb->root_switch, ev->route);
@@ -248,7 +281,7 @@ static void tb_handle_hotplug(struct work_struct *work)
 			tb_port_info(port, "unplugged\n");
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
-			tb_switch_free(port->remote->sw);
+			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
 		} else {
 			tb_port_info(port,
@@ -281,137 +314,108 @@ static void tb_handle_hotplug(struct work_struct *work)
  *
  * Delegates to tb_handle_hotplug.
  */
-static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
-					bool unplug)
+static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+			    const void *buf, size_t size)
 {
-	struct tb *tb = data;
-	struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+	const struct cfg_event_pkg *pkg = buf;
+	struct tb_hotplug_event *ev;
+	u64 route;
+
+	if (type != TB_CFG_PKG_EVENT) {
+		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
+		return;
+	}
+
+	route = tb_cfg_get_route(&pkg->header);
+
+	if (tb_cfg_error(tb->ctl, route, pkg->port,
+			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
+			pkg->port);
+	}
+
+	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
 	if (!ev)
 		return;
 	INIT_WORK(&ev->work, tb_handle_hotplug);
 	ev->tb = tb;
 	ev->route = route;
-	ev->port = port;
-	ev->unplug = unplug;
+	ev->port = pkg->port;
+	ev->unplug = pkg->unplug;
 	queue_work(tb->wq, &ev->work);
 }
 
-/**
- * thunderbolt_shutdown_and_free() - shutdown everything
- *
- * Free all switches and the config channel.
- *
- * Used in the error path of thunderbolt_alloc_and_start.
- */
-void thunderbolt_shutdown_and_free(struct tb *tb)
+static void tb_stop(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel;
 	struct tb_pci_tunnel *n;
 
-	mutex_lock(&tb->lock);
-
 	/* tunnels are only present after everything has been initialized */
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		tb_pci_deactivate(tunnel);
 		tb_pci_free(tunnel);
 	}
-
-	if (tb->root_switch)
-		tb_switch_free(tb->root_switch);
-	tb->root_switch = NULL;
-
-	if (tb->ctl) {
-		tb_ctl_stop(tb->ctl);
-		tb_ctl_free(tb->ctl);
-	}
-	tb->ctl = NULL;
-	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-
-	/* allow tb_handle_hotplug to acquire the lock */
-	mutex_unlock(&tb->lock);
-	if (tb->wq) {
-		flush_workqueue(tb->wq);
-		destroy_workqueue(tb->wq);
-		tb->wq = NULL;
-	}
-	mutex_destroy(&tb->lock);
-	kfree(tb);
+	tb_switch_remove(tb->root_switch);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 }
 
-/**
- * thunderbolt_alloc_and_start() - setup the thunderbolt bus
- *
- * Allocates a tb_cfg control channel, initializes the root switch, enables
- * plug events and activates pci devices.
- *
- * Return: Returns NULL on error.
- */
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+static int tb_start(struct tb *tb)
 {
-	struct tb *tb;
+	struct tb_cm *tcm = tb_priv(tb);
+	int ret;
 
-	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
-	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
-	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
-
-	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
-	if (!tb)
-		return NULL;
-
-	tb->nhi = nhi;
-	mutex_init(&tb->lock);
-	mutex_lock(&tb->lock);
-	INIT_LIST_HEAD(&tb->tunnel_list);
-
-	tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
-	if (!tb->wq)
-		goto err_locked;
-
-	tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
-	if (!tb->ctl)
-		goto err_locked;
-	/*
-	 * tb_schedule_hotplug_handler may be called as soon as the config
-	 * channel is started. Thats why we have to hold the lock here.
-	 */
-	tb_ctl_start(tb->ctl);
-
-	tb->root_switch = tb_switch_alloc(tb, 0);
+	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
 	if (!tb->root_switch)
-		goto err_locked;
+		return -ENOMEM;
+
+	/*
+	 * ICM firmware upgrade needs running firmware and in native
+	 * mode that is not available so disable firmware upgrade of the
+	 * root switch.
+	 */
+	tb->root_switch->no_nvm_upgrade = true;
+
+	ret = tb_switch_configure(tb->root_switch);
+	if (ret) {
+		tb_switch_put(tb->root_switch);
+		return ret;
+	}
+
+	/* Announce the switch to the world */
+	ret = tb_switch_add(tb->root_switch);
+	if (ret) {
+		tb_switch_put(tb->root_switch);
+		return ret;
+	}
 
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	tb_activate_pcie_devices(tb);
 
 	/* Allow tb_handle_hotplug to progress events */
-	tb->hotplug_active = true;
-	mutex_unlock(&tb->lock);
-	return tb;
-
-err_locked:
-	mutex_unlock(&tb->lock);
-	thunderbolt_shutdown_and_free(tb);
-	return NULL;
+	tcm->hotplug_active = true;
+	return 0;
 }
 
-void thunderbolt_suspend(struct tb *tb)
+static int tb_suspend_noirq(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
+
 	tb_info(tb, "suspending...\n");
-	mutex_lock(&tb->lock);
 	tb_switch_suspend(tb->root_switch);
-	tb_ctl_stop(tb->ctl);
-	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-	mutex_unlock(&tb->lock);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 	tb_info(tb, "suspend finished\n");
+
+	return 0;
 }
 
-void thunderbolt_resume(struct tb *tb)
+static int tb_resume_noirq(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel, *n;
+
 	tb_info(tb, "resuming...\n");
-	mutex_lock(&tb->lock);
-	tb_ctl_start(tb->ctl);
 
 	/* remove any pci devices the firmware might have setup */
 	tb_switch_reset(tb, 0);
@@ -419,9 +423,9 @@ void thunderbolt_resume(struct tb *tb)
 	tb_switch_resume(tb->root_switch);
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
 		tb_pci_restart(tunnel);
-	if (!list_empty(&tb->tunnel_list)) {
+	if (!list_empty(&tcm->tunnel_list)) {
 		/*
 		 * the pcie links need some time to get going.
 		 * 100ms works for me...
@@ -430,7 +434,37 @@ void thunderbolt_resume(struct tb *tb)
 		msleep(100);
 	}
 	 /* Allow tb_handle_hotplug to progress events */
-	tb->hotplug_active = true;
-	mutex_unlock(&tb->lock);
+	tcm->hotplug_active = true;
 	tb_info(tb, "resume finished\n");
+
+	return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+	.start = tb_start,
+	.stop = tb_stop,
+	.suspend_noirq = tb_suspend_noirq,
+	.resume_noirq = tb_resume_noirq,
+	.handle_event = tb_handle_event,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+	struct tb_cm *tcm;
+	struct tb *tb;
+
+	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+		return NULL;
+
+	tb = tb_domain_alloc(nhi, sizeof(*tcm));
+	if (!tb)
+		return NULL;
+
+	tb->security_level = TB_SECURITY_NONE;
+	tb->cm_ops = &tb_cm_ops;
+
+	tcm = tb_priv(tb);
+	INIT_LIST_HEAD(&tcm->tunnel_list);
+
+	return tb;
 }
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 61d57ba..3d9f646 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -7,22 +7,120 @@
 #ifndef TB_H_
 #define TB_H_
 
+#include <linux/nvmem-provider.h>
 #include <linux/pci.h>
+#include <linux/uuid.h>
 
 #include "tb_regs.h"
 #include "ctl.h"
+#include "dma_port.h"
+
+/**
+ * struct tb_switch_nvm - Structure holding switch NVM information
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ *	 the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ *		   image
+ * @authenticating: The switch is authenticating the new NVM
+ */
+struct tb_switch_nvm {
+	u8 major;
+	u8 minor;
+	int id;
+	struct nvmem_device *active;
+	struct nvmem_device *non_active;
+	void *buf;
+	size_t buf_data_size;
+	bool authenticating;
+};
+
+/**
+ * enum tb_security_level - Thunderbolt security level
+ * @TB_SECURITY_NONE: No security, legacy mode
+ * @TB_SECURITY_USER: User approval required at minimum
+ * @TB_SECURITY_SECURE: One time saved key required at minimum
+ * @TB_SECURITY_DPONLY: Only tunnel DisplayPort (and USB)
+ */
+enum tb_security_level {
+	TB_SECURITY_NONE,
+	TB_SECURITY_USER,
+	TB_SECURITY_SECURE,
+	TB_SECURITY_DPONLY,
+};
+
+#define TB_SWITCH_KEY_SIZE		32
+/* Each physical port contains 2 links on modern controllers */
+#define TB_SWITCH_LINKS_PER_PHY_PORT	2
 
 /**
  * struct tb_switch - a thunderbolt switch
+ * @dev: Device for the switch
+ * @config: Switch configuration
+ * @ports: Ports in this switch
+ * @dma_port: If the switch has a port supporting DMA configuration
+ *	      based mailbox this will hold the pointer to it (%NULL
+ *	      otherwise). If set, it also means the switch has
+ *	      upgradeable NVM.
+ * @tb: Pointer to the domain the switch belongs to
+ * @uid: Unique ID of the switch
+ * @uuid: UUID of the switch (or %NULL if not supported)
+ * @vendor: Vendor ID of the switch
+ * @device: Device ID of the switch
+ * @vendor_name: Name of the vendor (or %NULL if not known)
+ * @device_name: Name of the device (or %NULL if not known)
+ * @generation: Switch Thunderbolt generation
+ * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @is_unplugged: The switch is going away
+ * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
+ * @authorized: Whether the switch is authorized by user or policy
+ * @work: Work used to automatically authorize a switch
+ * @security_level: Switch supported security level
+ * @key: Contains the key used to challenge the device or %NULL if not
+ *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
+ * @connection_id: Connection ID used with ICM messaging
+ * @connection_key: Connection key used with ICM messaging
+ * @link: Root switch link this switch is connected to (ICM only)
+ * @depth: Depth in the chain this switch is connected at (ICM only)
+ *
+ * When the switch is being added to or removed from the domain (other
+ * switches) you need to hold the domain lock. For switch authorization
+ * the internal switch_lock is enough.
  */
 struct tb_switch {
+	struct device dev;
 	struct tb_regs_switch_header config;
 	struct tb_port *ports;
+	struct tb_dma_port *dma_port;
 	struct tb *tb;
 	u64 uid;
-	int cap_plug_events; /* offset, zero if not found */
-	bool is_unplugged; /* unplugged, will go away */
+	uuid_be *uuid;
+	u16 vendor;
+	u16 device;
+	const char *vendor_name;
+	const char *device_name;
+	unsigned int generation;
+	int cap_plug_events;
+	bool is_unplugged;
 	u8 *drom;
+	struct tb_switch_nvm *nvm;
+	bool no_nvm_upgrade;
+	bool safe_mode;
+	unsigned int authorized;
+	struct work_struct work;
+	enum tb_security_level security_level;
+	u8 *key;
+	u8 connection_id;
+	u8 connection_key;
+	u8 link;
+	u8 depth;
 };
 
 /**
@@ -92,29 +190,71 @@ struct tb_path {
 	int path_length; /* number of hops */
 };
 
+/**
+ * struct tb_cm_ops - Connection manager specific operations vector
+ * @driver_ready: Called right after control channel is started. Used by
+ *		  ICM to send driver ready message to the firmware.
+ * @start: Starts the domain
+ * @stop: Stops the domain
+ * @suspend_noirq: Connection manager specific suspend_noirq
+ * @resume_noirq: Connection manager specific resume_noirq
+ * @suspend: Connection manager specific suspend
+ * @complete: Connection manager specific complete
+ * @handle_event: Handle thunderbolt event
+ * @approve_switch: Approve switch
+ * @add_switch_key: Add key to switch
+ * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ */
+struct tb_cm_ops {
+	int (*driver_ready)(struct tb *tb);
+	int (*start)(struct tb *tb);
+	void (*stop)(struct tb *tb);
+	int (*suspend_noirq)(struct tb *tb);
+	int (*resume_noirq)(struct tb *tb);
+	int (*suspend)(struct tb *tb);
+	void (*complete)(struct tb *tb);
+	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
+			     const void *buf, size_t size);
+	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
+	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
+	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
+				    const u8 *challenge, u8 *response);
+	int (*disconnect_pcie_paths)(struct tb *tb);
+};
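+
+/*
+ * Not every callback needs to be implemented; the native connection
+ * manager in tb.c, for example, provides only:
+ *
+ *	static const struct tb_cm_ops tb_cm_ops = {
+ *		.start = tb_start,
+ *		.stop = tb_stop,
+ *		.suspend_noirq = tb_suspend_noirq,
+ *		.resume_noirq = tb_resume_noirq,
+ *		.handle_event = tb_handle_event,
+ *	};
+ */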
 
 /**
  * struct tb - main thunderbolt bus structure
+ * @dev: Domain device
+ * @lock: Big lock. Must be held when accessing any struct
+ *	  tb_switch / struct tb_port.
+ * @nhi: Pointer to the NHI structure
+ * @ctl: Control channel for this domain
+ * @wq: Ordered workqueue for all domain specific work
+ * @root_switch: Root switch of this domain
+ * @cm_ops: Connection manager specific operations vector
+ * @index: Linux assigned domain number
+ * @security_level: Current security level
+ * @privdata: Private connection manager specific data
  */
 struct tb {
-	struct mutex lock;	/*
-				 * Big lock. Must be held when accessing cfg or
-				 * any struct tb_switch / struct tb_port.
-				 */
+	struct device dev;
+	struct mutex lock;
 	struct tb_nhi *nhi;
 	struct tb_ctl *ctl;
-	struct workqueue_struct *wq; /* ordered workqueue for plug events */
+	struct workqueue_struct *wq;
 	struct tb_switch *root_switch;
-	struct list_head tunnel_list; /* list of active PCIe tunnels */
-	bool hotplug_active; /*
-			      * tb_handle_hotplug will stop progressing plug
-			      * events and exit if this is not set (it needs to
-			      * acquire the lock one more time). Used to drain
-			      * wq after cfg has been paused.
-			      */
-
+	const struct tb_cm_ops *cm_ops;
+	int index;
+	enum tb_security_level security_level;
+	unsigned long privdata[0];
 };
 
+static inline void *tb_priv(struct tb *tb)
+{
+	return (void *)tb->privdata;
+}
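+
+/*
+ * Connection manager private data is reserved at domain allocation time
+ * and later retrieved with tb_priv(); a sketch following tb_probe() in
+ * tb.c:
+ *
+ *	tb = tb_domain_alloc(nhi, sizeof(struct tb_cm));
+ *	...
+ *	tcm = tb_priv(tb);
+ */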
+
 /* helper functions & macros */
 
 /**
@@ -137,6 +277,16 @@ static inline u64 tb_route(struct tb_switch *sw)
 	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
 }
 
+static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
+{
+	u8 port;
+
+	port = route >> (sw->config.depth * 8);
+	if (WARN_ON(port > sw->config.max_port_number))
+		return NULL;
+	return &sw->ports[port];
+}
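+
+/*
+ * The route string encodes one egress port per byte, indexed by depth:
+ * with route 0x0302, for example, tb_port_at() resolves to port 2 on
+ * the depth 0 switch and port 3 on the depth 1 switch.
+ */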
+
 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			     enum tb_cfg_space space, u32 offset, u32 length)
 {
@@ -173,7 +323,7 @@ static inline int tb_port_read(struct tb_port *port, void *buffer,
 			   length);
 }
 
-static inline int tb_port_write(struct tb_port *port, void *buffer,
+static inline int tb_port_write(struct tb_port *port, const void *buffer,
 				enum tb_cfg_space space, u32 offset, u32 length)
 {
 	return tb_cfg_write(port->sw->tb->ctl,
@@ -215,25 +365,78 @@ static inline int tb_port_write(struct tb_port *port, void *buffer,
 #define tb_port_info(port, fmt, arg...) \
 	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
 
+struct tb *icm_probe(struct tb_nhi *nhi);
+struct tb *tb_probe(struct tb_nhi *nhi);
 
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
-void thunderbolt_shutdown_and_free(struct tb *tb);
-void thunderbolt_suspend(struct tb *tb);
-void thunderbolt_resume(struct tb *tb);
+extern struct bus_type tb_bus_type;
+extern struct device_type tb_domain_type;
+extern struct device_type tb_switch_type;
 
-struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
-void tb_switch_free(struct tb_switch *sw);
+int tb_domain_init(void);
+void tb_domain_exit(void);
+void tb_switch_exit(void);
+
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+int tb_domain_add(struct tb *tb);
+void tb_domain_remove(struct tb *tb);
+int tb_domain_suspend_noirq(struct tb *tb);
+int tb_domain_resume_noirq(struct tb *tb);
+int tb_domain_suspend(struct tb *tb);
+void tb_domain_complete(struct tb *tb);
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
+
+static inline void tb_domain_put(struct tb *tb)
+{
+	put_device(&tb->dev);
+}
+
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+				  u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+			struct device *parent, u64 route);
+int tb_switch_configure(struct tb_switch *sw);
+int tb_switch_add(struct tb_switch *sw);
+void tb_switch_remove(struct tb_switch *sw);
 void tb_switch_suspend(struct tb_switch *sw);
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
 void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
+					       u8 depth);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+
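+/*
+ * Each physical port exposes TB_SWITCH_LINKS_PER_PHY_PORT (two) links,
+ * so for example links 1 and 2 map to physical port 0 and links 3 and
+ * 4 map to physical port 1.
+ */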
+static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
+{
+	return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
+}
+
+static inline void tb_switch_put(struct tb_switch *sw)
+{
+	put_device(&sw->dev);
+}
+
+static inline bool tb_is_switch(const struct device *dev)
+{
+	return dev->type == &tb_switch_type;
+}
+
+static inline struct tb_switch *tb_to_switch(struct device *dev)
+{
+	if (tb_is_switch(dev))
+		return container_of(dev, struct tb_switch, dev);
+	return NULL;
+}
 
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
 int tb_port_add_nfc_credits(struct tb_port *port, int credits);
 int tb_port_clear_counter(struct tb_port *port, int counter);
 
-int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap);
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
 
 struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
 void tb_path_free(struct tb_path *path);
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
new file mode 100644
index 0000000..85b6d33
--- /dev/null
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -0,0 +1,260 @@
+/*
+ * Thunderbolt control channel messages
+ *
+ * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2017, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TB_MSGS
+#define _TB_MSGS
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+enum tb_cfg_pkg_type {
+	TB_CFG_PKG_READ = 1,
+	TB_CFG_PKG_WRITE = 2,
+	TB_CFG_PKG_ERROR = 3,
+	TB_CFG_PKG_NOTIFY_ACK = 4,
+	TB_CFG_PKG_EVENT = 5,
+	TB_CFG_PKG_XDOMAIN_REQ = 6,
+	TB_CFG_PKG_XDOMAIN_RESP = 7,
+	TB_CFG_PKG_OVERRIDE = 8,
+	TB_CFG_PKG_RESET = 9,
+	TB_CFG_PKG_ICM_EVENT = 10,
+	TB_CFG_PKG_ICM_CMD = 11,
+	TB_CFG_PKG_ICM_RESP = 12,
+	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
+
+};
+
+enum tb_cfg_space {
+	TB_CFG_HOPS = 0,
+	TB_CFG_PORT = 1,
+	TB_CFG_SWITCH = 2,
+	TB_CFG_COUNTERS = 3,
+};
+
+enum tb_cfg_error {
+	TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+	TB_CFG_ERROR_LINK_ERROR = 1,
+	TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+	TB_CFG_ERROR_NO_SUCH_PORT = 4,
+	TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
+	TB_CFG_ERROR_LOOP = 8,
+	TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
+	TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+};
+
+/* common header */
+struct tb_cfg_header {
+	u32 route_hi:22;
+	u32 unknown:10; /* highest order bit is set on replies */
+	u32 route_lo;
+} __packed;
+
+/* additional header for read/write packets */
+struct tb_cfg_address {
+	u32 offset:13; /* in dwords */
+	u32 length:6; /* in dwords */
+	u32 port:6;
+	enum tb_cfg_space space:2;
+	u32 seq:2; /* sequence number  */
+	u32 zero:3;
+} __packed;
+
+/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
+struct cfg_read_pkg {
+	struct tb_cfg_header header;
+	struct tb_cfg_address addr;
+} __packed;
+
+/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
+struct cfg_write_pkg {
+	struct tb_cfg_header header;
+	struct tb_cfg_address addr;
+	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* TB_CFG_PKG_ERROR */
+struct cfg_error_pkg {
+	struct tb_cfg_header header;
+	enum tb_cfg_error error:4;
+	u32 zero1:4;
+	u32 port:6;
+	u32 zero2:2; /* Both should be zero, still they are different fields. */
+	u32 zero3:16;
+} __packed;
+
+/* TB_CFG_PKG_EVENT */
+struct cfg_event_pkg {
+	struct tb_cfg_header header;
+	u32 port:6;
+	u32 zero:25;
+	bool unplug:1;
+} __packed;
+
+/* TB_CFG_PKG_RESET */
+struct cfg_reset_pkg {
+	struct tb_cfg_header header;
+} __packed;
+
+/* TB_CFG_PKG_PREPARE_TO_SLEEP */
+struct cfg_pts_pkg {
+	struct tb_cfg_header header;
+	u32 data;
+} __packed;
+
+/* ICM messages */
+
+enum icm_pkg_code {
+	ICM_GET_TOPOLOGY = 0x1,
+	ICM_DRIVER_READY = 0x3,
+	ICM_APPROVE_DEVICE = 0x4,
+	ICM_CHALLENGE_DEVICE = 0x5,
+	ICM_ADD_DEVICE_KEY = 0x6,
+	ICM_GET_ROUTE = 0xa,
+};
+
+enum icm_event_code {
+	ICM_EVENT_DEVICE_CONNECTED = 3,
+	ICM_EVENT_DEVICE_DISCONNECTED = 4,
+};
+
+struct icm_pkg_header {
+	u8 code;
+	u8 flags;
+	u8 packet_id;
+	u8 total_packets;
+} __packed;
+
+#define ICM_FLAGS_ERROR			BIT(0)
+#define ICM_FLAGS_NO_KEY		BIT(1)
+#define ICM_FLAGS_SLEVEL_SHIFT		3
+#define ICM_FLAGS_SLEVEL_MASK		GENMASK(4, 3)
+
+struct icm_pkg_driver_ready {
+	struct icm_pkg_header hdr;
+} __packed;
+
+struct icm_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u8 romver;
+	u8 ramver;
+	u16 security_level;
+} __packed;
+
+/* Falcon Ridge & Alpine Ridge common messages */
+
+struct icm_fr_pkg_get_topology {
+	struct icm_pkg_header hdr;
+} __packed;
+
+#define ICM_GET_TOPOLOGY_PACKETS	14
+
+struct icm_fr_pkg_get_topology_response {
+	struct icm_pkg_header hdr;
+	u32 route_lo;
+	u32 route_hi;
+	u8 first_data;
+	u8 second_data;
+	u8 drom_i2c_address_index;
+	u8 switch_index;
+	u32 reserved[2];
+	u32 ports[16];
+	u32 port_hop_info[16];
+} __packed;
+
+#define ICM_SWITCH_USED			BIT(0)
+#define ICM_SWITCH_UPSTREAM_PORT_MASK	GENMASK(7, 1)
+#define ICM_SWITCH_UPSTREAM_PORT_SHIFT	1
+
+#define ICM_PORT_TYPE_MASK		GENMASK(23, 0)
+#define ICM_PORT_INDEX_SHIFT		24
+#define ICM_PORT_INDEX_MASK		GENMASK(31, 24)
+
+struct icm_fr_event_device_connected {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 link_info;
+	u32 ep_name[55];
+} __packed;
+
+#define ICM_LINK_INFO_LINK_MASK		0x7
+#define ICM_LINK_INFO_DEPTH_SHIFT	4
+#define ICM_LINK_INFO_DEPTH_MASK	GENMASK(7, 4)
+#define ICM_LINK_INFO_APPROVED		BIT(8)
+
+struct icm_fr_pkg_approve_device {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+} __packed;
+
+struct icm_fr_event_device_disconnected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+} __packed;
+
+struct icm_fr_pkg_add_device_key {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 key[8];
+} __packed;
+
+struct icm_fr_pkg_add_device_key_response {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+} __packed;
+
+struct icm_fr_pkg_challenge_device {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+} __packed;
+
+struct icm_fr_pkg_challenge_device_response {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+	u32 response[8];
+} __packed;
+
+/* Alpine Ridge only messages */
+
+struct icm_ar_pkg_get_route {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+} __packed;
+
+struct icm_ar_pkg_get_route_response {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	u32 route_hi;
+	u32 route_lo;
+} __packed;
+
+#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 1e2a4a8..582bd1f 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -23,15 +23,22 @@
  */
 #define TB_MAX_CONFIG_RW_LENGTH 60
 
-enum tb_cap {
-	TB_CAP_PHY		= 0x0001,
-	TB_CAP_TIME1		= 0x0003,
-	TB_CAP_PCIE		= 0x0004,
-	TB_CAP_I2C		= 0x0005,
-	TB_CAP_PLUG_EVENTS	= 0x0105, /* also EEPROM */
-	TB_CAP_TIME2		= 0x0305,
-	TB_CAP_IECS		= 0x0405,
-	TB_CAP_LINK_CONTROLLER	= 0x0605, /* also IECS */
+enum tb_switch_cap {
+	TB_SWITCH_CAP_VSE		= 0x05,
+};
+
+enum tb_switch_vse_cap {
+	TB_VSE_CAP_PLUG_EVENTS		= 0x01, /* also EEPROM */
+	TB_VSE_CAP_TIME2		= 0x03,
+	TB_VSE_CAP_IECS			= 0x04,
+	TB_VSE_CAP_LINK_CONTROLLER	= 0x06, /* also IECS */
+};
+
+enum tb_port_cap {
+	TB_PORT_CAP_PHY			= 0x01,
+	TB_PORT_CAP_TIME1		= 0x03,
+	TB_PORT_CAP_ADAP		= 0x04,
+	TB_PORT_CAP_VSE			= 0x05,
 };
 
 enum tb_port_state {
@@ -49,15 +56,34 @@ struct tb_cap_basic {
 	u8 cap; /* if cap == 0x05 then we have a extended capability */
 } __packed;
 
+/**
+ * struct tb_cap_extended_short - Switch extended short capability
+ * @next: Pointer to the next capability. If @next and @length are zero
+ *	  then we have a long cap.
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @length: Length of this capability
+ */
 struct tb_cap_extended_short {
-	u8 next; /* if next and length are zero then we have a long cap */
-	enum tb_cap cap:16;
+	u8 next;
+	u8 cap;
+	u8 vsec_id;
 	u8 length;
 } __packed;
 
+/**
+ * struct tb_cap_extended_long - Switch extended long capability
+ * @zero1: This field should be zero
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @zero2: This field should be zero
+ * @next: Pointer to the next capability
+ * @length: Length of this capability
+ */
 struct tb_cap_extended_long {
 	u8 zero1;
-	enum tb_cap cap:16;
+	u8 cap;
+	u8 vsec_id;
 	u8 zero2;
 	u16 next;
 	u16 length;
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
index baf1cd3..ca44759 100644
--- a/drivers/thunderbolt/tunnel_pci.c
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -147,10 +147,10 @@ bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
 static int tb_pci_port_active(struct tb_port *port, bool active)
 {
 	u32 word = active ? 0x80000000 : 0x0;
-	int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
-	if (cap <= 0) {
-		tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
-		return cap ? cap : -ENXIO;
+	int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+	if (cap < 0) {
+		tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
+		return cap;
 	}
 	return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
 }
@@ -194,19 +194,13 @@ int tb_pci_restart(struct tb_pci_tunnel *tunnel)
  */
 int tb_pci_activate(struct tb_pci_tunnel *tunnel)
 {
-	int res;
 	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
 		tb_tunnel_WARN(tunnel,
 			       "trying to activate an already activated tunnel\n");
 		return -EINVAL;
 	}
 
-	res = tb_pci_restart(tunnel);
-	if (res)
-		return res;
-
-	list_add(&tunnel->list, &tunnel->tb->tunnel_list);
-	return 0;
+	return tb_pci_restart(tunnel);
 }
 
 
@@ -227,6 +221,5 @@ void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
 		tb_path_deactivate(tunnel->path_to_down);
 	if (tunnel->path_to_up->activated)
 		tb_path_deactivate(tunnel->path_to_up);
-	list_del_init(&tunnel->list);
 }
 
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index f02becd..8689279 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_TTY)		+= tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
 				   tty_buffer.o tty_port.o tty_mutex.o \
-				   tty_ldsem.o tty_baudrate.o tty_jobctrl.o
+				   tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
+				   n_null.o
 obj-$(CONFIG_LEGACY_PTYS)	+= pty.o
 obj-$(CONFIG_UNIX98_PTYS)	+= pty.o
 obj-$(CONFIG_AUDIT)		+= tty_audit.o
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index dea16bb..9820e20 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -570,18 +570,6 @@ static int startup(struct tty_struct *tty, struct serial_state *info)
 	info->xmit.head = info->xmit.tail = 0;
 
 	/*
-	 * Set up the tty->alt_speed kludge
-	 */
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-		tty->alt_speed = 57600;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-		tty->alt_speed = 115200;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-		tty->alt_speed = 230400;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-		tty->alt_speed = 460800;
-
-	/*
 	 * and set the speed of the serial port
 	 */
 	change_speed(tty, info, NULL);
@@ -1084,14 +1072,9 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
 check_and_exit:
 	if (tty_port_initialized(port)) {
 		if (change_spd) {
-			if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-				tty->alt_speed = 57600;
-			if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-				tty->alt_speed = 115200;
-			if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-				tty->alt_speed = 230400;
-			if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-				tty->alt_speed = 460800;
+			/* warn about deprecation unless clearing */
+			if (new_serial.flags & ASYNC_SPD_MASK)
+				dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
 			change_speed(tty, state, NULL);
 		}
 	} else
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 104f09c..d272bc4 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1975,18 +1975,6 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
 	cflag = tty->termios.c_cflag;
 	iflag = tty->termios.c_iflag;
 
-	/*
-	 * Set up the tty->alt_speed kludge
-	 */
-	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-		tty->alt_speed = 57600;
-	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-		tty->alt_speed = 115200;
-	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-		tty->alt_speed = 230400;
-	if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-		tty->alt_speed = 460800;
-
 	card = info->card;
 	channel = info->line - card->first_line;
 
@@ -2295,12 +2283,16 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
 		struct serial_struct __user *new_info)
 {
 	struct serial_struct new_serial;
+	int old_flags;
 	int ret;
 
 	if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
 		return -EFAULT;
 
 	mutex_lock(&info->port.mutex);
+
+	old_flags = info->port.flags;
+
 	if (!capable(CAP_SYS_ADMIN)) {
 		if (new_serial.close_delay != info->port.close_delay ||
 				new_serial.baud_base != info->baud ||
@@ -2332,6 +2324,11 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
 
 check_and_exit:
 	if (tty_port_initialized(&info->port)) {
+		if ((new_serial.flags ^ old_flags) & ASYNC_SPD_MASK) {
+			/* warn about deprecation unless clearing */
+			if (new_serial.flags & ASYNC_SPD_MASK)
+				dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
+		}
 		cy_set_line_char(info, tty);
 		ret = 0;
 	} else {
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 574da15..b8d5ea0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -44,7 +44,7 @@
 
 config HVC_IUCV
 	bool "z/VM IUCV Hypervisor console support (VM only)"
-	depends on S390
+	depends on S390 && NET
 	select HVC_DRIVER
 	select IUCV
 	default y
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 99bb875..79cc5be 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -484,13 +484,13 @@ static struct attribute_group hvcs_attr_group = {
 	.attrs = hvcs_attrs,
 };
 
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+static ssize_t rescan_show(struct device_driver *ddp, char *buf)
 {
 	/* A 1 means it is updating, a 0 means it is done updating */
 	return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
 }
 
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+static ssize_t rescan_store(struct device_driver *ddp, const char * buf,
 		size_t count)
 {
 	if ((simple_strtol(buf, NULL, 0) != 1)
@@ -505,8 +505,7 @@ static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
 	return count;
 }
 
-static DRIVER_ATTR(rescan,
-	S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+static DRIVER_ATTR_RW(rescan);
 
 static void hvcs_kick(void)
 {
@@ -1242,8 +1241,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
 		free_irq(irq, hvcsd);
 		return;
 	} else if (hvcsd->port.count < 0) {
-		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
-				" is missmanaged.\n",
+		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d is mismanaged.\n",
 		hvcsd->vdev->unit_address, hvcsd->port.count);
 	}
 
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 2667a20..363a8ca 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2015,6 +2015,33 @@ static void gsm_error(struct gsm_mux *gsm,
 	gsm->io_error++;
 }
 
+static int gsm_disconnect(struct gsm_mux *gsm)
+{
+	struct gsm_dlci *dlci = gsm->dlci[0];
+	struct gsm_control *gc;
+
+	if (!dlci)
+		return 0;
+
+	/* In theory disconnecting DLCI 0 is sufficient but for some
+	   modems this is apparently not the case. */
+	gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
+	if (gc)
+		gsm_control_wait(gsm, gc);
+
+	del_timer_sync(&gsm->t2_timer);
+	/* Now we are sure T2 has stopped */
+
+	gsm_dlci_begin_close(dlci);
+	wait_event_interruptible(gsm->event,
+				dlci->state == DLCI_CLOSED);
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	return 0;
+}
+
 /**
  *	gsm_cleanup_mux		-	generic GSM protocol cleanup
  *	@gsm: our mux
@@ -2029,7 +2056,6 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
 	int i;
 	struct gsm_dlci *dlci = gsm->dlci[0];
 	struct gsm_msg *txq, *ntxq;
-	struct gsm_control *gc;
 
 	gsm->dead = 1;
 
@@ -2045,21 +2071,11 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
 	if (i == MAX_MUX)
 		return;
 
-	/* In theory disconnecting DLCI 0 is sufficient but for some
-	   modems this is apparently not the case. */
-	if (dlci) {
-		gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
-		if (gc)
-			gsm_control_wait(gsm, gc);
-	}
 	del_timer_sync(&gsm->t2_timer);
 	/* Now we are sure T2 has stopped */
-	if (dlci) {
+	if (dlci)
 		dlci->dead = 1;
-		gsm_dlci_begin_close(dlci);
-		wait_event_interruptible(gsm->event,
-					dlci->state == DLCI_CLOSED);
-	}
+
 	/* Free up any link layer users */
 	mutex_lock(&gsm->mutex);
 	for (i = 0; i < NUM_DLCI; i++)
@@ -2519,12 +2535,12 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
 	 */
 
 	if (need_close || need_restart) {
-		gsm_dlci_begin_close(gsm->dlci[0]);
-		/* This will timeout if the link is down due to N2 expiring */
-		wait_event_interruptible(gsm->event,
-				gsm->dlci[0]->state == DLCI_CLOSED);
-		if (signal_pending(current))
-			return -EINTR;
+		int ret;
+
+		ret = gsm_disconnect(gsm);
+
+		if (ret)
+			return ret;
 	}
 	if (need_restart)
 		gsm_cleanup_mux(gsm);
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
new file mode 100644
index 0000000..d63261c
--- /dev/null
+++ b/drivers/tty/n_null.c
@@ -0,0 +1,80 @@
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+
+/*
+ *  n_null.c - Null line discipline used in the failure path
+ *
+ *  Copyright (C) Intel 2017
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+static int n_null_open(struct tty_struct *tty)
+{
+	return 0;
+}
+
+static void n_null_close(struct tty_struct *tty)
+{
+}
+
+static ssize_t n_null_read(struct tty_struct *tty, struct file *file,
+			   unsigned char __user * buf, size_t nr)
+{
+	return -EOPNOTSUPP;
+}
+
+static ssize_t n_null_write(struct tty_struct *tty, struct file *file,
+			    const unsigned char *buf, size_t nr)
+{
+	return -EOPNOTSUPP;
+}
+
+static void n_null_receivebuf(struct tty_struct *tty,
+				 const unsigned char *cp, char *fp,
+				 int cnt)
+{
+}
+
+static struct tty_ldisc_ops null_ldisc = {
+	.owner		=	THIS_MODULE,
+	.magic		=	TTY_LDISC_MAGIC,
+	.name		=	"n_null",
+	.open		=	n_null_open,
+	.close		=	n_null_close,
+	.read		=	n_null_read,
+	.write		=	n_null_write,
+	.receive_buf	=	n_null_receivebuf
+};
+
+static int __init n_null_init(void)
+{
+	BUG_ON(tty_register_ldisc(N_NULL, &null_ldisc));
+	return 0;
+}
+
+static void __exit n_null_exit(void)
+{
+	tty_unregister_ldisc(N_NULL);
+}
+
+module_init(n_null_init);
+module_exit(n_null_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alan Cox");
+MODULE_ALIAS_LDISC(N_NULL);
+MODULE_DESCRIPTION("Null ldisc driver");
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 6579957..d1399aa 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -24,6 +24,9 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/poll.h>
+#include <linux/mount.h>
+#include <linux/file.h>
+#include <linux/ioctl.h>
 
 #undef TTY_DEBUG_HANGUP
 #ifdef TTY_DEBUG_HANGUP
@@ -66,8 +69,13 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
 #ifdef CONFIG_UNIX98_PTYS
 		if (tty->driver == ptm_driver) {
 			mutex_lock(&devpts_mutex);
-			if (tty->link->driver_data)
-				devpts_pty_kill(tty->link->driver_data);
+			if (tty->link->driver_data) {
+				struct path *path = tty->link->driver_data;
+
+				devpts_pty_kill(path->dentry);
+				path_put(path);
+				kfree(path);
+			}
 			mutex_unlock(&devpts_mutex);
 		}
 #endif
@@ -440,6 +448,48 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
 	return retval;
 }
 
+/**
+ *	pty_open_peer - open the peer of a pty
+ *	@tty: the peer of the pty being opened
+ *
+ *	Open the cached dentry in tty->link, providing a safe way for userspace
+ *	to get the slave end of a pty (where they have the master fd and cannot
+ *	access or trust the mount namespace /dev/pts was mounted inside).
+ */
+static struct file *pty_open_peer(struct tty_struct *tty, int flags)
+{
+	if (tty->driver->subtype != PTY_TYPE_MASTER)
+		return ERR_PTR(-EIO);
+	return dentry_open(tty->link->driver_data, flags, current_cred());
+}
+
+static int pty_get_peer(struct tty_struct *tty, int flags)
+{
+	int fd = -1;
+	struct file *filp = NULL;
+	int retval = -EINVAL;
+
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		retval = fd;
+		goto err;
+	}
+
+	filp = pty_open_peer(tty, flags);
+	if (IS_ERR(filp)) {
+		retval = PTR_ERR(filp);
+		goto err_put;
+	}
+
+	fd_install(fd, filp);
+	return fd;
+
+err_put:
+	put_unused_fd(fd);
+err:
+	return retval;
+}
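+
+/*
+ * From userspace the slave end is then reachable via the TIOCGPTPEER
+ * ioctl on the pty master fd, e.g. (a sketch, error handling omitted):
+ *
+ *	int slave_fd = ioctl(master_fd, TIOCGPTPEER, O_RDWR | O_NOCTTY);
+ */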
+
 static void pty_cleanup(struct tty_struct *tty)
 {
 	tty_port_put(tty->port);
@@ -481,6 +531,16 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
 	return -ENOIOCTLCMD;
 }
 
+static long pty_bsd_compat_ioctl(struct tty_struct *tty,
+				 unsigned int cmd, unsigned long arg)
+{
+	/*
+	 * PTY ioctls don't require any special translation between 32-bit and
+	 * 64-bit userspace; they are already compatible.
+	 */
+	return pty_bsd_ioctl(tty, cmd, arg);
+}
+
 static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
 /*
  * not really modular, but the easiest way to keep compat with existing
@@ -502,6 +562,7 @@ static const struct tty_operations master_pty_ops_bsd = {
 	.chars_in_buffer = pty_chars_in_buffer,
 	.unthrottle = pty_unthrottle,
 	.ioctl = pty_bsd_ioctl,
+	.compat_ioctl = pty_bsd_compat_ioctl,
 	.cleanup = pty_cleanup,
 	.resize = pty_resize,
 	.remove = pty_remove
@@ -602,6 +663,8 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
 		return pty_get_pktmode(tty, (int __user *)arg);
 	case TIOCGPTN: /* Get PT Number */
 		return put_user(tty->index, (unsigned int __user *)arg);
+	case TIOCGPTPEER: /* Open the other end */
+		return pty_get_peer(tty, (int) arg);
 	case TIOCSIG:    /* Send signal to other side of pty */
 		return pty_signal(tty, (int) arg);
 	}
@@ -609,6 +672,16 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
 	return -ENOIOCTLCMD;
 }
 
+static long pty_unix98_compat_ioctl(struct tty_struct *tty,
+				 unsigned int cmd, unsigned long arg)
+{
+	/*
+	 * PTY ioctls don't require any special translation between 32-bit and
+	 * 64-bit userspace; they are already compatible.
+	 */
+	return pty_unix98_ioctl(tty, cmd, arg);
+}
+
 /**
  *	ptm_unix98_lookup	-	find a pty master
  *	@driver: ptm driver
@@ -681,6 +754,7 @@ static const struct tty_operations ptm_unix98_ops = {
 	.chars_in_buffer = pty_chars_in_buffer,
 	.unthrottle = pty_unthrottle,
 	.ioctl = pty_unix98_ioctl,
+	.compat_ioctl = pty_unix98_compat_ioctl,
 	.resize = pty_resize,
 	.cleanup = pty_cleanup
 };
@@ -718,6 +792,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 {
 	struct pts_fs_info *fsi;
 	struct tty_struct *tty;
+	struct path *pts_path;
 	struct dentry *dentry;
 	int retval;
 	int index;
@@ -771,16 +846,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 		retval = PTR_ERR(dentry);
 		goto err_release;
 	}
-	tty->link->driver_data = dentry;
+	/* We need to cache a fake path for TIOCGPTPEER. */
+	pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
+	if (!pts_path)
+		goto err_release;
+	pts_path->mnt = filp->f_path.mnt;
+	pts_path->dentry = dentry;
+	path_get(pts_path);
+	tty->link->driver_data = pts_path;
 
 	retval = ptm_driver->ops->open(tty, filp);
 	if (retval)
-		goto err_release;
+		goto err_path_put;
 
 	tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
 
 	tty_unlock(tty);
 	return 0;
+err_path_put:
+	path_put(pts_path);
+	kfree(pts_path);
 err_release:
 	tty_unlock(tty);
 	// This will also put-ref the fsi
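For reference, a minimal userspace sketch (not part of the patch) of how the new TIOCGPTPEER ioctl can be used to obtain the slave end directly from a pty master, without resolving any /dev/pts path. The fallback define uses the asm-generic value added by this series; other architectures may differ, and error handling is reduced to perror():

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#ifndef TIOCGPTPEER
#define TIOCGPTPEER	_IO('T', 0x41)	/* asm-generic value from this series */
#endif

int main(void)
{
	/* Open a pty master in the usual way. */
	int master = posix_openpt(O_RDWR | O_NOCTTY);

	if (master < 0) {
		perror("posix_openpt");
		return 1;
	}
	if (grantpt(master) || unlockpt(master)) {
		perror("grantpt/unlockpt");
		return 1;
	}

	/* Ask the kernel for the slave end directly, bypassing /dev/pts. */
	int slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);

	if (slave < 0) {
		perror("TIOCGPTPEER");
		return 1;
	}
	printf("master fd %d, slave fd %d\n", master, slave);
	return 0;
}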
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b51a877..20d79a6 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -947,18 +947,6 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
 
 		tty_port_set_initialized(&info->port, 1);
 
-		/*
-		 * Set up the tty->alt_speed kludge
-		 */
-		if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
-			tty->alt_speed = 57600;
-		if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
-			tty->alt_speed = 115200;
-		if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
-			tty->alt_speed = 230400;
-		if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
-			tty->alt_speed = 460800;
-
 		configure_r_port(tty, info, NULL);
 		if (C_BAUD(tty)) {
 			sSetDTR(cp);
@@ -1219,23 +1207,20 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
 			return -EPERM;
 		}
 		info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
-		configure_r_port(tty, info, NULL);
 		mutex_unlock(&info->port.mutex);
 		return 0;
 	}
 
+	if ((new_serial.flags ^ info->flags) & ROCKET_SPD_MASK) {
+		/* warn about deprecation, unless clearing */
+		if (new_serial.flags & ROCKET_SPD_MASK)
+			dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
+	}
+
 	info->flags = ((info->flags & ~ROCKET_FLAGS) | (new_serial.flags & ROCKET_FLAGS));
 	info->port.close_delay = new_serial.close_delay;
 	info->port.closing_wait = new_serial.closing_wait;
 
-	if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI)
-		tty->alt_speed = 57600;
-	if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI)
-		tty->alt_speed = 115200;
-	if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI)
-		tty->alt_speed = 230400;
-	if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP)
-		tty->alt_speed = 460800;
 	mutex_unlock(&info->port.mutex);
 
 	configure_r_port(tty, info, NULL);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index f71b473..ae1aaa0 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -262,11 +262,13 @@ static ssize_t modalias_show(struct device *dev,
 {
 	return of_device_modalias(dev, buf, PAGE_SIZE);
 }
+DEVICE_ATTR_RO(modalias);
 
-static struct device_attribute serdev_device_attrs[] = {
-	__ATTR_RO(modalias),
-	__ATTR_NULL
+static struct attribute *serdev_device_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(serdev_device);
 
 static struct bus_type serdev_bus_type = {
 	.name		= "serial",
@@ -274,7 +276,7 @@ static struct bus_type serdev_bus_type = {
 	.probe		= serdev_drv_probe,
 	.remove		= serdev_drv_remove,
 	.uevent		= serdev_uevent,
-	.dev_attrs	= serdev_device_attrs,
+	.dev_groups	= serdev_device_groups,
 };
 
 /**
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index d0a021c..302018d 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -148,7 +148,7 @@ static unsigned int ttyport_set_baudrate(struct serdev_controller *ctrl, unsigne
 
 	/* tty_set_termios() return not checked as it is always 0 */
 	tty_set_termios(tty, &ktermios);
-	return speed;
+	return ktermios.c_ospeed;
 }
 
 static void ttyport_set_flow_control(struct serdev_controller *ctrl, bool enable)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index ce8d4ff..b2bdc35 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -81,6 +81,9 @@ struct serial8250_config {
 #define UART_CAP_HFIFO	(1 << 14)	/* UART has a "hidden" FIFO */
 #define UART_CAP_RPM	(1 << 15)	/* Runtime PM is active while idle */
 #define UART_CAP_IRDA	(1 << 16)	/* UART supports IrDA line discipline */
+#define UART_CAP_MINI	(1 << 17)	/* Mini UART on BCM283X family lacks:
+					 * STOP PARITY EPAR SPAR WLEN5 WLEN6
+					 */
 
 #define UART_BUG_QUOT	(1 << 0)	/* UART has buggy quot LSB */
 #define UART_BUG_TXEN	(1 << 1)	/* UART has buggy TX IIR status */
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
new file mode 100644
index 0000000..822be49
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -0,0 +1,323 @@
+/*
+ *  Serial Port driver for Aspeed VUART device
+ *
+ *    Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
+ *    Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include "8250.h"
+
+#define ASPEED_VUART_GCRA		0x20
+#define ASPEED_VUART_GCRA_VUART_EN		BIT(0)
+#define ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
+#define ASPEED_VUART_GCRB		0x24
+#define ASPEED_VUART_GCRB_HOST_SIRQ_MASK	GENMASK(7, 4)
+#define ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT	4
+#define ASPEED_VUART_ADDRL		0x28
+#define ASPEED_VUART_ADDRH		0x2c
+
+struct aspeed_vuart {
+	struct device		*dev;
+	void __iomem		*regs;
+	struct clk		*clk;
+	int			line;
+};
+
+/*
+ * The VUART is basically two UART 'front ends' connected by their FIFO
+ * (no actual serial line in between). One is on the BMC side (management
+ * controller) and one is on the host CPU side.
+ *
+ * It allows the BMC to provide the host with a "UART" that pipes into
+ * the BMC itself and can then be turned by the BMC into a network console
+ * of some sort, for example.
+ *
+ * This driver is for the BMC side. The sysfs files allow the BMC
+ * userspace, which owns the system configuration policy, to specify
+ * at what IO port and interrupt number the host side will appear
+ * to the host on the Host <-> BMC LPC bus. It could be different on a
+ * different system (though most of them use 3f8/4).
+ */
+
+static ssize_t lpc_address_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+	u16 addr;
+
+	addr = (readb(vuart->regs + ASPEED_VUART_ADDRH) << 8) |
+		(readb(vuart->regs + ASPEED_VUART_ADDRL));
+
+	return snprintf(buf, PAGE_SIZE - 1, "0x%x\n", addr);
+}
+
+static ssize_t lpc_address_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+	unsigned long val;
+	int err;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	writeb(val >> 8, vuart->regs + ASPEED_VUART_ADDRH);
+	writeb(val >> 0, vuart->regs + ASPEED_VUART_ADDRL);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(lpc_address);
+
+static ssize_t sirq_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+	u8 reg;
+
+	reg = readb(vuart->regs + ASPEED_VUART_GCRB);
+	reg &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
+	reg >>= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT;
+
+	return snprintf(buf, PAGE_SIZE - 1, "%u\n", reg);
+}
+
+static ssize_t sirq_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+	unsigned long val;
+	int err;
+	u8 reg;
+
+	err = kstrtoul(buf, 0, &val);
+	if (err)
+		return err;
+
+	val <<= ASPEED_VUART_GCRB_HOST_SIRQ_SHIFT;
+	val &= ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
+
+	reg = readb(vuart->regs + ASPEED_VUART_GCRB);
+	reg &= ~ASPEED_VUART_GCRB_HOST_SIRQ_MASK;
+	reg |= val;
+	writeb(reg, vuart->regs + ASPEED_VUART_GCRB);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(sirq);
+
+static struct attribute *aspeed_vuart_attrs[] = {
+	&dev_attr_sirq.attr,
+	&dev_attr_lpc_address.attr,
+	NULL,
+};
+
+static const struct attribute_group aspeed_vuart_attr_group = {
+	.attrs = aspeed_vuart_attrs,
+};
+
+static void aspeed_vuart_set_enabled(struct aspeed_vuart *vuart, bool enabled)
+{
+	u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+
+	if (enabled)
+		reg |= ASPEED_VUART_GCRA_VUART_EN;
+	else
+		reg &= ~ASPEED_VUART_GCRA_VUART_EN;
+
+	writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+}
+
+static void aspeed_vuart_set_host_tx_discard(struct aspeed_vuart *vuart,
+					     bool discard)
+{
+	u8 reg;
+
+	reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+
+	/* If the DISABLE_HOST_TX_DISCARD bit is set, discard is disabled */
+	if (!discard)
+		reg |= ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD;
+	else
+		reg &= ~ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD;
+
+	writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+}
+
+static int aspeed_vuart_startup(struct uart_port *uart_port)
+{
+	struct uart_8250_port *uart_8250_port = up_to_u8250p(uart_port);
+	struct aspeed_vuart *vuart = uart_8250_port->port.private_data;
+	int rc;
+
+	rc = serial8250_do_startup(uart_port);
+	if (rc)
+		return rc;
+
+	aspeed_vuart_set_host_tx_discard(vuart, false);
+
+	return 0;
+}
+
+static void aspeed_vuart_shutdown(struct uart_port *uart_port)
+{
+	struct uart_8250_port *uart_8250_port = up_to_u8250p(uart_port);
+	struct aspeed_vuart *vuart = uart_8250_port->port.private_data;
+
+	aspeed_vuart_set_host_tx_discard(vuart, true);
+
+	serial8250_do_shutdown(uart_port);
+}
+
+static int aspeed_vuart_probe(struct platform_device *pdev)
+{
+	struct uart_8250_port port;
+	struct aspeed_vuart *vuart;
+	struct device_node *np;
+	struct resource *res;
+	u32 clk, prop;
+	int rc;
+
+	np = pdev->dev.of_node;
+
+	vuart = devm_kzalloc(&pdev->dev, sizeof(*vuart), GFP_KERNEL);
+	if (!vuart)
+		return -ENOMEM;
+
+	vuart->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	vuart->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(vuart->regs))
+		return PTR_ERR(vuart->regs);
+
+	memset(&port, 0, sizeof(port));
+	port.port.private_data = vuart;
+	port.port.membase = vuart->regs;
+	port.port.mapbase = res->start;
+	port.port.mapsize = resource_size(res);
+	port.port.startup = aspeed_vuart_startup;
+	port.port.shutdown = aspeed_vuart_shutdown;
+	port.port.dev = &pdev->dev;
+
+	rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
+	if (rc < 0)
+		return rc;
+
+	if (of_property_read_u32(np, "clock-frequency", &clk)) {
+		vuart->clk = devm_clk_get(&pdev->dev, NULL);
+		if (IS_ERR(vuart->clk)) {
+			dev_warn(&pdev->dev,
+				"clk or clock-frequency not defined\n");
+			return PTR_ERR(vuart->clk);
+		}
+
+		rc = clk_prepare_enable(vuart->clk);
+		if (rc < 0)
+			return rc;
+
+		clk = clk_get_rate(vuart->clk);
+	}
+
+	/* If current-speed was set, then try not to change it. */
+	if (of_property_read_u32(np, "current-speed", &prop) == 0)
+		port.port.custom_divisor = clk / (16 * prop);
+
+	/* Check for shifted address mapping */
+	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+		port.port.mapbase += prop;
+
+	/* Check for register offset within the device's address range */
+	if (of_property_read_u32(np, "reg-shift", &prop) == 0)
+		port.port.regshift = prop;
+
+	/* Check for fifo size */
+	if (of_property_read_u32(np, "fifo-size", &prop) == 0)
+		port.port.fifosize = prop;
+
+	/* Check for a fixed line number */
+	rc = of_alias_get_id(np, "serial");
+	if (rc >= 0)
+		port.port.line = rc;
+
+	port.port.irq = irq_of_parse_and_map(np, 0);
+	port.port.irqflags = IRQF_SHARED;
+	port.port.iotype = UPIO_MEM;
+	port.port.type = PORT_16550A;
+	port.port.uartclk = clk;
+	port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
+		| UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST;
+
+	if (of_property_read_bool(np, "no-loopback-test"))
+		port.port.flags |= UPF_SKIP_TEST;
+
+	if (port.port.fifosize)
+		port.capabilities = UART_CAP_FIFO;
+
+	if (of_property_read_bool(np, "auto-flow-control"))
+		port.capabilities |= UART_CAP_AFE;
+
+	rc = serial8250_register_8250_port(&port);
+	if (rc < 0)
+		goto err_clk_disable;
+
+	vuart->line = rc;
+
+	aspeed_vuart_set_enabled(vuart, true);
+	aspeed_vuart_set_host_tx_discard(vuart, true);
+	platform_set_drvdata(pdev, vuart);
+
+	return 0;
+
+err_clk_disable:
+	clk_disable_unprepare(vuart->clk);
+	irq_dispose_mapping(port.port.irq);
+	return rc;
+}
+
+static int aspeed_vuart_remove(struct platform_device *pdev)
+{
+	struct aspeed_vuart *vuart = platform_get_drvdata(pdev);
+
+	aspeed_vuart_set_enabled(vuart, false);
+	serial8250_unregister_port(vuart->line);
+	sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
+	clk_disable_unprepare(vuart->clk);
+
+	return 0;
+}
+
+static const struct of_device_id aspeed_vuart_table[] = {
+	{ .compatible = "aspeed,ast2400-vuart" },
+	{ .compatible = "aspeed,ast2500-vuart" },
+	{ },
+};
+
+static struct platform_driver aspeed_vuart_driver = {
+	.driver = {
+		.name = "aspeed-vuart",
+		.of_match_table = aspeed_vuart_table,
+	},
+	.probe = aspeed_vuart_probe,
+	.remove = aspeed_vuart_remove,
+};
+
+module_platform_driver(aspeed_vuart_driver);
+
+MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for Aspeed VUART device");
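For reference, a minimal sketch (not part of the patch) of how BMC userspace might program the lpc_address and sirq attributes created above. The "1e787000.vuart" device name is only an assumed example and depends on the board's devicetree:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Assumed instance name; taken from an example devicetree, not the patch. */
	const char *base =
		"/sys/bus/platform/drivers/aspeed-vuart/1e787000.vuart";
	char path[256];

	/* Expose the host side at the conventional 0x3f8 / SIRQ 4. */
	snprintf(path, sizeof(path), "%s/lpc_address", base);
	if (write_attr(path, "0x3f8"))
		return 1;

	snprintf(path, sizeof(path), "%s/sirq", base);
	if (write_attr(path, "4"))
		return 1;

	return 0;
}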
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index e10f124..a23c7da 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -39,7 +39,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
 
 	/* initialize data */
 	spin_lock_init(&data->uart.port.lock);
-	data->uart.capabilities = UART_CAP_FIFO;
+	data->uart.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
 	data->uart.port.dev = &pdev->dev;
 	data->uart.port.regshift = 2;
 	data->uart.port.type = PORT_16550;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 1aab301..b5def35 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,24 +1043,13 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 		if (up->dl_write)
 			uart->dl_write = up->dl_write;
 
-		if (uart->port.type != PORT_8250_CIR) {
-			if (serial8250_isa_config != NULL)
-				serial8250_isa_config(0, &uart->port,
-						&uart->capabilities);
+		if (serial8250_isa_config != NULL)
+			serial8250_isa_config(0, &uart->port,
+					&uart->capabilities);
 
-			ret = uart_add_one_port(&serial8250_reg,
-						&uart->port);
-			if (ret == 0)
-				ret = uart->port.line;
-		} else {
-			dev_info(uart->port.dev,
-				"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
-				uart->port.iobase,
-				(unsigned long long)uart->port.mapbase,
-				uart->port.irq);
-
-			ret = 0;
-		}
+		ret = uart_add_one_port(&serial8250_reg, &uart->port);
+		if (ret == 0)
+			ret = uart->port.line;
 	}
 	mutex_unlock(&serial_mutex);
 
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 1270ff1..a309bcf 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -109,7 +109,6 @@ pci_fastcom335_setup(struct exar8250 *priv, struct pci_dev *pcidev,
 	u8 __iomem *p;
 	int err;
 
-	port->port.flags |= UPF_EXAR_EFR;
 	port->port.uartclk = baud * 16;
 
 	err = default_setup(priv, pcidev, idx, offset, port);
@@ -171,19 +170,26 @@ pci_xr17c154_setup(struct exar8250 *priv, struct pci_dev *pcidev,
 	return default_setup(priv, pcidev, idx, offset, port);
 }
 
-static void setup_gpio(u8 __iomem *p)
+static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
 {
+	/*
+	 * The Commtech adapters required the MPIOs to be driven low. The Exar
+	 * devices will export them as GPIOs, so we pre-configure them safely
+	 * as inputs.
+	 */
+	u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00;
+
 	writeb(0x00, p + UART_EXAR_MPIOINT_7_0);
 	writeb(0x00, p + UART_EXAR_MPIOLVL_7_0);
 	writeb(0x00, p + UART_EXAR_MPIO3T_7_0);
 	writeb(0x00, p + UART_EXAR_MPIOINV_7_0);
-	writeb(0x00, p + UART_EXAR_MPIOSEL_7_0);
+	writeb(dir,  p + UART_EXAR_MPIOSEL_7_0);
 	writeb(0x00, p + UART_EXAR_MPIOOD_7_0);
 	writeb(0x00, p + UART_EXAR_MPIOINT_15_8);
 	writeb(0x00, p + UART_EXAR_MPIOLVL_15_8);
 	writeb(0x00, p + UART_EXAR_MPIO3T_15_8);
 	writeb(0x00, p + UART_EXAR_MPIOINV_15_8);
-	writeb(0x00, p + UART_EXAR_MPIOSEL_15_8);
+	writeb(dir,  p + UART_EXAR_MPIOSEL_15_8);
 	writeb(0x00, p + UART_EXAR_MPIOOD_15_8);
 }
 
@@ -236,7 +242,7 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
 
 	if (idx == 0) {
 		/* Setup Multipurpose Input/Output pins. */
-		setup_gpio(p);
+		setup_gpio(pcidev, p);
 
 		port->port.private_data = xr17v35x_register_gpio(pcidev);
 	}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 1cbadaf..0cf95fd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -19,11 +19,13 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
+#include <linux/reset.h>
 
 #include "8250.h"
 
 struct of_serial_info {
 	struct clk *clk;
+	struct reset_control *rst;
 	int type;
 	int line;
 };
@@ -132,6 +134,13 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
 		}
 	}
 
+	info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
+	if (IS_ERR(info->rst))
+		goto out;
+	ret = reset_control_deassert(info->rst);
+	if (ret)
+		goto out;
+
 	port->type = type;
 	port->uartclk = clk;
 	port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
@@ -229,6 +238,7 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
 
 	serial8250_unregister_port(info->line);
 
+	reset_control_assert(info->rst);
 	if (info->clk)
 		clk_disable_unprepare(info->clk);
 	kfree(info);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e7e6491..833771b 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -613,6 +613,10 @@ static int omap_8250_startup(struct uart_port *port)
 	up->lsr_saved_flags = 0;
 	up->msr_saved_flags = 0;
 
+	/* Disable DMA for console UART */
+	if (uart_console(port))
+		up->dma = NULL;
+
 	if (up->dma) {
 		ret = serial8250_request_dma(up);
 		if (ret) {
@@ -782,8 +786,27 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
 
 static void __dma_rx_complete(void *param)
 {
-	__dma_rx_do_complete(param);
-	omap_8250_rx_dma(param);
+	struct uart_8250_port *p = param;
+	struct uart_8250_dma *dma = p->dma;
+	struct dma_tx_state     state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->port.lock, flags);
+
+	/*
+	 * If the tx status is not DMA_COMPLETE, then this is a delayed
+	 * completion callback. A previous RX timeout flush would have
+	 * already pushed the data, so exit.
+	 */
+	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
+			DMA_COMPLETE) {
+		spin_unlock_irqrestore(&p->port.lock, flags);
+		return;
+	}
+	__dma_rx_do_complete(p);
+	omap_8250_rx_dma(p);
+
+	spin_unlock_irqrestore(&p->port.lock, flags);
 }
 
 static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 68fd045..a5fe0e6 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1764,6 +1764,10 @@ void serial8250_tx_chars(struct uart_8250_port *up)
 		if ((up->capabilities & UART_CAP_HFIFO) &&
 		    (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
 			break;
+		/* The BCM2835 MINI UART THRE bit is really a not-full bit. */
+		if ((up->capabilities & UART_CAP_MINI) &&
+		    !(serial_in(up, UART_LSR) & UART_LSR_THRE))
+			break;
 	} while (--count > 0);
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -2228,7 +2232,7 @@ int serial8250_do_startup(struct uart_port *port)
 		}
 	}
 
-	if (port->irq) {
+	if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
 		unsigned char iir1;
 		/*
 		 * Test for UARTs that do not reassert THRE when the
@@ -2585,6 +2589,12 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
 	unsigned long flags;
 	unsigned int baud, quot, frac = 0;
 
+	if (up->capabilities & UART_CAP_MINI) {
+		termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CMSPAR);
+		if ((termios->c_cflag & CSIZE) == CS5 ||
+		    (termios->c_cflag & CSIZE) == CS6)
+			termios->c_cflag = (termios->c_cflag & ~CSIZE) | CS7;
+	}
 	cval = serial8250_compute_lcr(up, termios->c_cflag);
 
 	baud = serial8250_get_baud_rate(port, termios, old);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 0e3f529..a1161ec 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -224,6 +224,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called 8250_accent.
 
+config SERIAL_8250_ASPEED_VUART
+	tristate "Aspeed Virtual UART"
+	depends on SERIAL_8250
+	depends on OF
+	help
+	  If you want to use the virtual UART (VUART) device on Aspeed
+	  BMC platforms, enable this option. This enables the 16550A-
+	  compatible device on the local LPC bus, giving a UART device
+	  with no physical RS232 connections.
+
 config SERIAL_8250_BOCA
 	tristate "Support Boca cards"
 	depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 2f30f9e..a44a99a 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_SERIAL_8250_HP300)		+= 8250_hp300.o
 obj-$(CONFIG_SERIAL_8250_CS)		+= serial_cs.o
 obj-$(CONFIG_SERIAL_8250_ACORN)		+= 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_ASPEED_VUART)	+= 8250_aspeed_vuart.o
 obj-$(CONFIG_SERIAL_8250_BCM2835AUX)	+= 8250_bcm2835aux.o
 obj-$(CONFIG_SERIAL_8250_CONSOLE)	+= 8250_early.o
 obj-$(CONFIG_SERIAL_8250_FOURPORT)	+= 8250_fourport.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5c8850f..1f096e2 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -114,32 +114,32 @@
 	  If unsure, say Y.
 
 config SERIAL_ATMEL
-	bool "AT91 / AT32 on-chip serial port support"
+	bool "AT91 on-chip serial port support"
 	depends on HAS_DMA
-	depends on ARCH_AT91 || AVR32 || COMPILE_TEST
+	depends on ARCH_AT91 || COMPILE_TEST
 	select SERIAL_CORE
 	select SERIAL_MCTRL_GPIO if GPIOLIB
 	help
 	  This enables the driver for the on-chip UARTs of the Atmel
-	  AT91 and AT32 processors.
+	  AT91 processors.
 
 config SERIAL_ATMEL_CONSOLE
-	bool "Support for console on AT91 / AT32 serial port"
+	bool "Support for console on AT91 serial port"
 	depends on SERIAL_ATMEL=y
 	select SERIAL_CORE_CONSOLE
 	help
 	  Say Y here if you wish to use an on-chip UART on an Atmel
-	  AT91 or AT32 processor as the system console (the system
+	  AT91 processor as the system console (the system
 	  console is the device which receives all kernel messages and
 	  warnings and which allows logins in single user mode).
 
 config SERIAL_ATMEL_PDC
-	bool "Support DMA transfers on AT91 / AT32 serial port"
+	bool "Support DMA transfers on AT91 serial port"
 	depends on SERIAL_ATMEL
 	default y
 	help
 	  Say Y here if you wish to use the PDC to do DMA transfers to
-	  and from the Atmel AT91 / AT32 serial port. In order to
+	  and from the Atmel AT91 serial port. In order to
 	  actually use DMA transfers, make sure that the use_dma_tx
 	  and use_dma_rx members in the atmel_uart_data struct are set
 	  appropriately for each port.
@@ -152,7 +152,7 @@
 	bool "Install as device ttyATn instead of ttySn"
 	depends on SERIAL_ATMEL=y
 	help
-	  Say Y here if you wish to have the internal AT91 / AT32 UARTs
+	  Say Y here if you wish to have the internal AT91 UARTs
 	  appear as /dev/ttyATn (major 204, minor starting at 154)
 	  instead of the normal /dev/ttySn (major 4, minor starting at
 	  64). This is necessary if you also want other UARTs, such as
@@ -1688,6 +1688,25 @@
 	  and warnings and which allows logins in single user mode)
 	  Otherwise, say 'N'.
 
+config SERIAL_OWL
+	bool "Actions Semi Owl serial port support"
+	depends on ARCH_ACTIONS || COMPILE_TEST
+	select SERIAL_CORE
+	help
+	  This driver is for Actions Semiconductor S500/S900 SoC's UART.
+	  Say 'Y' here if you wish to use the on-board serial port.
+	  Otherwise, say 'N'.
+
+config SERIAL_OWL_CONSOLE
+	bool "Console on Actions Semi Owl serial port"
+	depends on SERIAL_OWL=y
+	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
+	default y
+	help
+	  Say 'Y' here if you wish to use Actions Semiconductor S500/S900 UART
+	  as the system console. Only earlycon is implemented currently.
+
 endmenu
 
 config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 53c03e0..fe88a75 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -92,6 +92,7 @@
 obj-$(CONFIG_SERIAL_MVEBU_UART)	+= mvebu-uart.o
 obj-$(CONFIG_SERIAL_PIC32)	+= pic32_uart.o
 obj-$(CONFIG_SERIAL_MPS2_UART)	+= mps2-uart.o
+obj-$(CONFIG_SERIAL_OWL)	+= owl-uart.o
 
 # GPIOLIB helpers for modem control lines
 obj-$(CONFIG_SERIAL_MCTRL_GPIO)	+= serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index f2f2510..24180ad 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -697,6 +697,7 @@ static struct console amba_console = {
 #define AMBA_CONSOLE	NULL
 #endif
 
+static DEFINE_MUTEX(amba_reg_lock);
 static struct uart_driver amba_reg = {
 	.owner			= THIS_MODULE,
 	.driver_name		= "ttyAM",
@@ -749,6 +750,19 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
 	amba_ports[i] = uap;
 
 	amba_set_drvdata(dev, uap);
+
+	mutex_lock(&amba_reg_lock);
+	if (!amba_reg.state) {
+		ret = uart_register_driver(&amba_reg);
+		if (ret < 0) {
+			mutex_unlock(&amba_reg_lock);
+			dev_err(uap->port.dev,
+				"Failed to register AMBA-PL010 driver\n");
+			return ret;
+		}
+	}
+	mutex_unlock(&amba_reg_lock);
+
 	ret = uart_add_one_port(&amba_reg, &uap->port);
 	if (ret)
 		amba_ports[i] = NULL;
@@ -760,12 +774,18 @@ static int pl010_remove(struct amba_device *dev)
 {
 	struct uart_amba_port *uap = amba_get_drvdata(dev);
 	int i;
+	bool busy = false;
 
 	uart_remove_one_port(&amba_reg, &uap->port);
 
 	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
 		if (amba_ports[i] == uap)
 			amba_ports[i] = NULL;
+		else if (amba_ports[i])
+			busy = true;
+
+	if (!busy)
+		uart_unregister_driver(&amba_reg);
 
 	return 0;
 }
@@ -816,23 +836,14 @@ static struct amba_driver pl010_driver = {
 
 static int __init pl010_init(void)
 {
-	int ret;
-
 	printk(KERN_INFO "Serial: AMBA driver\n");
 
-	ret = uart_register_driver(&amba_reg);
-	if (ret == 0) {
-		ret = amba_driver_register(&pl010_driver);
-		if (ret)
-			uart_unregister_driver(&amba_reg);
-	}
-	return ret;
+	return  amba_driver_register(&pl010_driver);
 }
 
 static void __exit pl010_exit(void)
 {
 	amba_driver_unregister(&pl010_driver);
-	uart_unregister_driver(&amba_reg);
 }
 
 module_init(pl010_init);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index c355ac9..7551cab 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1,5 +1,5 @@
 /*
- *  Driver for Atmel AT91 / AT32 Serial ports
+ *  Driver for Atmel AT91 Serial ports
  *  Copyright (C) 2003 Rick Bronson
  *
  *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
@@ -46,6 +46,7 @@
 #include <linux/err.h>
 #include <linux/irq.h>
 #include <linux/suspend.h>
+#include <linux/mm.h>
 
 #include <asm/io.h>
 #include <asm/ioctls.h>
@@ -118,7 +119,6 @@ struct atmel_uart_char {
 
 /*
  * at91: 6 USARTs and one DBGU port (SAM9260)
- * avr32: 4
  * samx7: 3 USARTs and 5 UARTs
  */
 #define ATMEL_MAX_UART		8
@@ -228,21 +228,6 @@ static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
 	__raw_writel(value, port->membase + reg);
 }
 
-#ifdef CONFIG_AVR32
-
-/* AVR32 cannot handle 8 or 16bit I/O accesses but only 32bit I/O accesses */
-static inline u8 atmel_uart_read_char(struct uart_port *port)
-{
-	return __raw_readl(port->membase + ATMEL_US_RHR);
-}
-
-static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
-{
-	__raw_writel(value, port->membase + ATMEL_US_THR);
-}
-
-#else
-
 static inline u8 atmel_uart_read_char(struct uart_port *port)
 {
 	return __raw_readb(port->membase + ATMEL_US_RHR);
@@ -253,8 +238,6 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
 	__raw_writeb(value, port->membase + ATMEL_US_THR);
 }
 
-#endif
-
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -959,7 +942,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
 	sg_set_page(&atmel_port->sg_tx,
 			virt_to_page(port->state->xmit.buf),
 			UART_XMIT_SIZE,
-			(unsigned long)port->state->xmit.buf & ~PAGE_MASK);
+			offset_in_page(port->state->xmit.buf));
 	nent = dma_map_sg(port->dev,
 				&atmel_port->sg_tx,
 				1,
@@ -1141,7 +1124,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
 	sg_set_page(&atmel_port->sg_rx,
 		    virt_to_page(ring->buf),
 		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
-		    (unsigned long)ring->buf & ~PAGE_MASK);
+		    offset_in_page(ring->buf));
 	nent = dma_map_sg(port->dev,
 			  &atmel_port->sg_rx,
 			  1,
@@ -1655,72 +1638,56 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
 				struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
 
-	if (np) {
-		/* DMA/PDC usage specification */
-		if (of_property_read_bool(np, "atmel,use-dma-rx")) {
-			if (of_property_read_bool(np, "dmas")) {
-				atmel_port->use_dma_rx  = true;
-				atmel_port->use_pdc_rx  = false;
-			} else {
-				atmel_port->use_dma_rx  = false;
-				atmel_port->use_pdc_rx  = true;
-			}
+	/* DMA/PDC usage specification */
+	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
+		if (of_property_read_bool(np, "dmas")) {
+			atmel_port->use_dma_rx  = true;
+			atmel_port->use_pdc_rx  = false;
 		} else {
 			atmel_port->use_dma_rx  = false;
-			atmel_port->use_pdc_rx  = false;
+			atmel_port->use_pdc_rx  = true;
 		}
-
-		if (of_property_read_bool(np, "atmel,use-dma-tx")) {
-			if (of_property_read_bool(np, "dmas")) {
-				atmel_port->use_dma_tx  = true;
-				atmel_port->use_pdc_tx  = false;
-			} else {
-				atmel_port->use_dma_tx  = false;
-				atmel_port->use_pdc_tx  = true;
-			}
-		} else {
-			atmel_port->use_dma_tx  = false;
-			atmel_port->use_pdc_tx  = false;
-		}
-
 	} else {
-		atmel_port->use_pdc_rx  = pdata->use_dma_rx;
-		atmel_port->use_pdc_tx  = pdata->use_dma_tx;
 		atmel_port->use_dma_rx  = false;
-		atmel_port->use_dma_tx  = false;
+		atmel_port->use_pdc_rx  = false;
 	}
 
+	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
+		if (of_property_read_bool(np, "dmas")) {
+			atmel_port->use_dma_tx  = true;
+			atmel_port->use_pdc_tx  = false;
+		} else {
+			atmel_port->use_dma_tx  = false;
+			atmel_port->use_pdc_tx  = true;
+		}
+	} else {
+		atmel_port->use_dma_tx  = false;
+		atmel_port->use_pdc_tx  = false;
+	}
 }
 
 static void atmel_init_rs485(struct uart_port *port,
 				struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
 
-	if (np) {
-		struct serial_rs485 *rs485conf = &port->rs485;
-		u32 rs485_delay[2];
-		/* rs485 properties */
-		if (of_property_read_u32_array(np, "rs485-rts-delay",
-					rs485_delay, 2) == 0) {
-			rs485conf->delay_rts_before_send = rs485_delay[0];
-			rs485conf->delay_rts_after_send = rs485_delay[1];
-			rs485conf->flags = 0;
-		}
+	struct serial_rs485 *rs485conf = &port->rs485;
+	u32 rs485_delay[2];
 
-		if (of_get_property(np, "rs485-rx-during-tx", NULL))
-			rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
-		if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
-								NULL))
-			rs485conf->flags |= SER_RS485_ENABLED;
-	} else {
-		port->rs485       = pdata->rs485;
+	/* rs485 properties */
+	if (of_property_read_u32_array(np, "rs485-rts-delay",
+				       rs485_delay, 2) == 0) {
+		rs485conf->delay_rts_before_send = rs485_delay[0];
+		rs485conf->delay_rts_after_send = rs485_delay[1];
+		rs485conf->flags = 0;
 	}
 
+	if (of_get_property(np, "rs485-rx-during-tx", NULL))
+		rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+	if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
+		rs485conf->flags |= SER_RS485_ENABLED;
 }
 
 static void atmel_set_ops(struct uart_port *port)
@@ -2402,7 +2369,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
 {
 	int ret;
 	struct uart_port *port = &atmel_port->uart;
-	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
 
 	atmel_init_property(atmel_port, pdev);
 	atmel_set_ops(port);
@@ -2410,24 +2376,17 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
 	atmel_init_rs485(port, pdev);
 
 	port->iotype		= UPIO_MEM;
-	port->flags		= UPF_BOOT_AUTOCONF;
+	port->flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP;
 	port->ops		= &atmel_pops;
 	port->fifosize		= 1;
 	port->dev		= &pdev->dev;
 	port->mapbase	= pdev->resource[0].start;
 	port->irq	= pdev->resource[1].start;
 	port->rs485_config	= atmel_config_rs485;
+	port->membase	= NULL;
 
 	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
 
-	if (pdata && pdata->regs) {
-		/* Already mapped by setup code */
-		port->membase = pdata->regs;
-	} else {
-		port->flags	|= UPF_IOREMAP;
-		port->membase	= NULL;
-	}
-
 	/* for console, the clock could already be configured */
 	if (!atmel_port->clk) {
 		atmel_port->clk = clk_get(&pdev->dev, "usart");
@@ -2460,8 +2419,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
 	return 0;
 }
 
-struct platform_device *atmel_default_console_device;	/* the serial console device */
-
 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
 static void atmel_console_putchar(struct uart_port *port, int ch)
 {
@@ -2594,47 +2551,6 @@ static struct console atmel_console = {
 
 #define ATMEL_CONSOLE_DEVICE	(&atmel_console)
 
-/*
- * Early console initialization (before VM subsystem initialized).
- */
-static int __init atmel_console_init(void)
-{
-	int ret;
-	if (atmel_default_console_device) {
-		struct atmel_uart_data *pdata =
-			dev_get_platdata(&atmel_default_console_device->dev);
-		int id = pdata->num;
-		struct atmel_uart_port *atmel_port = &atmel_ports[id];
-
-		atmel_port->backup_imr = 0;
-		atmel_port->uart.line = id;
-
-		add_preferred_console(ATMEL_DEVICENAME, id, NULL);
-		ret = atmel_init_port(atmel_port, atmel_default_console_device);
-		if (ret)
-			return ret;
-		register_console(&atmel_console);
-	}
-
-	return 0;
-}
-
-console_initcall(atmel_console_init);
-
-/*
- * Late console initialization.
- */
-static int __init atmel_late_console_init(void)
-{
-	if (atmel_default_console_device
-	    && !(atmel_console.flags & CON_ENABLED))
-		register_console(&atmel_console);
-
-	return 0;
-}
-
-core_initcall(atmel_late_console_init);
-
 static inline bool atmel_is_console_port(struct uart_port *port)
 {
 	return port->cons && port->cons->index == port->line;
@@ -2804,19 +2720,13 @@ static int atmel_serial_probe(struct platform_device *pdev)
 {
 	struct atmel_uart_port *atmel_port;
 	struct device_node *np = pdev->dev.of_node;
-	struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
 	void *data;
 	int ret = -ENODEV;
 	bool rs485_enabled;
 
 	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
 
-	if (np)
-		ret = of_alias_get_id(np, "serial");
-	else
-		if (pdata)
-			ret = pdata->num;
-
+	ret = of_alias_get_id(np, "serial");
 	if (ret < 0)
 		/* port id not found in platform data nor device-tree aliases:
 		 * auto-enumerate it */
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 15df1ba7..343de8c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -140,6 +140,8 @@
 #define UARTBAUD_SBNS		0x00002000
 #define UARTBAUD_SBR		0x00000000
 #define UARTBAUD_SBR_MASK	0x1fff
+#define UARTBAUD_OSR_MASK       0x1f
+#define UARTBAUD_OSR_SHIFT      24
 
 #define UARTSTAT_LBKDIF		0x80000000
 #define UARTSTAT_RXEDGIF	0x40000000
@@ -231,12 +233,14 @@
 #define DEV_NAME	"ttyLP"
 #define UART_NR		6
 
+/* IMX lpuart has four extra unused regs located at the beginning */
+#define IMX_REG_OFF	0x10
+
 struct lpuart_port {
 	struct uart_port	port;
 	struct clk		*clk;
 	unsigned int		txfifo_size;
 	unsigned int		rxfifo_size;
-	bool			lpuart32;
 
 	bool			lpuart_dma_tx_use;
 	bool			lpuart_dma_rx_use;
@@ -258,13 +262,28 @@ struct lpuart_port {
 	wait_queue_head_t	dma_wait;
 };
 
+struct lpuart_soc_data {
+	char	iotype;
+	u8	reg_off;
+};
+
+static const struct lpuart_soc_data vf_data = {
+	.iotype = UPIO_MEM,
+};
+
+static const struct lpuart_soc_data ls_data = {
+	.iotype = UPIO_MEM32BE,
+};
+
+static struct lpuart_soc_data imx_data = {
+	.iotype = UPIO_MEM32,
+	.reg_off = IMX_REG_OFF,
+};
+
 static const struct of_device_id lpuart_dt_ids[] = {
-	{
-		.compatible = "fsl,vf610-lpuart",
-	},
-	{
-		.compatible = "fsl,ls1021a-lpuart",
-	},
+	{ .compatible = "fsl,vf610-lpuart",	.data = &vf_data, },
+	{ .compatible = "fsl,ls1021a-lpuart",	.data = &ls_data, },
+	{ .compatible = "fsl,imx7ulp-lpuart",	.data = &imx_data, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
@@ -272,14 +291,29 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
 /* Forward declare this for the dma callbacks*/
 static void lpuart_dma_tx_complete(void *arg);
 
-static u32 lpuart32_read(void __iomem *addr)
+static inline u32 lpuart32_read(struct uart_port *port, u32 off)
 {
-	return ioread32be(addr);
+	switch (port->iotype) {
+	case UPIO_MEM32:
+		return readl(port->membase + off);
+	case UPIO_MEM32BE:
+		return ioread32be(port->membase + off);
+	default:
+		return 0;
+	}
 }
 
-static void lpuart32_write(u32 val, void __iomem *addr)
+static inline void lpuart32_write(struct uart_port *port, u32 val,
+				  u32 off)
 {
-	iowrite32be(val, addr);
+	switch (port->iotype) {
+	case UPIO_MEM32:
+		writel(val, port->membase + off);
+		break;
+	case UPIO_MEM32BE:
+		iowrite32be(val, port->membase + off);
+		break;
+	}
 }
 
 static void lpuart_stop_tx(struct uart_port *port)
@@ -295,9 +329,9 @@ static void lpuart32_stop_tx(struct uart_port *port)
 {
 	unsigned long temp;
 
-	temp = lpuart32_read(port->membase + UARTCTRL);
+	temp = lpuart32_read(port, UARTCTRL);
 	temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
-	lpuart32_write(temp, port->membase + UARTCTRL);
+	lpuart32_write(port, temp, UARTCTRL);
 }
 
 static void lpuart_stop_rx(struct uart_port *port)
@@ -312,8 +346,8 @@ static void lpuart32_stop_rx(struct uart_port *port)
 {
 	unsigned long temp;
 
-	temp = lpuart32_read(port->membase + UARTCTRL);
-	lpuart32_write(temp & ~UARTCTRL_RE, port->membase + UARTCTRL);
+	temp = lpuart32_read(port, UARTCTRL);
+	lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
 }
 
 static void lpuart_dma_tx(struct lpuart_port *sport)
@@ -512,14 +546,14 @@ static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
 	struct circ_buf *xmit = &sport->port.state->xmit;
 	unsigned long txcnt;
 
-	txcnt = lpuart32_read(sport->port.membase + UARTWATER);
+	txcnt = lpuart32_read(&sport->port, UARTWATER);
 	txcnt = txcnt >> UARTWATER_TXCNT_OFF;
 	txcnt &= UARTWATER_COUNT_MASK;
 	while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) {
-		lpuart32_write(xmit->buf[xmit->tail], sport->port.membase + UARTDATA);
+		lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		sport->port.icount.tx++;
-		txcnt = lpuart32_read(sport->port.membase + UARTWATER);
+		txcnt = lpuart32_read(&sport->port, UARTWATER);
 		txcnt = txcnt >> UARTWATER_TXCNT_OFF;
 		txcnt &= UARTWATER_COUNT_MASK;
 	}
@@ -555,10 +589,10 @@ static void lpuart32_start_tx(struct uart_port *port)
 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
 	unsigned long temp;
 
-	temp = lpuart32_read(port->membase + UARTCTRL);
-	lpuart32_write(temp | UARTCTRL_TIE, port->membase + UARTCTRL);
+	temp = lpuart32_read(port, UARTCTRL);
+	lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
 
-	if (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TDRE)
+	if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
 		lpuart32_transmit_buffer(sport);
 }
 
@@ -581,7 +615,7 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
 
 static unsigned int lpuart32_tx_empty(struct uart_port *port)
 {
-	return (lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TC) ?
+	return (lpuart32_read(port, UARTSTAT) & UARTSTAT_TC) ?
 		TIOCSER_TEMT : 0;
 }
 
@@ -593,22 +627,22 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 	if (sport->port.x_char) {
-		if (sport->lpuart32)
-			lpuart32_write(sport->port.x_char, sport->port.membase + UARTDATA);
+		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
+			lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
 		else
 			writeb(sport->port.x_char, sport->port.membase + UARTDR);
 		goto out;
 	}
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
-		if (sport->lpuart32)
+		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 			lpuart32_stop_tx(&sport->port);
 		else
 			lpuart_stop_tx(&sport->port);
 		goto out;
 	}
 
-	if (sport->lpuart32)
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 		lpuart32_transmit_buffer(sport);
 	else
 		lpuart_transmit_buffer(sport);
@@ -694,15 +728,15 @@ static irqreturn_t lpuart32_rxint(int irq, void *dev_id)
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 
-	while (!(lpuart32_read(sport->port.membase + UARTFIFO) & UARTFIFO_RXEMPT)) {
+	while (!(lpuart32_read(&sport->port, UARTFIFO) & UARTFIFO_RXEMPT)) {
 		flg = TTY_NORMAL;
 		sport->port.icount.rx++;
 		/*
 		 * to clear the FE, OR, NF, FE, PE flags,
 		 * read STAT then read DATA reg
 		 */
-		sr = lpuart32_read(sport->port.membase + UARTSTAT);
-		rx = lpuart32_read(sport->port.membase + UARTDATA);
+		sr = lpuart32_read(&sport->port, UARTSTAT);
+		rx = lpuart32_read(&sport->port, UARTDATA);
 		rx &= 0x3ff;
 
 		if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
@@ -769,18 +803,18 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
 	struct lpuart_port *sport = dev_id;
 	unsigned long sts, rxcount;
 
-	sts = lpuart32_read(sport->port.membase + UARTSTAT);
-	rxcount = lpuart32_read(sport->port.membase + UARTWATER);
+	sts = lpuart32_read(&sport->port, UARTSTAT);
+	rxcount = lpuart32_read(&sport->port, UARTWATER);
 	rxcount = rxcount >> UARTWATER_RXCNT_OFF;
 
 	if (sts & UARTSTAT_RDRF || rxcount > 0)
 		lpuart32_rxint(irq, dev_id);
 
 	if ((sts & UARTSTAT_TDRE) &&
-		!(lpuart32_read(sport->port.membase + UARTBAUD) & UARTBAUD_TDMAE))
+		!(lpuart32_read(&sport->port, UARTBAUD) & UARTBAUD_TDMAE))
 		lpuart_txint(irq, dev_id);
 
-	lpuart32_write(sts, sport->port.membase + UARTSTAT);
+	lpuart32_write(&sport->port, sts, UARTSTAT);
 	return IRQ_HANDLED;
 }
 
@@ -1041,7 +1075,7 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
 	unsigned int temp = 0;
 	unsigned long reg;
 
-	reg = lpuart32_read(port->membase + UARTMODIR);
+	reg = lpuart32_read(port, UARTMODIR);
 	if (reg & UARTMODIR_TXCTSE)
 		temp |= TIOCM_CTS;
 
@@ -1076,7 +1110,7 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
 	unsigned long temp;
 
-	temp = lpuart32_read(port->membase + UARTMODIR) &
+	temp = lpuart32_read(port, UARTMODIR) &
 			~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
 
 	if (mctrl & TIOCM_RTS)
@@ -1085,7 +1119,7 @@ static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	if (mctrl & TIOCM_CTS)
 		temp |= UARTMODIR_TXCTSE;
 
-	lpuart32_write(temp, port->membase + UARTMODIR);
+	lpuart32_write(port, temp, UARTMODIR);
 }
 
 static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1104,12 +1138,12 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
 {
 	unsigned long temp;
 
-	temp = lpuart32_read(port->membase + UARTCTRL) & ~UARTCTRL_SBK;
+	temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
 
 	if (break_state != 0)
 		temp |= UARTCTRL_SBK;
 
-	lpuart32_write(temp, port->membase + UARTCTRL);
+	lpuart32_write(port, temp, UARTCTRL);
 }
 
 static void lpuart_setup_watermark(struct lpuart_port *sport)
@@ -1149,24 +1183,24 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
 	unsigned long val, ctrl;
 	unsigned long ctrl_saved;
 
-	ctrl = lpuart32_read(sport->port.membase + UARTCTRL);
+	ctrl = lpuart32_read(&sport->port, UARTCTRL);
 	ctrl_saved = ctrl;
 	ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_TE |
 			UARTCTRL_RIE | UARTCTRL_RE);
-	lpuart32_write(ctrl, sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, ctrl, UARTCTRL);
 
 	/* enable FIFO mode */
-	val = lpuart32_read(sport->port.membase + UARTFIFO);
+	val = lpuart32_read(&sport->port, UARTFIFO);
 	val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
 	val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
-	lpuart32_write(val, sport->port.membase + UARTFIFO);
+	lpuart32_write(&sport->port, val, UARTFIFO);
 
 	/* set the watermark */
 	val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
-	lpuart32_write(val, sport->port.membase + UARTWATER);
+	lpuart32_write(&sport->port, val, UARTWATER);
 
 	/* Restore cr2 */
-	lpuart32_write(ctrl_saved, sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
 }
 
 static void rx_dma_timer_init(struct lpuart_port *sport)
@@ -1242,7 +1276,7 @@ static int lpuart32_startup(struct uart_port *port)
 	unsigned long temp;
 
 	/* determine FIFO size */
-	temp = lpuart32_read(sport->port.membase + UARTFIFO);
+	temp = lpuart32_read(&sport->port, UARTFIFO);
 
 	sport->txfifo_size = 0x1 << (((temp >> UARTFIFO_TXSIZE_OFF) &
 		UARTFIFO_FIFOSIZE_MASK) - 1);
@@ -1259,10 +1293,10 @@ static int lpuart32_startup(struct uart_port *port)
 
 	lpuart32_setup_watermark(sport);
 
-	temp = lpuart32_read(sport->port.membase + UARTCTRL);
+	temp = lpuart32_read(&sport->port, UARTCTRL);
 	temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | UARTCTRL_TE);
 	temp |= UARTCTRL_ILIE;
-	lpuart32_write(temp, sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, temp, UARTCTRL);
 
 	spin_unlock_irqrestore(&sport->port.lock, flags);
 	return 0;
@@ -1311,10 +1345,10 @@ static void lpuart32_shutdown(struct uart_port *port)
 	spin_lock_irqsave(&port->lock, flags);
 
 	/* disable Rx/Tx and interrupts */
-	temp = lpuart32_read(port->membase + UARTCTRL);
+	temp = lpuart32_read(port, UARTCTRL);
 	temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
 			UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
-	lpuart32_write(temp, port->membase + UARTCTRL);
+	lpuart32_write(port, temp, UARTCTRL);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -1479,6 +1513,75 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
 }
 
 static void
+lpuart32_serial_setbrg(struct lpuart_port *sport, unsigned int baudrate)
+{
+	u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
+	u32 clk = sport->port.uartclk;
+
+	/*
+	 * The idea is to use the best OSR (over-sampling rate) possible.
+	 * Note, OSR is typically hard-set to 16 in other LPUART instantiations.
+	 * Loop through the remaining supported OSR values to find the one
+	 * that generates the minimum baud_diff.
+	 *
+	 * Calculation Formula:
+	 *  Baud Rate = baud clock / ((OSR+1) × SBR)
+	 */
+	baud_diff = baudrate;
+	osr = 0;
+	sbr = 0;
+
+	for (tmp_osr = 4; tmp_osr <= 32; tmp_osr++) {
+		/* calculate the temporary sbr value  */
+		tmp_sbr = (clk / (baudrate * tmp_osr));
+		if (tmp_sbr == 0)
+			tmp_sbr = 1;
+
+		/*
+		 * calculate the baud rate difference based on the temporary
+		 * osr and sbr values
+		 */
+		tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
+
+		/* select best values between sbr and sbr+1 */
+		tmp = clk / (tmp_osr * (tmp_sbr + 1));
+		if (tmp_diff > (baudrate - tmp)) {
+			tmp_diff = baudrate - tmp;
+			tmp_sbr++;
+		}
+
+		if (tmp_diff <= baud_diff) {
+			baud_diff = tmp_diff;
+			osr = tmp_osr;
+			sbr = tmp_sbr;
+
+			if (!baud_diff)
+				break;
+		}
+	}
+
+	/* handle a baud rate outside the acceptable range */
+	if (baud_diff > ((baudrate / 100) * 3))
+		dev_warn(sport->port.dev,
+			 "unacceptable baud rate difference of more than 3%%\n");
+
+	tmp = lpuart32_read(&sport->port, UARTBAUD);
+
+	if ((osr > 3) && (osr < 8))
+		tmp |= UARTBAUD_BOTHEDGE;
+
+	tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
+	tmp |= (((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT);
+
+	tmp &= ~UARTBAUD_SBR_MASK;
+	tmp |= sbr & UARTBAUD_SBR_MASK;
+
+	tmp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
+
+	lpuart32_write(&sport->port, tmp, UARTBAUD);
+}
+
+static void
 lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
 		   struct ktermios *old)
 {
@@ -1487,11 +1590,10 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
 	unsigned long ctrl, old_ctrl, bd, modem;
 	unsigned int  baud;
 	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
-	unsigned int sbr;
 
-	ctrl = old_ctrl = lpuart32_read(sport->port.membase + UARTCTRL);
-	bd = lpuart32_read(sport->port.membase + UARTBAUD);
-	modem = lpuart32_read(sport->port.membase + UARTMODIR);
+	ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
+	bd = lpuart32_read(&sport->port, UARTBAUD);
+	modem = lpuart32_read(&sport->port, UARTMODIR);
 	/*
 	 * only support CS8 and CS7, and for CS7 must enable PE.
 	 * supported mode:
@@ -1577,21 +1679,16 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
 	uart_update_timeout(port, termios->c_cflag, baud);
 
 	/* wait for the transmit engine to complete */
-	while (!(lpuart32_read(sport->port.membase + UARTSTAT) & UARTSTAT_TC))
+	while (!(lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_TC))
 		barrier();
 
 	/* disable transmit and receive */
-	lpuart32_write(old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
-			sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+		       UARTCTRL);
 
-	sbr = sport->port.uartclk / (16 * baud);
-	bd &= ~UARTBAUD_SBR_MASK;
-	bd |= sbr & UARTBAUD_SBR_MASK;
-	bd |= UARTBAUD_BOTHEDGE;
-	bd &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
-	lpuart32_write(bd, sport->port.membase + UARTBAUD);
-	lpuart32_write(modem, sport->port.membase + UARTMODIR);
-	lpuart32_write(ctrl, sport->port.membase + UARTCTRL);
+	lpuart32_serial_setbrg(sport, baud);
+	lpuart32_write(&sport->port, modem, UARTMODIR);
+	lpuart32_write(&sport->port, ctrl, UARTCTRL);
 	/* restore control register */
 
 	spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1694,10 +1791,10 @@ static void lpuart_console_putchar(struct uart_port *port, int ch)
 
 static void lpuart32_console_putchar(struct uart_port *port, int ch)
 {
-	while (!(lpuart32_read(port->membase + UARTSTAT) & UARTSTAT_TDRE))
+	while (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE))
 		barrier();
 
-	lpuart32_write(ch, port->membase + UARTDATA);
+	lpuart32_write(port, ch, UARTDATA);
 }
 
 static void
@@ -1745,18 +1842,18 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
 		spin_lock_irqsave(&sport->port.lock, flags);
 
 	/* first save CR2 and then disable interrupts */
-	cr = old_cr = lpuart32_read(sport->port.membase + UARTCTRL);
+	cr = old_cr = lpuart32_read(&sport->port, UARTCTRL);
 	cr |= (UARTCTRL_TE |  UARTCTRL_RE);
 	cr &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
-	lpuart32_write(cr, sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, cr, UARTCTRL);
 
 	uart_console_write(&sport->port, s, count, lpuart32_console_putchar);
 
 	/* wait for transmitter finish complete and restore CR2 */
-	while (!(lpuart32_read(sport->port.membase + UARTSTAT) & UARTSTAT_TC))
+	while (!(lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_TC))
 		barrier();
 
-	lpuart32_write(old_cr, sport->port.membase + UARTCTRL);
+	lpuart32_write(&sport->port, old_cr, UARTCTRL);
 
 	if (locked)
 		spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1822,14 +1919,14 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
 	unsigned long cr, bd;
 	unsigned int sbr, uartclk, baud_raw;
 
-	cr = lpuart32_read(sport->port.membase + UARTCTRL);
+	cr = lpuart32_read(&sport->port, UARTCTRL);
 	cr &= UARTCTRL_TE | UARTCTRL_RE;
 	if (!cr)
 		return;
 
 	/* ok, the port was enabled */
 
-	cr = lpuart32_read(sport->port.membase + UARTCTRL);
+	cr = lpuart32_read(&sport->port, UARTCTRL);
 
 	*parity = 'n';
 	if (cr & UARTCTRL_PE) {
@@ -1844,7 +1941,7 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
 	else
 		*bits = 8;
 
-	bd = lpuart32_read(sport->port.membase + UARTBAUD);
+	bd = lpuart32_read(&sport->port, UARTBAUD);
 	bd &= UARTBAUD_SBR_MASK;
 	sbr = bd;
 	uartclk = clk_get_rate(sport->clk);
@@ -1881,12 +1978,12 @@ static int __init lpuart_console_setup(struct console *co, char *options)
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 	else
-		if (sport->lpuart32)
+		if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 			lpuart32_console_get_options(sport, &baud, &parity, &bits);
 		else
 			lpuart_console_get_options(sport, &baud, &parity, &bits);
 
-	if (sport->lpuart32)
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 		lpuart32_setup_watermark(sport);
 	else
 		lpuart_setup_watermark(sport);
@@ -1945,12 +2042,26 @@ static int __init lpuart32_early_console_setup(struct earlycon_device *device,
 	if (!device->port.membase)
 		return -ENODEV;
 
+	device->port.iotype = UPIO_MEM32BE;
 	device->con->write = lpuart32_early_write;
 	return 0;
 }
 
+static int __init lpuart32_imx_early_console_setup(struct earlycon_device *device,
+						   const char *opt)
+{
+	if (!device->port.membase)
+		return -ENODEV;
+
+	device->port.iotype = UPIO_MEM32;
+	device->port.membase += IMX_REG_OFF;
+	device->con->write = lpuart32_early_write;
+
+	return 0;
+}
 OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
 EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
 EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
 
@@ -1971,6 +2082,9 @@ static struct uart_driver lpuart_reg = {
 
 static int lpuart_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id = of_match_device(lpuart_dt_ids,
+							   &pdev->dev);
+	const struct lpuart_soc_data *sdata = of_id->data;
 	struct device_node *np = pdev->dev.of_node;
 	struct lpuart_port *sport;
 	struct resource *res;
@@ -1988,25 +2102,23 @@ static int lpuart_probe(struct platform_device *pdev)
 		return ret;
 	}
 	sport->port.line = ret;
-	sport->lpuart32 = of_device_is_compatible(np, "fsl,ls1021a-lpuart");
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(sport->port.membase))
 		return PTR_ERR(sport->port.membase);
 
+	sport->port.membase += sdata->reg_off;
 	sport->port.mapbase = res->start;
 	sport->port.dev = &pdev->dev;
 	sport->port.type = PORT_LPUART;
-	sport->port.iotype = UPIO_MEM;
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "cannot obtain irq\n");
 		return ret;
 	}
 	sport->port.irq = ret;
-
-	if (sport->lpuart32)
+	sport->port.iotype = sdata->iotype;
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 		sport->port.ops = &lpuart32_pops;
 	else
 		sport->port.ops = &lpuart_pops;
@@ -2033,7 +2145,7 @@ static int lpuart_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, &sport->port);
 
-	if (sport->lpuart32)
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE))
 		lpuart_reg.cons = LPUART32_CONSOLE;
 	else
 		lpuart_reg.cons = LPUART_CONSOLE;
@@ -2086,11 +2198,11 @@ static int lpuart_suspend(struct device *dev)
 	struct lpuart_port *sport = dev_get_drvdata(dev);
 	unsigned long temp;
 
-	if (sport->lpuart32) {
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
 		/* disable Rx/Tx and interrupts */
-		temp = lpuart32_read(sport->port.membase + UARTCTRL);
+		temp = lpuart32_read(&sport->port, UARTCTRL);
 		temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
-		lpuart32_write(temp, sport->port.membase + UARTCTRL);
+		lpuart32_write(&sport->port, temp, UARTCTRL);
 	} else {
 		/* disable Rx/Tx and interrupts */
 		temp = readb(sport->port.membase + UARTCR2);
@@ -2137,12 +2249,12 @@ static int lpuart_resume(struct device *dev)
 	if (sport->port.suspended && !sport->port.irq_wake)
 		clk_prepare_enable(sport->clk);
 
-	if (sport->lpuart32) {
+	if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) {
 		lpuart32_setup_watermark(sport);
-		temp = lpuart32_read(sport->port.membase + UARTCTRL);
+		temp = lpuart32_read(&sport->port, UARTCTRL);
 		temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
 			 UARTCTRL_TE | UARTCTRL_ILIE);
-		lpuart32_write(temp, sport->port.membase + UARTCTRL);
+		lpuart32_write(&sport->port, temp, UARTCTRL);
 	} else {
 		lpuart_setup_watermark(sport);
 		temp = readb(sport->port.membase + UARTCR2);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index bbefddd..9e3162b 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -186,6 +186,11 @@
 
 #define UART_NR 8
 
+/* RX DMA buffer periods */
+#define RX_DMA_PERIODS 4
+#define RX_BUF_SIZE	(PAGE_SIZE)
+
+
 /* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
 enum imx_uart_type {
 	IMX1_UART,
@@ -207,9 +212,6 @@ struct imx_port {
 	unsigned int		have_rtscts:1;
 	unsigned int		have_rtsgpio:1;
 	unsigned int		dte_mode:1;
-	unsigned int		irda_inv_rx:1;
-	unsigned int		irda_inv_tx:1;
-	unsigned short		trcv_delay; /* transceiver delay */
 	struct clk		*clk_ipg;
 	struct clk		*clk_per;
 	const struct imx_uart_data *devdata;
@@ -224,6 +226,7 @@ struct imx_port {
 	struct dma_chan		*dma_chan_rx, *dma_chan_tx;
 	struct scatterlist	rx_sgl, tx_sgl[2];
 	void			*rx_buf;
+	unsigned int		rx_buf_size;
 	struct circ_buf		rx_ring;
 	unsigned int		rx_periods;
 	dma_cookie_t		rx_cookie;
@@ -964,8 +967,6 @@ static void imx_timeout(unsigned long data)
 	}
 }
 
-#define RX_BUF_SIZE	(PAGE_SIZE)
-
 /*
  * There are two kinds of RX DMA interrupts(such as in the MX6Q):
  *   [1] the RX DMA buffer is full.
@@ -1048,9 +1049,6 @@ static void dma_rx_callback(void *data)
 	}
 }
 
-/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
-
 static int start_rx_dma(struct imx_port *sport)
 {
 	struct scatterlist *sgl = &sport->rx_sgl;
@@ -1061,9 +1059,8 @@ static int start_rx_dma(struct imx_port *sport)
 
 	sport->rx_ring.head = 0;
 	sport->rx_ring.tail = 0;
-	sport->rx_periods = RX_DMA_PERIODS;
 
-	sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
+	sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size);
 	ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
 	if (ret == 0) {
 		dev_err(dev, "DMA mapping error for RX.\n");
@@ -1174,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
 		goto err;
 	}
 
-	sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL);
 	if (!sport->rx_buf) {
 		ret = -ENOMEM;
 		goto err;
@@ -1302,7 +1299,9 @@ static int imx_startup(struct uart_port *port)
 		imx_enable_dma(sport);
 
 	temp = readl(sport->port.membase + UCR1);
-	temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
+	temp |= UCR1_RRDYEN | UCR1_UARTEN;
+	if (sport->have_rtscts)
+		temp |= UCR1_RTSDEN;
 
 	writel(temp, sport->port.membase + UCR1);
 
@@ -1340,29 +1339,13 @@ static int imx_startup(struct uart_port *port)
 	imx_enable_ms(&sport->port);
 
 	/*
-	 * If the serial port is opened for reading start RX DMA immediately
-	 * instead of waiting for RX FIFO interrupts. In our iMX53 the average
-	 * delay for the first reception dropped from approximately 35000
-	 * microseconds to 1000 microseconds.
+	 * Start RX DMA immediately instead of waiting for RX FIFO interrupts.
+	 * In our iMX53 the average delay for the first reception dropped from
+	 * approximately 35000 microseconds to 1000 microseconds.
 	 */
 	if (sport->dma_is_enabled) {
-		struct tty_struct *tty = sport->port.state->port.tty;
-		struct tty_file_private *file_priv;
-		int readcnt = 0;
-
-		spin_lock(&tty->files_lock);
-
-		if (!list_empty(&tty->tty_files))
-			list_for_each_entry(file_priv, &tty->tty_files, list)
-				if (!(file_priv->file->f_flags & O_WRONLY))
-					readcnt++;
-
-		spin_unlock(&tty->files_lock);
-
-		if (readcnt > 0) {
-			imx_disable_rx_int(sport);
-			start_rx_dma(sport);
-		}
+		imx_disable_rx_int(sport);
+		start_rx_dma(sport);
 	}
 
 	spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -2053,6 +2036,7 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 {
 	struct device_node *np = pdev->dev.of_node;
 	int ret;
+	u32 dma_buf_size[2];
 
 	sport->devdata = of_device_get_match_data(&pdev->dev);
 	if (!sport->devdata)
@@ -2076,6 +2060,14 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 	if (of_get_property(np, "rts-gpios", NULL))
 		sport->have_rtsgpio = 1;
 
+	if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) {
+		sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1];
+		sport->rx_periods = dma_buf_size[1];
+	} else {
+		sport->rx_buf_size = RX_BUF_SIZE;
+		sport->rx_periods = RX_DMA_PERIODS;
+	}
+
 	return 0;
 }
 #else
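
As a worked example of the new optional property (the values are illustrative, not from the patch): a node carrying fsl,dma-size = <1024 4> yields rx_buf_size = 1024 * 4 = 4096 bytes arranged as 4 DMA periods, while omitting the property falls back to the previous defaults of RX_BUF_SIZE (one page) and RX_DMA_PERIODS (4).
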
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 60f1679..42e4a4c 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -286,7 +286,7 @@ static int meson_uart_startup(struct uart_port *port)
 	writel(val, port->membase + AML_UART_MISC);
 
 	ret = request_irq(port->irq, meson_uart_interrupt, 0,
-			  meson_uart_type(port), port);
+			  port->name, port);
 
 	return ret;
 }
@@ -298,8 +298,6 @@ static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
 	while (!meson_uart_tx_empty(port))
 		cpu_relax();
 
-	val = readl(port->membase + AML_UART_REG5);
-	val &= ~AML_UART_BAUD_MASK;
 	if (port->uartclk == 24000000) {
 		val = ((port->uartclk / 3) / baud) - 1;
 		val |= AML_UART_BAUD_XTAL;
@@ -355,7 +353,7 @@ static void meson_uart_set_termios(struct uart_port *port,
 	if (cflags & CSTOPB)
 		val |= AML_UART_STOP_BIN_2SB;
 	else
-		val &= ~AML_UART_STOP_BIN_1SB;
+		val |= AML_UART_STOP_BIN_1SB;
 
 	if (cflags & CRTSCTS)
 		val &= ~AML_UART_TWO_WIRE_EN;
@@ -395,51 +393,25 @@ static int meson_uart_verify_port(struct uart_port *port,
 	return ret;
 }
 
-static int meson_uart_res_size(struct uart_port *port)
-{
-	struct platform_device *pdev = to_platform_device(port->dev);
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(port->dev, "cannot obtain I/O memory region");
-		return -ENODEV;
-	}
-
-	return resource_size(res);
-}
-
 static void meson_uart_release_port(struct uart_port *port)
 {
-	int size = meson_uart_res_size(port);
-
-	if (port->flags & UPF_IOREMAP) {
-		devm_release_mem_region(port->dev, port->mapbase, size);
-		devm_iounmap(port->dev, port->membase);
-		port->membase = NULL;
-	}
+	devm_iounmap(port->dev, port->membase);
+	port->membase = NULL;
+	devm_release_mem_region(port->dev, port->mapbase, port->mapsize);
 }
 
 static int meson_uart_request_port(struct uart_port *port)
 {
-	int size = meson_uart_res_size(port);
-
-	if (size < 0)
-		return size;
-
-	if (!devm_request_mem_region(port->dev, port->mapbase, size,
+	if (!devm_request_mem_region(port->dev, port->mapbase, port->mapsize,
 				     dev_name(port->dev))) {
 		dev_err(port->dev, "Memory region busy\n");
 		return -EBUSY;
 	}
 
-	if (port->flags & UPF_IOREMAP) {
-		port->membase = devm_ioremap_nocache(port->dev,
-						     port->mapbase,
-						     size);
-		if (port->membase == NULL)
-			return -ENOMEM;
-	}
+	port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+					     port->mapsize);
+	if (!port->membase)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -470,6 +442,14 @@ static struct uart_ops meson_uart_ops = {
 };
 
 #ifdef CONFIG_SERIAL_MESON_CONSOLE
+static void meson_uart_enable_tx_engine(struct uart_port *port)
+{
+	u32 val;
+
+	val = readl(port->membase + AML_UART_CONTROL);
+	val |= AML_UART_TX_EN;
+	writel(val, port->membase + AML_UART_CONTROL);
+}
 
 static void meson_console_putchar(struct uart_port *port, int ch)
 {
@@ -499,7 +479,6 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
 	}
 
 	val = readl(port->membase + AML_UART_CONTROL);
-	val |= AML_UART_TX_EN;
 	tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN);
 	writel(tmp, port->membase + AML_UART_CONTROL);
 
@@ -538,6 +517,8 @@ static int meson_serial_console_setup(struct console *co, char *options)
 	if (!port || !port->membase)
 		return -ENODEV;
 
+	meson_uart_enable_tx_engine(port);
+
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
@@ -576,11 +557,16 @@ meson_serial_early_console_setup(struct earlycon_device *device, const char *opt
 	if (!device->port.membase)
 		return -ENODEV;
 
+	meson_uart_enable_tx_engine(&device->port);
 	device->con->write = meson_serial_early_console_write;
 	return 0;
 }
+/* Legacy bindings, should be removed once no longer used */
 OF_EARLYCON_DECLARE(meson, "amlogic,meson-uart",
 		    meson_serial_early_console_setup);
+/* Stable bindings */
+OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
+		    meson_serial_early_console_setup);
 
 #define MESON_SERIAL_CONSOLE	(&meson_serial_console)
 #else
@@ -595,11 +581,76 @@ static struct uart_driver meson_uart_driver = {
 	.cons		= MESON_SERIAL_CONSOLE,
 };
 
+static inline struct clk *meson_uart_probe_clock(struct device *dev,
+						 const char *id)
+{
+	struct clk *clk = NULL;
+	int ret;
+
+	clk = devm_clk_get(dev, id);
+	if (IS_ERR(clk))
+		return clk;
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "couldn't enable clk\n");
+		return ERR_PTR(ret);
+	}
+
+	devm_add_action_or_reset(dev,
+			(void(*)(void *))clk_disable_unprepare,
+			clk);
+
+	return clk;
+}
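
The devm_add_action_or_reset() call above registers clk_disable_unprepare() as a managed cleanup action, so the clock is released automatically on probe failure or driver removal; if the action itself cannot be registered, the _or_reset variant disables the clock immediately instead of leaking it.
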
+
+/*
+ * This function gets the clock described by the legacy, non-stable DT bindings.
+ * This code will be removed once all the platforms have switched to the
+ * new DT bindings.
+ */
+static int meson_uart_probe_clocks_legacy(struct platform_device *pdev,
+					  struct uart_port *port)
+{
+	struct clk *clk = NULL;
+
+	clk = meson_uart_probe_clock(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	port->uartclk = clk_get_rate(clk);
+
+	return 0;
+}
+
+static int meson_uart_probe_clocks(struct platform_device *pdev,
+				   struct uart_port *port)
+{
+	struct clk *clk_xtal = NULL;
+	struct clk *clk_pclk = NULL;
+	struct clk *clk_baud = NULL;
+
+	clk_pclk = meson_uart_probe_clock(&pdev->dev, "pclk");
+	if (IS_ERR(clk_pclk))
+		return PTR_ERR(clk_pclk);
+
+	clk_xtal = meson_uart_probe_clock(&pdev->dev, "xtal");
+	if (IS_ERR(clk_xtal))
+		return PTR_ERR(clk_xtal);
+
+	clk_baud = meson_uart_probe_clock(&pdev->dev, "baud");
+	if (IS_ERR(clk_baud))
+		return PTR_ERR(clk_baud);
+
+	port->uartclk = clk_get_rate(clk_baud);
+
+	return 0;
+}
+
 static int meson_uart_probe(struct platform_device *pdev)
 {
 	struct resource *res_mem, *res_irq;
 	struct uart_port *port;
-	struct clk *clk;
 	int ret = 0;
 
 	if (pdev->dev.of_node)
@@ -625,15 +676,20 @@ static int meson_uart_probe(struct platform_device *pdev)
 	if (!port)
 		return -ENOMEM;
 
-	clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
+	/* Use legacy way until all platforms switch to new bindings */
+	if (of_device_is_compatible(pdev->dev.of_node, "amlogic,meson-uart"))
+		ret = meson_uart_probe_clocks_legacy(pdev, port);
+	else
+		ret = meson_uart_probe_clocks(pdev, port);
 
-	port->uartclk = clk_get_rate(clk);
+	if (ret)
+		return ret;
+
 	port->iotype = UPIO_MEM;
 	port->mapbase = res_mem->start;
+	port->mapsize = resource_size(res_mem);
 	port->irq = res_irq->start;
-	port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_LOW_LATENCY;
+	port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
 	port->dev = &pdev->dev;
 	port->line = pdev->id;
 	port->type = PORT_MESON;
@@ -668,9 +724,14 @@ static int meson_uart_remove(struct platform_device *pdev)
 	return 0;
 }
 
-
 static const struct of_device_id meson_uart_dt_match[] = {
+	/* Legacy bindings, should be removed once no longer used */
 	{ .compatible = "amlogic,meson-uart" },
+	/* Stable bindings */
+	{ .compatible = "amlogic,meson6-uart" },
+	{ .compatible = "amlogic,meson8-uart" },
+	{ .compatible = "amlogic,meson8b-uart" },
+	{ .compatible = "amlogic,meson-gx-uart" },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 1a60a20..67ffecc 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -754,9 +754,10 @@ static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
 		if (!dma_set_mask(pi->port.dev, 0xffffffff)) {
 			printk(KERN_ERR "MPSC: Inadequate DMA support\n");
 			rc = -ENXIO;
-		} else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
+		} else if ((pi->dma_region = dma_alloc_attrs(pi->port.dev,
 						MPSC_DMA_ALLOC_SIZE,
-						&pi->dma_region_p, GFP_KERNEL))
+						&pi->dma_region_p, GFP_KERNEL,
+						DMA_ATTR_NON_CONSISTENT))
 				== NULL) {
 			printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
 			rc = -ENOMEM;
@@ -771,8 +772,9 @@ static void mpsc_free_ring_mem(struct mpsc_port_info *pi)
 	pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
 
 	if (pi->dma_region) {
-		dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
-				pi->dma_region, pi->dma_region_p);
+		dma_free_attrs(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
+				pi->dma_region, pi->dma_region_p,
+				DMA_ATTR_NON_CONSISTENT);
 		pi->dma_region = NULL;
 		pi->dma_region_p = (dma_addr_t)NULL;
 	}
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
new file mode 100644
index 0000000..1b80087
--- /dev/null
+++ b/drivers/tty/serial/owl-uart.c
@@ -0,0 +1,135 @@
+/*
+ * Actions Semi Owl family serial console
+ *
+ * Copyright 2013 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2016-2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#define OWL_UART_CTL	0x000
+#define OWL_UART_TXDAT	0x008
+#define OWL_UART_STAT	0x00c
+
+#define OWL_UART_CTL_TRFS_TX		BIT(14)
+#define OWL_UART_CTL_EN			BIT(15)
+#define OWL_UART_CTL_RXIE		BIT(18)
+#define OWL_UART_CTL_TXIE		BIT(19)
+
+#define OWL_UART_STAT_RIP		BIT(0)
+#define OWL_UART_STAT_TIP		BIT(1)
+#define OWL_UART_STAT_TFFU		BIT(6)
+#define OWL_UART_STAT_TRFL_MASK		(0x1f << 11)
+#define OWL_UART_STAT_UTBB		BIT(17)
+
+static inline void owl_uart_write(struct uart_port *port, u32 val, unsigned int off)
+{
+	writel(val, port->membase + off);
+}
+
+static inline u32 owl_uart_read(struct uart_port *port, unsigned int off)
+{
+	return readl(port->membase + off);
+}
+
+#ifdef CONFIG_SERIAL_OWL_CONSOLE
+
+static void owl_console_putchar(struct uart_port *port, int ch)
+{
+	if (!port->membase)
+		return;
+
+	while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TFFU)
+		cpu_relax();
+
+	owl_uart_write(port, ch, OWL_UART_TXDAT);
+}
+
+static void owl_uart_port_write(struct uart_port *port, const char *s,
+				u_int count)
+{
+	u32 old_ctl, val;
+	unsigned long flags;
+	int locked;
+
+	local_irq_save(flags);
+
+	if (port->sysrq)
+		locked = 0;
+	else if (oops_in_progress)
+		locked = spin_trylock(&port->lock);
+	else {
+		spin_lock(&port->lock);
+		locked = 1;
+	}
+
+	old_ctl = owl_uart_read(port, OWL_UART_CTL);
+	val = old_ctl | OWL_UART_CTL_TRFS_TX;
+	/* disable IRQ */
+	val &= ~(OWL_UART_CTL_RXIE | OWL_UART_CTL_TXIE);
+	owl_uart_write(port, val, OWL_UART_CTL);
+
+	uart_console_write(port, s, count, owl_console_putchar);
+
+	/* wait until all contents have been sent out */
+	while (owl_uart_read(port, OWL_UART_STAT) & OWL_UART_STAT_TRFL_MASK)
+		cpu_relax();
+
+	/* clear IRQ pending */
+	val = owl_uart_read(port, OWL_UART_STAT);
+	val |= OWL_UART_STAT_TIP | OWL_UART_STAT_RIP;
+	owl_uart_write(port, val, OWL_UART_STAT);
+
+	owl_uart_write(port, old_ctl, OWL_UART_CTL);
+
+	if (locked)
+		spin_unlock(&port->lock);
+
+	local_irq_restore(flags);
+}
+
+static void owl_uart_early_console_write(struct console *co,
+					 const char *s,
+					 u_int count)
+{
+	struct earlycon_device *dev = co->data;
+
+	owl_uart_port_write(&dev->port, s, count);
+}
+
+static int __init
+owl_uart_early_console_setup(struct earlycon_device *device, const char *opt)
+{
+	if (!device->port.membase)
+		return -ENODEV;
+
+	device->con->write = owl_uart_early_console_write;
+
+	return 0;
+}
+OF_EARLYCON_DECLARE(owl, "actions,owl-uart",
+		    owl_uart_early_console_setup);
+
+#endif /* CONFIG_SERIAL_OWL_CONSOLE */
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 42caccb..d3796dc 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -878,8 +878,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
 	sg_dma_len(sg) = priv->trigger_level;
 
 	sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt),
-		     sg_dma_len(sg), (unsigned long)priv->rx_buf_virt &
-		     ~PAGE_MASK);
+		     sg_dma_len(sg), offset_in_page(priv->rx_buf_virt));
 
 	sg_dma_address(sg) = priv->rx_buf_dma;
 
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index fcf803f..cdd2f94 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)
 
 	clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(clk)) {
-		if (PTR_ERR(clk) == -EPROBE_DEFER) {
-			ret = -EPROBE_DEFER;
+		ret = PTR_ERR(clk);
+		if (ret == -EPROBE_DEFER)
 			goto err_out;
-		}
+		uartclk = 0;
+	} else {
+		clk_prepare_enable(clk);
+		uartclk = clk_get_rate(clk);
+	}
+
+	if (!uartclk) {
 		dev_notice(&pdev->dev, "Using default clock frequency\n");
 		uartclk = s->chip->freq_std;
-	} else
-		uartclk = clk_get_rate(clk);
+	}
 
 	/* Check input frequency */
 	if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 13bfd5d..f534a40 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -954,11 +954,10 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
 		    old_custom_divisor != uport->custom_divisor) {
 			/*
 			 * If they're setting up a custom divisor or speed,
-			 * instead of clearing it, then bitch about it. No
-			 * need to rate-limit; it's CAP_SYS_ADMIN only.
+			 * instead of clearing it, then bitch about it.
 			 */
 			if (uport->flags & UPF_SPD_MASK) {
-				dev_notice(uport->dev,
+				dev_notice_ratelimited(uport->dev,
 				       "%s sets custom speed on %s. This is deprecated.\n",
 				      current->comm,
 				      tty_name(port->tty));
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 71707e8..da5ddfc 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1450,8 +1450,7 @@ static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
 	chan = dma_request_slave_channel(port->dev,
 					 dir == DMA_MEM_TO_DEV ? "tx" : "rx");
 	if (!chan) {
-		dev_warn(port->dev,
-			 "dma_request_slave_channel_compat failed\n");
+		dev_warn(port->dev, "dma_request_slave_channel failed\n");
 		return NULL;
 	}
 
@@ -1558,7 +1557,16 @@ static void sci_free_dma(struct uart_port *port)
 	if (s->chan_rx)
 		sci_rx_dma_release(s, false);
 }
-#else
+
+static void sci_flush_buffer(struct uart_port *port)
+{
+	/*
+	 * In uart_flush_buffer(), the xmit circular buffer has just been
+	 * cleared, so we have to reset tx_dma_len accordingly.
+	 */
+	to_sci_port(port)->tx_dma_len = 0;
+}
+#else /* !CONFIG_SERIAL_SH_SCI_DMA */
 static inline void sci_request_dma(struct uart_port *port)
 {
 }
@@ -1566,7 +1574,9 @@ static inline void sci_request_dma(struct uart_port *port)
 static inline void sci_free_dma(struct uart_port *port)
 {
 }
-#endif
+
+#define sci_flush_buffer	NULL
+#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
 
 static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 {
@@ -2581,6 +2591,7 @@ static const struct uart_ops sci_uart_ops = {
 	.break_ctl	= sci_break_ctl,
 	.startup	= sci_startup,
 	.shutdown	= sci_shutdown,
+	.flush_buffer	= sci_flush_buffer,
 	.set_termios	= sci_set_termios,
 	.pm		= sci_pm,
 	.type		= sci_type,
@@ -2950,6 +2961,7 @@ static inline int sci_probe_earlyprintk(struct platform_device *pdev)
 
 static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
 
+static DEFINE_MUTEX(sci_uart_registration_lock);
 static struct uart_driver sci_uart_driver = {
 	.owner		= THIS_MODULE,
 	.driver_name	= "sci",
@@ -3078,6 +3090,16 @@ static int sci_probe_single(struct platform_device *dev,
 		return -EINVAL;
 	}
 
+	mutex_lock(&sci_uart_registration_lock);
+	if (!sci_uart_driver.state) {
+		ret = uart_register_driver(&sci_uart_driver);
+		if (ret) {
+			mutex_unlock(&sci_uart_registration_lock);
+			return ret;
+		}
+	}
+	mutex_unlock(&sci_uart_registration_lock);
+
 	ret = sci_init_single(dev, sciport, index, p, false);
 	if (ret)
 		return ret;
@@ -3201,24 +3223,17 @@ static struct platform_driver sci_driver = {
 
 static int __init sci_init(void)
 {
-	int ret;
-
 	pr_info("%s\n", banner);
 
-	ret = uart_register_driver(&sci_uart_driver);
-	if (likely(ret == 0)) {
-		ret = platform_driver_register(&sci_driver);
-		if (unlikely(ret))
-			uart_unregister_driver(&sci_uart_driver);
-	}
-
-	return ret;
+	return platform_driver_register(&sci_driver);
 }
 
 static void __exit sci_exit(void)
 {
 	platform_driver_unregister(&sci_driver);
-	uart_unregister_driver(&sci_uart_driver);
+
+	if (sci_uart_driver.state)
+		uart_unregister_driver(&sci_uart_driver);
 }
 
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index e03282d..684cb8d 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1253,7 +1253,7 @@ static enum hrtimer_restart
 	return HRTIMER_RESTART;
 }
 
-static struct of_device_id sirfsoc_uart_ids[] = {
+static const struct of_device_id sirfsoc_uart_ids[] = {
 	{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
 	{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
 	{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index c053995..fde55dc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -186,6 +186,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
  * @pclk:		APB clock
  * @baud:		Current baud rate
  * @clk_rate_change_nb:	Notifier block for clock changes
+ * @quirks:		Flags for RXBS support.
  */
 struct cdns_uart {
 	struct uart_port	*port;
@@ -1587,20 +1588,21 @@ static int cdns_uart_probe(struct platform_device *pdev)
 	if (rc) {
 		dev_err(&pdev->dev,
 			"uart_add_one_port() failed; err=%i\n", rc);
-		goto err_out_notif_unreg;
+		goto err_out_pm_disable;
 	}
 
 	return 0;
 
+err_out_pm_disable:
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
 err_out_notif_unreg:
 #ifdef CONFIG_COMMON_CLK
 	clk_notifier_unregister(cdns_uart_data->uartclk,
 			&cdns_uart_data->clk_rate_change_nb);
 #endif
 err_out_clk_disable:
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
 	clk_disable_unprepare(cdns_uart_data->uartclk);
 err_out_clk_dis_pclk:
 	clk_disable_unprepare(cdns_uart_data->pclk);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 31885f2..cc047de 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -184,7 +184,7 @@ static void hdlcdev_exit(struct slgt_info *info);
 struct cond_wait {
 	struct cond_wait *next;
 	wait_queue_head_t q;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	unsigned int data;
 };
 static void init_cond_wait(struct cond_wait *w, unsigned int data);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0c150b5..974b13d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -325,6 +325,56 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
 	return NULL;
 }
 
+/**
+ *	tty_dev_name_to_number	-	return dev_t for device name
+ *	@name: user space name of device under /dev
+ *	@number: pointer to dev_t that this function will populate
+ *
+ *	This function converts device names like ttyS0 or ttyUSB1 into a dev_t
+ *	such as (4, 64) or (188, 1). If no corresponding driver is registered then
+ *	the function returns -ENODEV.
+ *
+ *	Locking: this acquires tty_mutex to protect the tty_drivers list from
+ *		being modified while we are traversing it, and makes sure to
+ *		release it before exiting.
+ */
+int tty_dev_name_to_number(const char *name, dev_t *number)
+{
+	struct tty_driver *p;
+	int ret;
+	int index, prefix_length = 0;
+	const char *str;
+
+	for (str = name; *str && !isdigit(*str); str++)
+		;
+
+	if (!*str)
+		return -EINVAL;
+
+	ret = kstrtoint(str, 10, &index);
+	if (ret)
+		return ret;
+
+	prefix_length = str - name;
+	mutex_lock(&tty_mutex);
+
+	list_for_each_entry(p, &tty_drivers, tty_drivers)
+		if (prefix_length == strlen(p->name) && strncmp(name,
+					p->name, prefix_length) == 0) {
+			if (index < p->num) {
+				*number = MKDEV(p->major, p->minor_start + index);
+				goto out;
+			}
+		}
+
+	/* if here then driver wasn't found */
+	ret = -ENODEV;
+out:
+	mutex_unlock(&tty_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
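
A minimal caller sketch for the new helper, to make the name-to-dev_t mapping concrete (hypothetical code, assuming the declaration is exported via <linux/tty.h>):

#include <linux/kdev_t.h>
#include <linux/printk.h>
#include <linux/tty.h>

static int example_resolve(void)
{
	dev_t devt;
	int ret;

	ret = tty_dev_name_to_number("ttyS0", &devt);	/* expect MKDEV(4, 64) */
	if (ret)
		return ret;	/* -EINVAL for a name without an index, -ENODEV if no driver matches */

	pr_info("ttyS0 is (%u,%u)\n", MAJOR(devt), MINOR(devt));
	return 0;
}
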
+
 #ifdef CONFIG_CONSOLE_POLL
 
 /**
@@ -1083,7 +1133,10 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
 	struct tty_struct *tty;
 
 	if (driver->ops->lookup)
-		tty = driver->ops->lookup(driver, file, idx);
+		if (!file)
+			tty = ERR_PTR(-EIO);
+		else
+			tty = driver->ops->lookup(driver, file, idx);
 	else
 		tty = driver->ttys[idx];
 
@@ -1715,7 +1768,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
 		struct tty_driver *console_driver = console_device(index);
 		if (console_driver) {
 			driver = tty_driver_kref_get(console_driver);
-			if (driver) {
+			if (driver && filp) {
 				/* Don't let /dev/console block */
 				filp->f_flags |= O_NONBLOCK;
 				break;
@@ -1748,7 +1801,7 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
  *	  - concurrent tty driver removal w/ lookup
  *	  - concurrent tty removal from driver table
  */
-static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
 					     struct file *filp)
 {
 	struct tty_struct *tty;
@@ -1793,6 +1846,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
 	tty_driver_kref_put(driver);
 	return tty;
 }
+EXPORT_SYMBOL_GPL(tty_open_by_driver);
 
 /**
  *	tty_open		-	open a tty device
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index e4603b0..2fe216b 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -492,6 +492,29 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 }
 
 /**
+ *	tty_ldisc_failto	-	helper for ldisc failback
+ *	@tty: tty to open the ldisc on
+ *	@ld: ldisc we are trying to fail back to
+ *
+ *	Helper to try and recover a tty when switching back to the old
+ *	ldisc fails and we need something attached.
+ */
+
+static int tty_ldisc_failto(struct tty_struct *tty, int ld)
+{
+	struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
+	int r;
+
+	if (IS_ERR(disc))
+		return PTR_ERR(disc);
+	tty->ldisc = disc;
+	tty_set_termios_ldisc(tty, ld);
+	if ((r = tty_ldisc_open(tty, disc)) < 0)
+		tty_ldisc_put(disc);
+	return r;
+}
+
+/**
  *	tty_ldisc_restore	-	helper for tty ldisc change
  *	@tty: tty to recover
  *	@old: previous ldisc
@@ -502,9 +525,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 
 static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 {
-	struct tty_ldisc *new_ldisc;
-	int r;
-
 	/* There is an outstanding reference here so this is safe */
 	old = tty_ldisc_get(tty, old->ops->num);
 	WARN_ON(IS_ERR(old));
@@ -512,17 +532,13 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 	tty_set_termios_ldisc(tty, old->ops->num);
 	if (tty_ldisc_open(tty, old) < 0) {
 		tty_ldisc_put(old);
-		/* This driver is always present */
-		new_ldisc = tty_ldisc_get(tty, N_TTY);
-		if (IS_ERR(new_ldisc))
-			panic("n_tty: get");
-		tty->ldisc = new_ldisc;
-		tty_set_termios_ldisc(tty, N_TTY);
-		r = tty_ldisc_open(tty, new_ldisc);
-		if (r < 0)
-			panic("Couldn't open N_TTY ldisc for "
-			      "%s --- error %d.",
-			      tty_name(tty), r);
+		/* The traditional behaviour is to fall back to N_TTY; to
+		   avoid the risk of breaking anything, only fall back to
+		   N_NULL when there is no other choice */
+		if (tty_ldisc_failto(tty, N_TTY) < 0 &&
+		    tty_ldisc_failto(tty, N_NULL) < 0)
+			panic("Couldn't open N_NULL ldisc for %s.",
+			      tty_name(tty));
 	}
 }
 
@@ -605,6 +621,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 	tty_unlock(tty);
 	return retval;
 }
+EXPORT_SYMBOL_GPL(tty_set_ldisc);
 
 /**
  *	tty_ldisc_kill	-	teardown ldisc
@@ -797,6 +814,7 @@ void tty_ldisc_release(struct tty_struct *tty)
 
 	tty_ldisc_debug(tty, "released\n");
 }
+EXPORT_SYMBOL_GPL(tty_ldisc_release);
 
 /**
  *	tty_ldisc_init		-	ldisc setup for new tty
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 4fb3165..6b13719 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -34,9 +34,7 @@ static int tty_port_default_receive_buf(struct tty_port *port,
 	if (!disc)
 		return 0;
 
-	mutex_lock(&tty->atomic_write_lock);
 	ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
-	mutex_unlock(&tty->atomic_write_lock);
 
 	tty_ldisc_deref(disc);
 
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 1f6e17f..a5f88cf 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -322,15 +322,13 @@ int con_set_trans_old(unsigned char __user * arg)
 {
 	int i;
 	unsigned short inbuf[E_TABSZ];
+	unsigned char ubuf[E_TABSZ];
 
-	if (!access_ok(VERIFY_READ, arg, E_TABSZ))
+	if (copy_from_user(ubuf, arg, E_TABSZ))
 		return -EFAULT;
 
-	for (i = 0; i < E_TABSZ ; i++) {
-		unsigned char uc;
-		__get_user(uc, arg+i);
-		inbuf[i] = UNI_DIRECT_BASE | uc;
-	}
+	for (i = 0; i < E_TABSZ ; i++)
+		inbuf[i] = UNI_DIRECT_BASE | ubuf[i];
 
 	console_lock();
 	memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
@@ -345,9 +343,6 @@ int con_get_trans_old(unsigned char __user * arg)
 	unsigned short *p = translations[USER_MAP];
 	unsigned char outbuf[E_TABSZ];
 
-	if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
-		return -EFAULT;
-
 	console_lock();
 	for (i = 0; i < E_TABSZ ; i++)
 	{
@@ -356,22 +351,16 @@ int con_get_trans_old(unsigned char __user * arg)
 	}
 	console_unlock();
 
-	for (i = 0; i < E_TABSZ ; i++)
-		__put_user(outbuf[i], arg+i);
-	return 0;
+	return copy_to_user(arg, outbuf, sizeof(outbuf)) ? -EFAULT : 0;
 }
 
 int con_set_trans_new(ushort __user * arg)
 {
-	int i;
 	unsigned short inbuf[E_TABSZ];
 
-	if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
+	if (copy_from_user(inbuf, arg, sizeof(inbuf)))
 		return -EFAULT;
 
-	for (i = 0; i < E_TABSZ ; i++)
-		__get_user(inbuf[i], arg+i);
-
 	console_lock();
 	memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
 	update_user_maps();
@@ -381,19 +370,13 @@ int con_set_trans_new(ushort __user * arg)
 
 int con_get_trans_new(ushort __user * arg)
 {
-	int i;
 	unsigned short outbuf[E_TABSZ];
 
-	if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
-		return -EFAULT;
-
 	console_lock();
 	memcpy(outbuf, translations[USER_MAP], sizeof(outbuf));
 	console_unlock();
 
-	for (i = 0; i < E_TABSZ ; i++)
-		__put_user(outbuf[i], arg+i);
-	return 0;
+	return copy_to_user(arg, outbuf, sizeof(outbuf)) ? -EFAULT : 0;
 }
 
 /*
@@ -557,14 +540,9 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 	if (!ct)
 		return 0;
 
-	unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
-	if (!unilist)
-		return -ENOMEM;
-
-	for (i = ct, plist = unilist; i; i--, plist++, list++) {
-		__get_user(plist->unicode, &list->unicode);
-		__get_user(plist->fontpos, &list->fontpos);
-	}
+	unilist = memdup_user(list, ct * sizeof(struct unipair));
+	if (IS_ERR(unilist))
+		return PTR_ERR(unilist);
 
 	console_lock();
 
@@ -757,11 +735,11 @@ EXPORT_SYMBOL(con_copy_unimap);
  */
 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
 {
-	int i, j, k;
+	int i, j, k, ret = 0;
 	ushort ect;
 	u16 **p1, *p2;
 	struct uni_pagedir *p;
-	struct unipair *unilist, *plist;
+	struct unipair *unilist;
 
 	unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
 	if (!unilist)
@@ -792,13 +770,11 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
 		}
 	}
 	console_unlock();
-	for (i = min(ect, ct), plist = unilist; i; i--, list++, plist++) {
-		__put_user(plist->unicode, &list->unicode);
-		__put_user(plist->fontpos, &list->fontpos);
-	}
-	__put_user(ect, uct);
+	if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
+		ret = -EFAULT;
+	put_user(ect, uct);
 	kfree(unilist);
-	return ((ect <= ct) ? 0 : -ENOMEM);
+	return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
 }
 
 /*
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 8af8d95..f416626 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1203,8 +1203,7 @@ DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
 #if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
     defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
     defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\
-    (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC)) ||\
-    defined(CONFIG_AVR32)
+    (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC))
 
 #define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\
 			((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001))
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9c99452..2ebaba1 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -425,7 +425,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
 	else if (_underline)
 		a = (a & 0xf0) | vc->vc_ulcolor;
 	else if (_intensity == 0)
-		a = (a & 0xf0) | vc->vc_ulcolor;
+		a = (a & 0xf0) | vc->vc_halfcolor;
 	if (_reverse)
 		a = ((a) & 0x88) | ((((a) >> 4) | ((a) << 4)) & 0x77);
 	if (_blink)
@@ -2709,13 +2709,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
 	 * related to the kernel should not use this.
 	 */
 			data = vt_get_shift_state();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_GETMOUSEREPORTING:
 			console_lock();	/* May be overkill */
 			data = mouse_reporting();
 			console_unlock();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_SETVESABLANK:
 			console_lock();
@@ -2724,7 +2724,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
 			break;
 		case TIOCL_GETKMSGREDIRECT:
 			data = vt_get_kmsg_redirect();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_SETKMSGREDIRECT:
 			if (!capable(CAP_SYS_ADMIN)) {
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 0cbfe1f..96d389cb 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -266,10 +266,6 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_
 
 	if (copy_from_user(&tmp, user_ud, sizeof tmp))
 		return -EFAULT;
-	if (tmp.entries)
-		if (!access_ok(VERIFY_WRITE, tmp.entries,
-				tmp.entry_ct*sizeof(struct unipair)))
-			return -EFAULT;
 	switch (cmd) {
 	case PIO_UNIMAP:
 		if (!perm)
@@ -1170,10 +1166,6 @@ compat_unimap_ioctl(unsigned int cmd, struct compat_unimapdesc __user *user_ud,
 	if (copy_from_user(&tmp, user_ud, sizeof tmp))
 		return -EFAULT;
 	tmp_entries = compat_ptr(tmp.entries);
-	if (tmp_entries)
-		if (!access_ok(VERIFY_WRITE, tmp_entries,
-				tmp.entry_ct*sizeof(struct unipair)))
-			return -EFAULT;
 	switch (cmd) {
 	case PIO_UNIMAP:
 		if (!perm)
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index d0b508b..a56fdf9 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -66,14 +66,7 @@ static int probe(struct pci_dev *pdev,
 		return err;
 	}
 
-	if (!pdev->irq) {
-		dev_warn(&pdev->dev, "No IRQ assigned to device: "
-			 "no support for interrupts?\n");
-		pci_disable_device(pdev);
-		return -ENODEV;
-	}
-
-	if (!pci_intx_mask_supported(pdev)) {
+	if (pdev->irq && !pci_intx_mask_supported(pdev)) {
 		err = -ENODEV;
 		goto err_verify;
 	}
@@ -86,10 +79,15 @@ static int probe(struct pci_dev *pdev,
 
 	gdev->info.name = "uio_pci_generic";
 	gdev->info.version = DRIVER_VERSION;
-	gdev->info.irq = pdev->irq;
-	gdev->info.irq_flags = IRQF_SHARED;
-	gdev->info.handler = irqhandler;
 	gdev->pdev = pdev;
+	if (pdev->irq) {
+		gdev->info.irq = pdev->irq;
+		gdev->info.irq_flags = IRQF_SHARED;
+		gdev->info.handler = irqhandler;
+	} else {
+		dev_warn(&pdev->dev, "No IRQ assigned to device: "
+			 "no support for interrupts?\n");
+	}
 
 	err = uio_register_device(&pdev->dev, &gdev->info);
 	if (err)
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9e217b1..b17ed3a 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -818,7 +818,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
 {
 	ci_hdrc_gadget_destroy(ci);
 	ci_hdrc_host_destroy(ci);
-	if (ci->is_otg)
+	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
 		ci_hdrc_otg_destroy(ci);
 }
 
@@ -843,7 +843,10 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
 {
 	struct ci_hdrc *ci = dev_get_drvdata(dev);
 
-	return sprintf(buf, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		return sprintf(buf, "%s\n", ci_role(ci)->name);
+
+	return 0;
 }
 
 static ssize_t ci_role_store(struct device *dev,
@@ -977,27 +980,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 	/* initialize role(s) before the interrupt is requested */
 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
 		ret = ci_hdrc_host_init(ci);
-		if (ret)
-			dev_info(dev, "doesn't support host\n");
+		if (ret) {
+			if (ret == -ENXIO)
+				dev_info(dev, "doesn't support host\n");
+			else
+				goto deinit_phy;
+		}
 	}
 
 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
 		ret = ci_hdrc_gadget_init(ci);
-		if (ret)
-			dev_info(dev, "doesn't support gadget\n");
+		if (ret) {
+			if (ret == -ENXIO)
+				dev_info(dev, "doesn't support gadget\n");
+			else
+				goto deinit_host;
+		}
 	}
 
 	if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
 		dev_err(dev, "no supported roles\n");
 		ret = -ENODEV;
-		goto deinit_phy;
+		goto deinit_gadget;
 	}
 
 	if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
 		ret = ci_hdrc_otg_init(ci);
 		if (ret) {
 			dev_err(dev, "init otg fails, ret = %d\n", ret);
-			goto stop;
+			goto deinit_gadget;
 		}
 	}
 
@@ -1067,7 +1078,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 remove_debug:
 	dbg_remove_files(ci);
 stop:
-	ci_role_destroy(ci);
+	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+		ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+	ci_hdrc_gadget_destroy(ci);
+deinit_host:
+	ci_hdrc_host_destroy(ci);
 deinit_phy:
 	ci_usb_phy_exit(ci);
 ulpi_exit:
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 6d23eed..1c31e8a 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
 {
 	struct ci_hdrc *ci = s->private;
 
-	seq_printf(s, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		seq_printf(s, "%s\n", ci_role(ci)->name);
 
 	return 0;
 }
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 93e24ce..949183e 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -234,7 +234,7 @@ static void ci_otg_add_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
 				ktime_set(timer_sec, timer_nsec));
 	ci->enabled_otg_timer_bits |= (1 << t);
 	if ((ci->next_otg_timer == NUM_OTG_FSM_TIMERS) ||
-			(ci->hr_timeouts[ci->next_otg_timer] >
+			ktime_after(ci->hr_timeouts[ci->next_otg_timer],
 						ci->hr_timeouts[t])) {
 			ci->next_otg_timer = t;
 			hrtimer_start_range_ns(&ci->otg_fsm_hrtimer,
@@ -269,7 +269,7 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
 			for_each_set_bit(cur_timer, &enabled_timer_bits,
 							NUM_OTG_FSM_TIMERS) {
 				if ((next_timer == NUM_OTG_FSM_TIMERS) ||
-					(ci->hr_timeouts[next_timer] <
+					ktime_before(ci->hr_timeouts[next_timer],
 					 ci->hr_timeouts[cur_timer]))
 					next_timer = cur_timer;
 			}
@@ -397,13 +397,13 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
 
 	now = ktime_get();
 	for_each_set_bit(cur_timer, &enabled_timer_bits, NUM_OTG_FSM_TIMERS) {
-		if (now >= ci->hr_timeouts[cur_timer]) {
+		if (ktime_compare(now, ci->hr_timeouts[cur_timer]) >= 0) {
 			ci->enabled_otg_timer_bits &= ~(1 << cur_timer);
 			if (otg_timer_handlers[cur_timer])
 				ret = otg_timer_handlers[cur_timer](ci);
 		} else {
 			if ((next_timer == NUM_OTG_FSM_TIMERS) ||
-				(ci->hr_timeouts[cur_timer] <
+				ktime_before(ci->hr_timeouts[cur_timer],
 					ci->hr_timeouts[next_timer]))
 				next_timer = cur_timer;
 		}
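
The comparisons above move from raw ktime_t operators to the accessor helpers from <linux/ktime.h>; a small reminder of their semantics (standard kernel API, shown only for clarity):

#include <linux/ktime.h>

static bool example_timer_expired(ktime_t now, ktime_t deadline)
{
	/* true once "now" is at or past "deadline", the same test as the hunk above */
	return ktime_compare(now, deadline) >= 0;
}
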
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 56d2d32..d68b125 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1993,6 +1993,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
 	struct ci_role_driver *rdrv;
+	int ret;
 
 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
 		return -ENXIO;
@@ -2005,7 +2006,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 	rdrv->stop	= udc_id_switch_for_host;
 	rdrv->irq	= udc_irq;
 	rdrv->name	= "gadget";
-	ci->roles[CI_ROLE_GADGET] = rdrv;
 
-	return udc_start(ci);
+	ret = udc_start(ci);
+	if (!ret)
+		ci->roles[CI_ROLE_GADGET] = rdrv;
+
+	return ret;
 }
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e77a4ed..9f4a018 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -108,6 +108,8 @@ struct imx_usbmisc {
 	const struct usbmisc_ops *ops;
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
+
 static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
 {
 	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
 			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
 				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
 			writel(val, reg);
-			/* Disable internal 60Mhz clock */
-			reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-			val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
-			writel(val, reg);
+			if (is_imx53_usbmisc(data)) {
+				/* Disable internal 60 MHz clock */
+				reg = usbmisc->base +
+					MX53_USB_CLKONOFF_CTRL_OFFSET;
+				val = readl(reg) |
+					MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+				writel(val, reg);
+			}
+
 		}
 		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
 			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
 				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
 			writel(val, reg);
-			/* Disable internal 60Mhz clock */
-			reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-			val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
-			writel(val, reg);
+
+			if (is_imx53_usbmisc(data)) {
+				/* Disable internal 60 MHz clock */
+				reg = usbmisc->base +
+					MX53_USB_CLKONOFF_CTRL_OFFSET;
+				val = readl(reg) |
+					MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+				writel(val, reg);
+			}
 		}
 		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = {
 	.init = usbmisc_imx27_init,
 };
 
+static const struct usbmisc_ops imx51_usbmisc_ops = {
+	.init = usbmisc_imx53_init,
+};
+
 static const struct usbmisc_ops imx53_usbmisc_ops = {
 	.init = usbmisc_imx53_init,
 };
@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
 	.set_wakeup = usbmisc_imx7d_set_wakeup,
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
+{
+	struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+
+	return usbmisc->ops == &imx53_usbmisc_ops;
+}
+
 int imx_usbmisc_init(struct imx_usbmisc_data *data)
 {
 	struct imx_usbmisc *usbmisc;
@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
 	},
 	{
 		.compatible = "fsl,imx51-usbmisc",
-		.data = &imx53_usbmisc_ops,
+		.data = &imx51_usbmisc_ops,
 	},
 	{
 		.compatible = "fsl,imx53-usbmisc",
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 08669fe..8f97224 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -361,17 +361,9 @@ static ssize_t wdm_write
 	if (we < 0)
 		return usb_translate_errors(we);
 
-	buf = kmalloc(count, GFP_KERNEL);
-	if (!buf) {
-		rv = -ENOMEM;
-		goto outnl;
-	}
-
-	r = copy_from_user(buf, buffer, count);
-	if (r > 0) {
-		rv = -EFAULT;
-		goto out_free_mem;
-	}
+	buf = memdup_user(buffer, count);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
 
 	/* concurrent writes and disconnect */
 	r = mutex_lock_interruptible(&desc->wlock);
@@ -441,8 +433,7 @@ static ssize_t wdm_write
 
 	usb_autopm_put_interface(desc->intf);
 	mutex_unlock(&desc->wlock);
-outnl:
-	return rv < 0 ? rv : count;
+	return count;
 
 out_free_mem_pm:
 	usb_autopm_put_interface(desc->intf);
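
The conversion above follows the memdup_user() pattern that this series applies in several places; a minimal sketch of the pattern itself (illustrative, not taken from the patch):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t example_write(const char __user *buffer, size_t count)
{
	char *buf;

	/* kmalloc() + copy_from_user() collapsed into one call */
	buf = memdup_user(buffer, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... consume buf ... */
	kfree(buf);
	return count;
}
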
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 8e6ef67..0e7d0e81 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2537,6 +2537,9 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
 	case USBDEVFS_DROP_PRIVILEGES:
 		ret = proc_drop_privileges(ps, p);
 		break;
+	case USBDEVFS_GET_SPEED:
+		ret = ps->dev->speed;
+		break;
 	}
 
  done:
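
A userspace sketch of how the new request might be used (the device path is illustrative, and it assumes USBDEVFS_GET_SPEED is exported through <linux/usbdevice_fs.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* illustrative path */
	int speed;

	if (fd < 0)
		return 1;
	speed = ioctl(fd, USBDEVFS_GET_SPEED, NULL);	/* enum usb_device_speed value */
	printf("speed enum: %d\n", speed);
	close(fd);
	return 0;
}
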
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 7859d73..ea829ad 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -584,12 +584,7 @@ static int hcd_pci_suspend_noirq(struct device *dev)
 
 static int hcd_pci_resume_noirq(struct device *dev)
 {
-	struct pci_dev		*pci_dev = to_pci_dev(dev);
-
-	powermac_set_asic(pci_dev, 1);
-
-	/* Go back to D0 and disable remote wakeup */
-	pci_back_from_sleep(pci_dev);
+	powermac_set_asic(to_pci_dev(dev), 1);
 	return 0;
 }
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 5dea983..ab1bb3b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/sched/task_stack.h>
 #include <linux/slab.h>
 #include <linux/completion.h>
 #include <linux/utsname.h>
@@ -1076,7 +1077,6 @@ static void usb_deregister_bus (struct usb_bus *bus)
 static int register_root_hub(struct usb_hcd *hcd)
 {
 	struct device *parent_dev = hcd->self.controller;
-	struct device *sysdev = hcd->self.sysdev;
 	struct usb_device *usb_dev = hcd->self.root_hub;
 	const int devnum = 1;
 	int retval;
@@ -1123,7 +1123,6 @@ static int register_root_hub(struct usb_hcd *hcd)
 		/* Did the HC die before the root hub was registered? */
 		if (HCD_DEAD(hcd))
 			usb_hc_died (hcd);	/* This time clean up */
-		usb_dev->dev.of_node = sysdev->of_node;
 	}
 	mutex_unlock(&usb_bus_idr_lock);
 
@@ -1523,6 +1522,14 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 		if (hcd->self.uses_pio_for_control)
 			return ret;
 		if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+			if (is_vmalloc_addr(urb->setup_packet)) {
+				WARN_ONCE(1, "setup packet is not dma capable\n");
+				return -EAGAIN;
+			} else if (object_is_on_stack(urb->setup_packet)) {
+				WARN_ONCE(1, "setup packet is on stack\n");
+				return -EAGAIN;
+			}
+
 			urb->setup_dma = dma_map_single(
 					hcd->self.sysdev,
 					urb->setup_packet,
@@ -1587,6 +1594,9 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 			} else if (is_vmalloc_addr(urb->transfer_buffer)) {
 				WARN_ONCE(1, "transfer buffer not dma capable\n");
 				ret = -EAGAIN;
+			} else if (object_is_on_stack(urb->transfer_buffer)) {
+				WARN_ONCE(1, "transfer buffer is on stack\n");
+				ret = -EAGAIN;
 			} else {
 				urb->transfer_dma = dma_map_single(
 						hcd->self.sysdev,
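
As an example of what the new checks catch (hypothetical driver, not from the patch): a caller handing a buffer declared on its own stack to usb_control_msg() would previously be DMA-mapped silently; it now fails with -EAGAIN and a one-time warning, since stack (and vmalloc) memory cannot safely be passed to dma_map_single().
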
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b8bb20d..6e6797d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1661,10 +1661,28 @@ static void hub_disconnect(struct usb_interface *intf)
 	kref_put(&hub->kref, hub_release);
 }
 
+static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
+{
+	/* Some hubs have a subclass of 1, which AFAICT according to the */
+	/*  specs is not defined, but it works */
+	if (desc->desc.bInterfaceSubClass != 0 &&
+	    desc->desc.bInterfaceSubClass != 1)
+		return false;
+
+	/* Multiple endpoints? What kind of mutant ninja-hub is this? */
+	if (desc->desc.bNumEndpoints != 1)
+		return false;
+
+	/* If the first endpoint is not interrupt IN, we'd better punt! */
+	if (!usb_endpoint_is_int_in(&desc->endpoint[0].desc))
+		return false;
+
+	return true;
+}
+
 static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	struct usb_host_interface *desc;
-	struct usb_endpoint_descriptor *endpoint;
 	struct usb_device *hdev;
 	struct usb_hub *hub;
 
@@ -1739,25 +1757,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	}
 #endif
 
-	/* Some hubs have a subclass of 1, which AFAICT according to the */
-	/*  specs is not defined, but it works */
-	if ((desc->desc.bInterfaceSubClass != 0) &&
-	    (desc->desc.bInterfaceSubClass != 1)) {
-descriptor_error:
+	if (!hub_descriptor_is_sane(desc)) {
 		dev_err(&intf->dev, "bad descriptor, ignoring hub\n");
 		return -EIO;
 	}
 
-	/* Multiple endpoints? What kind of mutant ninja-hub is this? */
-	if (desc->desc.bNumEndpoints != 1)
-		goto descriptor_error;
-
-	endpoint = &desc->endpoint[0].desc;
-
-	/* If it's not an interrupt in endpoint, we'd better punt! */
-	if (!usb_endpoint_is_int_in(endpoint))
-		goto descriptor_error;
-
 	/* We found a hub */
 	dev_info(&intf->dev, "USB hub found\n");
 
@@ -1784,7 +1788,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
 		hub->quirk_check_port_auto_suspend = 1;
 
-	if (hub_configure(hub, endpoint) >= 0)
+	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
 		return 0;
 
 	hub_disconnect(intf);
@@ -3155,12 +3159,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 		if (PMSG_IS_AUTO(msg))
 			goto err_ltm;
 	}
-	if (usb_unlocked_disable_lpm(udev)) {
-		dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
-		status = -ENOMEM;
-		if (PMSG_IS_AUTO(msg))
-			goto err_lpm3;
-	}
 
 	/* see 7.1.7.6 */
 	if (hub_is_superspeed(hub->hdev))
@@ -3187,9 +3185,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	if (status) {
 		dev_dbg(&port_dev->dev, "can't suspend, status %d\n", status);
 
-		/* Try to enable USB3 LPM and LTM again */
-		usb_unlocked_enable_lpm(udev);
- err_lpm3:
+		/* Try to enable USB3 LTM again */
 		usb_enable_ltm(udev);
  err_ltm:
 		/* Try to enable USB2 hardware LPM again */
@@ -3473,9 +3469,8 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 		if (udev->usb2_hw_lpm_capable == 1)
 			usb_set_usb2_hardware_lpm(udev, 1);
 
-		/* Try to enable USB3 LTM and LPM */
+		/* Try to enable USB3 LTM */
 		usb_enable_ltm(udev);
-		usb_unlocked_enable_lpm(udev);
 	}
 
 	usb_unlock_port(port_dev);
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 1713248..16c19a3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -11,8 +11,10 @@
 #include <linux/device.h>
 #include <linux/leds.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
+#include <linux/usb/of.h>
 
 struct usbport_trig_data {
 	struct led_classdev *led_cdev;
@@ -123,6 +125,57 @@ static const struct attribute_group ports_group = {
  * Adding & removing ports
  ***************************************/
 
+/**
+ * usbport_trig_port_observed - Check if port should be observed
+ */
+static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data,
+				       struct usb_device *usb_dev, int port1)
+{
+	struct device *dev = usbport_data->led_cdev->dev;
+	struct device_node *led_np = dev->of_node;
+	struct of_phandle_args args;
+	struct device_node *port_np;
+	int count, i;
+
+	if (!led_np)
+		return false;
+
+	/* Get node of port being added */
+	port_np = usb_of_get_child_node(usb_dev->dev.of_node, port1);
+	if (!port_np)
+		return false;
+
+	/* Amount of trigger sources for this LED */
+	count = of_count_phandle_with_args(led_np, "trigger-sources",
+					   "#trigger-source-cells");
+	if (count < 0) {
+		dev_warn(dev, "Failed to get trigger sources for %s\n",
+			 led_np->full_name);
+		return false;
+	}
+
+	/* Check list of sources for this specific port */
+	for (i = 0; i < count; i++) {
+		int err;
+
+		err = of_parse_phandle_with_args(led_np, "trigger-sources",
+						 "#trigger-source-cells", i,
+						 &args);
+		if (err) {
+			dev_err(dev, "Failed to get trigger source phandle at index %d: %d\n",
+				i, err);
+			continue;
+		}
+
+		of_node_put(args.np);
+
+		if (args.np == port_np)
+			return true;
+	}
+
+	return false;
+}
+
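
For example (an illustrative binding fragment, not part of the patch), an LED node listing trigger-sources = <&ohci_port1>, <&ehci_port1>; with #trigger-source-cells = <0>; on the referenced port nodes would cause only those two ports to be marked as observed by this trigger.
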
 static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
 				 struct usb_device *usb_dev,
 				 const char *hub_name, int portnum)
@@ -141,6 +194,8 @@ static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
 	port->data = usbport_data;
 	port->hub = usb_dev;
 	port->portnum = portnum;
+	port->observed = usbport_trig_port_observed(usbport_data, usb_dev,
+						    portnum);
 
 	len = strlen(hub_name) + 8;
 	port->port_name = kzalloc(len, GFP_KERNEL);
@@ -255,6 +310,7 @@ static void usbport_trig_activate(struct led_classdev *led_cdev)
 	if (err)
 		goto err_free;
 	usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
+	usbport_trig_update_count(usbport_data);
 
 	/* Notifications */
 	usbport_data->nb.notifier_call = usbport_trig_notify,
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index d563cbc..3863bb1 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -28,7 +28,8 @@
  *
  * Find the node from device tree according to its port number.
  *
- * Return: On success, a pointer to the device node, %NULL on failure.
+ * Return: A pointer to the node with incremented refcount if found, or
+ * %NULL otherwise.
  */
 struct device_node *usb_of_get_child_node(struct device_node *parent,
 					int portnum)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96b21b0..3116edf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Blackmagic Design UltraStudio SDI */
 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
 
+	/* Hauppauge HVR-950q */
+	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* INTEL VALUE SSD */
 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 2776cfe..ef9cf4a 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
  */
 #define USB_ACPI_LOCATION_VALID (1 << 31)
 
+static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
+					      int raw)
+{
+	struct acpi_device *adev;
+
+	if (!parent)
+		return NULL;
+
+	list_for_each_entry(adev, &parent->children, node) {
+		if (acpi_device_adr(adev) == raw)
+			return adev;
+	}
+
+	return acpi_find_child_device(parent, raw, false);
+}
+
 static struct acpi_device *usb_acpi_find_companion(struct device *dev)
 {
 	struct usb_device *udev;
@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
 			int raw;
 
 			raw = usb_hcd_find_raw_port_number(hcd, port1);
-			adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
-					raw, false);
+
+			adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
+						  raw);
+
 			if (!adev)
 				return NULL;
 		} else {
@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
 				return NULL;
 
 			acpi_bus_get_device(parent_handle, &adev);
-			adev = acpi_find_child_device(adev, port1, false);
+
+			adev = usb_acpi_find_port(adev, port1);
+
 			if (!adev)
 				return NULL;
 		}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 28b053c..17681d5 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -416,6 +416,7 @@ static void usb_release_dev(struct device *dev)
 
 	usb_destroy_configuration(udev);
 	usb_release_bos_descriptor(udev);
+	of_node_put(dev->of_node);
 	usb_put_hcd(hcd);
 	kfree(udev->product);
 	kfree(udev->manufacturer);
@@ -614,6 +615,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
 		dev->route = 0;
 
 		dev->dev.parent = bus->controller;
+		device_set_of_node_from_dev(&dev->dev, bus->sysdev);
 		dev_set_name(&dev->dev, "usb%d", bus->busnum);
 		root_hub = 1;
 	} else {
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 9cd8722..a3ffe97 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
 	{ .compatible = "snps,dwc2" },
 	{ .compatible = "samsung,s3c6400-hsotg" },
+	{ .compatible = "amlogic,meson8-usb",
+	  .data = dwc2_set_amlogic_params },
 	{ .compatible = "amlogic,meson8b-usb",
 	  .data = dwc2_set_amlogic_params },
 	{ .compatible = "amlogic,meson-gxbb-usb",
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 455d89a..326b302 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -151,11 +151,24 @@ static void __dwc3_set_mode(struct work_struct *work)
 	switch (dwc->desired_dr_role) {
 	case DWC3_GCTL_PRTCAP_HOST:
 		ret = dwc3_host_init(dwc);
-		if (ret)
+		if (ret) {
 			dev_err(dwc->dev, "failed to initialize host\n");
+		} else {
+			if (dwc->usb2_phy)
+				otg_set_vbus(dwc->usb2_phy->otg, true);
+			if (dwc->usb2_generic_phy)
+				phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+
+		}
 		break;
 	case DWC3_GCTL_PRTCAP_DEVICE:
 		dwc3_event_buffers_setup(dwc);
+
+		if (dwc->usb2_phy)
+			otg_set_vbus(dwc->usb2_phy->otg, false);
+		if (dwc->usb2_generic_phy)
+			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+
 		ret = dwc3_gadget_init(dwc);
 		if (ret)
 			dev_err(dwc->dev, "failed to initialize peripheral\n");
@@ -721,6 +734,8 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 }
 
+static int dwc3_core_get_phy(struct dwc3 *dwc);
+
 /**
  * dwc3_core_init - Low-level initialization of DWC3 Core
  * @dwc: Pointer to our controller context structure
@@ -759,6 +774,10 @@ static int dwc3_core_init(struct dwc3 *dwc)
 	if (ret)
 		goto err0;
 
+	ret = dwc3_core_get_phy(dwc);
+	if (ret)
+		goto err0;
+
 	dwc3_core_setup_global_control(dwc);
 	dwc3_core_num_eps(dwc);
 
@@ -796,13 +815,19 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
 	}
 
-	/*
-	 * Enable hardware control of sending remote wakeup in HS when
-	 * the device is in the L1 state.
-	 */
-	if (dwc->revision >= DWC3_REVISION_290A) {
+	if (dwc->revision >= DWC3_REVISION_250A) {
 		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
-		reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+
+		/*
+		 * Enable hardware control of sending remote wakeup
+		 * in HS when the device is in the L1 state.
+		 */
+		if (dwc->revision >= DWC3_REVISION_290A)
+			reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+
+		if (dwc->dis_tx_ipgap_linecheck_quirk)
+			reg |= DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS;
+
 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 	}
 
@@ -903,6 +928,12 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
 	switch (dwc->dr_mode) {
 	case USB_DR_MODE_PERIPHERAL:
 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+		if (dwc->usb2_phy)
+			otg_set_vbus(dwc->usb2_phy->otg, false);
+		if (dwc->usb2_generic_phy)
+			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+
 		ret = dwc3_gadget_init(dwc);
 		if (ret) {
 			if (ret != -EPROBE_DEFER)
@@ -912,6 +943,12 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
 		break;
 	case USB_DR_MODE_HOST:
 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+
+		if (dwc->usb2_phy)
+			otg_set_vbus(dwc->usb2_phy->otg, true);
+		if (dwc->usb2_generic_phy)
+			phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+
 		ret = dwc3_host_init(dwc);
 		if (ret) {
 			if (ret != -EPROBE_DEFER)
@@ -1023,6 +1060,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
 				"snps,dis-u2-freeclk-exists-quirk");
 	dwc->dis_del_phy_power_chg_quirk = device_property_read_bool(dev,
 				"snps,dis-del-phy-power-chg-quirk");
+	dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
+				"snps,dis-tx-ipgap-linecheck-quirk");
 
 	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
 				"snps,tx_de_emphasis_quirk");
@@ -1148,10 +1187,6 @@ static int dwc3_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, dwc);
 	dwc3_cache_hwparams(dwc);
 
-	ret = dwc3_core_get_phy(dwc);
-	if (ret)
-		goto err0;
-
 	spin_lock_init(&dwc->lock);
 
 	pm_runtime_set_active(dev);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 981c77f..ea910ac 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1,4 +1,4 @@
-/**
+/*
  * core.h - DesignWare USB3 DRD Core Header
  *
  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
@@ -204,6 +204,7 @@
 #define DWC3_GCTL_DSBLCLKGTNG		BIT(0)
 
 /* Global User Control 1 Register */
+#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
 #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	BIT(24)
 
 /* Global USB2 PHY Configuration Register */
@@ -522,7 +523,6 @@ struct dwc3_event_buffer {
  * @trb_pool_dma: dma address of @trb_pool
  * @trb_enqueue: enqueue 'pointer' into TRB array
  * @trb_dequeue: dequeue 'pointer' into TRB array
- * @desc: usb_endpoint_descriptor pointer
  * @dwc: pointer to DWC controller
  * @saved_state: ep state saved during hibernation
  * @flags: endpoint flags (wedged, stalled, ...)
@@ -664,7 +664,7 @@ enum dwc3_link_state {
  * @bpl: DW0-3
  * @bph: DW4-7
  * @size: DW8-B
- * @trl: DWC-F
+ * @ctrl: DWC-F
  */
 struct dwc3_trb {
 	u32		bpl;
@@ -674,16 +674,16 @@ struct dwc3_trb {
 } __packed;
 
 /**
- * dwc3_hwparams - copy of HWPARAMS registers
- * @hwparams0 - GHWPARAMS0
- * @hwparams1 - GHWPARAMS1
- * @hwparams2 - GHWPARAMS2
- * @hwparams3 - GHWPARAMS3
- * @hwparams4 - GHWPARAMS4
- * @hwparams5 - GHWPARAMS5
- * @hwparams6 - GHWPARAMS6
- * @hwparams7 - GHWPARAMS7
- * @hwparams8 - GHWPARAMS8
+ * struct dwc3_hwparams - copy of HWPARAMS registers
+ * @hwparams0: GHWPARAMS0
+ * @hwparams1: GHWPARAMS1
+ * @hwparams2: GHWPARAMS2
+ * @hwparams3: GHWPARAMS3
+ * @hwparams4: GHWPARAMS4
+ * @hwparams5: GHWPARAMS5
+ * @hwparams6: GHWPARAMS6
+ * @hwparams7: GHWPARAMS7
+ * @hwparams8: GHWPARAMS8
  */
 struct dwc3_hwparams {
 	u32	hwparams0;
@@ -730,7 +730,8 @@ struct dwc3_hwparams {
  * @unaligned: true for OUT endpoints with length not divisible by maxp
  * @direction: IN or OUT direction flag
  * @mapped: true when request has been dma-mapped
- * @queued: true when request has been queued to HW
+ * @started: request is started
+ * @zero: wants a ZLP
  */
 struct dwc3_request {
 	struct usb_request	request;
@@ -761,17 +762,23 @@ struct dwc3_scratchpad_array {
 
 /**
  * struct dwc3 - representation of our controller
- * @drd_work - workqueue used for role swapping
+ * @drd_work: workqueue used for role swapping
  * @ep0_trb: trb which is used for the ctrl_req
+ * @bounce: address of bounce buffer
+ * @scratchbuf: address of scratch buffer
+ * @setup_buf: used while processing STD USB requests
- * @ep0_trb: dma address of ep0_trb
+ * @ep0_trb_addr: dma address of @ep0_trb
+ * @bounce_addr: dma address of @bounce
  * @ep0_usb_req: dummy req used while handling STD USB requests
  * @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: one control transfer is completed and we enter the setup phase
  * @lock: for synchronizing
  * @dev: pointer to our struct device
+ * @sysdev: pointer to the DMA-capable device
  * @xhci: pointer to our xHCI child
- * @event_buffer_list: a list of event buffers
+ * @xhci_resources: struct resources for our @xhci child
+ * @ev_buf: struct dwc3_event_buffer pointer
+ * @eps: endpoint array
  * @gadget: device side representation of the peripheral controller
  * @gadget_driver: pointer to the gadget driver
  * @regs: base address for our registers
@@ -795,8 +802,6 @@ struct dwc3_scratchpad_array {
  * @usb2_generic_phy: pointer to USB2 PHY
  * @usb3_generic_phy: pointer to USB3 PHY
  * @ulpi: pointer to ulpi interface
- * @dcfg: saved contents of DCFG register
- * @gctl: saved contents of GCTL register
  * @isoch_delay: wValue from Set Isochronous Delay request;
  * @u2sel: parameter from Set SEL request.
  * @u2pel: parameter from Set SEL request.
@@ -830,7 +835,6 @@ struct dwc3_scratchpad_array {
  * @pending_events: true when we have pending IRQs to be handled
  * @pullups_connected: true when Run/Stop bit is set
  * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
- * @start_config_issued: true when StartConfig command has been issued
  * @three_stage_setup: set if we perform a three phase setup
  * @usb3_lpm_capable: set if hardware supports Link Power Management
  * @disable_scramble_quirk: set if we enable the disable scramble quirk
@@ -845,11 +849,14 @@ struct dwc3_scratchpad_array {
  * @dis_u2_susphy_quirk: set if we disable usb2 suspend phy
  * @dis_enblslpm_quirk: set if we clear enblslpm in GUSB2PHYCFG,
  *                      disabling the suspend signal to the PHY.
+ * @dis_rxdet_inp3_quirk: set if we disable Rx.Detect in P3
  * @dis_u2_freeclk_exists_quirk : set if we clear u2_freeclk_exists
  *			in GUSB2PHYCFG, specify that USB2 PHY doesn't
  *			provide a free-running PHY clock.
  * @dis_del_phy_power_chg_quirk: set if we disable delay phy power
  *			change quirk.
+ * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
+ *			check during HS transmit.
  * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
  * @tx_de_emphasis: Tx de-emphasis value
  * 	0	- -6dB de-emphasis
@@ -1004,6 +1011,7 @@ struct dwc3 {
 	unsigned		dis_rxdet_inp3_quirk:1;
 	unsigned		dis_u2_freeclk_exists_quirk:1;
 	unsigned		dis_del_phy_power_chg_quirk:1;
+	unsigned		dis_tx_ipgap_linecheck_quirk:1;
 
 	unsigned		tx_de_emphasis_quirk:1;
 	unsigned		tx_de_emphasis:2;
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index cb2d8d3..5e9c070e 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -173,9 +173,8 @@ static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
  * @event: the event code
  */
 static inline const char *
-dwc3_gadget_event_string(const struct dwc3_event_devt *event)
+dwc3_gadget_event_string(char *str, const struct dwc3_event_devt *event)
 {
-	static char str[256];
 	enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
 
 	switch (event->type) {
@@ -223,15 +222,249 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event)
 	return str;
 }
 
+static inline void dwc3_decode_get_status(__u8 t, __u16 i, __u16 l, char *str)
+{
+	switch (t & USB_RECIP_MASK) {
+	case USB_RECIP_INTERFACE:
+		sprintf(str, "Get Interface Status(Intf = %d, Length = %d)",
+			i, l);
+		break;
+	case USB_RECIP_ENDPOINT:
+		sprintf(str, "Get Endpoint Status(ep%d%s)",
+			i & ~USB_DIR_IN,
+			i & USB_DIR_IN ? "in" : "out");
+		break;
+	}
+}
+
+static inline void dwc3_decode_set_clear_feature(__u8 t, __u8 b, __u16 v,
+						 __u16 i, char *str)
+{
+	switch (t & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		sprintf(str, "%s Device Feature(%s%s)",
+			b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+			({char *s;
+				switch (v) {
+				case USB_DEVICE_SELF_POWERED:
+					s = "Self Powered";
+					break;
+				case USB_DEVICE_REMOTE_WAKEUP:
+					s = "Remote Wakeup";
+					break;
+				case USB_DEVICE_TEST_MODE:
+					s = "Test Mode";
+					break;
+				default:
+					s = "UNKNOWN";
+				} s; }),
+			v == USB_DEVICE_TEST_MODE ?
+			({ char *s;
+				switch (i) {
+				case TEST_J:
+					s = ": TEST_J";
+					break;
+				case TEST_K:
+					s = ": TEST_K";
+					break;
+				case TEST_SE0_NAK:
+					s = ": TEST_SE0_NAK";
+					break;
+				case TEST_PACKET:
+					s = ": TEST_PACKET";
+					break;
+				case TEST_FORCE_EN:
+					s = ": TEST_FORCE_EN";
+					break;
+				default:
+					s = ": UNKNOWN";
+				} s; }) : "");
+		break;
+	case USB_RECIP_INTERFACE:
+		sprintf(str, "%s Interface Feature(%s)",
+			b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+			v == USB_INTRF_FUNC_SUSPEND ?
+			"Function Suspend" : "UNKNOWN");
+		break;
+	case USB_RECIP_ENDPOINT:
+		sprintf(str, "%s Endpoint Feature(%s ep%d%s)",
+			b == USB_REQ_CLEAR_FEATURE ? "Clear" : "Set",
+			v == USB_ENDPOINT_HALT ? "Halt" : "UNKNOWN",
+			i & ~USB_DIR_IN,
+			i & USB_DIR_IN ? "in" : "out");
+		break;
+	}
+}
+
+static inline void dwc3_decode_set_address(__u16 v, char *str)
+{
+	sprintf(str, "Set Address(Addr = %02x)", v);
+}
+
+static inline void dwc3_decode_get_set_descriptor(__u8 t, __u8 b, __u16 v,
+						  __u16 i, __u16 l, char *str)
+{
+	sprintf(str, "%s %s Descriptor(Index = %d, Length = %d)",
+		b == USB_REQ_GET_DESCRIPTOR ? "Get" : "Set",
+		({ char *s;
+			switch (v >> 8) {
+			case USB_DT_DEVICE:
+				s = "Device";
+				break;
+			case USB_DT_CONFIG:
+				s = "Configuration";
+				break;
+			case USB_DT_STRING:
+				s = "String";
+				break;
+			case USB_DT_INTERFACE:
+				s = "Interface";
+				break;
+			case USB_DT_ENDPOINT:
+				s = "Endpoint";
+				break;
+			case USB_DT_DEVICE_QUALIFIER:
+				s = "Device Qualifier";
+				break;
+			case USB_DT_OTHER_SPEED_CONFIG:
+				s = "Other Speed Config";
+				break;
+			case USB_DT_INTERFACE_POWER:
+				s = "Interface Power";
+				break;
+			case USB_DT_OTG:
+				s = "OTG";
+				break;
+			case USB_DT_DEBUG:
+				s = "Debug";
+				break;
+			case USB_DT_INTERFACE_ASSOCIATION:
+				s = "Interface Association";
+				break;
+			case USB_DT_BOS:
+				s = "BOS";
+				break;
+			case USB_DT_DEVICE_CAPABILITY:
+				s = "Device Capability";
+				break;
+			case USB_DT_PIPE_USAGE:
+				s = "Pipe Usage";
+				break;
+			case USB_DT_SS_ENDPOINT_COMP:
+				s = "SS Endpoint Companion";
+				break;
+			case USB_DT_SSP_ISOC_ENDPOINT_COMP:
+				s = "SSP Isochronous Endpoint Companion";
+				break;
+			default:
+				s = "UNKNOWN";
+				break;
+			} s; }), v & 0xff, l);
+}
+
+
+static inline void dwc3_decode_get_configuration(__u16 l, char *str)
+{
+	sprintf(str, "Get Configuration(Length = %d)", l);
+}
+
+static inline void dwc3_decode_set_configuration(__u8 v, char *str)
+{
+	sprintf(str, "Set Configuration(Config = %d)", v);
+}
+
+static inline void dwc3_decode_get_intf(__u16 i, __u16 l, char *str)
+{
+	sprintf(str, "Get Interface(Intf = %d, Length = %d)", i, l);
+}
+
+static inline void dwc3_decode_set_intf(__u8 v, __u16 i, char *str)
+{
+	sprintf(str, "Set Interface(Intf = %d, Alt.Setting = %d)", i, v);
+}
+
+static inline void dwc3_decode_synch_frame(__u16 i, __u16 l, char *str)
+{
+	sprintf(str, "Synch Frame(Endpoint = %d, Length = %d)", i, l);
+}
+
+static inline void dwc3_decode_set_sel(__u16 l, char *str)
+{
+	sprintf(str, "Set SEL(Length = %d)", l);
+}
+
+static inline void dwc3_decode_set_isoch_delay(__u8 v, char *str)
+{
+	sprintf(str, "Set Isochronous Delay(Delay = %d ns)", v);
+}
+
+/**
+ * dwc3_decode_ctrl - returns a string representation of ctrl request
+ */
+static inline const char *dwc3_decode_ctrl(char *str, __u8 bRequestType,
+		__u8 bRequest, __u16 wValue, __u16 wIndex, __u16 wLength)
+{
+	switch (bRequest) {
+	case USB_REQ_GET_STATUS:
+		dwc3_decode_get_status(bRequestType, wIndex, wLength, str);
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		dwc3_decode_set_clear_feature(bRequestType, bRequest, wValue,
+					      wIndex, str);
+		break;
+	case USB_REQ_SET_ADDRESS:
+		dwc3_decode_set_address(wValue, str);
+		break;
+	case USB_REQ_GET_DESCRIPTOR:
+	case USB_REQ_SET_DESCRIPTOR:
+		dwc3_decode_get_set_descriptor(bRequestType, bRequest, wValue,
+					       wIndex, wLength, str);
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		dwc3_decode_get_configuration(wLength, str);
+		break;
+	case USB_REQ_SET_CONFIGURATION:
+		dwc3_decode_set_configuration(wValue, str);
+		break;
+	case USB_REQ_GET_INTERFACE:
+		dwc3_decode_get_intf(wIndex, wLength, str);
+		break;
+	case USB_REQ_SET_INTERFACE:
+		dwc3_decode_set_intf(wValue, wIndex, str);
+		break;
+	case USB_REQ_SYNCH_FRAME:
+		dwc3_decode_synch_frame(wIndex, wLength, str);
+		break;
+	case USB_REQ_SET_SEL:
+		dwc3_decode_set_sel(wLength, str);
+		break;
+	case USB_REQ_SET_ISOCH_DELAY:
+		dwc3_decode_set_isoch_delay(wValue, str);
+		break;
+	default:
+		sprintf(str, "%02x %02x %02x %02x %02x %02x %02x %02x",
+			bRequestType, bRequest,
+			cpu_to_le16(wValue) & 0xff,
+			cpu_to_le16(wValue) >> 8,
+			cpu_to_le16(wIndex) & 0xff,
+			cpu_to_le16(wIndex) >> 8,
+			cpu_to_le16(wLength) & 0xff,
+			cpu_to_le16(wLength) >> 8);
+	}
+
+	return str;
+}
+
 /**
  * dwc3_ep_event_string - returns event name
  * @event: the event code
  */
 static inline const char *
-dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state)
+dwc3_ep_event_string(char *str, const struct dwc3_event_depevt *event,
+		     u32 ep0state)
 {
 	u8 epnum = event->endpoint_number;
-	static char str[256];
 	size_t len;
 	int status;
 	int ret;
@@ -332,14 +565,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
 	}
 }
 
-static inline const char *dwc3_decode_event(u32 event, u32 ep0state)
+static inline const char *dwc3_decode_event(char *str, u32 event, u32 ep0state)
 {
 	const union dwc3_event evt = (union dwc3_event) event;
 
 	if (evt.type.is_devspec)
-		return dwc3_gadget_event_string(&evt.devt);
+		return dwc3_gadget_event_string(str, &evt.devt);
 	else
-		return dwc3_ep_event_string(&evt.depevt, ep0state);
+		return dwc3_ep_event_string(str, &evt.depevt, ep0state);
 }
 
 static inline const char *dwc3_ep_cmd_status_string(int status)
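The dwc3_decode_ctrl() helper added above just maps the five SETUP-packet fields onto readable strings for the trace buffer. A minimal standalone sketch of the same idea in plain userspace C - hypothetical helper name, only a few standard requests handled, everything else dumped raw - might look like this:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical standalone helper, not the driver's dwc3_decode_ctrl() */
static const char *setup_to_str(char *str, size_t len, uint8_t bRequestType,
				uint8_t bRequest, uint16_t wValue,
				uint16_t wIndex, uint16_t wLength)
{
	switch (bRequest) {
	case 0x05: /* SET_ADDRESS */
		snprintf(str, len, "Set Address(Addr = %02x)", wValue);
		break;
	case 0x06: /* GET_DESCRIPTOR */
		snprintf(str, len, "Get Descriptor(Type = %d, Index = %d, Length = %d)",
			 wValue >> 8, wValue & 0xff, wLength);
		break;
	case 0x09: /* SET_CONFIGURATION */
		snprintf(str, len, "Set Configuration(Config = %d)", wValue);
		break;
	default:	/* anything else: dump the raw fields */
		snprintf(str, len, "%02x %02x %04x %04x %04x",
			 bRequestType, bRequest, wValue, wIndex, wLength);
	}
	return str;
}

int main(void)
{
	char buf[128];

	/* GET_DESCRIPTOR(DEVICE, 18 bytes): the first request most hosts send */
	printf("%s\n", setup_to_str(buf, sizeof(buf), 0x80, 0x06,
				    0x0100, 0x0000, 18));
	return 0;
}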
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 7be963dd..4e09be8 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -653,16 +653,13 @@ static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
 		goto out;
 	}
 
-	seq_printf(s, "enqueue pointer %d\n", dep->trb_enqueue);
-	seq_printf(s, "dequeue pointer %d\n", dep->trb_dequeue);
-	seq_printf(s, "\n--------------------------------------------------\n\n");
 	seq_printf(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");
 
 	for (i = 0; i < DWC3_TRB_NUM; i++) {
 		struct dwc3_trb *trb = &dep->trb_pool[i];
 		unsigned int type = DWC3_TRBCTL_TYPE(trb->ctrl);
 
-		seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d\n",
+		seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d       %c%c\n",
 				trb->bph, trb->bpl, trb->size,
 				dwc3_trb_type_string(type),
 				!!(trb->ctrl & DWC3_TRB_CTRL_IOC),
@@ -670,7 +667,9 @@ static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
 				!!(trb->ctrl & DWC3_TRB_CTRL_CSP),
 				!!(trb->ctrl & DWC3_TRB_CTRL_CHN),
 				!!(trb->ctrl & DWC3_TRB_CTRL_LST),
-				!!(trb->ctrl & DWC3_TRB_CTRL_HWO));
+				!!(trb->ctrl & DWC3_TRB_CTRL_HWO),
+				dep->trb_enqueue == i ? 'E' : ' ',
+				dep->trb_dequeue == i ? 'D' : ' ');
 	}
 
 out:
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 98f74ff..e089df7 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -125,12 +125,16 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 		dev_err(dev, "couldn't get clock\n");
 		return -EINVAL;
 	}
-	clk_prepare_enable(exynos->clk);
+	ret = clk_prepare_enable(exynos->clk);
+	if (ret)
+		return ret;
 
 	exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
 	if (IS_ERR(exynos->susp_clk))
 		exynos->susp_clk = NULL;
-	clk_prepare_enable(exynos->susp_clk);
+	ret = clk_prepare_enable(exynos->susp_clk);
+	if (ret)
+		goto susp_clk_err;
 
 	if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
 		exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
@@ -139,7 +143,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 			ret = -ENODEV;
 			goto axius_clk_err;
 		}
-		clk_prepare_enable(exynos->axius_clk);
+		ret = clk_prepare_enable(exynos->axius_clk);
+		if (ret)
+			goto axius_clk_err;
 	} else {
 		exynos->axius_clk = NULL;
 	}
@@ -197,6 +203,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 	clk_disable_unprepare(exynos->axius_clk);
 axius_clk_err:
 	clk_disable_unprepare(exynos->susp_clk);
+susp_clk_err:
 	clk_disable_unprepare(exynos->clk);
 	return ret;
 }
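The dwc3-exynos.c hunks above add the usual goto-based unwind for the clock enables, including the new susp_clk_err label. A toy standalone illustration of that unwind order - printf stand-ins instead of the kernel clk API, all names invented - for reference:

#include <stdio.h>

/* Simulated clk_prepare_enable()/clk_disable_unprepare() */
static int enable(const char *name, int fail)
{
	if (fail) {
		printf("enable %s: failed\n", name);
		return -1;
	}
	printf("enable %s\n", name);
	return 0;
}

static void disable(const char *name)
{
	printf("disable %s\n", name);
}

static int probe(int fail_susp, int fail_axius)
{
	int ret;

	ret = enable("clk", 0);
	if (ret)
		return ret;

	ret = enable("susp_clk", fail_susp);
	if (ret)
		goto susp_clk_err;

	ret = enable("axius_clk", fail_axius);
	if (ret)
		goto axius_clk_err;

	return 0;

axius_clk_err:
	disable("susp_clk");
susp_clk_err:
	disable("clk");
	return ret;
}

int main(void)
{
	/* axius_clk failure unwinds susp_clk first, then clk */
	return probe(0, 1) ? 1 : 0;
}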
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 84a2ceb..7e995df 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -42,7 +42,7 @@
 #define PCI_DEVICE_ID_INTEL_CNPLP		0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
 
-#define PCI_INTEL_BXT_DSM_UUID		"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_DSM_GUID		"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR	4
 #define PCI_INTEL_BXT_STATE_D0		0
 #define PCI_INTEL_BXT_STATE_D3		3
@@ -51,14 +51,14 @@
  * struct dwc3_pci - Driver private structure
  * @dwc3: child dwc3 platform_device
  * @pci: our link to PCI bus
- * @uuid: _DSM UUID
+ * @guid: _DSM GUID
  * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
  */
 struct dwc3_pci {
 	struct platform_device *dwc3;
 	struct pci_dev *pci;
 
-	u8 uuid[16];
+	guid_t guid;
 
 	unsigned int has_dsm_for_pm:1;
 };
@@ -120,7 +120,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
 
 		if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
 				pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
-			acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+			guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
 			dwc->has_dsm_for_pm = true;
 		}
 
@@ -230,7 +230,6 @@ static int dwc3_pci_probe(struct pci_dev *pci,
 	}
 
 	device_init_wakeup(dev, true);
-	device_set_run_wake(dev, true);
 	pci_set_drvdata(pci, dwc);
 	pm_runtime_put(dev);
 
@@ -292,7 +291,7 @@ static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
 	tmp.type = ACPI_TYPE_INTEGER;
 	tmp.integer.value = param;
 
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
 			1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
 	if (!obj) {
 		dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
@@ -310,7 +309,7 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
 {
 	struct dwc3_pci		*dwc = dev_get_drvdata(dev);
 
-	if (device_run_wake(dev))
+	if (device_can_wakeup(dev))
 		return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
 
 	return -EBUSY;
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index dfbf464..505676f 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -230,7 +230,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
 
 	dwc3_data->syscfg_reg_off = res->start;
 
-	dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+	dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
 		 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
 
 	dwc3_data->rstc_pwrdn =
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index a78c78e..827e376 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1,4 +1,4 @@
-/**
+/*
  * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
  *
  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
@@ -319,10 +319,16 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
 {
 	struct dwc3_ep		*dep;
 	u32			recip;
+	u32			value;
 	u32			reg;
 	u16			usb_status = 0;
 	__le16			*response_pkt;
 
+	/* We don't support PTM_STATUS */
+	value = le16_to_cpu(ctrl->wValue);
+	if (value != 0)
+		return -EINVAL;
+
 	recip = ctrl->bRequestType & USB_RECIP_MASK;
 	switch (recip) {
 	case USB_RECIP_DEVICE:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index aea9a5b..9e41605a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1,4 +1,4 @@
-/**
+/*
  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
  *
  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
@@ -36,13 +36,12 @@
 #include "io.h"
 
 /**
- * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
+ * dwc3_gadget_set_test_mode - enables usb2 test modes
  * @dwc: pointer to our context structure
  * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
  *
- * Caller should take care of locking. This function will
- * return 0 on success or -EINVAL if wrong Test Selector
- * is passed
+ * Caller should take care of locking. This function will return 0 on
+ * success or -EINVAL if wrong Test Selector is passed.
  */
 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
 {
@@ -69,7 +68,7 @@ int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
 }
 
 /**
- * dwc3_gadget_get_link_state - Gets current state of USB Link
+ * dwc3_gadget_get_link_state - gets current state of usb link
  * @dwc: pointer to our context structure
  *
  * Caller should take care of locking. This function will
@@ -85,7 +84,7 @@ int dwc3_gadget_get_link_state(struct dwc3 *dwc)
 }
 
 /**
- * dwc3_gadget_set_link_state - Sets USB Link to a particular State
+ * dwc3_gadget_set_link_state - sets usb link to a particular state
  * @dwc: pointer to our context structure
  * @state: the state to put link into
  *
@@ -143,8 +142,8 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
 }
 
 /**
- * dwc3_ep_inc_trb() - Increment a TRB index.
- * @index - Pointer to the TRB index to increment.
+ * dwc3_ep_inc_trb - increment a trb index.
+ * @index: Pointer to the TRB index to increment.
  *
  * The index should never point to the link TRB. After incrementing,
  * if it points to the link TRB, wrap around to the beginning. The
@@ -157,16 +156,34 @@ static void dwc3_ep_inc_trb(u8 *index)
 		*index = 0;
 }
 
+/**
+ * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
+ * @dep: The endpoint whose enqueue pointer we're incrementing
+ */
 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
 {
 	dwc3_ep_inc_trb(&dep->trb_enqueue);
 }
 
+/**
+ * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
+ * @dep: The endpoint whose dequeue pointer we're incrementing
+ */
 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
 {
 	dwc3_ep_inc_trb(&dep->trb_dequeue);
 }
 
+/**
+ * dwc3_gadget_giveback - call struct usb_request's ->complete callback
+ * @dep: The endpoint to which the request belongs
+ * @req: The request we're giving back
+ * @status: completion code for the request
+ *
+ * Must be called with controller's lock held and interrupts disabled. This
+ * function will unmap @req and call its ->complete() callback to notify upper
+ * layers that it has completed.
+ */
 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 		int status)
 {
@@ -193,6 +210,15 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 		pm_runtime_put(dwc->dev);
 }
 
+/**
+ * dwc3_send_gadget_generic_command - issue a generic command for the controller
+ * @dwc: pointer to the controller context
+ * @cmd: the command to be issued
+ * @param: command parameter
+ *
+ * Caller should take care of locking. Issue @cmd with a given @param to @dwc
+ * and wait for its completion.
+ */
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 {
 	u32		timeout = 500;
@@ -225,6 +251,15 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
 
+/**
+ * dwc3_send_gadget_ep_cmd - issue an endpoint command
+ * @dep: the endpoint to which the command is going to be issued
+ * @cmd: the command to be issued
+ * @params: parameters to the command
+ *
+ * Caller should handle locking. This function will issue @cmd with given
+ * @params to @dep and wait for its completion.
+ */
 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		struct dwc3_gadget_ep_cmd_params *params)
 {
@@ -422,36 +457,38 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
 
 /**
- * dwc3_gadget_start_config - Configure EP resources
+ * dwc3_gadget_start_config - configure ep resources
  * @dwc: pointer to our controller context structure
  * @dep: endpoint that is being enabled
  *
- * The assignment of transfer resources cannot perfectly follow the
- * data book due to the fact that the controller driver does not have
- * all knowledge of the configuration in advance. It is given this
- * information piecemeal by the composite gadget framework after every
- * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
- * programming model in this scenario can cause errors. For two
- * reasons:
+ * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
+ * completion, it will set Transfer Resource for all available endpoints.
  *
- * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
- * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
- * multiple interfaces.
+ * The assignment of transfer resources cannot perfectly follow the data book
+ * due to the fact that the controller driver does not have all knowledge of the
+ * configuration in advance. It is given this information piecemeal by the
+ * composite gadget framework after every SET_CONFIGURATION and
+ * SET_INTERFACE. Trying to follow the databook programming model in this
+ * scenario can cause errors. For two reasons:
  *
- * 2) The databook does not mention doing more DEPXFERCFG for new
+ * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
+ * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
+ * incorrect in the scenario of multiple interfaces.
+ *
+ * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
  * endpoint on alt setting (8.1.6).
  *
  * The following simplified method is used instead:
  *
- * All hardware endpoints can be assigned a transfer resource and this
- * setting will stay persistent until either a core reset or
- * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
- * do DEPXFERCFG for every hardware endpoint as well. We are
+ * All hardware endpoints can be assigned a transfer resource and this setting
+ * will stay persistent until either a core reset or hibernation. So whenever we
+ * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
+ * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
  * guaranteed that there are as many transfer resources as endpoints.
  *
- * This function is called for each endpoint when it is being enabled
- * but is triggered only when called for EP0-out, which always happens
- * first, and which should only happen in one of the above conditions.
+ * This function is called for each endpoint when it is being enabled but is
+ * triggered only when called for EP0-out, which always happens first, and which
+ * should only happen in one of the above conditions.
  */
 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
@@ -569,11 +606,13 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
 }
 
 /**
- * __dwc3_gadget_ep_enable - Initializes a HW endpoint
+ * __dwc3_gadget_ep_enable - initializes a hw endpoint
  * @dep: endpoint to be initialized
- * @desc: USB Endpoint Descriptor
+ * @modify: if true, modify existing endpoint configuration
+ * @restore: if true, restore endpoint configuration from scratch buffer
  *
- * Caller should take care of locking
+ * Caller should take care of locking. Execute all necessary commands to
+ * initialize a HW endpoint so it can be used by a gadget driver.
  */
 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 		bool modify, bool restore)
@@ -685,11 +724,13 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 }
 
 /**
- * __dwc3_gadget_ep_disable - Disables a HW endpoint
+ * __dwc3_gadget_ep_disable - disables a hw endpoint
  * @dep: the endpoint to disable
  *
- * This function also removes requests which are currently processed ny the
- * hardware and those which are not yet scheduled.
+ * This function undoes what __dwc3_gadget_ep_enable did and also removes
+ * requests which are currently being processed by the hardware and those which
+ * are not yet scheduled.
+ *
  * Caller should take care of locking.
  */
 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
@@ -932,7 +973,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 }
 
 /**
- * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
+ * dwc3_ep_prev_trb - returns the previous TRB in the ring
  * @dep: The endpoint with the TRB ring
  * @index: The index of the current TRB in the ring
  *
@@ -953,7 +994,6 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
 	struct dwc3_trb		*tmp;
-	struct dwc3		*dwc = dep->dwc;
 	u8			trbs_left;
 
 	/*
@@ -965,8 +1005,7 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 	 */
 	if (dep->trb_enqueue == dep->trb_dequeue) {
 		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-		if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
-				  "%s No TRBS left\n", dep->name))
+		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
 			return 0;
 
 		return DWC3_TRB_NUM - 1;
@@ -1101,6 +1140,17 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
 	}
 
 	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
+		struct dwc3	*dwc = dep->dwc;
+		int		ret;
+
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+						    dep->direction);
+		if (ret)
+			return;
+
+		req->sg			= req->request.sg;
+		req->num_pending_sgs	= req->request.num_mapped_sgs;
+
 		if (req->num_pending_sgs > 0)
 			dwc3_prepare_one_trb_sg(dep, req);
 		else
@@ -1207,7 +1257,7 @@ static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 {
 	struct dwc3		*dwc = dep->dwc;
-	int			ret;
+	int			ret = 0;
 
 	if (!dep->endpoint.desc) {
 		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
@@ -1215,12 +1265,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 		return -ESHUTDOWN;
 	}
 
-	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
-				&req->request, req->dep->name)) {
-		dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
-				dep->name, &req->request, req->dep->name);
+	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+				&req->request, req->dep->name))
 		return -EINVAL;
-	}
 
 	pm_runtime_get(dwc->dev);
 
@@ -1231,14 +1278,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 
 	trace_dwc3_ep_queue(req);
 
-	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
-					    dep->direction);
-	if (ret)
-		return ret;
-
-	req->sg			= req->request.sg;
-	req->num_pending_sgs	= req->request.num_mapped_sgs;
-
 	list_add_tail(&req->list, &dep->pending_list);
 
 	/*
@@ -1396,7 +1435,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
 			}
 			goto out1;
 		}
-		dev_err(dwc->dev, "request %p was not queued to %s\n",
+		dev_err(dwc->dev, "request %pK was not queued to %s\n",
 				request, ep->name);
 		ret = -EINVAL;
 		goto out0;
@@ -1741,8 +1780,8 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 
 /**
- * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
- * dwc: pointer to our context structure
+ * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
+ * @dwc: pointer to our context structure
  *
  * The following looks complex but it's actually very simple. In order to
  * calculate the number of packets we can burst at once on OUT transfers, we're
@@ -1798,49 +1837,6 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
 	}
 
-	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
-	reg &= ~(DWC3_DCFG_SPEED_MASK);
-
-	/**
-	 * WORKAROUND: DWC3 revision < 2.20a have an issue
-	 * which would cause metastability state on Run/Stop
-	 * bit if we try to force the IP to USB2-only mode.
-	 *
-	 * Because of that, we cannot configure the IP to any
-	 * speed other than the SuperSpeed
-	 *
-	 * Refers to:
-	 *
-	 * STAR#9000525659: Clock Domain Crossing on DCTL in
-	 * USB 2.0 Mode
-	 */
-	if (dwc->revision < DWC3_REVISION_220A) {
-		reg |= DWC3_DCFG_SUPERSPEED;
-	} else {
-		switch (dwc->maximum_speed) {
-		case USB_SPEED_LOW:
-			reg |= DWC3_DCFG_LOWSPEED;
-			break;
-		case USB_SPEED_FULL:
-			reg |= DWC3_DCFG_FULLSPEED;
-			break;
-		case USB_SPEED_HIGH:
-			reg |= DWC3_DCFG_HIGHSPEED;
-			break;
-		case USB_SPEED_SUPER_PLUS:
-			reg |= DWC3_DCFG_SUPERSPEED_PLUS;
-			break;
-		default:
-			dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
-				dwc->maximum_speed);
-			/* fall through */
-		case USB_SPEED_SUPER:
-			reg |= DWC3_DCFG_SUPERSPEED;
-			break;
-		}
-	}
-	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
 	/*
 	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
 	 * field instead of letting dwc3 itself calculate that automatically.
@@ -1972,6 +1968,63 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
 	return 0;
 }
 
+static void dwc3_gadget_set_speed(struct usb_gadget *g,
+				  enum usb_device_speed speed)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+	unsigned long		flags;
+	u32			reg;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+	reg &= ~(DWC3_DCFG_SPEED_MASK);
+
+	/*
+	 * WORKAROUND: DWC3 revision < 2.20a have an issue
+	 * which would cause metastability state on Run/Stop
+	 * bit if we try to force the IP to USB2-only mode.
+	 *
+	 * Because of that, we cannot configure the IP to any
+	 * speed other than the SuperSpeed
+	 *
+	 * Refers to:
+	 *
+	 * STAR#9000525659: Clock Domain Crossing on DCTL in
+	 * USB 2.0 Mode
+	 */
+	if (dwc->revision < DWC3_REVISION_220A) {
+		reg |= DWC3_DCFG_SUPERSPEED;
+	} else {
+		switch (speed) {
+		case USB_SPEED_LOW:
+			reg |= DWC3_DCFG_LOWSPEED;
+			break;
+		case USB_SPEED_FULL:
+			reg |= DWC3_DCFG_FULLSPEED;
+			break;
+		case USB_SPEED_HIGH:
+			reg |= DWC3_DCFG_HIGHSPEED;
+			break;
+		case USB_SPEED_SUPER:
+			reg |= DWC3_DCFG_SUPERSPEED;
+			break;
+		case USB_SPEED_SUPER_PLUS:
+			reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+			break;
+		default:
+			dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+
+			if (dwc->revision & DWC3_REVISION_IS_DWC31)
+				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+			else
+				reg |= DWC3_DCFG_SUPERSPEED;
+		}
+	}
+	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
 	.get_frame		= dwc3_gadget_get_frame,
 	.wakeup			= dwc3_gadget_wakeup,
@@ -1979,19 +2032,21 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
 	.pullup			= dwc3_gadget_pullup,
 	.udc_start		= dwc3_gadget_start,
 	.udc_stop		= dwc3_gadget_stop,
+	.udc_set_speed		= dwc3_gadget_set_speed,
 };
 
 /* -------------------------------------------------------------------------- */
 
-static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
+static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
 {
 	struct dwc3_ep			*dep;
 	u8				epnum;
 
 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
 
-	for (epnum = 0; epnum < num; epnum++) {
+	for (epnum = 0; epnum < total; epnum++) {
 		bool			direction = epnum & 1;
+		u8			num = epnum >> 1;
 
 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
 		if (!dep)
@@ -2003,7 +2058,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
 		dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
 		dwc->eps[epnum] = dep;
 
-		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
+		snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
 				direction ? "in" : "out");
 
 		dep->endpoint.name = dep->name;
@@ -2015,39 +2070,39 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
 
 		spin_lock_init(&dep->lock);
 
-		if (epnum == 0 || epnum == 1) {
+		if (num == 0) {
 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
 			dep->endpoint.maxburst = 1;
 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
-			if (!epnum)
+			if (!direction)
 				dwc->gadget.ep0 = &dep->endpoint;
 		} else if (direction) {
 			int mdwidth;
+			int kbytes;
 			int size;
 			int ret;
-			int num;
 
 			mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
 			/* MDWIDTH is represented in bits, we need it in bytes */
 			mdwidth /= 8;
 
-			size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(epnum >> 1));
+			size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num));
 			size = DWC3_GTXFIFOSIZ_TXFDEF(size);
 
 			/* FIFO Depth is in MDWIDTH bytes. Multiply */
 			size *= mdwidth;
 
-			num = size / 1024;
-			if (num == 0)
-				num = 1;
+			kbytes = size / 1024;
+			if (kbytes == 0)
+				kbytes = 1;
 
 			/*
-			 * FIFO sizes account an extra MDWIDTH * (num + 1) bytes for
+			 * FIFO sizes account for an extra MDWIDTH * (kbytes + 1) bytes of
 			 * internal overhead. We don't really know how these are used,
 			 * but documentation says it exists.
 			 */
-			size -= mdwidth * (num + 1);
-			size /= num;
+			size -= mdwidth * (kbytes + 1);
+			size /= kbytes;
 
 			usb_ep_set_maxpacket_limit(&dep->endpoint, size);
 
@@ -2073,7 +2128,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
 				return ret;
 		}
 
-		if (epnum == 0 || epnum == 1) {
+		if (num == 0) {
 			dep->endpoint.caps.type_control = true;
 		} else {
 			dep->endpoint.caps.type_iso = true;
@@ -2871,7 +2926,7 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
 {
 	unsigned int is_ss = evtinfo & BIT(4);
 
-	/**
+	/*
 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
 	 * have a known issue which can cause USB CV TD.9.23 to fail
 	 * randomly.
@@ -2943,20 +2998,12 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
 {
 	trace_dwc3_event(event->raw, dwc);
 
-	/* Endpoint IRQ, handle it and return early */
-	if (event->type.is_devspec == 0) {
-		/* depevt */
-		return dwc3_endpoint_interrupt(dwc, &event->depevt);
-	}
-
-	switch (event->type.type) {
-	case DWC3_EVENT_TYPE_DEV:
+	if (!event->type.is_devspec)
+		dwc3_endpoint_interrupt(dwc, &event->depevt);
+	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
 		dwc3_gadget_interrupt(dwc, &event->devt);
-		break;
-	/* REVISIT what to do with Carkit and I2C events ? */
-	default:
+	else
 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
-	}
 }
 
 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
@@ -3110,7 +3157,7 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
 }
 
 /**
- * dwc3_gadget_init - Initializes gadget related registers
+ * dwc3_gadget_init - initializes gadget related registers
  * @dwc: pointer to our controller context structure
  *
  * Returns 0 on success otherwise negative errno.
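The gadget.c changes above keep touching the TRB ring bookkeeping (dwc3_ep_inc_trb(), dwc3_calc_trbs_left(), and the enqueue/dequeue markers now printed by debugfs). A rough standalone sketch of that bookkeeping - assuming an 8-entry ring whose last slot is the link TRB and using an owned[] flag in place of the HWO bit, so not the driver's exact arithmetic - is:

#include <stdio.h>
#include <stdbool.h>

#define TRB_NUM 8	/* ring size; the last slot is reserved for the link TRB */

struct ring {
	unsigned char enq;	/* next TRB to fill */
	unsigned char deq;	/* next TRB the hardware will complete */
	bool owned[TRB_NUM];	/* stand-in for the HWO bit */
};

/* Advance an index, skipping the link TRB at the end (like dwc3_ep_inc_trb) */
static void inc_idx(unsigned char *idx)
{
	if (++(*idx) == TRB_NUM - 1)
		*idx = 0;
}

/* Rough equivalent of dwc3_calc_trbs_left(): how many TRBs can still be queued */
static unsigned int trbs_left(const struct ring *r)
{
	unsigned int usable = TRB_NUM - 1;
	unsigned char prev = r->enq ? r->enq - 1 : TRB_NUM - 2;

	/* enqueue == dequeue is ambiguous: full if the previous TRB is still owned */
	if (r->enq == r->deq)
		return r->owned[prev] ? 0 : usable;

	return (r->deq + usable - r->enq) % usable;
}

int main(void)
{
	struct ring r = { 0 };

	printf("free TRBs at start: %u\n", trbs_left(&r));

	r.owned[r.enq] = true;	/* queue one TRB and hand it to "hardware" */
	inc_idx(&r.enq);
	printf("free TRBs after queueing one: %u\n", trbs_left(&r));
	return 0;
}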
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e4602d0..4a32275 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -1,4 +1,4 @@
-/**
+/*
  * gadget.h - DesignWare USB3 DRD Gadget Header
  *
  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
@@ -60,11 +60,25 @@ struct dwc3;
 
 #define to_dwc3_request(r)	(container_of(r, struct dwc3_request, request))
 
+/**
+ * next_request - gets the next request on the given list
+ * @list: the request list to operate on
+ *
+ * Caller should take care of locking. This function returns %NULL or the first
+ * request available on @list.
+ */
 static inline struct dwc3_request *next_request(struct list_head *list)
 {
 	return list_first_entry_or_null(list, struct dwc3_request, list);
 }
 
+/**
+ * dwc3_gadget_move_started_request - move @req to the started_list
+ * @req: the request to be moved
+ *
+ * Caller should take care of locking. This function will move @req from its
+ * current list to the endpoint's started_list.
+ */
 static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
 {
 	struct dwc3_ep		*dep = req->dep;
@@ -87,10 +101,10 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
 
 /**
  * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
- * @dwc: DesignWare USB3 Pointer
- * @number: DWC endpoint number
+ * @dep: dwc3 endpoint
  *
- * Caller should take care of locking
+ * Caller should take care of locking. Returns the transfer resource
+ * index for a given endpoint.
  */
 static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
 {
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index f1bd444..6504b11 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -60,13 +60,15 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
 	TP_STRUCT__entry(
 		__field(u32, event)
 		__field(u32, ep0state)
+		__dynamic_array(char, str, DWC3_MSG_MAX)
 	),
 	TP_fast_assign(
 		__entry->event = event;
 		__entry->ep0state = dwc->ep0state;
 	),
 	TP_printk("event (%08x): %s", __entry->event,
-			dwc3_decode_event(__entry->event, __entry->ep0state))
+			dwc3_decode_event(__get_str(str), __entry->event,
+					  __entry->ep0state))
 );
 
 DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -83,6 +85,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
 		__field(__u16, wValue)
 		__field(__u16, wIndex)
 		__field(__u16, wLength)
+		__dynamic_array(char, str, DWC3_MSG_MAX)
 	),
 	TP_fast_assign(
 		__entry->bRequestType = ctrl->bRequestType;
@@ -91,10 +94,9 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
 		__entry->wIndex = le16_to_cpu(ctrl->wIndex);
 		__entry->wLength = le16_to_cpu(ctrl->wLength);
 	),
-	TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d",
-		__entry->bRequestType, __entry->bRequest,
-		__entry->wValue, __entry->wIndex,
-		__entry->wLength
+	TP_printk("%s", dwc3_decode_ctrl(__get_str(str), __entry->bRequestType,
+					__entry->bRequest, __entry->wValue,
+					__entry->wIndex, __entry->wLength)
 	)
 );
 
@@ -107,7 +109,7 @@ DECLARE_EVENT_CLASS(dwc3_log_request,
 	TP_PROTO(struct dwc3_request *req),
 	TP_ARGS(req),
 	TP_STRUCT__entry(
-		__dynamic_array(char, name, DWC3_MSG_MAX)
+		__string(name, req->dep->name)
 		__field(struct dwc3_request *, req)
 		__field(unsigned, actual)
 		__field(unsigned, length)
@@ -117,7 +119,7 @@ DECLARE_EVENT_CLASS(dwc3_log_request,
 		__field(int, no_interrupt)
 	),
 	TP_fast_assign(
-		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", req->dep->name);
+		__assign_str(name, req->dep->name);
 		__entry->req = req;
 		__entry->actual = req->request.actual;
 		__entry->length = req->request.length;
@@ -190,7 +192,7 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
 		struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
 	TP_ARGS(dep, cmd, params, cmd_status),
 	TP_STRUCT__entry(
-		__dynamic_array(char, name, DWC3_MSG_MAX)
+		__string(name, dep->name)
 		__field(unsigned int, cmd)
 		__field(u32, param0)
 		__field(u32, param1)
@@ -198,7 +200,7 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
 		__field(int, cmd_status)
 	),
 	TP_fast_assign(
-		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+		__assign_str(name, dep->name);
 		__entry->cmd = cmd;
 		__entry->param0 = params->param0;
 		__entry->param1 = params->param1;
@@ -223,7 +225,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
 	TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb),
 	TP_ARGS(dep, trb),
 	TP_STRUCT__entry(
-		__dynamic_array(char, name, DWC3_MSG_MAX)
+		__string(name, dep->name)
 		__field(struct dwc3_trb *, trb)
 		__field(u32, allocated)
 		__field(u32, queued)
@@ -234,7 +236,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
 		__field(u32, type)
 	),
 	TP_fast_assign(
-		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+		__assign_str(name, dep->name);
 		__entry->trb = trb;
 		__entry->allocated = dep->allocated_requests;
 		__entry->queued = dep->queued_requests;
@@ -291,7 +293,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
 	TP_PROTO(struct dwc3_ep *dep),
 	TP_ARGS(dep),
 	TP_STRUCT__entry(
-		__dynamic_array(char, name, DWC3_MSG_MAX)
+		__string(name, dep->name)
 		__field(unsigned, maxpacket)
 		__field(unsigned, maxpacket_limit)
 		__field(unsigned, max_streams)
@@ -302,7 +304,7 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
 		__field(u8, trb_dequeue)
 	),
 	TP_fast_assign(
-		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+		__assign_str(name, dep->name);
 		__entry->maxpacket = dep->endpoint.maxpacket;
 		__entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
 		__entry->max_streams = dep->endpoint.max_streams;
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index bd86f84..e87ce8e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -41,6 +41,12 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
 	u32 reg;
 	int ret;
 
+	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+	if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
+		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	}
+
 	reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
@@ -58,6 +64,12 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
 	struct dwc3 *dwc = dev_get_drvdata(dev);
 	u32 reg;
 
+	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+	if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
+		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	}
+
 	reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
 	reg |= DWC3_GUSB2PHYACC_WRITE | val;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 1268818..12fe70b 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -32,7 +32,6 @@
 static struct xdbc_state xdbc;
 static bool early_console_keep;
 
-#define XDBC_TRACE
 #ifdef XDBC_TRACE
 #define	xdbc_trace	trace_printk
 #else
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c164d6b..35cc641 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -41,7 +41,7 @@
 	   don't have this kind of hardware (except maybe inside Linux PDAs).
 
 	   For more information, see <http://www.linux-usb.org/gadget> and
-	   the kernel DocBook documentation for this API.
+	   the kernel documentation for this API.
 
 if USB_GADGET
 
@@ -158,6 +158,9 @@
 config USB_U_ETHER
 	tristate
 
+config USB_U_AUDIO
+	tristate
+
 config USB_F_SERIAL
 	tristate
 
@@ -191,6 +194,9 @@
 config USB_F_UAC1
 	tristate
 
+config USB_F_UAC1_LEGACY
+	tristate
+
 config USB_F_UAC2
 	tristate
 
@@ -368,12 +374,30 @@
 	depends on SND
 	select USB_LIBCOMPOSITE
 	select SND_PCM
+	select USB_U_AUDIO
 	select USB_F_UAC1
 	help
 	  This Audio function implements 1 AudioControl interface,
 	  1 AudioStreaming Interface each for USB-OUT and USB-IN.
-	  This driver requires a real Audio codec to be present
-	  on the device.
+	  This driver doesn't expect any real Audio codec to be present
+	  on the device - the audio streams are simply sunk into and
+	  sourced from a virtual ALSA sound card that is created. The
+	  user-space application may do whatever it wants with the data
+	  received from the USB Host and provide whatever it wants as
+	  audio data to the USB Host.
+
+config USB_CONFIGFS_F_UAC1_LEGACY
+	bool "Audio Class 1.0 (legacy implementation)"
+	depends on USB_CONFIGFS
+	depends on SND
+	select USB_LIBCOMPOSITE
+	select SND_PCM
+	select USB_F_UAC1_LEGACY
+	help
+	  This Audio function implements 1 AudioControl interface,
+	  1 AudioStreaming Interface each for USB-OUT and USB-IN.
+	  This is a legacy driver and requires a real Audio codec
+	  to be present on the device.
 
 config USB_CONFIGFS_F_UAC2
 	bool "Audio Class 2.0"
@@ -381,6 +405,7 @@
 	depends on SND
 	select USB_LIBCOMPOSITE
 	select SND_PCM
+	select USB_U_AUDIO
 	select USB_F_UAC2
 	help
 	  This Audio function is compatible with USB Audio Class
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 49d685a..dd74c99 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -315,6 +315,9 @@ void usb_remove_function(struct usb_configuration *c, struct usb_function *f)
 	list_del(&f->list);
 	if (f->unbind)
 		f->unbind(c, f);
+
+	if (f->bind_deactivated)
+		usb_function_activate(f);
 }
 EXPORT_SYMBOL_GPL(usb_remove_function);
 
@@ -607,7 +610,6 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type)
 static int bos_desc(struct usb_composite_dev *cdev)
 {
 	struct usb_ext_cap_descriptor	*usb_ext;
-	struct usb_ss_cap_descriptor	*ss_cap;
 	struct usb_dcd_config_params	dcd_config_params;
 	struct usb_bos_descriptor	*bos = cdev->req->buf;
 
@@ -633,29 +635,35 @@ static int bos_desc(struct usb_composite_dev *cdev)
 	 * The Superspeed USB Capability descriptor shall be implemented by all
 	 * SuperSpeed devices.
 	 */
-	ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
-	bos->bNumDeviceCaps++;
-	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
-	ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
-	ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
-	ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
-	ss_cap->bmAttributes = 0; /* LTM is not supported yet */
-	ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
-				USB_FULL_SPEED_OPERATION |
-				USB_HIGH_SPEED_OPERATION |
-				USB_5GBPS_OPERATION);
-	ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+	if (gadget_is_superspeed(cdev->gadget)) {
+		struct usb_ss_cap_descriptor *ss_cap;
 
-	/* Get Controller configuration */
-	if (cdev->gadget->ops->get_config_params)
-		cdev->gadget->ops->get_config_params(&dcd_config_params);
-	else {
-		dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
-		dcd_config_params.bU2DevExitLat =
-			cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+		ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+		bos->bNumDeviceCaps++;
+		le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
+		ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
+		ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+		ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
+		ss_cap->bmAttributes = 0; /* LTM is not supported yet */
+		ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
+						      USB_FULL_SPEED_OPERATION |
+						      USB_HIGH_SPEED_OPERATION |
+						      USB_5GBPS_OPERATION);
+		ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+		/* Get Controller configuration */
+		if (cdev->gadget->ops->get_config_params) {
+			cdev->gadget->ops->get_config_params(
+				&dcd_config_params);
+		} else {
+			dcd_config_params.bU1devExitLat =
+				USB_DEFAULT_U1_DEV_EXIT_LAT;
+			dcd_config_params.bU2DevExitLat =
+				cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+		}
+		ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
+		ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
 	}
-	ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
-	ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
 
 	/* The SuperSpeedPlus USB Device Capability descriptor */
 	if (gadget_is_superspeed_plus(cdev->gadget)) {
@@ -956,12 +964,8 @@ static void remove_config(struct usb_composite_dev *cdev,
 
 		f = list_first_entry(&config->functions,
 				struct usb_function, list);
-		list_del(&f->list);
-		if (f->unbind) {
-			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
-			f->unbind(config, f);
-			/* may free memory for "f" */
-		}
+
+		usb_remove_function(config, f);
 	}
 	list_del(&config->list);
 	if (config->unbind) {
@@ -1603,7 +1607,10 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
 				}
 			} else {
-				cdev->desc.bcdUSB = cpu_to_le16(0x0200);
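+				/*
+				 * bcdUSB 0x0201 tells the host that a BOS
+				 * descriptor is available, which is where
+				 * LPM capability is reported.
+				 */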
+				if (gadget->lpm_capable)
+					cdev->desc.bcdUSB = cpu_to_le16(0x0201);
+				else
+					cdev->desc.bcdUSB = cpu_to_le16(0x0200);
 			}
 
 			value = min(w_length, (u16) sizeof cdev->desc);
@@ -1634,7 +1641,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 				value = min(w_length, (u16) value);
 			break;
 		case USB_DT_BOS:
-			if (gadget_is_superspeed(gadget)) {
+			if (gadget_is_superspeed(gadget) ||
+			    gadget->lpm_capable) {
 				value = bos_desc(cdev);
 				value = min(w_length, (u16) value);
 			}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index cbff3b0..a22a892 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -738,7 +738,7 @@ static inline struct gadget_info *os_desc_item_to_gadget_info(
 
 static ssize_t os_desc_use_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%d",
+	return sprintf(page, "%d\n",
 			os_desc_item_to_gadget_info(item)->use_os_desc);
 }
 
@@ -762,7 +762,7 @@ static ssize_t os_desc_use_store(struct config_item *item, const char *page,
 
 static ssize_t os_desc_b_vendor_code_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%d",
+	return sprintf(page, "0x%02x\n",
 			os_desc_item_to_gadget_info(item)->b_vendor_code);
 }
 
@@ -787,9 +787,13 @@ static ssize_t os_desc_b_vendor_code_store(struct config_item *item,
 static ssize_t os_desc_qw_sign_show(struct config_item *item, char *page)
 {
 	struct gadget_info *gi = os_desc_item_to_gadget_info(item);
+	int res;
 
-	memcpy(page, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
-	return OS_STRING_QW_SIGN_LEN;
+	res = utf16s_to_utf8s((wchar_t *) gi->qw_sign, OS_STRING_QW_SIGN_LEN,
+			      UTF16_LITTLE_ENDIAN, page, PAGE_SIZE - 1);
+	page[res++] = '\n';
+
+	return res;
 }
 
 static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
@@ -900,7 +904,7 @@ static inline struct usb_os_desc_ext_prop
 
 static ssize_t ext_prop_type_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%d", to_usb_os_desc_ext_prop(item)->type);
+	return sprintf(page, "%d\n", to_usb_os_desc_ext_prop(item)->type);
 }
 
 static ssize_t ext_prop_type_store(struct config_item *item,
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index cb8c225..86e8252 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -32,8 +32,11 @@
 obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o
 usb_f_fs-y			:= f_fs.o
 obj-$(CONFIG_USB_F_FS)		+= usb_f_fs.o
-usb_f_uac1-y			:= f_uac1.o u_uac1.o
+obj-$(CONFIG_USB_U_AUDIO)	+= u_audio.o
+usb_f_uac1-y			:= f_uac1.o
 obj-$(CONFIG_USB_F_UAC1)	+= usb_f_uac1.o
+usb_f_uac1_legacy-y		:= f_uac1_legacy.o u_uac1_legacy.o
+obj-$(CONFIG_USB_F_UAC1_LEGACY)	+= usb_f_uac1_legacy.o
 usb_f_uac2-y			:= f_uac2.o
 obj-$(CONFIG_USB_F_UAC2)	+= usb_f_uac2.o
 usb_f_uvc-y			:= f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 47dda34..d21874b 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -127,7 +127,6 @@ struct ffs_ep {
 struct ffs_epfile {
 	/* Protects ep->ep and ep->req. */
 	struct mutex			mutex;
-	wait_queue_head_t		wait;
 
 	struct ffs_data			*ffs;
 	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
@@ -889,7 +888,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 
-		ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
+		ret = wait_event_interruptible(
+				epfile->ffs->wait, (ep = epfile->ep));
 		if (ret)
 			return -EINTR;
 	}
@@ -1189,6 +1189,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
 			     unsigned long value)
 {
 	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_ep *ep;
 	int ret;
 
 	ENTER();
@@ -1196,50 +1197,65 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
+	/* Wait for endpoint to be enabled */
+	ep = epfile->ep;
+	if (!ep) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		ret = wait_event_interruptible(
+				epfile->ffs->wait, (ep = epfile->ep));
+		if (ret)
+			return -EINTR;
+	}
+
 	spin_lock_irq(&epfile->ffs->eps_lock);
-	if (likely(epfile->ep)) {
-		switch (code) {
-		case FUNCTIONFS_FIFO_STATUS:
-			ret = usb_ep_fifo_status(epfile->ep->ep);
-			break;
-		case FUNCTIONFS_FIFO_FLUSH:
-			usb_ep_fifo_flush(epfile->ep->ep);
-			ret = 0;
-			break;
-		case FUNCTIONFS_CLEAR_HALT:
-			ret = usb_ep_clear_halt(epfile->ep->ep);
-			break;
-		case FUNCTIONFS_ENDPOINT_REVMAP:
-			ret = epfile->ep->num;
-			break;
-		case FUNCTIONFS_ENDPOINT_DESC:
-		{
-			int desc_idx;
-			struct usb_endpoint_descriptor *desc;
 
-			switch (epfile->ffs->gadget->speed) {
-			case USB_SPEED_SUPER:
-				desc_idx = 2;
-				break;
-			case USB_SPEED_HIGH:
-				desc_idx = 1;
-				break;
-			default:
-				desc_idx = 0;
-			}
-			desc = epfile->ep->descs[desc_idx];
+	/* In the meantime, endpoint got disabled or changed. */
+	if (epfile->ep != ep) {
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		return -ESHUTDOWN;
+	}
 
-			spin_unlock_irq(&epfile->ffs->eps_lock);
-			ret = copy_to_user((void *)value, desc, desc->bLength);
-			if (ret)
-				ret = -EFAULT;
-			return ret;
-		}
+	switch (code) {
+	case FUNCTIONFS_FIFO_STATUS:
+		ret = usb_ep_fifo_status(epfile->ep->ep);
+		break;
+	case FUNCTIONFS_FIFO_FLUSH:
+		usb_ep_fifo_flush(epfile->ep->ep);
+		ret = 0;
+		break;
+	case FUNCTIONFS_CLEAR_HALT:
+		ret = usb_ep_clear_halt(epfile->ep->ep);
+		break;
+	case FUNCTIONFS_ENDPOINT_REVMAP:
+		ret = epfile->ep->num;
+		break;
+	case FUNCTIONFS_ENDPOINT_DESC:
+	{
+		int desc_idx;
+		struct usb_endpoint_descriptor *desc;
+
+		switch (epfile->ffs->gadget->speed) {
+		case USB_SPEED_SUPER:
+			desc_idx = 2;
+			break;
+		case USB_SPEED_HIGH:
+			desc_idx = 1;
+			break;
 		default:
-			ret = -ENOTTY;
+			desc_idx = 0;
 		}
-	} else {
-		ret = -ENODEV;
+		desc = epfile->ep->descs[desc_idx];
+
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		ret = copy_to_user((void *)value, desc, desc->bLength);
+		if (ret)
+			ret = -EFAULT;
+		return ret;
+	}
+	default:
+		ret = -ENOTTY;
 	}
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
@@ -1593,7 +1609,8 @@ static void ffs_data_put(struct ffs_data *ffs)
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
 		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
-		       waitqueue_active(&ffs->ep0req_completion.wait));
+		       waitqueue_active(&ffs->ep0req_completion.wait) ||
+		       waitqueue_active(&ffs->wait));
 		kfree(ffs->dev_name);
 		kfree(ffs);
 	}
@@ -1640,6 +1657,7 @@ static struct ffs_data *ffs_data_new(void)
 	mutex_init(&ffs->mutex);
 	spin_lock_init(&ffs->eps_lock);
 	init_waitqueue_head(&ffs->ev.waitq);
+	init_waitqueue_head(&ffs->wait);
 	init_completion(&ffs->ep0req_completion);
 
 	/* XXX REVISIT need to update it in some places, or do we? */
@@ -1761,7 +1779,6 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
 	for (i = 1; i <= count; ++i, ++epfile) {
 		epfile->ffs = ffs;
 		mutex_init(&epfile->mutex);
-		init_waitqueue_head(&epfile->wait);
 		if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
 			sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
 		else
@@ -1786,8 +1803,7 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
 	ENTER();
 
 	for (; count; --count, ++epfile) {
-		BUG_ON(mutex_is_locked(&epfile->mutex) ||
-		       waitqueue_active(&epfile->wait));
+		BUG_ON(mutex_is_locked(&epfile->mutex));
 		if (epfile->dentry) {
 			d_delete(epfile->dentry);
 			dput(epfile->dentry);
@@ -1874,11 +1890,11 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 			break;
 		}
 
-		wake_up(&epfile->wait);
-
 		++ep;
 		++epfile;
 	}
+
+	wake_up_interruptible(&ffs->wait);
 	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
 	return ret;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 4c8aacc..e80b9c1 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -260,12 +260,13 @@ struct fsg_common {
 	struct usb_gadget	*gadget;
 	struct usb_composite_dev *cdev;
 	struct fsg_dev		*fsg, *new_fsg;
+	wait_queue_head_t	io_wait;
 	wait_queue_head_t	fsg_wait;
 
 	/* filesem protects: backing files in use */
 	struct rw_semaphore	filesem;
 
-	/* lock protects: state, all the req_busy's */
+	/* lock protects: state and thread_task */
 	spinlock_t		lock;
 
 	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
@@ -303,7 +304,6 @@ struct fsg_common {
 	unsigned int		running:1;
 	unsigned int		sysfs:1;
 
-	int			thread_wakeup_needed;
 	struct completion	thread_notifier;
 	struct task_struct	*thread_task;
 
@@ -355,7 +355,7 @@ typedef void (*fsg_routine_t)(struct fsg_dev *);
 
 static int exception_in_progress(struct fsg_common *common)
 {
-	return common->state > FSG_STATE_IDLE;
+	return common->state > FSG_STATE_NORMAL;
 }
 
 /* Make bulk-out requests be divisible by the maxpacket size */
@@ -393,16 +393,6 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 
 /* These routines may be called in process context or in_irq */
 
-/* Caller must hold fsg->lock */
-static void wakeup_thread(struct fsg_common *common)
-{
-	smp_wmb();	/* ensure the write of bh->state is complete */
-	/* Tell the main thread that something has happened */
-	common->thread_wakeup_needed = 1;
-	if (common->thread_task)
-		wake_up_process(common->thread_task);
-}
-
 static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 {
 	unsigned long		flags;
@@ -456,13 +446,9 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
-	/* Hold the lock while we update the request and buffer states */
-	smp_wmb();
-	spin_lock(&common->lock);
-	bh->inreq_busy = 0;
-	bh->state = BUF_STATE_EMPTY;
-	wakeup_thread(common);
-	spin_unlock(&common->lock);
+	/* Synchronize with the smp_load_acquire() in sleep_thread() */
+	smp_store_release(&bh->state, BUF_STATE_EMPTY);
+	wake_up(&common->io_wait);
 }
 
 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
@@ -477,13 +463,9 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
-	/* Hold the lock while we update the request and buffer states */
-	smp_wmb();
-	spin_lock(&common->lock);
-	bh->outreq_busy = 0;
-	bh->state = BUF_STATE_FULL;
-	wakeup_thread(common);
-	spin_unlock(&common->lock);
+	/* Synchronize with the smp_load_acquire() in sleep_thread() */
+	smp_store_release(&bh->state, BUF_STATE_FULL);
+	wake_up(&common->io_wait);
 }
 
 static int _fsg_common_get_max_lun(struct fsg_common *common)
@@ -528,7 +510,7 @@ static int fsg_setup(struct usb_function *f,
 		 * and reinitialize our state.
 		 */
 		DBG(fsg, "bulk reset request\n");
-		raise_exception(fsg->common, FSG_STATE_RESET);
+		raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET);
 		return USB_GADGET_DELAYED_STATUS;
 
 	case US_BULK_GET_MAX_LUN:
@@ -559,43 +541,39 @@ static int fsg_setup(struct usb_function *f,
 /* All the following routines run in process context */
 
 /* Use this for bulk or interrupt transfers, not ep0 */
-static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
-			   struct usb_request *req, int *pbusy,
-			   enum fsg_buffer_state *state)
+static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+			   struct usb_request *req)
 {
 	int	rc;
 
 	if (ep == fsg->bulk_in)
 		dump_msg(fsg, "bulk-in", req->buf, req->length);
 
-	spin_lock_irq(&fsg->common->lock);
-	*pbusy = 1;
-	*state = BUF_STATE_BUSY;
-	spin_unlock_irq(&fsg->common->lock);
-
 	rc = usb_ep_queue(ep, req, GFP_KERNEL);
-	if (rc == 0)
-		return;  /* All good, we're done */
+	if (rc) {
 
-	*pbusy = 0;
-	*state = BUF_STATE_EMPTY;
+		/* We can't do much more than wait for a reset */
+		req->status = rc;
 
-	/* We can't do much more than wait for a reset */
-
-	/*
-	 * Note: currently the net2280 driver fails zero-length
-	 * submissions if DMA is enabled.
-	 */
-	if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0))
-		WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc);
+		/*
+		 * Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled.
+		 */
+		if (rc != -ESHUTDOWN &&
+				!(rc == -EOPNOTSUPP && req->length == 0))
+			WARNING(fsg, "error in submission: %s --> %d\n",
+					ep->name, rc);
+	}
+	return rc;
 }
 
 static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	if (!fsg_is_set(common))
 		return false;
-	start_transfer(common->fsg, common->fsg->bulk_in,
-		       bh->inreq, &bh->inreq_busy, &bh->state);
+	bh->state = BUF_STATE_SENDING;
+	if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
+		bh->state = BUF_STATE_EMPTY;
 	return true;
 }
 
@@ -603,32 +581,31 @@ static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	if (!fsg_is_set(common))
 		return false;
-	start_transfer(common->fsg, common->fsg->bulk_out,
-		       bh->outreq, &bh->outreq_busy, &bh->state);
+	bh->state = BUF_STATE_RECEIVING;
+	if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
+		bh->state = BUF_STATE_FULL;
 	return true;
 }
 
-static int sleep_thread(struct fsg_common *common, bool can_freeze)
+static int sleep_thread(struct fsg_common *common, bool can_freeze,
+		struct fsg_buffhd *bh)
 {
-	int	rc = 0;
+	int	rc;
 
-	/* Wait until a signal arrives or we are woken up */
-	for (;;) {
-		if (can_freeze)
-			try_to_freeze();
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (signal_pending(current)) {
-			rc = -EINTR;
-			break;
-		}
-		if (common->thread_wakeup_needed)
-			break;
-		schedule();
-	}
-	__set_current_state(TASK_RUNNING);
-	common->thread_wakeup_needed = 0;
-	smp_rmb();	/* ensure the latest bh->state is visible */
-	return rc;
+	/* Wait until a signal arrives or bh is no longer busy */
+	if (can_freeze)
+		/*
+		 * synchronize with the smp_store_release(&bh->state) in
+		 * bulk_in_complete() or bulk_out_complete()
+		 */
+		rc = wait_event_freezable(common->io_wait,
+				bh && smp_load_acquire(&bh->state) >=
+					BUF_STATE_EMPTY);
+	else
+		rc = wait_event_interruptible(common->io_wait,
+				bh && smp_load_acquire(&bh->state) >=
+					BUF_STATE_EMPTY);
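+	/* wait_event_*() returns -ERESTARTSYS if interrupted by a signal */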
+	return rc ? -EINTR : 0;
 }
 
 
@@ -688,11 +665,9 @@ static int do_read(struct fsg_common *common)
 
 		/* Wait for the next buffer to become available */
 		bh = common->next_buffhd_to_fill;
-		while (bh->state != BUF_STATE_EMPTY) {
-			rc = sleep_thread(common, false);
-			if (rc)
-				return rc;
-		}
+		rc = sleep_thread(common, false, bh);
+		if (rc)
+			return rc;
 
 		/*
 		 * If we were asked to read past the end of file,
@@ -869,84 +844,80 @@ static int do_write(struct fsg_common *common)
 		bh = common->next_buffhd_to_drain;
 		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
 			break;			/* We stopped early */
-		if (bh->state == BUF_STATE_FULL) {
-			smp_rmb();
-			common->next_buffhd_to_drain = bh->next;
-			bh->state = BUF_STATE_EMPTY;
 
-			/* Did something go wrong with the transfer? */
-			if (bh->outreq->status != 0) {
-				curlun->sense_data = SS_COMMUNICATION_FAILURE;
-				curlun->sense_data_info =
-					file_offset >> curlun->blkbits;
-				curlun->info_valid = 1;
-				break;
-			}
-
-			amount = bh->outreq->actual;
-			if (curlun->file_length - file_offset < amount) {
-				LERROR(curlun,
-				       "write %u @ %llu beyond end %llu\n",
-				       amount, (unsigned long long)file_offset,
-				       (unsigned long long)curlun->file_length);
-				amount = curlun->file_length - file_offset;
-			}
-
-			/* Don't accept excess data.  The spec doesn't say
-			 * what to do in this case.  We'll ignore the error.
-			 */
-			amount = min(amount, bh->bulk_out_intended_length);
-
-			/* Don't write a partial block */
-			amount = round_down(amount, curlun->blksize);
-			if (amount == 0)
-				goto empty_write;
-
-			/* Perform the write */
-			file_offset_tmp = file_offset;
-			nwritten = vfs_write(curlun->filp,
-					     (char __user *)bh->buf,
-					     amount, &file_offset_tmp);
-			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
-			      (unsigned long long)file_offset, (int)nwritten);
-			if (signal_pending(current))
-				return -EINTR;		/* Interrupted! */
-
-			if (nwritten < 0) {
-				LDBG(curlun, "error in file write: %d\n",
-				     (int)nwritten);
-				nwritten = 0;
-			} else if (nwritten < amount) {
-				LDBG(curlun, "partial file write: %d/%u\n",
-				     (int)nwritten, amount);
-				nwritten = round_down(nwritten, curlun->blksize);
-			}
-			file_offset += nwritten;
-			amount_left_to_write -= nwritten;
-			common->residue -= nwritten;
-
-			/* If an error occurred, report it and its position */
-			if (nwritten < amount) {
-				curlun->sense_data = SS_WRITE_ERROR;
-				curlun->sense_data_info =
-					file_offset >> curlun->blkbits;
-				curlun->info_valid = 1;
-				break;
-			}
-
- empty_write:
-			/* Did the host decide to stop early? */
-			if (bh->outreq->actual < bh->bulk_out_intended_length) {
-				common->short_packet_received = 1;
-				break;
-			}
-			continue;
-		}
-
-		/* Wait for something to happen */
-		rc = sleep_thread(common, false);
+		/* Wait for the data to be received */
+		rc = sleep_thread(common, false, bh);
 		if (rc)
 			return rc;
+
+		common->next_buffhd_to_drain = bh->next;
+		bh->state = BUF_STATE_EMPTY;
+
+		/* Did something go wrong with the transfer? */
+		if (bh->outreq->status != 0) {
+			curlun->sense_data = SS_COMMUNICATION_FAILURE;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		amount = bh->outreq->actual;
+		if (curlun->file_length - file_offset < amount) {
+			LERROR(curlun, "write %u @ %llu beyond end %llu\n",
+				       amount, (unsigned long long)file_offset,
+				       (unsigned long long)curlun->file_length);
+			amount = curlun->file_length - file_offset;
+		}
+
+		/*
+		 * Don't accept excess data.  The spec doesn't say
+		 * what to do in this case.  We'll ignore the error.
+		 */
+		amount = min(amount, bh->bulk_out_intended_length);
+
+		/* Don't write a partial block */
+		amount = round_down(amount, curlun->blksize);
+		if (amount == 0)
+			goto empty_write;
+
+		/* Perform the write */
+		file_offset_tmp = file_offset;
+		nwritten = vfs_write(curlun->filp, (char __user *)bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+				(unsigned long long)file_offset, (int)nwritten);
+		if (signal_pending(current))
+			return -EINTR;		/* Interrupted! */
+
+		if (nwritten < 0) {
+			LDBG(curlun, "error in file write: %d\n",
+					(int) nwritten);
+			nwritten = 0;
+		} else if (nwritten < amount) {
+			LDBG(curlun, "partial file write: %d/%u\n",
+					(int) nwritten, amount);
+			nwritten = round_down(nwritten, curlun->blksize);
+		}
+		file_offset += nwritten;
+		amount_left_to_write -= nwritten;
+		common->residue -= nwritten;
+
+		/* If an error occurred, report it and its position */
+		if (nwritten < amount) {
+			curlun->sense_data = SS_WRITE_ERROR;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+ empty_write:
+		/* Did the host decide to stop early? */
+		if (bh->outreq->actual < bh->bulk_out_intended_length) {
+			common->short_packet_received = 1;
+			break;
+		}
 	}
 
 	return -EIO;		/* No default reply */
@@ -1471,7 +1442,7 @@ static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
 
 static int throw_away_data(struct fsg_common *common)
 {
-	struct fsg_buffhd	*bh;
+	struct fsg_buffhd	*bh, *bh2;
 	u32			amount;
 	int			rc;
 
@@ -1479,26 +1450,10 @@ static int throw_away_data(struct fsg_common *common)
 	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
 	     bh = common->next_buffhd_to_drain) {
 
-		/* Throw away the data in a filled buffer */
-		if (bh->state == BUF_STATE_FULL) {
-			smp_rmb();
-			bh->state = BUF_STATE_EMPTY;
-			common->next_buffhd_to_drain = bh->next;
-
-			/* A short packet or an error ends everything */
-			if (bh->outreq->actual < bh->bulk_out_intended_length ||
-			    bh->outreq->status != 0) {
-				raise_exception(common,
-						FSG_STATE_ABORT_BULK_OUT);
-				return -EINTR;
-			}
-			continue;
-		}
-
 		/* Try to submit another request if we need one */
-		bh = common->next_buffhd_to_fill;
-		if (bh->state == BUF_STATE_EMPTY
-		 && common->usb_amount_left > 0) {
+		bh2 = common->next_buffhd_to_fill;
+		if (bh2->state == BUF_STATE_EMPTY &&
+				common->usb_amount_left > 0) {
 			amount = min(common->usb_amount_left, FSG_BUFLEN);
 
 			/*
@@ -1506,19 +1461,30 @@ static int throw_away_data(struct fsg_common *common)
 			 * equal to the buffer size, which is divisible by
 			 * the bulk-out maxpacket size.
 			 */
-			set_bulk_out_req_length(common, bh, amount);
-			if (!start_out_transfer(common, bh))
+			set_bulk_out_req_length(common, bh2, amount);
+			if (!start_out_transfer(common, bh2))
 				/* Dunno what to do if common->fsg is NULL */
 				return -EIO;
-			common->next_buffhd_to_fill = bh->next;
+			common->next_buffhd_to_fill = bh2->next;
 			common->usb_amount_left -= amount;
 			continue;
 		}
 
-		/* Otherwise wait for something to happen */
-		rc = sleep_thread(common, true);
+		/* Wait for the data to be received */
+		rc = sleep_thread(common, false, bh);
 		if (rc)
 			return rc;
+
+		/* Throw away the data in a filled buffer */
+		bh->state = BUF_STATE_EMPTY;
+		common->next_buffhd_to_drain = bh->next;
+
+		/* A short packet or an error ends everything */
+		if (bh->outreq->actual < bh->bulk_out_intended_length ||
+				bh->outreq->status != 0) {
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			return -EINTR;
+		}
 	}
 	return 0;
 }
@@ -1625,7 +1591,7 @@ static int finish_reply(struct fsg_common *common)
 	return rc;
 }
 
-static int send_status(struct fsg_common *common)
+static void send_status(struct fsg_common *common)
 {
 	struct fsg_lun		*curlun = common->curlun;
 	struct fsg_buffhd	*bh;
@@ -1636,11 +1602,9 @@ static int send_status(struct fsg_common *common)
 
 	/* Wait for the next buffer to become available */
 	bh = common->next_buffhd_to_fill;
-	while (bh->state != BUF_STATE_EMPTY) {
-		rc = sleep_thread(common, true);
-		if (rc)
-			return rc;
-	}
+	rc = sleep_thread(common, false, bh);
+	if (rc)
+		return;
 
 	if (curlun) {
 		sd = curlun->sense_data;
@@ -1674,10 +1638,10 @@ static int send_status(struct fsg_common *common)
 	bh->inreq->zero = 0;
 	if (!start_in_transfer(common, bh))
 		/* Don't know what to do if common->fsg is NULL */
-		return -EIO;
+		return;
 
 	common->next_buffhd_to_fill = bh->next;
-	return 0;
+	return;
 }
 
 
@@ -1839,11 +1803,10 @@ static int do_scsi_command(struct fsg_common *common)
 	/* Wait for the next buffer to become available for data or status */
 	bh = common->next_buffhd_to_fill;
 	common->next_buffhd_to_drain = bh;
-	while (bh->state != BUF_STATE_EMPTY) {
-		rc = sleep_thread(common, true);
-		if (rc)
-			return rc;
-	}
+	rc = sleep_thread(common, false, bh);
+	if (rc)
+		return rc;
+
 	common->phase_error = 0;
 	common->short_packet_received = 0;
 
@@ -2186,11 +2149,9 @@ static int get_next_command(struct fsg_common *common)
 
 	/* Wait for the next buffer to become available */
 	bh = common->next_buffhd_to_fill;
-	while (bh->state != BUF_STATE_EMPTY) {
-		rc = sleep_thread(common, true);
-		if (rc)
-			return rc;
-	}
+	rc = sleep_thread(common, true, bh);
+	if (rc)
+		return rc;
 
 	/* Queue a request to read a Bulk-only CBW */
 	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
@@ -2205,12 +2166,10 @@ static int get_next_command(struct fsg_common *common)
 	 */
 
 	/* Wait for the CBW to arrive */
-	while (bh->state != BUF_STATE_FULL) {
-		rc = sleep_thread(common, true);
-		if (rc)
-			return rc;
-	}
-	smp_rmb();
+	rc = sleep_thread(common, true, bh);
+	if (rc)
+		return rc;
+
 	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
 	bh->state = BUF_STATE_EMPTY;
 
@@ -2362,9 +2321,11 @@ static void handle_exception(struct fsg_common *common)
 		if (!sig)
 			break;
 		if (sig != SIGUSR1) {
+			spin_lock_irq(&common->lock);
 			if (common->state < FSG_STATE_EXIT)
 				DBG(common, "Main thread exiting on signal\n");
-			raise_exception(common, FSG_STATE_EXIT);
+			common->state = FSG_STATE_EXIT;
+			spin_unlock_irq(&common->lock);
 		}
 	}
 
@@ -2372,23 +2333,14 @@ static void handle_exception(struct fsg_common *common)
 	if (likely(common->fsg)) {
 		for (i = 0; i < common->fsg_num_buffers; ++i) {
 			bh = &common->buffhds[i];
-			if (bh->inreq_busy)
+			if (bh->state == BUF_STATE_SENDING)
 				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
-			if (bh->outreq_busy)
+			if (bh->state == BUF_STATE_RECEIVING)
 				usb_ep_dequeue(common->fsg->bulk_out,
 					       bh->outreq);
-		}
 
-		/* Wait until everything is idle */
-		for (;;) {
-			int num_active = 0;
-			for (i = 0; i < common->fsg_num_buffers; ++i) {
-				bh = &common->buffhds[i];
-				num_active += bh->inreq_busy + bh->outreq_busy;
-			}
-			if (num_active == 0)
-				break;
-			if (sleep_thread(common, true))
+			/* Wait for a transfer to become idle */
+			if (sleep_thread(common, false, bh))
 				return;
 		}
 
@@ -2413,10 +2365,9 @@ static void handle_exception(struct fsg_common *common)
 	common->next_buffhd_to_drain = &common->buffhds[0];
 	exception_req_tag = common->exception_req_tag;
 	old_state = common->state;
+	common->state = FSG_STATE_NORMAL;
 
-	if (old_state == FSG_STATE_ABORT_BULK_OUT)
-		common->state = FSG_STATE_STATUS_PHASE;
-	else {
+	if (old_state != FSG_STATE_ABORT_BULK_OUT) {
 		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
 			curlun = common->luns[i];
 			if (!curlun)
@@ -2427,21 +2378,19 @@ static void handle_exception(struct fsg_common *common)
 			curlun->sense_data_info = 0;
 			curlun->info_valid = 0;
 		}
-		common->state = FSG_STATE_IDLE;
 	}
 	spin_unlock_irq(&common->lock);
 
 	/* Carry out any extra actions required for the exception */
 	switch (old_state) {
-	case FSG_STATE_ABORT_BULK_OUT:
-		send_status(common);
-		spin_lock_irq(&common->lock);
-		if (common->state == FSG_STATE_STATUS_PHASE)
-			common->state = FSG_STATE_IDLE;
-		spin_unlock_irq(&common->lock);
+	case FSG_STATE_NORMAL:
 		break;
 
-	case FSG_STATE_RESET:
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(common);
+		break;
+
+	case FSG_STATE_PROTOCOL_RESET:
 		/*
 		 * In case we were forced against our will to halt a
 		 * bulk endpoint, clear the halt now.  (The SuperH UDC
@@ -2474,19 +2423,13 @@ static void handle_exception(struct fsg_common *common)
 		break;
 
 	case FSG_STATE_EXIT:
-	case FSG_STATE_TERMINATED:
 		do_set_interface(common, NULL);		/* Free resources */
 		spin_lock_irq(&common->lock);
 		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
 		spin_unlock_irq(&common->lock);
 		break;
 
-	case FSG_STATE_INTERFACE_CHANGE:
-	case FSG_STATE_DISCONNECT:
-	case FSG_STATE_COMMAND_PHASE:
-	case FSG_STATE_DATA_PHASE:
-	case FSG_STATE_STATUS_PHASE:
-	case FSG_STATE_IDLE:
+	case FSG_STATE_TERMINATED:
 		break;
 	}
 }
@@ -2525,33 +2468,17 @@ static int fsg_main_thread(void *common_)
 		}
 
 		if (!common->running) {
-			sleep_thread(common, true);
+			sleep_thread(common, true, NULL);
 			continue;
 		}
 
-		if (get_next_command(common))
+		if (get_next_command(common) || exception_in_progress(common))
 			continue;
-
-		spin_lock_irq(&common->lock);
-		if (!exception_in_progress(common))
-			common->state = FSG_STATE_DATA_PHASE;
-		spin_unlock_irq(&common->lock);
-
-		if (do_scsi_command(common) || finish_reply(common))
+		if (do_scsi_command(common) || exception_in_progress(common))
 			continue;
-
-		spin_lock_irq(&common->lock);
-		if (!exception_in_progress(common))
-			common->state = FSG_STATE_STATUS_PHASE;
-		spin_unlock_irq(&common->lock);
-
-		if (send_status(common))
+		if (finish_reply(common) || exception_in_progress(common))
 			continue;
-
-		spin_lock_irq(&common->lock);
-		if (!exception_in_progress(common))
-			common->state = FSG_STATE_IDLE;
-		spin_unlock_irq(&common->lock);
+		send_status(common);
 	}
 
 	spin_lock_irq(&common->lock);
@@ -2671,6 +2598,7 @@ static struct fsg_common *fsg_common_setup(struct fsg_common *common)
 	spin_lock_init(&common->lock);
 	kref_init(&common->ref);
 	init_completion(&common->thread_notifier);
+	init_waitqueue_head(&common->io_wait);
 	init_waitqueue_head(&common->fsg_wait);
 	common->state = FSG_STATE_TERMINATED;
 	memset(common->luns, 0, sizeof(common->luns));
@@ -2972,7 +2900,6 @@ static void fsg_common_release(struct kref *ref)
 	if (common->state != FSG_STATE_TERMINATED) {
 		raise_exception(common, FSG_STATE_EXIT);
 		wait_for_completion(&common->thread_notifier);
-		common->thread_task = NULL;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
@@ -3021,11 +2948,11 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
 	}
 
 	if (!common->thread_task) {
-		common->state = FSG_STATE_IDLE;
+		common->state = FSG_STATE_NORMAL;
 		common->thread_task =
 			kthread_create(fsg_main_thread, common, "file-storage");
 		if (IS_ERR(common->thread_task)) {
-			int ret = PTR_ERR(common->thread_task);
+			ret = PTR_ERR(common->thread_task);
 			common->thread_task = NULL;
 			common->state = FSG_STATE_TERMINATED;
 			return ret;
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index b4058f0..6a1ce6a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -281,7 +281,7 @@ static void pn_net_setup(struct net_device *dev)
 	dev->tx_queue_len	= 1;
 
 	dev->netdev_ops		= &pn_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->header_ops		= &phonet_header_ops;
 }
 
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index f2ac0cb..8656f84 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1,24 +1,38 @@
 /*
- * f_audio.c -- USB Audio class function driver
-  *
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
  *
- * Enter bugs at http://blackfin.uclinux.org/
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
  *
- * Licensed under the GPL-2 or later.
+ * This driver doesn't expect any real Audio codec to be present
+ * on the device - the audio streams are simply sunk to and
+ * sourced from a virtual ALSA sound card it creates.
+ *
+ * This file is based on f_uac1.c which is
+ *   Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ *   Copyright (C) 2008 Analog Devices, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
  */
 
-#include <linux/slab.h>
-#include <linux/kernel.h>
+#include <linux/usb/audio.h>
 #include <linux/module.h>
-#include <linux/device.h>
-#include <linux/atomic.h>
 
+#include "u_audio.h"
 #include "u_uac1.h"
 
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+struct f_uac1 {
+	struct g_audio g_audio;
+	u8 ac_intf, as_in_intf, as_out_intf;
+	u8 ac_alt, as_in_alt, as_out_alt;	/* needed for get_alt() */
+};
+
+static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
+{
+	return container_of(f, struct f_uac1, g_audio.func);
+}
 
 /*
  * DESCRIPTORS ... most are static, but strings and full
@@ -26,12 +40,17 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
  */
 
 /*
- * We have two interfaces- AudioControl and AudioStreaming
- * TODO: only supcard playback currently
+ * We have three interfaces - one AudioControl and two AudioStreaming
+ *
+ * The driver implements a simple UAC_1 topology.
+ * USB-OUT -> IT_1 -> OT_2 -> ALSA_Capture
+ * ALSA_Playback -> IT_3 -> OT_4 -> USB-IN
  */
-#define F_AUDIO_AC_INTERFACE	0
-#define F_AUDIO_AS_INTERFACE	1
-#define F_AUDIO_NUM_INTERFACES	1
+#define F_AUDIO_AC_INTERFACE		0
+#define F_AUDIO_AS_OUT_INTERFACE	1
+#define F_AUDIO_AS_IN_INTERFACE		2
+/* Number of streaming interfaces */
+#define F_AUDIO_NUM_INTERFACES		2
 
 /* B.3.1  Standard AC Interface Descriptor */
 static struct usb_interface_descriptor ac_interface_desc = {
@@ -46,89 +65,73 @@ static struct usb_interface_descriptor ac_interface_desc = {
  * The number of AudioStreaming and MIDIStreaming interfaces
  * in the Audio Interface Collection
  */
-DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
 
 #define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
-/* 1 input terminal, 1 output terminal and 1 feature unit */
-#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
-	+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* 2 input terminals and 2 output terminals */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ 2*UAC_DT_INPUT_TERMINAL_SIZE + 2*UAC_DT_OUTPUT_TERMINAL_SIZE)
 /* B.3.2  Class-Specific AC Interface Descriptor */
-static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
 	.bLength =		UAC_DT_AC_HEADER_LENGTH,
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_HEADER,
-	.bcdADC =		__constant_cpu_to_le16(0x0100),
-	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bcdADC =		cpu_to_le16(0x0100),
+	.wTotalLength =		cpu_to_le16(UAC_DT_TOTAL_LENGTH),
 	.bInCollection =	F_AUDIO_NUM_INTERFACES,
 	.baInterfaceNr = {
-	/* Interface number of the first AudioStream interface */
+	/* Interface number of the AudioStream interfaces */
 		[0] =		1,
+		[1] =		2,
 	}
 };
 
-#define INPUT_TERMINAL_ID	1
-static struct uac_input_terminal_descriptor input_terminal_desc = {
+#define USB_OUT_IT_ID	1
+static struct uac_input_terminal_descriptor usb_out_it_desc = {
 	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
-	.bTerminalID =		INPUT_TERMINAL_ID,
+	.bTerminalID =		USB_OUT_IT_ID,
 	.wTerminalType =	UAC_TERMINAL_STREAMING,
 	.bAssocTerminal =	0,
 	.wChannelConfig =	0x3,
 };
 
-DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
-
-#define FEATURE_UNIT_ID		2
-static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
-	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
-	.bDescriptorType	= USB_DT_CS_INTERFACE,
-	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
-	.bUnitID		= FEATURE_UNIT_ID,
-	.bSourceID		= INPUT_TERMINAL_ID,
-	.bControlSize		= 2,
-	.bmaControls[0]		= (UAC_FU_MUTE | UAC_FU_VOLUME),
-};
-
-static struct usb_audio_control mute_control = {
-	.list = LIST_HEAD_INIT(mute_control.list),
-	.name = "Mute Control",
-	.type = UAC_FU_MUTE,
-	/* Todo: add real Mute control code */
-	.set = generic_set_cmd,
-	.get = generic_get_cmd,
-};
-
-static struct usb_audio_control volume_control = {
-	.list = LIST_HEAD_INIT(volume_control.list),
-	.name = "Volume Control",
-	.type = UAC_FU_VOLUME,
-	/* Todo: add real Volume control code */
-	.set = generic_set_cmd,
-	.get = generic_get_cmd,
-};
-
-static struct usb_audio_control_selector feature_unit = {
-	.list = LIST_HEAD_INIT(feature_unit.list),
-	.id = FEATURE_UNIT_ID,
-	.name = "Mute & Volume Control",
-	.type = UAC_FEATURE_UNIT,
-	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
-};
-
-#define OUTPUT_TERMINAL_ID	3
-static struct uac1_output_terminal_descriptor output_terminal_desc = {
+#define IO_OUT_OT_ID	2
+static struct uac1_output_terminal_descriptor io_out_ot_desc = {
 	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
 	.bDescriptorType	= USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
-	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.bTerminalID		= IO_OUT_OT_ID,
 	.wTerminalType		= UAC_OUTPUT_TERMINAL_SPEAKER,
-	.bAssocTerminal		= FEATURE_UNIT_ID,
-	.bSourceID		= FEATURE_UNIT_ID,
+	.bAssocTerminal		= 0,
+	.bSourceID		= USB_OUT_IT_ID,
+};
+
+#define IO_IN_IT_ID	3
+static struct uac_input_terminal_descriptor io_in_it_desc = {
+	.bLength		= UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_INPUT_TERMINAL,
+	.bTerminalID		= IO_IN_IT_ID,
+	.wTerminalType		= UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal		= 0,
+	.wChannelConfig		= 0x3,
+};
+
+#define USB_IN_OT_ID	4
+static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
+	.bLength =		UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_OUTPUT_TERMINAL,
+	.bTerminalID =		USB_IN_OT_ID,
+	.wTerminalType =	UAC_TERMINAL_STREAMING,
+	.bAssocTerminal =	0,
+	.bSourceID =		IO_IN_IT_ID,
 };
 
 /* B.4.1  Standard AS Interface Descriptor */
-static struct usb_interface_descriptor as_interface_alt_0_desc = {
+static struct usb_interface_descriptor as_out_interface_alt_0_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	.bAlternateSetting =	0,
@@ -137,7 +140,25 @@ static struct usb_interface_descriptor as_interface_alt_0_desc = {
 	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
 };
 
-static struct usb_interface_descriptor as_interface_alt_1_desc = {
+static struct usb_interface_descriptor as_out_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_in_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_in_interface_alt_1_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	.bAlternateSetting =	1,
@@ -147,18 +168,27 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
 };
 
 /* B.4.2  Class-Specific AS Interface Descriptor */
-static struct uac1_as_header_descriptor as_header_desc = {
+static struct uac1_as_header_descriptor as_out_header_desc = {
 	.bLength =		UAC_DT_AS_HEADER_SIZE,
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_AS_GENERAL,
-	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bTerminalLink =	USB_OUT_IT_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+static struct uac1_as_header_descriptor as_in_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	USB_IN_OT_ID,
 	.bDelay =		1,
 	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
 };
 
 DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
 
-static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+static struct uac_format_type_i_discrete_descriptor_1 as_out_type_i_desc = {
 	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
 	.bDescriptorType =	USB_DT_CS_INTERFACE,
 	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
@@ -184,48 +214,97 @@ static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
 	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
 	.bDescriptorType =	USB_DT_CS_ENDPOINT,
 	.bDescriptorSubtype =	UAC_EP_GENERAL,
-	.bmAttributes = 	1,
+	.bmAttributes =		1,
 	.bLockDelayUnits =	1,
-	.wLockDelay =		__constant_cpu_to_le16(1),
+	.wLockDelay =		cpu_to_le16(1),
+};
+
+static struct uac_format_type_i_discrete_descriptor_1 as_in_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_ASYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize	=	cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+	.bInterval =		4,
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	0,
+	.wLockDelay =		0,
 };
 
 static struct usb_descriptor_header *f_audio_desc[] = {
 	(struct usb_descriptor_header *)&ac_interface_desc,
 	(struct usb_descriptor_header *)&ac_header_desc,
 
-	(struct usb_descriptor_header *)&input_terminal_desc,
-	(struct usb_descriptor_header *)&output_terminal_desc,
-	(struct usb_descriptor_header *)&feature_unit_desc,
+	(struct usb_descriptor_header *)&usb_out_it_desc,
+	(struct usb_descriptor_header *)&io_out_ot_desc,
+	(struct usb_descriptor_header *)&io_in_it_desc,
+	(struct usb_descriptor_header *)&usb_in_ot_desc,
 
-	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
-	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
-	(struct usb_descriptor_header *)&as_header_desc,
+	(struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_out_header_desc,
 
-	(struct usb_descriptor_header *)&as_type_i_desc,
+	(struct usb_descriptor_header *)&as_out_type_i_desc,
 
 	(struct usb_descriptor_header *)&as_out_ep_desc,
 	(struct usb_descriptor_header *)&as_iso_out_desc,
+
+	(struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_in_header_desc,
+
+	(struct usb_descriptor_header *)&as_in_type_i_desc,
+
+	(struct usb_descriptor_header *)&as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
 	NULL,
 };
 
 enum {
 	STR_AC_IF,
-	STR_INPUT_TERMINAL,
-	STR_INPUT_TERMINAL_CH_NAMES,
-	STR_FEAT_DESC_0,
-	STR_OUTPUT_TERMINAL,
-	STR_AS_IF_ALT0,
-	STR_AS_IF_ALT1,
+	STR_USB_OUT_IT,
+	STR_USB_OUT_IT_CH_NAMES,
+	STR_IO_OUT_OT,
+	STR_IO_IN_IT,
+	STR_IO_IN_IT_CH_NAMES,
+	STR_USB_IN_OT,
+	STR_AS_OUT_IF_ALT0,
+	STR_AS_OUT_IF_ALT1,
+	STR_AS_IN_IF_ALT0,
+	STR_AS_IN_IF_ALT1,
 };
 
 static struct usb_string strings_uac1[] = {
 	[STR_AC_IF].s = "AC Interface",
-	[STR_INPUT_TERMINAL].s = "Input terminal",
-	[STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
-	[STR_FEAT_DESC_0].s = "Volume control & mute",
-	[STR_OUTPUT_TERMINAL].s = "Output terminal",
-	[STR_AS_IF_ALT0].s = "AS Interface",
-	[STR_AS_IF_ALT1].s = "AS Interface",
+	[STR_USB_OUT_IT].s = "Playback Input terminal",
+	[STR_USB_OUT_IT_CH_NAMES].s = "Playback Channels",
+	[STR_IO_OUT_OT].s = "Playback Output terminal",
+	[STR_IO_IN_IT].s = "Capture Input terminal",
+	[STR_IO_IN_IT_CH_NAMES].s = "Capture Channels",
+	[STR_USB_IN_OT].s = "Capture Output terminal",
+	[STR_AS_OUT_IF_ALT0].s = "Playback Inactive",
+	[STR_AS_OUT_IF_ALT1].s = "Playback Active",
+	[STR_AS_IN_IF_ALT0].s = "Capture Inactive",
+	[STR_AS_IN_IF_ALT1].s = "Capture Active",
 	{ },
 };
 
@@ -243,216 +322,6 @@ static struct usb_gadget_strings *uac1_strings[] = {
  * This function is an ALSA sound card following USB Audio Class Spec 1.0.
  */
 
-/*-------------------------------------------------------------------------*/
-struct f_audio_buf {
-	u8 *buf;
-	int actual;
-	struct list_head list;
-};
-
-static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
-{
-	struct f_audio_buf *copy_buf;
-
-	copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
-	if (!copy_buf)
-		return ERR_PTR(-ENOMEM);
-
-	copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
-	if (!copy_buf->buf) {
-		kfree(copy_buf);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return copy_buf;
-}
-
-static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
-{
-	kfree(audio_buf->buf);
-	kfree(audio_buf);
-}
-/*-------------------------------------------------------------------------*/
-
-struct f_audio {
-	struct gaudio			card;
-
-	/* endpoints handle full and/or high speeds */
-	struct usb_ep			*out_ep;
-
-	spinlock_t			lock;
-	struct f_audio_buf *copy_buf;
-	struct work_struct playback_work;
-	struct list_head play_queue;
-
-	/* Control Set command */
-	struct list_head cs;
-	u8 set_cmd;
-	struct usb_audio_control *set_con;
-};
-
-static inline struct f_audio *func_to_audio(struct usb_function *f)
-{
-	return container_of(f, struct f_audio, card.func);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void f_audio_playback_work(struct work_struct *data)
-{
-	struct f_audio *audio = container_of(data, struct f_audio,
-					playback_work);
-	struct f_audio_buf *play_buf;
-
-	spin_lock_irq(&audio->lock);
-	if (list_empty(&audio->play_queue)) {
-		spin_unlock_irq(&audio->lock);
-		return;
-	}
-	play_buf = list_first_entry(&audio->play_queue,
-			struct f_audio_buf, list);
-	list_del(&play_buf->list);
-	spin_unlock_irq(&audio->lock);
-
-	u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
-	f_audio_buffer_free(play_buf);
-}
-
-static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	struct f_audio *audio = req->context;
-	struct usb_composite_dev *cdev = audio->card.func.config->cdev;
-	struct f_audio_buf *copy_buf = audio->copy_buf;
-	struct f_uac1_opts *opts;
-	int audio_buf_size;
-	int err;
-
-	opts = container_of(audio->card.func.fi, struct f_uac1_opts,
-			    func_inst);
-	audio_buf_size = opts->audio_buf_size;
-
-	if (!copy_buf)
-		return -EINVAL;
-
-	/* Copy buffer is full, add it to the play_queue */
-	if (audio_buf_size - copy_buf->actual < req->actual) {
-		list_add_tail(&copy_buf->list, &audio->play_queue);
-		schedule_work(&audio->playback_work);
-		copy_buf = f_audio_buffer_alloc(audio_buf_size);
-		if (IS_ERR(copy_buf))
-			return -ENOMEM;
-	}
-
-	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
-	copy_buf->actual += req->actual;
-	audio->copy_buf = copy_buf;
-
-	err = usb_ep_queue(ep, req, GFP_ATOMIC);
-	if (err)
-		ERROR(cdev, "%s queue req: %d\n", ep->name, err);
-
-	return 0;
-
-}
-
-static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	struct f_audio *audio = req->context;
-	int status = req->status;
-	u32 data = 0;
-	struct usb_ep *out_ep = audio->out_ep;
-
-	switch (status) {
-
-	case 0:				/* normal completion? */
-		if (ep == out_ep)
-			f_audio_out_ep_complete(ep, req);
-		else if (audio->set_con) {
-			memcpy(&data, req->buf, req->length);
-			audio->set_con->set(audio->set_con, audio->set_cmd,
-					le16_to_cpu(data));
-			audio->set_con = NULL;
-		}
-		break;
-	default:
-		break;
-	}
-}
-
-static int audio_set_intf_req(struct usb_function *f,
-		const struct usb_ctrlrequest *ctrl)
-{
-	struct f_audio		*audio = func_to_audio(f);
-	struct usb_composite_dev *cdev = f->config->cdev;
-	struct usb_request	*req = cdev->req;
-	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
-	u16			len = le16_to_cpu(ctrl->wLength);
-	u16			w_value = le16_to_cpu(ctrl->wValue);
-	u8			con_sel = (w_value >> 8) & 0xFF;
-	u8			cmd = (ctrl->bRequest & 0x0F);
-	struct usb_audio_control_selector *cs;
-	struct usb_audio_control *con;
-
-	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
-			ctrl->bRequest, w_value, len, id);
-
-	list_for_each_entry(cs, &audio->cs, list) {
-		if (cs->id == id) {
-			list_for_each_entry(con, &cs->control, list) {
-				if (con->type == con_sel) {
-					audio->set_con = con;
-					break;
-				}
-			}
-			break;
-		}
-	}
-
-	audio->set_cmd = cmd;
-	req->context = audio;
-	req->complete = f_audio_complete;
-
-	return len;
-}
-
-static int audio_get_intf_req(struct usb_function *f,
-		const struct usb_ctrlrequest *ctrl)
-{
-	struct f_audio		*audio = func_to_audio(f);
-	struct usb_composite_dev *cdev = f->config->cdev;
-	struct usb_request	*req = cdev->req;
-	int			value = -EOPNOTSUPP;
-	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
-	u16			len = le16_to_cpu(ctrl->wLength);
-	u16			w_value = le16_to_cpu(ctrl->wValue);
-	u8			con_sel = (w_value >> 8) & 0xFF;
-	u8			cmd = (ctrl->bRequest & 0x0F);
-	struct usb_audio_control_selector *cs;
-	struct usb_audio_control *con;
-
-	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
-			ctrl->bRequest, w_value, len, id);
-
-	list_for_each_entry(cs, &audio->cs, list) {
-		if (cs->id == id) {
-			list_for_each_entry(con, &cs->control, list) {
-				if (con->type == con_sel && con->get) {
-					value = con->get(con, cmd);
-					break;
-				}
-			}
-			break;
-		}
-	}
-
-	req->context = audio;
-	req->complete = f_audio_complete;
-	len = min_t(size_t, sizeof(value), len);
-	memcpy(req->buf, &value, len);
-
-	return len;
-}
-
 static int audio_set_endpoint_req(struct usb_function *f,
 		const struct usb_ctrlrequest *ctrl)
 {
@@ -531,14 +400,6 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 	 * activation uses set_alt().
 	 */
 	switch (ctrl->bRequestType) {
-	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
-		value = audio_set_intf_req(f, ctrl);
-		break;
-
-	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
-		value = audio_get_intf_req(f, ctrl);
-		break;
-
 	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
 		value = audio_set_endpoint_req(f, ctrl);
 		break;
@@ -571,143 +432,158 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 
 static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
-	struct f_audio		*audio = func_to_audio(f);
 	struct usb_composite_dev *cdev = f->config->cdev;
-	struct usb_ep *out_ep = audio->out_ep;
-	struct usb_request *req;
-	struct f_uac1_opts *opts;
-	int req_buf_size, req_count, audio_buf_size;
-	int i = 0, err = 0;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct device *dev = &gadget->dev;
+	struct f_uac1 *uac1 = func_to_uac1(f);
+	int ret = 0;
 
-	DBG(cdev, "intf %d, alt %d\n", intf, alt);
-
-	opts = container_of(f->fi, struct f_uac1_opts, func_inst);
-	req_buf_size = opts->req_buf_size;
-	req_count = opts->req_count;
-	audio_buf_size = opts->audio_buf_size;
-
-	if (intf == 1) {
-		if (alt == 1) {
-			err = config_ep_by_speed(cdev->gadget, f, out_ep);
-			if (err)
-				return err;
-
-			usb_ep_enable(out_ep);
-			audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
-			if (IS_ERR(audio->copy_buf))
-				return -ENOMEM;
-
-			/*
-			 * allocate a bunch of read buffers
-			 * and queue them all at once.
-			 */
-			for (i = 0; i < req_count && err == 0; i++) {
-				req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
-				if (req) {
-					req->buf = kzalloc(req_buf_size,
-							GFP_ATOMIC);
-					if (req->buf) {
-						req->length = req_buf_size;
-						req->context = audio;
-						req->complete =
-							f_audio_complete;
-						err = usb_ep_queue(out_ep,
-							req, GFP_ATOMIC);
-						if (err)
-							ERROR(cdev,
-							"%s queue req: %d\n",
-							out_ep->name, err);
-					} else
-						err = -ENOMEM;
-				} else
-					err = -ENOMEM;
-			}
-
-		} else {
-			struct f_audio_buf *copy_buf = audio->copy_buf;
-			if (copy_buf) {
-				list_add_tail(&copy_buf->list,
-						&audio->play_queue);
-				schedule_work(&audio->playback_work);
-			}
-		}
+	/* No i/f has more than 2 alt settings */
+	if (alt > 1) {
+		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+		return -EINVAL;
 	}
 
-	return err;
+	if (intf == uac1->ac_intf) {
+		/* Control I/f has only 1 AltSetting - 0 */
+		if (alt) {
+			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	if (intf == uac1->as_out_intf) {
+		uac1->as_out_alt = alt;
+
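+		/* alt 1 starts the capture stream, alt 0 stops it */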
+		if (alt)
+			ret = u_audio_start_capture(&uac1->g_audio);
+		else
+			u_audio_stop_capture(&uac1->g_audio);
+	} else if (intf == uac1->as_in_intf) {
+		uac1->as_in_alt = alt;
+
+		if (alt)
+			ret = u_audio_start_playback(&uac1->g_audio);
+		else
+			u_audio_stop_playback(&uac1->g_audio);
+	} else {
+		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return ret;
 }
 
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_gadget *gadget = cdev->gadget;
+	struct device *dev = &gadget->dev;
+	struct f_uac1 *uac1 = func_to_uac1(f);
+
+	if (intf == uac1->ac_intf)
+		return uac1->ac_alt;
+	else if (intf == uac1->as_out_intf)
+		return uac1->as_out_alt;
+	else if (intf == uac1->as_in_intf)
+		return uac1->as_in_alt;
+	else
+		dev_err(dev, "%s:%d Invalid Interface %d!\n",
+			__func__, __LINE__, intf);
+
+	return -EINVAL;
+}
+
 static void f_audio_disable(struct usb_function *f)
 {
-	return;
+	struct f_uac1 *uac1 = func_to_uac1(f);
+
+	uac1->as_out_alt = 0;
+	uac1->as_in_alt = 0;
+
+	u_audio_stop_capture(&uac1->g_audio);
 }
 
 /*-------------------------------------------------------------------------*/
 
-static void f_audio_build_desc(struct f_audio *audio)
-{
-	struct gaudio *card = &audio->card;
-	u8 *sam_freq;
-	int rate;
-
-	/* Set channel numbers */
-	input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
-	as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
-
-	/* Set sample rates */
-	rate = u_audio_get_playback_rate(card);
-	sam_freq = as_type_i_desc.tSamFreq[0];
-	memcpy(sam_freq, &rate, 3);
-
-	/* Todo: Set Sample bits and other parameters */
-
-	return;
-}
-
 /* audio function driver setup/binding */
-static int
-f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
 {
-	struct usb_composite_dev *cdev = c->cdev;
-	struct f_audio		*audio = func_to_audio(f);
-	struct usb_string	*us;
-	int			status;
-	struct usb_ep		*ep = NULL;
-	struct f_uac1_opts	*audio_opts;
+	struct usb_composite_dev	*cdev = c->cdev;
+	struct usb_gadget		*gadget = cdev->gadget;
+	struct f_uac1			*uac1 = func_to_uac1(f);
+	struct g_audio			*audio = func_to_g_audio(f);
+	struct f_uac1_opts		*audio_opts;
+	struct usb_ep			*ep = NULL;
+	struct usb_string		*us;
+	u8				*sam_freq;
+	int				rate;
+	int				status;
 
 	audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
-	audio->card.gadget = c->cdev->gadget;
-	/* set up ASLA audio devices */
-	if (!audio_opts->bound) {
-		status = gaudio_setup(&audio->card);
-		if (status < 0)
-			return status;
-		audio_opts->bound = true;
-	}
+
 	us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
 	if (IS_ERR(us))
 		return PTR_ERR(us);
 	ac_interface_desc.iInterface = us[STR_AC_IF].id;
-	input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
-	input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
-	feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
-	output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
-	as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
-	as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+	usb_out_it_desc.iTerminal = us[STR_USB_OUT_IT].id;
+	usb_out_it_desc.iChannelNames = us[STR_USB_OUT_IT_CH_NAMES].id;
+	io_out_ot_desc.iTerminal = us[STR_IO_OUT_OT].id;
+	as_out_interface_alt_0_desc.iInterface = us[STR_AS_OUT_IF_ALT0].id;
+	as_out_interface_alt_1_desc.iInterface = us[STR_AS_OUT_IF_ALT1].id;
+	io_in_it_desc.iTerminal = us[STR_IO_IN_IT].id;
+	io_in_it_desc.iChannelNames = us[STR_IO_IN_IT_CH_NAMES].id;
+	usb_in_ot_desc.iTerminal = us[STR_USB_IN_OT].id;
+	as_in_interface_alt_0_desc.iInterface = us[STR_AS_IN_IF_ALT0].id;
+	as_in_interface_alt_1_desc.iInterface = us[STR_AS_IN_IF_ALT1].id;
 
+	/* Set channel numbers */
+	usb_out_it_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+	usb_out_it_desc.wChannelConfig = cpu_to_le16(audio_opts->c_chmask);
+	as_out_type_i_desc.bNrChannels = num_channels(audio_opts->c_chmask);
+	as_out_type_i_desc.bSubframeSize = audio_opts->c_ssize;
+	as_out_type_i_desc.bBitResolution = audio_opts->c_ssize * 8;
+	io_in_it_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+	io_in_it_desc.wChannelConfig = cpu_to_le16(audio_opts->p_chmask);
+	as_in_type_i_desc.bNrChannels = num_channels(audio_opts->p_chmask);
+	as_in_type_i_desc.bSubframeSize = audio_opts->p_ssize;
+	as_in_type_i_desc.bBitResolution = audio_opts->p_ssize * 8;
 
-	f_audio_build_desc(audio);
+	/* Set sample rates */
+	rate = audio_opts->c_srate;
+	sam_freq = as_out_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+	rate = audio_opts->p_srate;
+	sam_freq = as_in_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
 
 	/* allocate instance-specific interface IDs, and patch descriptors */
 	status = usb_interface_id(c, f);
 	if (status < 0)
 		goto fail;
 	ac_interface_desc.bInterfaceNumber = status;
+	uac1->ac_intf = status;
+	uac1->ac_alt = 0;
 
 	status = usb_interface_id(c, f);
 	if (status < 0)
 		goto fail;
-	as_interface_alt_0_desc.bInterfaceNumber = status;
-	as_interface_alt_1_desc.bInterfaceNumber = status;
+	as_out_interface_alt_0_desc.bInterfaceNumber = status;
+	as_out_interface_alt_1_desc.bInterfaceNumber = status;
+	uac1->as_out_intf = status;
+	uac1->as_out_alt = 0;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_in_interface_alt_0_desc.bInterfaceNumber = status;
+	as_in_interface_alt_1_desc.bInterfaceNumber = status;
+	uac1->as_in_intf = status;
+	uac1->as_in_alt = 0;
+
+	audio->gadget = gadget;
 
 	status = -ENODEV;
 
@@ -718,52 +594,42 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
 	audio->out_ep = ep;
 	audio->out_ep->desc = &as_out_ep_desc;
 
-	status = -ENOMEM;
+	ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	audio->in_ep->desc = &as_in_ep_desc;
 
 	/* copy descriptors, and track endpoint copies */
 	status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
 					NULL);
 	if (status)
 		goto fail;
+
+	audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize;
+	audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize;
+	audio->params.c_chmask = audio_opts->c_chmask;
+	audio->params.c_srate = audio_opts->c_srate;
+	audio->params.c_ssize = audio_opts->c_ssize;
+	audio->params.p_chmask = audio_opts->p_chmask;
+	audio->params.p_srate = audio_opts->p_srate;
+	audio->params.p_ssize = audio_opts->p_ssize;
+	audio->params.req_number = audio_opts->req_number;
+
+	status = g_audio_setup(audio, "UAC1_PCM", "UAC1_Gadget");
+	if (status)
+		goto err_card_register;
+
 	return 0;
 
+err_card_register:
+	usb_free_all_descriptors(f);
 fail:
-	gaudio_cleanup(&audio->card);
 	return status;
 }
 
 /*-------------------------------------------------------------------------*/
 
-static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
-{
-	con->data[cmd] = value;
-
-	return 0;
-}
-
-static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
-{
-	return con->data[cmd];
-}
-
-/* Todo: add more control selecotor dynamically */
-static int control_selector_init(struct f_audio *audio)
-{
-	INIT_LIST_HEAD(&audio->cs);
-	list_add(&feature_unit.list, &audio->cs);
-
-	INIT_LIST_HEAD(&feature_unit.control);
-	list_add(&mute_control.list, &feature_unit.control);
-	list_add(&volume_control.list, &feature_unit.control);
-
-	volume_control.data[UAC__CUR] = 0xffc0;
-	volume_control.data[UAC__MIN] = 0xe3a0;
-	volume_control.data[UAC__MAX] = 0xfff0;
-	volume_control.data[UAC__RES] = 0x0030;
-
-	return 0;
-}
-
 static inline struct f_uac1_opts *to_f_uac1_opts(struct config_item *item)
 {
 	return container_of(to_config_group(item), struct f_uac1_opts,
@@ -781,9 +647,10 @@ static struct configfs_item_operations f_uac1_item_ops = {
 	.release	= f_uac1_attr_release,
 };
 
-#define UAC1_INT_ATTRIBUTE(name)					\
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
-					 char *page)			\
+#define UAC1_ATTRIBUTE(name)						\
+static ssize_t f_uac1_opts_##name##_show(				\
+					  struct config_item *item,	\
+					  char *page)			\
 {									\
 	struct f_uac1_opts *opts = to_f_uac1_opts(item);		\
 	int result;							\
@@ -795,7 +662,8 @@ static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
 	return result;							\
 }									\
 									\
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item,		\
+static ssize_t f_uac1_opts_##name##_store(				\
+					  struct config_item *item,	\
 					  const char *page, size_t len)	\
 {									\
 	struct f_uac1_opts *opts = to_f_uac1_opts(item);		\
@@ -822,64 +690,22 @@ end:									\
 									\
 CONFIGFS_ATTR(f_uac1_opts_, name)
 
-UAC1_INT_ATTRIBUTE(req_buf_size);
-UAC1_INT_ATTRIBUTE(req_count);
-UAC1_INT_ATTRIBUTE(audio_buf_size);
-
-#define UAC1_STR_ATTRIBUTE(name)					\
-static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
-					 char *page)			\
-{									\
-	struct f_uac1_opts *opts = to_f_uac1_opts(item);		\
-	int result;							\
-									\
-	mutex_lock(&opts->lock);					\
-	result = sprintf(page, "%s\n", opts->name);			\
-	mutex_unlock(&opts->lock);					\
-									\
-	return result;							\
-}									\
-									\
-static ssize_t f_uac1_opts_##name##_store(struct config_item *item,	\
-					  const char *page, size_t len)	\
-{									\
-	struct f_uac1_opts *opts = to_f_uac1_opts(item);		\
-	int ret = -EBUSY;						\
-	char *tmp;							\
-									\
-	mutex_lock(&opts->lock);					\
-	if (opts->refcnt)						\
-		goto end;						\
-									\
-	tmp = kstrndup(page, len, GFP_KERNEL);				\
-	if (tmp) {							\
-		ret = -ENOMEM;						\
-		goto end;						\
-	}								\
-	if (opts->name##_alloc)						\
-		kfree(opts->name);					\
-	opts->name##_alloc = true;					\
-	opts->name = tmp;						\
-	ret = len;							\
-									\
-end:									\
-	mutex_unlock(&opts->lock);					\
-	return ret;							\
-}									\
-									\
-CONFIGFS_ATTR(f_uac1_opts_, name)
-
-UAC1_STR_ATTRIBUTE(fn_play);
-UAC1_STR_ATTRIBUTE(fn_cap);
-UAC1_STR_ATTRIBUTE(fn_cntl);
+UAC1_ATTRIBUTE(c_chmask);
+UAC1_ATTRIBUTE(c_srate);
+UAC1_ATTRIBUTE(c_ssize);
+UAC1_ATTRIBUTE(p_chmask);
+UAC1_ATTRIBUTE(p_srate);
+UAC1_ATTRIBUTE(p_ssize);
+UAC1_ATTRIBUTE(req_number);
 
 static struct configfs_attribute *f_uac1_attrs[] = {
-	&f_uac1_opts_attr_req_buf_size,
-	&f_uac1_opts_attr_req_count,
-	&f_uac1_opts_attr_audio_buf_size,
-	&f_uac1_opts_attr_fn_play,
-	&f_uac1_opts_attr_fn_cap,
-	&f_uac1_opts_attr_fn_cntl,
+	&f_uac1_opts_attr_c_chmask,
+	&f_uac1_opts_attr_c_srate,
+	&f_uac1_opts_attr_c_ssize,
+	&f_uac1_opts_attr_p_chmask,
+	&f_uac1_opts_attr_p_srate,
+	&f_uac1_opts_attr_p_ssize,
+	&f_uac1_opts_attr_req_number,
 	NULL,
 };
 
@@ -894,12 +720,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
 	struct f_uac1_opts *opts;
 
 	opts = container_of(f, struct f_uac1_opts, func_inst);
-	if (opts->fn_play_alloc)
-		kfree(opts->fn_play);
-	if (opts->fn_cap_alloc)
-		kfree(opts->fn_cap);
-	if (opts->fn_cntl_alloc)
-		kfree(opts->fn_cntl);
 	kfree(opts);
 }
 
@@ -917,21 +737,22 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
 	config_group_init_type_name(&opts->func_inst.group, "",
 				    &f_uac1_func_type);
 
-	opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
-	opts->req_count = UAC1_REQ_COUNT;
-	opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
-	opts->fn_play = FILE_PCM_PLAYBACK;
-	opts->fn_cap = FILE_PCM_CAPTURE;
-	opts->fn_cntl = FILE_CONTROL;
+	opts->c_chmask = UAC1_DEF_CCHMASK;
+	opts->c_srate = UAC1_DEF_CSRATE;
+	opts->c_ssize = UAC1_DEF_CSSIZE;
+	opts->p_chmask = UAC1_DEF_PCHMASK;
+	opts->p_srate = UAC1_DEF_PSRATE;
+	opts->p_ssize = UAC1_DEF_PSSIZE;
+	opts->req_number = UAC1_DEF_REQ_NUM;
 	return &opts->func_inst;
 }
 
 static void f_audio_free(struct usb_function *f)
 {
-	struct f_audio *audio = func_to_audio(f);
+	struct g_audio *audio;
 	struct f_uac1_opts *opts;
 
-	gaudio_cleanup(&audio->card);
+	audio = func_to_g_audio(f);
 	opts = container_of(f->fi, struct f_uac1_opts, func_inst);
 	kfree(audio);
 	mutex_lock(&opts->lock);
@@ -941,42 +762,41 @@ static void f_audio_free(struct usb_function *f)
 
 static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
 {
+	struct g_audio *audio = func_to_g_audio(f);
+
+	g_audio_cleanup(audio);
 	usb_free_all_descriptors(f);
+
+	audio->gadget = NULL;
 }
 
 static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
 {
-	struct f_audio *audio;
+	struct f_uac1 *uac1;
 	struct f_uac1_opts *opts;
 
 	/* allocate and initialize one new instance */
-	audio = kzalloc(sizeof(*audio), GFP_KERNEL);
-	if (!audio)
+	uac1 = kzalloc(sizeof(*uac1), GFP_KERNEL);
+	if (!uac1)
 		return ERR_PTR(-ENOMEM);
 
-	audio->card.func.name = "g_audio";
-
 	opts = container_of(fi, struct f_uac1_opts, func_inst);
 	mutex_lock(&opts->lock);
 	++opts->refcnt;
 	mutex_unlock(&opts->lock);
-	INIT_LIST_HEAD(&audio->play_queue);
-	spin_lock_init(&audio->lock);
 
-	audio->card.func.bind = f_audio_bind;
-	audio->card.func.unbind = f_audio_unbind;
-	audio->card.func.set_alt = f_audio_set_alt;
-	audio->card.func.setup = f_audio_setup;
-	audio->card.func.disable = f_audio_disable;
-	audio->card.func.free_func = f_audio_free;
+	uac1->g_audio.func.name = "uac1_func";
+	uac1->g_audio.func.bind = f_audio_bind;
+	uac1->g_audio.func.unbind = f_audio_unbind;
+	uac1->g_audio.func.set_alt = f_audio_set_alt;
+	uac1->g_audio.func.get_alt = f_audio_get_alt;
+	uac1->g_audio.func.setup = f_audio_setup;
+	uac1->g_audio.func.disable = f_audio_disable;
+	uac1->g_audio.func.free_func = f_audio_free;
 
-	control_selector_init(audio);
-
-	INIT_WORK(&audio->playback_work, f_audio_playback_work);
-
-	return &audio->card.func;
+	return &uac1->g_audio.func;
 }
 
 DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc);
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bryan Wu");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
new file mode 100644
index 0000000..5d229e7
--- /dev/null
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -0,0 +1,1021 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+
+#include "u_uac1_legacy.h"
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces: AudioControl and AudioStreaming
+ * TODO: only supports playback currently
+ */
+#define F_AUDIO_AC_INTERFACE	0
+#define F_AUDIO_AS_INTERFACE	1
+#define F_AUDIO_NUM_INTERFACES	1
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+/*
+ * The number of AudioStreaming and MIDIStreaming interfaces
+ * in the Audio Interface Collection
+ */
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+	+ UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_1 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	F_AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+	/* Interface number of the first AudioStream interface */
+		[0] =		1,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_TERMINAL_STREAMING,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+	.bmaControls[0]		= (UAC_FU_MUTE | UAC_FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+	.list = LIST_HEAD_INIT(mute_control.list),
+	.name = "Mute Control",
+	.type = UAC_FU_MUTE,
+	/* Todo: add real Mute control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+	.list = LIST_HEAD_INIT(volume_control.list),
+	.name = "Volume Control",
+	.type = UAC_FU_VOLUME,
+	/* Todo: add real Volume control code */
+	.set = generic_set_cmd,
+	.get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+	.list = LIST_HEAD_INIT(feature_unit.list),
+	.id = FEATURE_UNIT_ID,
+	.name = "Mute & Volume Control",
+	.type = UAC_FEATURE_UNIT,
+	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_OUTPUT_TERMINAL_SPEAKER,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_SYNC_ADAPTIVE
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize	=	cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+	.bInterval =		4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_out_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes = 	1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&as_out_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_out_desc,
+	NULL,
+};
+
+enum {
+	STR_AC_IF,
+	STR_INPUT_TERMINAL,
+	STR_INPUT_TERMINAL_CH_NAMES,
+	STR_FEAT_DESC_0,
+	STR_OUTPUT_TERMINAL,
+	STR_AS_IF_ALT0,
+	STR_AS_IF_ALT1,
+};
+
+static struct usb_string strings_uac1[] = {
+	[STR_AC_IF].s = "AC Interface",
+	[STR_INPUT_TERMINAL].s = "Input terminal",
+	[STR_INPUT_TERMINAL_CH_NAMES].s = "Channels",
+	[STR_FEAT_DESC_0].s = "Volume control & mute",
+	[STR_OUTPUT_TERMINAL].s = "Output terminal",
+	[STR_AS_IF_ALT0].s = "AS Interface",
+	[STR_AS_IF_ALT1].s = "AS Interface",
+	{ },
+};
+
+static struct usb_gadget_strings str_uac1 = {
+	.language = 0x0409,	/* en-us */
+	.strings = strings_uac1,
+};
+
+static struct usb_gadget_strings *uac1_strings[] = {
+	&str_uac1,
+	NULL,
+};
+
+/*
+ * This function implements an ALSA sound card per USB Audio Class Spec 1.0.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+	u8 *buf;
+	int actual;
+	struct list_head list;
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
+{
+	struct f_audio_buf *copy_buf;
+
+	copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+	if (!copy_buf)
+		return ERR_PTR(-ENOMEM);
+
+	copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+	if (!copy_buf->buf) {
+		kfree(copy_buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
+{
+	kfree(audio_buf->buf);
+	kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+	struct gaudio			card;
+
+	u8 ac_intf, ac_alt;
+	u8 as_intf, as_alt;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*out_ep;
+
+	spinlock_t			lock;
+	struct f_audio_buf *copy_buf;
+	struct work_struct playback_work;
+	struct list_head play_queue;
+
+	/* Control Set command */
+	struct list_head cs;
+	u8 set_cmd;
+	struct usb_audio_control *set_con;
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct f_audio, card.func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data)
+{
+	struct f_audio *audio = container_of(data, struct f_audio,
+					playback_work);
+	struct f_audio_buf *play_buf;
+
+	spin_lock_irq(&audio->lock);
+	if (list_empty(&audio->play_queue)) {
+		spin_unlock_irq(&audio->lock);
+		return;
+	}
+	play_buf = list_first_entry(&audio->play_queue,
+			struct f_audio_buf, list);
+	list_del(&play_buf->list);
+	spin_unlock_irq(&audio->lock);
+
+	u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+	f_audio_buffer_free(play_buf);
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+	struct f_audio_buf *copy_buf = audio->copy_buf;
+	struct f_uac1_legacy_opts *opts;
+	int audio_buf_size;
+	int err;
+
+	opts = container_of(audio->card.func.fi, struct f_uac1_legacy_opts,
+			    func_inst);
+	audio_buf_size = opts->audio_buf_size;
+
+	if (!copy_buf)
+		return -EINVAL;
+
+	/* Copy buffer is full, add it to the play_queue */
+	if (audio_buf_size - copy_buf->actual < req->actual) {
+		list_add_tail(&copy_buf->list, &audio->play_queue);
+		schedule_work(&audio->playback_work);
+		copy_buf = f_audio_buffer_alloc(audio_buf_size);
+		if (IS_ERR(copy_buf))
+			return -ENOMEM;
+	}
+
+	memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+	copy_buf->actual += req->actual;
+	audio->copy_buf = copy_buf;
+
+	err = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (err)
+		ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+	return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_audio *audio = req->context;
+	int status = req->status;
+	u32 data = 0;
+	struct usb_ep *out_ep = audio->out_ep;
+
+	switch (status) {
+
+	case 0:				/* normal completion? */
+		if (ep == out_ep)
+			f_audio_out_ep_complete(ep, req);
+		else if (audio->set_con) {
+			memcpy(&data, req->buf, req->length);
+			audio->set_con->set(audio->set_con, audio->set_cmd,
+					le16_to_cpu(data));
+			audio->set_con = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel) {
+					audio->set_con = con;
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	audio->set_cmd = cmd;
+	req->context = audio;
+	req->complete = f_audio_complete;
+
+	return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u8			con_sel = (w_value >> 8) & 0xFF;
+	u8			cmd = (ctrl->bRequest & 0x0F);
+	struct usb_audio_control_selector *cs;
+	struct usb_audio_control *con;
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+			ctrl->bRequest, w_value, len, id);
+
+	list_for_each_entry(cs, &audio->cs, list) {
+		if (cs->id == id) {
+			list_for_each_entry(con, &cs->control, list) {
+				if (con->type == con_sel && con->get) {
+					value = con->get(con, cmd);
+					break;
+				}
+			}
+			break;
+		}
+	}
+
+	req->context = audio;
+	req->complete = f_audio_complete;
+	len = min_t(size_t, sizeof(value), len);
+	memcpy(req->buf, &value, len);
+
+	return len;
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int			value = -EOPNOTSUPP;
+	u16			ep = le16_to_cpu(ctrl->wIndex);
+	u16			len = le16_to_cpu(ctrl->wLength);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+		value = len;
+		break;
+
+	case UAC_SET_MIN:
+		break;
+
+	case UAC_SET_MAX:
+		break;
+
+	case UAC_SET_RES:
+		break;
+
+	case UAC_SET_MEM:
+		break;
+
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_GET_CUR:
+	case UAC_GET_MIN:
+	case UAC_GET_MAX:
+	case UAC_GET_RES:
+		value = len;
+		break;
+	case UAC_GET_MEM:
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+		value = audio_set_intf_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
+		value = audio_get_intf_req(f, ctrl);
+		break;
+
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+
+	default:
+		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_ep *out_ep = audio->out_ep;
+	struct usb_request *req;
+	struct f_uac1_legacy_opts *opts;
+	int req_buf_size, req_count, audio_buf_size;
+	int i = 0, err = 0;
+
+	DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+	opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+	req_buf_size = opts->req_buf_size;
+	req_count = opts->req_count;
+	audio_buf_size = opts->audio_buf_size;
+
+	/* No i/f has more than 2 alt settings */
+	if (alt > 1) {
+		ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (intf == audio->ac_intf) {
+		/* Control I/f has only 1 AltSetting - 0 */
+		if (alt) {
+			ERROR(cdev, "%s:%d Error!\n", __func__, __LINE__);
+			return -EINVAL;
+		}
+		return 0;
+	} else if (intf == audio->as_intf) {
+		if (alt == 1) {
+			err = config_ep_by_speed(cdev->gadget, f, out_ep);
+			if (err)
+				return err;
+
+			usb_ep_enable(out_ep);
+			audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+			if (IS_ERR(audio->copy_buf))
+				return -ENOMEM;
+
+			/*
+			 * allocate a bunch of read buffers
+			 * and queue them all at once.
+			 */
+			for (i = 0; i < req_count && err == 0; i++) {
+				req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+				if (req) {
+					req->buf = kzalloc(req_buf_size,
+							GFP_ATOMIC);
+					if (req->buf) {
+						req->length = req_buf_size;
+						req->context = audio;
+						req->complete =
+							f_audio_complete;
+						err = usb_ep_queue(out_ep,
+							req, GFP_ATOMIC);
+						if (err)
+							ERROR(cdev,
+							"%s queue req: %d\n",
+							out_ep->name, err);
+					} else
+						err = -ENOMEM;
+				} else
+					err = -ENOMEM;
+			}
+
+		} else {
+			struct f_audio_buf *copy_buf = audio->copy_buf;
+			if (copy_buf) {
+				list_add_tail(&copy_buf->list,
+						&audio->play_queue);
+				schedule_work(&audio->playback_work);
+			}
+		}
+		audio->as_alt = alt;
+	}
+
+	return err;
+}
+
+static int f_audio_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (intf == audio->ac_intf)
+		return audio->ac_alt;
+	else if (intf == audio->as_intf)
+		return audio->as_alt;
+	else
+		ERROR(cdev, "%s:%d Invalid Interface %d!\n",
+		      __func__, __LINE__, intf);
+
+	return -EINVAL;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+	return;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio)
+{
+	struct gaudio *card = &audio->card;
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+	as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+	/* Set sample rates */
+	rate = u_audio_get_playback_rate(card);
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+
+	/* Todo: Set Sample bits and other parameters */
+
+	return;
+}
+
+/* audio function driver setup/binding */
+static int
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_audio		*audio = func_to_audio(f);
+	struct usb_string	*us;
+	int			status;
+	struct usb_ep		*ep = NULL;
+	struct f_uac1_legacy_opts	*audio_opts;
+
+	audio_opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+	audio->card.gadget = c->cdev->gadget;
+	/* set up ALSA audio devices */
+	if (!audio_opts->bound) {
+		status = gaudio_setup(&audio->card);
+		if (status < 0)
+			return status;
+		audio_opts->bound = true;
+	}
+	us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
+	if (IS_ERR(us))
+		return PTR_ERR(us);
+	ac_interface_desc.iInterface = us[STR_AC_IF].id;
+	input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id;
+	input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id;
+	feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id;
+	output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id;
+	as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id;
+	as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id;
+
+
+	f_audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+	audio->ac_intf = status;
+	audio->ac_alt = 0;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+	audio->as_intf = status;
+	audio->as_alt = 0;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->out_ep = ep;
+	audio->out_ep->desc = &as_out_ep_desc;
+
+	status = -ENOMEM;
+
+	/* copy descriptors, and track endpoint copies */
+	status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
+					NULL);
+	if (status)
+		goto fail;
+	return 0;
+
+fail:
+	gaudio_cleanup(&audio->card);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+	con->data[cmd] = value;
+
+	return 0;
+}
+
+static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+	return con->data[cmd];
+}
+
+/* Todo: add more control selectors dynamically */
+static int control_selector_init(struct f_audio *audio)
+{
+	INIT_LIST_HEAD(&audio->cs);
+	list_add(&feature_unit.list, &audio->cs);
+
+	INIT_LIST_HEAD(&feature_unit.control);
+	list_add(&mute_control.list, &feature_unit.control);
+	list_add(&volume_control.list, &feature_unit.control);
+
+	volume_control.data[UAC__CUR] = 0xffc0;
+	volume_control.data[UAC__MIN] = 0xe3a0;
+	volume_control.data[UAC__MAX] = 0xfff0;
+	volume_control.data[UAC__RES] = 0x0030;
+
+	return 0;
+}
+
+static inline
+struct f_uac1_legacy_opts *to_f_uac1_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct f_uac1_legacy_opts,
+			    func_inst.group);
+}
+
+static void f_uac1_attr_release(struct config_item *item)
+{
+	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations f_uac1_item_ops = {
+	.release	= f_uac1_attr_release,
+};
+
+#define UAC1_INT_ATTRIBUTE(name)					\
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
+					 char *page)			\
+{									\
+	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
+	int result;							\
+									\
+	mutex_lock(&opts->lock);					\
+	result = sprintf(page, "%u\n", opts->name);			\
+	mutex_unlock(&opts->lock);					\
+									\
+	return result;							\
+}									\
+									\
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item,		\
+					  const char *page, size_t len)	\
+{									\
+	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
+	int ret;							\
+	u32 num;							\
+									\
+	mutex_lock(&opts->lock);					\
+	if (opts->refcnt) {						\
+		ret = -EBUSY;						\
+		goto end;						\
+	}								\
+									\
+	ret = kstrtou32(page, 0, &num);					\
+	if (ret)							\
+		goto end;						\
+									\
+	opts->name = num;						\
+	ret = len;							\
+									\
+end:									\
+	mutex_unlock(&opts->lock);					\
+	return ret;							\
+}									\
+									\
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_INT_ATTRIBUTE(req_buf_size);
+UAC1_INT_ATTRIBUTE(req_count);
+UAC1_INT_ATTRIBUTE(audio_buf_size);
+
+#define UAC1_STR_ATTRIBUTE(name)					\
+static ssize_t f_uac1_opts_##name##_show(struct config_item *item,	\
+					 char *page)			\
+{									\
+	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
+	int result;							\
+									\
+	mutex_lock(&opts->lock);					\
+	result = sprintf(page, "%s\n", opts->name);			\
+	mutex_unlock(&opts->lock);					\
+									\
+	return result;							\
+}									\
+									\
+static ssize_t f_uac1_opts_##name##_store(struct config_item *item,	\
+					  const char *page, size_t len)	\
+{									\
+	struct f_uac1_legacy_opts *opts = to_f_uac1_opts(item);		\
+	int ret = -EBUSY;						\
+	char *tmp;							\
+									\
+	mutex_lock(&opts->lock);					\
+	if (opts->refcnt)						\
+		goto end;						\
+									\
+	tmp = kstrndup(page, len, GFP_KERNEL);				\
+	if (!tmp) {							\
+		ret = -ENOMEM;						\
+		goto end;						\
+	}								\
+	if (opts->name##_alloc)						\
+		kfree(opts->name);					\
+	opts->name##_alloc = true;					\
+	opts->name = tmp;						\
+	ret = len;							\
+									\
+end:									\
+	mutex_unlock(&opts->lock);					\
+	return ret;							\
+}									\
+									\
+CONFIGFS_ATTR(f_uac1_opts_, name)
+
+UAC1_STR_ATTRIBUTE(fn_play);
+UAC1_STR_ATTRIBUTE(fn_cap);
+UAC1_STR_ATTRIBUTE(fn_cntl);
+
+static struct configfs_attribute *f_uac1_attrs[] = {
+	&f_uac1_opts_attr_req_buf_size,
+	&f_uac1_opts_attr_req_count,
+	&f_uac1_opts_attr_audio_buf_size,
+	&f_uac1_opts_attr_fn_play,
+	&f_uac1_opts_attr_fn_cap,
+	&f_uac1_opts_attr_fn_cntl,
+	NULL,
+};
+
+static struct config_item_type f_uac1_func_type = {
+	.ct_item_ops	= &f_uac1_item_ops,
+	.ct_attrs	= f_uac1_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static void f_audio_free_inst(struct usb_function_instance *f)
+{
+	struct f_uac1_legacy_opts *opts;
+
+	opts = container_of(f, struct f_uac1_legacy_opts, func_inst);
+	if (opts->fn_play_alloc)
+		kfree(opts->fn_play);
+	if (opts->fn_cap_alloc)
+		kfree(opts->fn_cap);
+	if (opts->fn_cntl_alloc)
+		kfree(opts->fn_cntl);
+	kfree(opts);
+}
+
+static struct usb_function_instance *f_audio_alloc_inst(void)
+{
+	struct f_uac1_legacy_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&opts->lock);
+	opts->func_inst.free_func_inst = f_audio_free_inst;
+
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &f_uac1_func_type);
+
+	opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE;
+	opts->req_count = UAC1_REQ_COUNT;
+	opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE;
+	opts->fn_play = FILE_PCM_PLAYBACK;
+	opts->fn_cap = FILE_PCM_CAPTURE;
+	opts->fn_cntl = FILE_CONTROL;
+	return &opts->func_inst;
+}
+
+static void f_audio_free(struct usb_function *f)
+{
+	struct f_audio *audio = func_to_audio(f);
+	struct f_uac1_legacy_opts *opts;
+
+	gaudio_cleanup(&audio->card);
+	opts = container_of(f->fi, struct f_uac1_legacy_opts, func_inst);
+	kfree(audio);
+	mutex_lock(&opts->lock);
+	--opts->refcnt;
+	mutex_unlock(&opts->lock);
+}
+
+static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	usb_free_all_descriptors(f);
+}
+
+static struct usb_function *f_audio_alloc(struct usb_function_instance *fi)
+{
+	struct f_audio *audio;
+	struct f_uac1_legacy_opts *opts;
+
+	/* allocate and initialize one new instance */
+	audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+	if (!audio)
+		return ERR_PTR(-ENOMEM);
+
+	audio->card.func.name = "g_audio";
+
+	opts = container_of(fi, struct f_uac1_legacy_opts, func_inst);
+	mutex_lock(&opts->lock);
+	++opts->refcnt;
+	mutex_unlock(&opts->lock);
+	INIT_LIST_HEAD(&audio->play_queue);
+	spin_lock_init(&audio->lock);
+
+	audio->card.func.bind = f_audio_bind;
+	audio->card.func.unbind = f_audio_unbind;
+	audio->card.func.set_alt = f_audio_set_alt;
+	audio->card.func.get_alt = f_audio_get_alt;
+	audio->card.func.setup = f_audio_setup;
+	audio->card.func.disable = f_audio_disable;
+	audio->card.func.free_func = f_audio_free;
+
+	control_selector_init(audio);
+
+	INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+	return &audio->card.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(uac1_legacy, f_audio_alloc_inst, f_audio_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bryan Wu");
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index f6a0d3a..9082ce2 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -13,13 +13,9 @@
 
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
-#include <linux/platform_device.h>
 #include <linux/module.h>
 
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
+#include "u_audio.h"
 #include "u_uac2.h"
 
 /*
@@ -51,506 +47,23 @@
 #define UNFLW_CTRL	8
 #define OVFLW_CTRL	10
 
-static const char *uac2_name = "snd_uac2";
-
-struct uac2_req {
-	struct uac2_rtd_params *pp; /* parent param */
-	struct usb_request *req;
+struct f_uac2 {
+	struct g_audio g_audio;
+	u8 ac_intf, as_in_intf, as_out_intf;
+	u8 ac_alt, as_in_alt, as_out_alt;	/* needed for get_alt() */
 };
 
-struct uac2_rtd_params {
-	struct snd_uac2_chip *uac2; /* parent chip */
-	bool ep_enabled; /* if the ep is enabled */
-	/* Size of the ring buffer */
-	size_t dma_bytes;
-	unsigned char *dma_area;
-
-	struct snd_pcm_substream *ss;
-
-	/* Ring buffer */
-	ssize_t hw_ptr;
-
-	void *rbuf;
-
-	size_t period_size;
-
-	unsigned max_psize;
-	struct uac2_req *ureq;
-
-	spinlock_t lock;
-};
-
-struct snd_uac2_chip {
-	struct platform_device pdev;
-	struct platform_driver pdrv;
-
-	struct uac2_rtd_params p_prm;
-	struct uac2_rtd_params c_prm;
-
-	struct snd_card *card;
-	struct snd_pcm *pcm;
-
-	/* timekeeping for the playback endpoint */
-	unsigned int p_interval;
-	unsigned int p_residue;
-
-	/* pre-calculated values for playback iso completion */
-	unsigned int p_pktsize;
-	unsigned int p_pktsize_residue;
-	unsigned int p_framesize;
-};
-
-#define BUFF_SIZE_MAX	(PAGE_SIZE * 16)
-#define PRD_SIZE_MAX	PAGE_SIZE
-#define MIN_PERIODS	4
-
-static struct snd_pcm_hardware uac2_pcm_hardware = {
-	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
-		 | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
-		 | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
-	.rates = SNDRV_PCM_RATE_CONTINUOUS,
-	.periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
-	.buffer_bytes_max = BUFF_SIZE_MAX,
-	.period_bytes_max = PRD_SIZE_MAX,
-	.periods_min = MIN_PERIODS,
-};
-
-struct audio_dev {
-	u8 ac_intf, ac_alt;
-	u8 as_out_intf, as_out_alt;
-	u8 as_in_intf, as_in_alt;
-
-	struct usb_ep *in_ep, *out_ep;
-	struct usb_function func;
-
-	/* The ALSA Sound Card it represents on the USB-Client side */
-	struct snd_uac2_chip uac2;
-};
-
-static inline
-struct audio_dev *func_to_agdev(struct usb_function *f)
+static inline struct f_uac2 *func_to_uac2(struct usb_function *f)
 {
-	return container_of(f, struct audio_dev, func);
+	return container_of(f, struct f_uac2, g_audio.func);
 }
 
 static inline
-struct audio_dev *uac2_to_agdev(struct snd_uac2_chip *u)
-{
-	return container_of(u, struct audio_dev, uac2);
-}
-
-static inline
-struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p)
-{
-	return container_of(p, struct snd_uac2_chip, pdev);
-}
-
-static inline
-struct f_uac2_opts *agdev_to_uac2_opts(struct audio_dev *agdev)
+struct f_uac2_opts *g_audio_to_uac2_opts(struct g_audio *agdev)
 {
 	return container_of(agdev->func.fi, struct f_uac2_opts, func_inst);
 }
 
-static inline
-uint num_channels(uint chanmask)
-{
-	uint num = 0;
-
-	while (chanmask) {
-		num += (chanmask & 1);
-		chanmask >>= 1;
-	}
-
-	return num;
-}
-
-static void
-agdev_iso_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	unsigned pending;
-	unsigned long flags;
-	unsigned int hw_ptr;
-	bool update_alsa = false;
-	int status = req->status;
-	struct uac2_req *ur = req->context;
-	struct snd_pcm_substream *substream;
-	struct uac2_rtd_params *prm = ur->pp;
-	struct snd_uac2_chip *uac2 = prm->uac2;
-
-	/* i/f shutting down */
-	if (!prm->ep_enabled || req->status == -ESHUTDOWN)
-		return;
-
-	/*
-	 * We can't really do much about bad xfers.
-	 * Afterall, the ISOCH xfers could fail legitimately.
-	 */
-	if (status)
-		pr_debug("%s: iso_complete status(%d) %d/%d\n",
-			__func__, status, req->actual, req->length);
-
-	substream = prm->ss;
-
-	/* Do nothing if ALSA isn't active */
-	if (!substream)
-		goto exit;
-
-	spin_lock_irqsave(&prm->lock, flags);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		/*
-		 * For each IN packet, take the quotient of the current data
-		 * rate and the endpoint's interval as the base packet size.
-		 * If there is a residue from this division, add it to the
-		 * residue accumulator.
-		 */
-		req->length = uac2->p_pktsize;
-		uac2->p_residue += uac2->p_pktsize_residue;
-
-		/*
-		 * Whenever there are more bytes in the accumulator than we
-		 * need to add one more sample frame, increase this packet's
-		 * size and decrease the accumulator.
-		 */
-		if (uac2->p_residue / uac2->p_interval >= uac2->p_framesize) {
-			req->length += uac2->p_framesize;
-			uac2->p_residue -= uac2->p_framesize *
-					   uac2->p_interval;
-		}
-
-		req->actual = req->length;
-	}
-
-	pending = prm->hw_ptr % prm->period_size;
-	pending += req->actual;
-	if (pending >= prm->period_size)
-		update_alsa = true;
-
-	hw_ptr = prm->hw_ptr;
-	prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
-
-	spin_unlock_irqrestore(&prm->lock, flags);
-
-	/* Pack USB load in ALSA ring buffer */
-	pending = prm->dma_bytes - hw_ptr;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (unlikely(pending < req->actual)) {
-			memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-			memcpy(req->buf + pending, prm->dma_area,
-			       req->actual - pending);
-		} else {
-			memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
-		}
-	} else {
-		if (unlikely(pending < req->actual)) {
-			memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-			memcpy(prm->dma_area, req->buf + pending,
-			       req->actual - pending);
-		} else {
-			memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
-		}
-	}
-
-exit:
-	if (usb_ep_queue(ep, req, GFP_ATOMIC))
-		dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__);
-
-	if (update_alsa)
-		snd_pcm_period_elapsed(substream);
-
-	return;
-}
-
-static int
-uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
-	struct audio_dev *agdev = uac2_to_agdev(uac2);
-	struct f_uac2_opts *uac2_opts = agdev_to_uac2_opts(agdev);
-	struct uac2_rtd_params *prm;
-	unsigned long flags;
-	int err = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac2->p_prm;
-	else
-		prm = &uac2->c_prm;
-
-	spin_lock_irqsave(&prm->lock, flags);
-
-	/* Reset */
-	prm->hw_ptr = 0;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-		prm->ss = substream;
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-		prm->ss = NULL;
-		break;
-	default:
-		err = -EINVAL;
-	}
-
-	spin_unlock_irqrestore(&prm->lock, flags);
-
-	/* Clear buffer after Play stops */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
-		memset(prm->rbuf, 0, prm->max_psize * uac2_opts->req_number);
-
-	return err;
-}
-
-static snd_pcm_uframes_t uac2_pcm_pointer(struct snd_pcm_substream *substream)
-{
-	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
-	struct uac2_rtd_params *prm;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac2->p_prm;
-	else
-		prm = &uac2->c_prm;
-
-	return bytes_to_frames(substream->runtime, prm->hw_ptr);
-}
-
-static int uac2_pcm_hw_params(struct snd_pcm_substream *substream,
-			       struct snd_pcm_hw_params *hw_params)
-{
-	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
-	struct uac2_rtd_params *prm;
-	int err;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac2->p_prm;
-	else
-		prm = &uac2->c_prm;
-
-	err = snd_pcm_lib_malloc_pages(substream,
-					params_buffer_bytes(hw_params));
-	if (err >= 0) {
-		prm->dma_bytes = substream->runtime->dma_bytes;
-		prm->dma_area = substream->runtime->dma_area;
-		prm->period_size = params_period_bytes(hw_params);
-	}
-
-	return err;
-}
-
-static int uac2_pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
-	struct uac2_rtd_params *prm;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac2->p_prm;
-	else
-		prm = &uac2->c_prm;
-
-	prm->dma_area = NULL;
-	prm->dma_bytes = 0;
-	prm->period_size = 0;
-
-	return snd_pcm_lib_free_pages(substream);
-}
-
-static int uac2_pcm_open(struct snd_pcm_substream *substream)
-{
-	struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream);
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct audio_dev *audio_dev;
-	struct f_uac2_opts *opts;
-	int p_ssize, c_ssize;
-	int p_srate, c_srate;
-	int p_chmask, c_chmask;
-
-	audio_dev = uac2_to_agdev(uac2);
-	opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
-	p_ssize = opts->p_ssize;
-	c_ssize = opts->c_ssize;
-	p_srate = opts->p_srate;
-	c_srate = opts->c_srate;
-	p_chmask = opts->p_chmask;
-	c_chmask = opts->c_chmask;
-	uac2->p_residue = 0;
-
-	runtime->hw = uac2_pcm_hardware;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		spin_lock_init(&uac2->p_prm.lock);
-		runtime->hw.rate_min = p_srate;
-		switch (p_ssize) {
-		case 3:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
-			break;
-		case 4:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
-			break;
-		default:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
-			break;
-		}
-		runtime->hw.channels_min = num_channels(p_chmask);
-		runtime->hw.period_bytes_min = 2 * uac2->p_prm.max_psize
-						/ runtime->hw.periods_min;
-	} else {
-		spin_lock_init(&uac2->c_prm.lock);
-		runtime->hw.rate_min = c_srate;
-		switch (c_ssize) {
-		case 3:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
-			break;
-		case 4:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
-			break;
-		default:
-			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
-			break;
-		}
-		runtime->hw.channels_min = num_channels(c_chmask);
-		runtime->hw.period_bytes_min = 2 * uac2->c_prm.max_psize
-						/ runtime->hw.periods_min;
-	}
-
-	runtime->hw.rate_max = runtime->hw.rate_min;
-	runtime->hw.channels_max = runtime->hw.channels_min;
-
-	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-
-	return 0;
-}
-
-/* ALSA cries without these function pointers */
-static int uac2_pcm_null(struct snd_pcm_substream *substream)
-{
-	return 0;
-}
-
-static struct snd_pcm_ops uac2_pcm_ops = {
-	.open = uac2_pcm_open,
-	.close = uac2_pcm_null,
-	.ioctl = snd_pcm_lib_ioctl,
-	.hw_params = uac2_pcm_hw_params,
-	.hw_free = uac2_pcm_hw_free,
-	.trigger = uac2_pcm_trigger,
-	.pointer = uac2_pcm_pointer,
-	.prepare = uac2_pcm_null,
-};
-
-static int snd_uac2_probe(struct platform_device *pdev)
-{
-	struct snd_uac2_chip *uac2 = pdev_to_uac2(pdev);
-	struct snd_card *card;
-	struct snd_pcm *pcm;
-	struct audio_dev *audio_dev;
-	struct f_uac2_opts *opts;
-	int err;
-	int p_chmask, c_chmask;
-
-	audio_dev = uac2_to_agdev(uac2);
-	opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst);
-	p_chmask = opts->p_chmask;
-	c_chmask = opts->c_chmask;
-
-	/* Choose any slot, with no id */
-	err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
-	if (err < 0)
-		return err;
-
-	uac2->card = card;
-
-	/*
-	 * Create first PCM device
-	 * Create a substream only for non-zero channel streams
-	 */
-	err = snd_pcm_new(uac2->card, "UAC2 PCM", 0,
-			       p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
-	if (err < 0)
-		goto snd_fail;
-
-	strcpy(pcm->name, "UAC2 PCM");
-	pcm->private_data = uac2;
-
-	uac2->pcm = pcm;
-
-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac2_pcm_ops);
-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac2_pcm_ops);
-
-	strcpy(card->driver, "UAC2_Gadget");
-	strcpy(card->shortname, "UAC2_Gadget");
-	sprintf(card->longname, "UAC2_Gadget %i", pdev->id);
-
-	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
-		snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
-
-	err = snd_card_register(card);
-	if (!err) {
-		platform_set_drvdata(pdev, card);
-		return 0;
-	}
-
-snd_fail:
-	snd_card_free(card);
-
-	uac2->pcm = NULL;
-	uac2->card = NULL;
-
-	return err;
-}
-
-static int snd_uac2_remove(struct platform_device *pdev)
-{
-	struct snd_card *card = platform_get_drvdata(pdev);
-
-	if (card)
-		return snd_card_free(card);
-
-	return 0;
-}
-
-static void snd_uac2_release(struct device *dev)
-{
-	dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
-}
-
-static int alsa_uac2_init(struct audio_dev *agdev)
-{
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
-	int err;
-
-	uac2->pdrv.probe = snd_uac2_probe;
-	uac2->pdrv.remove = snd_uac2_remove;
-	uac2->pdrv.driver.name = uac2_name;
-
-	uac2->pdev.id = 0;
-	uac2->pdev.name = uac2_name;
-	uac2->pdev.dev.release = snd_uac2_release;
-
-	/* Register snd_uac2 driver */
-	err = platform_driver_register(&uac2->pdrv);
-	if (err)
-		return err;
-
-	/* Register snd_uac2 device */
-	err = platform_device_register(&uac2->pdev);
-	if (err)
-		platform_driver_unregister(&uac2->pdrv);
-
-	return err;
-}
-
-static void alsa_uac2_exit(struct audio_dev *agdev)
-{
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
-
-	platform_driver_unregister(&uac2->pdrv);
-	platform_device_unregister(&uac2->pdev);
-}
-
-
 /* --------- USB Function Interface ------------- */
 
 enum {
@@ -938,32 +451,6 @@ struct cntrl_range_lay3 {
 	__u32	dRES;
 } __packed;
 
-static inline void
-free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
-{
-	struct snd_uac2_chip *uac2 = prm->uac2;
-	struct audio_dev *agdev = uac2_to_agdev(uac2);
-	struct f_uac2_opts *uac2_opts = agdev_to_uac2_opts(agdev);
-	int i;
-
-	if (!prm->ep_enabled)
-		return;
-
-	prm->ep_enabled = false;
-
-	for (i = 0; i < uac2_opts->req_number; i++) {
-		if (prm->ureq[i].req) {
-			usb_ep_dequeue(ep, prm->ureq[i].req);
-			usb_ep_free_request(ep, prm->ureq[i].req);
-			prm->ureq[i].req = NULL;
-		}
-	}
-
-	if (usb_ep_disable(ep))
-		dev_err(&uac2->pdev.dev,
-			"%s:%d Error!\n", __func__, __LINE__);
-}
-
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
 	struct usb_endpoint_descriptor *ep_desc,
 	unsigned int factor, bool is_playback)
@@ -990,12 +477,11 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
 static int
 afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 {
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct f_uac2 *uac2 = func_to_uac2(fn);
+	struct g_audio *agdev = func_to_g_audio(fn);
 	struct usb_composite_dev *cdev = cfg->cdev;
 	struct usb_gadget *gadget = cdev->gadget;
-	struct device *dev = &uac2->pdev.dev;
-	struct uac2_rtd_params *prm;
+	struct device *dev = &gadget->dev;
 	struct f_uac2_opts *uac2_opts;
 	struct usb_string *us;
 	int ret;
@@ -1042,8 +528,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 		return ret;
 	}
 	std_ac_if_desc.bInterfaceNumber = ret;
-	agdev->ac_intf = ret;
-	agdev->ac_alt = 0;
+	uac2->ac_intf = ret;
+	uac2->ac_alt = 0;
 
 	ret = usb_interface_id(cfg, fn);
 	if (ret < 0) {
@@ -1052,8 +538,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	}
 	std_as_out_if0_desc.bInterfaceNumber = ret;
 	std_as_out_if1_desc.bInterfaceNumber = ret;
-	agdev->as_out_intf = ret;
-	agdev->as_out_alt = 0;
+	uac2->as_out_intf = ret;
+	uac2->as_out_alt = 0;
 
 	ret = usb_interface_id(cfg, fn);
 	if (ret < 0) {
@@ -1062,8 +548,14 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	}
 	std_as_in_if0_desc.bInterfaceNumber = ret;
 	std_as_in_if1_desc.bInterfaceNumber = ret;
-	agdev->as_in_intf = ret;
-	agdev->as_in_alt = 0;
+	uac2->as_in_intf = ret;
+	uac2->as_in_alt = 0;
+
+	/* Calculate wMaxPacketSize according to audio bandwidth */
+	set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
+	set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
+	set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
+	set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
 
 	agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
 	if (!agdev->out_ep) {
@@ -1077,14 +569,10 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 		return ret;
 	}
 
-	uac2->p_prm.uac2 = uac2;
-	uac2->c_prm.uac2 = uac2;
-
-	/* Calculate wMaxPacketSize according to audio bandwidth */
-	set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
-	set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
-	set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
-	set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+	agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize,
+					hs_epin_desc.wMaxPacketSize);
+	agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize,
+					hs_epout_desc.wMaxPacketSize);
 
 	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
 	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
@@ -1094,48 +582,23 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	if (ret)
 		return ret;
 
-	prm = &agdev->uac2.c_prm;
-	prm->max_psize = hs_epout_desc.wMaxPacketSize;
-	prm->ureq = kcalloc(uac2_opts->req_number, sizeof(struct uac2_req),
-			GFP_KERNEL);
-	if (!prm->ureq) {
-		ret = -ENOMEM;
-		goto err_free_descs;
-	}
-	prm->rbuf = kcalloc(uac2_opts->req_number, prm->max_psize, GFP_KERNEL);
-	if (!prm->rbuf) {
-		prm->max_psize = 0;
-		ret = -ENOMEM;
-		goto err_free_descs;
-	}
+	agdev->gadget = gadget;
 
-	prm = &agdev->uac2.p_prm;
-	prm->max_psize = hs_epin_desc.wMaxPacketSize;
-	prm->ureq = kcalloc(uac2_opts->req_number, sizeof(struct uac2_req),
-			GFP_KERNEL);
-	if (!prm->ureq) {
-		ret = -ENOMEM;
-		goto err_free_descs;
-	}
-	prm->rbuf = kcalloc(uac2_opts->req_number, prm->max_psize, GFP_KERNEL);
-	if (!prm->rbuf) {
-		prm->max_psize = 0;
-		ret = -ENOMEM;
-		goto err_no_memory;
-	}
-
-	ret = alsa_uac2_init(agdev);
+	agdev->params.p_chmask = uac2_opts->p_chmask;
+	agdev->params.p_srate = uac2_opts->p_srate;
+	agdev->params.p_ssize = uac2_opts->p_ssize;
+	agdev->params.c_chmask = uac2_opts->c_chmask;
+	agdev->params.c_srate = uac2_opts->c_srate;
+	agdev->params.c_ssize = uac2_opts->c_ssize;
+	agdev->params.req_number = uac2_opts->req_number;
+	ret = g_audio_setup(agdev, "UAC2 PCM", "UAC2_Gadget");
 	if (ret)
-		goto err_no_memory;
+		goto err_free_descs;
 	return 0;
 
-err_no_memory:
-	kfree(agdev->uac2.p_prm.ureq);
-	kfree(agdev->uac2.c_prm.ureq);
-	kfree(agdev->uac2.p_prm.rbuf);
-	kfree(agdev->uac2.c_prm.rbuf);
 err_free_descs:
 	usb_free_all_descriptors(fn);
+	agdev->gadget = NULL;
 	return ret;
 }
 
@@ -1143,15 +606,10 @@ static int
 afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 {
 	struct usb_composite_dev *cdev = fn->config->cdev;
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct f_uac2 *uac2 = func_to_uac2(fn);
 	struct usb_gadget *gadget = cdev->gadget;
-	struct device *dev = &uac2->pdev.dev;
-	struct usb_request *req;
-	struct usb_ep *ep;
-	struct uac2_rtd_params *prm;
-	int req_len, i;
+	struct device *dev = &gadget->dev;
+	int ret = 0;
 
 	/* No i/f has more than 2 alt settings */
 	if (alt > 1) {
@@ -1159,7 +617,7 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 		return -EINVAL;
 	}
 
-	if (intf == agdev->ac_intf) {
+	if (intf == uac2->ac_intf) {
 		/* Control I/f has only 1 AltSetting - 0 */
 		if (alt) {
 			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
@@ -1168,95 +626,42 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 		return 0;
 	}
 
-	if (intf == agdev->as_out_intf) {
-		ep = agdev->out_ep;
-		prm = &uac2->c_prm;
-		config_ep_by_speed(gadget, fn, ep);
-		agdev->as_out_alt = alt;
-		req_len = prm->max_psize;
-	} else if (intf == agdev->as_in_intf) {
-		unsigned int factor, rate;
-		struct usb_endpoint_descriptor *ep_desc;
+	if (intf == uac2->as_out_intf) {
+		uac2->as_out_alt = alt;
 
-		ep = agdev->in_ep;
-		prm = &uac2->p_prm;
-		config_ep_by_speed(gadget, fn, ep);
-		agdev->as_in_alt = alt;
-
-		/* pre-calculate the playback endpoint's interval */
-		if (gadget->speed == USB_SPEED_FULL) {
-			ep_desc = &fs_epin_desc;
-			factor = 1000;
-		} else {
-			ep_desc = &hs_epin_desc;
-			factor = 8000;
-		}
-
-		/* pre-compute some values for iso_complete() */
-		uac2->p_framesize = opts->p_ssize *
-				    num_channels(opts->p_chmask);
-		rate = opts->p_srate * uac2->p_framesize;
-		uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
-		uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
-					prm->max_psize);
-
-		if (uac2->p_pktsize < prm->max_psize)
-			uac2->p_pktsize_residue = rate % uac2->p_interval;
+		if (alt)
+			ret = u_audio_start_capture(&uac2->g_audio);
 		else
-			uac2->p_pktsize_residue = 0;
+			u_audio_stop_capture(&uac2->g_audio);
+	} else if (intf == uac2->as_in_intf) {
+		uac2->as_in_alt = alt;
 
-		req_len = uac2->p_pktsize;
-		uac2->p_residue = 0;
+		if (alt)
+			ret = u_audio_start_playback(&uac2->g_audio);
+		else
+			u_audio_stop_playback(&uac2->g_audio);
 	} else {
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
 		return -EINVAL;
 	}
 
-	if (alt == 0) {
-		free_ep(prm, ep);
-		return 0;
-	}
-
-	prm->ep_enabled = true;
-	usb_ep_enable(ep);
-
-	for (i = 0; i < opts->req_number; i++) {
-		if (!prm->ureq[i].req) {
-			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
-			if (req == NULL)
-				return -ENOMEM;
-
-			prm->ureq[i].req = req;
-			prm->ureq[i].pp = prm;
-
-			req->zero = 0;
-			req->context = &prm->ureq[i];
-			req->length = req_len;
-			req->complete = agdev_iso_complete;
-			req->buf = prm->rbuf + i * prm->max_psize;
-		}
-
-		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
-			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-	}
-
-	return 0;
+	return ret;
 }
 
 static int
 afunc_get_alt(struct usb_function *fn, unsigned intf)
 {
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct f_uac2 *uac2 = func_to_uac2(fn);
+	struct g_audio *agdev = func_to_g_audio(fn);
 
-	if (intf == agdev->ac_intf)
-		return agdev->ac_alt;
-	else if (intf == agdev->as_out_intf)
-		return agdev->as_out_alt;
-	else if (intf == agdev->as_in_intf)
-		return agdev->as_in_alt;
+	if (intf == uac2->ac_intf)
+		return uac2->ac_alt;
+	else if (intf == uac2->as_out_intf)
+		return uac2->as_out_alt;
+	else if (intf == uac2->as_in_intf)
+		return uac2->as_in_alt;
 	else
-		dev_err(&uac2->pdev.dev,
+		dev_err(&agdev->gadget->dev,
 			"%s:%d Invalid Interface %d!\n",
 			__func__, __LINE__, intf);
 
@@ -1266,22 +671,19 @@ afunc_get_alt(struct usb_function *fn, unsigned intf)
 static void
 afunc_disable(struct usb_function *fn)
 {
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct f_uac2 *uac2 = func_to_uac2(fn);
 
-	free_ep(&uac2->p_prm, agdev->in_ep);
-	agdev->as_in_alt = 0;
-
-	free_ep(&uac2->c_prm, agdev->out_ep);
-	agdev->as_out_alt = 0;
+	uac2->as_in_alt = 0;
+	uac2->as_out_alt = 0;
+	u_audio_stop_capture(&uac2->g_audio);
+	u_audio_stop_playback(&uac2->g_audio);
 }
 
 static int
 in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 {
 	struct usb_request *req = fn->config->cdev->req;
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct g_audio *agdev = func_to_g_audio(fn);
 	struct f_uac2_opts *opts;
 	u16 w_length = le16_to_cpu(cr->wLength);
 	u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1291,7 +693,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 	int value = -EOPNOTSUPP;
 	int p_srate, c_srate;
 
-	opts = agdev_to_uac2_opts(agdev);
+	opts = g_audio_to_uac2_opts(agdev);
 	p_srate = opts->p_srate;
 	c_srate = opts->c_srate;
 
@@ -1310,7 +712,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 		*(u8 *)req->buf = 1;
 		value = min_t(unsigned, w_length, 1);
 	} else {
-		dev_err(&uac2->pdev.dev,
+		dev_err(&agdev->gadget->dev,
 			"%s:%d control_selector=%d TODO!\n",
 			__func__, __LINE__, control_selector);
 	}
@@ -1322,8 +724,7 @@ static int
 in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 {
 	struct usb_request *req = fn->config->cdev->req;
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct g_audio *agdev = func_to_g_audio(fn);
 	struct f_uac2_opts *opts;
 	u16 w_length = le16_to_cpu(cr->wLength);
 	u16 w_index = le16_to_cpu(cr->wIndex);
@@ -1334,7 +735,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 	int value = -EOPNOTSUPP;
 	int p_srate, c_srate;
 
-	opts = agdev_to_uac2_opts(agdev);
+	opts = g_audio_to_uac2_opts(agdev);
 	p_srate = opts->p_srate;
 	c_srate = opts->c_srate;
 
@@ -1353,7 +754,7 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 		value = min_t(unsigned, w_length, sizeof r);
 		memcpy(req->buf, &r, value);
 	} else {
-		dev_err(&uac2->pdev.dev,
+		dev_err(&agdev->gadget->dev,
 			"%s:%d control_selector=%d TODO!\n",
 			__func__, __LINE__, control_selector);
 	}
@@ -1388,13 +789,13 @@ out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 static int
 setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 {
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct f_uac2 *uac2 = func_to_uac2(fn);
+	struct g_audio *agdev = func_to_g_audio(fn);
 	u16 w_index = le16_to_cpu(cr->wIndex);
 	u8 intf = w_index & 0xff;
 
-	if (intf != agdev->ac_intf) {
-		dev_err(&uac2->pdev.dev,
+	if (intf != uac2->ac_intf) {
+		dev_err(&agdev->gadget->dev,
 			"%s:%d Error!\n", __func__, __LINE__);
 		return -EOPNOTSUPP;
 	}
@@ -1411,8 +812,7 @@ static int
 afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 {
 	struct usb_composite_dev *cdev = fn->config->cdev;
-	struct audio_dev *agdev = func_to_agdev(fn);
-	struct snd_uac2_chip *uac2 = &agdev->uac2;
+	struct g_audio *agdev = func_to_g_audio(fn);
 	struct usb_request *req = cdev->req;
 	u16 w_length = le16_to_cpu(cr->wLength);
 	int value = -EOPNOTSUPP;
@@ -1424,14 +824,15 @@ afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 	if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE)
 		value = setup_rq_inf(fn, cr);
 	else
-		dev_err(&uac2->pdev.dev, "%s:%d Error!\n", __func__, __LINE__);
+		dev_err(&agdev->gadget->dev, "%s:%d Error!\n",
+				__func__, __LINE__);
 
 	if (value >= 0) {
 		req->length = value;
 		req->zero = value < w_length;
 		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
 		if (value < 0) {
-			dev_err(&uac2->pdev.dev,
+			dev_err(&agdev->gadget->dev,
 				"%s:%d Error!\n", __func__, __LINE__);
 			req->status = 0;
 		}
@@ -1557,10 +958,10 @@ static struct usb_function_instance *afunc_alloc_inst(void)
 
 static void afunc_free(struct usb_function *f)
 {
-	struct audio_dev *agdev;
+	struct g_audio *agdev;
 	struct f_uac2_opts *opts;
 
-	agdev = func_to_agdev(f);
+	agdev = func_to_g_audio(f);
 	opts = container_of(f->fi, struct f_uac2_opts, func_inst);
 	kfree(agdev);
 	mutex_lock(&opts->lock);
@@ -1570,27 +971,21 @@ static void afunc_free(struct usb_function *f)
 
 static void afunc_unbind(struct usb_configuration *c, struct usb_function *f)
 {
-	struct audio_dev *agdev = func_to_agdev(f);
-	struct uac2_rtd_params *prm;
+	struct g_audio *agdev = func_to_g_audio(f);
 
-	alsa_uac2_exit(agdev);
-
-	prm = &agdev->uac2.p_prm;
-	kfree(prm->rbuf);
-
-	prm = &agdev->uac2.c_prm;
-	kfree(prm->rbuf);
-	kfree(prm->ureq);
+	g_audio_cleanup(agdev);
 	usb_free_all_descriptors(f);
+
+	agdev->gadget = NULL;
 }
 
 static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
 {
-	struct audio_dev *agdev;
+	struct f_uac2	*uac2;
 	struct f_uac2_opts *opts;
 
-	agdev = kzalloc(sizeof(*agdev), GFP_KERNEL);
-	if (agdev == NULL)
+	uac2 = kzalloc(sizeof(*uac2), GFP_KERNEL);
+	if (uac2 == NULL)
 		return ERR_PTR(-ENOMEM);
 
 	opts = container_of(fi, struct f_uac2_opts, func_inst);
@@ -1598,16 +993,16 @@ static struct usb_function *afunc_alloc(struct usb_function_instance *fi)
 	++opts->refcnt;
 	mutex_unlock(&opts->lock);
 
-	agdev->func.name = "uac2_func";
-	agdev->func.bind = afunc_bind;
-	agdev->func.unbind = afunc_unbind;
-	agdev->func.set_alt = afunc_set_alt;
-	agdev->func.get_alt = afunc_get_alt;
-	agdev->func.disable = afunc_disable;
-	agdev->func.setup = afunc_setup;
-	agdev->func.free_func = afunc_free;
+	uac2->g_audio.func.name = "uac2_func";
+	uac2->g_audio.func.bind = afunc_bind;
+	uac2->g_audio.func.unbind = afunc_unbind;
+	uac2->g_audio.func.set_alt = afunc_set_alt;
+	uac2->g_audio.func.get_alt = afunc_get_alt;
+	uac2->g_audio.func.disable = afunc_disable;
+	uac2->g_audio.func.setup = afunc_setup;
+	uac2->g_audio.func.free_func = afunc_free;
 
-	return &agdev->func;
+	return &uac2->g_audio.func;
 }
 
 DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc);
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index e698489..e0814a9 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -133,9 +133,10 @@ static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
 #define FSG_MAX_LUNS	16
 
 enum fsg_buffer_state {
+	BUF_STATE_SENDING = -2,
+	BUF_STATE_RECEIVING,
 	BUF_STATE_EMPTY = 0,
-	BUF_STATE_FULL,
-	BUF_STATE_BUSY
+	BUF_STATE_FULL
 };
 
 struct fsg_buffhd {
@@ -151,23 +152,14 @@ struct fsg_buffhd {
 	unsigned int			bulk_out_intended_length;
 
 	struct usb_request		*inreq;
-	int				inreq_busy;
 	struct usb_request		*outreq;
-	int				outreq_busy;
 };
 
 enum fsg_state {
-	/* This one isn't used anywhere */
-	FSG_STATE_COMMAND_PHASE = -10,
-	FSG_STATE_DATA_PHASE,
-	FSG_STATE_STATUS_PHASE,
-
-	FSG_STATE_IDLE = 0,
+	FSG_STATE_NORMAL,
 	FSG_STATE_ABORT_BULK_OUT,
-	FSG_STATE_RESET,
-	FSG_STATE_INTERFACE_CHANGE,
+	FSG_STATE_PROTOCOL_RESET,
 	FSG_STATE_CONFIG_CHANGE,
-	FSG_STATE_DISCONNECT,
 	FSG_STATE_EXIT,
 	FSG_STATE_TERMINATED
 };
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
new file mode 100644
index 0000000..5dd73b9
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -0,0 +1,662 @@
+/*
+ * u_audio.c -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * Sound card implementation was cut-and-pasted with changes
+ * from f_uac2.c and has:
+ *    Copyright (C) 2011
+ *    Yadwinder Singh (yadi.brar01@gmail.com)
+ *    Jaswinder Singh (jaswinder.singh@linaro.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "u_audio.h"
+
+#define BUFF_SIZE_MAX	(PAGE_SIZE * 16)
+#define PRD_SIZE_MAX	PAGE_SIZE
+#define MIN_PERIODS	4
+
+struct uac_req {
+	struct uac_rtd_params *pp; /* parent param */
+	struct usb_request *req;
+};
+
+/* Runtime data params for one stream */
+struct uac_rtd_params {
+	struct snd_uac_chip *uac; /* parent chip */
+	bool ep_enabled; /* if the ep is enabled */
+	/* Size of the ring buffer */
+	size_t dma_bytes;
+	unsigned char *dma_area;
+
+	struct snd_pcm_substream *ss;
+
+	/* Ring buffer */
+	ssize_t hw_ptr;
+
+	void *rbuf;
+
+	size_t period_size;
+
+	unsigned max_psize;	/* MaxPacketSize of endpoint */
+	struct uac_req *ureq;
+
+	spinlock_t lock;
+};
+
+struct snd_uac_chip {
+	struct g_audio *audio_dev;
+
+	struct uac_rtd_params p_prm;
+	struct uac_rtd_params c_prm;
+
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+
+	/* timekeeping for the playback endpoint */
+	unsigned int p_interval;
+	unsigned int p_residue;
+
+	/* pre-calculated values for playback iso completion */
+	unsigned int p_pktsize;
+	unsigned int p_pktsize_residue;
+	unsigned int p_framesize;
+};
+
+static struct snd_pcm_hardware uac_pcm_hardware = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER
+		 | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID
+		 | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX,
+	.buffer_bytes_max = BUFF_SIZE_MAX,
+	.period_bytes_max = PRD_SIZE_MAX,
+	.periods_min = MIN_PERIODS,
+};
+
+static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	unsigned pending;
+	unsigned long flags;
+	unsigned int hw_ptr;
+	bool update_alsa = false;
+	int status = req->status;
+	struct uac_req *ur = req->context;
+	struct snd_pcm_substream *substream;
+	struct uac_rtd_params *prm = ur->pp;
+	struct snd_uac_chip *uac = prm->uac;
+
+	/* i/f shutting down */
+	if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+		return;
+
+	/*
+	 * We can't really do much about bad xfers.
+	 * After all, the ISOCH xfers could fail legitimately.
+	 */
+	if (status)
+		pr_debug("%s: iso_complete status(%d) %d/%d\n",
+			__func__, status, req->actual, req->length);
+
+	substream = prm->ss;
+
+	/* Do nothing if ALSA isn't active */
+	if (!substream)
+		goto exit;
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		/*
+		 * For each IN packet, take the quotient of the current data
+		 * rate and the endpoint's interval as the base packet size.
+		 * If there is a residue from this division, add it to the
+		 * residue accumulator.
+		 */
+		req->length = uac->p_pktsize;
+		uac->p_residue += uac->p_pktsize_residue;
+
+		/*
+		 * Whenever there are more bytes in the accumulator than we
+		 * need to add one more sample frame, increase this packet's
+		 * size and decrease the accumulator.
+		 */
+		if (uac->p_residue / uac->p_interval >= uac->p_framesize) {
+			req->length += uac->p_framesize;
+			uac->p_residue -= uac->p_framesize *
+					   uac->p_interval;
+		}
+
+		req->actual = req->length;
+	}
+
+	pending = prm->hw_ptr % prm->period_size;
+	pending += req->actual;
+	if (pending >= prm->period_size)
+		update_alsa = true;
+
+	hw_ptr = prm->hw_ptr;
+	prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Pack USB load in ALSA ring buffer */
+	pending = prm->dma_bytes - hw_ptr;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (unlikely(pending < req->actual)) {
+			memcpy(req->buf, prm->dma_area + hw_ptr, pending);
+			memcpy(req->buf + pending, prm->dma_area,
+			       req->actual - pending);
+		} else {
+			memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+		}
+	} else {
+		if (unlikely(pending < req->actual)) {
+			memcpy(prm->dma_area + hw_ptr, req->buf, pending);
+			memcpy(prm->dma_area, req->buf + pending,
+			       req->actual - pending);
+		} else {
+			memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+		}
+	}
+
+exit:
+	if (usb_ep_queue(ep, req, GFP_ATOMIC))
+		dev_err(uac->card->dev, "%d Error!\n", __LINE__);
+
+	if (update_alsa)
+		snd_pcm_period_elapsed(substream);
+}
+
+static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	unsigned long flags;
+	int err = 0;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	spin_lock_irqsave(&prm->lock, flags);
+
+	/* Reset */
+	prm->hw_ptr = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		prm->ss = substream;
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		prm->ss = NULL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&prm->lock, flags);
+
+	/* Clear buffer after Play stops */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
+		memset(prm->rbuf, 0, prm->max_psize * params->req_number);
+
+	return err;
+}
+
+static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	return bytes_to_frames(substream->runtime, prm->hw_ptr);
+}
+
+static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
+			       struct snd_pcm_hw_params *hw_params)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+	int err;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	err = snd_pcm_lib_malloc_pages(substream,
+					params_buffer_bytes(hw_params));
+	if (err >= 0) {
+		prm->dma_bytes = substream->runtime->dma_bytes;
+		prm->dma_area = substream->runtime->dma_area;
+		prm->period_size = params_period_bytes(hw_params);
+	}
+
+	return err;
+}
+
+static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct uac_rtd_params *prm;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		prm = &uac->p_prm;
+	else
+		prm = &uac->c_prm;
+
+	prm->dma_area = NULL;
+	prm->dma_bytes = 0;
+	prm->period_size = 0;
+
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static int uac_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	int p_ssize, c_ssize;
+	int p_srate, c_srate;
+	int p_chmask, c_chmask;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+	p_ssize = params->p_ssize;
+	c_ssize = params->c_ssize;
+	p_srate = params->p_srate;
+	c_srate = params->c_srate;
+	p_chmask = params->p_chmask;
+	c_chmask = params->c_chmask;
+	uac->p_residue = 0;
+
+	runtime->hw = uac_pcm_hardware;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		spin_lock_init(&uac->p_prm.lock);
+		runtime->hw.rate_min = p_srate;
+		switch (p_ssize) {
+		case 3:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+			break;
+		case 4:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+			break;
+		default:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+			break;
+		}
+		runtime->hw.channels_min = num_channels(p_chmask);
+		runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize
+						/ runtime->hw.periods_min;
+	} else {
+		spin_lock_init(&uac->c_prm.lock);
+		runtime->hw.rate_min = c_srate;
+		switch (c_ssize) {
+		case 3:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
+			break;
+		case 4:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
+			break;
+		default:
+			runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+			break;
+		}
+		runtime->hw.channels_min = num_channels(c_chmask);
+		runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize
+						/ runtime->hw.periods_min;
+	}
+
+	runtime->hw.rate_max = runtime->hw.rate_min;
+	runtime->hw.channels_max = runtime->hw.channels_min;
+
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	return 0;
+}
+
+/* ALSA cries without these function pointers */
+static int uac_pcm_null(struct snd_pcm_substream *substream)
+{
+	return 0;
+}
+
+static struct snd_pcm_ops uac_pcm_ops = {
+	.open = uac_pcm_open,
+	.close = uac_pcm_null,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = uac_pcm_hw_params,
+	.hw_free = uac_pcm_hw_free,
+	.trigger = uac_pcm_trigger,
+	.pointer = uac_pcm_pointer,
+	.prepare = uac_pcm_null,
+};
+
+static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+{
+	struct snd_uac_chip *uac = prm->uac;
+	struct g_audio *audio_dev;
+	struct uac_params *params;
+	int i;
+
+	if (!prm->ep_enabled)
+		return;
+
+	prm->ep_enabled = false;
+
+	audio_dev = uac->audio_dev;
+	params = &audio_dev->params;
+
+	for (i = 0; i < params->req_number; i++) {
+		if (prm->ureq[i].req) {
+			usb_ep_dequeue(ep, prm->ureq[i].req);
+			usb_ep_free_request(ep, prm->ureq[i].req);
+			prm->ureq[i].req = NULL;
+		}
+	}
+
+	if (usb_ep_disable(ep))
+		dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
+}
+
+int u_audio_start_capture(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+	struct usb_gadget *gadget = audio_dev->gadget;
+	struct device *dev = &gadget->dev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	struct uac_rtd_params *prm;
+	struct uac_params *params = &audio_dev->params;
+	int req_len, i;
+
+	ep = audio_dev->out_ep;
+	prm = &uac->c_prm;
+	config_ep_by_speed(gadget, &audio_dev->func, ep);
+	req_len = prm->max_psize;
+
+	prm->ep_enabled = true;
+	usb_ep_enable(ep);
+
+	for (i = 0; i < params->req_number; i++) {
+		if (!prm->ureq[i].req) {
+			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+			if (req == NULL)
+				return -ENOMEM;
+
+			prm->ureq[i].req = req;
+			prm->ureq[i].pp = prm;
+
+			req->zero = 0;
+			req->context = &prm->ureq[i];
+			req->length = req_len;
+			req->complete = u_audio_iso_complete;
+			req->buf = prm->rbuf + i * prm->max_psize;
+		}
+
+		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_capture);
+
+void u_audio_stop_capture(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+
+	free_ep(&uac->c_prm, audio_dev->out_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_capture);
+
+int u_audio_start_playback(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+	struct usb_gadget *gadget = audio_dev->gadget;
+	struct device *dev = &gadget->dev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	struct uac_rtd_params *prm;
+	struct uac_params *params = &audio_dev->params;
+	unsigned int factor, rate;
+	const struct usb_endpoint_descriptor *ep_desc;
+	int req_len, i;
+
+	ep = audio_dev->in_ep;
+	prm = &uac->p_prm;
+	config_ep_by_speed(gadget, &audio_dev->func, ep);
+
+	ep_desc = ep->desc;
+
+	/* pre-calculate the playback endpoint's interval */
+	if (gadget->speed == USB_SPEED_FULL)
+		factor = 1000;
+	else
+		factor = 8000;
+
+	/* pre-compute some values for iso_complete() */
+	uac->p_framesize = params->p_ssize *
+			    num_channels(params->p_chmask);
+	rate = params->p_srate * uac->p_framesize;
+	uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
+	uac->p_pktsize = min_t(unsigned int, rate / uac->p_interval,
+				prm->max_psize);
+
+	if (uac->p_pktsize < prm->max_psize)
+		uac->p_pktsize_residue = rate % uac->p_interval;
+	else
+		uac->p_pktsize_residue = 0;
+
+	req_len = uac->p_pktsize;
+	uac->p_residue = 0;
+
+	prm->ep_enabled = true;
+	usb_ep_enable(ep);
+
+	for (i = 0; i < params->req_number; i++) {
+		if (!prm->ureq[i].req) {
+			req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+			if (req == NULL)
+				return -ENOMEM;
+
+			prm->ureq[i].req = req;
+			prm->ureq[i].pp = prm;
+
+			req->zero = 0;
+			req->context = &prm->ureq[i];
+			req->length = req_len;
+			req->complete = u_audio_iso_complete;
+			req->buf = prm->rbuf + i * prm->max_psize;
+		}
+
+		if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+			dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(u_audio_start_playback);
+
+void u_audio_stop_playback(struct g_audio *audio_dev)
+{
+	struct snd_uac_chip *uac = audio_dev->uac;
+
+	free_ep(&uac->p_prm, audio_dev->in_ep);
+}
+EXPORT_SYMBOL_GPL(u_audio_stop_playback);
+
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+					const char *card_name)
+{
+	struct snd_uac_chip *uac;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	struct uac_params *params;
+	int p_chmask, c_chmask;
+	int err;
+
+	if (!g_audio)
+		return -EINVAL;
+
+	uac = kzalloc(sizeof(*uac), GFP_KERNEL);
+	if (!uac)
+		return -ENOMEM;
+	g_audio->uac = uac;
+	uac->audio_dev = g_audio;
+
+	params = &g_audio->params;
+	p_chmask = params->p_chmask;
+	c_chmask = params->c_chmask;
+
+	if (c_chmask) {
+		struct uac_rtd_params *prm = &uac->c_prm;
+
+		uac->c_prm.uac = uac;
+		prm->max_psize = g_audio->out_ep_maxpsize;
+
+		prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+				GFP_KERNEL);
+		if (!prm->ureq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+				GFP_KERNEL);
+		if (!prm->rbuf) {
+			prm->max_psize = 0;
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	if (p_chmask) {
+		struct uac_rtd_params *prm = &uac->p_prm;
+
+		uac->p_prm.uac = uac;
+		prm->max_psize = g_audio->in_ep_maxpsize;
+
+		prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
+				GFP_KERNEL);
+		if (!prm->ureq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		prm->rbuf = kcalloc(params->req_number, prm->max_psize,
+				GFP_KERNEL);
+		if (!prm->rbuf) {
+			prm->max_psize = 0;
+			err = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	/* Choose any slot, with no id */
+	err = snd_card_new(&g_audio->gadget->dev,
+			-1, NULL, THIS_MODULE, 0, &card);
+	if (err < 0)
+		goto fail;
+
+	uac->card = card;
+
+	/*
+	 * Create first PCM device
+	 * Create a substream only for non-zero channel streams
+	 */
+	err = snd_pcm_new(uac->card, pcm_name, 0,
+			       p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm);
+	if (err < 0)
+		goto snd_fail;
+
+	strcpy(pcm->name, pcm_name);
+	pcm->private_data = uac;
+	uac->pcm = pcm;
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
+
+	strcpy(card->driver, card_name);
+	strcpy(card->shortname, card_name);
+	sprintf(card->longname, "%s %i", card_name, card->dev->id);
+
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+		snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+
+	err = snd_card_register(card);
+
+	if (!err)
+		return 0;
+
+snd_fail:
+	snd_card_free(card);
+fail:
+	kfree(uac->p_prm.ureq);
+	kfree(uac->c_prm.ureq);
+	kfree(uac->p_prm.rbuf);
+	kfree(uac->c_prm.rbuf);
+	kfree(uac);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(g_audio_setup);
+
+void g_audio_cleanup(struct g_audio *g_audio)
+{
+	struct snd_uac_chip *uac;
+	struct snd_card *card;
+
+	if (!g_audio || !g_audio->uac)
+		return;
+
+	uac = g_audio->uac;
+	card = uac->card;
+	if (card)
+		snd_card_free(card);
+
+	kfree(uac->p_prm.ureq);
+	kfree(uac->c_prm.ureq);
+	kfree(uac->p_prm.rbuf);
+	kfree(uac->c_prm.rbuf);
+	kfree(uac);
+}
+EXPORT_SYMBOL_GPL(g_audio_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB gadget \"ALSA sound card\" utilities");
+MODULE_AUTHOR("Ruslan Bilovol");
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
new file mode 100644
index 0000000..07e1378
--- /dev/null
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -0,0 +1,95 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA sound card" utilities
+ *
+ * Copyright (C) 2016
+ * Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/usb/composite.h>
+
+struct uac_params {
+	/* playback */
+	int p_chmask;	/* channel mask */
+	int p_srate;	/* rate in Hz */
+	int p_ssize;	/* sample size */
+
+	/* capture */
+	int c_chmask;	/* channel mask */
+	int c_srate;	/* rate in Hz */
+	int c_ssize;	/* sample size */
+
+	int req_number; /* number of preallocated requests */
+};
+
+struct g_audio {
+	struct usb_function func;
+	struct usb_gadget *gadget;
+
+	struct usb_ep *in_ep;
+	struct usb_ep *out_ep;
+
+	/* Max packet size for all in_ep possible speeds */
+	unsigned int in_ep_maxpsize;
+	/* Max packet size for all out_ep possible speeds */
+	unsigned int out_ep_maxpsize;
+
+	/* The ALSA Sound Card it represents on the USB-Client side */
+	struct snd_uac_chip *uac;
+
+	struct uac_params params;
+};
+
+static inline struct g_audio *func_to_g_audio(struct usb_function *f)
+{
+	return container_of(f, struct g_audio, func);
+}
+
+static inline uint num_channels(uint chanmask)
+{
+	uint num = 0;
+
+	while (chanmask) {
+		num += (chanmask & 1);
+		chanmask >>= 1;
+	}
+
+	return num;
+}
+
+/*
+ * g_audio_setup - initialize one virtual ALSA sound card
+ * @g_audio: struct with filled params, in_ep_maxpsize, out_ep_maxpsize
+ * @pcm_name: the id string for a PCM instance of this sound card
+ * @card_name: name of this soundcard
+ *
+ * This sets up the single virtual ALSA sound card that may be exported by a
+ * gadget driver using this framework.
+ *
+ * Context: may sleep
+ *
+ * Returns zero on success, or a negative error on failure.
+ */
+int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
+					const char *card_name);
+void g_audio_cleanup(struct g_audio *g_audio);
+
+int u_audio_start_capture(struct g_audio *g_audio);
+void u_audio_stop_capture(struct g_audio *g_audio);
+int u_audio_start_playback(struct g_audio *g_audio);
+void u_audio_stop_playback(struct g_audio *g_audio);
+
+#endif /* __U_AUDIO_H */
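A gadget function built on this header follows the same flow as the f_uac2 conversion above: fill in struct uac_params and the endpoint max packet sizes, call g_audio_setup() at bind time, start or stop the streams from set_alt, and release everything in unbind. A minimal sketch with purely illustrative names (my_audio_bind() and friends are not part of the patch):

#include "u_audio.h"

static int my_audio_bind(struct g_audio *agdev, struct usb_gadget *gadget)
{
	agdev->gadget = gadget;
	/* in_ep/out_ep and {in,out}_ep_maxpsize are assumed to be filled in already */

	agdev->params.p_chmask = 0x3;		/* stereo playback */
	agdev->params.p_srate = 48000;
	agdev->params.p_ssize = 2;
	agdev->params.c_chmask = 0x3;		/* stereo capture */
	agdev->params.c_srate = 48000;
	agdev->params.c_ssize = 2;
	agdev->params.req_number = 2;

	return g_audio_setup(agdev, "My PCM", "My_Gadget");
}

static int my_audio_set_alt(struct g_audio *agdev, bool playback, unsigned int alt)
{
	if (playback) {
		if (alt)
			return u_audio_start_playback(agdev);
		u_audio_stop_playback(agdev);
	} else {
		if (alt)
			return u_audio_start_capture(agdev);
		u_audio_stop_capture(agdev);
	}
	return 0;
}

static void my_audio_unbind(struct g_audio *agdev)
{
	g_audio_cleanup(agdev);
	agdev->gadget = NULL;
}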
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 4378cc2..540f1c4 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -216,6 +216,9 @@ struct ffs_data {
 #define FFS_FL_CALL_CLOSED_CALLBACK 0
 #define FFS_FL_BOUND                1
 
+	/* For waking up blocked threads when function is enabled. */
+	wait_queue_head_t		wait;
+
 	/* Active function */
 	struct ffs_function		*func;
 
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 5c2ac8e..6f188fd 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,82 +1,41 @@
 /*
- * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ * u_uac1.h - Utility definitions for UAC1 function
  *
- * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
- * Copyright (C) 2008 Analog Devices, Inc
+ * Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
  *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
-#ifndef __U_AUDIO_H
-#define __U_AUDIO_H
+#ifndef __U_UAC1_H
+#define __U_UAC1_H
 
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/usb/audio.h>
 #include <linux/usb/composite.h>
 
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#define FILE_PCM_PLAYBACK	"/dev/snd/pcmC0D0p"
-#define FILE_PCM_CAPTURE	"/dev/snd/pcmC0D0c"
-#define FILE_CONTROL		"/dev/snd/controlC0"
-
 #define UAC1_OUT_EP_MAX_PACKET_SIZE	200
-#define UAC1_REQ_COUNT			256
-#define UAC1_AUDIO_BUF_SIZE		48000
+#define UAC1_DEF_CCHMASK	0x3
+#define UAC1_DEF_CSRATE		48000
+#define UAC1_DEF_CSSIZE		2
+#define UAC1_DEF_PCHMASK	0x3
+#define UAC1_DEF_PSRATE		48000
+#define UAC1_DEF_PSSIZE		2
+#define UAC1_DEF_REQ_NUM	2
 
-/*
- * This represents the USB side of an audio card device, managed by a USB
- * function which provides control and stream interfaces.
- */
-
-struct gaudio_snd_dev {
-	struct gaudio			*card;
-	struct file			*filp;
-	struct snd_pcm_substream	*substream;
-	int				access;
-	int				format;
-	int				channels;
-	int				rate;
-};
-
-struct gaudio {
-	struct usb_function		func;
-	struct usb_gadget		*gadget;
-
-	/* ALSA sound device interfaces */
-	struct gaudio_snd_dev		control;
-	struct gaudio_snd_dev		playback;
-	struct gaudio_snd_dev		capture;
-
-	/* TODO */
-};
 
 struct f_uac1_opts {
 	struct usb_function_instance	func_inst;
-	int				req_buf_size;
-	int				req_count;
-	int				audio_buf_size;
-	char				*fn_play;
-	char				*fn_cap;
-	char				*fn_cntl;
+	int				c_chmask;
+	int				c_srate;
+	int				c_ssize;
+	int				p_chmask;
+	int				p_srate;
+	int				p_ssize;
+	int				req_number;
 	unsigned			bound:1;
-	unsigned			fn_play_alloc:1;
-	unsigned			fn_cap_alloc:1;
-	unsigned			fn_cntl_alloc:1;
+
 	struct mutex			lock;
 	int				refcnt;
 };
 
-int gaudio_setup(struct gaudio *card);
-void gaudio_cleanup(struct gaudio *the_card);
-
-size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
-int u_audio_get_playback_channels(struct gaudio *card);
-int u_audio_get_playback_rate(struct gaudio *card);
-
-#endif /* __U_AUDIO_H */
+#endif /* __U_UAC1_H */
diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1_legacy.c
similarity index 97%
rename from drivers/usb/gadget/function/u_uac1.c
rename to drivers/usb/gadget/function/u_uac1_legacy.c
index c78c841..8aa76b4 100644
--- a/drivers/usb/gadget/function/u_uac1.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -18,7 +18,7 @@
 #include <linux/random.h>
 #include <linux/syscalls.h>
 
-#include "u_uac1.h"
+#include "u_uac1_legacy.h"
 
 /*
  * This component encapsulates the ALSA devices for USB audio gadget
@@ -205,10 +205,11 @@ static int gaudio_open_snd_dev(struct gaudio *card)
 {
 	struct snd_pcm_file *pcm_file;
 	struct gaudio_snd_dev *snd;
-	struct f_uac1_opts *opts;
+	struct f_uac1_legacy_opts *opts;
 	char *fn_play, *fn_cap, *fn_cntl;
 
-	opts = container_of(card->func.fi, struct f_uac1_opts, func_inst);
+	opts = container_of(card->func.fi, struct f_uac1_legacy_opts,
+			    func_inst);
 	fn_play = opts->fn_play;
 	fn_cap = opts->fn_cap;
 	fn_cntl = opts->fn_cntl;
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
new file mode 100644
index 0000000..d715b1a
--- /dev/null
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -0,0 +1,82 @@
+/*
+ * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_UAC1_LEGACY_H
+#define __U_UAC1_LEGACY_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#define FILE_PCM_PLAYBACK	"/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE	"/dev/snd/pcmC0D0c"
+#define FILE_CONTROL		"/dev/snd/controlC0"
+
+#define UAC1_OUT_EP_MAX_PACKET_SIZE	200
+#define UAC1_REQ_COUNT			256
+#define UAC1_AUDIO_BUF_SIZE		48000
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+struct gaudio_snd_dev {
+	struct gaudio			*card;
+	struct file			*filp;
+	struct snd_pcm_substream	*substream;
+	int				access;
+	int				format;
+	int				channels;
+	int				rate;
+};
+
+struct gaudio {
+	struct usb_function		func;
+	struct usb_gadget		*gadget;
+
+	/* ALSA sound device interfaces */
+	struct gaudio_snd_dev		control;
+	struct gaudio_snd_dev		playback;
+	struct gaudio_snd_dev		capture;
+
+	/* TODO */
+};
+
+struct f_uac1_legacy_opts {
+	struct usb_function_instance	func_inst;
+	int				req_buf_size;
+	int				req_count;
+	int				audio_buf_size;
+	char				*fn_play;
+	char				*fn_cap;
+	char				*fn_cntl;
+	unsigned			bound:1;
+	unsigned			fn_play_alloc:1;
+	unsigned			fn_cap_alloc:1;
+	unsigned			fn_cntl_alloc:1;
+	struct mutex			lock;
+	int				refcnt;
+};
+
+int gaudio_setup(struct gaudio *card);
+void gaudio_cleanup(struct gaudio *the_card);
+
+size_t u_audio_playback(struct gaudio *card, void *buf, size_t count);
+int u_audio_get_playback_channels(struct gaudio *card);
+int u_audio_get_playback_rate(struct gaudio *card);
+
+#endif /* __U_UAC1_LEGACY_H */
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 0b36878..a12fb45 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -54,8 +54,10 @@
 	depends on SND
 	select USB_LIBCOMPOSITE
 	select SND_PCM
-	select USB_F_UAC1 if GADGET_UAC1
+	select USB_F_UAC1 if (GADGET_UAC1 && !GADGET_UAC1_LEGACY)
+	select USB_F_UAC1_LEGACY if (GADGET_UAC1 && GADGET_UAC1_LEGACY)
 	select USB_F_UAC2 if !GADGET_UAC1
+	select USB_U_AUDIO if (USB_F_UAC2 || USB_F_UAC1)
 	help
 	  This Gadget Audio driver is compatible with USB Audio Class
 	  specification 2.0. It implements 1 AudioControl interface,
@@ -73,10 +75,17 @@
 	  dynamically linked module called "g_audio".
 
 config GADGET_UAC1
-	bool "UAC 1.0 (Legacy)"
+	bool "UAC 1.0"
 	depends on USB_AUDIO
 	help
-	  If you instead want older UAC Spec-1.0 driver that also has audio
+	  Select this if you instead want the older USB Audio Class
+	  specification 1.0 driver, which offers similar capabilities.
+
+config GADGET_UAC1_LEGACY
+	bool "UAC 1.0 (Legacy)"
+	depends on GADGET_UAC1
+	help
+	  Select this if you want the legacy UAC Spec-1.0 driver that has audio
 	  paths hardwired to the Audio codec chip on-board and doesn't work
 	  without one.
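With the symbols above, building the new (non-legacy) UAC1 gadget comes down to a .config fragment roughly like the one below; turning on GADGET_UAC1_LEGACY instead flips the build back to the old hardwired driver:

CONFIG_USB_AUDIO=m
CONFIG_GADGET_UAC1=y
# CONFIG_GADGET_UAC1_LEGACY is not set
# USB_U_AUDIO and USB_F_UAC1 are then selected automatically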
 
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index 8a39f42..1f5cdbe 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -53,8 +53,41 @@ static int c_ssize = UAC2_DEF_CSSIZE;
 module_param(c_ssize, uint, S_IRUGO);
 MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
 #else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
 #include "u_uac1.h"
 
+/* Playback(USB-IN) Default Stereo - Fl/Fr */
+static int p_chmask = UAC1_DEF_PCHMASK;
+module_param(p_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(p_chmask, "Playback Channel Mask");
+
+/* Playback Default 48 KHz */
+static int p_srate = UAC1_DEF_PSRATE;
+module_param(p_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(p_srate, "Playback Sampling Rate");
+
+/* Playback Default 16bits/sample */
+static int p_ssize = UAC1_DEF_PSSIZE;
+module_param(p_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(p_ssize, "Playback Sample Size(bytes)");
+
+/* Capture(USB-OUT) Default Stereo - Fl/Fr */
+static int c_chmask = UAC1_DEF_CCHMASK;
+module_param(c_chmask, uint, S_IRUGO);
+MODULE_PARM_DESC(c_chmask, "Capture Channel Mask");
+
+/* Capture Default 48 KHz */
+static int c_srate = UAC1_DEF_CSRATE;
+module_param(c_srate, uint, S_IRUGO);
+MODULE_PARM_DESC(c_srate, "Capture Sampling Rate");
+
+/* Capture Default 16bits/sample */
+static int c_ssize = UAC1_DEF_CSSIZE;
+module_param(c_ssize, uint, S_IRUGO);
+MODULE_PARM_DESC(c_ssize, "Capture Sample Size(bytes)");
+#else /* CONFIG_GADGET_UAC1_LEGACY */
+#include "u_uac1_legacy.h"
+
 static char *fn_play = FILE_PCM_PLAYBACK;
 module_param(fn_play, charp, S_IRUGO);
 MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
@@ -78,6 +111,7 @@ MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
 static int audio_buf_size = UAC1_AUDIO_BUF_SIZE;
 module_param(audio_buf_size, int, S_IRUGO);
 MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+#endif /* CONFIG_GADGET_UAC1_LEGACY */
 #endif
 
 /* string IDs are assigned dynamically */
@@ -125,7 +159,7 @@ static struct usb_device_descriptor device_desc = {
 
 	/* .bcdUSB = DYNAMIC */
 
-#ifdef CONFIG_GADGET_UAC1
+#ifdef CONFIG_GADGET_UAC1_LEGACY
 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
 	.bDeviceSubClass =	0,
 	.bDeviceProtocol =	0,
@@ -207,7 +241,11 @@ static int audio_bind(struct usb_composite_dev *cdev)
 #ifndef CONFIG_GADGET_UAC1
 	struct f_uac2_opts	*uac2_opts;
 #else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
 	struct f_uac1_opts	*uac1_opts;
+#else
+	struct f_uac1_legacy_opts	*uac1_opts;
+#endif
 #endif
 	int			status;
 
@@ -216,7 +254,11 @@ static int audio_bind(struct usb_composite_dev *cdev)
 	if (IS_ERR(fi_uac2))
 		return PTR_ERR(fi_uac2);
 #else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
 	fi_uac1 = usb_get_function_instance("uac1");
+#else
+	fi_uac1 = usb_get_function_instance("uac1_legacy");
+#endif
 	if (IS_ERR(fi_uac1))
 		return PTR_ERR(fi_uac1);
 #endif
@@ -231,13 +273,24 @@ static int audio_bind(struct usb_composite_dev *cdev)
 	uac2_opts->c_ssize = c_ssize;
 	uac2_opts->req_number = UAC2_DEF_REQ_NUM;
 #else
+#ifndef CONFIG_GADGET_UAC1_LEGACY
 	uac1_opts = container_of(fi_uac1, struct f_uac1_opts, func_inst);
+	uac1_opts->p_chmask = p_chmask;
+	uac1_opts->p_srate = p_srate;
+	uac1_opts->p_ssize = p_ssize;
+	uac1_opts->c_chmask = c_chmask;
+	uac1_opts->c_srate = c_srate;
+	uac1_opts->c_ssize = c_ssize;
+	uac1_opts->req_number = UAC1_DEF_REQ_NUM;
+#else /* CONFIG_GADGET_UAC1_LEGACY */
+	uac1_opts = container_of(fi_uac1, struct f_uac1_legacy_opts, func_inst);
 	uac1_opts->fn_play = fn_play;
 	uac1_opts->fn_cap = fn_cap;
 	uac1_opts->fn_cntl = fn_cntl;
 	uac1_opts->req_buf_size = req_buf_size;
 	uac1_opts->req_count = req_count;
 	uac1_opts->audio_buf_size = audio_buf_size;
+#endif /* CONFIG_GADGET_UAC1_LEGACY */
 #endif
 
 	status = usb_string_ids_tab(cdev, strings_dev);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b9ca0a2..684900f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd)
 
 	/* closing ep0 === shutdown all */
 
-	if (dev->gadget_registered)
+	if (dev->gadget_registered) {
 		usb_gadget_unregister_driver (&gadgetfs_driver);
+		dev->gadget_registered = false;
+	}
 
 	/* at this point "good" hardware has disconnected the
 	 * device from USB; the host won't see it any more.
@@ -1677,9 +1679,10 @@ static void
 gadgetfs_suspend (struct usb_gadget *gadget)
 {
 	struct dev_data		*dev = get_gadget_data (gadget);
+	unsigned long		flags;
 
 	INFO (dev, "suspended from state %d\n", dev->state);
-	spin_lock (&dev->lock);
+	spin_lock_irqsave(&dev->lock, flags);
 	switch (dev->state) {
 	case STATE_DEV_SETUP:		// VERY odd... host died??
 	case STATE_DEV_CONNECTED:
@@ -1690,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget)
 	default:
 		break;
 	}
-	spin_unlock (&dev->lock);
+	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index 125974f..e99ab57 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -210,7 +210,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&cdev->gadget->dev,
 		 DRIVER_DESC ", version: " DRIVER_VERSION "\n");
-	set_bit(0, &msg_registered);
 	return 0;
 
 fail_otg_desc:
@@ -257,7 +256,12 @@ MODULE_LICENSE("GPL");
 
 static int __init msg_init(void)
 {
-	return usb_composite_probe(&msg_driver);
+	int ret;
+
+	ret = usb_composite_probe(&msg_driver);
+	set_bit(0, &msg_registered);
+
+	return ret;
 }
 module_init(msg_init);
 
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1c14c28..9ffb11e 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -55,7 +55,7 @@
 
 config USB_ATMEL_USBA
 	tristate "Atmel USBA"
-	depends on ((AVR32 && !OF) || ARCH_AT91)
+	depends on ARCH_AT91
 	help
 	  USBA is the integrated high-speed USB Device controller on
 	  the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
@@ -256,7 +256,7 @@
 	  controller, which support super speed USB peripheral.
 
 config USB_SNP_CORE
-	depends on USB_AMD5536UDC
+	depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
 	tristate
 	help
 	  This enables core driver support for Synopsys USB 2.0 Device
@@ -269,6 +269,20 @@
 	  This IP is different to the High Speed OTG IP that can be enabled
 	  by selecting USB_DWC2 or USB_DWC3 options.
 
+config USB_SNP_UDC_PLAT
+	tristate "Synopsys USB 2.0 Device controller"
+	depends on (USB_GADGET && OF)
+	select USB_GADGET_DUALSPEED
+	select USB_SNP_CORE
+	default ARCH_BCM_IPROC
+	help
+	  This adds Platform Device support for Synopsys Designware core
+	  AHB subsystem USB2.0 Device Controller (UDC).
+
+	  This driver works with UDCs integrated into Broadcom's Northstar2
+	  and Cygnus SoCs.
+
+	  If unsure, say N.
 #
 # Controllers available in both integrated and discrete versions
 #
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 626e1f1..ea9e1c7 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -10,7 +10,7 @@
 obj-$(CONFIG_USB_DUMMY_HCD)	+= dummy_hcd.o
 obj-$(CONFIG_USB_NET2272)	+= net2272.o
 obj-$(CONFIG_USB_NET2280)	+= net2280.o
-obj-$(CONFIG_USB_SNP_CORE)	+= amd5536udc.o
+obj-$(CONFIG_USB_SNP_CORE)	+= snps_udc_core.o
 obj-$(CONFIG_USB_AMD5536UDC)	+= amd5536udc_pci.o
 obj-$(CONFIG_USB_PXA25X)	+= pxa25x_udc.o
 obj-$(CONFIG_USB_PXA27X)	+= pxa27x_udc.o
@@ -37,4 +37,5 @@
 obj-$(CONFIG_USB_MV_U3D)	+= mv_u3d_core.o
 obj-$(CONFIG_USB_GR_UDC)	+= gr_udc.o
 obj-$(CONFIG_USB_GADGET_XILINX)	+= udc-xilinx.o
+obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
 obj-$(CONFIG_USB_BDC_UDC)	+= bdc/
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index fae49bf..4fe22d4 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -16,6 +16,7 @@
 /* debug control */
 /* #define UDC_VERBOSE */
 
+#include <linux/extcon.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 
@@ -28,6 +29,9 @@
 #define UDC_HSA0_REV 1
 #define UDC_HSB1_REV 2
 
+/* Broadcom chip rev. */
+#define UDC_BCM_REV 10
+
 /*
  * SETUP usb commands
  * needed, because some SETUP's are handled in hw, but must be passed to
@@ -112,6 +116,7 @@
 #define UDC_DEVCTL_BRLEN_MASK			0x00ff0000
 #define UDC_DEVCTL_BRLEN_OFS			16
 
+#define UDC_DEVCTL_SRX_FLUSH			14
 #define UDC_DEVCTL_CSR_DONE			13
 #define UDC_DEVCTL_DEVNAK			12
 #define UDC_DEVCTL_SD				10
@@ -563,6 +568,16 @@ struct udc {
 	u16				cur_config;
 	u16				cur_intf;
 	u16				cur_alt;
+
+	/* for platform device and extcon support */
+	struct device			*dev;
+	struct phy			*udc_phy;
+	struct extcon_dev		*edev;
+	struct extcon_specific_cable_nb	extcon_nb;
+	struct notifier_block		nb;
+	struct delayed_work		drd_work;
+	struct workqueue_struct		*drd_wq;
+	u32				conn_type;
 };
 
 #define to_amd5536_udc(g)	(container_of((g), struct udc, gadget))
@@ -578,6 +593,7 @@ int udc_enable_dev_setup_interrupts(struct udc *dev);
 int udc_mask_unused_interrupts(struct udc *dev);
 irqreturn_t udc_irq(int irq, void *pdev);
 void gadget_release(struct device *pdev);
+void empty_req_queue(struct udc_ep *ep);
 void udc_basic_init(struct udc *dev);
 void free_dma_pools(struct udc *dev);
 int init_dma_pools(struct udc *dev);
@@ -639,7 +655,7 @@ MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
 
 /* debug macros ------------------------------------------------------------*/
 
-#define DBG(udc , args...)	dev_dbg(&(udc)->pdev->dev, args)
+#define DBG(udc , args...)	dev_dbg(udc->dev, args)
 
 #ifdef UDC_VERBOSE
 #define VDBG			DBG
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 2a2d0a9..57a13f0 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -168,6 +168,7 @@ static int udc_pci_probe(
 	dev->phys_addr = resource;
 	dev->irq = pdev->irq;
 	dev->pdev = pdev;
+	dev->dev = &pdev->dev;
 
 	/* general probing */
 	if (udc_probe(dev)) {
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3ccc341..98d7140 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -152,7 +152,7 @@ static int regs_dbg_open(struct inode *inode, struct file *file)
 
 	spin_lock_irq(&udc->lock);
 	for (i = 0; i < inode->i_size / 4; i++)
-		data[i] = usba_io_readl(udc->regs + i * 4);
+		data[i] = readl_relaxed(udc->regs + i * 4);
 	spin_unlock_irq(&udc->lock);
 
 	file->private_data = data;
@@ -1369,7 +1369,7 @@ static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
 		if (crq->wLength != cpu_to_le16(sizeof(status)))
 			goto stall;
 		ep->state = DATA_STAGE_IN;
-		usba_io_writew(status, ep->fifo);
+		writew_relaxed(status, ep->fifo);
 		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
 		break;
 	}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 9551b70..f8ebe03 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -43,13 +43,8 @@
 #define USBA_REMOTE_WAKE_UP			(1 << 10)
 #define USBA_PULLD_DIS				(1 << 11)
 
-#if defined(CONFIG_AVR32)
-#define USBA_ENABLE_MASK			USBA_EN_USBA
-#define USBA_DISABLE_MASK			0
-#elif defined(CONFIG_ARCH_AT91)
 #define USBA_ENABLE_MASK			(USBA_EN_USBA | USBA_PULLD_DIS)
 #define USBA_DISABLE_MASK			USBA_DETACH
-#endif /* CONFIG_ARCH_AT91 */
 
 /* Bitfields in FNUM */
 #define USBA_MICRO_FRAME_NUM_OFFSET		0
@@ -191,28 +186,18 @@
 	 | USBA_BF(name, value))
 
 /* Register access macros */
-#ifdef CONFIG_AVR32
-#define usba_io_readl	__raw_readl
-#define usba_io_writel	__raw_writel
-#define usba_io_writew	__raw_writew
-#else
-#define usba_io_readl	readl_relaxed
-#define usba_io_writel	writel_relaxed
-#define usba_io_writew	writew_relaxed
-#endif
-
 #define usba_readl(udc, reg)					\
-	usba_io_readl((udc)->regs + USBA_##reg)
+	readl_relaxed((udc)->regs + USBA_##reg)
 #define usba_writel(udc, reg, value)				\
-	usba_io_writel((value), (udc)->regs + USBA_##reg)
+	writel_relaxed((value), (udc)->regs + USBA_##reg)
 #define usba_ep_readl(ep, reg)					\
-	usba_io_readl((ep)->ep_regs + USBA_EPT_##reg)
+	readl_relaxed((ep)->ep_regs + USBA_EPT_##reg)
 #define usba_ep_writel(ep, reg, value)				\
-	usba_io_writel((value), (ep)->ep_regs + USBA_EPT_##reg)
+	writel_relaxed((value), (ep)->ep_regs + USBA_EPT_##reg)
 #define usba_dma_readl(ep, reg)					\
-	usba_io_readl((ep)->dma_regs + USBA_DMA_##reg)
+	readl_relaxed((ep)->dma_regs + USBA_DMA_##reg)
 #define usba_dma_writel(ep, reg, value)				\
-	usba_io_writel((value), (ep)->dma_regs + USBA_DMA_##reg)
+	writel_relaxed((value), (ep)->dma_regs + USBA_DMA_##reg)
 
 /* Calculate base address for a given endpoint or DMA controller */
 #define USBA_EPT_BASE(x)	(0x100 + (x) * 0x20)
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index ccb9c21..e9bd8d4 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev)
 	bdc->dev = dev;
 	dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
 
-	temp = bdc_readl(bdc->regs, BDC_BDCSC);
+	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
 	if ((temp & BDC_P64) &&
 			!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
 		dev_dbg(bdc->dev, "Using 64-bit address\n");
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index efce68e..e6f04ee 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
 
 #include <linux/usb/ch9.h>
@@ -139,10 +140,8 @@ int usb_ep_disable(struct usb_ep *ep)
 		goto out;
 
 	ret = ep->ops->disable(ep);
-	if (ret) {
-		ret = ret;
+	if (ret)
 		goto out;
-	}
 
 	ep->enabled = false;
 
@@ -798,6 +797,14 @@ int usb_gadget_map_request_by_dev(struct device *dev,
 
 		req->num_mapped_sgs = mapped;
 	} else {
+		if (is_vmalloc_addr(req->buf)) {
+			dev_err(dev, "buffer is not dma capable\n");
+			return -EFAULT;
+		} else if (object_is_on_stack(req->buf)) {
+			dev_err(dev, "buffer is on stack\n");
+			return -EFAULT;
+		}
+
 		req->dma = dma_map_single(dev, req->buf, req->length,
 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
@@ -1058,6 +1065,23 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 }
 
 /**
+ * usb_gadget_udc_set_speed - tell the usb device controller the speed
+ *    supported by the current driver
+ * @udc: The device whose maximum speed we want to set
+ * @speed: The maximum speed the device is allowed to run at
+ *
+ * This call is issued by the UDC Class driver before calling
+ * usb_gadget_udc_start() in order to make sure that we don't try to
+ * connect on speeds the gadget driver doesn't support.
+ */
+static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
+					    enum usb_device_speed speed)
+{
+	if (udc->gadget->ops->udc_set_speed)
+		udc->gadget->ops->udc_set_speed(udc->gadget, speed);
+}
+
+/**
  * usb_udc_release - release the usb_udc struct
  * @dev: the dev member within usb_udc
  *
@@ -1290,6 +1314,9 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
 	udc->dev.driver = &driver->driver;
 	udc->gadget->dev.driver = &driver->driver;
 
+	if (driver->max_speed < udc->gadget->max_speed)
+		usb_gadget_udc_set_speed(udc, driver->max_speed);
+
 	ret = driver->bind(udc->gadget, driver);
 	if (ret)
 		goto err1;
@@ -1442,6 +1469,18 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(state);
 
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
+	struct usb_gadget_driver *drv = udc->driver;
+
+	if (!drv || !drv->function)
+		return 0;
+	return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+}
+static DEVICE_ATTR_RO(function);
+
 #define USB_UDC_SPEED_ATTR(name, param)					\
 ssize_t name##_show(struct device *dev,					\
 		struct device_attribute *attr, char *buf)		\
@@ -1477,6 +1516,7 @@ static struct attribute *usb_udc_attrs[] = {
 	&dev_attr_srp.attr,
 	&dev_attr_soft_connect.attr,
 	&dev_attr_state.attr,
+	&dev_attr_function.attr,
 	&dev_attr_current_speed.attr,
 	&dev_attr_maximum_speed.attr,
 
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ccabb51..3c37603 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
 		/* Report reset and disconnect events to the driver */
 		if (dum->driver && (disconnect || reset)) {
 			stop_activity(dum);
-			spin_unlock(&dum->lock);
 			if (reset)
 				usb_gadget_udc_reset(&dum->gadget, dum->driver);
 			else
 				dum->driver->disconnect(&dum->gadget);
-			spin_lock(&dum->lock);
 		}
 	} else if (dum_hcd->active != dum_hcd->old_active) {
-		if (dum_hcd->old_active && dum->driver->suspend) {
-			spin_unlock(&dum->lock);
+		if (dum_hcd->old_active && dum->driver->suspend)
 			dum->driver->suspend(&dum->gadget);
-			spin_lock(&dum->lock);
-		} else if (!dum_hcd->old_active &&  dum->driver->resume) {
-			spin_unlock(&dum->lock);
+		else if (!dum_hcd->old_active &&  dum->driver->resume)
 			dum->driver->resume(&dum->gadget);
-			spin_lock(&dum->lock);
-		}
 	}
 
 	dum_hcd->old_status = dum_hcd->port_status;
@@ -888,22 +881,6 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
 	unsigned long	flags;
 
 	dum = gadget_dev_to_dummy(&_gadget->dev);
-
-	if (value && dum->driver) {
-		if (mod_data.is_super_speed)
-			dum->gadget.speed = dum->driver->max_speed;
-		else if (mod_data.is_high_speed)
-			dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
-					dum->driver->max_speed);
-		else
-			dum->gadget.speed = USB_SPEED_FULL;
-		dummy_udc_update_ep0(dum);
-
-		if (dum->gadget.speed < dum->driver->max_speed)
-			dev_dbg(udc_dev(dum), "This device can perform faster"
-				" if you connect it to a %s port...\n",
-				usb_speed_string(dum->driver->max_speed));
-	}
 	dum_hcd = gadget_to_dummy_hcd(_gadget);
 
 	spin_lock_irqsave(&dum->lock, flags);
@@ -915,6 +892,28 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
 	return 0;
 }
 
+static void dummy_udc_set_speed(struct usb_gadget *_gadget,
+		enum usb_device_speed speed)
+{
+	struct dummy	*dum;
+
+	dum = gadget_dev_to_dummy(&_gadget->dev);
+
+	if (mod_data.is_super_speed)
+		dum->gadget.speed = min_t(u8, USB_SPEED_SUPER, speed);
+	else if (mod_data.is_high_speed)
+		dum->gadget.speed = min_t(u8, USB_SPEED_HIGH, speed);
+	else
+		dum->gadget.speed = USB_SPEED_FULL;
+
+	dummy_udc_update_ep0(dum);
+
+	if (dum->gadget.speed < speed)
+		dev_dbg(udc_dev(dum), "This device can perform faster"
+			" if you connect it to a %s port...\n",
+			usb_speed_string(speed));
+}
+
 static int dummy_udc_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver);
 static int dummy_udc_stop(struct usb_gadget *g);
@@ -926,6 +925,7 @@ static const struct usb_gadget_ops dummy_ops = {
 	.pullup		= dummy_pullup,
 	.udc_start	= dummy_udc_start,
 	.udc_stop	= dummy_udc_stop,
+	.udc_set_speed	= dummy_udc_set_speed,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -983,7 +983,9 @@ static int dummy_udc_stop(struct usb_gadget *g)
 	struct dummy_hcd	*dum_hcd = gadget_to_dummy_hcd(g);
 	struct dummy		*dum = dum_hcd->dum;
 
+	spin_lock_irq(&dum->lock);
 	dum->driver = NULL;
+	spin_unlock_irq(&dum->lock);
 
 	return 0;
 }
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 76f56c5..8a708d0 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -960,9 +960,9 @@ static const struct usb_ep_ops mv_ep_ops = {
 	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
 };
 
-static void udc_clock_enable(struct mv_udc *udc)
+static int udc_clock_enable(struct mv_udc *udc)
 {
-	clk_prepare_enable(udc->clk);
+	return clk_prepare_enable(udc->clk);
 }
 
 static void udc_clock_disable(struct mv_udc *udc)
@@ -1070,7 +1070,10 @@ static int mv_udc_enable_internal(struct mv_udc *udc)
 		return 0;
 
 	dev_dbg(&udc->dev->dev, "enable udc\n");
-	udc_clock_enable(udc);
+	retval = udc_clock_enable(udc);
+	if (retval)
+		return retval;
+
 	if (udc->pdata->phy_init) {
 		retval = udc->pdata->phy_init(udc->phy_regs);
 		if (retval) {
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6cf0785..f608c1f 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2470,11 +2470,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
 		nuke(&dev->ep[i]);
 
 	/* report disconnect; the driver is already quiesced */
-	if (driver) {
-		spin_unlock(&dev->lock);
+	if (driver)
 		driver->disconnect(&dev->gadget);
-		spin_lock(&dev->lock);
-	}
 
 	usb_reinit(dev);
 }
@@ -3348,8 +3345,6 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
 		BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
 {
 	struct net2280_ep	*ep;
 	u32			tmp, num, mask, scratch;
@@ -3390,14 +3385,12 @@ __acquires(dev->lock)
 			if (disconnect || reset) {
 				stop_activity(dev, dev->driver);
 				ep0_start(dev);
-				spin_unlock(&dev->lock);
 				if (reset)
 					usb_gadget_udc_reset
 						(&dev->gadget, dev->driver);
 				else
 					(dev->driver->disconnect)
 						(&dev->gadget);
-				spin_lock(&dev->lock);
 				return;
 			}
 		}
@@ -3573,7 +3566,6 @@ static void net2280_remove(struct pci_dev *pdev)
 	BUG_ON(dev->driver);
 
 	/* then clean up the resources we allocated during probe() */
-	net2280_led_shutdown(dev);
 	if (dev->requests) {
 		int		i;
 		for (i = 1; i < 5; i++) {
@@ -3588,8 +3580,10 @@ static void net2280_remove(struct pci_dev *pdev)
 		free_irq(pdev->irq, dev);
 	if (dev->quirks & PLX_PCIE)
 		pci_disable_msi(pdev);
-	if (dev->regs)
+	if (dev->regs) {
+		net2280_led_shutdown(dev);
 		iounmap(dev->regs);
+	}
 	if (dev->region)
 		release_mem_region(pci_resource_start(pdev, 0),
 				pci_resource_len(pdev, 0));
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 5a2d845..d827832 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/extcon.h>
 #include <linux/interrupt.h>
@@ -27,6 +28,8 @@
 #define USB3_AXI_INT_ENA	0x00c
 #define USB3_DMA_INT_STA	0x010
 #define USB3_DMA_INT_ENA	0x014
+#define USB3_DMA_CH0_CON(n)	(0x030 + ((n) - 1) * 0x10) /* n = 1 to 4 */
+#define USB3_DMA_CH0_PRD_ADR(n)	(0x034 + ((n) - 1) * 0x10) /* n = 1 to 4 */
 #define USB3_USB_COM_CON	0x200
 #define USB3_USB20_CON		0x204
 #define USB3_USB30_CON		0x208
@@ -64,6 +67,22 @@
 /* AXI_INT_ENA and AXI_INT_STA */
 #define AXI_INT_DMAINT		BIT(31)
 #define AXI_INT_EPCINT		BIT(30)
+/* PRD's n = from 1 to 4 */
+#define AXI_INT_PRDEN_CLR_STA_SHIFT(n)	(16 + (n) - 1)
+#define AXI_INT_PRDERR_STA_SHIFT(n)	(0 + (n) - 1)
+#define AXI_INT_PRDEN_CLR_STA(n)	(1 << AXI_INT_PRDEN_CLR_STA_SHIFT(n))
+#define AXI_INT_PRDERR_STA(n)		(1 << AXI_INT_PRDERR_STA_SHIFT(n))
+
+/* DMA_INT_ENA and DMA_INT_STA */
+#define DMA_INT(n)		BIT(n)
+
+/* DMA_CH0_CONn */
+#define DMA_CON_PIPE_DIR	BIT(15)		/* 1: In Transfer */
+#define DMA_CON_PIPE_NO_SHIFT	8
+#define DMA_CON_PIPE_NO_MASK	GENMASK(12, DMA_CON_PIPE_NO_SHIFT)
+#define DMA_COM_PIPE_NO(n)	(((n) << DMA_CON_PIPE_NO_SHIFT) & \
+					 DMA_CON_PIPE_NO_MASK)
+#define DMA_CON_PRD_EN		BIT(0)
 
 /* LCLKSEL */
 #define LCLKSEL_LSEL		BIT(18)
@@ -231,8 +250,50 @@
 #define USB3_EP0_BUF_SIZE		8
 #define USB3_MAX_NUM_PIPES		30
 #define USB3_WAIT_US			3
+#define USB3_DMA_NUM_SETTING_AREA	4
+/*
+ * To avoid the double meaning of "0" (xferred 65536 bytes or received a zlp
+ * if the buffer size is 65536), this driver limits the maximum size per
+ * entry to 32768 bytes.
+ */
+#define USB3_DMA_MAX_XFER_SIZE		32768
+#define USB3_DMA_PRD_SIZE		4096
 
 struct renesas_usb3;
+
+/* Physical Region Descriptor Table */
+struct renesas_usb3_prd {
+	u32 word1;
+#define USB3_PRD1_E		BIT(30)		/* the end of chain */
+#define USB3_PRD1_U		BIT(29)		/* completion of transfer */
+#define USB3_PRD1_D		BIT(28)		/* Error occurred */
+#define USB3_PRD1_INT		BIT(27)		/* Interrupt occurred */
+#define USB3_PRD1_LST		BIT(26)		/* Last Packet */
+#define USB3_PRD1_B_INC		BIT(24)
+#define USB3_PRD1_MPS_8		0
+#define USB3_PRD1_MPS_16	BIT(21)
+#define USB3_PRD1_MPS_32	BIT(22)
+#define USB3_PRD1_MPS_64	(BIT(22) | BIT(21))
+#define USB3_PRD1_MPS_512	BIT(23)
+#define USB3_PRD1_MPS_1024	(BIT(23) | BIT(21))
+#define USB3_PRD1_MPS_RESERVED	(BIT(23) | BIT(22) | BIT(21))
+#define USB3_PRD1_SIZE_MASK	GENMASK(15, 0)
+
+	u32 bap;
+};
+#define USB3_DMA_NUM_PRD_ENTRIES	(USB3_DMA_PRD_SIZE / \
+					  sizeof(struct renesas_usb3_prd))
+#define USB3_DMA_MAX_XFER_SIZE_ALL_PRDS	(USB3_DMA_PRD_SIZE / \
+					 sizeof(struct renesas_usb3_prd) * \
+					 USB3_DMA_MAX_XFER_SIZE)
+
+struct renesas_usb3_dma {
+	struct renesas_usb3_prd *prd;
+	dma_addr_t prd_dma;
+	int num;	/* Setting area number (from 1 to 4) */
+	bool used;
+};
+
 struct renesas_usb3_request {
 	struct usb_request	req;
 	struct list_head	queue;
@@ -242,6 +303,7 @@ struct renesas_usb3_request {
 struct renesas_usb3_ep {
 	struct usb_ep ep;
 	struct renesas_usb3 *usb3;
+	struct renesas_usb3_dma *dma;
 	int num;
 	char ep_name[USB3_EP_NAME_SIZE];
 	struct list_head queue;
@@ -270,6 +332,8 @@ struct renesas_usb3 {
 	struct renesas_usb3_ep *usb3_ep;
 	int num_usb3_eps;
 
+	struct renesas_usb3_dma dma[USB3_DMA_NUM_SETTING_AREA];
+
 	spinlock_t lock;
 	int disabled_count;
 
@@ -298,8 +362,18 @@ struct renesas_usb3 {
 		     (i) < (usb3)->num_usb3_eps;		\
 		     (i)++, usb3_ep = usb3_get_ep(usb3, (i)))
 
+#define usb3_get_dma(usb3, i)	(&(usb3)->dma[i])
+#define usb3_for_each_dma(usb3, dma, i)				\
+		for ((i) = 0, dma = usb3_get_dma((usb3), (i));	\
+		     (i) < USB3_DMA_NUM_SETTING_AREA;		\
+		     (i)++, dma = usb3_get_dma((usb3), (i)))
+
 static const char udc_name[] = "renesas_usb3";
 
+static bool use_dma = 1;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "use dedicated DMAC");
+
 static void usb3_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
 {
 	iowrite32(data, usb3->reg + offs);
@@ -623,7 +697,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
 {
 	usb3_disconnect(usb3);
 	usb3_write(usb3, 0, USB3_P0_INT_ENA);
-	usb3_write(usb3, 0, USB3_PN_INT_ENA);
 	usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
 	usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
 	usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
@@ -1060,6 +1133,273 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
 	usb3_p0_xfer(usb3_ep, usb3_req);
 }
 
+static void usb3_enable_dma_pipen(struct renesas_usb3 *usb3)
+{
+	usb3_set_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
+}
+
+static void usb3_disable_dma_pipen(struct renesas_usb3 *usb3)
+{
+	usb3_clear_bit(usb3, PN_CON_DATAIF_EN, USB3_PN_CON);
+}
+
+static void usb3_enable_dma_irq(struct renesas_usb3 *usb3, int num)
+{
+	usb3_set_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
+}
+
+static void usb3_disable_dma_irq(struct renesas_usb3 *usb3, int num)
+{
+	usb3_clear_bit(usb3, DMA_INT(num), USB3_DMA_INT_ENA);
+}
+
+static u32 usb3_dma_mps_to_prd_word1(struct renesas_usb3_ep *usb3_ep)
+{
+	switch (usb3_ep->ep.maxpacket) {
+	case 8:
+		return USB3_PRD1_MPS_8;
+	case 16:
+		return USB3_PRD1_MPS_16;
+	case 32:
+		return USB3_PRD1_MPS_32;
+	case 64:
+		return USB3_PRD1_MPS_64;
+	case 512:
+		return USB3_PRD1_MPS_512;
+	case 1024:
+		return USB3_PRD1_MPS_1024;
+	default:
+		return USB3_PRD1_MPS_RESERVED;
+	}
+}
+
+static bool usb3_dma_get_setting_area(struct renesas_usb3_ep *usb3_ep,
+				      struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	struct renesas_usb3_dma *dma;
+	int i;
+	bool ret = false;
+
+	if (usb3_req->req.length > USB3_DMA_MAX_XFER_SIZE_ALL_PRDS) {
+		dev_dbg(usb3_to_dev(usb3), "%s: the length is too big (%d)\n",
+			__func__, usb3_req->req.length);
+		return false;
+	}
+
+	/* The driver doesn't handle zero-length packet via dmac */
+	if (!usb3_req->req.length)
+		return false;
+
+	if (usb3_dma_mps_to_prd_word1(usb3_ep) == USB3_PRD1_MPS_RESERVED)
+		return false;
+
+	usb3_for_each_dma(usb3, dma, i) {
+		if (dma->used)
+			continue;
+
+		if (usb_gadget_map_request(&usb3->gadget, &usb3_req->req,
+					   usb3_ep->dir_in) < 0)
+			break;
+
+		dma->used = true;
+		usb3_ep->dma = dma;
+		ret = true;
+		break;
+	}
+
+	return ret;
+}
+
+static void usb3_dma_put_setting_area(struct renesas_usb3_ep *usb3_ep,
+				      struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	int i;
+	struct renesas_usb3_dma *dma;
+
+	usb3_for_each_dma(usb3, dma, i) {
+		if (usb3_ep->dma == dma) {
+			usb_gadget_unmap_request(&usb3->gadget, &usb3_req->req,
+						 usb3_ep->dir_in);
+			dma->used = false;
+			usb3_ep->dma = NULL;
+			break;
+		}
+	}
+}
+
+static void usb3_dma_fill_prd(struct renesas_usb3_ep *usb3_ep,
+			      struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
+	u32 remain = usb3_req->req.length;
+	u32 dma = usb3_req->req.dma;
+	u32 len;
+	int i = 0;
+
+	do {
+		len = min_t(u32, remain, USB3_DMA_MAX_XFER_SIZE) &
+			    USB3_PRD1_SIZE_MASK;
+		cur_prd->word1 = usb3_dma_mps_to_prd_word1(usb3_ep) |
+				 USB3_PRD1_B_INC | len;
+		cur_prd->bap = dma;
+		remain -= len;
+		dma += len;
+		if (!remain || (i + 1) < USB3_DMA_NUM_PRD_ENTRIES)
+			break;
+
+		cur_prd++;
+		i++;
+	} while (1);
+
+	cur_prd->word1 |= USB3_PRD1_E | USB3_PRD1_INT;
+	if (usb3_ep->dir_in)
+		cur_prd->word1 |= USB3_PRD1_LST;
+}
+
+static void usb3_dma_kick_prd(struct renesas_usb3_ep *usb3_ep)
+{
+	struct renesas_usb3_dma *dma = usb3_ep->dma;
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	u32 dma_con = DMA_COM_PIPE_NO(usb3_ep->num) | DMA_CON_PRD_EN;
+
+	if (usb3_ep->dir_in)
+		dma_con |= DMA_CON_PIPE_DIR;
+
+	wmb();	/* prd entries should be in system memory here */
+
+	usb3_write(usb3, 1 << usb3_ep->num, USB3_DMA_INT_STA);
+	usb3_write(usb3, AXI_INT_PRDEN_CLR_STA(dma->num) |
+		   AXI_INT_PRDERR_STA(dma->num), USB3_AXI_INT_STA);
+
+	usb3_write(usb3, dma->prd_dma, USB3_DMA_CH0_PRD_ADR(dma->num));
+	usb3_write(usb3, dma_con, USB3_DMA_CH0_CON(dma->num));
+	usb3_enable_dma_irq(usb3, usb3_ep->num);
+}
+
+static void usb3_dma_stop_prd(struct renesas_usb3_ep *usb3_ep)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	struct renesas_usb3_dma *dma = usb3_ep->dma;
+
+	usb3_disable_dma_irq(usb3, usb3_ep->num);
+	usb3_write(usb3, 0, USB3_DMA_CH0_CON(dma->num));
+}
+
+static int usb3_dma_update_status(struct renesas_usb3_ep *usb3_ep,
+				  struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3_prd *cur_prd = usb3_ep->dma->prd;
+	struct usb_request *req = &usb3_req->req;
+	u32 remain, len;
+	int i = 0;
+	int status = 0;
+
+	rmb();	/* The controller updated prd entries */
+
+	do {
+		if (cur_prd->word1 & USB3_PRD1_D)
+			status = -EIO;
+		if (cur_prd->word1 & USB3_PRD1_E)
+			len = req->length % USB3_DMA_MAX_XFER_SIZE;
+		else
+			len = USB3_DMA_MAX_XFER_SIZE;
+		remain = cur_prd->word1 & USB3_PRD1_SIZE_MASK;
+		req->actual += len - remain;
+
+		if (cur_prd->word1 & USB3_PRD1_E ||
+		    (i + 1) < USB3_DMA_NUM_PRD_ENTRIES)
+			break;
+
+		cur_prd++;
+		i++;
+	} while (1);
+
+	return status;
+}
+
+static bool usb3_dma_try_start(struct renesas_usb3_ep *usb3_ep,
+			       struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+	if (!use_dma)
+		return false;
+
+	if (usb3_dma_get_setting_area(usb3_ep, usb3_req)) {
+		usb3_pn_stop(usb3);
+		usb3_enable_dma_pipen(usb3);
+		usb3_dma_fill_prd(usb3_ep, usb3_req);
+		usb3_dma_kick_prd(usb3_ep);
+		usb3_pn_start(usb3);
+		return true;
+	}
+
+	return false;
+}
+
+static int usb3_dma_try_stop(struct renesas_usb3_ep *usb3_ep,
+			     struct renesas_usb3_request *usb3_req)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+	unsigned long flags;
+	int status = 0;
+
+	spin_lock_irqsave(&usb3->lock, flags);
+	if (!usb3_ep->dma)
+		goto out;
+
+	if (!usb3_pn_change(usb3, usb3_ep->num))
+		usb3_disable_dma_pipen(usb3);
+	usb3_dma_stop_prd(usb3_ep);
+	status = usb3_dma_update_status(usb3_ep, usb3_req);
+	usb3_dma_put_setting_area(usb3_ep, usb3_req);
+
+out:
+	spin_unlock_irqrestore(&usb3->lock, flags);
+	return status;
+}
+
+static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
+				     struct device *dev)
+{
+	int i;
+	struct renesas_usb3_dma *dma;
+
+	usb3_for_each_dma(usb3, dma, i) {
+		if (dma->prd) {
+			dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE,
+					  dma->prd, dma->prd_dma);
+			dma->prd = NULL;
+		}
+	}
+
+	return 0;
+}
+
+static int renesas_usb3_dma_alloc_prd(struct renesas_usb3 *usb3,
+				      struct device *dev)
+{
+	int i;
+	struct renesas_usb3_dma *dma;
+
+	if (!use_dma)
+		return 0;
+
+	usb3_for_each_dma(usb3, dma, i) {
+		dma->prd = dma_alloc_coherent(dev, USB3_DMA_PRD_SIZE,
+					      &dma->prd_dma, GFP_KERNEL);
+		if (!dma->prd) {
+			renesas_usb3_dma_free_prd(usb3, dev);
+			return -ENOMEM;
+		}
+		dma->num = i + 1;
+	}
+
+	return 0;
+}
+
 static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
 			     struct renesas_usb3_request *usb3_req)
 {
@@ -1079,6 +1419,10 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
 		goto out;
 
 	usb3_ep->started = true;
+
+	if (usb3_dma_try_start(usb3_ep, usb3_req))
+		goto out;
+
 	usb3_pn_start(usb3);
 
 	if (usb3_ep->dir_in) {
@@ -1475,7 +1819,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
 				    struct renesas_usb3_request *usb3_req,
 				    int status)
 {
-	usb3_pn_stop(usb3);
+	unsigned long flags;
+
+	spin_lock_irqsave(&usb3->lock, flags);
+	if (usb3_pn_change(usb3, usb3_ep->num))
+		usb3_pn_stop(usb3);
+	spin_unlock_irqrestore(&usb3->lock, flags);
+
 	usb3_disable_pipe_irq(usb3, usb3_ep->num);
 	usb3_request_done(usb3_ep, usb3_req, status);
 
@@ -1504,30 +1854,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
 {
 	struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
 	struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+	bool done = false;
 
 	if (!usb3_req)
 		return;
 
+	spin_lock(&usb3->lock);
+	if (usb3_pn_change(usb3, num))
+		goto out;
+
 	if (usb3_ep->dir_in) {
 		/* Do not stop the IN pipe here to detect LSTTR interrupt */
 		if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
 			usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
 	} else {
 		if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
-			usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+			done = true;
 	}
+
+out:
+	/* need to unlock because usb3_request_done_pipen() locks it */
+	spin_unlock(&usb3->lock);
+
+	if (done)
+		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
 }
 
 static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
 {
 	u32 pn_int_sta;
 
-	if (usb3_pn_change(usb3, num) < 0)
+	spin_lock(&usb3->lock);
+	if (usb3_pn_change(usb3, num) < 0) {
+		spin_unlock(&usb3->lock);
 		return;
+	}
 
 	pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
 	pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
 	usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+	spin_unlock(&usb3->lock);
 	if (pn_int_sta & PN_INT_LSTTR)
 		usb3_irq_epc_pipen_lsttr(usb3, num);
 	if (pn_int_sta & PN_INT_BFRDY)
@@ -1582,12 +1948,49 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
 	}
 }
 
+static void usb3_irq_dma_int(struct renesas_usb3 *usb3, u32 dma_sta)
+{
+	struct renesas_usb3_ep *usb3_ep;
+	struct renesas_usb3_request *usb3_req;
+	int i, status;
+
+	for (i = 0; i < usb3->num_usb3_eps; i++) {
+		if (!(dma_sta & DMA_INT(i)))
+			continue;
+
+		usb3_ep = usb3_get_ep(usb3, i);
+		if (!(usb3_read(usb3, USB3_AXI_INT_STA) &
+		    AXI_INT_PRDEN_CLR_STA(usb3_ep->dma->num)))
+			continue;
+
+		usb3_req = usb3_get_request(usb3_ep);
+		status = usb3_dma_try_stop(usb3_ep, usb3_req);
+		usb3_request_done_pipen(usb3, usb3_ep, usb3_req, status);
+	}
+}
+
+static void usb3_irq_dma(struct renesas_usb3 *usb3)
+{
+	u32 dma_sta = usb3_read(usb3, USB3_DMA_INT_STA);
+
+	dma_sta &= usb3_read(usb3, USB3_DMA_INT_ENA);
+	if (dma_sta) {
+		usb3_write(usb3, dma_sta, USB3_DMA_INT_STA);
+		usb3_irq_dma_int(usb3, dma_sta);
+	}
+}
+
 static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
 {
 	struct renesas_usb3 *usb3 = _usb3;
 	irqreturn_t ret = IRQ_NONE;
 	u32 axi_int_sta = usb3_read(usb3, USB3_AXI_INT_STA);
 
+	if (axi_int_sta & AXI_INT_DMAINT) {
+		usb3_irq_dma(usb3);
+		ret = IRQ_HANDLED;
+	}
+
 	if (axi_int_sta & AXI_INT_EPCINT) {
 		usb3_irq_epc(usb3);
 		ret = IRQ_HANDLED;
@@ -1660,6 +2063,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
 
 	spin_lock_irqsave(&usb3->lock, flags);
 	if (!usb3_pn_change(usb3, usb3_ep->num)) {
+		usb3_write(usb3, 0, USB3_PN_INT_ENA);
 		usb3_write(usb3, 0, USB3_PN_RAMMAP);
 		usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
 	}
@@ -1686,6 +2090,7 @@ static int renesas_usb3_ep_disable(struct usb_ep *_ep)
 		usb3_req = usb3_get_request(usb3_ep);
 		if (!usb3_req)
 			break;
+		usb3_dma_try_stop(usb3_ep, usb3_req);
 		usb3_request_done(usb3_ep, usb3_req, -ESHUTDOWN);
 	} while (1);
 
@@ -1733,6 +2138,7 @@ static int renesas_usb3_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 	dev_dbg(usb3_to_dev(usb3), "ep_dequeue: ep%2d, %u\n", usb3_ep->num,
 		_req->length);
 
+	usb3_dma_try_stop(usb3_ep, usb3_req);
 	usb3_request_done_pipen(usb3, usb3_ep, usb3_req, -ECONNRESET);
 
 	return 0;
@@ -1799,6 +2205,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
 	/* hook up the driver */
 	usb3->driver = driver;
 
+	pm_runtime_enable(usb3_to_dev(usb3));
+	pm_runtime_get_sync(usb3_to_dev(usb3));
+
 	renesas_usb3_init_controller(usb3);
 
 	return 0;
@@ -1807,14 +2216,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
 static int renesas_usb3_stop(struct usb_gadget *gadget)
 {
 	struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
-	unsigned long flags;
 
-	spin_lock_irqsave(&usb3->lock, flags);
 	usb3->softconnect = false;
 	usb3->gadget.speed = USB_SPEED_UNKNOWN;
 	usb3->driver = NULL;
 	renesas_usb3_stop_controller(usb3);
-	spin_unlock_irqrestore(&usb3->lock, flags);
+
+	pm_runtime_put(usb3_to_dev(usb3));
+	pm_runtime_disable(usb3_to_dev(usb3));
 
 	return 0;
 }
@@ -1891,10 +2300,8 @@ static int renesas_usb3_remove(struct platform_device *pdev)
 
 	device_remove_file(&pdev->dev, &dev_attr_role);
 
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
 	usb_del_gadget_udc(&usb3->gadget);
+	renesas_usb3_dma_free_prd(usb3, &pdev->dev);
 
 	__renesas_usb3_ep_free_request(usb3->ep0_req);
 
@@ -2089,6 +2496,10 @@ static int renesas_usb3_probe(struct platform_device *pdev)
 	if (!usb3->ep0_req)
 		return -ENOMEM;
 
+	ret = renesas_usb3_dma_alloc_prd(usb3, &pdev->dev);
+	if (ret < 0)
+		goto err_alloc_prd;
+
 	ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
 	if (ret < 0)
 		goto err_add_udc;
@@ -2099,9 +2510,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
 
 	usb3->workaround_for_vbus = priv->workaround_for_vbus;
 
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
-
 	dev_info(&pdev->dev, "probed\n");
 
 	return 0;
@@ -2110,6 +2518,9 @@ static int renesas_usb3_probe(struct platform_device *pdev)
 	usb_del_gadget_udc(&usb3->gadget);
 
 err_add_udc:
+	renesas_usb3_dma_free_prd(usb3, &pdev->dev);
+
+err_alloc_prd:
 	__renesas_usb3_ep_free_request(usb3->ep0_req);
 
 	return ret;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/snps_udc_core.c
similarity index 97%
rename from drivers/usb/gadget/udc/amd5536udc.c
rename to drivers/usb/gadget/udc/snps_udc_core.c
index 4ecd2f2..38a165d 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -41,7 +41,6 @@
 #include "amd5536udc.h"
 
 static void udc_tasklet_disconnect(unsigned long);
-static void empty_req_queue(struct udc_ep *);
 static void udc_setup_endpoints(struct udc *dev);
 static void udc_soft_reset(struct udc *dev);
 static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
@@ -209,18 +208,18 @@ static void print_regs(struct udc *dev)
 	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
 		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
 			"WITHOUT desc. update)\n");
-		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
+		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
 	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
 		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
 			"WITH desc. update)\n");
-		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
+		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
 	}
 	if (use_dma && use_dma_bufferfill_mode) {
 		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
-		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
+		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
 	}
 	if (!use_dma)
-		dev_info(&dev->pdev->dev, "FIFO mode\n");
+		dev_info(dev->dev, "FIFO mode\n");
 	DBG(dev, "-------------------------------------------------------\n");
 }
 
@@ -1244,7 +1243,7 @@ udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
 }
 
 /* Empty request queue of an endpoint; caller holds spinlock */
-static void empty_req_queue(struct udc_ep *ep)
+void empty_req_queue(struct udc_ep *ep)
 {
 	struct udc_request	*req;
 
@@ -1256,6 +1255,7 @@ static void empty_req_queue(struct udc_ep *ep)
 		complete_req(ep, req, -ESHUTDOWN);
 	}
 }
+EXPORT_SYMBOL_GPL(empty_req_queue);
 
 /* Dequeues a request packet, called by gadget driver */
 static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
@@ -1623,8 +1623,11 @@ static void udc_setup_endpoints(struct udc *dev)
 /* Bringup after Connect event, initial bringup to be ready for ep0 events */
 static void usb_connect(struct udc *dev)
 {
+	/* Return if already connected */
+	if (dev->connected)
+		return;
 
-	dev_info(&dev->pdev->dev, "USB Connect\n");
+	dev_info(dev->dev, "USB Connect\n");
 
 	dev->connected = 1;
 
@@ -1641,8 +1644,11 @@ static void usb_connect(struct udc *dev)
  */
 static void usb_disconnect(struct udc *dev)
 {
+	/* Return if already disconnected */
+	if (!dev->connected)
+		return;
 
-	dev_info(&dev->pdev->dev, "USB Disconnect\n");
+	dev_info(dev->dev, "USB Disconnect\n");
 
 	dev->connected = 0;
 
@@ -1715,11 +1721,15 @@ static void udc_soft_reset(struct udc *dev)
 	/* device int. status reset */
 	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
 
-	spin_lock_irqsave(&udc_irq_spinlock, flags);
-	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
-	readl(&dev->regs->cfg);
-	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
-
+	/* Don't do this for Broadcom UDC since this is a reserved
+	 * bit.
+	 */
+	if (dev->chiprev != UDC_BCM_REV) {
+		spin_lock_irqsave(&udc_irq_spinlock, flags);
+		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+		readl(&dev->regs->cfg);
+		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
+	}
 }
 
 /* RDE timer callback to set RDE bit */
@@ -2106,7 +2116,7 @@ static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
 	}
 	/* HE event ? */
 	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
-		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
+		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);
 
 		/* clear HE */
 		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
@@ -2305,7 +2315,7 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 	if (use_dma) {
 		/* BNA ? */
 		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
-			dev_err(&dev->pdev->dev,
+			dev_err(dev->dev,
 				"BNA ep%din occurred - DESPTR = %08lx\n",
 				ep->num,
 				(unsigned long) readl(&ep->regs->desptr));
@@ -2318,7 +2328,7 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 	}
 	/* HE event ? */
 	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
-		dev_err(&dev->pdev->dev,
+		dev_err(dev->dev,
 			"HE ep%dn occurred - DESPTR = %08lx\n",
 			ep->num, (unsigned long) readl(&ep->regs->desptr));
 
@@ -2956,7 +2966,7 @@ __acquires(dev->lock)
 
 		/* link up all endpoints */
 		udc_setup_endpoints(dev);
-		dev_info(&dev->pdev->dev, "Connect: %s\n",
+		dev_info(dev->dev, "Connect: %s\n",
 			 usb_speed_string(dev->gadget.speed));
 
 		/* init ep 0 */
@@ -3097,7 +3107,7 @@ int init_dma_pools(struct udc *dev)
 	}
 
 	/* DMA setup */
-	dev->data_requests = dma_pool_create("data_requests", NULL,
+	dev->data_requests = dma_pool_create("data_requests", dev->dev,
 		sizeof(struct udc_data_dma), 0, 0);
 	if (!dev->data_requests) {
 		DBG(dev, "can't get request data pool\n");
@@ -3108,7 +3118,7 @@ int init_dma_pools(struct udc *dev)
 	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
 
 	/* dma desc for setup data */
-	dev->stp_requests = dma_pool_create("setup requests", NULL,
+	dev->stp_requests = dma_pool_create("setup requests", dev->dev,
 		sizeof(struct udc_stp_dma), 0, 0);
 	if (!dev->stp_requests) {
 		DBG(dev, "can't get stp request pool\n");
@@ -3168,24 +3178,30 @@ int udc_probe(struct udc *dev)
 	/* init registers, interrupts, ... */
 	startup_registers(dev);
 
-	dev_info(&dev->pdev->dev, "%s\n", mod_desc);
+	dev_info(dev->dev, "%s\n", mod_desc);
 
 	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
-	dev_info(&dev->pdev->dev,
-		 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
-		 tmp, dev->phys_addr, dev->chiprev,
-		 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
-	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
-	if (dev->chiprev == UDC_HSA0_REV) {
-		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
-		retval = -ENODEV;
-		goto finished;
+
+	/* Print this device info for AMD chips only */
+	if (dev->chiprev == UDC_HSA0_REV ||
+	    dev->chiprev == UDC_HSB1_REV) {
+		dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
+			 tmp, dev->phys_addr, dev->chiprev,
+			 (dev->chiprev == UDC_HSA0_REV) ?
+			 "A0" : "B1");
+		strcpy(tmp, UDC_DRIVER_VERSION_STRING);
+		if (dev->chiprev == UDC_HSA0_REV) {
+			dev_err(dev->dev, "chip revision is A0; too old\n");
+			retval = -ENODEV;
+			goto finished;
+		}
+		dev_info(dev->dev,
+			 "driver version: %s(for Geode5536 B1)\n", tmp);
 	}
-	dev_info(&dev->pdev->dev,
-		 "driver version: %s(for Geode5536 B1)\n", tmp);
+
 	udc = dev;
 
-	retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
+	retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
 					    gadget_release);
 	if (retval)
 		goto finished;
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
new file mode 100644
index 0000000..2e11f19
--- /dev/null
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -0,0 +1,344 @@
+/*
+ * snps_udc_plat.c - Synopsys UDC Platform Driver
+ *
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include "amd5536udc.h"
+
+/* description */
+#define UDC_MOD_DESCRIPTION     "Synopsys UDC platform driver"
+
+void start_udc(struct udc *udc)
+{
+	if (udc->driver) {
+		dev_info(udc->dev, "Connecting...\n");
+		udc_enable_dev_setup_interrupts(udc);
+		udc_basic_init(udc);
+		udc->connected = 1;
+	}
+}
+
+void stop_udc(struct udc *udc)
+{
+	int tmp;
+	u32 reg;
+
+	spin_lock(&udc->lock);
+
+	/* Flush the receive fifo */
+	reg = readl(&udc->regs->ctl);
+	reg |= AMD_BIT(UDC_DEVCTL_SRX_FLUSH);
+	writel(reg, &udc->regs->ctl);
+
+	reg = readl(&udc->regs->ctl);
+	reg &= ~(AMD_BIT(UDC_DEVCTL_SRX_FLUSH));
+	writel(reg, &udc->regs->ctl);
+	dev_dbg(udc->dev, "ep rx queue flushed\n");
+
+	/* Mask interrupts. This is especially required when the
+	 * UDC is connected to a DRD phy.
+	 */
+	udc_mask_unused_interrupts(udc);
+
+	/* Disconnect gadget driver */
+	if (udc->driver) {
+		spin_unlock(&udc->lock);
+		udc->driver->disconnect(&udc->gadget);
+		spin_lock(&udc->lock);
+
+		/* empty queues */
+		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
+			empty_req_queue(&udc->ep[tmp]);
+	}
+	udc->connected = 0;
+
+	spin_unlock(&udc->lock);
+	dev_info(udc->dev, "Device disconnected\n");
+}
+
+void udc_drd_work(struct work_struct *work)
+{
+	struct udc *udc;
+
+	udc = container_of(to_delayed_work(work),
+			   struct udc, drd_work);
+
+	if (udc->conn_type) {
+		dev_dbg(udc->dev, "idle -> device\n");
+		start_udc(udc);
+	} else {
+		dev_dbg(udc->dev, "device -> idle\n");
+		stop_udc(udc);
+	}
+}
+
+static int usbd_connect_notify(struct notifier_block *self,
+			       unsigned long event, void *ptr)
+{
+	struct udc *udc = container_of(self, struct udc, nb);
+
+	dev_dbg(udc->dev, "%s: event: %lu\n", __func__, event);
+
+	udc->conn_type = event;
+
+	schedule_delayed_work(&udc->drd_work, 0);
+
+	return NOTIFY_OK;
+}
+
+static int udc_plat_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct udc *udc;
+	int ret;
+
+	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+	if (!udc)
+		return -ENOMEM;
+
+	spin_lock_init(&udc->lock);
+	udc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	udc->virt_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(udc->virt_addr))
+		return PTR_ERR(udc->virt_addr);
+
+	/* udc csr registers base */
+	udc->csr = udc->virt_addr + UDC_CSR_ADDR;
+
+	/* dev registers base */
+	udc->regs = udc->virt_addr + UDC_DEVCFG_ADDR;
+
+	/* ep registers base */
+	udc->ep_regs = udc->virt_addr + UDC_EPREGS_ADDR;
+
+	/* fifo's base */
+	udc->rxfifo = (u32 __iomem *)(udc->virt_addr + UDC_RXFIFO_ADDR);
+	udc->txfifo = (u32 __iomem *)(udc->virt_addr + UDC_TXFIFO_ADDR);
+
+	udc->phys_addr = (unsigned long)res->start;
+
+	udc->irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (udc->irq <= 0) {
+		dev_err(dev, "Can't parse and map interrupt\n");
+		return -EINVAL;
+	}
+
+	udc->udc_phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
+	if (IS_ERR(udc->udc_phy)) {
+		dev_err(dev, "Failed to obtain phy from device tree\n");
+		return PTR_ERR(udc->udc_phy);
+	}
+
+	ret = phy_init(udc->udc_phy);
+	if (ret) {
+		dev_err(dev, "UDC phy init failed");
+		return ret;
+	}
+
+	ret = phy_power_on(udc->udc_phy);
+	if (ret) {
+		dev_err(dev, "UDC phy power on failed");
+		phy_exit(udc->udc_phy);
+		return ret;
+	}
+
+	/* Register for extcon if supported */
+	if (of_get_property(dev->of_node, "extcon", NULL)) {
+		udc->edev = extcon_get_edev_by_phandle(dev, 0);
+		if (IS_ERR(udc->edev)) {
+			if (PTR_ERR(udc->edev) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			dev_err(dev, "Invalid or missing extcon\n");
+			ret = PTR_ERR(udc->edev);
+			goto exit_phy;
+		}
+
+		udc->nb.notifier_call = usbd_connect_notify;
+		ret = extcon_register_notifier(udc->edev, EXTCON_USB,
+					       &udc->nb);
+		if (ret < 0) {
+			dev_err(dev, "Can't register extcon device\n");
+			goto exit_phy;
+		}
+
+		ret = extcon_get_cable_state_(udc->edev, EXTCON_USB);
+		if (ret < 0) {
+			dev_err(dev, "Can't get cable state\n");
+			goto exit_extcon;
+		} else if (ret) {
+			udc->conn_type = ret;
+		}
+		INIT_DELAYED_WORK(&udc->drd_work, udc_drd_work);
+	}
+
+	/* init dma pools */
+	if (use_dma) {
+		ret = init_dma_pools(udc);
+		if (ret != 0)
+			goto exit_extcon;
+	}
+
+	ret = devm_request_irq(dev, udc->irq, udc_irq, IRQF_SHARED,
+			       "snps-udc", udc);
+	if (ret < 0) {
+		dev_err(dev, "Request irq %d failed for UDC\n", udc->irq);
+		goto exit_dma;
+	}
+
+	platform_set_drvdata(pdev, udc);
+	udc->chiprev = UDC_BCM_REV;
+
+	if (udc_probe(udc)) {
+		ret = -ENODEV;
+		goto exit_dma;
+	}
+	dev_info(dev, "Synopsys UDC platform driver probe successful\n");
+
+	return 0;
+
+exit_dma:
+	if (use_dma)
+		free_dma_pools(udc);
+exit_extcon:
+	if (udc->edev)
+		extcon_unregister_notifier(udc->edev, EXTCON_USB, &udc->nb);
+exit_phy:
+	if (udc->udc_phy) {
+		phy_power_off(udc->udc_phy);
+		phy_exit(udc->udc_phy);
+	}
+	return ret;
+}
+
+static int udc_plat_remove(struct platform_device *pdev)
+{
+	struct udc *dev;
+
+	dev = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&dev->gadget);
+	/* gadget driver must not be registered */
+	if (WARN_ON(dev->driver))
+		return 0;
+
+	/* dma pool cleanup */
+	free_dma_pools(dev);
+
+	udc_remove(dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	if (dev->drd_wq) {
+		flush_workqueue(dev->drd_wq);
+		destroy_workqueue(dev->drd_wq);
+	}
+
+	phy_power_off(dev->udc_phy);
+	phy_exit(dev->udc_phy);
+	extcon_unregister_notifier(dev->edev, EXTCON_USB, &dev->nb);
+
+	dev_info(&pdev->dev, "Synopsys UDC platform driver removed\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int udc_plat_suspend(struct device *dev)
+{
+	struct udc *udc;
+
+	udc = dev_get_drvdata(dev);
+	stop_udc(udc);
+
+	if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+		dev_dbg(udc->dev, "device -> idle\n");
+		stop_udc(udc);
+	}
+	phy_power_off(udc->udc_phy);
+	phy_exit(udc->udc_phy);
+
+	return 0;
+}
+
+static int udc_plat_resume(struct device *dev)
+{
+	struct udc *udc;
+	int ret;
+
+	udc = dev_get_drvdata(dev);
+
+	ret = phy_init(udc->udc_phy);
+	if (ret) {
+		dev_err(udc->dev, "UDC phy init failure");
+		return ret;
+	}
+
+	ret = phy_power_on(udc->udc_phy);
+	if (ret) {
+		dev_err(udc->dev, "UDC phy power on failure");
+		phy_exit(udc->udc_phy);
+		return ret;
+	}
+
+	if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+		dev_dbg(udc->dev, "idle -> device\n");
+		start_udc(udc);
+	}
+
+	return 0;
+}
+static const struct dev_pm_ops udc_plat_pm_ops = {
+	.suspend	= udc_plat_suspend,
+	.resume		= udc_plat_resume,
+};
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id of_udc_match[] = {
+	{ .compatible = "brcm,ns2-udc", },
+	{ .compatible = "brcm,cygnus-udc", },
+	{ .compatible = "brcm,iproc-udc", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, of_udc_match);
+#endif
+
+static struct platform_driver udc_plat_driver = {
+	.probe		= udc_plat_probe,
+	.remove		= udc_plat_remove,
+	.driver		= {
+		.name	= "snps-udc-plat",
+		.of_match_table = of_match_ptr(of_udc_match),
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &udc_plat_pm_ops,
+#endif
+	},
+};
+module_platform_driver(udc_plat_driver);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 588e253..de207a9 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1151,7 +1151,7 @@ static int xudc_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 			break;
 	}
 	if (&req->usb_req != _req) {
-		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		spin_unlock_irqrestore(&udc->lock, flags);
 		return -EINVAL;
 	}
 	xudc_done(ep, req, -ECONNRESET);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ababb91..fa5692d 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -473,8 +473,12 @@
 config USB_OHCI_HCD_OMAP3
 	tristate "OHCI support for OMAP3 and later chips"
 	depends on (ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5)
+	select USB_OHCI_HCD_PLATFORM
 	default y
-	---help---
+	help
+	  This option is deprecated now and the driver was removed; use
+	  USB_OHCI_HCD_PLATFORM instead.
+
 	  Enables support for the on-chip OHCI controller on
 	  OMAP3 and later chips.
 
@@ -627,7 +631,11 @@
 
 config USB_UHCI_PLATFORM
 	bool
-	default y if ARCH_VT8500
+	default y if (ARCH_VT8500 || ARCH_ASPEED)
+
+config USB_UHCI_ASPEED
+       bool
+       default y if ARCH_ASPEED
 
 config USB_UHCI_BIG_ENDIAN_MMIO
 	bool
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index c77b0a3..cf2691f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -52,7 +52,6 @@
 obj-$(CONFIG_USB_OHCI_HCD_PLATFORM)	+= ohci-platform.o
 obj-$(CONFIG_USB_OHCI_EXYNOS)	+= ohci-exynos.o
 obj-$(CONFIG_USB_OHCI_HCD_OMAP1)	+= ohci-omap.o
-obj-$(CONFIG_USB_OHCI_HCD_OMAP3)	+= ohci-omap3.o
 obj-$(CONFIG_USB_OHCI_HCD_SPEAR)	+= ohci-spear.o
 obj-$(CONFIG_USB_OHCI_HCD_STI)	+= ohci-st.o
 obj-$(CONFIG_USB_OHCI_HCD_AT91)	+= ohci-at91.o
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 7a603f6..26b6411 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -279,7 +279,9 @@ static int exynos_ehci_resume(struct device *dev)
 	struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
 	int ret;
 
-	clk_prepare_enable(exynos_ehci->clk);
+	ret = clk_prepare_enable(exynos_ehci->clk);
+	if (ret)
+		return ret;
 
 	ret = exynos_ehci_phy_enable(dev);
 	if (ret) {
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 980a6b3..6bc6304 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1105,7 +1105,7 @@ iso_stream_init(
 		addr |= epnum << 8;
 		addr |= dev->devnum;
 		stream->ps.usecs = HS_USECS_ISO(maxp);
-		think_time = dev->tt ? dev->tt->think_time : 0;
+		think_time = dev->tt->think_time;
 		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
 				dev->speed, is_input, 1, maxp));
 		hs_transfers = max(1u, (maxp + 187) / 188);
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 3893b5b..0b6cdb7 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -424,7 +424,7 @@ static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
 	 */
 	now = ktime_get();
 	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
-		if (now >= ehci->hr_timeouts[e])
+		if (ktime_compare(now, ehci->hr_timeouts[e]) >= 0)
 			event_handlers[e](ehci);
 		else
 			ehci_enable_event(ehci, e, false);
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index ced08dc..457cc65 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1380,7 +1380,7 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
 	 */
 	now = ktime_get();
 	for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
-		if (now >= fotg210->hr_timeouts[e])
+		if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
 			event_handlers[e](fotg210);
 		else
 			fotg210_enable_event(fotg210, e, false);
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
deleted file mode 100644
index ec15aeb..0000000
--- a/drivers/usb/host/ohci-omap3.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * ohci-omap3.c - driver for OHCI on OMAP3 and later processors
- *
- * Bus Glue for OMAP3 USBHOST 3 port OHCI controller
- * This controller is also used in later OMAPs and AM35x chips
- *
- * Copyright (C) 2007-2010 Texas Instruments, Inc.
- * Author: Vikram Pandita <vikram.pandita@ti.com>
- * Author: Anand Gadiyar <gadiyar@ti.com>
- * Author: Keshava Munegowda <keshava_mgowda@ti.com>
- *
- * Based on ehci-omap.c and some other ohci glue layers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- * TODO (last updated Feb 27, 2011):
- *	- add kernel-doc
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/usb/otg.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-
-#include "ohci.h"
-
-#define DRIVER_DESC "OHCI OMAP3 driver"
-
-static const char hcd_name[] = "ohci-omap3";
-static struct hc_driver __read_mostly ohci_omap3_hc_driver;
-
-/*
- * configure so an HC device and id are always provided
- * always called with process context; sleeping is OK
- */
-
-/**
- * ohci_hcd_omap3_probe - initialize OMAP-based HCDs
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int ohci_hcd_omap3_probe(struct platform_device *pdev)
-{
-	struct device		*dev = &pdev->dev;
-	struct ohci_hcd		*ohci;
-	struct usb_hcd		*hcd = NULL;
-	void __iomem		*regs = NULL;
-	struct resource		*res;
-	int			ret;
-	int			irq;
-
-	if (usb_disabled())
-		return -ENODEV;
-
-	if (!dev->parent) {
-		dev_err(dev, "Missing parent device\n");
-		return -ENODEV;
-	}
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "OHCI irq failed\n");
-		return -ENODEV;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "UHH OHCI get resource failed\n");
-		return -ENOMEM;
-	}
-
-	regs = ioremap(res->start, resource_size(res));
-	if (!regs) {
-		dev_err(dev, "UHH OHCI ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * Right now device-tree probed devices don't get dma_mask set.
-	 * Since shared usb code relies on it, set it here for now.
-	 * Once we have dma capability bindings this can go away.
-	 */
-	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
-	if (ret)
-		goto err_io;
-
-	ret = -ENODEV;
-	hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
-			dev_name(dev));
-	if (!hcd) {
-		dev_err(dev, "usb_create_hcd failed\n");
-		goto err_io;
-	}
-
-	hcd->rsrc_start = res->start;
-	hcd->rsrc_len = resource_size(res);
-	hcd->regs =  regs;
-
-	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
-
-	ohci = hcd_to_ohci(hcd);
-	/*
-	 * RemoteWakeupConnected has to be set explicitly before
-	 * calling ohci_run. The reset value of RWC is 0.
-	 */
-	ohci->hc_control = OHCI_CTRL_RWC;
-
-	ret = usb_add_hcd(hcd, irq, 0);
-	if (ret) {
-		dev_dbg(dev, "failed to add hcd with err %d\n", ret);
-		goto err_add_hcd;
-	}
-	device_wakeup_enable(hcd->self.controller);
-
-	return 0;
-
-err_add_hcd:
-	pm_runtime_put_sync(dev);
-	usb_put_hcd(hcd);
-
-err_io:
-	iounmap(regs);
-
-	return ret;
-}
-
-/*
- * may be called without controller electrically present
- * may be called with controller, bus, and devices active
- */
-
-/**
- * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs
- * @pdev: USB Host Controller being removed
- *
- * Reverses the effect of ohci_hcd_omap3_probe(), first invoking
- * the HCD's stop() method.  It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- */
-static int ohci_hcd_omap3_remove(struct platform_device *pdev)
-{
-	struct device *dev	= &pdev->dev;
-	struct usb_hcd *hcd	= dev_get_drvdata(dev);
-
-	iounmap(hcd->regs);
-	usb_remove_hcd(hcd);
-	pm_runtime_put_sync(dev);
-	pm_runtime_disable(dev);
-	usb_put_hcd(hcd);
-	return 0;
-}
-
-static const struct of_device_id omap_ohci_dt_ids[] = {
-	{ .compatible = "ti,ohci-omap3" },
-	{ }
-};
-
-MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids);
-
-static struct platform_driver ohci_hcd_omap3_driver = {
-	.probe		= ohci_hcd_omap3_probe,
-	.remove		= ohci_hcd_omap3_remove,
-	.shutdown	= usb_hcd_platform_shutdown,
-	.driver		= {
-		.name	= "ohci-omap3",
-		.of_match_table = omap_ohci_dt_ids,
-	},
-};
-
-static int __init ohci_omap3_init(void)
-{
-	if (usb_disabled())
-		return -ENODEV;
-
-	pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
-	ohci_init_driver(&ohci_omap3_hc_driver, NULL);
-	return platform_driver_register(&ohci_hcd_omap3_driver);
-}
-module_init(ohci_omap3_init);
-
-static void __exit ohci_omap3_cleanup(void)
-{
-	platform_driver_unregister(&ohci_hcd_omap3_driver);
-}
-module_exit(ohci_omap3_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_ALIAS("platform:ohci-omap3");
-MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 6368fce..61fe2b9 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/usb/ohci_pdriver.h>
 #include <linux/usb.h>
@@ -163,6 +164,10 @@ static int ohci_platform_probe(struct platform_device *dev)
 		if (of_property_read_bool(dev->dev.of_node, "no-big-frame-no"))
 			ohci->flags |= OHCI_QUIRK_FRAME_NO;
 
+		if (of_property_read_bool(dev->dev.of_node,
+					  "remote-wakeup-connected"))
+			ohci->hc_control = OHCI_CTRL_RWC;
+
 		of_property_read_u32(dev->dev.of_node, "num-ports",
 				     &ohci->num_ports);
 
@@ -242,6 +247,8 @@ static int ohci_platform_probe(struct platform_device *dev)
 	}
 #endif
 
+	pm_runtime_set_active(&dev->dev);
+	pm_runtime_enable(&dev->dev);
 	if (pdata->power_on) {
 		err = pdata->power_on(dev);
 		if (err < 0)
@@ -271,6 +278,7 @@ static int ohci_platform_probe(struct platform_device *dev)
 	if (pdata->power_off)
 		pdata->power_off(dev);
 err_reset:
+	pm_runtime_disable(&dev->dev);
 	while (--rst >= 0)
 		reset_control_assert(priv->resets[rst]);
 err_put_clks:
@@ -292,6 +300,7 @@ static int ohci_platform_remove(struct platform_device *dev)
 	struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
 	int clk, rst;
 
+	pm_runtime_get_sync(&dev->dev);
 	usb_remove_hcd(hcd);
 
 	if (pdata->power_off)
@@ -305,6 +314,9 @@ static int ohci_platform_remove(struct platform_device *dev)
 
 	usb_put_hcd(hcd);
 
+	pm_runtime_put_sync(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
 	if (pdata == &ohci_platform_defaults)
 		dev->dev.platform_data = NULL;
 
@@ -350,6 +362,7 @@ static int ohci_platform_resume(struct device *dev)
 static const struct of_device_id ohci_platform_ids[] = {
 	{ .compatible = "generic-ohci", },
 	{ .compatible = "cavium,octeon-6335-ohci", },
+	{ .compatible = "ti,ohci-omap3", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ohci_platform_ids);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 79efde8f..21c010f 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -274,14 +274,16 @@ extern void pxa27x_clear_otgph(void);
 
 static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
 {
-	int retval = 0;
+	int retval;
 	struct pxaohci_platform_data *inf;
 	uint32_t uhchr;
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
 
 	inf = dev_get_platdata(dev);
 
-	clk_prepare_enable(pxa_ohci->clk);
+	retval = clk_prepare_enable(pxa_ohci->clk);
+	if (retval)
+		return retval;
 
 	pxa27x_reset_hc(pxa_ohci);
 
@@ -296,8 +298,10 @@ static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
 	if (inf->init)
 		retval = inf->init(dev);
 
-	if (retval < 0)
+	if (retval < 0) {
+		clk_disable_unprepare(pxa_ohci->clk);
 		return retval;
+	}
 
 	if (cpu_is_pxa3xx())
 		pxa3xx_u2d_start_hc(&hcd->self);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 94b1501..c3267a7 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -265,9 +265,13 @@ static void configure_hc(struct uhci_hcd *uhci)
 
 static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
 {
-	/* If we have to ignore overcurrent events then almost by definition
-	 * we can't depend on resume-detect interrupts. */
-	if (ignore_oc)
+	/*
+	 * If we have to ignore overcurrent events then almost by definition
+	 * we can't depend on resume-detect interrupts.
+	 *
+	 * Those interrupts also don't seem to work on ASpeed SoCs.
+	 */
+	if (ignore_oc || uhci_is_aspeed(uhci))
 		return 1;
 
 	return uhci->resume_detect_interrupts_are_broken ?
@@ -384,6 +388,13 @@ static void start_rh(struct uhci_hcd *uhci)
 {
 	uhci->is_stopped = 0;
 
+	/*
+	 * Clear stale status bits on Aspeed as we get a stale HCH
+	 * which causes problems later on
+	 */
+	if (uhci_is_aspeed(uhci))
+		uhci_writew(uhci, uhci_readw(uhci, USBSTS), USBSTS);
+
 	/* Mark it configured and running with a 64-byte max packet.
 	 * All interrupts are enabled, even though RESUME won't do anything.
 	 */
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 7fa318a..91b22b2 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -48,6 +48,8 @@
 /* USB port status and control registers */
 #define USBPORTSC1	16
 #define USBPORTSC2	18
+#define USBPORTSC3	20
+#define USBPORTSC4	22
 #define   USBPORTSC_CCS		0x0001	/* Current Connect Status
 					 * ("device present") */
 #define   USBPORTSC_CSC		0x0002	/* Connect Status Change */
@@ -427,6 +429,7 @@ struct uhci_hcd {
 	unsigned int wait_for_hp:1;		/* Wait for HP port reset */
 	unsigned int big_endian_mmio:1;		/* Big endian registers */
 	unsigned int big_endian_desc:1;		/* Big endian descriptors */
+	unsigned int is_aspeed:1;		/* Aspeed impl. workarounds */
 
 	/* Support for port suspend/resume/reset */
 	unsigned long port_c_suspend;		/* Bit-arrays of ports */
@@ -490,6 +493,12 @@ struct urb_priv {
 #define PCI_VENDOR_ID_GENESYS		0x17a0
 #define PCI_DEVICE_ID_GL880S_UHCI	0x8083
 
+/* Aspeed SoC needs some quirks */
+static inline bool uhci_is_aspeed(const struct uhci_hcd *uhci)
+{
+	return IS_ENABLED(CONFIG_USB_UHCI_ASPEED) && uhci->is_aspeed;
+}
+
 /*
  * Functions used to access controller registers. The UCHI spec says that host
  * controller I/O registers are mapped into PCI I/O space. For non-PCI hosts
@@ -545,10 +554,42 @@ static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
 #define uhci_big_endian_mmio(u)		0
 #endif
 
+static inline int uhci_aspeed_reg(unsigned int reg)
+{
+	switch (reg) {
+	case USBCMD:
+		return 00;
+	case USBSTS:
+		return 0x04;
+	case USBINTR:
+		return 0x08;
+	case USBFRNUM:
+		return 0x80;
+	case USBFLBASEADD:
+		return 0x0c;
+	case USBSOF:
+		return 0x84;
+	case USBPORTSC1:
+		return 0x88;
+	case USBPORTSC2:
+		return 0x8c;
+	case USBPORTSC3:
+		return 0x90;
+	case USBPORTSC4:
+		return 0x94;
+	default:
+		pr_warn("UHCI: Unsupported register 0x%02x on Aspeed\n", reg);
+		/* Return an unimplemented register */
+		return 0x10;
+	}
+}
+
 static inline u32 uhci_readl(const struct uhci_hcd *uhci, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		return inl(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		return readl_be(uhci->regs + reg);
@@ -561,6 +602,8 @@ static inline void uhci_writel(const struct uhci_hcd *uhci, u32 val, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		outl(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		writel_be(val, uhci->regs + reg);
@@ -573,6 +616,8 @@ static inline u16 uhci_readw(const struct uhci_hcd *uhci, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		return inw(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		return readw_be(uhci->regs + reg);
@@ -585,6 +630,8 @@ static inline void uhci_writew(const struct uhci_hcd *uhci, u16 val, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		outw(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		writew_be(val, uhci->regs + reg);
@@ -597,6 +644,8 @@ static inline u8 uhci_readb(const struct uhci_hcd *uhci, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		return inb(uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		return readl(uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		return readb_be(uhci->regs + reg);
@@ -609,6 +658,8 @@ static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
 {
 	if (uhci_has_pci_registers(uhci))
 		outb(val, uhci->io_addr + reg);
+	else if (uhci_is_aspeed(uhci))
+		writel(val, uhci->regs + uhci_aspeed_reg(reg));
 #ifdef CONFIG_USB_UHCI_BIG_ENDIAN_MMIO
 	else if (uhci_big_endian_mmio(uhci))
 		writeb_be(val, uhci->regs + reg);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 02260cf..49effdc 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -131,7 +131,7 @@ static int uhci_pci_init(struct usb_hcd *hcd)
 
 	/* Intel controllers use non-PME wakeup signalling */
 	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
-		device_set_run_wake(uhci_dev(uhci), 1);
+		device_set_wakeup_capable(uhci_dev(uhci), true);
 
 	/* Set up pointers to PCI-specific functions */
 	uhci->reset_hc = uhci_pci_reset_hc;
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 32a6f3d..1b4e086 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -15,7 +15,9 @@ static int uhci_platform_init(struct usb_hcd *hcd)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 
-	uhci->rh_numports = uhci_count_ports(hcd);
+	/* Probe number of ports if not already provided by DT */
+	if (!uhci->rh_numports)
+		uhci->rh_numports = uhci_count_ports(hcd);
 
 	/* Set up pointers to generic functions */
 	uhci->reset_hc = uhci_generic_reset_hc;
@@ -63,6 +65,7 @@ static const struct hc_driver uhci_platform_hc_driver = {
 
 static int uhci_hcd_platform_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	struct usb_hcd *hcd;
 	struct uhci_hcd	*uhci;
 	struct resource *res;
@@ -98,6 +101,23 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
 
 	uhci->regs = hcd->regs;
 
+	/* Grab some things from the device-tree */
+	if (np) {
+		u32 num_ports;
+
+		if (of_property_read_u32(np, "#ports", &num_ports) == 0) {
+			uhci->rh_numports = num_ports;
+			dev_info(&pdev->dev,
+				"Detected %d ports from device-tree\n",
+				num_ports);
+		}
+		if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
+		    of_device_is_compatible(np, "aspeed,ast2500-uhci")) {
+			uhci->is_aspeed = 1;
+			dev_info(&pdev->dev,
+				 "Enabled Aspeed implementation workarounds\n");
+		}
+	}
 	ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
 	if (ret)
 		goto err_rmr;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1f1687e..2a82c92 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -407,64 +407,17 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	return NULL;
 }
 
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index)
 {
-	int rings_cached;
-
-	rings_cached = virt_dev->num_rings_cached;
-	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
-		virt_dev->ring_cache[rings_cached] =
-			virt_dev->eps[ep_index].ring;
-		virt_dev->num_rings_cached++;
-		xhci_dbg(xhci, "Cached old ring, "
-				"%d ring%s cached\n",
-				virt_dev->num_rings_cached,
-				(virt_dev->num_rings_cached > 1) ? "s" : "");
-	} else {
-		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
-		xhci_dbg(xhci, "Ring cache full (%d rings), "
-				"freeing ring\n",
-				virt_dev->num_rings_cached);
-	}
+	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
 	virt_dev->eps[ep_index].ring = NULL;
 }
 
-/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
- * pointers to the beginning of the ring.
- */
-static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-			struct xhci_ring *ring, unsigned int cycle_state,
-			enum xhci_ring_type type)
-{
-	struct xhci_segment	*seg = ring->first_seg;
-	int i;
-
-	do {
-		memset(seg->trbs, 0,
-				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
-		if (cycle_state == 0) {
-			for (i = 0; i < TRBS_PER_SEGMENT; i++)
-				seg->trbs[i].link.control |=
-					cpu_to_le32(TRB_CYCLE);
-		}
-		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, type);
-		seg = seg->next;
-	} while (seg != ring->first_seg);
-	ring->type = type;
-	xhci_initialize_ring_info(ring, cycle_state);
-	/* td list should be empty since all URBs have been cancelled,
-	 * but just in case...
-	 */
-	INIT_LIST_HEAD(&ring->td_list);
-}
-
 /*
  * Expand an existing ring.
- * Look for a cached ring or allocate a new ring which has same segment numbers
- * and link the two rings.
+ * Allocate a new ring which has the same segment numbers and link the two rings.
  */
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 				unsigned int num_trbs, gfp_t flags)
@@ -968,12 +921,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 	/* If necessary, update the number of active TTs on this root port */
 	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 
-	if (dev->ring_cache) {
-		for (i = 0; i < dev->num_rings_cached; i++)
-			xhci_ring_free(xhci, dev->ring_cache[i]);
-		kfree(dev->ring_cache);
-	}
-
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -1062,14 +1009,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->eps[0].ring)
 		goto fail;
 
-	/* Allocate pointers to the ring cache */
-	dev->ring_cache = kzalloc(
-			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
-			flags);
-	if (!dev->ring_cache)
-		goto fail;
-	dev->num_rings_cached = 0;
-
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
@@ -1537,17 +1476,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring) {
-		/* Attempt to use the ring cache */
-		if (virt_dev->num_rings_cached == 0)
-			return -ENOMEM;
-		virt_dev->num_rings_cached--;
-		virt_dev->eps[ep_index].new_ring =
-			virt_dev->ring_cache[virt_dev->num_rings_cached];
-		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-					1, ring_type);
-	}
+	if (!virt_dev->eps[ep_index].new_ring)
+		return -ENOMEM;
+
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
 
@@ -2119,11 +2050,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 {
 	u32 temp, port_offset, port_count;
 	int i;
-	u8 major_revision;
+	u8 major_revision, minor_revision;
 	struct xhci_hub *rhub;
 
 	temp = readl(addr);
 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+	minor_revision = XHCI_EXT_PORT_MINOR(temp);
 
 	if (major_revision == 0x03) {
 		rhub = &xhci->usb3_rhub;
@@ -2137,7 +2069,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		return;
 	}
 	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
-	rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
+
+	if (rhub->min_rev < minor_revision)
+		rhub->min_rev = minor_revision;
 
 	/* Port offset and count in the third dword, see section 7.2 */
 	temp = readl(addr + 2);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fcf1f3f..783e668 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
 			pdev->device == 0x1042)
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+			pdev->device == 0x1142)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
 	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
 		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
@@ -213,13 +216,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
-	static const u8 intel_dsm_uuid[] = {
-		0xb7, 0x0c, 0x34, 0xac,	0x01, 0xe9, 0xbf, 0x45,
-		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
-	};
+	static const guid_t intel_dsm_guid =
+		GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
+			  0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
 	union acpi_object *obj;
 
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
 				NULL);
 	ACPI_FREE(obj);
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 03f63f5..c50c902 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -480,10 +480,34 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 	return NULL;
 }
 
+
+/*
+ * Get the hw dequeue pointer xHC stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The lowest four bits of the returned hw_dequeue carry the cycle state
+ * and a possible stream context type.
+ */
+static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
+			   unsigned int ep_index, unsigned int stream_id)
+{
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_stream_ctx *st_ctx;
+	struct xhci_virt_ep *ep;
+
+	ep = &vdev->eps[ep_index];
+
+	if (ep->ep_state & EP_HAS_STREAMS) {
+		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+		return le64_to_cpu(st_ctx->stream_ring);
+	}
+	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+	return le64_to_cpu(ep_ctx->deq);
+}
+
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
- * dequeue pointer, and new consumer cycle state in state.
+ * dequeue pointer, stream id, and new consumer cycle state in state.
  * Update our internal representation of the ring's dequeue pointer.
  *
  * We do this in three jumps:
@@ -521,24 +545,15 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 				stream_id);
 		return;
 	}
-
 	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 			"Finding endpoint context");
-	/* 4.6.9 the css flag is written to the stream context for streams */
-	if (ep->ep_state & EP_HAS_STREAMS) {
-		struct xhci_stream_ctx *ctx =
-			&ep->stream_info->stream_ctx_array[stream_id];
-		hw_dequeue = le64_to_cpu(ctx->stream_ring);
-	} else {
-		struct xhci_ep_ctx *ep_ctx
-			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-		hw_dequeue = le64_to_cpu(ep_ctx->deq);
-	}
 
+	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
 	new_seg = ep_ring->deq_seg;
 	new_deq = ep_ring->dequeue;
 	state->new_cycle_state = hw_dequeue & 0x1;
+	state->stream_id = stream_id;
 
 	/*
 	 * We want to find the pointer, segment and cycle state of the new trb
@@ -691,7 +706,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 	struct xhci_td *last_unlinked_td;
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_virt_device *vdev;
-
+	u64 hw_deq;
 	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
@@ -715,7 +730,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 
 	if (list_empty(&ep->cancelled_td_list)) {
 		xhci_stop_watchdog_timer_in_irq(xhci, ep);
-		ep->stopped_td = NULL;
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
@@ -753,12 +767,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
 		 */
-		if (cur_td == ep->stopped_td)
+		hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
+					 cur_td->urb->stream_id);
+		hw_deq &= ~0xf;
+
+		if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
+			      cur_td->last_trb, hw_deq, false)) {
 			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
-					cur_td->urb->stream_id,
-					cur_td, &deq_state);
-		else
+						    cur_td->urb->stream_id,
+						    cur_td, &deq_state);
+		} else {
 			td_to_noop(xhci, ep_ring, cur_td, false);
+		}
+
 remove_finished_td:
 		/*
 		 * The event handler won't see a completion for this TD anymore,
@@ -773,15 +794,13 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
 		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
-				ep->stopped_td->urb->stream_id, &deq_state);
+					     &deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 	}
 
-	ep->stopped_td = NULL;
-
 	/*
 	 * Drop the lock and complete the URBs in the cancelled TD list.
 	 * New TDs to be cancelled might be added to the end of the list before
@@ -1800,7 +1819,8 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
-		struct xhci_td *td, union xhci_trb *ep_trb)
+		struct xhci_td *td, union xhci_trb *ep_trb,
+		enum xhci_ep_reset_type reset_type)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	struct xhci_command *command;
@@ -1809,12 +1829,11 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		return;
 
 	ep->ep_state |= EP_HALTED;
-	ep->stopped_stream = stream_id;
 
-	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
-	xhci_cleanup_stalled_ring(xhci, ep_index, td);
+	xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
 
-	ep->stopped_stream = 0;
+	if (reset_type == EP_HARD_RESET)
+		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
 
 	xhci_ring_cmd_db(xhci);
 }
@@ -1909,7 +1928,7 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 
 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
-	struct xhci_virt_ep *ep, int *status, bool skip)
+	struct xhci_virt_ep *ep, int *status)
 {
 	struct xhci_virt_device *xdev;
 	struct xhci_ep_ctx *ep_ctx;
@@ -1925,9 +1944,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
-	if (skip)
-		goto td_cleanup;
-
 	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
 			trb_comp_code == COMP_STOPPED ||
 			trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
@@ -1935,7 +1951,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		 * stopped TDs.  A stopped TD may be restarted, so don't update
 		 * the ring dequeue pointer or take this TD off any lists yet.
 		 */
-		ep->stopped_td = td;
 		return 0;
 	}
 	if (trb_comp_code == COMP_STALL_ERROR ||
@@ -1947,7 +1962,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		 * The class driver clears the device side halt later.
 		 */
 		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
-					ep_ring->stream_id, td, ep_trb);
+					ep_ring->stream_id, td, ep_trb,
+					EP_HARD_RESET);
 	} else {
 		/* Update ring dequeue pointer */
 		while (ep_ring->dequeue != td->last_trb)
@@ -1955,7 +1971,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		inc_deq(xhci, ep_ring);
 	}
 
-td_cleanup:
 	return xhci_td_cleanup(xhci, td, ep_ring, status);
 }
 
@@ -2075,7 +2090,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		td->urb->actual_length = requested;
 
 finish_td:
-	return finish_td(xhci, td, ep_trb, event, ep, status, false);
+	return finish_td(xhci, td, ep_trb, event, ep, status);
 }
 
 /*
@@ -2162,7 +2177,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 
 	td->urb->actual_length += frame->actual_length;
 
-	return finish_td(xhci, td, ep_trb, event, ep, status, false);
+	return finish_td(xhci, td, ep_trb, event, ep, status);
 }
 
 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2190,7 +2205,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		inc_deq(xhci, ep_ring);
 	inc_deq(xhci, ep_ring);
 
-	return finish_td(xhci, td, NULL, event, ep, status, true);
+	return xhci_td_cleanup(xhci, td, ep_ring, status);
 }
 
 /*
@@ -2252,7 +2267,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			  remaining);
 		td->urb->actual_length = 0;
 	}
-	return finish_td(xhci, td, ep_trb, event, ep, status, false);
+	return finish_td(xhci, td, ep_trb, event, ep, status);
 }
 
 /*
@@ -2280,39 +2295,46 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	bool handling_skipped_tds = false;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	ep_trb_dma = le64_to_cpu(event->buffer);
+
 	xdev = xhci->devs[slot_id];
 	if (!xdev) {
 		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
 			 slot_id);
-		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
-			 (unsigned long long) xhci_trb_virt_to_dma(
-				 xhci->event_ring->deq_seg,
-				 xhci->event_ring->dequeue),
-			 lower_32_bits(le64_to_cpu(event->buffer)),
-			 upper_32_bits(le64_to_cpu(event->buffer)),
-			 le32_to_cpu(event->transfer_len),
-			 le32_to_cpu(event->flags));
-		return -ENODEV;
+		goto err_out;
 	}
 
-	/* Endpoint ID is 1 based, our index is zero based */
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
 	ep = &xdev->eps[ep_index];
-	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	if (!ep_ring ||  GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
+
+	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
 		xhci_err(xhci,
-			 "ERROR Transfer event for disabled endpoint slot %u ep %u or incorrect stream ring\n",
+			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
 			  slot_id, ep_index);
-		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
-			 (unsigned long long) xhci_trb_virt_to_dma(
-				 xhci->event_ring->deq_seg,
-				 xhci->event_ring->dequeue),
-			 lower_32_bits(le64_to_cpu(event->buffer)),
-			 upper_32_bits(le64_to_cpu(event->buffer)),
-			 le32_to_cpu(event->transfer_len),
-			 le32_to_cpu(event->flags));
-		return -ENODEV;
+		goto err_out;
+	}
+
+	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
+	if (!ep_ring) {
+		switch (trb_comp_code) {
+		case COMP_STALL_ERROR:
+		case COMP_USB_TRANSACTION_ERROR:
+		case COMP_INVALID_STREAM_TYPE_ERROR:
+		case COMP_INVALID_STREAM_ID_ERROR:
+			xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
+						     NULL, NULL, EP_SOFT_RESET);
+			goto cleanup;
+		case COMP_RING_UNDERRUN:
+		case COMP_RING_OVERRUN:
+			goto cleanup;
+		default:
+			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+				 slot_id, ep_index);
+			goto err_out;
+		}
 	}
 
 	/* Count current td numbers if ep->skip is set */
@@ -2321,8 +2343,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			td_num++;
 	}
 
-	ep_trb_dma = le64_to_cpu(event->buffer);
-	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	/* Look for common error cases */
 	switch (trb_comp_code) {
 	/* Skip codes that require special handling depending on
@@ -2339,6 +2359,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					      slot_id, ep_index);
 	case COMP_SHORT_PACKET:
 		break;
+	/* Completion codes for endpoint stopped state */
 	case COMP_STOPPED:
 		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
 			 slot_id, ep_index);
@@ -2353,18 +2374,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 "Stopped with short packet transfer detected for slot %u ep %u\n",
 			 slot_id, ep_index);
 		break;
+	/* Completion codes for endpoint halted state */
 	case COMP_STALL_ERROR:
 		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
 			 ep_index);
 		ep->ep_state |= EP_HALTED;
 		status = -EPIPE;
 		break;
-	case COMP_TRB_ERROR:
-		xhci_warn(xhci,
-			  "WARN: TRB error for slot %u ep %u on endpoint\n",
-			  slot_id, ep_index);
-		status = -EILSEQ;
-		break;
 	case COMP_SPLIT_TRANSACTION_ERROR:
 	case COMP_USB_TRANSACTION_ERROR:
 		xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
@@ -2376,6 +2392,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 slot_id, ep_index);
 		status = -EOVERFLOW;
 		break;
+	/* Completion codes for endpoint error state */
+	case COMP_TRB_ERROR:
+		xhci_warn(xhci,
+			  "WARN: TRB error for slot %u ep %u on endpoint\n",
+			  slot_id, ep_index);
+		status = -EILSEQ;
+		break;
+	/* Completion codes not indicating endpoint state change */
 	case COMP_DATA_BUFFER_ERROR:
 		xhci_warn(xhci,
 			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
@@ -2413,12 +2437,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
 				 ep_index);
 		goto cleanup;
-	case COMP_INCOMPATIBLE_DEVICE_ERROR:
-		xhci_warn(xhci,
-			  "WARN: detect an incompatible device for slot %u ep %u",
-			  slot_id, ep_index);
-		status = -EPROTO;
-		break;
 	case COMP_MISSED_SERVICE_ERROR:
 		/*
 		 * When encounter missed service error, one or more isoc tds
@@ -2437,6 +2455,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
 			 slot_id, ep_index);
 		goto cleanup;
+
+	case COMP_INCOMPATIBLE_DEVICE_ERROR:
+		/* needs disable slot command to recover */
+		xhci_warn(xhci,
+			  "WARN: detect an incompatible device for slot %u ep %u",
+			  slot_id, ep_index);
+		status = -EPROTO;
+		break;
 	default:
 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
@@ -2589,6 +2615,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	} while (handling_skipped_tds);
 
 	return 0;
+
+err_out:
+	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+		 (unsigned long long) xhci_trb_virt_to_dma(
+			 xhci->event_ring->deq_seg,
+			 xhci->event_ring->dequeue),
+		 lower_32_bits(le64_to_cpu(event->buffer)),
+		 upper_32_bits(le64_to_cpu(event->buffer)),
+		 le32_to_cpu(event->transfer_len),
+		 le32_to_cpu(event->flags));
+	return -ENODEV;
 }
 
 /*
@@ -3963,13 +4000,12 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
 /* Set Transfer Ring Dequeue Pointer command */
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
 {
 	dma_addr_t addr;
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
-	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+	u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
 	u32 trb_sct = 0;
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 	struct xhci_virt_ep *ep;
@@ -4007,7 +4043,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 
 	ep->queued_deq_seg = deq_state->new_deq_seg;
 	ep->queued_deq_ptr = deq_state->new_deq_ptr;
-	if (stream_id)
+	if (deq_state->stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
 	ret = queue_command(xhci, cmd,
 		lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
@@ -4027,12 +4063,16 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
-			int slot_id, unsigned int ep_index)
+			int slot_id, unsigned int ep_index,
+			enum xhci_ep_reset_type reset_type)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_RESET_EP);
 
+	if (reset_type == EP_SOFT_RESET)
+		type |= TRB_TSP;
+
 	return queue_command(xhci, cmd, 0, 0, 0,
 			trb_slot_id | trb_ep_index | type, false);
 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 30f47d9..56f85df 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1695,8 +1695,7 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (xhci->quirks & XHCI_MTK_HOST) {
 		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
 		if (ret < 0) {
-			xhci_free_or_cache_endpoint_ring(xhci,
-				virt_dev, ep_index);
+			xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
 			return ret;
 		}
 	}
@@ -2720,23 +2719,23 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	for (i = 1; i < 31; i++) {
 		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
 		}
 	}
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/*
 	 * Install any rings for completely new endpoints or changed endpoints,
-	 * and free or cache any old rings from changed endpoints.
+	 * and free any old rings from changed endpoints.
 	 */
 	for (i = 1; i < 31; i++) {
 		if (!virt_dev->eps[i].new_ring)
 			continue;
-		/* Only cache or free the old ring if it exists.
+		/* Only free the old ring if it exists.
 		 * It may not if this is the first add of an endpoint.
 		 */
 		if (virt_dev->eps[i].ring) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 		}
 		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
@@ -2823,8 +2822,8 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 			added_ctxs, added_ctxs);
 }
 
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
-			unsigned int ep_index, struct xhci_td *td)
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+			       unsigned int stream_id, struct xhci_td *td)
 {
 	struct xhci_dequeue_state deq_state;
 	struct xhci_virt_ep *ep;
@@ -2837,7 +2836,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * or it will attempt to resend it on the next doorbell ring.
 	 */
 	xhci_find_new_dequeue_state(xhci, udev->slot_id,
-			ep_index, ep->stopped_stream, td, &deq_state);
+			ep_index, stream_id, td, &deq_state);
 
 	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
 		return;
@@ -2849,7 +2848,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 				"Queueing new dequeue state");
 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, ep->stopped_stream, &deq_state);
+				ep_index, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
 		 * reset endpoint completion!
@@ -3336,7 +3335,7 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
  *
  * Wait for the Reset Device command to finish.  Remove all structures
  * associated with the endpoints that were disabled.  Clear the input device
- * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
+ * structure? Reset the control endpoint 0 max packet size?
  *
  * If the virt_dev to be reset does not exist or does not match the udev,
  * it means the device is lost, possibly due to the xHC restore error and
@@ -3466,7 +3465,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 
-	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
+	/* Everything but endpoint 0 is disabled, so free the rings. */
 	last_freed_endpoint = 1;
 	for (i = 1; i < 31; i++) {
 		struct xhci_virt_ep *ep = &virt_dev->eps[i];
@@ -3480,7 +3479,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
 		}
 
 		if (ep->ring) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 			last_freed_endpoint = i;
 		}
 		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 73a28a9..650a2d9d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -924,8 +924,6 @@ struct xhci_virt_ep {
 #define EP_GETTING_NO_STREAMS	(1 << 5)
 	/* ----  Related to URB cancellation ---- */
 	struct list_head	cancelled_td_list;
-	struct xhci_td		*stopped_td;
-	unsigned int		stopped_stream;
 	/* Watchdog timer for stop endpoint command to cancel URBs */
 	struct timer_list	stop_cmd_timer;
 	struct xhci_hcd		*xhci;
@@ -993,10 +991,6 @@ struct xhci_virt_device {
 	struct xhci_container_ctx       *out_ctx;
 	/* Used for addressing devices and configuration changes */
 	struct xhci_container_ctx       *in_ctx;
-	/* Rings saved to ensure old alt settings can be re-instated */
-	struct xhci_ring		**ring_cache;
-	int				num_rings_cached;
-#define	XHCI_MAX_RINGS_CACHED	31
 	struct xhci_virt_ep		eps[31];
 	u8				fake_port;
 	u8				real_port;
@@ -1210,6 +1204,11 @@ struct xhci_event_cmd {
 /* Stop Ring - Transfer State Preserve */
 #define TRB_TSP		(1<<9)
 
+enum xhci_ep_reset_type {
+	EP_HARD_RESET,
+	EP_SOFT_RESET,
+};
+
 /* Force Event */
 #define TRB_TO_VF_INTR_TARGET(p)	(((p) & (0x3ff << 22)) >> 22)
 #define TRB_TO_VF_ID(p)			(((p) & (0xff << 16)) >> 16)
@@ -1527,6 +1526,7 @@ struct xhci_dequeue_state {
 	struct xhci_segment *new_deq_seg;
 	union xhci_trb *new_deq_ptr;
 	int new_cycle_state;
+	unsigned int stream_id;
 };
 
 enum xhci_ring_type {
@@ -1960,7 +1960,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 				unsigned int num_trbs, gfp_t flags);
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
@@ -2044,7 +2044,8 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
-		int slot_id, unsigned int ep_index);
+		int slot_id, unsigned int ep_index,
+		enum xhci_ep_reset_type reset_type);
 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		u32 slot_id);
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
@@ -2053,10 +2054,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
-		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state);
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
-		unsigned int ep_index, struct xhci_td *td);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
+		unsigned int stream_id, struct xhci_td *td);
 void xhci_stop_endpoint_command_watchdog(unsigned long arg);
 void xhci_handle_command_timeout(struct work_struct *work);
 
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 1d1d70d..0f9f25d 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -275,29 +275,3 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called chaoskey.
-
-config UCSI
-	tristate "USB Type-C Connector System Software Interface driver"
-	depends on ACPI
-	help
-	  UCSI driver is meant to be used as a convenience tool for desktop and
-	  server systems that are not equipped to handle USB in device mode. It
-	  will always select USB host role for the USB Type-C ports on systems
-	  that provide UCSI interface.
-
-	  USB Type-C Connector System Software Interface (UCSI) is a
-	  specification for an interface that allows the Operating System to
-	  control the USB Type-C ports on a system. Things the need controlling
-	  include the USB Data Role (host or device), and when USB Power
-	  Delivery is supported, the Power Role (source or sink). With USB
-	  Type-C connectors, when two dual role capable devices are attached
-	  together, the data role is selected randomly. Therefore it is
-	  important to give the OS a way to select the role. Otherwise the user
-	  would have to unplug and replug in order in order to attempt to swap
-	  the data and power roles.
-
-	  The UCSI specification can be downloaded from:
-	  http://www.intel.com/content/www/us/en/io/universal-serial-bus/usb-type-c-ucsi-spec.html
-
-	  To compile the driver as a module, choose M here: the module will be
-	  called ucsi.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index f6ac6c9..7fdb45f 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -27,7 +27,6 @@
 obj-$(CONFIG_USB_HSIC_USB3503)		+= usb3503.o
 obj-$(CONFIG_USB_HSIC_USB4604)		+= usb4604.o
 obj-$(CONFIG_USB_CHAOSKEY)		+= chaoskey.o
-obj-$(CONFIG_UCSI)			+= ucsi.o
 
 obj-$(CONFIG_USB_SISUSBVGA)		+= sisusbvga/
 obj-$(CONFIG_USB_LINK_LAYER_TEST)	+= lvstest.o
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 83b05a2..7ca4c7e 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -368,14 +368,9 @@ static ssize_t iowarrior_write(struct file *file,
 	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
 	case USB_DEVICE_ID_CODEMERCS_IOW40:
 		/* IOW24 and IOW40 use a synchronous call */
-		buf = kmalloc(count, GFP_KERNEL);
-		if (!buf) {
-			retval = -ENOMEM;
-			goto exit;
-		}
-		if (copy_from_user(buf, user_buffer, count)) {
-			retval = -EFAULT;
-			kfree(buf);
+		buf = memdup_user(user_buffer, count);
+		if (IS_ERR(buf)) {
+			retval = PTR_ERR(buf);
 			goto exit;
 		}
 		retval = usb_set_report(dev->interface, 2, 0, buf, count);
diff --git a/drivers/usb/misc/ucsi.c b/drivers/usb/misc/ucsi.c
deleted file mode 100644
index 07397bd..0000000
--- a/drivers/usb/misc/ucsi.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * USB Type-C Connector System Software Interface driver
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/acpi.h>
-
-#include "ucsi.h"
-
-/* Double the time defined by MIN_TIME_TO_RESPOND_WITH_BUSY */
-#define UCSI_TIMEOUT_MS 20
-
-enum ucsi_status {
-	UCSI_IDLE = 0,
-	UCSI_BUSY,
-	UCSI_ERROR,
-};
-
-struct ucsi_connector {
-	int num;
-	struct ucsi *ucsi;
-	struct work_struct work;
-	struct ucsi_connector_capability cap;
-};
-
-struct ucsi {
-	struct device *dev;
-	struct ucsi_data __iomem *data;
-
-	enum ucsi_status status;
-	struct completion complete;
-	struct ucsi_capability cap;
-	struct ucsi_connector *connector;
-
-	/* device lock */
-	spinlock_t dev_lock;
-
-	/* PPM Communication lock */
-	struct mutex ppm_lock;
-
-	/* PPM communication flags */
-	unsigned long flags;
-#define EVENT_PENDING	0
-#define COMMAND_PENDING	1
-};
-
-static int ucsi_acpi_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl)
-{
-	uuid_le uuid = UUID_LE(0x6f8398c2, 0x7ca4, 0x11e4,
-			       0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
-	union acpi_object *obj;
-
-	ucsi->data->ctrl.raw_cmd = ctrl->raw_cmd;
-
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), uuid.b, 1, 1, NULL);
-	if (!obj) {
-		dev_err(ucsi->dev, "%s: failed to evaluate _DSM\n", __func__);
-		return -EIO;
-	}
-
-	ACPI_FREE(obj);
-	return 0;
-}
-
-static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct ucsi *ucsi = data;
-	struct ucsi_cci *cci;
-
-	spin_lock(&ucsi->dev_lock);
-
-	ucsi->status = UCSI_IDLE;
-	cci = &ucsi->data->cci;
-
-	/*
-	 * REVISIT: This is not documented behavior, but all known PPMs ACK
-	 * asynchronous events by sending notification with cleared CCI.
-	 */
-	if (!ucsi->data->raw_cci) {
-		if (test_bit(EVENT_PENDING, &ucsi->flags))
-			complete(&ucsi->complete);
-		else
-			dev_WARN(ucsi->dev, "spurious notification\n");
-		goto out_unlock;
-	}
-
-	if (test_bit(COMMAND_PENDING, &ucsi->flags)) {
-		if (cci->busy) {
-			ucsi->status = UCSI_BUSY;
-			complete(&ucsi->complete);
-
-			goto out_unlock;
-		} else if (cci->ack_complete || cci->cmd_complete) {
-			/* Error Indication is only valid with commands */
-			if (cci->error && cci->cmd_complete)
-				ucsi->status = UCSI_ERROR;
-
-			ucsi->data->ctrl.raw_cmd = 0;
-			complete(&ucsi->complete);
-		}
-	}
-
-	if (cci->connector_change) {
-		struct ucsi_connector *con;
-
-		/*
-		 * This is workaround for buggy PPMs that create asynchronous
-		 * event notifications before OPM has enabled them.
-		 */
-		if (!ucsi->connector)
-			goto out_unlock;
-
-		con = ucsi->connector + (cci->connector_change - 1);
-
-		/*
-		 * PPM will not clear the connector specific bit in Connector
-		 * Change Indication field of CCI until the driver has ACK it,
-		 * and the driver can not ACK it before it has been processed.
-		 * The PPM will not generate new events before the first has
-		 * been acknowledged, even if they are for an other connector.
-		 * So only one event at a time.
-		 */
-		if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
-			schedule_work(&con->work);
-	}
-out_unlock:
-	spin_unlock(&ucsi->dev_lock);
-}
-
-static int ucsi_ack(struct ucsi *ucsi, u8 cmd)
-{
-	struct ucsi_control ctrl;
-	int ret;
-
-	ctrl.cmd.cmd = UCSI_ACK_CC_CI;
-	ctrl.cmd.length = 0;
-	ctrl.cmd.data = cmd;
-	ret = ucsi_acpi_cmd(ucsi, &ctrl);
-	if (ret)
-		return ret;
-
-	/* Waiting for ACK also with ACK CMD for now */
-	ret = wait_for_completion_timeout(&ucsi->complete,
-					  msecs_to_jiffies(UCSI_TIMEOUT_MS));
-	if (!ret)
-		return -ETIMEDOUT;
-	return 0;
-}
-
-static int ucsi_run_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl,
-			void *data, size_t size)
-{
-	u16 err_value = 0;
-	int ret;
-
-	set_bit(COMMAND_PENDING, &ucsi->flags);
-
-	ret = ucsi_acpi_cmd(ucsi, ctrl);
-	if (ret)
-		goto err_clear_flag;
-
-	ret = wait_for_completion_timeout(&ucsi->complete,
-					  msecs_to_jiffies(UCSI_TIMEOUT_MS));
-	if (!ret) {
-		ret = -ETIMEDOUT;
-		goto err_clear_flag;
-	}
-
-	switch (ucsi->status) {
-	case UCSI_IDLE:
-		if (data)
-			memcpy(data, ucsi->data->message_in, size);
-
-		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
-		break;
-	case UCSI_BUSY:
-		/* The caller decides whether to cancel or not */
-		ret = -EBUSY;
-		goto err_clear_flag;
-	case UCSI_ERROR:
-		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
-		if (ret)
-			goto err_clear_flag;
-
-		ctrl->cmd.cmd = UCSI_GET_ERROR_STATUS;
-		ctrl->cmd.length = 0;
-		ctrl->cmd.data = 0;
-		ret = ucsi_acpi_cmd(ucsi, ctrl);
-		if (ret)
-			goto err_clear_flag;
-
-		ret = wait_for_completion_timeout(&ucsi->complete,
-					msecs_to_jiffies(UCSI_TIMEOUT_MS));
-		if (!ret) {
-			ret = -ETIMEDOUT;
-			goto err_clear_flag;
-		}
-
-		memcpy(&err_value, ucsi->data->message_in, sizeof(err_value));
-
-		/* Something has really gone wrong */
-		if (WARN_ON(ucsi->status == UCSI_ERROR)) {
-			ret = -ENODEV;
-			goto err_clear_flag;
-		}
-
-		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
-		if (ret)
-			goto err_clear_flag;
-
-		switch (err_value) {
-		case UCSI_ERROR_INCOMPATIBLE_PARTNER:
-			ret = -EOPNOTSUPP;
-			break;
-		case UCSI_ERROR_CC_COMMUNICATION_ERR:
-			ret = -ECOMM;
-			break;
-		case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
-			ret = -EIO;
-			break;
-		case UCSI_ERROR_DEAD_BATTERY:
-			dev_warn(ucsi->dev, "Dead battery condition!\n");
-			ret = -EPERM;
-			break;
-		/* The following mean a bug in this driver */
-		case UCSI_ERROR_INVALID_CON_NUM:
-		case UCSI_ERROR_UNREGONIZED_CMD:
-		case UCSI_ERROR_INVALID_CMD_ARGUMENT:
-		default:
-			dev_warn(ucsi->dev,
-				 "%s: possible UCSI driver bug - error %hu\n",
-				 __func__, err_value);
-			ret = -EINVAL;
-			break;
-		}
-		break;
-	}
-	ctrl->raw_cmd = 0;
-err_clear_flag:
-	clear_bit(COMMAND_PENDING, &ucsi->flags);
-	return ret;
-}
-
-static void ucsi_connector_change(struct work_struct *work)
-{
-	struct ucsi_connector *con = container_of(work, struct ucsi_connector,
-						  work);
-	struct ucsi_connector_status constat;
-	struct ucsi *ucsi = con->ucsi;
-	struct ucsi_control ctrl;
-	int ret;
-
-	mutex_lock(&ucsi->ppm_lock);
-
-	ctrl.cmd.cmd = UCSI_GET_CONNECTOR_STATUS;
-	ctrl.cmd.length = 0;
-	ctrl.cmd.data = con->num;
-	ret = ucsi_run_cmd(con->ucsi, &ctrl, &constat, sizeof(constat));
-	if (ret) {
-		dev_err(ucsi->dev, "%s: failed to read connector status (%d)\n",
-			__func__, ret);
-		goto out_ack_event;
-	}
-
-	/* Ignoring disconnections and Alternate Modes */
-	if (!constat.connected || !(constat.change &
-	    (UCSI_CONSTAT_PARTNER_CHANGE | UCSI_CONSTAT_CONNECT_CHANGE)) ||
-	    constat.partner_flags & UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE)
-		goto out_ack_event;
-
-	/* If the partner got USB Host role, attempting swap */
-	if (constat.partner_type & UCSI_CONSTAT_PARTNER_TYPE_DFP) {
-		ctrl.uor.cmd = UCSI_SET_UOR;
-		ctrl.uor.con_num = con->num;
-		ctrl.uor.role = UCSI_UOR_ROLE_DFP;
-
-		ret = ucsi_run_cmd(con->ucsi, &ctrl, NULL, 0);
-		if (ret)
-			dev_err(ucsi->dev, "%s: failed to swap role (%d)\n",
-				__func__, ret);
-	}
-out_ack_event:
-	ucsi_ack(ucsi, UCSI_ACK_EVENT);
-	clear_bit(EVENT_PENDING, &ucsi->flags);
-	mutex_unlock(&ucsi->ppm_lock);
-}
-
-static int ucsi_reset_ppm(struct ucsi *ucsi)
-{
-	int timeout = UCSI_TIMEOUT_MS;
-	struct ucsi_control ctrl;
-	int ret;
-
-	memset(&ctrl, 0, sizeof(ctrl));
-	ctrl.cmd.cmd = UCSI_PPM_RESET;
-	ret = ucsi_acpi_cmd(ucsi, &ctrl);
-	if (ret)
-		return ret;
-
-	/* There is no quarantee the PPM will ever set the RESET_COMPLETE bit */
-	while (!ucsi->data->cci.reset_complete && timeout--)
-		usleep_range(1000, 2000);
-	return 0;
-}
-
-static int ucsi_init(struct ucsi *ucsi)
-{
-	struct ucsi_connector *con;
-	struct ucsi_control ctrl;
-	int ret;
-	int i;
-
-	init_completion(&ucsi->complete);
-	spin_lock_init(&ucsi->dev_lock);
-	mutex_init(&ucsi->ppm_lock);
-
-	/* Reset the PPM */
-	ret = ucsi_reset_ppm(ucsi);
-	if (ret)
-		return ret;
-
-	/*
-	 * REVISIT: Executing second reset to WA an issue seen on some of the
-	 * Broxton based platforms, where the first reset puts the PPM into a
-	 * state where it's unable to recognise some of the commands.
-	 */
-	ret = ucsi_reset_ppm(ucsi);
-	if (ret)
-		return ret;
-
-	mutex_lock(&ucsi->ppm_lock);
-
-	/* Enable basic notifications */
-	ctrl.cmd.cmd = UCSI_SET_NOTIFICATION_ENABLE;
-	ctrl.cmd.length = 0;
-	ctrl.cmd.data = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
-	ret = ucsi_run_cmd(ucsi, &ctrl, NULL, 0);
-	if (ret)
-		goto err_reset;
-
-	/* Get PPM capabilities */
-	ctrl.cmd.cmd = UCSI_GET_CAPABILITY;
-	ret = ucsi_run_cmd(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
-	if (ret)
-		goto err_reset;
-
-	if (!ucsi->cap.num_connectors) {
-		ret = -ENODEV;
-		goto err_reset;
-	}
-
-	ucsi->connector = devm_kcalloc(ucsi->dev, ucsi->cap.num_connectors,
-				       sizeof(*ucsi->connector), GFP_KERNEL);
-	if (!ucsi->connector) {
-		ret = -ENOMEM;
-		goto err_reset;
-	}
-
-	for (i = 1, con = ucsi->connector; i < ucsi->cap.num_connectors + 1;
-	     i++, con++) {
-		/* Get connector capability */
-		ctrl.cmd.cmd = UCSI_GET_CONNECTOR_CAPABILITY;
-		ctrl.cmd.data = i;
-		ret = ucsi_run_cmd(ucsi, &ctrl, &con->cap, sizeof(con->cap));
-		if (ret)
-			goto err_reset;
-
-		con->num = i;
-		con->ucsi = ucsi;
-		INIT_WORK(&con->work, ucsi_connector_change);
-	}
-
-	/* Enable all notifications */
-	ctrl.cmd.cmd = UCSI_SET_NOTIFICATION_ENABLE;
-	ctrl.cmd.data = UCSI_ENABLE_NTFY_ALL;
-	ret = ucsi_run_cmd(ucsi, &ctrl, NULL, 0);
-	if (ret < 0)
-		goto err_reset;
-
-	mutex_unlock(&ucsi->ppm_lock);
-	return 0;
-err_reset:
-	ucsi_reset_ppm(ucsi);
-	mutex_unlock(&ucsi->ppm_lock);
-	return ret;
-}
-
-static int ucsi_acpi_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	acpi_status status;
-	struct ucsi *ucsi;
-	int ret;
-
-	ucsi = devm_kzalloc(&pdev->dev, sizeof(*ucsi), GFP_KERNEL);
-	if (!ucsi)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "missing memory resource\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * NOTE: ACPI has claimed the memory region as it's also an Operation
-	 * Region. It's not possible to request it in the driver.
-	 */
-	ucsi->data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!ucsi->data)
-		return -ENOMEM;
-
-	ucsi->dev = &pdev->dev;
-
-	status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-					     ACPI_ALL_NOTIFY,
-					     ucsi_acpi_notify, ucsi);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	ret = ucsi_init(ucsi);
-	if (ret) {
-		acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-					   ACPI_ALL_NOTIFY,
-					   ucsi_acpi_notify);
-		return ret;
-	}
-
-	platform_set_drvdata(pdev, ucsi);
-	return 0;
-}
-
-static int ucsi_acpi_remove(struct platform_device *pdev)
-{
-	struct ucsi *ucsi = platform_get_drvdata(pdev);
-
-	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-				   ACPI_ALL_NOTIFY, ucsi_acpi_notify);
-
-	/* Make sure there are no events in the middle of being processed */
-	if (wait_on_bit_timeout(&ucsi->flags, EVENT_PENDING,
-				TASK_UNINTERRUPTIBLE,
-				msecs_to_jiffies(UCSI_TIMEOUT_MS)))
-		dev_WARN(ucsi->dev, "%s: Events still pending\n", __func__);
-
-	ucsi_reset_ppm(ucsi);
-	return 0;
-}
-
-static const struct acpi_device_id ucsi_acpi_match[] = {
-	{ "PNP0CA0", 0 },
-	{ },
-};
-MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match);
-
-static struct platform_driver ucsi_acpi_platform_driver = {
-	.driver = {
-		.name = "ucsi_acpi",
-		.acpi_match_table = ACPI_PTR(ucsi_acpi_match),
-	},
-	.probe = ucsi_acpi_probe,
-	.remove = ucsi_acpi_remove,
-};
-
-module_platform_driver(ucsi_acpi_platform_driver);
-
-MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("USB Type-C System Software Interface (UCSI) driver");
diff --git a/drivers/usb/misc/ucsi.h b/drivers/usb/misc/ucsi.h
deleted file mode 100644
index 6dd11d1..0000000
--- a/drivers/usb/misc/ucsi.h
+++ /dev/null
@@ -1,215 +0,0 @@
-
-#include <linux/types.h>
-
-/* -------------------------------------------------------------------------- */
-
-/* Command Status and Connector Change Indication (CCI) data structure */
-struct ucsi_cci {
-	unsigned int RESERVED1:1;
-	unsigned int connector_change:7;
-	u8 data_length;
-	unsigned int RESERVED9:9;
-	unsigned int not_supported:1;
-	unsigned int cancel_complete:1;
-	unsigned int reset_complete:1;
-	unsigned int busy:1;
-	unsigned int ack_complete:1;
-	unsigned int error:1;
-	unsigned int cmd_complete:1;
-} __packed;
-
-/* Default fields in CONTROL data structure */
-struct ucsi_command {
-	u8 cmd;
-	u8 length;
-	u64 data:48;
-} __packed;
-
-/* Set USB Operation Mode Command structure */
-struct ucsi_uor_cmd {
-	u8 cmd;
-	u8 length;
-	u64 con_num:7;
-	u64 role:3;
-#define UCSI_UOR_ROLE_DFP			BIT(0)
-#define UCSI_UOR_ROLE_UFP			BIT(1)
-#define UCSI_UOR_ROLE_DRP			BIT(2)
-	u64 data:38;
-} __packed;
-
-struct ucsi_control {
-	union {
-		u64 raw_cmd;
-		struct ucsi_command cmd;
-		struct ucsi_uor_cmd uor;
-	};
-};
-
-struct ucsi_data {
-	u16 version;
-	u16 RESERVED;
-	union {
-		u32 raw_cci;
-		struct ucsi_cci cci;
-	};
-	struct ucsi_control ctrl;
-	u32 message_in[4];
-	u32 message_out[4];
-} __packed;
-
-/* Commands */
-#define UCSI_PPM_RESET			0x01
-#define UCSI_CANCEL			0x02
-#define UCSI_CONNECTOR_RESET		0x03
-#define UCSI_ACK_CC_CI			0x04
-#define UCSI_SET_NOTIFICATION_ENABLE	0x05
-#define UCSI_GET_CAPABILITY		0x06
-#define UCSI_GET_CONNECTOR_CAPABILITY	0x07
-#define UCSI_SET_UOM			0x08
-#define UCSI_SET_UOR			0x09
-#define UCSI_SET_PDM			0x0A
-#define UCSI_SET_PDR			0x0B
-#define UCSI_GET_ALTERNATE_MODES	0x0C
-#define UCSI_GET_CAM_SUPPORTED		0x0D
-#define UCSI_GET_CURRENT_CAM		0x0E
-#define UCSI_SET_NEW_CAM		0x0F
-#define UCSI_GET_PDOS			0x10
-#define UCSI_GET_CABLE_PROPERTY		0x11
-#define UCSI_GET_CONNECTOR_STATUS	0x12
-#define UCSI_GET_ERROR_STATUS		0x13
-
-/* ACK_CC_CI commands */
-#define UCSI_ACK_EVENT			1
-#define UCSI_ACK_CMD			2
-
-/* Bits for SET_NOTIFICATION_ENABLE command */
-#define UCSI_ENABLE_NTFY_CMD_COMPLETE		BIT(0)
-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE	BIT(1)
-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE	BIT(2)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE		BIT(5)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE	BIT(6)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE	BIT(7)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE		BIT(8)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE	BIT(9)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE		BIT(11)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE		BIT(12)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE	BIT(14)
-#define UCSI_ENABLE_NTFY_ERROR			BIT(15)
-#define UCSI_ENABLE_NTFY_ALL			0xdbe7
-
-/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
-#define UCSI_ERROR_UNREGONIZED_CMD		BIT(0)
-#define UCSI_ERROR_INVALID_CON_NUM		BIT(1)
-#define UCSI_ERROR_INVALID_CMD_ARGUMENT		BIT(2)
-#define UCSI_ERROR_INCOMPATIBLE_PARTNER		BIT(3)
-#define UCSI_ERROR_CC_COMMUNICATION_ERR		BIT(4)
-#define UCSI_ERROR_DEAD_BATTERY			BIT(5)
-#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL	BIT(6)
-
-/* Data structure filled by PPM in response to GET_CAPABILITY command. */
-struct ucsi_capability {
-	u32 attributes;
-#define UCSI_CAP_ATTR_DISABLE_STATE		BIT(0)
-#define UCSI_CAP_ATTR_BATTERY_CHARGING		BIT(1)
-#define UCSI_CAP_ATTR_USB_PD			BIT(2)
-#define UCSI_CAP_ATTR_TYPEC_CURRENT		BIT(6)
-#define UCSI_CAP_ATTR_POWER_AC_SUPPLY		BIT(8)
-#define UCSI_CAP_ATTR_POWER_OTHER		BIT(10)
-#define UCSI_CAP_ATTR_POWER_VBUS		BIT(14)
-	u8 num_connectors;
-	u32 features:24;
-#define UCSI_CAP_SET_UOM			BIT(0)
-#define UCSI_CAP_SET_PDM			BIT(1)
-#define UCSI_CAP_ALT_MODE_DETAILS		BIT(2)
-#define UCSI_CAP_ALT_MODE_OVERRIDE		BIT(3)
-#define UCSI_CAP_PDO_DETAILS			BIT(4)
-#define UCSI_CAP_CABLE_DETAILS			BIT(5)
-#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS	BIT(6)
-#define UCSI_CAP_PD_RESET			BIT(7)
-	u8 num_alt_modes;
-	u8 RESERVED;
-	u16 bc_version;
-	u16 pd_version;
-	u16 typec_version;
-} __packed;
-
-/* Data structure filled by PPM in response to GET_CONNECTOR_CAPABILITY cmd. */
-struct ucsi_connector_capability {
-	u8 op_mode;
-#define UCSI_CONCAP_OPMODE_DFP			BIT(0)
-#define UCSI_CONCAP_OPMODE_UFP			BIT(1)
-#define UCSI_CONCAP_OPMODE_DRP			BIT(2)
-#define UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY	BIT(3)
-#define UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY	BIT(4)
-#define UCSI_CONCAP_OPMODE_USB2			BIT(5)
-#define UCSI_CONCAP_OPMODE_USB3			BIT(6)
-#define UCSI_CONCAP_OPMODE_ALT_MODE		BIT(7)
-	u8 provider:1;
-	u8 consumer:1;
-} __packed;
-
-/* Data structure filled by PPM in response to GET_CABLE_PROPERTY command. */
-struct ucsi_cable_property {
-	u16 speed_supported;
-	u8 current_capability;
-	u8 vbus_in_cable:1;
-	u8 active_cable:1;
-	u8 directionality:1;
-	u8 plug_type:2;
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A		0
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B		1
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C		2
-#define UCSI_CABLE_PROPERTY_PLUG_OTHER		3
-	u8 mode_support:1;
-	u8 RESERVED_2:2;
-	u8 latency:4;
-	u8 RESERVED_4:4;
-} __packed;
-
-/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
-struct ucsi_connector_status {
-	u16 change;
-#define UCSI_CONSTAT_EXT_SUPPLY_CHANGE		BIT(1)
-#define UCSI_CONSTAT_POWER_OPMODE_CHANGE	BIT(2)
-#define UCSI_CONSTAT_PDOS_CHANGE		BIT(5)
-#define UCSI_CONSTAT_POWER_LEVEL_CHANGE		BIT(6)
-#define UCSI_CONSTAT_PD_RESET_COMPLETE		BIT(7)
-#define UCSI_CONSTAT_CAM_CHANGE			BIT(8)
-#define UCSI_CONSTAT_BC_CHANGE			BIT(9)
-#define UCSI_CONSTAT_PARTNER_CHANGE		BIT(11)
-#define UCSI_CONSTAT_POWER_DIR_CHANGE		BIT(12)
-#define UCSI_CONSTAT_CONNECT_CHANGE		BIT(14)
-#define UCSI_CONSTAT_ERROR			BIT(15)
-	u16 pwr_op_mode:3;
-#define UCSI_CONSTAT_PWR_OPMODE_NONE		0
-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT		1
-#define UCSI_CONSTAT_PWR_OPMODE_BC		2
-#define UCSI_CONSTAT_PWR_OPMODE_PD		3
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_3	4
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0	5
-	u16 connected:1;
-	u16 pwr_dir:1;
-	u16 partner_flags:8;
-#define UCSI_CONSTAT_PARTNER_FLAG_USB		BIT(0)
-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE	BIT(1)
-	u16 partner_type:3;
-#define UCSI_CONSTAT_PARTNER_TYPE_DFP		1
-#define UCSI_CONSTAT_PARTNER_TYPE_UFP		2
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_NO_UFP	3 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP	4 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG		5
-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO		6
-	u32 request_data_obj;
-	u8 bc_status:2;
-#define UCSI_CONSTAT_BC_NOT_CHARGING		0
-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING	1
-#define UCSI_CONSTAT_BC_SLOW_CHARGING		2
-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING	3
-	u8 provider_cap_limit_reason:4;
-#define UCSI_CONSTAT_CAP_PWR_LOWERED		0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT	1
-	u8 RESERVED:2;
-} __packed;
-
-/* -------------------------------------------------------------------------- */
-
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index a0ba529..388fae6 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -33,7 +33,7 @@ static const struct usb_device_id id_table[] = {
 MODULE_DEVICE_TABLE(usb, id_table);
 
 /* the different text display modes the device is capable of */
-static char *display_textmodes[] = {"raw", "hex", "ascii", NULL};
+static const char *display_textmodes[] = {"raw", "hex", "ascii"};
 
 struct usb_sevsegdev {
 	struct usb_device *udev;
@@ -280,7 +280,7 @@ static ssize_t show_attr_textmode(struct device *dev,
 
 	buf[0] = 0;
 
-	for (i = 0; display_textmodes[i]; i++) {
+	for (i = 0; i < ARRAY_SIZE(display_textmodes); i++) {
 		if (mydev->textmode == i) {
 			strcat(buf, " [");
 			strcat(buf, display_textmodes[i]);
@@ -304,15 +304,13 @@ static ssize_t set_attr_textmode(struct device *dev,
 	struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
 	int i;
 
-	for (i = 0; display_textmodes[i]; i++) {
-		if (sysfs_streq(display_textmodes[i], buf)) {
-			mydev->textmode = i;
-			update_display_visual(mydev, GFP_KERNEL);
-			return count;
-		}
-	}
+	i = sysfs_match_string(display_textmodes, buf);
+	if (i < 0)
+		return i;
 
-	return -EINVAL;
+	mydev->textmode = i;
+	update_display_visual(mydev, GFP_KERNEL);
+	return count;
 }
 
 static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index aa6fd6a..7b6dc23 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -356,12 +356,8 @@ static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep)
 
 static inline struct mtu3_request *next_request(struct mtu3_ep *mep)
 {
-	struct list_head *queue = &mep->req_list;
-
-	if (list_empty(queue))
-		return NULL;
-
-	return list_first_entry(queue, struct mtu3_request, list);
+	return list_first_entry_or_null(&mep->req_list, struct mtu3_request,
+					list);
 }
 
 static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data)
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 42550c7..0d3ebb3 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -458,6 +458,7 @@ static int __maybe_unused mtu3_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+	int ret;
 
 	dev_dbg(dev, "%s\n", __func__);
 
@@ -465,12 +466,28 @@ static int __maybe_unused mtu3_resume(struct device *dev)
 		return 0;
 
 	ssusb_wakeup_disable(ssusb);
-	clk_prepare_enable(ssusb->sys_clk);
-	clk_prepare_enable(ssusb->ref_clk);
-	ssusb_phy_power_on(ssusb);
+	ret = clk_prepare_enable(ssusb->sys_clk);
+	if (ret)
+		goto err_sys_clk;
+
+	ret = clk_prepare_enable(ssusb->ref_clk);
+	if (ret)
+		goto err_ref_clk;
+
+	ret = ssusb_phy_power_on(ssusb);
+	if (ret)
+		goto err_power_on;
+
 	ssusb_host_enable(ssusb);
 
 	return 0;
+
+err_power_on:
+	clk_disable_unprepare(ssusb->ref_clk);
+err_ref_clk:
+	clk_disable_unprepare(ssusb->sys_clk);
+err_sys_clk:
+	return ret;
 }
 
 static const struct dev_pm_ops mtu3_pm_ops = {
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 870da18..87cbd56 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2224,6 +2224,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 		musb->io.ep_select = musb_flat_ep_select;
 	}
 
+	if (musb->io.quirks & MUSB_G_NO_SKB_RESERVE)
+		musb->g.quirk_avoids_skb_reserve = 1;
+
 	/* At least tusb6010 has its own offsets */
 	if (musb->ops->ep_offset)
 		musb->io.ep_offset = musb->ops->ep_offset;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 3e98d42..9f22c5b 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,6 +172,7 @@ struct musb_io;
  */
 struct musb_platform_ops {
 
+#define MUSB_G_NO_SKB_RESERVE	BIT(9)
 #define MUSB_DA8XX		BIT(8)
 #define MUSB_PRESERVE_SESSION	BIT(7)
 #define MUSB_DMA_UX500		BIT(6)
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index e7c8b1b..ba25528 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -673,12 +673,15 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
 		musb_dma->status = MUSB_DMA_STATUS_FREE;
 		musb_dma->max_len = SZ_4M;
 
-		dc = dma_request_slave_channel(dev->parent, str);
-		if (!dc) {
-			dev_err(dev, "Failed to request %s.\n", str);
-			ret = -EPROBE_DEFER;
+		dc = dma_request_chan(dev->parent, str);
+		if (IS_ERR(dc)) {
+			ret = PTR_ERR(dc);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to request %s: %d.\n",
+					str, ret);
 			goto err;
 		}
+
 		cppi41_channel->dc = dc;
 	}
 	return 0;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9c7ee26..bc6a9be 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused)
 		dsps_mod_timer_optional(glue);
 		break;
 	case OTG_STATE_A_WAIT_BCON:
+		/* keep VBUS on for host-only mode */
+		if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+			dsps_mod_timer_optional(glue);
+			break;
+		}
 		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 		skip_session = 1;
 		/* fall */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index dbe617a..76decb8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1540,7 +1540,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
 	struct dma_channel *channel = hw_ep->rx_channel;
 	void __iomem *epio = hw_ep->regs;
 	dma_addr_t *buf;
-	u32 length, res;
+	u32 length;
 	u16 val;
 
 	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
@@ -1552,10 +1552,8 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
 	val |= MUSB_RXCSR_DMAENAB;
 	musb_writew(hw_ep->regs, MUSB_RXCSR, val);
 
-	res = dma->channel_program(channel, qh->maxpacket, 0,
+	return dma->channel_program(channel, qh->maxpacket, 0,
 				   (u32)buf, length);
-
-	return res;
 }
 #else
 static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index e85cc8e..4eb640c 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -881,26 +881,14 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
 				| TUSB_INT_SRC_ID_STATUS_CHNG))
 		idle_timeout = tusb_otg_ints(musb, int_src, tbase);
 
-	/* TX dma callback must be handled here, RX dma callback is
-	 * handled in tusb_omap_dma_cb.
+	/*
+	 * Just clear the DMA interrupt if it comes, as the completion for
+	 * both TX and RX is handled by the DMA callback in tusb6010_omap.
 	 */
 	if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
 		u32	dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
-		u32	real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
 
 		dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
-		real_dma_src = ~real_dma_src & dma_src;
-		if (tusb_dma_omap(musb) && real_dma_src) {
-			int	tx_source = (real_dma_src & 0xffff);
-			int	i;
-
-			for (i = 1; i <= 15; i++) {
-				if (tx_source & (1 << i)) {
-					dev_dbg(musb->controller, "completing ep%i %s\n", i, "tx");
-					musb_dma_completion(musb, i, 1);
-				}
-			}
-		}
 		musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
 	}
 
@@ -1181,7 +1169,8 @@ static int tusb_musb_exit(struct musb *musb)
 }
 
 static const struct musb_platform_ops tusb_ops = {
-	.quirks		= MUSB_DMA_TUSB_OMAP | MUSB_IN_TUSB,
+	.quirks		= MUSB_DMA_TUSB_OMAP | MUSB_IN_TUSB |
+			  MUSB_G_NO_SKB_RESERVE,
 	.init		= tusb_musb_init,
 	.exit		= tusb_musb_exit,
 
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 7870b37..e8060e4 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -15,7 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
-#include <linux/omap-dma.h>
+#include <linux/dmaengine.h>
 
 #include "musb_core.h"
 #include "tusb6010.h"
@@ -24,12 +24,10 @@
 
 #define MAX_DMAREQ		5	/* REVISIT: Really 6, but req5 not OK */
 
-#define OMAP24XX_DMA_EXT_DMAREQ0	2
-#define OMAP24XX_DMA_EXT_DMAREQ1	3
-#define OMAP242X_DMA_EXT_DMAREQ2	14
-#define OMAP242X_DMA_EXT_DMAREQ3	15
-#define OMAP242X_DMA_EXT_DMAREQ4	16
-#define OMAP242X_DMA_EXT_DMAREQ5	64
+struct tusb_dma_data {
+	s8			dmareq;
+	struct dma_chan		*chan;
+};
 
 struct tusb_omap_dma_ch {
 	struct musb		*musb;
@@ -39,9 +37,7 @@ struct tusb_omap_dma_ch {
 	u8			tx;
 	struct musb_hw_ep	*hw_ep;
 
-	int			ch;
-	s8			dmareq;
-	s8			sync_dev;
+	struct tusb_dma_data	*dma_data;
 
 	struct tusb_omap_dma	*tusb_dma;
 
@@ -58,9 +54,7 @@ struct tusb_omap_dma {
 	struct dma_controller		controller;
 	void __iomem			*tbase;
 
-	int				ch;
-	s8				dmareq;
-	s8				sync_dev;
+	struct tusb_dma_data		dma_pool[MAX_DMAREQ];
 	unsigned			multichannel:1;
 };
 
@@ -103,7 +97,7 @@ static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
  * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
  * musb_gadget.c.
  */
-static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+static void tusb_omap_dma_cb(void *data)
 {
 	struct dma_channel	*channel = (struct dma_channel *)data;
 	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
@@ -114,21 +108,11 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
 	void __iomem		*ep_conf = hw_ep->conf;
 	void __iomem		*mbase = musb->mregs;
 	unsigned long		remaining, flags, pio;
-	int			ch;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	if (tusb_dma->multichannel)
-		ch = chdat->ch;
-	else
-		ch = tusb_dma->ch;
-
-	if (ch_status != OMAP_DMA_BLOCK_IRQ)
-		printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
-
-	dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n",
-		chdat->epnum, chdat->tx ? "tx" : "rx",
-		ch, ch_status);
+	dev_dbg(musb->controller, "ep%i %s dma callback\n",
+		chdat->epnum, chdat->tx ? "tx" : "rx");
 
 	if (chdat->tx)
 		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
@@ -139,9 +123,8 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
 
 	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
 	if (unlikely(remaining > chdat->transfer_len)) {
-		dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
-			chdat->tx ? "tx" : "rx", chdat->ch,
-			remaining);
+		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
+			chdat->tx ? "tx" : "rx", remaining);
 		remaining = 0;
 	}
 
@@ -175,13 +158,7 @@ static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
 
 	channel->status = MUSB_DMA_STATUS_FREE;
 
-	/* Handle only RX callbacks here. TX callbacks must be handled based
-	 * on the TUSB DMA status interrupt.
-	 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
-	 * interrupt for RX and TX.
-	 */
-	if (!chdat->tx)
-		musb_dma_completion(musb, chdat->epnum, chdat->tx);
+	musb_dma_completion(musb, chdat->epnum, chdat->tx);
 
 	/* We must terminate short tx transfers manually by setting TXPKTRDY.
 	 * REVISIT: This same problem may occur with other MUSB dma as well.
@@ -214,15 +191,16 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
 	void __iomem			*mbase = musb->mregs;
 	void __iomem			*ep_conf = hw_ep->conf;
-	dma_addr_t			fifo = hw_ep->fifo_sync;
-	struct omap_dma_channel_params	dma_params;
+	dma_addr_t			fifo_addr = hw_ep->fifo_sync;
 	u32				dma_remaining;
-	int				src_burst, dst_burst;
 	u16				csr;
 	u32				psize;
-	int				ch;
-	s8				dmareq;
-	s8				sync_dev;
+	struct tusb_dma_data		*dma_data;
+	struct dma_async_tx_descriptor	*dma_desc;
+	struct dma_slave_config		dma_cfg;
+	enum dma_transfer_direction	dma_dir;
+	u32				port_window;
+	int				ret;
 
 	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
 		return false;
@@ -248,9 +226,8 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 
 	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
 	if (dma_remaining) {
-		dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n",
-			chdat->tx ? "tx" : "rx", chdat->ch,
-			dma_remaining);
+		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
+			chdat->tx ? "tx" : "rx", dma_remaining);
 		return false;
 	}
 
@@ -261,27 +238,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	else
 		chdat->transfer_packet_sz = packet_sz;
 
-	if (tusb_dma->multichannel) {
-		ch = chdat->ch;
-		dmareq = chdat->dmareq;
-		sync_dev = chdat->sync_dev;
-	} else {
+	dma_data = chdat->dma_data;
+	if (!tusb_dma->multichannel) {
 		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
 			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
 			return false;
 		}
-		if (tusb_dma->ch < 0) {
+		if (dma_data->dmareq < 0) {
 			/* REVISIT: This should get blocked earlier, happens
 			 * with MSC ErrorRecoveryTest
 			 */
 			WARN_ON(1);
 			return false;
 		}
-
-		ch = tusb_dma->ch;
-		dmareq = tusb_dma->dmareq;
-		sync_dev = tusb_dma->sync_dev;
-		omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
 	}
 
 	chdat->packet_sz = packet_sz;
@@ -291,92 +260,80 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	channel->status = MUSB_DMA_STATUS_BUSY;
 
 	/* Since we're recycling dma areas, we need to clean or invalidate */
-	if (chdat->tx)
+	if (chdat->tx) {
+		dma_dir = DMA_MEM_TO_DEV;
 		dma_map_single(dev, phys_to_virt(dma_addr), len,
 				DMA_TO_DEVICE);
-	else
+	} else {
+		dma_dir = DMA_DEV_TO_MEM;
 		dma_map_single(dev, phys_to_virt(dma_addr), len,
 				DMA_FROM_DEVICE);
+	}
+
+	memset(&dma_cfg, 0, sizeof(dma_cfg));
 
 	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
 	if ((dma_addr & 0x3) == 0) {
-		dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
-		dma_params.elem_count = 8;		/* Elements in frame */
+		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		port_window = 8;
 	} else {
-		dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
-		dma_params.elem_count = 16;		/* Elements in frame */
-		fifo = hw_ep->fifo_async;
+		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		port_window = 16;
+
+		fifo_addr = hw_ep->fifo_async;
 	}
 
-	dma_params.frame_count	= chdat->transfer_len / 32; /* Burst sz frame */
+	dev_dbg(musb->controller,
+		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
+		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
+		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);
 
-	dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
-		chdat->epnum, chdat->tx ? "tx" : "rx",
-		ch, &dma_addr, chdat->transfer_len, len,
-		chdat->transfer_packet_sz, packet_sz);
+	dma_cfg.src_addr = fifo_addr;
+	dma_cfg.dst_addr = fifo_addr;
+	dma_cfg.src_port_window_size = port_window;
+	dma_cfg.src_maxburst = port_window;
+	dma_cfg.dst_port_window_size = port_window;
+	dma_cfg.dst_maxburst = port_window;
 
-	/*
-	 * Prepare omap DMA for transfer
-	 */
-	if (chdat->tx) {
-		dma_params.src_amode	= OMAP_DMA_AMODE_POST_INC;
-		dma_params.src_start	= (unsigned long)dma_addr;
-		dma_params.src_ei	= 0;
-		dma_params.src_fi	= 0;
-
-		dma_params.dst_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
-		dma_params.dst_start	= (unsigned long)fifo;
-		dma_params.dst_ei	= 1;
-		dma_params.dst_fi	= -31;	/* Loop 32 byte window */
-
-		dma_params.trigger	= sync_dev;
-		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
-		dma_params.src_or_dst_synch	= 0;	/* Dest sync */
-
-		src_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 read */
-		dst_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 write */
-	} else {
-		dma_params.src_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
-		dma_params.src_start	= (unsigned long)fifo;
-		dma_params.src_ei	= 1;
-		dma_params.src_fi	= -31;	/* Loop 32 byte window */
-
-		dma_params.dst_amode	= OMAP_DMA_AMODE_POST_INC;
-		dma_params.dst_start	= (unsigned long)dma_addr;
-		dma_params.dst_ei	= 0;
-		dma_params.dst_fi	= 0;
-
-		dma_params.trigger	= sync_dev;
-		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
-		dma_params.src_or_dst_synch	= 1;	/* Source sync */
-
-		src_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 read */
-		dst_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 write */
+	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
+	if (ret) {
+		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
+		return false;
 	}
 
-	dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
+					chdat->transfer_len, dma_dir,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!dma_desc) {
+		dev_err(musb->controller, "DMA prep_slave_single failed\n");
+		return false;
+	}
+
+	dma_desc->callback = tusb_omap_dma_cb;
+	dma_desc->callback_param = channel;
+	dmaengine_submit(dma_desc);
+
+	dev_dbg(musb->controller,
+		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
 		chdat->epnum, chdat->tx ? "tx" : "rx",
-		(dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+		dma_cfg.src_addr_width * 8,
 		((dma_addr & 0x3) == 0) ? "sync" : "async",
-		dma_params.src_start, dma_params.dst_start);
-
-	omap_set_dma_params(ch, &dma_params);
-	omap_set_dma_src_burst_mode(ch, src_burst);
-	omap_set_dma_dest_burst_mode(ch, dst_burst);
-	omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
+		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);
 
 	/*
 	 * Prepare MUSB for DMA transfer
 	 */
+	musb_ep_select(mbase, chdat->epnum);
 	if (chdat->tx) {
-		musb_ep_select(mbase, chdat->epnum);
 		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
 		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
 			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
 		csr &= ~MUSB_TXCSR_P_UNDERRUN;
 		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
 	} else {
-		musb_ep_select(mbase, chdat->epnum);
 		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
 		csr |= MUSB_RXCSR_DMAENAB;
 		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
@@ -384,10 +341,8 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 			csr | MUSB_RXCSR_P_WZC_BITS);
 	}
 
-	/*
-	 * Start DMA transfer
-	 */
-	omap_start_dma(ch);
+	/* Start DMA transfer */
+	dma_async_issue_pending(dma_data->chan);
 
 	if (chdat->tx) {
 		/* Send transfer_packet_sz packets at a time */
@@ -415,18 +370,9 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 static int tusb_omap_dma_abort(struct dma_channel *channel)
 {
 	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
-	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
 
-	if (!tusb_dma->multichannel) {
-		if (tusb_dma->ch >= 0) {
-			omap_stop_dma(tusb_dma->ch);
-			omap_free_dma(tusb_dma->ch);
-			tusb_dma->ch = -1;
-		}
-
-		tusb_dma->dmareq = -1;
-		tusb_dma->sync_dev = -1;
-	}
+	if (chdat->dma_data)
+		dmaengine_terminate_all(chdat->dma_data->chan);
 
 	channel->status = MUSB_DMA_STATUS_FREE;
 
@@ -438,15 +384,6 @@ static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
 	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
 	int		i, dmareq_nr = -1;
 
-	const int sync_dev[6] = {
-		OMAP24XX_DMA_EXT_DMAREQ0,
-		OMAP24XX_DMA_EXT_DMAREQ1,
-		OMAP242X_DMA_EXT_DMAREQ2,
-		OMAP242X_DMA_EXT_DMAREQ3,
-		OMAP242X_DMA_EXT_DMAREQ4,
-		OMAP242X_DMA_EXT_DMAREQ5,
-	};
-
 	for (i = 0; i < MAX_DMAREQ; i++) {
 		int cur = (reg & (0xf << (i * 5))) >> (i * 5);
 		if (cur == 0) {
@@ -463,8 +400,7 @@ static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
 		reg |= ((1 << 4) << (dmareq_nr * 5));
 	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
 
-	chdat->dmareq = dmareq_nr;
-	chdat->sync_dev = sync_dev[chdat->dmareq];
+	chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];
 
 	return 0;
 }
@@ -473,15 +409,14 @@ static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
 {
 	u32 reg;
 
-	if (!chdat || chdat->dmareq < 0)
+	if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
 		return;
 
 	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
-	reg &= ~(0x1f << (chdat->dmareq * 5));
+	reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
 	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
 
-	chdat->dmareq = -1;
-	chdat->sync_dev = -1;
+	chdat->dma_data = NULL;
 }
 
 static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
@@ -492,24 +427,14 @@ tusb_omap_dma_allocate(struct dma_controller *c,
 		u8 tx)
 {
 	int ret, i;
-	const char		*dev_name;
 	struct tusb_omap_dma	*tusb_dma;
 	struct musb		*musb;
-	void __iomem		*tbase;
 	struct dma_channel	*channel = NULL;
 	struct tusb_omap_dma_ch	*chdat = NULL;
-	u32			reg;
+	struct tusb_dma_data	*dma_data = NULL;
 
 	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
 	musb = tusb_dma->controller.musb;
-	tbase = musb->ctrl_base;
-
-	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
-	if (tx)
-		reg &= ~(1 << hw_ep->epnum);
-	else
-		reg &= ~(1 << (hw_ep->epnum + 15));
-	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
 
 	/* REVISIT: Why does dmareq5 not work? */
 	if (hw_ep->epnum == 0) {
@@ -530,56 +455,38 @@ tusb_omap_dma_allocate(struct dma_controller *c,
 	if (!channel)
 		return NULL;
 
-	if (tx) {
-		chdat->tx = 1;
-		dev_name = "TUSB transmit";
-	} else {
-		chdat->tx = 0;
-		dev_name = "TUSB receive";
-	}
-
 	chdat->musb = tusb_dma->controller.musb;
 	chdat->tbase = tusb_dma->tbase;
 	chdat->hw_ep = hw_ep;
 	chdat->epnum = hw_ep->epnum;
-	chdat->dmareq = -1;
 	chdat->completed_len = 0;
 	chdat->tusb_dma = tusb_dma;
+	if (tx)
+		chdat->tx = 1;
+	else
+		chdat->tx = 0;
 
 	channel->max_len = 0x7fffffff;
 	channel->desired_mode = 0;
 	channel->actual_len = 0;
 
-	if (tusb_dma->multichannel) {
-		ret = tusb_omap_dma_allocate_dmareq(chdat);
-		if (ret != 0)
-			goto free_dmareq;
-
-		ret = omap_request_dma(chdat->sync_dev, dev_name,
-				tusb_omap_dma_cb, channel, &chdat->ch);
-		if (ret != 0)
-			goto free_dmareq;
-	} else if (tusb_dma->ch == -1) {
-		tusb_dma->dmareq = 0;
-		tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
-
-		/* Callback data gets set later in the shared dmareq case */
-		ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
-				tusb_omap_dma_cb, NULL, &tusb_dma->ch);
-		if (ret != 0)
-			goto free_dmareq;
-
-		chdat->dmareq = -1;
-		chdat->ch = -1;
+	if (!chdat->dma_data) {
+		if (tusb_dma->multichannel) {
+			ret = tusb_omap_dma_allocate_dmareq(chdat);
+			if (ret != 0)
+				goto free_dmareq;
+		} else {
+			chdat->dma_data = &tusb_dma->dma_pool[0];
+		}
 	}
 
-	dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+	dma_data = chdat->dma_data;
+
+	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
 		chdat->epnum,
 		chdat->tx ? "tx" : "rx",
-		chdat->ch >= 0 ? "dedicated" : "shared",
-		chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
-		chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
-		chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+		tusb_dma->multichannel ? "shared" : "dedicated",
+		dma_data->dmareq);
 
 	return channel;
 
@@ -596,35 +503,13 @@ static void tusb_omap_dma_release(struct dma_channel *channel)
 {
 	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
 	struct musb		*musb = chdat->musb;
-	void __iomem		*tbase = musb->ctrl_base;
-	u32			reg;
 
-	dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch);
-
-	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
-	if (chdat->tx)
-		reg |= (1 << chdat->epnum);
-	else
-		reg |= (1 << (chdat->epnum + 15));
-	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
-
-	reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
-	if (chdat->tx)
-		reg |= (1 << chdat->epnum);
-	else
-		reg |= (1 << (chdat->epnum + 15));
-	musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);
 
 	channel->status = MUSB_DMA_STATUS_UNKNOWN;
 
-	if (chdat->ch >= 0) {
-		omap_stop_dma(chdat->ch);
-		omap_free_dma(chdat->ch);
-		chdat->ch = -1;
-	}
-
-	if (chdat->dmareq >= 0)
-		tusb_omap_dma_free_dmareq(chdat);
+	dmaengine_terminate_sync(chdat->dma_data->chan);
+	tusb_omap_dma_free_dmareq(chdat);
 
 	channel = NULL;
 }
@@ -641,15 +526,62 @@ void tusb_dma_controller_destroy(struct dma_controller *c)
 			kfree(ch->private_data);
 			kfree(ch);
 		}
-	}
 
-	if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
-		omap_free_dma(tusb_dma->ch);
+		/* Free up the DMA channels */
+		if (tusb_dma && tusb_dma->dma_pool[i].chan)
+			dma_release_channel(tusb_dma->dma_pool[i].chan);
+	}
 
 	kfree(tusb_dma);
 }
 EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);
 
+static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
+{
+	struct musb *musb = tusb_dma->controller.musb;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < MAX_DMAREQ; i++) {
+		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
+
+		/*
+		 * Request DMA channels:
+		 * - one channel in case of non-multichannel mode
+		 * - MAX_DMAREQ number of channels in multichannel mode
+		 */
+		if (i == 0 || tusb_dma->multichannel) {
+			char ch_name[8];
+
+			sprintf(ch_name, "dmareq%d", i);
+			dma_data->chan = dma_request_chan(musb->controller,
+							  ch_name);
+			if (IS_ERR(dma_data->chan)) {
+				dev_err(musb->controller,
+					"Failed to request %s\n", ch_name);
+				ret = PTR_ERR(dma_data->chan);
+				goto dma_error;
+			}
+
+			dma_data->dmareq = i;
+		} else {
+			dma_data->dmareq = -1;
+		}
+	}
+
+	return 0;
+
+dma_error:
+	for (; i >= 0; i--) {
+		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];
+
+		if (dma_data->dmareq >= 0)
+			dma_release_channel(dma_data->chan);
+	}
+
+	return ret;
+}
+
 struct dma_controller *
 tusb_dma_controller_create(struct musb *musb, void __iomem *base)
 {
@@ -674,10 +606,6 @@ tusb_dma_controller_create(struct musb *musb, void __iomem *base)
 	tusb_dma->controller.musb = musb;
 	tusb_dma->tbase = musb->ctrl_base;
 
-	tusb_dma->ch = -1;
-	tusb_dma->dmareq = -1;
-	tusb_dma->sync_dev = -1;
-
 	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
 	tusb_dma->controller.channel_release = tusb_omap_dma_release;
 	tusb_dma->controller.channel_program = tusb_omap_dma_program;
@@ -704,6 +632,9 @@ tusb_dma_controller_create(struct musb *musb, void __iomem *base)
 		ch->private_data = chdat;
 	}
 
+	if (tusb_omap_allocate_dma_pool(tusb_dma))
+		goto cleanup;
+
 	return &tusb_dma->controller;
 
 cleanup:
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3006f56..aff702c 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -4,6 +4,7 @@
 menu "USB Physical Layer drivers"
 
 config USB_PHY
+	select EXTCON
 	def_bool n
 
 #
@@ -109,7 +110,7 @@
 
 config TAHVO_USB
 	tristate "Tahvo USB transceiver driver"
-	depends on MFD_RETU && EXTCON
+	depends on MFD_RETU
 	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
 	select USB_PHY
 	help
@@ -141,7 +142,6 @@
 	depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
 	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
 	depends on RESET_CONTROLLER
-	depends on EXTCON
 	select USB_PHY
 	help
 	  Enable this to support the USB OTG transceiver on Qualcomm chips. It
@@ -155,7 +155,7 @@
 config USB_QCOM_8X16_PHY
 	tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
 	depends on ARCH_QCOM || COMPILE_TEST
-	depends on RESET_CONTROLLER && EXTCON
+	depends on RESET_CONTROLLER
 	select USB_PHY
 	select USB_ULPI_VIEWPORT
 	help
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 93d9aaa..8fb86a5 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -146,17 +146,6 @@ struct msm_otg_platform_data {
 };
 
 /**
- * struct msm_usb_cable - structure for exteternal connector cable
- *			  state tracking
- * @nb: hold event notification callback
- * @conn: used for notification registration
- */
-struct msm_usb_cable {
-	struct notifier_block		nb;
-	struct extcon_dev		*extcon;
-};
-
-/**
  * struct msm_otg: OTG driver data. Shared by HCD and DCD.
  * @otg: USB OTG Transceiver structure.
  * @pdata: otg device platform data.
@@ -215,9 +204,6 @@ struct msm_otg {
 
 	bool manual_pullup;
 
-	struct msm_usb_cable vbus;
-	struct msm_usb_cable id;
-
 	struct gpio_desc *switch_gpio;
 	struct notifier_block reboot;
 };
@@ -1612,8 +1598,8 @@ MODULE_DEVICE_TABLE(of, msm_otg_dt_match);
 static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
 				void *ptr)
 {
-	struct msm_usb_cable *vbus = container_of(nb, struct msm_usb_cable, nb);
-	struct msm_otg *motg = container_of(vbus, struct msm_otg, vbus);
+	struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
+	struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
 
 	if (event)
 		set_bit(B_SESS_VLD, &motg->inputs);
@@ -1636,8 +1622,8 @@ static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
 static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
 				void *ptr)
 {
-	struct msm_usb_cable *id = container_of(nb, struct msm_usb_cable, nb);
-	struct msm_otg *motg = container_of(id, struct msm_otg, id);
+	struct usb_phy *usb_phy = container_of(nb, struct usb_phy, id_nb);
+	struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
 
 	if (event)
 		clear_bit(ID, &motg->inputs);
@@ -1652,7 +1638,6 @@ static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
 static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
 {
 	struct msm_otg_platform_data *pdata;
-	struct extcon_dev *ext_id, *ext_vbus;
 	struct device_node *node = pdev->dev.of_node;
 	struct property *prop;
 	int len, ret, words;
@@ -1708,54 +1693,6 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
 	if (IS_ERR(motg->switch_gpio))
 		return PTR_ERR(motg->switch_gpio);
 
-	ext_id = ERR_PTR(-ENODEV);
-	ext_vbus = ERR_PTR(-ENODEV);
-	if (of_property_read_bool(node, "extcon")) {
-
-		/* Each one of them is not mandatory */
-		ext_vbus = extcon_get_edev_by_phandle(&pdev->dev, 0);
-		if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
-			return PTR_ERR(ext_vbus);
-
-		ext_id = extcon_get_edev_by_phandle(&pdev->dev, 1);
-		if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
-			return PTR_ERR(ext_id);
-	}
-
-	if (!IS_ERR(ext_vbus)) {
-		motg->vbus.extcon = ext_vbus;
-		motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
-		ret = devm_extcon_register_notifier(&pdev->dev, ext_vbus,
-						EXTCON_USB, &motg->vbus.nb);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "register VBUS notifier failed\n");
-			return ret;
-		}
-
-		ret = extcon_get_state(ext_vbus, EXTCON_USB);
-		if (ret)
-			set_bit(B_SESS_VLD, &motg->inputs);
-		else
-			clear_bit(B_SESS_VLD, &motg->inputs);
-	}
-
-	if (!IS_ERR(ext_id)) {
-		motg->id.extcon = ext_id;
-		motg->id.nb.notifier_call = msm_otg_id_notifier;
-		ret = devm_extcon_register_notifier(&pdev->dev, ext_id,
-						EXTCON_USB_HOST, &motg->id.nb);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "register ID notifier failed\n");
-			return ret;
-		}
-
-		ret = extcon_get_state(ext_id, EXTCON_USB_HOST);
-		if (ret)
-			clear_bit(ID, &motg->inputs);
-		else
-			set_bit(ID, &motg->inputs);
-	}
-
 	prop = of_find_property(node, "qcom,phy-init-sequence", &len);
 	if (!prop || !len)
 		return 0;
@@ -1932,6 +1869,8 @@ static int msm_otg_probe(struct platform_device *pdev)
 	phy->init = msm_phy_init;
 	phy->notify_disconnect = msm_phy_notify_disconnect;
 	phy->type = USB_PHY_TYPE_USB2;
+	phy->vbus_nb.notifier_call = msm_otg_vbus_notifier;
+	phy->id_nb.notifier_call = msm_otg_id_notifier;
 
 	phy->io_ops = &msm_otg_io_ops;
 
@@ -1947,6 +1886,18 @@ static int msm_otg_probe(struct platform_device *pdev)
 		goto disable_ldo;
 	}
 
+	ret = extcon_get_state(phy->edev, EXTCON_USB);
+	if (ret)
+		set_bit(B_SESS_VLD, &motg->inputs);
+	else
+		clear_bit(B_SESS_VLD, &motg->inputs);
+
+	ret = extcon_get_state(phy->id_edev, EXTCON_USB_HOST);
+	if (ret)
+		clear_bit(ID, &motg->inputs);
+	else
+		set_bit(ID, &motg->inputs);
+
 	platform_set_drvdata(pdev, motg);
 	device_init_wakeup(&pdev->dev, 1);
 
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index fdf6863..b6a83a5 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -69,9 +69,6 @@ struct phy_8x16 {
 
 	struct reset_control		*phy_reset;
 
-	struct extcon_dev		*vbus_edev;
-	struct notifier_block		vbus_notify;
-
 	struct gpio_desc		*switch_gpio;
 	struct notifier_block		reboot_notify;
 };
@@ -131,7 +128,8 @@ static int phy_8x16_vbus_off(struct phy_8x16 *qphy)
 static int phy_8x16_vbus_notify(struct notifier_block *nb, unsigned long event,
 				void *ptr)
 {
-	struct phy_8x16 *qphy = container_of(nb, struct phy_8x16, vbus_notify);
+	struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
+	struct phy_8x16 *qphy = container_of(usb_phy, struct phy_8x16, phy);
 
 	if (event)
 		phy_8x16_vbus_on(qphy);
@@ -187,7 +185,7 @@ static int phy_8x16_init(struct usb_phy *phy)
 	val = ULPI_PWR_OTG_COMP_DISABLE;
 	usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG));
 
-	state = extcon_get_state(qphy->vbus_edev, EXTCON_USB);
+	state = extcon_get_state(qphy->phy.edev, EXTCON_USB);
 	if (state)
 		phy_8x16_vbus_on(qphy);
 	else
@@ -289,15 +287,13 @@ static int phy_8x16_probe(struct platform_device *pdev)
 	phy->io_priv		= qphy->regs + HSPHY_ULPI_VIEWPORT;
 	phy->io_ops		= &ulpi_viewport_access_ops;
 	phy->type		= USB_PHY_TYPE_USB2;
+	phy->vbus_nb.notifier_call = phy_8x16_vbus_notify;
+	phy->id_nb.notifier_call = NULL;
 
 	ret = phy_8x16_read_devicetree(qphy);
 	if (ret < 0)
 		return ret;
 
-	qphy->vbus_edev = extcon_get_edev_by_phandle(phy->dev, 0);
-	if (IS_ERR(qphy->vbus_edev))
-		return PTR_ERR(qphy->vbus_edev);
-
 	ret = clk_set_rate(qphy->core_clk, INT_MAX);
 	if (ret < 0)
 		dev_dbg(phy->dev, "Can't boost core clock\n");
@@ -315,12 +311,6 @@ static int phy_8x16_probe(struct platform_device *pdev)
 	if (WARN_ON(ret))
 		goto off_clks;
 
-	qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
-	ret = devm_extcon_register_notifier(&pdev->dev, qphy->vbus_edev,
-					EXTCON_USB, &qphy->vbus_notify);
-	if (ret < 0)
-		goto off_power;
-
 	ret = usb_add_phy_dev(&qphy->phy);
 	if (ret)
 		goto off_power;
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 98f75d28..032f5af 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -100,6 +100,54 @@ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
 	return *phy == match_data;
 }
 
+static int usb_add_extcon(struct usb_phy *x)
+{
+	int ret;
+
+	if (of_property_read_bool(x->dev->of_node, "extcon")) {
+		x->edev = extcon_get_edev_by_phandle(x->dev, 0);
+		if (IS_ERR(x->edev))
+			return PTR_ERR(x->edev);
+
+		x->id_edev = extcon_get_edev_by_phandle(x->dev, 1);
+		if (IS_ERR(x->id_edev)) {
+			x->id_edev = NULL;
+			dev_info(x->dev, "No separate ID extcon device\n");
+		}
+
+		if (x->vbus_nb.notifier_call) {
+			ret = devm_extcon_register_notifier(x->dev, x->edev,
+							    EXTCON_USB,
+							    &x->vbus_nb);
+			if (ret < 0) {
+				dev_err(x->dev,
+					"register VBUS notifier failed\n");
+				return ret;
+			}
+		}
+
+		if (x->id_nb.notifier_call) {
+			struct extcon_dev *id_ext;
+
+			if (x->id_edev)
+				id_ext = x->id_edev;
+			else
+				id_ext = x->edev;
+
+			ret = devm_extcon_register_notifier(x->dev, id_ext,
+							    EXTCON_USB_HOST,
+							    &x->id_nb);
+			if (ret < 0) {
+				dev_err(x->dev,
+					"register ID notifier failed\n");
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
 /**
  * devm_usb_get_phy - find the USB PHY
  * @dev - device that requests this phy
@@ -388,6 +436,10 @@ int usb_add_phy(struct usb_phy *x, enum usb_phy_type type)
 		return -EINVAL;
 	}
 
+	ret = usb_add_extcon(x);
+	if (ret)
+		return ret;
+
 	ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
 
 	spin_lock_irqsave(&phy_lock, flags);
@@ -422,12 +474,17 @@ int usb_add_phy_dev(struct usb_phy *x)
 {
 	struct usb_phy_bind *phy_bind;
 	unsigned long flags;
+	int ret;
 
 	if (!x->dev) {
 		dev_err(x->dev, "no device provided for PHY\n");
 		return -EINVAL;
 	}
 
+	ret = usb_add_extcon(x);
+	if (ret)
+		return ret;
+
 	ATOMIC_INIT_NOTIFIER_HEAD(&x->notifier);
 
 	spin_lock_irqsave(&phy_lock, flags);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 0c55e7f..f64e914 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -141,6 +141,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index aba74f8..1cec037 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1244,42 +1244,13 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
 	int div_okay = 1;
 	int baud;
 
-	/*
-	 * The logic involved in setting the baudrate can be cleanly split into
-	 * 3 steps.
-	 * 1. Standard baud rates are set in tty->termios->c_cflag
-	 * 2. If these are not enough, you can set any speed using alt_speed as
-	 * follows:
-	 *    - set tty->termios->c_cflag speed to B38400
-	 *    - set your real speed in tty->alt_speed; it gets ignored when
-	 *      alt_speed==0, (or)
-	 *    - call TIOCSSERIAL ioctl with (struct serial_struct) set as
-	 *	follows:
-	 *      flags & ASYNC_SPD_MASK == ASYNC_SPD_[HI, VHI, SHI, WARP],
-	 *	this just sets alt_speed to (HI: 57600, VHI: 115200,
-	 *	SHI: 230400, WARP: 460800)
-	 * ** Steps 1, 2 are done courtesy of tty_get_baud_rate
-	 * 3. You can also set baud rate by setting custom divisor as follows
-	 *    - set tty->termios->c_cflag speed to B38400
-	 *    - call TIOCSSERIAL ioctl with (struct serial_struct) set as
-	 *	follows:
-	 *      o flags & ASYNC_SPD_MASK == ASYNC_SPD_CUST
-	 *      o custom_divisor set to baud_base / your_new_baudrate
-	 * ** Step 3 is done courtesy of code borrowed from serial.c
-	 *    I should really spend some time and separate + move this common
-	 *    code to serial.c, it is replicated in nearly every serial driver
-	 *    you see.
-	 */
-
-	/* 1. Get the baud rate from the tty settings, this observes
-	      alt_speed hack */
-
 	baud = tty_get_baud_rate(tty);
 	dev_dbg(dev, "%s - tty_get_baud_rate reports speed %d\n", __func__, baud);
 
-	/* 2. Observe async-compatible custom_divisor hack, update baudrate
-	   if needed */
-
+	/*
+	 * Observe deprecated async-compatible custom_divisor hack, update
+	 * baudrate if needed.
+	 */
 	if (baud == 38400 &&
 	    ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
 	     (priv->custom_divisor)) {
@@ -1288,8 +1259,6 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
 			__func__, priv->custom_divisor, baud);
 	}
 
-	/* 3. Convert baudrate to device-specific divisor */
-
 	if (!baud)
 		baud = 9600;
 	switch (priv->chip_type) {
@@ -1505,8 +1474,7 @@ static int set_serial_info(struct tty_struct *tty,
 	/* Do error checking and permission checking */
 
 	if (!capable(CAP_SYS_ADMIN)) {
-		if (((new_serial.flags & ~ASYNC_USR_MASK) !=
-		     (priv->flags & ~ASYNC_USR_MASK))) {
+		if ((new_serial.flags ^ priv->flags) & ~ASYNC_USR_MASK) {
 			mutex_unlock(&priv->cfg_lock);
 			return -EPERM;
 		}
@@ -1530,23 +1498,14 @@ static int set_serial_info(struct tty_struct *tty,
 check_and_exit:
 	write_latency_timer(port);
 
-	if ((old_priv.flags & ASYNC_SPD_MASK) !=
-	     (priv->flags & ASYNC_SPD_MASK)) {
-		if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-			tty->alt_speed = 57600;
-		else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-			tty->alt_speed = 115200;
-		else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-			tty->alt_speed = 230400;
-		else if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-			tty->alt_speed = 460800;
-		else
-			tty->alt_speed = 0;
-	}
-	if (((old_priv.flags & ASYNC_SPD_MASK) !=
-	     (priv->flags & ASYNC_SPD_MASK)) ||
-	    (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
-	     (old_priv.custom_divisor != priv->custom_divisor))) {
+	if ((priv->flags ^ old_priv.flags) & ASYNC_SPD_MASK ||
+			((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+			 priv->custom_divisor != old_priv.custom_divisor)) {
+
+		/* warn about deprecation unless clearing */
+		if (priv->flags & ASYNC_SPD_MASK)
+			dev_warn_ratelimited(&port->dev, "use of SPD flags is deprecated\n");
+
 		change_speed(tty, port);
 		mutex_unlock(&priv->cfg_lock);
 	}
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index fd509ed6c..4ac137d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -454,6 +454,8 @@ static struct usb_serial_driver qcdevice = {
 	.write		     = usb_wwan_write,
 	.write_room	     = usb_wwan_write_room,
 	.chars_in_buffer     = usb_wwan_chars_in_buffer,
+	.tiocmget            = usb_wwan_tiocmget,
+	.tiocmset            = usb_wwan_tiocmset,
 	.attach              = qc_attach,
 	.release	     = qc_release,
 	.port_probe          = usb_wwan_port_probe,
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index a028dd2..6819a34 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -288,7 +288,7 @@ static void upd78f0730_dtr_rts(struct usb_serial_port *port, int on)
 static speed_t upd78f0730_get_baud_rate(struct tty_struct *tty)
 {
 	const speed_t baud_rate = tty_get_baud_rate(tty);
-	const speed_t supported[] = {
+	static const speed_t supported[] = {
 		0, 2400, 4800, 9600, 19200, 38400, 57600, 115200, 153600
 	};
 	int i;
@@ -384,7 +384,7 @@ static void upd78f0730_set_termios(struct tty_struct *tty,
 
 static int upd78f0730_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct upd78f0730_open_close request = {
+	static const struct upd78f0730_open_close request = {
 		.opcode = UPD78F0730_CMD_OPEN_CLOSE,
 		.state = UPD78F0730_PORT_OPEN
 	};
@@ -402,7 +402,7 @@ static int upd78f0730_open(struct tty_struct *tty, struct usb_serial_port *port)
 
 static void upd78f0730_close(struct usb_serial_port *port)
 {
-	struct upd78f0730_open_close request = {
+	static const struct upd78f0730_open_close request = {
 		.opcode = UPD78F0730_CMD_OPEN_CLOSE,
 		.state = UPD78F0730_PORT_CLOSE
 	};
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index c7ca95f..bb34f9f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -742,6 +742,124 @@ static void find_endpoints(struct usb_serial *serial,
 	}
 }
 
+static int setup_port_bulk_in(struct usb_serial_port *port,
+					struct usb_endpoint_descriptor *epd)
+{
+	struct usb_serial_driver *type = port->serial->type;
+	struct usb_device *udev = port->serial->dev;
+	int buffer_size;
+	int i;
+
+	buffer_size = max_t(int, type->bulk_in_size, usb_endpoint_maxp(epd));
+	port->bulk_in_size = buffer_size;
+	port->bulk_in_endpointAddress = epd->bEndpointAddress;
+
+	for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+		set_bit(i, &port->read_urbs_free);
+		port->read_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+		if (!port->read_urbs[i])
+			return -ENOMEM;
+		port->bulk_in_buffers[i] = kmalloc(buffer_size, GFP_KERNEL);
+		if (!port->bulk_in_buffers[i])
+			return -ENOMEM;
+		usb_fill_bulk_urb(port->read_urbs[i], udev,
+				usb_rcvbulkpipe(udev, epd->bEndpointAddress),
+				port->bulk_in_buffers[i], buffer_size,
+				type->read_bulk_callback, port);
+	}
+
+	port->read_urb = port->read_urbs[0];
+	port->bulk_in_buffer = port->bulk_in_buffers[0];
+
+	return 0;
+}
+
+static int setup_port_bulk_out(struct usb_serial_port *port,
+					struct usb_endpoint_descriptor *epd)
+{
+	struct usb_serial_driver *type = port->serial->type;
+	struct usb_device *udev = port->serial->dev;
+	int buffer_size;
+	int i;
+
+	if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+		return -ENOMEM;
+	if (type->bulk_out_size)
+		buffer_size = type->bulk_out_size;
+	else
+		buffer_size = usb_endpoint_maxp(epd);
+	port->bulk_out_size = buffer_size;
+	port->bulk_out_endpointAddress = epd->bEndpointAddress;
+
+	for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
+		set_bit(i, &port->write_urbs_free);
+		port->write_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+		if (!port->write_urbs[i])
+			return -ENOMEM;
+		port->bulk_out_buffers[i] = kmalloc(buffer_size, GFP_KERNEL);
+		if (!port->bulk_out_buffers[i])
+			return -ENOMEM;
+		usb_fill_bulk_urb(port->write_urbs[i], udev,
+				usb_sndbulkpipe(udev, epd->bEndpointAddress),
+				port->bulk_out_buffers[i], buffer_size,
+				type->write_bulk_callback, port);
+	}
+
+	port->write_urb = port->write_urbs[0];
+	port->bulk_out_buffer = port->bulk_out_buffers[0];
+
+	return 0;
+}
+
+static int setup_port_interrupt_in(struct usb_serial_port *port,
+					struct usb_endpoint_descriptor *epd)
+{
+	struct usb_serial_driver *type = port->serial->type;
+	struct usb_device *udev = port->serial->dev;
+	int buffer_size;
+
+	port->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!port->interrupt_in_urb)
+		return -ENOMEM;
+	buffer_size = usb_endpoint_maxp(epd);
+	port->interrupt_in_endpointAddress = epd->bEndpointAddress;
+	port->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (!port->interrupt_in_buffer)
+		return -ENOMEM;
+	usb_fill_int_urb(port->interrupt_in_urb, udev,
+			usb_rcvintpipe(udev, epd->bEndpointAddress),
+			port->interrupt_in_buffer, buffer_size,
+			type->read_int_callback, port,
+			epd->bInterval);
+
+	return 0;
+}
+
+static int setup_port_interrupt_out(struct usb_serial_port *port,
+					struct usb_endpoint_descriptor *epd)
+{
+	struct usb_serial_driver *type = port->serial->type;
+	struct usb_device *udev = port->serial->dev;
+	int buffer_size;
+
+	port->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!port->interrupt_out_urb)
+		return -ENOMEM;
+	buffer_size = usb_endpoint_maxp(epd);
+	port->interrupt_out_size = buffer_size;
+	port->interrupt_out_endpointAddress = epd->bEndpointAddress;
+	port->interrupt_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (!port->interrupt_out_buffer)
+		return -ENOMEM;
+	usb_fill_int_urb(port->interrupt_out_urb, udev,
+			usb_sndintpipe(udev, epd->bEndpointAddress),
+			port->interrupt_out_buffer, buffer_size,
+			type->write_int_callback, port,
+			epd->bInterval);
+
+	return 0;
+}
+
 static int usb_serial_probe(struct usb_interface *interface,
 			       const struct usb_device_id *id)
 {
@@ -749,13 +867,10 @@ static int usb_serial_probe(struct usb_interface *interface,
 	struct usb_device *dev = interface_to_usbdev(interface);
 	struct usb_serial *serial = NULL;
 	struct usb_serial_port *port;
-	struct usb_endpoint_descriptor *endpoint;
 	struct usb_serial_endpoints *epds;
 	struct usb_serial_driver *type = NULL;
 	int retval;
-	int buffer_size;
 	int i;
-	int j;
 	int num_ports = 0;
 	unsigned char max_endpoints;
 
@@ -847,8 +962,10 @@ static int usb_serial_probe(struct usb_interface *interface,
 	dev_dbg(ddev, "setting up %d port structure(s)\n", max_endpoints);
 	for (i = 0; i < max_endpoints; ++i) {
 		port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
-		if (!port)
-			goto probe_error;
+		if (!port) {
+			retval = -ENOMEM;
+			goto err_free_epds;
+		}
 		tty_port_init(&port->port);
 		port->port.ops = &serial_port_ops;
 		port->serial = serial;
@@ -867,86 +984,24 @@ static int usb_serial_probe(struct usb_interface *interface,
 
 	/* set up the endpoint information */
 	for (i = 0; i < epds->num_bulk_in; ++i) {
-		endpoint = epds->bulk_in[i];
-		port = serial->port[i];
-		buffer_size = max_t(int, serial->type->bulk_in_size,
-				usb_endpoint_maxp(endpoint));
-		port->bulk_in_size = buffer_size;
-		port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
-
-		for (j = 0; j < ARRAY_SIZE(port->read_urbs); ++j) {
-			set_bit(j, &port->read_urbs_free);
-			port->read_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
-			if (!port->read_urbs[j])
-				goto probe_error;
-			port->bulk_in_buffers[j] = kmalloc(buffer_size,
-								GFP_KERNEL);
-			if (!port->bulk_in_buffers[j])
-				goto probe_error;
-			usb_fill_bulk_urb(port->read_urbs[j], dev,
-					usb_rcvbulkpipe(dev,
-						endpoint->bEndpointAddress),
-					port->bulk_in_buffers[j], buffer_size,
-					serial->type->read_bulk_callback,
-					port);
-		}
-
-		port->read_urb = port->read_urbs[0];
-		port->bulk_in_buffer = port->bulk_in_buffers[0];
+		retval = setup_port_bulk_in(serial->port[i], epds->bulk_in[i]);
+		if (retval)
+			goto err_free_epds;
 	}
 
 	for (i = 0; i < epds->num_bulk_out; ++i) {
-		endpoint = epds->bulk_out[i];
-		port = serial->port[i];
-		if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
-			goto probe_error;
-		buffer_size = serial->type->bulk_out_size;
-		if (!buffer_size)
-			buffer_size = usb_endpoint_maxp(endpoint);
-		port->bulk_out_size = buffer_size;
-		port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
-
-		for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
-			set_bit(j, &port->write_urbs_free);
-			port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
-			if (!port->write_urbs[j])
-				goto probe_error;
-			port->bulk_out_buffers[j] = kmalloc(buffer_size,
-								GFP_KERNEL);
-			if (!port->bulk_out_buffers[j])
-				goto probe_error;
-			usb_fill_bulk_urb(port->write_urbs[j], dev,
-					usb_sndbulkpipe(dev,
-						endpoint->bEndpointAddress),
-					port->bulk_out_buffers[j], buffer_size,
-					serial->type->write_bulk_callback,
-					port);
-		}
-
-		port->write_urb = port->write_urbs[0];
-		port->bulk_out_buffer = port->bulk_out_buffers[0];
+		retval = setup_port_bulk_out(serial->port[i],
+				epds->bulk_out[i]);
+		if (retval)
+			goto err_free_epds;
 	}
 
 	if (serial->type->read_int_callback) {
 		for (i = 0; i < epds->num_interrupt_in; ++i) {
-			endpoint = epds->interrupt_in[i];
-			port = serial->port[i];
-			port->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
-			if (!port->interrupt_in_urb)
-				goto probe_error;
-			buffer_size = usb_endpoint_maxp(endpoint);
-			port->interrupt_in_endpointAddress =
-						endpoint->bEndpointAddress;
-			port->interrupt_in_buffer = kmalloc(buffer_size,
-								GFP_KERNEL);
-			if (!port->interrupt_in_buffer)
-				goto probe_error;
-			usb_fill_int_urb(port->interrupt_in_urb, dev,
-				usb_rcvintpipe(dev,
-						endpoint->bEndpointAddress),
-				port->interrupt_in_buffer, buffer_size,
-				serial->type->read_int_callback, port,
-				endpoint->bInterval);
+			retval = setup_port_interrupt_in(serial->port[i],
+					epds->interrupt_in[i]);
+			if (retval)
+				goto err_free_epds;
 		}
 	} else if (epds->num_interrupt_in) {
 		dev_dbg(ddev, "The device claims to support interrupt in transfers, but read_int_callback is not defined\n");
@@ -954,25 +1009,10 @@ static int usb_serial_probe(struct usb_interface *interface,
 
 	if (serial->type->write_int_callback) {
 		for (i = 0; i < epds->num_interrupt_out; ++i) {
-			endpoint = epds->interrupt_out[i];
-			port = serial->port[i];
-			port->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
-			if (!port->interrupt_out_urb)
-				goto probe_error;
-			buffer_size = usb_endpoint_maxp(endpoint);
-			port->interrupt_out_size = buffer_size;
-			port->interrupt_out_endpointAddress =
-						endpoint->bEndpointAddress;
-			port->interrupt_out_buffer = kmalloc(buffer_size,
-								GFP_KERNEL);
-			if (!port->interrupt_out_buffer)
-				goto probe_error;
-			usb_fill_int_urb(port->interrupt_out_urb, dev,
-				usb_sndintpipe(dev,
-						  endpoint->bEndpointAddress),
-				port->interrupt_out_buffer, buffer_size,
-				serial->type->write_int_callback, port,
-				endpoint->bInterval);
+			retval = setup_port_interrupt_out(serial->port[i],
+					epds->interrupt_out[i]);
+			if (retval)
+				goto err_free_epds;
 		}
 	} else if (epds->num_interrupt_out) {
 		dev_dbg(ddev, "The device claims to support interrupt out transfers, but write_int_callback is not defined\n");
@@ -984,7 +1024,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 	if (type->attach) {
 		retval = type->attach(serial);
 		if (retval < 0)
-			goto probe_error;
+			goto err_free_epds;
 		serial->attached = 1;
 		if (retval > 0) {
 			/* quietly accept this device, but don't bind to a
@@ -996,9 +1036,10 @@ static int usb_serial_probe(struct usb_interface *interface,
 		serial->attached = 1;
 	}
 
-	if (allocate_minors(serial, num_ports)) {
+	retval = allocate_minors(serial, num_ports);
+	if (retval) {
 		dev_err(ddev, "No more free serial minor numbers\n");
-		goto probe_error;
+		goto err_free_epds;
 	}
 
 	/* register all of the individual ports with the driver core */
@@ -1020,8 +1061,6 @@ static int usb_serial_probe(struct usb_interface *interface,
 	module_put(type->driver.owner);
 	return 0;
 
-probe_error:
-	retval = -EIO;
 err_free_epds:
 	kfree(epds);
 err_put_serial:
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 44af719..2810037 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -95,12 +95,12 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
 #define REG_HW_TRAP1        0xFF89
 
 /* SRB Status */
-#define SS_SUCCESS                  0x00      /* No Sense */
-#define SS_NOT_READY                0x02
-#define SS_MEDIUM_ERR               0x03
-#define SS_HW_ERR                   0x04
-#define SS_ILLEGAL_REQUEST          0x05
-#define SS_UNIT_ATTENTION           0x06
+#define SS_SUCCESS		0x000000	/* No Sense */
+#define SS_NOT_READY		0x023A00	/* Medium not present */
+#define SS_MEDIUM_ERR		0x031100	/* Unrecovered read error */
+#define SS_HW_ERR		0x040800	/* Communication failure */
+#define SS_ILLEGAL_REQUEST	0x052000	/* Invalid command */
+#define SS_UNIT_ATTENTION	0x062900	/* Reset occurred */
 
 /* ENE Load FW Pattern */
 #define SD_INIT1_PATTERN   1
@@ -584,6 +584,34 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
+static int do_scsi_request_sense(struct us_data *us, struct scsi_cmnd *srb)
+{
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	unsigned char buf[18];
+
+	memset(buf, 0, 18);
+	buf[0] = 0x70;				/* Current error */
+	buf[2] = info->SrbStatus >> 16;		/* Sense key */
+	buf[7] = 10;				/* Additional length */
+	buf[12] = info->SrbStatus >> 8;		/* ASC */
+	buf[13] = info->SrbStatus;		/* ASCQ */
+
+	usb_stor_set_xfer_buf(buf, sizeof(buf), srb);
+	return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int do_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
+{
+	unsigned char data_ptr[36] = {
+		0x00, 0x00, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55,
+		0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
+		0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20,
+		0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30 };
+
+	usb_stor_set_xfer_buf(data_ptr, 36, srb);
+	return USB_STOR_TRANSPORT_GOOD;
+}
+
 static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
 {
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -598,18 +626,6 @@ static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-static int sd_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
-{
-	unsigned char data_ptr[36] = {
-		0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55,
-		0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
-		0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20,
-		0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30 };
-
-	usb_stor_set_xfer_buf(data_ptr, 36, srb);
-	return USB_STOR_TRANSPORT_GOOD;
-}
-
 static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
 {
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -1455,19 +1471,6 @@ static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-static int ms_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
-{
-	/* pr_info("MS_SCSI_Inquiry\n"); */
-	unsigned char data_ptr[36] = {
-		0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55,
-		0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
-		0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20,
-		0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
-
-	usb_stor_set_xfer_buf(data_ptr, 36, srb);
-	return USB_STOR_TRANSPORT_GOOD;
-}
-
 static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
 {
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -1940,6 +1943,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
 	bcb->CDB[0] = 0xEF;
 
 	result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+	if (us->srb != NULL)
+		scsi_set_resid(us->srb, 0);
 	info->BIN_FLAG = flag;
 	kfree(buf);
 
@@ -2223,13 +2228,15 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
 	int    result;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
 
-	info->SrbStatus = SS_SUCCESS;
 	switch (srb->cmnd[0]) {
 	case TEST_UNIT_READY:
 		result = sd_scsi_test_unit_ready(us, srb);
 		break; /* 0x00 */
+	case REQUEST_SENSE:
+		result = do_scsi_request_sense(us, srb);
+		break; /* 0x03 */
 	case INQUIRY:
-		result = sd_scsi_inquiry(us, srb);
+		result = do_scsi_inquiry(us, srb);
 		break; /* 0x12 */
 	case MODE_SENSE:
 		result = sd_scsi_mode_sense(us, srb);
@@ -2253,6 +2260,8 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
 		result = USB_STOR_TRANSPORT_FAILED;
 		break;
 	}
+	if (result == USB_STOR_TRANSPORT_GOOD)
+		info->SrbStatus = SS_SUCCESS;
 	return result;
 }
 
@@ -2263,13 +2272,16 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
 {
 	int result;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
-	info->SrbStatus = SS_SUCCESS;
+
 	switch (srb->cmnd[0]) {
 	case TEST_UNIT_READY:
 		result = ms_scsi_test_unit_ready(us, srb);
 		break; /* 0x00 */
+	case REQUEST_SENSE:
+		result = do_scsi_request_sense(us, srb);
+		break; /* 0x03 */
 	case INQUIRY:
-		result = ms_scsi_inquiry(us, srb);
+		result = do_scsi_inquiry(us, srb);
 		break; /* 0x12 */
 	case MODE_SENSE:
 		result = ms_scsi_mode_sense(us, srb);
@@ -2288,26 +2300,29 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
 		result = USB_STOR_TRANSPORT_FAILED;
 		break;
 	}
+	if (result == USB_STOR_TRANSPORT_GOOD)
+		info->SrbStatus = SS_SUCCESS;
 	return result;
 }
 
 static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
 {
-	int result = 0;
+	int result = USB_STOR_XFER_GOOD;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
 
 	/*US_DEBUG(usb_stor_show_command(us, srb)); */
 	scsi_set_resid(srb, 0);
-	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
 		result = ene_init(us);
-	} else {
+	if (result == USB_STOR_XFER_GOOD) {
+		result = USB_STOR_TRANSPORT_ERROR;
 		if (info->SD_Status.Ready)
 			result = sd_scsi_irp(us, srb);
 
 		if (info->MS_Status.Ready)
 			result = ms_scsi_irp(us, srb);
 	}
-	return 0;
+	return result;
 }
 
 static struct scsi_host_template ene_ub6250_host_template;
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index dfcfe45..bc1b774 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -19,4 +19,6 @@
 	  To compile this driver as module, choose M here: the module will be
 	  called typec_wcove
 
+source "drivers/usb/typec/ucsi/Kconfig"
+
 endmenu
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index b9cb862..bc214f1 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_TYPEC)		+= typec.o
 obj-$(CONFIG_TYPEC_WCOVE)	+= typec_wcove.o
+obj-$(CONFIG_TYPEC_UCSI)	+= ucsi/
diff --git a/drivers/usb/typec/typec.c b/drivers/usb/typec/typec.c
index 89e540b..24e355b 100644
--- a/drivers/usb/typec/typec.c
+++ b/drivers/usb/typec/typec.c
@@ -11,6 +11,7 @@
 
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/usb/typec.h>
 
@@ -69,6 +70,8 @@ struct typec_port {
 	enum typec_role			pwr_role;
 	enum typec_role			vconn_role;
 	enum typec_pwr_opmode		pwr_opmode;
+	enum typec_port_type		port_type;
+	struct mutex			port_type_lock;
 
 	const struct typec_capability	*cap;
 };
@@ -291,7 +294,7 @@ typec_altmode_roles_show(struct device *dev, struct device_attribute *attr,
 }
 
 static void typec_init_modes(struct typec_altmode *alt,
-			     struct typec_mode_desc *desc, bool is_port)
+			     const struct typec_mode_desc *desc, bool is_port)
 {
 	int i;
 
@@ -378,7 +381,8 @@ static const struct device_type typec_altmode_dev_type = {
 };
 
 static struct typec_altmode *
-typec_register_altmode(struct device *parent, struct typec_altmode_desc *desc)
+typec_register_altmode(struct device *parent,
+		       const struct typec_altmode_desc *desc)
 {
 	struct typec_altmode *alt;
 	int ret;
@@ -495,7 +499,7 @@ EXPORT_SYMBOL_GPL(typec_partner_set_identity);
  */
 struct typec_altmode *
 typec_partner_register_altmode(struct typec_partner *partner,
-			       struct typec_altmode_desc *desc)
+			       const struct typec_altmode_desc *desc)
 {
 	return typec_register_altmode(&partner->dev, desc);
 }
@@ -590,7 +594,7 @@ static const struct device_type typec_plug_dev_type = {
  */
 struct typec_altmode *
 typec_plug_register_altmode(struct typec_plug *plug,
-			    struct typec_altmode_desc *desc)
+			    const struct typec_altmode_desc *desc)
 {
 	return typec_register_altmode(&plug->dev, desc);
 }
@@ -789,6 +793,18 @@ static const char * const typec_data_roles[] = {
 	[TYPEC_HOST]	= "host",
 };
 
+static const char * const typec_port_types[] = {
+	[TYPEC_PORT_DFP] = "source",
+	[TYPEC_PORT_UFP] = "sink",
+	[TYPEC_PORT_DRP] = "dual",
+};
+
+static const char * const typec_port_types_drp[] = {
+	[TYPEC_PORT_DFP] = "dual [source] sink",
+	[TYPEC_PORT_UFP] = "dual source [sink]",
+	[TYPEC_PORT_DRP] = "[dual] source sink",
+};
+
 static ssize_t
 preferred_role_store(struct device *dev, struct device_attribute *attr,
 		     const char *buf, size_t size)
@@ -846,11 +862,6 @@ static ssize_t data_role_store(struct device *dev,
 	struct typec_port *port = to_typec_port(dev);
 	int ret;
 
-	if (port->cap->type != TYPEC_PORT_DRP) {
-		dev_dbg(dev, "data role swap only supported with DRP ports\n");
-		return -EOPNOTSUPP;
-	}
-
 	if (!port->cap->dr_set) {
 		dev_dbg(dev, "data role swapping not supported\n");
 		return -EOPNOTSUPP;
@@ -860,11 +871,22 @@ static ssize_t data_role_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 
+	mutex_lock(&port->port_type_lock);
+	if (port->port_type != TYPEC_PORT_DRP) {
+		dev_dbg(dev, "port type fixed at \"%s\"",
+			     typec_port_types[port->port_type]);
+		ret = -EOPNOTSUPP;
+		goto unlock_and_ret;
+	}
+
 	ret = port->cap->dr_set(port->cap, ret);
 	if (ret)
-		return ret;
+		goto unlock_and_ret;
 
-	return size;
+	ret = size;
+unlock_and_ret:
+	mutex_unlock(&port->port_type_lock);
+	return ret;
 }
 
 static ssize_t data_role_show(struct device *dev,
@@ -885,7 +907,7 @@ static ssize_t power_role_store(struct device *dev,
 				const char *buf, size_t size)
 {
 	struct typec_port *port = to_typec_port(dev);
-	int ret = size;
+	int ret;
 
 	if (!port->cap->pd_revision) {
 		dev_dbg(dev, "USB Power Delivery not supported\n");
@@ -906,11 +928,22 @@ static ssize_t power_role_store(struct device *dev,
 	if (ret < 0)
 		return ret;
 
+	mutex_lock(&port->port_type_lock);
+	if (port->port_type != TYPEC_PORT_DRP) {
+		dev_dbg(dev, "port type fixed at \"%s\"",
+			     typec_port_types[port->port_type]);
+		ret = -EOPNOTSUPP;
+		goto unlock_and_ret;
+	}
+
 	ret = port->cap->pr_set(port->cap, ret);
 	if (ret)
-		return ret;
+		goto unlock_and_ret;
 
-	return size;
+	ret = size;
+unlock_and_ret:
+	mutex_unlock(&port->port_type_lock);
+	return ret;
 }
 
 static ssize_t power_role_show(struct device *dev,
@@ -926,6 +959,57 @@ static ssize_t power_role_show(struct device *dev,
 }
 static DEVICE_ATTR_RW(power_role);
 
+static ssize_t
+port_type_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t size)
+{
+	struct typec_port *port = to_typec_port(dev);
+	int ret;
+	enum typec_port_type type;
+
+	if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) {
+		dev_dbg(dev, "changing port type not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	ret = sysfs_match_string(typec_port_types, buf);
+	if (ret < 0)
+		return ret;
+
+	type = ret;
+	mutex_lock(&port->port_type_lock);
+
+	if (port->port_type == type) {
+		ret = size;
+		goto unlock_and_ret;
+	}
+
+	ret = port->cap->port_type_set(port->cap, type);
+	if (ret)
+		goto unlock_and_ret;
+
+	port->port_type = type;
+	ret = size;
+
+unlock_and_ret:
+	mutex_unlock(&port->port_type_lock);
+	return ret;
+}
+
+static ssize_t
+port_type_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct typec_port *port = to_typec_port(dev);
+
+	if (port->cap->type == TYPEC_PORT_DRP)
+		return sprintf(buf, "%s\n",
+			       typec_port_types_drp[port->port_type]);
+
+	return sprintf(buf, "[%s]\n", typec_port_types[port->cap->type]);
+}
+static DEVICE_ATTR_RW(port_type);
+
 static const char * const typec_pwr_opmodes[] = {
 	[TYPEC_PWR_MODE_USB]	= "default",
 	[TYPEC_PWR_MODE_1_5A]	= "1.5A",
@@ -1035,6 +1119,7 @@ static struct attribute *typec_attrs[] = {
 	&dev_attr_usb_power_delivery_revision.attr,
 	&dev_attr_usb_typec_revision.attr,
 	&dev_attr_vconn_source.attr,
+	&dev_attr_port_type.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(typec);
@@ -1123,6 +1208,11 @@ void typec_set_vconn_role(struct typec_port *port, enum typec_role role)
 }
 EXPORT_SYMBOL_GPL(typec_set_vconn_role);
 
+static int partner_match(struct device *dev, void *data)
+{
+	return is_typec_partner(dev);
+}
+
 /**
  * typec_set_pwr_opmode - Report changed power operation mode
  * @port: The USB Type-C Port where the mode was changed
@@ -1136,12 +1226,26 @@ EXPORT_SYMBOL_GPL(typec_set_vconn_role);
 void typec_set_pwr_opmode(struct typec_port *port,
 			  enum typec_pwr_opmode opmode)
 {
+	struct device *partner_dev;
+
 	if (port->pwr_opmode == opmode)
 		return;
 
 	port->pwr_opmode = opmode;
 	sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
 	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+
+	partner_dev = device_find_child(&port->dev, NULL, partner_match);
+	if (partner_dev) {
+		struct typec_partner *partner = to_typec_partner(partner_dev);
+
+		if (opmode == TYPEC_PWR_MODE_PD && !partner->usb_pd) {
+			partner->usb_pd = 1;
+			sysfs_notify(&partner_dev->kobj, NULL,
+				     "supports_usb_power_delivery");
+		}
+		put_device(partner_dev);
+	}
 }
 EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
 
@@ -1159,7 +1263,7 @@ EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
  */
 struct typec_altmode *
 typec_port_register_altmode(struct typec_port *port,
-			    struct typec_altmode_desc *desc)
+			    const struct typec_altmode_desc *desc)
 {
 	return typec_register_altmode(&port->dev, desc);
 }
@@ -1211,6 +1315,8 @@ struct typec_port *typec_register_port(struct device *parent,
 
 	port->id = id;
 	port->cap = cap;
+	port->port_type = cap->type;
+	mutex_init(&port->port_type_lock);
 	port->prefer_role = cap->prefer_role;
 
 	port->dev.class = typec_class;
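
The new port_type attribute only has an effect when the low-level port driver supplies a port_type_set callback in its typec_capability; the header change adding that field is not part of the hunks shown here, so the signature below is inferred from the port_type_store() call above. A minimal sketch, with every foo_* name invented for illustration:

	/*
	 * Hypothetical sketch, not from this patch.  Assumes struct
	 * typec_capability gained a port_type_set hook mirroring
	 * dr_set/pr_set, as implied by port_type_store().
	 */
	static int foo_port_type_set(const struct typec_capability *cap,
				     enum typec_port_type type)
	{
		struct foo_port *foo = container_of(cap, struct foo_port, cap);

		/* Reprogram the port controller for DFP-only, UFP-only or DRP mode */
		return foo_hw_set_mode(foo, type);
	}

	static const struct typec_capability foo_capability = {
		.type		= TYPEC_PORT_DRP,	/* type changes are only allowed on DRP ports */
		.dr_set		= foo_dr_set,
		.pr_set		= foo_pr_set,
		.port_type_set	= foo_port_type_set,
	};

With that in place, writing "source", "sink" or "dual" to the sysfs file ends up in foo_port_type_set(), and port_type_show() brackets the currently active selection.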
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index d5a7b21..c2ce252 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -105,8 +105,8 @@ enum wcove_typec_role {
 	WCOVE_ROLE_DEVICE,
 };
 
-static uuid_le uuid = UUID_LE(0x482383f0, 0x2876, 0x4e49,
-			      0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
+			       0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
 
 static int wcove_typec_func(struct wcove_typec *wcove,
 			    enum wcove_typec_func func, int param)
@@ -118,7 +118,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
 	tmp.type = ACPI_TYPE_INTEGER;
 	tmp.integer.value = param;
 
-	obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), uuid.b, 1, func,
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
 				&argv4);
 	if (!obj) {
 		dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@@ -314,7 +314,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), uuid.b, 0, 0x1f)) {
+	if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
 		dev_err(&pdev->dev, "Missing _DSM functions\n");
 		return -ENODEV;
 	}
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
new file mode 100644
index 0000000..d0c31ce
--- /dev/null
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -0,0 +1,39 @@
+config TYPEC_UCSI
+	tristate "USB Type-C Connector System Software Interface driver"
+	depends on !CPU_BIG_ENDIAN
+	select TYPEC
+	help
+	  USB Type-C Connector System Software Interface (UCSI) is a
+	  specification for an interface that allows the operating system to
+	  control the USB Type-C ports. On a UCSI system the USB Type-C ports
+	  function autonomously by default, but in order to get the status of
+	  the ports and support basic operations like role swapping, this
+	  driver is required. UCSI is available on most new Intel based systems
+	  that are equipped with an Embedded Controller and USB Type-C ports.
+
+	  The UCSI specification does not define the interface method, so
+	  depending on the platform, ACPI, PCI, I2C, etc. may be used. Therefore
+	  this driver only provides the core part, and separate drivers are
+	  needed for every supported interface method.
+
+	  The UCSI specification can be downloaded from:
+	  http://www.intel.com/content/www/us/en/io/universal-serial-bus/usb-type-c-ucsi-spec.html
+
+	  To compile the driver as a module, choose M here: the module will be
+	  called typec_ucsi.
+
+if TYPEC_UCSI
+
+config UCSI_ACPI
+	tristate "UCSI ACPI Interface Driver"
+	depends on ACPI
+	help
+	  This driver enables UCSI support on platforms that expose the UCSI
+	  interface as an ACPI device. On new Intel Atom based platforms
+	  starting from Broxton SoCs and Core platforms starting from Skylake,
+	  UCSI is an ACPI enumerated device.
+
+	  To compile the driver as a module, choose M here: the module will be
+	  called ucsi_acpi.
+
+endif
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
new file mode 100644
index 0000000..8372fc2
--- /dev/null
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -0,0 +1,9 @@
+CFLAGS_trace.o			:= -I$(src)
+
+obj-$(CONFIG_TYPEC_UCSI)	+= typec_ucsi.o
+
+typec_ucsi-y			:= ucsi.o
+
+typec_ucsi-$(CONFIG_FTRACE)	+= trace.o
+
+obj-$(CONFIG_UCSI_ACPI)		+= ucsi_acpi.o
diff --git a/drivers/usb/typec/ucsi/debug.h b/drivers/usb/typec/ucsi/debug.h
new file mode 100644
index 0000000..e4d8fc7
--- /dev/null
+++ b/drivers/usb/typec/ucsi/debug.h
@@ -0,0 +1,64 @@
+#ifndef __UCSI_DEBUG_H
+#define __UCSI_DEBUG_H
+
+#include "ucsi.h"
+
+static const char * const ucsi_cmd_strs[] = {
+	[0]				= "Unknown command",
+	[UCSI_PPM_RESET]		= "PPM_RESET",
+	[UCSI_CANCEL]			= "CANCEL",
+	[UCSI_CONNECTOR_RESET]		= "CONNECTOR_RESET",
+	[UCSI_ACK_CC_CI]		= "ACK_CC_CI",
+	[UCSI_SET_NOTIFICATION_ENABLE]	= "SET_NOTIFICATION_ENABLE",
+	[UCSI_GET_CAPABILITY]		= "GET_CAPABILITY",
+	[UCSI_GET_CONNECTOR_CAPABILITY]	= "GET_CONNECTOR_CAPABILITY",
+	[UCSI_SET_UOM]			= "SET_UOM",
+	[UCSI_SET_UOR]			= "SET_UOR",
+	[UCSI_SET_PDM]			= "SET_PDM",
+	[UCSI_SET_PDR]			= "SET_PDR",
+	[UCSI_GET_ALTERNATE_MODES]	= "GET_ALTERNATE_MODES",
+	[UCSI_GET_CAM_SUPPORTED]	= "GET_CAM_SUPPORTED",
+	[UCSI_GET_CURRENT_CAM]		= "GET_CURRENT_CAM",
+	[UCSI_SET_NEW_CAM]		= "SET_NEW_CAM",
+	[UCSI_GET_PDOS]			= "GET_PDOS",
+	[UCSI_GET_CABLE_PROPERTY]	= "GET_CABLE_PROPERTY",
+	[UCSI_GET_CONNECTOR_STATUS]	= "GET_CONNECTOR_STATUS",
+	[UCSI_GET_ERROR_STATUS]		= "GET_ERROR_STATUS",
+};
+
+static inline const char *ucsi_cmd_str(u64 raw_cmd)
+{
+	u8 cmd = raw_cmd & GENMASK(7, 0);
+
+	return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
+}
+
+static const char * const ucsi_ack_strs[] = {
+	[0]				= "",
+	[UCSI_ACK_EVENT]		= "event",
+	[UCSI_ACK_CMD]			= "command",
+};
+
+static inline const char *ucsi_ack_str(u8 ack)
+{
+	return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
+}
+
+static inline const char *ucsi_cci_str(u32 cci)
+{
+	if (cci & GENMASK(7, 0)) {
+		if (cci & BIT(29))
+			return "Event pending (ACK completed)";
+		if (cci & BIT(31))
+			return "Event pending (command completed)";
+		return "Connector Change";
+	}
+	if (cci & BIT(29))
+		return "ACK completed";
+	if (cci & BIT(31))
+		return "Command completed";
+
+	return "";
+}
+
+#endif /* __UCSI_DEBUG_H */
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
new file mode 100644
index 0000000..006f65c
--- /dev/null
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
new file mode 100644
index 0000000..98b4044
--- /dev/null
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -0,0 +1,143 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ucsi
+
+#if !defined(__UCSI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __UCSI_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "ucsi.h"
+#include "debug.h"
+
+DECLARE_EVENT_CLASS(ucsi_log_ack,
+	TP_PROTO(u8 ack),
+	TP_ARGS(ack),
+	TP_STRUCT__entry(
+		__field(u8, ack)
+	),
+	TP_fast_assign(
+		__entry->ack = ack;
+	),
+	TP_printk("ACK %s", ucsi_ack_str(__entry->ack))
+);
+
+DEFINE_EVENT(ucsi_log_ack, ucsi_ack,
+	TP_PROTO(u8 ack),
+	TP_ARGS(ack)
+);
+
+DECLARE_EVENT_CLASS(ucsi_log_control,
+	TP_PROTO(struct ucsi_control *ctrl),
+	TP_ARGS(ctrl),
+	TP_STRUCT__entry(
+		__field(u64, ctrl)
+	),
+	TP_fast_assign(
+		__entry->ctrl = ctrl->raw_cmd;
+	),
+	TP_printk("control=%08llx (%s)", __entry->ctrl,
+		ucsi_cmd_str(__entry->ctrl))
+);
+
+DEFINE_EVENT(ucsi_log_control, ucsi_command,
+	TP_PROTO(struct ucsi_control *ctrl),
+	TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(ucsi_log_command,
+	TP_PROTO(struct ucsi_control *ctrl, int ret),
+	TP_ARGS(ctrl, ret),
+	TP_STRUCT__entry(
+		__field(u64, ctrl)
+		__field(int, ret)
+	),
+	TP_fast_assign(
+		__entry->ctrl = ctrl->raw_cmd;
+		__entry->ret = ret;
+	),
+	TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl),
+		__entry->ret < 0 ? "FAIL" : "OK",
+		__entry->ret < 0 ? __entry->ret : 0)
+);
+
+DEFINE_EVENT(ucsi_log_command, ucsi_run_command,
+	TP_PROTO(struct ucsi_control *ctrl, int ret),
+	TP_ARGS(ctrl, ret)
+);
+
+DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm,
+	TP_PROTO(struct ucsi_control *ctrl, int ret),
+	TP_ARGS(ctrl, ret)
+);
+
+DECLARE_EVENT_CLASS(ucsi_log_cci,
+	TP_PROTO(u32 cci),
+	TP_ARGS(cci),
+	TP_STRUCT__entry(
+		__field(u32, cci)
+	),
+	TP_fast_assign(
+		__entry->cci = cci;
+	),
+	TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci))
+);
+
+DEFINE_EVENT(ucsi_log_cci, ucsi_notify,
+	TP_PROTO(u32 cci),
+	TP_ARGS(cci)
+);
+
+DECLARE_EVENT_CLASS(ucsi_log_connector_status,
+	TP_PROTO(int port, struct ucsi_connector_status *status),
+	TP_ARGS(port, status),
+	TP_STRUCT__entry(
+		__field(int, port)
+		__field(u16, change)
+		__field(u8, opmode)
+		__field(u8, connected)
+		__field(u8, pwr_dir)
+		__field(u8, partner_flags)
+		__field(u8, partner_type)
+		__field(u32, request_data_obj)
+		__field(u8, bc_status)
+	),
+	TP_fast_assign(
+		__entry->port = port - 1;
+		__entry->change = status->change;
+		__entry->opmode = status->pwr_op_mode;
+		__entry->connected = status->connected;
+		__entry->pwr_dir = status->pwr_dir;
+		__entry->partner_flags = status->partner_flags;
+		__entry->partner_type = status->partner_type;
+		__entry->request_data_obj = status->request_data_obj;
+		__entry->bc_status = status->bc_status;
+	),
+	TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, "
+		"sourcing=%d, partner_flags=%x, partner_type=%x, "
+		"request_data_obj=%08x, BC status=%x", __entry->port,
+		__entry->change, __entry->opmode, __entry->connected,
+		__entry->pwr_dir, __entry->partner_flags, __entry->partner_type,
+		__entry->request_data_obj, __entry->bc_status)
+);
+
+DEFINE_EVENT(ucsi_log_connector_status, ucsi_connector_change,
+	TP_PROTO(int port, struct ucsi_connector_status *status),
+	TP_ARGS(port, status)
+);
+
+DEFINE_EVENT(ucsi_log_connector_status, ucsi_register_port,
+	TP_PROTO(int port, struct ucsi_connector_status *status),
+	TP_ARGS(port, status)
+);
+
+#endif /* __UCSI_TRACE_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
new file mode 100644
index 0000000..714c5bc
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -0,0 +1,790 @@
+/*
+ * USB Type-C Connector System Software Interface driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/completion.h>
+#include <linux/property.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+
+#include "ucsi.h"
+#include "trace.h"
+
+#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \
+					      typec_cap)
+
+/*
+ * UCSI_TIMEOUT_MS - PPM communication timeout
+ *
+ * Ideally we could use MIN_TIME_TO_RESPOND_WITH_BUSY (which is defined in the
+ * UCSI specification) here as a reference, but unfortunately we can't. It is
+ * very difficult to estimate the time it takes for the system to process the
+ * command before it is actually passed to the PPM.
+ */
+#define UCSI_TIMEOUT_MS		1000
+
+/*
+ * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
+ *
+ * 5 seconds is close to the time it takes for CapsCounter to reach 0, so even
+ * if the PPM does not generate Connector Change events before that with
+ * partners that do not support USB Power Delivery, this should still work.
+ */
+#define UCSI_SWAP_TIMEOUT_MS	5000
+
+enum ucsi_status {
+	UCSI_IDLE = 0,
+	UCSI_BUSY,
+	UCSI_ERROR,
+};
+
+struct ucsi_connector {
+	int num;
+
+	struct ucsi *ucsi;
+	struct work_struct work;
+	struct completion complete;
+
+	struct typec_port *port;
+	struct typec_partner *partner;
+
+	struct typec_capability typec_cap;
+
+	struct ucsi_connector_status status;
+	struct ucsi_connector_capability cap;
+};
+
+struct ucsi {
+	struct device *dev;
+	struct ucsi_ppm *ppm;
+
+	enum ucsi_status status;
+	struct completion complete;
+	struct ucsi_capability cap;
+	struct ucsi_connector *connector;
+
+	struct work_struct work;
+
+	/* PPM Communication lock */
+	struct mutex ppm_lock;
+
+	/* PPM communication flags */
+	unsigned long flags;
+#define EVENT_PENDING	0
+#define COMMAND_PENDING	1
+#define ACK_PENDING	2
+};
+
+static inline int ucsi_sync(struct ucsi *ucsi)
+{
+	if (ucsi->ppm && ucsi->ppm->sync)
+		return ucsi->ppm->sync(ucsi->ppm);
+	return 0;
+}
+
+static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl)
+{
+	int ret;
+
+	trace_ucsi_command(ctrl);
+
+	set_bit(COMMAND_PENDING, &ucsi->flags);
+
+	ret = ucsi->ppm->cmd(ucsi->ppm, ctrl);
+	if (ret)
+		goto err_clear_flag;
+
+	if (!wait_for_completion_timeout(&ucsi->complete,
+					 msecs_to_jiffies(UCSI_TIMEOUT_MS))) {
+		dev_warn(ucsi->dev, "PPM NOT RESPONDING\n");
+		ret = -ETIMEDOUT;
+	}
+
+err_clear_flag:
+	clear_bit(COMMAND_PENDING, &ucsi->flags);
+
+	return ret;
+}
+
+static int ucsi_ack(struct ucsi *ucsi, u8 ack)
+{
+	struct ucsi_control ctrl;
+	int ret;
+
+	trace_ucsi_ack(ack);
+
+	set_bit(ACK_PENDING, &ucsi->flags);
+
+	UCSI_CMD_ACK(ctrl, ack);
+	ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+	if (ret)
+		goto out_clear_bit;
+
+	/* For now, wait for the ACK to complete only after ACK CMD, not after EVENT */
+	if (ack == UCSI_ACK_EVENT)
+		goto out_clear_bit;
+
+	if (!wait_for_completion_timeout(&ucsi->complete,
+					 msecs_to_jiffies(UCSI_TIMEOUT_MS)))
+		ret = -ETIMEDOUT;
+
+out_clear_bit:
+	clear_bit(ACK_PENDING, &ucsi->flags);
+
+	if (ret)
+		dev_err(ucsi->dev, "%s: failed\n", __func__);
+
+	return ret;
+}
+
+static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+			    void *data, size_t size)
+{
+	struct ucsi_control _ctrl;
+	u8 data_length;
+	u16 error;
+	int ret;
+
+	ret = ucsi_command(ucsi, ctrl);
+	if (ret)
+		goto err;
+
+	switch (ucsi->status) {
+	case UCSI_IDLE:
+		ret = ucsi_sync(ucsi);
+		if (ret)
+			dev_warn(ucsi->dev, "%s: sync failed\n", __func__);
+
+		if (data)
+			memcpy(data, ucsi->ppm->data->message_in, size);
+
+		data_length = ucsi->ppm->data->cci.data_length;
+
+		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+		if (!ret)
+			ret = data_length;
+		break;
+	case UCSI_BUSY:
+		/* The caller decides whether to cancel or not */
+		ret = -EBUSY;
+		break;
+	case UCSI_ERROR:
+		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+		if (ret)
+			break;
+
+		_ctrl.raw_cmd = 0;
+		_ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS;
+		ret = ucsi_command(ucsi, &_ctrl);
+		if (ret) {
+			dev_err(ucsi->dev, "reading error failed!\n");
+			break;
+		}
+
+		memcpy(&error, ucsi->ppm->data->message_in, sizeof(error));
+
+		/* Something has really gone wrong */
+		if (WARN_ON(ucsi->status == UCSI_ERROR)) {
+			ret = -ENODEV;
+			break;
+		}
+
+		ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+		if (ret)
+			break;
+
+		switch (error) {
+		case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+			ret = -EOPNOTSUPP;
+			break;
+		case UCSI_ERROR_CC_COMMUNICATION_ERR:
+			ret = -ECOMM;
+			break;
+		case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+			ret = -EPROTO;
+			break;
+		case UCSI_ERROR_DEAD_BATTERY:
+			dev_warn(ucsi->dev, "Dead battery condition!\n");
+			ret = -EPERM;
+			break;
+		/* The following mean a bug in this driver */
+		case UCSI_ERROR_INVALID_CON_NUM:
+		case UCSI_ERROR_UNREGONIZED_CMD:
+		case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+			dev_warn(ucsi->dev,
+				 "%s: possible UCSI driver bug - error 0x%x\n",
+				 __func__, error);
+			ret = -EINVAL;
+			break;
+		default:
+			dev_warn(ucsi->dev,
+				 "%s: error without status\n", __func__);
+			ret = -EIO;
+			break;
+		}
+		break;
+	}
+
+err:
+	trace_ucsi_run_command(ctrl, ret);
+
+	return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
+{
+	switch (con->status.pwr_op_mode) {
+	case UCSI_CONSTAT_PWR_OPMODE_PD:
+		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
+		break;
+	case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_1_5A);
+		break;
+	case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_3_0A);
+		break;
+	default:
+		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_USB);
+		break;
+	}
+}
+
+static int ucsi_register_partner(struct ucsi_connector *con)
+{
+	struct typec_partner_desc partner;
+
+	if (con->partner)
+		return 0;
+
+	memset(&partner, 0, sizeof(partner));
+
+	switch (con->status.partner_type) {
+	case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
+		partner.accessory = TYPEC_ACCESSORY_DEBUG;
+		break;
+	case UCSI_CONSTAT_PARTNER_TYPE_AUDIO:
+		partner.accessory = TYPEC_ACCESSORY_AUDIO;
+		break;
+	default:
+		break;
+	}
+
+	partner.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD;
+
+	con->partner = typec_register_partner(con->port, &partner);
+	if (!con->partner) {
+		dev_err(con->ucsi->dev, "con%d: failed to register partner\n",
+			con->num);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void ucsi_unregister_partner(struct ucsi_connector *con)
+{
+	typec_unregister_partner(con->partner);
+	con->partner = NULL;
+}
+
+static void ucsi_connector_change(struct work_struct *work)
+{
+	struct ucsi_connector *con = container_of(work, struct ucsi_connector,
+						  work);
+	struct ucsi *ucsi = con->ucsi;
+	struct ucsi_control ctrl;
+	int ret;
+
+	mutex_lock(&ucsi->ppm_lock);
+
+	UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
+	ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+	if (ret < 0) {
+		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+			__func__, ret);
+		goto out_unlock;
+	}
+
+	if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
+		ucsi_pwr_opmode_change(con);
+
+	if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+		typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+		/* Complete pending power role swap */
+		if (!completion_done(&con->complete))
+			complete(&con->complete);
+	}
+
+	if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) {
+		switch (con->status.partner_type) {
+		case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+			typec_set_data_role(con->port, TYPEC_HOST);
+			break;
+		case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+			typec_set_data_role(con->port, TYPEC_DEVICE);
+			break;
+		default:
+			break;
+		}
+
+		/* Complete pending data role swap */
+		if (!completion_done(&con->complete))
+			complete(&con->complete);
+	}
+
+	if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+		if (con->status.connected)
+			ucsi_register_partner(con);
+		else
+			ucsi_unregister_partner(con);
+	}
+
+	ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
+	if (ret)
+		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+
+	trace_ucsi_connector_change(con->num, &con->status);
+
+out_unlock:
+	clear_bit(EVENT_PENDING, &ucsi->flags);
+	mutex_unlock(&ucsi->ppm_lock);
+}
+
+/**
+ * ucsi_notify - PPM notification handler
+ * @ucsi: Source UCSI Interface for the notifications
+ *
+ * Handle notifications from PPM of @ucsi.
+ */
+void ucsi_notify(struct ucsi *ucsi)
+{
+	struct ucsi_cci *cci;
+
+	/* There is no requirement to sync here, but no harm either. */
+	ucsi_sync(ucsi);
+
+	cci = &ucsi->ppm->data->cci;
+
+	if (cci->error)
+		ucsi->status = UCSI_ERROR;
+	else if (cci->busy)
+		ucsi->status = UCSI_BUSY;
+	else
+		ucsi->status = UCSI_IDLE;
+
+	if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) {
+		complete(&ucsi->complete);
+	} else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) {
+		complete(&ucsi->complete);
+	} else if (cci->connector_change) {
+		struct ucsi_connector *con;
+
+		con = &ucsi->connector[cci->connector_change - 1];
+
+		if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+			schedule_work(&con->work);
+	}
+
+	trace_ucsi_notify(ucsi->ppm->data->raw_cci);
+}
+EXPORT_SYMBOL_GPL(ucsi_notify);
+
+/* -------------------------------------------------------------------------- */
+
+static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
+{
+	struct ucsi_control ctrl;
+
+	UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
+
+	return ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+}
+
+static int ucsi_reset_ppm(struct ucsi *ucsi)
+{
+	struct ucsi_control ctrl;
+	unsigned long tmo;
+	int ret;
+
+	ctrl.raw_cmd = 0;
+	ctrl.cmd.cmd = UCSI_PPM_RESET;
+	trace_ucsi_command(&ctrl);
+	ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+	if (ret)
+		goto err;
+
+	tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
+
+	do {
+		/* Here sync is critical. */
+		ret = ucsi_sync(ucsi);
+		if (ret)
+			goto err;
+
+		if (ucsi->ppm->data->cci.reset_complete)
+			break;
+
+		/* If the PPM is still doing something else, reset it again. */
+		if (ucsi->ppm->data->raw_cci) {
+			dev_warn_ratelimited(ucsi->dev,
+				"Failed to reset PPM! Trying again..\n");
+
+			trace_ucsi_command(&ctrl);
+			ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+			if (ret)
+				goto err;
+		}
+
+		/* Letting the PPM settle down. */
+		msleep(20);
+
+		ret = -ETIMEDOUT;
+	} while (time_is_after_jiffies(tmo));
+
+err:
+	trace_ucsi_reset_ppm(&ctrl, ret);
+
+	return ret;
+}
+
+static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
+{
+	int ret;
+
+	ret = ucsi_run_command(con->ucsi, ctrl, NULL, 0);
+	if (ret == -ETIMEDOUT) {
+		struct ucsi_control c;
+
+		/* PPM most likely stopped responding. Resetting everything. */
+		ucsi_reset_ppm(con->ucsi);
+
+		UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
+		ucsi_run_command(con->ucsi, &c, NULL, 0);
+
+		ucsi_reset_connector(con, true);
+	}
+
+	return ret;
+}
+
+static int
+ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
+{
+	struct ucsi_connector *con = to_ucsi_connector(cap);
+	struct ucsi_control ctrl;
+	int ret = 0;
+
+	if (!con->partner)
+		return -ENOTCONN;
+
+	mutex_lock(&con->ucsi->ppm_lock);
+
+	if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
+	     role == TYPEC_DEVICE) ||
+	    (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
+	     role == TYPEC_HOST))
+		goto out_unlock;
+
+	UCSI_CMD_SET_UOR(ctrl, con, role);
+	ret = ucsi_role_cmd(con, &ctrl);
+	if (ret < 0)
+		goto out_unlock;
+
+	mutex_unlock(&con->ucsi->ppm_lock);
+
+	if (!wait_for_completion_timeout(&con->complete,
+					msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+		return -ETIMEDOUT;
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&con->ucsi->ppm_lock);
+
+	return ret;
+}
+
+static int
+ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
+{
+	struct ucsi_connector *con = to_ucsi_connector(cap);
+	struct ucsi_control ctrl;
+	int ret = 0;
+
+	if (!con->partner)
+		return -ENOTCONN;
+
+	mutex_lock(&con->ucsi->ppm_lock);
+
+	if (con->status.pwr_dir == role)
+		goto out_unlock;
+
+	UCSI_CMD_SET_PDR(ctrl, con, role);
+	ret = ucsi_role_cmd(con, &ctrl);
+	if (ret < 0)
+		goto out_unlock;
+
+	mutex_unlock(&con->ucsi->ppm_lock);
+
+	if (!wait_for_completion_timeout(&con->complete,
+					msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+		return -ETIMEDOUT;
+
+	mutex_lock(&con->ucsi->ppm_lock);
+
+	/* Something has gone wrong while swapping the role */
+	if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
+		ucsi_reset_connector(con, true);
+		ret = -EPROTO;
+	}
+
+out_unlock:
+	mutex_unlock(&con->ucsi->ppm_lock);
+
+	return ret;
+}
+
+static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
+{
+	struct fwnode_handle *fwnode;
+	int i = 1;
+
+	device_for_each_child_node(con->ucsi->dev, fwnode)
+		if (i++ == con->num)
+			return fwnode;
+	return NULL;
+}
+
+static int ucsi_register_port(struct ucsi *ucsi, int index)
+{
+	struct ucsi_connector *con = &ucsi->connector[index];
+	struct typec_capability *cap = &con->typec_cap;
+	enum typec_accessory *accessory = cap->accessory;
+	struct ucsi_control ctrl;
+	int ret;
+
+	INIT_WORK(&con->work, ucsi_connector_change);
+	init_completion(&con->complete);
+	con->num = index + 1;
+	con->ucsi = ucsi;
+
+	/* Get connector capability */
+	UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num);
+	ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+	if (ret < 0)
+		return ret;
+
+	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
+		cap->type = TYPEC_PORT_DRP;
+	else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DFP)
+		cap->type = TYPEC_PORT_DFP;
+	else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
+		cap->type = TYPEC_PORT_UFP;
+
+	cap->revision = ucsi->cap.typec_version;
+	cap->pd_revision = ucsi->cap.pd_version;
+	cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
+
+	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY)
+		*accessory++ = TYPEC_ACCESSORY_AUDIO;
+	if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY)
+		*accessory = TYPEC_ACCESSORY_DEBUG;
+
+	cap->fwnode = ucsi_find_fwnode(con);
+	cap->dr_set = ucsi_dr_swap;
+	cap->pr_set = ucsi_pr_swap;
+
+	/* Register the connector */
+	con->port = typec_register_port(ucsi->dev, cap);
+	if (!con->port)
+		return -ENODEV;
+
+	/* Get the status */
+	UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
+	ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+	if (ret < 0) {
+		dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
+		return 0;
+	}
+
+	ucsi_pwr_opmode_change(con);
+	typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+	switch (con->status.partner_type) {
+	case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+		typec_set_data_role(con->port, TYPEC_HOST);
+		break;
+	case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+		typec_set_data_role(con->port, TYPEC_DEVICE);
+		break;
+	default:
+		break;
+	}
+
+	/* Check if there is already something connected */
+	if (con->status.connected)
+		ucsi_register_partner(con);
+
+	trace_ucsi_register_port(con->num, &con->status);
+
+	return 0;
+}
+
+static void ucsi_init(struct work_struct *work)
+{
+	struct ucsi *ucsi = container_of(work, struct ucsi, work);
+	struct ucsi_connector *con;
+	struct ucsi_control ctrl;
+	int ret;
+	int i;
+
+	mutex_lock(&ucsi->ppm_lock);
+
+	/* Reset the PPM */
+	ret = ucsi_reset_ppm(ucsi);
+	if (ret) {
+		dev_err(ucsi->dev, "failed to reset PPM!\n");
+		goto err;
+	}
+
+	/* Enable basic notifications */
+	UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE |
+					UCSI_ENABLE_NTFY_ERROR);
+	ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+	if (ret < 0)
+		goto err_reset;
+
+	/* Get PPM capabilities */
+	UCSI_CMD_GET_CAPABILITY(ctrl);
+	ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+	if (ret < 0)
+		goto err_reset;
+
+	if (!ucsi->cap.num_connectors) {
+		ret = -ENODEV;
+		goto err_reset;
+	}
+
+	/* Allocate the connectors. Released in ucsi_unregister_ppm() */
+	ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+				  sizeof(*ucsi->connector), GFP_KERNEL);
+	if (!ucsi->connector) {
+		ret = -ENOMEM;
+		goto err_reset;
+	}
+
+	/* Register all connectors */
+	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+		ret = ucsi_register_port(ucsi, i);
+		if (ret)
+			goto err_unregister;
+	}
+
+	/* Enable all notifications */
+	UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
+	ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+	if (ret < 0)
+		goto err_unregister;
+
+	mutex_unlock(&ucsi->ppm_lock);
+
+	return;
+
+err_unregister:
+	for (con = ucsi->connector; con->port; con++) {
+		ucsi_unregister_partner(con);
+		typec_unregister_port(con->port);
+		con->port = NULL;
+	}
+
+err_reset:
+	ucsi_reset_ppm(ucsi);
+err:
+	mutex_unlock(&ucsi->ppm_lock);
+	dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+}
+
+/**
+ * ucsi_register_ppm - Register UCSI PPM Interface
+ * @dev: Device interface to the PPM
+ * @ppm: The PPM interface
+ *
+ * Allocates UCSI instance, associates it with @ppm and returns it to the
+ * caller, and schedules initialization of the interface.
+ */
+struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm)
+{
+	struct ucsi *ucsi;
+
+	ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
+	if (!ucsi)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_WORK(&ucsi->work, ucsi_init);
+	init_completion(&ucsi->complete);
+	mutex_init(&ucsi->ppm_lock);
+
+	ucsi->dev = dev;
+	ucsi->ppm = ppm;
+
+	/*
+	 * Communication with the PPM takes a lot of time. It is not reasonable
+	 * to initialize the driver here. Using a work for now.
+	 */
+	queue_work(system_long_wq, &ucsi->work);
+
+	return ucsi;
+}
+EXPORT_SYMBOL_GPL(ucsi_register_ppm);
+
+/**
+ * ucsi_unregister_ppm - Unregister UCSI PPM Interface
+ * @ucsi: struct ucsi associated with the PPM
+ *
+ * Unregister UCSI PPM that was created with ucsi_register_ppm().
+ */
+void ucsi_unregister_ppm(struct ucsi *ucsi)
+{
+	struct ucsi_control ctrl;
+	int i;
+
+	/* Make sure that we are not in the middle of driver initialization */
+	cancel_work_sync(&ucsi->work);
+
+	mutex_lock(&ucsi->ppm_lock);
+
+	/* Disable everything except command complete notification */
+	UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
+	ucsi_run_command(ucsi, &ctrl, NULL, 0);
+
+	mutex_unlock(&ucsi->ppm_lock);
+
+	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+		cancel_work_sync(&ucsi->connector[i].work);
+		ucsi_unregister_partner(&ucsi->connector[i]);
+		typec_unregister_port(ucsi->connector[i].port);
+	}
+
+	ucsi_reset_ppm(ucsi);
+
+	kfree(ucsi->connector);
+	kfree(ucsi);
+}
+EXPORT_SYMBOL_GPL(ucsi_unregister_ppm);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("USB Type-C Connector System Software Interface driver");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
new file mode 100644
index 0000000..6b0d2f0
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -0,0 +1,335 @@
+
+#ifndef __DRIVER_USB_TYPEC_UCSI_H
+#define __DRIVER_USB_TYPEC_UCSI_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* -------------------------------------------------------------------------- */
+
+/* Command Status and Connector Change Indication (CCI) data structure */
+struct ucsi_cci {
+	u8:1; /* reserved */
+	u8 connector_change:7;
+	u8 data_length;
+	u16:9; /* reserved */
+	u16 not_supported:1;
+	u16 cancel_complete:1;
+	u16 reset_complete:1;
+	u16 busy:1;
+	u16 ack_complete:1;
+	u16 error:1;
+	u16 cmd_complete:1;
+} __packed;
+
+/* Default fields in CONTROL data structure */
+struct ucsi_command {
+	u8 cmd;
+	u8 length;
+	u64 data:48;
+} __packed;
+
+/* ACK Command structure */
+struct ucsi_ack_cmd {
+	u8 cmd;
+	u8 length;
+	u8 cci_ack:1;
+	u8 cmd_ack:1;
+	u8:6; /* reserved */
+} __packed;
+
+/* Connector Reset Command structure */
+struct ucsi_con_rst {
+	u8 cmd;
+	u8 length;
+	u8 con_num:7;
+	u8 hard_reset:1;
+} __packed;
+
+/* Set USB Operation Mode Command structure */
+struct ucsi_uor_cmd {
+	u8 cmd;
+	u8 length;
+	u16 con_num:7;
+	u16 role:3;
+#define UCSI_UOR_ROLE_DFP			BIT(0)
+#define UCSI_UOR_ROLE_UFP			BIT(1)
+#define UCSI_UOR_ROLE_DRP			BIT(2)
+	u16:6; /* reserved */
+} __packed;
+
+struct ucsi_control {
+	union {
+		u64 raw_cmd;
+		struct ucsi_command cmd;
+		struct ucsi_uor_cmd uor;
+		struct ucsi_ack_cmd ack;
+		struct ucsi_con_rst con_rst;
+	};
+};
+
+#define __UCSI_CMD(_ctrl_, _cmd_)					\
+{									\
+	(_ctrl_).raw_cmd = 0;						\
+	(_ctrl_).cmd.cmd = _cmd_;					\
+}
+
+/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */
+#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_)			\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET)			\
+	(_ctrl_).con_rst.con_num = (_con_)->num;			\
+	(_ctrl_).con_rst.hard_reset = _hard_;				\
+}
+
+/* Helper for preparing ucsi_control for ACK_CC_CI command. */
+#define UCSI_CMD_ACK(_ctrl_, _ack_)					\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI)				\
+	(_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT);		\
+	(_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD);		\
+}
+
+/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. */
+#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_)			\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE)		\
+	(_ctrl_).cmd.data = _ntfys_;					\
+}
+
+/* Helper for preparing ucsi_control for GET_CAPABILITY command. */
+#define UCSI_CMD_GET_CAPABILITY(_ctrl_)					\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY)				\
+}
+
+/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */
+#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_)		\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY)		\
+	(_ctrl_).cmd.data = _con_;					\
+}
+
+/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
+#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_)			\
+{									\
+	__UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS)			\
+	(_ctrl_).cmd.data = _con_;					\
+}
+
+#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_)				\
+{									\
+	__UCSI_CMD(_ctrl_, _cmd_)					\
+	(_ctrl_).uor.con_num = _con_num_;				\
+	(_ctrl_).uor.role = UCSI_UOR_ROLE_DRP;				\
+}
+
+/* Helper for preparing ucsi_control for SET_UOR command. */
+#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_)				\
+{									\
+	__UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num)		\
+	(_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \
+			  UCSI_UOR_ROLE_UFP;				\
+}
+
+/* Helper for preparing ucsi_control for SET_PDR command. */
+#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_)			\
+{									\
+	__UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num)		\
+	(_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \
+			UCSI_UOR_ROLE_UFP;				\
+}
+
+/* Commands */
+#define UCSI_PPM_RESET			0x01
+#define UCSI_CANCEL			0x02
+#define UCSI_CONNECTOR_RESET		0x03
+#define UCSI_ACK_CC_CI			0x04
+#define UCSI_SET_NOTIFICATION_ENABLE	0x05
+#define UCSI_GET_CAPABILITY		0x06
+#define UCSI_GET_CONNECTOR_CAPABILITY	0x07
+#define UCSI_SET_UOM			0x08
+#define UCSI_SET_UOR			0x09
+#define UCSI_SET_PDM			0x0a
+#define UCSI_SET_PDR			0x0b
+#define UCSI_GET_ALTERNATE_MODES	0x0c
+#define UCSI_GET_CAM_SUPPORTED		0x0d
+#define UCSI_GET_CURRENT_CAM		0x0e
+#define UCSI_SET_NEW_CAM		0x0f
+#define UCSI_GET_PDOS			0x10
+#define UCSI_GET_CABLE_PROPERTY		0x11
+#define UCSI_GET_CONNECTOR_STATUS	0x12
+#define UCSI_GET_ERROR_STATUS		0x13
+
+/* ACK_CC_CI commands */
+#define UCSI_ACK_EVENT			1
+#define UCSI_ACK_CMD			2
+
+/* Bits for SET_NOTIFICATION_ENABLE command */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE		BIT(0)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE	BIT(1)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE	BIT(2)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE		BIT(5)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE	BIT(6)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE	BIT(7)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE		BIT(8)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE	BIT(9)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE		BIT(11)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE		BIT(12)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE	BIT(14)
+#define UCSI_ENABLE_NTFY_ERROR			BIT(15)
+#define UCSI_ENABLE_NTFY_ALL			0xdbe7
+
+/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
+#define UCSI_ERROR_UNREGONIZED_CMD		BIT(0)
+#define UCSI_ERROR_INVALID_CON_NUM		BIT(1)
+#define UCSI_ERROR_INVALID_CMD_ARGUMENT		BIT(2)
+#define UCSI_ERROR_INCOMPATIBLE_PARTNER		BIT(3)
+#define UCSI_ERROR_CC_COMMUNICATION_ERR		BIT(4)
+#define UCSI_ERROR_DEAD_BATTERY			BIT(5)
+#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL	BIT(6)
+
+/* Data structure filled by PPM in response to GET_CAPABILITY command. */
+struct ucsi_capability {
+	u32 attributes;
+#define UCSI_CAP_ATTR_DISABLE_STATE		BIT(0)
+#define UCSI_CAP_ATTR_BATTERY_CHARGING		BIT(1)
+#define UCSI_CAP_ATTR_USB_PD			BIT(2)
+#define UCSI_CAP_ATTR_TYPEC_CURRENT		BIT(6)
+#define UCSI_CAP_ATTR_POWER_AC_SUPPLY		BIT(8)
+#define UCSI_CAP_ATTR_POWER_OTHER		BIT(10)
+#define UCSI_CAP_ATTR_POWER_VBUS		BIT(14)
+	u32 num_connectors:8;
+	u32 features:24;
+#define UCSI_CAP_SET_UOM			BIT(0)
+#define UCSI_CAP_SET_PDM			BIT(1)
+#define UCSI_CAP_ALT_MODE_DETAILS		BIT(2)
+#define UCSI_CAP_ALT_MODE_OVERRIDE		BIT(3)
+#define UCSI_CAP_PDO_DETAILS			BIT(4)
+#define UCSI_CAP_CABLE_DETAILS			BIT(5)
+#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS	BIT(6)
+#define UCSI_CAP_PD_RESET			BIT(7)
+	u8 num_alt_modes;
+	u8 reserved;
+	u16 bc_version;
+	u16 pd_version;
+	u16 typec_version;
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CONNECTOR_CAPABILITY cmd. */
+struct ucsi_connector_capability {
+	u8 op_mode;
+#define UCSI_CONCAP_OPMODE_DFP			BIT(0)
+#define UCSI_CONCAP_OPMODE_UFP			BIT(1)
+#define UCSI_CONCAP_OPMODE_DRP			BIT(2)
+#define UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY	BIT(3)
+#define UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY	BIT(4)
+#define UCSI_CONCAP_OPMODE_USB2			BIT(5)
+#define UCSI_CONCAP_OPMODE_USB3			BIT(6)
+#define UCSI_CONCAP_OPMODE_ALT_MODE		BIT(7)
+	u8 provider:1;
+	u8 consumer:1;
+	u8:6; /* reserved */
+} __packed;
+
+struct ucsi_altmode {
+	u16 svid;
+	u32 mid;
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CABLE_PROPERTY command. */
+struct ucsi_cable_property {
+	u16 speed_supported;
+	u8 current_capability;
+	u8 vbus_in_cable:1;
+	u8 active_cable:1;
+	u8 directionality:1;
+	u8 plug_type:2;
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A		0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B		1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C		2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER		3
+	u8 mode_support:1;
+	u8:2; /* reserved */
+	u8 latency:4;
+	u8:4; /* reserved */
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
+struct ucsi_connector_status {
+	u16 change;
+#define UCSI_CONSTAT_EXT_SUPPLY_CHANGE		BIT(1)
+#define UCSI_CONSTAT_POWER_OPMODE_CHANGE	BIT(2)
+#define UCSI_CONSTAT_PDOS_CHANGE		BIT(5)
+#define UCSI_CONSTAT_POWER_LEVEL_CHANGE		BIT(6)
+#define UCSI_CONSTAT_PD_RESET_COMPLETE		BIT(7)
+#define UCSI_CONSTAT_CAM_CHANGE			BIT(8)
+#define UCSI_CONSTAT_BC_CHANGE			BIT(9)
+#define UCSI_CONSTAT_PARTNER_CHANGE		BIT(11)
+#define UCSI_CONSTAT_POWER_DIR_CHANGE		BIT(12)
+#define UCSI_CONSTAT_CONNECT_CHANGE		BIT(14)
+#define UCSI_CONSTAT_ERROR			BIT(15)
+	u16 pwr_op_mode:3;
+#define UCSI_CONSTAT_PWR_OPMODE_NONE		0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT		1
+#define UCSI_CONSTAT_PWR_OPMODE_BC		2
+#define UCSI_CONSTAT_PWR_OPMODE_PD		3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5	4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0	5
+	u16 connected:1;
+	u16 pwr_dir:1;
+	u16 partner_flags:8;
+#define UCSI_CONSTAT_PARTNER_FLAG_USB		BIT(0)
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE	BIT(1)
+	u16 partner_type:3;
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP		1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP		2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE		3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP	4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG		5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO		6
+	u32 request_data_obj;
+	u8 bc_status:2;
+#define UCSI_CONSTAT_BC_NOT_CHARGING		0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING	1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING		2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING	3
+	u8 provider_cap_limit_reason:4;
+#define UCSI_CONSTAT_CAP_PWR_LOWERED		0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT	1
+	u8:2; /* reserved */
+} __packed;
+
+/* -------------------------------------------------------------------------- */
+
+struct ucsi;
+
+struct ucsi_data {
+	u16 version;
+	u16 reserved;
+	union {
+		u32 raw_cci;
+		struct ucsi_cci cci;
+	};
+	struct ucsi_control ctrl;
+	u32 message_in[4];
+	u32 message_out[4];
+} __packed;
+
+/*
+ * struct ucsi_ppm - Interface to UCSI Platform Policy Manager
+ * @data: memory location of the UCSI data structures
+ * @cmd: UCSI command execution routine
+ * @sync: Refresh UCSI mailbox (the data structures)
+ */
+struct ucsi_ppm {
+	struct ucsi_data *data;
+	int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
+	int (*sync)(struct ucsi_ppm *);
+};
+
+struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
+void ucsi_unregister_ppm(struct ucsi *ucsi);
+void ucsi_notify(struct ucsi *ucsi);
+
+#endif /* __DRIVER_USB_TYPEC_UCSI_H */
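
For orientation only (this is not part of the patch), here is what one of the helpers above expands to when the core requests the host data role on a connector; the other UCSI_CMD_* macros follow the same pattern:

	struct ucsi_control ctrl;

	UCSI_CMD_SET_UOR(ctrl, con, TYPEC_HOST);

	/* ...which is equivalent to: */
	ctrl.raw_cmd     = 0;
	ctrl.cmd.cmd     = UCSI_SET_UOR;	/* 0x09 */
	ctrl.uor.con_num = con->num;
	ctrl.uor.role    = UCSI_UOR_ROLE_DRP | UCSI_UOR_ROLE_DFP;

ucsi_run_command() then hands the resulting 64-bit command to the PPM through the interface driver's cmd callback.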
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
new file mode 100644
index 0000000..cabd476
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -0,0 +1,158 @@
+/*
+ * UCSI ACPI driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+
+#include "ucsi.h"
+
+#define UCSI_DSM_UUID		"6f8398c2-7ca4-11e4-ad36-631042b5008f"
+#define UCSI_DSM_FUNC_WRITE	1
+#define UCSI_DSM_FUNC_READ	2
+
+struct ucsi_acpi {
+	struct device *dev;
+	struct ucsi *ucsi;
+	struct ucsi_ppm ppm;
+	guid_t guid;
+};
+
+static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
+{
+	union acpi_object *obj;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(ua->dev), &ua->guid, 1, func,
+				NULL);
+	if (!obj) {
+		dev_err(ua->dev, "%s: failed to evaluate _DSM %d\n",
+			__func__, func);
+		return -EIO;
+	}
+
+	ACPI_FREE(obj);
+	return 0;
+}
+
+static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+	struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+
+	ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+
+	return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
+}
+
+static int ucsi_acpi_sync(struct ucsi_ppm *ppm)
+{
+	struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+
+	return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+}
+
+static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct ucsi_acpi *ua = data;
+
+	ucsi_notify(ua->ucsi);
+}
+
+static int ucsi_acpi_probe(struct platform_device *pdev)
+{
+	struct ucsi_acpi *ua;
+	struct resource *res;
+	acpi_status status;
+	int ret;
+
+	ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
+	if (!ua)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory resource\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * NOTE: The memory region for the data structures is also used in an
+	 * operation region, which means ACPI has already reserved it. Therefore
+	 * it cannot be requested here, and we cannot use
+	 * devm_ioremap_resource().
+	 */
+	ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!ua->ppm.data)
+		return -ENOMEM;
+
+	if (!ua->ppm.data->version)
+		return -ENODEV;
+
+	ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
+	if (ret)
+		return ret;
+
+	ua->ppm.cmd = ucsi_acpi_cmd;
+	ua->ppm.sync = ucsi_acpi_sync;
+	ua->dev = &pdev->dev;
+
+	status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+					     ACPI_DEVICE_NOTIFY,
+					     ucsi_acpi_notify, ua);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&pdev->dev, "failed to install notify handler\n");
+		return -ENODEV;
+	}
+
+	ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm);
+	if (IS_ERR(ua->ucsi)) {
+		acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+					   ACPI_DEVICE_NOTIFY,
+					   ucsi_acpi_notify);
+		return PTR_ERR(ua->ucsi);
+	}
+
+	platform_set_drvdata(pdev, ua);
+
+	return 0;
+}
+
+static int ucsi_acpi_remove(struct platform_device *pdev)
+{
+	struct ucsi_acpi *ua = platform_get_drvdata(pdev);
+
+	ucsi_unregister_ppm(ua->ucsi);
+
+	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
+				   ucsi_acpi_notify);
+
+	return 0;
+}
+
+static const struct acpi_device_id ucsi_acpi_match[] = {
+	{ "PNP0CA0", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match);
+
+static struct platform_driver ucsi_acpi_platform_driver = {
+	.driver = {
+		.name = "ucsi_acpi",
+		.acpi_match_table = ACPI_PTR(ucsi_acpi_match),
+	},
+	.probe = ucsi_acpi_probe,
+	.remove = ucsi_acpi_remove,
+};
+
+module_platform_driver(ucsi_acpi_platform_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("UCSI ACPI driver");
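
ucsi_acpi.c is the only interface driver in this series, but struct ucsi_ppm is deliberately interface-agnostic. The following is a hypothetical sketch of a non-ACPI glue driver built around a platform mailbox; only ucsi_register_ppm(), ucsi_notify() and the ucsi_ppm callbacks come from ucsi.h, every foo_* identifier is invented:

	struct foo_ucsi {
		struct ucsi *ucsi;
		struct ucsi_ppm ppm;
	};

	static int foo_ucsi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
	{
		struct foo_ucsi *foo = container_of(ppm, struct foo_ucsi, ppm);

		/* Write the command to the shared CONTROL register and kick the PPM */
		ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
		return foo_mailbox_write(foo);
	}

	static int foo_ucsi_sync(struct ucsi_ppm *ppm)
	{
		struct foo_ucsi *foo = container_of(ppm, struct foo_ucsi, ppm);

		/* Refresh CCI and MESSAGE IN from the PPM */
		return foo_mailbox_read(foo);
	}

	static irqreturn_t foo_ucsi_irq(int irq, void *data)
	{
		struct foo_ucsi *foo = data;

		ucsi_notify(foo->ucsi);
		return IRQ_HANDLED;
	}

	static int foo_ucsi_probe(struct platform_device *pdev)
	{
		struct foo_ucsi *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		foo->ppm.data = foo_map_mailbox(pdev);	/* struct ucsi_data in shared memory */
		foo->ppm.cmd = foo_ucsi_cmd;
		foo->ppm.sync = foo_ucsi_sync;

		foo->ucsi = ucsi_register_ppm(&pdev->dev, &foo->ppm);
		if (IS_ERR(foo->ucsi))
			return PTR_ERR(foo->ucsi);

		platform_set_drvdata(pdev, foo);
		return 0;
	}

The matching remove path would call ucsi_unregister_ppm(foo->ucsi), exactly as ucsi_acpi_remove() does above.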
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 44ab43f..660180a 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -134,7 +134,7 @@ int del_match_busid(char *busid)
 	return ret;
 }
 
-static ssize_t show_match_busid(struct device_driver *drv, char *buf)
+static ssize_t match_busid_show(struct device_driver *drv, char *buf)
 {
 	int i;
 	char *out = buf;
@@ -149,7 +149,7 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
 	return out - buf;
 }
 
-static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
+static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
 				 size_t count)
 {
 	int len;
@@ -181,8 +181,7 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
 
 	return -EINVAL;
 }
-static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
-		   store_match_busid);
+static DRIVER_ATTR_RW(match_busid);
 
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
 				 size_t count)
@@ -262,7 +261,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
 		kmem_cache_free(stub_priv_cache, priv);
 
 		kfree(urb->transfer_buffer);
+		urb->transfer_buffer = NULL;
+
 		kfree(urb->setup_packet);
+		urb->setup_packet = NULL;
+
 		usb_free_urb(urb);
 	}
 }
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 6b1e8c3..be50cef 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
 	struct urb *urb = priv->urb;
 
 	kfree(urb->setup_packet);
+	urb->setup_packet = NULL;
+
 	kfree(urb->transfer_buffer);
+	urb->transfer_buffer = NULL;
+
 	list_del(&priv->list);
 	kmem_cache_free(stub_priv_cache, priv);
 	usb_free_urb(urb);
diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
index 88b71c4..5cfb59e 100644
--- a/drivers/usb/usbip/vhci.h
+++ b/drivers/usb/usbip/vhci.h
@@ -72,6 +72,11 @@ struct vhci_unlink {
 	unsigned long unlink_seqnum;
 };
 
+enum hub_speed {
+	HUB_SPEED_HIGH = 0,
+	HUB_SPEED_SUPER,
+};
+
 /* Number of supported ports. Value has an upperbound of USB_MAXCHILDREN */
 #ifdef CONFIG_USBIP_VHCI_HC_PORTS
 #define VHCI_HC_PORTS CONFIG_USBIP_VHCI_HC_PORTS
@@ -79,6 +84,9 @@ struct vhci_unlink {
 #define VHCI_HC_PORTS 8
 #endif
 
+/* Each VHCI has 2 hubs (USB2 and USB3), each with VHCI_HC_PORTS ports */
+#define VHCI_PORTS	(VHCI_HC_PORTS*2)
+
 #ifdef CONFIG_USBIP_VHCI_NR_HCS
 #define VHCI_NR_HCS CONFIG_USBIP_VHCI_NR_HCS
 #else
@@ -87,10 +95,19 @@ struct vhci_unlink {
 
 #define MAX_STATUS_NAME 16
 
-/* for usb_bus.hcpriv */
-struct vhci_hcd {
+struct vhci {
 	spinlock_t lock;
 
+	struct platform_device *pdev;
+
+	struct vhci_hcd *vhci_hcd_hs;
+	struct vhci_hcd *vhci_hcd_ss;
+};
+
+/* for usb_hcd.hcd_priv[0] */
+struct vhci_hcd {
+	struct vhci *vhci;
+
 	u32 port_status[VHCI_HC_PORTS];
 
 	unsigned resuming:1;
@@ -107,7 +124,7 @@ struct vhci_hcd {
 };
 
 extern int vhci_num_controllers;
-extern struct platform_device **vhci_pdevs;
+extern struct vhci *vhcis;
 extern struct attribute_group vhci_attr_group;
 
 /* vhci_hcd.c */
@@ -131,10 +148,10 @@ static inline __u32 port_to_rhport(__u32 port)
 
 static inline int port_to_pdev_nr(__u32 port)
 {
-	return port / VHCI_HC_PORTS;
+	return port / VHCI_PORTS;
 }
 
-static inline struct vhci_hcd *hcd_to_vhci(struct usb_hcd *hcd)
+static inline struct vhci_hcd *hcd_to_vhci_hcd(struct usb_hcd *hcd)
 {
 	return (struct vhci_hcd *) (hcd->hcd_priv);
 }
@@ -149,15 +166,14 @@ static inline const char *hcd_name(struct usb_hcd *hcd)
 	return (hcd)->self.bus_name;
 }
 
-static inline struct usb_hcd *vhci_to_hcd(struct vhci_hcd *vhci)
+static inline struct usb_hcd *vhci_hcd_to_hcd(struct vhci_hcd *vhci_hcd)
 {
-	return container_of((void *) vhci, struct usb_hcd, hcd_priv);
+	return container_of((void *) vhci_hcd, struct usb_hcd, hcd_priv);
 }
 
-static inline struct vhci_hcd *vdev_to_vhci(struct vhci_device *vdev)
+static inline struct vhci_hcd *vdev_to_vhci_hcd(struct vhci_device *vdev)
 {
-	return container_of(
-			(void *)(vdev - vdev->rhport), struct vhci_hcd, vdev);
+	return container_of((void *)(vdev - vdev->rhport), struct vhci_hcd, vdev);
 }
 
 #endif /* __USBIP_VHCI_H */
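
With the USB3 root hub added, every vhci platform device now owns VHCI_PORTS (= VHCI_HC_PORTS * 2) ports, so port_to_pdev_nr() divides by the doubled constant. A quick worked example with the default VHCI_HC_PORTS = 8, giving VHCI_PORTS = 16 (how those 16 ports split between the USB2 and USB3 hubs of one controller is handled by port_to_rhport(), which is outside the hunks shown here):

	port_to_pdev_nr(0);	/*  0 / 16 -> controller 0 */
	port_to_pdev_nr(15);	/* 15 / 16 -> controller 0 */
	port_to_pdev_nr(16);	/* 16 / 16 -> controller 1 */
	port_to_pdev_nr(31);	/* 31 / 16 -> controller 1 */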
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 0585078..2c4b2fd 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -58,8 +58,7 @@ static const char driver_name[] = "vhci_hcd";
 static const char driver_desc[] = "USB/IP Virtual Host Controller";
 
 int vhci_num_controllers = VHCI_NR_HCS;
-
-struct platform_device **vhci_pdevs;
+struct vhci *vhcis;
 
 static const char * const bit_desc[] = {
 	"CONNECTION",		/*0*/
@@ -67,7 +66,7 @@ static const char * const bit_desc[] = {
 	"SUSPEND",		/*2*/
 	"OVER_CURRENT",		/*3*/
 	"RESET",		/*4*/
-	"R5",			/*5*/
+	"L1",			/*5*/
 	"R6",			/*6*/
 	"R7",			/*7*/
 	"POWER",		/*8*/
@@ -83,7 +82,7 @@ static const char * const bit_desc[] = {
 	"C_SUSPEND",		/*18*/
 	"C_OVER_CURRENT",	/*19*/
 	"C_RESET",		/*20*/
-	"R21",			/*21*/
+	"C_L1",			/*21*/
 	"R22",			/*22*/
 	"R23",			/*23*/
 	"R24",			/*24*/
@@ -96,10 +95,49 @@ static const char * const bit_desc[] = {
 	"R31",			/*31*/
 };
 
-static void dump_port_status_diff(u32 prev_status, u32 new_status)
+static const char * const bit_desc_ss[] = {
+	"CONNECTION",		/*0*/
+	"ENABLE",		/*1*/
+	"SUSPEND",		/*2*/
+	"OVER_CURRENT",		/*3*/
+	"RESET",		/*4*/
+	"L1",			/*5*/
+	"R6",			/*6*/
+	"R7",			/*7*/
+	"R8",			/*8*/
+	"POWER",		/*9*/
+	"HIGHSPEED",		/*10*/
+	"PORT_TEST",		/*11*/
+	"INDICATOR",		/*12*/
+	"R13",			/*13*/
+	"R14",			/*14*/
+	"R15",			/*15*/
+	"C_CONNECTION",		/*16*/
+	"C_ENABLE",		/*17*/
+	"C_SUSPEND",		/*18*/
+	"C_OVER_CURRENT",	/*19*/
+	"C_RESET",		/*20*/
+	"C_BH_RESET",		/*21*/
+	"C_LINK_STATE",		/*22*/
+	"C_CONFIG_ERROR",	/*23*/
+	"R24",			/*24*/
+	"R25",			/*25*/
+	"R26",			/*26*/
+	"R27",			/*27*/
+	"R28",			/*28*/
+	"R29",			/*29*/
+	"R30",			/*30*/
+	"R31",			/*31*/
+};
+
+static void dump_port_status_diff(u32 prev_status, u32 new_status, bool usb3)
 {
 	int i = 0;
 	u32 bit = 1;
+	const char * const *desc = bit_desc;
+
+	if (usb3)
+		desc = bit_desc_ss;
 
 	pr_debug("status prev -> new: %08x -> %08x\n", prev_status, new_status);
 	while (bit) {
@@ -114,8 +152,12 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
 		else
 			change = ' ';
 
-		if (prev || new)
-			pr_debug(" %c%s\n", change, bit_desc[i]);
+		if (prev || new) {
+			pr_debug(" %c%s\n", change, desc[i]);
+
+			if (bit == 1) /* USB_PORT_STAT_CONNECTION */
+				pr_debug(" %c%s\n", change, "USB_PORT_STAT_SPEED_5GBPS");
+		}
 		bit <<= 1;
 		i++;
 	}
@@ -124,7 +166,8 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
 
 void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed)
 {
-	struct vhci_hcd	*vhci = vdev_to_vhci(vdev);
+	struct vhci_hcd	*vhci_hcd = vdev_to_vhci_hcd(vdev);
+	struct vhci *vhci = vhci_hcd->vhci;
 	int		rhport = vdev->rhport;
 	u32		status;
 	unsigned long	flags;
@@ -133,7 +176,7 @@ void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed)
 
 	spin_lock_irqsave(&vhci->lock, flags);
 
-	status = vhci->port_status[rhport];
+	status = vhci_hcd->port_status[rhport];
 
 	status |= USB_PORT_STAT_CONNECTION | (1 << USB_PORT_FEAT_C_CONNECTION);
 
@@ -148,16 +191,17 @@ void rh_port_connect(struct vhci_device *vdev, enum usb_device_speed speed)
 		break;
 	}
 
-	vhci->port_status[rhport] = status;
+	vhci_hcd->port_status[rhport] = status;
 
 	spin_unlock_irqrestore(&vhci->lock, flags);
 
-	usb_hcd_poll_rh_status(vhci_to_hcd(vhci));
+	usb_hcd_poll_rh_status(vhci_hcd_to_hcd(vhci_hcd));
 }
 
 static void rh_port_disconnect(struct vhci_device *vdev)
 {
-	struct vhci_hcd	*vhci = vdev_to_vhci(vdev);
+	struct vhci_hcd	*vhci_hcd = vdev_to_vhci_hcd(vdev);
+	struct vhci *vhci = vhci_hcd->vhci;
 	int		rhport = vdev->rhport;
 	u32		status;
 	unsigned long	flags;
@@ -166,15 +210,15 @@ static void rh_port_disconnect(struct vhci_device *vdev)
 
 	spin_lock_irqsave(&vhci->lock, flags);
 
-	status = vhci->port_status[rhport];
+	status = vhci_hcd->port_status[rhport];
 
 	status &= ~USB_PORT_STAT_CONNECTION;
 	status |= (1 << USB_PORT_FEAT_C_CONNECTION);
 
-	vhci->port_status[rhport] = status;
+	vhci_hcd->port_status[rhport] = status;
 
 	spin_unlock_irqrestore(&vhci->lock, flags);
-	usb_hcd_poll_rh_status(vhci_to_hcd(vhci));
+	usb_hcd_poll_rh_status(vhci_hcd_to_hcd(vhci_hcd));
 }
 
 #define PORT_C_MASK				\
@@ -197,17 +241,15 @@ static void rh_port_disconnect(struct vhci_device *vdev)
  */
 static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 {
-	struct vhci_hcd	*vhci;
-	int		retval;
+	struct vhci_hcd	*vhci_hcd = hcd_to_vhci_hcd(hcd);
+	struct vhci *vhci = vhci_hcd->vhci;
+	int		retval = DIV_ROUND_UP(VHCI_HC_PORTS + 1, 8);
 	int		rhport;
 	int		changed = 0;
 	unsigned long	flags;
 
-	retval = DIV_ROUND_UP(VHCI_HC_PORTS + 1, 8);
 	memset(buf, 0, retval);
 
-	vhci = hcd_to_vhci(hcd);
-
 	spin_lock_irqsave(&vhci->lock, flags);
 	if (!HCD_HW_ACCESSIBLE(hcd)) {
 		usbip_dbg_vhci_rh("hw accessible flag not on?\n");
@@ -216,7 +258,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 
 	/* check pseudo status register for each port */
 	for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
-		if ((vhci->port_status[rhport] & PORT_C_MASK)) {
+		if ((vhci_hcd->port_status[rhport] & PORT_C_MASK)) {
 			/* The status of a port has been changed, */
 			usbip_dbg_vhci_rh("port %d status changed\n", rhport);
 
@@ -233,6 +275,40 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
 	return changed ? retval : 0;
 }
 
+/* usb 3.0 root hub device descriptor */
+static struct {
+	struct usb_bos_descriptor bos;
+	struct usb_ss_cap_descriptor ss_cap;
+} __packed usb3_bos_desc = {
+
+	.bos = {
+		.bLength		= USB_DT_BOS_SIZE,
+		.bDescriptorType	= USB_DT_BOS,
+		.wTotalLength		= cpu_to_le16(sizeof(usb3_bos_desc)),
+		.bNumDeviceCaps		= 1,
+	},
+	.ss_cap = {
+		.bLength		= USB_DT_USB_SS_CAP_SIZE,
+		.bDescriptorType	= USB_DT_DEVICE_CAPABILITY,
+		.bDevCapabilityType	= USB_SS_CAP_TYPE,
+		.wSpeedSupported	= cpu_to_le16(USB_5GBPS_OPERATION),
+		.bFunctionalitySupport	= ilog2(USB_5GBPS_OPERATION),
+	},
+};
+
+static inline void
+ss_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+	memset(desc, 0, sizeof *desc);
+	desc->bDescriptorType = USB_DT_SS_HUB;
+	desc->bDescLength = 12;
+	desc->wHubCharacteristics = cpu_to_le16(
+		HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+	desc->bNbrPorts = VHCI_HC_PORTS;
+	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
+	desc->u.ss.DeviceRemovable = 0xffff;
+}
+
 static inline void hub_descriptor(struct usb_hub_descriptor *desc)
 {
 	int width;
@@ -253,7 +329,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
 static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			    u16 wIndex, char *buf, u16 wLength)
 {
-	struct vhci_hcd	*dum;
+	struct vhci_hcd	*vhci_hcd;
+	struct vhci	*vhci;
 	int             retval = 0;
 	int		rhport;
 	unsigned long	flags;
@@ -265,21 +342,24 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
 	/*
 	 * NOTE:
-	 * wIndex shows the port number and begins from 1.
+	 * wIndex (bits 0-7) shows the port number and begins from 1?
 	 */
+	wIndex = ((__u8)(wIndex & 0x00ff));
 	usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
 			  wIndex);
+
 	if (wIndex > VHCI_HC_PORTS)
 		pr_err("invalid port number %d\n", wIndex);
-	rhport = ((__u8)(wIndex & 0x00ff)) - 1;
+	rhport = wIndex - 1;
 
-	dum = hcd_to_vhci(hcd);
+	vhci_hcd = hcd_to_vhci_hcd(hcd);
+	vhci = vhci_hcd->vhci;
 
-	spin_lock_irqsave(&dum->lock, flags);
+	spin_lock_irqsave(&vhci->lock, flags);
 
 	/* store old status and compare now and old later */
 	if (usbip_dbg_flag_vhci_rh) {
-		memcpy(prev_port_status, dum->port_status,
+		memcpy(prev_port_status, vhci_hcd->port_status,
 			sizeof(prev_port_status));
 	}
 
@@ -290,45 +370,56 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	case ClearPortFeature:
 		switch (wValue) {
 		case USB_PORT_FEAT_SUSPEND:
-			if (dum->port_status[rhport] & USB_PORT_STAT_SUSPEND) {
+			if (hcd->speed == HCD_USB3) {
+				pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not "
+				       "supported for USB 3.0 roothub\n");
+				goto error;
+			}
+			usbip_dbg_vhci_rh(
+				" ClearPortFeature: USB_PORT_FEAT_SUSPEND\n");
+			if (vhci_hcd->port_status[rhport] & USB_PORT_STAT_SUSPEND) {
 				/* 20msec signaling */
-				dum->resuming = 1;
-				dum->re_timeout =
-					jiffies + msecs_to_jiffies(20);
+				vhci_hcd->resuming = 1;
+				vhci_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
 			}
 			break;
 		case USB_PORT_FEAT_POWER:
 			usbip_dbg_vhci_rh(
 				" ClearPortFeature: USB_PORT_FEAT_POWER\n");
-			dum->port_status[rhport] = 0;
-			dum->resuming = 0;
-			break;
-		case USB_PORT_FEAT_C_RESET:
-			usbip_dbg_vhci_rh(
-				" ClearPortFeature: USB_PORT_FEAT_C_RESET\n");
-			switch (dum->vdev[rhport].speed) {
-			case USB_SPEED_HIGH:
-				dum->port_status[rhport] |=
-					USB_PORT_STAT_HIGH_SPEED;
-				break;
-			case USB_SPEED_LOW:
-				dum->port_status[rhport] |=
-					USB_PORT_STAT_LOW_SPEED;
-				break;
-			default:
-				break;
-			}
+			if (hcd->speed == HCD_USB3)
+				vhci_hcd->port_status[rhport] &= ~USB_SS_PORT_STAT_POWER;
+			else
+				vhci_hcd->port_status[rhport] &= ~USB_PORT_STAT_POWER;
 			break;
 		default:
 			usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
 					  wValue);
-			dum->port_status[rhport] &= ~(1 << wValue);
+			vhci_hcd->port_status[rhport] &= ~(1 << wValue);
 			break;
 		}
 		break;
 	case GetHubDescriptor:
 		usbip_dbg_vhci_rh(" GetHubDescriptor\n");
-		hub_descriptor((struct usb_hub_descriptor *) buf);
+		if (hcd->speed == HCD_USB3 &&
+				(wLength < USB_DT_SS_HUB_SIZE ||
+				 wValue != (USB_DT_SS_HUB << 8))) {
+			pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n");
+			goto error;
+		}
+		if (hcd->speed == HCD_USB3)
+			ss_hub_descriptor((struct usb_hub_descriptor *) buf);
+		else
+			hub_descriptor((struct usb_hub_descriptor *) buf);
+		break;
+	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
+		if (hcd->speed != HCD_USB3)
+			goto error;
+
+		if ((wValue >> 8) != USB_DT_BOS)
+			goto error;
+
+		memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc));
+		retval = sizeof(usb3_bos_desc);
 		break;
 	case GetHubStatus:
 		usbip_dbg_vhci_rh(" GetHubStatus\n");
@@ -336,7 +427,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		break;
 	case GetPortStatus:
 		usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
-		if (wIndex > VHCI_HC_PORTS || wIndex < 1) {
+		if (wIndex < 1) {
 			pr_err("invalid port number %d\n", wIndex);
 			retval = -EPIPE;
 		}
@@ -346,36 +437,48 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		/* whoever resets or resumes must GetPortStatus to
 		 * complete it!!
 		 */
-		if (dum->resuming && time_after(jiffies, dum->re_timeout)) {
-			dum->port_status[rhport] |=
-				(1 << USB_PORT_FEAT_C_SUSPEND);
-			dum->port_status[rhport] &=
-				~(1 << USB_PORT_FEAT_SUSPEND);
-			dum->resuming = 0;
-			dum->re_timeout = 0;
+		if (vhci_hcd->resuming && time_after(jiffies, vhci_hcd->re_timeout)) {
+			vhci_hcd->port_status[rhport] |= (1 << USB_PORT_FEAT_C_SUSPEND);
+			vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_SUSPEND);
+			vhci_hcd->resuming = 0;
+			vhci_hcd->re_timeout = 0;
 		}
 
-		if ((dum->port_status[rhport] & (1 << USB_PORT_FEAT_RESET)) !=
-		    0 && time_after(jiffies, dum->re_timeout)) {
-			dum->port_status[rhport] |=
-				(1 << USB_PORT_FEAT_C_RESET);
-			dum->port_status[rhport] &=
-				~(1 << USB_PORT_FEAT_RESET);
-			dum->re_timeout = 0;
+		if ((vhci_hcd->port_status[rhport] & (1 << USB_PORT_FEAT_RESET)) !=
+		    0 && time_after(jiffies, vhci_hcd->re_timeout)) {
+			vhci_hcd->port_status[rhport] |= (1 << USB_PORT_FEAT_C_RESET);
+			vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
+			vhci_hcd->re_timeout = 0;
 
-			if (dum->vdev[rhport].ud.status ==
+			if (vhci_hcd->vdev[rhport].ud.status ==
 			    VDEV_ST_NOTASSIGNED) {
 				usbip_dbg_vhci_rh(
 					" enable rhport %d (status %u)\n",
 					rhport,
-					dum->vdev[rhport].ud.status);
-				dum->port_status[rhport] |=
+					vhci_hcd->vdev[rhport].ud.status);
+				vhci_hcd->port_status[rhport] |=
 					USB_PORT_STAT_ENABLE;
 			}
+
+			if (hcd->speed < HCD_USB3) {
+				switch (vhci_hcd->vdev[rhport].speed) {
+				case USB_SPEED_HIGH:
+					vhci_hcd->port_status[rhport] |=
+					      USB_PORT_STAT_HIGH_SPEED;
+					break;
+				case USB_SPEED_LOW:
+					vhci_hcd->port_status[rhport] |=
+						USB_PORT_STAT_LOW_SPEED;
+					break;
+				default:
+					pr_err("vhci_device speed not set\n");
+					break;
+				}
+			}
 		}
-		((__le16 *) buf)[0] = cpu_to_le16(dum->port_status[rhport]);
+		((__le16 *) buf)[0] = cpu_to_le16(vhci_hcd->port_status[rhport]);
 		((__le16 *) buf)[1] =
-			cpu_to_le16(dum->port_status[rhport] >> 16);
+			cpu_to_le16(vhci_hcd->port_status[rhport] >> 16);
 
 		usbip_dbg_vhci_rh(" GetPortStatus bye %x %x\n", ((u16 *)buf)[0],
 				  ((u16 *)buf)[1]);
@@ -386,36 +489,119 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		break;
 	case SetPortFeature:
 		switch (wValue) {
+		case USB_PORT_FEAT_LINK_STATE:
+			usbip_dbg_vhci_rh(
+				" SetPortFeature: USB_PORT_FEAT_LINK_STATE\n");
+			if (hcd->speed != HCD_USB3) {
+				pr_err("USB_PORT_FEAT_LINK_STATE req not "
+				       "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			/*
+			 * Since this is dummy we don't have an actual link so
+			 * there is nothing to do for the SET_LINK_STATE cmd
+			 */
+			break;
+		case USB_PORT_FEAT_U1_TIMEOUT:
+			usbip_dbg_vhci_rh(
+				" SetPortFeature: USB_PORT_FEAT_U1_TIMEOUT\n");
+		case USB_PORT_FEAT_U2_TIMEOUT:
+			usbip_dbg_vhci_rh(
+				" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
+			/* TODO: add suspend/resume support! */
+			if (hcd->speed != HCD_USB3) {
+				pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not "
+				       "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			break;
 		case USB_PORT_FEAT_SUSPEND:
 			usbip_dbg_vhci_rh(
 				" SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
+			/* Applicable only for USB2.0 hub */
+			if (hcd->speed == HCD_USB3) {
+				pr_err("USB_PORT_FEAT_SUSPEND req not "
+				       "supported for USB 3.0 roothub\n");
+				goto error;
+			}
+
+			vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
 			break;
+		case USB_PORT_FEAT_POWER:
+			usbip_dbg_vhci_rh(
+				" SetPortFeature: USB_PORT_FEAT_POWER\n");
+			if (hcd->speed == HCD_USB3)
+				vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
+			else
+				vhci_hcd->port_status[rhport] |= USB_PORT_STAT_POWER;
+			break;
+		case USB_PORT_FEAT_BH_PORT_RESET:
+			usbip_dbg_vhci_rh(
+				" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
+			/* Applicable only for USB3.0 hub */
+			if (hcd->speed != HCD_USB3) {
+				pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
+				       "supported for USB 2.0 roothub\n");
+				goto error;
+			}
+			/* FALLS THROUGH */
 		case USB_PORT_FEAT_RESET:
 			usbip_dbg_vhci_rh(
 				" SetPortFeature: USB_PORT_FEAT_RESET\n");
-			/* if it's already running, disconnect first */
-			if (dum->port_status[rhport] & USB_PORT_STAT_ENABLE) {
-				dum->port_status[rhport] &=
-					~(USB_PORT_STAT_ENABLE |
-					  USB_PORT_STAT_LOW_SPEED |
-					  USB_PORT_STAT_HIGH_SPEED);
-				/* FIXME test that code path! */
+			/* if it's already enabled, disable */
+			if (hcd->speed == HCD_USB3) {
+				vhci_hcd->port_status[rhport] = 0;
+				vhci_hcd->port_status[rhport] =
+					(USB_SS_PORT_STAT_POWER |
+					 USB_PORT_STAT_CONNECTION |
+					 USB_PORT_STAT_RESET);
+			} else if (vhci_hcd->port_status[rhport] & USB_PORT_STAT_ENABLE) {
+				vhci_hcd->port_status[rhport] &= ~(USB_PORT_STAT_ENABLE
+					| USB_PORT_STAT_LOW_SPEED
+					| USB_PORT_STAT_HIGH_SPEED);
 			}
-			/* 50msec reset signaling */
-			dum->re_timeout = jiffies + msecs_to_jiffies(50);
 
-			/* FALLTHROUGH */
+			/* 50msec reset signaling */
+			vhci_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
+
+			/* FALLS THROUGH */
 		default:
 			usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
 					  wValue);
-			dum->port_status[rhport] |= (1 << wValue);
-			break;
+			if (hcd->speed == HCD_USB3) {
+				if ((vhci_hcd->port_status[rhport] &
+				     USB_SS_PORT_STAT_POWER) != 0) {
+					vhci_hcd->port_status[rhport] |= (1 << wValue);
+				}
+			} else
+				if ((vhci_hcd->port_status[rhport] &
+				     USB_PORT_STAT_POWER) != 0) {
+					vhci_hcd->port_status[rhport] |= (1 << wValue);
+				}
 		}
 		break;
-
+	case GetPortErrorCount:
+		usbip_dbg_vhci_rh(" GetPortErrorCount\n");
+		if (hcd->speed != HCD_USB3) {
+			pr_err("GetPortErrorCount req not "
+			       "supported for USB 2.0 roothub\n");
+			goto error;
+		}
+		/* We'll always return 0 since this is a dummy hub */
+		*(__le32 *) buf = cpu_to_le32(0);
+		break;
+	case SetHubDepth:
+		usbip_dbg_vhci_rh(" SetHubDepth\n");
+		if (hcd->speed != HCD_USB3) {
+			pr_err("SetHubDepth req not supported for "
+			       "USB 2.0 roothub\n");
+			goto error;
+		}
+		break;
 	default:
-		pr_err("default: no such request\n");
-
+		pr_err("default hub control req: %04x v%04x i%04x l%d\n",
+			typeReq, wValue, wIndex, wLength);
+error:
 		/* "protocol stall" on error */
 		retval = -EPIPE;
 	}
@@ -425,12 +611,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		/* Only dump valid port status */
 		if (rhport >= 0) {
 			dump_port_status_diff(prev_port_status[rhport],
-					      dum->port_status[rhport]);
+					      vhci_hcd->port_status[rhport],
+					      hcd->speed == HCD_USB3);
 		}
 	}
 	usbip_dbg_vhci_rh(" bye\n");
 
-	spin_unlock_irqrestore(&dum->lock, flags);
+	spin_unlock_irqrestore(&vhci->lock, flags);
+
+	if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
+		usb_hcd_poll_rh_status(hcd);
 
 	return retval;
 }
@@ -438,14 +628,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
 {
 	struct vhci_priv *priv;
-	struct vhci_hcd *vhci;
+	struct vhci_hcd *vhci_hcd;
 	unsigned long flags;
 
 	if (!vdev) {
 		pr_err("could not get virtual device");
 		return;
 	}
-	vhci = vdev_to_vhci(vdev);
+	vhci_hcd = vdev_to_vhci_hcd(vdev);
 
 	priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
 	if (!priv) {
@@ -455,7 +645,7 @@ static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
 
 	spin_lock_irqsave(&vdev->priv_lock, flags);
 
-	priv->seqnum = atomic_inc_return(&vhci->seqnum);
+	priv->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
 	if (priv->seqnum == 0xffff)
 		dev_info(&urb->dev->dev, "seqnum max\n");
 
@@ -470,10 +660,10 @@ static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
 	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 }
 
-static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
-			    gfp_t mem_flags)
+static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
+	struct vhci *vhci = vhci_hcd->vhci;
 	struct device *dev = &urb->dev->dev;
 	u8 portnum = urb->dev->portnum;
 	int ret = 0;
@@ -487,7 +677,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 		pr_err("invalid port number %d\n", portnum);
 		return -ENODEV;
 	}
-	vdev = &vhci->vdev[portnum-1];
+	vdev = &vhci_hcd->vdev[portnum-1];
 
 	/* patch to usb_sg_init() is in 2.5.60 */
 	BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
@@ -640,7 +830,8 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  */
 static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
+	struct vhci *vhci = vhci_hcd->vhci;
 	struct vhci_priv *priv;
 	struct vhci_device *vdev;
 	unsigned long flags;
@@ -691,7 +882,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 
 		spin_unlock_irqrestore(&vhci->lock, flags);
-		usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
+		usb_hcd_giveback_urb(hcd, urb, urb->status);
 		spin_lock_irqsave(&vhci->lock, flags);
 
 	} else {
@@ -709,7 +900,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 			return -ENOMEM;
 		}
 
-		unlink->seqnum = atomic_inc_return(&vhci->seqnum);
+		unlink->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
 		if (unlink->seqnum == 0xffff)
 			pr_info("seqnum max\n");
 
@@ -733,8 +924,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
-	struct vhci_hcd *vhci = vdev_to_vhci(vdev);
-	struct usb_hcd *hcd = vhci_to_hcd(vhci);
+	struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
+	struct usb_hcd *hcd = vhci_hcd_to_hcd(vhci_hcd);
+	struct vhci *vhci = vhci_hcd->vhci;
 	struct vhci_unlink *unlink, *tmp;
 	unsigned long flags;
 
@@ -846,7 +1038,6 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 	pr_info("disconnect device\n");
 }
 
-
 static void vhci_device_reset(struct usbip_device *ud)
 {
 	struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
@@ -918,29 +1109,58 @@ static int hcd_name_to_id(const char *name)
 	return val;
 }
 
+static int vhci_setup(struct usb_hcd *hcd)
+{
+	struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
+	hcd->self.sg_tablesize = ~0;
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
+		vhci->vhci_hcd_hs->vhci = vhci;
+		/*
+		 * Mark the first roothub as being USB 2.0.
+		 * The USB 3.0 roothub will be registered later by
+		 * vhci_hcd_probe()
+		 */
+		hcd->speed = HCD_USB2;
+		hcd->self.root_hub->speed = USB_SPEED_HIGH;
+	} else {
+		vhci->vhci_hcd_ss = hcd_to_vhci_hcd(hcd);
+		vhci->vhci_hcd_ss->vhci = vhci;
+		hcd->speed = HCD_USB3;
+		hcd->self.root_hub->speed = USB_SPEED_SUPER;
+	}
+	return 0;
+}
+
 static int vhci_start(struct usb_hcd *hcd)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
 	int id, rhport;
-	int err = 0;
+	int err;
 
 	usbip_dbg_vhci_hc("enter vhci_start\n");
 
+	if (usb_hcd_is_primary_hcd(hcd))
+		spin_lock_init(&vhci_hcd->vhci->lock);
+
 	/* initialize private data of usb_hcd */
 
 	for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
-		struct vhci_device *vdev = &vhci->vdev[rhport];
+		struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
 
 		vhci_device_init(vdev);
 		vdev->rhport = rhport;
 	}
 
-	atomic_set(&vhci->seqnum, 0);
-	spin_lock_init(&vhci->lock);
+	atomic_set(&vhci_hcd->seqnum, 0);
 
 	hcd->power_budget = 0; /* no limit */
 	hcd->uses_new_polling = 1;
 
+#ifdef CONFIG_USB_OTG
+	hcd->self.otg_port = 1;
+#endif
+
 	id = hcd_name_to_id(hcd_name(hcd));
 	if (id < 0) {
 		pr_err("invalid vhci name %s\n", hcd_name(hcd));
@@ -948,7 +1168,7 @@ static int vhci_start(struct usb_hcd *hcd)
 	}
 
 	/* vhci_hcd is now ready to be controlled through sysfs */
-	if (id == 0) {
+	if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
 		err = vhci_init_attr_group();
 		if (err) {
 			pr_err("init attr group\n");
@@ -968,21 +1188,21 @@ static int vhci_start(struct usb_hcd *hcd)
 
 static void vhci_stop(struct usb_hcd *hcd)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci_hcd *vhci_hcd = hcd_to_vhci_hcd(hcd);
 	int id, rhport;
 
 	usbip_dbg_vhci_hc("stop VHCI controller\n");
 
 	/* 1. remove the userland interface of vhci_hcd */
 	id = hcd_name_to_id(hcd_name(hcd));
-	if (id == 0) {
+	if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
 		sysfs_remove_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
 		vhci_finish_attr_group();
 	}
 
 	/* 2. shutdown all the ports of vhci_hcd */
 	for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
-		struct vhci_device *vdev = &vhci->vdev[rhport];
+		struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
 
 		usbip_event_add(&vdev->ud, VDEV_EVENT_REMOVED);
 		usbip_stop_eh(&vdev->ud);
@@ -1000,7 +1220,7 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
 /* FIXME: suspend/resume */
 static int vhci_bus_suspend(struct usb_hcd *hcd)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
 	unsigned long flags;
 
 	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
@@ -1014,7 +1234,7 @@ static int vhci_bus_suspend(struct usb_hcd *hcd)
 
 static int vhci_bus_resume(struct usb_hcd *hcd)
 {
-	struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+	struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
 	int rc = 0;
 	unsigned long flags;
 
@@ -1036,13 +1256,32 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
 #define vhci_bus_resume       NULL
 #endif
 
+/* Change a group of bulk endpoints to support multiple stream IDs */
+static int vhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+	struct usb_host_endpoint **eps, unsigned int num_eps,
+	unsigned int num_streams, gfp_t mem_flags)
+{
+	dev_dbg(&hcd->self.root_hub->dev, "vhci_alloc_streams not implemented\n");
+	return 0;
+}
+
+/* Reverts a group of bulk endpoints back to not using stream IDs. */
+static int vhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+	struct usb_host_endpoint **eps, unsigned int num_eps,
+	gfp_t mem_flags)
+{
+	dev_dbg(&hcd->self.root_hub->dev, "vhci_free_streams not implemented\n");
+	return 0;
+}
+
 static struct hc_driver vhci_hc_driver = {
 	.description	= driver_name,
 	.product_desc	= driver_desc,
 	.hcd_priv_size	= sizeof(struct vhci_hcd),
 
-	.flags		= HCD_USB2,
+	.flags		= HCD_USB3 | HCD_SHARED,
 
+	.reset		= vhci_setup,
 	.start		= vhci_start,
 	.stop		= vhci_stop,
 
@@ -1055,11 +1294,16 @@ static struct hc_driver vhci_hc_driver = {
 	.hub_control    = vhci_hub_control,
 	.bus_suspend	= vhci_bus_suspend,
 	.bus_resume	= vhci_bus_resume,
+
+	.alloc_streams	= vhci_alloc_streams,
+	.free_streams	= vhci_free_streams,
 };
 
 static int vhci_hcd_probe(struct platform_device *pdev)
 {
-	struct usb_hcd		*hcd;
+	struct vhci             *vhci = *((void **)dev_get_platdata(&pdev->dev));
+	struct usb_hcd		*hcd_hs;
+	struct usb_hcd		*hcd_ss;
 	int			ret;
 
 	usbip_dbg_vhci_hc("name %s id %d\n", pdev->name, pdev->id);
@@ -1068,43 +1312,68 @@ static int vhci_hcd_probe(struct platform_device *pdev)
 	 * Allocate and initialize hcd.
 	 * Our private data is also allocated automatically.
 	 */
-	hcd = usb_create_hcd(&vhci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
-	if (!hcd) {
-		pr_err("create hcd failed\n");
+	hcd_hs = usb_create_hcd(&vhci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd_hs) {
+		pr_err("create primary hcd failed\n");
 		return -ENOMEM;
 	}
-	hcd->has_tt = 1;
+	hcd_hs->has_tt = 1;
 
 	/*
 	 * Finish generic HCD structure initialization and register.
 	 * Call the driver's reset() and start() routines.
 	 */
-	ret = usb_add_hcd(hcd, 0, 0);
+	ret = usb_add_hcd(hcd_hs, 0, 0);
 	if (ret != 0) {
-		pr_err("usb_add_hcd failed %d\n", ret);
-		usb_put_hcd(hcd);
-		return ret;
+		pr_err("usb_add_hcd hs failed %d\n", ret);
+		goto put_usb2_hcd;
+	}
+
+	hcd_ss = usb_create_shared_hcd(&vhci_hc_driver, &pdev->dev,
+				       dev_name(&pdev->dev), hcd_hs);
+	if (!hcd_ss) {
+		ret = -ENOMEM;
+		pr_err("create shared hcd failed\n");
+		goto remove_usb2_hcd;
+	}
+
+	ret = usb_add_hcd(hcd_ss, 0, 0);
+	if (ret) {
+		pr_err("usb_add_hcd ss failed %d\n", ret);
+		goto put_usb3_hcd;
 	}
 
 	usbip_dbg_vhci_hc("bye\n");
 	return 0;
+
+put_usb3_hcd:
+	usb_put_hcd(hcd_ss);
+remove_usb2_hcd:
+	usb_remove_hcd(hcd_hs);
+put_usb2_hcd:
+	usb_put_hcd(hcd_hs);
+	vhci->vhci_hcd_hs = NULL;
+	vhci->vhci_hcd_ss = NULL;
+	return ret;
 }
 
 static int vhci_hcd_remove(struct platform_device *pdev)
 {
-	struct usb_hcd	*hcd;
-
-	hcd = platform_get_drvdata(pdev);
-	if (!hcd)
-		return 0;
+	struct vhci *vhci = *((void **)dev_get_platdata(&pdev->dev));
 
 	/*
 	 * Disconnects the root hub,
 	 * then reverses the effects of usb_add_hcd(),
 	 * invoking the HCD's stop() methods.
 	 */
-	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
+	usb_remove_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_ss));
+	usb_put_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_ss));
+
+	usb_remove_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_hs));
+	usb_put_hcd(vhci_hcd_to_hcd(vhci->vhci_hcd_hs));
+
+	vhci->vhci_hcd_hs = NULL;
+	vhci->vhci_hcd_ss = NULL;
 
 	return 0;
 }
@@ -1115,23 +1384,32 @@ static int vhci_hcd_remove(struct platform_device *pdev)
 static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	struct usb_hcd *hcd;
-	struct vhci_hcd *vhci;
+	struct vhci *vhci;
 	int rhport;
 	int connected = 0;
 	int ret = 0;
 	unsigned long flags;
 
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
 	hcd = platform_get_drvdata(pdev);
 	if (!hcd)
 		return 0;
-	vhci = hcd_to_vhci(hcd);
+
+	vhci = *((void **)dev_get_platdata(hcd->self.controller));
 
 	spin_lock_irqsave(&vhci->lock, flags);
 
-	for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++)
-		if (vhci->port_status[rhport] & USB_PORT_STAT_CONNECTION)
+	for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
+		if (vhci->vhci_hcd_hs->port_status[rhport] &
+		    USB_PORT_STAT_CONNECTION)
 			connected += 1;
 
+		if (vhci->vhci_hcd_ss->port_status[rhport] &
+		    USB_PORT_STAT_CONNECTION)
+			connected += 1;
+	}
+
 	spin_unlock_irqrestore(&vhci->lock, flags);
 
 	if (connected > 0) {
@@ -1179,34 +1457,16 @@ static struct platform_driver vhci_driver = {
 	},
 };
 
-static int add_platform_device(int id)
-{
-	struct platform_device *pdev;
-	int dev_nr;
-
-	if (id == 0)
-		dev_nr = -1;
-	else
-		dev_nr = id;
-
-	pdev = platform_device_register_simple(driver_name, dev_nr, NULL, 0);
-	if (IS_ERR(pdev))
-		return PTR_ERR(pdev);
-
-	*(vhci_pdevs + id) = pdev;
-	return 0;
-}
-
 static void del_platform_devices(void)
 {
 	struct platform_device *pdev;
 	int i;
 
 	for (i = 0; i < vhci_num_controllers; i++) {
-		pdev = *(vhci_pdevs + i);
+		pdev = vhcis[i].pdev;
 		if (pdev != NULL)
 			platform_device_unregister(pdev);
-		*(vhci_pdevs + i) = NULL;
+		vhcis[i].pdev = NULL;
 	}
 	sysfs_remove_link(&platform_bus.kobj, driver_name);
 }
@@ -1221,28 +1481,51 @@ static int __init vhci_hcd_init(void)
 	if (vhci_num_controllers < 1)
 		vhci_num_controllers = 1;
 
-	vhci_pdevs = kcalloc(vhci_num_controllers, sizeof(void *), GFP_KERNEL);
-	if (vhci_pdevs == NULL)
+	vhcis = kcalloc(vhci_num_controllers, sizeof(struct vhci), GFP_KERNEL);
+	if (vhcis == NULL)
 		return -ENOMEM;
 
+	for (i = 0; i < vhci_num_controllers; i++) {
+		vhcis[i].pdev = platform_device_alloc(driver_name, i);
+		if (!vhcis[i].pdev) {
+			i--;
+			while (i >= 0)
+				platform_device_put(vhcis[i--].pdev);
+			ret = -ENOMEM;
+			goto err_device_alloc;
+		}
+	}
+	for (i = 0; i < vhci_num_controllers; i++) {
+		void *vhci = &vhcis[i];
+		ret = platform_device_add_data(vhcis[i].pdev, &vhci, sizeof(void *));
+		if (ret)
+			goto err_driver_register;
+	}
+
 	ret = platform_driver_register(&vhci_driver);
 	if (ret)
 		goto err_driver_register;
 
 	for (i = 0; i < vhci_num_controllers; i++) {
-		ret = add_platform_device(i);
-		if (ret)
-			goto err_platform_device_register;
+		ret = platform_device_add(vhcis[i].pdev);
+		if (ret < 0) {
+			i--;
+			while (i >= 0)
+				platform_device_del(vhcis[i--].pdev);
+			goto err_add_hcd;
+		}
 	}
 
 	pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
 	return ret;
 
-err_platform_device_register:
-	del_platform_devices();
+err_add_hcd:
 	platform_driver_unregister(&vhci_driver);
 err_driver_register:
-	kfree(vhci_pdevs);
+	for (i = 0; i < vhci_num_controllers; i++)
+		platform_device_put(vhcis[i].pdev);
+err_device_alloc:
+	kfree(vhcis);
 	return ret;
 }
 
@@ -1250,7 +1533,7 @@ static void __exit vhci_hcd_exit(void)
 {
 	del_platform_devices();
 	platform_driver_unregister(&vhci_driver);
-	kfree(vhci_pdevs);
+	kfree(vhcis);
 }
 
 module_init(vhci_hcd_init);
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index fc2d319..ef2f2d5 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -70,7 +70,8 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
 static void vhci_recv_ret_submit(struct vhci_device *vdev,
 				 struct usbip_header *pdu)
 {
-	struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+	struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
+	struct vhci *vhci = vhci_hcd->vhci;
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
 	unsigned long flags;
@@ -82,7 +83,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	if (!urb) {
 		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
 		pr_info("max seqnum %d\n",
-			atomic_read(&vhci->seqnum));
+			atomic_read(&vhci_hcd->seqnum));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
 	}
@@ -107,10 +108,10 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
 	spin_lock_irqsave(&vhci->lock, flags);
-	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
+	usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
 	spin_unlock_irqrestore(&vhci->lock, flags);
 
-	usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
+	usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb, urb->status);
 
 	usbip_dbg_vhci_rx("Leave\n");
 }
@@ -143,7 +144,8 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
 static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 				 struct usbip_header *pdu)
 {
-	struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+	struct vhci_hcd *vhci_hcd = vdev_to_vhci_hcd(vdev);
+	struct vhci *vhci = vhci_hcd->vhci;
 	struct vhci_unlink *unlink;
 	struct urb *urb;
 	unsigned long flags;
@@ -177,10 +179,10 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		pr_info("urb->status %d\n", urb->status);
 
 		spin_lock_irqsave(&vhci->lock, flags);
-		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
+		usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
 		spin_unlock_irqrestore(&vhci->lock, flags);
 
-		usb_hcd_giveback_urb(vhci_to_hcd(vhci), urb, urb->status);
+		usb_hcd_giveback_urb(vhci_hcd_to_hcd(vhci_hcd), urb, urb->status);
 	}
 
 	kfree(unlink);
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index b96e5b1..5778b64 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -29,13 +29,51 @@
 
 /* TODO: refine locking ?*/
 
+/*
+ * output example:
+ * hub port sta spd dev      socket           local_busid
+ * hs  0000 004 000 00000000         c5a7bb80 1-2.3
+ * ................................................
+ * ss  0008 004 000 00000000         d8cee980 2-3.4
+ * ................................................
+ *
+ * IP address can be retrieved from a socket pointer address by looking
+ * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
+ * port number and its peer IP address.
+ */
+static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
+{
+	if (hub == HUB_SPEED_HIGH)
+		*out += sprintf(*out, "hs  %04u %03u ",
+				      port, vdev->ud.status);
+	else /* hub == HUB_SPEED_SUPER */
+		*out += sprintf(*out, "ss  %04u %03u ",
+				      port, vdev->ud.status);
+
+	if (vdev->ud.status == VDEV_ST_USED) {
+		*out += sprintf(*out, "%03u %08x ",
+				      vdev->speed, vdev->devid);
+		*out += sprintf(*out, "%16p %s",
+				      vdev->ud.tcp_socket,
+				      dev_name(&vdev->udev->dev));
+
+	} else {
+		*out += sprintf(*out, "000 00000000 ");
+		*out += sprintf(*out, "0000000000000000 0-0");
+	}
+
+	*out += sprintf(*out, "\n");
+}
+
 /* Sysfs entry to show port status */
 static ssize_t status_show_vhci(int pdev_nr, char *out)
 {
-	struct platform_device *pdev = *(vhci_pdevs + pdev_nr);
-	struct vhci_hcd *vhci;
+	struct platform_device *pdev = vhcis[pdev_nr].pdev;
+	struct vhci *vhci;
+	struct usb_hcd *hcd;
+	struct vhci_hcd *vhci_hcd;
 	char *s = out;
-	int i = 0;
+	int i;
 	unsigned long flags;
 
 	if (!pdev || !out) {
@@ -43,41 +81,27 @@ static ssize_t status_show_vhci(int pdev_nr, char *out)
 		return 0;
 	}
 
-	vhci = hcd_to_vhci(platform_get_drvdata(pdev));
+	hcd = platform_get_drvdata(pdev);
+	vhci_hcd = hcd_to_vhci_hcd(hcd);
+	vhci = vhci_hcd->vhci;
 
 	spin_lock_irqsave(&vhci->lock, flags);
 
-	/*
-	 * output example:
-	 * port sta spd dev      socket           local_busid
-	 * 0000 004 000 00000000         c5a7bb80 1-2.3
-	 * 0001 004 000 00000000         d8cee980 2-3.4
-	 *
-	 * IP address can be retrieved from a socket pointer address by looking
-	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
-	 * port number and its peer IP address.
-	 */
 	for (i = 0; i < VHCI_HC_PORTS; i++) {
-		struct vhci_device *vdev = &vhci->vdev[i];
+		struct vhci_device *vdev = &vhci->vhci_hcd_hs->vdev[i];
 
 		spin_lock(&vdev->ud.lock);
-		out += sprintf(out, "%04u %03u ",
-				    (pdev_nr * VHCI_HC_PORTS) + i,
-				    vdev->ud.status);
+		port_show_vhci(&out, HUB_SPEED_HIGH,
+			       pdev_nr * VHCI_PORTS + i, vdev);
+		spin_unlock(&vdev->ud.lock);
+	}
 
-		if (vdev->ud.status == VDEV_ST_USED) {
-			out += sprintf(out, "%03u %08x ",
-					    vdev->speed, vdev->devid);
-			out += sprintf(out, "%16p %s",
-					    vdev->ud.tcp_socket,
-					    dev_name(&vdev->udev->dev));
+	for (i = 0; i < VHCI_HC_PORTS; i++) {
+		struct vhci_device *vdev = &vhci->vhci_hcd_ss->vdev[i];
 
-		} else {
-			out += sprintf(out, "000 00000000 ");
-			out += sprintf(out, "0000000000000000 0-0");
-		}
-
-		out += sprintf(out, "\n");
+		spin_lock(&vdev->ud.lock);
+		port_show_vhci(&out, HUB_SPEED_SUPER,
+			       pdev_nr * VHCI_PORTS + VHCI_HC_PORTS + i, vdev);
 		spin_unlock(&vdev->ud.lock);
 	}
 
@@ -92,8 +116,16 @@ static ssize_t status_show_not_ready(int pdev_nr, char *out)
 	int i = 0;
 
 	for (i = 0; i < VHCI_HC_PORTS; i++) {
-		out += sprintf(out, "%04u %03u ",
-				    (pdev_nr * VHCI_HC_PORTS) + i,
+		out += sprintf(out, "hs  %04u %03u ",
+				    (pdev_nr * VHCI_PORTS) + i,
+				    VDEV_ST_NOTASSIGNED);
+		out += sprintf(out, "000 00000000 0000000000000000 0-0");
+		out += sprintf(out, "\n");
+	}
+
+	for (i = 0; i < VHCI_HC_PORTS; i++) {
+		out += sprintf(out, "ss  %04u %03u ",
+				    (pdev_nr * VHCI_PORTS) + VHCI_HC_PORTS + i,
 				    VDEV_ST_NOTASSIGNED);
 		out += sprintf(out, "000 00000000 0000000000000000 0-0");
 		out += sprintf(out, "\n");
@@ -125,7 +157,7 @@ static ssize_t status_show(struct device *dev,
 	int pdev_nr;
 
 	out += sprintf(out,
-		       "port sta spd dev      socket           local_busid\n");
+		       "hub port sta spd dev      socket           local_busid\n");
 
 	pdev_nr = status_name_to_id(attr->attr.name);
 	if (pdev_nr < 0)
@@ -141,15 +173,19 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
 {
 	char *s = out;
 
-	out += sprintf(out, "%d\n", VHCI_HC_PORTS * vhci_num_controllers);
+	/*
+	 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+	 */
+	out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
 	return out - s;
 }
 static DEVICE_ATTR_RO(nports);
 
 /* Sysfs entry to shutdown a virtual connection */
-static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport)
+static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 {
-	struct vhci_device *vdev = &vhci->vdev[rhport];
+	struct vhci_device *vdev = &vhci_hcd->vdev[rhport];
+	struct vhci *vhci = vhci_hcd->vhci;
 	unsigned long flags;
 
 	usbip_dbg_vhci_sysfs("enter\n");
@@ -195,6 +231,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
 {
 	__u32 port = 0, pdev_nr = 0, rhport = 0;
 	struct usb_hcd *hcd;
+	struct vhci_hcd *vhci_hcd;
 	int ret;
 
 	if (kstrtoint(buf, 10, &port) < 0)
@@ -206,13 +243,20 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr,
 	if (!valid_port(pdev_nr, rhport))
 		return -EINVAL;
 
-	hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+	hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
 	if (hcd == NULL) {
 		dev_err(dev, "port is not ready %u\n", port);
 		return -EAGAIN;
 	}
 
-	ret = vhci_port_disconnect(hcd_to_vhci(hcd), rhport);
+	usbip_dbg_vhci_sysfs("rhport %d\n", rhport);
+
+	if ((port / VHCI_HC_PORTS) % 2)
+		vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_ss;
+	else
+		vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_hs;
+
+	ret = vhci_port_disconnect(vhci_hcd, rhport);
 	if (ret < 0)
 		return -EINVAL;
 
@@ -233,6 +277,7 @@ static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
 	case USB_SPEED_FULL:
 	case USB_SPEED_HIGH:
 	case USB_SPEED_WIRELESS:
+	case USB_SPEED_SUPER:
 		break;
 	default:
 		pr_err("Failed attach request for unsupported USB speed: %s\n",
@@ -262,8 +307,9 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	int sockfd = 0;
 	__u32 port = 0, pdev_nr = 0, rhport = 0, devid = 0, speed = 0;
 	struct usb_hcd *hcd;
-	struct vhci_hcd *vhci;
+	struct vhci_hcd *vhci_hcd;
 	struct vhci_device *vdev;
+	struct vhci *vhci;
 	int err;
 	unsigned long flags;
 
@@ -287,13 +333,19 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
 	if (!valid_args(pdev_nr, rhport, speed))
 		return -EINVAL;
 
-	hcd = platform_get_drvdata(*(vhci_pdevs + pdev_nr));
+	hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
 	if (hcd == NULL) {
 		dev_err(dev, "port %d is not ready\n", port);
 		return -EAGAIN;
 	}
-	vhci = hcd_to_vhci(hcd);
-	vdev = &vhci->vdev[rhport];
+
+	vhci_hcd = hcd_to_vhci_hcd(hcd);
+	vhci = vhci_hcd->vhci;
+
+	if (speed == USB_SPEED_SUPER)
+		vdev = &vhci->vhci_hcd_ss->vdev[rhport];
+	else
+		vdev = &vhci->vhci_hcd_hs->vdev[rhport];
 
 	/* Extract socket from fd. */
 	socket = sockfd_lookup(sockfd, &err);
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c
index 776bcb3..ff2d424 100644
--- a/drivers/uwb/driver.c
+++ b/drivers/uwb/driver.c
@@ -94,17 +94,18 @@ ssize_t beacon_timeout_ms_store(struct class *class,
 	beacon_timeout_ms = bt;
 	return size;
 }
+static CLASS_ATTR_RW(beacon_timeout_ms);
 
-static struct class_attribute uwb_class_attrs[] = {
-	__ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
-	       beacon_timeout_ms_show, beacon_timeout_ms_store),
-	__ATTR_NULL,
+static struct attribute *uwb_class_attrs[] = {
+	&class_attr_beacon_timeout_ms.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(uwb_class);
 
 /** Device model classes */
 struct class uwb_rc_class = {
 	.name        = "uwb_rc",
-	.class_attrs = uwb_class_attrs,
+	.class_groups = uwb_class_groups,
 };
 
 
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c
index 3b1a87d..1ac8526 100644
--- a/drivers/uwb/i1480/dfu/phy.c
+++ b/drivers/uwb/i1480/dfu/phy.c
@@ -126,6 +126,7 @@ int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
 		dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
 			reply->bResultCode);
 		result = -EIO;
+		goto out;
 	}
 	for (cnt = 0; cnt < size; cnt++) {
 		if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 27c89cd..4797217 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -43,7 +43,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
 	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
 }
 
-static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
 	unsigned long flags = (unsigned long)key;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 042030e..e4613a3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -165,7 +165,7 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
 	add_wait_queue(wqh, &poll->wait);
 }
 
-static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 			     void *key)
 {
 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f55671d..f720958 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -31,7 +31,7 @@ struct vhost_work {
 struct vhost_poll {
 	poll_table                table;
 	wait_queue_head_t        *wqh;
-	wait_queue_t              wait;
+	wait_queue_entry_t              wait;
 	struct vhost_work	  work;
 	unsigned long		  mask;
 	struct vhost_dev	 *dev;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3acef3c..3f63e03 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -706,7 +706,7 @@ static const struct file_operations vhost_vsock_fops = {
 };
 
 static struct miscdevice vhost_vsock_misc = {
-	.minor = MISC_DYNAMIC_MINOR,
+	.minor = VHOST_VSOCK_MINOR,
 	.name = "vhost-vsock",
 	.fops = &vhost_vsock_fops,
 };
@@ -778,3 +778,5 @@ module_exit(vhost_vsock_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Asias He");
 MODULE_DESCRIPTION("vhost transport for vsock ");
+MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
+MODULE_ALIAS("devname:vhost-vsock");
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 687ebb0..41d7979 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1048,7 +1048,7 @@ void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
 
 	for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
 	     i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
-		if (PIXEL_CLOCK)
+		if (PIXEL_CLOCK != 0)
 			edt[num++] = block - edid;
 
 	/* Yikes, EDID data is totally useless */
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index ec2e7e3..449fcea 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1646,8 +1646,9 @@ static int ufx_usb_probe(struct usb_interface *interface,
 	dev_dbg(dev->gdev, "%s %s - serial #%s\n",
 		usbdev->manufacturer, usbdev->product, usbdev->serial);
 	dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
-		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-		usbdev->descriptor.bcdDevice, dev);
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct),
+		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
 	dev_dbg(dev->gdev, "console enable=%d\n", console);
 	dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
 
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 6a3c353..05ef657 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1105,8 +1105,8 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
 	char *bufptr;
 	struct urb *urb;
 
-	pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
-		info->node, dev->blank_mode, blank_mode);
+	pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n",
+		 info->node, dev->blank_mode, blank_mode);
 
 	if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
 	    (blank_mode != FB_BLANK_POWERDOWN)) {
@@ -1613,8 +1613,9 @@ static int dlfb_usb_probe(struct usb_interface *interface,
 	pr_info("%s %s - serial #%s\n",
 		usbdev->manufacturer, usbdev->product, usbdev->serial);
 	pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
-		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
-		usbdev->descriptor.bcdDevice, dev);
+		le16_to_cpu(usbdev->descriptor.idVendor),
+		le16_to_cpu(usbdev->descriptor.idProduct),
+		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
 	pr_info("console enable=%d\n", console);
 	pr_info("fb_defio enable=%d\n", fb_defio);
 	pr_info("shadow enable=%d\n", shadow);
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index f9718f0..badee04 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
 }
 static void viafb_remove_proc(struct viafb_shared *shared)
 {
-	struct proc_dir_entry *viafb_entry = shared->proc_entry,
-		*iga1_entry = shared->iga1_proc_entry,
-		*iga2_entry = shared->iga2_proc_entry;
+	struct proc_dir_entry *viafb_entry = shared->proc_entry;
 
 	if (!viafb_entry)
 		return;
 
-	remove_proc_entry("output_devices", iga2_entry);
+	remove_proc_entry("output_devices", shared->iga2_proc_entry);
 	remove_proc_entry("iga2", viafb_entry);
-	remove_proc_entry("output_devices", iga1_entry);
+	remove_proc_entry("output_devices", shared->iga1_proc_entry);
 	remove_proc_entry("iga1", viafb_entry);
 	remove_proc_entry("supported_output_devices", viafb_entry);
 
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 408c174..22caf80 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -663,6 +663,12 @@ static int virtballoon_restore(struct virtio_device *vdev)
 }
 #endif
 
+static int virtballoon_validate(struct virtio_device *vdev)
+{
+	__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
+	return 0;
+}
+
 static unsigned int features[] = {
 	VIRTIO_BALLOON_F_MUST_TELL_HOST,
 	VIRTIO_BALLOON_F_STATS_VQ,
@@ -675,6 +681,7 @@ static struct virtio_driver virtio_balloon_driver = {
 	.driver.name =	KBUILD_MODNAME,
 	.driver.owner =	THIS_MODULE,
 	.id_table =	id_table,
+	.validate =	virtballoon_validate,
 	.probe =	virtballoon_probe,
 	.remove =	virtballoon_remove,
 	.config_changed = virtballoon_changed,
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index e0b8a4b..fd2e9da 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -25,8 +25,7 @@
 
 #include <asm/io.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
 
 
 #define DS1WM_CMD	0x00	/* R/W 4 bits command */
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 2e30db1..d49681c 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -20,8 +20,7 @@
 #include <linux/delay.h>
 #include <asm/delay.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
 
 /**
  * Allow the active pullup to be disabled, default is enabled.
@@ -35,6 +34,8 @@
  */
 static int ds2482_active_pullup = 1;
 module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
+MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
+				"0-disable, 1-enable (default)");
 
 /**
  * The DS2482 registers - there are 3 registers that are addressed by a read
@@ -93,30 +94,6 @@ static const u8 ds2482_chan_rd[8] =
 #define DS2482_REG_STS_PPD		0x02
 #define DS2482_REG_STS_1WB		0x01
 
-
-static int ds2482_probe(struct i2c_client *client,
-			const struct i2c_device_id *id);
-static int ds2482_remove(struct i2c_client *client);
-
-
-/**
- * Driver data (common to all clients)
- */
-static const struct i2c_device_id ds2482_id[] = {
-	{ "ds2482", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, ds2482_id);
-
-static struct i2c_driver ds2482_driver = {
-	.driver = {
-		.name	= "ds2482",
-	},
-	.probe		= ds2482_probe,
-	.remove		= ds2482_remove,
-	.id_table	= ds2482_id,
-};
-
 /*
  * Client data (each client gets its own)
  */
@@ -560,10 +537,25 @@ static int ds2482_remove(struct i2c_client *client)
 	return 0;
 }
 
+/**
+ * Driver data (common to all clients)
+ */
+static const struct i2c_device_id ds2482_id[] = {
+	{ "ds2482", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ds2482_id);
+
+static struct i2c_driver ds2482_driver = {
+	.driver = {
+		.name	= "ds2482",
+	},
+	.probe		= ds2482_probe,
+	.remove		= ds2482_remove,
+	.id_table	= ds2482_id,
+};
 module_i2c_driver(ds2482_driver);
 
-MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
-				"0-disable, 1-enable (default)");
 MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
 MODULE_DESCRIPTION("DS2482 driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index be77b79..46ccb2f 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -25,8 +25,7 @@
 #include <linux/usb.h>
 #include <linux/slab.h>
 
-#include "../w1_int.h"
-#include "../w1.h"
+#include <linux/w1.h>
 
 /* USB Standard */
 /* USB Control request vendor type */
@@ -179,28 +178,9 @@ struct ds_status
 	u8			reserved2;
 };
 
-static struct usb_device_id ds_id_table [] = {
-	{ USB_DEVICE(0x04fa, 0x2490) },
-	{ },
-};
-MODULE_DEVICE_TABLE(usb, ds_id_table);
-
-static int ds_probe(struct usb_interface *, const struct usb_device_id *);
-static void ds_disconnect(struct usb_interface *);
-
-static int ds_send_control(struct ds_device *, u16, u16);
-static int ds_send_control_cmd(struct ds_device *, u16, u16);
-
 static LIST_HEAD(ds_devices);
 static DEFINE_MUTEX(ds_mutex);
 
-static struct usb_driver ds_driver = {
-	.name =		"DS9490R",
-	.probe =	ds_probe,
-	.disconnect =	ds_disconnect,
-	.id_table =	ds_id_table,
-};
-
 static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
 {
 	int err;
@@ -1108,8 +1088,20 @@ static void ds_disconnect(struct usb_interface *intf)
 	kfree(dev);
 }
 
+static struct usb_device_id ds_id_table [] = {
+	{ USB_DEVICE(0x04fa, 0x2490) },
+	{ },
+};
+MODULE_DEVICE_TABLE(usb, ds_id_table);
+
+static struct usb_driver ds_driver = {
+	.name =		"DS9490R",
+	.probe =	ds_probe,
+	.disconnect =	ds_disconnect,
+	.id_table =	ds_id_table,
+};
 module_usb_driver(ds_driver);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
 MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 97a676b..d83d7c9 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -34,28 +34,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
-
-static struct pci_device_id matrox_w1_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
-	{ },
-};
-MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
-
-static int matrox_w1_probe(struct pci_dev *, const struct pci_device_id *);
-static void matrox_w1_remove(struct pci_dev *);
-
-static struct pci_driver matrox_w1_pci_driver = {
-	.name = "matrox_w1",
-	.id_table = matrox_w1_tbl,
-	.probe = matrox_w1_probe,
-	.remove = matrox_w1_remove,
-};
+#include <linux/w1.h>
 
 /*
  * Matrox G400 DDC registers.
@@ -88,9 +67,6 @@ struct matrox_device
 	struct w1_bus_master *bus_master;
 };
 
-static u8 matrox_w1_read_ddc_bit(void *);
-static void matrox_w1_write_ddc_bit(void *, u8);
-
 /*
  * These functions read and write DDC Data bit.
  *
@@ -226,4 +202,21 @@ static void matrox_w1_remove(struct pci_dev *pdev)
 	}
 	kfree(dev);
 }
+
+static struct pci_device_id matrox_w1_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
+	{ },
+};
+MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
+
+static struct pci_driver matrox_w1_pci_driver = {
+	.name = "matrox_w1",
+	.id_table = matrox_w1_tbl,
+	.probe = matrox_w1_probe,
+	.remove = matrox_w1_remove,
+};
 module_pci_driver(matrox_w1_pci_driver);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a462175..74f2e6e 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -19,8 +19,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
 
 /*
  * MXC W1 Register offsets
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index fb190c2..3612542 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -19,8 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
 
 #define	MOD_NAME	"OMAP_HDQ:"
 
@@ -53,7 +52,10 @@
 #define OMAP_HDQ_MAX_USER			4
 
 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
+
 static int w1_id;
+module_param(w1_id, int, S_IRUSR);
+MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
 
 struct hdq_data {
 	struct device		*dev;
@@ -76,36 +78,6 @@ struct hdq_data {
 
 };
 
-static int omap_hdq_probe(struct platform_device *pdev);
-static int omap_hdq_remove(struct platform_device *pdev);
-
-static const struct of_device_id omap_hdq_dt_ids[] = {
-	{ .compatible = "ti,omap3-1w" },
-	{ .compatible = "ti,am4372-hdq" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
-
-static struct platform_driver omap_hdq_driver = {
-	.probe =	omap_hdq_probe,
-	.remove =	omap_hdq_remove,
-	.driver =	{
-		.name =	"omap_hdq",
-		.of_match_table = omap_hdq_dt_ids,
-	},
-};
-
-static u8 omap_w1_read_byte(void *_hdq);
-static void omap_w1_write_byte(void *_hdq, u8 byte);
-static u8 omap_w1_reset_bus(void *_hdq);
-
-
-static struct w1_bus_master omap_w1_master = {
-	.read_byte	= omap_w1_read_byte,
-	.write_byte	= omap_w1_write_byte,
-	.reset_bus	= omap_w1_reset_bus,
-};
-
 /* HDQ register I/O routines */
 static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
 {
@@ -678,6 +650,12 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
 	}
 }
 
+static struct w1_bus_master omap_w1_master = {
+	.read_byte	= omap_w1_read_byte,
+	.write_byte	= omap_w1_write_byte,
+	.reset_bus	= omap_w1_reset_bus,
+};
+
 static int omap_hdq_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -787,10 +765,22 @@ static int omap_hdq_remove(struct platform_device *pdev)
 	return 0;
 }
 
-module_platform_driver(omap_hdq_driver);
+static const struct of_device_id omap_hdq_dt_ids[] = {
+	{ .compatible = "ti,omap3-1w" },
+	{ .compatible = "ti,am4372-hdq" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
 
-module_param(w1_id, int, S_IRUSR);
-MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
+static struct platform_driver omap_hdq_driver = {
+	.probe = omap_hdq_probe,
+	.remove = omap_hdq_remove,
+	.driver = {
+		.name =	"omap_hdq",
+		.of_match_table = omap_hdq_dt_ids,
+	},
+};
+module_platform_driver(omap_hdq_driver);
 
 MODULE_AUTHOR("Texas Instruments");
 MODULE_DESCRIPTION("HDQ-1W driver Library");
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a373ae6..a90728c 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -20,8 +20,7 @@
 #include <linux/of.h>
 #include <linux/delay.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
 
 static u8 w1_gpio_set_pullup(void *data, int delay)
 {
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index 9f4a86b..8046ac4 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -17,14 +17,16 @@
 #include <linux/mutex.h>
 #include <linux/power/bq27xxx_battery.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_BQ27000	0x01
 
 #define HDQ_CMD_READ	(0)
 #define HDQ_CMD_WRITE	(1<<7)
 
 static int F_ID;
+module_param(F_ID, int, S_IRUSR);
+MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
 
 static int w1_bq27000_read(struct device *dev, unsigned int reg)
 {
@@ -106,13 +108,10 @@ static void __exit w1_bq27000_exit(void)
 	w1_unregister_family(&w1_bq27000_family);
 }
 
-
 module_init(w1_bq27000_init);
 module_exit(w1_bq27000_exit);
 
-module_param(F_ID, int, S_IRUSR);
-MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Texas Instruments Ltd");
 MODULE_DESCRIPTION("HDQ/1-wire slave driver bq27000 battery monitor chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
index d5d5487..42a1e81 100644
--- a/drivers/w1/slaves/w1_ds2405.c
+++ b/drivers/w1/slaves/w1_ds2405.c
@@ -24,8 +24,9 @@
 #include <linux/string.h>
 #include <linux/types.h>
 
-#include "../w1.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS2405	0x05
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 51f2f66..fac2663 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -17,13 +17,9 @@
 #include <linux/slab.h>
 #include <linux/crc16.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
-MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
+#define W1_FAMILY_DS2406	0x12
 
 #define W1_F12_FUNC_READ_STATUS		   0xAA
 #define W1_F12_FUNC_WRITE_STATUS	   0x55
@@ -154,3 +150,7 @@ static struct w1_family w1_family_12 = {
 	.fops = &w1_f12_fops,
 };
 module_w1_family(w1_family_12);
+
+MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
+MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index aec5958..b535d5e 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -15,15 +15,9 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
-MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
-
+#define W1_FAMILY_DS2408	0x29
 
 #define W1_F29_RETRIES		3
 
@@ -352,3 +346,8 @@ static struct w1_family w1_family_29 = {
 	.fops = &w1_f29_fops,
 };
 module_w1_family(w1_family_29);
+
+MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
+MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index f2e1c51..492e3d0 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -16,14 +16,9 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
-MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
+#define W1_FAMILY_DS2413	0x3A
 
 #define W1_F3A_RETRIES                     3
 #define W1_F3A_FUNC_PIO_ACCESS_READ        0xF5
@@ -136,3 +131,8 @@ static struct w1_family w1_family_3a = {
 	.fops = &w1_f3a_fops,
 };
 module_w1_family(w1_family_3a);
+
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index 4ab54fd..050407c 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -30,9 +30,9 @@
 #include <linux/delay.h>
 #include <linux/crc16.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_COUNTER_DS2423	0x1D
 
 #define CRC16_VALID	0xb001
 #define CRC16_INIT	0
@@ -140,7 +140,7 @@ static struct w1_family w1_family_1d = {
 };
 module_w1_family(w1_family_1d);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
 MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
+MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_COUNTER_DS2423));
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 80572cb..5adecd3 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -16,9 +16,9 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2431	0x2D
 
 #define W1_F2D_EEPROM_SIZE		128
 #define W1_F2D_PAGE_COUNT		4
@@ -290,7 +290,7 @@ static struct w1_family w1_family_2d = {
 };
 module_w1_family(w1_family_2d);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>");
 MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 1kb EEPROM");
+MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2431));
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 6cf378c..75ad70c 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -22,14 +22,9 @@
 
 #endif
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
-MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
+#define W1_EEPROM_DS2433	0x23
 
 #define W1_EEPROM_SIZE		512
 #define W1_PAGE_COUNT		16
@@ -306,3 +301,8 @@ static struct w1_family w1_family_23 = {
 	.fops = &w1_f23_fops,
 };
 module_w1_family(w1_family_23);
+
+MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
+MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index 5ededb4..6487fb7 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -13,8 +13,9 @@
 #include <linux/types.h>
 #include <linux/delay.h>
 
-#include "../w1.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS2438		0x26
 
 #define W1_DS2438_RETRIES		3
 
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index ffa37f77..26168ab 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -18,11 +18,12 @@
 #include <linux/idr.h>
 #include <linux/gfp.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
 #include "w1_ds2760.h"
 
+#define W1_FAMILY_DS2760	0x30
+
 static int w1_ds2760_io(struct device *dev, char *buf, int addr, size_t count,
 			int io)
 {
@@ -63,11 +64,13 @@ int w1_ds2760_read(struct device *dev, char *buf, int addr, size_t count)
 {
 	return w1_ds2760_io(dev, buf, addr, count, 0);
 }
+EXPORT_SYMBOL(w1_ds2760_read);
 
 int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
 {
 	return w1_ds2760_io(dev, buf, addr, count, 1);
 }
+EXPORT_SYMBOL(w1_ds2760_write);
 
 static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
 {
@@ -91,11 +94,13 @@ int w1_ds2760_store_eeprom(struct device *dev, int addr)
 {
 	return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
 }
+EXPORT_SYMBOL(w1_ds2760_store_eeprom);
 
 int w1_ds2760_recall_eeprom(struct device *dev, int addr)
 {
 	return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
 }
+EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
 
 static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr, char *buf,
@@ -164,12 +169,7 @@ static struct w1_family w1_ds2760_family = {
 };
 module_w1_family(w1_ds2760_family);
 
-EXPORT_SYMBOL(w1_ds2760_read);
-EXPORT_SYMBOL(w1_ds2760_write);
-EXPORT_SYMBOL(w1_ds2760_store_eeprom);
-EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
-
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
 MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
+MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2760));
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index f5c2aa4..a605281 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -21,11 +21,12 @@
 #include <linux/mutex.h>
 #include <linux/idr.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
 #include "w1_ds2780.h"
 
+#define W1_FAMILY_DS2780	0x32
+
 static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
 			size_t count, int io)
 {
@@ -156,7 +157,7 @@ static struct w1_family w1_ds2780_family = {
 };
 module_w1_family(w1_ds2780_family);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
 MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
+MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2780));
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 9c03e01..645be6e 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -18,11 +18,12 @@
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
 #include "w1_ds2781.h"
 
+#define W1_FAMILY_DS2781	0x3D
+
 static int w1_ds2781_do_io(struct device *dev, char *buf, int addr,
 			size_t count, int io)
 {
@@ -153,7 +154,7 @@ static struct w1_family w1_ds2781_family = {
 };
 module_w1_family(w1_ds2781_family);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
 MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC");
+MODULE_LICENSE("GPL");
 MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2781));
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 5e348d3..ec234b8 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -20,14 +20,9 @@
 #define CRC16_INIT		0
 #define CRC16_VALID		0xb001
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Markus Franke <franke.m@sebakmt.com>, <franm@hrz.tu-chemnitz.de>");
-MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
+#define W1_FAMILY_DS28E04	0x1C
 
 /* Allow the strong pullup to be disabled, but default to enabled.
  * If it was disabled a parasite powered device might not get the required
@@ -428,3 +423,8 @@ static struct w1_family w1_family_1C = {
 	.fops = &w1_f1C_fops,
 };
 module_w1_family(w1_family_1C);
+
+MODULE_AUTHOR("Markus Franke <franke.m@sebakmt.com>, <franm@hrz.tu-chemnitz.de>");
+MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index ed4c875..e556b0c 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -27,15 +27,10 @@
 #include <linux/device.h>
 #include <linux/types.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_01));
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_81));
+#define W1_FAMILY_SMEM_01	0x01
+#define W1_FAMILY_SMEM_81	0x81
 
 static struct w1_family w1_smem_family_01 = {
 	.fid = W1_FAMILY_SMEM_01,
@@ -70,3 +65,9 @@ static void __exit w1_smem_fini(void)
 
 module_init(w1_smem_init);
 module_exit(w1_smem_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_01));
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_81));
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 82611f1..cb3fc3c 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -30,18 +30,13 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18S20));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1822));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18B20));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1825));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
+#define W1_THERM_DS18S20	0x10
+#define W1_THERM_DS1822		0x22
+#define W1_THERM_DS18B20	0x28
+#define W1_THERM_DS1825		0x3B
+#define W1_THERM_DS28EA00	0x42
 
 /* Allow the strong pullup to be disabled, but default to enabled.
  * If it was disabled a parasite powered device might not get the require
@@ -646,3 +641,12 @@ static void __exit w1_therm_fini(void)
 
 module_init(w1_therm_init);
 module_exit(w1_therm_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18S20));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1822));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18B20));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1825));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8511d16..95ea7e6 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -28,25 +28,20 @@
 
 #include <linux/atomic.h>
 
-#include "w1.h"
-#include "w1_int.h"
-#include "w1_family.h"
+#include "w1_internal.h"
 #include "w1_netlink.h"
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
+#define W1_FAMILY_DEFAULT	0
 
 static int w1_timeout = 10;
-static int w1_timeout_us = 0;
-int w1_max_slave_count = 64;
-int w1_max_slave_ttl = 10;
-
 module_param_named(timeout, w1_timeout, int, 0);
 MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
+
+static int w1_timeout_us = 0;
 module_param_named(timeout_us, w1_timeout_us, int, 0);
 MODULE_PARM_DESC(timeout_us,
 		 "time in microseconds between automatic slave searches");
+
 /* A search stops when w1_max_slave_count devices have been found in that
  * search.  The next search will start over and detect the same set of devices
  * on a static 1-wire bus.  Memory is not allocated based on this number, just
@@ -55,9 +50,12 @@ MODULE_PARM_DESC(timeout_us,
  * device on the network and w1_max_slave_count is set to 1, the device id can
  * be read directly skipping the normal slower search process.
  */
+int w1_max_slave_count = 64;
 module_param_named(max_slave_count, w1_max_slave_count, int, 0);
 MODULE_PARM_DESC(max_slave_count,
 	"maximum number of slaves detected in a search");
+
+int w1_max_slave_ttl = 10;
 module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
 MODULE_PARM_DESC(slave_ttl,
 	"Number of searches not seeing a slave before it will be removed");
@@ -1228,3 +1226,7 @@ static void __exit w1_fini(void)
 
 module_init(w1_init);
 module_exit(w1_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 2096f46..f14ab0b 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -18,8 +18,7 @@
 #include <linux/delay.h>
 #include <linux/export.h>
 
-#include "w1_family.h"
-#include "w1.h"
+#include "w1_internal.h"
 
 DEFINE_SPINLOCK(w1_flock);
 static LIST_HEAD(w1_families);
@@ -55,6 +54,7 @@ int w1_register_family(struct w1_family *newf)
 
 	return ret;
 }
+EXPORT_SYMBOL(w1_register_family);
 
 /**
  * w1_unregister_family() - unregister a device family driver
@@ -87,6 +87,7 @@ void w1_unregister_family(struct w1_family *fent)
 			flush_signals(current);
 	}
 }
+EXPORT_SYMBOL(w1_unregister_family);
 
 /*
  * Should be called under w1_flock held.
@@ -136,6 +137,3 @@ void __w1_family_get(struct w1_family *f)
 	atomic_inc(&f->refcnt);
 	smp_mb__after_atomic();
 }
-
-EXPORT_SYMBOL(w1_unregister_family);
-EXPORT_SYMBOL(w1_register_family);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
deleted file mode 100644
index 869a3ff..0000000
--- a/drivers/w1/w1_family.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __W1_FAMILY_H
-#define __W1_FAMILY_H
-
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/atomic.h>
-
-#define W1_FAMILY_DEFAULT	0
-#define W1_FAMILY_BQ27000	0x01
-#define W1_FAMILY_SMEM_01	0x01
-#define W1_FAMILY_SMEM_81	0x81
-#define W1_FAMILY_DS2405	0x05
-#define W1_THERM_DS18S20 	0x10
-#define W1_FAMILY_DS28E04	0x1C
-#define W1_COUNTER_DS2423	0x1D
-#define W1_THERM_DS1822  	0x22
-#define W1_EEPROM_DS2433  	0x23
-#define W1_FAMILY_DS2438	0x26
-#define W1_THERM_DS18B20 	0x28
-#define W1_FAMILY_DS2408	0x29
-#define W1_EEPROM_DS2431	0x2D
-#define W1_FAMILY_DS2760	0x30
-#define W1_FAMILY_DS2780	0x32
-#define W1_FAMILY_DS2413	0x3A
-#define W1_FAMILY_DS2406	0x12
-#define W1_THERM_DS1825		0x3B
-#define W1_FAMILY_DS2781	0x3D
-#define W1_THERM_DS28EA00	0x42
-
-#define MAXNAMELEN		32
-
-struct w1_slave;
-
-/**
- * struct w1_family_ops - operations for a family type
- * @add_slave: add_slave
- * @remove_slave: remove_slave
- * @groups: sysfs group
- */
-struct w1_family_ops
-{
-	int  (* add_slave)(struct w1_slave *);
-	void (* remove_slave)(struct w1_slave *);
-	const struct attribute_group **groups;
-};
-
-/**
- * struct w1_family - reference counted family structure.
- * @family_entry:	family linked list
- * @fid:		8 bit family identifier
- * @fops:		operations for this family
- * @refcnt:		reference counter
- */
-struct w1_family
-{
-	struct list_head	family_entry;
-	u8			fid;
-
-	struct w1_family_ops	*fops;
-
-	atomic_t		refcnt;
-};
-
-extern spinlock_t w1_flock;
-
-void w1_family_put(struct w1_family *);
-void __w1_family_get(struct w1_family *);
-struct w1_family * w1_family_registered(u8);
-void w1_unregister_family(struct w1_family *);
-int w1_register_family(struct w1_family *);
-
-/**
- * module_w1_driver() - Helper macro for registering a 1-Wire families
- * @__w1_family: w1_family struct
- *
- * Helper macro for 1-Wire families which do not do anything special in module
- * init/exit. This eliminates a lot of boilerplate. Each module may only
- * use this macro once, and calling it replaces module_init() and module_exit()
- */
-#define module_w1_family(__w1_family) \
-	module_driver(__w1_family, w1_register_family, \
-			w1_unregister_family)
-
-#endif /* __W1_FAMILY_H */
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 1072a2e..1c77617 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -21,9 +21,8 @@
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 
-#include "w1.h"
+#include "w1_internal.h"
 #include "w1_netlink.h"
-#include "w1_int.h"
 
 static int w1_search_count = -1; /* Default is continual scan */
 module_param_named(search_count, w1_search_count, int, 0);
@@ -179,6 +178,7 @@ int w1_add_master_device(struct w1_bus_master *master)
 
 	return retval;
 }
+EXPORT_SYMBOL(w1_add_master_device);
 
 void __w1_remove_master_device(struct w1_master *dev)
 {
@@ -251,6 +251,4 @@ void w1_remove_master_device(struct w1_bus_master *bm)
 
 	__w1_remove_master_device(found);
 }
-
-EXPORT_SYMBOL(w1_add_master_device);
 EXPORT_SYMBOL(w1_remove_master_device);
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
deleted file mode 100644
index 3719891..0000000
--- a/drivers/w1/w1_int.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __W1_INT_H
-#define __W1_INT_H
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-
-#include "w1.h"
-
-int w1_add_master_device(struct w1_bus_master *);
-void w1_remove_master_device(struct w1_bus_master *);
-void __w1_remove_master_device(struct w1_master *);
-
-#endif /* __W1_INT_H */
diff --git a/drivers/w1/w1_internal.h b/drivers/w1/w1_internal.h
new file mode 100644
index 0000000..1623e2f
--- /dev/null
+++ b/drivers/w1/w1_internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __W1_H
+#define __W1_H
+
+#include <linux/w1.h>
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#define W1_SLAVE_ACTIVE		0
+#define W1_SLAVE_DETACH		1
+
+/**
+ * struct w1_async_cmd - execute callback from the w1_process kthread
+ * @async_entry: link entry
+ * @cb: callback function, must list_del and destroy this list before
+ * returning
+ *
+ * When inserted into the w1_master async_list, w1_process will execute
+ * the callback.  Embed this into the structure with the command details.
+ */
+struct w1_async_cmd {
+	struct list_head	async_entry;
+	void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
+};
+
+int w1_create_master_attributes(struct w1_master *master);
+void w1_destroy_master_attributes(struct w1_master *master);
+void w1_search(struct w1_master *dev, u8 search_type,
+	       w1_slave_found_callback cb);
+void w1_search_devices(struct w1_master *dev, u8 search_type,
+		       w1_slave_found_callback cb);
+/* call w1_unref_slave to release the reference counts w1_search_slave added */
+struct w1_slave *w1_search_slave(struct w1_reg_num *id);
+/*
+ * decrements the reference on sl->master and sl, and cleans up if zero
+ * returns the reference count after it has been decremented
+ */
+int w1_unref_slave(struct w1_slave *sl);
+void w1_slave_found(struct w1_master *dev, u64 rn);
+void w1_search_process_cb(struct w1_master *dev, u8 search_type,
+			  w1_slave_found_callback cb);
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+					struct w1_reg_num *rn);
+struct w1_master *w1_search_master_id(u32 id);
+
+/* Disconnect and reconnect devices in the given family.  Used for finding
+ * unclaimed devices after a family has been registered or releasing devices
+ * after a family has been unregistered.  Set attach to 1 when a new family
+ * has just been registered, to 0 when it has been unregistered.
+ */
+void w1_reconnect_slaves(struct w1_family *f, int attach);
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
+/* 0 success, otherwise EBUSY */
+int w1_slave_detach(struct w1_slave *sl);
+
+void __w1_remove_master_device(struct w1_master *dev);
+
+void w1_family_put(struct w1_family *f);
+void __w1_family_get(struct w1_family *f);
+struct w1_family *w1_family_registered(u8 fid);
+
+extern struct device_driver w1_master_driver;
+extern struct device w1_master_device;
+extern int w1_max_slave_count;
+extern int w1_max_slave_ttl;
+extern struct list_head w1_masters;
+extern struct mutex w1_mlock;
+extern spinlock_t w1_flock;
+
+int w1_process_callbacks(struct w1_master *dev);
+int w1_process(void *data);
+
+#endif /* __W1_H */
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 1134e6b..d191e1f 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -18,7 +18,7 @@
 #include <linux/moduleparam.h>
 #include <linux/module.h>
 
-#include "w1.h"
+#include "w1_internal.h"
 
 static int w1_delay_parm = 1;
 module_param_named(delay_coef, w1_delay_parm, int, 0);
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 027950f..f2f099c 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -17,7 +17,7 @@
 #include <linux/netlink.h>
 #include <linux/connector.h>
 
-#include "w1.h"
+#include "w1_internal.h"
 #include "w1_netlink.h"
 
 #if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index b389e5ff..a36661c 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -18,7 +18,7 @@
 #include <asm/types.h>
 #include <linux/connector.h>
 
-#include "w1.h"
+#include "w1_internal.h"
 
 /**
  * enum w1_cn_msg_flags - bitfield flags for struct cn_msg.flags
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b52852f..2e567d8 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1343,8 +1343,12 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
 			    bool force)
 {
 	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
+	int ret = rebind_irq_to_cpu(data->irq, tcpu);
 
-	return rebind_irq_to_cpu(data->irq, tcpu);
+	if (!ret)
+		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
+
+	return ret;
 }
 
 static void enable_dynirq(struct irq_data *data)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c1ec8ee..9e35032 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -190,6 +190,7 @@ static void do_poweroff(void)
 {
 	switch (system_state) {
 	case SYSTEM_BOOTING:
+	case SYSTEM_SCHEDULING:
 		orderly_poweroff(true);
 		break;
 	case SYSTEM_RUNNING:
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index a493c7315..6cc1c15 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -408,6 +408,8 @@ static int __init xen_late_init_mcelog(void)
 	if (ret)
 		goto deregister;
 
+	pr_info("/dev/mcelog registered by Xen\n");
+
 	return 0;
 
 deregister:
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7a92a5e..feca75b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 				st->global_error = 1;
 		}
 	}
-	st->va += PAGE_SIZE * nr;
-	st->index += nr;
+	st->va += XEN_PAGE_SIZE * nr;
+	st->index += nr / XEN_PFN_PER_PAGE;
 
 	return 0;
 }
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 4ac2ca8..bf13d1e 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -233,12 +233,12 @@ static int tmem_cleancache_init_fs(size_t pagesize)
 	return xen_tmem_new_pool(uuid_private, 0, pagesize);
 }
 
-static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
 {
 	struct tmem_pool_uuid shared_uuid;
 
-	shared_uuid.uuid_lo = *(u64 *)uuid;
-	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
+	shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
+	shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 3062cce..782d4d0 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -350,7 +350,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
 	struct sockaddr_rxrpc srx;
 	struct afs_server *server;
-	struct uuid_v1 *r;
+	struct afs_uuid *r;
 	unsigned loop;
 	__be32 *b;
 	int ret;
@@ -380,7 +380,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 		}
 
 		_debug("unmarshall UUID");
-		call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
 		if (!call->request)
 			return -ENOMEM;
 
@@ -453,7 +453,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 {
 	struct afs_call *call = container_of(work, struct afs_call, work);
-	struct uuid_v1 *r = call->request;
+	struct afs_uuid *r = call->request;
 
 	struct {
 		__be32	match;
@@ -476,7 +476,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
  */
 static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 {
-	struct uuid_v1 *r;
+	struct afs_uuid *r;
 	unsigned loop;
 	__be32 *b;
 	int ret;
@@ -502,15 +502,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 		}
 
 		_debug("unmarshall UUID");
-		call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
 		if (!call->request)
 			return -ENOMEM;
 
 		b = call->buffer;
 		r = call->request;
-		r->time_low			= b[0];
-		r->time_mid			= htons(ntohl(b[1]));
-		r->time_hi_and_version		= htons(ntohl(b[2]));
+		r->time_low			= ntohl(b[0]);
+		r->time_mid			= ntohl(b[1]);
+		r->time_hi_and_version		= ntohl(b[2]);
 		r->clock_seq_hi_and_reserved 	= ntohl(b[3]);
 		r->clock_seq_low		= ntohl(b[4]);
 
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 3936729..4e25566 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -410,6 +410,15 @@ struct afs_interface {
 	unsigned	mtu;		/* MTU of interface */
 };
 
+struct afs_uuid {
+	__be32		time_low;			/* low part of timestamp */
+	__be16		time_mid;			/* mid part of timestamp */
+	__be16		time_hi_and_version;		/* high part of timestamp and version  */
+	__u8		clock_seq_hi_and_reserved;	/* clock seq hi and variant */
+	__u8		clock_seq_low;			/* clock seq low */
+	__u8		node[6];			/* spatially unique node ID (MAC addr) */
+};
+
 /*****************************************************************************/
 /*
  * cache.c
@@ -544,7 +553,7 @@ extern int afs_drop_inode(struct inode *);
  * main.c
  */
 extern struct workqueue_struct *afs_wq;
-extern struct uuid_v1 afs_uuid;
+extern struct afs_uuid afs_uuid;
 
 /*
  * misc.c
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 51d7d17..9944770 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -31,7 +31,7 @@ static char *rootcell;
 module_param(rootcell, charp, 0);
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
-struct uuid_v1 afs_uuid;
+struct afs_uuid afs_uuid;
 struct workqueue_struct *afs_wq;
 
 /*
diff --git a/fs/aio.c b/fs/aio.c
index f52d925..dcad3a6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1541,7 +1541,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	ssize_t ret;
 
 	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
+	if (unlikely(iocb->aio_reserved2)) {
 		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
@@ -1568,6 +1568,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	req->common.ki_pos = iocb->aio_offset;
 	req->common.ki_complete = aio_complete;
 	req->common.ki_flags = iocb_flags(req->common.ki_filp);
+	req->common.ki_hint = file_write_hint(file);
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
@@ -1586,6 +1587,18 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		req->common.ki_flags |= IOCB_EVENTFD;
 	}
 
+	ret = kiocb_set_rw_flags(&req->common, iocb->aio_rw_flags);
+	if (unlikely(ret)) {
+		pr_debug("EINVAL: aio_rw_flags\n");
+		goto out_put_req;
+	}
+
+	if ((req->common.ki_flags & IOCB_NOWAIT) &&
+			!(req->common.ki_flags & IOCB_DIRECT)) {
+		ret = -EOPNOTSUPP;
+		goto out_put_req;
+	}
+
 	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
 	if (unlikely(ret)) {
 		pr_debug("EFAULT: aio_key\n");
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index beef981..974f534 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -83,7 +83,7 @@ struct autofs_info {
 struct autofs_wait_queue {
 	wait_queue_head_t queue;
 	struct autofs_wait_queue *next;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	/* We use the following to see what we are waiting for */
 	struct qstr name;
 	u32 dev;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 734cbf8..dd9f1be 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
 	int status;
 
 	token = (autofs_wqt_t) param->fail.token;
-	status = param->fail.status ? param->fail.status : -ENOENT;
+	status = param->fail.status < 0 ? param->fail.status : -ENOENT;
 	return autofs4_wait_release(sbi, token, status);
 }
 
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 24a58bf..7071895 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 	size_t pktsz;
 
 	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
-		 (unsigned long) wq->wait_queue_token,
+		 (unsigned long) wq->wait_queue_entry_token,
 		 wq->name.len, wq->name.name, type);
 
 	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */
@@ -120,7 +120,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*mp);
 
-		mp->wait_queue_token = wq->wait_queue_token;
+		mp->wait_queue_entry_token = wq->wait_queue_entry_token;
 		mp->len = wq->name.len;
 		memcpy(mp->name, wq->name.name, wq->name.len);
 		mp->name[wq->name.len] = '\0';
@@ -133,7 +133,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*ep);
 
-		ep->wait_queue_token = wq->wait_queue_token;
+		ep->wait_queue_entry_token = wq->wait_queue_entry_token;
 		ep->len = wq->name.len;
 		memcpy(ep->name, wq->name.name, wq->name.len);
 		ep->name[wq->name.len] = '\0';
@@ -153,7 +153,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 		pktsz = sizeof(*packet);
 
-		packet->wait_queue_token = wq->wait_queue_token;
+		packet->wait_queue_entry_token = wq->wait_queue_entry_token;
 		packet->len = wq->name.len;
 		memcpy(packet->name, wq->name.name, wq->name.len);
 		packet->name[wq->name.len] = '\0';
@@ -428,7 +428,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 			return -ENOMEM;
 		}
 
-		wq->wait_queue_token = autofs4_next_wait_queue;
+		wq->wait_queue_entry_token = autofs4_next_wait_queue;
 		if (++autofs4_next_wait_queue == 0)
 			autofs4_next_wait_queue = 1;
 		wq->next = sbi->queues;
@@ -461,7 +461,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 		}
 
 		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-			 (unsigned long) wq->wait_queue_token, wq->name.len,
+			 (unsigned long) wq->wait_queue_entry_token, wq->name.len,
 			 wq->name.name, notify);
 
 		/*
@@ -471,7 +471,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 	} else {
 		wq->wait_ctr++;
 		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-			 (unsigned long) wq->wait_queue_token, wq->name.len,
+			 (unsigned long) wq->wait_queue_entry_token, wq->name.len,
 			 wq->name.name, notify);
 		mutex_unlock(&sbi->wq_mutex);
 		kfree(qstr.name);
@@ -550,13 +550,13 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 }
 
 
-int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status)
 {
 	struct autofs_wait_queue *wq, **wql;
 
 	mutex_lock(&sbi->wq_mutex);
 	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
-		if (wq->wait_queue_token == wait_queue_token)
+		if (wq->wait_queue_entry_token == wait_queue_entry_token)
 			break;
 	}
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 519599d..a7df151 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -225,6 +225,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	bio_init(&bio, vecs, nr_pages);
 	bio.bi_bdev = bdev;
 	bio.bi_iter.bi_sector = pos >> 9;
+	bio.bi_write_hint = iocb->ki_hint;
 	bio.bi_private = current;
 	bio.bi_end_io = blkdev_bio_end_io_simple;
 
@@ -262,8 +263,11 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	if (vecs != inline_vecs)
 		kfree(vecs);
 
-	if (unlikely(bio.bi_error))
-		return bio.bi_error;
+	if (unlikely(bio.bi_status))
+		ret = blk_status_to_errno(bio.bi_status);
+
+	bio_uninit(&bio);
+
 	return ret;
 }
 
@@ -288,16 +292,18 @@ static void blkdev_bio_end_io(struct bio *bio)
 	bool should_dirty = dio->should_dirty;
 
 	if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-		if (bio->bi_error && !dio->bio.bi_error)
-			dio->bio.bi_error = bio->bi_error;
+		if (bio->bi_status && !dio->bio.bi_status)
+			dio->bio.bi_status = bio->bi_status;
 	} else {
 		if (!dio->is_sync) {
 			struct kiocb *iocb = dio->iocb;
-			ssize_t ret = dio->bio.bi_error;
+			ssize_t ret;
 
-			if (likely(!ret)) {
+			if (likely(!dio->bio.bi_status)) {
 				ret = dio->size;
 				iocb->ki_pos += ret;
+			} else {
+				ret = blk_status_to_errno(dio->bio.bi_status);
 			}
 
 			dio->iocb->ki_complete(iocb, ret, 0);
@@ -334,7 +340,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
@@ -358,12 +364,13 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	for (;;) {
 		bio->bi_bdev = bdev;
 		bio->bi_iter.bi_sector = pos >> 9;
+		bio->bi_write_hint = iocb->ki_hint;
 		bio->bi_private = dio;
 		bio->bi_end_io = blkdev_bio_end_io;
 
 		ret = bio_iov_iter_get_pages(bio, iter);
 		if (unlikely(ret)) {
-			bio->bi_error = ret;
+			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
 			break;
 		}
@@ -412,7 +419,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
-	ret = dio->bio.bi_error;
+	if (!ret)
+		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
 		ret = dio->size;
 
@@ -436,7 +444,7 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 static __init int blkdev_init(void)
 {
-	blkdev_dio_pool = bioset_create(4, offsetof(struct blkdev_dio, bio));
+	blkdev_dio_pool = bioset_create(4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
 	if (!blkdev_dio_pool)
 		return -ENOMEM;
 	return 0;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index b8622e4..d87ac27 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -310,7 +310,8 @@ struct btrfs_dio_private {
 	 * The original bio may be split to several sub-bios, this is
 	 * done during endio of sub-bios
 	 */
-	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
+	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
+			blk_status_t);
 };
 
 /*
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index ab14c2e..4ded1c3 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2129,7 +2129,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
 	/* mutex is not held! This is not save if IO is not yet completed
 	 * on umount */
 	iodone_w_error = 0;
-	if (bp->bi_error)
+	if (bp->bi_status)
 		iodone_w_error = 1;
 
 	BUG_ON(NULL == block);
@@ -2143,7 +2143,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
 		if ((dev_state->state->print_mask &
 		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
 			pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
-			       bp->bi_error,
+			       bp->bi_status,
 			       btrfsic_get_block_type(dev_state->state, block),
 			       block->logical_bytenr, dev_state->name,
 			       block->dev_bytenr, block->mirror_num);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 10e6b28..a2fad39 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -155,7 +155,7 @@ static void end_compressed_bio_read(struct bio *bio)
 	unsigned long index;
 	int ret;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		cb->errors = 1;
 
 	/* if there are more bios still pending for this compressed
@@ -268,7 +268,7 @@ static void end_compressed_bio_write(struct bio *bio)
 	struct page *page;
 	unsigned long index;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		cb->errors = 1;
 
 	/* if there are more bios still pending for this compressed
@@ -287,7 +287,7 @@ static void end_compressed_bio_write(struct bio *bio)
 					 cb->start,
 					 cb->start + cb->len - 1,
 					 NULL,
-					 bio->bi_error ? 0 : 1);
+					 bio->bi_status ? 0 : 1);
 	cb->compressed_pages[0]->mapping = NULL;
 
 	end_compressed_writeback(inode, cb);
@@ -320,7 +320,7 @@ static void end_compressed_bio_write(struct bio *bio)
  * This also checksums the file bytes and gets things ready for
  * the end io hooks.
  */
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				 unsigned long len, u64 disk_start,
 				 unsigned long compressed_len,
 				 struct page **compressed_pages,
@@ -335,13 +335,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	struct page *page;
 	u64 first_byte = disk_start;
 	struct block_device *bdev;
-	int ret;
+	blk_status_t ret;
 	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
 	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 	if (!cb)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 	refcount_set(&cb->pending_bios, 0);
 	cb->errors = 0;
 	cb->inode = inode;
@@ -358,7 +358,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
 	if (!bio) {
 		kfree(cb);
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 	}
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	bio->bi_private = cb;
@@ -368,17 +368,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	/* create and submit bios for the compressed pages */
 	bytes_left = compressed_len;
 	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+		int submit = 0;
+
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_iter.bi_size)
-			ret = io_tree->ops->merge_bio_hook(page, 0,
+			submit = io_tree->ops->merge_bio_hook(page, 0,
 							   PAGE_SIZE,
 							   bio, 0);
-		else
-			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
 		    PAGE_SIZE) {
 			bio_get(bio);
 
@@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 			if (ret) {
-				bio->bi_error = ret;
+				bio->bi_status = ret;
 				bio_endio(bio);
 			}
 
@@ -434,7 +434,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
 	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 	if (ret) {
-		bio->bi_error = ret;
+		bio->bi_status = ret;
 		bio_endio(bio);
 	}
 
@@ -569,7 +569,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
  */
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -586,7 +586,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
-	int ret = -ENOMEM;
+	blk_status_t ret = BLK_STS_RESOURCE;
 	int faili = 0;
 	u32 *sums;
 
@@ -600,7 +600,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				   PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 	if (!em)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -638,7 +638,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 							      __GFP_HIGHMEM);
 		if (!cb->compressed_pages[pg_index]) {
 			faili = pg_index - 1;
-			ret = -ENOMEM;
+			ret = BLK_STS_RESOURCE;
 			goto fail2;
 		}
 	}
@@ -659,19 +659,19 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	refcount_set(&cb->pending_bios, 1);
 
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		int submit = 0;
+
 		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_SHIFT;
 
 		if (comp_bio->bi_iter.bi_size)
-			ret = tree->ops->merge_bio_hook(page, 0,
+			submit = tree->ops->merge_bio_hook(page, 0,
 							PAGE_SIZE,
 							comp_bio, 0);
-		else
-			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 		    PAGE_SIZE) {
 			bio_get(comp_bio);
 
@@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 			if (ret) {
-				comp_bio->bi_error = ret;
+				comp_bio->bi_status = ret;
 				bio_endio(comp_bio);
 			}
 
@@ -726,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
 	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 	if (ret) {
-		comp_bio->bi_error = ret;
+		comp_bio->bi_status = ret;
 		bio_endio(comp_bio);
 	}
 
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 39ec43a..680d426 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -48,12 +48,12 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 			      unsigned long total_out, u64 disk_start,
 			      struct bio *bio);
 
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long len, u64 disk_start,
 				  unsigned long compressed_len,
 				  struct page **compressed_pages,
 				  unsigned long nr_pages);
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
 
 enum btrfs_compression_type {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 643c70d..a0d0c79 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2563,7 +2563,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
 						 unsigned num_items)
 {
-	return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
 }
 
 /*
@@ -2573,7 +2573,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 						 unsigned num_items)
 {
-	return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
+	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3078,8 +3078,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
 			      u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
@@ -3094,7 +3094,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 		       u64 file_start, int contig);
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			     struct list_head *list, int search_commit);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 60a7506..c24d615 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -468,7 +468,7 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 
 	if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
 		btrfs_crit(fs_info, "invalid dir item name len: %u",
-		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
+		       (unsigned)btrfs_dir_name_len(leaf, dir_item));
 		return 1;
 	}
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67..6036d15 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
 	bio_end_io_t *end_io;
 	void *private;
 	struct btrfs_fs_info *info;
-	int error;
+	blk_status_t status;
 	enum btrfs_wq_endio_type metadata;
 	struct list_head list;
 	struct btrfs_work work;
@@ -131,7 +131,7 @@ struct async_submit_bio {
 	 */
 	u64 bio_offset;
 	struct btrfs_work work;
-	int error;
+	blk_status_t status;
 };
 
 /*
@@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
 	btrfs_work_func_t func;
 
 	fs_info = end_io_wq->info;
-	end_io_wq->error = bio->bi_error;
+	end_io_wq->status = bio->bi_status;
 
 	if (bio_op(bio) == REQ_OP_WRITE) {
 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
 	btrfs_queue_work(wq, &end_io_wq->work);
 }
 
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata)
 {
 	struct btrfs_end_io_wq *end_io_wq;
 
 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
 	if (!end_io_wq)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	end_io_wq->private = bio->bi_private;
 	end_io_wq->end_io = bio->bi_end_io;
 	end_io_wq->info = info;
-	end_io_wq->error = 0;
+	end_io_wq->status = 0;
 	end_io_wq->bio = bio;
 	end_io_wq->metadata = metadata;
 
@@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
-	int ret;
+	blk_status_t ret;
 
 	async = container_of(work, struct  async_submit_bio, work);
 	ret = async->submit_bio_start(async->inode, async->bio,
 				      async->mirror_num, async->bio_flags,
 				      async->bio_offset);
 	if (ret)
-		async->error = ret;
+		async->status = ret;
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
 		wake_up(&fs_info->async_submit_wait);
 
 	/* If an error occurred we just want to clean up the bio and move on */
-	if (async->error) {
-		async->bio->bi_error = async->error;
+	if (async->status) {
+		async->bio->bi_status = async->status;
 		bio_endio(async->bio);
 		return;
 	}
@@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
 	kfree(async);
 }
 
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-			struct bio *bio, int mirror_num,
-			unsigned long bio_flags,
-			u64 bio_offset,
-			extent_submit_bio_hook_t *submit_bio_start,
-			extent_submit_bio_hook_t *submit_bio_done)
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+		struct inode *inode, struct bio *bio, int mirror_num,
+		unsigned long bio_flags, u64 bio_offset,
+		extent_submit_bio_hook_t *submit_bio_start,
+		extent_submit_bio_hook_t *submit_bio_done)
 {
 	struct async_submit_bio *async;
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	async->inode = inode;
 	async->bio = bio;
@@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	async->bio_flags = bio_flags;
 	async->bio_offset = bio_offset;
 
-	async->error = 0;
+	async->status = 0;
 
 	atomic_inc(&fs_info->nr_async_submits);
 
@@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	return 0;
 }
 
-static int btree_csum_one_bio(struct bio *bio)
+static blk_status_t btree_csum_one_bio(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	struct btrfs_root *root;
@@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
 			break;
 	}
 
-	return ret;
+	return errno_to_blk_status(ret);
 }
 
-static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
-				    int mirror_num, unsigned long bio_flags,
-				    u64 bio_offset)
+static blk_status_t __btree_submit_bio_start(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
 	/*
 	 * when we're called for a write, we're already in the async
@@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
 	return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
-				 int mirror_num, unsigned long bio_flags,
-				 u64 bio_offset)
+static blk_status_t __btree_submit_bio_done(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
-	int ret;
+	blk_status_t ret;
 
 	/*
 	 * when we're called for a write, we're already in the async
@@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
 	 */
 	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
 	if (ret) {
-		bio->bi_error = ret;
+		bio->bi_status = ret;
 		bio_endio(bio);
 	}
 	return ret;
@@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
 	return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags,
 				 u64 bio_offset)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(bio_flags);
-	int ret;
+	blk_status_t ret;
 
 	if (bio_op(bio) != REQ_OP_WRITE) {
 		/*
@@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 	return 0;
 
 out_w_error:
-	bio->bi_error = ret;
+	bio->bi_status = ret;
 	bio_endio(bio);
 	return ret;
 }
@@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
 	bio = end_io_wq->bio;
 
-	bio->bi_error = end_io_wq->error;
+	bio->bi_status = end_io_wq->status;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@@ -3467,10 +3466,12 @@ static int write_dev_supers(struct btrfs_device *device,
 		 * we fua the first super.  The others we allow
 		 * to go down lazily.
 		 */
-		if (i == 0)
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
-		else
+		if (i == 0) {
+			ret = btrfsic_submit_bh(REQ_OP_WRITE,
+						REQ_SYNC | REQ_FUA, bh);
+		} else {
 			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+		}
 		if (ret)
 			errors++;
 	}
@@ -3495,11 +3496,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  * any device where the flush fails with eopnotsupp is flagged as not-barrier
  * capable
  */
-static int write_dev_flush(struct btrfs_device *device, int wait)
+static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
 {
 	struct request_queue *q = bdev_get_queue(device->bdev);
 	struct bio *bio;
-	int ret = 0;
+	blk_status_t ret = 0;
 
 	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		return 0;
@@ -3511,8 +3512,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 		wait_for_completion(&device->flush_wait);
 
-		if (bio->bi_error) {
-			ret = bio->bi_error;
+		if (bio->bi_status) {
+			ret = bio->bi_status;
 			btrfs_dev_stat_inc_and_print(device,
 				BTRFS_DEV_STAT_FLUSH_ERRS);
 		}
@@ -3531,11 +3532,11 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 	device->flush_bio = NULL;
 	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
 	if (!bio)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
 	device->flush_bio = bio;
@@ -3556,7 +3557,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 	struct btrfs_device *dev;
 	int errors_send = 0;
 	int errors_wait = 0;
-	int ret;
+	blk_status_t ret;
 
 	/* send down all the barriers */
 	head = &info->fs_devices->devices;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 21f1ceb..c581927 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -118,13 +118,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata);
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-			struct bio *bio, int mirror_num,
-			unsigned long bio_flags, u64 bio_offset,
-			extent_submit_bio_hook_t *submit_bio_start,
-			extent_submit_bio_hook_t *submit_bio_done);
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+		struct inode *inode, struct bio *bio, int mirror_num,
+		unsigned long bio_flags, u64 bio_offset,
+		extent_submit_bio_hook_t *submit_bio_start,
+		extent_submit_bio_hook_t *submit_bio_done);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
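
The disk-io changes above convert the bio submission hooks from returning a
negative errno to returning blk_status_t, translating at the boundaries with
errno_to_blk_status()/blk_status_to_errno() and using BLK_STS_RESOURCE in
place of -ENOMEM.  A rough userspace sketch of that kind of two-way mapping
(the status values and tables below are illustrative stand-ins, not the
kernel's actual definitions):

    /* Illustrative only: a tiny errno <-> status mapping in the spirit of
     * errno_to_blk_status()/blk_status_to_errno(); the values are made up. */
    #include <errno.h>
    #include <stdio.h>

    typedef unsigned char status_t;
    #define STS_OK       0
    #define STS_IOERR    1
    #define STS_RESOURCE 2

    static status_t errno_to_status(int err)
    {
        switch (err) {
        case 0:       return STS_OK;
        case -ENOMEM: return STS_RESOURCE;
        default:      return STS_IOERR;
        }
    }

    static int status_to_errno(status_t sts)
    {
        switch (sts) {
        case STS_OK:       return 0;
        case STS_RESOURCE: return -ENOMEM;
        default:           return -EIO;
        }
    }

    int main(void)
    {
        /* Round-trip -ENOMEM through the status type, as the btrfs
         * submission paths now do at their boundaries. */
        status_t sts = errno_to_status(-ENOMEM);
        printf("status=%d errno=%d\n", sts, status_to_errno(sts));
        return 0;
    }
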
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e390451..33d979e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3993,6 +3993,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 				    info->space_info_kobj, "%s",
 				    alloc_name(found->flags));
 	if (ret) {
+		percpu_counter_destroy(&found->total_bytes_pinned);
 		kfree(found);
 		return ret;
 	}
@@ -4844,7 +4845,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
 	spin_unlock(&delayed_rsv->lock);
 
 commit:
-	trans = btrfs_join_transaction(fs_info->fs_root);
+	trans = btrfs_join_transaction(fs_info->extent_root);
 	if (IS_ERR(trans))
 		return -ENOSPC;
 
@@ -4862,7 +4863,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
 		       struct btrfs_space_info *space_info, u64 num_bytes,
 		       u64 orig_bytes, int state)
 {
-	struct btrfs_root *root = fs_info->fs_root;
+	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_trans_handle *trans;
 	int nr;
 	int ret = 0;
@@ -5062,7 +5063,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
 
 	spin_lock(&space_info->lock);
-	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
 						      space_info);
 	if (!to_reclaim) {
 		spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8da3ed..d1cd601 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -174,7 +174,8 @@ int __init extent_io_init(void)
 		goto free_state_cache;
 
 	btrfs_bioset = bioset_create(BIO_POOL_SIZE,
-				     offsetof(struct btrfs_io_bio, bio));
+				     offsetof(struct btrfs_io_bio, bio),
+				     BIOSET_NEED_BVECS);
 	if (!btrfs_bioset)
 		goto free_buffer_cache;
 
@@ -2399,6 +2400,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct bio *bio;
 	int read_mode = 0;
+	blk_status_t status;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2431,11 +2433,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 		"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
 		read_mode, failrec->this_mirror, failrec->in_validation);
 
-	ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
+	status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
 					 failrec->bio_flags, 0);
-	if (ret) {
+	if (status) {
 		free_io_failure(BTRFS_I(inode), failrec);
 		bio_put(bio);
+		ret = blk_status_to_errno(status);
 	}
 
 	return ret;
@@ -2458,7 +2461,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 	if (!uptodate) {
 		ClearPageUptodate(page);
 		SetPageError(page);
-		ret = ret < 0 ? ret : -EIO;
+		ret = err < 0 ? err : -EIO;
 		mapping_set_error(page->mapping, ret);
 	}
 }
@@ -2474,6 +2477,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio)
 {
+	int error = blk_status_to_errno(bio->bi_status);
 	struct bio_vec *bvec;
 	u64 start;
 	u64 end;
@@ -2503,7 +2507,7 @@ static void end_bio_extent_writepage(struct bio *bio)
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-		end_extent_writepage(page, bio->bi_error, start, end);
+		end_extent_writepage(page, error, start, end);
 		end_page_writeback(page);
 	}
 
@@ -2536,7 +2540,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
 static void end_bio_extent_readpage(struct bio *bio)
 {
 	struct bio_vec *bvec;
-	int uptodate = !bio->bi_error;
+	int uptodate = !bio->bi_status;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	struct extent_io_tree *tree;
 	u64 offset = 0;
@@ -2556,7 +2560,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 
 		btrfs_debug(fs_info,
 			"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
-			(u64)bio->bi_iter.bi_sector, bio->bi_error,
+			(u64)bio->bi_iter.bi_sector, bio->bi_status,
 			io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
 
@@ -2615,7 +2619,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 				ret = bio_readpage_error(bio, offset, page,
 							 start, end, mirror);
 				if (ret == 0) {
-					uptodate = !bio->bi_error;
+					uptodate = !bio->bi_status;
 					offset += len;
 					continue;
 				}
@@ -2673,7 +2677,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 		endio_readpage_release_extent(tree, extent_start, extent_len,
 					      uptodate);
 	if (io_bio->end_io)
-		io_bio->end_io(io_bio, bio->bi_error);
+		io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
 	bio_put(bio);
 }
 
@@ -2743,7 +2747,7 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 				       unsigned long bio_flags)
 {
-	int ret = 0;
+	blk_status_t ret = 0;
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 	struct page *page = bvec->bv_page;
 	struct extent_io_tree *tree = bio->bi_private;
@@ -2761,7 +2765,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 		btrfsic_submit_bio(bio);
 
 	bio_put(bio);
-	return ret;
+	return blk_status_to_errno(ret);
 }
 
 static int merge_bio(struct extent_io_tree *tree, struct page *page,
@@ -2826,6 +2830,7 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
+	bio->bi_write_hint = page->mapping->host->i_write_hint;
 	bio_set_op_attrs(bio, op, op_flags);
 	if (wbc) {
 		wbc_init_bio(wbc, bio);
@@ -3707,7 +3712,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
 		BUG_ON(!eb);
 		done = atomic_dec_and_test(&eb->io_pages);
 
-		if (bio->bi_error ||
+		if (bio->bi_status ||
 		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
 			ClearPageUptodate(page);
 			set_btree_ioerr(page);
@@ -4377,6 +4382,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 	return NULL;
 }
 
+/*
+ * Cache for the previous fiemap extent.
+ *
+ * Used when merging fiemap extents.
+ */
+struct fiemap_cache {
+	u64 offset;
+	u64 phys;
+	u64 len;
+	u32 flags;
+	bool cached;
+};
+
+/*
+ * Helper to submit fiemap extent.
+ *
+ * Will try to merge the current fiemap extent specified by @offset, @phys,
+ * @len and @flags with the cached one.
+ * Only when the merge fails is the cached extent submitted as a
+ * fiemap extent.
+ *
+ * Return value is the same as fiemap_fill_next_extent().
+ */
+static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+				struct fiemap_cache *cache,
+				u64 offset, u64 phys, u64 len, u32 flags)
+{
+	int ret = 0;
+
+	if (!cache->cached)
+		goto assign;
+
+	/*
+	 * Sanity check, extent_fiemap() should have ensured that the new
+	 * fiemap extent won't overlap with the cached one.
+	 * Not recoverable.
+	 *
+	 * NOTE: Physical address can overlap, due to compression
+	 */
+	if (cache->offset + cache->len > offset) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	/*
+	 * Only merge fiemap extents if
+	 * 1) Their logical addresses are contiguous
+	 *
+	 * 2) Their physical addresses are contiguous
+	 *    So truly compressed (physical size smaller than logical size)
+	 *    extents won't get merged with each other
+	 *
+	 * 3) They share the same flags except FIEMAP_EXTENT_LAST
+	 *    So a regular extent won't get merged with a prealloc extent
+	 */
+	if (cache->offset + cache->len  == offset &&
+	    cache->phys + cache->len == phys  &&
+	    (cache->flags & ~FIEMAP_EXTENT_LAST) ==
+			(flags & ~FIEMAP_EXTENT_LAST)) {
+		cache->len += len;
+		cache->flags |= flags;
+		goto try_submit_last;
+	}
+
+	/* Not mergeable, need to submit cached one */
+	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+				      cache->len, cache->flags);
+	cache->cached = false;
+	if (ret)
+		return ret;
+assign:
+	cache->cached = true;
+	cache->offset = offset;
+	cache->phys = phys;
+	cache->len = len;
+	cache->flags = flags;
+try_submit_last:
+	if (cache->flags & FIEMAP_EXTENT_LAST) {
+		ret = fiemap_fill_next_extent(fieinfo, cache->offset,
+				cache->phys, cache->len, cache->flags);
+		cache->cached = false;
+	}
+	return ret;
+}
+
+/*
+ * Sanity check for fiemap cache
+ *
+ * All cached fiemap extents should have been submitted by emit_fiemap_extent().
+ * Iteration is terminated either by the last fiemap extent or by
+ * fieinfo->fi_extents_max.
+ * So no cached fiemap extent should remain.
+ */
+static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+			       struct fiemap_extent_info *fieinfo,
+			       struct fiemap_cache *cache)
+{
+	int ret;
+
+	if (!cache->cached)
+		return 0;
+
+	/* Small and recoverable problem, only to inform the developer */
+#ifdef CONFIG_BTRFS_DEBUG
+	WARN_ON(1);
+#endif
+	btrfs_warn(fs_info,
+		   "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
+		   cache->offset, cache->phys, cache->len, cache->flags);
+	ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+				      cache->len, cache->flags);
+	cache->cached = false;
+	if (ret > 0)
+		ret = 0;
+	return ret;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -4394,6 +4516,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct fiemap_cache cache = { 0 };
 	int end = 0;
 	u64 em_start = 0;
 	u64 em_len = 0;
@@ -4573,8 +4696,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
-		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-					      em_len, flags);
+		ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
+					   em_len, flags);
 		if (ret) {
 			if (ret == 1)
 				ret = 0;
@@ -4582,6 +4705,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		}
 	}
 out_free:
+	if (!ret)
+		ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
 	free_extent_map(em);
 out:
 	btrfs_free_path(path);
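
The fiemap cache added above coalesces adjacent extents before handing them
to fiemap_fill_next_extent(), following the rules spelled out in the
emit_fiemap_extent() comment.  A minimal userspace sketch of just that merge
decision (the struct and flag names below are stand-ins, not the kernel's):

    /* Illustrative only: the merge rule applied by emit_fiemap_extent(),
     * checked on plain structs.  EXTENT_LAST stands in for
     * FIEMAP_EXTENT_LAST and its value is made up. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXTENT_LAST 0x1u

    struct ext {
        uint64_t offset;   /* logical start */
        uint64_t phys;     /* physical start */
        uint64_t len;
        uint32_t flags;
    };

    /* Two extents merge only if both their logical and physical ranges are
     * contiguous and their flags match apart from the LAST bit. */
    static bool can_merge(const struct ext *cached, const struct ext *next)
    {
        return cached->offset + cached->len == next->offset &&
               cached->phys + cached->len == next->phys &&
               (cached->flags & ~EXTENT_LAST) == (next->flags & ~EXTENT_LAST);
    }

    int main(void)
    {
        struct ext cached = { .offset = 0,    .phys = 4096, .len = 4096, .flags = 0 };
        struct ext contig = { .offset = 4096, .phys = 8192, .len = 4096, .flags = EXTENT_LAST };
        struct ext gap    = { .offset = 8192, .phys = 4096, .len = 4096, .flags = 0 };

        printf("contiguous: %d\n", can_merge(&cached, &contig)); /* 1 */
        printf("gap:        %d\n", can_merge(&cached, &gap));    /* 0 */
        return 0;
    }
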
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1eafa2f..487ca02 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -92,9 +92,9 @@ struct btrfs_inode;
 struct btrfs_io_bio;
 struct io_failure_record;
 
-typedef	int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
-				       int mirror_num, unsigned long bio_flags,
-				       u64 bio_offset);
+typedef	blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset);
 struct extent_io_ops {
 	/*
 	 * The following callbacks must always be defined, the function
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 64fcb31..5b1c709 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -160,7 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
 	kfree(bio->csum_allocated);
 }
 
-static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
+static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 				   u64 logical_offset, u32 *dst, int dio)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 
 	path = btrfs_alloc_path();
 	if (!path)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
 	if (!dst) {
@@ -191,7 +191,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 					csum_size, GFP_NOFS);
 			if (!btrfs_bio->csum_allocated) {
 				btrfs_free_path(path);
-				return -ENOMEM;
+				return BLK_STS_RESOURCE;
 			}
 			btrfs_bio->csum = btrfs_bio->csum_allocated;
 			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
@@ -303,12 +303,12 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 	return 0;
 }
 
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
 {
 	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
 }
 
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
 {
 	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
 }
@@ -433,7 +433,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	return ret;
 }
 
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 		       u64 file_start, int contig)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -452,7 +452,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
 		       GFP_NOFS);
 	if (!sums)
-		return -ENOMEM;
+		return BLK_STS_RESOURCE;
 
 	sums->len = bio->bi_iter.bi_size;
 	INIT_LIST_HEAD(&sums->list);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index da1096e..59e2dcc 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1875,12 +1875,29 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	ssize_t num_written = 0;
 	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
 	ssize_t err;
-	loff_t pos;
-	size_t count;
+	loff_t pos = iocb->ki_pos;
+	size_t count = iov_iter_count(from);
 	loff_t oldsize;
 	int clean_page = 0;
 
-	inode_lock(inode);
+	if ((iocb->ki_flags & IOCB_NOWAIT) &&
+			(iocb->ki_flags & IOCB_DIRECT)) {
+		/* Don't sleep on inode rwsem */
+		if (!inode_trylock(inode))
+			return -EAGAIN;
+		/*
+		 * We will have to allocate space if nodatacow is not set,
+		 * so bail out
+		 */
+		if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+					      BTRFS_INODE_PREALLOC)) ||
+		    check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+			inode_unlock(inode);
+			return -EAGAIN;
+		}
+	} else
+		inode_lock(inode);
+
 	err = generic_write_checks(iocb, from);
 	if (err <= 0) {
 		inode_unlock(inode);
@@ -1914,8 +1931,6 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	 */
 	update_time_for_write(inode);
 
-	pos = iocb->ki_pos;
-	count = iov_iter_count(from);
 	start_pos = round_down(pos, fs_info->sectorsize);
 	oldsize = i_size_read(inode);
 	if (start_pos > oldsize) {
@@ -3071,13 +3086,19 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
 	return offset;
 }
 
+static int btrfs_file_open(struct inode *inode, struct file *filp)
+{
+	filp->f_mode |= FMODE_AIO_NOWAIT;
+	return generic_file_open(inode, filp);
+}
+
 const struct file_operations btrfs_file_operations = {
 	.llseek		= btrfs_file_llseek,
 	.read_iter      = generic_file_read_iter,
 	.splice_read	= generic_file_splice_read,
 	.write_iter	= btrfs_file_write_iter,
 	.mmap		= btrfs_file_mmap,
-	.open		= generic_file_open,
+	.open		= btrfs_file_open,
 	.release	= btrfs_release_file,
 	.fsync		= btrfs_sync_file,
 	.fallocate	= btrfs_fallocate,
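
The file.c hunks above let O_DIRECT writers opt out of sleeping: with
IOCB_NOWAIT the inode lock is only trylocked and the write returns -EAGAIN
whenever it would have to block or allocate space, while btrfs_file_open()
advertises FMODE_AIO_NOWAIT.  A rough userspace analogue of that
trylock-or-EAGAIN shape, with a pthread mutex standing in for the inode lock:

    /* Illustrative only: the "don't sleep, report EAGAIN" pattern used for
     * IOCB_NOWAIT, reduced to a pthread mutex in userspace. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int nowait_write(const char *buf)
    {
        /* Equivalent in spirit to inode_trylock(): bail out instead of
         * blocking when someone else holds the lock. */
        if (pthread_mutex_trylock(&lock) != 0)
            return -EAGAIN;

        printf("wrote: %s\n", buf);
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("first:  %d\n", nowait_write("hello"));  /* 0 */

        pthread_mutex_lock(&lock);                      /* simulate contention */
        printf("second: %d\n", nowait_write("world"));  /* -EAGAIN */
        pthread_mutex_unlock(&lock);
        return 0;
    }
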
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index a97fdc1..baacc18 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
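
In the hash.c hunk the checksum is copied into retval and barrier_data(ctx)
is issued before returning.  barrier_data() is a compiler barrier that takes
the pointed-to object as an input, so the compiler has to keep the on-stack
shash context materialized at that point instead of discarding accesses to
it as dead.  A rough userspace analogue of such a data barrier, shown in its
classic role of keeping a scrubbing store alive (the macro below is modelled
on the GCC definition and is an assumption for this sketch):

    /* Illustrative only: a data-dependent compiler barrier in the spirit of
     * barrier_data(), keeping a "scrub the secret" store from being elided. */
    #include <stdio.h>
    #include <string.h>

    /* Assumed GCC/Clang syntax: an empty asm with the buffer as input and a
     * memory clobber, so the preceding memset cannot be dropped as dead. */
    #define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

    static void use_secret(void)
    {
        char secret[16];

        snprintf(secret, sizeof(secret), "hunter2");
        printf("using %s\n", secret);

        /* Without the barrier the compiler may delete this memset, since
         * 'secret' is never read again before it goes out of scope. */
        memset(secret, 0, sizeof(secret));
        barrier_data(secret);
    }

    int main(void)
    {
        use_secret();
        return 0;
    }
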
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17cbe93..556c930 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -842,13 +842,12 @@ static noinline void submit_compressed_extents(struct inode *inode,
 				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
 				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 				PAGE_SET_WRITEBACK);
-		ret = btrfs_submit_compressed_write(inode,
+		if (btrfs_submit_compressed_write(inode,
 				    async_extent->start,
 				    async_extent->ram_size,
 				    ins.objectid,
 				    ins.offset, async_extent->pages,
-				    async_extent->nr_pages);
-		if (ret) {
+				    async_extent->nr_pages)) {
 			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 			struct page *p = async_extent->pages[0];
 			const u64 start = async_extent->start;
@@ -1901,11 +1900,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
  * At IO completion time the csums attached to the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
-				    int mirror_num, unsigned long bio_flags,
-				    u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_start(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
-	int ret = 0;
+	blk_status_t ret = 0;
 
 	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
 	BUG_ON(ret); /* -ENOMEM */
@@ -1920,16 +1919,16 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
  * At IO completion time the csums attached to the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
-			  int mirror_num, unsigned long bio_flags,
-			  u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_done(struct inode *inode,
+		struct bio *bio, int mirror_num, unsigned long bio_flags,
+		u64 bio_offset)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	int ret;
+	blk_status_t ret;
 
 	ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
 	if (ret) {
-		bio->bi_error = ret;
+		bio->bi_status = ret;
 		bio_endio(bio);
 	}
 	return ret;
@@ -1939,14 +1938,14 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
  * extent_io.c submission hook. This does the right thing for csum calculation
  * on write, or reading the csums from the tree before a read
  */
-static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 			  int mirror_num, unsigned long bio_flags,
 			  u64 bio_offset)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
-	int ret = 0;
+	blk_status_t ret = 0;
 	int skip_sum;
 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
 
@@ -1991,8 +1990,8 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 
 out:
-	if (ret < 0) {
-		bio->bi_error = ret;
+	if (ret) {
+		bio->bi_status = ret;
 		bio_endio(bio);
 	}
 	return ret;
@@ -2952,7 +2951,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 
 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
 			ordered_extent->file_offset + ordered_extent->len - 1,
-			EXTENT_DEFRAG, 1, cached_state);
+			EXTENT_DEFRAG, 0, cached_state);
 	if (ret) {
 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
 		if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7482,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
 	int found = false;
 	void **pagep = NULL;
 	struct page *page = NULL;
-	int start_idx;
-	int end_idx;
+	unsigned long start_idx;
+	unsigned long end_idx;
 
 	start_idx = start >> PAGE_SHIFT;
 
@@ -8037,7 +8036,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 	struct bio_vec *bvec;
 	int i;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		goto end;
 
 	ASSERT(bio->bi_vcnt == 1);
@@ -8116,7 +8115,7 @@ static void btrfs_retry_endio(struct bio *bio)
 	int ret;
 	int i;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		goto end;
 
 	uptodate = 1;
@@ -8141,8 +8140,8 @@ static void btrfs_retry_endio(struct bio *bio)
 	bio_put(bio);
 }
 
-static int __btrfs_subio_endio_read(struct inode *inode,
-				    struct btrfs_io_bio *io_bio, int err)
+static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
+		struct btrfs_io_bio *io_bio, blk_status_t err)
 {
 	struct btrfs_fs_info *fs_info;
 	struct bio_vec *bvec;
@@ -8184,7 +8183,7 @@ static int __btrfs_subio_endio_read(struct inode *inode,
 				io_bio->mirror_num,
 				btrfs_retry_endio, &done);
 		if (ret) {
-			err = ret;
+			err = errno_to_blk_status(ret);
 			goto next;
 		}
 
@@ -8211,8 +8210,8 @@ static int __btrfs_subio_endio_read(struct inode *inode,
 	return err;
 }
 
-static int btrfs_subio_endio_read(struct inode *inode,
-				  struct btrfs_io_bio *io_bio, int err)
+static blk_status_t btrfs_subio_endio_read(struct inode *inode,
+		struct btrfs_io_bio *io_bio, blk_status_t err)
 {
 	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
@@ -8232,7 +8231,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
 	struct inode *inode = dip->inode;
 	struct bio *dio_bio;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	int err = bio->bi_error;
+	blk_status_t err = bio->bi_status;
 
 	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
 		err = btrfs_subio_endio_read(inode, io_bio, err);
@@ -8243,11 +8242,11 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
 	kfree(dip);
 
-	dio_bio->bi_error = bio->bi_error;
-	dio_end_io(dio_bio, bio->bi_error);
+	dio_bio->bi_status = bio->bi_status;
+	dio_end_io(dio_bio);
 
 	if (io_bio->end_io)
-		io_bio->end_io(io_bio, err);
+		io_bio->end_io(io_bio, blk_status_to_errno(err));
 	bio_put(bio);
 }
 
@@ -8299,20 +8298,20 @@ static void btrfs_endio_direct_write(struct bio *bio)
 	struct bio *dio_bio = dip->dio_bio;
 
 	__endio_write_update_ordered(dip->inode, dip->logical_offset,
-				     dip->bytes, !bio->bi_error);
+				     dip->bytes, !bio->bi_status);
 
 	kfree(dip);
 
-	dio_bio->bi_error = bio->bi_error;
-	dio_end_io(dio_bio, bio->bi_error);
+	dio_bio->bi_status = bio->bi_status;
+	dio_end_io(dio_bio);
 	bio_put(bio);
 }
 
-static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
+static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode,
 				    struct bio *bio, int mirror_num,
 				    unsigned long bio_flags, u64 offset)
 {
-	int ret;
+	blk_status_t ret;
 	ret = btrfs_csum_one_bio(inode, bio, offset, 1);
 	BUG_ON(ret); /* -ENOMEM */
 	return 0;
@@ -8321,7 +8320,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
 static void btrfs_end_dio_bio(struct bio *bio)
 {
 	struct btrfs_dio_private *dip = bio->bi_private;
-	int err = bio->bi_error;
+	blk_status_t err = bio->bi_status;
 
 	if (err)
 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@@ -8351,7 +8350,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
 	if (dip->errors) {
 		bio_io_error(dip->orig_bio);
 	} else {
-		dip->dio_bio->bi_error = 0;
+		dip->dio_bio->bi_status = 0;
 		bio_endio(dip->orig_bio);
 	}
 out:
@@ -8368,14 +8367,14 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 	return bio;
 }
 
-static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
+static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
 						 struct btrfs_dio_private *dip,
 						 struct bio *bio,
 						 u64 file_offset)
 {
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
-	int ret;
+	blk_status_t ret;
 
 	/*
 	 * We load all the csum data we need when we submit
@@ -8406,7 +8405,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_dio_private *dip = bio->bi_private;
 	bool write = bio_op(bio) == REQ_OP_WRITE;
-	int ret;
+	blk_status_t ret;
 
 	if (async_submit)
 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
@@ -8649,7 +8648,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
 	 * callbacks - they require an allocated dip and a clone of dio_bio.
 	 */
 	if (io_bio && dip) {
-		io_bio->bi_error = -EIO;
+		io_bio->bi_status = BLK_STS_IOERR;
 		bio_endio(io_bio);
 		/*
 		 * The end io callbacks free our dip, do the final put on io_bio
@@ -8668,12 +8667,12 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
 			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
 			      file_offset + dio_bio->bi_iter.bi_size - 1);
 
-		dio_bio->bi_error = -EIO;
+		dio_bio->bi_status = BLK_STS_IOERR;
 		/*
 		 * Releases and cleans up our dio_bio, no need to bio_put()
 		 * nor bio_endio()/bio_io_error() against dio_bio.
 		 */
-		dio_end_io(dio_bio, ret);
+		dio_end_io(dio_bio);
 	}
 	if (io_bio)
 		bio_put(io_bio);
@@ -8755,6 +8754,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 			dio_data.overwrite = 1;
 			inode_unlock(inode);
 			relock = true;
+		} else if (iocb->ki_flags & IOCB_NOWAIT) {
+			ret = -EAGAIN;
+			goto out;
 		}
 		ret = btrfs_delalloc_reserve_space(inode, offset, count);
 		if (ret)
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d8ea0eb..f3d30d9e 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -871,7 +871,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
  * this frees the rbio and runs through all the bios in the
  * bio_list and calls end_io on them
  */
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 {
 	struct bio *cur = bio_list_get(&rbio->bio_list);
 	struct bio *next;
@@ -884,7 +884,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 	while (cur) {
 		next = cur->bi_next;
 		cur->bi_next = NULL;
-		cur->bi_error = err;
+		cur->bi_status = err;
 		bio_endio(cur);
 		cur = next;
 	}
@@ -897,7 +897,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 static void raid_write_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
-	int err = bio->bi_error;
+	blk_status_t err = bio->bi_status;
 	int max_errors;
 
 	if (err)
@@ -914,7 +914,7 @@ static void raid_write_end_io(struct bio *bio)
 	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
 		     0 : rbio->bbio->max_errors;
 	if (atomic_read(&rbio->error) > max_errors)
-		err = -EIO;
+		err = BLK_STS_IOERR;
 
 	rbio_orig_end_io(rbio, err);
 }
@@ -1092,7 +1092,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
 		 * devices or if they are not contiguous
 		 */
 		if (last_end == disk_start && stripe->dev->bdev &&
-		    !last->bi_error &&
+		    !last->bi_status &&
 		    last->bi_bdev == stripe->dev->bdev) {
 			ret = bio_add_page(last, page, PAGE_SIZE, 0);
 			if (ret == PAGE_SIZE)
@@ -1448,7 +1448,7 @@ static void raid_rmw_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
@@ -1991,7 +1991,7 @@ static void raid_recover_end_io(struct bio *bio)
 	 * we only read stripe pages off the disk, set them
 	 * up to date if there were no errors
 	 */
-	if (bio->bi_error)
+	if (bio->bi_status)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
@@ -2530,7 +2530,7 @@ static void raid56_parity_scrub_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c7b45eb..ba5595d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -95,7 +95,7 @@ struct scrub_bio {
 	struct scrub_ctx	*sctx;
 	struct btrfs_device	*dev;
 	struct bio		*bio;
-	int			err;
+	blk_status_t		status;
 	u64			logical;
 	u64			physical;
 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
@@ -1668,14 +1668,14 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 
 struct scrub_bio_ret {
 	struct completion event;
-	int error;
+	blk_status_t status;
 };
 
 static void scrub_bio_wait_endio(struct bio *bio)
 {
 	struct scrub_bio_ret *ret = bio->bi_private;
 
-	ret->error = bio->bi_error;
+	ret->status = bio->bi_status;
 	complete(&ret->event);
 }
 
@@ -1693,7 +1693,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 	int ret;
 
 	init_completion(&done.event);
-	done.error = 0;
+	done.status = 0;
 	bio->bi_iter.bi_sector = page->logical >> 9;
 	bio->bi_private = &done;
 	bio->bi_end_io = scrub_bio_wait_endio;
@@ -1705,7 +1705,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 		return ret;
 
 	wait_for_completion(&done.event);
-	if (done.error)
+	if (done.status)
 		return -EIO;
 
 	return 0;
@@ -1937,7 +1937,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 		bio->bi_bdev = sbio->dev->bdev;
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		sbio->err = 0;
+		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||
 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -1992,7 +1992,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-	sbio->err = bio->bi_error;
+	sbio->status = bio->bi_status;
 	sbio->bio = bio;
 
 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2007,7 +2007,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 	int i;
 
 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
-	if (sbio->err) {
+	if (sbio->status) {
 		struct btrfs_dev_replace *dev_replace =
 			&sbio->sctx->fs_info->dev_replace;
 
@@ -2341,7 +2341,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 		bio->bi_bdev = sbio->dev->bdev;
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
-		sbio->err = 0;
+		sbio->status = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||
 		   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -2377,7 +2377,7 @@ static void scrub_missing_raid56_end_io(struct bio *bio)
 	struct scrub_block *sblock = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		sblock->no_io_error_seen = 0;
 
 	bio_put(bio);
@@ -2588,7 +2588,7 @@ static void scrub_bio_end_io(struct bio *bio)
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-	sbio->err = bio->bi_error;
+	sbio->status = bio->bi_status;
 	sbio->bio = bio;
 
 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2601,7 +2601,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	int i;
 
 	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
-	if (sbio->err) {
+	if (sbio->status) {
 		for (i = 0; i < sbio->page_count; i++) {
 			struct scrub_page *spage = sbio->pagev[i];
 
@@ -3004,7 +3004,7 @@ static void scrub_parity_bio_endio(struct bio *bio)
 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
 			  sparity->nsectors);
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 017b67d..84a4959 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6042,9 +6042,10 @@ static void btrfs_end_bio(struct bio *bio)
 	struct btrfs_bio *bbio = bio->bi_private;
 	int is_orig_bio = 0;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		atomic_inc(&bbio->error);
-		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+		if (bio->bi_status == BLK_STS_IOERR ||
+		    bio->bi_status == BLK_STS_TARGET) {
 			unsigned int stripe_index =
 				btrfs_io_bio(bio)->stripe_index;
 			struct btrfs_device *dev;
@@ -6082,13 +6083,13 @@ static void btrfs_end_bio(struct bio *bio)
 		 * beyond the tolerance of the btrfs bio
 		 */
 		if (atomic_read(&bbio->error) > bbio->max_errors) {
-			bio->bi_error = -EIO;
+			bio->bi_status = BLK_STS_IOERR;
 		} else {
 			/*
 			 * this bio is actually up to date, we didn't
 			 * go over the max number of errors
 			 */
-			bio->bi_error = 0;
+			bio->bi_status = 0;
 		}
 
 		btrfs_end_bbio(bbio, bio);
@@ -6199,7 +6200,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 		bio->bi_iter.bi_sector = logical >> 9;
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		btrfs_end_bbio(bbio, bio);
 	}
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index 161be58..5c2cba8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,7 +49,7 @@
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 struct writeback_control *wbc);
+			 enum rw_hint hint, struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -1829,7 +1829,8 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
+					inode->i_write_hint, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -1883,7 +1884,8 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
+					inode->i_write_hint, wbc);
 			nr_underway++;
 		}
 		bh = next;
@@ -3038,7 +3040,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
 		set_bit(BH_Quiet, &bh->b_state);
 
-	bh->b_end_io(bh, !bio->bi_error);
+	bh->b_end_io(bh, !bio->bi_status);
 	bio_put(bio);
 }
 
@@ -3091,7 +3093,7 @@ void guard_bio_eod(int op, struct bio *bio)
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
-			 struct writeback_control *wbc)
+			 enum rw_hint write_hint, struct writeback_control *wbc)
 {
 	struct bio *bio;
 
@@ -3120,6 +3122,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
+	bio->bi_write_hint = write_hint;
 
 	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
@@ -3142,7 +3145,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
 
 int submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
-	return submit_bh_wbc(op, op_flags, bh, NULL);
+	return submit_bh_wbc(op, op_flags, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 9bf90bc..bb3a02c 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -18,7 +18,7 @@
 
 #include <linux/fscache-cache.h>
 #include <linux/timer.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/cred.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
@@ -97,7 +97,7 @@ struct cachefiles_cache {
  * backing file read tracking
  */
 struct cachefiles_one_read {
-	wait_queue_t			monitor;	/* link into monitored waitqueue */
+	wait_queue_entry_t			monitor;	/* link into monitored waitqueue */
 	struct page			*back_page;	/* backing file page we're waiting for */
 	struct page			*netfs_page;	/* netfs page we're going to fill */
 	struct fscache_retrieval	*op;		/* retrieval op covering this */
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 41df8a2..3978b32 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -204,7 +204,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
 		wait_queue_head_t *wq;
 
 		signed long timeout = 60 * HZ;
-		wait_queue_t wait;
+		wait_queue_entry_t wait;
 		bool requeue;
 
 		/* if the object we're waiting for is queued for processing,
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index afbdc41..18d7aa6 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -21,7 +21,7 @@
  * - we use this to detect read completion of backing pages
  * - the caller holds the waitqueue lock
  */
-static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
+static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 				  int sync, void *_key)
 {
 	struct cachefiles_one_read *monitor =
@@ -48,7 +48,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
 	}
 
 	/* remove from the waitqueue */
-	list_del(&wait->task_list);
+	list_del(&wait->entry);
 
 	/* move onto the action list and queue for FS-Cache thread pool */
 	ASSERT(monitor->op);
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 987044b..59cb307 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -131,6 +131,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	}
 
 	if (new_mode != old_mode) {
+		newattrs.ia_ctime = current_time(inode);
 		newattrs.ia_mode = new_mode;
 		newattrs.ia_valid = ATTR_MODE;
 		ret = __ceph_setattr(inode, &newattrs);
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e8f11fa..7df550c 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -91,6 +91,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 		ceph_mdsc_put_request(req);
 		if (!inode)
 			return ERR_PTR(-ESTALE);
+		if (inode->i_nlink == 0) {
+			iput(inode);
+			return ERR_PTR(-ESTALE);
+		}
 	}
 
 	return d_obtain_alias(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index dcce79b..4de6cdd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2022,7 +2022,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 		    attr->ia_size > inode->i_size) {
 			i_size_write(inode, attr->ia_size);
 			inode->i_blocks = calc_inode_blocks(attr->ia_size);
-			inode->i_ctime = attr->ia_ctime;
 			ci->i_reported_size = attr->ia_size;
 			dirtied |= CEPH_CAP_FILE_EXCL;
 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
@@ -2044,7 +2043,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
 		     only ? "ctime only" : "ignored");
-		inode->i_ctime = attr->ia_ctime;
 		if (only) {
 			/*
 			 * if kernel wants to dirty ctime but nothing else,
@@ -2067,7 +2065,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 	if (dirtied) {
 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
 							   &prealloc_cf);
-		inode->i_ctime = current_time(inode);
+		inode->i_ctime = attr->ia_ctime;
 	}
 
 	release &= issued;
@@ -2085,6 +2083,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
 		req->r_inode_drop = release;
 		req->r_args.setattr.mask = cpu_to_le32(mask);
 		req->r_num_caps = 1;
+		req->r_stamp = attr->ia_ctime;
 		err = ceph_mdsc_do_request(mdsc, NULL, req);
 	}
 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f38e56f..0c05df4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1687,7 +1687,6 @@ struct ceph_mds_request *
 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 {
 	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
-	struct timespec ts;
 
 	if (!req)
 		return ERR_PTR(-ENOMEM);
@@ -1706,8 +1705,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
 	init_completion(&req->r_safe_completion);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
 
-	ktime_get_real_ts(&ts);
-	req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
+	req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
 
 	req->r_op = op;
 	req->r_direct_mode = mode;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0fd081b..fcef706 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;
 
-	if (to->type & ITER_IOVEC)
+	if (to->type == ITER_IOVEC)
 		ctx->should_dirty = true;
 
 	rc = setup_aio_ctx_iter(ctx, to, READ);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4d1fcd7..a869363 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/freezer.h>
 #include <linux/sched/signal.h>
+#include <linux/wait_bit.h>
 
 #include <asm/div64.h>
 #include "cifsfs.h"
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b085319..3b147dc 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
 	if (!pages) {
 		pages = vmalloc(max_pages * sizeof(struct page *));
-		if (!bv) {
+		if (!pages) {
 			kvfree(bv);
 			return -ENOMEM;
 		}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 27bc360..a723df3 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 		     struct cifs_fid *fid, __u16 search_flags,
 		     struct cifs_search_info *srch_inf)
 {
-	return CIFSFindFirst(xid, tcon, path, cifs_sb,
-			     &fid->netfid, search_flags, srch_inf, true);
+	int rc;
+
+	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+			   &fid->netfid, search_flags, srch_inf, true);
+	if (rc)
+		cifs_dbg(FYI, "find first failed=%d\n", rc);
+	return rc;
 }
 
 static int
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index c586918..7e48561 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
 	kfree(utf16_path);
 	if (rc) {
-		cifs_dbg(VFS, "open dir failed\n");
+		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
 		return rc;
 	}
 
@@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
 				  fid->volatile_fid, 0, srch_inf);
 	if (rc) {
-		cifs_dbg(VFS, "query directory failed\n");
+		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
 		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 	}
 	return rc;
@@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 
 	sg = init_sg(rqst, sign);
 	if (!sg) {
-		cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc);
+		cifs_dbg(VFS, "%s: Failed to init sg", __func__);
+		rc = -ENOMEM;
 		goto free_req;
 	}
 
@@ -1817,6 +1818,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 	iv = kzalloc(iv_len, GFP_KERNEL);
 	if (!iv) {
 		cifs_dbg(VFS, "%s: Failed to alloc IV", __func__);
+		rc = -ENOMEM;
 		goto free_sg;
 	}
 	iv[0] = 3;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 3cb5c9e..de50e74 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
 	pcreatetime = (__u64 *)value;
 	*pcreatetime = CIFS_I(inode)->createtime;
 	return sizeof(__u64);
-
-	return rc;
 }
 
 
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6116d52..112b3e1 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -866,8 +866,6 @@ COMPATIBLE_IOCTL(TIOCGDEV)
 COMPATIBLE_IOCTL(TIOCCBRK)
 COMPATIBLE_IOCTL(TIOCGSID)
 COMPATIBLE_IOCTL(TIOCGICOUNT)
-COMPATIBLE_IOCTL(TIOCGPKT)
-COMPATIBLE_IOCTL(TIOCGPTLCK)
 COMPATIBLE_IOCTL(TIOCGEXCL)
 /* Little t */
 COMPATIBLE_IOCTL(TIOCGETD)
@@ -883,16 +881,12 @@ COMPATIBLE_IOCTL(TIOCMGET)
 COMPATIBLE_IOCTL(TIOCMBIC)
 COMPATIBLE_IOCTL(TIOCMBIS)
 COMPATIBLE_IOCTL(TIOCMSET)
-COMPATIBLE_IOCTL(TIOCPKT)
 COMPATIBLE_IOCTL(TIOCNOTTY)
 COMPATIBLE_IOCTL(TIOCSTI)
 COMPATIBLE_IOCTL(TIOCOUTQ)
 COMPATIBLE_IOCTL(TIOCSPGRP)
 COMPATIBLE_IOCTL(TIOCGPGRP)
-COMPATIBLE_IOCTL(TIOCGPTN)
-COMPATIBLE_IOCTL(TIOCSPTLCK)
 COMPATIBLE_IOCTL(TIOCSERGETLSR)
-COMPATIBLE_IOCTL(TIOCSIG)
 #ifdef TIOCSRS485
 COMPATIBLE_IOCTL(TIOCSRS485)
 #endif
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 8b2a994..a66f662 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -138,6 +138,14 @@ struct config_item *config_item_get(struct config_item *item)
 }
 EXPORT_SYMBOL(config_item_get);
 
+struct config_item *config_item_get_unless_zero(struct config_item *item)
+{
+	if (item && kref_get_unless_zero(&item->ci_kref))
+		return item;
+	return NULL;
+}
+EXPORT_SYMBOL(config_item_get_unless_zero);
+
 static void config_item_cleanup(struct config_item *item)
 {
 	struct config_item_type *t = item->ci_type;
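
config_item_get_unless_zero() only takes a reference when the kref has not
already dropped to zero, so a racing lookup cannot resurrect an item that is
already being torn down.  A userspace sketch of that get-unless-zero pattern
using C11 atomics (illustrative, not the kernel's kref implementation):

    /* Illustrative only: "take a reference unless it already hit zero",
     * the pattern behind kref_get_unless_zero(). */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_uint refcount;
    };

    static bool get_unless_zero(struct obj *o)
    {
        unsigned int old = atomic_load(&o->refcount);

        /* Only bump the count while it is still non-zero; a zero count
         * means the object is already on its way to being freed. */
        while (old != 0) {
            if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct obj live  = { .refcount = 1 };
        struct obj dying = { .refcount = 0 };

        printf("live:  %d\n", get_unless_zero(&live));   /* 1 */
        printf("dying: %d\n", get_unless_zero(&dying));  /* 0 */
        return 0;
    }
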
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index a6ab012..c8aabba 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
 	ret = -ENOMEM;
 	sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
 	if (sl) {
-		sl->sl_target = config_item_get(item);
 		spin_lock(&configfs_dirent_lock);
 		if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
 			spin_unlock(&configfs_dirent_lock);
-			config_item_put(item);
 			kfree(sl);
 			return -ENOENT;
 		}
+		sl->sl_target = config_item_get(item);
 		list_add(&sl->sl_list, &target_sd->s_links);
 		spin_unlock(&configfs_dirent_lock);
 		ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index a409a84..6181e95 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -129,7 +129,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		err = submit_bio_wait(bio);
-		if ((err == 0) && bio->bi_error)
+		if (err == 0 && bio->bi_status)
 			err = -EIO;
 		bio_put(bio);
 		if (err)
diff --git a/fs/dax.c b/fs/dax.c
index c22eaf1..33d05aa 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -84,7 +84,7 @@ struct exceptional_entry_key {
 };
 
 struct wait_exceptional_entry_queue {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct exceptional_entry_key key;
 };
 
@@ -108,7 +108,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
 	return wait_table + hash;
 }
 
-static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
 				       int sync, void *keyp)
 {
 	struct exceptional_entry_key *key = keyp;
@@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			if (ret < 0)
 				goto out;
 		}
+		start_index = indices[pvec.nr - 1] + 1;
 	}
 out:
 	put_dax(dax_dev);
@@ -1155,6 +1156,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	}
 
 	/*
+	 * It is possible, particularly with mixed reads & writes to private
+	 * mappings, that we have raced with a PMD fault that overlaps with
+	 * the PTE we need to set up.  If so just return and the fault will be
+	 * retried.
+	 */
+	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+		vmf_ret = VM_FAULT_NOPAGE;
+		goto unlock_entry;
+	}
+
+	/*
 	 * Note that we don't bother to use iomap_apply here: DAX required
 	 * the file system block size to be equal the page size, which means
 	 * that we never have to deal with more than a single extent here.
@@ -1398,6 +1410,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
 		goto fallback;
 
 	/*
+	 * It is possible, particularly with mixed reads & writes to private
+	 * mappings, that we have raced with a PTE fault that overlaps with
+	 * the PMD we need to set up.  If so just return and the fault will be
+	 * retried.
+	 */
+	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
+			!pmd_devmap(*vmf->pmd)) {
+		result = 0;
+		goto unlock_entry;
+	}
+
+	/*
 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
 	 * setting up a mapping, so really we're using iomap_begin() as a way
 	 * to look up our filesystem block.
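
Both dax fault paths above now re-check the page table once the DAX entry is
locked: if the other granularity (PTE vs PMD) has already populated the
slot, the handler simply returns and lets the fault be retried instead of
installing a conflicting entry.  Reduced to a mutex-guarded slot in
userspace, the shape is roughly:

    /* Illustrative only: "someone else already mapped this range, back off
     * and let the fault retry", with a mutex and an int standing in for the
     * entry lock and the pmd/pte slot. */
    #include <pthread.h>
    #include <stdio.h>

    enum fault_result { FAULT_INSTALLED, FAULT_RETRY };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int slot_populated;

    static enum fault_result handle_fault(const char *who)
    {
        enum fault_result res;

        pthread_mutex_lock(&lock);
        if (slot_populated) {
            /* Raced with the other fault path: nothing to do here, the
             * faulting access will simply be retried. */
            res = FAULT_RETRY;
        } else {
            slot_populated = 1;
            res = FAULT_INSTALLED;
        }
        pthread_mutex_unlock(&lock);

        printf("%s: %s\n", who, res == FAULT_INSTALLED ? "installed" : "retry");
        return res;
    }

    int main(void)
    {
        handle_fault("pte fault");  /* installs the mapping */
        handle_fault("pmd fault");  /* sees it and backs off */
        return 0;
    }
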
diff --git a/fs/dcache.c b/fs/dcache.c
index cddf397..a9f995f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1494,7 +1494,7 @@ static void check_and_drop(void *_data)
 {
 	struct detach_data *data = _data;
 
-	if (!data->mountpoint && !data->select.found)
+	if (!data->mountpoint && list_empty(&data->select.dispose))
 		__d_drop(data->select.start);
 }
 
@@ -1536,17 +1536,15 @@ void d_invalidate(struct dentry *dentry)
 
 		d_walk(dentry, &data, detach_and_collect, check_and_drop);
 
-		if (data.select.found)
+		if (!list_empty(&data.select.dispose))
 			shrink_dentry_list(&data.select.dispose);
+		else if (!data.mountpoint)
+			return;
 
 		if (data.mountpoint) {
 			detach_mounts(data.mountpoint);
 			dput(data.mountpoint);
 		}
-
-		if (!data.mountpoint && !data.select.found)
-			break;
-
 		cond_resched();
 	}
 }
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 354e2ab..6dabc4a 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -9,7 +9,7 @@
  *	2 as published by the Free Software Foundation.
  *
  *  debugfs is for people to use instead of /proc or /sys.
- *  See Documentation/DocBook/filesystems for more details.
+ *  See Documentation/filesystems/ for more details.
  *
  */
 
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index e892ae7..77440e4 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -9,7 +9,7 @@
  *	2 as published by the Free Software Foundation.
  *
  *  debugfs is for people to use instead of /proc or /sys.
- *  See Documentation/DocBook/kernel-api for more details.
+ *  See ./Documentation/core-api/kernel-api.rst for more details.
  *
  */
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index a04ebea..08cf278 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -294,7 +294,7 @@ static void dio_aio_complete_work(struct work_struct *work)
 	dio_complete(dio, 0, true);
 }
 
-static int dio_bio_complete(struct dio *dio, struct bio *bio);
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
 
 /*
  * Asynchronous IO callback. 
@@ -348,13 +348,12 @@ static void dio_bio_end_io(struct bio *bio)
 /**
  * dio_end_io - handle the end io action for the given bio
  * @bio: The direct io bio thats being completed
- * @error: Error if there was one
  *
  * This is meant to be called by any filesystem that uses their own dio_submit_t
  * so that the DIO specific endio actions are dealt with after the filesystem
  * has done its completion work.
  */
-void dio_end_io(struct bio *bio, int error)
+void dio_end_io(struct bio *bio)
 {
 	struct dio *dio = bio->bi_private;
 
@@ -386,6 +385,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 	else
 		bio->bi_end_io = dio_bio_end_io;
 
+	bio->bi_write_hint = dio->iocb->ki_hint;
+
 	sdio->bio = bio;
 	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
@@ -474,17 +475,20 @@ static struct bio *dio_await_one(struct dio *dio)
 /*
  * Process one completed BIO.  No locks are held.
  */
-static int dio_bio_complete(struct dio *dio, struct bio *bio)
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
 	struct bio_vec *bvec;
 	unsigned i;
-	int err;
+	blk_status_t err = bio->bi_status;
 
-	if (bio->bi_error)
-		dio->io_error = -EIO;
+	if (err) {
+		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
+			dio->io_error = -EAGAIN;
+		else
+			dio->io_error = -EIO;
+	}
 
 	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
-		err = bio->bi_error;
 		bio_check_pages_dirty(bio);	/* transfers ownership */
 	} else {
 		bio_for_each_segment_all(bvec, bio, i) {
@@ -495,7 +499,6 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 				set_page_dirty_lock(page);
 			put_page(page);
 		}
-		err = bio->bi_error;
 		bio_put(bio);
 	}
 	return err;
@@ -539,7 +542,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
 			bio = dio->bio_list;
 			dio->bio_list = bio->bi_private;
 			spin_unlock_irqrestore(&dio->bio_lock, flags);
-			ret2 = dio_bio_complete(dio, bio);
+			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
 			if (ret == 0)
 				ret = ret2;
 		}
@@ -1197,6 +1200,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	if (iov_iter_rw(iter) == WRITE) {
 		dio->op = REQ_OP_WRITE;
 		dio->op_flags = REQ_SYNC | REQ_IDLE;
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			dio->op_flags |= REQ_NOWAIT;
 	} else {
 		dio->op = REQ_OP_READ;
 	}
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 68b9fff..2fb4ead 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -191,7 +191,7 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
  * This is used to atomically remove a wait queue entry from the eventfd wait
  * queue head, and read/reset the counter value.
  */
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt)
 {
 	unsigned long flags;
@@ -215,8 +215,8 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
  *
  * Returns %0 if successful, or the following error codes:
  *
- * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
- * -ERESTARTSYS : A signal interrupted the wait operation.
+ *  - -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
+ *  - -ERESTARTSYS : A signal interrupted the wait operation.
  *
  * If @no_wait is zero, the function might sleep until the eventfd internal
  * counter becomes greater than zero.
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 5420767..b1c8e23 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -244,7 +244,7 @@ struct eppoll_entry {
 	 * Wait queue item that will be linked to the target file wait
 	 * queue head.
 	 */
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	/* The wait queue head that linked the "wait" wait queue item */
 	wait_queue_head_t *whead;
@@ -347,13 +347,13 @@ static inline int ep_is_linked(struct list_head *p)
 	return !list_empty(p);
 }
 
-static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
+static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait);
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait)->base;
 }
@@ -1078,7 +1078,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
  * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  */
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	int pwake = 0;
 	unsigned long flags;
@@ -1094,7 +1094,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 		 * can't use __remove_wait_queue(). whead->lock is held by
 		 * the caller.
 		 */
-		list_del_init(&wait->task_list);
+		list_del_init(&wait->entry);
 	}
 
 	spin_lock_irqsave(&ep->lock, flags);
@@ -1699,7 +1699,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	int res = 0, eavail, timed_out = 0;
 	unsigned long flags;
 	u64 slack = 0;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	ktime_t expires, *to = NULL;
 
 	if (timeout > 0) {
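
The eventfd and eventpoll hunks above are part of the tree-wide rename of wait_queue_t to
wait_queue_entry_t (and of its list member task_list to entry); the pattern of embedding a
wait entry in a larger object and recovering the container from the wakeup callback is
unchanged. A generic sketch of that pattern under the new names (struct my_waiter and
my_wake_fn are illustrative, not part of the patch):

  #include <linux/wait.h>
  #include <linux/kernel.h>

  struct my_waiter {
  	wait_queue_entry_t	wait;	/* embedded wait queue entry */
  	bool			woken;
  };

  /* Wakeup callback: recover the containing object from the embedded entry. */
  static int my_wake_fn(wait_queue_entry_t *entry, unsigned int mode,
  		      int flags, void *key)
  {
  	struct my_waiter *w = container_of(entry, struct my_waiter, wait);

  	w->woken = true;
  	return autoremove_wake_function(entry, mode, flags, key);
  }
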
diff --git a/fs/exec.c b/fs/exec.c
index 72934df..9041990 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		unsigned long ptr_size;
 		struct rlimit *rlim;
 
+		/*
+		 * Since the stack will hold pointers to the strings, we
+		 * must account for them as well.
+		 *
+		 * The size calculation is the entire vma while each arg page is
+		 * built, so each time we get here it's calculating how far it
+		 * is currently (rather than each call being just the newly
+		 * added size from the arg page).  As a result, we need to
+		 * always add the entire size of the pointers, so that on the
+		 * last call to get_arg_page() we'll actually have the entire
+		 * correct size.
+		 */
+		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+		if (ptr_size > ULONG_MAX - size)
+			goto fail;
+		size += ptr_size;
+
 		acct_arg_size(bprm, size / PAGE_SIZE);
 
 		/*
@@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 		 *    to work from.
 		 */
 		rlim = current->signal->rlim;
-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-			put_page(page);
-			return NULL;
-		}
+		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+			goto fail;
 	}
 
 	return page;
+
+fail:
+	put_page(page);
+	return NULL;
 }
 
 static void put_arg_page(struct page *page)
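
The fs/exec.c change charges the argv/envp pointer array itself against the stack rlimit and
guards the addition with "ptr_size > ULONG_MAX - size" so the sum cannot wrap. The same
overflow-safe addition idiom in isolation, as a small self-contained illustration (the names
are made up):

  #include <limits.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* True if a + b would wrap around ULONG_MAX. */
  static bool add_overflows(unsigned long a, unsigned long b)
  {
  	return b > ULONG_MAX - a;
  }

  int main(void)
  {
  	unsigned long size = ULONG_MAX - 8;
  	unsigned long ptr_size = 64;	/* e.g. (argc + envc) * sizeof(void *) */

  	if (add_overflows(size, ptr_size))
  		printf("reject: size + ptr_size would wrap\n");
  	else
  		printf("ok: %lu\n", size + ptr_size);
  	return 0;
  }
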
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fd38993..3ec0e46 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
+#include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
@@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	handle_t *handle;
 	int error, retries = 0;
 
+	error = dquot_initialize(inode);
+	if (error)
+		return error;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
 				    ext4_jbd2_credits_xattr(inode));
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8e80461..3219154 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2523,7 +2523,6 @@ extern int ext4_search_dir(struct buffer_head *bh,
 			   int buf_size,
 			   struct inode *dir,
 			   struct ext4_filename *fname,
-			   const struct qstr *d_name,
 			   unsigned int offset,
 			   struct ext4_dir_entry_2 **res_dir);
 extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@ extern int htree_inlinedir_to_tree(struct file *dir_file,
 				   int *has_inline_data);
 extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 					struct ext4_filename *fname,
-					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data);
 extern int ext4_delete_inline_entry(handle_t *handle,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a97dff..3e36508 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	struct ext4_sb_info *sbi;
 	struct ext4_extent_header *eh;
 	struct ext4_map_blocks split_map;
-	struct ext4_extent zero_ex;
+	struct ext4_extent zero_ex1, zero_ex2;
 	struct ext4_extent *ex, *abut_ex;
 	ext4_lblk_t ee_block, eof_block;
 	unsigned int ee_len, depth, map_len = map->m_len;
 	int allocated = 0, max_zeroout = 0;
 	int err = 0;
-	int split_flag = 0;
+	int split_flag = EXT4_EXT_DATA_VALID2;
 
 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	zero_ex.ee_len = 0;
+	zero_ex1.ee_len = 0;
+	zero_ex2.ee_len = 0;
 
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	if (ext4_encrypted_inode(inode))
 		max_zeroout = 0;
 
-	/* If extent is less than s_max_zeroout_kb, zeroout directly */
-	if (max_zeroout && (ee_len <= max_zeroout)) {
-		err = ext4_ext_zeroout(inode, ex);
-		if (err)
-			goto out;
-		zero_ex.ee_block = ex->ee_block;
-		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
-		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-
-		err = ext4_ext_get_access(handle, inode, path + depth);
-		if (err)
-			goto out;
-		ext4_ext_mark_initialized(ex);
-		ext4_ext_try_to_merge(handle, inode, path, ex);
-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-		goto out;
-	}
-
 	/*
-	 * four cases:
+	 * five cases:
 	 * 1. split the extent into three extents.
-	 * 2. split the extent into two extents, zeroout the first half.
-	 * 3. split the extent into two extents, zeroout the second half.
+	 * 2. split the extent into two extents, zeroout the head of the first
+	 *    extent.
+	 * 3. split the extent into two extents, zeroout the tail of the second
+	 *    extent.
 	 * 4. split the extent into two extents with out zeroout.
+	 * 5. no splitting needed, just possibly zeroout the head and / or the
+	 *    tail of the extent.
 	 */
 	split_map.m_lblk = map->m_lblk;
 	split_map.m_len = map->m_len;
 
-	if (max_zeroout && (allocated > map->m_len)) {
+	if (max_zeroout && (allocated > split_map.m_len)) {
 		if (allocated <= max_zeroout) {
-			/* case 3 */
-			zero_ex.ee_block =
-					 cpu_to_le32(map->m_lblk);
-			zero_ex.ee_len = cpu_to_le16(allocated);
-			ext4_ext_store_pblock(&zero_ex,
-				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
-			err = ext4_ext_zeroout(inode, &zero_ex);
+			/* case 3 or 5 */
+			zero_ex1.ee_block =
+				 cpu_to_le32(split_map.m_lblk +
+					     split_map.m_len);
+			zero_ex1.ee_len =
+				cpu_to_le16(allocated - split_map.m_len);
+			ext4_ext_store_pblock(&zero_ex1,
+				ext4_ext_pblock(ex) + split_map.m_lblk +
+				split_map.m_len - ee_block);
+			err = ext4_ext_zeroout(inode, &zero_ex1);
 			if (err)
 				goto out;
-			split_map.m_lblk = map->m_lblk;
 			split_map.m_len = allocated;
-		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
-			/* case 2 */
-			if (map->m_lblk != ee_block) {
-				zero_ex.ee_block = ex->ee_block;
-				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+		}
+		if (split_map.m_lblk - ee_block + split_map.m_len <
+								max_zeroout) {
+			/* case 2 or 5 */
+			if (split_map.m_lblk != ee_block) {
+				zero_ex2.ee_block = ex->ee_block;
+				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
 							ee_block);
-				ext4_ext_store_pblock(&zero_ex,
+				ext4_ext_store_pblock(&zero_ex2,
 						      ext4_ext_pblock(ex));
-				err = ext4_ext_zeroout(inode, &zero_ex);
+				err = ext4_ext_zeroout(inode, &zero_ex2);
 				if (err)
 					goto out;
 			}
 
+			split_map.m_len += split_map.m_lblk - ee_block;
 			split_map.m_lblk = ee_block;
-			split_map.m_len = map->m_lblk - ee_block + map->m_len;
 			allocated = map->m_len;
 		}
 	}
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		err = 0;
 out:
 	/* If we have gotten a failure, don't zero out status tree */
-	if (!err)
-		err = ext4_zeroout_es(inode, &zero_ex);
+	if (!err) {
+		err = ext4_zeroout_es(inode, &zero_ex1);
+		if (!err)
+			err = ext4_zeroout_es(inode, &zero_ex2);
+	}
 	return err ? err : allocated;
 }
 
@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
 	/* Zero out partial block at the edges of the range */
 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 	if (file->f_flags & O_SYNC)
 		ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		ext4_handle_sync(handle);
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 831fd6be..58e2eea 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -37,7 +37,11 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
 
-	inode_lock_shared(inode);
+	if (!inode_trylock_shared(inode)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_lock_shared(inode);
+	}
 	/*
 	 * Recheck under inode lock - at this point we are sure it cannot
 	 * change anymore
@@ -179,7 +183,11 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	ssize_t ret;
 
-	inode_lock(inode);
+	if (!inode_trylock(inode)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_lock(inode);
+	}
 	ret = ext4_write_checks(iocb, from);
 	if (ret <= 0)
 		goto out;
@@ -216,7 +224,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		return ext4_dax_write_iter(iocb, from);
 #endif
 
-	inode_lock(inode);
+	if (!inode_trylock(inode)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		inode_lock(inode);
+	}
+
 	ret = ext4_write_checks(iocb, from);
 	if (ret <= 0)
 		goto out;
@@ -235,9 +248,15 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 	iocb->private = &overwrite;
 	/* Check whether we do a DIO overwrite or not */
-	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
-	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
-		overwrite = 1;
+	if (o_direct && !unaligned_aio) {
+		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
+			if (ext4_should_dioread_nolock(inode))
+				overwrite = 1;
+		} else if (iocb->ki_flags & IOCB_NOWAIT) {
+			ret = -EAGAIN;
+			goto out;
+		}
+	}
 
 	ret = __generic_file_write_iter(iocb, from);
 	inode_unlock(inode);
@@ -435,6 +454,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 		if (ret < 0)
 			return ret;
 	}
+
+	/* Set the flags to support nowait AIO */
+	filp->f_mode |= FMODE_AIO_NOWAIT;
+
 	return dquot_file_open(inode, filp);
 }
 
@@ -474,57 +497,37 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 	endoff = (loff_t)end_blk << blkbits;
 
 	index = startoff >> PAGE_SHIFT;
-	end = endoff >> PAGE_SHIFT;
+	end = (endoff - 1) >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	do {
 		int i, num;
 		unsigned long nr_pages;
 
-		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  (pgoff_t)num);
-		if (nr_pages == 0) {
-			if (whence == SEEK_DATA)
-				break;
-
-			BUG_ON(whence != SEEK_HOLE);
-			/*
-			 * If this is the first time to go into the loop and
-			 * offset is not beyond the end offset, it will be a
-			 * hole at this offset
-			 */
-			if (lastoff == startoff || lastoff < endoff)
-				found = 1;
+		if (nr_pages == 0)
 			break;
-		}
-
-		/*
-		 * If this is the first time to go into the loop and
-		 * offset is smaller than the first page offset, it will be a
-		 * hole at this offset.
-		 */
-		if (lastoff == startoff && whence == SEEK_HOLE &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = 1;
-			break;
-		}
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			struct buffer_head *bh, *head;
 
 			/*
-			 * If the current offset is not beyond the end of given
-			 * range, it will be a hole.
+			 * If current offset is smaller than the page offset,
+			 * there is a hole at this offset.
 			 */
-			if (lastoff < endoff && whence == SEEK_HOLE &&
-			    page->index > end) {
+			if (whence == SEEK_HOLE && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
 				found = 1;
 				*offset = lastoff;
 				goto out;
 			}
 
+			if (page->index > end)
+				goto out;
+
 			lock_page(page);
 
 			if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +567,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 			unlock_page(page);
 		}
 
-		/*
-		 * The no. of pages is less than our desired, that would be a
-		 * hole in there.
-		 */
-		if (nr_pages < num && whence == SEEK_HOLE) {
-			found = 1;
-			*offset = lastoff;
+		/* The no. of pages is less than our desired, we are done. */
+		if (nr_pages < num)
 			break;
-		}
 
 		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	if (whence == SEEK_HOLE && lastoff < endoff) {
+		found = 1;
+		*offset = lastoff;
+	}
 out:
 	pagevec_release(&pvec);
 	return found;
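
The ext4_find_unwritten_pgoff() rework above fixes how the page cache is scanned for
lseek(SEEK_HOLE)/lseek(SEEK_DATA): the end page index is now derived from endoff - 1, and a
hole that begins after the last cached page is reported once the loop finishes. From userspace
the interface is simply lseek(); a small sketch, assuming a filesystem that implements
SEEK_HOLE/SEEK_DATA:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(int argc, char **argv)
  {
  	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
  	off_t data, hole;

  	if (fd < 0) { perror("open"); return 1; }

  	data = lseek(fd, 0, SEEK_DATA);	/* first data at or after offset 0 */
  	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at or after offset 0 */

  	/* Either call returns -1 with errno == ENXIO when no such offset exists. */
  	printf("first data at %lld, first hole at %lld\n",
  	       (long long)data, (long long)hole);
  	close(fd);
  	return 0;
  }
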
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index d5dea4c..8d141c0 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1627,7 +1627,6 @@ int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent,
 
 struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 					struct ext4_filename *fname,
-					const struct qstr *d_name,
 					struct ext4_dir_entry_2 **res_dir,
 					int *has_inline_data)
 {
@@ -1649,7 +1648,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 						EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, d_name, 0, res_dir);
+			      dir, fname, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 	if (ret < 0)
@@ -1662,7 +1661,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
 	inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
 
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, d_name, 0, res_dir);
+			      dir, fname, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1bd0bfa..5cf82d0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2124,15 +2124,29 @@ static int ext4_writepage(struct page *page,
 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
 {
 	int len;
-	loff_t size = i_size_read(mpd->inode);
+	loff_t size;
 	int err;
 
 	BUG_ON(page->index != mpd->first_page);
+	clear_page_dirty_for_io(page);
+	/*
+	 * We have to be very careful here!  Nothing protects writeback path
+	 * against i_size changes and the page can be writeably mapped into
+	 * page tables. So an application can be growing i_size and writing
+	 * data through mmap while writeback runs. clear_page_dirty_for_io()
+	 * write-protects our page in page tables and the page cannot get
+	 * written to again until we release page lock. So only after
+	 * clear_page_dirty_for_io() we are safe to sample i_size for
+	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
+	 * on the barrier provided by TestClearPageDirty in
+	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
+	 * after page tables are updated.
+	 */
+	size = i_size_read(mpd->inode);
 	if (page->index == size >> PAGE_SHIFT)
 		len = size & ~PAGE_MASK;
 	else
 		len = PAGE_SIZE;
-	clear_page_dirty_for_io(page);
 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
 	if (!err)
 		mpd->wbc->nr_to_write--;
@@ -3629,9 +3643,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 		get_block_func = ext4_dio_get_block_unwritten_async;
 		dio_flags = DIO_LOCKING;
 	}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
-#endif
 	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
 				   get_block_func, ext4_end_io_dio, NULL,
 				   dio_flags);
@@ -3713,7 +3724,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
 	 */
 	inode_lock_shared(inode);
 	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-					   iocb->ki_pos + count);
+					   iocb->ki_pos + count - 1);
 	if (ret)
 		goto out_unlock;
 	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
 	ext4_journal_stop(handle);
 out_dio:
@@ -5637,8 +5650,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
 	/* No extended attributes present */
 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-			new_extra_isize);
+		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+		       EXT4_I(inode)->i_extra_isize, 0,
+		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
 		return 0;
 	}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5083bce2..b7928cd 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3887,7 +3887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
 	if (err) {
-		ext4_error(sb, "Error loading buddy information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     err, group);
 		put_bh(bitmap_bh);
 		return 0;
 	}
@@ -4044,10 +4045,11 @@ void ext4_discard_preallocations(struct inode *inode)
 		BUG_ON(pa->pa_type != MB_INODE_PA);
 		group = ext4_get_group_number(sb, pa->pa_pstart);
 
-		err = ext4_mb_load_buddy(sb, group, &e4b);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 
@@ -4303,11 +4305,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 	spin_unlock(&lg->lg_prealloc_lock);
 
 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+		int err;
 
 		group = ext4_get_group_number(sb, pa->pa_pstart);
-		if (ext4_mb_load_buddy(sb, group, &e4b)) {
-			ext4_error(sb, "Error loading buddy information for %u",
-					group);
+		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+					     GFP_NOFS|__GFP_NOFAIL);
+		if (err) {
+			ext4_error(sb, "Error %d loading buddy information for %u",
+				   err, group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 
 	ret = ext4_mb_load_buddy(sb, group, &e4b);
 	if (ret) {
-		ext4_error(sb, "Error in loading buddy "
-				"information for %u", group);
+		ext4_warning(sb, "Error %d loading buddy information for %u",
+			     ret, group);
 		return ret;
 	}
 	bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b81f7d4..404256c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1155,12 +1155,11 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
 static inline int search_dirblock(struct buffer_head *bh,
 				  struct inode *dir,
 				  struct ext4_filename *fname,
-				  const struct qstr *d_name,
 				  unsigned int offset,
 				  struct ext4_dir_entry_2 **res_dir)
 {
 	return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
-			       fname, d_name, offset, res_dir);
+			       fname, offset, res_dir);
 }
 
 /*
@@ -1262,7 +1261,6 @@ static inline bool ext4_match(const struct ext4_filename *fname,
  */
 int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 		    struct inode *dir, struct ext4_filename *fname,
-		    const struct qstr *d_name,
 		    unsigned int offset, struct ext4_dir_entry_2 **res_dir)
 {
 	struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 
 	if (ext4_has_inline_data(dir)) {
 		int has_inline_data = 1;
-		ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
+		ret = ext4_find_inline_entry(dir, &fname, res_dir,
 					     &has_inline_data);
 		if (has_inline_data) {
 			if (inlined)
@@ -1447,7 +1445,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 			goto next;
 		}
 		set_buffer_verified(bh);
-		i = search_dirblock(bh, dir, &fname, d_name,
+		i = search_dirblock(bh, dir, &fname,
 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
 		if (i == 1) {
 			EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 {
 	struct super_block * sb = dir->i_sb;
 	struct dx_frame frames[2], *frame;
-	const struct qstr *d_name = fname->usr_fname;
 	struct buffer_head *bh;
 	ext4_lblk_t block;
 	int retval;
@@ -1505,7 +1502,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 		if (IS_ERR(bh))
 			goto errout;
 
-		retval = search_dirblock(bh, dir, fname, d_name,
+		retval = search_dirblock(bh, dir, fname,
 					 block << EXT4_BLOCK_SIZE_BITS(sb),
 					 res_dir);
 		if (retval == 1)
@@ -1530,7 +1527,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 
 	bh = NULL;
 errout:
-	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
+	dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
 success:
 	dx_release(frames);
 	return bh;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 1a82138..c2fce44 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -85,7 +85,7 @@ static void ext4_finish_bio(struct bio *bio)
 		}
 #endif
 
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			SetPageError(page);
 			mapping_set_error(page->mapping, -EIO);
 		}
@@ -104,7 +104,7 @@ static void ext4_finish_bio(struct bio *bio)
 				continue;
 			}
 			clear_buffer_async_write(bh);
-			if (bio->bi_error)
+			if (bio->bi_status)
 				buffer_io_error(bh);
 		} while ((bh = bh->b_this_page) != head);
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -303,24 +303,25 @@ static void ext4_end_bio(struct bio *bio)
 		      bdevname(bio->bi_bdev, b),
 		      (long long) bio->bi_iter.bi_sector,
 		      (unsigned) bio_sectors(bio),
-		      bio->bi_error)) {
+		      bio->bi_status)) {
 		ext4_finish_bio(bio);
 		bio_put(bio);
 		return;
 	}
 	bio->bi_end_io = NULL;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct inode *inode = io_end->inode;
 
 		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
 			     "(offset %llu size %ld starting block %llu)",
-			     bio->bi_error, inode->i_ino,
+			     bio->bi_status, inode->i_ino,
 			     (unsigned long long) io_end->offset,
 			     (long) io_end->size,
 			     (unsigned long long)
 			     bi_sector >> (inode->i_blkbits - 9));
-		mapping_set_error(inode->i_mapping, bio->bi_error);
+		mapping_set_error(inode->i_mapping,
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
@@ -349,6 +350,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
 	if (bio) {
 		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
 				  REQ_SYNC : 0;
+		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
 		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
 		submit_bio(io->io_bio);
 	}
@@ -396,6 +398,7 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
 		ret = io_submit_init_bio(io, bh);
 		if (ret)
 			return ret;
+		io->io_bio->bi_write_hint = inode->i_write_hint;
 	}
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
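
The page-io.c and readpage.c hunks are part of the tree-wide switch from bio->bi_error (a
negative errno) to bio->bi_status (a blk_status_t), which has to be translated with
blk_status_to_errno() before reaching errno-based interfaces such as mapping_set_error(). A
hedged, out-of-tree sketch of the convention a completion handler follows after this change
(example_end_io is hypothetical, not part of the patch):

  #include <linux/bio.h>
  #include <linux/pagemap.h>

  /* Hypothetical read completion handler using the bi_status convention. */
  static void example_end_io(struct bio *bio)
  {
  	struct page *page = bio->bi_io_vec[0].bv_page;

  	if (bio->bi_status) {		/* blk_status_t code, not an errno */
  		ClearPageUptodate(page);
  		mapping_set_error(page->mapping,
  				  blk_status_to_errno(bio->bi_status));
  	} else {
  		SetPageUptodate(page);
  	}

  	unlock_page(page);
  	bio_put(bio);
  }
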
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index a81b829..40a5497 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -73,7 +73,7 @@ static void mpage_end_io(struct bio *bio)
 	int i;
 
 	if (ext4_bio_encrypted(bio)) {
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			fscrypt_release_ctx(bio->bi_private);
 		} else {
 			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -83,7 +83,7 @@ static void mpage_end_io(struct bio *bio)
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 
-		if (!bio->bi_error) {
+		if (!bio->bi_status) {
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0b177da..9006cb5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -848,14 +848,9 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
 {
 	int type;
 
-	if (ext4_has_feature_quota(sb)) {
-		dquot_disable(sb, -1,
-			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-	} else {
-		/* Use our quota_off function to clear inode flags etc. */
-		for (type = 0; type < EXT4_MAXQUOTAS; type++)
-			ext4_quota_off(sb, type);
-	}
+	/* Use our quota_off function to clear inode flags etc. */
+	for (type = 0; type < EXT4_MAXQUOTAS; type++)
+		ext4_quota_off(sb, type);
 }
 #else
 static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
 		return res;
 	}
 
+	res = dquot_initialize(inode);
+	if (res)
+		return res;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_MISC,
 			ext4_jbd2_credits_xattr(inode));
@@ -3952,7 +3950,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_qcop = &ext4_qctl_operations;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 #endif
-	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
+	memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
 	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
 	mutex_init(&sbi->s_orphan_lock);
@@ -5485,7 +5483,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 		goto out;
 
 	err = dquot_quota_off(sb, type);
-	if (err)
+	if (err || ext4_has_feature_quota(sb))
 		goto out_put;
 
 	inode_lock(inode);
@@ -5505,6 +5503,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 out_unlock:
 	inode_unlock(inode);
 out_put:
+	lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
 	iput(inode);
 	return err;
 out:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8fb7ce1..5d3c253 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -888,6 +888,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			else {
 				u32 ref;
 
+				WARN_ON_ONCE(dquot_initialize_needed(inode));
+
 				/* The old block is released after updating
 				   the inode. */
 				error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			/* We need to allocate a new block */
 			ext4_fsblk_t goal, block;
 
+			WARN_ON_ONCE(dquot_initialize_needed(inode));
+
 			goal = ext4_group_first_block_no(sb,
 						EXT4_I(inode)->i_block_group);
 
@@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 		return -EINVAL;
 	if (strlen(name) > 255)
 		return -ERANGE;
+
 	ext4_write_lock_xattr(inode, &no_expand);
 
 	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
 	int error, retries = 0;
 	int credits = ext4_jbd2_credits_xattr(inode);
 
+	error = dquot_initialize(inode);
+	if (error)
+		return error;
 retry:
 	handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
 	if (IS_ERR(handle)) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7c0f6bd..36fe820 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -58,12 +58,12 @@ static void f2fs_read_end_io(struct bio *bio)
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
 		f2fs_show_injection_info(FAULT_IO);
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 	}
 #endif
 
 	if (f2fs_bio_encrypted(bio)) {
-		if (bio->bi_error) {
+		if (bio->bi_status) {
 			fscrypt_release_ctx(bio->bi_private);
 		} else {
 			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -74,7 +74,7 @@ static void f2fs_read_end_io(struct bio *bio)
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		if (!bio->bi_error) {
+		if (!bio->bi_status) {
 			if (!PageUptodate(page))
 				SetPageUptodate(page);
 		} else {
@@ -102,14 +102,14 @@ static void f2fs_write_end_io(struct bio *bio)
 			unlock_page(page);
 			mempool_free(page, sbi->write_io_dummy);
 
-			if (unlikely(bio->bi_error))
+			if (unlikely(bio->bi_status))
 				f2fs_stop_checkpoint(sbi, true);
 			continue;
 		}
 
 		fscrypt_pullback_bio_page(&page, true);
 
-		if (unlikely(bio->bi_error)) {
+		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
 			f2fs_stop_checkpoint(sbi, true);
 		}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2185c7a..fd2e651 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 {
 	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
 	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
 	int err;
 
 	shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
 
 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9684585..ea9f455 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -749,7 +749,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
 {
 	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 
-	dc->error = bio->bi_error;
+	dc->error = blk_status_to_errno(bio->bi_status);
 	dc->state = D_DONE;
 	complete(&dc->wait);
 	bio_put(bio);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 83355ec..0b89b0b 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1937,7 +1937,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_time_gran = 1;
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
-	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
 	/* init f2fs-specific super block info */
 	sbi->valid_super_block = valid_super_block;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f4e7267..ed051f8 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -243,6 +243,67 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
 }
 #endif
 
+static bool rw_hint_valid(enum rw_hint hint)
+{
+	switch (hint) {
+	case RWF_WRITE_LIFE_NOT_SET:
+	case RWH_WRITE_LIFE_NONE:
+	case RWH_WRITE_LIFE_SHORT:
+	case RWH_WRITE_LIFE_MEDIUM:
+	case RWH_WRITE_LIFE_LONG:
+	case RWH_WRITE_LIFE_EXTREME:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static long fcntl_rw_hint(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	struct inode *inode = file_inode(file);
+	u64 *argp = (u64 __user *)arg;
+	enum rw_hint hint;
+	u64 h;
+
+	switch (cmd) {
+	case F_GET_FILE_RW_HINT:
+		h = file_write_hint(file);
+		if (copy_to_user(argp, &h, sizeof(*argp)))
+			return -EFAULT;
+		return 0;
+	case F_SET_FILE_RW_HINT:
+		if (copy_from_user(&h, argp, sizeof(h)))
+			return -EFAULT;
+		hint = (enum rw_hint) h;
+		if (!rw_hint_valid(hint))
+			return -EINVAL;
+
+		spin_lock(&file->f_lock);
+		file->f_write_hint = hint;
+		spin_unlock(&file->f_lock);
+		return 0;
+	case F_GET_RW_HINT:
+		h = inode->i_write_hint;
+		if (copy_to_user(argp, &h, sizeof(*argp)))
+			return -EFAULT;
+		return 0;
+	case F_SET_RW_HINT:
+		if (copy_from_user(&h, argp, sizeof(h)))
+			return -EFAULT;
+		hint = (enum rw_hint) h;
+		if (!rw_hint_valid(hint))
+			return -EINVAL;
+
+		inode_lock(inode);
+		inode->i_write_hint = hint;
+		inode_unlock(inode);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
 static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 		struct file *filp)
 {
@@ -337,6 +398,12 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 	case F_GET_SEALS:
 		err = shmem_fcntl(filp, cmd, arg);
 		break;
+	case F_GET_RW_HINT:
+	case F_SET_RW_HINT:
+	case F_GET_FILE_RW_HINT:
+	case F_SET_FILE_RW_HINT:
+		err = fcntl_rw_hint(filp, cmd, arg);
+		break;
 	default:
 		break;
 	}
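
The new fcntl_rw_hint() handler exposes write-lifetime hints: F_GET_RW_HINT/F_SET_RW_HINT act
on the inode, F_GET_FILE_RW_HINT/F_SET_FILE_RW_HINT on the struct file, and in all cases the
argument is a pointer to a 64-bit hint value. A userspace sketch of setting and reading back
the inode hint; the fallback constants mirror the values added to include/uapi/linux/fcntl.h
by this series, in case installed headers predate them:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  #ifndef F_SET_RW_HINT			/* from include/uapi/linux/fcntl.h */
  #define F_GET_RW_HINT		(1024 + 11)
  #define F_SET_RW_HINT		(1024 + 12)
  #define RWH_WRITE_LIFE_SHORT	2
  #endif

  int main(void)
  {
  	uint64_t hint = RWH_WRITE_LIFE_SHORT;	/* data expected to be short-lived */
  	int fd = open("logfile", O_WRONLY | O_CREAT | O_APPEND, 0644);

  	if (fd < 0) { perror("open"); return 1; }

  	if (fcntl(fd, F_SET_RW_HINT, &hint) < 0)
  		perror("F_SET_RW_HINT");

  	hint = 0;
  	if (fcntl(fd, F_GET_RW_HINT, &hint) == 0)
  		printf("inode write hint: %llu\n", (unsigned long long)hint);

  	close(fd);
  	return 0;
  }
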
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 63ee294..8b426f8 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2052,11 +2052,13 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 }
 
 /**
- *	__mark_inode_dirty -	internal function
- *	@inode: inode to mark
- *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
- *	Mark an inode as dirty. Callers should use mark_inode_dirty or
- *  	mark_inode_dirty_sync.
+ * __mark_inode_dirty -	internal function
+ *
+ * @inode: inode to mark
+ * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
+ *
+ * Mark an inode as dirty. Callers should use mark_inode_dirty or
+ * mark_inode_dirty_sync.
  *
  * Put the inode on the super block's dirty list.
  *
diff --git a/fs/fs_pin.c b/fs/fs_pin.c
index 611b540..e747b3d 100644
--- a/fs/fs_pin.c
+++ b/fs/fs_pin.c
@@ -34,7 +34,7 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m)
 
 void pin_kill(struct fs_pin *p)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (!p) {
 		rcu_read_unlock();
@@ -61,7 +61,7 @@ void pin_kill(struct fs_pin *p)
 		rcu_read_unlock();
 		schedule();
 		rcu_read_lock();
-		if (likely(list_empty(&wait.task_list)))
+		if (likely(list_empty(&wait.entry)))
 			break;
 		/* OK, we know p couldn't have been freed yet */
 		spin_lock_irq(&p->wait.lock);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index b7cf65d..aa3d445 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -815,7 +815,6 @@ struct gfs2_sbd {
 	atomic_t sd_log_in_flight;
 	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
-	int sd_log_error;
 
 	atomic_t sd_reserving_log;
 	wait_queue_head_t sd_reserving_log_wait;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f865b96..d2955da 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
+	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 	lh = page_address(page);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index b1f9144..885d36e 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -170,7 +170,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
  */
 
 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
-				  int error)
+				  blk_status_t error)
 {
 	struct buffer_head *bh, *next;
 	struct page *page = bvec->bv_page;
@@ -209,15 +209,13 @@ static void gfs2_end_log_write(struct bio *bio)
 	struct page *page;
 	int i;
 
-	if (bio->bi_error) {
-		sdp->sd_log_error = bio->bi_error;
-		fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
-	}
+	if (bio->bi_status)
+		fs_err(sdp, "Error %d writing to log\n", bio->bi_status);
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		page = bvec->bv_page;
 		if (page_has_buffers(page))
-			gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
+			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
 		else
 			mempool_free(page, gfs2_page_pool);
 	}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 663ffc1..fabe1614f 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -201,7 +201,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
 		do {
 			struct buffer_head *next = bh->b_this_page;
 			len -= bh->b_size;
-			bh->b_end_io(bh, !bio->bi_error);
+			bh->b_end_io(bh, !bio->bi_status);
 			bh = next;
 		} while (bh && len);
 	}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ed67548..e76058d 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -176,10 +176,10 @@ static void end_bio_io_page(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		SetPageUptodate(page);
 	else
-		pr_warn("error %d reading superblock\n", bio->bi_error);
+		pr_warn("error %d reading superblock\n", bio->bi_status);
 	unlock_page(page);
 }
 
@@ -203,7 +203,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 
 	memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
 	memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
-	memcpy(s->s_uuid, str->sb_uuid, 16);
+	memcpy(&s->s_uuid, str->sb_uuid, 16);
 }
 
 /**
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 7a51534..e77bc52 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -71,25 +71,14 @@ static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
 }
 
-static int gfs2_uuid_valid(const u8 *uuid)
-{
-	int i;
-
-	for (i = 0; i < 16; i++) {
-		if (uuid[i])
-			return 1;
-	}
-	return 0;
-}
-
 static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
 {
 	struct super_block *s = sdp->sd_vfs;
-	const u8 *uuid = s->s_uuid;
+
 	buf[0] = '\0';
-	if (!gfs2_uuid_valid(uuid))
+	if (uuid_is_null(&s->s_uuid))
 		return 0;
-	return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
+	return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
 }
 
 static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -712,14 +701,13 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 {
 	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
 	struct super_block *s = sdp->sd_vfs;
-	const u8 *uuid = s->s_uuid;
 
 	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
 	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
 	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
 		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
-	if (gfs2_uuid_valid(uuid))
-		add_uevent_var(env, "UUID=%pUB", uuid);
+	if (!uuid_is_null(&s->s_uuid))
+		add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
 	return 0;
 }
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index dde8613..d44f545 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/fs/inode.c b/fs/inode.c
index db59147..ab3b9a7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -146,6 +146,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	i_gid_write(inode, 0);
 	atomic_set(&inode->i_writecount, 0);
 	inode->i_size = 0;
+	inode->i_write_hint = WRITE_LIFE_NOT_SET;
 	inode->i_blocks = 0;
 	inode->i_bytes = 0;
 	inode->i_generation = 0;
@@ -1891,11 +1892,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	wait_queue_head_t *wq;
 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
-	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&inode_hash_lock);
 	schedule();
-	finish_wait(wq, &wait.wait);
+	finish_wait(wq, &wait.wq_entry);
 	spin_lock(&inode_hash_lock);
 }
 
@@ -2038,11 +2039,11 @@ static void __inode_dio_wait(struct inode *inode)
 	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
 
 	do {
-		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
 		if (atomic_read(&inode->i_dio_count))
 			schedule();
 	} while (atomic_read(&inode->i_dio_count));
-	finish_wait(wq, &q.wait);
+	finish_wait(wq, &q.wq_entry);
 }
 
 /**
diff --git a/fs/iomap.c b/fs/iomap.c
index 4b10892..fa6cd5b 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -672,8 +672,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
 	struct iomap_dio *dio = bio->bi_private;
 	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
 
-	if (bio->bi_error)
-		iomap_dio_set_error(dio, bio->bi_error);
+	if (bio->bi_status)
+		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
 
 	if (atomic_dec_and_test(&dio->ref)) {
 		if (is_sync_kiocb(dio->iocb)) {
@@ -793,6 +793,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
 		bio->bi_bdev = iomap->bdev;
 		bio->bi_iter.bi_sector =
 			iomap->blkno + ((pos - iomap->offset) >> 9);
+		bio->bi_write_hint = dio->iocb->ki_hint;
 		bio->bi_private = dio;
 		bio->bi_end_io = iomap_dio_bio_end_io;
 
@@ -881,6 +882,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		flags |= IOMAP_WRITE;
 	}
 
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (filemap_range_has_page(mapping, start, end)) {
+			ret = -EAGAIN;
+			goto out_free_dio;
+		}
+		flags |= IOMAP_NOWAIT;
+	}
+
 	ret = filemap_write_and_wait_range(mapping, start, end);
 	if (ret)
 		goto out_free_dio;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ebad342..7d5ef3b 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2579,10 +2579,10 @@ void jbd2_journal_release_jbd_inode(journal_t *journal,
 		wait_queue_head_t *wq;
 		DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
 		wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
-		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 		spin_unlock(&journal->j_list_lock);
 		schedule();
-		finish_wait(wq, &wait.wait);
+		finish_wait(wq, &wait.wq_entry);
 		goto restart;
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 9ee4832..8b08044 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -409,25 +409,6 @@ static handle_t *new_handle(int nblocks)
 	return handle;
 }
 
-/**
- * handle_t *jbd2_journal_start() - Obtain a new handle.
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffer we might modify
- *
- * We make sure that the transaction can guarantee at least nblocks of
- * modified buffers in the log.  We block until the log can guarantee
- * that much space. Additionally, if rsv_blocks > 0, we also create another
- * handle with rsv_blocks reserved blocks in the journal. This handle is
- * is stored in h_rsv_handle. It is not attached to any particular transaction
- * and thus doesn't block transaction commit. If the caller uses this reserved
- * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
- * on the parent handle will dispose the reserved one. Reserved handle has to
- * be converted to a normal handle using jbd2_journal_start_reserved() before
- * it can be used.
- *
- * Return a pointer to a newly allocated handle, or an ERR_PTR() value
- * on failure.
- */
 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
 			      gfp_t gfp_mask, unsigned int type,
 			      unsigned int line_no)
@@ -478,6 +459,25 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
 EXPORT_SYMBOL(jbd2__journal_start);
 
 
+/**
+ * handle_t *jbd2_journal_start() - Obtain a new handle.
+ * @journal: Journal to start transaction on.
+ * @nblocks: number of block buffer we might modify
+ *
+ * We make sure that the transaction can guarantee at least nblocks of
+ * modified buffers in the log.  We block until the log can guarantee
+ * that much space. Additionally, if rsv_blocks > 0, we also create another
+ * handle with rsv_blocks reserved blocks in the journal. This handle is
+ * is stored in h_rsv_handle. It is not attached to any particular transaction
+ * and thus doesn't block transaction commit. If the caller uses this reserved
+ * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
+ * on the parent handle will dispose the reserved one. Reserved handle has to
+ * be converted to a normal handle using jbd2_journal_start_reserved() before
+ * it can be used.
+ *
+ * Return a pointer to a newly allocated handle, or an ERR_PTR() value
+ * on failure.
+ */
 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 {
 	return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
@@ -680,6 +680,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
 
 	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
 	handle->h_buffer_credits = nblocks;
+	/*
+	 * Restore the original nofs context because the journal restart
+	 * is basically the same thing as journal stop and start.
+	 * start_this_handle will start a new nofs context.
+	 */
+	memalloc_nofs_restore(handle->saved_alloc_context);
 	ret = start_this_handle(journal, handle, gfp_mask);
 	return ret;
 }
@@ -1066,10 +1072,10 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
  * @handle: transaction to add buffer modifications to
  * @bh:     bh to be used for metadata writes
  *
- * Returns an error code or 0 on success.
+ * Returns: error code or 0 on success.
  *
  * In full data journalling mode the buffer may be of type BJ_AsyncData,
- * because we're write()ing a buffer which is also part of a shared mapping.
+ * because we're ``write()ing`` a buffer which is also part of a shared mapping.
  */
 
 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
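
The jbd2__journal_restart() hunk restores the NOFS allocation context that was saved when the
handle was started, since a restart is effectively a stop followed by a fresh start and
start_this_handle() opens a new scope. The scoped-NOFS API pairs a save with a restore around
any region where recursing into filesystem reclaim could deadlock; a hedged sketch of the
idiom outside jbd2 (do_metadata_work is hypothetical):

  #include <linux/sched/mm.h>

  static void do_metadata_work(void)
  {
  	unsigned int nofs_flags;

  	/* Allocations inside this scope implicitly behave as GFP_NOFS. */
  	nofs_flags = memalloc_nofs_save();

  	/* ... allocate and dirty filesystem metadata here ... */

  	memalloc_nofs_restore(nofs_flags);
  }
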
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index bb1da1f..a21f0e9 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2205,7 +2205,7 @@ static void lbmIODone(struct bio *bio)
 
 	bp->l_flag |= lbmDONE;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bp->l_flag |= lbmERROR;
 
 		jfs_err("lbmIODone: I/O error in JFS log");
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 489aaa1..ce93db3 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -280,7 +280,7 @@ static void metapage_read_end_io(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
 		SetPageError(page);
 	}
@@ -337,7 +337,7 @@ static void metapage_write_end_io(struct bio *bio)
 
 	BUG_ON(!PagePrivate(page));
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
 		SetPageError(page);
 	}
diff --git a/fs/mpage.c b/fs/mpage.c
index baff8f8..2e4c41c 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,7 +50,8 @@ static void mpage_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
+		page_endio(page, op_is_write(bio_op(bio)),
+				blk_status_to_errno(bio->bi_status));
 	}
 
 	bio_put(bio);
@@ -344,6 +345,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
  *
  * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
  * submitted in the following order:
+ *
  * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
  *
  * because the indirect block has to be read to get the mappings of blocks
@@ -614,6 +616,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 			goto confused;
 
 		wbc_init_bio(wbc, bio);
+		bio->bi_write_hint = inode->i_write_hint;
 	}
 
 	/*
diff --git a/fs/namei.c b/fs/namei.c
index 6571a5f..8bacc39 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4332,6 +4332,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
  * The worst of all namespace operations - renaming directory. "Perverted"
  * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
  * Problems:
+ *
  *	a) we can get into loop creation.
  *	b) race potential - two innocent renames can create a loop together.
  *	   That's where 4.4 screws up. Current fix: serialization on
diff --git a/fs/namespace.c b/fs/namespace.c
index 8bd3e4d..5a44384 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3488,6 +3488,8 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
 		return err;
 	}
 
+	put_mnt_ns(old_mnt_ns);
+
 	/* Update the pwd and root */
 	set_fs_pwd(fs, &root);
 	set_fs_root(fs, &root);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 0ca370d..d8863a8 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -188,7 +188,7 @@ static void bl_end_io_read(struct bio *bio)
 {
 	struct parallel_io *par = bio->bi_private;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		struct nfs_pgio_header *header = par->data;
 
 		if (!header->pnfs_error)
@@ -319,7 +319,7 @@ static void bl_end_io_write(struct bio *bio)
 	struct parallel_io *par = bio->bi_private;
 	struct nfs_pgio_header *header = par->data;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		if (!header->pnfs_error)
 			header->pnfs_error = -EIO;
 		pnfs_set_lo_fail(header->lseg);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index c14758e..390ac9c 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -753,7 +753,6 @@ static void nfs4_callback_free_slot(struct nfs4_session *session,
 	 * A single slot, so highest used slotid is either 0 or -1
 	 */
 	nfs4_free_slot(tbl, slot);
-	nfs4_slot_tbl_drain_complete(tbl);
 	spin_unlock(&tbl->slot_tbl_lock);
 }
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 32ccd77..2ac00bf 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1946,29 +1946,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 }
 EXPORT_SYMBOL_GPL(nfs_link);
 
-static void
-nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data)
-{
-	struct dentry *old_dentry = data->old_dentry;
-	struct dentry *new_dentry = data->new_dentry;
-	struct inode *old_inode = d_inode(old_dentry);
-	struct inode *new_inode = d_inode(new_dentry);
-
-	nfs_mark_for_revalidate(old_inode);
-
-	switch (task->tk_status) {
-	case 0:
-		if (new_inode != NULL)
-			nfs_drop_nlink(new_inode);
-		d_move(old_dentry, new_dentry);
-		nfs_set_verifier(new_dentry,
-					nfs_save_change_attribute(data->new_dir));
-		break;
-	case -ENOENT:
-		nfs_dentry_handle_enoent(old_dentry);
-	}
-}
-
 /*
  * RENAME
  * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
@@ -1999,7 +1976,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 {
 	struct inode *old_inode = d_inode(old_dentry);
 	struct inode *new_inode = d_inode(new_dentry);
-	struct dentry *dentry = NULL;
+	struct dentry *dentry = NULL, *rehash = NULL;
 	struct rpc_task *task;
 	int error = -EBUSY;
 
@@ -2022,8 +1999,10 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		 * To prevent any new references to the target during the
 		 * rename, we unhash the dentry in advance.
 		 */
-		if (!d_unhashed(new_dentry))
+		if (!d_unhashed(new_dentry)) {
 			d_drop(new_dentry);
+			rehash = new_dentry;
+		}
 
 		if (d_count(new_dentry) > 2) {
 			int err;
@@ -2040,6 +2019,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 				goto out;
 
 			new_dentry = dentry;
+			rehash = NULL;
 			new_inode = NULL;
 		}
 	}
@@ -2048,8 +2028,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (new_inode != NULL)
 		NFS_PROTO(new_inode)->return_delegation(new_inode);
 
-	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
-					nfs_complete_rename);
+	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
 	if (IS_ERR(task)) {
 		error = PTR_ERR(task);
 		goto out;
@@ -2059,9 +2038,27 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (error == 0)
 		error = task->tk_status;
 	rpc_put_task(task);
+	nfs_mark_for_revalidate(old_inode);
 out:
+	if (rehash)
+		d_rehash(rehash);
 	trace_nfs_rename_exit(old_dir, old_dentry,
 			new_dir, new_dentry, error);
+	if (!error) {
+		if (new_inode != NULL)
+			nfs_drop_nlink(new_inode);
+		/*
+		 * The d_move() should be here instead of in an async RPC completion
+		 * handler because we need the proper locks to move the dentry.  If
+		 * we're interrupted by a signal, the async RPC completion handler
+		 * should mark the directories for revalidation.
+		 */
+		d_move(old_dentry, new_dentry);
+		nfs_set_verifier(new_dentry,
+					nfs_save_change_attribute(new_dir));
+	} else if (error == -ENOENT)
+		nfs_dentry_handle_enoent(old_dentry);
+
 	/* new dentry created? */
 	if (dentry)
 		dput(dentry);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f5714ee..23542dc 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 			goto out_err_free;
 
 		/* fh */
+		rc = -EIO;
 		p = xdr_inline_decode(&stream, 4);
 		if (!p)
 			goto out_err_free;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e9b4c33..8701d76 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -7,6 +7,7 @@
 #include <linux/security.h>
 #include <linux/crc32.h>
 #include <linux/nfs_page.h>
+#include <linux/wait_bit.h>
 
 #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
 
@@ -398,7 +399,6 @@ extern struct file_system_type nfs4_referral_fs_type;
 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
 			struct nfs_subversion *);
-void nfs_initialise_sb(struct super_block *);
 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
@@ -458,7 +458,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
 
 /* super.c */
-void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
 void nfs_umount_begin(struct super_block *);
 int  nfs_statfs(struct dentry *, struct kstatfs *);
 int  nfs_show_options(struct seq_file *, struct dentry *);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 1a224a3..e5686be 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
 	devname = nfs_devname(dentry, page, PAGE_SIZE);
 	if (IS_ERR(devname))
-		mnt = (struct vfsmount *)devname;
+		mnt = ERR_CAST(devname);
 	else
 		mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
 
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 929d09a..319a47d 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
 	if (status)
 		goto out;
 
-	if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
 				    &res->commit_res.verf->verifier)) {
 		status = -EAGAIN;
 		goto out;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 692a7a8..66776f0 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
 			 */
 			nfs4_schedule_path_down_recovery(pos);
 		default:
-			spin_lock(&nn->nfs_client_lock);
 			goto out;
 		}
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c08c46a..98b0b66 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2589,7 +2589,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
 
 	/* Except MODE, it seems harmless of setting twice. */
 	if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
-		attrset[1] & FATTR4_WORD1_MODE)
+		(attrset[1] & FATTR4_WORD1_MODE ||
+		 attrset[2] & FATTR4_WORD2_MODE_UMASK))
 		sattr->ia_valid &= ~ATTR_MODE;
 
 	if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -6372,7 +6373,7 @@ struct nfs4_lock_waiter {
 };
 
 static int
-nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key)
+nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
 {
 	int ret;
 	struct cb_notify_lock_args *cbnl = key;
@@ -6415,7 +6416,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
 					   .inode = state->inode,
 					   .owner = &owner,
 					   .notified = false };
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	/* Don't bother with waitqueue if we don't expect a callback */
 	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
@@ -8416,6 +8417,7 @@ static void nfs4_layoutget_release(void *calldata)
 	size_t max_pages = max_response_pages(server);
 
 	dprintk("--> %s\n", __func__);
+	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	nfs4_free_pages(lgp->args.layout.pages, max_pages);
 	pnfs_put_layout_hdr(NFS_I(inode)->layout);
 	put_nfs_open_context(lgp->args.ctx);
@@ -8490,7 +8492,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
 	/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
 	if (status == 0 && lgp->res.layoutp->len)
 		lseg = pnfs_layout_process(lgp);
-	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	rpc_put_task(task);
 	dprintk("<-- %s status=%d\n", __func__, status);
 	if (status)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index b34de03..cbf82b0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2134,6 +2134,8 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
 	put_rpccred(cred);
 	switch (status) {
 	case 0:
+	case -EINTR:
+	case -ERESTARTSYS:
 		break;
 	case -ETIMEDOUT:
 		if (clnt->cl_softrtry)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index adc6ec2..c383d09 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
 
+/*
+ * Check for any intersection between the request and the pgio->pg_lseg,
+ * and if none, put this pgio->pg_lseg away.
+ */
+static void
+pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
+		pnfs_put_lseg(pgio->pg_lseg);
+		pgio->pg_lseg = NULL;
+	}
+}
+
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
 	u64 rd_size = req->wb_bytes;
 
 	pnfs_generic_pg_check_layout(pgio);
+	pnfs_generic_pg_check_range(pgio, req);
 	if (pgio->pg_lseg == NULL) {
 		if (pgio->pg_dreq == NULL)
 			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
 			   struct nfs_page *req, u64 wb_size)
 {
 	pnfs_generic_pg_check_layout(pgio);
+	pnfs_generic_pg_check_range(pgio, req);
 	if (pgio->pg_lseg == NULL) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
 						   req->wb_context,
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
 		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
 				     pgio->pg_lseg->pls_range.length);
 		req_start = req_offset(req);
-		WARN_ON_ONCE(req_start >= seg_end);
+
 		/* start of request is past the last byte of this segment */
-		if (req_start >= seg_end) {
-			/* reference the new lseg */
-			if (pgio->pg_ops->pg_cleanup)
-				pgio->pg_ops->pg_cleanup(pgio);
-			if (pgio->pg_ops->pg_init)
-				pgio->pg_ops->pg_init(pgio, req);
+		if (req_start >= seg_end)
 			return 0;
-		}
 
 		/* adjust 'size' iff there are fewer bytes left in the
 		 * segment than what nfs_generic_pg_test returned */
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 2d05b75..99731e3 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
 	return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
 }
 
+static inline bool
+pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req)
+{
+	u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length);
+	u64 req_last = req_offset(req) + req->wb_bytes;
+
+	return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last,
+				req_offset(req), req_last);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2f3822a..c5334c0 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount);
 /*
  * Initialise the common bits of the superblock
  */
-inline void nfs_initialise_sb(struct super_block *sb)
+static void nfs_initialise_sb(struct super_block *sb)
 {
 	struct nfs_server *server = NFS_SB(sb);
 
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super);
 /*
  * Finish setting up a cloned NFS2/3/4 superblock
  */
-void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_clone_super(struct super_block *sb,
+			    struct nfs_mount_info *mount_info)
 {
 	const struct super_block *old_sb = mount_info->cloned->sb;
 	struct nfs_server *server = NFS_SB(sb);
@@ -2544,10 +2545,25 @@ EXPORT_SYMBOL_GPL(nfs_set_sb_security);
 int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
 			  struct nfs_mount_info *mount_info)
 {
+	int error;
+	unsigned long kflags = 0, kflags_out = 0;
+
 	/* clone any lsm security options from the parent to the new sb */
 	if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
 		return -ESTALE;
-	return security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
+
+	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
+		kflags |= SECURITY_LSM_NATIVE_LABELS;
+
+	error = security_sb_clone_mnt_opts(mount_info->cloned->sb, s, kflags,
+			&kflags_out);
+	if (error)
+		return error;
+
+	if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
+		!(kflags_out & SECURITY_LSM_NATIVE_LABELS))
+		NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
 
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index fb5213a..c862c24 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -219,6 +219,9 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
 	u8 *buf, *d, type, assoc;
 	int error;
 
+	if (WARN_ON_ONCE(!blk_queue_scsi_passthrough(q)))
+		return -EINVAL;
+
 	buf = kzalloc(bufflen, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -229,7 +232,6 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
 		goto out_free_buf;
 	}
 	req = scsi_req(rq);
-	scsi_req_init(rq);
 
 	error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
 	if (error)
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index e71f11b..3bc08c3 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -486,7 +486,7 @@ secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
 #endif
 
 static inline int
-uuid_parse(char **mesg, char *buf, unsigned char **puuid)
+nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
 {
 	int len;
 
@@ -586,7 +586,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 			if (strcmp(buf, "fsloc") == 0)
 				err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
 			else if (strcmp(buf, "uuid") == 0)
-				err = uuid_parse(&mesg, buf, &exp.ex_uuid);
+				err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
 			else if (strcmp(buf, "secinfo") == 0)
 				err = secinfo_parse(&mesg, buf, &exp);
 			else
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 12feac6..4523346 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
 	if (!p)
 		return 0;
 	p = xdr_decode_hyper(p, &args->offset);
+
 	args->count = ntohl(*p++);
-
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
-
 	len = min(args->count, max_blocksize);
 
 	/* set up the kvec */
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
 		v++;
 	}
 	args->vlen = v;
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p,
 	p = decode_fh(p, &args->fh);
 	if (!p)
 		return 0;
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
 	args->buffer = page_address(*(rqstp->rq_next_page++));
 
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
 	args->verf   = p; p += 2;
 	args->dircount = ~0;
 	args->count  = ntohl(*p++);
-
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
-
 	args->count  = min_t(u32, args->count, PAGE_SIZE);
 	args->buffer = page_address(*(rqstp->rq_next_page++));
 
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
 	args->dircount = ntohl(*p++);
 	args->count    = ntohl(*p++);
 
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
-
 	len = args->count = min(args->count, max_blocksize);
 	while (len > 0) {
 		struct page *p = *(rqstp->rq_next_page++);
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
 			args->buffer = page_address(p);
 		len -= PAGE_SIZE;
 	}
-	return 1;
+
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index c453a19..dadb3bf 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 			opdesc->op_get_currentstateid(cstate, &op->u);
 		op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+		/* Only from SEQUENCE */
+		if (cstate->status == nfserr_replay_cache) {
+			dprintk("%s NFS4.1 replay from cache\n", __func__);
+			status = op->status;
+			goto out;
+		}
 		if (!op->status) {
 			if (opdesc->op_set_currentstateid)
 				opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 			if (need_wrongsec_check(rqstp))
 				op->status = check_nfsd_access(current_fh->fh_export, rqstp);
 		}
-
 encode_op:
-		/* Only from SEQUENCE */
-		if (cstate->status == nfserr_replay_cache) {
-			dprintk("%s NFS4.1 replay from cache\n", __func__);
-			status = op->status;
-			goto out;
-		}
 		if (op->status == nfserr_replay_me) {
 			op->replay = &cstate->replay_owner->so_replay;
 			nfsd4_encode_replay(&resp->xdr, op);
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 6a4947a..de07ff6 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
 	len = args->count     = ntohl(*p++);
 	p++; /* totalcount - unused */
 
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
-
 	len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
 
 	/* set up somewhere to store response.
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p,
 		v++;
 	}
 	args->vlen = v;
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli
 	p = decode_fh(p, &args->fh);
 	if (!p)
 		return 0;
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
 	args->buffer = page_address(*(rqstp->rq_next_page++));
 
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 int
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p,
 	args->cookie = ntohl(*p++);
 	args->count  = ntohl(*p++);
 	args->count  = min_t(u32, args->count, PAGE_SIZE);
-	if (!xdr_argsize_check(rqstp, p))
-		return 0;
 	args->buffer = page_address(*(rqstp->rq_next_page++));
 
-	return 1;
+	return xdr_argsize_check(rqstp, p);
 }
 
 /*
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 6f87b2a..e73c86d 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -338,7 +338,7 @@ static void nilfs_end_bio_write(struct bio *bio)
 {
 	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
-	if (bio->bi_error)
+	if (bio->bi_status)
 		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index febed12..70ded52 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2161,7 +2161,7 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino)
 }
 
 struct nilfs_segctor_wait_request {
-	wait_queue_t	wq;
+	wait_queue_entry_t	wq;
 	__u32		seq;
 	int		err;
 	atomic_t	done;
@@ -2206,8 +2206,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
-	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
-				 wq.task_list) {
+	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
 		if (!atomic_read(&wrq->done) &&
 		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
 			wrq->err = err;
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 3582583..4690cd7 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
 					PTR_ERR(dent_inode));
 		kfree(name);
 		/* Return the error code. */
-		return (struct dentry *)dent_inode;
+		return ERR_CAST(dent_inode);
 	}
 	/* It is guaranteed that @name is no longer allocated at this point. */
 	if (MREF_ERR(mref) == -ENOENT) {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 0da0332..ffe0039 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -516,9 +516,9 @@ static void o2hb_bio_end_io(struct bio *bio)
 {
 	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
 
-	if (bio->bi_error) {
-		mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
-		wc->wc_error = bio->bi_error;
+	if (bio->bi_status) {
+		mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
+		wc->wc_error = blk_status_to_errno(bio->bi_status);
 	}
 
 	o2hb_bio_wait_dec(wc, 1);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3b7c937..4689940 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
 	struct ocfs2_lock_res *lockres;
 
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
+	/* had_lock means that the current process already took the cluster
+	 * lock previously. If had_lock is 1, we have nothing to do here, and
+	 * it will get unlocked where we got the lock.
+	 */
 	if (!had_lock) {
 		ocfs2_remove_holder(lockres, oh);
 		ocfs2_inode_unlock(inode, ex);
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 827fc98..9f88188 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -119,7 +119,7 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
 
 	if (IS_ERR(inode)) {
 		mlog_errno(PTR_ERR(inode));
-		result = (void *)inode;
+		result = ERR_CAST(inode);
 		goto bail;
 	}
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index ca1646f..83005f4 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2062,7 +2062,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
 	cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
 	bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
 	sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
-	memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
+	memcpy(&sb->s_uuid, di->id2.i_super.s_uuid,
 	       sizeof(di->id2.i_super.s_uuid));
 
 	osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3c5384d..f70c377 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode,
 			   void *buffer,
 			   size_t buffer_size)
 {
-	int ret;
+	int ret, had_lock;
 	struct buffer_head *di_bh = NULL;
+	struct ocfs2_lock_holder oh;
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 0);
-	if (ret < 0) {
-		mlog_errno(ret);
-		return ret;
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh);
+	if (had_lock < 0) {
+		mlog_errno(had_lock);
+		return had_lock;
 	}
 	down_read(&OCFS2_I(inode)->ip_xattr_sem);
 	ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
 				     name, buffer, buffer_size);
 	up_read(&OCFS2_I(inode)->ip_xattr_sem);
 
-	ocfs2_inode_unlock(inode, 0);
+	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
 
 	brelse(di_bh);
 
@@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode,
 {
 	struct buffer_head *di_bh = NULL;
 	struct ocfs2_dinode *di;
-	int ret, credits, ref_meta = 0, ref_credits = 0;
+	int ret, credits, had_lock, ref_meta = 0, ref_credits = 0;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct inode *tl_inode = osb->osb_tl_inode;
 	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
 	struct ocfs2_refcount_tree *ref_tree = NULL;
+	struct ocfs2_lock_holder oh;
 
 	struct ocfs2_xattr_info xi = {
 		.xi_name_index = name_index,
@@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode,
 		return -ENOMEM;
 	}
 
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret < 0) {
+	had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
+	if (had_lock < 0) {
+		ret = had_lock;
 		mlog_errno(ret);
 		goto cleanup_nolock;
 	}
@@ -3670,7 +3673,7 @@ int ocfs2_xattr_set(struct inode *inode,
 		if (ret)
 			mlog_errno(ret);
 	}
-	ocfs2_inode_unlock(inode, 1);
+	ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 cleanup_nolock:
 	brelse(di_bh);
 	brelse(xbs.xattr_bh);
diff --git a/fs/open.c b/fs/open.c
index cd0c5be..3fe0c4a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -759,6 +759,7 @@ static int do_dentry_open(struct file *f,
 	     likely(f->f_op->write || f->f_op->write_iter))
 		f->f_mode |= FMODE_CAN_WRITE;
 
+	f->f_write_hint = WRITE_LIFE_NOT_SET;
 	f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 
 	file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 83b5060..038d675 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -46,8 +46,8 @@ static void run_down(struct slot_map *m)
 	spin_lock(&m->q.lock);
 	if (m->c != -1) {
 		for (;;) {
-			if (likely(list_empty(&wait.task_list)))
-				__add_wait_queue_tail(&m->q, &wait);
+			if (likely(list_empty(&wait.entry)))
+				__add_wait_queue_entry_tail(&m->q, &wait);
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
 			if (m->c == -1)
@@ -84,8 +84,8 @@ static int wait_for_free(struct slot_map *m)
 
 	do {
 		long n = left, t;
-		if (likely(list_empty(&wait.task_list)))
-			__add_wait_queue_tail_exclusive(&m->q, &wait);
+		if (likely(list_empty(&wait.entry)))
+			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (m->c > 0)
@@ -108,8 +108,8 @@ static int wait_for_free(struct slot_map *m)
 			left = -EINTR;
 	} while (left > 0);
 
-	if (!list_empty(&wait.task_list))
-		list_del(&wait.task_list);
+	if (!list_empty(&wait.entry))
+		list_del(&wait.entry);
 	else if (left <= 0 && waitqueue_active(&m->q))
 		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
 	__set_current_state(TASK_RUNNING);
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 0daac51..c0c9683 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -1,5 +1,6 @@
 config OVERLAY_FS
 	tristate "Overlay filesystem support"
+	select EXPORTFS
 	help
 	  An overlay filesystem combines two filesystems - an 'upper' filesystem
 	  and a 'lower' filesystem.  When a name exists in both filesystems, the
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 9008ab9..e5869f9 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -233,7 +233,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
 	return err;
 }
 
-static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_be *uuid)
+static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_t *uuid)
 {
 	struct ovl_fh *fh;
 	int fh_type, fh_len, dwords;
@@ -284,7 +284,6 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
 			  struct dentry *upper)
 {
 	struct super_block *sb = lower->d_sb;
-	uuid_be *uuid = (uuid_be *) &sb->s_uuid;
 	const struct ovl_fh *fh = NULL;
 	int err;
 
@@ -294,13 +293,17 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
 	 * up and a pure upper inode.
 	 */
 	if (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
-	    uuid_be_cmp(*uuid, NULL_UUID_BE)) {
-		fh = ovl_encode_fh(lower, uuid);
+	    !uuid_is_null(&sb->s_uuid)) {
+		fh = ovl_encode_fh(lower, &sb->s_uuid);
 		if (IS_ERR(fh))
 			return PTR_ERR(fh);
 	}
 
-	err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+	/*
+	 * Do not fail when upper doesn't support xattrs.
+	 */
+	err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh,
+				 fh ? fh->len : 0, 0);
 	kfree(fh);
 
 	return err;
@@ -326,15 +329,9 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 		.link = link
 	};
 
-	upper = lookup_one_len(dentry->d_name.name, upperdir,
-			       dentry->d_name.len);
-	err = PTR_ERR(upper);
-	if (IS_ERR(upper))
-		goto out;
-
 	err = security_inode_copy_up(dentry, &new_creds);
 	if (err < 0)
-		goto out1;
+		goto out;
 
 	if (new_creds)
 		old_creds = override_creds(new_creds);
@@ -342,13 +339,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	if (tmpfile)
 		temp = ovl_do_tmpfile(upperdir, stat->mode);
 	else
-		temp = ovl_lookup_temp(workdir, dentry);
-	err = PTR_ERR(temp);
-	if (IS_ERR(temp))
-		goto out1;
-
+		temp = ovl_lookup_temp(workdir);
 	err = 0;
-	if (!tmpfile)
+	if (IS_ERR(temp)) {
+		err = PTR_ERR(temp);
+		temp = NULL;
+	}
+
+	if (!err && !tmpfile)
 		err = ovl_create_real(wdir, temp, &cattr, NULL, true);
 
 	if (new_creds) {
@@ -357,7 +355,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	}
 
 	if (err)
-		goto out2;
+		goto out;
 
 	if (S_ISREG(stat->mode)) {
 		struct path upperpath;
@@ -393,10 +391,23 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	/*
 	 * Store identifier of lower inode in upper inode xattr to
 	 * allow lookup of the copy up origin inode.
+	 *
+	 * Don't set origin when we are breaking the association with a lower
+	 * hard link.
 	 */
-	err = ovl_set_origin(dentry, lowerpath->dentry, temp);
-	if (err)
+	if (S_ISDIR(stat->mode) || stat->nlink == 1) {
+		err = ovl_set_origin(dentry, lowerpath->dentry, temp);
+		if (err)
+			goto out_cleanup;
+	}
+
+	upper = lookup_one_len(dentry->d_name.name, upperdir,
+			       dentry->d_name.len);
+	if (IS_ERR(upper)) {
+		err = PTR_ERR(upper);
+		upper = NULL;
 		goto out_cleanup;
+	}
 
 	if (tmpfile)
 		err = ovl_do_link(temp, udir, upper, true);
@@ -411,17 +422,15 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 
 	/* Restore timestamps on parent (best effort) */
 	ovl_set_timestamps(upperdir, pstat);
-out2:
-	dput(temp);
-out1:
-	dput(upper);
 out:
+	dput(temp);
+	dput(upper);
 	return err;
 
 out_cleanup:
 	if (!tmpfile)
 		ovl_cleanup(wdir, temp);
-	goto out2;
+	goto out;
 }
 
 /*
@@ -454,6 +463,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
 	ovl_path_upper(parent, &parentpath);
 	upperdir = parentpath.dentry;
 
+	/* Mark parent "impure" because it may now contain non-pure upper */
+	err = ovl_set_impure(parent, upperdir);
+	if (err)
+		return err;
+
 	err = vfs_getattr(&parentpath, &pstat,
 			  STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
 	if (err)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 723b98b..a63a716 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
 	}
 }
 
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
+struct dentry *ovl_lookup_temp(struct dentry *workdir)
 {
 	struct dentry *temp;
 	char name[20];
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
 	struct dentry *whiteout;
 	struct inode *wdir = workdir->d_inode;
 
-	whiteout = ovl_lookup_temp(workdir, dentry);
+	whiteout = ovl_lookup_temp(workdir);
 	if (IS_ERR(whiteout))
 		return whiteout;
 
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 	return err;
 }
 
-static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper,
+			       int xerr)
 {
 	int err;
 
-	err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+	err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr);
 	if (!err)
 		ovl_dentry_set_opaque(dentry);
 
 	return err;
 }
 
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
+{
+	/*
+	 * Fail with -EIO when trying to create opaque dir and upper doesn't
+	 * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to
+	 * return a specific error for noxattr case.
+	 */
+	return ovl_set_opaque_xerr(dentry, upperdentry, -EIO);
+}
+
 /* Common operations required to be done after creation of file on upper */
 static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
 			    struct dentry *newdentry, bool hardlink)
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry)
 	return OVL_TYPE_MERGE(ovl_path_type(dentry));
 }
 
+static bool ovl_type_origin(struct dentry *dentry)
+{
+	return OVL_TYPE_ORIGIN(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
 			    struct cattr *attr, struct dentry *hardlink)
 {
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
 	if (upper->d_parent->d_inode != udir)
 		goto out_unlock;
 
-	opaquedir = ovl_lookup_temp(workdir, dentry);
+	opaquedir = ovl_lookup_temp(workdir);
 	err = PTR_ERR(opaquedir);
 	if (IS_ERR(opaquedir))
 		goto out_unlock;
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 	if (err)
 		goto out;
 
-	newdentry = ovl_lookup_temp(workdir, dentry);
+	newdentry = ovl_lookup_temp(workdir);
 	err = PTR_ERR(newdentry);
 	if (IS_ERR(newdentry))
 		goto out_unlock;
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
 	if (IS_ERR(redirect))
 		return PTR_ERR(redirect);
 
-	err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
-			      redirect, strlen(redirect), 0);
+	err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry),
+				 OVL_XATTR_REDIRECT,
+				 redirect, strlen(redirect), -EXDEV);
 	if (!err) {
 		spin_lock(&dentry->d_lock);
 		ovl_dentry_set_redirect(dentry, redirect);
 		spin_unlock(&dentry->d_lock);
 	} else {
 		kfree(redirect);
-		if (err == -EOPNOTSUPP)
-			ovl_clear_redirect_dir(dentry->d_sb);
-		else
-			pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+		pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
 		/* Fall back to userspace copy-up */
 		err = -EXDEV;
 	}
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
 	old_upperdir = ovl_dentry_upper(old->d_parent);
 	new_upperdir = ovl_dentry_upper(new->d_parent);
 
+	if (!samedir) {
+		/*
+		 * When moving a merge dir or non-dir with copy up origin into
+		 * a new parent, we are marking the new parent dir "impure".
+		 * When ovl_iterate() iterates an "impure" upper dir, it will
+		 * lookup the origin inodes of the entries to fill d_ino.
+		 */
+		if (ovl_type_origin(old)) {
+			err = ovl_set_impure(new->d_parent, new_upperdir);
+			if (err)
+				goto out_revert_creds;
+		}
+		if (!overwrite && ovl_type_origin(new)) {
+			err = ovl_set_impure(old->d_parent, old_upperdir);
+			if (err)
+				goto out_revert_creds;
+		}
+	}
+
 	trap = lock_rename(new_upperdir, old_upperdir);
 
 	olddentry = lookup_one_len(old->d_name.name, old_upperdir,
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
 		if (ovl_type_merge_or_lower(old))
 			err = ovl_set_redirect(old, samedir);
 		else if (!old_opaque && ovl_type_merge(new->d_parent))
-			err = ovl_set_opaque(old, olddentry);
+			err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
 		if (err)
 			goto out_dput;
 	}
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
 		if (ovl_type_merge_or_lower(new))
 			err = ovl_set_redirect(new, samedir);
 		else if (!new_opaque && ovl_type_merge(old->d_parent))
-			err = ovl_set_opaque(new, newdentry);
+			err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
 		if (err)
 			goto out_dput;
 	}
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ad9547f..d613e2c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name,
 	return res;
 }
 
+static bool ovl_can_list(const char *s)
+{
+	/* List all non-trusted xattrs */
+	if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+		return true;
+
+	/* Never list trusted.overlay, list other trusted for superuser only */
+	return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
 	struct dentry *realdentry = ovl_dentry_real(dentry);
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 			return -EIO;
 
 		len -= slen;
-		if (ovl_is_private_xattr(s)) {
+		if (!ovl_can_list(s)) {
 			res -= slen;
 			memmove(s, s + slen, len);
 		} else {
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index bad0f66..de0d4f7 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -135,7 +135,7 @@ static struct dentry *ovl_get_origin(struct dentry *dentry,
 	 * Make sure that the stored uuid matches the uuid of the lower
 	 * layer where file handle will be decoded.
 	 */
-	if (uuid_be_cmp(fh->uuid, *(uuid_be *) &mnt->mnt_sb->s_uuid))
+	if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
 		goto out;
 
 	origin = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
@@ -169,17 +169,7 @@ static struct dentry *ovl_get_origin(struct dentry *dentry,
 
 static bool ovl_is_opaquedir(struct dentry *dentry)
 {
-	int res;
-	char val;
-
-	if (!d_is_dir(dentry))
-		return false;
-
-	res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-	if (res == 1 && val == 'y')
-		return true;
-
-	return false;
+	return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
 }
 
 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 	unsigned int ctr = 0;
 	struct inode *inode = NULL;
 	bool upperopaque = false;
+	bool upperimpure = false;
 	char *upperredirect = NULL;
 	struct dentry *this;
 	unsigned int i;
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 				poe = roe;
 		}
 		upperopaque = d.opaque;
+		if (upperdentry && d.is_dir)
+			upperimpure = ovl_is_impuredir(upperdentry);
 	}
 
 	if (!d.stop && poe->numlower) {
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
 	revert_creds(old_cred);
 	oe->opaque = upperopaque;
+	oe->impure = upperimpure;
 	oe->redirect = upperredirect;
 	oe->__upperdentry = upperdentry;
 	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index caa36cb..10863b4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -24,6 +24,7 @@ enum ovl_path_type {
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
 
 /*
  * The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
@@ -55,7 +56,7 @@ struct ovl_fh {
 	u8 len;		/* size of this header + size of fid */
 	u8 flags;	/* OVL_FH_FLAG_* */
 	u8 type;	/* fid_type of fid */
-	uuid_be uuid;	/* uuid of filesystem */
+	uuid_t uuid;	/* uuid of filesystem */
 	u8 fid[0];	/* file identifier */
 } __packed;
 
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
+bool ovl_dentry_is_impure(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
 bool ovl_redirect_dir(struct super_block *sb);
-void ovl_clear_redirect_dir(struct super_block *sb);
 const char *ovl_dentry_get_redirect(struct dentry *dentry);
 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry);
 void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+		       const char *name, const void *value, size_t size,
+		       int xerr);
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
+
+static inline bool ovl_is_impuredir(struct dentry *dentry)
+{
+	return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
+}
+
 
 /* namei.c */
 int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
-struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct dentry *ovl_lookup_temp(struct dentry *workdir);
 struct cattr {
 	dev_t rdev;
 	umode_t mode;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index b2023ddb..34bc4a9 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -28,6 +28,7 @@ struct ovl_fs {
 	/* creds of process who forced instantiation of super block */
 	const struct cred *creator_cred;
 	bool tmpfile;
+	bool noxattr;
 	wait_queue_head_t copyup_wq;
 	/* sb common to all layers */
 	struct super_block *same_sb;
@@ -42,6 +43,7 @@ struct ovl_entry {
 			u64 version;
 			const char *redirect;
 			bool opaque;
+			bool impure;
 			bool copying;
 		};
 		struct rcu_head rcu;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 9828b7d..4882ffb 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 				dput(temp);
 			else
 				pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+
+			/*
+			 * Check if upper/work fs supports trusted.overlay.*
+			 * xattr
+			 */
+			err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE,
+					      "0", 1, 0);
+			if (err) {
+				ufs->noxattr = true;
+				pr_warn("overlayfs: upper fs does not support xattr.\n");
+			} else {
+				vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE);
+			}
 		}
 	}
 
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	path_put(&workpath);
 	kfree(lowertmp);
 
-	oe->__upperdentry = upperpath.dentry;
+	if (upperpath.dentry) {
+		oe->__upperdentry = upperpath.dentry;
+		oe->impure = ovl_is_impuredir(upperpath.dentry);
+	}
 	for (i = 0; i < numlower; i++) {
 		oe->lowerstack[i].dentry = stack[i].dentry;
 		oe->lowerstack[i].mnt = ufs->lower_mnt[i];
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index cfdea47..8090489 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry)
 	return oe->opaque;
 }
 
+bool ovl_dentry_is_impure(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return oe->impure;
+}
+
 bool ovl_dentry_is_whiteout(struct dentry *dentry)
 {
 	return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb)
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
 
-	return ofs->config.redirect_dir;
-}
-
-void ovl_clear_redirect_dir(struct super_block *sb)
-{
-	struct ovl_fs *ofs = sb->s_fs_info;
-
-	ofs->config.redirect_dir = false;
+	return ofs->config.redirect_dir && !ofs->noxattr;
 }
 
 const char *ovl_dentry_get_redirect(struct dentry *dentry)
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry)
 	wake_up_locked(&ofs->copyup_wq);
 	spin_unlock(&ofs->copyup_wq.lock);
 }
+
+bool ovl_check_dir_xattr(struct dentry *dentry, const char *name)
+{
+	int res;
+	char val;
+
+	if (!d_is_dir(dentry))
+		return false;
+
+	res = vfs_getxattr(dentry, name, &val, 1);
+	if (res == 1 && val == 'y')
+		return true;
+
+	return false;
+}
+
+int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
+		       const char *name, const void *value, size_t size,
+		       int xerr)
+{
+	int err;
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+	if (ofs->noxattr)
+		return xerr;
+
+	err = ovl_do_setxattr(upperdentry, name, value, size, 0);
+
+	if (err == -EOPNOTSUPP) {
+		pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+		ofs->noxattr = true;
+		return xerr;
+	}
+
+	return err;
+}
+
+int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
+{
+	int err;
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	if (oe->impure)
+		return 0;
+
+	/*
+	 * Do not fail when upper doesn't support xattrs.
+	 * Upper inodes won't have an origin or redirect xattr anyway.
+	 */
+	err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE,
+				 "y", 1, 0);
+	if (!err)
+		oe->impure = true;
+
+	return err;
+}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 45f6bf6..f1e1927 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	if (!mmget_not_zero(mm))
 		goto free;
 
-	flags = write ? FOLL_WRITE : 0;
+	flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 
 	while (count > 0) {
 		int this_len = min_t(int, count, PAGE_SIZE);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0c8b33..520802d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ebf80c7..48813ae 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode)
 }
 EXPORT_SYMBOL(dquot_initialize);
 
+bool dquot_initialize_needed(struct inode *inode)
+{
+	struct dquot **dquots;
+	int i;
+
+	if (!dquot_active(inode))
+		return false;
+
+	dquots = i_dquot(inode);
+	for (i = 0; i < MAXQUOTAS; i++)
+		if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL(dquot_initialize_needed);
+
 /*
  * Release all quotas referenced by inode.
  *
diff --git a/fs/read_write.c b/fs/read_write.c
index 47c1d44..d591eee 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -678,16 +678,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
 	struct kiocb kiocb;
 	ssize_t ret;
 
-	if (flags & ~(RWF_HIPRI | RWF_DSYNC | RWF_SYNC))
-		return -EOPNOTSUPP;
-
 	init_sync_kiocb(&kiocb, filp);
-	if (flags & RWF_HIPRI)
-		kiocb.ki_flags |= IOCB_HIPRI;
-	if (flags & RWF_DSYNC)
-		kiocb.ki_flags |= IOCB_DSYNC;
-	if (flags & RWF_SYNC)
-		kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+	ret = kiocb_set_rw_flags(&kiocb, flags);
+	if (ret)
+		return ret;
 	kiocb.ki_pos = *ppos;
 
 	if (type == READ)
@@ -1285,7 +1279,7 @@ static size_t compat_writev(struct file *file,
 	if (!(file->f_mode & FMODE_CAN_WRITE))
 		goto out;
 
-	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
+	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags);
 
 out:
 	if (ret > 0)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index da01f49..a11d773 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s,
 		depth = reiserfs_write_unlock_nested(s);
 		if (reiserfs_barrier_flush(s))
 			__sync_dirty_buffer(jl->j_commit_bh,
-					REQ_PREFLUSH | REQ_FUA);
+					REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 		else
 			sync_dirty_buffer(jl->j_commit_bh);
 		reiserfs_write_lock_nested(s, depth);
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb,
 
 		if (reiserfs_barrier_flush(sb))
 			__sync_dirty_buffer(journal->j_header_bh,
-					REQ_PREFLUSH | REQ_FUA);
+					REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 		else
 			sync_dirty_buffer(journal->j_header_bh);
 
@@ -2956,7 +2956,7 @@ void reiserfs_wait_on_write_block(struct super_block *s)
 
 static void queue_log_writer(struct super_block *s)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct reiserfs_journal *journal = SB_JOURNAL(s);
 	set_bit(J_WRITERS_QUEUED, &journal->j_state);
 
diff --git a/fs/select.c b/fs/select.c
index d6c652a..5b524a9 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -180,7 +180,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
 	return table->entry++;
 }
 
-static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct poll_wqueues *pwq = wait->private;
 	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -206,7 +206,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
-static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct poll_table_entry *entry;
 
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e3d711..593b022 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -43,7 +43,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
 	if (likely(!waitqueue_active(wqh)))
 		return;
 
-	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
+	/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
 	wake_up_poll(wqh, POLLHUP | POLLFREE);
 }
 
diff --git a/fs/stat.c b/fs/stat.c
index f494b18..c356108 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
 		inode->i_bytes -= 512;
 	}
 }
+EXPORT_SYMBOL(__inode_add_bytes);
 
 void inode_add_bytes(struct inode *inode, loff_t bytes)
 {
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a0376a2..f80be4c 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 			ufs_error (sb, "ufs_free_fragments",
 				   "bit already cleared for fragment %u", i);
 	}
-	
+
+	inode_sub_bytes(inode, count << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 	uspi->cs_total.cs_nffree += count;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
 			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
 		}
 		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 			ufs_clusteracct (sb, ucpi, blkno, 1);
 
@@ -398,10 +400,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * There is not enough space for user on the device
 	 */
-	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-		mutex_unlock(&UFS_SB(sb)->s_lock);
-		UFSD("EXIT (FAILED)\n");
-		return 0;
+	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
+		if (!capable(CAP_SYS_RESOURCE)) {
+			mutex_unlock(&UFS_SB(sb)->s_lock);
+			UFSD("EXIT (FAILED)\n");
+			return 0;
+		}
 	}
 
 	if (goal >= uspi->s_size) 
@@ -419,12 +423,12 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 		if (result) {
 			ufs_clear_frags(inode, result + oldcount,
 					newcount - oldcount, locked_page != NULL);
+			*err = 0;
 			write_seqlock(&UFS_I(inode)->meta_lock);
 			ufs_cpu_to_data_ptr(sb, p, result);
-			write_sequnlock(&UFS_I(inode)->meta_lock);
-			*err = 0;
 			UFS_I(inode)->i_lastfrag =
 				max(UFS_I(inode)->i_lastfrag, fragment + count);
+			write_sequnlock(&UFS_I(inode)->meta_lock);
 		}
 		mutex_unlock(&UFS_SB(sb)->s_lock);
 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
@@ -437,8 +441,10 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
 	if (result) {
 		*err = 0;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
 		mutex_unlock(&UFS_SB(sb)->s_lock);
@@ -449,39 +455,29 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	/*
 	 * allocate new block and move data
 	 */
-	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
-	    case UFS_OPTSPACE:
+	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
 		request = newcount;
-		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
-		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
-	    default:
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-	
-	    case UFS_OPTTIME:
+		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
+	} else {
 		request = uspi->s_fpb;
-		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
-		    (uspi->s_minfree - 2) / 100)
-			break;
-		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
-		break;
+		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
+			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
 				locked_page != NULL);
+		mutex_unlock(&UFS_SB(sb)->s_lock);
 		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
 				   uspi->s_sbbase + tmp,
 				   uspi->s_sbbase + result, locked_page);
+		*err = 0;
 		write_seqlock(&UFS_I(inode)->meta_lock);
 		ufs_cpu_to_data_ptr(sb, p, result);
-		write_sequnlock(&UFS_I(inode)->meta_lock);
-		*err = 0;
 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
 						fragment + count);
-		mutex_unlock(&UFS_SB(sb)->s_lock);
+		write_sequnlock(&UFS_I(inode)->meta_lock);
 		if (newcount < request)
 			ufs_free_fragments (inode, result + newcount, request - newcount);
 		ufs_free_fragments (inode, tmp, oldcount);
@@ -494,6 +490,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	return 0;
 }		
 
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+	unsigned size = frags * i_blocksize(inode);
+	spin_lock(&inode->i_lock);
+	__inode_add_bytes(inode, size);
+	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+		__inode_sub_bytes(inode, size);
+		spin_unlock(&inode->i_lock);
+		return false;
+	}
+	spin_unlock(&inode->i_lock);
+	return true;
+}
+
 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 			     unsigned oldcount, unsigned newcount)
 {
@@ -530,6 +540,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 	for (i = oldcount; i < newcount; i++)
 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			return 0;
+
+	if (!try_add_frags(inode, count))
+		return 0;
 	/*
 	 * Block can be extended
 	 */
@@ -647,6 +660,7 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 		i = uspi->s_fpb - count;
 
+		inode_sub_bytes(inode, i << uspi->s_fshift);
 		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
 		uspi->cs_total.cs_nffree += i;
 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +671,8 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
 	if (result == INVBLOCK)
 		return 0;
+	if (!try_add_frags(inode, count))
+		return 0;
 	for (i = 0; i < count; i++)
 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
 	
@@ -716,6 +732,8 @@ static u64 ufs_alloccg_block(struct inode *inode,
 		return INVBLOCK;
 	ucpi->c_rotor = result;
 gotit:
+	if (!try_add_frags(inode, uspi->s_fpb))
+		return 0;
 	blkno = ufs_fragstoblks(result);
 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7e41aee..f36d6a5 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
 
 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
-				new_size, err, locked_page);
+				new_size - (lastfrag & uspi->s_fpbmask), err,
+				locked_page);
 	return tmp != 0;
 }
 
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
 			goal += uspi->s_fpb;
 	}
 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
-				goal, uspi->s_fpb, err, locked_page);
+				goal, nfrags, err, locked_page);
 
 	if (!tmp) {
 		*err = -ENOSPC;
@@ -400,11 +401,20 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 	u64 phys64 = 0;
 	unsigned frag = fragment & uspi->s_fpbmask;
 
-	if (!create) {
-		phys64 = ufs_frag_map(inode, offsets, depth);
-		goto out;
-	}
+	phys64 = ufs_frag_map(inode, offsets, depth);
+	if (!create)
+		goto done;
 
+	if (phys64) {
+		if (fragment >= UFS_NDIR_FRAGMENT)
+			goto done;
+		read_seqlock_excl(&UFS_I(inode)->meta_lock);
+		if (fragment < UFS_I(inode)->i_lastfrag) {
+			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+			goto done;
+		}
+		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
+	}
         /* This code entered only while writing ....? */
 
 	mutex_lock(&UFS_I(inode)->truncate_mutex);
@@ -448,6 +458,11 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 	}
 	mutex_unlock(&UFS_I(inode)->truncate_mutex);
 	return err;
+
+done:
+	if (phys64)
+		map_bh(bh_result, sb, phys64 + frag);
+	return 0;
 }
 
 static int ufs_writepage(struct page *page, struct writeback_control *wbc)
@@ -551,10 +566,8 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
 	/*
 	 * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -563,9 +576,9 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
 	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
 
 	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
-	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
-	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
-	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
+	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+	inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
 	inode->i_mtime.tv_nsec = 0;
 	inode->i_atime.tv_nsec = 0;
 	inode->i_ctime.tv_nsec = 0;
@@ -599,10 +612,8 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
 	 */
 	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
 	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
-	if (inode->i_nlink == 0) {
-		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
-		return -1;
-	}
+	if (inode->i_nlink == 0)
+		return -ESTALE;
 
         /*
          * Linux now has 32-bit uid and gid, so we can support EFT.
@@ -642,7 +653,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	struct buffer_head * bh;
 	struct inode *inode;
-	int err;
+	int err = -EIO;
 
 	UFSD("ENTER, ino %lu\n", ino);
 
@@ -677,9 +688,10 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 		err = ufs1_read_inode(inode,
 				      ufs_inode + ufs_inotofsbo(inode->i_ino));
 	}
-
+	brelse(bh);
 	if (err)
 		goto bad_inode;
+
 	inode->i_version++;
 	ufsi->i_lastfrag =
 		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
@@ -688,15 +700,13 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
 
 	ufs_set_inode_ops(inode);
 
-	brelse(bh);
-
 	UFSD("EXIT\n");
 	unlock_new_inode(inode);
 	return inode;
 
 bad_inode:
 	iget_failed(inode);
-	return ERR_PTR(-EIO);
+	return ERR_PTR(err);
 }
 
 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
@@ -841,8 +851,11 @@ void ufs_evict_inode(struct inode * inode)
 	truncate_inode_pages_final(&inode->i_data);
 	if (want_delete) {
 		inode->i_size = 0;
-		if (inode->i_blocks)
+		if (inode->i_blocks &&
+		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+		     S_ISLNK(inode->i_mode)))
 			ufs_truncate_blocks(inode);
+		ufs_update_inode(inode, inode_needs_sync(inode));
 	}
 
 	invalidate_inode_buffers(inode);
@@ -868,7 +881,6 @@ static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
 	ctx->to = from + count;
 }
 
-#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
 
 static void ufs_trunc_direct(struct inode *inode)
@@ -1100,25 +1112,30 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
        return err;
 }
 
-static void __ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block *sb = inode->i_sb;
 	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 	unsigned offsets[4];
-	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
+	int depth;
 	int depth2;
 	unsigned i;
 	struct ufs_buffer_head *ubh[3];
 	void *p;
 	u64 block;
 
-	if (!depth)
-		return;
+	if (inode->i_size) {
+		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
+		depth = ufs_block_to_path(inode, last, offsets);
+		if (!depth)
+			return;
+	} else {
+		depth = 1;
+	}
 
-	/* find the last non-zero in offsets[] */
 	for (depth2 = depth - 1; depth2; depth2--)
-		if (offsets[depth2])
+		if (offsets[depth2] != uspi->s_apb - 1)
 			break;
 
 	mutex_lock(&ufsi->truncate_mutex);
@@ -1127,9 +1144,8 @@ static void __ufs_truncate_blocks(struct inode *inode)
 		offsets[0] = UFS_IND_BLOCK;
 	} else {
 		/* get the blocks that should be partially emptied */
-		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
+		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
 		for (i = 0; i < depth2; i++) {
-			offsets[i]++;	/* next branch is fully freed */
 			block = ufs_data_ptr_to_cpu(sb, p);
 			if (!block)
 				break;
@@ -1140,7 +1156,7 @@ static void __ufs_truncate_blocks(struct inode *inode)
 				write_sequnlock(&ufsi->meta_lock);
 				break;
 			}
-			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
+			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
 		}
 		while (i--)
 			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
@@ -1155,7 +1171,9 @@ static void __ufs_truncate_blocks(struct inode *inode)
 			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
 		}
 	}
+	read_seqlock_excl(&ufsi->meta_lock);
 	ufsi->i_lastfrag = DIRECT_FRAGMENT;
+	read_sequnlock_excl(&ufsi->meta_lock);
 	mark_inode_dirty(inode);
 	mutex_unlock(&ufsi->truncate_mutex);
 }
@@ -1183,7 +1201,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
 
 	truncate_setsize(inode, size);
 
-	__ufs_truncate_blocks(inode);
+	ufs_truncate_blocks(inode);
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 	mark_inode_dirty(inode);
 out:
@@ -1191,16 +1209,6 @@ static int ufs_truncate(struct inode *inode, loff_t size)
 	return err;
 }
 
-static void ufs_truncate_blocks(struct inode *inode)
-{
-	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-	      S_ISLNK(inode->i_mode)))
-		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-		return;
-	__ufs_truncate_blocks(inode);
-}
-
 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 131b2b7..0a4f58a 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -480,7 +480,7 @@ static void ufs_setup_cstotal(struct super_block *sb)
 	usb3 = ubh_get_usb_third(uspi);
 
 	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
+	     (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) ||
 	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/* we have the statistics in a different place than usual */
 		uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
@@ -596,9 +596,7 @@ static void ufs_put_cstotal(struct super_block *sb)
 	usb2 = ubh_get_usb_second(uspi);
 	usb3 = ubh_get_usb_third(uspi);
 
-	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
-	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
-	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
+	if (mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 		/* we have the statistics in a different place than usual */
 		usb2->fs_un.fs_u2.cs_ndir =
 			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
@@ -608,16 +606,26 @@ static void ufs_put_cstotal(struct super_block *sb)
 			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
 		usb3->fs_un1.fs_u2.cs_nffree =
 			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
-	} else {
-		usb1->fs_cstotal.cs_ndir =
-			cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
-		usb1->fs_cstotal.cs_nbfree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
-		usb1->fs_cstotal.cs_nifree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
-		usb1->fs_cstotal.cs_nffree =
-			cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+		goto out;
 	}
+
+	if (mtype == UFS_MOUNT_UFSTYPE_44BSD &&
+	     (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) {
+		/* store stats in both old and new places */
+		usb2->fs_un.fs_u2.cs_ndir =
+			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
+		usb2->fs_un.fs_u2.cs_nbfree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
+		usb3->fs_un1.fs_u2.cs_nifree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
+		usb3->fs_un1.fs_u2.cs_nffree =
+			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
+	}
+	usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
+	usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
+	usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
+	usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
+out:
 	ubh_mark_buffer_dirty(USPI_UBH(uspi));
 	ufs_print_super_stuff(sb, usb1, usb2, usb3);
 	UFSD("EXIT\n");
@@ -746,6 +754,23 @@ static void ufs_put_super(struct super_block *sb)
 	return;
 }
 
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int bits = uspi->s_apbshift;
+	u64 res;
+
+	if (bits > 21)
+		res = ~0ULL;
+	else
+		res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+			(1LL << (3*bits));
+
+	if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+		return MAX_LFS_FILESIZE;
+	return res << uspi->s_bshift;
+}
+
 static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ufs_sb_info * sbi;
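
ufs_max_bytes() bounds s_maxbytes by how many blocks the direct, single-, double- and triple-indirect trees can address. A userspace sketch of the same arithmetic, assuming UFS_NDADDR is 12 direct pointers and using LLONG_MAX as a stand-in for MAX_LFS_FILESIZE (the sample geometries below are hypothetical):

/* ufs_max_bytes() arithmetic redone in userspace for illustration.
 * Assumptions not taken from the patch: UFS_NDADDR == 12, LLONG_MAX
 * stands in for MAX_LFS_FILESIZE.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t max_bytes(int apbshift, int bshift)
{
	uint64_t res;

	if (apbshift > 21)		/* 1 << (3 * 22) would overflow 64 bits */
		res = ~0ULL;
	else
		res = 12 + (1ULL << apbshift) + (1ULL << (2 * apbshift)) +
			(1ULL << (3 * apbshift));

	if (res >= ((uint64_t)LLONG_MAX >> bshift))
		return LLONG_MAX;
	return res << bshift;
}

int main(void)
{
	/* UFS2, 8 KiB blocks, 64-bit pointers: 1024 addresses per block */
	printf("8K/64-bit: %llu bytes\n", (unsigned long long)max_bytes(10, 13));
	/* UFS1, 8 KiB blocks, 32-bit pointers: 2048 addresses per block */
	printf("8K/32-bit: %llu bytes\n", (unsigned long long)max_bytes(11, 13));
	return 0;
}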
@@ -812,9 +837,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 	uspi->s_dirblksize = UFS_SECTOR_SIZE;
 	super_block_offset=UFS_SBLOCK;
 
-	/* Keep 2Gig file limit. Some UFS variants need to override 
-	   this but as I don't know which I'll let those in the know loosen
-	   the rules */
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
 	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
 	case UFS_MOUNT_UFSTYPE_44BSD:
 		UFSD("ufstype=44bsd\n");
@@ -980,6 +1004,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 		flags |=  UFS_ST_SUN;
 	}
 
+	if ((flags & UFS_ST_MASK) == UFS_ST_44BSD &&
+	    uspi->s_postblformat == UFS_42POSTBLFMT) {
+		if (!silent)
+			pr_err("this is not a 44bsd filesystem");
+		goto failed;
+	}
+
 	/*
 	 * Check ufs magic number
 	 */
@@ -1127,8 +1158,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 	uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
 
 	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-		uspi->s_u2_size  = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
-		uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
+		uspi->s_size  = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
+		uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
 	} else {
 		uspi->s_size  =  fs32_to_cpu(sb, usb1->fs_size);
 		uspi->s_dsize =  fs32_to_cpu(sb, usb1->fs_dsize);
@@ -1177,6 +1208,18 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 	uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff);
 	uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff);
 
+	uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize,
+					      uspi->s_minfree, 100);
+	if (uspi->s_minfree <= 5) {
+		uspi->s_time_to_space = ~0ULL;
+		uspi->s_space_to_time = 0;
+		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
+	} else {
+		uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1;
+		uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize,
+					      uspi->s_minfree - 2, 100) - 1;
+	}
+
 	/*
 	 * Compute other frequently used values
 	 */
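
The new mount-time fields pre-compute the root reservation and the two occupancy thresholds at which the allocator flips between time and space optimization. A hedged worked example with made-up numbers (plain 64-bit arithmetic stands in for mul_u64_u32_div()):

/* Worked example (hypothetical dsize/minfree) of the values set up
 * above: s_root_blocks, s_time_to_space and s_space_to_time.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dsize = 1000000;	/* data fragments, made up */
	uint32_t minfree = 8;		/* percent reserved for root */
	uint64_t root_blocks = dsize * minfree / 100;
	uint64_t time_to_space, space_to_time;

	if (minfree <= 5) {
		time_to_space = ~0ULL;	/* never switch to space optimization */
		space_to_time = 0;
	} else {
		time_to_space = root_blocks / 2 + 1;
		space_to_time = dsize * (minfree - 2) / 100 - 1;
	}
	printf("root_blocks=%llu time_to_space=%llu space_to_time=%llu\n",
	       (unsigned long long)root_blocks,
	       (unsigned long long)time_to_space,
	       (unsigned long long)space_to_time);
	return 0;	/* 80000, 40001, 59999 for these inputs */
}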
@@ -1212,6 +1255,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 			    "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
 		uspi->s_maxsymlinklen = maxsymlen;
 	}
+	sb->s_maxbytes = ufs_max_bytes(sb);
 	sb->s_max_links = UFS_LINK_MAX;
 
 	inode = ufs_iget(sb, UFS_ROOTINO);
@@ -1365,19 +1409,17 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	mutex_lock(&UFS_SB(sb)->s_lock);
 	usb3 = ubh_get_usb_third(uspi);
 	
-	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
 		buf->f_type = UFS2_MAGIC;
-		buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
-	} else {
+	else
 		buf->f_type = UFS_MAGIC;
-		buf->f_blocks = uspi->s_dsize;
-	}
-	buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree;
+
+	buf->f_blocks = uspi->s_dsize;
+	buf->f_bfree = ufs_freefrags(uspi);
 	buf->f_ffree = uspi->cs_total.cs_nifree;
 	buf->f_bsize = sb->s_blocksize;
-	buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
-		? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
+	buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks)
+		? (buf->f_bfree - uspi->s_root_blocks) : 0;
 	buf->f_files = uspi->s_ncg * uspi->s_ipg;
 	buf->f_namelen = UFS_MAXNAMLEN;
 	buf->f_fsid.val[0] = (u32)id;
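
With the reservation cached in s_root_blocks, statfs() no longer recomputes the minfree percentage in place. A tiny sketch of the new f_bavail clamp, reusing the hypothetical numbers from the earlier example:

/* The new f_bavail calculation in isolation: free fragments minus the
 * root reservation computed at mount time, clamped at zero.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bfree = 130000;	/* ufs_freefrags(): free frags, made up */
	uint64_t root_blocks = 80000;	/* uspi->s_root_blocks from the example above */
	uint64_t bavail = bfree > root_blocks ? bfree - root_blocks : 0;

	printf("f_bfree=%llu f_bavail=%llu\n",
	       (unsigned long long)bfree, (unsigned long long)bavail);
	return 0;	/* 130000, 50000 */
}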
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 0cbd5d3..150eef6 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -733,10 +733,8 @@ struct ufs_sb_private_info {
 	__u32	s_dblkno;	/* offset of first data after cg */
 	__u32	s_cgoffset;	/* cylinder group offset in cylinder */
 	__u32	s_cgmask;	/* used to calc mod fs_ntrak */
-	__u32	s_size;		/* number of blocks (fragments) in fs */
-	__u32	s_dsize;	/* number of data blocks in fs */
-	__u64	s_u2_size;	/* ufs2: number of blocks (fragments) in fs */
-	__u64	s_u2_dsize;	/*ufs2:  number of data blocks in fs */
+	__u64	s_size;		/* number of blocks (fragments) in fs */
+	__u64	s_dsize;	/* number of data blocks in fs */
 	__u32	s_ncg;		/* number of cylinder groups */
 	__u32	s_bsize;	/* size of basic blocks */
 	__u32	s_fsize;	/* size of fragments */
@@ -793,6 +791,9 @@ struct ufs_sb_private_info {
 	__u32	s_maxsymlinklen;/* upper limit on fast symlinks' size */
 	__s32	fs_magic;       /* filesystem magic */
 	unsigned int s_dirblksize;
+	__u64   s_root_blocks;
+	__u64	s_time_to_space;
+	__u64	s_space_to_time;
 };
 
 /*
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index f41ad0a..02497a4 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -243,9 +243,8 @@ ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev
 struct page *ufs_get_locked_page(struct address_space *mapping,
 				 pgoff_t index)
 {
-	struct page *page;
-
-	page = find_lock_page(mapping, index);
+	struct inode *inode = mapping->host;
+	struct page *page = find_lock_page(mapping, index);
 	if (!page) {
 		page = read_mapping_page(mapping, index, NULL);
 
@@ -253,7 +252,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "read_mapping_page error: ino %lu, index: %lu\n",
 			       mapping->host->i_ino, index);
-			goto out;
+			return page;
 		}
 
 		lock_page(page);
@@ -262,8 +261,7 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 			/* Truncate got there first */
 			unlock_page(page);
 			put_page(page);
-			page = NULL;
-			goto out;
+			return NULL;
 		}
 
 		if (!PageUptodate(page) || PageError(page)) {
@@ -272,11 +270,12 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
 
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "can not read page: ino %lu, index: %lu\n",
-			       mapping->host->i_ino, index);
+			       inode->i_ino, index);
 
-			page = ERR_PTR(-EIO);
+			return ERR_PTR(-EIO);
 		}
 	}
-out:
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 	return page;
 }
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53..9fc7119 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -350,16 +350,11 @@ static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
 #define ubh_blkmap(ubh,begin,bit) \
 	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
 
-/*
- * Determine the number of available frags given a
- * percentage to hold in reserve.
- */
 static inline u64
-ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
+ufs_freefrags(struct ufs_sb_private_info *uspi)
 {
 	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
-		uspi->cs_total.cs_nffree -
-		(uspi->s_dsize * (percentreserved) / 100);
+		uspi->cs_total.cs_nffree;
 }
 
 /*
@@ -473,15 +468,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
 	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
 {
+	u8 mask;
 	switch (uspi->s_fpb) {
 	case 8:
 	    	return (*ubh_get_addr (ubh, begin + block) == 0xff);
 	case 4:
-		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+		mask = 0x0f << ((block & 0x01) << 2);
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
 	case 2:
-		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+		mask = 0x03 << ((block & 0x03) << 1);
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
 	case 1:
-		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+		mask = 0x01 << (block & 0x07);
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
 	}
 	return 0;	
 }
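
The old _ubh_isblockset_() compared the whole bitmap byte for equality with the block's mask, which fails as soon as a neighbouring block sharing that byte is also free; the new code masks first. A self-contained userspace demonstration for the s_fpb == 4 case (two blocks per bitmap byte):

/* Demonstrates the _ubh_isblockset_() fix for s_fpb == 4.  Only the
 * bit logic is reimplemented here; the buffer-head plumbing is omitted.
 */
#include <stdio.h>

static int isset_old(unsigned char byte, unsigned block)
{
	return byte == (0x0f << ((block & 0x01) << 2));
}

static int isset_new(unsigned char byte, unsigned block)
{
	unsigned char mask = 0x0f << ((block & 0x01) << 2);

	return (byte & mask) == mask;
}

int main(void)
{
	unsigned char byte = 0xff;	/* both blocks in this byte are fully set */

	printf("old: block0=%d block1=%d\n", isset_old(byte, 0), isset_old(byte, 1));
	printf("new: block0=%d block1=%d\n", isset_new(byte, 0), isset_new(byte, 1));
	return 0;	/* old: 0 0 (wrong), new: 1 1 */
}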
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f7555fc..6148ccd 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -81,7 +81,7 @@ struct userfaultfd_unmap_ctx {
 
 struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
-	wait_queue_t wq;
+	wait_queue_entry_t wq;
 	struct userfaultfd_ctx *ctx;
 	bool waken;
 };
@@ -91,7 +91,7 @@ struct userfaultfd_wake_range {
 	unsigned long len;
 };
 
-static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
+static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
 				     int wake_flags, void *key)
 {
 	struct userfaultfd_wake_range *range = key;
@@ -129,7 +129,7 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 		 * wouldn't be enough, the smp_mb__before_spinlock is
 		 * enough to avoid an explicit smp_mb() here.
 		 */
-		list_del_init(&wq->task_list);
+		list_del_init(&wq->entry);
 out:
 	return ret;
 }
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
+
+	/*
+	 * We don't do userfault handling for the final child pid update.
+	 *
+	 * We also don't do userfault handling during
+	 * coredumping. hugetlbfs has the special
+	 * follow_hugetlb_page() to skip missing pages in the
+	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+	 * the no_page_table() helper in follow_page_mask(), but the
+	 * shmem_vm_ops->fault method is invoked even during
+	 * coredumping without mmap_sem and it ends up here.
+	 */
+	if (current->flags & (PF_EXITING|PF_DUMPCORE))
+		goto out;
+
+	/*
+	 * Coredumping runs without mmap_sem so we can only check that
+	 * the mmap_sem is held, if PF_DUMPCORE was not set.
+	 */
+	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
@@ -361,12 +380,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		goto out;
 
 	/*
-	 * We don't do userfault handling for the final child pid update.
-	 */
-	if (current->flags & PF_EXITING)
-		goto out;
-
-	/*
 	 * Check that we can return VM_FAULT_RETRY.
 	 *
 	 * NOTE: it should become possible to return VM_FAULT_RETRY
@@ -509,13 +522,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * and it's fine not to block on the spinlock. The uwq on this
 	 * kernel stack can be released after the list_del_init.
 	 */
-	if (!list_empty_careful(&uwq.wq.task_list)) {
+	if (!list_empty_careful(&uwq.wq.entry)) {
 		spin_lock(&ctx->fault_pending_wqh.lock);
 		/*
 		 * No need of list_del_init(), the uwq on the stack
 		 * will be freed shortly anyway.
 		 */
-		list_del(&uwq.wq.task_list);
+		list_del(&uwq.wq.entry);
 		spin_unlock(&ctx->fault_pending_wqh.lock);
 	}
 
@@ -847,7 +860,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 static inline struct userfaultfd_wait_queue *find_userfault_in(
 		wait_queue_head_t *wqh)
 {
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 	struct userfaultfd_wait_queue *uwq;
 
 	VM_BUG_ON(!spin_is_locked(&wqh->lock));
@@ -856,7 +869,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
 	if (!waitqueue_active(wqh))
 		goto out;
 	/* walk in reverse to provide FIFO behavior to read userfaults */
-	wq = list_last_entry(&wqh->task_list, typeof(*wq), task_list);
+	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
 	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 out:
 	return uwq;
@@ -990,14 +1003,14 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 			 * changes __remove_wait_queue() to use
 			 * list_del_init() in turn breaking the
 			 * !list_empty_careful() check in
-			 * handle_userfault(). The uwq->wq.task_list
+			 * handle_userfault(). The uwq->wq.head list
 			 * must never be empty at any time during the
 			 * refile, or the waitqueue could disappear
 			 * from under us. The "wait_queue_head_t"
 			 * parameter of __remove_wait_queue() is unused
 			 * anyway.
 			 */
-			list_del(&uwq->wq.task_list);
+			list_del(&uwq->wq.entry);
 			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
 			write_seqcount_end(&ctx->refile_seq);
@@ -1019,7 +1032,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 				fork_nctx = (struct userfaultfd_ctx *)
 					(unsigned long)
 					uwq->msg.arg.reserved.reserved1;
-				list_move(&uwq->wq.task_list, &fork_event);
+				list_move(&uwq->wq.entry, &fork_event);
 				spin_unlock(&ctx->event_wqh.lock);
 				ret = 0;
 				break;
@@ -1056,8 +1069,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 			if (!list_empty(&fork_event)) {
 				uwq = list_first_entry(&fork_event,
 						       typeof(*uwq),
-						       wq.task_list);
-				list_del(&uwq->wq.task_list);
+						       wq.entry);
+				list_del(&uwq->wq.entry);
 				__add_wait_queue(&ctx->event_wqh, &uwq->wq);
 				userfaultfd_event_complete(ctx, uwq);
 			}
@@ -1734,17 +1747,17 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 {
 	struct userfaultfd_ctx *ctx = f->private_data;
-	wait_queue_t *wq;
+	wait_queue_entry_t *wq;
 	struct userfaultfd_wait_queue *uwq;
 	unsigned long pending = 0, total = 0;
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
-	list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
+	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
 		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 		pending++;
 		total++;
 	}
-	list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
+	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
 		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 		total++;
 	}
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 5c90f82..a6e955b 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -98,8 +98,7 @@
 				   xfs_sysfs.o \
 				   xfs_trans.o \
 				   xfs_xattr.o \
-				   kmem.o \
-				   uuid.o
+				   kmem.o
 
 # low-level transaction/log code
 xfs-y				+= xfs_log.o \
diff --git a/fs/xfs/uuid.c b/fs/xfs/uuid.c
deleted file mode 100644
index b83f76b..0000000
--- a/fs/xfs/uuid.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include <xfs.h>
-
-/* IRIX interpretation of an uuid_t */
-typedef struct {
-	__be32	uu_timelow;
-	__be16	uu_timemid;
-	__be16	uu_timehi;
-	__be16	uu_clockseq;
-	__be16	uu_node[3];
-} xfs_uu_t;
-
-/*
- * uuid_getnodeuniq - obtain the node unique fields of a UUID.
- *
- * This is not in any way a standard or condoned UUID function;
- * it just something that's needed for user-level file handles.
- */
-void
-uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
-{
-	xfs_uu_t *uup = (xfs_uu_t *)uuid;
-
-	fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
-		   be16_to_cpu(uup->uu_timemid);
-	fsid[1] = be32_to_cpu(uup->uu_timelow);
-}
-
-int
-uuid_is_nil(uuid_t *uuid)
-{
-	int	i;
-	char	*cp = (char *)uuid;
-
-	if (uuid == NULL)
-		return 0;
-	/* implied check of version number here... */
-	for (i = 0; i < sizeof *uuid; i++)
-		if (*cp++) return 0;	/* not nil */
-	return 1;	/* is nil */
-}
-
-int
-uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
-{
-	return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
-}
diff --git a/fs/xfs/uuid.h b/fs/xfs/uuid.h
deleted file mode 100644
index 104db0f..0000000
--- a/fs/xfs/uuid.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_UUID_H__
-#define __XFS_SUPPORT_UUID_H__
-
-typedef struct {
-	unsigned char	__u_bits[16];
-} uuid_t;
-
-extern int uuid_is_nil(uuid_t *uuid);
-extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
-extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
-
-static inline void
-uuid_copy(uuid_t *dst, uuid_t *src)
-{
-	memcpy(dst, src, sizeof(uuid_t));
-}
-
-#endif	/* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 09af0f7..d20c29b 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -276,7 +276,7 @@ xfs_end_io(
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
-	int			error = ioend->io_bio->bi_error;
+	int			error;
 
 	/*
 	 * Just clean up the in-memory structures if the fs has been shut down.
@@ -289,6 +289,7 @@ xfs_end_io(
 	/*
 	 * Clean up any COW blocks on an I/O error.
 	 */
+	error = blk_status_to_errno(ioend->io_bio->bi_status);
 	if (unlikely(error)) {
 		switch (ioend->io_type) {
 		case XFS_IO_COW:
@@ -332,7 +333,7 @@ xfs_end_bio(
 	else if (ioend->io_append_trans)
 		queue_work(mp->m_data_workqueue, &ioend->io_work);
 	else
-		xfs_destroy_ioend(ioend, bio->bi_error);
+		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 }
 
 STATIC int
@@ -500,11 +501,12 @@ xfs_submit_ioend(
 	 * time.
 	 */
 	if (status) {
-		ioend->io_bio->bi_error = status;
+		ioend->io_bio->bi_status = errno_to_blk_status(status);
 		bio_endio(ioend->io_bio);
 		return status;
 	}
 
+	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
 	submit_bio(ioend->io_bio);
 	return 0;
 }
@@ -564,6 +566,7 @@ xfs_chain_bio(
 	bio_chain(ioend->io_bio, new);
 	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
 	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
 	submit_bio(ioend->io_bio);
 	ioend->io_bio = new;
 }
@@ -1316,9 +1319,12 @@ xfs_vm_bmap(
 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
 	 * bypasses the file system for actual I/O.  We really can't allow
 	 * that on reflinks inodes, so we have to skip out here.  And yes,
-	 * 0 is the magic code for a bmap error..
+	 * 0 is the magic code for a bmap error.
+	 *
+	 * Since we don't pass back blockdev info, we can't return bmap
+	 * information for rt files either.
 	 */
-	if (xfs_is_reflink_inode(ip))
+	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
 		return 0;
 
 	filemap_write_and_wait(mapping);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 62fa392..438505f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -97,12 +97,16 @@ static inline void
 xfs_buf_ioacct_inc(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+	if (bp->b_flags & XBF_NO_IOACCT)
 		return;
 
 	ASSERT(bp->b_flags & XBF_ASYNC);
-	bp->b_flags |= _XBF_IN_FLIGHT;
-	percpu_counter_inc(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_inc(&bp->b_target->bt_io_count);
+	}
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc(
  * freed and unaccount from the buftarg.
  */
 static inline void
+__xfs_buf_ioacct_dec(
+	struct xfs_buf	*bp)
+{
+	lockdep_assert_held(&bp->b_lock);
+
+	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_dec(&bp->b_target->bt_io_count);
+	}
+}
+
+static inline void
 xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	if (!(bp->b_flags & _XBF_IN_FLIGHT))
-		return;
-
-	bp->b_flags &= ~_XBF_IN_FLIGHT;
-	percpu_counter_dec(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -149,9 +163,9 @@ xfs_buf_stale(
 	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 	 * status now to preserve accounting consistency.
 	 */
-	xfs_buf_ioacct_dec(bp);
-
 	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -979,12 +993,12 @@ xfs_buf_rele(
 		 * ensures the decrement occurs only once per-buf.
 		 */
 		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-			xfs_buf_ioacct_dec(bp);
+			__xfs_buf_ioacct_dec(bp);
 		goto out_unlock;
 	}
 
 	/* the last reference has been dropped ... */
-	xfs_buf_ioacct_dec(bp);
+	__xfs_buf_ioacct_dec(bp);
 	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU take a new reference to the
@@ -1213,8 +1227,11 @@ xfs_buf_bio_end_io(
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (bio->bi_error)
-		cmpxchg(&bp->b_io_error, 0, bio->bi_error);
+	if (bio->bi_status) {
+		int error = blk_status_to_errno(bio->bi_status);
+
+		cmpxchg(&bp->b_io_error, 0, error);
+	}
 
 	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8d1d44f..1508121 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -63,7 +63,6 @@ typedef enum {
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT	 (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t;
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }, \
-	{ _XBF_IN_FLIGHT,	"IN_FLIGHT" }
+	{ _XBF_COMPOUND,	"COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
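
Together with the xfs_buf.c changes above, the in-flight flag now lives in b_state under b_lock, so increment and decrement are idempotent per buffer. A userspace sketch of that pattern, with a pthread mutex standing in for b_lock and a plain counter standing in for the percpu bt_io_count (compile with -pthread; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define BSTATE_IN_FLIGHT 0x1

struct buf {
	pthread_mutex_t lock;
	unsigned int state;
};

static long io_count;	/* counterpart of bt_io_count */

static void ioacct_inc(struct buf *bp)
{
	pthread_mutex_lock(&bp->lock);
	if (!(bp->state & BSTATE_IN_FLIGHT)) {
		bp->state |= BSTATE_IN_FLIGHT;
		io_count++;
	}
	pthread_mutex_unlock(&bp->lock);
}

static void ioacct_dec(struct buf *bp)
{
	pthread_mutex_lock(&bp->lock);
	if (bp->state & BSTATE_IN_FLIGHT) {
		bp->state &= ~BSTATE_IN_FLIGHT;
		io_count--;
	}
	pthread_mutex_unlock(&bp->lock);
}

int main(void)
{
	struct buf b = { .lock = PTHREAD_MUTEX_INITIALIZER, .state = 0 };

	ioacct_inc(&b);
	ioacct_inc(&b);	/* second submission path: no double count */
	ioacct_dec(&b);	/* e.g. stale path */
	ioacct_dec(&b);	/* e.g. final release: no underflow */
	printf("io_count=%ld state=%u\n", io_count, b.state);	/* 0 0 */
	return 0;
}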
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 5fb5a09..17f27a2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -237,7 +237,11 @@ xfs_file_dax_read(
 	if (!count)
 		return 0; /* skip atime */
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	}
 	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
@@ -541,7 +545,11 @@ xfs_file_dio_aio_write(
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
-	xfs_ilock(ip, iolock);
+	if (!xfs_ilock_nowait(ip, iolock)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		xfs_ilock(ip, iolock);
+	}
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -553,9 +561,15 @@ xfs_file_dio_aio_write(
 	 * otherwise demote the lock if we had to take the exclusive lock
 	 * for other reasons in xfs_file_aio_write_checks.
 	 */
-	if (unaligned_io)
-		inode_dio_wait(inode);
-	else if (iolock == XFS_IOLOCK_EXCL) {
+	if (unaligned_io) {
+		/* If we are going to wait for other DIO to finish, bail */
+		if (iocb->ki_flags & IOCB_NOWAIT) {
+			if (atomic_read(&inode->i_dio_count))
+				return -EAGAIN;
+		} else {
+			inode_dio_wait(inode);
+		}
+	} else if (iolock == XFS_IOLOCK_EXCL) {
 		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		iolock = XFS_IOLOCK_SHARED;
 	}
@@ -585,7 +599,12 @@ xfs_file_dax_write(
 	size_t			count;
 	loff_t			pos;
 
-	xfs_ilock(ip, iolock);
+	if (!xfs_ilock_nowait(ip, iolock)) {
+		if (iocb->ki_flags & IOCB_NOWAIT)
+			return -EAGAIN;
+		xfs_ilock(ip, iolock);
+	}
+
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
@@ -892,6 +911,7 @@ xfs_file_open(
 		return -EFBIG;
 	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
 		return -EIO;
+	file->f_mode |= FMODE_AIO_NOWAIT;
 	return 0;
 }
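
The repeated pattern in this file is: try the iolock first, return -EAGAIN only to IOCB_NOWAIT callers, and fall back to a blocking acquisition for everyone else. A userspace sketch of that shape, with a pthread mutex standing in for the XFS iolock (compile with -pthread; names are made up):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;

static int write_locked(bool nowait)
{
	if (pthread_mutex_trylock(&iolock) != 0) {
		if (nowait)
			return -EAGAIN;		/* tell the caller to retry later */
		pthread_mutex_lock(&iolock);	/* ordinary blocking path */
	}
	/* ... do the I/O under the lock ... */
	pthread_mutex_unlock(&iolock);
	return 0;
}

int main(void)
{
	printf("blocking: %d\n", write_locked(false));			/* 0 */
	pthread_mutex_lock(&iolock);					/* simulate contention */
	printf("nowait while contended: %d\n", write_locked(true));	/* -11 (-EAGAIN) */
	pthread_mutex_unlock(&iolock);
	return 0;
}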
 
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index f61c84f8..b9c12e1 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,6 @@ xfs_inode_alloc(
 
 	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(!xfs_isiflocked(ip));
 	ASSERT(ip->i_ino == 0);
 
@@ -190,7 +189,7 @@ xfs_perag_set_reclaim_tag(
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (pag->pag_ici_reclaimable++)
 		return;
 
@@ -212,7 +211,7 @@ xfs_perag_clear_reclaim_tag(
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (--pag->pag_ici_reclaimable)
 		return;
 
@@ -270,12 +269,12 @@ xfs_inew_wait(
 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
 
 	do {
-		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 		if (!xfs_iflags_test(ip, XFS_INEW))
 			break;
 		schedule();
 	} while (true);
-	finish_wait(wq, &wait.wait);
+	finish_wait(wq, &wait.wq_entry);
 }
 
 /*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ec9826c..c0a1e84 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -622,12 +622,12 @@ __xfs_iflock(
 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
 
 	do {
-		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 		if (xfs_isiflocked(ip))
 			io_schedule();
 	} while (!xfs_iflock_nowait(ip));
 
-	finish_wait(wq, &wait.wait);
+	finish_wait(wq, &wait.wq_entry);
 }
 
 STATIC uint
@@ -2486,11 +2486,11 @@ __xfs_iunpin_wait(
 	xfs_iunpin(ip);
 
 	do {
-		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 		if (xfs_ipincount(ip))
 			io_schedule();
 	} while (xfs_ipincount(ip));
-	finish_wait(wq, &wait.wait);
+	finish_wait(wq, &wait.wq_entry);
 }
 
 void
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 08cb7d1..013cc78 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -834,9 +834,7 @@ xfs_inode_item_format_convert(
 		in_f->ilf_dsize = in_f32->ilf_dsize;
 		in_f->ilf_ino = in_f32->ilf_ino;
 		/* copy biggest field of ilf_u */
-		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-		       in_f32->ilf_u.ilfu_uuid.__u_bits,
-		       sizeof(uuid_t));
+		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
 		in_f->ilf_blkno = in_f32->ilf_blkno;
 		in_f->ilf_len = in_f32->ilf_len;
 		in_f->ilf_boffset = in_f32->ilf_boffset;
@@ -851,9 +849,7 @@ xfs_inode_item_format_convert(
 		in_f->ilf_dsize = in_f64->ilf_dsize;
 		in_f->ilf_ino = in_f64->ilf_ino;
 		/* copy biggest field of ilf_u */
-		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-		       in_f64->ilf_u.ilfu_uuid.__u_bits,
-		       sizeof(uuid_t));
+		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
 		in_f->ilf_blkno = in_f64->ilf_blkno;
 		in_f->ilf_len = in_f64->ilf_len;
 		in_f->ilf_boffset = in_f64->ilf_boffset;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 94e5bdf..05dc87e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -995,6 +995,11 @@ xfs_file_iomap_begin(
 		lockmode = xfs_ilock_data_map_shared(ip);
 	}
 
+	if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+		error = -EAGAIN;
+		goto out_unlock;
+	}
+
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
 		length = mp->m_super->s_maxbytes - offset;
@@ -1016,6 +1021,15 @@ xfs_file_iomap_begin(
 
 	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
 		if (flags & IOMAP_DIRECT) {
+			/*
+			 * A reflinked inode will result in CoW alloc.
+			 * FIXME: It could still overwrite on unshared extents
+			 * and not need allocation.
+			 */
+			if (flags & IOMAP_NOWAIT) {
+				error = -EAGAIN;
+				goto out_unlock;
+			}
 			/* may drop and re-acquire the ilock */
 			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
 					&lockmode);
@@ -1033,6 +1047,14 @@ xfs_file_iomap_begin(
 
 	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
 		/*
+		 * If nowait is set bail since we are going to make
+		 * allocations.
+		 */
+		if (flags & IOMAP_NOWAIT) {
+			error = -EAGAIN;
+			goto out_unlock;
+		}
+		/*
 		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
 		 * pages to keep the chunks of work done somewhat symmetric
 		 * with the work writeback does. This is a completely arbitrary
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 044fb0e..2d167fe 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -19,6 +19,7 @@
 #define __XFS_LINUX__
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /*
  * Kernel specific type declarations for XFS
@@ -42,7 +43,6 @@ typedef __u32			xfs_nlink_t;
 
 #include "kmem.h"
 #include "mrlock.h"
-#include "uuid.h"
 
 #include <linux/semaphore.h>
 #include <linux/mm.h>
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index cd0b077..8cec1e5 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -352,13 +352,13 @@ xlog_header_check_mount(
 {
 	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 
-	if (uuid_is_nil(&head->h_fs_uuid)) {
+	if (uuid_is_null(&head->h_fs_uuid)) {
 		/*
 		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
-		 * h_fs_uuid is nil, we assume this log was last mounted
+		 * h_fs_uuid is null, we assume this log was last mounted
 		 * by IRIX and continue.
 		 */
-		xfs_warn(mp, "nil uuid in log - IRIX style log");
+		xfs_warn(mp, "null uuid in log - IRIX style log");
 	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
 		xfs_warn(mp, "log has mismatched uuid - can't recover");
 		xlog_header_check_dump(mp, head);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 2eaf818..d249546 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -74,20 +74,19 @@ xfs_uuid_mount(
 	int			hole, i;
 
 	/* Publish UUID in struct super_block */
-	BUILD_BUG_ON(sizeof(mp->m_super->s_uuid) != sizeof(uuid_t));
-	memcpy(&mp->m_super->s_uuid, uuid, sizeof(uuid_t));
+	uuid_copy(&mp->m_super->s_uuid, uuid);
 
 	if (mp->m_flags & XFS_MOUNT_NOUUID)
 		return 0;
 
-	if (uuid_is_nil(uuid)) {
-		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
+	if (uuid_is_null(uuid)) {
+		xfs_warn(mp, "Filesystem has null UUID - can't mount");
 		return -EINVAL;
 	}
 
 	mutex_lock(&xfs_uuid_table_mutex);
 	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
-		if (uuid_is_nil(&xfs_uuid_table[i])) {
+		if (uuid_is_null(&xfs_uuid_table[i])) {
 			hole = i;
 			continue;
 		}
@@ -124,7 +123,7 @@ xfs_uuid_unmount(
 
 	mutex_lock(&xfs_uuid_table_mutex);
 	for (i = 0; i < xfs_uuid_table_size; i++) {
-		if (uuid_is_nil(&xfs_uuid_table[i]))
+		if (uuid_is_null(&xfs_uuid_table[i]))
 			continue;
 		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
 			continue;
@@ -793,7 +792,10 @@ xfs_mountfs(
 	 *  Copies the low order bits of the timestamp and the randomly
 	 *  set "sequence" number out of a UUID.
 	 */
-	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
+	mp->m_fixedfsid[0] =
+		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
+		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
+	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
 
 	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
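
With the IRIX uuid helpers gone, the fixedfsid words are extracted directly from the raw UUID bytes. A userspace sketch of the same extraction, with plain shifts in place of get_unaligned_be16/be32 and a made-up UUID:

#include <stdint.h>
#include <stdio.h>

static uint16_t be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t uuid[16] = { 0x01, 0x02, 0x03, 0x04, 0x11, 0x22, 0x33, 0x44,
			     0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x99 };
	uint32_t fsid[2];

	/* same fields as mp->m_fixedfsid above: clock sequence + time mid,
	 * then time low, all read big-endian from the raw bytes */
	fsid[0] = ((uint32_t)be16(&uuid[8]) << 16) | be16(&uuid[4]);
	fsid[1] = be32(&uuid[0]);
	printf("fsid = %08x %08x\n", fsid[0], fsid[1]);	/* aabb1122 01020304 */
	return 0;
}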
 
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 455a575..97df4db 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1766,7 +1766,8 @@ STATIC int __init
 xfs_init_zones(void)
 {
 	xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
-			offsetof(struct xfs_ioend, io_inline_bio));
+			offsetof(struct xfs_ioend, io_inline_bio),
+			BIOSET_NEED_BVECS);
 	if (!xfs_ioend_bioset)
 		goto out;
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 197f3ff..68bc6be 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -61,17 +61,18 @@ bool acpi_ata_match(acpi_handle handle);
 bool acpi_bay_match(acpi_handle handle);
 bool acpi_dock_match(acpi_handle handle);
 
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
-union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
+union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
 			u64 rev, u64 func, union acpi_object *argv4);
 
 static inline union acpi_object *
-acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
-			union acpi_object *argv4, acpi_object_type type)
+acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
+			u64 func, union acpi_object *argv4,
+			acpi_object_type type)
 {
 	union acpi_object *obj;
 
-	obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+	obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);
 	if (obj && obj->type != type) {
 		ACPI_FREE(obj);
 		obj = NULL;
@@ -210,7 +211,8 @@ struct acpi_device_flags {
 	u32 of_compatible_ok:1;
 	u32 coherent_dma:1;
 	u32 cca_seen:1;
-	u32 reserved:20;
+	u32 spi_i2c_slave:1;
+	u32 reserved:19;
 };
 
 /* File System */
@@ -313,13 +315,12 @@ struct acpi_device_perf {
 /* Wakeup Management */
 struct acpi_device_wakeup_flags {
 	u8 valid:1;		/* Can successfully enable wakeup? */
-	u8 run_wake:1;		/* Run-Wake GPE devices */
 	u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 	u8 enabled:1;		/* Enabled for wakeup */
 };
 
 struct acpi_device_wakeup_context {
-	struct work_struct work;
+	void (*func)(struct acpi_device_wakeup_context *context);
 	struct device *dev;
 };
 
@@ -598,15 +599,20 @@ static inline bool acpi_device_always_present(struct acpi_device *adev)
 #endif
 
 #ifdef CONFIG_PM
+void acpi_pm_wakeup_event(struct device *dev);
 acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
-				 void (*work_func)(struct work_struct *work));
+			void (*func)(struct acpi_device_wakeup_context *context));
 acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
+bool acpi_pm_device_can_wakeup(struct device *dev);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
-int acpi_pm_device_run_wake(struct device *, bool);
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
 #else
+static inline void acpi_pm_wakeup_event(struct device *dev)
+{
+}
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
 					       struct device *dev,
-				               void (*work_func)(struct work_struct *work))
+					       void (*func)(struct acpi_device_wakeup_context *context))
 {
 	return AE_SUPPORT;
 }
@@ -614,6 +620,10 @@ static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
 {
 	return AE_SUPPORT;
 }
+static inline bool acpi_pm_device_can_wakeup(struct device *dev)
+{
+	return false;
+}
 static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
 {
 	if (p)
@@ -622,16 +632,7 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
 	return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
 		m : ACPI_STATE_D0;
 }
-static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
-{
-	return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-int acpi_pm_device_sleep_wake(struct device *, bool);
-#else
-static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
 {
 	return -ENODEV;
 }
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 15c86ce..a59c44c 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20170303
+#define ACPI_CA_VERSION                 0x20170531
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index f0f7403..343dbdc 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -289,6 +289,11 @@ union acpi_resource_attribute {
 	u8 type_specific;
 };
 
+struct acpi_resource_label {
+	u16 string_length;
+	char *string_ptr;
+};
+
 struct acpi_resource_source {
 	u8 index;
 	u16 string_length;
@@ -534,6 +539,81 @@ struct acpi_resource_uart_serialbus {
 #define ACPI_UART_CLEAR_TO_SEND                 (1<<6)
 #define ACPI_UART_REQUEST_TO_SEND               (1<<7)
 
+struct acpi_resource_pin_function {
+	u8 revision_id;
+	u8 pin_config;
+	u8 sharable;		/* For values, see Interrupt Attributes above */
+	u16 function_number;
+	u16 pin_table_length;
+	u16 vendor_length;
+	struct acpi_resource_source resource_source;
+	u16 *pin_table;
+	u8 *vendor_data;
+};
+
+struct acpi_resource_pin_config {
+	u8 revision_id;
+	u8 producer_consumer;	/* For values, see Producer/Consumer above */
+	u8 sharable;		/* For values, see Interrupt Attributes above */
+	u8 pin_config_type;
+	u32 pin_config_value;
+	u16 pin_table_length;
+	u16 vendor_length;
+	struct acpi_resource_source resource_source;
+	u16 *pin_table;
+	u8 *vendor_data;
+};
+
+/* Values for pin_config_type field above */
+
+#define ACPI_PIN_CONFIG_DEFAULT                 0
+#define ACPI_PIN_CONFIG_BIAS_PULL_UP            1
+#define ACPI_PIN_CONFIG_BIAS_PULL_DOWN          2
+#define ACPI_PIN_CONFIG_BIAS_DEFAULT            3
+#define ACPI_PIN_CONFIG_BIAS_DISABLE            4
+#define ACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE     5
+#define ACPI_PIN_CONFIG_BIAS_BUS_HOLD           6
+#define ACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN        7
+#define ACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE       8
+#define ACPI_PIN_CONFIG_DRIVE_PUSH_PULL         9
+#define ACPI_PIN_CONFIG_DRIVE_STRENGTH          10
+#define ACPI_PIN_CONFIG_SLEW_RATE               11
+#define ACPI_PIN_CONFIG_INPUT_DEBOUNCE          12
+#define ACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER   13
+
+struct acpi_resource_pin_group {
+	u8 revision_id;
+	u8 producer_consumer;	/* For values, see Producer/Consumer above */
+	u16 pin_table_length;
+	u16 vendor_length;
+	u16 *pin_table;
+	struct acpi_resource_label resource_label;
+	u8 *vendor_data;
+};
+
+struct acpi_resource_pin_group_function {
+	u8 revision_id;
+	u8 producer_consumer;	/* For values, see Producer/Consumer above */
+	u8 sharable;		/* For values, see Interrupt Attributes above */
+	u16 function_number;
+	u16 vendor_length;
+	struct acpi_resource_source resource_source;
+	struct acpi_resource_label resource_source_label;
+	u8 *vendor_data;
+};
+
+struct acpi_resource_pin_group_config {
+	u8 revision_id;
+	u8 producer_consumer;	/* For values, see Producer/Consumer above */
+	u8 sharable;		/* For values, see Interrupt Attributes above */
+	u8 pin_config_type;	/* For values, see pin_config_type above */
+	u32 pin_config_value;
+	u16 vendor_length;
+	struct acpi_resource_source resource_source;
+	struct acpi_resource_label resource_source_label;
+	u8 *vendor_data;
+};
+
 /* ACPI_RESOURCE_TYPEs */
 
 #define ACPI_RESOURCE_TYPE_IRQ                  0
@@ -556,7 +636,12 @@ struct acpi_resource_uart_serialbus {
 #define ACPI_RESOURCE_TYPE_GPIO                 17	/* ACPI 5.0 */
 #define ACPI_RESOURCE_TYPE_FIXED_DMA            18	/* ACPI 5.0 */
 #define ACPI_RESOURCE_TYPE_SERIAL_BUS           19	/* ACPI 5.0 */
-#define ACPI_RESOURCE_TYPE_MAX                  19
+#define ACPI_RESOURCE_TYPE_PIN_FUNCTION         20	/* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_CONFIG           21	/* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP            22	/* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION   23	/* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG     24	/* ACPI 6.2 */
+#define ACPI_RESOURCE_TYPE_MAX                  24
 
 /* Master union for resource descriptors */
 
@@ -584,6 +669,11 @@ union acpi_resource_data {
 	struct acpi_resource_spi_serialbus spi_serial_bus;
 	struct acpi_resource_uart_serialbus uart_serial_bus;
 	struct acpi_resource_common_serialbus common_serial_bus;
+	struct acpi_resource_pin_function pin_function;
+	struct acpi_resource_pin_config pin_config;
+	struct acpi_resource_pin_group pin_group;
+	struct acpi_resource_pin_group_function pin_group_function;
+	struct acpi_resource_pin_group_config pin_group_config;
 
 	/* Common fields */
 
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d92543f..bdc55c0 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -374,6 +374,20 @@ struct acpi_table_desc {
 	u16 validation_count;
 };
 
+/*
+ * Maximum value of the validation_count field in struct acpi_table_desc.
+ * When reached, validation_count cannot be changed any more and the table will
+ * be permanently regarded as validated.
+ *
+ * This is to prevent situations in which unbalanced table get/put operations
+ * may cause premature table unmapping in the OS to happen.
+ *
+ * The maximum validation count can be defined to any value, but should be
+ * greater than the maximum number of OS early stage mapping slots to avoid
+ * leaking early stage table mappings to the late stage.
+ */
+#define ACPI_MAX_TABLE_VALIDATIONS          ACPI_UINT16_MAX
+
 /* Masks for Flags field above */
 
 #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL  (0)	/* Virtual address, external maintained */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index b4ce55c..6b8714a 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -65,9 +65,11 @@
 #define ACPI_SIG_ECDT           "ECDT"	/* Embedded Controller Boot Resources Table */
 #define ACPI_SIG_EINJ           "EINJ"	/* Error Injection table */
 #define ACPI_SIG_ERST           "ERST"	/* Error Record Serialization Table */
+#define ACPI_SIG_HMAT           "HMAT"	/* Heterogeneous Memory Attributes Table */
 #define ACPI_SIG_HEST           "HEST"	/* Hardware Error Source Table */
 #define ACPI_SIG_MADT           "APIC"	/* Multiple APIC Description Table */
 #define ACPI_SIG_MSCT           "MSCT"	/* Maximum System Characteristics Table */
+#define ACPI_SIG_PPTT           "PPTT"	/* Processor Properties Topology Table */
 #define ACPI_SIG_SBST           "SBST"	/* Smart Battery Specification Table */
 #define ACPI_SIG_SLIT           "SLIT"	/* System Locality Distance Information Table */
 #define ACPI_SIG_SRAT           "SRAT"	/* System Resource Affinity Table */
@@ -430,7 +432,8 @@ enum acpi_hest_types {
 	ACPI_HEST_TYPE_AER_BRIDGE = 8,
 	ACPI_HEST_TYPE_GENERIC_ERROR = 9,
 	ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
-	ACPI_HEST_TYPE_RESERVED = 11	/* 11 and greater are reserved */
+	ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11,
+	ACPI_HEST_TYPE_RESERVED = 12	/* 12 and greater are reserved */
 };
 
 /*
@@ -476,6 +479,7 @@ struct acpi_hest_aer_common {
 
 #define ACPI_HEST_FIRMWARE_FIRST        (1)
 #define ACPI_HEST_GLOBAL                (1<<1)
+#define ACPI_HEST_GHES_ASSIST           (1<<2)
 
 /*
  * Macros to access the bus/segment numbers in Bus field above:
@@ -513,7 +517,8 @@ enum acpi_hest_notify_types {
 	ACPI_HEST_NOTIFY_SEA = 8,	/* ACPI 6.1 */
 	ACPI_HEST_NOTIFY_SEI = 9,	/* ACPI 6.1 */
 	ACPI_HEST_NOTIFY_GSIV = 10,	/* ACPI 6.1 */
-	ACPI_HEST_NOTIFY_RESERVED = 11	/* 11 and greater are reserved */
+	ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11,	/* ACPI 6.2 */
+	ACPI_HEST_NOTIFY_RESERVED = 12	/* 12 and greater are reserved */
 };
 
 /* Values for config_write_enable bitfield above */
@@ -534,7 +539,7 @@ enum acpi_hest_notify_types {
 struct acpi_hest_ia_machine_check {
 	struct acpi_hest_header header;
 	u16 reserved1;
-	u8 flags;
+	u8 flags;		/* See flags ACPI_HEST_GLOBAL, etc. above */
 	u8 enabled;
 	u32 records_to_preallocate;
 	u32 max_sections_per_record;
@@ -549,7 +554,7 @@ struct acpi_hest_ia_machine_check {
 struct acpi_hest_ia_corrected {
 	struct acpi_hest_header header;
 	u16 reserved1;
-	u8 flags;
+	u8 flags;		/* See flags ACPI_HEST_GLOBAL, etc. above */
 	u8 enabled;
 	u32 records_to_preallocate;
 	u32 max_sections_per_record;
@@ -686,6 +691,136 @@ struct acpi_hest_generic_data_v300 {
 #define ACPI_HEST_GEN_VALID_FRU_STRING      (1<<1)
 #define ACPI_HEST_GEN_VALID_TIMESTAMP       (1<<2)
 
+/* 11: IA32 Deferred Machine Check Exception (ACPI 6.2) */
+
+struct acpi_hest_ia_deferred_check {
+	struct acpi_hest_header header;
+	u16 reserved1;
+	u8 flags;		/* See flags ACPI_HEST_GLOBAL, etc. above */
+	u8 enabled;
+	u32 records_to_preallocate;
+	u32 max_sections_per_record;
+	struct acpi_hest_notify notify;
+	u8 num_hardware_banks;
+	u8 reserved2[3];
+};
+
+/*******************************************************************************
+ *
+ * HMAT - Heterogeneous Memory Attributes Table (ACPI 6.2)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_hmat {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 reserved;
+};
+
+/* Values for HMAT structure types */
+
+enum acpi_hmat_type {
+	ACPI_HMAT_TYPE_ADDRESS_RANGE = 0,	/* Memory subsystem address range */
+	ACPI_HMAT_TYPE_LOCALITY = 1,	/* System locality latency and bandwidth information */
+	ACPI_HMAT_TYPE_CACHE = 2,	/* Memory side cache information */
+	ACPI_HMAT_TYPE_RESERVED = 3	/* 3 and greater are reserved */
+};
+
+struct acpi_hmat_structure {
+	u16 type;
+	u16 reserved;
+	u32 length;
+};
+
+/*
+ * HMAT Structures, correspond to Type in struct acpi_hmat_structure
+ */
+
+/* 0: Memory subsystem address range */
+
+struct acpi_hmat_address_range {
+	struct acpi_hmat_structure header;
+	u16 flags;
+	u16 reserved1;
+	u32 processor_PD;	/* Processor proximity domain */
+	u32 memory_PD;		/* Memory proximity domain */
+	u32 reserved2;
+	u64 physical_address_base;	/* Physical address range base */
+	u64 physical_address_length;	/* Physical address range length */
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_HMAT_PROCESSOR_PD_VALID    (1)	/* 1: processor_PD field is valid */
+#define ACPI_HMAT_MEMORY_PD_VALID       (1<<1)	/* 1: memory_PD field is valid */
+#define ACPI_HMAT_RESERVATION_HINT      (1<<2)	/* 1: Reservation hint */
+
+/* 1: System locality latency and bandwidth information */
+
+struct acpi_hmat_locality {
+	struct acpi_hmat_structure header;
+	u8 flags;
+	u8 data_type;
+	u16 reserved1;
+	u32 number_of_initiator_Pds;
+	u32 number_of_target_Pds;
+	u32 reserved2;
+	u64 entry_base_unit;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_HMAT_MEMORY_HIERARCHY  (0x0F)
+
+/* Values for Memory Hierarchy flag */
+
+#define ACPI_HMAT_MEMORY            0
+#define ACPI_HMAT_LAST_LEVEL_CACHE  1
+#define ACPI_HMAT_1ST_LEVEL_CACHE   2
+#define ACPI_HMAT_2ND_LEVEL_CACHE   3
+#define ACPI_HMAT_3RD_LEVEL_CACHE   4
+
+/* Values for data_type field above */
+
+#define ACPI_HMAT_ACCESS_LATENCY    0
+#define ACPI_HMAT_READ_LATENCY      1
+#define ACPI_HMAT_WRITE_LATENCY     2
+#define ACPI_HMAT_ACCESS_BANDWIDTH  3
+#define ACPI_HMAT_READ_BANDWIDTH    4
+#define ACPI_HMAT_WRITE_BANDWIDTH   5
+
+/* 2: Memory side cache information */
+
+struct acpi_hmat_cache {
+	struct acpi_hmat_structure header;
+	u32 memory_PD;
+	u32 reserved1;
+	u64 cache_size;
+	u32 cache_attributes;
+	u16 reserved2;
+	u16 number_of_SMBIOShandles;
+};
+
+/* Masks for cache_attributes field above */
+
+#define ACPI_HMAT_TOTAL_CACHE_LEVEL     (0x0000000F)
+#define ACPI_HMAT_CACHE_LEVEL           (0x000000F0)
+#define ACPI_HMAT_CACHE_ASSOCIATIVITY   (0x00000F00)
+#define ACPI_HMAT_WRITE_POLICY          (0x0000F000)
+#define ACPI_HMAT_CACHE_LINE_SIZE       (0xFFFF0000)
+
+/* Values for cache associativity flag */
+
+#define ACPI_HMAT_CA_NONE                     (0)
+#define ACPI_HMAT_CA_DIRECT_MAPPED            (1)
+#define ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING   (2)
+
+/* Values for write policy flag */
+
+#define ACPI_HMAT_CP_NONE   (0)
+#define ACPI_HMAT_CP_WB     (1)
+#define ACPI_HMAT_CP_WT     (2)
+
 /*******************************************************************************
  *
  * MADT - Multiple APIC Description Table
@@ -705,8 +840,8 @@ struct acpi_table_madt {
 
 /* Values for PCATCompat flag */
 
-#define ACPI_MADT_DUAL_PIC          0
-#define ACPI_MADT_MULTIPLE_APIC     1
+#define ACPI_MADT_DUAL_PIC          1
+#define ACPI_MADT_MULTIPLE_APIC     0
 
 /* Values for MADT subtable type in struct acpi_subtable_header */
 
@@ -1147,6 +1282,85 @@ struct acpi_nfit_flush_address {
 
 /*******************************************************************************
  *
+ * PPTT - Processor Properties Topology Table (ACPI 6.2)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_pptt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+};
+
+/* Values for Type field above */
+
+enum acpi_pptt_type {
+	ACPI_PPTT_TYPE_PROCESSOR = 0,
+	ACPI_PPTT_TYPE_CACHE = 1,
+	ACPI_PPTT_TYPE_ID = 2,
+	ACPI_PPTT_TYPE_RESERVED = 3
+};
+
+/* 0: Processor Hierarchy Node Structure */
+
+struct acpi_pptt_processor {
+	struct acpi_subtable_header header;
+	u16 reserved;
+	u32 flags;
+	u32 parent;
+	u32 acpi_processor_id;
+	u32 number_of_priv_resources;
+};
+
+/* Flags */
+
+#define ACPI_PPTT_PHYSICAL_PACKAGE          (1)	/* Physical package */
+#define ACPI_PPTT_ACPI_PROCESSOR_ID_VALID   (2)	/* ACPI Processor ID valid */
+
+/* 1: Cache Type Structure */
+
+struct acpi_pptt_cache {
+	struct acpi_subtable_header header;
+	u16 reserved;
+	u32 flags;
+	u32 next_level_of_cache;
+	u32 size;
+	u32 number_of_sets;
+	u8 associativity;
+	u8 attributes;
+	u16 line_size;
+};
+
+/* Flags */
+
+#define ACPI_PPTT_SIZE_PROPERTY_VALID       (1)	/* Physical property valid */
+#define ACPI_PPTT_NUMBER_OF_SETS_VALID      (1<<1)	/* Number of sets valid */
+#define ACPI_PPTT_ASSOCIATIVITY_VALID       (1<<2)	/* Associativity valid */
+#define ACPI_PPTT_ALLOCATION_TYPE_VALID     (1<<3)	/* Allocation type valid */
+#define ACPI_PPTT_CACHE_TYPE_VALID          (1<<4)	/* Cache type valid */
+#define ACPI_PPTT_WRITE_POLICY_VALID        (1<<5)	/* Write policy valid */
+#define ACPI_PPTT_LINE_SIZE_VALID           (1<<6)	/* Line size valid */
+
+/* Masks for Attributes */
+
+#define ACPI_PPTT_MASK_ALLOCATION_TYPE      (0x03)	/* Allocation type */
+#define ACPI_PPTT_MASK_CACHE_TYPE           (0x0C)	/* Cache type */
+#define ACPI_PPTT_MASK_WRITE_POLICY         (0x10)	/* Write policy */
+
+/* 2: ID Structure */
+
+struct acpi_pptt_id {
+	struct acpi_subtable_header header;
+	u16 reserved;
+	u32 vendor_id;
+	u64 level1_id;
+	u64 level2_id;
+	u16 major_rev;
+	u16 minor_rev;
+	u16 spin_rev;
+};
+
+/*******************************************************************************
+ *
  * SBST - Smart Battery Specification Table
  *        Version 1
  *
@@ -1192,7 +1406,8 @@ enum acpi_srat_type {
 	ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
 	ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
 	ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
-	ACPI_SRAT_TYPE_RESERVED = 4	/* 4 and greater are reserved */
+	ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4,	/* ACPI 6.2 */
+	ACPI_SRAT_TYPE_RESERVED = 5	/* 5 and greater are reserved */
 };
 
 /*
@@ -1264,6 +1479,15 @@ struct acpi_srat_gicc_affinity {
 
 #define ACPI_SRAT_GICC_ENABLED     (1)	/* 00: Use affinity structure */
 
+/* 4: GCC ITS Affinity (ACPI 6.2) */
+
+struct acpi_srat_gic_its_affinity {
+	struct acpi_subtable_header header;
+	u32 proximity_domain;
+	u16 reserved;
+	u32 its_id;
+};
+
 /* Reset to default packing */
 
 #pragma pack()
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index faa9f2c..707dda74 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -87,6 +87,7 @@
 #define ACPI_SIG_WDAT           "WDAT"	/* Watchdog Action Table */
 #define ACPI_SIG_WDDT           "WDDT"	/* Watchdog Timer Description Table */
 #define ACPI_SIG_WDRT           "WDRT"	/* Watchdog Resource Table */
+#define ACPI_SIG_WSMT           "WSMT"	/* Windows SMM Security Migrations Table */
 #define ACPI_SIG_XXXX           "XXXX"	/* Intermediate AML header for ASL/ASL+ converter */
 
 #ifdef ACPI_UNDEFINED_TABLES
@@ -1220,7 +1221,8 @@ enum acpi_spmi_interface_types {
  *        Version 2
  *
  * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
- * December 19, 2014
+ * Version 1.2, Revision 8
+ * February 27, 2017
  *
  * NOTE: There are two versions of the table with the same signature --
  * the client version and the server version. The common platform_class
@@ -1283,7 +1285,8 @@ struct acpi_table_tcpa_server {
  *        Version 4
  *
  * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
- * December 19, 2014
+ * Version 1.2, Revision 8
+ * February 27, 2017
  *
  ******************************************************************************/
 
@@ -1304,7 +1307,36 @@ struct acpi_table_tpm2 {
 #define ACPI_TPM2_MEMORY_MAPPED                     6
 #define ACPI_TPM2_COMMAND_BUFFER                    7
 #define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD  8
-#define ACPI_TPM2_COMMAND_BUFFER_WITH_SMC          11
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC       11	/* V1.2 Rev 8 */
+
+/* Trailer appears after any start_method subtables */
+
+struct acpi_tpm2_trailer {
+	u32 minimum_log_length;	/* Minimum length for the event log area */
+	u64 log_address;	/* Address of the event log area */
+};
+
+/*
+ * Subtables (start_method-specific)
+ */
+
+/* 11: Start Method for ARM SMC (V1.2 Rev 8) */
+
+struct acpi_tpm2_arm_smc {
+	u32 global_interrupt;
+	u8 interrupt_flags;
+	u8 operation_flags;
+	u16 reserved;
+	u32 function_id;
+};
+
+/* Values for interrupt_flags above */
+
+#define ACPI_TPM2_INTERRUPT_SUPPORT     (1)
+
+/* Values for operation_flags above */
+
+#define ACPI_TPM2_IDLE_SUPPORT          (1)
 
 /*******************************************************************************
  *
@@ -1498,6 +1530,27 @@ struct acpi_table_wdrt {
 	u8 units;
 };
 
+/*******************************************************************************
+ *
+ * WSMT - Windows SMM Security Migrations Table
+ *        Version 1
+ *
+ * Conforms to "Windows SMM Security Migrations Table",
+ * Version 1.0, April 18, 2016
+ *
+ ******************************************************************************/
+
+struct acpi_table_wsmt {
+	struct acpi_table_header header;	/* Common ACPI table header */
+	u32 protection_flags;
+};
+
+/* Flags for protection_flags field above */
+
+#define ACPI_WSMT_FIXED_COMM_BUFFERS                (1)
+#define ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION (2)
+#define ACPI_WSMT_SYSTEM_RESOURCE_PROTECTION        (4)
+
 /* Reset to default packing */
 
 #pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 94414b2..5bde2e7 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -116,6 +116,11 @@ struct acpi_table_bgrt {
 	u32 image_offset_y;
 };
 
+/* Flags for Status field above */
+
+#define ACPI_BGRT_DISPLAYED                 (1)
+#define ACPI_BGRT_ORIENTATION_OFFSET        (3 << 1)
+
 /*******************************************************************************
  *
  * DRTM - Dynamic Root of Trust for Measurement table
@@ -462,7 +467,7 @@ struct acpi_mpst_shared {
 /*******************************************************************************
  *
  * PCCT - Platform Communications Channel Table (ACPI 5.0)
- *        Version 1
+ *        Version 2 (ACPI 6.2)
  *
  ******************************************************************************/
 
@@ -482,7 +487,9 @@ enum acpi_pcct_type {
 	ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
 	ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
 	ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2,	/* ACPI 6.1 */
-	ACPI_PCCT_TYPE_RESERVED = 3	/* 3 and greater are reserved */
+	ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3,	/* ACPI 6.2 */
+	ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4,	/* ACPI 6.2 */
+	ACPI_PCCT_TYPE_RESERVED = 5	/* 5 and greater are reserved */
 };
 
 /*
@@ -508,7 +515,7 @@ struct acpi_pcct_subspace {
 
 struct acpi_pcct_hw_reduced {
 	struct acpi_subtable_header header;
-	u32 doorbell_interrupt;
+	u32 platform_interrupt;
 	u8 flags;
 	u8 reserved;
 	u64 base_address;
@@ -525,7 +532,7 @@ struct acpi_pcct_hw_reduced {
 
 struct acpi_pcct_hw_reduced_type2 {
 	struct acpi_subtable_header header;
-	u32 doorbell_interrupt;
+	u32 platform_interrupt;
 	u8 flags;
 	u8 reserved;
 	u64 base_address;
@@ -536,11 +543,67 @@ struct acpi_pcct_hw_reduced_type2 {
 	u32 latency;
 	u32 max_access_rate;
 	u16 min_turnaround_time;
-	struct acpi_generic_address doorbell_ack_register;
+	struct acpi_generic_address platform_ack_register;
 	u64 ack_preserve_mask;
 	u64 ack_write_mask;
 };
 
+/* 3: Extended PCC Master Subspace Type 3 (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_master {
+	struct acpi_subtable_header header;
+	u32 platform_interrupt;
+	u8 flags;
+	u8 reserved1;
+	u64 base_address;
+	u32 length;
+	struct acpi_generic_address doorbell_register;
+	u64 preserve_mask;
+	u64 write_mask;
+	u32 latency;
+	u32 max_access_rate;
+	u32 min_turnaround_time;
+	struct acpi_generic_address platform_ack_register;
+	u64 ack_preserve_mask;
+	u64 ack_set_mask;
+	u64 reserved2;
+	struct acpi_generic_address cmd_complete_register;
+	u64 cmd_complete_mask;
+	struct acpi_generic_address cmd_update_register;
+	u64 cmd_update_preserve_mask;
+	u64 cmd_update_set_mask;
+	struct acpi_generic_address error_status_register;
+	u64 error_status_mask;
+};
+
+/* 4: Extended PCC Slave Subspace Type 4 (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_slave {
+	struct acpi_subtable_header header;
+	u32 platform_interrupt;
+	u8 flags;
+	u8 reserved1;
+	u64 base_address;
+	u32 length;
+	struct acpi_generic_address doorbell_register;
+	u64 preserve_mask;
+	u64 write_mask;
+	u32 latency;
+	u32 max_access_rate;
+	u32 min_turnaround_time;
+	struct acpi_generic_address platform_ack_register;
+	u64 ack_preserve_mask;
+	u64 ack_set_mask;
+	u64 reserved2;
+	struct acpi_generic_address cmd_complete_register;
+	u64 cmd_complete_mask;
+	struct acpi_generic_address cmd_update_register;
+	u64 cmd_update_preserve_mask;
+	u64 cmd_update_set_mask;
+	struct acpi_generic_address error_status_register;
+	u64 error_status_mask;
+};
+
 /* Values for doorbell flags above */
 
 #define ACPI_PCCT_INTERRUPT_POLARITY    (1)
@@ -558,6 +621,15 @@ struct acpi_pcct_shared_memory {
 	u16 status;
 };
 
+/* Extended PCC Subspace Shared Memory Region (ACPI 6.2) */
+
+struct acpi_pcct_ext_pcc_shared_memory {
+	u32 signature;
+	u32 flags;
+	u32 length;
+	u32 command;
+};
+
 /*******************************************************************************
  *
  * PMTT - Platform Memory Topology Table (ACPI 5.0)
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index d549e31..2fcbaec 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -47,9 +47,9 @@
 /* acpisrc:struct_defs -- for acpisrc conversion */
 
 /*
- * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
- * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of
- * 12/2006.
+ * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent
+ * header and must be either 32 or 64. 16-bit ACPICA is no longer
+ * supported, as of 12/2006.
  */
 #ifndef ACPI_MACHINE_WIDTH
 #error ACPI_MACHINE_WIDTH not defined
@@ -87,9 +87,9 @@
  * s64          64-bit (8 byte) signed value
  *
  * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the
- * compiler-dependent header(s) and were introduced because there is no common
- * 64-bit integer type across the various compilation models, as shown in
- * the table below.
+ * compiler-dependent header(s) and were introduced because there is no
+ * common 64-bit integer type across the various compilation models, as
+ * shown in the table below.
  *
  * Datatype  LP64 ILP64 LLP64 ILP32 LP32 16bit
  * char      8    8     8     8     8    8
@@ -106,10 +106,10 @@
  * 2) These types represent the native word size of the target mode of the
  * processor, and may be 16-bit, 32-bit, or 64-bit as required. They are
  * usually used for memory allocation, efficient loop counters, and array
- * indexes. The types are similar to the size_t type in the C library and are
- * required because there is no C type that consistently represents the native
- * data width. acpi_size is needed because there is no guarantee that a
- * kernel-level C library is present.
+ * indexes. The types are similar to the size_t type in the C library and
+ * are required because there is no C type that consistently represents the
+ * native data width. acpi_size is needed because there is no guarantee
+ * that a kernel-level C library is present.
  *
  * acpi_size        16/32/64-bit unsigned value
  * acpi_native_int  16/32/64-bit signed value
@@ -169,9 +169,10 @@ typedef u64 acpi_physical_address;
 
 /*
  * In the case of the Itanium Processor Family (IPF), the hardware does not
- * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED flag
- * to indicate that special precautions must be taken to avoid alignment faults.
- * (IA64 or ia64 is currently used by existing compilers to indicate IPF.)
+ * support misaligned memory transfers. Set the MISALIGNMENT_NOT_SUPPORTED
+ * flag to indicate that special precautions must be taken to avoid alignment
+ * faults. (IA64 or ia64 is currently used by existing compilers to indicate
+ * IPF.)
  *
  * Note: EM64T and other X86-64 processors support misaligned transfers,
  * so there is no need to define this flag.
@@ -309,8 +310,8 @@ typedef u64 acpi_physical_address;
 #endif
 
 /*
- * Some compilers complain about unused variables. Sometimes we don't want to
- * use all the variables (for example, _acpi_module_name). This allows us
+ * Some compilers complain about unused variables. Sometimes we don't want
+ * to use all the variables (for example, _acpi_module_name). This allows us
  * to tell the compiler in a per-variable manner that a variable
  * is unused
  */
@@ -319,8 +320,9 @@ typedef u64 acpi_physical_address;
 #endif
 
 /*
- * All ACPICA external functions that are available to the rest of the kernel
- * are tagged with thes macros which can be defined as appropriate for the host.
+ * All ACPICA external functions that are available to the rest of the
+ * kernel are tagged with these macros which can be defined as appropriate
+ * for the host.
  *
  * Notes:
  * ACPI_EXPORT_SYMBOL_INIT is used for initialization and termination
@@ -383,7 +385,8 @@ typedef u64 acpi_physical_address;
 
 /******************************************************************************
  *
- * ACPI Specification constants (Do not change unless the specification changes)
+ * ACPI Specification constants (Do not change unless the specification
+ * changes)
  *
  *****************************************************************************/
 
@@ -484,10 +487,10 @@ typedef u8 acpi_owner_id;
 #define ACPI_DO_NOT_WAIT                0
 
 /*
- * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are 32 bits.
- * In ACPI version 2 (2000) and later, integers are 64 bits. Note that this
- * pertains to the ACPI integer type only, not to other integers used in the
- * implementation of the ACPICA subsystem.
+ * Obsolete: Acpi integer width. In ACPI version 1 (1996), integers are
+ * 32 bits. In ACPI version 2 (2000) and later, integers are max 64 bits.
+ * Note that this pertains to the ACPI integer type only, not to other
+ * integers used in the implementation of the ACPICA subsystem.
  *
  * 01/2010: This type is obsolete and has been removed from the entire ACPICA
  * code base. It remains here for compatibility with device drivers that use
@@ -629,8 +632,9 @@ typedef u64 acpi_integer;
 #define ACPI_NOTIFY_LOCALITY_UPDATE     (u8) 0x0B
 #define ACPI_NOTIFY_SHUTDOWN_REQUEST    (u8) 0x0C
 #define ACPI_NOTIFY_AFFINITY_UPDATE     (u8) 0x0D
+#define ACPI_NOTIFY_MEMORY_UPDATE       (u8) 0x0E
 
-#define ACPI_GENERIC_NOTIFY_MAX         0x0D
+#define ACPI_GENERIC_NOTIFY_MAX         0x0E
 #define ACPI_SPECIFIC_NOTIFY_MAX        0x84
 
 /*
@@ -667,10 +671,11 @@ typedef u32 acpi_object_type;
 
 /*
  * These are object types that do not map directly to the ACPI
- * object_type() operator. They are used for various internal purposes only.
- * If new predefined ACPI_TYPEs are added (via the ACPI specification), these
- * internal types must move upwards. (There is code that depends on these
- * values being contiguous with the external types above.)
+ * object_type() operator. They are used for various internal purposes
+ * only. If new predefined ACPI_TYPEs are added (via the ACPI
+ * specification), these internal types must move upwards. (There
+ * is code that depends on these values being contiguous with the
+ * external types above.)
  */
 #define ACPI_TYPE_LOCAL_REGION_FIELD    0x11
 #define ACPI_TYPE_LOCAL_BANK_FIELD      0x12
@@ -770,7 +775,7 @@ typedef u32 acpi_event_status;
  *   |  | | |  +-- Type of dispatch:to method, handler, notify, or none
  *   |  | | +----- Interrupt type: edge or level triggered
  *   |  | +------- Is a Wake GPE
- *   |  +--------- Is GPE masked by the software GPE masking machanism
+ *   |  +--------- Is GPE masked by the software GPE masking mechanism
  *   +------------ <Reserved>
  */
 #define ACPI_GPE_DISPATCH_NONE          (u8) 0x00
@@ -908,8 +913,8 @@ struct acpi_sleep_functions {
  */
 
 /*
- * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element
- * or an unresolved named reference.
+ * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package
+ * element or an unresolved named reference.
  */
 union acpi_object {
 	acpi_object_type type;	/* See definition of acpi_ns_type for values */
@@ -1166,7 +1171,7 @@ struct acpi_pnp_device_id_list {
 
 /*
  * Structure returned from acpi_get_object_info.
- * Optimized for both 32- and 64-bit builds
+ * Optimized for both 32-bit and 64-bit builds.
  */
 struct acpi_device_info {
 	u32 info_size;		/* Size of info, including ID strings */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 699a199..b1a0a8a 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -78,6 +78,11 @@
 #define UUID_PERSISTENT_VIRTUAL_DISK    "5cea02c9-4d07-69d3-269f-4496fbe096f9"
 #define UUID_PERSISTENT_VIRTUAL_CD      "08018188-42cd-bb48-100f-5387d53ded3d"
 
+/* Processor Properties (ACPI 6.2) */
+
+#define UUID_CACHE_PROPERTIES           "6DC63E77-257E-4E78-A973-A21F2796898D"
+#define UUID_PHYSICAL_PROPERTY          "DDE4D59A-AA42-4349-B407-EA40F57D9FB7"
+
 /* Miscellaneous */
 
 #define UUID_PLATFORM_CAPABILITIES      "0811b06e-4a27-44f9-8d60-3cbbc22e7b48"
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 09994b0..912563c 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -382,4 +382,8 @@
 #define ACPI_INIT_FUNCTION
 #endif
 
+#ifndef ACPI_STRUCT_INIT
+#define ACPI_STRUCT_INIT(field, value) value
+#endif
+
 #endif				/* __ACENV_H__ */
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index e877a35..97a7e21 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -48,7 +48,17 @@
  * Use compiler specific <stdarg.h> is a good practice for even when
  * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
  */
+#ifndef va_arg
+#ifdef ACPI_USE_BUILTIN_STDARG
+typedef __builtin_va_list va_list;
+#define va_start(v, l)          __builtin_va_start(v, l)
+#define va_end(v)               __builtin_va_end(v)
+#define va_arg(v, l)            __builtin_va_arg(v, l)
+#define va_copy(d, s)           __builtin_va_copy(d, s)
+#else
 #include <stdarg.h>
+#endif
+#endif
 
 #define ACPI_INLINE             __inline__
 
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index 17bd3b7..bdb6858 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -48,7 +48,9 @@
  * Use compiler specific <stdarg.h> is a good practice for even when
  * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
  */
+#ifndef va_arg
 #include <stdarg.h>
+#endif
 
 /* Configuration specific to Intel 64-bit C compiler */
 
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index a39e3f6..047f138 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -178,6 +178,8 @@
 #define ACPI_MSG_BIOS_ERROR     KERN_ERR "ACPI BIOS Error (bug): "
 #define ACPI_MSG_BIOS_WARNING   KERN_WARNING "ACPI BIOS Warning (bug): "
 
+#define ACPI_STRUCT_INIT(field, value)	.field = value
+
 #else				/* !__KERNEL__ */
 
 #define ACPI_USE_STANDARD_HEADERS
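Editor's note: the ACPI_STRUCT_INIT() pair introduced above (a value-only fallback in acenv.h, a designated initializer under __KERNEL__ in aclinux.h) lets shared ACPICA tables build both on hosts that accept C99 designated initializers and on those that only take positional ones. A minimal sketch of the idea, using an invented acpi_example_ops structure purely for illustration:

struct acpi_example_ops {
	int (*setup)(void);
	void (*teardown)(void);
};

static int example_setup(void) { return 0; }

/* Expands to ".setup = example_setup, ..." with the Linux definition,
 * or to plain positional initializers with the acenv.h fallback. */
static const struct acpi_example_ops example_ops = {
	ACPI_STRUCT_INIT(setup, example_setup),
	ACPI_STRUCT_INIT(teardown, NULL)
};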
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
deleted file mode 100644
index a2508a8..0000000
--- a/include/asm-generic/siginfo.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_GENERIC_SIGINFO_H
-#define _ASM_GENERIC_SIGINFO_H
-
-#include <uapi/asm-generic/siginfo.h>
-
-#define __SI_MASK	0xffff0000u
-#define __SI_KILL	(0 << 16)
-#define __SI_TIMER	(1 << 16)
-#define __SI_POLL	(2 << 16)
-#define __SI_FAULT	(3 << 16)
-#define __SI_CHLD	(4 << 16)
-#define __SI_RT		(5 << 16)
-#define __SI_MESGQ	(6 << 16)
-#define __SI_SYS	(7 << 16)
-#define __SI_CODE(T,N)	((T) | ((N) & 0xffff))
-
-struct siginfo;
-void do_schedule_next_timer(struct siginfo *info);
-
-extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
-
-#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 314a0b9..0d64658 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -172,8 +172,7 @@
 	KEEP(*(__##name##_of_table))					\
 	KEEP(*(__##name##_of_table_end))
 
-#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
-#define CLKEVT_OF_TABLES()	OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
+#define TIMER_OF_TABLES()	OF_TABLE(CONFIG_TIMER_OF, timer)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
 #define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
 #define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
@@ -557,15 +556,15 @@
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
 	RESERVEDMEM_OF_TABLES()						\
-	CLKSRC_OF_TABLES()						\
-	CLKEVT_OF_TABLES()						\
+	TIMER_OF_TABLES()						\
 	IOMMU_OF_TABLES()						\
 	CPU_METHOD_OF_TABLES()						\
 	CPUIDLE_METHOD_OF_TABLES()					\
 	KERNEL_DTB()							\
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
-	ACPI_PROBE_TABLE(clksrc)					\
+	ACPI_PROBE_TABLE(timer)						\
+	ACPI_PROBE_TABLE(iort)						\
 	EARLYCON_TABLE()
 
 #define INIT_TEXT							\
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c0bd0d7..bb83731 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
 int drm_dp_stop_crc(struct drm_dp_aux *aux);
 
+struct drm_dp_dpcd_ident {
+	u8 oui[3];
+	u8 device_id[6];
+	u8 hw_rev;
+	u8 sw_major_rev;
+	u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+	struct drm_dp_dpcd_ident ident;
+	u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+		     bool is_branch);
+
+/**
+ * enum drm_dp_quirk - Display Port sink/branch device specific quirks
+ *
+ * Display Port sink and branch devices in the wild have a variety of bugs, try
+ * to collect them here. The quirks are shared, but it's up to the drivers to
+ * implement workarounds for them.
+ */
+enum drm_dp_quirk {
+	/**
+	 * @DP_DPCD_QUIRK_LIMITED_M_N:
+	 *
+	 * The device requires main link attributes Mvid and Nvid to be limited
+	 * to 16 bits.
+	 */
+	DP_DPCD_QUIRK_LIMITED_M_N,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return true if DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+	return desc->quirks & BIT(quirk);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
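Editor's note: as a hedged usage sketch of the new descriptor/quirk helpers (the surrounding driver function is invented), a source driver would read the descriptor once and consult the quirk bit before programming the link M/N values:

static void example_apply_dp_quirks(struct drm_dp_aux *aux)
{
	struct drm_dp_desc desc;

	/* false: query the sink (DPCD 0x400), not a branch device */
	if (drm_dp_read_desc(aux, &desc, false))
		return;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N)) {
		/* restrict Mvid/Nvid to 16 bits when computing timings */
	}
}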
diff --git a/include/dt-bindings/clock/bcm-sr.h b/include/dt-bindings/clock/bcm-sr.h
new file mode 100644
index 0000000..cff6c6f
--- /dev/null
+++ b/include/dt-bindings/clock/bcm-sr.h
@@ -0,0 +1,101 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2017 Broadcom. All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CLOCK_BCM_SR_H
+#define _CLOCK_BCM_SR_H
+
+/* GENPLL 0 clock channel ID SCR HSLS FS PCIE */
+#define BCM_SR_GENPLL0			0
+#define BCM_SR_GENPLL0_SATA_CLK		1
+#define BCM_SR_GENPLL0_SCR_CLK		2
+#define BCM_SR_GENPLL0_250M_CLK		3
+#define BCM_SR_GENPLL0_PCIE_AXI_CLK	4
+#define BCM_SR_GENPLL0_PAXC_AXI_X2_CLK	5
+#define BCM_SR_GENPLL0_PAXC_AXI_CLK	6
+
+/* GENPLL 1 clock channel ID MHB PCIE NITRO */
+#define BCM_SR_GENPLL1			0
+#define BCM_SR_GENPLL1_PCIE_TL_CLK	1
+#define BCM_SR_GENPLL1_MHB_APB_CLK	2
+
+/* GENPLL 2 clock channel ID NITRO MHB */
+#define BCM_SR_GENPLL2			0
+#define BCM_SR_GENPLL2_NIC_CLK		1
+#define BCM_SR_GENPLL2_250_NITRO_CLK	2
+#define BCM_SR_GENPLL2_125_NITRO_CLK	3
+#define BCM_SR_GENPLL2_CHIMP_CLK	4
+
+/* GENPLL 3 HSLS clock channel ID */
+#define BCM_SR_GENPLL3			0
+#define BCM_SR_GENPLL3_HSLS_CLK		1
+#define BCM_SR_GENPLL3_SDIO_CLK		2
+
+/* GENPLL 4 SCR clock channel ID */
+#define BCM_SR_GENPLL4			0
+#define BCM_SR_GENPLL4_CCN_CLK		1
+
+/* GENPLL 5 FS4 clock channel ID */
+#define BCM_SR_GENPLL5			0
+#define BCM_SR_GENPLL5_FS_CLK		1
+#define BCM_SR_GENPLL5_SPU_CLK		2
+
+/* GENPLL 6 NITRO clock channel ID */
+#define BCM_SR_GENPLL6			0
+#define BCM_SR_GENPLL6_48_USB_CLK	1
+
+/* LCPLL0  clock channel ID */
+#define BCM_SR_LCPLL0			0
+#define BCM_SR_LCPLL0_SATA_REF_CLK	1
+#define BCM_SR_LCPLL0_USB_REF_CLK	2
+#define BCM_SR_LCPLL0_SATA_REFPN_CLK	3
+
+/* LCPLL1  clock channel ID */
+#define BCM_SR_LCPLL1			0
+#define BCM_SR_LCPLL1_WAN_CLK		1
+
+/* LCPLL PCIE  clock channel ID */
+#define BCM_SR_LCPLL_PCIE		0
+#define BCM_SR_LCPLL_PCIE_PHY_REF_CLK	1
+
+/* GENPLL EMEM0 clock channel ID */
+#define BCM_SR_EMEMPLL0			0
+#define BCM_SR_EMEMPLL0_EMEM_CLK	1
+
+/* GENPLL EMEM1 clock channel ID */
+#define BCM_SR_EMEMPLL1			0
+#define BCM_SR_EMEMPLL1_EMEM_CLK	1
+
+/* GENPLL EMEM2 clock channel ID */
+#define BCM_SR_EMEMPLL2			0
+#define BCM_SR_EMEMPLL2_EMEM_CLK	1
+
+#endif /* _CLOCK_BCM_SR_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 3190e30..e3e9f79 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -5,7 +5,6 @@
 #ifndef __GXBB_CLKC_H
 #define __GXBB_CLKC_H
 
-#define CLKID_CPUCLK		1
 #define CLKID_HDMI_PLL		2
 #define CLKID_FCLK_DIV2		4
 #define CLKID_FCLK_DIV3		5
@@ -13,24 +12,30 @@
 #define CLKID_GP0_PLL		9
 #define CLKID_CLK81		12
 #define CLKID_MPLL2		15
+#define CLKID_SPICC		21
 #define CLKID_I2C		22
 #define CLKID_SAR_ADC		23
 #define CLKID_RNG0		25
+#define CLKID_UART0		26
 #define CLKID_SPI		34
 #define CLKID_ETH		36
 #define CLKID_AIU_GLUE		38
+#define CLKID_IEC958		39
 #define CLKID_I2S_OUT		40
 #define CLKID_MIXER_IFACE	44
 #define CLKID_AIU		47
+#define CLKID_UART1		48
 #define CLKID_USB0		50
 #define CLKID_USB1		51
 #define CLKID_USB		55
 #define CLKID_HDMI_PCLK		63
 #define CLKID_USB1_DDR_BRIDGE	64
 #define CLKID_USB0_DDR_BRIDGE	65
+#define CLKID_UART2		68
 #define CLKID_SANA		69
 #define CLKID_GCLK_VENCI_INT0	77
 #define CLKID_AOCLK_GATE	80
+#define CLKID_IEC958_GATE	81
 #define CLKID_AO_I2C		93
 #define CLKID_SD_EMMC_A		94
 #define CLKID_SD_EMMC_B		95
@@ -42,5 +47,8 @@
 #define CLKID_MALI_1_SEL	103
 #define CLKID_MALI_1		105
 #define CLKID_MALI		106
+#define CLKID_CTS_AMCLK		107
+#define CLKID_CTS_MCLK_I958	110
+#define CLKID_CTS_I958		113
 
 #endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index a55ff8c..e29227f 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -21,5 +21,15 @@
 #define CLKID_ZERO		13
 #define CLKID_MPEG_SEL		14
 #define CLKID_MPEG_DIV		15
+#define CLKID_SAR_ADC		23
+#define CLKID_RNG0		25
+#define CLKID_SDIO		30
+#define CLKID_ETH		36
+#define CLKID_USB0		50
+#define CLKID_USB1		51
+#define CLKID_USB		55
+#define CLKID_USB1_DDR_BRIDGE	64
+#define CLKID_USB0_DDR_BRIDGE	65
+#define CLKID_SANA		69
 
 #endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index bc256d3..7dd8bc0 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -12,8 +12,18 @@
 
 #define R7S72100_CLK_PLL	0
 
+/* MSTP2 */
+#define R7S72100_CLK_CORESIGHT	0
+
 /* MSTP3 */
+#define R7S72100_CLK_IEBUS	7
+#define R7S72100_CLK_IRDA	6
+#define R7S72100_CLK_LIN0	5
+#define R7S72100_CLK_LIN1	4
 #define R7S72100_CLK_MTU2	3
+#define R7S72100_CLK_CAN	2
+#define R7S72100_CLK_ADCPWR	1
+#define R7S72100_CLK_PWM	0
 
 /* MSTP4 */
 #define R7S72100_CLK_SCIF0	7
@@ -26,23 +36,51 @@
 #define R7S72100_CLK_SCIF7	0
 
 /* MSTP5 */
+#define R7S72100_CLK_SCI0	7
+#define R7S72100_CLK_SCI1	6
+#define R7S72100_CLK_SG0	5
+#define R7S72100_CLK_SG1	4
+#define R7S72100_CLK_SG2	3
+#define R7S72100_CLK_SG3	2
 #define R7S72100_CLK_OSTM0	1
 #define R7S72100_CLK_OSTM1	0
 
 /* MSTP6 */
+#define R7S72100_CLK_ADC	7
+#define R7S72100_CLK_CEU	6
+#define R7S72100_CLK_DOC0	5
+#define R7S72100_CLK_DOC1	4
+#define R7S72100_CLK_DRC0	3
+#define R7S72100_CLK_DRC1	2
+#define R7S72100_CLK_JCU	1
 #define R7S72100_CLK_RTC	0
 
 /* MSTP7 */
+#define R7S72100_CLK_VDEC0	7
+#define R7S72100_CLK_VDEC1	6
 #define R7S72100_CLK_ETHER	4
+#define R7S72100_CLK_NAND	3
+#define R7S72100_CLK_USB0	1
+#define R7S72100_CLK_USB1	0
 
 /* MSTP8 */
+#define R7S72100_CLK_IMR0	7
+#define R7S72100_CLK_IMR1	6
+#define R7S72100_CLK_IMRDISP	5
 #define R7S72100_CLK_MMCIF	4
+#define R7S72100_CLK_MLB	3
+#define R7S72100_CLK_ETHAVB	2
+#define R7S72100_CLK_SCUX	1
 
 /* MSTP9 */
 #define R7S72100_CLK_I2C0	7
 #define R7S72100_CLK_I2C1	6
 #define R7S72100_CLK_I2C2	5
 #define R7S72100_CLK_I2C3	4
+#define R7S72100_CLK_SPIBSC0	3
+#define R7S72100_CLK_SPIBSC1	2
+#define R7S72100_CLK_VDC50	1	/* and LVDS */
+#define R7S72100_CLK_VDC51	0
 
 /* MSTP10 */
 #define R7S72100_CLK_SPI0	7
@@ -50,6 +88,17 @@
 #define R7S72100_CLK_SPI2	5
 #define R7S72100_CLK_SPI3	4
 #define R7S72100_CLK_SPI4	3
+#define R7S72100_CLK_CDROM	2
+#define R7S72100_CLK_SPDIF	1
+#define R7S72100_CLK_RGPVG2	0
+
+/* MSTP11 */
+#define R7S72100_CLK_SSI0	5
+#define R7S72100_CLK_SSI1	4
+#define R7S72100_CLK_SSI2	3
+#define R7S72100_CLK_SSI3	2
+#define R7S72100_CLK_SSI4	1
+#define R7S72100_CLK_SSI5	0
 
 /* MSTP12 */
 #define R7S72100_CLK_SDHI00	3
@@ -57,4 +106,8 @@
 #define R7S72100_CLK_SDHI10	1
 #define R7S72100_CLK_SDHI11	0
 
+/* MSTP13 */
+#define R7S72100_CLK_PIX1	2
+#define R7S72100_CLK_PIX0	1
+
 #endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index adc50dc..ef69213 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -109,6 +109,7 @@
 #define R8A7791_CLK_SATA0		15
 
 /* MSTP9 */
+#define R8A7791_CLK_GYROADC		1
 #define R8A7791_CLK_GPIO7		4
 #define R8A7791_CLK_GPIO6		5
 #define R8A7791_CLK_GPIO5		7
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index b27e2b1..56f841c 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -61,6 +61,17 @@
 #define SCLK_MAC_TX		130
 #define SCLK_MAC_PHY		131
 #define SCLK_MAC_OUT		132
+#define SCLK_VDEC_CABAC		133
+#define SCLK_VDEC_CORE		134
+#define SCLK_RGA		135
+#define SCLK_HDCP		136
+#define SCLK_HDMI_CEC		137
+#define SCLK_CRYPTO		138
+#define SCLK_TSP		139
+#define SCLK_HSADC		140
+#define SCLK_WIFI		141
+#define SCLK_OTGPHY0		142
+#define SCLK_OTGPHY1		143
 
 /* dclk gates */
 #define DCLK_VOP		190
@@ -68,15 +79,32 @@
 
 /* aclk gates */
 #define ACLK_DMAC		194
+#define ACLK_CPU		195
+#define ACLK_VPU_PRE		196
+#define ACLK_RKVDEC_PRE		197
+#define ACLK_RGA_PRE		198
+#define ACLK_IEP_PRE		199
+#define ACLK_HDCP_PRE		200
+#define ACLK_VOP_PRE		201
+#define ACLK_VPU		202
+#define ACLK_RKVDEC		203
+#define ACLK_IEP		204
+#define ACLK_RGA		205
+#define ACLK_HDCP		206
 #define ACLK_PERI		210
 #define ACLK_VOP		211
 #define ACLK_GMAC		212
+#define ACLK_GPU		213
 
 /* pclk gates */
 #define PCLK_GPIO0		320
 #define PCLK_GPIO1		321
 #define PCLK_GPIO2		322
 #define PCLK_GPIO3		323
+#define PCLK_VIO_H2P		324
+#define PCLK_HDCP		325
+#define PCLK_EFUSE_1024		326
+#define PCLK_EFUSE_256		327
 #define PCLK_GRF		329
 #define PCLK_I2C0		332
 #define PCLK_I2C1		333
@@ -89,6 +117,7 @@
 #define PCLK_TSADC		344
 #define PCLK_PWM		350
 #define PCLK_TIMER		353
+#define PCLK_CPU		354
 #define PCLK_PERI		363
 #define PCLK_HDMI_CTRL		364
 #define PCLK_HDMI_PHY		365
@@ -104,6 +133,24 @@
 #define HCLK_SDMMC		456
 #define HCLK_SDIO		457
 #define HCLK_EMMC		459
+#define HCLK_CPU		460
+#define HCLK_VPU_PRE		461
+#define HCLK_RKVDEC_PRE		462
+#define HCLK_VIO_PRE		463
+#define HCLK_VPU		464
+#define HCLK_RKVDEC		465
+#define HCLK_VIO		466
+#define HCLK_RGA		467
+#define HCLK_IEP		468
+#define HCLK_VIO_H2P		469
+#define HCLK_HDCP_MMU		470
+#define HCLK_HOST0		471
+#define HCLK_HOST1		472
+#define HCLK_HOST2		473
+#define HCLK_OTG		474
+#define HCLK_TSP		475
+#define HCLK_M_CRYPTO		476
+#define HCLK_S_CRYPTO		477
 #define HCLK_PERI		478
 
 #define CLK_NR_CLKS		(HCLK_PERI + 1)
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 220a60f..22cb1df 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -132,6 +132,8 @@
 #define SCLK_RMII_SRC			166
 #define SCLK_PCIEPHY_REF100M		167
 #define SCLK_DDRC			168
+#define SCLK_TESTCLKOUT1		169
+#define SCLK_TESTCLKOUT2		170
 
 #define DCLK_VOP0			180
 #define DCLK_VOP1			181
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 370c0a0..d66432c 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
 #define _DT_BINDINGS_CLK_SUN50I_A64_H_
 
+#define CLK_PLL_PERIPH0		11
+
 #define CLK_BUS_MIPI_DSI	28
 #define CLK_BUS_CE		29
 #define CLK_BUS_DMA		30
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index c2afc41..e139fe5 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -43,6 +43,8 @@
 #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_
 #define _DT_BINDINGS_CLK_SUN8I_H3_H_
 
+#define CLK_PLL_PERIPH0		9
+
 #define CLK_CPUX		14
 
 #define CLK_BUS_CE		20
diff --git a/include/dt-bindings/interrupt-controller/mvebu-icu.h b/include/dt-bindings/interrupt-controller/mvebu-icu.h
new file mode 100644
index 0000000..8249558
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mvebu-icu.h
@@ -0,0 +1,15 @@
+/*
+ * This header provides constants for the MVEBU ICU driver.
+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MVEBU_ICU_H
+
+/* interrupt specifier cell 0 */
+
+#define ICU_GRP_NSR		0x0
+#define ICU_GRP_SR		0x1
+#define ICU_GRP_SEI		0x4
+#define ICU_GRP_REI		0x5
+
+#endif
diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h
new file mode 100644
index 0000000..c8e855c
--- /dev/null
+++ b/include/dt-bindings/mux/mux.h
@@ -0,0 +1,16 @@
+/*
+ * This header provides constants for most Multiplexer bindings.
+ *
+ * Most Multiplexer bindings specify an idle state. In most cases, the
+ * multiplexer can be left as is when idle, and in some cases it can
+ * disconnect the input/output and leave the multiplexer in a high
+ * impedance state.
+ */
+
+#ifndef _DT_BINDINGS_MUX_MUX_H
+#define _DT_BINDINGS_MUX_MUX_H
+
+#define MUX_IDLE_AS_IS      (-1)
+#define MUX_IDLE_DISCONNECT (-2)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h
new file mode 100644
index 0000000..caa6c66
--- /dev/null
+++ b/include/dt-bindings/pinctrl/brcm,pinctrl-stingray.h
@@ -0,0 +1,68 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2017 Broadcom Corporation.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__
+#define __DT_BINDINGS_PINCTRL_BRCM_STINGRAY_H__
+
+/* Alternate functions available in MUX controller */
+#define MODE_NITRO				0
+#define MODE_NAND				1
+#define MODE_PNOR				2
+#define MODE_GPIO				3
+
+/* Pad configuration attribute */
+#define PAD_SLEW_RATE_ENA			(1 << 0)
+#define PAD_SLEW_RATE_ENA_MASK			(1 << 0)
+
+#define PAD_DRIVE_STRENGTH_2_MA			(0 << 1)
+#define PAD_DRIVE_STRENGTH_4_MA			(1 << 1)
+#define PAD_DRIVE_STRENGTH_6_MA			(2 << 1)
+#define PAD_DRIVE_STRENGTH_8_MA			(3 << 1)
+#define PAD_DRIVE_STRENGTH_10_MA		(4 << 1)
+#define PAD_DRIVE_STRENGTH_12_MA		(5 << 1)
+#define PAD_DRIVE_STRENGTH_14_MA		(6 << 1)
+#define PAD_DRIVE_STRENGTH_16_MA		(7 << 1)
+#define PAD_DRIVE_STRENGTH_MASK			(7 << 1)
+
+#define PAD_PULL_UP_ENA				(1 << 4)
+#define PAD_PULL_UP_ENA_MASK			(1 << 4)
+
+#define PAD_PULL_DOWN_ENA			(1 << 5)
+#define PAD_PULL_DOWN_ENA_MASK			(1 << 5)
+
+#define PAD_INPUT_PATH_DIS			(1 << 6)
+#define PAD_INPUT_PATH_DIS_MASK			(1 << 6)
+
+#define PAD_HYSTERESIS_ENA			(1 << 7)
+#define PAD_HYSTERESIS_ENA_MASK			(1 << 7)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/r7s72100-pinctrl.h b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h
new file mode 100644
index 0000000..6b609fe
--- /dev/null
+++ b/include/dt-bindings/pinctrl/r7s72100-pinctrl.h
@@ -0,0 +1,16 @@
+/*
+ * Defines macros and constants for Renesas RZ/A1 pin controller pin
+ * muxing functions.
+ */
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H
+#define __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H
+
+#define RZA1_PINS_PER_PORT	16
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZA1_PINMUX(b, p, f)	((b) * RZA1_PINS_PER_PORT + (p) | (f << 16))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA1_H */
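Editor's note: as a worked example of the encoding above, RZA1_PINMUX(3, 0, 2) evaluates to (3 * 16 + 0) | (2 << 16) = 0x20030, i.e. pin P3_0 with alternate function 2 held in the upper 16 bits while the bank/position index occupies the low bits.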
diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h
new file mode 100644
index 0000000..a60c1d8
--- /dev/null
+++ b/include/dt-bindings/power/mt6797-power.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H
+#define _DT_BINDINGS_POWER_MT6797_POWER_H
+
+#define MT6797_POWER_DOMAIN_VDEC		0
+#define MT6797_POWER_DOMAIN_VENC		1
+#define MT6797_POWER_DOMAIN_ISP		2
+#define MT6797_POWER_DOMAIN_MM			3
+#define MT6797_POWER_DOMAIN_AUDIO		4
+#define MT6797_POWER_DOMAIN_MFG_ASYNC		5
+#define MT6797_POWER_DOMAIN_MFG		6
+#define MT6797_POWER_DOMAIN_MFG_CORE0		7
+#define MT6797_POWER_DOMAIN_MFG_CORE1		8
+#define MT6797_POWER_DOMAIN_MFG_CORE2		9
+#define MT6797_POWER_DOMAIN_MFG_CORE3		10
+#define MT6797_POWER_DOMAIN_MJC		11
+
+#endif /* _DT_BINDINGS_POWER_MT6797_POWER_H */
diff --git a/include/dt-bindings/power/owl-s500-powergate.h b/include/dt-bindings/power/owl-s500-powergate.h
new file mode 100644
index 0000000..0a1c451
--- /dev/null
+++ b/include/dt-bindings/power/owl-s500-powergate.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+#ifndef DT_BINDINGS_POWER_OWL_S500_POWERGATE_H
+#define DT_BINDINGS_POWER_OWL_S500_POWERGATE_H
+
+#define S500_PD_VDE	0
+#define S500_PD_VCE_SI	1
+#define S500_PD_USB2_1	2
+#define S500_PD_CPU2	3
+#define S500_PD_CPU3	4
+#define S500_PD_DMA	5
+#define S500_PD_DS	6
+#define S500_PD_USB3	7
+#define S500_PD_USB2_0	8
+
+#endif
diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h
new file mode 100644
index 0000000..7978c21
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Intel Corporation. All rights reserved
+ * Copyright (C) 2016 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * derived from Steffen Trumtrar's "altr,rst-mgr-a10.h"
+ */
+
+#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H
+#define _DT_BINDINGS_RESET_ALTR_RST_MGR_S10_H
+
+/* MPUMODRST */
+#define CPU0_RESET		0
+#define CPU1_RESET		1
+#define CPU2_RESET		2
+#define CPU3_RESET		3
+
+/* PER0MODRST */
+#define EMAC0_RESET		32
+#define EMAC1_RESET		33
+#define EMAC2_RESET		34
+#define USB0_RESET		35
+#define USB1_RESET		36
+#define NAND_RESET		37
+/* 38 is empty */
+#define SDMMC_RESET		39
+#define EMAC0_OCP_RESET		40
+#define EMAC1_OCP_RESET		41
+#define EMAC2_OCP_RESET		42
+#define USB0_OCP_RESET		43
+#define USB1_OCP_RESET		44
+#define NAND_OCP_RESET		45
+/* 46 is empty */
+#define SDMMC_OCP_RESET		47
+#define DMA_RESET		48
+#define SPIM0_RESET		49
+#define SPIM1_RESET		50
+#define SPIS0_RESET		51
+#define SPIS1_RESET		52
+#define DMA_OCP_RESET		53
+#define EMAC_PTP_RESET		54
+/* 55 is empty */
+#define DMAIF0_RESET		56
+#define DMAIF1_RESET		57
+#define DMAIF2_RESET		58
+#define DMAIF3_RESET		59
+#define DMAIF4_RESET		60
+#define DMAIF5_RESET		61
+#define DMAIF6_RESET		62
+#define DMAIF7_RESET		63
+
+/* PER1MODRST */
+#define WATCHDOG0_RESET		64
+#define WATCHDOG1_RESET		65
+#define WATCHDOG2_RESET		66
+#define WATCHDOG3_RESET		67
+#define L4SYSTIMER0_RESET	68
+#define L4SYSTIMER1_RESET	69
+#define SPTIMER0_RESET		70
+#define SPTIMER1_RESET		71
+#define I2C0_RESET		72
+#define I2C1_RESET		73
+#define I2C2_RESET		74
+#define I2C3_RESET		75
+#define I2C4_RESET		76
+/* 77-79 is empty */
+#define UART0_RESET		80
+#define UART1_RESET		81
+/* 82-87 is empty */
+#define GPIO0_RESET		88
+#define GPIO1_RESET		89
+
+/* BRGMODRST */
+#define SOC2FPGA_RESET		96
+#define LWHPS2FPGA_RESET	97
+#define FPGA2SOC_RESET		98
+#define F2SSDRAM0_RESET		99
+#define F2SSDRAM1_RESET		100
+#define F2SSDRAM2_RESET		101
+#define DDRSCH_RESET		102
+
+/* COLDMODRST */
+#define CPUPO0_RESET		160
+#define CPUPO1_RESET		161
+#define CPUPO2_RESET		162
+#define CPUPO3_RESET		163
+/* 164-167 is empty */
+#define L2_RESET		168
+
+/* DBGMODRST */
+#define DBG_RESET		224
+#define CSDAP_RESET		225
+
+/* TAPMODRST */
+#define TAP_RESET		256
+
+#endif
diff --git a/include/dt-bindings/reset/cortina,gemini-reset.h b/include/dt-bindings/reset/cortina,gemini-reset.h
new file mode 100644
index 0000000..0b886ae
--- /dev/null
+++ b/include/dt-bindings/reset/cortina,gemini-reset.h
@@ -0,0 +1,36 @@
+#ifndef _DT_BINDINGS_RESET_CORTINA_GEMINI_H
+#define _DT_BINDINGS_RESET_CORTINA_GEMINI_H
+
+#define GEMINI_RESET_DRAM	0
+#define GEMINI_RESET_FLASH	1
+#define GEMINI_RESET_IDE	2
+#define GEMINI_RESET_RAID	3
+#define GEMINI_RESET_SECURITY	4
+#define GEMINI_RESET_GMAC0	5
+#define GEMINI_RESET_GMAC1	6
+#define GEMINI_RESET_PCI	7
+#define GEMINI_RESET_USB0	8
+#define GEMINI_RESET_USB1	9
+#define GEMINI_RESET_DMAC	10
+#define GEMINI_RESET_APB	11
+#define GEMINI_RESET_LPC	12
+#define GEMINI_RESET_LCD	13
+#define GEMINI_RESET_INTCON0	14
+#define GEMINI_RESET_INTCON1	15
+#define GEMINI_RESET_RTC	16
+#define GEMINI_RESET_TIMER	17
+#define GEMINI_RESET_UART	18
+#define GEMINI_RESET_SSP	19
+#define GEMINI_RESET_GPIO0	20
+#define GEMINI_RESET_GPIO1	21
+#define GEMINI_RESET_GPIO2	22
+#define GEMINI_RESET_WDOG	23
+#define GEMINI_RESET_EXTERN	24
+#define GEMINI_RESET_CIR	25
+#define GEMINI_RESET_SATA0	26
+#define GEMINI_RESET_SATA1	27
+#define GEMINI_RESET_TVC	28
+#define GEMINI_RESET_CPU1	30
+#define GEMINI_RESET_GLOBAL	31
+
+#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 137e4a3..cafdfb8 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -26,6 +26,7 @@
 #include <linux/resource_ext.h>
 #include <linux/device.h>
 #include <linux/property.h>
+#include <linux/uuid.h>
 
 #ifndef _LINUX
 #define _LINUX
@@ -457,7 +458,6 @@ struct acpi_osc_context {
 	struct acpi_buffer ret;		/* free by caller if success */
 };
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
@@ -741,7 +741,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
 }
 
 static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
-						   const u8 *uuid,
+						   const guid_t *guid,
 						   int rev, int func,
 						   union acpi_object *argv4)
 {
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
new file mode 100644
index 0000000..9af3c17
--- /dev/null
+++ b/include/linux/arch_topology.h
@@ -0,0 +1,17 @@
+/*
+ * include/linux/arch_topology.h - arch specific cpu topology information
+ */
+#ifndef _LINUX_ARCH_TOPOLOGY_H_
+#define _LINUX_ARCH_TOPOLOGY_H_
+
+void topology_normalize_cpu_scale(void);
+
+struct device_node;
+int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+
+struct sched_domain;
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+
+#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ad7d9ee..73fe18e 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -20,7 +20,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  *  Hardware documentation available from http://www.t13.org/
  *
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 2793652..a414a2b 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -8,6 +8,7 @@
 #ifndef __BCM47XX_NVRAM_H
 #define __BCM47XX_NVRAM_H
 
+#include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d1b04b0..664a27d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -118,7 +118,6 @@ static inline void *bio_data(struct bio *bio)
 /*
  * will die
  */
-#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
 #define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
 
 /*
@@ -373,8 +372,11 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
 	return bio_split(bio, sectors, gfp, bs);
 }
 
-extern struct bio_set *bioset_create(unsigned int, unsigned int);
-extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
+enum {
+	BIOSET_NEED_BVECS = BIT(0),
+	BIOSET_NEED_RESCUER = BIT(1),
+};
 extern void bioset_free(struct bio_set *);
 extern mempool_t *biovec_create_pool(int pool_entries);
 
@@ -392,11 +394,6 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 }
 
-static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
-{
-	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
-}
-
 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
 	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
@@ -414,7 +411,13 @@ extern void bio_endio(struct bio *);
 
 static inline void bio_io_error(struct bio *bio)
 {
-	bio->bi_error = -EIO;
+	bio->bi_status = BLK_STS_IOERR;
+	bio_endio(bio);
+}
+
+static inline void bio_wouldblock_error(struct bio *bio)
+{
+	bio->bi_status = BLK_STS_AGAIN;
 	bio_endio(bio);
 }
 
@@ -426,6 +429,7 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *bio, struct bio_vec *table,
 		     unsigned short max_vecs);
+extern void bio_uninit(struct bio *);
 extern void bio_reset(struct bio *);
 void bio_chain(struct bio *, struct bio *);
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index fcd6410..1454230 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -33,14 +33,12 @@ struct blk_mq_hw_ctx {
 	struct blk_mq_ctx	**ctxs;
 	unsigned int		nr_ctx;
 
-	wait_queue_t		dispatch_wait;
+	wait_queue_entry_t		dispatch_wait;
 	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
 	struct blk_mq_tags	*sched_tags;
 
-	struct srcu_struct	queue_rq_srcu;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	7
@@ -62,6 +60,9 @@ struct blk_mq_hw_ctx {
 	struct dentry		*debugfs_dir;
 	struct dentry		*sched_debugfs_dir;
 #endif
+
+	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
+	struct srcu_struct	queue_rq_srcu[0];
 };
 
 struct blk_mq_tag_set {
@@ -87,7 +88,8 @@ struct blk_mq_queue_data {
 	bool last;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+		const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -142,6 +144,8 @@ struct blk_mq_ops {
 	init_request_fn		*init_request;
 	exit_request_fn		*exit_request;
 	reinit_request_fn	*reinit_request;
+	/* Called from inside blk_get_request() */
+	void (*initialize_rq_fn)(struct request *rq);
 
 	map_queues_fn		*map_queues;
 
@@ -155,10 +159,6 @@ struct blk_mq_ops {
 };
 
 enum {
-	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
-	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
-	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */
-
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_SHARED	= 1 << 1,
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
@@ -204,10 +204,10 @@ enum {
 	BLK_MQ_REQ_INTERNAL	= (1 << 2), /* allocate internal/sched tag */
 };
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		unsigned int flags);
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
-		unsigned int flags, unsigned int hctx_idx);
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+		unsigned int op, unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 enum {
@@ -230,8 +230,8 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
-void blk_mq_end_request(struct request *rq, int error);
-void __blk_mq_end_request(struct request *rq, int error);
+void blk_mq_end_request(struct request *rq, blk_status_t error);
+void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
@@ -247,6 +247,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
@@ -264,6 +266,8 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
 int blk_mq_map_queues(struct blk_mq_tag_set *set);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
+void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
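Editor's note: a hedged before/after sketch of a driver's ->queue_rq() under the new blk_status_t return type now that the BLK_MQ_RQ_QUEUE_* values are gone; the example_hw_* helpers are invented:

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	if (!example_hw_can_queue(hctx->driver_data))
		return BLK_STS_RESOURCE;	/* was BLK_MQ_RQ_QUEUE_BUSY */

	blk_mq_start_request(rq);

	if (example_hw_submit(hctx->driver_data, rq))
		return BLK_STS_IOERR;		/* was BLK_MQ_RQ_QUEUE_ERROR */

	return BLK_STS_OK;			/* was BLK_MQ_RQ_QUEUE_OK */
}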
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 61339bc..d2eb87c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,6 +17,27 @@ struct io_context;
 struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 
+/*
+ * Block error status values.  See block/blk-core:blk_errors for the details.
+ */
+typedef u8 __bitwise blk_status_t;
+#define	BLK_STS_OK 0
+#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
+#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
+#define BLK_STS_NOSPC		((__force blk_status_t)3)
+#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
+#define BLK_STS_TARGET		((__force blk_status_t)5)
+#define BLK_STS_NEXUS		((__force blk_status_t)6)
+#define BLK_STS_MEDIUM		((__force blk_status_t)7)
+#define BLK_STS_PROTECTION	((__force blk_status_t)8)
+#define BLK_STS_RESOURCE	((__force blk_status_t)9)
+#define BLK_STS_IOERR		((__force blk_status_t)10)
+
+/* hack for device mapper, don't use elsewhere: */
+#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
+
+#define BLK_STS_AGAIN		((__force blk_status_t)12)
+
 struct blk_issue_stat {
 	u64 stat;
 };
@@ -28,13 +49,14 @@ struct blk_issue_stat {
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
-	int			bi_error;
+	blk_status_t		bi_status;
 	unsigned int		bi_opf;		/* bottom bits req flags,
 						 * top bits REQ_OP. Use
 						 * accessors.
 						 */
 	unsigned short		bi_flags;	/* status, etc and bvec pool number */
 	unsigned short		bi_ioprio;
+	unsigned short		bi_write_hint;
 
 	struct bvec_iter	bi_iter;
 
@@ -205,6 +227,7 @@ enum req_flag_bits {
 	/* command specific flags for REQ_OP_WRITE_ZEROES: */
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
+	__REQ_NOWAIT,           /* Don't wait if request will block */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -223,6 +246,7 @@ enum req_flag_bits {
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
+#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
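Editor's note: a small hedged illustration of the bi_error -> bi_status switch (the function is invented): completion paths now assign a blk_status_t instead of a negative errno before calling bio_endio().

static void example_end_bio(struct bio *bio, bool media_error)
{
	/* was: bio->bi_error = media_error ? -EIO : 0; */
	bio->bi_status = media_error ? BLK_STS_IOERR : BLK_STS_OK;
	bio_endio(bio);
}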
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ab92c4e..25f6a0c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -55,7 +55,7 @@ struct blk_stat_callback;
  */
 #define BLKCG_MAX_POLS		3
 
-typedef void (rq_end_io_fn)(struct request *, int);
+typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
 #define BLK_RL_SYNCFULL		(1U << 0)
 #define BLK_RL_ASYNCFULL	(1U << 1)
@@ -225,6 +225,8 @@ struct request {
 
 	unsigned int extra_len;	/* length of alignment and padding */
 
+	unsigned short write_hint;
+
 	unsigned long deadline;
 	struct list_head timeout_list;
 
@@ -391,6 +393,8 @@ struct request_queue {
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
+	atomic_t		shared_hctx_restart;
+
 	struct blk_queue_stats	*stats;
 	struct rq_wb		*rq_wb;
 
@@ -410,8 +414,12 @@ struct request_queue {
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 	lld_busy_fn		*lld_busy_fn;
+	/* Called just after a request is allocated */
 	init_rq_fn		*init_rq_fn;
+	/* Called just before a request is freed */
 	exit_rq_fn		*exit_rq_fn;
+	/* Called from inside blk_get_request() */
+	void (*initialize_rq_fn)(struct request *rq);
 
 	const struct blk_mq_ops	*mq_ops;
 
@@ -586,6 +594,11 @@ struct request_queue {
 
 	size_t			cmd_size;
 	void			*rq_alloc_data;
+
+	struct work_struct	release_work;
+
+#define BLK_MAX_WRITE_HINTS	5
+	u64			write_hints[BLK_MAX_WRITE_HINTS];
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -618,6 +631,8 @@ struct request_queue {
 #define QUEUE_FLAG_STATS       27	/* track rq completion times */
 #define QUEUE_FLAG_POLL_STATS  28	/* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  29	/* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 30	/* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED    31	/* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -629,6 +644,13 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_POLL))
 
+/*
+ * @q->queue_lock is set while a queue is being initialized. Since we know
+ * that no other threads access the queue object before @q->queue_lock has
+ * been set, it is safe to manipulate queue flags without holding the
+ * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
+ * blk_init_allocated_queue().
+ */
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
 	if (q->queue_lock)
@@ -708,10 +730,13 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_secure_erase(q) \
 	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
+#define blk_queue_scsi_passthrough(q)	\
+	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 			     REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 
 static inline bool blk_account_rq(struct request *rq)
 {
@@ -810,7 +835,8 @@ static inline bool rq_mergeable(struct request *rq)
 
 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 {
-	if (bio_data(a) == bio_data(b))
+	if (bio_page(a) == bio_page(b) &&
+	    bio_offset(a) == bio_offset(b))
 		return true;
 
 	return false;
@@ -858,19 +884,6 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
 
-#ifdef CONFIG_BOUNCE
-extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
-static inline int init_emergency_isa_pool(void)
-{
-	return 0;
-}
-static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
-{
-}
-#endif /* CONFIG_MMU */
-
 struct rq_map_data {
 	struct page **pages;
 	int page_order;
@@ -929,7 +942,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_get_request(struct request_queue *, unsigned int op,
+				       gfp_t gfp_mask);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -937,12 +951,11 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     int (*bio_ctr)(struct bio *, struct bio *, void *),
 			     void *data);
 extern void blk_rq_unprep_clone(struct request *rq);
-extern int blk_insert_cloned_request(struct request_queue *q,
+extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
-extern void blk_queue_split(struct request_queue *, struct bio **,
-			    struct bio_set *);
+extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -963,7 +976,6 @@ extern void __blk_run_queue(struct request_queue *q);
 extern void __blk_run_queue_uncond(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_run_queue_async(struct request_queue *q);
-extern void blk_mq_quiesce_queue(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -977,6 +989,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
+
 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -1109,16 +1124,16 @@ extern struct request *blk_fetch_request(struct request_queue *q);
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern bool blk_update_request(struct request *rq, int error,
+extern bool blk_update_request(struct request *rq, blk_status_t error,
 			       unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, int error);
-extern bool blk_end_request(struct request *rq, int error,
+extern void blk_finish_request(struct request *rq, blk_status_t error);
+extern bool blk_end_request(struct request *rq, blk_status_t error,
 			    unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request(struct request *rq, int error,
+extern void blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request(struct request *rq, blk_status_t error,
 			      unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request_cur(struct request *rq, int error);
+extern void __blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
@@ -1370,11 +1385,6 @@ enum blk_default_limits {
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline unsigned long queue_bounce_pfn(struct request_queue *q)
-{
-	return q->limits.bounce_pfn;
-}
-
 static inline unsigned long queue_segment_boundary(struct request_queue *q)
 {
 	return q->limits.seg_boundary_mask;
@@ -1776,7 +1786,7 @@ struct blk_integrity_iter {
 	const char		*disk_name;
 };
 
-typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity_profile {
 	integrity_processing_fn		*generate_fn;
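
blk_status_to_errno() and errno_to_blk_status(), declared above, are the
translation points for code that still speaks errnos. A sketch of the
intended pattern, with the mydrv_* names purely illustrative:

/* bridging an errno-based path into the new blk_status_t world */
static void mydrv_end_bio(struct bio *bio, int ret)
{
	bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);
}

/* ... and back again, for callers that still want an errno */
static int mydrv_bio_error(struct bio *bio)
{
	return blk_status_to_errno(bio->bi_status);
}
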
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 2174594..ec47101 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -48,6 +48,7 @@ enum {
 	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
 	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
 	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
+	CSS_DYING	= (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed2573e..710a005 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
 }
 
 /**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
  * css_put - put a css reference
  * @css: target css
  *
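
A hedged sketch of the use case css_is_dying() is documented for: a
controller refusing new work once cgroup removal has been scheduled, even
though the css is still RCU-alive (struct my_cgroup and queue_my_work()
are illustrative):

static int my_attach_work(struct my_cgroup *my_cg)
{
	/* removal already scheduled; behave as if the group were gone */
	if (css_is_dying(&my_cg->css))
		return -ENODEV;

	return queue_my_work(my_cg);
}
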
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index fccf7f4..bbb3712 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -27,7 +27,7 @@ struct cleancache_filekey {
 
 struct cleancache_ops {
 	int (*init_fs)(size_t);
-	int (*init_shared_fs)(char *uuid, size_t);
+	int (*init_shared_fs)(uuid_t *uuid, size_t);
 	int (*get_page)(int, struct cleancache_filekey,
 			pgoff_t, struct page *);
 	void (*put_page)(int, struct cleancache_filekey,
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index acc9ce0..a116926 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -223,13 +223,4 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
 
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
-#define CLOCKEVENT_OF_DECLARE(name, compat, fn) \
-	OF_DECLARE_1_RET(clkevt, name, compat, fn)
-
-#ifdef CONFIG_CLKEVT_PROBE
-extern int clockevent_probe(void);
-#else
-static inline int clockevent_probe(void) { return 0; }
-#endif
-
 #endif /* _LINUX_CLOCKCHIPS_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index f2b10d9..a78cb18 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -96,6 +96,7 @@ struct clocksource {
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
 	void (*mark_unstable)(struct clocksource *cs);
+	void (*tick_stable)(struct clocksource *cs);
 
 	/* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
@@ -249,16 +250,19 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
 
 extern int clocksource_i8253_init(void);
 
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
-	OF_DECLARE_1_RET(clksrc, name, compat, fn)
+#define TIMER_OF_DECLARE(name, compat, fn) \
+	OF_DECLARE_1_RET(timer, name, compat, fn)
 
-#ifdef CONFIG_CLKSRC_PROBE
-extern void clocksource_probe(void);
+#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
+	TIMER_OF_DECLARE(name, compat, fn)
+
+#ifdef CONFIG_TIMER_PROBE
+extern void timer_probe(void);
 #else
-static inline void clocksource_probe(void) {}
+static inline void timer_probe(void) {}
 #endif
 
-#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn)		\
-	ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn)
+#define TIMER_ACPI_DECLARE(name, table_id, fn)		\
+	ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
 
 #endif /* _LINUX_CLOCKSOURCE_H */
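
CLOCKSOURCE_OF_DECLARE() survives only as an alias, so existing drivers
keep building, while new code is expected to use the timer naming. A
minimal sketch of a DT timer driver under the renamed macro (the
compatible string and the init body are illustrative):

static int __init my_timer_init(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base)
		return -ENXIO;

	/* program the hardware, register the clocksource/clockevent here */
	return 0;
}
TIMER_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);
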
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1c5f315..425563c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -94,6 +94,10 @@ struct compat_itimerval {
 	struct compat_timeval	it_value;
 };
 
+struct itimerval;
+int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *);
+int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *);
+
 struct compat_tms {
 	compat_clock_t		tms_utime;
 	compat_clock_t		tms_stime;
@@ -128,6 +132,10 @@ struct compat_timex {
 	compat_int_t:32; compat_int_t:32; compat_int_t:32;
 };
 
+struct timex;
+int compat_get_timex(struct timex *, const struct compat_timex __user *);
+int compat_put_timex(struct compat_timex __user *, const struct timex *);
+
 #define _COMPAT_NSIG_WORDS	(_COMPAT_NSIG / _COMPAT_NSIG_BPW)
 
 typedef struct {
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de17999..d614c5e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,11 @@
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/*
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function.  This turns out to avoid the need for complex #ifdef
+ * directives.  Suppress the warning in clang as well.
+ */
+#undef inline
+#define inline inline __attribute__((unused)) notrace
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f811005..707242fd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,11 +17,7 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
-#ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
-#else /* CONFIG_SPARSE_RCU_POINTER */
-# define __rcu
-#endif /* CONFIG_SPARSE_RCU_POINTER */
 # define __private	__attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2319b8c..c967090 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -74,7 +74,8 @@ extern void config_item_init_type_name(struct config_item *item,
 				       const char *name,
 				       struct config_item_type *type);
 
-extern struct config_item * config_item_get(struct config_item *);
+extern struct config_item *config_item_get(struct config_item *);
+extern struct config_item *config_item_get_unless_zero(struct config_item *);
 extern void config_item_put(struct config_item *);
 
 struct config_item_type {
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 035c16c..d950dad 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -263,11 +263,15 @@ static inline int coresight_timeout(void __iomem *addr, u32 offset,
 #endif
 
 #ifdef CONFIG_OF
-extern struct coresight_platform_data *of_get_coresight_platform_data(
-				struct device *dev, struct device_node *node);
+extern int of_coresight_get_cpu(const struct device_node *node);
+extern struct coresight_platform_data *
+of_get_coresight_platform_data(struct device *dev,
+			       const struct device_node *node);
 #else
+static inline int of_coresight_get_cpu(const struct device_node *node)
+{ return 0; }
 static inline struct coresight_platform_data *of_get_coresight_platform_data(
-	struct device *dev, struct device_node *node) { return NULL; }
+	struct device *dev, const struct device_node *node) { return NULL; }
 #endif
 
 #ifdef CONFIG_PID_NS
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f920812..ca73bc1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -99,26 +99,32 @@ static inline void cpu_maps_update_done(void)
 extern struct bus_type cpu_subsys;
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* Stop CPUs going up and down. */
-
-extern void cpu_hotplug_begin(void);
-extern void cpu_hotplug_done(void);
-extern void get_online_cpus(void);
-extern void put_online_cpus(void);
+extern void cpus_write_lock(void);
+extern void cpus_write_unlock(void);
+extern void cpus_read_lock(void);
+extern void cpus_read_unlock(void);
+extern void lockdep_assert_cpus_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
-#else		/* CONFIG_HOTPLUG_CPU */
+#else /* CONFIG_HOTPLUG_CPU */
 
-static inline void cpu_hotplug_begin(void) {}
-static inline void cpu_hotplug_done(void) {}
-#define get_online_cpus()	do { } while (0)
-#define put_online_cpus()	do { } while (0)
-#define cpu_hotplug_disable()	do { } while (0)
-#define cpu_hotplug_enable()	do { } while (0)
-#endif		/* CONFIG_HOTPLUG_CPU */
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+#endif	/* !CONFIG_HOTPLUG_CPU */
+
+/* Wrappers which go away once all code is converted */
+static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
+static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
+static inline void get_online_cpus(void) { cpus_read_lock(); }
+static inline void put_online_cpus(void) { cpus_read_unlock(); }
 
 #ifdef CONFIG_PM_SLEEP_SMP
 extern int freeze_secondary_cpus(int primary);
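
get_online_cpus()/put_online_cpus() and cpu_hotplug_begin()/done() survive
only as wrappers around the new names, so the legacy spelling still
compiles; the sketch below uses the names converted code is expected to
adopt (my_counter is an illustrative per-cpu variable):

	int cpu;

	cpus_read_lock();			/* was: get_online_cpus() */
	for_each_online_cpu(cpu)
		per_cpu(my_counter, cpu) = 0;
	cpus_read_unlock();			/* was: put_online_cpus() */
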
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a5ce0bbe..905117b 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -883,6 +883,8 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
 }
 #endif
 
+extern unsigned int arch_freq_get_on_cpu(int cpu);
+
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 0f2a803..b56573b 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -58,7 +58,6 @@ enum cpuhp_state {
 	CPUHP_XEN_EVTCHN_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
-	CPUHP_BLK_MQ_PREPARE,
 	CPUHP_NET_FLOW_PREPARE,
 	CPUHP_TOPOLOGY_PREPARE,
 	CPUHP_NET_IUCV_PREPARE,
@@ -124,6 +123,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE_IDLE,
 	CPUHP_AP_SMPBOOT_THREADS,
 	CPUHP_AP_X86_VDSO_VMA_ONLINE,
+	CPUHP_AP_IRQ_AFFINITY_ONLINE,
 	CPUHP_AP_PERF_ONLINE,
 	CPUHP_AP_PERF_X86_ONLINE,
 	CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -153,6 +153,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,	const char *name, bool invoke,
 			int (*startup)(unsigned int cpu),
 			int (*teardown)(unsigned int cpu), bool multi_instance);
 
+int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
+				   bool invoke,
+				   int (*startup)(unsigned int cpu),
+				   int (*teardown)(unsigned int cpu),
+				   bool multi_instance);
 /**
  * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
  * @state:	The state for which the calls are installed
@@ -171,6 +176,15 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
 	return __cpuhp_setup_state(state, name, true, startup, teardown, false);
 }
 
+static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+					       const char *name,
+					       int (*startup)(unsigned int cpu),
+					       int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state_cpuslocked(state, name, true, startup,
+					      teardown, false);
+}
+
 /**
  * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
  *			       callbacks
@@ -191,6 +205,15 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
 				   false);
 }
 
+static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
+						     const char *name,
+						     int (*startup)(unsigned int cpu),
+						     int (*teardown)(unsigned int cpu))
+{
+	return __cpuhp_setup_state_cpuslocked(state, name, false, startup,
+					    teardown, false);
+}
+
 /**
  * cpuhp_setup_state_multi - Add callbacks for multi state
  * @state:	The state for which the calls are installed
@@ -217,6 +240,8 @@ static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
 
 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 			       bool invoke);
+int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
+					  struct hlist_node *node, bool invoke);
 
 /**
  * cpuhp_state_add_instance - Add an instance for a state and invoke startup
@@ -249,7 +274,15 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
 	return __cpuhp_state_add_instance(state, node, false);
 }
 
+static inline int
+cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
+					    struct hlist_node *node)
+{
+	return __cpuhp_state_add_instance_cpuslocked(state, node, false);
+}
+
 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
+void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
 
 /**
  * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
@@ -273,6 +306,11 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
 	__cpuhp_remove_state(state, false);
 }
 
+static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
+{
+	__cpuhp_remove_state_cpuslocked(state, false);
+}
+
 /**
  * cpuhp_remove_multi_state - Remove hotplug multi state callback
  * @state:	The state for which the calls are removed
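
The new *_cpuslocked variants are for callers that already hold the
hotplug read lock and would deadlock on the regular entry points. A
sketch using the dynamic CPUHP_AP_ONLINE_DYN state, with
mydrv_cpu_online/offline standing in for real callbacks:

	int ret;

	cpus_read_lock();
	/* this variant expects the hotplug lock to be held already */
	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "mydrv:online",
						   mydrv_cpu_online,
						   mydrv_cpu_offline);
	cpus_read_unlock();
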
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2404ad2..4bf4479 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)					\
+	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
+	     (cpu) < nr_cpumask_bits;						\
+	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
@@ -276,6 +293,12 @@ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
 	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+
 /**
  * cpumask_clear_cpu - clear a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)
@@ -286,6 +309,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
 /**
  * cpumask_test_cpu - test for a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)
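
for_each_cpu_wrap() walks the whole mask exactly once but starts at an
arbitrary CPU, which helps spread work instead of always favouring the
low-numbered CPUs. A small sketch starting the scan at the local CPU
(try_dispatch_on() is illustrative):

	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, smp_processor_id()) {
		if (try_dispatch_on(cpu))
			break;
	}
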
diff --git a/include/linux/crc4.h b/include/linux/crc4.h
new file mode 100644
index 0000000..8f739f1
--- /dev/null
+++ b/include/linux/crc4.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC4_H
+#define _LINUX_CRC4_H
+
+#include <linux/types.h>
+
+extern uint8_t crc4(uint8_t c, uint64_t x, int bits);
+
+#endif /* _LINUX_CRC4_H */
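
A one-line sketch of a crc4() caller, assuming the accompanying lib/crc4.c
helper computes the 4-bit CRC of the low @bits bits of @x seeded with @c:

	uint8_t crc;

	crc = crc4(0, val, 64);		/* val is the 64-bit payload to protect */
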
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b03e7d0..c728d51 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
-/* Credentials management - see Documentation/security/credentials.txt
+/* Credentials management - see Documentation/security/credentials.rst
  *
  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 9174b0d..aa86e6d 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -9,7 +9,7 @@
  *	2 as published by the Free Software Foundation.
  *
  *  debugfs is for people to use instead of /proc or /sys.
- *  See Documentation/DocBook/filesystems for more details.
+ *  See Documentation/filesystems/ for more details.
  */
 
 #ifndef _DEBUGFS_H_
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index f4c639c..456da50 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -72,9 +72,9 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone);
  * 2   : The target wants to push back the io
  */
 typedef int (*dm_endio_fn) (struct dm_target *ti,
-			    struct bio *bio, int error);
+			    struct bio *bio, blk_status_t *error);
 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
-				    struct request *clone, int error,
+				    struct request *clone, blk_status_t error,
 				    union map_info *map_context);
 
 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
diff --git a/include/linux/device.h b/include/linux/device.h
index 9ef518a..6baa123 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -66,7 +66,6 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @name:	The name of the bus.
  * @dev_name:	Used for subsystems to enumerate devices like ("foo%u", dev->id).
  * @dev_root:	Default device to use as the parent.
- * @dev_attrs:	Default attributes of the devices on the bus.
  * @bus_groups:	Default attributes of the bus.
  * @dev_groups:	Default attributes of the devices on the bus.
  * @drv_groups: Default attributes of the device drivers on the bus.
@@ -112,7 +111,6 @@ struct bus_type {
 	const char		*name;
 	const char		*dev_name;
 	struct device		*dev_root;
-	struct device_attribute	*dev_attrs;	/* use dev_groups instead */
 	const struct attribute_group **bus_groups;
 	const struct attribute_group **dev_groups;
 	const struct attribute_group **drv_groups;
@@ -365,7 +363,6 @@ int subsys_virtual_register(struct bus_type *subsys,
  * struct class - device classes
  * @name:	Name of the class.
  * @owner:	The module owner.
- * @class_attrs: Default attributes of this class.
  * @class_groups: Default attributes of this class.
  * @dev_groups:	Default attributes of the devices that belong to the class.
  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
@@ -394,7 +391,6 @@ struct class {
 	const char		*name;
 	struct module		*owner;
 
-	struct class_attribute		*class_attrs;
 	const struct attribute_group	**class_groups;
 	const struct attribute_group	**dev_groups;
 	struct kobject			*dev_kobj;
@@ -465,8 +461,6 @@ struct class_attribute {
 			const char *buf, size_t count);
 };
 
-#define CLASS_ATTR(_name, _mode, _show, _store) \
-	struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define CLASS_ATTR_RW(_name) \
 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
 #define CLASS_ATTR_RO(_name) \
@@ -879,6 +873,8 @@ struct dev_links_info {
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:	Set after successful invocation of bus type's .offline().
+ * @of_node_reused: Set if the device-tree node is shared with an ancestor
+ *              device.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -966,6 +962,7 @@ struct device {
 
 	bool			offline_disabled:1;
 	bool			offline:1;
+	bool			of_node_reused:1;
 };
 
 static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1144,6 +1141,7 @@ extern int device_offline(struct device *dev);
 extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
 
 static inline int dev_num_vf(struct device *dev)
 {
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 4eac267..92f2083 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
 struct iommu_domain;
 struct msi_msg;
+struct device;
 
 static inline int iommu_dma_init(void)
 {
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5e9c74c..9bbf21a 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -136,7 +136,7 @@ static inline int dmi_name_in_vendors(const char *s) { return 0; }
 static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
-	void *private_data) { return -1; }
+	void *private_data) { return -ENXIO; }
 static inline bool dmi_match(enum dmi_field f, const char *str)
 	{ return false; }
 static inline void dmi_memdev_name(u16 handle, const char **bank,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ec36f42..8269bcb 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -137,6 +137,18 @@ struct efi_boot_memmap {
 #define EFI_CAPSULE_POPULATE_SYSTEM_TABLE	0x00020000
 #define EFI_CAPSULE_INITIATE_RESET		0x00040000
 
+struct capsule_info {
+	efi_capsule_header_t	header;
+	int			reset_type;
+	long			index;
+	size_t			count;
+	size_t			total_size;
+	phys_addr_t		*pages;
+	size_t			page_bytes_remain;
+};
+
+int __efi_capsule_setup_info(struct capsule_info *cap_info);
+
 /*
  * Allocation types for calls to boottime->allocate_pages.
  */
@@ -1403,7 +1415,7 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
 				 size_t size, int *reset);
 
 extern int efi_capsule_update(efi_capsule_header_t *capsule,
-			      struct page **pages);
+			      phys_addr_t *pages);
 
 #ifdef CONFIG_EFI_RUNTIME_MAP
 int efi_runtime_map_init(struct kobject *);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 9ec5e22..5bc8f86 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -104,8 +104,9 @@ struct elevator_mq_ops {
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
-	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
-	void (*put_request)(struct request *);
+	void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+	void (*prepare_request)(struct request *, struct bio *bio);
+	void (*finish_request)(struct request *);
 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
@@ -114,8 +115,6 @@ struct elevator_mq_ops {
 	void (*requeue_request)(struct request *);
 	struct request *(*former_request)(struct request_queue *, struct request *);
 	struct request *(*next_request)(struct request_queue *, struct request *);
-	int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
-	void (*put_rq_priv)(struct request_queue *, struct request *);
 	void (*init_icq)(struct io_cq *);
 	void (*exit_icq)(struct io_cq *);
 };
@@ -153,7 +152,7 @@ struct elevator_type
 #endif
 
 	/* managed by elevator core */
-	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
+	char icq_cache_name[ELV_NAME_MAX + 6];	/* elvname + "_io_cq" */
 	struct list_head list;
 };
 
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index ff0b981..9e4befd 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -37,7 +37,7 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
 #else /* CONFIG_EVENTFD */
@@ -73,7 +73,7 @@ static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
 }
 
 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
-						wait_queue_t *wait, __u64 *cnt)
+						wait_queue_entry_t *wait, __u64 *cnt)
 {
 	return -ENOSYS;
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 803e5a9..771fe11 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2,7 +2,7 @@
 #define _LINUX_FS_H
 
 #include <linux/linkage.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/kdev_t.h>
 #include <linux/dcache.h>
 #include <linux/path.h>
@@ -20,6 +20,7 @@
 #include <linux/rwsem.h>
 #include <linux/capability.h>
 #include <linux/semaphore.h>
+#include <linux/fcntl.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
 #include <linux/atomic.h>
@@ -30,6 +31,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/delayed_call.h>
+#include <linux/uuid.h>
 
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
@@ -142,6 +144,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY		((__force fmode_t)0x4000000)
 
+/* File is capable of returning -EAGAIN if AIO will block */
+#define FMODE_AIO_NOWAIT	((__force fmode_t)0x8000000)
+
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
  * that indicates that they should check the contents of the iovec are
@@ -261,6 +266,18 @@ struct page;
 struct address_space;
 struct writeback_control;
 
+/*
+ * Write life time hint values.
+ */
+enum rw_hint {
+	WRITE_LIFE_NOT_SET	= 0,
+	WRITE_LIFE_NONE		= RWH_WRITE_LIFE_NONE,
+	WRITE_LIFE_SHORT	= RWH_WRITE_LIFE_SHORT,
+	WRITE_LIFE_MEDIUM	= RWH_WRITE_LIFE_MEDIUM,
+	WRITE_LIFE_LONG		= RWH_WRITE_LIFE_LONG,
+	WRITE_LIFE_EXTREME	= RWH_WRITE_LIFE_EXTREME,
+};
+
 #define IOCB_EVENTFD		(1 << 0)
 #define IOCB_APPEND		(1 << 1)
 #define IOCB_DIRECT		(1 << 2)
@@ -268,6 +285,7 @@ struct writeback_control;
 #define IOCB_DSYNC		(1 << 4)
 #define IOCB_SYNC		(1 << 5)
 #define IOCB_WRITE		(1 << 6)
+#define IOCB_NOWAIT		(1 << 7)
 
 struct kiocb {
 	struct file		*ki_filp;
@@ -275,6 +293,7 @@ struct kiocb {
 	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
 	void			*private;
 	int			ki_flags;
+	enum rw_hint		ki_hint;
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -282,16 +301,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 	return kiocb->ki_complete == NULL;
 }
 
-static inline int iocb_flags(struct file *file);
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
-{
-	*kiocb = (struct kiocb) {
-		.ki_filp = filp,
-		.ki_flags = iocb_flags(filp),
-	};
-}
-
 /*
  * "descriptor" for what we're up to with a read.
  * This allows us to use the same read code yet
@@ -592,6 +601,7 @@ struct inode {
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
 	unsigned short          i_bytes;
 	unsigned int		i_blkbits;
+	enum rw_hint		i_write_hint;
 	blkcnt_t		i_blocks;
 
 #ifdef __NEED_I_SIZE_ORDERED
@@ -846,6 +856,7 @@ struct file {
 	 * Must not be taken from IRQ context.
 	 */
 	spinlock_t		f_lock;
+	enum rw_hint		f_write_hint;
 	atomic_long_t		f_count;
 	unsigned int 		f_flags;
 	fmode_t			f_mode;
@@ -1021,8 +1032,6 @@ struct file_lock_context {
 #define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
 #endif
 
-#include <linux/fcntl.h>
-
 extern void send_sigio(struct fown_struct *fown, int fd, int band);
 
 /*
@@ -1328,8 +1337,8 @@ struct super_block {
 
 	struct sb_writers	s_writers;
 
-	char s_id[32];				/* Informational name */
-	u8 s_uuid[16];				/* UUID */
+	char			s_id[32];	/* Informational name */
+	uuid_t			s_uuid;		/* UUID */
 
 	void 			*s_fs_info;	/* Filesystem private info */
 	unsigned int		s_max_links;
@@ -1873,6 +1882,25 @@ static inline bool HAS_UNMAPPED_ID(struct inode *inode)
 	return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
 }
 
+static inline enum rw_hint file_write_hint(struct file *file)
+{
+	if (file->f_write_hint != WRITE_LIFE_NOT_SET)
+		return file->f_write_hint;
+
+	return file_inode(file)->i_write_hint;
+}
+
+static inline int iocb_flags(struct file *file);
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+	*kiocb = (struct kiocb) {
+		.ki_filp = filp,
+		.ki_flags = iocb_flags(filp),
+		.ki_hint = file_write_hint(filp),
+	};
+}
+
 /*
  * Inode state bits.  Protected by inode->i_lock
  *
@@ -2517,6 +2545,8 @@ extern int filemap_fdatawait(struct address_space *);
 extern void filemap_fdatawait_keep_errors(struct address_space *);
 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
 				   loff_t lend);
+extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
+				  loff_t lend);
 extern int filemap_write_and_wait(struct address_space *mapping);
 extern int filemap_write_and_wait_range(struct address_space *mapping,
 				        loff_t lstart, loff_t lend);
@@ -2843,7 +2873,7 @@ enum {
 	DIO_SKIP_DIO_COUNT = 0x08,
 };
 
-void dio_end_io(struct bio *bio, int error);
+void dio_end_io(struct bio *bio);
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			     struct block_device *bdev, struct iov_iter *iter,
@@ -3056,6 +3086,25 @@ static inline int iocb_flags(struct file *file)
 	return res;
 }
 
+static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+{
+	if (unlikely(flags & ~RWF_SUPPORTED))
+		return -EOPNOTSUPP;
+
+	if (flags & RWF_NOWAIT) {
+		if (!(ki->ki_filp->f_mode & FMODE_AIO_NOWAIT))
+			return -EOPNOTSUPP;
+		ki->ki_flags |= IOCB_NOWAIT;
+	}
+	if (flags & RWF_HIPRI)
+		ki->ki_flags |= IOCB_HIPRI;
+	if (flags & RWF_DSYNC)
+		ki->ki_flags |= IOCB_DSYNC;
+	if (flags & RWF_SYNC)
+		ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+	return 0;
+}
+
 static inline ino_t parent_ino(struct dentry *dentry)
 {
 	ino_t res;
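
IOCB_NOWAIT and FMODE_AIO_NOWAIT plumb a non-blocking request from the
new RWF_NOWAIT flag (added on the uapi side of this series) through
kiocb_set_rw_flags() down to the filesystem and block layer, which is
what the IOMAP_NOWAIT and REQ_NOWAIT bits elsewhere in this merge exist
for. A hedged userspace sketch of what the whole path enables
(defer_to_blocking_context() is an illustrative fallback):

	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t ret;

	/* fail with EAGAIN instead of blocking on submission */
	ret = pwritev2(fd, &iov, 1, offset, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		defer_to_blocking_context();
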
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 273cbf64..141fd38 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -21,8 +21,18 @@ struct fsi_device {
 	struct device		dev;
 	u8			engine_type;
 	u8			version;
+	u8			unit;
+	struct fsi_slave	*slave;
+	uint32_t		addr;
+	uint32_t		size;
 };
 
+extern int fsi_device_read(struct fsi_device *dev, uint32_t addr,
+		void *val, size_t size);
+extern int fsi_device_write(struct fsi_device *dev, uint32_t addr,
+		const void *val, size_t size);
+extern int fsi_device_peek(struct fsi_device *dev, void *val);
+
 struct fsi_device_id {
 	u8	engine_type;
 	u8	version;
@@ -36,7 +46,6 @@ struct fsi_device_id {
 #define FSI_DEVICE_VERSIONED(t, v) \
 	.engine_type = (t), .version = (v),
 
-
 struct fsi_driver {
 	struct device_driver		drv;
 	const struct fsi_device_id	*id_table;
@@ -45,6 +54,30 @@ struct fsi_driver {
 #define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
 #define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
 
+extern int fsi_driver_register(struct fsi_driver *fsi_drv);
+extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
+
+/* module_fsi_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_fsi_driver(__fsi_driver) \
+		module_driver(__fsi_driver, fsi_driver_register, \
+				fsi_driver_unregister)
+
+/* direct slave API */
+extern int fsi_slave_claim_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size);
+extern void fsi_slave_release_range(struct fsi_slave *slave,
+		uint32_t addr, uint32_t size);
+extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
+		void *val, size_t size);
+extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
+		const void *val, size_t size);
+
+
+
 extern struct bus_type fsi_bus_type;
 
 #endif /* LINUX_FSI_H */
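
The exported read/write helpers plus module_fsi_driver() are enough for a
small FSI client driver. A hedged skeleton; the engine type, register
offset and all my_* names are purely illustrative:

static int my_fsi_probe(struct device *dev)
{
	struct fsi_device *fsi = to_fsi_dev(dev);
	__be32 reg;
	int rc;

	rc = fsi_device_read(fsi, 0x0, &reg, sizeof(reg));
	if (rc)
		return rc;

	dev_info(dev, "engine register 0: %08x\n", be32_to_cpu(reg));
	return 0;
}

static const struct fsi_device_id my_ids[] = {
	{ FSI_DEVICE_VERSIONED(0x22, 1) },
	{ }
};

static struct fsi_driver my_fsi_driver = {
	.id_table = my_ids,
	.drv = {
		.name	= "my-fsi-client",
		.bus	= &fsi_bus_type,
		.probe	= my_fsi_probe,
	},
};
module_fsi_driver(my_fsi_driver);
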
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index acff943..e619fae 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -219,12 +219,6 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
 	return NULL;
 }
 
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-	uuid_be_to_bin(uuid_str, (uuid_be *)to);
-	return 0;
-}
-
 static inline int disk_max_parts(struct gendisk *disk)
 {
 	if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -736,11 +730,6 @@ static inline dev_t blk_lookup_devt(const char *name, int partno)
 	dev_t devt = MKDEV(0, 0);
 	return devt;
 }
-
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-	return -EINVAL;
-}
 #endif /* CONFIG_BLOCK */
 
 #endif /* _LINUX_GENHD_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2b1a44f5..a89d37e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -41,7 +41,7 @@ struct vm_area_struct;
 #define ___GFP_WRITE		0x800000u
 #define ___GFP_KSWAPD_RECLAIM	0x1000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x4000000u
+#define ___GFP_NOLOCKDEP	0x2000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index c0d712d..f738d50 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -56,7 +56,14 @@ struct gpiod_lookup_table {
 	.flags = _flags,                                                  \
 }
 
+#ifdef CONFIG_GPIOLIB
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
+#else
+static inline
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
+static inline
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
+#endif
 
 #endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 661e5c2..082dc1b 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -167,7 +167,6 @@ static inline void hash_del_rcu(struct hlist_node *node)
 /**
  * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
  * same bucket in an rcu enabled hashtable
- * in a rcu enabled hashtable
  * @name: hashtable to iterate
  * @obj: the type * to use as a loop cursor for each entry
  * @member: the name of the hlist_node within the struct
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index f32d7c3..fc7aae6 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -233,12 +233,14 @@ struct hid_sensor_common {
 	atomic_t user_requested_state;
 	int poll_interval;
 	int raw_hystersis;
+	int latency_ms;
 	struct iio_trigger *trigger;
 	int timestamp_ns_scale;
 	struct hid_sensor_hub_attribute_info poll;
 	struct hid_sensor_hub_attribute_info report_state;
 	struct hid_sensor_hub_attribute_info power_state;
 	struct hid_sensor_hub_attribute_info sensitivity;
+	struct hid_sensor_hub_attribute_info report_latency;
 	struct work_struct work;
 };
 
@@ -276,5 +278,8 @@ s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
 
 int64_t hid_sensor_convert_timestamp(struct hid_sensor_common *st,
 				     int64_t raw_value);
+bool hid_sensor_batch_mode_supported(struct hid_sensor_common *st);
+int hid_sensor_set_report_latency(struct hid_sensor_common *st, int latency);
+int hid_sensor_get_report_latency(struct hid_sensor_common *st);
 
 #endif
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 761f862..76033e0 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -90,6 +90,8 @@
 #define HID_USAGE_SENSOR_ORIENT_TILT_Z				0x200481
 
 #define HID_USAGE_SENSOR_DEVICE_ORIENTATION			0x20008A
+#define HID_USAGE_SENSOR_RELATIVE_ORIENTATION			0x20008E
+#define HID_USAGE_SENSOR_GEOMAGNETIC_ORIENTATION		0x2000C1
 #define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX			0x200482
 #define HID_USAGE_SENSOR_ORIENT_QUATERNION			0x200483
 #define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX			0x200484
@@ -150,6 +152,9 @@
 #define HID_USAGE_SENSOR_PROP_REPORT_STATE			0x200316
 #define HID_USAGE_SENSOR_PROY_POWER_STATE			0x200319
 
+/* Batch mode selectors */
+#define HID_USAGE_SENSOR_PROP_REPORT_LATENCY			0x20031B
+
 /* Per data field properties */
 #define HID_USAGE_SENSOR_DATA_MOD_NONE					0x00
 #define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS		0x1000
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 8c5b10e..255edd5 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -452,11 +452,11 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
 }
 
 /* Precise sleep: */
-extern long hrtimer_nanosleep(struct timespec64 *rqtp,
-			      struct timespec __user *rmtp,
+
+extern int nanosleep_copyout(struct restart_block *, struct timespec *);
+extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
 			      const enum hrtimer_mode mode,
 			      const clockid_t clockid);
-extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
 
 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 				 struct task_struct *tsk);
diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
deleted file mode 100644
index 1c0134d..0000000
--- a/include/linux/i2c/twl4030-madc.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * twl4030_madc.h - Header for TWL4030 MADC
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * J Keerthy <j-keerthy@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef _TWL4030_MADC_H
-#define _TWL4030_MADC_H
-
-struct twl4030_madc_conversion_method {
-	u8 sel;
-	u8 avg;
-	u8 rbase;
-	u8 ctrl;
-};
-
-#define TWL4030_MADC_MAX_CHANNELS 16
-
-
-/*
- * twl4030_madc_request- madc request packet for channel conversion
- * @channels:	16 bit bitmap for individual channels
- * @do_avgP:	sample the input channel for 4 consecutive cycles
- * @method:	RT, SW1, SW2
- * @type:	Polling or interrupt based method
- * @raw:	Return raw value, do not convert it
- */
-
-struct twl4030_madc_request {
-	unsigned long channels;
-	bool do_avg;
-	u16 method;
-	u16 type;
-	bool active;
-	bool result_pending;
-	bool raw;
-	int rbuf[TWL4030_MADC_MAX_CHANNELS];
-	void (*func_cb)(int len, int channels, int *buf);
-};
-
-enum conversion_methods {
-	TWL4030_MADC_RT,
-	TWL4030_MADC_SW1,
-	TWL4030_MADC_SW2,
-	TWL4030_MADC_NUM_METHODS
-};
-
-enum sample_type {
-	TWL4030_MADC_WAIT,
-	TWL4030_MADC_IRQ_ONESHOT,
-	TWL4030_MADC_IRQ_REARM
-};
-
-#define TWL4030_MADC_CTRL1		0x00
-#define TWL4030_MADC_CTRL2		0x01
-
-#define TWL4030_MADC_RTSELECT_LSB	0x02
-#define TWL4030_MADC_SW1SELECT_LSB	0x06
-#define TWL4030_MADC_SW2SELECT_LSB	0x0A
-
-#define TWL4030_MADC_RTAVERAGE_LSB	0x04
-#define TWL4030_MADC_SW1AVERAGE_LSB	0x08
-#define TWL4030_MADC_SW2AVERAGE_LSB	0x0C
-
-#define TWL4030_MADC_CTRL_SW1		0x12
-#define TWL4030_MADC_CTRL_SW2		0x13
-
-#define TWL4030_MADC_RTCH0_LSB		0x17
-#define TWL4030_MADC_GPCH0_LSB		0x37
-
-#define TWL4030_MADC_MADCON	(1 << 0)	/* MADC power on */
-#define TWL4030_MADC_BUSY	(1 << 0)	/* MADC busy */
-/* MADC conversion completion */
-#define TWL4030_MADC_EOC_SW	(1 << 1)
-/* MADC SWx start conversion */
-#define TWL4030_MADC_SW_START	(1 << 5)
-#define TWL4030_MADC_ADCIN0	(1 << 0)
-#define TWL4030_MADC_ADCIN1	(1 << 1)
-#define TWL4030_MADC_ADCIN2	(1 << 2)
-#define TWL4030_MADC_ADCIN3	(1 << 3)
-#define TWL4030_MADC_ADCIN4	(1 << 4)
-#define TWL4030_MADC_ADCIN5	(1 << 5)
-#define TWL4030_MADC_ADCIN6	(1 << 6)
-#define TWL4030_MADC_ADCIN7	(1 << 7)
-#define TWL4030_MADC_ADCIN8	(1 << 8)
-#define TWL4030_MADC_ADCIN9	(1 << 9)
-#define TWL4030_MADC_ADCIN10	(1 << 10)
-#define TWL4030_MADC_ADCIN11	(1 << 11)
-#define TWL4030_MADC_ADCIN12	(1 << 12)
-#define TWL4030_MADC_ADCIN13	(1 << 13)
-#define TWL4030_MADC_ADCIN14	(1 << 14)
-#define TWL4030_MADC_ADCIN15	(1 << 15)
-
-/* Fixed channels */
-#define TWL4030_MADC_BTEMP	TWL4030_MADC_ADCIN1
-#define TWL4030_MADC_VBUS	TWL4030_MADC_ADCIN8
-#define TWL4030_MADC_VBKB	TWL4030_MADC_ADCIN9
-#define TWL4030_MADC_ICHG	TWL4030_MADC_ADCIN10
-#define TWL4030_MADC_VCHG	TWL4030_MADC_ADCIN11
-#define TWL4030_MADC_VBAT	TWL4030_MADC_ADCIN12
-
-/* Step size and prescaler ratio */
-#define TEMP_STEP_SIZE          147
-#define TEMP_PSR_R              100
-#define CURR_STEP_SIZE		147
-#define CURR_PSR_R1		44
-#define CURR_PSR_R2		88
-
-#define TWL4030_BCI_BCICTL1	0x23
-#define TWL4030_BCI_CGAIN	0x020
-#define TWL4030_BCI_MESBAT	(1 << 1)
-#define TWL4030_BCI_TYPEN	(1 << 4)
-#define TWL4030_BCI_ITHEN	(1 << 3)
-
-#define REG_BCICTL2             0x024
-#define TWL4030_BCI_ITHSENS	0x007
-
-/* Register and bits for GPBR1 register */
-#define TWL4030_REG_GPBR1		0x0c
-#define TWL4030_GPBR1_MADC_HFCLK_EN	(1 << 7)
-
-struct twl4030_madc_user_parms {
-	int channel;
-	int average;
-	int status;
-	u16 result;
-};
-
-int twl4030_madc_conversion(struct twl4030_madc_request *conv);
-int twl4030_get_madc_conversion(int channel_no);
-#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 6980ca3..dc152e4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -671,7 +671,7 @@ struct ide_port_ops {
 	void	(*init_dev)(ide_drive_t *);
 	void	(*set_pio_mode)(struct hwif_s *, ide_drive_t *);
 	void	(*set_dma_mode)(struct hwif_s *, ide_drive_t *);
-	int	(*reset_poll)(ide_drive_t *);
+	blk_status_t (*reset_poll)(ide_drive_t *);
 	void	(*pre_reset)(ide_drive_t *);
 	void	(*resetproc)(ide_drive_t *);
 	void	(*maskproc)(ide_drive_t *, int);
@@ -1092,7 +1092,7 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
-int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
+int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
 void ide_kill_rq(ide_drive_t *, struct request *);
 
 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1123,7 +1123,7 @@ extern int ide_devset_execute(ide_drive_t *drive,
 			      const struct ide_devset *setting, int arg);
 
 void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, int, unsigned int);
+int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
 
 void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
 void ide_tf_dump(const char *, struct ide_cmd *);
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 47eeec3..5e347a9 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -312,4 +312,41 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 	int *processed, unsigned int scale);
 
+/**
+ * iio_get_channel_ext_info_count() - get number of ext_info attributes
+ *				      connected to the channel.
+ * @chan:		The channel being queried
+ *
+ * Returns the number of ext_info attributes
+ */
+unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
+
+/**
+ * iio_read_channel_ext_info() - read ext_info attribute from a given channel
+ * @chan:		The channel being queried.
+ * @attr:		The ext_info attribute to read.
+ * @buf:		Where to store the attribute value. Assumed to hold
+ *			at least PAGE_SIZE bytes.
+ *
+ * Returns the number of bytes written to buf (perhaps w/o zero termination;
+ * it need not even be a string), or an error code.
+ */
+ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
+				  const char *attr, char *buf);
+
+/**
+ * iio_write_channel_ext_info() - write ext_info attribute to a given channel
+ * @chan:		The channel being written to.
+ * @attr:		The ext_info attribute to write.
+ * @buf:		The new attribute value. Strings need to be
+ *			zero-terminated, but the terminator should not be
+ *			included in @len.
+ * @len:		The size of the new attribute value.
+ *
+ * Returns the number of accepted bytes, which should be the same as len.
+ * An error code can also be returned.
+ */
+ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
+				   const char *buf, size_t len);
+
 #endif
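
A sketch of a consumer using the new ext_info accessors; the "powerdown"
attribute name is purely illustrative, and a full page is used because
the read side is documented to assume at least PAGE_SIZE:

	char *buf = (char *)__get_free_page(GFP_KERNEL);
	ssize_t len;

	if (!buf)
		return -ENOMEM;

	if (iio_get_channel_ext_info_count(chan)) {
		len = iio_read_channel_ext_info(chan, "powerdown", buf);
		if (len > 0)
			pr_debug("powerdown = %.*s\n", (int)len, buf);
	}
	free_page((unsigned long)buf);
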
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 3f5ea2e..d68bec2 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -352,10 +352,16 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
 #define INDIO_BUFFER_SOFTWARE		0x04
 #define INDIO_BUFFER_HARDWARE		0x08
 #define INDIO_EVENT_TRIGGERED		0x10
+#define INDIO_HARDWARE_TRIGGERED	0x20
 
 #define INDIO_ALL_BUFFER_MODES					\
 	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
 
+#define INDIO_ALL_TRIGGERED_MODES	\
+	(INDIO_BUFFER_TRIGGERED		\
+	 | INDIO_EVENT_TRIGGERED	\
+	 | INDIO_HARDWARE_TRIGGERED)
+
 #define INDIO_MAX_RAW_ELEMENTS		4
 
 struct iio_trigger; /* forward declaration */
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index 55535ae..fa7d786 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -10,6 +10,7 @@
 #define _STM32_TIMER_TRIGGER_H_
 
 #define TIM1_TRGO	"tim1_trgo"
+#define TIM1_TRGO2	"tim1_trgo2"
 #define TIM1_CH1	"tim1_ch1"
 #define TIM1_CH2	"tim1_ch2"
 #define TIM1_CH3	"tim1_ch3"
@@ -44,6 +45,7 @@
 #define TIM7_TRGO	"tim7_trgo"
 
 #define TIM8_TRGO	"tim8_trgo"
+#define TIM8_TRGO2	"tim8_trgo2"
 #define TIM8_CH1	"tim8_ch1"
 #define TIM8_CH2	"tim8_ch2"
 #define TIM8_CH3	"tim8_ch3"
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7f6952f..0e4647e 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -75,11 +75,17 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
 #endif
 
 #ifdef CONFIG_IMA_APPRAISE
+extern bool is_ima_appraise_enabled(void);
 extern void ima_inode_post_setattr(struct dentry *dentry);
 extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
 		       const void *xattr_value, size_t xattr_value_len);
 extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
 #else
+static inline bool is_ima_appraise_enabled(void)
+{
+	return 0;
+}
+
 static inline void ima_inode_post_setattr(struct dentry *dentry)
 {
 	return;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a6fba48..37f8e35 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -703,6 +703,12 @@ static inline void init_irq_proc(void)
 }
 #endif
 
+#ifdef CONFIG_IRQ_TIMINGS
+void irq_timings_enable(void);
+void irq_timings_disable(void);
+u64 irq_timings_next_event(u64 now);
+#endif
+
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
 int arch_show_interrupts(struct seq_file *p, int prec);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index f753e78..69f4e94 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -52,6 +52,7 @@ struct iomap {
 #define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
 #define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
 #define IOMAP_DIRECT		(1 << 4) /* direct I/O */
+#define IOMAP_NOWAIT		(1 << 5) /* Don't wait for writeback */
 
 struct iomap_ops {
 	/*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f887351..00db35b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -22,6 +22,7 @@
 #include <linux/topology.h>
 #include <linux/wait.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
 #include <asm/irq.h>
 #include <asm/ptrace.h>
@@ -136,6 +137,9 @@ struct irq_domain;
  * @affinity:		IRQ affinity on SMP. If this is an IPI
  *			related irq, then this is the mask of the
  *			CPUs to which an IPI can be sent.
+ * @effective_affinity:	The effective IRQ affinity on SMP as some irq
+ *			chips do not allow multi CPU destinations.
+ *			A subset of @affinity.
  * @msi_desc:		MSI descriptor
  * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
  */
@@ -147,6 +151,9 @@ struct irq_common_data {
 	void			*handler_data;
 	struct msi_desc		*msi_desc;
 	cpumask_var_t		affinity;
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	cpumask_var_t		effective_affinity;
+#endif
 #ifdef CONFIG_GENERIC_IRQ_IPI
 	unsigned int		ipi_offset;
 #endif
@@ -199,6 +206,10 @@ struct irq_data {
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
  * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
+ * IRQD_IRQ_STARTED		- Startup state of the interrupt
+ * IRQD_MANAGED_SHUTDOWN	- Interrupt was shutdown due to empty affinity
+ *				  mask. Applies only to affinity managed irqs.
+ * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -216,6 +227,9 @@ enum {
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
 	IRQD_AFFINITY_MANAGED		= (1 << 21),
+	IRQD_IRQ_STARTED		= (1 << 22),
+	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
+	IRQD_SINGLE_TARGET		= (1 << 24),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -264,6 +278,20 @@ static inline bool irqd_is_level_type(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_LEVEL;
 }
 
+/*
+ * Must only be called from irqchip.irq_set_affinity() or low level
+ * hierarchy domain allocation functions.
+ */
+static inline void irqd_set_single_target(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_SINGLE_TARGET;
+}
+
+static inline bool irqd_is_single_target(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
+}
+
 static inline bool irqd_is_wakeup_set(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
@@ -329,6 +357,16 @@ static inline void irqd_clr_activated(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
 }
 
+static inline bool irqd_is_started(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
+}
+
+static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -478,14 +516,21 @@ extern int irq_set_affinity_locked(struct irq_data *data,
 				   const struct cpumask *cpumask, bool force);
 extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
 extern void irq_migrate_all_off_this_cpu(void);
+extern int irq_affinity_online_cpu(unsigned int cpu);
+#else
+# define irq_affinity_online_cpu	NULL
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
 void irq_move_masked_irq(struct irq_data *data);
+void irq_force_complete_move(struct irq_desc *desc);
 #else
 static inline void irq_move_irq(struct irq_data *data) { }
 static inline void irq_move_masked_irq(struct irq_data *data) { }
+static inline void irq_force_complete_move(struct irq_desc *desc) { }
 #endif
 
 extern int no_irq_affinity;
@@ -727,6 +772,29 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
 	return d->common->affinity;
 }
 
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+static inline
+struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+{
+	return d->common->effective_affinity;
+}
+static inline void irq_data_update_effective_affinity(struct irq_data *d,
+						      const struct cpumask *m)
+{
+	cpumask_copy(d->common->effective_affinity, m);
+}
+#else
+static inline void irq_data_update_effective_affinity(struct irq_data *d,
+						      const struct cpumask *m)
+{
+}
+static inline
+struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+{
+	return d->common->affinity;
+}
+#endif
+
 unsigned int arch_dynirq_lower_bound(unsigned int from);
 
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
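The effective-affinity accessors added above are meant to be filled in by irqchips that can only deliver to one CPU at a time. A minimal, hedged sketch of such an irq_set_affinity callback follows; foo_irq_set_affinity() and foo_route_hwirq_to_cpu() are hypothetical names, while the accessors, cpumask helpers and IRQ_SET_MASK_OK_DONE are existing kernel API.

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Stand-in for a chip-specific routing register write. */
static void foo_route_hwirq_to_cpu(unsigned long hwirq, unsigned int cpu) { }

/*
 * Hypothetical irqchip callback: route the interrupt to a single CPU
 * and record that CPU as the effective affinity.
 */
static int foo_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	unsigned int cpu = cpumask_first(mask_val);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	foo_route_hwirq_to_cpu(d->hwirq, cpu);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}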
@@ -951,6 +1019,14 @@ int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
 			     unsigned int clr, unsigned int set);
 
+struct irq_chip_generic *
+devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
+			    unsigned int irq_base, void __iomem *reg_base,
+			    irq_flow_handler_t handler);
+int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
+				u32 msk, enum irq_gc_flags flags,
+				unsigned int clr, unsigned int set);
+
 struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
 
 int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
@@ -967,6 +1043,19 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
 					 handler, clr, set, flags);	\
 })
 
+static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
+{
+	kfree(gc);
+}
+
+static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
+					    u32 msk, unsigned int clr,
+					    unsigned int set)
+{
+	irq_remove_generic_chip(gc, msk, clr, set);
+	irq_free_generic_chip(gc);
+}
+
 static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
 {
 	return container_of(d->chip, struct irq_chip_type, chip);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index fffb912..1fa293a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -417,6 +417,10 @@
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
 
+#define ICH_VMCR_ACK_CTL_SHIFT		2
+#define ICH_VMCR_ACK_CTL_MASK		(1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT		3
+#define ICH_VMCR_FIQ_EN_MASK		(1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT		4
 #define ICH_VMCR_CBPR_MASK		(1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT		9
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index dc30f3d..d3453ee 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -25,7 +25,18 @@
 #define GICC_ENABLE			0x1
 #define GICC_INT_PRI_THRESHOLD		0xf0
 
-#define GIC_CPU_CTRL_EOImodeNS		(1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT	0
+#define GIC_CPU_CTRL_EnableGrp0		(1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT	1
+#define GIC_CPU_CTRL_EnableGrp1		(1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT	2
+#define GIC_CPU_CTRL_AckCtl		(1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT	3
+#define GIC_CPU_CTRL_FIQEn		(1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT		4
+#define GIC_CPU_CTRL_CBPR		(1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT	9
+#define GIC_CPU_CTRL_EOImodeNS		(1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 
 #define GICC_IAR_INT_ID_MASK		0x3ff
 #define GICC_INT_SPURIOUS		1023
@@ -84,8 +95,19 @@
 #define GICH_LR_EOI			(1 << 19)
 #define GICH_LR_HW			(1 << 31)
 
-#define GICH_VMCR_CTRL_SHIFT		0
-#define GICH_VMCR_CTRL_MASK		(0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT	0
+#define GICH_VMCR_ENABLE_GRP0_MASK	(1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT	1
+#define GICH_VMCR_ENABLE_GRP1_MASK	(1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT		2
+#define GICH_VMCR_ACK_CTL_MASK		(1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT		3
+#define GICH_VMCR_FIQ_EN_MASK		(1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT		4
+#define GICH_VMCR_CBPR_MASK		(1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT	9
+#define GICH_VMCR_EOI_MODE_MASK		(1 << GICH_VMCR_EOI_MODE_SHIFT)
+
 #define GICH_VMCR_PRIMASK_SHIFT		27
 #define GICH_VMCR_PRIMASK_MASK		(0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT	21
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c9be579..d425a3a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -46,6 +46,7 @@ struct pt_regs;
  * @rcu:		rcu head for delayed free
  * @kobj:		kobject used to represent this struct in sysfs
  * @dir:		/proc/irq/ procfs entry
+ * @debugfs_file:	dentry for the debugfs file
  * @name:		flow handler name for /proc/interrupts output
  */
 struct irq_desc {
@@ -88,6 +89,9 @@ struct irq_desc {
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+	struct dentry		*debugfs_file;
+#endif
 #ifdef CONFIG_SPARSE_IRQ
 	struct rcu_head		rcu;
 	struct kobject		kobj;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 9f36160..cac77a5 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -130,6 +130,7 @@ struct irq_domain_chip_generic;
  * @host_data: private data pointer for use by owner.  Not touched by irq_domain
  *             core code.
  * @flags: host per irq_domain flags
+ * @mapcount: The number of mapped interrupts
  *
  * Optional elements
  * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -138,6 +139,7 @@ struct irq_domain_chip_generic;
  *      setting up one or more generic chips for interrupt controllers
  *      drivers using the generic chip library which uses this pointer.
  * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @debugfs_file: dentry for the domain debugfs file
  *
  * Revmap data, used internally by irq_domain
  * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -152,6 +154,7 @@ struct irq_domain {
 	const struct irq_domain_ops *ops;
 	void *host_data;
 	unsigned int flags;
+	unsigned int mapcount;
 
 	/* Optional data */
 	struct fwnode_handle *fwnode;
@@ -160,6 +163,9 @@ struct irq_domain {
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 	struct irq_domain *parent;
 #endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+	struct dentry		*debugfs_file;
+#endif
 
 	/* reverse map data. The linear map gets appended to the irq_domain */
 	irq_hw_number_t hwirq_max;
@@ -174,8 +180,8 @@ enum {
 	/* Irq domain is hierarchical */
 	IRQ_DOMAIN_FLAG_HIERARCHY	= (1 << 0),
 
-	/* Core calls alloc/free recursive through the domain hierarchy. */
-	IRQ_DOMAIN_FLAG_AUTO_RECURSIVE	= (1 << 1),
+	/* Irq domain name was allocated in __irq_domain_add() */
+	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 6),
 
 	/* Irq domain is an IPI domain with virq per cpu */
 	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
@@ -203,7 +209,33 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
 }
 
 #ifdef CONFIG_IRQ_DOMAIN
-struct fwnode_handle *irq_domain_alloc_fwnode(void *data);
+struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
+						const char *name, void *data);
+
+enum {
+	IRQCHIP_FWNODE_REAL,
+	IRQCHIP_FWNODE_NAMED,
+	IRQCHIP_FWNODE_NAMED_ID,
+};
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+{
+	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
+}
+
+static inline
+struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+{
+	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
+					 NULL);
+}
+
+static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+{
+	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data);
+}
+
 void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
 struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 				    irq_hw_number_t hwirq_max, int direct_max,
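The named-fwnode helpers above give software-constructed domains (for example MSI domains with no firmware node) an identifiable handle, which the new debugfs support can use as a name. A hedged caller sketch, assuming irq_domain_create_linear() from the existing irqdomain API; foo_create_msi_domain(), foo_domain_ops and the "FOO-MSI" name are illustrative only.

#include <linux/irqdomain.h>

static const struct irq_domain_ops foo_domain_ops;	/* hypothetical; .map/.alloc go here */

static struct irq_domain *foo_create_msi_domain(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;

	fwnode = irq_domain_alloc_named_fwnode("FOO-MSI");
	if (!fwnode)
		return NULL;

	domain = irq_domain_create_linear(fwnode, 32, &foo_domain_ops, NULL);
	if (!domain)
		irq_domain_free_fwnode(fwnode);

	return domain;
}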
@@ -238,6 +270,9 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
 	return fwnode && fwnode->type == FWNODE_IRQCHIP;
 }
 
+extern void irq_domain_update_bus_token(struct irq_domain *domain,
+					enum irq_domain_bus_token bus_token);
+
 static inline
 struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
 					    enum irq_domain_bus_token bus_token)
@@ -410,7 +445,7 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
 				       NULL);
 }
 
-extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs, void *arg);
 extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 36872fb..734377a 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
 
+#ifndef __jiffy_arch_data
+#define __jiffy_arch_data
+#endif
+
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
  * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
+extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 13bc08a..1c91f26 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,9 +490,13 @@ extern int root_mountflags;
 
 extern bool early_boot_irqs_disabled;
 
-/* Values used for system_state */
+/*
+ * Values used for system_state. Ordering of the states must not be changed
+ * as code checks for <, <=, >, >= STATE.
+ */
 extern enum system_states {
 	SYSTEM_BOOTING,
+	SYSTEM_SCHEDULING,
 	SYSTEM_RUNNING,
 	SYSTEM_HALT,
 	SYSTEM_POWER_OFF,
diff --git a/include/linux/key.h b/include/linux/key.h
index 0c9b93b..0441141 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  *
- * See Documentation/security/keys.txt for information on keys/keyrings.
+ * See Documentation/security/keys/core.rst for information on keys/keyrings.
  */
 
 #ifndef _LINUX_KEY_H
@@ -173,7 +173,6 @@ struct key {
 #ifdef KEY_DEBUGGING
 	unsigned		magic;
 #define KEY_DEBUG_MAGIC		0x18273645u
-#define KEY_DEBUG_MAGIC_X	0xf8e9dacbu
 #endif
 
 	unsigned long		flags;		/* status flags (change with bitops) */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ca85cb8..eeab34b 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -217,11 +217,9 @@ extern struct kobject *firmware_kobj;
 int kobject_uevent(struct kobject *kobj, enum kobject_action action);
 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 			char *envp[]);
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count);
 
 __printf(2, 3)
 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...);
 
-int kobject_action_type(const char *buf, size_t count,
-			enum kobject_action *type);
-
 #endif /* _KOBJECT_H_ */
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 0c1de05..76c2fbc 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -46,7 +46,7 @@ struct kvm_kernel_irqfd_resampler {
 struct kvm_kernel_irqfd {
 	/* Used for MSI fast-path */
 	struct kvm *kvm;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	/* Update side is protected by irqfds.lock */
 	struct kvm_kernel_irq_routing_entry irq_entry;
 	seqcount_t irq_entry_sc;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c9a69fc..9e66332 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -19,7 +19,7 @@
  *
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
- *  as Documentation/DocBook/libata.*
+ *  as Documentation/driver-api/libata.rst
  *
  */
 
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 171baa9..d117381 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -110,6 +110,25 @@ static inline void init_llist_head(struct llist_head *list)
 	for ((pos) = (node); pos; (pos) = (pos)->next)
 
 /**
+ * llist_for_each_safe - iterate over some deleted entries of a lock-less list
+ *			 safe against removal of list entry
+ * @pos:	the &struct llist_node to use as a loop cursor
+ * @n:		another &struct llist_node to use as temporary storage
+ * @node:	the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from the list, so start with an
+ * entry instead of the list head.
+ *
+ * If used on entries just deleted from the lock-less list, the
+ * traversal order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order yourself before traversing.
+ */
+#define llist_for_each_safe(pos, n, node)			\
+	for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
+
+/**
  * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
  * @pos:	the type * to use as a loop cursor.
 * @node:	the first entry of deleted list entries.
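A hedged usage sketch of the new iterator: detach a batch with llist_del_all() and free each entry while walking it. The foo_item type and foo_drain() wrapper are made up for illustration; llist_del_all(), llist_entry() and kfree() are existing API.

#include <linux/llist.h>
#include <linux/slab.h>

struct foo_item {
	struct llist_node node;
	int payload;
};

static void foo_drain(struct llist_head *head)
{
	struct llist_node *pos, *n;
	struct llist_node *batch = llist_del_all(head);	/* detach the whole list first */

	/* Safe even though each entry is freed inside the loop body. */
	llist_for_each_safe(pos, n, batch)
		kfree(llist_entry(pos, struct foo_item, node));
}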
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e58e577..22b5d4e 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -21,6 +21,7 @@
 #include <linux/path.h>
 #include <linux/key.h>
 #include <linux/skbuff.h>
+#include <rdma/ib_verbs.h>
 
 struct lsm_network_audit {
 	int netif;
@@ -45,6 +46,16 @@ struct lsm_ioctlop_audit {
 	u16 cmd;
 };
 
+struct lsm_ibpkey_audit {
+	u64	subnet_prefix;
+	u16	pkey;
+};
+
+struct lsm_ibendport_audit {
+	char	dev_name[IB_DEVICE_NAME_MAX];
+	u8	port;
+};
+
 /* Auxiliary data to use in generating the audit record. */
 struct common_audit_data {
 	char type;
@@ -60,6 +71,8 @@ struct common_audit_data {
 #define LSM_AUDIT_DATA_DENTRY	10
 #define LSM_AUDIT_DATA_IOCTL_OP	11
 #define LSM_AUDIT_DATA_FILE	12
+#define LSM_AUDIT_DATA_IBPKEY	13
+#define LSM_AUDIT_DATA_IBENDPORT 14
 	union 	{
 		struct path path;
 		struct dentry *dentry;
@@ -77,6 +90,8 @@ struct common_audit_data {
 		char *kmod_name;
 		struct lsm_ioctlop_audit *op;
 		struct file *file;
+		struct lsm_ibpkey_audit *ibpkey;
+		struct lsm_ibendport_audit *ibendport;
 	} u;
 	/* this union contains LSM specific data */
 	union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 080f34e..7a86925 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -8,6 +8,7 @@
  * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
  * Copyright (C) 2015 Intel Corporation.
  * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2016 Mellanox Technologies
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License as published by
@@ -29,6 +30,8 @@
 #include <linux/rculist.h>
 
 /**
+ * union security_list_options - Linux Security Module hook function list
+ *
  * Security hooks for program execution operations.
  *
  * @bprm_set_creds:
@@ -193,8 +196,8 @@
  *	@value will be set to the allocated attribute value.
  *	@len will be set to the length of the value.
  *	Returns 0 if @name and @value have been successfully set,
- *		-EOPNOTSUPP if no security attribute is needed, or
- *		-ENOMEM on memory allocation failure.
+ *	-EOPNOTSUPP if no security attribute is needed, or
+ *	-ENOMEM on memory allocation failure.
  * @inode_create:
  *	Check permission to create a regular file.
  *	@dir contains inode structure of the parent of the new file.
@@ -510,8 +513,7 @@
  *	process @tsk.  Note that this hook is sometimes called from interrupt.
  *	Note that the fown_struct, @fown, is never outside the context of a
  *	struct file, so the file structure (and associated security information)
- *	can always be obtained:
- *		container_of(fown, struct file, f_owner)
+ *	can always be obtained: container_of(fown, struct file, f_owner)
  *	@tsk contains the structure of task receiving signal.
  *	@fown contains the file owner information.
  *	@sig is the signal that will be sent.  When 0, kernel sends SIGIO.
@@ -521,7 +523,7 @@
  *	to receive an open file descriptor via socket IPC.
  *	@file contains the file structure being received.
  *	Return 0 if permission is granted.
- * @file_open
+ * @file_open:
  *	Save open-time permission checking state for later use upon
  *	file_permission, and recheck access if anything has changed
  *	since inode_permission.
@@ -911,6 +913,26 @@
 *	@security pointer to the TUN device's security structure.
  *	@security pointer to the TUN devices's security structure.
  *
+ * Security hooks for Infiniband
+ *
+ * @ib_pkey_access:
+ *	Check permission to access a pkey when modifying a QP.
+ *	@subnet_prefix the subnet prefix of the port being used.
+ *	@pkey the pkey to be accessed.
+ *	@sec pointer to a security structure.
+ * @ib_endport_manage_subnet:
+ *	Check permissions to send and receive SMPs on an end port.
+ *	@dev_name the IB device name (e.g. mlx4_0).
+ *	@port_num the port number.
+ *	@sec pointer to a security structure.
+ * @ib_alloc_security:
+ *	Allocate a security structure for Infiniband objects.
+ *	@sec pointer to a security structure pointer.
+ *	Returns 0 on success, non-zero on failure
+ * @ib_free_security:
+ *	Deallocate an Infiniband security structure.
+ *	@sec contains the security structure to be freed.
+ *
  * Security hooks for XFRM operations.
  *
  * @xfrm_policy_alloc_security:
@@ -1143,7 +1165,7 @@
  *	@sma contains the semaphore structure.  May be NULL.
  *	@cmd contains the operation to be performed.
  *	Return 0 if permission is granted.
- * @sem_semop
+ * @sem_semop:
  *	Check permissions before performing operations on members of the
  *	semaphore set @sma.  If the @alter flag is nonzero, the semaphore set
  *	may be modified.
@@ -1153,20 +1175,20 @@
  *	@alter contains the flag indicating whether changes are to be made.
  *	Return 0 if permission is granted.
  *
- * @binder_set_context_mgr
+ * @binder_set_context_mgr:
  *	Check whether @mgr is allowed to be the binder context manager.
  *	@mgr contains the task_struct for the task being registered.
  *	Return 0 if permission is granted.
- * @binder_transaction
+ * @binder_transaction:
  *	Check whether @from is allowed to invoke a binder transaction call
  *	to @to.
  *	@from contains the task_struct for the sending task.
  *	@to contains the task_struct for the receiving task.
- * @binder_transfer_binder
+ * @binder_transfer_binder:
  *	Check whether @from is allowed to transfer a binder reference to @to.
  *	@from contains the task_struct for the sending task.
  *	@to contains the task_struct for the receiving task.
- * @binder_transfer_file
+ * @binder_transfer_file:
  *	Check whether @from is allowed to transfer @file to @to.
  *	@from contains the task_struct for the sending task.
  *	@file contains the struct file being transferred.
@@ -1214,7 +1236,7 @@
  *	@cred contains the credentials to use.
  *	@ns contains the user namespace we want the capability in
  *	@cap contains the capability <include/linux/capability.h>.
- *	@audit: Whether to write an audit message or not
+ *	@audit contains whether to write an audit message or not
  *	Return 0 if the capability is granted for @tsk.
  * @syslog:
  *	Check permission before accessing the kernel message ring or changing
@@ -1336,9 +1358,7 @@
  *	@inode we wish to get the security context of.
  *	@ctx is a pointer in which to place the allocated security context.
  *	@ctxlen points to the place to put the length of @ctx.
- * This is the main security structure.
  */
-
 union security_list_options {
 	int (*binder_set_context_mgr)(struct task_struct *mgr);
 	int (*binder_transaction)(struct task_struct *from,
@@ -1388,7 +1408,9 @@ union security_list_options {
 				unsigned long kern_flags,
 				unsigned long *set_kern_flags);
 	int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
-					struct super_block *newsb);
+					struct super_block *newsb,
+					unsigned long kern_flags,
+					unsigned long *set_kern_flags);
 	int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
 	int (*dentry_init_security)(struct dentry *dentry, int mode,
 					const struct qstr *name, void **ctx,
@@ -1620,6 +1642,14 @@ union security_list_options {
 	int (*tun_dev_open)(void *security);
 #endif	/* CONFIG_SECURITY_NETWORK */
 
+#ifdef CONFIG_SECURITY_INFINIBAND
+	int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey);
+	int (*ib_endport_manage_subnet)(void *sec, const char *dev_name,
+					u8 port_num);
+	int (*ib_alloc_security)(void **sec);
+	void (*ib_free_security)(void *sec);
+#endif	/* CONFIG_SECURITY_INFINIBAND */
+
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
 					  struct xfrm_user_sec_ctx *sec_ctx,
@@ -1851,6 +1881,12 @@ struct security_hook_heads {
 	struct list_head tun_dev_attach;
 	struct list_head tun_dev_open;
 #endif	/* CONFIG_SECURITY_NETWORK */
+#ifdef CONFIG_SECURITY_INFINIBAND
+	struct list_head ib_pkey_access;
+	struct list_head ib_endport_manage_subnet;
+	struct list_head ib_alloc_security;
+	struct list_head ib_free_security;
+#endif	/* CONFIG_SECURITY_INFINIBAND */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	struct list_head xfrm_policy_alloc_security;
 	struct list_head xfrm_policy_clone_security;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 4ce24a3..8098695 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index cde56cf..965b027 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -119,6 +119,17 @@ enum axp20x_variants {
 #define AXP806_BUS_ADDR_EXT		0xfe
 #define AXP806_REG_ADDR_EXT		0xff
 
+#define AXP803_POLYPHASE_CTRL		0x14
+#define AXP803_FLDO1_V_OUT		0x1c
+#define AXP803_FLDO2_V_OUT		0x1d
+#define AXP803_DCDC1_V_OUT		0x20
+#define AXP803_DCDC2_V_OUT		0x21
+#define AXP803_DCDC3_V_OUT		0x22
+#define AXP803_DCDC4_V_OUT		0x23
+#define AXP803_DCDC5_V_OUT		0x24
+#define AXP803_DCDC6_V_OUT		0x25
+#define AXP803_DCDC_FREQ_CTRL		0x3b
+
 /* Interrupt */
 #define AXP152_IRQ1_EN			0x40
 #define AXP152_IRQ2_EN			0x41
@@ -350,6 +361,32 @@ enum {
 	AXP809_REG_ID_MAX,
 };
 
+enum {
+	AXP803_DCDC1 = 0,
+	AXP803_DCDC2,
+	AXP803_DCDC3,
+	AXP803_DCDC4,
+	AXP803_DCDC5,
+	AXP803_DCDC6,
+	AXP803_DC1SW,
+	AXP803_ALDO1,
+	AXP803_ALDO2,
+	AXP803_ALDO3,
+	AXP803_DLDO1,
+	AXP803_DLDO2,
+	AXP803_DLDO3,
+	AXP803_DLDO4,
+	AXP803_ELDO1,
+	AXP803_ELDO2,
+	AXP803_ELDO3,
+	AXP803_FLDO1,
+	AXP803_FLDO2,
+	AXP803_RTC_LDO,
+	AXP803_LDO_IO0,
+	AXP803_LDO_IO1,
+	AXP803_REG_ID_MAX,
+};
+
 /* IRQs */
 enum {
 	AXP152_IRQ_LDO0IN_CONNECT = 1,
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 5c9a1d4..6dec438 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -250,6 +250,7 @@ enum tps65917_regulators {
 	TPS65917_REG_SMPS3,
 	TPS65917_REG_SMPS4,
 	TPS65917_REG_SMPS5,
+	TPS65917_REG_SMPS12,
 	/* LDO regulators */
 	TPS65917_REG_LDO1,
 	TPS65917_REG_LDO2,
@@ -317,6 +318,7 @@ enum tps65917_external_requestor_id {
 	TPS65917_EXTERNAL_REQSTR_ID_SMPS3,
 	TPS65917_EXTERNAL_REQSTR_ID_SMPS4,
 	TPS65917_EXTERNAL_REQSTR_ID_SMPS5,
+	TPS65917_EXTERNAL_REQSTR_ID_SMPS12,
 	TPS65917_EXTERNAL_REQSTR_ID_LDO1,
 	TPS65917_EXTERNAL_REQSTR_ID_LDO2,
 	TPS65917_EXTERNAL_REQSTR_ID_LDO3,
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 4a0abbc..ce7346e 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -34,6 +34,7 @@
 #define TIM_CR1_DIR	BIT(4)  /* Counter Direction	   */
 #define TIM_CR1_ARPE	BIT(7)	/* Auto-reload Preload Ena */
 #define TIM_CR2_MMS	(BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2	GENMASK(23, 20) /* Master mode selection 2 */
 #define TIM_SMCR_SMS	(BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
 #define TIM_SMCR_TS	(BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
 #define TIM_DIER_UIE	BIT(0)	/* Update interrupt	   */
@@ -60,6 +61,7 @@
 
 #define MAX_TIM_PSC		0xFFFF
 #define TIM_CR2_MMS_SHIFT	4
+#define TIM_CR2_MMS2_SHIFT	20
 #define TIM_SMCR_TS_SHIFT	4
 #define TIM_BDTR_BKF_MASK	0xF
 #define TIM_BDTR_BKF_SHIFT	16
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index a1520d8..26e8f8c 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -13,15 +13,15 @@
 #define tmio_ioread16(addr) readw(addr)
 #define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
 #define tmio_ioread32(addr) \
-	(((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16))
+	(((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
 
 #define tmio_iowrite8(val, addr) writeb((val), (addr))
 #define tmio_iowrite16(val, addr) writew((val), (addr))
 #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
 #define tmio_iowrite32(val, addr) \
 	do { \
-	writew((val),       (addr)); \
-	writew((val) >> 16, (addr) + 2); \
+		writew((val),       (addr)); \
+		writew((val) >> 16, (addr) + 2); \
 	} while (0)
 
 #define CNF_CMD     0x04
@@ -55,57 +55,57 @@
 	} while (0)
 
 /* tmio MMC platform flags */
-#define TMIO_MMC_WRPROTECT_DISABLE	(1 << 0)
+#define TMIO_MMC_WRPROTECT_DISABLE	BIT(0)
 /*
  * Some controllers can support a 2-byte block size when the bus width
  * is configured in 4-bit mode.
  */
-#define TMIO_MMC_BLKSZ_2BYTES		(1 << 1)
+#define TMIO_MMC_BLKSZ_2BYTES		BIT(1)
 /*
  * Some controllers can support SDIO IRQ signalling.
  */
-#define TMIO_MMC_SDIO_IRQ		(1 << 2)
+#define TMIO_MMC_SDIO_IRQ		BIT(2)
 
-/* Some features are only available or tested on RCar Gen2 or later */
-#define TMIO_MMC_MIN_RCAR2		(1 << 3)
+/* Some features are only available or tested on R-Car Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2		BIT(3)
 
 /*
  * Some controllers require waiting for the SD bus to become
  * idle before writing to some registers.
  */
-#define TMIO_MMC_HAS_IDLE_WAIT		(1 << 4)
+#define TMIO_MMC_HAS_IDLE_WAIT		BIT(4)
 /*
  * A GPIO is used for card hotplug detection. We need an extra flag for this,
  * because 0 is a valid GPIO number too, and requiring users to specify
  * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
  */
-#define TMIO_MMC_USE_GPIO_CD		(1 << 5)
+#define TMIO_MMC_USE_GPIO_CD		BIT(5)
 
 /*
 * Some controllers don't have registers above 0x100. This flag is
 * used to check the accessibility of
  * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
  */
-#define TMIO_MMC_HAVE_HIGH_REG		(1 << 6)
+#define TMIO_MMC_HAVE_HIGH_REG		BIT(6)
 
 /*
  * Some controllers have CMD12 automatically
  * issue/non-issue register
  */
-#define TMIO_MMC_HAVE_CMD12_CTRL	(1 << 7)
+#define TMIO_MMC_HAVE_CMD12_CTRL	BIT(7)
 
 /* Controller has some SDIO status bits which must be 1 */
-#define TMIO_MMC_SDIO_STATUS_SETBITS	(1 << 8)
+#define TMIO_MMC_SDIO_STATUS_SETBITS	BIT(8)
 
 /*
  * Some controllers have a 32-bit wide data port register
  */
-#define TMIO_MMC_32BIT_DATA_PORT	(1 << 9)
+#define TMIO_MMC_32BIT_DATA_PORT	BIT(9)
 
 /*
 * Some controllers allow setting the SDx actual clock
  */
-#define TMIO_MMC_CLK_ACTUAL		(1 << 10)
+#define TMIO_MMC_CLK_ACTUAL		BIT(10)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -146,9 +146,9 @@ struct tmio_nand_data {
 
 struct tmio_fb_data {
 	int			(*lcd_set_power)(struct platform_device *fb_dev,
-								bool on);
+						 bool on);
 	int			(*lcd_mode)(struct platform_device *fb_dev,
-					const struct fb_videomode *mode);
+					    const struct fb_videomode *mode);
 	int			num_modes;
 	struct fb_videomode	*modes;
 
@@ -157,5 +157,4 @@ struct tmio_fb_data {
 	int			width;
 };
 
-
 #endif
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index ffb21e7..deffdcd 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -879,7 +879,7 @@ struct tps65910_board {
 	bool en_ck32k_xtal;
 	bool en_dev_slp;
 	bool pm_off;
-	struct tps65910_sleep_keepon_data *slp_keepon;
+	struct tps65910_sleep_keepon_data slp_keepon;
 	bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
 	unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
 	struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 762b5fe..58751ea 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -54,6 +54,7 @@
 #define VHOST_NET_MINOR		238
 #define UHID_MINOR		239
 #define USERIO_MINOR		240
+#define VHOST_VSOCK_MINOR	241
 #define MISC_DYNAMIC_MINOR	255
 
 struct device;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f6..8e2828d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
 	u16	rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 32de072..edafedb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -766,6 +766,12 @@ enum {
 	MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+	MLX5_CAP_UMR_FENCE_STRONG	= 0x0,
+	MLX5_CAP_UMR_FENCE_SMALL	= 0x1,
+	MLX5_CAP_UMR_FENCE_NONE		= 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_202[0x1];
 	u8         ipoib_enhanced_offloads[0x1];
 	u8         ipoib_basic_offloads[0x1];
-	u8         reserved_at_205[0xa];
+	u8         reserved_at_205[0x5];
+	u8         umr_fence[0x2];
+	u8         reserved_at_20c[0x3];
 	u8         drain_sigerr[0x1];
 	u8         cmdif_checksum[0x2];
 	u8         sigerr_cqe[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7cb17c6..6f543a47 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
@@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma);
 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
 #endif
 
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-					     unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-					   unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSUP) &&
-		(vma->vm_end == addr) &&
-		!vma_growsup(vma->vm_next, addr);
-}
-
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 				pgoff_t offset,
 				unsigned long size);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
 	return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_start = vma->vm_start;
+
+	if (vma->vm_flags & VM_GROWSDOWN) {
+		vm_start -= stack_guard_gap;
+		if (vm_start > vma->vm_start)
+			vm_start = 0;
+	}
+	return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_end = vma->vm_end;
+
+	if (vma->vm_flags & VM_GROWSUP) {
+		vm_end += stack_guard_gap;
+		if (vm_end < vma->vm_end)
+			vm_end = -PAGE_SIZE;
+	}
+	return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -2327,6 +2324,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
 #define FOLL_COW	0x4000	/* internal GUP flag */
 
+static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+{
+	if (vm_fault & VM_FAULT_OOM)
+		return -ENOMEM;
+	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+		return -EFAULT;
+	return 0;
+}
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
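The new vm_fault_to_errno() helper centralizes the VM_FAULT_* to errno translation that several get_user_pages()-style callers previously open-coded. A hedged sketch of a caller; foo_fault_in_page() is hypothetical, while handle_mm_fault(), FAULT_FLAG_KILLABLE and VM_FAULT_ERROR are the in-tree API at this point.

#include <linux/mm.h>

/* Hypothetical caller: fault one page in and translate failures to an errno. */
static int foo_fault_in_page(struct vm_area_struct *vma, unsigned long addr,
			     unsigned int foll_flags)
{
	int ret = handle_mm_fault(vma, addr, FAULT_FLAG_KILLABLE);

	if (ret & VM_FAULT_ERROR)
		return vm_fault_to_errno(ret, foll_flags);

	return 0;
}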
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 136dfdf..fc412fb 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -14,6 +14,10 @@
 
 #include <asm/page.h>
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#include <asm/tlbbatch.h>
+#endif
+
 #define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
 #define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
 		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
@@ -67,12 +71,15 @@ struct page_frag {
 struct tlbflush_unmap_batch {
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	/*
-	 * Each bit set is a CPU that potentially has a TLB entry for one of
-	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 * The arch code makes the following promise: generic code can modify a
+	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
+	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
+	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
+	 * returns.
 	 */
-	struct cpumask cpumask;
+	struct arch_tlbflush_unmap_batch arch;
 
-	/* True if any bit in cpumask is set */
+	/* True if a flush is needed. */
 	bool flush_required;
 
 	/*
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index aad015e..46c73e9 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -305,9 +305,7 @@ struct mmc_card {
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
 
-	struct mmc_queue_req	*mqrq;		/* Shared queue structure */
 	unsigned int		bouncesz;	/* Bounce buffer size */
-	int			qdepth;		/* Shared queue depth */
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 21385ac..ebd1ceb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -130,6 +130,7 @@ struct mmc_host_ops {
 	int	(*get_cd)(struct mmc_host *host);
 
 	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
+	void	(*ack_sdio_irq)(struct mmc_host *host);
 
 	/* optional callback for HC quirks */
 	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);
@@ -184,6 +185,7 @@ struct mmc_async_req {
  */
 struct mmc_slot {
 	int cd_irq;
+	bool cd_wake_enabled;
 	void *handler_priv;
 };
 
@@ -270,9 +272,11 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
+#define MMC_CAP_NO_BOUNCE_BUFF	(1 << 21)	/* Disable bounce buffers on host */
 #define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */
+#define MMC_CAP_CD_WAKE		(1 << 28)	/* Enable card detect wake */
 #define MMC_CAP_CMD_DURING_TFR	(1 << 29)	/* Commands during data transfer */
 #define MMC_CAP_CMD23		(1 << 30)	/* CMD23 supported. */
 #define MMC_CAP_HW_RESET	(1 << 31)	/* Hardware reset */
@@ -285,7 +289,6 @@ struct mmc_host {
 #define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)        /* can support */
 #define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
 				 MMC_CAP2_HS200_1_2V_SDR)
-#define MMC_CAP2_HC_ERASE_SZ	(1 << 9)	/* High-capacity erase size */
 #define MMC_CAP2_CD_ACTIVE_HIGH	(1 << 10)	/* Card-detect signal active high */
 #define MMC_CAP2_RO_ACTIVE_HIGH	(1 << 11)	/* Write-protect signal active high */
 #define MMC_CAP2_PACKED_RD	(1 << 12)	/* Allow packed read */
@@ -358,6 +361,7 @@ struct mmc_host {
 
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
+	struct delayed_work	sdio_irq_work;
 	bool			sdio_irq_pending;
 	atomic_t		sdio_irq_thread_abort;
 
@@ -428,6 +432,7 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
 }
 
 void sdio_run_irqs(struct mmc_host *host);
+void sdio_signal_irq(struct mmc_host *host);
 
 #ifdef CONFIG_REGULATOR
 int mmc_regulator_get_ocrmask(struct regulator *supply);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ebaccd4..ef6a13b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -678,6 +678,7 @@ typedef struct pglist_data {
 	 * is the first PFN that needs to be initialised.
 	 */
 	unsigned long first_deferred_pfn;
+	unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 566fda5..3f74ef2 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -467,6 +467,7 @@ enum dmi_field {
 	DMI_PRODUCT_VERSION,
 	DMI_PRODUCT_SERIAL,
 	DMI_PRODUCT_UUID,
+	DMI_PRODUCT_FAMILY,
 	DMI_BOARD_VENDOR,
 	DMI_BOARD_NAME,
 	DMI_BOARD_VERSION,
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 6be1949..1ee7b30 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -457,7 +457,7 @@ enum hwparam_type {
 	hwparam_ioport,		/* Module parameter configures an I/O port */
 	hwparam_iomem,		/* Module parameter configures an I/O mem address */
 	hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */
-	hwparam_irq,		/* Module parameter configures an I/O port */
+	hwparam_irq,		/* Module parameter configures an IRQ */
 	hwparam_dma,		/* Module parameter configures a DMA channel */
 	hwparam_dma_addr,	/* Module parameter configures a DMA buffer address */
 	hwparam_other,		/* Module parameter configures some other value */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 8f67b15..de0d889 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -785,7 +785,7 @@ struct nand_manufacturer_ops {
  *			Minimum amount of bit errors per @ecc_step_ds guaranteed
  *			to be correctable. If unknown, set to zero.
  * @ecc_step_ds:	[INTERN] ECC step required by the @ecc_strength_ds,
- *                      also from the datasheet. It is the recommended ECC step
+ *			also from the datasheet. It is the recommended ECC step
  *			size, if known; if unknown, set to zero.
  * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
  *			      set to the actually used ONFI mode if the chip is
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 1127fe3..ffcba1f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -214,9 +214,9 @@ enum mutex_trylock_recursive_enum {
  * raisins, and once those are gone this will be removed.
  *
  * Returns:
- *  MUTEX_TRYLOCK_FAILED    - trylock failed,
- *  MUTEX_TRYLOCK_SUCCESS   - lock acquired,
- *  MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
+ *  - MUTEX_TRYLOCK_FAILED    - trylock failed,
+ *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
+ *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
  */
 static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
 mutex_trylock_recursive(struct mutex *lock)
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
new file mode 100644
index 0000000..5577e1b
--- /dev/null
+++ b/include/linux/mux/consumer.h
@@ -0,0 +1,32 @@
+/*
+ * mux/consumer.h - definitions for the multiplexer consumer interface
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MUX_CONSUMER_H
+#define _LINUX_MUX_CONSUMER_H
+
+struct device;
+struct mux_control;
+
+unsigned int mux_control_states(struct mux_control *mux);
+int __must_check mux_control_select(struct mux_control *mux,
+				    unsigned int state);
+int __must_check mux_control_try_select(struct mux_control *mux,
+					unsigned int state);
+int mux_control_deselect(struct mux_control *mux);
+
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
+void mux_control_put(struct mux_control *mux);
+
+struct mux_control *devm_mux_control_get(struct device *dev,
+					 const char *mux_name);
+
+#endif /* _LINUX_MUX_CONSUMER_H */
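A hedged consumer-side sketch of the new interface: get the control, select a state around the multiplexed operation, then deselect to release it. The "adc-mux" name, foo_read_channel() and foo_do_transfer() are placeholders; error-pointer returns from the getter are assumed, as in the in-tree mux consumers.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mux/consumer.h>

/* Stand-in for the real multiplexed access (ADC read, I2C transfer, ...). */
static int foo_do_transfer(struct device *dev, unsigned int channel)
{
	return 0;
}

static int foo_read_channel(struct device *dev, unsigned int channel)
{
	struct mux_control *mux;
	int ret;

	mux = devm_mux_control_get(dev, "adc-mux");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	ret = mux_control_select(mux, channel);		/* takes the mux lock */
	if (ret < 0)
		return ret;

	ret = foo_do_transfer(dev, channel);

	mux_control_deselect(mux);			/* releases the mux lock */
	return ret;
}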
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
new file mode 100644
index 0000000..35c3579
--- /dev/null
+++ b/include/linux/mux/driver.h
@@ -0,0 +1,108 @@
+/*
+ * mux/driver.h - definitions for the multiplexer driver interface
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MUX_DRIVER_H
+#define _LINUX_MUX_DRIVER_H
+
+#include <dt-bindings/mux/mux.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+
+struct mux_chip;
+struct mux_control;
+
+/**
+ * struct mux_control_ops -	Mux controller operations for a mux chip.
+ * @set:			Set the state of the given mux controller.
+ */
+struct mux_control_ops {
+	int (*set)(struct mux_control *mux, int state);
+};
+
+/**
+ * struct mux_control -	Represents a mux controller.
+ * @lock:		Protects the mux controller state.
+ * @chip:		The mux chip that is handling this mux controller.
+ * @cached_state:	The current mux controller state, or -1 if none.
+ * @states:		The number of mux controller states.
+ * @idle_state:		The mux controller state to use when inactive, or one
+ *			of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ *
+ * Mux drivers may only change @states and @idle_state, and may only do so
+ * between allocation and registration of the mux controller. Specifically,
+ * @cached_state is internal to the mux core and should never be written by
+ * mux drivers.
+ */
+struct mux_control {
+	struct semaphore lock; /* protects the state of the mux */
+
+	struct mux_chip *chip;
+	int cached_state;
+
+	unsigned int states;
+	int idle_state;
+};
+
+/**
+ * struct mux_chip -	Represents a chip holding mux controllers.
+ * @controllers:	Number of mux controllers handled by the chip.
+ * @mux:		Array of mux controllers that are handled.
+ * @dev:		Device structure.
+ * @id:			Used to identify the device internally.
+ * @ops:		Mux controller operations.
+ */
+struct mux_chip {
+	unsigned int controllers;
+	struct mux_control *mux;
+	struct device dev;
+	int id;
+
+	const struct mux_control_ops *ops;
+};
+
+#define to_mux_chip(x) container_of((x), struct mux_chip, dev)
+
+/**
+ * mux_chip_priv() - Get the extra memory reserved by mux_chip_alloc().
+ * @mux_chip: The mux-chip to get the private memory from.
+ *
+ * Return: Pointer to the private memory reserved by the allocator.
+ */
+static inline void *mux_chip_priv(struct mux_chip *mux_chip)
+{
+	return &mux_chip->mux[mux_chip->controllers];
+}
+
+struct mux_chip *mux_chip_alloc(struct device *dev,
+				unsigned int controllers, size_t sizeof_priv);
+int mux_chip_register(struct mux_chip *mux_chip);
+void mux_chip_unregister(struct mux_chip *mux_chip);
+void mux_chip_free(struct mux_chip *mux_chip);
+
+struct mux_chip *devm_mux_chip_alloc(struct device *dev,
+				     unsigned int controllers,
+				     size_t sizeof_priv);
+int devm_mux_chip_register(struct device *dev, struct mux_chip *mux_chip);
+
+/**
+ * mux_control_get_index() - Get the index of the given mux controller
+ * @mux: The mux-control to get the index for.
+ *
+ * Return: The index of the mux controller within the mux chip the mux
+ * controller is a part of.
+ */
+static inline unsigned int mux_control_get_index(struct mux_control *mux)
+{
+	return mux - mux->chip->mux;
+}
+
+#endif /* _LINUX_MUX_DRIVER_H */
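On the driver side, a chip allocates its controllers, fills in the ops and per-controller state, and registers. A hedged probe sketch under those assumptions; foo_write_select_line(), the platform-device boilerplate and the 4-way state count are illustrative, and error-pointer returns from the allocator are assumed.

#include <linux/err.h>
#include <linux/mux/driver.h>
#include <linux/platform_device.h>

/* Stand-in for the chip-specific select-line write. */
static int foo_write_select_line(unsigned int controller, int state)
{
	return 0;
}

static int foo_mux_set(struct mux_control *mux, int state)
{
	return foo_write_select_line(mux_control_get_index(mux), state);
}

static const struct mux_control_ops foo_mux_ops = {
	.set = foo_mux_set,
};

static int foo_mux_probe(struct platform_device *pdev)
{
	struct mux_chip *mux_chip;

	mux_chip = devm_mux_chip_alloc(&pdev->dev, 1, 0);
	if (IS_ERR(mux_chip))
		return PTR_ERR(mux_chip);

	mux_chip->ops = &foo_mux_ops;
	mux_chip->mux[0].states = 4;		/* example: a 4-way mux */
	mux_chip->mux[0].idle_state = MUX_IDLE_AS_IS;

	return devm_mux_chip_register(&pdev->dev, mux_chip);
}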
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3f39d27..24e88b3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -914,8 +914,7 @@ struct xfrmdev_ops {
  *
  * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
  *	Called when a user wants to change the Maximum Transfer Unit
- *	of a device. If not defined, any request to change MTU will
- *	will return an error.
+ *	of a device.
  *
  * void (*ndo_tx_timeout)(struct net_device *dev);
  *	Callback used when the transmitter has not made any progress
@@ -1433,13 +1432,14 @@ enum netdev_priv_flags {
 
 /**
  *	struct net_device - The DEVICE structure.
- *		Actually, this whole structure is a big mistake.  It mixes I/O
- *		data with strictly "high-level" data, and it has to know about
- *		almost every data structure used in the INET module.
+ *
+ *	Actually, this whole structure is a big mistake.  It mixes I/O
+ *	data with strictly "high-level" data, and it has to know about
+ *	almost every data structure used in the INET module.
  *
  *	@name:	This is the first field of the "visible" part of this structure
  *		(i.e. as seen by users in the "Space.c" file).  It is the name
- *	 	of the interface.
+ *		of the interface.
  *
  *	@name_hlist: 	Device name hash chain, please keep it close to name[]
  *	@ifalias:	SNMP alias
@@ -1596,8 +1596,8 @@ enum netdev_priv_flags {
  *	@rtnl_link_state:	This enum represents the phases of creating
  *				a new link
  *
- *	@destructor:		Called from unregister,
- *				can be used to call free_netdev
+ *	@needs_free_netdev:	Should unregister perform free_netdev?
+ *	@priv_destructor:	Called from unregister
  *	@npinfo:		XXX: need comments on this one
  * 	@nd_net:		Network namespace this network device is inside
  *
@@ -1858,7 +1858,8 @@ struct net_device {
 		RTNL_LINK_INITIALIZING,
 	} rtnl_link_state:16;
 
-	void (*destructor)(struct net_device *dev);
+	bool needs_free_netdev;
+	void (*priv_destructor)(struct net_device *dev);
 
 #ifdef CONFIG_NETPOLL
 	struct netpoll_info __rcu	*npinfo;
@@ -4261,6 +4262,11 @@ static inline const char *netdev_name(const struct net_device *dev)
 	return dev->name;
 }
 
+static inline bool netdev_unregistering(const struct net_device *dev)
+{
+	return dev->reg_state == NETREG_UNREGISTERING;
+}
+
 static inline const char *netdev_reg_state(const struct net_device *dev)
 {
 	switch (dev->reg_state) {
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index e997c4a..bc711a1 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -177,7 +177,6 @@ struct fcnvme_lsdesc_rjt {
 };
 
 
-#define FCNVME_ASSOC_HOSTID_LEN		16
 #define FCNVME_ASSOC_HOSTNQN_LEN	256
 #define FCNVME_ASSOC_SUBNQN_LEN		256
 
@@ -191,7 +190,7 @@ struct fcnvme_lsdesc_cr_assoc_cmd {
 	__be16	cntlid;
 	__be16	sqsize;
 	__be32	rsvd52;
-	u8	hostid[FCNVME_ASSOC_HOSTID_LEN];
+	uuid_t	hostid;
 	u8	hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
 	u8	subnqn[FCNVME_ASSOC_SUBNQN_LEN];
 	u8	rsvd632[384];
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index b625bac..6b8ee9e 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,6 +16,7 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN	256
@@ -86,7 +87,7 @@ enum {
 	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
 };
 
-#define NVMF_AQ_DEPTH		32
+#define NVME_AQ_DEPTH		32
 
 enum {
 	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
@@ -101,6 +102,7 @@ enum {
 	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
 	NVME_REG_CMBLOC = 0x0038,	/* Controller Memory Buffer Location */
 	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
+	NVME_REG_DBS	= 0x1000,	/* SQ 0 Tail Doorbell */
 };
 
 #define NVME_CAP_MQES(cap)	((cap) & 0xffff)
@@ -207,9 +209,15 @@ struct nvme_id_ctrl {
 	__u8			tnvmcap[16];
 	__u8			unvmcap[16];
 	__le32			rpmbs;
-	__u8			rsvd316[4];
+	__le16			edstt;
+	__u8			dsto;
+	__u8			fwug;
 	__le16			kas;
-	__u8			rsvd322[190];
+	__le16			hctma;
+	__le16			mntmt;
+	__le16			mxtmt;
+	__le32			sanicap;
+	__u8			rsvd332[180];
 	__u8			sqes;
 	__u8			cqes;
 	__le16			maxcmd;
@@ -245,6 +253,7 @@ enum {
 	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
 	NVME_CTRL_VWC_PRESENT			= 1 << 0,
 	NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
+	NVME_CTRL_OACS_DIRECTIVES		= 1 << 5,
 	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 7,
 };
 
@@ -274,7 +283,7 @@ struct nvme_id_ns {
 	__le16			nabsn;
 	__le16			nabo;
 	__le16			nabspf;
-	__u16			rsvd46;
+	__le16			noiob;
 	__u8			nvmcap[16];
 	__u8			rsvd64[40];
 	__u8			nguid[16];
@@ -288,6 +297,7 @@ enum {
 	NVME_ID_CNS_NS			= 0x00,
 	NVME_ID_CNS_CTRL		= 0x01,
 	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
+	NVME_ID_CNS_NS_DESC_LIST	= 0x03,
 	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
 	NVME_ID_CNS_NS_PRESENT		= 0x11,
 	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
@@ -295,6 +305,19 @@ enum {
 };
 
 enum {
+	NVME_DIR_IDENTIFY		= 0x00,
+	NVME_DIR_STREAMS		= 0x01,
+	NVME_DIR_SND_ID_OP_ENABLE	= 0x01,
+	NVME_DIR_SND_ST_OP_REL_ID	= 0x01,
+	NVME_DIR_SND_ST_OP_REL_RSC	= 0x02,
+	NVME_DIR_RCV_ID_OP_PARAM	= 0x01,
+	NVME_DIR_RCV_ST_OP_PARAM	= 0x01,
+	NVME_DIR_RCV_ST_OP_STATUS	= 0x02,
+	NVME_DIR_RCV_ST_OP_RESOURCE	= 0x03,
+	NVME_DIR_ENDIR			= 0x01,
+};
+
+enum {
 	NVME_NS_FEAT_THIN	= 1 << 0,
 	NVME_NS_FLBAS_LBA_MASK	= 0xf,
 	NVME_NS_FLBAS_META_EXT	= 0x10,
@@ -314,6 +337,22 @@ enum {
 	NVME_NS_DPS_PI_TYPE3	= 3,
 };
 
+struct nvme_ns_id_desc {
+	__u8 nidt;
+	__u8 nidl;
+	__le16 reserved;
+};
+
+#define NVME_NIDT_EUI64_LEN	8
+#define NVME_NIDT_NGUID_LEN	16
+#define NVME_NIDT_UUID_LEN	16
+
+enum {
+	NVME_NIDT_EUI64		= 0x01,
+	NVME_NIDT_NGUID		= 0x02,
+	NVME_NIDT_UUID		= 0x03,
+};
+
 struct nvme_smart_log {
 	__u8			critical_warning;
 	__u8			temperature[2];
@@ -535,6 +574,7 @@ enum {
 	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
 	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
 	NVME_RW_PRINFO_PRACT		= 1 << 13,
+	NVME_RW_DTYPE_STREAMS		= 1 << 4,
 };
 
 struct nvme_dsm_cmd {
@@ -586,6 +626,11 @@ struct nvme_feat_auto_pst {
 	__le64 entries[32];
 };
 
+enum {
+	NVME_HOST_MEM_ENABLE	= (1 << 0),
+	NVME_HOST_MEM_RETURN	= (1 << 1),
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -604,6 +649,8 @@ enum nvme_admin_opcode {
 	nvme_admin_download_fw		= 0x11,
 	nvme_admin_ns_attach		= 0x15,
 	nvme_admin_keep_alive		= 0x18,
+	nvme_admin_directive_send	= 0x19,
+	nvme_admin_directive_recv	= 0x1a,
 	nvme_admin_dbbuf		= 0x7C,
 	nvme_admin_format_nvm		= 0x80,
 	nvme_admin_security_send	= 0x81,
@@ -658,6 +705,8 @@ struct nvme_identify {
 	__u32			rsvd11[5];
 };
 
+#define NVME_IDENTIFY_DATA_SIZE 4096
+
 struct nvme_features {
 	__u8			opcode;
 	__u8			flags;
@@ -667,7 +716,16 @@ struct nvme_features {
 	union nvme_data_ptr	dptr;
 	__le32			fid;
 	__le32			dword11;
-	__u32			rsvd12[4];
+	__le32                  dword12;
+	__le32                  dword13;
+	__le32                  dword14;
+	__le32                  dword15;
+};
+
+struct nvme_host_mem_buf_desc {
+	__le64			addr;
+	__le32			size;
+	__u32			rsvd;
 };
 
 struct nvme_create_cq {
@@ -756,6 +814,24 @@ struct nvme_get_log_page_command {
 	__u32			rsvd14[2];
 };
 
+struct nvme_directive_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	union nvme_data_ptr	dptr;
+	__le32			numd;
+	__u8			doper;
+	__u8			dtype;
+	__le16			dspec;
+	__u8			endir;
+	__u8			tdtype;
+	__u16			rsvd15;
+
+	__u32			rsvd16[3];
+};
+
 /*
  * Fabrics subcommands.
  */
@@ -843,7 +919,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-	__u8		hostid[16];
+	uuid_t		hostid;
 	__le16		cntlid;
 	char		resv4[238];
 	char		subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -886,6 +962,18 @@ struct nvme_dbbuf {
 	__u32			rsvd12[6];
 };
 
+struct streams_directive_params {
+	__u16	msl;
+	__u16	nssa;
+	__u16	nsso;
+	__u8	rsvd[10];
+	__u32	sws;
+	__u16	sgs;
+	__u16	nsa;
+	__u16	nso;
+	__u8	rsvd2[6];
+};
+
 struct nvme_command {
 	union {
 		struct nvme_common_command common;
@@ -906,6 +994,7 @@ struct nvme_command {
 		struct nvmf_property_set_command prop_set;
 		struct nvmf_property_get_command prop_get;
 		struct nvme_dbbuf dbbuf;
+		struct nvme_directive_cmd directive;
 	};
 };
 
@@ -1050,4 +1139,8 @@ struct nvme_completion {
 #define NVME_VS(major, minor, tertiary) \
 	(((major) << 16) | ((minor) << 8) | (tertiary))
 
+#define NVME_MAJOR(ver)		((ver) >> 16)
+#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
+#define NVME_TERTIARY(ver)	((ver) & 0xff)
+
 #endif /* _LINUX_NVME_H */
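
A quick illustration of the version helpers added above (a hedged sketch, not part of the patch): NVME_VS() packs major/minor/tertiary into one 32-bit word, and the new NVME_MAJOR()/NVME_MINOR()/NVME_TERTIARY() macros unpack it again.

	u32 ver = NVME_VS(1, 3, 0);	/* packs to 0x00010300 */

	WARN_ON(NVME_MAJOR(ver) != 1);
	WARN_ON(NVME_MINOR(ver) != 3);
	WARN_ON(NVME_TERTIARY(ver) != 0);
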
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0f9e567..2f9c1f9 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -166,9 +166,6 @@ struct padata_instance {
 
 extern struct padata_instance *padata_alloc_possible(
 					struct workqueue_struct *wq);
-extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
-					    const struct cpumask *pcpumask,
-					    const struct cpumask *cbcpumask);
 extern void padata_free(struct padata_instance *pinst);
 extern int padata_do_parallel(struct padata_instance *pinst,
 			      struct padata_priv *padata, int cb_cpu);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 316a19f..e7bbd9d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -524,7 +524,7 @@ void page_endio(struct page *page, bool is_write, int err);
 /*
  * Add an arbitrary waiter to a page's wait queue
  */
-extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
  * Fault everything in given userspace address range in.
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 7a4e83a..dd86c97 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -105,7 +105,7 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
 static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
 #endif
 
-extern const u8 pci_acpi_dsm_uuid[];
+extern const guid_t pci_acpi_dsm_guid;
 #define DEVICE_LABEL_DSM	0x07
 #define RESET_DELAY_DSM		0x08
 #define FUNCTION_DELAY_DSM	0x09
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8039f9f..1ef0938 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -307,7 +307,6 @@ struct pci_dev {
 	u8		pm_cap;		/* PM capability offset */
 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
 					   can be generated */
-	unsigned int	pme_interrupt:1;
 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
@@ -376,6 +375,7 @@ struct pci_dev {
 	unsigned int	irq_managed:1;
 	unsigned int	has_secondary_link:1;
 	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
+	unsigned int	is_probed:1;		/* device probing in progress */
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
@@ -1098,8 +1098,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
 void pci_pme_active(struct pci_dev *dev, bool enable);
-int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
-		      bool runtime, bool enable);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
 int pci_prepare_to_sleep(struct pci_dev *dev);
 int pci_back_from_sleep(struct pci_dev *dev);
@@ -1109,12 +1108,6 @@ void pci_pme_wakeup_bus(struct pci_bus *bus);
 void pci_d3cold_enable(struct pci_dev *dev);
 void pci_d3cold_disable(struct pci_dev *dev);
 
-static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
-				  bool enable)
-{
-	return __pci_enable_wake(dev, state, false, enable);
-}
-
 /* PCI Virtual Channel */
 int pci_save_vc_state(struct pci_dev *dev);
 void pci_restore_vc_state(struct pci_dev *dev);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 24a6358..7d6aa29 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -801,6 +801,8 @@ struct perf_cpu_context {
 
 	struct list_head		sched_cb_entry;
 	int				sched_cb_usage;
+
+	int				online;
 };
 
 struct perf_output_handle {
diff --git a/drivers/phy/ulpi_phy.h b/include/linux/phy/ulpi_phy.h
similarity index 100%
rename from drivers/phy/ulpi_phy.h
rename to include/linux/phy/ulpi_phy.h
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 279e3c5..7620eb1 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -42,8 +42,6 @@
  * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
  *	impedance to VDD). If the argument is != 0 pull-up is enabled,
  *	if it is 0, pull-up is total, i.e. the pin is connected to VDD.
- * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
- *	input and output operations.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *	collector) which means it is usually wired with other output ports
  *	which are then pulled up with an external resistor. Setting this
@@ -98,7 +96,6 @@ enum pin_config_param {
 	PIN_CONFIG_BIAS_PULL_DOWN,
 	PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
 	PIN_CONFIG_BIAS_PULL_UP,
-	PIN_CONFIG_BIDIRECTIONAL,
 	PIN_CONFIG_DRIVE_OPEN_DRAIN,
 	PIN_CONFIG_DRIVE_OPEN_SOURCE,
 	PIN_CONFIG_DRIVE_PUSH_PULL,
diff --git a/include/linux/i2c/ads1015.h b/include/linux/platform_data/ads1015.h
similarity index 100%
rename from include/linux/i2c/ads1015.h
rename to include/linux/platform_data/ads1015.h
diff --git a/include/linux/i2c/apds990x.h b/include/linux/platform_data/apds990x.h
similarity index 100%
rename from include/linux/i2c/apds990x.h
rename to include/linux/platform_data/apds990x.h
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 3c8825b..cdceb4d 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -7,10 +7,6 @@
 #ifndef __ATMEL_H__
 #define __ATMEL_H__
 
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/serial.h>
-
  /* Compact Flash */
 struct at91_cf_data {
 	int	irq_pin;		/* I/O IRQ */
@@ -23,35 +19,14 @@ struct at91_cf_data {
 #define AT91_IDE_SWAP_A0_A2	0x02
 };
 
- /* NAND / SmartMedia */
-struct atmel_nand_data {
-	int		enable_pin;		/* chip enable */
-	int		det_pin;		/* card detect */
-	int		rdy_pin;		/* ready/busy */
-	u8		rdy_pin_active_low;	/* rdy_pin value is inverted */
-	u8		ale;			/* address line number connected to ALE */
-	u8		cle;			/* address line number connected to CLE */
-	u8		bus_width_16;		/* buswidth is 16 bit */
-	u8		ecc_mode;		/* ecc mode */
-	u8		on_flash_bbt;		/* bbt on flash */
-	struct mtd_partition *parts;
-	unsigned int	num_parts;
-	bool		has_dma;		/* support dma transfer */
-
-	/* default is false, only for at32ap7000 chip is true */
-	bool		need_reset_workaround;
-};
-
- /* Serial */
-struct atmel_uart_data {
-	int			num;		/* port num */
-	short			use_dma_tx;	/* use transmit DMA? */
-	short			use_dma_rx;	/* use receive DMA? */
-	void __iomem		*regs;		/* virt. base address, if any */
-	struct serial_rs485	rs485;		/* rs485 settings */
-};
-
 /* FIXME: this needs a better location, but gets stuff building again */
+#ifdef CONFIG_ATMEL_PM
 extern int at91_suspend_entering_slow_clock(void);
+#else
+static inline int at91_suspend_entering_slow_clock(void)
+{
+	return 0;
+}
+#endif
 
 #endif /* __ATMEL_H__ */
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/platform_data/bh1770glc.h
similarity index 100%
rename from include/linux/i2c/bh1770glc.h
rename to include/linux/platform_data/bh1770glc.h
diff --git a/include/linux/i2c/ds620.h b/include/linux/platform_data/ds620.h
similarity index 100%
rename from include/linux/i2c/ds620.h
rename to include/linux/platform_data/ds620.h
diff --git a/include/linux/i2c/ltc4245.h b/include/linux/platform_data/ltc4245.h
similarity index 100%
rename from include/linux/i2c/ltc4245.h
rename to include/linux/platform_data/ltc4245.h
diff --git a/include/linux/i2c/max6639.h b/include/linux/platform_data/max6639.h
similarity index 100%
rename from include/linux/i2c/max6639.h
rename to include/linux/platform_data/max6639.h
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index 54b0448..ba4e4bb 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -16,5 +16,7 @@
 struct mtk_chip_config {
 	u32 tx_mlsb;
 	u32 rx_mlsb;
+	u32 cs_pol;
+	u32 sample_sel;
 };
 #endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index a0894bc..b8b4df0 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -584,7 +584,6 @@ struct dev_pm_info {
 	unsigned int		idle_notification:1;
 	unsigned int		request_pending:1;
 	unsigned int		deferred_resume:1;
-	unsigned int		run_wake:1;
 	unsigned int		runtime_auto:1;
 	bool			ignore_children:1;
 	unsigned int		no_callbacks:1;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index b7803a2..41004d9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -206,9 +206,13 @@ static inline void pm_genpd_syscore_poweron(struct device *dev) {}
 /* OF PM domain providers */
 struct of_device_id;
 
+typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+						   void *data);
+
 struct genpd_onecell_data {
 	struct generic_pm_domain **domains;
 	unsigned int num_domains;
+	genpd_xlate_t xlate;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
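
A minimal sketch of a provider using the new @xlate hook; the callback below and the assumption that @data points at the genpd_onecell_data are illustrative, not taken from this patch.

	static struct generic_pm_domain *my_genpd_xlate(struct of_phandle_args *args,
							void *data)
	{
		struct genpd_onecell_data *d = data;	/* assumed provider data */

		if (args->args[0] >= d->num_domains)
			return ERR_PTR(-EINVAL);
		return d->domains[args->args[0]];
	}
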
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index a6685b3..51ec727 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -121,6 +121,8 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
 struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
 void dev_pm_opp_put_regulators(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name);
+void dev_pm_opp_put_clkname(struct opp_table *opp_table);
 struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
 void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -257,6 +259,13 @@ static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, co
 
 static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
 
+static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+	return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
+
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
 	return -ENOTSUPP;
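
A hedged usage sketch for the new clk-name helpers; the "mclk" clock name and the surrounding driver context are assumptions for illustration only.

	struct opp_table *opp_table;

	/* Tell the OPP core which clock this device scales. */
	opp_table = dev_pm_opp_set_clkname(dev, "mclk");
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* ... dev_pm_opp_set_rate(dev, target_freq) as usual ... */

	dev_pm_opp_put_clkname(opp_table);
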
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index ca4823e..2efb08a 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -76,16 +76,6 @@ static inline void pm_runtime_put_noidle(struct device *dev)
 	atomic_add_unless(&dev->power.usage_count, -1, 0);
 }
 
-static inline bool device_run_wake(struct device *dev)
-{
-	return dev->power.run_wake;
-}
-
-static inline void device_set_run_wake(struct device *dev, bool enable)
-{
-	dev->power.run_wake = enable;
-}
-
 static inline bool pm_runtime_suspended(struct device *dev)
 {
 	return dev->power.runtime_status == RPM_SUSPENDED
@@ -163,8 +153,6 @@ static inline void pm_runtime_forbid(struct device *dev) {}
 static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
 static inline void pm_runtime_get_noresume(struct device *dev) {}
 static inline void pm_runtime_put_noidle(struct device *dev) {}
-static inline bool device_run_wake(struct device *dev) { return false; }
-static inline void device_set_run_wake(struct device *dev, bool enable) {}
 static inline bool pm_runtime_suspended(struct device *dev) { return false; }
 static inline bool pm_runtime_active(struct device *dev) { return true; }
 static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
diff --git a/include/linux/i2c/pmbus.h b/include/linux/pmbus.h
similarity index 100%
rename from include/linux/i2c/pmbus.h
rename to include/linux/pmbus.h
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 75ffc57..2889f09 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -75,7 +75,7 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 struct poll_table_entry {
 	struct file *filp;
 	unsigned long key;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	wait_queue_head_t *wait_address;
 };
 
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 83b22ae..38d8225 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -42,12 +42,6 @@ struct posix_clock;
  * @clock_gettime:  Read the current time
  * @clock_getres:   Get the clock resolution
  * @clock_settime:  Set the current time value
- * @timer_create:   Create a new timer
- * @timer_delete:   Remove a previously created timer
- * @timer_gettime:  Get remaining time and interval of a timer
- * @timer_settime: Set a timer's initial expiration and interval
- * @fasync:         Optional character device fasync method
- * @mmap:           Optional character device mmap method
  * @open:           Optional character device open method
  * @release:        Optional character device release method
  * @ioctl:          Optional character device ioctl method
@@ -66,28 +60,12 @@ struct posix_clock_operations {
 	int  (*clock_settime)(struct posix_clock *pc,
 			      const struct timespec64 *ts);
 
-	int  (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
-
-	int  (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
-
-	void (*timer_gettime)(struct posix_clock *pc,
-			      struct k_itimer *kit, struct itimerspec64 *tsp);
-
-	int  (*timer_settime)(struct posix_clock *pc,
-			      struct k_itimer *kit, int flags,
-			      struct itimerspec64 *tsp, struct itimerspec64 *old);
 	/*
 	 * Optional character device methods:
 	 */
-	int     (*fasync)  (struct posix_clock *pc,
-			    int fd, struct file *file, int on);
-
 	long    (*ioctl)   (struct posix_clock *pc,
 			    unsigned int cmd, unsigned long arg);
 
-	int     (*mmap)    (struct posix_clock *pc,
-			    struct vm_area_struct *vma);
-
 	int     (*open)    (struct posix_clock *pc, fmode_t f_mode);
 
 	uint    (*poll)    (struct posix_clock *pc,
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 8c1e43a..29f1b7f 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -7,6 +7,7 @@
 #include <linux/timex.h>
 #include <linux/alarmtimer.h>
 
+struct siginfo;
 
 struct cpu_timer_list {
 	struct list_head entry;
@@ -48,81 +49,69 @@ struct cpu_timer_list {
 #define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)
 #define CLOCKID_TO_FD(clk)	((unsigned int) ~((clk) >> 3))
 
-/* POSIX.1b interval timer structure. */
-struct k_itimer {
-	struct list_head list;		/* free/ allocate list */
-	struct hlist_node t_hash;
-	spinlock_t it_lock;
-	clockid_t it_clock;		/* which timer type */
-	timer_t it_id;			/* timer id */
-	int it_overrun;			/* overrun on pending signal  */
-	int it_overrun_last;		/* overrun on last delivered signal */
-	int it_requeue_pending;		/* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
-	int it_sigev_notify;		/* notify word of sigevent struct */
-	struct signal_struct *it_signal;
+
+/**
+ * struct k_itimer - POSIX.1b interval timer structure.
+ * @list:		List head for binding the timer to signals->posix_timers
+ * @t_hash:		Entry in the posix timer hash table
+ * @it_lock:		Lock protecting the timer
+ * @kclock:		Pointer to the k_clock struct handling this timer
+ * @it_clock:		The posix timer clock id
+ * @it_id:		The posix timer id for identifying the timer
+ * @it_active:		Marker that timer is active
+ * @it_overrun:		The overrun counter for pending signals
+ * @it_overrun_last:	The overrun at the time of the last delivered signal
+ * @it_requeue_pending:	Indicator that the timer waits to be requeued on
+ *			signal delivery
+ * @it_sigev_notify:	The notify word of sigevent struct for signal delivery
+ * @it_interval:	The interval for periodic timers
+ * @it_signal:		Pointer to the creator's signal struct
+ * @it_pid:		The pid of the process/task targeted by the signal
+ * @it_process:		The task to wake up on clock_nanosleep (CPU timers)
+ * @sigq:		Pointer to preallocated sigqueue
+ * @it:			Union representing the various posix timer type
+ *			internals. Also used for rcu freeing the timer.
+ */
+struct k_itimer {
+	struct list_head	list;
+	struct hlist_node	t_hash;
+	spinlock_t		it_lock;
+	const struct k_clock	*kclock;
+	clockid_t		it_clock;
+	timer_t			it_id;
+	int			it_active;
+	int			it_overrun;
+	int			it_overrun_last;
+	int			it_requeue_pending;
+	int			it_sigev_notify;
+	ktime_t			it_interval;
+	struct signal_struct	*it_signal;
 	union {
-		struct pid *it_pid;	/* pid of process to send signal to */
-		struct task_struct *it_process;	/* for clock_nanosleep */
+		struct pid		*it_pid;
+		struct task_struct	*it_process;
 	};
-	struct sigqueue *sigq;		/* signal queue entry. */
+	struct sigqueue		*sigq;
 	union {
 		struct {
-			struct hrtimer timer;
-			ktime_t interval;
+			struct hrtimer	timer;
 		} real;
-		struct cpu_timer_list cpu;
+		struct cpu_timer_list	cpu;
 		struct {
-			unsigned int clock;
-			unsigned int node;
-			unsigned long incr;
-			unsigned long expires;
-		} mmtimer;
-		struct {
-			struct alarm alarmtimer;
-			ktime_t interval;
+			struct alarm	alarmtimer;
 		} alarm;
-		struct rcu_head rcu;
+		struct rcu_head		rcu;
 	} it;
 };
 
-struct k_clock {
-	int (*clock_getres) (const clockid_t which_clock, struct timespec64 *tp);
-	int (*clock_set) (const clockid_t which_clock,
-			  const struct timespec64 *tp);
-	int (*clock_get) (const clockid_t which_clock, struct timespec64 *tp);
-	int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
-	int (*timer_create) (struct k_itimer *timer);
-	int (*nsleep) (const clockid_t which_clock, int flags,
-		       struct timespec64 *, struct timespec __user *);
-	long (*nsleep_restart) (struct restart_block *restart_block);
-	int (*timer_set) (struct k_itimer *timr, int flags,
-			  struct itimerspec64 *new_setting,
-			  struct itimerspec64 *old_setting);
-	int (*timer_del) (struct k_itimer *timr);
-#define TIMER_RETRY 1
-	void (*timer_get) (struct k_itimer *timr,
-			   struct itimerspec64 *cur_setting);
-};
-
-extern struct k_clock clock_posix_cpu;
-extern struct k_clock clock_posix_dynamic;
-
-void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
-
-/* function to call to trigger timer event */
-int posix_timer_event(struct k_itimer *timr, int si_private);
-
-void posix_cpu_timer_schedule(struct k_itimer *timer);
-
 void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
 			   u64 *newval, u64 *oldval);
 
-long clock_nanosleep_restart(struct restart_block *restart_block);
-
 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
 
+void posixtimer_rearm(struct siginfo *info);
+
 #endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index b312bce..11e1168 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -40,6 +40,9 @@ struct bq27xxx_platform_data {
 struct bq27xxx_device_info;
 struct bq27xxx_access_methods {
 	int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single);
+	int (*write)(struct bq27xxx_device_info *di, u8 reg, int value, bool single);
+	int (*read_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len);
+	int (*write_bulk)(struct bq27xxx_device_info *di, u8 reg, u8 *data, int len);
 };
 
 struct bq27xxx_reg_cache {
@@ -60,7 +63,10 @@ struct bq27xxx_device_info {
 	struct device *dev;
 	int id;
 	enum bq27xxx_chip chip;
+	bool ram_chip;
 	const char *name;
+	struct bq27xxx_dm_reg *dm_regs;
+	u32 unseal_key;
 	struct bq27xxx_access_methods bus;
 	struct bq27xxx_reg_cache cache;
 	int charge_design_full;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3965503..de89066 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -146,6 +146,7 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
 	POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
 	POWER_SUPPLY_PROP_SCOPE,
+	POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_CALIBRATE,
 	/* Properties of type `const char *' */
@@ -159,13 +160,14 @@ enum power_supply_type {
 	POWER_SUPPLY_TYPE_BATTERY,
 	POWER_SUPPLY_TYPE_UPS,
 	POWER_SUPPLY_TYPE_MAINS,
-	POWER_SUPPLY_TYPE_USB,		/* Standard Downstream Port */
-	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
-	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
-	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
-	POWER_SUPPLY_TYPE_USB_TYPE_C,	/* Type C Port */
-	POWER_SUPPLY_TYPE_USB_PD,	/* Power Delivery Port */
-	POWER_SUPPLY_TYPE_USB_PD_DRP,	/* PD Dual Role Port */
+	POWER_SUPPLY_TYPE_USB,			/* Standard Downstream Port */
+	POWER_SUPPLY_TYPE_USB_DCP,		/* Dedicated Charging Port */
+	POWER_SUPPLY_TYPE_USB_CDP,		/* Charging Downstream Port */
+	POWER_SUPPLY_TYPE_USB_ACA,		/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_USB_TYPE_C,		/* Type C Port */
+	POWER_SUPPLY_TYPE_USB_PD,		/* Power Delivery Port */
+	POWER_SUPPLY_TYPE_USB_PD_DRP,		/* PD Dual Role Port */
+	POWER_SUPPLY_TYPE_APPLE_BRICK_ID,	/* Apple Charging Method */
 };
 
 enum power_supply_notifier_events {
@@ -288,6 +290,25 @@ struct power_supply_info {
 	int use_for_apm;
 };
 
+/*
+ * This is the recommended struct to manage static battery parameters,
+ * populated by power_supply_get_battery_info(). Most platform drivers should
+ * use it for consistency.
+ * Its field names must correspond to elements in enum power_supply_property.
+ * The default field value is -EINVAL.
+ * Power supply class itself doesn't use this.
+ */
+
+struct power_supply_battery_info {
+	int energy_full_design_uwh;	    /* microWatt-hours */
+	int charge_full_design_uah;	    /* microAmp-hours */
+	int voltage_min_design_uv;	    /* microVolts */
+	int precharge_current_ua;	    /* microAmps */
+	int charge_term_current_ua;	    /* microAmps */
+	int constant_charge_current_max_ua; /* microAmps */
+	int constant_charge_voltage_max_uv; /* microVolts */
+};
+
 extern struct atomic_notifier_head power_supply_notifier;
 extern int power_supply_reg_notifier(struct notifier_block *nb);
 extern void power_supply_unreg_notifier(struct notifier_block *nb);
@@ -306,6 +327,9 @@ static inline struct power_supply *
 devm_power_supply_get_by_phandle(struct device *dev, const char *property)
 { return NULL; }
 #endif /* CONFIG_OF */
+
+extern int power_supply_get_battery_info(struct power_supply *psy,
+					 struct power_supply_battery_info *info);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
 extern int power_supply_set_battery_charged(struct power_supply *psy);
@@ -359,6 +383,8 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
 	case POWER_SUPPLY_PROP_CHARGE_NOW:
 	case POWER_SUPPLY_PROP_CHARGE_AVG:
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+	case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+	case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
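
A minimal sketch of how a charger driver might consume the new battery-info helper; the psy/chip variables and the my_charger_set_precharge_ua() callback are made-up placeholders.

	struct power_supply_battery_info info = {};

	/* Fields left unset by firmware/DT default to -EINVAL. */
	if (!power_supply_get_battery_info(psy, &info) &&
	    info.precharge_current_ua != -EINVAL)
		my_charger_set_precharge_ua(chip, info.precharge_current_ua);
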
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ef3eb8b..0e5fcc1 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -391,10 +391,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define current_pt_regs() task_pt_regs(current)
 #endif
 
-#ifndef ptrace_signal_deliver
-#define ptrace_signal_deliver() ((void)0)
-#endif
-
 /*
  * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
  * on *all* architectures; the only reason to have a per-arch definition
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index a052232..8461b18 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -196,6 +196,7 @@ enum pxa_ssp_type {
 	LPSS_BSW_SSP,
 	LPSS_SPT_SSP,
 	LPSS_BXT_SSP,
+	LPSS_CNL_SSP,
 };
 
 struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9c6f768..dda22f4 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
 int dquot_initialize(struct inode *inode);
+bool dquot_initialize_needed(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
 static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
 	return 0;
 }
 
+static inline bool dquot_initialize_needed(struct inode *inode)
+{
+	return false;
+}
+
 static inline void dquot_drop(struct inode *inode)
 {
 }
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index 4b766b6..426cee6 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -7,6 +7,10 @@
  * unlimited scalability while maintaining a constant level of contention
  * on the root node.
  *
+ * This seemingly RCU-private file must be available to SRCU users
+ * because the size of the TREE SRCU srcu_struct structure depends
+ * on these definitions.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba4d262..c3ad00e 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -1,6 +1,10 @@
 /*
  * RCU segmented callback lists
  *
+ * This seemingly RCU-private file must be available to SRCU users
+ * because the size of the TREE SRCU srcu_struct structure depends
+ * on these definitions.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e1e5d00..f816fc7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -34,104 +34,15 @@
 #define __LINUX_RCUPDATE_H
 
 #include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-#include <linux/lockdep.h>
-#include <linux/debugobjects.h>
-#include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/ktime.h>
+#include <linux/atomic.h>
 #include <linux/irqflags.h>
+#include <linux/preempt.h>
+#include <linux/bottom_half.h>
+#include <linux/lockdep.h>
+#include <asm/processor.h>
+#include <linux/cpumask.h>
 
-#include <asm/barrier.h>
-
-#ifndef CONFIG_TINY_RCU
-extern int rcu_expedited; /* for sysctl */
-extern int rcu_normal;    /* also for sysctl */
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-#ifdef CONFIG_TINY_RCU
-/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
-static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
-{
-	return true;
-}
-static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
-{
-	return false;
-}
-
-static inline void rcu_expedite_gp(void)
-{
-}
-
-static inline void rcu_unexpedite_gp(void)
-{
-}
-#else /* #ifdef CONFIG_TINY_RCU */
-bool rcu_gp_is_normal(void);     /* Internal RCU use. */
-bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
-void rcu_expedite_gp(void);
-void rcu_unexpedite_gp(void);
-#endif /* #else #ifdef CONFIG_TINY_RCU */
-
-enum rcutorture_type {
-	RCU_FLAVOR,
-	RCU_BH_FLAVOR,
-	RCU_SCHED_FLAVOR,
-	RCU_TASKS_FLAVOR,
-	SRCU_FLAVOR,
-	INVALID_RCU_FLAVOR
-};
-
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
-void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-			    unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
-void rcutorture_record_progress(unsigned long vernum);
-void do_trace_rcu_torture_read(const char *rcutorturename,
-			       struct rcu_head *rhp,
-			       unsigned long secs,
-			       unsigned long c_old,
-			       unsigned long c);
-bool rcu_irq_enter_disabled(void);
-#else
-static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
-					  int *flags,
-					  unsigned long *gpnum,
-					  unsigned long *completed)
-{
-	*flags = 0;
-	*gpnum = 0;
-	*completed = 0;
-}
-static inline void rcutorture_record_test_transition(void)
-{
-}
-static inline void rcutorture_record_progress(unsigned long vernum)
-{
-}
-static inline bool rcu_irq_enter_disabled(void)
-{
-	return false;
-}
-#ifdef CONFIG_RCU_TRACE
-void do_trace_rcu_torture_read(const char *rcutorturename,
-			       struct rcu_head *rhp,
-			       unsigned long secs,
-			       unsigned long c_old,
-			       unsigned long c);
-#else
-#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
-#endif
-#endif
-
-#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
-#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 #define ulong2long(a)		(*(long *)(&(a)))
@@ -139,115 +50,14 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 /* Exported common interfaces */
 
 #ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed.  However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked.  RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing RCU read-side critical section.  On systems with more
- * than one CPU, this means that when "func()" is invoked, each CPU is
- * guaranteed to have executed a full memory barrier since the end of its
- * last RCU read-side critical section whose beginning preceded the call
- * to call_rcu().  It also means that each CPU executing an RCU read-side
- * critical section that continues beyond the start of "func()" must have
- * executed a memory barrier after the call_rcu() but before the beginning
- * of that RCU read-side critical section.  Note that these guarantees
- * include CPUs that are offline, idle, or executing in user mode, as
- * well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
- * resulting RCU callback function "func()", then both CPU A and CPU B are
- * guaranteed to execute a full memory barrier during the time interval
- * between the call to call_rcu() and the invocation of "func()" -- even
- * if CPU A and CPU B are the same CPU (but again only if the system has
- * more than one CPU).
- */
-void call_rcu(struct rcu_head *head,
-	      rcu_callback_t func);
-
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
 #else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
 #define	call_rcu	call_rcu_sched
-
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
- *  OR
- *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_bh(struct rcu_head *head,
-		 rcu_callback_t func);
-
-/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_sched() assumes
- * that the read-side critical sections end on enabling of preemption
- * or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- *  - rcu_read_lock_sched() and  rcu_read_unlock_sched(),
- *  OR
- *  anything that disables preemption.
- *  These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_sched(struct rcu_head *head,
-		    rcu_callback_t func);
-
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
-
-/**
- * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks() assumes
- * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void rcu_barrier_tasks(void);
@@ -301,22 +111,12 @@ void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcu_cpu_starting(unsigned int cpu);
 
-#ifndef CONFIG_TINY_RCU
-void rcu_end_inkernel_boot(void);
-#else /* #ifndef CONFIG_TINY_RCU */
-static inline void rcu_end_inkernel_boot(void) { }
-#endif /* #ifndef CONFIG_TINY_RCU */
-
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
 #else /* #ifdef CONFIG_RCU_STALL_COMMON */
-static inline void rcu_sysrq_start(void)
-{
-}
-static inline void rcu_sysrq_end(void)
-{
-}
+static inline void rcu_sysrq_start(void) { }
+static inline void rcu_sysrq_end(void) { }
 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -330,9 +130,7 @@ static inline void rcu_user_exit(void) { }
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_init_nohz(void)
-{
-}
+static inline void rcu_init_nohz(void) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /**
@@ -397,10 +195,6 @@ do { \
 		rcu_note_voluntary_context_switch(current); \
 } while (0)
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-bool __rcu_is_watching(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -414,10 +208,6 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
-#define RCU_SCHEDULER_INACTIVE	0
-#define RCU_SCHEDULER_INIT	1
-#define RCU_SCHEDULER_RUNNING	2
-
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -430,30 +220,16 @@ void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void init_rcu_head(struct rcu_head *head)
-{
-}
-
-static inline void destroy_rcu_head(struct rcu_head *head)
-{
-}
-
-static inline void init_rcu_head_on_stack(struct rcu_head *head)
-{
-}
-
-static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
-{
-}
+static inline void init_rcu_head(struct rcu_head *head) { }
+static inline void destroy_rcu_head(struct rcu_head *head) { }
+static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
-static inline bool rcu_lockdep_current_cpu_online(void)
-{
-	return true;
-}
+static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -473,18 +249,8 @@ extern struct lockdep_map rcu_bh_lock_map;
 extern struct lockdep_map rcu_sched_lock_map;
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
-
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
-
-/**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of
- * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise.
- */
 int rcu_read_lock_sched_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -531,9 +297,7 @@ static inline void rcu_preempt_sleep_check(void)
 			 "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
-static inline void rcu_preempt_sleep_check(void)
-{
-}
+static inline void rcu_preempt_sleep_check(void) { }
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 #define rcu_sleep_check()						\
@@ -1084,52 +848,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu(ptr, rcu_head)					\
 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
-#ifdef CONFIG_TINY_RCU
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
-{
-	*nextevt = KTIME_MAX;
-	return 0;
-}
-#endif /* #ifdef CONFIG_TINY_RCU */
-
-#if defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
-#elif defined(CONFIG_RCU_NOCB_CPU)
-bool rcu_is_nocb_cpu(int cpu);
-#else
-static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
-#endif
-
-
-/* Only for use by adaptive-ticks code. */
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-bool rcu_sys_is_idle(void);
-void rcu_sysidle_force_exit(void);
-#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-static inline bool rcu_sys_is_idle(void)
-{
-	return false;
-}
-
-static inline void rcu_sysidle_force_exit(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-
-/*
- * Dump the ftrace buffer, but only one time per callsite per boot.
- */
-#define rcu_ftrace_dump(oops_dump_mode) \
-do { \
-	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
-	\
-	if (!atomic_read(&___rfd_beenhere) && \
-	    !atomic_xchg(&___rfd_beenhere, 1)) \
-		ftrace_dump(oops_dump_mode); \
-} while (0)
 
 /*
  * Place this after a lock-acquisition primitive to guarantee that
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 74d9c3a..5becbbc 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -25,7 +25,7 @@
 #ifndef __LINUX_TINY_H
 #define __LINUX_TINY_H
 
-#include <linux/cache.h>
+#include <linux/ktime.h>
 
 struct rcu_dynticks;
 static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
@@ -33,10 +33,8 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 	return 0;
 }
 
-static inline bool rcu_eqs_special_set(int cpu)
-{
-	return false;  /* Never flag non-existent other CPUs! */
-}
+/* Never flag non-existent other CPUs! */
+static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
 static inline unsigned long get_state_synchronize_rcu(void)
 {
@@ -98,159 +96,38 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 		rcu_note_voluntary_context_switch_lite(current); \
 	} while (0)
 
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+{
+	*nextevt = KTIME_MAX;
+	return 0;
+}
+
 /*
  * Take advantage of the fact that there is only one CPU, which
  * allows us to ignore virtualization-based context switches.
  */
-static inline void rcu_virt_note_context_switch(int cpu)
-{
-}
-
-/*
- * Return the number of grace periods started.
- */
-static inline unsigned long rcu_batches_started(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of bottom-half grace periods started.
- */
-static inline unsigned long rcu_batches_started_bh(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of sched grace periods started.
- */
-static inline unsigned long rcu_batches_started_sched(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of grace periods completed.
- */
-static inline unsigned long rcu_batches_completed(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of bottom-half grace periods completed.
- */
-static inline unsigned long rcu_batches_completed_bh(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of sched grace periods completed.
- */
-static inline unsigned long rcu_batches_completed_sched(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of expedited grace periods completed.
- */
-static inline unsigned long rcu_exp_batches_completed(void)
-{
-	return 0;
-}
-
-/*
- * Return the number of expedited sched grace periods completed.
- */
-static inline unsigned long rcu_exp_batches_completed_sched(void)
-{
-	return 0;
-}
-
-static inline void rcu_force_quiescent_state(void)
-{
-}
-
-static inline void rcu_bh_force_quiescent_state(void)
-{
-}
-
-static inline void rcu_sched_force_quiescent_state(void)
-{
-}
-
-static inline void show_rcu_gp_kthreads(void)
-{
-}
-
-static inline void rcu_cpu_stall_reset(void)
-{
-}
-
-static inline void rcu_idle_enter(void)
-{
-}
-
-static inline void rcu_idle_exit(void)
-{
-}
-
-static inline void rcu_irq_enter(void)
-{
-}
-
-static inline void rcu_irq_exit_irqson(void)
-{
-}
-
-static inline void rcu_irq_enter_irqson(void)
-{
-}
-
-static inline void rcu_irq_exit(void)
-{
-}
-
-static inline void exit_rcu(void)
-{
-}
+static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_cpu_stall_reset(void) { }
+static inline void rcu_idle_enter(void) { }
+static inline void rcu_idle_exit(void) { }
+static inline void rcu_irq_enter(void) { }
+static inline bool rcu_irq_enter_disabled(void) { return false; }
+static inline void rcu_irq_exit_irqson(void) { }
+static inline void rcu_irq_enter_irqson(void) { }
+static inline void rcu_irq_exit(void) { }
+static inline void exit_rcu(void) { }
 
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
 extern int rcu_scheduler_active __read_mostly;
 void rcu_scheduler_starting(void);
 #else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
-static inline void rcu_scheduler_starting(void)
-{
-}
+static inline void rcu_scheduler_starting(void) { }
 #endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_is_watching(void) { return true; }
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
-
-static inline bool rcu_is_watching(void)
-{
-	return __rcu_is_watching();
-}
-
-#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
-
-static inline bool rcu_is_watching(void)
-{
-	return true;
-}
-
-#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
-
-static inline void rcu_request_urgent_qs_task(struct task_struct *t)
-{
-}
-
-static inline void rcu_all_qs(void)
-{
-	barrier(); /* Avoid RCU read-side critical sections leaking across. */
-}
+/* Avoid RCU read-side critical sections leaking across. */
+static inline void rcu_all_qs(void) { barrier(); }
 
 /* RCUtree hotplug events */
 #define rcutree_prepare_cpu      NULL
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 0bacb6b..37d6fd3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -79,37 +79,20 @@ void cond_synchronize_rcu(unsigned long oldstate);
 unsigned long get_state_synchronize_sched(void);
 void cond_synchronize_sched(unsigned long oldstate);
 
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
-unsigned long rcu_exp_batches_completed(void);
-unsigned long rcu_exp_batches_completed_sched(void);
-void show_rcu_gp_kthreads(void);
-
-void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
-void rcu_sched_force_quiescent_state(void);
-
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
+bool rcu_irq_enter_disabled(void);
 
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
-
+void rcu_end_inkernel_boot(void);
 bool rcu_is_watching(void);
-void rcu_request_urgent_qs_task(struct task_struct *t);
-
 void rcu_all_qs(void);
 
 /* RCUtree hotplug events */
diff --git a/drivers/power/reset/reboot-mode.h b/include/linux/reboot-mode.h
similarity index 100%
rename from drivers/power/reset/reboot-mode.h
rename to include/linux/reboot-mode.h
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index b34aa64..591792c 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
+#ifdef CONFIG_REFCOUNT_FULL
 extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
 extern void refcount_add(unsigned int i, refcount_t *r);
 
@@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r);
 extern void refcount_inc(refcount_t *r);
 
 extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
-extern void refcount_sub(unsigned int i, refcount_t *r);
 
 extern __must_check bool refcount_dec_and_test(refcount_t *r);
 extern void refcount_dec(refcount_t *r);
+#else
+static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, i, 0);
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	atomic_add(i, &r->refs);
+}
+
+static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, 1, 0);
+}
+
+static inline void refcount_inc(refcount_t *r)
+{
+	atomic_inc(&r->refs);
+}
+
+static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	return atomic_sub_and_test(i, &r->refs);
+}
+
+static inline __must_check bool refcount_dec_and_test(refcount_t *r)
+{
+	return atomic_dec_and_test(&r->refs);
+}
+
+static inline void refcount_dec(refcount_t *r)
+{
+	atomic_dec(&r->refs);
+}
+#endif /* CONFIG_REFCOUNT_FULL */
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
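
Both the CONFIG_REFCOUNT_FULL variants and the plain-atomic fallbacks above back the same caller pattern; a minimal sketch:

	struct foo {
		refcount_t ref;
	};

	static void foo_get(struct foo *f)
	{
		refcount_inc(&f->ref);
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->ref))
			kfree(f);
	}
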
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index e886492..978abfb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -461,6 +461,10 @@ struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
 				      const struct regmap_config *config,
 				      struct lock_class_key *lock_key,
 				      const char *lock_name);
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+				 const struct regmap_config *config,
+				 struct lock_class_key *lock_key,
+				 const char *lock_name);
 struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
 				      void __iomem *regs,
 				      const struct regmap_config *config,
@@ -493,6 +497,10 @@ struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
 					   const struct regmap_config *config,
 					   struct lock_class_key *lock_key,
 					   const char *lock_name);
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+				      const struct regmap_config *config,
+				      struct lock_class_key *lock_key,
+				      const char *lock_name);
 struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
 					   const char *clk_id,
 					   void __iomem *regs,
@@ -597,6 +605,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
 				dev, config)
 
 /**
+ * regmap_init_w1() - Initialise register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_w1(w1_dev, config)					\
+	__regmap_lockdep_wrapper(__regmap_init_w1, #config,		\
+				w1_dev, config)
+
+/**
  * regmap_init_mmio_clk() - Initialise register map with register clock
  *
  * @dev: Device that will be interacted with
@@ -712,6 +733,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
 				dev, config)
 
 /**
+ * devm_regmap_init_w1() - Initialise managed register map
+ *
+ * @w1_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_w1(w1_dev, config)				\
+	__regmap_lockdep_wrapper(__devm_regmap_init_w1, #config,	\
+				w1_dev, config)
+/**
  * devm_regmap_init_mmio_clk() - Initialise managed register map with clock
  *
  * @dev: Device that will be interacted with
@@ -884,6 +918,7 @@ struct regmap_irq {
  *
  * @status_base: Base status register address.
  * @mask_base:   Base mask register address.
+ * @mask_writeonly: Base mask register is write only.
  * @unmask_base:  Base unmask register address. for chips who have
  *                separate mask and unmask registers
  * @ack_base:    Base ack address. If zero then the chip is clear on read.
@@ -927,6 +962,7 @@ struct regmap_irq_chip {
 	unsigned int wake_base;
 	unsigned int type_base;
 	unsigned int irq_reg_stride;
+	bool mask_writeonly:1;
 	bool init_ack_masked:1;
 	bool mask_invert:1;
 	bool use_ack:1;
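
A hedged sketch of a 1-Wire slave driver using the new initializer; the 8-bit register layout and the slave device pointer are assumptions for illustration.

	static const struct regmap_config my_w1_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	/* Typically called from the slave driver's probe path. */
	struct regmap *map = devm_regmap_init_w1(&sl->dev, &my_w1_regmap_config);

	if (IS_ERR(map))
		return PTR_ERR(map);
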
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 117699d..9cd4fef 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -110,6 +110,10 @@ struct regulator_state {
  * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
  * @settling_time: Time to settle down after voltage change when voltage
  *		   change is non-linear (unit: microseconds).
+ * @settling_time_up: Time to settle down after voltage increase when voltage
+ *		      change is non-linear (unit: microseconds).
+ * @settling_time_down: Time to settle down after voltage decrease when
+ *			 voltage change is non-linear (unit: microseconds).
  * @active_discharge: Enable/disable active discharge. The enum
  *		      regulator_active_discharge values are used for
  *		      initialisation.
@@ -152,6 +156,8 @@ struct regulation_constraints {
 
 	unsigned int ramp_delay;
 	unsigned int settling_time;
+	unsigned int settling_time_up;
+	unsigned int settling_time_down;
 	unsigned int enable_time;
 
 	unsigned int active_discharge;
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 0d905d8..19df8422 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -11,6 +11,14 @@ struct timespec;
 struct compat_timespec;
 struct pollfd;
 
+enum timespec_type {
+	TT_NONE		= 0,
+	TT_NATIVE	= 1,
+#ifdef CONFIG_COMPAT
+	TT_COMPAT	= 2,
+#endif
+};
+
 /*
  * System call restart block.
  */
@@ -29,10 +37,13 @@ struct restart_block {
 		/* For nanosleep */
 		struct {
 			clockid_t clockid;
-			struct timespec __user *rmtp;
+			enum timespec_type type;
+			union {
+				struct timespec __user *rmtp;
 #ifdef CONFIG_COMPAT
-			struct compat_timespec __user *compat_rmtp;
+				struct compat_timespec __user *compat_rmtp;
 #endif
+			};
 			u64 expires;
 		} nanosleep;
 		/* For poll */
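
A hedged sketch of how nanosleep code might fill the reworked block; the restart/which_clock/rmtp variables are assumed from the caller's context.

	restart->nanosleep.clockid = which_clock;
	/* The discriminator records which union member holds the user pointer. */
	restart->nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	restart->nanosleep.rmtp = rmtp;
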
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1abba5c..44fd002 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -37,6 +37,9 @@ struct rt_mutex {
 	int			line;
 	void			*magic;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
 struct rt_mutex_waiter;
@@ -58,19 +61,33 @@ struct hrtimer_sleeper;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, __func__)
+
+# define rt_mutex_init(mutex) \
+do { \
+	static struct lock_class_key __key; \
+	__rt_mutex_init(mutex, __func__, &__key); \
+} while (0)
+
  extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL)
+# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL, NULL)
 # define rt_mutex_debug_task_free(t)			do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+	, .dep_map = { .name = #mutexname }
+#else
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
+#endif
+
 #define __RT_MUTEX_INITIALIZER(mutexname) \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .waiters = RB_ROOT \
 	, .owner = NULL \
-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+	__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
 
 #define DEFINE_RT_MUTEX(mutexname) \
 	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -86,7 +103,7 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 	return lock->owner != NULL;
 }
 
-extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
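
With the macro change above, each rt_mutex_init() call site gets its own static lockdep class key (under CONFIG_DEBUG_RT_MUTEXES); callers are unchanged, as in this minimal sketch:

	struct rt_mutex my_lock;

	rt_mutex_init(&my_lock);
	rt_mutex_lock(&my_lock);
	/* ... critical section ... */
	rt_mutex_unlock(&my_lock);
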
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index cb3c8fe..4b3286a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -278,6 +278,8 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
 			    const void *buf, size_t buflen, off_t skip);
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			  void *buf, size_t buflen, off_t skip);
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+		       size_t buflen, off_t skip);
 
 /*
  * Maximum number of entries that will be allocated in one piece, if
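
A hedged usage sketch for the new helper, assuming sgl/nents describe an already mapped scatterlist: it zeroes buflen bytes starting at byte offset skip and returns how many bytes it actually cleared:

static void example_wipe_header(struct scatterlist *sgl, unsigned int nents)
{
	size_t cleared;

	/* zero a 512-byte span starting 64 bytes into the list */
	cleared = sg_zero_buffer(sgl, nents, 512, 64);
	WARN_ON(cleared != 512);	/* list was shorter than expected */
}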
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b69fc6..9c4ca74 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -421,7 +421,8 @@ struct sched_dl_entity {
 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
 	u64				dl_deadline;	/* Relative deadline of each instance	*/
 	u64				dl_period;	/* Separation of two instances (period) */
-	u64				dl_bw;		/* dl_runtime / dl_deadline		*/
+	u64				dl_bw;		/* dl_runtime / dl_period		*/
+	u64				dl_density;	/* dl_runtime / dl_deadline		*/
 
 	/*
 	 * Actual scheduling parameters. Initialized with the values above,
@@ -445,16 +446,33 @@ struct sched_dl_entity {
 	 *
 	 * @dl_yielded tells if task gave up the CPU before consuming
 	 * all its available runtime during the last job.
+	 *
+	 * @dl_non_contending tells if the task is inactive while still
+	 * contributing to the active utilization. In other words, it
+	 * indicates if the inactive timer has been armed and its handler
+	 * has not been executed yet. This flag is useful to avoid race
+	 * conditions between the inactive timer handler and the wakeup
+	 * code.
 	 */
 	int				dl_throttled;
 	int				dl_boosted;
 	int				dl_yielded;
+	int				dl_non_contending;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
 	 * own bandwidth to be enforced, thus we need one timer per task.
 	 */
 	struct hrtimer			dl_timer;
+
+	/*
+	 * Inactive timer, responsible for decreasing the active utilization
+	 * at the "0-lag time". When a -deadline task blocks, it contributes
+	 * to GRUB's active utilization until the "0-lag time", hence a
+	 * timer is needed to decrease the active utilization at the correct
+	 * time.
+	 */
+	struct hrtimer inactive_timer;
 };
 
 union rcu_special {
@@ -1096,8 +1114,6 @@ static inline struct pid *task_session(struct task_struct *task)
  *                     current.
  * task_xid_nr_ns()  : id seen from the ns specified;
  *
- * set_task_vxid()   : assigns a virtual id to a task;
- *
  * see also pid_nr() etc in include/linux/pid.h
  */
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
@@ -1265,6 +1281,16 @@ extern struct pid *cad_pid;
 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
 #define used_math()				tsk_used_math(current)
 
+static inline bool is_percpu_thread(void)
+{
+#ifdef CONFIG_SMP
+	return (current->flags & PF_NO_SETAFFINITY) &&
+		(current->nr_cpus_allowed  == 1);
+#else
+	return true;
+#endif
+}
+
 /* Per-process atomic flags. */
 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
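
As a sketch of the intended use (the caller below is hypothetical), code that must not move the current task can now test for per-CPU kthreads in one place instead of open-coding the flag checks:

static int example_move_current(int target_cpu)
{
	if (is_percpu_thread())
		return -EINVAL;	/* bound per-CPU kthread, leave it alone */

	return set_cpus_allowed_ptr(current, cpumask_of(target_cpu));
}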
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 34fe92c..a55600f 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -23,10 +23,6 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init_late(void)
-{
-}
-
 static inline void sched_clock_tick(void)
 {
 }
@@ -39,7 +35,7 @@ static inline void sched_clock_idle_sleep_event(void)
 {
 }
 
-static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+static inline void sched_clock_idle_wakeup_event(void)
 {
 }
 
@@ -53,7 +49,6 @@ static inline u64 local_clock(void)
 	return sched_clock();
 }
 #else
-extern void sched_clock_init_late(void);
 extern int sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
@@ -63,10 +58,10 @@ extern void clear_sched_clock_stable(void);
  */
 extern u64 __sched_clock_offset;
 
-
 extern void sched_clock_tick(void);
+extern void sched_clock_tick_stable(void);
 extern void sched_clock_idle_sleep_event(void);
-extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+extern void sched_clock_idle_wakeup_event(void);
 
 /*
  * As outlined in clock.c, provides a fast, high resolution, nanosecond
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 4995b71..7d3f75d 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -23,11 +23,11 @@ static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
-void calc_load_enter_idle(void);
-void calc_load_exit_idle(void);
+void calc_load_nohz_start(void);
+void calc_load_nohz_stop(void);
 #else
-static inline void calc_load_enter_idle(void) { }
-static inline void calc_load_exit_idle(void) { }
+static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_stop(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
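
The renamed hooks keep the same call sites as before; a simplified sketch of the expected pairing around a NO_HZ idle section, with the surrounding helpers being illustrative only:

static void example_enter_nohz(void)
{
	calc_load_nohz_start();		/* fold this CPU out of the load average */
	/* ... stop the tick and idle ... */
}

static void example_exit_nohz(void)
{
	/* ... tick restarted ... */
	calc_load_nohz_stop();		/* resume normal load-average accounting */
}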
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index a978d71..f0f065c 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -95,8 +95,6 @@ static inline void put_task_struct(struct task_struct *t)
 }
 
 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
-struct task_struct *try_get_task_struct(struct task_struct **ptask);
-
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index dc5f989..327d656 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -67,6 +67,9 @@ struct scpi_ops {
 	int (*dvfs_get_idx)(u8);
 	int (*dvfs_set_idx)(u8, u8);
 	struct scpi_dvfs_info *(*dvfs_get_info)(u8);
+	int (*device_domain_id)(struct device *);
+	int (*get_transition_latency)(struct device *);
+	int (*add_opps_to_device)(struct device *);
 	int (*sensor_get_capability)(u16 *sensors);
 	int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
 	int (*sensor_get_value)(u16, u64 *);
diff --git a/include/linux/security.h b/include/linux/security.h
index af675b5..b6ea1dc 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
  * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
  * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2016 Mellanox Technologies
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License as published by
@@ -68,6 +69,10 @@ struct audit_krule;
 struct user_namespace;
 struct timezone;
 
+enum lsm_event {
+	LSM_POLICY_CHANGE,
+};
+
 /* These functions are in security/commoncap.c */
 extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
 		       int cap, int audit);
@@ -163,6 +168,10 @@ struct security_mnt_opts {
 	int num_mnt_opts;
 };
 
+int call_lsm_notifier(enum lsm_event event, void *data);
+int register_lsm_notifier(struct notifier_block *nb);
+int unregister_lsm_notifier(struct notifier_block *nb);
+
 static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
 {
 	opts->mnt_opts = NULL;
@@ -240,7 +249,9 @@ int security_sb_set_mnt_opts(struct super_block *sb,
 				unsigned long kern_flags,
 				unsigned long *set_kern_flags);
 int security_sb_clone_mnt_opts(const struct super_block *oldsb,
-				struct super_block *newsb);
+				struct super_block *newsb,
+				unsigned long kern_flags,
+				unsigned long *set_kern_flags);
 int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
 int security_dentry_init_security(struct dentry *dentry, int mode,
 					const struct qstr *name, void **ctx,
@@ -381,6 +392,21 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
 struct security_mnt_opts {
 };
 
+static inline int call_lsm_notifier(enum lsm_event event, void *data)
+{
+	return 0;
+}
+
+static inline int register_lsm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int unregister_lsm_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
 static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
 {
 }
@@ -581,7 +607,9 @@ static inline int security_sb_set_mnt_opts(struct super_block *sb,
 }
 
 static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
-					      struct super_block *newsb)
+					      struct super_block *newsb,
+					      unsigned long kern_flags,
+					      unsigned long *set_kern_flags)
 {
 	return 0;
 }
@@ -1406,6 +1434,32 @@ static inline int security_tun_dev_open(void *security)
 }
 #endif	/* CONFIG_SECURITY_NETWORK */
 
+#ifdef CONFIG_SECURITY_INFINIBAND
+int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey);
+int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num);
+int security_ib_alloc_security(void **sec);
+void security_ib_free_security(void *sec);
+#else	/* CONFIG_SECURITY_INFINIBAND */
+static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
+{
+	return 0;
+}
+
+static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
+{
+	return 0;
+}
+
+static inline int security_ib_alloc_security(void **sec)
+{
+	return 0;
+}
+
+static inline void security_ib_free_security(void *sec)
+{
+}
+#endif	/* CONFIG_SECURITY_INFINIBAND */
+
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 
 int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
@@ -1651,6 +1705,10 @@ extern struct dentry *securityfs_create_file(const char *name, umode_t mode,
 					     struct dentry *parent, void *data,
 					     const struct file_operations *fops);
 extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
+struct dentry *securityfs_create_symlink(const char *name,
+					 struct dentry *parent,
+					 const char *target,
+					 const struct inode_operations *iops);
 extern void securityfs_remove(struct dentry *dentry);
 
 #else /* CONFIG_SECURITYFS */
@@ -1670,6 +1728,14 @@ static inline struct dentry *securityfs_create_file(const char *name,
 	return ERR_PTR(-ENODEV);
 }
 
+static inline struct dentry *securityfs_create_symlink(const char *name,
+					struct dentry *parent,
+					const char *target,
+					const struct inode_operations *iops)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline void securityfs_remove(struct dentry *dentry)
 {}
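
A hedged sketch of the new notifier interface from a consumer's point of view; the callback and its registration below are hypothetical, only the LSM_POLICY_CHANGE event and the register/call/unregister helpers come from this change:

static int example_lsm_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	if (event == LSM_POLICY_CHANGE)
		pr_info("security policy reloaded\n");
	return NOTIFY_OK;
}

static struct notifier_block example_lsm_nb = {
	.notifier_call = example_lsm_event,
};

/* consumer:  register_lsm_notifier(&example_lsm_nb);
 * LSM side:  call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
 * teardown:  unregister_lsm_notifier(&example_lsm_nb);
 */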
 
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 64d892f..1775500 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -195,6 +195,7 @@ struct uart_port {
 #define UPF_NO_TXEN_TEST	((__force upf_t) (1 << 15))
 #define UPF_MAGIC_MULTIPLIER	((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
 
+#define UPF_NO_THRE_TEST	((__force upf_t) (1 << 19))
 /* Port has hardware-assisted h/w flow control */
 #define UPF_AUTO_CTS		((__force upf_t) (1 << 20))
 #define UPF_AUTO_RTS		((__force upf_t) (1 << 21))
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 1f5a166..a39fedd 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -3,16 +3,13 @@
 
 #include <linux/bug.h>
 #include <linux/signal_types.h>
+#include <linux/string.h>
 
 struct task_struct;
 
 /* for sysctl */
 extern int print_fatal_signals;
 
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
 {
 	if (from->si_code < 0)
@@ -22,7 +19,7 @@ static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
 		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
 }
 
-#endif
+int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
 
 /*
  * Define some primitives to manipulate sigset_t.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a098d95..25b1659 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2691,7 +2691,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
  * @offset: the offset within the fragment (starting at the
  *          fragment's own offset)
  * @size: the number of bytes to map
- * @dir: the direction of the mapping (%PCI_DMA_*)
+ * @dir: the direction of the mapping (``PCI_DMA_*``)
  *
  * Maps the page associated with @frag to @device.
  */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 07ef550..93315d6 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,6 +84,7 @@ struct kmem_cache {
 	int red_left_pad;	/* Left redzone padding size */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
+	struct work_struct kobj_remove_work;
 #endif
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
diff --git a/include/linux/soc/actions/owl-sps.h b/include/linux/soc/actions/owl-sps.h
new file mode 100644
index 0000000..33d0dbe
--- /dev/null
+++ b/include/linux/soc/actions/owl-sps.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+#ifndef SOC_ACTIONS_OWL_SPS_H
+#define SOC_ACTIONS_OWL_SPS_H
+
+int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable);
+
+#endif
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index 787e7ad..2c231f2 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -1,8 +1,7 @@
 #ifndef __LINUX_SOC_RENESAS_RCAR_RST_H__
 #define __LINUX_SOC_RENESAS_RCAR_RST_H__
 
-#if defined(CONFIG_ARCH_RCAR_GEN1) || defined(CONFIG_ARCH_RCAR_GEN2) || \
-    defined(CONFIG_ARCH_R8A7795) || defined(CONFIG_ARCH_R8A7796)
+#ifdef CONFIG_RST_RCAR
 int rcar_rst_read_mode_pins(u32 *mode);
 #else
 static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index b087a85..f74b581 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -1,10 +1,16 @@
 #ifndef __SPI_SH_MSIOF_H__
 #define __SPI_SH_MSIOF_H__
 
+enum {
+	MSIOF_SPI_MASTER,
+	MSIOF_SPI_SLAVE,
+};
+
 struct sh_msiof_spi_info {
 	int tx_fifo_override;
 	int rx_fifo_override;
 	u16 num_chipselect;
+	int mode;
 	unsigned int dma_tx_id;
 	unsigned int dma_rx_id;
 	u32 dtdl;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 935bd28..7b2170b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,13 +24,13 @@
 
 struct dma_chan;
 struct property_entry;
-struct spi_master;
+struct spi_controller;
 struct spi_transfer;
 struct spi_flash_read_message;
 
 /*
- * INTERFACES between SPI master-side drivers and SPI infrastructure.
- * (There's no SPI slave support for Linux yet...)
+ * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * and SPI infrastructure.
  */
 extern struct bus_type spi_bus_type;
 
@@ -84,7 +84,7 @@ struct spi_statistics {
 
 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 				       struct spi_transfer *xfer,
-				       struct spi_master *master);
+				       struct spi_controller *ctlr);
 
 #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
 	do {							\
@@ -98,13 +98,14 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
 
 /**
- * struct spi_device - Master side proxy for an SPI slave device
+ * struct spi_device - Controller side proxy for an SPI slave device
  * @dev: Driver model representation of the device.
- * @master: SPI controller used with the device.
+ * @controller: SPI controller used with the device.
+ * @master: Copy of controller, for backwards compatibility.
  * @max_speed_hz: Maximum clock rate to be used with this chip
  *	(on this board); may be changed by the device's driver.
  *	The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @master.
+ * @chip_select: Chipselect, distinguishing chips handled by @controller.
  * @mode: The spi mode defines how data is clocked out and in.
  *	This may be changed by the device's driver.
  *	The "active low" default for chipselect mode can be overridden
@@ -140,7 +141,8 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  */
 struct spi_device {
 	struct device		dev;
-	struct spi_master	*master;
+	struct spi_controller	*controller;
+	struct spi_controller	*master;	/* compatibility layer */
 	u32			max_speed_hz;
 	u8			chip_select;
 	u8			bits_per_word;
@@ -198,7 +200,7 @@ static inline void spi_dev_put(struct spi_device *spi)
 		put_device(&spi->dev);
 }
 
-/* ctldata is for the bus_master driver's runtime state */
+/* ctldata is for the bus_controller driver's runtime state */
 static inline void *spi_get_ctldata(struct spi_device *spi)
 {
 	return spi->controller_state;
@@ -292,9 +294,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 			spi_unregister_driver)
 
 /**
- * struct spi_master - interface to SPI master controller
+ * struct spi_controller - interface to SPI master or slave controller
  * @dev: device interface to this driver
- * @list: link with the global spi_master list
+ * @list: link with the global spi_controller list
  * @bus_num: board-specific (and often SOC-specific) identifier for a
  *	given SPI controller.
  * @num_chipselect: chipselects are used to distinguish individual
@@ -311,6 +313,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @min_speed_hz: Lowest supported transfer speed
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
+ * @slave: indicates that this is an SPI slave controller
  * @max_transfer_size: function that returns the max transfer size for
  *	a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @max_message_size: function that returns the max message size for
@@ -326,8 +329,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	the device whose settings are being modified.
  * @transfer: adds a message to the controller's transfer queue.
  * @cleanup: frees controller-specific state
- * @can_dma: determine whether this master supports DMA
- * @queued: whether this master is providing an internal message queue
+ * @can_dma: determine whether this controller supports DMA
+ * @queued: whether this controller is providing an internal message queue
  * @kworker: thread struct for message pump
  * @kworker_task: pointer to task for message pump kworker thread
  * @pump_messages: work struct for scheduling work to the message pump
@@ -374,6 +377,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *		in the generic implementation of transfer_one_message().
  * @unprepare_message: undo any work done by prepare_message().
+ * @slave_abort: abort the ongoing transfer request on an SPI slave controller
  * @spi_flash_read: to support spi-controller hardwares that provide
  *                  accelerated interface to read from flash devices.
  * @spi_flash_can_dma: analogous to can_dma() interface, but for
@@ -382,7 +386,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
- * @statistics: statistics for the spi_master
+ * @statistics: statistics for the spi_controller
  * @dma_tx: DMA transmit channel
  * @dma_rx: DMA receive channel
  * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -391,7 +395,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	what Linux expects, this optional hook can be used to translate
  *	between the two.
  *
- * Each SPI master controller can communicate with one or more @spi_device
+ * Each SPI controller can communicate with one or more @spi_device
  * children.  These make a small bus, sharing MOSI, MISO and SCK signals
  * but not chip select signals.  Each device may be configured to use a
  * different clock rate, since those shared signals are ignored unless
@@ -402,7 +406,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * an SPI slave device.  For each such message it queues, it calls the
  * message's completion function when the transaction completes.
  */
-struct spi_master {
+struct spi_controller {
 	struct device	dev;
 
 	struct list_head list;
@@ -440,12 +444,16 @@ struct spi_master {
 
 	/* other constraints relevant to this driver */
 	u16			flags;
-#define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */
-#define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
-#define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
-#define SPI_MASTER_MUST_RX      BIT(3)		/* requires rx */
-#define SPI_MASTER_MUST_TX      BIT(4)		/* requires tx */
-#define SPI_MASTER_GPIO_SS      BIT(5)		/* GPIO CS must select slave */
+#define SPI_CONTROLLER_HALF_DUPLEX	BIT(0)	/* can't do full duplex */
+#define SPI_CONTROLLER_NO_RX		BIT(1)	/* can't do buffer read */
+#define SPI_CONTROLLER_NO_TX		BIT(2)	/* can't do buffer write */
+#define SPI_CONTROLLER_MUST_RX		BIT(3)	/* requires rx */
+#define SPI_CONTROLLER_MUST_TX		BIT(4)	/* requires tx */
+
+#define SPI_MASTER_GPIO_SS		BIT(5)	/* GPIO CS must select slave */
+
+	/* flag indicating this is an SPI slave controller */
+	bool			slave;
 
 	/*
 	 * on some hardware transfer / message size may be constrained
@@ -480,8 +488,8 @@ struct spi_master {
 	 *   any other request management
 	 * + To a given spi_device, message queueing is pure fifo
 	 *
-	 * + The master's main job is to process its message queue,
-	 *   selecting a chip then transferring data
+	 * + The controller's main job is to process its message queue,
+	 *   selecting a chip (for masters), then transferring data
 	 * + If there are multiple spi_device children, the i/o queue
 	 *   arbitration algorithm is unspecified (round robin, fifo,
 	 *   priority, reservations, preemption, etc)
@@ -494,7 +502,7 @@ struct spi_master {
 	int			(*transfer)(struct spi_device *spi,
 						struct spi_message *mesg);
 
-	/* called on release() to free memory provided by spi_master */
+	/* called on release() to free memory provided by spi_controller */
 	void			(*cleanup)(struct spi_device *spi);
 
 	/*
@@ -504,13 +512,13 @@ struct spi_master {
 	 * not modify or store xfer and dma_tx and dma_rx must be set
 	 * while the device is prepared.
 	 */
-	bool			(*can_dma)(struct spi_master *master,
+	bool			(*can_dma)(struct spi_controller *ctlr,
 					   struct spi_device *spi,
 					   struct spi_transfer *xfer);
 
 	/*
 	 * These hooks are for drivers that want to use the generic
-	 * master transfer queueing mechanism. If these are used, the
+	 * controller transfer queueing mechanism. If these are used, the
 	 * transfer() function above must NOT be specified by the driver.
 	 * Over time we expect SPI drivers to be phased over to this API.
 	 */
@@ -531,14 +539,15 @@ struct spi_master {
 	struct completion               xfer_completion;
 	size_t				max_dma_len;
 
-	int (*prepare_transfer_hardware)(struct spi_master *master);
-	int (*transfer_one_message)(struct spi_master *master,
+	int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
+	int (*transfer_one_message)(struct spi_controller *ctlr,
 				    struct spi_message *mesg);
-	int (*unprepare_transfer_hardware)(struct spi_master *master);
-	int (*prepare_message)(struct spi_master *master,
+	int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
+	int (*prepare_message)(struct spi_controller *ctlr,
 			       struct spi_message *message);
-	int (*unprepare_message)(struct spi_master *master,
+	int (*unprepare_message)(struct spi_controller *ctlr,
 				 struct spi_message *message);
+	int (*slave_abort)(struct spi_controller *ctlr);
 	int (*spi_flash_read)(struct  spi_device *spi,
 			      struct spi_flash_read_message *msg);
 	bool (*spi_flash_can_dma)(struct spi_device *spi,
@@ -550,9 +559,9 @@ struct spi_master {
 	 * of transfer_one_message() provided by the core.
 	 */
 	void (*set_cs)(struct spi_device *spi, bool enable);
-	int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+	int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
 			    struct spi_transfer *transfer);
-	void (*handle_err)(struct spi_master *master,
+	void (*handle_err)(struct spi_controller *ctlr,
 			   struct spi_message *message);
 
 	/* gpio chip select */
@@ -569,57 +578,78 @@ struct spi_master {
 	void			*dummy_rx;
 	void			*dummy_tx;
 
-	int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
+	int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
 };
 
-static inline void *spi_master_get_devdata(struct spi_master *master)
+static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
 {
-	return dev_get_drvdata(&master->dev);
+	return dev_get_drvdata(&ctlr->dev);
 }
 
-static inline void spi_master_set_devdata(struct spi_master *master, void *data)
+static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
+					      void *data)
 {
-	dev_set_drvdata(&master->dev, data);
+	dev_set_drvdata(&ctlr->dev, data);
 }
 
-static inline struct spi_master *spi_master_get(struct spi_master *master)
+static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
 {
-	if (!master || !get_device(&master->dev))
+	if (!ctlr || !get_device(&ctlr->dev))
 		return NULL;
-	return master;
+	return ctlr;
 }
 
-static inline void spi_master_put(struct spi_master *master)
+static inline void spi_controller_put(struct spi_controller *ctlr)
 {
-	if (master)
-		put_device(&master->dev);
+	if (ctlr)
+		put_device(&ctlr->dev);
+}
+
+static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
+{
+	return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
 }
 
 /* PM calls that need to be issued by the driver */
-extern int spi_master_suspend(struct spi_master *master);
-extern int spi_master_resume(struct spi_master *master);
+extern int spi_controller_suspend(struct spi_controller *ctlr);
+extern int spi_controller_resume(struct spi_controller *ctlr);
 
 /* Calls the driver make to interact with the message queue */
-extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
-extern void spi_finalize_current_message(struct spi_master *master);
-extern void spi_finalize_current_transfer(struct spi_master *master);
+extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
+extern void spi_finalize_current_message(struct spi_controller *ctlr);
+extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
 
-/* the spi driver core manages memory for the spi_master classdev */
-extern struct spi_master *
-spi_alloc_master(struct device *host, unsigned size);
+/* the spi driver core manages memory for the spi_controller classdev */
+extern struct spi_controller *__spi_alloc_controller(struct device *host,
+						unsigned int size, bool slave);
 
-extern int spi_register_master(struct spi_master *master);
-extern int devm_spi_register_master(struct device *dev,
-				    struct spi_master *master);
-extern void spi_unregister_master(struct spi_master *master);
+static inline struct spi_controller *spi_alloc_master(struct device *host,
+						      unsigned int size)
+{
+	return __spi_alloc_controller(host, size, false);
+}
 
-extern struct spi_master *spi_busnum_to_master(u16 busnum);
+static inline struct spi_controller *spi_alloc_slave(struct device *host,
+						     unsigned int size)
+{
+	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+		return NULL;
+
+	return __spi_alloc_controller(host, size, true);
+}
+
+extern int spi_register_controller(struct spi_controller *ctlr);
+extern int devm_spi_register_controller(struct device *dev,
+					struct spi_controller *ctlr);
+extern void spi_unregister_controller(struct spi_controller *ctlr);
+
+extern struct spi_controller *spi_busnum_to_master(u16 busnum);
 
 /*
  * SPI resource management while processing a SPI message
  */
 
-typedef void (*spi_res_release_t)(struct spi_master *master,
+typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
 				  struct spi_message *msg,
 				  void *res);
 
@@ -644,7 +674,7 @@ extern void *spi_res_alloc(struct spi_device *spi,
 extern void spi_res_add(struct spi_message *message, void *res);
 extern void spi_res_free(void *res);
 
-extern void spi_res_release(struct spi_master *master,
+extern void spi_res_release(struct spi_controller *ctlr,
 			    struct spi_message *message);
 
 /*---------------------------------------------------------------------------*/
@@ -828,7 +858,7 @@ struct spi_message {
 
 	/* for optional use by whatever driver currently owns the
 	 * spi_message ...  between calls to spi_async and then later
-	 * complete(), that's the spi_master controller driver.
+	 * complete(), that's the spi_controller controller driver.
 	 */
 	struct list_head	queue;
 	void			*state;
@@ -912,25 +942,27 @@ extern int spi_setup(struct spi_device *spi);
 extern int spi_async(struct spi_device *spi, struct spi_message *message);
 extern int spi_async_locked(struct spi_device *spi,
 			    struct spi_message *message);
+extern int spi_slave_abort(struct spi_device *spi);
 
 static inline size_t
 spi_max_message_size(struct spi_device *spi)
 {
-	struct spi_master *master = spi->master;
-	if (!master->max_message_size)
+	struct spi_controller *ctlr = spi->controller;
+
+	if (!ctlr->max_message_size)
 		return SIZE_MAX;
-	return master->max_message_size(spi);
+	return ctlr->max_message_size(spi);
 }
 
 static inline size_t
 spi_max_transfer_size(struct spi_device *spi)
 {
-	struct spi_master *master = spi->master;
+	struct spi_controller *ctlr = spi->controller;
 	size_t tr_max = SIZE_MAX;
 	size_t msg_max = spi_max_message_size(spi);
 
-	if (master->max_transfer_size)
-		tr_max = master->max_transfer_size(spi);
+	if (ctlr->max_transfer_size)
+		tr_max = ctlr->max_transfer_size(spi);
 
 	/* transfer size limit must not be greater than message size limit */
 	return min(tr_max, msg_max);
@@ -941,7 +973,7 @@ spi_max_transfer_size(struct spi_device *spi)
 /* SPI transfer replacement methods which make use of spi_res */
 
 struct spi_replaced_transfers;
-typedef void (*spi_replaced_release_t)(struct spi_master *master,
+typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
 				       struct spi_message *msg,
 				       struct spi_replaced_transfers *res);
 /**
@@ -985,7 +1017,7 @@ extern struct spi_replaced_transfers *spi_replace_transfers(
 
 /* SPI transfer transformation methods */
 
-extern int spi_split_transfers_maxsize(struct spi_master *master,
+extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
 				       struct spi_message *msg,
 				       size_t maxsize,
 				       gfp_t gfp);
@@ -999,8 +1031,8 @@ extern int spi_split_transfers_maxsize(struct spi_master *master,
 
 extern int spi_sync(struct spi_device *spi, struct spi_message *message);
 extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
-extern int spi_bus_lock(struct spi_master *master);
-extern int spi_bus_unlock(struct spi_master *master);
+extern int spi_bus_lock(struct spi_controller *ctlr);
+extern int spi_bus_unlock(struct spi_controller *ctlr);
 
 /**
  * spi_sync_transfer - synchronous SPI data transfer
@@ -1185,9 +1217,9 @@ struct spi_flash_read_message {
 /* SPI core interface for flash read support */
 static inline bool spi_flash_read_supported(struct spi_device *spi)
 {
-	return spi->master->spi_flash_read &&
-	       (!spi->master->flash_read_supported ||
-	       spi->master->flash_read_supported(spi));
+	return spi->controller->spi_flash_read &&
+	       (!spi->controller->flash_read_supported ||
+	       spi->controller->flash_read_supported(spi));
 }
 
 int spi_flash_read(struct spi_device *spi,
@@ -1220,7 +1252,7 @@ int spi_flash_read(struct spi_device *spi,
  * @irq: Initializes spi_device.irq; depends on how the board is wired.
  * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
  *	from the chip datasheet and board-specific signal quality issues.
- * @bus_num: Identifies which spi_master parents the spi_device; unused
+ * @bus_num: Identifies which spi_controller parents the spi_device; unused
  *	by spi_new_device(), and otherwise depends on board wiring.
  * @chip_select: Initializes spi_device.chip_select; depends on how
  *	the board is wired.
@@ -1261,7 +1293,7 @@ struct spi_board_info {
 
 
 	/* bus_num is board specific and matches the bus_num of some
-	 * spi_master that will probably be registered later.
+	 * spi_controller that will probably be registered later.
 	 *
 	 * chip_select reflects how this chip is wired to that master;
 	 * it's less than num_chipselect.
@@ -1295,7 +1327,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
 /* If you're hotplugging an adapter with devices (parport, usb, etc)
  * use spi_new_device() to describe each device.  You can also call
  * spi_unregister_device() to start making that device vanish, but
- * normally that would be handled by spi_unregister_master().
+ * normally that would be handled by spi_unregister_controller().
  *
  * You can also use spi_alloc_device() and spi_add_device() to use a two
  * stage registration sequence for each spi_device.  This gives the caller
@@ -1304,13 +1336,13 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
  * be defined using the board info.
  */
 extern struct spi_device *
-spi_alloc_device(struct spi_master *master);
+spi_alloc_device(struct spi_controller *ctlr);
 
 extern int
 spi_add_device(struct spi_device *spi);
 
 extern struct spi_device *
-spi_new_device(struct spi_master *, struct spi_board_info *);
+spi_new_device(struct spi_controller *, struct spi_board_info *);
 
 extern void spi_unregister_device(struct spi_device *spi);
 
@@ -1318,9 +1350,32 @@ extern const struct spi_device_id *
 spi_get_device_id(const struct spi_device *sdev);
 
 static inline bool
-spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
+spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
 {
-	return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
+	return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
 }
 
+
+/* Compatibility layer */
+#define spi_master			spi_controller
+
+#define SPI_MASTER_HALF_DUPLEX		SPI_CONTROLLER_HALF_DUPLEX
+#define SPI_MASTER_NO_RX		SPI_CONTROLLER_NO_RX
+#define SPI_MASTER_NO_TX		SPI_CONTROLLER_NO_TX
+#define SPI_MASTER_MUST_RX		SPI_CONTROLLER_MUST_RX
+#define SPI_MASTER_MUST_TX		SPI_CONTROLLER_MUST_TX
+
+#define spi_master_get_devdata(_ctlr)	spi_controller_get_devdata(_ctlr)
+#define spi_master_set_devdata(_ctlr, _data)	\
+	spi_controller_set_devdata(_ctlr, _data)
+#define spi_master_get(_ctlr)		spi_controller_get(_ctlr)
+#define spi_master_put(_ctlr)		spi_controller_put(_ctlr)
+#define spi_master_suspend(_ctlr)	spi_controller_suspend(_ctlr)
+#define spi_master_resume(_ctlr)	spi_controller_resume(_ctlr)
+
+#define spi_register_master(_ctlr)	spi_register_controller(_ctlr)
+#define devm_spi_register_master(_dev, _ctlr) \
+	devm_spi_register_controller(_dev, _ctlr)
+#define spi_unregister_master(_ctlr)	spi_unregister_controller(_ctlr)
+
 #endif /* __LINUX_SPI_H */
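
A minimal probe sketch for the new slave support, assuming a hypothetical "foo" platform driver; spi_alloc_slave() returns NULL when CONFIG_SPI_SLAVE is disabled, and the rest of the registration path is shared with master controllers:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_priv { void __iomem *regs; };

static int foo_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* shift xfer->tx_buf out / xfer->rx_buf in under the remote master's clock */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct foo_priv));
	if (!ctlr)
		return -ENOMEM;		/* also NULL when CONFIG_SPI_SLAVE=n */

	ctlr->transfer_one = foo_transfer_one;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}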
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 59248dc..d9510e8 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -369,6 +369,26 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+/**
+ * spin_unlock_wait - Interpose between successive critical sections
+ * @lock: the spinlock whose critical sections are to be interposed.
+ *
+ * Semantically this is equivalent to a spin_lock() immediately
+ * followed by a spin_unlock().  However, most architectures have
+ * more efficient implementations in which the spin_unlock_wait()
+ * cannot block concurrent lock acquisition, and in some cases
+ * where spin_unlock_wait() does not write to the lock variable.
+ * Nevertheless, spin_unlock_wait() can have high overhead, so if
+ * you feel the need to use it, please check to see if there is
+ * a better way to get your job done.
+ *
+ * The ordering guarantees provided by spin_unlock_wait() are:
+ *
+ * 1.  All accesses preceding the spin_unlock_wait() happen before
+ *     any accesses in later critical sections for this same lock.
+ * 2.  All accesses following the spin_unlock_wait() happen after
+ *     any accesses in earlier critical sections for this same lock.
+ */
 static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
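
A hedged illustration of the documented guarantees, assuming a hypothetical object whose lookup paths have already been disabled: guarantee 1 ensures the last holder's stores are visible before the object is freed.

struct example_obj {
	spinlock_t lock;
	/* ... */
};

static void example_free(struct example_obj *obj)
{
	/* no new lookups can find obj at this point */
	spin_unlock_wait(&obj->lock);	/* wait out any current critical section */
	kfree(obj);
}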
diff --git a/include/linux/sram.h b/include/linux/sram.h
index c97dcbe..4fb405f 100644
--- a/include/linux/sram.h
+++ b/include/linux/sram.h
@@ -16,12 +16,12 @@
 struct gen_pool;
 
 #ifdef CONFIG_SRAM_EXEC
-int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
+void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
 #else
-static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
-				 size_t size)
+static inline void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+				   size_t size)
 {
-	return -ENODEV;
+	return NULL;
 }
 #endif /* CONFIG_SRAM_EXEC */
 #endif /* __LINUX_SRAM_H__ */
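
With the new void * return, callers get back the executable address to jump to instead of reusing dst; a hedged sketch, with all names below hypothetical:

static int example_run_from_sram(struct gen_pool *pool, void *dst,
				 void *src, size_t size)
{
	void (*run)(void);

	run = (void (*)(void))sram_exec_copy(pool, dst, src, size);
	if (!run)
		return -ENODEV;		/* no executable SRAM pool available */

	run();
	return 0;
}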
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 167ad88..39af9bc 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -60,32 +60,15 @@ int init_srcu_struct(struct srcu_struct *sp);
 #include <linux/srcutiny.h>
 #elif defined(CONFIG_TREE_SRCU)
 #include <linux/srcutree.h>
-#elif defined(CONFIG_CLASSIC_SRCU)
-#include <linux/srcuclassic.h>
-#else
+#elif defined(CONFIG_SRCU)
 #error "Unknown SRCU implementation specified to kernel configuration"
+#else
+/* Dummy definition for things like notifiers.  Actual use gets link error. */
+struct srcu_struct { };
 #endif
 
-/**
- * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
- * @func: function to be invoked after the SRCU grace period
- *
- * The callback function will be invoked some time after a full SRCU
- * grace period elapses, in other words after all pre-existing SRCU
- * read-side critical sections have completed.  However, the callback
- * function might well execute concurrently with other SRCU read-side
- * critical sections that started after call_srcu() was invoked.  SRCU
- * read-side critical sections are delimited by srcu_read_lock() and
- * srcu_read_unlock(), and may be nested.
- *
- * The callback will be invoked from process context, but must nevertheless
- * be fast and must not block.
- */
 void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 		void (*func)(struct rcu_head *head));
-
 void cleanup_srcu_struct(struct srcu_struct *sp);
 int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
@@ -172,9 +155,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 {
 	int retval;
 
-	preempt_disable();
 	retval = __srcu_read_lock(sp);
-	preempt_enable();
 	rcu_lock_acquire(&(sp)->dep_map);
 	return retval;
 }
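
The reader-side calling convention is unchanged by dropping the preempt_disable()/preempt_enable() pair; a minimal reader sketch with a hypothetical srcu_struct:

DEFINE_STATIC_SRCU(example_srcu);

static void example_reader(void)
{
	int idx;

	idx = srcu_read_lock(&example_srcu);
	/* read SRCU-protected data; sleeping is allowed here */
	srcu_read_unlock(&example_srcu, idx);
}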
diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h
deleted file mode 100644
index 5753f73..0000000
--- a/include/linux/srcuclassic.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Sleepable Read-Copy Update mechanism for mutual exclusion,
- *	classic v4.11 variant.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * Copyright (C) IBM Corporation, 2017
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- */
-
-#ifndef _LINUX_SRCU_CLASSIC_H
-#define _LINUX_SRCU_CLASSIC_H
-
-struct srcu_array {
-	unsigned long lock_count[2];
-	unsigned long unlock_count[2];
-};
-
-struct rcu_batch {
-	struct rcu_head *head, **tail;
-};
-
-#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
-
-struct srcu_struct {
-	unsigned long completed;
-	struct srcu_array __percpu *per_cpu_ref;
-	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
-	bool running;
-	/* callbacks just queued */
-	struct rcu_batch batch_queue;
-	/* callbacks try to do the first check_zero */
-	struct rcu_batch batch_check0;
-	/* callbacks done with the first check_zero and the flip */
-	struct rcu_batch batch_check1;
-	struct rcu_batch batch_done;
-	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-};
-
-void process_srcu(struct work_struct *work);
-
-#define __SRCU_STRUCT_INIT(name)					\
-	{								\
-		.completed = -300,					\
-		.per_cpu_ref = &name##_srcu_array,			\
-		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
-		.running = false,					\
-		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
-		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
-		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
-		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
-		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
-		__SRCU_DEP_MAP_INIT(name)				\
-	}
-
-/*
- * Define and initialize a srcu struct at build time.
- * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
- *
- * Note that although DEFINE_STATIC_SRCU() hides the name from other
- * files, the per-CPU variable rules nevertheless require that the
- * chosen name be globally unique.  These rules also prohibit use of
- * DEFINE_STATIC_SRCU() within a function.  If these rules are too
- * restrictive, declare the srcu_struct manually.  For example, in
- * each file:
- *
- *	static struct srcu_struct my_srcu;
- *
- * Then, before the first use of each my_srcu, manually initialize it:
- *
- *	init_srcu_struct(&my_srcu);
- *
- * See include/linux/percpu-defs.h for the rules on per-CPU variables.
- */
-#define __DEFINE_SRCU(name, is_static)					\
-	static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
-	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
-
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-
-static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
-					   struct srcu_struct *sp, int *flags,
-					   unsigned long *gpnum,
-					   unsigned long *completed)
-{
-	if (test_type != SRCU_FLAVOR)
-		return;
-	*flags = 0;
-	*completed = sp->completed;
-	*gpnum = *completed;
-	if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check0.head)
-		(*gpnum)++;
-}
-
-#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 42311ee..cfbfc54 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -27,15 +27,14 @@
 #include <linux/swait.h>
 
 struct srcu_struct {
-	int srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
+	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
+	short srcu_idx;			/* Current reader array element. */
+	u8 srcu_gp_running;		/* GP workqueue running? */
+	u8 srcu_gp_waiting;		/* GP waiting for readers? */
 	struct swait_queue_head srcu_wq;
 					/* Last srcu_read_unlock() wakes GP. */
-	unsigned long srcu_gp_seq;	/* GP seq # for callback tagging. */
-	struct rcu_segcblist srcu_cblist;
-					/* Pending SRCU callbacks. */
-	int srcu_idx;			/* Current reader array element. */
-	bool srcu_gp_running;		/* GP workqueue running? */
-	bool srcu_gp_waiting;		/* GP waiting for readers? */
+	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
+	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
 	struct work_struct srcu_work;	/* For driving grace periods. */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -47,7 +46,7 @@ void srcu_drive_gp(struct work_struct *wp);
 #define __SRCU_STRUCT_INIT(name)					\
 {									\
 	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
-	.srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist),	\
+	.srcu_cb_tail = &name.srcu_cb_head,				\
 	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
 	__SRCU_DEP_MAP_INIT(name)					\
 }
@@ -63,6 +62,21 @@ void srcu_drive_gp(struct work_struct *wp);
 
 void synchronize_srcu(struct srcu_struct *sp);
 
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance.  Returns an
+ * index that must be passed to the matching srcu_read_unlock().
+ */
+static inline int __srcu_read_lock(struct srcu_struct *sp)
+{
+	int idx;
+
+	idx = READ_ONCE(sp->srcu_idx);
+	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+	return idx;
+}
+
 static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
 {
 	synchronize_srcu(sp);
@@ -73,21 +87,4 @@ static inline void srcu_barrier(struct srcu_struct *sp)
 	synchronize_srcu(sp);
 }
 
-static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
-{
-	return 0;
-}
-
-static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
-					   struct srcu_struct *sp, int *flags,
-					   unsigned long *gpnum,
-					   unsigned long *completed)
-{
-	if (test_type != SRCU_FLAVOR)
-		return;
-	*flags = 0;
-	*completed = sp->srcu_gp_seq;
-	*gpnum = *completed;
-}
-
 #endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 32e86d8..42973f7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
 	unsigned long srcu_unlock_count[2];	/* Unlocks per CPU. */
 
 	/* Update-side state. */
-	spinlock_t lock ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
 	struct rcu_segcblist srcu_cblist;	/* List of callbacks.*/
 	unsigned long srcu_gp_seq_needed;	/* Furthest future GP needed. */
 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
  * Node in SRCU combining tree, similar in function to rcu_data.
  */
 struct srcu_node {
-	spinlock_t lock;
+	raw_spinlock_t __private lock;
 	unsigned long srcu_have_cbs[4];		/* GP seq for children */
 						/*  having CBs, but only */
 						/*  is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
 	struct srcu_node *level[RCU_NUM_LVLS + 1];
 						/* First node at each level. */
 	struct mutex srcu_cb_mutex;		/* Serialize CB preparation. */
-	spinlock_t gp_lock;			/* protect ->srcu_cblist */
+	raw_spinlock_t __private lock;		/* Protect counters */
 	struct mutex srcu_gp_mutex;		/* Serialize GP work. */
 	unsigned int srcu_idx;			/* Current rdr array element. */
 	unsigned long srcu_gp_seq;		/* Grace-period seq #. */
@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *work);
 #define __SRCU_STRUCT_INIT(name)					\
 	{								\
 		.sda = &name##_srcu_data,				\
-		.gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock),		\
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
 		.srcu_gp_seq_needed = 0 - 1,				\
 		__SRCU_DEP_MAP_INIT(name)				\
 	}
@@ -141,10 +141,5 @@ void process_srcu(struct work_struct *work);
 
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-
-void srcutorture_get_gp_data(enum rcutorture_type test_type,
-			     struct srcu_struct *sp, int *flags,
-			     unsigned long *gpnum, unsigned long *completed);
 
 #endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 3cc9632..3d60275 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -116,15 +116,29 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
  * @fn() runs.
  *
  * This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel. */
+ * grabbing every spinlock in the kernel.
+ *
+ * Protects against CPU hotplug.
+ */
 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
 
+/**
+ * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * Same as above. Must be called from within a cpus_read_lock() protected
+ * region. Avoids nested calls to cpus_read_lock().
+ */
+int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
+
 int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				   const struct cpumask *cpus);
 #else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
 
-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
-				 const struct cpumask *cpus)
+static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+					  const struct cpumask *cpus)
 {
 	unsigned long flags;
 	int ret;
@@ -134,6 +148,12 @@ static inline int stop_machine(cpu_stop_fn_t fn, void *data,
 	return ret;
 }
 
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+			       const struct cpumask *cpus)
+{
+	return stop_machine_cpuslocked(fn, data, cpus);
+}
+
 static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 						 const struct cpumask *cpus)
 {
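
A sketch of the expected nesting for the new _cpuslocked variant; the callback and data below are hypothetical:

static int example_patch_all_cpus(cpu_stop_fn_t fn, void *data)
{
	int ret;

	cpus_read_lock();				/* hotplug lock already needed here... */
	ret = stop_machine_cpuslocked(fn, data, NULL);	/* ...so avoid taking it again */
	cpus_read_unlock();

	return ret;
}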
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7ba040c..9d7529f 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -13,7 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/sunrpc/types.h>
 #include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/workqueue.h>
 #include <linux/sunrpc/xdr.h>
 
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 9463102..11cef5a 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
 {
 	char *cp = (char *)p;
 	struct kvec *vec = &rqstp->rq_arg.head[0];
-	return cp == (char *)vec->iov_base + vec->iov_len;
+	return cp >= (char*)vec->iov_base
+		&& cp <= (char*)vec->iov_base + vec->iov_len;
 }
 
 static inline int
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
index 17ea468..8d33767 100644
--- a/include/linux/superhyway.h
+++ b/include/linux/superhyway.h
@@ -101,7 +101,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *, struct
 int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
 
 /* drivers/sh/superhyway/superhyway-sysfs.c */
-extern struct device_attribute superhyway_dev_attrs[];
+extern const struct attribute_group *superhyway_dev_groups[];
 
 #endif /* __LINUX_SUPERHYWAY_H */
 
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c6f0f0d..aa02c32 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -512,7 +512,7 @@ static inline void sysfs_notify_dirent(struct kernfs_node *kn)
 }
 
 static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
-						   const unsigned char *name)
+						   const char *name)
 {
 	return kernfs_find_and_get(parent, name);
 }
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 110f453..0a0a53d 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
  */
 struct tk_read_base {
 	struct clocksource	*clock;
-	u64			(*read)(struct clocksource *cs);
 	u64			mask;
 	u64			cycle_last;
 	u32			mult;
@@ -52,13 +51,13 @@ struct tk_read_base {
  * @clock_was_set_seq:	The sequence number of clock was set events
  * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_time:		Monotonic raw base time in timespec64 format
+ * @raw_sec:		CLOCK_MONOTONIC_RAW  time in seconds
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
  * @xtime_remainder:	Shifted nano seconds left over when rounding
  *			@cycle_interval
- * @raw_interval:	Raw nano seconds accumulated per NTP interval.
+ * @raw_interval:	Shifted raw nano seconds accumulated per NTP interval.
  * @ntp_error:		Difference between accumulated time and NTP time in ntp
  *			shifted nano seconds.
  * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
@@ -94,13 +93,13 @@ struct timekeeper {
 	unsigned int		clock_was_set_seq;
 	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
-	struct timespec64	raw_time;
+	u64			raw_sec;
 
 	/* The following members are for timekeeping internal use */
 	u64			cycle_interval;
 	u64			xtime_interval;
 	s64			xtime_remainder;
-	u32			raw_interval;
+	u64			raw_interval;
 	/* The ntp_tick_length() value currently being used.
 	 * This cached copy ensures we consistently apply the tick
 	 * length for an entire tick, as ntp_tick_length may change
diff --git a/include/linux/tty.h b/include/linux/tty.h
index eccb4ec..69464c0 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -316,7 +316,6 @@ struct tty_struct {
 
 	struct tty_struct *link;
 	struct fasync_struct *fasync;
-	int alt_speed;		/* For magic substitution of 38400 bps */
 	wait_queue_head_t write_wait;
 	wait_queue_head_t read_wait;
 	struct work_struct hangup_work;
@@ -400,6 +399,9 @@ extern struct tty_struct *get_current_tty(void);
 /* tty_io.c */
 extern int __init tty_init(void);
 extern const char *tty_name(const struct tty_struct *tty);
+extern struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+		struct file *filp);
+extern int tty_dev_name_to_number(const char *name, dev_t *number);
 #else
 static inline void tty_kref_put(struct tty_struct *tty)
 { }
@@ -420,6 +422,11 @@ static inline int __init tty_init(void)
 { return 0; }
 static inline const char *tty_name(const struct tty_struct *tty)
 { return "(none)"; }
+static inline struct tty_struct *tty_open_by_driver(dev_t device,
+		struct inode *inode, struct file *filp)
+{ return NULL; }
+static inline int tty_dev_name_to_number(const char *name, dev_t *number)
+{ return -ENOTSUPP; }
 #endif
 
 extern struct ktermios tty_std_termios;
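
A hedged usage sketch for the new name-to-dev_t lookup (the caller is hypothetical): tty_dev_name_to_number() takes a tty name and fills in the device number on success:

static void example_resolve_console(void)
{
	dev_t dev;

	if (!tty_dev_name_to_number("ttyS0", &dev))
		pr_info("ttyS0 is char device %u:%u\n", MAJOR(dev), MINOR(dev));
}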
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index fbc22a3..1a4a4ba 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -304,6 +304,7 @@ struct usb_gadget_ops {
 	int	(*udc_start)(struct usb_gadget *,
 			struct usb_gadget_driver *);
 	int	(*udc_stop)(struct usb_gadget *);
+	void	(*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
 	struct usb_ep *(*match_ep)(struct usb_gadget *,
 			struct usb_endpoint_descriptor *,
 			struct usb_ss_ep_comp_descriptor *);
@@ -352,6 +353,8 @@ struct usb_gadget_ops {
  * @deactivated: True if gadget is deactivated - in deactivated state it cannot
  *	be connected.
  * @connected: True if gadget is connected.
+ * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
+ *	indicates that it supports LPM as per the LPM ECN & errata.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -404,6 +407,7 @@ struct usb_gadget {
 	unsigned			is_selfpowered:1;
 	unsigned			deactivated:1;
 	unsigned			connected:1;
+	unsigned			lpm_capable:1;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 50398b6..a1f03eb 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -565,9 +565,9 @@ extern void usb_ep0_reinit(struct usb_device *);
 	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
 
 #define EndpointRequest \
-	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
 #define EndpointOutRequest \
-	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
 
 /* class requests from the USB 2.0 hub spec, table 11-15 */
 #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 31a8068..2992451 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -9,6 +9,7 @@
 #ifndef __LINUX_USB_PHY_H
 #define __LINUX_USB_PHY_H
 
+#include <linux/extcon.h>
 #include <linux/notifier.h>
 #include <linux/usb.h>
 
@@ -85,6 +86,12 @@ struct usb_phy {
 	struct usb_phy_io_ops	*io_ops;
 	void __iomem		*io_priv;
 
+	/* to support extcon device */
+	struct extcon_dev	*edev;
+	struct extcon_dev	*id_edev;
+	struct notifier_block	vbus_nb;
+	struct notifier_block	id_nb;
+
 	/* for notification of usb_phy_events */
 	struct atomic_notifier_head	notifier;
 
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index ec78204..ffe7487 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -117,13 +117,13 @@ struct typec_altmode_desc {
 
 struct typec_altmode
 *typec_partner_register_altmode(struct typec_partner *partner,
-				struct typec_altmode_desc *desc);
+				const struct typec_altmode_desc *desc);
 struct typec_altmode
 *typec_plug_register_altmode(struct typec_plug *plug,
-			     struct typec_altmode_desc *desc);
+			     const struct typec_altmode_desc *desc);
 struct typec_altmode
 *typec_port_register_altmode(struct typec_port *port,
-			     struct typec_altmode_desc *desc);
+			     const struct typec_altmode_desc *desc);
 void typec_unregister_altmode(struct typec_altmode *altmode);
 
 struct typec_port *typec_altmode2port(struct typec_altmode *alt);
@@ -190,6 +190,7 @@ struct typec_partner_desc {
  * @pr_set: Set Power Role
  * @vconn_set: Set VCONN Role
  * @activate_mode: Enter/exit given Alternate Mode
+ * @port_type_set: Set port type
  *
  * Static capabilities of a single USB Type-C port.
  */
@@ -214,6 +215,9 @@ struct typec_capability {
 
 	int		(*activate_mode)(const struct typec_capability *,
 					 int mode, int activate);
+	int		(*port_type_set)(const struct typec_capability *,
+					enum typec_port_type);
+
 };
 
 /* Specific to try_role(). Indicates the user wants to clear the preference. */
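For context only, a minimal sketch of how a port driver might hook the new port_type_set operation; the callback body and the surrounding capability structure are assumptions, not code from this series:

static int example_port_type_set(const struct typec_capability *cap,
				 enum typec_port_type type)
{
	/*
	 * Ask the PD controller/firmware to lock the port to the requested
	 * type; hardware access omitted in this sketch.
	 */
	return 0;
}

static struct typec_capability example_typec_cap = {
	/* ... other capability fields filled in by the real driver ... */
	.port_type_set	= example_port_type_set,
};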
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 4dff73a..d1defe4 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -18,29 +18,16 @@
 
 #include <uapi/linux/uuid.h>
 
-/*
- * V1 (time-based) UUID definition [RFC 4122].
- * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
- *   increments since midnight 15th October 1582
- *   - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
- *     time
- * - the clock sequence is a 14-bit counter to avoid duplicate times
- */
-struct uuid_v1 {
-	__be32		time_low;			/* low part of timestamp */
-	__be16		time_mid;			/* mid part of timestamp */
-	__be16		time_hi_and_version;		/* high part of timestamp and version  */
-#define UUID_TO_UNIX_TIME	0x01b21dd213814000ULL
-#define UUID_TIMEHI_MASK	0x0fff
-#define UUID_VERSION_TIME	0x1000	/* time-based UUID */
-#define UUID_VERSION_NAME	0x3000	/* name-based UUID */
-#define UUID_VERSION_RANDOM	0x4000	/* (pseudo-)random generated UUID */
-	u8		clock_seq_hi_and_reserved;	/* clock seq hi and variant */
-#define UUID_CLOCKHI_MASK	0x3f
-#define UUID_VARIANT_STD	0x80
-	u8		clock_seq_low;			/* clock seq low */
-	u8		node[6];			/* spatially unique node ID (MAC addr) */
-};
+typedef struct {
+	__u8 b[16];
+} uuid_t;
+
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)			\
+((uuid_t)								\
+{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
+   ((b) >> 8) & 0xff, (b) & 0xff,					\
+   ((c) >> 8) & 0xff, (c) & 0xff,					\
+   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
 /*
  * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
@@ -48,27 +35,73 @@ struct uuid_v1 {
  */
 #define	UUID_STRING_LEN		36
 
-static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+extern const guid_t guid_null;
+extern const uuid_t uuid_null;
+
+static inline bool guid_equal(const guid_t *u1, const guid_t *u2)
 {
-	return memcmp(&u1, &u2, sizeof(uuid_le));
+	return memcmp(u1, u2, sizeof(guid_t)) == 0;
 }
 
-static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
+static inline void guid_copy(guid_t *dst, const guid_t *src)
 {
-	return memcmp(&u1, &u2, sizeof(uuid_be));
+	memcpy(dst, src, sizeof(guid_t));
+}
+
+static inline bool guid_is_null(const guid_t *guid)
+{
+	return guid_equal(guid, &guid_null);
+}
+
+static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
+{
+	return memcmp(u1, u2, sizeof(uuid_t)) == 0;
+}
+
+static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
+{
+	memcpy(dst, src, sizeof(uuid_t));
+}
+
+static inline bool uuid_is_null(const uuid_t *uuid)
+{
+	return uuid_equal(uuid, &uuid_null);
 }
 
 void generate_random_uuid(unsigned char uuid[16]);
 
-extern void uuid_le_gen(uuid_le *u);
-extern void uuid_be_gen(uuid_be *u);
+extern void guid_gen(guid_t *u);
+extern void uuid_gen(uuid_t *u);
 
 bool __must_check uuid_is_valid(const char *uuid);
 
-extern const u8 uuid_le_index[16];
-extern const u8 uuid_be_index[16];
+extern const u8 guid_index[16];
+extern const u8 uuid_index[16];
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u);
-int uuid_be_to_bin(const char *uuid, uuid_be *u);
+int guid_parse(const char *uuid, guid_t *u);
+int uuid_parse(const char *uuid, uuid_t *u);
+
+/* backwards compatibility, don't use in new code */
+typedef uuid_t uuid_be;
+#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+	UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define NULL_UUID_BE 							\
+	UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,	\
+	     0x00, 0x00, 0x00, 0x00)
+
+#define uuid_le_gen(u)		guid_gen(u)
+#define uuid_be_gen(u)		uuid_gen(u)
+#define uuid_le_to_bin(guid, u)	guid_parse(guid, u)
+#define uuid_be_to_bin(uuid, u)	uuid_parse(uuid, u)
+
+static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
+{
+	return memcmp(&u1, &u2, sizeof(guid_t));
+}
+
+static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
+{
+	return memcmp(&u1, &u2, sizeof(uuid_t));
+}
 
 #endif
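A short sketch of the new uuid_t API defined above; the constant value and the helper name are arbitrary, the calls (UUID_INIT(), uuid_parse(), uuid_equal(), uuid_is_null()) are the ones introduced in this hunk:

#include <linux/uuid.h>

static const uuid_t example_uuid =
	UUID_INIT(0x12345678, 0x9abc, 0xdef0,
		  0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

static bool example_uuid_matches(const char *str)
{
	uuid_t parsed;

	if (uuid_parse(str, &parsed))		/* 0 on success */
		return false;

	return !uuid_is_null(&parsed) && uuid_equal(&parsed, &example_uuid);
}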
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index edf9b2c..f57076b 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -183,7 +183,7 @@ struct virqfd {
 	void			(*thread)(void *, void *);
 	void			*data;
 	struct work_struct	inject;
-	wait_queue_t		wait;
+	wait_queue_entry_t		wait;
 	poll_table		pt;
 	struct work_struct	shutdown;
 	struct virqfd		**pvirqfd;
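The field above only switches to the renamed wait-queue entry type from include/linux/wait.h (further down in this diff). As a hedged sketch of the pattern virqfd follows, a structure can embed a wait_queue_entry_t together with a custom wake callback; the names here are illustrative:

struct example_waiter {
	wait_queue_entry_t	wait;
	bool			woken;
};

static int example_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			   int flags, void *key)
{
	struct example_waiter *w = container_of(wq_entry, struct example_waiter, wait);

	w->woken = true;
	return 0;	/* keep scanning the rest of the wait list */
}

static void example_arm(struct wait_queue_head *wq_head, struct example_waiter *w)
{
	init_waitqueue_func_entry(&w->wait, example_wake_fn);
	add_wait_queue(wq_head, &w->wait);
}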
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index d84ae90..be3ab2d 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -93,10 +93,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 #endif
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif /* CONFIG_SMP */
 		NR_TLB_LOCAL_FLUSH_ALL,
 		NR_TLB_LOCAL_FLUSH_ONE,
 #endif /* CONFIG_DEBUG_TLBFLUSH */
diff --git a/drivers/w1/w1.h b/include/linux/w1.h
similarity index 76%
rename from drivers/w1/w1.h
rename to include/linux/w1.h
index 758a7a6..90cbe7e 100644
--- a/drivers/w1/w1.h
+++ b/include/linux/w1.h
@@ -12,8 +12,10 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __W1_H
-#define __W1_H
+#ifndef __LINUX_W1_H
+#define __LINUX_W1_H
+
+#include <linux/device.h>
 
 /**
  * struct w1_reg_num - broken out slave device id
@@ -22,8 +24,7 @@
  * @id: along with family is the unique device id
  * @crc: checksum of the other bytes
  */
-struct w1_reg_num
-{
+struct w1_reg_num {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	__u64	family:8,
 		id:48,
@@ -39,12 +40,6 @@ struct w1_reg_num
 
 #ifdef __KERNEL__
 
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-
-#include "w1_family.h"
-
 #define W1_MAXNAMELEN		32
 
 #define W1_SEARCH		0xF0
@@ -59,9 +54,6 @@ struct w1_reg_num
 #define W1_MATCH_ROM		0x55
 #define W1_RESUME_CMD		0xA5
 
-#define W1_SLAVE_ACTIVE		0
-#define W1_SLAVE_DETACH		1
-
 /**
  * struct w1_slave - holds a single slave device on the bus
  *
@@ -78,8 +70,7 @@ struct w1_reg_num
  * @dev: kernel device identifier
  *
  */
-struct w1_slave
-{
+struct w1_slave {
 	struct module		*owner;
 	unsigned char		name[W1_MAXNAMELEN];
 	struct list_head	w1_slave_entry;
@@ -96,7 +87,6 @@ struct w1_slave
 
 typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
 
-
 /**
  * struct w1_bus_master - operations available on a bus master
  *
@@ -142,8 +132,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
  * reset_bus.
  *
  */
-struct w1_bus_master
-{
+struct w1_bus_master {
 	void		*data;
 
 	u8		(*read_bit)(void *);
@@ -209,8 +198,7 @@ enum w1_master_flags {
  * @bus_master:		io operations available
  * @seq:		sequence number used for netlink broadcasts
  */
-struct w1_master
-{
+struct w1_master {
 	struct list_head	w1_master_entry;
 	struct module		*owner;
 	unsigned char		name[W1_MAXNAMELEN];
@@ -254,45 +242,51 @@ struct w1_master
 	u32			seq;
 };
 
+int w1_add_master_device(struct w1_bus_master *master);
+void w1_remove_master_device(struct w1_bus_master *master);
+
 /**
- * struct w1_async_cmd - execute callback from the w1_process kthread
- * @async_entry: link entry
- * @cb: callback function, must list_del and destroy this list before
- * returning
- *
- * When inserted into the w1_master async_list, w1_process will execute
- * the callback.  Embed this into the structure with the command details.
+ * struct w1_family_ops - operations for a family type
+ * @add_slave: invoked when a slave device of this family is attached
+ * @remove_slave: invoked when a slave device of this family is detached
+ * @groups: sysfs attribute groups registered for each slave of this family
  */
-struct w1_async_cmd {
-	struct list_head	async_entry;
-	void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
+struct w1_family_ops {
+	int  (*add_slave)(struct w1_slave *sl);
+	void (*remove_slave)(struct w1_slave *sl);
+	const struct attribute_group **groups;
 };
 
-int w1_create_master_attributes(struct w1_master *);
-void w1_destroy_master_attributes(struct w1_master *master);
-void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
-void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
-/* call w1_unref_slave to release the reference counts w1_search_slave added */
-struct w1_slave *w1_search_slave(struct w1_reg_num *id);
-/* decrements the reference on sl->master and sl, and cleans up if zero
- * returns the reference count after it has been decremented */
-int w1_unref_slave(struct w1_slave *sl);
-void w1_slave_found(struct w1_master *dev, u64 rn);
-void w1_search_process_cb(struct w1_master *dev, u8 search_type,
-	w1_slave_found_callback cb);
-struct w1_slave *w1_slave_search_device(struct w1_master *dev,
-	struct w1_reg_num *rn);
-struct w1_master *w1_search_master_id(u32 id);
-
-/* Disconnect and reconnect devices in the given family.  Used for finding
- * unclaimed devices after a family has been registered or releasing devices
- * after a family has been unregistered.  Set attach to 1 when a new family
- * has just been registered, to 0 when it has been unregistered.
+/**
+ * struct w1_family - reference counted family structure.
+ * @family_entry:	family linked list
+ * @fid:		8 bit family identifier
+ * @fops:		operations for this family
+ * @refcnt:		reference counter
  */
-void w1_reconnect_slaves(struct w1_family *f, int attach);
-int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
-/* 0 success, otherwise EBUSY */
-int w1_slave_detach(struct w1_slave *sl);
+struct w1_family {
+	struct list_head	family_entry;
+	u8			fid;
+
+	struct w1_family_ops	*fops;
+
+	atomic_t		refcnt;
+};
+
+int w1_register_family(struct w1_family *family);
+void w1_unregister_family(struct w1_family *family);
+
+/**
+ * module_w1_family() - Helper macro for registering a 1-Wire family
+ * @__w1_family: w1_family struct
+ *
+ * Helper macro for 1-Wire families which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_w1_family(__w1_family) \
+	module_driver(__w1_family, w1_register_family, \
+			w1_unregister_family)
 
 u8 w1_triplet(struct w1_master *dev, int bdir);
 void w1_write_8(struct w1_master *, u8);
@@ -321,16 +315,6 @@ static inline struct w1_master* dev_to_w1_master(struct device *dev)
 	return container_of(dev, struct w1_master, dev);
 }
 
-extern struct device_driver w1_master_driver;
-extern struct device w1_master_device;
-extern int w1_max_slave_count;
-extern int w1_max_slave_ttl;
-extern struct list_head w1_masters;
-extern struct mutex w1_mlock;
-
-extern int w1_process_callbacks(struct w1_master *dev);
-extern int w1_process(void *);
-
 #endif /* __KERNEL__ */
 
-#endif /* __W1_H */
+#endif /* __LINUX_W1_H */
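Putting the relocated header to use, a minimal (hypothetical) slave-family driver against include/linux/w1.h could look like the sketch below; the family code and callback bodies are invented for illustration, while the structures and module_w1_family() come from the hunks above:

#include <linux/module.h>
#include <linux/w1.h>

static int example_add_slave(struct w1_slave *sl)
{
	/* allocate per-slave state, create extra sysfs files, etc. */
	return 0;
}

static void example_remove_slave(struct w1_slave *sl)
{
	/* tear down whatever add_slave() set up */
}

static struct w1_family_ops example_fops = {
	.add_slave	= example_add_slave,
	.remove_slave	= example_remove_slave,
};

static struct w1_family example_family = {
	.fid	= 0x3e,			/* hypothetical family code */
	.fops	= &example_fops,
};

module_w1_family(example_family);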
diff --git a/include/linux/wait.h b/include/linux/wait.h
index db076ca..b289c96 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -10,38 +10,30 @@
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
 
-typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
+typedef struct wait_queue_entry wait_queue_entry_t;
 
-/* __wait_queue::flags */
+typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
+int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
+
+/* wait_queue_entry::flags */
 #define WQ_FLAG_EXCLUSIVE	0x01
 #define WQ_FLAG_WOKEN		0x02
 
-struct __wait_queue {
+/*
+ * A single wait-queue entry structure:
+ */
+struct wait_queue_entry {
 	unsigned int		flags;
 	void			*private;
 	wait_queue_func_t	func;
-	struct list_head	task_list;
+	struct list_head	entry;
 };
 
-struct wait_bit_key {
-	void			*flags;
-	int			bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR	-1
-	unsigned long		timeout;
-};
-
-struct wait_bit_queue {
-	struct wait_bit_key	key;
-	wait_queue_t		wait;
-};
-
-struct __wait_queue_head {
+struct wait_queue_head {
 	spinlock_t		lock;
-	struct list_head	task_list;
+	struct list_head	head;
 };
-typedef struct __wait_queue_head wait_queue_head_t;
+typedef struct wait_queue_head wait_queue_head_t;
 
 struct task_struct;
 
@@ -49,82 +41,76 @@ struct task_struct;
  * Macros for declaration and initialisation of the datatypes
  */
 
-#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
-	.private	= tsk,						\
-	.func		= default_wake_function,			\
-	.task_list	= { NULL, NULL } }
+#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
+	.private	= tsk,							\
+	.func		= default_wake_function,				\
+	.entry		= { NULL, NULL } }
 
-#define DECLARE_WAITQUEUE(name, tsk)					\
-	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
+#define DECLARE_WAITQUEUE(name, tsk)						\
+	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
 
-#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
-	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
-	.task_list	= { &(name).task_list, &(name).task_list } }
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
+	.head		= { &(name).head, &(name).head } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
-	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
 
-#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
-	{ .flags = word, .bit_nr = bit, }
+extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
 
-#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
-	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
-
-extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
-
-#define init_waitqueue_head(q)				\
-	do {						\
-		static struct lock_class_key __key;	\
-							\
-		__init_waitqueue_head((q), #q, &__key);	\
+#define init_waitqueue_head(wq_head)						\
+	do {									\
+		static struct lock_class_key __key;				\
+										\
+		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
 	} while (0)
 
 #ifdef CONFIG_LOCKDEP
 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
 	({ init_waitqueue_head(&name); name; })
 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
-	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
 #else
 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
 #endif
 
-static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
+static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
 {
-	q->flags	= 0;
-	q->private	= p;
-	q->func		= default_wake_function;
+	wq_entry->flags		= 0;
+	wq_entry->private	= p;
+	wq_entry->func		= default_wake_function;
 }
 
 static inline void
-init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
+init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
 {
-	q->flags	= 0;
-	q->private	= NULL;
-	q->func		= func;
+	wq_entry->flags		= 0;
+	wq_entry->private	= NULL;
+	wq_entry->func		= func;
 }
 
 /**
  * waitqueue_active -- locklessly test for waiters on the queue
- * @q: the waitqueue to test for waiters
+ * @wq_head: the waitqueue to test for waiters
  *
  * returns true if the wait list is not empty
  *
  * NOTE: this function is lockless and requires care, incorrect usage _will_
  * lead to sporadic and non-obvious failure.
  *
- * Use either while holding wait_queue_head_t::lock or when used for wakeups
+ * Use either while holding wait_queue_head::lock or when used for wakeups
  * with an extra smp_mb() like:
  *
  *      CPU0 - waker                    CPU1 - waiter
  *
  *                                      for (;;) {
- *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
+ *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
  *      smp_mb();                         // smp_mb() from set_current_state()
- *      if (waitqueue_active(wq))         if (@cond)
- *        wake_up(wq);                      break;
+ *      if (waitqueue_active(wq_head))         if (@cond)
+ *        wake_up(wq_head);                      break;
  *                                        schedule();
  *                                      }
- *                                      finish_wait(&wq, &wait);
+ *                                      finish_wait(&wq_head, &wait);
  *
  * Because without the explicit smp_mb() it's possible for the
  * waitqueue_active() load to get hoisted over the @cond store such that we'll
@@ -133,20 +119,20 @@ init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
  * which (when the lock is uncontended) are of roughly equal cost.
  */
-static inline int waitqueue_active(wait_queue_head_t *q)
+static inline int waitqueue_active(struct wait_queue_head *wq_head)
 {
-	return !list_empty(&q->task_list);
+	return !list_empty(&wq_head->head);
 }
 
 /**
  * wq_has_sleeper - check if there are any waiting processes
- * @wq: wait queue head
+ * @wq_head: wait queue head
  *
- * Returns true if wq has waiting processes
+ * Returns true if wq_head has waiting processes
  *
  * Please refer to the comment for waitqueue_active.
  */
-static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
 {
 	/*
 	 * We need to be sure we are in sync with the
@@ -156,63 +142,51 @@ static inline bool wq_has_sleeper(wait_queue_head_t *wq)
 	 * waiting side.
 	 */
 	smp_mb();
-	return waitqueue_active(wq);
+	return waitqueue_active(wq_head);
 }
 
-extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
-extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
-extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
 
-static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	list_add(&new->task_list, &head->task_list);
+	list_add(&wq_entry->entry, &wq_head->head);
 }
 
 /*
  * Used for wake-one threads:
  */
 static inline void
-__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue(q, wait);
+	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(wq_head, wq_entry);
 }
 
-static inline void __add_wait_queue_tail(wait_queue_head_t *head,
-					 wait_queue_t *new)
+static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	list_add_tail(&new->task_list, &head->task_list);
+	list_add_tail(&wq_entry->entry, &wq_head->head);
 }
 
 static inline void
-__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	__add_wait_queue_tail(q, wait);
+	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_entry_tail(wq_head, wq_entry);
 }
 
 static inline void
-__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
+__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	list_del(&old->task_list);
+	list_del(&wq_entry->entry);
 }
 
-typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_bit(wait_queue_head_t *, void *, int);
-int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
-int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
-void wake_up_bit(void *, int);
-void wake_up_atomic_t(atomic_t *);
-int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
-int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
-int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
-int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
-wait_queue_head_t *bit_waitqueue(void *, int);
+void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -228,28 +202,28 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 /*
  * Wakeup macros to be used to report events to the targets.
  */
-#define wake_up_poll(x, m)						\
+#define wake_up_poll(x, m)							\
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
-#define wake_up_locked_poll(x, m)					\
+#define wake_up_locked_poll(x, m)						\
 	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
-#define wake_up_interruptible_poll(x, m)				\
+#define wake_up_interruptible_poll(x, m)					\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
-#define wake_up_interruptible_sync_poll(x, m)				\
+#define wake_up_interruptible_sync_poll(x, m)					\
 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 
-#define ___wait_cond_timeout(condition)					\
-({									\
-	bool __cond = (condition);					\
-	if (__cond && !__ret)						\
-		__ret = 1;						\
-	__cond || !__ret;						\
+#define ___wait_cond_timeout(condition)						\
+({										\
+	bool __cond = (condition);						\
+	if (__cond && !__ret)							\
+		__ret = 1;							\
+	__cond || !__ret;							\
 })
 
-#define ___wait_is_interruptible(state)					\
-	(!__builtin_constant_p(state) ||				\
-		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\
+#define ___wait_is_interruptible(state)						\
+	(!__builtin_constant_p(state) ||					\
+		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
 
-extern void init_wait_entry(wait_queue_t *__wait, int flags);
+extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
 
 /*
  * The below macro ___wait_event() has an explicit shadow of the __ret
@@ -263,108 +237,108 @@ extern void init_wait_entry(wait_queue_t *__wait, int flags);
  * otherwise.
  */
 
-#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
-({									\
-	__label__ __out;						\
-	wait_queue_t __wait;						\
-	long __ret = ret;	/* explicit shadow */			\
-									\
-	init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
-	for (;;) {							\
-		long __int = prepare_to_wait_event(&wq, &__wait, state);\
-									\
-		if (condition)						\
-			break;						\
-									\
-		if (___wait_is_interruptible(state) && __int) {		\
-			__ret = __int;					\
-			goto __out;					\
-		}							\
-									\
-		cmd;							\
-	}								\
-	finish_wait(&wq, &__wait);					\
-__out:	__ret;								\
+#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
+({										\
+	__label__ __out;							\
+	struct wait_queue_entry __wq_entry;					\
+	long __ret = ret;	/* explicit shadow */				\
+										\
+	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
+	for (;;) {								\
+		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
+										\
+		if (condition)							\
+			break;							\
+										\
+		if (___wait_is_interruptible(state) && __int) {			\
+			__ret = __int;						\
+			goto __out;						\
+		}								\
+										\
+		cmd;								\
+	}									\
+	finish_wait(&wq_head, &__wq_entry);					\
+__out:	__ret;									\
 })
 
-#define __wait_event(wq, condition)					\
-	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+#define __wait_event(wq_head, condition)					\
+	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 			    schedule())
 
 /**
  * wait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
+ * the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  */
-#define wait_event(wq, condition)					\
-do {									\
-	might_sleep();							\
-	if (condition)							\
-		break;							\
-	__wait_event(wq, condition);					\
+#define wait_event(wq_head, condition)						\
+do {										\
+	might_sleep();								\
+	if (condition)								\
+		break;								\
+	__wait_event(wq_head, condition);					\
 } while (0)
 
-#define __io_wait_event(wq, condition)					\
-	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+#define __io_wait_event(wq_head, condition)					\
+	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 			    io_schedule())
 
 /*
  * io_wait_event() -- like wait_event() but with io_schedule()
  */
-#define io_wait_event(wq, condition)					\
-do {									\
-	might_sleep();							\
-	if (condition)							\
-		break;							\
-	__io_wait_event(wq, condition);					\
+#define io_wait_event(wq_head, condition)					\
+do {										\
+	might_sleep();								\
+	if (condition)								\
+		break;								\
+	__io_wait_event(wq_head, condition);					\
 } while (0)
 
-#define __wait_event_freezable(wq, condition)				\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
+#define __wait_event_freezable(wq_head, condition)				\
+	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 			    schedule(); try_to_freeze())
 
 /**
  * wait_event_freezable - sleep (or freeze) until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
  * to system load) until the @condition evaluates to true. The
- * @condition is checked each time the waitqueue @wq is woken up.
+ * @condition is checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  */
-#define wait_event_freezable(wq, condition)				\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_freezable(wq, condition);		\
-	__ret;								\
+#define wait_event_freezable(wq_head, condition)				\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_freezable(wq_head, condition);		\
+	__ret;									\
 })
 
-#define __wait_event_timeout(wq, condition, timeout)			\
-	___wait_event(wq, ___wait_cond_timeout(condition),		\
-		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
+#define __wait_event_timeout(wq_head, condition, timeout)			\
+	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
+		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
 		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout, in jiffies
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
+ * the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
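Since these hunks are essentially the wq -> wq_head rename plus whitespace realignment, the calling convention is unchanged; as a reminder (sketch only, names invented), a waiter/waker pair against this header still looks like:

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static int example_consumer(void)
{
	/* sleeps in TASK_INTERRUPTIBLE until example_ready becomes true */
	return wait_event_interruptible(example_wq, example_ready);
}

static void example_producer(void)
{
	example_ready = true;	/* change the condition first... */
	wake_up(&example_wq);	/* ...then wake any waiters */
}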
@@ -375,83 +349,83 @@ do {									\
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define wait_event_timeout(wq, condition, timeout)			\
-({									\
-	long __ret = timeout;						\
-	might_sleep();							\
-	if (!___wait_cond_timeout(condition))				\
-		__ret = __wait_event_timeout(wq, condition, timeout);	\
-	__ret;								\
+#define wait_event_timeout(wq_head, condition, timeout)				\
+({										\
+	long __ret = timeout;							\
+	might_sleep();								\
+	if (!___wait_cond_timeout(condition))					\
+		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
+	__ret;									\
 })
 
-#define __wait_event_freezable_timeout(wq, condition, timeout)		\
-	___wait_event(wq, ___wait_cond_timeout(condition),		\
-		      TASK_INTERRUPTIBLE, 0, timeout,			\
+#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
+	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
+		      TASK_INTERRUPTIBLE, 0, timeout,				\
 		      __ret = schedule_timeout(__ret); try_to_freeze())
 
 /*
  * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
  * increasing load and is freezable.
  */
-#define wait_event_freezable_timeout(wq, condition, timeout)		\
-({									\
-	long __ret = timeout;						\
-	might_sleep();							\
-	if (!___wait_cond_timeout(condition))				\
-		__ret = __wait_event_freezable_timeout(wq, condition, timeout);	\
-	__ret;								\
+#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
+({										\
+	long __ret = timeout;							\
+	might_sleep();								\
+	if (!___wait_cond_timeout(condition))					\
+		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
+	__ret;									\
 })
 
-#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
-	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
+#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
+	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
 			    cmd1; schedule(); cmd2)
 /*
  * Just like wait_event_cmd(), except it sets exclusive flag
  */
-#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
+#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
+do {										\
+	if (condition)								\
+		break;								\
+	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
 } while (0)
 
-#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
-	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
+	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
 			    cmd1; schedule(); cmd2)
 
 /**
  * wait_event_cmd - sleep until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @cmd1: the command will be executed before sleep
  * @cmd2: the command will be executed after sleep
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
+ * the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  */
-#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
+#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
+do {										\
+	if (condition)								\
+		break;								\
+	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
 } while (0)
 
-#define __wait_event_interruptible(wq, condition)			\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
+#define __wait_event_interruptible(wq_head, condition)				\
+	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 		      schedule())
 
 /**
  * wait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -459,29 +433,29 @@ do {									\
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible(wq, condition)				\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_interruptible(wq, condition);	\
-	__ret;								\
+#define wait_event_interruptible(wq_head, condition)				\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_interruptible(wq_head, condition);		\
+	__ret;									\
 })
 
-#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
-	___wait_event(wq, ___wait_cond_timeout(condition),		\
-		      TASK_INTERRUPTIBLE, 0, timeout,			\
+#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
+	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
+		      TASK_INTERRUPTIBLE, 0, timeout,				\
 		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout, in jiffies
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -493,50 +467,49 @@ do {									\
  * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  * interrupted by a signal.
  */
-#define wait_event_interruptible_timeout(wq, condition, timeout)	\
-({									\
-	long __ret = timeout;						\
-	might_sleep();							\
-	if (!___wait_cond_timeout(condition))				\
-		__ret = __wait_event_interruptible_timeout(wq,		\
-						condition, timeout);	\
-	__ret;								\
+#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
+({										\
+	long __ret = timeout;							\
+	might_sleep();								\
+	if (!___wait_cond_timeout(condition))					\
+		__ret = __wait_event_interruptible_timeout(wq_head,		\
+						condition, timeout);		\
+	__ret;									\
 })
 
-#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
-({									\
-	int __ret = 0;							\
-	struct hrtimer_sleeper __t;					\
-									\
-	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
-			      HRTIMER_MODE_REL);			\
-	hrtimer_init_sleeper(&__t, current);				\
-	if ((timeout) != KTIME_MAX)				\
-		hrtimer_start_range_ns(&__t.timer, timeout,		\
-				       current->timer_slack_ns,		\
-				       HRTIMER_MODE_REL);		\
-									\
-	__ret = ___wait_event(wq, condition, state, 0, 0,		\
-		if (!__t.task) {					\
-			__ret = -ETIME;					\
-			break;						\
-		}							\
-		schedule());						\
-									\
-	hrtimer_cancel(&__t.timer);					\
-	destroy_hrtimer_on_stack(&__t.timer);				\
-	__ret;								\
+#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
+({										\
+	int __ret = 0;								\
+	struct hrtimer_sleeper __t;						\
+										\
+	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);	\
+	hrtimer_init_sleeper(&__t, current);					\
+	if ((timeout) != KTIME_MAX)						\
+		hrtimer_start_range_ns(&__t.timer, timeout,			\
+				       current->timer_slack_ns,			\
+				       HRTIMER_MODE_REL);			\
+										\
+	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
+		if (!__t.task) {						\
+			__ret = -ETIME;						\
+			break;							\
+		}								\
+		schedule());							\
+										\
+	hrtimer_cancel(&__t.timer);						\
+	destroy_hrtimer_on_stack(&__t.timer);					\
+	__ret;									\
 })
 
 /**
  * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout, as a ktime_t
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -544,25 +517,25 @@ do {									\
  * The function returns 0 if @condition became true, or -ETIME if the timeout
  * elapsed.
  */
-#define wait_event_hrtimeout(wq, condition, timeout)			\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
-					       TASK_UNINTERRUPTIBLE);	\
-	__ret;								\
+#define wait_event_hrtimeout(wq_head, condition, timeout)			\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
+					       TASK_UNINTERRUPTIBLE);		\
+	__ret;									\
 })
 
 /**
  * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout, as a ktime_t
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -570,73 +543,73 @@ do {									\
  * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  * interrupted by a signal, or -ETIME if the timeout elapsed.
  */
-#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
-({									\
-	long __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
-					       TASK_INTERRUPTIBLE);	\
-	__ret;								\
+#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
+({										\
+	long __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
+					       TASK_INTERRUPTIBLE);		\
+	__ret;									\
 })
 
-#define __wait_event_interruptible_exclusive(wq, condition)		\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
+#define __wait_event_interruptible_exclusive(wq, condition)			\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
 		      schedule())
 
-#define wait_event_interruptible_exclusive(wq, condition)		\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_interruptible_exclusive(wq, condition);\
-	__ret;								\
+#define wait_event_interruptible_exclusive(wq, condition)			\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
+	__ret;									\
 })
 
-#define __wait_event_killable_exclusive(wq, condition)			\
-	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
+#define __wait_event_killable_exclusive(wq, condition)				\
+	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
 		      schedule())
 
-#define wait_event_killable_exclusive(wq, condition)			\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_killable_exclusive(wq, condition);	\
-	__ret;								\
+#define wait_event_killable_exclusive(wq, condition)				\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_killable_exclusive(wq, condition);		\
+	__ret;									\
 })
 
 
-#define __wait_event_freezable_exclusive(wq, condition)			\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
+#define __wait_event_freezable_exclusive(wq, condition)				\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
 			schedule(); try_to_freeze())
 
-#define wait_event_freezable_exclusive(wq, condition)			\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_freezable_exclusive(wq, condition);\
-	__ret;								\
+#define wait_event_freezable_exclusive(wq, condition)				\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_freezable_exclusive(wq, condition);	\
+	__ret;									\
 })
 
-extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
-extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 
-#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
-({									\
-	int __ret;							\
-	DEFINE_WAIT(__wait);						\
-	if (exclusive)							\
-		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
-	do {								\
-		__ret = fn(&(wq), &__wait);				\
-		if (__ret)						\
-			break;						\
-	} while (!(condition));						\
-	__remove_wait_queue(&(wq), &__wait);				\
-	__set_current_state(TASK_RUNNING);				\
-	__ret;								\
+#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
+({										\
+	int __ret;								\
+	DEFINE_WAIT(__wait);							\
+	if (exclusive)								\
+		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
+	do {									\
+		__ret = fn(&(wq), &__wait);					\
+		if (__ret)							\
+			break;							\
+	} while (!(condition));							\
+	__remove_wait_queue(&(wq), &__wait);					\
+	__set_current_state(TASK_RUNNING);					\
+	__ret;									\
 })
 
 
@@ -663,8 +636,8 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_locked(wq, condition)			\
-	((condition)							\
+#define wait_event_interruptible_locked(wq, condition)				\
+	((condition)								\
 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 
 /**
@@ -690,8 +663,8 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_locked_irq(wq, condition)		\
-	((condition)							\
+#define wait_event_interruptible_locked_irq(wq, condition)			\
+	((condition)								\
 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 
 /**
@@ -721,8 +694,8 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_exclusive_locked(wq, condition)	\
-	((condition)							\
+#define wait_event_interruptible_exclusive_locked(wq, condition)		\
+	((condition)								\
 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 
 /**
@@ -752,12 +725,12 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
-	((condition)							\
+#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
+	((condition)								\
 	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 
 
-#define __wait_event_killable(wq, condition)				\
+#define __wait_event_killable(wq, condition)					\
 	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 
 /**
@@ -775,21 +748,21 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * The function will return -ERESTARTSYS if it was interrupted by a
  * signal and 0 if @condition evaluated to true.
  */
-#define wait_event_killable(wq, condition)				\
-({									\
-	int __ret = 0;							\
-	might_sleep();							\
-	if (!(condition))						\
-		__ret = __wait_event_killable(wq, condition);		\
-	__ret;								\
+#define wait_event_killable(wq_head, condition)					\
+({										\
+	int __ret = 0;								\
+	might_sleep();								\
+	if (!(condition))							\
+		__ret = __wait_event_killable(wq_head, condition);		\
+	__ret;									\
 })
 
 
-#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
-	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
-			    spin_unlock_irq(&lock);			\
-			    cmd;					\
-			    schedule();					\
+#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
+	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+			    spin_unlock_irq(&lock);				\
+			    cmd;						\
+			    schedule();						\
 			    spin_lock_irq(&lock))
 
 /**
@@ -797,7 +770,7 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  *			     condition is checked under the lock. This
  *			     is expected to be called with the lock
  *			     taken.
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before cmd
  *	  and schedule() and reacquired afterwards.
@@ -806,7 +779,7 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
+ * the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -815,11 +788,11 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
  * dropped before invoking the cmd and going to sleep and is reacquired
  * afterwards.
  */
-#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_lock_irq(wq, condition, lock, cmd);		\
+#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
+do {										\
+	if (condition)								\
+		break;								\
+	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
 } while (0)
 
 /**
@@ -827,14 +800,14 @@ do {									\
  *			 condition is checked under the lock. This
  *			 is expected to be called with the lock
  *			 taken.
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before schedule()
  *	  and reacquired afterwards.
  *
  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
+ * the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -842,26 +815,26 @@ do {									\
  * This is supposed to be called while holding the lock. The lock is
  * dropped before going to sleep and is reacquired afterwards.
  */
-#define wait_event_lock_irq(wq, condition, lock)			\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_lock_irq(wq, condition, lock, );			\
+#define wait_event_lock_irq(wq_head, condition, lock)				\
+do {										\
+	if (condition)								\
+		break;								\
+	__wait_event_lock_irq(wq_head, condition, lock, );			\
 } while (0)
 
 
-#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
-		      spin_unlock_irq(&lock);				\
-		      cmd;						\
-		      schedule();					\
+#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
+	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
+		      spin_unlock_irq(&lock);					\
+		      cmd;							\
+		      schedule();						\
 		      spin_lock_irq(&lock))
 
 /**
  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  *		The condition is checked under the lock. This is expected to
  *		be called with the lock taken.
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before cmd and
  *	  schedule() and reacquired afterwards.
@@ -870,7 +843,7 @@ do {									\
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
+ * checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -882,27 +855,27 @@ do {									\
  * The macro will return -ERESTARTSYS if it was interrupted by a signal
  * and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
-({									\
-	int __ret = 0;							\
-	if (!(condition))						\
-		__ret = __wait_event_interruptible_lock_irq(wq,		\
-						condition, lock, cmd);	\
-	__ret;								\
+#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
+({										\
+	int __ret = 0;								\
+	if (!(condition))							\
+		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
+						condition, lock, cmd);		\
+	__ret;									\
 })
 
 /**
  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
  *		The condition is checked under the lock. This is expected
  *		to be called with the lock taken.
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before schedule()
  *	  and reacquired afterwards.
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
+ * checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -913,28 +886,28 @@ do {									\
  * The macro will return -ERESTARTSYS if it was interrupted by a signal
  * and 0 if @condition evaluated to true.
  */
-#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
-({									\
-	int __ret = 0;							\
-	if (!(condition))						\
-		__ret = __wait_event_interruptible_lock_irq(wq,		\
-						condition, lock,);	\
-	__ret;								\
+#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
+({										\
+	int __ret = 0;								\
+	if (!(condition))							\
+		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
+						condition, lock,);		\
+	__ret;									\
 })
 
-#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
-						    lock, timeout)	\
-	___wait_event(wq, ___wait_cond_timeout(condition),		\
-		      TASK_INTERRUPTIBLE, 0, timeout,			\
-		      spin_unlock_irq(&lock);				\
-		      __ret = schedule_timeout(__ret);			\
+#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,		\
+						    lock, timeout)		\
+	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
+		      TASK_INTERRUPTIBLE, 0, timeout,				\
+		      spin_unlock_irq(&lock);					\
+		      __ret = schedule_timeout(__ret);				\
 		      spin_lock_irq(&lock));
 
 /**
  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  *		true or a timeout elapses. The condition is checked under
  *		the lock. This is expected to be called with the lock taken.
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before schedule()
  *	  and reacquired afterwards.
@@ -942,7 +915,7 @@ do {									\
  *
  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
+ * checked each time the waitqueue @wq_head is woken up.
  *
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
@@ -954,263 +927,42 @@ do {									\
  * was interrupted by a signal, and the remaining jiffies otherwise
  * if the condition evaluated to true before the timeout elapsed.
  */
-#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
-						  timeout)		\
-({									\
-	long __ret = timeout;						\
-	if (!___wait_cond_timeout(condition))				\
-		__ret = __wait_event_interruptible_lock_irq_timeout(	\
-					wq, condition, lock, timeout);	\
-	__ret;								\
+#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
+						  timeout)			\
+({										\
+	long __ret = timeout;							\
+	if (!___wait_cond_timeout(condition))					\
+		__ret = __wait_event_interruptible_lock_irq_timeout(		\
+					wq_head, condition, lock, timeout);	\
+	__ret;									\
 })
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
+void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
+int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
 
-#define DEFINE_WAIT_FUNC(name, function)				\
-	wait_queue_t name = {						\
-		.private	= current,				\
-		.func		= function,				\
-		.task_list	= LIST_HEAD_INIT((name).task_list),	\
+#define DEFINE_WAIT_FUNC(name, function)					\
+	struct wait_queue_entry name = {					\
+		.private	= current,					\
+		.func		= function,					\
+		.entry		= LIST_HEAD_INIT((name).entry),			\
 	}
 
 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
 
-#define DEFINE_WAIT_BIT(name, word, bit)				\
-	struct wait_bit_queue name = {					\
-		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
-		.wait	= {						\
-			.private	= current,			\
-			.func		= wake_bit_function,		\
-			.task_list	=				\
-				LIST_HEAD_INIT((name).wait.task_list),	\
-		},							\
-	}
-
-#define init_wait(wait)							\
-	do {								\
-		(wait)->private = current;				\
-		(wait)->func = autoremove_wake_function;		\
-		INIT_LIST_HEAD(&(wait)->task_list);			\
-		(wait)->flags = 0;					\
+#define init_wait(wait)								\
+	do {									\
+		(wait)->private = current;					\
+		(wait)->func = autoremove_wake_function;			\
+		INIT_LIST_HEAD(&(wait)->entry);					\
+		(wait)->flags = 0;						\
 	} while (0)
 
-
-extern int bit_wait(struct wait_bit_key *, int);
-extern int bit_wait_io(struct wait_bit_key *, int);
-extern int bit_wait_timeout(struct wait_bit_key *, int);
-extern int bit_wait_io_timeout(struct wait_bit_key *, int);
-
-/**
- * wait_on_bit - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit.
- * For instance, if one were to have waiters on a bitflag, one would
- * call wait_on_bit() in threads waiting for the bit to clear.
- * One uses wait_on_bit() where one is waiting for the bit to clear,
- * but has no intention of setting it.
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
-static inline int
-wait_on_bit(unsigned long *word, int bit, unsigned mode)
-{
-	might_sleep();
-	if (!test_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit(word, bit,
-				       bit_wait,
-				       mode);
-}
-
-/**
- * wait_on_bit_io - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared.  This is similar to wait_on_bit(), but calls
- * io_schedule() instead of schedule() for the actual waiting.
- *
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
-static inline int
-wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
-{
-	might_sleep();
-	if (!test_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit(word, bit,
-				       bit_wait_io,
-				       mode);
-}
-
-/**
- * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- * @timeout: timeout, in jiffies
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), except also takes a
- * timeout parameter.
- *
- * Returned value will be zero if the bit was cleared before the
- * @timeout elapsed, or non-zero if the @timeout elapsed or process
- * received a signal and the mode permitted wakeup on that signal.
- */
-static inline int
-wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
-		    unsigned long timeout)
-{
-	might_sleep();
-	if (!test_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit_timeout(word, bit,
-					       bit_wait_timeout,
-					       mode, timeout);
-}
-
-/**
- * wait_on_bit_action - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared, and allow the waiting action to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
- *
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
-static inline int
-wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
-		   unsigned mode)
-{
-	might_sleep();
-	if (!test_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit(word, bit, action, mode);
-}
-
-/**
- * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit
- * when one intends to set it, for instance, trying to lock bitflags.
- * For instance, if one were to have waiters trying to set bitflag
- * and waiting for it to clear before setting it, one would call
- * wait_on_bit() in threads waiting to be able to set the bit.
- * One uses wait_on_bit_lock() where one is waiting for the bit to
- * clear with the intention of setting it, and when done, clearing it.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set.  Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
-static inline int
-wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
-{
-	might_sleep();
-	if (!test_and_set_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
-}
-
-/**
- * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to atomically set it.  This is similar
- * to wait_on_bit(), but calls io_schedule() instead of schedule()
- * for the actual waiting.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set.  Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
-static inline int
-wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
-{
-	might_sleep();
-	if (!test_and_set_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
-}
-
-/**
- * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to set it, and allow the waiting action
- * to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set.  Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
-static inline int
-wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
-			unsigned mode)
-{
-	might_sleep();
-	if (!test_and_set_bit(bit, word))
-		return 0;
-	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
-}
-
-/**
- * wait_on_atomic_t - Wait for an atomic_t to become 0
- * @val: The atomic value being waited on, a kernel virtual address
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
- * the purpose of getting a waitqueue, but we set the key to a bit number
- * outside of the target 'word'.
- */
-static inline
-int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
-{
-	might_sleep();
-	if (atomic_read(val) == 0)
-		return 0;
-	return out_of_line_wait_on_atomic_t(val, action, mode);
-}
-
 #endif /* _LINUX_WAIT_H */
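
A minimal usage sketch of the renamed wait_event_interruptible_lock_irq() API, assuming a hypothetical driver with its own spinlock, wait queue head and data-ready flag (none of these names are part of the patch):

	static DEFINE_SPINLOCK(my_lock);
	static DECLARE_WAIT_QUEUE_HEAD(my_wq_head);
	static bool my_data_ready;

	static int my_dev_wait(void)
	{
		int ret;

		spin_lock_irq(&my_lock);
		/* Drops my_lock around schedule() and reacquires it before
		 * re-checking my_data_ready; returns -ERESTARTSYS on signal. */
		ret = wait_event_interruptible_lock_irq(my_wq_head,
							my_data_ready, my_lock);
		spin_unlock_irq(&my_lock);
		return ret;
	}

	static void my_dev_wake(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		my_data_ready = true;
		/* wake_up() must follow any change to the wait condition. */
		wake_up(&my_wq_head);
		spin_unlock_irqrestore(&my_lock, flags);
	}
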
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
new file mode 100644
index 0000000..12b2666
--- /dev/null
+++ b/include/linux/wait_bit.h
@@ -0,0 +1,261 @@
+#ifndef _LINUX_WAIT_BIT_H
+#define _LINUX_WAIT_BIT_H
+
+/*
+ * Linux wait-bit related types and methods:
+ */
+#include <linux/wait.h>
+
+struct wait_bit_key {
+	void			*flags;
+	int			bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR	-1
+	unsigned long		timeout;
+};
+
+struct wait_bit_queue_entry {
+	struct wait_bit_key	key;
+	struct wait_queue_entry	wq_entry;
+};
+
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit)					\
+	{ .flags = word, .bit_nr = bit, }
+
+#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)					\
+	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
+
+typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
+void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
+int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
+int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
+void wake_up_bit(void *word, int bit);
+void wake_up_atomic_t(atomic_t *p);
+int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
+int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
+int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
+int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
+struct wait_queue_head *bit_waitqueue(void *word, int bit);
+extern void __init wait_bit_init(void);
+
+int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
+
+#define DEFINE_WAIT_BIT(name, word, bit)					\
+	struct wait_bit_queue_entry name = {					\
+		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),			\
+		.wq_entry = {							\
+			.private	= current,				\
+			.func		= wake_bit_function,			\
+			.entry		=					\
+				LIST_HEAD_INIT((name).wq_entry.entry),		\
+		},								\
+	}
+
+extern int bit_wait(struct wait_bit_key *key, int bit);
+extern int bit_wait_io(struct wait_bit_key *key, int bit);
+extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
+extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
+
+/**
+ * wait_on_bit - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit.
+ * For instance, if one were to have waiters on a bitflag, one would
+ * call wait_on_bit() in threads waiting for the bit to clear.
+ * One uses wait_on_bit() where one is waiting for the bit to clear,
+ * but has no intention of setting it.
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
+{
+	might_sleep();
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait,
+				       mode);
+}
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared.  This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
+{
+	might_sleep();
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit,
+				       bit_wait_io,
+				       mode);
+}
+
+/**
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ * @timeout: timeout, in jiffies
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), except also takes a
+ * timeout parameter.
+ *
+ * Returned value will be zero if the bit was cleared before the
+ * @timeout elapsed, or non-zero if the @timeout elapsed or process
+ * received a signal and the mode permitted wakeup on that signal.
+ */
+static inline int
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+		    unsigned long timeout)
+{
+	might_sleep();
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_timeout(word, bit,
+					       bit_wait_timeout,
+					       mode, timeout);
+}
+
+/**
+ * wait_on_bit_action - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared, and allow the waiting action to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+		   unsigned mode)
+{
+	might_sleep();
+	if (!test_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+
+/**
+ * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit
+ * when one intends to set it, for instance, trying to lock bitflags.
+ * For instance, if one were to have waiters trying to set a bitflag
+ * and waiting for it to clear before setting it, one would call
+ * wait_on_bit_lock() in threads waiting to be able to set the bit.
+ * One uses wait_on_bit_lock() where one is waiting for the bit to
+ * clear with the intention of setting it, and when done, clearing it.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
+{
+	might_sleep();
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+}
+
+/**
+ * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to atomically set it.  This is similar
+ * to wait_on_bit(), but calls io_schedule() instead of schedule()
+ * for the actual waiting.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
+{
+	might_sleep();
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
+}
+
+/**
+ * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to set it, and allow the waiting action
+ * to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+			unsigned mode)
+{
+	might_sleep();
+	if (!test_and_set_bit(bit, word))
+		return 0;
+	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
+
+/**
+ * wait_on_atomic_t - Wait for an atomic_t to become 0
+ * @val: The atomic value being waited on, a kernel virtual address
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
+ * the purpose of getting a waitqueue, but we set the key to a bit number
+ * outside of the target 'word'.
+ */
+static inline
+int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
+{
+	might_sleep();
+	if (atomic_read(val) == 0)
+		return 0;
+	return out_of_line_wait_on_atomic_t(val, action, mode);
+}
+
+#endif /* _LINUX_WAIT_BIT_H */
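
A short sketch of the bit-wait API that now lives in the new header; the FOO_PENDING bit, the foo_flags word and the foo_*() helpers are illustrative only:

	#include <linux/wait_bit.h>

	#define FOO_PENDING	0		/* hypothetical bit number */
	static unsigned long foo_flags;

	static int foo_wait_idle(void)
	{
		/* Sleeps until bit FOO_PENDING clears; returns non-zero if a
		 * signal arrived first (TASK_INTERRUPTIBLE permits wakeup). */
		return wait_on_bit(&foo_flags, FOO_PENDING, TASK_INTERRUPTIBLE);
	}

	static void foo_done(void)
	{
		clear_bit(FOO_PENDING, &foo_flags);
		/* Order the flag update before waking the hashed waitqueue. */
		smp_mb__after_atomic();
		wake_up_bit(&foo_flags, FOO_PENDING);
	}
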
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index eb50ce5..298f996 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -29,7 +29,7 @@ struct edid;
 struct cec_adapter;
 struct cec_notifier;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
 
 /**
  * cec_notifier_get - find or create a new cec_notifier for the given device.
@@ -106,6 +106,16 @@ static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
 {
 }
 
+static inline void cec_notifier_register(struct cec_notifier *n,
+			 struct cec_adapter *adap,
+			 void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+}
+
+static inline void cec_notifier_unregister(struct cec_notifier *n)
+{
+}
+
 #endif
 
 #endif
diff --git a/include/media/cec.h b/include/media/cec.h
index b8eb895..201f060 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -173,7 +173,7 @@ struct cec_adapter {
 	bool passthrough;
 	struct cec_log_addrs log_addrs;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 	struct cec_notifier *notifier;
 #endif
 
@@ -206,7 +206,7 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
 #define cec_phys_addr_exp(pa) \
 	((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
 
-#if IS_ENABLED(CONFIG_CEC_CORE)
+#if IS_REACHABLE(CONFIG_CEC_CORE)
 struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
 		void *priv, const char *name, u32 caps, u8 available_las);
 int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
  */
 int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 void cec_register_cec_notifier(struct cec_adapter *adap,
 			       struct cec_notifier *notifier);
 #endif
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index fd60ecc..75e612a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -62,7 +62,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE	0
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
-	wait_queue_t		peer_wake;
+	wait_queue_entry_t		peer_wake;
 };
 
 static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index dbf0abb..3e505bb 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;
diff --git a/include/net/sock.h b/include/net/sock.h
index f33e3d1..f97da14 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1953,11 +1953,10 @@ static inline bool sk_has_allocations(const struct sock *sk)
  * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
  * barrier call. They were added due to the race found within the tcp code.
  *
- * Consider following tcp code paths:
+ * Consider following tcp code paths::
  *
- * CPU1                  CPU2
- *
- * sys_select            receive packet
+ *   CPU1                CPU2
+ *   sys_select          receive packet
  *   ...                 ...
  *   __add_wait_queue    update tp->rcv_nxt
  *   ...                 ...
@@ -2264,7 +2263,7 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
  * @tsflags:	timestamping flags to use
  * @tx_flags:	completed with instructions for time stamping
  *
- * Note : callers should take care of initial *tx_flags value (usually 0)
+ * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
  */
 static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
 				     __u8 *tx_flags)
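
A hedged sketch of the wake-up side of the pattern described in the comment above; my_sk_data_ready() is a made-up callback and the real tcp code differs in detail:

	static void my_sk_data_ready(struct sock *sk)
	{
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		/* skwq_has_sleeper() issues the smp_mb() that pairs with the
		 * barrier in sock_poll_wait() on the sys_select side. */
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLIN);
		rcu_read_unlock();
	}
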
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427..be6223c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 	/* call when ack arrives (optional) */
 	void (*in_ack_event)(struct sock *sk, u32 flags);
-	/* new value of cwnd after loss (optional) */
+	/* new value of cwnd after loss (required) */
 	u32  (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
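
Since undo_cwnd is now documented as required, a congestion-control module has to supply it. A minimal sketch, assuming the generic tcp_reno_undo_cwnd() fallback is available and with my_cc_ssthresh()/my_cc_cong_avoid() as placeholders:

	static struct tcp_congestion_ops my_cc __read_mostly = {
		.name		= "my_cc",
		.owner		= THIS_MODULE,
		.ssthresh	= my_cc_ssthresh,
		.cong_avoid	= my_cc_cong_avoid,
		.undo_cwnd	= tcp_reno_undo_cwnd,	/* required, no NULL fallback */
	};
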
diff --git a/include/net/wext.h b/include/net/wext.h
index 3459119..454ff76 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -6,7 +6,7 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 		      void __user *arg);
 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 			     unsigned long arg);
@@ -14,7 +14,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 struct iw_statistics *get_wireless_stats(struct net_device *dev);
 int call_commit_handler(struct net_device *dev);
 #else
-static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 				    void __user *arg)
 {
 	return -EINVAL;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 7e7e2b0..62f5a25 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1850,8 +1850,9 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 }
 #endif
 
-#ifdef CONFIG_XFRM_OFFLOAD
 void __net_init xfrm_dev_init(void);
+
+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		       struct xfrm_user_offload *xuo);
@@ -1877,10 +1878,6 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 	}
 }
 #else
-static inline void __net_init xfrm_dev_init(void)
-{
-}
-
 static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
 	return 0;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index d67b11b..2f4f176 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -575,6 +575,10 @@ struct ib_mad_agent {
 	u32			flags;
 	u8			port_num;
 	u8			rmpp_version;
+	void			*security;
+	bool			smp_allowed;
+	bool			lsm_nb_reg;
+	struct notifier_block   lsm_nb;
 };
 
 /**
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index f5f70e3..355b81f 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -158,7 +158,6 @@ enum sa_path_rec_type {
 };
 
 struct sa_path_rec_ib {
-	__be64       service_id;
 	__be16       dlid;
 	__be16       slid;
 	u8           raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
 };
 
 struct sa_path_rec_opa {
-	__be64       service_id;
 	__be32       dlid;
 	__be32       slid;
 	u8           raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
 struct sa_path_rec {
 	union ib_gid dgid;
 	union ib_gid sgid;
+	__be64       service_id;
 	/* reserved */
 	__be32       flow_label;
 	u8           hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
 		ib->ib.dlid	= htons(ntohl(opa->opa.dlid));
 		ib->ib.slid	= htons(ntohl(opa->opa.slid));
 	}
-	ib->ib.service_id	= opa->opa.service_id;
+	ib->service_id		= opa->service_id;
 	ib->ib.raw_traffic	= opa->opa.raw_traffic;
 }
 
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
 	}
 	opa->opa.slid		= slid;
 	opa->opa.dlid		= dlid;
-	opa->opa.service_id	= ib->ib.service_id;
+	opa->service_id		= ib->service_id;
 	opa->opa.raw_traffic	= ib->ib.raw_traffic;
 }
 
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
 		(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }
 
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-					  __be64 service_id)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		rec->ib.service_id = service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		rec->opa.service_id = service_id;
-}
-
 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
 {
 	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
 		rec->opa.raw_traffic = raw_traffic;
 }
 
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		return rec->ib.service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		return rec->opa.service_id;
-	return 0;
-}
-
 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
 {
 	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index ba8314e..0e480a5 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1614,6 +1614,45 @@ struct ib_rwq_ind_table_init_attr {
 	struct ib_wq	**ind_tbl;
 };
 
+enum port_pkey_state {
+	IB_PORT_PKEY_NOT_VALID = 0,
+	IB_PORT_PKEY_VALID = 1,
+	IB_PORT_PKEY_LISTED = 2,
+};
+
+struct ib_qp_security;
+
+struct ib_port_pkey {
+	enum port_pkey_state	state;
+	u16			pkey_index;
+	u8			port_num;
+	struct list_head	qp_list;
+	struct list_head	to_error_list;
+	struct ib_qp_security  *sec;
+};
+
+struct ib_ports_pkeys {
+	struct ib_port_pkey	main;
+	struct ib_port_pkey	alt;
+};
+
+struct ib_qp_security {
+	struct ib_qp	       *qp;
+	struct ib_device       *dev;
+	/* Hold this mutex when changing port and pkey settings. */
+	struct mutex		mutex;
+	struct ib_ports_pkeys  *ports_pkeys;
+	/* A list of all open shared QP handles.  Required to enforce security
+	 * properly for all users of a shared QP.
+	 */
+	struct list_head        shared_qp_list;
+	void                   *security;
+	bool			destroying;
+	atomic_t		error_list_count;
+	struct completion	error_complete;
+	int			error_comps_pending;
+};
+
 /*
  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
@@ -1643,6 +1682,7 @@ struct ib_qp {
 	u32			max_read_sge;
 	enum ib_qp_type		qp_type;
 	struct ib_rwq_ind_table *rwq_ind_tbl;
+	struct ib_qp_security  *qp_sec;
 };
 
 struct ib_mr {
@@ -1891,6 +1931,7 @@ enum ib_mad_result {
 };
 
 struct ib_port_cache {
+	u64		      subnet_prefix;
 	struct ib_pkey_cache  *pkey;
 	struct ib_gid_table   *gid;
 	u8                     lmc;
@@ -1940,6 +1981,12 @@ struct rdma_netdev {
 			    union ib_gid *gid, u16 mlid);
 };
 
+struct ib_port_pkey_list {
+	/* Lock to hold while modifying the list. */
+	spinlock_t                    list_lock;
+	struct list_head              pkey_list;
+};
+
 struct ib_device {
 	/* Do not access @dma_device directly from ULP nor from HW drivers. */
 	struct device                *dma_device;
@@ -1963,6 +2010,8 @@ struct ib_device {
 
 	int			      num_comp_vectors;
 
+	struct ib_port_pkey_list     *port_pkey_list;
+
 	struct iw_cm_verbs	     *iwcm;
 
 	/**
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 5852661..348c102 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
 	struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 			unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
 #endif /* _RDMA_NETLINK_H */
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index a09cca8..a29d308 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -157,7 +157,7 @@ struct osd_request {
 
 	osd_req_done_fn *async_done;
 	void *async_private;
-	int async_error;
+	blk_status_t async_error;
 	int req_errors;
 };
 
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index b379f93..da9bf2b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -166,6 +166,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 extern void scsi_kunmap_atomic_sg(void *virt);
 
 extern int scsi_init_io(struct scsi_cmnd *cmd);
+extern void scsi_initialize_rq(struct request *rq);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
index f0c76f9..e0afa44 100644
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -27,6 +27,6 @@ static inline void scsi_req_free_cmd(struct scsi_request *req)
 		kfree(req->cmd);
 }
 
-void scsi_req_init(struct request *);
+void scsi_req_init(struct scsi_request *req);
 
 #endif /* _SCSI_SCSI_REQUEST_H */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 0aaef59..98d8d38 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -81,13 +81,18 @@
  * Provides the MRQ number for the MRQ message: #mrq. The remainder of
  * the MRQ message is a payload (immediately following the
  * mrq_request) whose format depends on mrq.
- *
- * @todo document the flags
  */
 struct mrq_request {
 	/** @brief MRQ number of the request */
 	uint32_t mrq;
-	/** @brief flags for the request */
+	/**
+	 * @brief flags providing follow up directions to the receiver
+	 *
+	 * | Bit | Description                                |
+	 * |-----|--------------------------------------------|
+	 * | 1   | ring the sender's doorbell when responding |
+	 * | 0   | should be 1                                |
+	 */
 	uint32_t flags;
 } __ABI_PACKED;
 
@@ -99,13 +104,11 @@ struct mrq_request {
  *  remainder of the MRQ response is a payload (immediately following
  *  the mrq_response) whose format depends on the associated
  *  mrq_request::mrq
- *
- * @todo document the flags
  */
 struct mrq_response {
 	/** @brief error code for the MRQ request itself */
 	int32_t err;
-	/** @brief flags for the response */
+	/** @brief reserved for future use */
 	uint32_t flags;
 } __ABI_PACKED;
 
@@ -147,6 +150,8 @@ struct mrq_response {
 #define MRQ_ABI_RATCHET		29
 #define MRQ_EMC_DVFS_LATENCY	31
 #define MRQ_TRACE_ITER		64
+#define MRQ_RINGBUF_CONSOLE	65
+#define MRQ_PG			66
 
 /** @} */
 
@@ -155,7 +160,7 @@ struct mrq_response {
  * @brief Maximum MRQ code to be sent by CPU software to
  * BPMP. Subject to change in future
  */
-#define MAX_CPU_MRQ_ID		64
+#define MAX_CPU_MRQ_ID		66
 
 /**
  * @addtogroup MRQ_Payloads Message Payloads
@@ -175,6 +180,7 @@ struct mrq_response {
  *   @defgroup Vhint CPU Voltage hint
  *   @defgroup MRQ_Deprecated Deprecated MRQ messages
  *   @defgroup EMC
+ *   @defgroup RingbufConsole
  * @}
  */
 
@@ -637,7 +643,7 @@ struct mrq_debugfs_response {
  * * Initiators: Any
  * * Targets: BPMP
  * * Request Payload: @ref mrq_reset_request
- * * Response Payload: N/A
+ * * Response Payload: @ref mrq_reset_response
  */
 
 /**
@@ -647,6 +653,7 @@ enum mrq_reset_commands {
 	CMD_RESET_ASSERT = 1,
 	CMD_RESET_DEASSERT = 2,
 	CMD_RESET_MODULE = 3,
+	CMD_RESET_GET_MAX_ID = 4,
 	CMD_RESET_MAX, /* not part of ABI and subject to change */
 };
 
@@ -665,6 +672,38 @@ struct mrq_reset_request {
 } __ABI_PACKED;
 
 /**
+ * @ingroup Reset
+ * @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When
+ * this sub-command is not supported, firmware will return -BPMP_EBADCMD
+ * in mrq_response::err.
+ */
+struct cmd_reset_get_max_id_response {
+	/** @brief max reset id */
+	uint32_t max_id;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Reset
+ * @brief Response with MRQ_RESET
+ *
+ * Each sub-command supported by @ref mrq_reset_request may return
+ * sub-command-specific data. Some do and some do not as indicated
+ * in the following table
+ *
+ * | sub-command          | payload          |
+ * |----------------------|------------------|
+ * | CMD_RESET_ASSERT     | -                |
+ * | CMD_RESET_DEASSERT   | -                |
+ * | CMD_RESET_MODULE     | -                |
+ * | CMD_RESET_GET_MAX_ID | reset_get_max_id |
+ */
+struct mrq_reset_response {
+	union {
+		struct cmd_reset_get_max_id_response reset_get_max_id;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
  * @ingroup MRQ_Codes
  * @def MRQ_I2C
  * @brief issue an i2c transaction
@@ -812,6 +851,17 @@ enum {
 };
 /** @} */
 
+/**
+ * @name MRQ_CLK properties
+ * Flag bits for cmd_clk_properties_response::flags and
+ * cmd_clk_get_all_info_response::flags
+ * @{
+ */
+#define BPMP_CLK_HAS_MUX	(1 << 0)
+#define BPMP_CLK_HAS_SET_RATE	(1 << 1)
+#define BPMP_CLK_IS_ROOT	(1 << 2)
+/** @} */
+
 #define MRQ_CLK_NAME_MAXLEN	40
 #define MRQ_CLK_MAX_PARENTS	16
 
@@ -1010,7 +1060,7 @@ struct mrq_clk_response {
  *
  * * Platforms: All
  * * Initiators: Any
- * * Targets: Any
+ * * Targets: Any except DMCE
  * * Request Payload: @ref mrq_query_abi_request
  * * Response Payload: @ref mrq_query_abi_response
  */
@@ -1030,6 +1080,9 @@ struct mrq_query_abi_request {
 /**
  * @ingroup ABI_info
  * @brief response to MRQ_QUERY_ABI
+ *
+ * @note mrq_response::err of 0 indicates that the query was
+ * successful, not that the MRQ itself is supported!
  */
 struct mrq_query_abi_response {
 	/** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
@@ -1080,7 +1133,9 @@ struct mrq_pg_read_state_response {
 /**
  * @ingroup MRQ_Codes
  * @def MRQ_PG_UPDATE_STATE
- * @brief modify the power-gating state of a partition
+ * @brief modify the power-gating state of a partition. In contrast to
+ * MRQ_PG calls, the operations that change the state (on/off) of a
+ * power partition are reference counted.
  *
  * * Platforms: T186
  * * Initiators: Any
@@ -1126,6 +1181,171 @@ struct mrq_pg_update_state_request {
 
 /**
  * @ingroup MRQ_Codes
+ * @def MRQ_PG
+ * @brief Control power-gating state of a partition. In contrast to
+ * MRQ_PG_UPDATE_STATE, operations that change the power partition
+ * state are NOT reference counted
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_request
+ * * Response Payload: @ref mrq_pg_response
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @name MRQ_PG sub-commands
+ * @{
+ */
+enum mrq_pg_cmd {
+	/**
+	 * @brief Check whether the BPMP driver supports the specified
+	 * request type
+	 *
+	 * mrq_response::err is 0 if the specified request is
+	 * supported and -#BPMP_ENODEV otherwise.
+	 */
+	CMD_PG_QUERY_ABI = 0,
+
+	/**
+	 * @brief Set the current state of the specified power domain.
+	 * The possible values for power domain states are defined in
+	 * enum pg_states.
+	 *
+	 * mrq_response::err is
+	 * 0: Success
+	 * -#BPMP_EINVAL: Invalid request parameters
+	 */
+	CMD_PG_SET_STATE = 1,
+
+	/**
+	 * @brief Get the current state of the specified power domain.
+	 * The possible values for power domain states are defined in
+	 * enum pg_states.
+	 *
+	 * mrq_response::err is
+	 * 0: Success
+	 * -#BPMP_EINVAL: Invalid request parameters
+	 */
+	CMD_PG_GET_STATE = 2,
+
+	/**
+	 * @brief Get the name string of the specified power domain id.
+	 *
+	 * mrq_response::err is
+	 * 0: Success
+	 * -#BPMP_EINVAL: Invalid request parameters
+	 */
+	CMD_PG_GET_NAME = 3,
+
+
+	/**
+	 * @brief Get the highest power domain id in the system. Not
+	 * all IDs between 0 and max_id are valid IDs.
+	 *
+	 * mrq_response::err is
+	 * 0: Success
+	 * -#BPMP_EINVAL: Invalid request parameters
+	 */
+	CMD_PG_GET_MAX_ID = 4,
+};
+/** @} */
+
+#define MRQ_PG_NAME_MAXLEN	40
+
+/**
+ * @brief possible power domain states in
+ * cmd_pg_set_state_request:state and cmd_pg_get_state_response:state.
+ *  PG_STATE_OFF: power domain is OFF
+ *  PG_STATE_ON: power domain is ON
+ *  PG_STATE_RUNNING: power domain is ON and made into directly usable
+ *                    state by turning on the clocks associated with
+ *                    the domain
+ */
+enum pg_states {
+	PG_STATE_OFF = 0,
+	PG_STATE_ON = 1,
+	PG_STATE_RUNNING = 2,
+};
+
+struct cmd_pg_query_abi_request {
+	uint32_t type; /* enum mrq_pg_cmd */
+} __ABI_PACKED;
+
+struct cmd_pg_set_state_request {
+	uint32_t state; /* enum pg_states */
+} __ABI_PACKED;
+
+struct cmd_pg_get_state_response {
+	uint32_t state; /* enum pg_states */
+} __ABI_PACKED;
+
+struct cmd_pg_get_name_response {
+	uint8_t name[MRQ_PG_NAME_MAXLEN];
+} __ABI_PACKED;
+
+struct cmd_pg_get_max_id_response {
+	uint32_t max_id;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Powergating
+ * @brief request with #MRQ_PG
+ *
+ * Used by the sender of an #MRQ_PG message to control power
+ * partitions. The pg_request is split into several sub-commands. Some
+ * sub-commands require no additional data. Others have a sub-command
+ * specific payload
+ *
+ * |sub-command                 |payload                |
+ * |----------------------------|-----------------------|
+ * |CMD_PG_QUERY_ABI            | query_abi             |
+ * |CMD_PG_SET_STATE            | set_state             |
+ * |CMD_PG_GET_STATE            | -                     |
+ * |CMD_PG_GET_NAME             | -                     |
+ * |CMD_PG_GET_MAX_ID           | -                     |
+ *
+ */
+
+struct mrq_pg_request {
+	uint32_t cmd;
+	uint32_t id;
+	union {
+		struct cmd_pg_query_abi_request query_abi;
+		struct cmd_pg_set_state_request set_state;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Powergating
+ * @brief response to MRQ_PG
+ *
+ * Each sub-command supported by @ref mrq_pg_request may return
+ * sub-command-specific data. Some do and some do not as indicated in
+ * the following table
+ *
+ * |sub-command                 |payload                |
+ * |----------------------------|-----------------------|
+ * |CMD_PG_QUERY_ABI            | -                     |
+ * |CMD_PG_SET_STATE            | -                     |
+ * |CMD_PG_GET_STATE            | get_state             |
+ * |CMD_PG_GET_NAME             | get_name              |
+ * |CMD_PG_GET_MAX_ID           | get_max_id            |
+ *
+ */
+
+struct mrq_pg_response {
+	union {
+		struct cmd_pg_get_state_response get_state;
+		struct cmd_pg_get_name_response get_name;
+		struct cmd_pg_get_max_id_response get_max_id;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
  * @def MRQ_THERMAL
  * @brief interact with BPMP thermal framework
  *
@@ -1529,6 +1749,184 @@ struct mrq_trace_iter_request {
 
 /** @} */
 
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_RINGBUF_CONSOLE
+ * @brief A ring buffer debug console for BPMP
+ * @addtogroup RingbufConsole
+ *
+ * The ring buffer debug console aims to be a substitute for the UART debug
+ * console. The debug console is implemented with two ring buffers in the
+ * BPMP-FW, the RX (receive) and TX (transmit) buffers. Characters can be read
+ * and written to the buffers by the host via the MRQ interface.
+ *
+ * @{
+ */
+
+/**
+ * @brief Maximum number of bytes transferred in a single write command to the
+ * BPMP
+ *
+ * This is determined by the number of free bytes in the message struct,
+ * rounded down to a multiple of four.
+ */
+#define MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN 112
+
+/**
+ * @brief Maximum number of bytes transferred in a single read command to the
+ * BPMP
+ *
+ * This is determined by the number of free bytes in the message struct,
+ * rounded down to a multiple of four.
+ */
+#define MRQ_RINGBUF_CONSOLE_MAX_READ_LEN 116
+
+enum mrq_ringbuf_console_host_to_bpmp_cmd {
+	/**
+	 * @brief Check whether the BPMP driver supports the specified request
+	 * type
+	 *
+	 * mrq_response::err is 0 if the specified request is supported and
+	 * -#BPMP_ENODEV otherwise
+	 */
+	CMD_RINGBUF_CONSOLE_QUERY_ABI = 0,
+	/**
+	 * @brief Perform a read operation on the BPMP TX buffer
+	 *
+	 * mrq_response::err is 0
+	 */
+	CMD_RINGBUF_CONSOLE_READ = 1,
+	/**
+	 * @brief Perform a write operation on the BPMP RX buffer
+	 *
+	 * mrq_response::err is 0 if the operation was successful and
+	 * -#BPMP_ENODEV otherwise
+	 */
+	CMD_RINGBUF_CONSOLE_WRITE = 2,
+	/**
+	 * @brief Get the length of the buffer and the physical addresses of
+	 * the buffer data and the head and tail counters
+	 *
+	 * mrq_response::err is 0 if the operation was successful and
+	 * -#BPMP_ENODEV otherwise
+	 */
+	CMD_RINGBUF_CONSOLE_GET_FIFO = 3,
+};
+
+/**
+ * @ingroup RingbufConsole
+ * @brief Host->BPMP request data for request type
+ * #CMD_RINGBUF_CONSOLE_QUERY_ABI
+ */
+struct cmd_ringbuf_console_query_abi_req {
+	/** @brief Command identifier to be queried */
+	uint32_t cmd;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_ringbuf_console_query_abi_resp {
+	EMPTY
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief Host->BPMP request data for request type #CMD_RINGBUF_CONSOLE_READ
+ */
+struct cmd_ringbuf_console_read_req {
+	/**
+	 * @brief Number of bytes requested to be read from the BPMP TX buffer
+	 */
+	uint8_t len;
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief BPMP->Host response data for request type #CMD_RINGBUF_CONSOLE_READ
+ */
+struct cmd_ringbuf_console_read_resp {
+	/** @brief The actual data read from the BPMP TX buffer */
+	uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN];
+	/** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */
+	uint8_t len;
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief Host->BPMP request data for request type #CMD_RINGBUF_CONSOLE_WRITE
+ */
+struct cmd_ringbuf_console_write_req {
+	/** @brief The actual data to be written to the BPMP RX buffer */
+	uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN];
+	/** @brief Number of bytes in cmd_ringbuf_console_write_req::data */
+	uint8_t len;
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief BPMP->Host response data for request type #CMD_RINGBUF_CONSOLE_WRITE
+ */
+struct cmd_ringbuf_console_write_resp {
+	/** @brief Number of bytes of available space in the BPMP RX buffer */
+	uint32_t space_avail;
+	/** @brief Number of bytes that were written to the BPMP RX buffer */
+	uint8_t len;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_ringbuf_console_get_fifo_req {
+	EMPTY
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief BPMP->Host reply data for request type #CMD_RINGBUF_CONSOLE_GET_FIFO
+ */
+struct cmd_ringbuf_console_get_fifo_resp {
+	/** @brief Physical address of the BPMP TX buffer */
+	uint64_t bpmp_tx_buf_addr;
+	/** @brief Physical address of the BPMP TX buffer head counter */
+	uint64_t bpmp_tx_head_addr;
+	/** @brief Physical address of the BPMP TX buffer tail counter */
+	uint64_t bpmp_tx_tail_addr;
+	/** @brief Length of the BPMP TX buffer */
+	uint32_t bpmp_tx_buf_len;
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief Host->BPMP request data.
+ *
+ * Reply type is union #mrq_ringbuf_console_bpmp_to_host_response .
+ */
+struct mrq_ringbuf_console_host_to_bpmp_request {
+	/**
+	 * @brief type of request. Values listed in enum
+	 * #mrq_ringbuf_console_host_to_bpmp_cmd.
+	 */
+	uint32_t type;
+	/** @brief  request type specific parameters. */
+	union {
+		struct cmd_ringbuf_console_query_abi_req query_abi;
+		struct cmd_ringbuf_console_read_req read;
+		struct cmd_ringbuf_console_write_req write;
+		struct cmd_ringbuf_console_get_fifo_req get_fifo;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup RingbufConsole
+ * @brief BPMP->Host reply data
+ *
+ * In response to struct #mrq_ringbuf_console_host_to_bpmp_request.
+ */
+union mrq_ringbuf_console_bpmp_to_host_response {
+	struct cmd_ringbuf_console_query_abi_resp query_abi;
+	struct cmd_ringbuf_console_read_resp read;
+	struct cmd_ringbuf_console_write_resp write;
+	struct cmd_ringbuf_console_get_fifo_resp get_fifo;
+} __ABI_PACKED;
+/** @} */
+
 /*
  *  4. Enumerations
  */
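
As a worked example of the MRQ_PG tables above, turning a power domain on would be phrased roughly as follows, inside a function that already holds a struct tegra_bpmp *bpmp; the domain id and the tegra_bpmp_transfer() plumbing are assumptions for illustration:

	struct mrq_pg_request req = {
		.cmd = CMD_PG_SET_STATE,
		.id = 7,				/* hypothetical domain id */
		.set_state = { .state = PG_STATE_ON },	/* per the request table */
	};
	struct mrq_pg_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_PG,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	int err = tegra_bpmp_transfer(bpmp, &msg);
	/* CMD_PG_SET_STATE carries no response payload; err < 0 or a
	 * non-zero MRQ error code in the response indicates failure. */
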
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index 13dcd44..9ba6522 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -15,6 +15,7 @@
 #define __SOC_TEGRA_BPMP_H
 
 #include <linux/mailbox_client.h>
+#include <linux/pm_domain.h>
 #include <linux/reset-controller.h>
 #include <linux/semaphore.h>
 #include <linux/types.h>
@@ -91,6 +92,8 @@ struct tegra_bpmp {
 	unsigned int num_clocks;
 
 	struct reset_controller_dev rstc;
+
+	struct genpd_onecell_data genpd;
 };
 
 struct tegra_bpmp *tegra_bpmp_get(struct device *dev);
@@ -138,4 +141,13 @@ static inline int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_SOC_TEGRA_POWERGATE_BPMP)
+int tegra_bpmp_init_powergates(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_powergates(struct tegra_bpmp *bpmp)
+{
+	return 0;
+}
+#endif
+
 #endif /* __SOC_TEGRA_BPMP_H */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 275581d..5f17fb7 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -557,6 +557,7 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_READ_ACTIVE		1
 #define LOGIN_FLAGS_CLOSED		2
 #define LOGIN_FLAGS_READY		4
+#define LOGIN_FLAGS_INITIAL_PDU		8
 	unsigned long		login_flags;
 	struct delayed_work	login_work;
 	struct delayed_work	login_cleanup_work;
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
new file mode 100644
index 0000000..697ee66
--- /dev/null
+++ b/include/trace/events/fsi.h
@@ -0,0 +1,127 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi
+
+#if !defined(_TRACE_FSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_read,
+	TP_PROTO(const struct fsi_master *master, int link, int id,
+			uint32_t addr, size_t size),
+	TP_ARGS(master, link, id, addr, size),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	link)
+		__field(int,	id)
+		__field(__u32,	addr)
+		__field(size_t,	size)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->idx;
+		__entry->link = link;
+		__entry->id = id;
+		__entry->addr = addr;
+		__entry->size = size;
+	),
+	TP_printk("fsi%d:%02d:%02d %08x[%zd]",
+		__entry->master_idx,
+		__entry->link,
+		__entry->id,
+		__entry->addr,
+		__entry->size
+	)
+);
+
+TRACE_EVENT(fsi_master_write,
+	TP_PROTO(const struct fsi_master *master, int link, int id,
+			uint32_t addr, size_t size, const void *data),
+	TP_ARGS(master, link, id, addr, size, data),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	link)
+		__field(int,	id)
+		__field(__u32,	addr)
+		__field(size_t,	size)
+		__field(__u32,	data)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->idx;
+		__entry->link = link;
+		__entry->id = id;
+		__entry->addr = addr;
+		__entry->size = size;
+		__entry->data = 0;
+		memcpy(&__entry->data, data, size);
+	),
+	TP_printk("fsi%d:%02d:%02d %08x[%zd] <= {%*ph}",
+		__entry->master_idx,
+		__entry->link,
+		__entry->id,
+		__entry->addr,
+		__entry->size,
+		(int)__entry->size, &__entry->data
+	)
+);
+
+TRACE_EVENT(fsi_master_rw_result,
+	TP_PROTO(const struct fsi_master *master, int link, int id,
+			uint32_t addr, size_t size,
+			bool write, const void *data, int ret),
+	TP_ARGS(master, link, id, addr, size, write, data, ret),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	link)
+		__field(int,	id)
+		__field(__u32,	addr)
+		__field(size_t,	size)
+		__field(bool,	write)
+		__field(__u32,	data)
+		__field(int,	ret)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->idx;
+		__entry->link = link;
+		__entry->id = id;
+		__entry->addr = addr;
+		__entry->size = size;
+		__entry->write = write;
+		__entry->data = 0;
+		__entry->ret = ret;
+		if (__entry->write || !__entry->ret)
+			memcpy(&__entry->data, data, size);
+	),
+	TP_printk("fsi%d:%02d:%02d %08x[%zd] %s {%*ph} ret %d",
+		__entry->master_idx,
+		__entry->link,
+		__entry->id,
+		__entry->addr,
+		__entry->size,
+		__entry->write ? "<=" : "=>",
+		(int)__entry->size, &__entry->data,
+		__entry->ret
+	)
+);
+
+TRACE_EVENT(fsi_master_break,
+	TP_PROTO(const struct fsi_master *master, int link),
+	TP_ARGS(master, link),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	link)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->idx;
+		__entry->link = link;
+	),
+	TP_printk("fsi%d:%d",
+		__entry->master_idx,
+		__entry->link
+	)
+);
+
+
+#endif /* _TRACE_FSI_H */
+
+#include <trace/define_trace.h>
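
A hedged sketch of how the new FSI tracepoints might be fired around a master transfer; the fsi_do_read() wrapper and the master->read() op signature are assumptions, not part of this patch:

	#define CREATE_TRACE_POINTS
	#include <trace/events/fsi.h>

	static int fsi_do_read(struct fsi_master *master, int link, uint8_t id,
			       uint32_t addr, void *val, size_t size)
	{
		int rc;

		trace_fsi_master_read(master, link, id, addr, size);
		rc = master->read(master, link, id, addr, val, size);
		/* write=false: the payload is only recorded when the read
		 * succeeded, matching the TP_fast_assign() above. */
		trace_fsi_master_rw_result(master, link, id, addr, size,
					   false, val, rc);
		return rc;
	}
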
diff --git a/include/trace/events/fsi_master_gpio.h b/include/trace/events/fsi_master_gpio.h
new file mode 100644
index 0000000..11b36c1
--- /dev/null
+++ b/include/trace/events/fsi_master_gpio.h
@@ -0,0 +1,68 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_gpio
+
+#if !defined(_TRACE_FSI_MASTER_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_GPIO_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_gpio_in,
+	TP_PROTO(const struct fsi_master_gpio *master, int bits, uint64_t msg),
+	TP_ARGS(master, bits, msg),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	bits)
+		__field(uint64_t, msg)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->master.idx;
+		__entry->bits = bits;
+		__entry->msg  = msg & ((1ull<<bits) - 1);
+	),
+	TP_printk("fsi-gpio%d => %0*llx[%d]",
+		__entry->master_idx,
+		(__entry->bits + 3) / 4,
+		__entry->msg,
+		__entry->bits
+	)
+);
+
+TRACE_EVENT(fsi_master_gpio_out,
+	TP_PROTO(const struct fsi_master_gpio *master, int bits, uint64_t msg),
+	TP_ARGS(master, bits, msg),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+		__field(int,	bits)
+		__field(uint64_t, msg)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->master.idx;
+		__entry->bits = bits;
+		__entry->msg  = msg & ((1ull<<bits) - 1);
+	),
+	TP_printk("fsi-gpio%d <= %0*llx[%d]",
+		__entry->master_idx,
+		(__entry->bits + 3) / 4,
+		__entry->msg,
+		__entry->bits
+	)
+);
+
+TRACE_EVENT(fsi_master_gpio_break,
+	TP_PROTO(const struct fsi_master_gpio *master),
+	TP_ARGS(master),
+	TP_STRUCT__entry(
+		__field(int,	master_idx)
+	),
+	TP_fast_assign(
+		__entry->master_idx = master->master.idx;
+	),
+	TP_printk("fsi-gpio%d ----break---",
+		__entry->master_idx
+	)
+);
+
+#endif /* _TRACE_FSI_MASTER_GPIO_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index e3facb3..91dc089 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -742,6 +742,7 @@ TRACE_EVENT(rcu_torture_read,
  *	"OnlineQ": _rcu_barrier() found online CPU with callbacks.
  *	"OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
  *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ *	"IRQNQ": An rcu_barrier_callback() callback found no callbacks.
  *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
  *	"LastCB": An rcu_barrier_callback() invoked the last callback.
  *	"Inc2": _rcu_barrier() piggyback check counter incremented.
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index 7e02c98..f9f702b 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -7,37 +7,37 @@
 #include <linux/ktime.h>
 #include <linux/tracepoint.h>
 
-DECLARE_EVENT_CLASS(spi_master,
+DECLARE_EVENT_CLASS(spi_controller,
 
-	TP_PROTO(struct spi_master *master),
+	TP_PROTO(struct spi_controller *controller),
 
-	TP_ARGS(master),
+	TP_ARGS(controller),
 
 	TP_STRUCT__entry(
 		__field(        int,           bus_num             )
 	),
 
 	TP_fast_assign(
-		__entry->bus_num = master->bus_num;
+		__entry->bus_num = controller->bus_num;
 	),
 
 	TP_printk("spi%d", (int)__entry->bus_num)
 
 );
 
-DEFINE_EVENT(spi_master, spi_master_idle,
+DEFINE_EVENT(spi_controller, spi_controller_idle,
 
-	TP_PROTO(struct spi_master *master),
+	TP_PROTO(struct spi_controller *controller),
 
-	TP_ARGS(master)
+	TP_ARGS(controller)
 
 );
 
-DEFINE_EVENT(spi_master, spi_master_busy,
+DEFINE_EVENT(spi_controller, spi_controller_busy,
 
-	TP_PROTO(struct spi_master *master),
+	TP_PROTO(struct spi_controller *controller),
 
-	TP_ARGS(master)
+	TP_ARGS(controller)
 
 );
 
@@ -54,7 +54,7 @@ DECLARE_EVENT_CLASS(spi_message,
 	),
 
 	TP_fast_assign(
-		__entry->bus_num = msg->spi->master->bus_num;
+		__entry->bus_num = msg->spi->controller->bus_num;
 		__entry->chip_select = msg->spi->chip_select;
 		__entry->msg = msg;
 	),
@@ -95,7 +95,7 @@ TRACE_EVENT(spi_message_done,
 	),
 
 	TP_fast_assign(
-		__entry->bus_num = msg->spi->master->bus_num;
+		__entry->bus_num = msg->spi->controller->bus_num;
 		__entry->chip_select = msg->spi->chip_select;
 		__entry->msg = msg;
 		__entry->frame = msg->frame_length;
@@ -122,7 +122,7 @@ DECLARE_EVENT_CLASS(spi_transfer,
 	),
 
 	TP_fast_assign(
-		__entry->bus_num = msg->spi->master->bus_num;
+		__entry->bus_num = msg->spi->controller->bus_num;
 		__entry->chip_select = msg->spi->chip_select;
 		__entry->xfer = xfer;
 		__entry->len = xfer->len;
diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h
index 143dacb..06d5f7d 100644
--- a/include/uapi/asm-generic/ioctls.h
+++ b/include/uapi/asm-generic/ioctls.h
@@ -77,6 +77,7 @@
 #define TIOCGPKT	_IOR('T', 0x38, int) /* Get packet mode state */
 #define TIOCGPTLCK	_IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL	_IOR('T', 0x40, int) /* Get exclusive mode state */
+#define TIOCGPTPEER	_IOR('T', 0x41, int) /* Safely open the slave */
 
 #define FIONCLEX	0x5450
 #define FIOCLEX		0x5451
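TIOCGPTPEER lets a process obtain the slave side of a pseudoterminal directly from the master descriptor, without resolving a /dev/pts path (useful when the caller's mount namespace cannot see the slave node). A minimal user-space sketch, assuming master_fd is an unlocked /dev/ptmx descriptor and that the libc headers expose the new constant (otherwise it can be taken from <asm/ioctls.h>):

    #include <fcntl.h>
    #include <sys/ioctl.h>

    /* Returns an fd for the slave side, or -1 on error. */
    static int open_pty_peer(int master_fd)
    {
            return ioctl(master_fd, TIOCGPTPEER, O_RDWR | O_NOCTTY);
    }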
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 1abaf62..9c4eca6 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -151,7 +151,18 @@ typedef struct siginfo {
 #define si_arch		_sifields._sigsys._arch
 #endif
 
-#ifndef __KERNEL__
+#ifdef __KERNEL__
+#define __SI_MASK	0xffff0000u
+#define __SI_KILL	(0 << 16)
+#define __SI_TIMER	(1 << 16)
+#define __SI_POLL	(2 << 16)
+#define __SI_FAULT	(3 << 16)
+#define __SI_CHLD	(4 << 16)
+#define __SI_RT		(5 << 16)
+#define __SI_MESGQ	(6 << 16)
+#define __SI_SYS	(7 << 16)
+#define __SI_CODE(T,N)	((T) | ((N) & 0xffff))
+#else /* __KERNEL__ */
 #define __SI_KILL	0
 #define __SI_TIMER	0
 #define __SI_POLL	0
@@ -161,7 +172,7 @@ typedef struct siginfo {
 #define __SI_MESGQ	0
 #define __SI_SYS	0
 #define __SI_CODE(T,N)	(N)
-#endif
+#endif /* __KERNEL__ */
 
 /*
  * si_code values
diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
index 7caf44c..295cd3e 100644
--- a/include/uapi/linux/a.out.h
+++ b/include/uapi/linux/a.out.h
@@ -112,24 +112,7 @@ enum machine_type {
 #define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0)
 #endif
 
-/* Address of data segment in memory after it is loaded.
-   Note that it is up to you to define SEGMENT_SIZE
-   on machines not listed here.  */
-#if defined(vax) || defined(hp300) || defined(pyr)
-#define SEGMENT_SIZE page_size
-#endif
-#ifdef	sony
-#define	SEGMENT_SIZE	0x2000
-#endif	/* Sony.  */
-#ifdef is68k
-#define SEGMENT_SIZE 0x20000
-#endif
-#if defined(m68k) && defined(PORTAR)
-#define PAGE_SIZE 0x400
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-
-#ifdef linux
+/* Address of data segment in memory after it is loaded. */
 #ifndef __KERNEL__
 #include <unistd.h>
 #endif
@@ -142,7 +125,6 @@ enum machine_type {
 #endif
 #endif
 #endif
-#endif
 
 #define _N_SEGMENT_ROUND(x) ALIGN(x, SEGMENT_SIZE)
 
@@ -260,13 +242,7 @@ struct relocation_info
   unsigned int r_extern:1;
   /* Four bits that aren't used, but when writing an object file
      it is desirable to clear them.  */
-#ifdef NS32K
-  unsigned r_bsr:1;
-  unsigned r_disp:1;
-  unsigned r_pad:2;
-#else
   unsigned int r_pad:4;
-#endif
 };
 #endif /* no N_RELOCATION_INFO_DECLARED.  */
 
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index bb2554f..a2d4a8a 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -79,7 +79,7 @@ struct io_event {
 struct iocb {
 	/* these are internal to the kernel/libc. */
 	__u64	aio_data;	/* data to be returned in event's data */
-	__u32	PADDED(aio_key, aio_reserved1);
+	__u32	PADDED(aio_key, aio_rw_flags);
 				/* the kernel sets aio_key to the req # */
 
 	/* common fields */
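Renaming the padding word to aio_rw_flags lets callers attach per-request RWF_* flags to individual AIO submissions. A rough sketch of filling one iocb this way (the values are illustrative, and whether a particular flag such as RWF_NOWAIT is honoured depends on the kernel and filesystem):

    #include <string.h>
    #include <linux/aio_abi.h>
    #include <linux/fs.h>

    static void fill_nowait_write(struct iocb *cb, int fd, void *buf, size_t len)
    {
            memset(cb, 0, sizeof(*cb));
            cb->aio_lio_opcode = IOCB_CMD_PWRITE;
            cb->aio_fildes     = fd;
            cb->aio_buf        = (unsigned long)buf;
            cb->aio_nbytes     = len;
            cb->aio_rw_flags   = RWF_NOWAIT;  /* fail with EAGAIN rather than block */
    }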
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index aa63451..1953f8d 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -26,7 +26,7 @@
 #define AUTOFS_MIN_PROTO_VERSION	AUTOFS_PROTO_VERSION
 
 /*
- * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+ * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed
  * back to the kernel via ioctl from userspace. On architectures where 32- and
  * 64-bit userspace binaries can be executed it's important that the size of
  * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
@@ -49,7 +49,7 @@ struct autofs_packet_hdr {
 
 struct autofs_packet_missing {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	int len;
 	char name[NAME_MAX+1];
 };	
diff --git a/include/uapi/linux/auto_fs4.h b/include/uapi/linux/auto_fs4.h
index 7c6da42..65b72d02 100644
--- a/include/uapi/linux/auto_fs4.h
+++ b/include/uapi/linux/auto_fs4.h
@@ -108,7 +108,7 @@ enum autofs_notify {
 /* v4 multi expire (via pipe) */
 struct autofs_packet_expire_multi {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	int len;
 	char name[NAME_MAX+1];
 };
@@ -123,7 +123,7 @@ union autofs_packet_union {
 /* autofs v5 common packet struct */
 struct autofs_v5_packet {
 	struct autofs_packet_hdr hdr;
-	autofs_wqt_t wait_queue_token;
+	autofs_wqt_t wait_queue_entry_token;
 	__u32 dev;
 	__u64 ino;
 	__u32 uid;
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 4bf9f1e..2f6c77a 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	35
+#define DM_VERSION_MINOR	36
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2016-06-23)"
+#define DM_VERSION_EXTRA	"-ioctl (2017-06-09)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index d179d77..7d4a594 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1486,8 +1486,10 @@ enum ethtool_link_mode_bit_indices {
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
-/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
+/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+ * Update drivers/net/phy/phy.c:phy_speed_to_str() and
+ * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values.
+ */
 #define SPEED_10		10
 #define SPEED_100		100
 #define SPEED_1000		1000
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 813afd6..ec69d55 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -43,6 +43,27 @@
 /* (1U << 31) is reserved for signed error codes */
 
 /*
+ * Set/Get write life time hints. {GET,SET}_RW_HINT operate on the
+ * underlying inode, while {GET,SET}_FILE_RW_HINT operate only on
+ * the specific file.
+ */
+#define F_GET_RW_HINT		(F_LINUX_SPECIFIC_BASE + 11)
+#define F_SET_RW_HINT		(F_LINUX_SPECIFIC_BASE + 12)
+#define F_GET_FILE_RW_HINT	(F_LINUX_SPECIFIC_BASE + 13)
+#define F_SET_FILE_RW_HINT	(F_LINUX_SPECIFIC_BASE + 14)
+
+/*
+ * Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
+ * used to clear any hints previously set.
+ */
+#define RWF_WRITE_LIFE_NOT_SET	0
+#define RWH_WRITE_LIFE_NONE	1
+#define RWH_WRITE_LIFE_SHORT	2
+#define RWH_WRITE_LIFE_MEDIUM	3
+#define RWH_WRITE_LIFE_LONG	4
+#define RWH_WRITE_LIFE_EXTREME	5
+
+/*
  * Types of directory notifications that may be requested.
  */
 #define DN_ACCESS	0x00000001	/* File accessed */
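The hint is exchanged as a 64-bit value passed by pointer, so user space typically wraps it as in the sketch below (if the libc <fcntl.h> does not yet carry these constants they can be taken from <linux/fcntl.h>):

    #include <fcntl.h>
    #include <stdint.h>

    /* Tag the file's inode with a "short lifetime" write hint. */
    static int set_short_lifetime(int fd)
    {
            uint64_t hint = RWH_WRITE_LIFE_SHORT;

            return fcntl(fd, F_SET_RW_HINT, &hint);  /* 0 on success, -1 on error */
    }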
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 24e61a5..27d8c36 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -360,5 +360,9 @@ struct fscrypt_key {
 #define RWF_HIPRI			0x00000001 /* high priority request, poll if possible */
 #define RWF_DSYNC			0x00000002 /* per-IO O_DSYNC */
 #define RWF_SYNC			0x00000004 /* per-IO O_SYNC */
+#define RWF_NOWAIT			0x00000008 /* per-IO, return -EAGAIN if operation would block */
+
+#define RWF_SUPPORTED			(RWF_HIPRI | RWF_DSYNC | RWF_SYNC |\
+					 RWF_NOWAIT)
 
 #endif /* _UAPI_LINUX_FS_H */
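RWF_NOWAIT is for callers that would rather see EAGAIN than sleep on locks, block allocation or dirty-page writeback. A hedged sketch using the preadv2/pwritev2 flags argument (the fallback strategy, and whether a given kernel and filesystem honour the flag, are up to the caller):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <sys/uio.h>

    static ssize_t write_or_defer(int fd, const struct iovec *iov, int cnt, off_t off)
    {
            ssize_t n = pwritev2(fd, iov, cnt, off, RWF_NOWAIT);

            if (n < 0 && errno == EAGAIN) {
                    /* would have blocked: hand the request to a worker instead */
            }
            return n;
    }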
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 201c6644..ef16df0 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -70,8 +70,8 @@ struct keyctl_dh_params {
 };
 
 struct keyctl_kdf_params {
-	char *hashname;
-	char *otherinfo;
+	char __user *hashname;
+	char __user *otherinfo;
 	__u32 otherinfolen;
 	__u32 __spare[8];
 };
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index c8125ec..a3960f9 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -22,6 +22,7 @@ enum {
 	LO_FLAGS_AUTOCLEAR	= 4,
 	LO_FLAGS_PARTSCAN	= 8,
 	LO_FLAGS_DIRECT_IO	= 16,
+	LO_FLAGS_BLOCKSIZE	= 32,
 };
 
 #include <asm/posix_types.h>	/* for __kernel_old_dev_t */
@@ -59,6 +60,8 @@ struct loop_info64 {
 	__u64		   lo_init[2];
 };
 
+#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
+
 /*
  * Loop filter types
  */
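LO_FLAGS_BLOCKSIZE reuses the first lo_init slot, via LO_INFO_BLOCKSIZE(), to carry the requested logical block size. A sketch of how user space might ask for 4 KiB sectors on an already-bound loop device; the use of LOOP_SET_STATUS64 here is an assumption based on the flag living in struct loop_info64, not something the header itself spells out:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/loop.h>

    static int set_loop_blocksize(int loop_fd)
    {
            struct loop_info64 info;

            memset(&info, 0, sizeof(info));
            info.lo_flags = LO_FLAGS_BLOCKSIZE;
            LO_INFO_BLOCKSIZE(&info) = 4096;

            return ioctl(loop_fd, LOOP_SET_STATUS64, &info);
    }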
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index e230af2..a0908f1 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -80,6 +80,8 @@
 #define BTRFS_TEST_MAGIC	0x73727279
 #define NSFS_MAGIC		0x6e736673
 #define BPF_FS_MAGIC		0xcafe4a11
+#define AAFS_MAGIC		0x5a3c69f0
+
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC		0x15013346
 #define BALLOON_KVM_MAGIC	0x13661366
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 155e33f..a50527e 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -41,10 +41,14 @@ enum {
 #define NBD_FLAG_HAS_FLAGS	(1 << 0) /* nbd-server supports flags */
 #define NBD_FLAG_READ_ONLY	(1 << 1) /* device is read-only */
 #define NBD_FLAG_SEND_FLUSH	(1 << 2) /* can flush writeback cache */
+#define NBD_FLAG_SEND_FUA	(1 << 3) /* send FUA (forced unit access) */
 /* there is a gap here to match userspace */
 #define NBD_FLAG_SEND_TRIM	(1 << 5) /* send trim/discard */
 #define NBD_FLAG_CAN_MULTI_CONN	(1 << 8)	/* Server supports multiple connections per export. */
 
+/* values for cmd flags in the upper 16 bits of request type */
+#define NBD_CMD_FLAG_FUA	(1 << 16) /* FUA (forced unit access) op */
+
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT	(1 << 0) /* delete the nbd device on
 						    disconnect. */
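On the wire, the FUA bit travels in the upper 16 bits of the 32-bit request type, next to the NBD_CMD_* opcode; servers advertise support via NBD_FLAG_SEND_FUA in the export flags. A small client-side sketch (the surrounding NBD request-header framing is omitted):

    #include <stdint.h>
    #include <arpa/inet.h>
    #include <linux/nbd.h>

    /* Type field for a write that must reach stable storage before completion. */
    static uint32_t nbd_write_fua_type(void)
    {
            return htonl(NBD_CMD_WRITE | NBD_CMD_FLAG_FUA);
    }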
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 61b7d36..156ee4c 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -343,6 +343,7 @@ enum ovs_key_attr {
 #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
 
 enum ovs_tunnel_key_attr {
+	/* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */
 	OVS_TUNNEL_KEY_ATTR_ID,                 /* be64 Tunnel ID */
 	OVS_TUNNEL_KEY_ATTR_IPV4_SRC,           /* be32 src IP address. */
 	OVS_TUNNEL_KEY_ATTR_IPV4_DST,           /* be32 dst IP address. */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 5f0fe01..e2a6c7b 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -47,5 +47,6 @@
  * For the sched_{set,get}attr() calls
  */
 #define SCHED_FLAG_RESET_ON_FORK	0x01
+#define SCHED_FLAG_RECLAIM		0x02
 
 #endif /* _UAPI_LINUX_SCHED_H */
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 5d59c3e..d2667ec 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -122,6 +122,9 @@ struct serial_rs485 {
 #define SER_RS485_RTS_AFTER_SEND	(1 << 2)	/* Logical level for
 							   RTS pin after sent*/
 #define SER_RS485_RX_DURING_TX		(1 << 4)
+#define SER_RS485_TERMINATE_BUS		(1 << 5)	/* Enable bus
+							   termination
+							   (if supported) */
 	__u32	delay_rts_before_send;	/* Delay before send (milliseconds) */
 	__u32	delay_rts_after_send;	/* Delay after send (milliseconds) */
 	__u32	padding[5];		/* Memory is cheap, new structs
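The new bit is set through the existing struct serial_rs485 ioctls; drivers that cannot control bus termination ignore or reject it. A sketch, assuming the tty driver implements TIOCSRS485:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    static int enable_rs485_with_termination(int tty_fd)
    {
            struct serial_rs485 rs485;

            memset(&rs485, 0, sizeof(rs485));
            rs485.flags = SER_RS485_ENABLED | SER_RS485_TERMINATE_BUS;

            return ioctl(tty_fd, TIOCSRS485, &rs485);
    }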
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 9ec741b..c34a2a3 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -83,7 +83,7 @@
 /* Parisc type numbers. */
 #define PORT_MUX	48
 
-/* Atmel AT91 / AT32 SoC */
+/* Atmel AT91 SoC */
 #define PORT_ATMEL	49
 
 /* Macintosh Zilog type numbers */
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index e75e1b6..09299fc 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -54,7 +54,11 @@ struct itimerval {
 #define CLOCK_BOOTTIME			7
 #define CLOCK_REALTIME_ALARM		8
 #define CLOCK_BOOTTIME_ALARM		9
-#define CLOCK_SGI_CYCLE			10	/* Hardware specific */
+/*
+ * The driver implementing this got removed. The clock ID is kept as a
+ * place holder. Do not reuse!
+ */
+#define CLOCK_SGI_CYCLE			10
 #define CLOCK_TAI			11
 
 #define MAX_CLOCKS			16
diff --git a/include/uapi/linux/tty.h b/include/uapi/linux/tty.h
index 01c4410..cf14553 100644
--- a/include/uapi/linux/tty.h
+++ b/include/uapi/linux/tty.h
@@ -35,5 +35,7 @@
 #define N_TRACESINK	23	/* Trace data routing for MIPI P1149.7 */
 #define N_TRACEROUTER	24	/* Trace data routing for MIPI P1149.7 */
 #define N_NCI		25	/* NFC NCI UART */
+#define N_SPEAKUP	26	/* Speakup communication with synths */
+#define N_NULL		27	/* Null ldisc used for error handling */
 
 #endif /* _UAPI_LINUX_TTY_H */
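Line disciplines are selected with the TIOCSETD ioctl, so the new numbers are used like any other N_* value. A sketch for the null ldisc (N_NULL is primarily a kernel-side error-handling target; whether a given kernel lets user space select it is not guaranteed by the header alone):

    #include <sys/ioctl.h>
    #include <linux/tty.h>

    static int set_null_ldisc(int tty_fd)
    {
            int ldisc = N_NULL;

            return ioctl(tty_fd, TIOCSETD, &ldisc);
    }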
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 062606f..f913d08 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -275,13 +275,14 @@ struct usb_functionfs_event {
 #define	FUNCTIONFS_INTERFACE_REVMAP	_IO('g', 128)
 
 /*
- * Returns real bEndpointAddress of an endpoint.  If function is not
- * active returns -ENODEV.
+ * Returns real bEndpointAddress of an endpoint. If endpoint shuts down
+ * during the call, returns -ESHUTDOWN.
  */
 #define	FUNCTIONFS_ENDPOINT_REVMAP	_IO('g', 129)
 
 /*
- * Returns endpoint descriptor. If function is not active returns -ENODEV.
+ * Returns endpoint descriptor. If endpoint shuts down during the call,
+ * returns -ESHUTDOWN.
  */
 #define	FUNCTIONFS_ENDPOINT_DESC	_IOR('g', 130, \
 					     struct usb_endpoint_descriptor)
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index a8653a6..0bbfd4a 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -156,6 +156,11 @@ struct usbdevfs_streams {
 	unsigned char eps[0];
 };
 
+/*
+ * USB_SPEED_* values returned by USBDEVFS_GET_SPEED are defined in
+ * linux/usb/ch9.h
+ */
+
 #define USBDEVFS_CONTROL           _IOWR('U', 0, struct usbdevfs_ctrltransfer)
 #define USBDEVFS_CONTROL32           _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
 #define USBDEVFS_BULK              _IOWR('U', 2, struct usbdevfs_bulktransfer)
@@ -190,5 +195,6 @@ struct usbdevfs_streams {
 #define USBDEVFS_ALLOC_STREAMS     _IOR('U', 28, struct usbdevfs_streams)
 #define USBDEVFS_FREE_STREAMS      _IOR('U', 29, struct usbdevfs_streams)
 #define USBDEVFS_DROP_PRIVILEGES   _IOW('U', 30, __u32)
+#define USBDEVFS_GET_SPEED         _IO('U', 31)
 
 #endif /* _UAPI_LINUX_USBDEVICE_FS_H */
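Unlike most usbfs ioctls, this one returns its result directly as the ioctl return value: one of the USB_SPEED_* enumerators from linux/usb/ch9.h, or a negative value on error. A sketch, assuming usbfs_fd refers to an open /dev/bus/usb/BBB/DDD node:

    #include <sys/ioctl.h>
    #include <linux/usbdevice_fs.h>
    #include <linux/usb/ch9.h>

    static int device_speed(int usbfs_fd)
    {
            return ioctl(usbfs_fd, USBDEVFS_GET_SPEED, 0);  /* e.g. USB_SPEED_HIGH */
    }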
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index 3738e5f..8ef82f4 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -22,33 +22,21 @@
 
 typedef struct {
 	__u8 b[16];
-} uuid_le;
+} guid_t;
 
-typedef struct {
-	__u8 b[16];
-} uuid_be;
-
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
-((uuid_le)								\
+#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)			\
+((guid_t)								\
 {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
    (b) & 0xff, ((b) >> 8) & 0xff,					\
    (c) & 0xff, ((c) >> 8) & 0xff,					\
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
-#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
-((uuid_be)								\
-{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
-   ((b) >> 8) & 0xff, (b) & 0xff,					\
-   ((c) >> 8) & 0xff, (c) & 0xff,					\
-   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-
+/* backwards compatibility, don't use in new code */
+typedef guid_t uuid_le;
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
+	GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
 #define NULL_UUID_LE							\
 	UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,	\
-		0x00, 0x00, 0x00, 0x00)
-
-#define NULL_UUID_BE							\
-	UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,	\
-		0x00, 0x00, 0x00, 0x00)
-
+	     0x00, 0x00, 0x00, 0x00)
 
 #endif /* _UAPI_LINUX_UUID_H_ */
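New code is expected to use guid_t and GUID_INIT() directly; uuid_le and UUID_LE survive only as compatibility aliases for existing users. A sketch with made-up field values:

    #include <linux/uuid.h>

    static const guid_t example_guid =
            GUID_INIT(0x12345678, 0x9abc, 0xdef0,
                      0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);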
diff --git a/include/uapi/linux/vtpm_proxy.h b/include/uapi/linux/vtpm_proxy.h
index a69e991..58ac73c 100644
--- a/include/uapi/linux/vtpm_proxy.h
+++ b/include/uapi/linux/vtpm_proxy.h
@@ -46,4 +46,8 @@ struct vtpm_proxy_new_dev {
 
 #define VTPM_PROXY_IOC_NEW_DEV	_IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev)
 
+/* vendor specific commands to set locality */
+#define TPM2_CC_SET_LOCALITY	0x20001000
+#define TPM_ORD_SET_LOCALITY	0x20001000
+
 #endif /* _UAPI_LINUX_VTPM_PROXY_H */
diff --git a/init/Kconfig b/init/Kconfig
index 1d3475f..ee0f03b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -472,354 +472,7 @@
 
 endmenu # "CPU/Task time and stats accounting"
 
-menu "RCU Subsystem"
-
-config TREE_RCU
-	bool
-	default y if !PREEMPT && SMP
-	help
-	  This option selects the RCU implementation that is
-	  designed for very large SMP system with hundreds or
-	  thousands of CPUs.  It also scales down nicely to
-	  smaller systems.
-
-config PREEMPT_RCU
-	bool
-	default y if PREEMPT
-	help
-	  This option selects the RCU implementation that is
-	  designed for very large SMP systems with hundreds or
-	  thousands of CPUs, but for which real-time response
-	  is also required.  It also scales down nicely to
-	  smaller systems.
-
-	  Select this option if you are unsure.
-
-config TINY_RCU
-	bool
-	default y if !PREEMPT && !SMP
-	help
-	  This option selects the RCU implementation that is
-	  designed for UP systems from which real-time response
-	  is not required.  This option greatly reduces the
-	  memory footprint of RCU.
-
-config RCU_EXPERT
-	bool "Make expert-level adjustments to RCU configuration"
-	default n
-	help
-	  This option needs to be enabled if you wish to make
-	  expert-level adjustments to RCU configuration.  By default,
-	  no such adjustments can be made, which has the often-beneficial
-	  side-effect of preventing "make oldconfig" from asking you all
-	  sorts of detailed questions about how you would like numerous
-	  obscure RCU options to be set up.
-
-	  Say Y if you need to make expert-level adjustments to RCU.
-
-	  Say N if you are unsure.
-
-config SRCU
-	bool
-	default y
-	help
-	  This option selects the sleepable version of RCU. This version
-	  permits arbitrary sleeping or blocking within RCU read-side critical
-	  sections.
-
-config CLASSIC_SRCU
-	bool "Use v4.11 classic SRCU implementation"
-	default n
-	depends on RCU_EXPERT && SRCU
-	help
-	  This option selects the traditional well-tested classic SRCU
-	  implementation from v4.11, as might be desired for enterprise
-	  Linux distributions.  Without this option, the shiny new
-	  Tiny SRCU and Tree SRCU implementations are used instead.
-	  At some point, it is hoped that Tiny SRCU and Tree SRCU
-	  will accumulate enough test time and confidence to allow
-	  Classic SRCU to be dropped entirely.
-
-	  Say Y if you need a rock-solid SRCU.
-
-	  Say N if you would like help test Tree SRCU.
-
-config TINY_SRCU
-	bool
-	default y if SRCU && TINY_RCU && !CLASSIC_SRCU
-	help
-	  This option selects the single-CPU non-preemptible version of SRCU.
-
-config TREE_SRCU
-	bool
-	default y if SRCU && !TINY_RCU && !CLASSIC_SRCU
-	help
-	  This option selects the full-fledged version of SRCU.
-
-config TASKS_RCU
-	bool
-	default n
-	select SRCU
-	help
-	  This option enables a task-based RCU implementation that uses
-	  only voluntary context switch (not preemption!), idle, and
-	  user-mode execution as quiescent states.
-
-config RCU_STALL_COMMON
-	def_bool ( TREE_RCU || PREEMPT_RCU || RCU_TRACE )
-	help
-	  This option enables RCU CPU stall code that is common between
-	  the TINY and TREE variants of RCU.  The purpose is to allow
-	  the tiny variants to disable RCU CPU stall warnings, while
-	  making these warnings mandatory for the tree variants.
-
-config RCU_NEED_SEGCBLIST
-	def_bool ( TREE_RCU || PREEMPT_RCU || TINY_SRCU || TREE_SRCU )
-
-config CONTEXT_TRACKING
-       bool
-
-config CONTEXT_TRACKING_FORCE
-	bool "Force context tracking"
-	depends on CONTEXT_TRACKING
-	default y if !NO_HZ_FULL
-	help
-	  The major pre-requirement for full dynticks to work is to
-	  support the context tracking subsystem. But there are also
-	  other dependencies to provide in order to make the full
-	  dynticks working.
-
-	  This option stands for testing when an arch implements the
-	  context tracking backend but doesn't yet fullfill all the
-	  requirements to make the full dynticks feature working.
-	  Without the full dynticks, there is no way to test the support
-	  for context tracking and the subsystems that rely on it: RCU
-	  userspace extended quiescent state and tickless cputime
-	  accounting. This option copes with the absence of the full
-	  dynticks subsystem by forcing the context tracking on all
-	  CPUs in the system.
-
-	  Say Y only if you're working on the development of an
-	  architecture backend for the context tracking.
-
-	  Say N otherwise, this option brings an overhead that you
-	  don't want in production.
-
-
-config RCU_FANOUT
-	int "Tree-based hierarchical RCU fanout value"
-	range 2 64 if 64BIT
-	range 2 32 if !64BIT
-	depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
-	default 64 if 64BIT
-	default 32 if !64BIT
-	help
-	  This option controls the fanout of hierarchical implementations
-	  of RCU, allowing RCU to work efficiently on machines with
-	  large numbers of CPUs.  This value must be at least the fourth
-	  root of NR_CPUS, which allows NR_CPUS to be insanely large.
-	  The default value of RCU_FANOUT should be used for production
-	  systems, but if you are stress-testing the RCU implementation
-	  itself, small RCU_FANOUT values allow you to test large-system
-	  code paths on small(er) systems.
-
-	  Select a specific number if testing RCU itself.
-	  Take the default if unsure.
-
-config RCU_FANOUT_LEAF
-	int "Tree-based hierarchical RCU leaf-level fanout value"
-	range 2 64 if 64BIT
-	range 2 32 if !64BIT
-	depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
-	default 16
-	help
-	  This option controls the leaf-level fanout of hierarchical
-	  implementations of RCU, and allows trading off cache misses
-	  against lock contention.  Systems that synchronize their
-	  scheduling-clock interrupts for energy-efficiency reasons will
-	  want the default because the smaller leaf-level fanout keeps
-	  lock contention levels acceptably low.  Very large systems
-	  (hundreds or thousands of CPUs) will instead want to set this
-	  value to the maximum value possible in order to reduce the
-	  number of cache misses incurred during RCU's grace-period
-	  initialization.  These systems tend to run CPU-bound, and thus
-	  are not helped by synchronized interrupts, and thus tend to
-	  skew them, which reduces lock contention enough that large
-	  leaf-level fanouts work well.  That said, setting leaf-level
-	  fanout to a large number will likely cause problematic
-	  lock contention on the leaf-level rcu_node structures unless
-	  you boot with the skew_tick kernel parameter.
-
-	  Select a specific number if testing RCU itself.
-
-	  Select the maximum permissible value for large systems, but
-	  please understand that you may also need to set the skew_tick
-	  kernel boot parameter to avoid contention on the rcu_node
-	  structure's locks.
-
-	  Take the default if unsure.
-
-config RCU_FAST_NO_HZ
-	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on NO_HZ_COMMON && SMP && RCU_EXPERT
-	default n
-	help
-	  This option permits CPUs to enter dynticks-idle state even if
-	  they have RCU callbacks queued, and prevents RCU from waking
-	  these CPUs up more than roughly once every four jiffies (by
-	  default, you can adjust this using the rcutree.rcu_idle_gp_delay
-	  parameter), thus improving energy efficiency.  On the other
-	  hand, this option increases the duration of RCU grace periods,
-	  for example, slowing down synchronize_rcu().
-
-	  Say Y if energy efficiency is critically important, and you
-	  	don't care about increased grace-period durations.
-
-	  Say N if you are unsure.
-
-config TREE_RCU_TRACE
-	def_bool RCU_TRACE && ( TREE_RCU || PREEMPT_RCU )
-	select DEBUG_FS
-	help
-	  This option provides tracing for the TREE_RCU and
-	  PREEMPT_RCU implementations, permitting Makefile to
-	  trivially select kernel/rcutree_trace.c.
-
-config RCU_BOOST
-	bool "Enable RCU priority boosting"
-	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
-	default n
-	help
-	  This option boosts the priority of preempted RCU readers that
-	  block the current preemptible RCU grace period for too long.
-	  This option also prevents heavy loads from blocking RCU
-	  callback invocation for all flavors of RCU.
-
-	  Say Y here if you are working with real-time apps or heavy loads
-	  Say N here if you are unsure.
-
-config RCU_KTHREAD_PRIO
-	int "Real-time priority to use for RCU worker threads"
-	range 1 99 if RCU_BOOST
-	range 0 99 if !RCU_BOOST
-	default 1 if RCU_BOOST
-	default 0 if !RCU_BOOST
-	depends on RCU_EXPERT
-	help
-	  This option specifies the SCHED_FIFO priority value that will be
-	  assigned to the rcuc/n and rcub/n threads and is also the value
-	  used for RCU_BOOST (if enabled). If you are working with a
-	  real-time application that has one or more CPU-bound threads
-	  running at a real-time priority level, you should set
-	  RCU_KTHREAD_PRIO to a priority higher than the highest-priority
-	  real-time CPU-bound application thread.  The default RCU_KTHREAD_PRIO
-	  value of 1 is appropriate in the common case, which is real-time
-	  applications that do not have any CPU-bound threads.
-
-	  Some real-time applications might not have a single real-time
-	  thread that saturates a given CPU, but instead might have
-	  multiple real-time threads that, taken together, fully utilize
-	  that CPU.  In this case, you should set RCU_KTHREAD_PRIO to
-	  a priority higher than the lowest-priority thread that is
-	  conspiring to prevent the CPU from running any non-real-time
-	  tasks.  For example, if one thread at priority 10 and another
-	  thread at priority 5 are between themselves fully consuming
-	  the CPU time on a given CPU, then RCU_KTHREAD_PRIO should be
-	  set to priority 6 or higher.
-
-	  Specify the real-time priority, or take the default if unsure.
-
-config RCU_BOOST_DELAY
-	int "Milliseconds to delay boosting after RCU grace-period start"
-	range 0 3000
-	depends on RCU_BOOST
-	default 500
-	help
-	  This option specifies the time to wait after the beginning of
-	  a given grace period before priority-boosting preempted RCU
-	  readers blocking that grace period.  Note that any RCU reader
-	  blocking an expedited RCU grace period is boosted immediately.
-
-	  Accept the default if unsure.
-
-config RCU_NOCB_CPU
-	bool "Offload RCU callback processing from boot-selected CPUs"
-	depends on TREE_RCU || PREEMPT_RCU
-	depends on RCU_EXPERT || NO_HZ_FULL
-	default n
-	help
-	  Use this option to reduce OS jitter for aggressive HPC or
-	  real-time workloads.	It can also be used to offload RCU
-	  callback invocation to energy-efficient CPUs in battery-powered
-	  asymmetric multiprocessors.
-
-	  This option offloads callback invocation from the set of
-	  CPUs specified at boot time by the rcu_nocbs parameter.
-	  For each such CPU, a kthread ("rcuox/N") will be created to
-	  invoke callbacks, where the "N" is the CPU being offloaded,
-	  and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
-	  "s" for RCU-sched.  Nothing prevents this kthread from running
-	  on the specified CPUs, but (1) the kthreads may be preempted
-	  between each callback, and (2) affinity or cgroups can be used
-	  to force the kthreads to run on whatever set of CPUs is desired.
-
-	  Say Y here if you want to help to debug reduced OS jitter.
-	  Say N here if you are unsure.
-
-choice
-	prompt "Build-forced no-CBs CPUs"
-	default RCU_NOCB_CPU_NONE
-	depends on RCU_NOCB_CPU
-	help
-	  This option allows no-CBs CPUs (whose RCU callbacks are invoked
-	  from kthreads rather than from softirq context) to be specified
-	  at build time.  Additional no-CBs CPUs may be specified by
-	  the rcu_nocbs= boot parameter.
-
-config RCU_NOCB_CPU_NONE
-	bool "No build_forced no-CBs CPUs"
-	help
-	  This option does not force any of the CPUs to be no-CBs CPUs.
-	  Only CPUs designated by the rcu_nocbs= boot parameter will be
-	  no-CBs CPUs, whose RCU callbacks will be invoked by per-CPU
-	  kthreads whose names begin with "rcuo".  All other CPUs will
-	  invoke their own RCU callbacks in softirq context.
-
-	  Select this option if you want to choose no-CBs CPUs at
-	  boot time, for example, to allow testing of different no-CBs
-	  configurations without having to rebuild the kernel each time.
-
-config RCU_NOCB_CPU_ZERO
-	bool "CPU 0 is a build_forced no-CBs CPU"
-	help
-	  This option forces CPU 0 to be a no-CBs CPU, so that its RCU
-	  callbacks are invoked by a per-CPU kthread whose name begins
-	  with "rcuo".	Additional CPUs may be designated as no-CBs
-	  CPUs using the rcu_nocbs= boot parameter will be no-CBs CPUs.
-	  All other CPUs will invoke their own RCU callbacks in softirq
-	  context.
-
-	  Select this if CPU 0 needs to be a no-CBs CPU for real-time
-	  or energy-efficiency reasons, but the real reason it exists
-	  is to ensure that randconfig testing covers mixed systems.
-
-config RCU_NOCB_CPU_ALL
-	bool "All CPUs are build_forced no-CBs CPUs"
-	help
-	  This option forces all CPUs to be no-CBs CPUs.  The rcu_nocbs=
-	  boot parameter will be ignored.  All CPUs' RCU callbacks will
-	  be executed in the context of per-CPU rcuo kthreads created for
-	  this purpose.  Assuming that the kthreads whose names start with
-	  "rcuo" are bound to "housekeeping" CPUs, this reduces OS jitter
-	  on the remaining CPUs, but might decrease memory locality during
-	  RCU-callback invocation, thus potentially degrading throughput.
-
-	  Select this if all CPUs need to be no-CBs CPUs for real-time
-	  or energy-efficiency reasons.
-
-endchoice
-
-endmenu # "RCU Subsystem"
+source "kernel/rcu/Kconfig"
 
 config BUILD_BIN2C
 	bool
@@ -1156,6 +809,7 @@
 
 config CPUSETS
 	bool "Cpuset controller"
+	depends on SMP
 	help
 	  This option will let you create and manage CPUSETs which
 	  allow dynamically partitioning a system into sets of CPUs and
diff --git a/init/main.c b/init/main.c
index f866510..df58a41 100644
--- a/init/main.c
+++ b/init/main.c
@@ -389,6 +389,7 @@ static __initdata DECLARE_COMPLETION(kthreadd_done);
 
 static noinline void __ref rest_init(void)
 {
+	struct task_struct *tsk;
 	int pid;
 
 	rcu_scheduler_starting();
@@ -397,12 +398,32 @@ static noinline void __ref rest_init(void)
 	 * the init task will end up wanting to create kthreads, which, if
 	 * we schedule it before we create kthreadd, will OOPS.
 	 */
-	kernel_thread(kernel_init, NULL, CLONE_FS);
+	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+	/*
+	 * Pin init on the boot CPU. Task migration is not properly working
+	 * until sched_init_smp() has been run. It will set the allowed
+	 * CPUs for init to the non isolated CPUs.
+	 */
+	rcu_read_lock();
+	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
+	rcu_read_unlock();
+
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 	rcu_read_unlock();
+
+	/*
+	 * Enable might_sleep() and smp_processor_id() checks.
+	 * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+	 * kernel_thread() would trigger might_sleep() splats. With
+	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
+	 * already, but it's stuck on the kthreadd_done completion.
+	 */
+	system_state = SYSTEM_SCHEDULING;
+
 	complete(&kthreadd_done);
 
 	/*
@@ -1015,10 +1036,6 @@ static noinline void __init kernel_init_freeable(void)
 	 * init can allocate pages on any node
 	 */
 	set_mems_allowed(node_states[N_MEMORY]);
-	/*
-	 * init can run on any cpu.
-	 */
-	set_cpus_allowed_ptr(current, cpu_all_mask);
 
 	cad_pid = task_pid(current);
 
diff --git a/kernel/async.c b/kernel/async.c
index d2edd6e..2cbd3dd 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -114,14 +114,14 @@ static void async_run_entry_fn(struct work_struct *work)
 	ktime_t uninitialized_var(calltime), delta, rettime;
 
 	/* 1) run (and print duration) */
-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+	if (initcall_debug && system_state < SYSTEM_RUNNING) {
 		pr_debug("calling  %lli_%pF @ %i\n",
 			(long long)entry->cookie,
 			entry->func, task_pid_nr(current));
 		calltime = ktime_get();
 	}
 	entry->func(entry->data, entry->cookie);
-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+	if (initcall_debug && system_state < SYSTEM_RUNNING) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
 		pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
@@ -284,14 +284,14 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
 {
 	ktime_t uninitialized_var(starttime), delta, endtime;
 
-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+	if (initcall_debug && system_state < SYSTEM_RUNNING) {
 		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
 		starttime = ktime_get();
 	}
 
 	wait_event(async_done, lowest_in_progress(domain) >= cookie);
 
-	if (initcall_debug && system_state == SYSTEM_BOOTING) {
+	if (initcall_debug && system_state < SYSTEM_RUNNING) {
 		endtime = ktime_get();
 		delta = ktime_sub(endtime, starttime);
 
diff --git a/kernel/audit.c b/kernel/audit.c
index 4b7d498..833267b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -575,12 +575,16 @@ static void kauditd_retry_skb(struct sk_buff *skb)
 
 /**
  * auditd_reset - Disconnect the auditd connection
+ * @ac: auditd connection state
  *
  * Description:
  * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
+ * hold queue in case auditd reconnects.  It is important to note that the @ac
+ * pointer should never be dereferenced inside this function as it may be NULL
+ * or invalid, you can only compare the memory address!  If @ac is NULL then
+ * the connection will always be reset.
  */
-static void auditd_reset(void)
+static void auditd_reset(const struct auditd_connection *ac)
 {
 	unsigned long flags;
 	struct sk_buff *skb;
@@ -590,17 +594,21 @@ static void auditd_reset(void)
 	spin_lock_irqsave(&auditd_conn_lock, flags);
 	ac_old = rcu_dereference_protected(auditd_conn,
 					   lockdep_is_held(&auditd_conn_lock));
+	if (ac && ac != ac_old) {
+		/* someone already registered a new auditd connection */
+		spin_unlock_irqrestore(&auditd_conn_lock, flags);
+		return;
+	}
 	rcu_assign_pointer(auditd_conn, NULL);
 	spin_unlock_irqrestore(&auditd_conn_lock, flags);
 
 	if (ac_old)
 		call_rcu(&ac_old->rcu, auditd_conn_free);
 
-	/* flush all of the main and retry queues to the hold queue */
+	/* flush the retry queue to the hold queue, but don't touch the main
+	 * queue since we need to process that normally for multicast */
 	while ((skb = skb_dequeue(&audit_retry_queue)))
 		kauditd_hold_skb(skb);
-	while ((skb = skb_dequeue(&audit_queue)))
-		kauditd_hold_skb(skb);
 }
 
 /**
@@ -649,8 +657,8 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
 	return rc;
 
 err:
-	if (rc == -ECONNREFUSED)
-		auditd_reset();
+	if (ac && rc == -ECONNREFUSED)
+		auditd_reset(ac);
 	return rc;
 }
 
@@ -795,9 +803,9 @@ static int kauditd_thread(void *dummy)
 		rc = kauditd_send_queue(sk, portid,
 					&audit_hold_queue, UNICAST_RETRIES,
 					NULL, kauditd_rehold_skb);
-		if (rc < 0) {
+		if (ac && rc < 0) {
 			sk = NULL;
-			auditd_reset();
+			auditd_reset(ac);
 			goto main_queue;
 		}
 
@@ -805,9 +813,9 @@ static int kauditd_thread(void *dummy)
 		rc = kauditd_send_queue(sk, portid,
 					&audit_retry_queue, UNICAST_RETRIES,
 					NULL, kauditd_hold_skb);
-		if (rc < 0) {
+		if (ac && rc < 0) {
 			sk = NULL;
-			auditd_reset();
+			auditd_reset(ac);
 			goto main_queue;
 		}
 
@@ -815,12 +823,13 @@ static int kauditd_thread(void *dummy)
 		/* process the main queue - do the multicast send and attempt
 		 * unicast, dump failed record sends to the retry queue; if
 		 * sk == NULL due to previous failures we will just do the
-		 * multicast send and move the record to the retry queue */
+		 * multicast send and move the record to the hold queue */
 		rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
 					kauditd_send_multicast_skb,
-					kauditd_retry_skb);
-		if (sk == NULL || rc < 0)
-			auditd_reset();
+					(sk ?
+					 kauditd_retry_skb : kauditd_hold_skb));
+		if (ac && rc < 0)
+			auditd_reset(ac);
 		sk = NULL;
 
 		/* drop our netns reference, no auditd sends past this line */
@@ -1230,7 +1239,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 								auditd_pid, 1);
 
 				/* unregister the auditd connection */
-				auditd_reset();
+				auditd_reset(NULL);
 			}
 		}
 		if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
@@ -1999,22 +2008,10 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
 
 static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
 {
-	kernel_cap_t *perm = &name->fcap.permitted;
-	kernel_cap_t *inh = &name->fcap.inheritable;
-	int log = 0;
-
-	if (!cap_isclear(*perm)) {
-		audit_log_cap(ab, "cap_fp", perm);
-		log = 1;
-	}
-	if (!cap_isclear(*inh)) {
-		audit_log_cap(ab, "cap_fi", inh);
-		log = 1;
-	}
-
-	if (log)
-		audit_log_format(ab, " cap_fe=%d cap_fver=%x",
-				 name->fcap.fE, name->fcap_ver);
+	audit_log_cap(ab, "cap_fp", &name->fcap.permitted);
+	audit_log_cap(ab, "cap_fi", &name->fcap.inheritable);
+	audit_log_format(ab, " cap_fe=%d cap_fver=%x",
+			 name->fcap.fE, name->fcap_ver);
 }
 
 static inline int audit_copy_fcaps(struct audit_names *name,
diff --git a/kernel/audit.h b/kernel/audit.h
index ddfce2e..b331d9b 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -68,6 +68,7 @@ struct audit_cap_data {
 		unsigned int	fE;		/* effective bit of file cap */
 		kernel_cap_t	effective;	/* effective set of process */
 	};
+	kernel_cap_t		ambient;
 };
 
 /* When fs/namei.c:getname() is called, we store the pointer in name and bump
@@ -247,13 +248,13 @@ struct audit_netlink_list {
 	struct sk_buff_head q;
 };
 
-int audit_send_list(void *);
+int audit_send_list(void *_dest);
 
 extern int selinux_audit_rule_update(void);
 
 extern struct mutex audit_filter_mutex;
-extern int audit_del_rule(struct audit_entry *);
-extern void audit_free_rule_rcu(struct rcu_head *);
+extern int audit_del_rule(struct audit_entry *entry);
+extern void audit_free_rule_rcu(struct rcu_head *head);
 extern struct list_head audit_filter_list[];
 
 extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
@@ -301,17 +302,17 @@ extern int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark
 #endif /* CONFIG_AUDIT_WATCH */
 
 #ifdef CONFIG_AUDIT_TREE
-extern struct audit_chunk *audit_tree_lookup(const struct inode *);
-extern void audit_put_chunk(struct audit_chunk *);
-extern bool audit_tree_match(struct audit_chunk *, struct audit_tree *);
-extern int audit_make_tree(struct audit_krule *, char *, u32);
-extern int audit_add_tree_rule(struct audit_krule *);
-extern int audit_remove_tree_rule(struct audit_krule *);
+extern struct audit_chunk *audit_tree_lookup(const struct inode *inode);
+extern void audit_put_chunk(struct audit_chunk *chunk);
+extern bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree);
+extern int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op);
+extern int audit_add_tree_rule(struct audit_krule *rule);
+extern int audit_remove_tree_rule(struct audit_krule *rule);
 extern void audit_trim_trees(void);
 extern int audit_tag_tree(char *old, char *new);
-extern const char *audit_tree_path(struct audit_tree *);
-extern void audit_put_tree(struct audit_tree *);
-extern void audit_kill_trees(struct list_head *);
+extern const char *audit_tree_path(struct audit_tree *tree);
+extern void audit_put_tree(struct audit_tree *tree);
+extern void audit_kill_trees(struct list_head *list);
 #else
 #define audit_remove_tree_rule(rule) BUG()
 #define audit_add_tree_rule(rule) -EINVAL
@@ -323,7 +324,7 @@ extern void audit_kill_trees(struct list_head *);
 #define audit_kill_trees(list) BUG()
 #endif
 
-extern char *audit_unpack_string(void **, size_t *, size_t);
+extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
 
 extern pid_t audit_sig_pid;
 extern kuid_t audit_sig_uid;
@@ -333,7 +334,7 @@ extern int audit_filter(int msgtype, unsigned int listtype);
 
 #ifdef CONFIG_AUDITSYSCALL
 extern int audit_signal_info(int sig, struct task_struct *t);
-extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
+extern void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx);
 extern struct list_head *audit_killed_trees(void);
 #else
 #define audit_signal_info(s,t) AUDIT_DISABLED
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index bb724ba..3260ba2 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1261,6 +1261,7 @@ static void show_special(struct audit_context *context, int *call_panic)
 		audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable);
 		audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted);
 		audit_log_cap(ab, "cap_pe", &context->capset.cap.effective);
+		audit_log_cap(ab, "cap_pa", &context->capset.cap.ambient);
 		break;
 	case AUDIT_MMAP:
 		audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd,
@@ -1382,9 +1383,11 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 			audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted);
 			audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable);
 			audit_log_cap(ab, "old_pe", &axs->old_pcap.effective);
-			audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted);
-			audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable);
-			audit_log_cap(ab, "new_pe", &axs->new_pcap.effective);
+			audit_log_cap(ab, "old_pa", &axs->old_pcap.ambient);
+			audit_log_cap(ab, "pp", &axs->new_pcap.permitted);
+			audit_log_cap(ab, "pi", &axs->new_pcap.inheritable);
+			audit_log_cap(ab, "pe", &axs->new_pcap.effective);
+			audit_log_cap(ab, "pa", &axs->new_pcap.ambient);
 			break; }
 
 		}
@@ -2342,10 +2345,12 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
 	ax->old_pcap.permitted   = old->cap_permitted;
 	ax->old_pcap.inheritable = old->cap_inheritable;
 	ax->old_pcap.effective   = old->cap_effective;
+	ax->old_pcap.ambient     = old->cap_ambient;
 
 	ax->new_pcap.permitted   = new->cap_permitted;
 	ax->new_pcap.inheritable = new->cap_inheritable;
 	ax->new_pcap.effective   = new->cap_effective;
+	ax->new_pcap.ambient     = new->cap_ambient;
 	return 0;
 }
 
@@ -2364,6 +2369,7 @@ void __audit_log_capset(const struct cred *new, const struct cred *old)
 	context->capset.cap.effective   = new->cap_effective;
 	context->capset.cap.inheritable = new->cap_effective;
 	context->capset.cap.permitted   = new->cap_permitted;
+	context->capset.cap.ambient     = new->cap_ambient;
 	context->type = AUDIT_CAPSET;
 }
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 339c8a1..a8a7256 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -989,6 +989,11 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	if (err)
 		return err;
 
+	if (is_pointer_value(env, insn->src_reg)) {
+		verbose("R%d leaks addr into mem\n", insn->src_reg);
+		return -EACCES;
+	}
+
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c3c9a0e..8d4e85e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
 	lockdep_assert_held(&cgroup_mutex);
 
+	if (css->flags & CSS_DYING)
+		return;
+
+	css->flags |= CSS_DYING;
+
 	/*
 	 * This must happen before css is disassociated with its cgroup.
 	 * See seq_css() for details.
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f6501f4f..ae64341 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-	return test_bit(CS_ONLINE, &cs->flags);
+	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
diff --git a/kernel/compat.c b/kernel/compat.c
index 933bcb3..ebd8bdc 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -30,102 +30,68 @@
 
 #include <linux/uaccess.h>
 
-static int compat_get_timex(struct timex *txc, struct compat_timex __user *utp)
+int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
 {
-	memset(txc, 0, sizeof(struct timex));
+	struct compat_timex tx32;
 
-	if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) ||
-			__get_user(txc->modes, &utp->modes) ||
-			__get_user(txc->offset, &utp->offset) ||
-			__get_user(txc->freq, &utp->freq) ||
-			__get_user(txc->maxerror, &utp->maxerror) ||
-			__get_user(txc->esterror, &utp->esterror) ||
-			__get_user(txc->status, &utp->status) ||
-			__get_user(txc->constant, &utp->constant) ||
-			__get_user(txc->precision, &utp->precision) ||
-			__get_user(txc->tolerance, &utp->tolerance) ||
-			__get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
-			__get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
-			__get_user(txc->tick, &utp->tick) ||
-			__get_user(txc->ppsfreq, &utp->ppsfreq) ||
-			__get_user(txc->jitter, &utp->jitter) ||
-			__get_user(txc->shift, &utp->shift) ||
-			__get_user(txc->stabil, &utp->stabil) ||
-			__get_user(txc->jitcnt, &utp->jitcnt) ||
-			__get_user(txc->calcnt, &utp->calcnt) ||
-			__get_user(txc->errcnt, &utp->errcnt) ||
-			__get_user(txc->stbcnt, &utp->stbcnt))
+	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
 		return -EFAULT;
 
+	txc->modes = tx32.modes;
+	txc->offset = tx32.offset;
+	txc->freq = tx32.freq;
+	txc->maxerror = tx32.maxerror;
+	txc->esterror = tx32.esterror;
+	txc->status = tx32.status;
+	txc->constant = tx32.constant;
+	txc->precision = tx32.precision;
+	txc->tolerance = tx32.tolerance;
+	txc->time.tv_sec = tx32.time.tv_sec;
+	txc->time.tv_usec = tx32.time.tv_usec;
+	txc->tick = tx32.tick;
+	txc->ppsfreq = tx32.ppsfreq;
+	txc->jitter = tx32.jitter;
+	txc->shift = tx32.shift;
+	txc->stabil = tx32.stabil;
+	txc->jitcnt = tx32.jitcnt;
+	txc->calcnt = tx32.calcnt;
+	txc->errcnt = tx32.errcnt;
+	txc->stbcnt = tx32.stbcnt;
+
 	return 0;
 }
 
-static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
+int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc)
 {
-	if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) ||
-			__put_user(txc->modes, &utp->modes) ||
-			__put_user(txc->offset, &utp->offset) ||
-			__put_user(txc->freq, &utp->freq) ||
-			__put_user(txc->maxerror, &utp->maxerror) ||
-			__put_user(txc->esterror, &utp->esterror) ||
-			__put_user(txc->status, &utp->status) ||
-			__put_user(txc->constant, &utp->constant) ||
-			__put_user(txc->precision, &utp->precision) ||
-			__put_user(txc->tolerance, &utp->tolerance) ||
-			__put_user(txc->time.tv_sec, &utp->time.tv_sec) ||
-			__put_user(txc->time.tv_usec, &utp->time.tv_usec) ||
-			__put_user(txc->tick, &utp->tick) ||
-			__put_user(txc->ppsfreq, &utp->ppsfreq) ||
-			__put_user(txc->jitter, &utp->jitter) ||
-			__put_user(txc->shift, &utp->shift) ||
-			__put_user(txc->stabil, &utp->stabil) ||
-			__put_user(txc->jitcnt, &utp->jitcnt) ||
-			__put_user(txc->calcnt, &utp->calcnt) ||
-			__put_user(txc->errcnt, &utp->errcnt) ||
-			__put_user(txc->stbcnt, &utp->stbcnt) ||
-			__put_user(txc->tai, &utp->tai))
+	struct compat_timex tx32;
+
+	memset(&tx32, 0, sizeof(struct compat_timex));
+	tx32.modes = txc->modes;
+	tx32.offset = txc->offset;
+	tx32.freq = txc->freq;
+	tx32.maxerror = txc->maxerror;
+	tx32.esterror = txc->esterror;
+	tx32.status = txc->status;
+	tx32.constant = txc->constant;
+	tx32.precision = txc->precision;
+	tx32.tolerance = txc->tolerance;
+	tx32.time.tv_sec = txc->time.tv_sec;
+	tx32.time.tv_usec = txc->time.tv_usec;
+	tx32.tick = txc->tick;
+	tx32.ppsfreq = txc->ppsfreq;
+	tx32.jitter = txc->jitter;
+	tx32.shift = txc->shift;
+	tx32.stabil = txc->stabil;
+	tx32.jitcnt = txc->jitcnt;
+	tx32.calcnt = txc->calcnt;
+	tx32.errcnt = txc->errcnt;
+	tx32.stbcnt = txc->stbcnt;
+	tx32.tai = txc->tai;
+	if (copy_to_user(utp, &tx32, sizeof(struct compat_timex)))
 		return -EFAULT;
 	return 0;
 }
 
-COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
-		       struct timezone __user *, tz)
-{
-	if (tv) {
-		struct timeval ktv;
-		do_gettimeofday(&ktv);
-		if (compat_put_timeval(&ktv, tv))
-			return -EFAULT;
-	}
-	if (tz) {
-		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
-		       struct timezone __user *, tz)
-{
-	struct timespec64 new_ts;
-	struct timeval user_tv;
-	struct timezone new_tz;
-
-	if (tv) {
-		if (compat_get_timeval(&user_tv, tv))
-			return -EFAULT;
-		new_ts.tv_sec = user_tv.tv_sec;
-		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
-	}
-	if (tz) {
-		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
-			return -EFAULT;
-	}
-
-	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
-}
-
 static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
 {
 	return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
@@ -213,143 +179,30 @@ int compat_convert_timespec(struct timespec __user **kts,
 	return 0;
 }
 
-static long compat_nanosleep_restart(struct restart_block *restart)
+int get_compat_itimerval(struct itimerval *o, const struct compat_itimerval __user *i)
 {
-	struct compat_timespec __user *rmtp;
-	struct timespec rmt;
-	mm_segment_t oldfs;
-	long ret;
+	struct compat_itimerval v32;
 
-	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = hrtimer_nanosleep_restart(restart);
-	set_fs(oldfs);
-
-	if (ret == -ERESTART_RESTARTBLOCK) {
-		rmtp = restart->nanosleep.compat_rmtp;
-
-		if (rmtp && compat_put_timespec(&rmt, rmtp))
-			return -EFAULT;
-	}
-
-	return ret;
-}
-
-COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
-		       struct compat_timespec __user *, rmtp)
-{
-	struct timespec tu, rmt;
-	struct timespec64 tu64;
-	mm_segment_t oldfs;
-	long ret;
-
-	if (compat_get_timespec(&tu, rqtp))
+	if (copy_from_user(&v32, i, sizeof(struct compat_itimerval)))
 		return -EFAULT;
-
-	tu64 = timespec_to_timespec64(tu);
-	if (!timespec64_valid(&tu64))
-		return -EINVAL;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = hrtimer_nanosleep(&tu64,
-				rmtp ? (struct timespec __user *)&rmt : NULL,
-				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
-	set_fs(oldfs);
-
-	/*
-	 * hrtimer_nanosleep() can only return 0 or
-	 * -ERESTART_RESTARTBLOCK here because:
-	 *
-	 * - we call it with HRTIMER_MODE_REL and therefor exclude the
-	 *   -ERESTARTNOHAND return path.
-	 *
-	 * - we supply the rmtp argument from the task stack (due to
-	 *   the necessary compat conversion. So the update cannot
-	 *   fail, which excludes the -EFAULT return path as well. If
-	 *   it fails nevertheless we have a bigger problem and wont
-	 *   reach this place anymore.
-	 *
-	 * - if the return value is 0, we do not have to update rmtp
-	 *    because there is no remaining time.
-	 *
-	 * We check for -ERESTART_RESTARTBLOCK nevertheless if the
-	 * core implementation decides to return random nonsense.
-	 */
-	if (ret == -ERESTART_RESTARTBLOCK) {
-		struct restart_block *restart = &current->restart_block;
-
-		restart->fn = compat_nanosleep_restart;
-		restart->nanosleep.compat_rmtp = rmtp;
-
-		if (rmtp && compat_put_timespec(&rmt, rmtp))
-			return -EFAULT;
-	}
-	return ret;
-}
-
-static inline long get_compat_itimerval(struct itimerval *o,
-		struct compat_itimerval __user *i)
-{
-	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
-		(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
-		 __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
-		 __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
-		 __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
-}
-
-static inline long put_compat_itimerval(struct compat_itimerval __user *o,
-		struct itimerval *i)
-{
-	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
-		(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
-		 __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
-		 __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
-		 __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
-}
-
-asmlinkage long sys_ni_posix_timers(void);
-
-COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
-		struct compat_itimerval __user *, it)
-{
-	struct itimerval kit;
-	int error;
-
-	if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
-		return sys_ni_posix_timers();
-
-	error = do_getitimer(which, &kit);
-	if (!error && put_compat_itimerval(it, &kit))
-		error = -EFAULT;
-	return error;
-}
-
-COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
-		struct compat_itimerval __user *, in,
-		struct compat_itimerval __user *, out)
-{
-	struct itimerval kin, kout;
-	int error;
-
-	if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
-		return sys_ni_posix_timers();
-
-	if (in) {
-		if (get_compat_itimerval(&kin, in))
-			return -EFAULT;
-	} else
-		memset(&kin, 0, sizeof(kin));
-
-	error = do_setitimer(which, &kin, out ? &kout : NULL);
-	if (error || !out)
-		return error;
-	if (put_compat_itimerval(out, &kout))
-		return -EFAULT;
+	o->it_interval.tv_sec = v32.it_interval.tv_sec;
+	o->it_interval.tv_usec = v32.it_interval.tv_usec;
+	o->it_value.tv_sec = v32.it_value.tv_sec;
+	o->it_value.tv_usec = v32.it_value.tv_usec;
 	return 0;
 }
 
+int put_compat_itimerval(struct compat_itimerval __user *o, const struct itimerval *i)
+{
+	struct compat_itimerval v32;
+
+	v32.it_interval.tv_sec = i->it_interval.tv_sec;
+	v32.it_interval.tv_usec = i->it_interval.tv_usec;
+	v32.it_value.tv_sec = i->it_value.tv_sec;
+	v32.it_value.tv_usec = i->it_value.tv_usec;
+	return copy_to_user(o, &v32, sizeof(struct compat_itimerval)) ? -EFAULT : 0;
+}
+
 static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
 {
 	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
@@ -689,193 +542,6 @@ int put_compat_itimerspec(struct compat_itimerspec __user *dst,
 	return 0;
 }
 
-COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
-		       struct compat_sigevent __user *, timer_event_spec,
-		       timer_t __user *, created_timer_id)
-{
-	struct sigevent __user *event = NULL;
-
-	if (timer_event_spec) {
-		struct sigevent kevent;
-
-		event = compat_alloc_user_space(sizeof(*event));
-		if (get_compat_sigevent(&kevent, timer_event_spec) ||
-		    copy_to_user(event, &kevent, sizeof(*event)))
-			return -EFAULT;
-	}
-
-	return sys_timer_create(which_clock, event, created_timer_id);
-}
-
-COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-		       struct compat_itimerspec __user *, new,
-		       struct compat_itimerspec __user *, old)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct itimerspec newts, oldts;
-
-	if (!new)
-		return -EINVAL;
-	if (get_compat_itimerspec(&newts, new))
-		return -EFAULT;
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_timer_settime(timer_id, flags,
-				(struct itimerspec __user *) &newts,
-				(struct itimerspec __user *) &oldts);
-	set_fs(oldfs);
-	if (!err && old && put_compat_itimerspec(old, &oldts))
-		return -EFAULT;
-	return err;
-}
-
-COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-		       struct compat_itimerspec __user *, setting)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct itimerspec ts;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_timer_gettime(timer_id,
-				(struct itimerspec __user *) &ts);
-	set_fs(oldfs);
-	if (!err && put_compat_itimerspec(setting, &ts))
-		return -EFAULT;
-	return err;
-}
-
-COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
-		       struct compat_timespec __user *, tp)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct timespec ts;
-
-	if (compat_get_timespec(&ts, tp))
-		return -EFAULT;
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_clock_settime(which_clock,
-				(struct timespec __user *) &ts);
-	set_fs(oldfs);
-	return err;
-}
-
-COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
-		       struct compat_timespec __user *, tp)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct timespec ts;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_clock_gettime(which_clock,
-				(struct timespec __user *) &ts);
-	set_fs(oldfs);
-	if (!err && compat_put_timespec(&ts, tp))
-		return -EFAULT;
-	return err;
-}
-
-COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
-		       struct compat_timex __user *, utp)
-{
-	struct timex txc;
-	mm_segment_t oldfs;
-	int err, ret;
-
-	err = compat_get_timex(&txc, utp);
-	if (err)
-		return err;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
-	set_fs(oldfs);
-
-	err = compat_put_timex(utp, &txc);
-	if (err)
-		return err;
-
-	return ret;
-}
-
-COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
-		       struct compat_timespec __user *, tp)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct timespec ts;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_clock_getres(which_clock,
-			       (struct timespec __user *) &ts);
-	set_fs(oldfs);
-	if (!err && tp && compat_put_timespec(&ts, tp))
-		return -EFAULT;
-	return err;
-}
-
-static long compat_clock_nanosleep_restart(struct restart_block *restart)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct timespec tu;
-	struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
-
-	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = clock_nanosleep_restart(restart);
-	set_fs(oldfs);
-
-	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
-	    compat_put_timespec(&tu, rmtp))
-		return -EFAULT;
-
-	if (err == -ERESTART_RESTARTBLOCK) {
-		restart->fn = compat_clock_nanosleep_restart;
-		restart->nanosleep.compat_rmtp = rmtp;
-	}
-	return err;
-}
-
-COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
-		       struct compat_timespec __user *, rqtp,
-		       struct compat_timespec __user *, rmtp)
-{
-	long err;
-	mm_segment_t oldfs;
-	struct timespec in, out;
-	struct restart_block *restart;
-
-	if (compat_get_timespec(&in, rqtp))
-		return -EFAULT;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);
-	err = sys_clock_nanosleep(which_clock, flags,
-				  (struct timespec __user *) &in,
-				  (struct timespec __user *) &out);
-	set_fs(oldfs);
-
-	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
-	    compat_put_timespec(&out, rmtp))
-		return -EFAULT;
-
-	if (err == -ERESTART_RESTARTBLOCK) {
-		restart = &current->restart_block;
-		restart->fn = compat_clock_nanosleep_restart;
-		restart->nanosleep.compat_rmtp = rmtp;
-	}
-	return err;
-}
-
 /*
  * We currently only need the following fields from the sigevent
  * structure: sigev_value, sigev_signo, sig_notify and (sometimes
@@ -1035,64 +701,6 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
 	return ret;
 }
 
-#ifdef __ARCH_WANT_COMPAT_SYS_TIME
-
-/* compat_time_t is a 32 bit "long" and needs to get converted. */
-
-COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
-{
-	compat_time_t i;
-	struct timeval tv;
-
-	do_gettimeofday(&tv);
-	i = tv.tv_sec;
-
-	if (tloc) {
-		if (put_user(i,tloc))
-			return -EFAULT;
-	}
-	force_successful_syscall_return();
-	return i;
-}
-
-COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
-{
-	struct timespec tv;
-	int err;
-
-	if (get_user(tv.tv_sec, tptr))
-		return -EFAULT;
-
-	tv.tv_nsec = 0;
-
-	err = security_settime(&tv, NULL);
-	if (err)
-		return err;
-
-	do_settimeofday(&tv);
-	return 0;
-}
-
-#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
-
-COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
-{
-	struct timex txc;
-	int err, ret;
-
-	err = compat_get_timex(&txc, utp);
-	if (err)
-		return err;
-
-	ret = do_adjtimex(&txc);
-
-	err = compat_put_timex(utp, &txc);
-	if (err)
-		return err;
-
-	return ret;
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 26a06e0..d708290 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -1,10 +1,13 @@
 #  KEEP ALPHABETICALLY SORTED
 # CONFIG_DEVKMEM is not set
 # CONFIG_DEVMEM is not set
+# CONFIG_FHANDLE is not set
 # CONFIG_INET_LRO is not set
-# CONFIG_MODULES is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
 # CONFIG_OABI_COMPAT is not set
 # CONFIG_SYSVIPC is not set
+# CONFIG_USELIB is not set
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
@@ -13,6 +16,7 @@
 CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CGROUPS=y
+CONFIG_CGROUP_BPF=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
@@ -23,6 +27,8 @@
 CONFIG_FB=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
@@ -60,6 +66,9 @@
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
 CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 28ee064..946fb92 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -6,13 +6,15 @@
 # CONFIG_NF_CONNTRACK_SIP is not set
 # CONFIG_PM_WAKELOCKS_GC is not set
 # CONFIG_VT is not set
+CONFIG_ARM64_SW_TTBR0_PAN=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_COMPACTION=y
-CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_CPU_SW_DOMAIN_PAN=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
@@ -105,6 +107,7 @@
 CONFIG_SMARTJOYPLUS_FF=y
 CONFIG_SND=y
 CONFIG_SOUND=y
+CONFIG_STRICT_KERNEL_RWX=y
 CONFIG_SUSPEND_TIME=y
 CONFIG_TABLET_USB_ACECAD=y
 CONFIG_TABLET_USB_AIPTEK=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9ae6fbe..b03a325 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -27,6 +27,7 @@
 #include <linux/smpboot.h>
 #include <linux/relay.h>
 #include <linux/slab.h>
+#include <linux/percpu-rwsem.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -65,6 +66,12 @@ struct cpuhp_cpu_state {
 
 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 
+#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
+static struct lock_class_key cpuhp_state_key;
+static struct lockdep_map cpuhp_state_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
+#endif
+
 /**
  * cpuhp_step - Hotplug state machine step
  * @name:	Name of the step
@@ -196,121 +203,41 @@ void cpu_maps_update_done(void)
 	mutex_unlock(&cpu_add_remove_lock);
 }
 
-/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+/*
+ * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
  */
 static int cpu_hotplug_disabled;
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static struct {
-	struct task_struct *active_writer;
-	/* wait queue to wake up the active_writer */
-	wait_queue_head_t wq;
-	/* verifies that no writer will get active while readers are active */
-	struct mutex lock;
-	/*
-	 * Also blocks the new readers during
-	 * an ongoing cpu hotplug operation.
-	 */
-	atomic_t refcount;
+DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} cpu_hotplug = {
-	.active_writer = NULL,
-	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
-#endif
-};
-
-/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
-#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire_tryread() \
-				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
-#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
-#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
-
-
-void get_online_cpus(void)
+void cpus_read_lock(void)
 {
-	might_sleep();
-	if (cpu_hotplug.active_writer == current)
-		return;
-	cpuhp_lock_acquire_read();
-	mutex_lock(&cpu_hotplug.lock);
-	atomic_inc(&cpu_hotplug.refcount);
-	mutex_unlock(&cpu_hotplug.lock);
+	percpu_down_read(&cpu_hotplug_lock);
 }
-EXPORT_SYMBOL_GPL(get_online_cpus);
+EXPORT_SYMBOL_GPL(cpus_read_lock);
 
-void put_online_cpus(void)
+void cpus_read_unlock(void)
 {
-	int refcount;
-
-	if (cpu_hotplug.active_writer == current)
-		return;
-
-	refcount = atomic_dec_return(&cpu_hotplug.refcount);
-	if (WARN_ON(refcount < 0)) /* try to fix things up */
-		atomic_inc(&cpu_hotplug.refcount);
-
-	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
-		wake_up(&cpu_hotplug.wq);
-
-	cpuhp_lock_release();
-
+	percpu_up_read(&cpu_hotplug_lock);
 }
-EXPORT_SYMBOL_GPL(put_online_cpus);
+EXPORT_SYMBOL_GPL(cpus_read_unlock);
 
-/*
- * This ensures that the hotplug operation can begin only when the
- * refcount goes to zero.
- *
- * Note that during a cpu-hotplug operation, the new readers, if any,
- * will be blocked by the cpu_hotplug.lock
- *
- * Since cpu_hotplug_begin() is always called after invoking
- * cpu_maps_update_begin(), we can be sure that only one writer is active.
- *
- * Note that theoretically, there is a possibility of a livelock:
- * - Refcount goes to zero, last reader wakes up the sleeping
- *   writer.
- * - Last reader unlocks the cpu_hotplug.lock.
- * - A new reader arrives at this moment, bumps up the refcount.
- * - The writer acquires the cpu_hotplug.lock finds the refcount
- *   non zero and goes to sleep again.
- *
- * However, this is very difficult to achieve in practice since
- * get_online_cpus() not an api which is called all that often.
- *
- */
-void cpu_hotplug_begin(void)
+void cpus_write_lock(void)
 {
-	DEFINE_WAIT(wait);
-
-	cpu_hotplug.active_writer = current;
-	cpuhp_lock_acquire();
-
-	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
-		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
-		if (likely(!atomic_read(&cpu_hotplug.refcount)))
-				break;
-		mutex_unlock(&cpu_hotplug.lock);
-		schedule();
-	}
-	finish_wait(&cpu_hotplug.wq, &wait);
+	percpu_down_write(&cpu_hotplug_lock);
 }
 
-void cpu_hotplug_done(void)
+void cpus_write_unlock(void)
 {
-	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
-	cpuhp_lock_release();
+	percpu_up_write(&cpu_hotplug_lock);
+}
+
+void lockdep_assert_cpus_held(void)
+{
+	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
 /*
@@ -344,8 +271,6 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
-/* Notifier wrappers for transitioning to state machine */
-
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -484,6 +409,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
 	st->should_run = false;
 
+	lock_map_acquire(&cpuhp_state_lock_map);
 	/* Single callback invocation for [un]install ? */
 	if (st->single) {
 		if (st->cb_state < CPUHP_AP_ONLINE) {
@@ -510,6 +436,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		else if (st->state > st->target)
 			ret = cpuhp_ap_offline(cpu, st);
 	}
+	lock_map_release(&cpuhp_state_lock_map);
 	st->result = ret;
 	complete(&st->done);
 }
@@ -524,6 +451,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 	if (!cpu_online(cpu))
 		return 0;
 
+	lock_map_acquire(&cpuhp_state_lock_map);
+	lock_map_release(&cpuhp_state_lock_map);
+
 	/*
 	 * If we are up and running, use the hotplug thread. For early calls
 	 * we invoke the thread function directly.
@@ -567,6 +497,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
 	enum cpuhp_state state = st->state;
 
 	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
+	lock_map_acquire(&cpuhp_state_lock_map);
+	lock_map_release(&cpuhp_state_lock_map);
 	__cpuhp_kick_ap_work(st);
 	wait_for_completion(&st->done);
 	trace_cpuhp_exit(cpu, st->state, state, st->result);
@@ -630,30 +562,6 @@ void clear_tasks_mm_cpumask(int cpu)
 	rcu_read_unlock();
 }
 
-static inline void check_for_tasks(int dead_cpu)
-{
-	struct task_struct *g, *p;
-
-	read_lock(&tasklist_lock);
-	for_each_process_thread(g, p) {
-		if (!p->on_rq)
-			continue;
-		/*
-		 * We do the check with unlocked task_rq(p)->lock.
-		 * Order the reading to do not warn about a task,
-		 * which was running on this cpu in the past, and
-		 * it's just been woken on another cpu.
-		 */
-		rmb();
-		if (task_cpu(p) != dead_cpu)
-			continue;
-
-		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
-			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
-	}
-	read_unlock(&tasklist_lock);
-}
-
 /* Take this CPU down. */
 static int take_cpu_down(void *_param)
 {
@@ -701,7 +609,7 @@ static int takedown_cpu(unsigned int cpu)
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
+	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU refused to die */
 		irq_unlock_sparse();
@@ -773,7 +681,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	if (!cpu_present(cpu))
 		return -EINVAL;
 
-	cpu_hotplug_begin();
+	cpus_write_lock();
 
 	cpuhp_tasks_frozen = tasks_frozen;
 
@@ -811,7 +719,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	}
 
 out:
-	cpu_hotplug_done();
+	cpus_write_unlock();
 	return ret;
 }
 
@@ -893,7 +801,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	struct task_struct *idle;
 	int ret = 0;
 
-	cpu_hotplug_begin();
+	cpus_write_lock();
 
 	if (!cpu_present(cpu)) {
 		ret = -EINVAL;
@@ -941,7 +849,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	target = min((int)target, CPUHP_BRINGUP_CPU);
 	ret = cpuhp_up_callbacks(cpu, st, target);
 out:
-	cpu_hotplug_done();
+	cpus_write_unlock();
 	return ret;
 }
 
@@ -1252,6 +1160,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup.single		= smpboot_unpark_threads,
 		.teardown.single	= NULL,
 	},
+	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
+		.name			= "irq/affinity:online",
+		.startup.single		= irq_affinity_online_cpu,
+		.teardown.single	= NULL,
+	},
 	[CPUHP_AP_PERF_ONLINE] = {
 		.name			= "perf:online",
 		.startup.single		= perf_event_init_cpu,
@@ -1413,18 +1326,20 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
 	}
 }
 
-int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
-			       bool invoke)
+int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
+					  struct hlist_node *node,
+					  bool invoke)
 {
 	struct cpuhp_step *sp;
 	int cpu;
 	int ret;
 
+	lockdep_assert_cpus_held();
+
 	sp = cpuhp_get_step(state);
 	if (sp->multi_instance == false)
 		return -EINVAL;
 
-	get_online_cpus();
 	mutex_lock(&cpuhp_state_mutex);
 
 	if (!invoke || !sp->startup.multi)
@@ -1453,13 +1368,23 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
 	hlist_add_head(node, &sp->list);
 unlock:
 	mutex_unlock(&cpuhp_state_mutex);
-	put_online_cpus();
+	return ret;
+}
+
+int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+			       bool invoke)
+{
+	int ret;
+
+	cpus_read_lock();
+	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
+	cpus_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
 
 /**
- * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
+ * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
  * @state:		The state to setup
  * @invoke:		If true, the startup function is invoked for cpus where
  *			cpu state >= @state
@@ -1468,25 +1393,27 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
  * @multi_instance:	State is set up for multiple instances which get
  *			added afterwards.
  *
+ * The caller needs to hold cpus read locked while calling this function.
  * Returns:
  *   On success:
  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
  *      0 for all other states
  *   On failure: proper (negative) error code
  */
-int __cpuhp_setup_state(enum cpuhp_state state,
-			const char *name, bool invoke,
-			int (*startup)(unsigned int cpu),
-			int (*teardown)(unsigned int cpu),
-			bool multi_instance)
+int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
+				   const char *name, bool invoke,
+				   int (*startup)(unsigned int cpu),
+				   int (*teardown)(unsigned int cpu),
+				   bool multi_instance)
 {
 	int cpu, ret = 0;
 	bool dynstate;
 
+	lockdep_assert_cpus_held();
+
 	if (cpuhp_cb_check(state) || !name)
 		return -EINVAL;
 
-	get_online_cpus();
 	mutex_lock(&cpuhp_state_mutex);
 
 	ret = cpuhp_store_callbacks(state, name, startup, teardown,
@@ -1522,7 +1449,6 @@ int __cpuhp_setup_state(enum cpuhp_state state,
 	}
 out:
 	mutex_unlock(&cpuhp_state_mutex);
-	put_online_cpus();
 	/*
 	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
 	 * dynamically allocated state in case of success.
@@ -1531,6 +1457,22 @@ int __cpuhp_setup_state(enum cpuhp_state state,
 		return state;
 	return ret;
 }
+EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
+
+int __cpuhp_setup_state(enum cpuhp_state state,
+			const char *name, bool invoke,
+			int (*startup)(unsigned int cpu),
+			int (*teardown)(unsigned int cpu),
+			bool multi_instance)
+{
+	int ret;
+
+	cpus_read_lock();
+	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
+					     teardown, multi_instance);
+	cpus_read_unlock();
+	return ret;
+}
 EXPORT_SYMBOL(__cpuhp_setup_state);
 
 int __cpuhp_state_remove_instance(enum cpuhp_state state,
@@ -1544,7 +1486,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
 	if (!sp->multi_instance)
 		return -EINVAL;
 
-	get_online_cpus();
+	cpus_read_lock();
 	mutex_lock(&cpuhp_state_mutex);
 
 	if (!invoke || !cpuhp_get_teardown_cb(state))
@@ -1565,29 +1507,30 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
 remove:
 	hlist_del(node);
 	mutex_unlock(&cpuhp_state_mutex);
-	put_online_cpus();
+	cpus_read_unlock();
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
 
 /**
- * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
+ * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
  * @state:	The state to remove
  * @invoke:	If true, the teardown function is invoked for cpus where
  *		cpu state >= @state
  *
+ * The caller needs to hold cpus read locked while calling this function.
  * The teardown callback is currently not allowed to fail. Think
  * about module removal!
  */
-void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
 {
 	struct cpuhp_step *sp = cpuhp_get_step(state);
 	int cpu;
 
 	BUG_ON(cpuhp_cb_check(state));
 
-	get_online_cpus();
+	lockdep_assert_cpus_held();
 
 	mutex_lock(&cpuhp_state_mutex);
 	if (sp->multi_instance) {
@@ -1615,7 +1558,14 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 remove:
 	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
 	mutex_unlock(&cpuhp_state_mutex);
-	put_online_cpus();
+}
+EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
+
+void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+{
+	cpus_read_lock();
+	__cpuhp_remove_state_cpuslocked(state, invoke);
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);
 
@@ -1658,13 +1608,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
 	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
 	mutex_unlock(&cpuhp_state_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (st->state < target)
 		ret = do_cpu_up(dev->id, target);
 	else
 		ret = do_cpu_down(dev->id, target);
-
+out:
 	unlock_device_hotplug();
 	return ret ? ret : count;
 }
@@ -1684,7 +1634,7 @@ static struct attribute *cpuhp_cpu_attrs[] = {
 	NULL
 };
 
-static struct attribute_group cpuhp_cpu_attr_group = {
+static const struct attribute_group cpuhp_cpu_attr_group = {
 	.attrs = cpuhp_cpu_attrs,
 	.name = "hotplug",
 	NULL
@@ -1716,7 +1666,7 @@ static struct attribute *cpuhp_cpu_root_attrs[] = {
 	NULL
 };
 
-static struct attribute_group cpuhp_cpu_root_attr_group = {
+static const struct attribute_group cpuhp_cpu_root_attr_group = {
 	.attrs = cpuhp_cpu_root_attrs,
 	.name = "hotplug",
 	NULL
diff --git a/kernel/cred.c b/kernel/cred.c
index 2bc6607..ecf0365 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -1,4 +1,4 @@
-/* Task credentials management - see Documentation/security/credentials.txt
+/* Task credentials management - see Documentation/security/credentials.rst
  *
  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6e75a5c..4d2c32f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -389,6 +389,7 @@ static atomic_t nr_switch_events __read_mostly;
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
 static struct srcu_struct pmus_srcu;
+static cpumask_var_t perf_online_mask;
 
 /*
  * perf event paranoia level:
@@ -925,11 +926,6 @@ static inline int is_cgroup_event(struct perf_event *event)
 	return 0;
 }
 
-static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
-{
-	return 0;
-}
-
 static inline void update_cgrp_time_from_event(struct perf_event *event)
 {
 }
@@ -3812,14 +3808,6 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		/*
-		 * We could be clever and allow to attach a event to an
-		 * offline CPU and activate it when the CPU comes up, but
-		 * that's for later.
-		 */
-		if (!cpu_online(cpu))
-			return ERR_PTR(-ENODEV);
-
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		ctx = &cpuctx->ctx;
 		get_ctx(ctx);
@@ -5729,9 +5717,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 	__output_copy(handle, values, n * sizeof(u64));
 }
 
-/*
- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
- */
 static void perf_output_read_group(struct perf_output_handle *handle,
 			    struct perf_event *event,
 			    u64 enabled, u64 running)
@@ -5776,6 +5761,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
 
+/*
+ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
+ *
+ * The problem is that it's both hard and excessively expensive to iterate the
+ * child list, not to mention that it's impossible to IPI the children running
+ * on another CPU, from interrupt/NMI context.
+ */
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
@@ -7316,6 +7308,21 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+	/*
+	 * Due to interrupt latency (AKA "skid"), we may enter the
+	 * kernel before taking an overflow, even if the PMU is only
+	 * counting user events.
+	 * To avoid leaking information to userspace, we must always
+	 * reject kernel samples when exclude_kernel is set.
+	 */
+	if (event->attr.exclude_kernel && !user_mode(regs))
+		return false;
+
+	return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7337,6 +7344,12 @@ static int __perf_event_overflow(struct perf_event *event,
 	ret = __perf_event_account_interrupt(event, throttle);
 
 	/*
+	 * For security, drop the skid kernel samples if necessary.
+	 */
+	if (!sample_is_allowed(event, regs))
+		return ret;
+
+	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
 	 */
@@ -7703,7 +7716,8 @@ static int swevent_hlist_get_cpu(int cpu)
 	int err = 0;
 
 	mutex_lock(&swhash->hlist_mutex);
-	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
+	if (!swevent_hlist_deref(swhash) &&
+	    cpumask_test_cpu(cpu, perf_online_mask)) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -7724,7 +7738,7 @@ static int swevent_hlist_get(void)
 {
 	int err, cpu, failed_cpu;
 
-	get_online_cpus();
+	mutex_lock(&pmus_lock);
 	for_each_possible_cpu(cpu) {
 		err = swevent_hlist_get_cpu(cpu);
 		if (err) {
@@ -7732,8 +7746,7 @@ static int swevent_hlist_get(void)
 			goto fail;
 		}
 	}
-	put_online_cpus();
-
+	mutex_unlock(&pmus_lock);
 	return 0;
 fail:
 	for_each_possible_cpu(cpu) {
@@ -7741,8 +7754,7 @@ static int swevent_hlist_get(void)
 			break;
 		swevent_hlist_put_cpu(cpu);
 	}
-
-	put_online_cpus();
+	mutex_unlock(&pmus_lock);
 	return err;
 }
 
@@ -8920,7 +8932,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
 	pmu->hrtimer_interval_ms = timer;
 
 	/* update all cpuctx for this PMU */
-	get_online_cpus();
+	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct perf_cpu_context *cpuctx;
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
@@ -8929,7 +8941,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
 		cpu_function_call(cpu,
 			(remote_function_f)perf_mux_hrtimer_restart, cpuctx);
 	}
-	put_online_cpus();
+	cpus_read_unlock();
 	mutex_unlock(&mux_interval_mutex);
 
 	return count;
@@ -9059,6 +9071,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
 		cpuctx->ctx.pmu = pmu;
+		cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
 
 		__perf_mux_hrtimer_init(cpuctx, cpu);
 	}
@@ -9172,7 +9185,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 
 static struct pmu *perf_init_event(struct perf_event *event)
 {
-	struct pmu *pmu = NULL;
+	struct pmu *pmu;
 	int idx;
 	int ret;
 
@@ -9441,9 +9454,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	local64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_FORMAT_GROUP on inherited events
+	 * We currently do not support PERF_SAMPLE_READ on inherited events.
+	 * See perf_output_read().
 	 */
-	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
+	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
 		goto err_ns;
 
 	if (!has_branch_stack(event))
@@ -9456,9 +9470,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	}
 
 	pmu = perf_init_event(event);
-	if (!pmu)
-		goto err_ns;
-	else if (IS_ERR(pmu)) {
+	if (IS_ERR(pmu)) {
 		err = PTR_ERR(pmu);
 		goto err_ns;
 	}
@@ -9471,8 +9483,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
 						   sizeof(unsigned long),
 						   GFP_KERNEL);
-		if (!event->addr_filters_offs)
+		if (!event->addr_filters_offs) {
+			err = -ENOMEM;
 			goto err_per_task;
+		}
 
 		/* force hw sync on the address filters */
 		event->addr_filters_gen = 1;
@@ -9882,12 +9896,10 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_task;
 	}
 
-	get_online_cpus();
-
 	if (task) {
 		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
 		if (err)
-			goto err_cpus;
+			goto err_task;
 
 		/*
 		 * Reuse ptrace permission checks for now.
@@ -10073,6 +10085,23 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_locked;
 	}
 
+	if (!task) {
+		/*
+		 * Check if the @cpu we're creating an event for is online.
+		 *
+		 * We use the perf_cpu_context::ctx::mutex to serialize against
+		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
+		 */
+		struct perf_cpu_context *cpuctx =
+			container_of(ctx, struct perf_cpu_context, ctx);
+
+		if (!cpuctx->online) {
+			err = -ENODEV;
+			goto err_locked;
+		}
+	}
+
+
 	/*
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
 	 * because we need to serialize with concurrent event creation.
@@ -10162,8 +10191,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_task_struct(task);
 	}
 
-	put_online_cpus();
-
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -10197,8 +10224,6 @@ SYSCALL_DEFINE5(perf_event_open,
 err_cred:
 	if (task)
 		mutex_unlock(&task->signal->cred_guard_mutex);
-err_cpus:
-	put_online_cpus();
 err_task:
 	if (task)
 		put_task_struct(task);
@@ -10253,6 +10278,21 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
+	if (!task) {
+		/*
+		 * Check if the @cpu we're creating an event for is online.
+		 *
+		 * We use the perf_cpu_context::ctx::mutex to serialize against
+		 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
+		 */
+		struct perf_cpu_context *cpuctx =
+			container_of(ctx, struct perf_cpu_context, ctx);
+		if (!cpuctx->online) {
+			err = -ENODEV;
+			goto err_unlock;
+		}
+	}
+
 	if (!exclusive_event_installable(event, ctx)) {
 		err = -EBUSY;
 		goto err_unlock;
@@ -10920,6 +10960,8 @@ static void __init perf_event_init_all_cpus(void)
 	struct swevent_htable *swhash;
 	int cpu;
 
+	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
+
 	for_each_possible_cpu(cpu) {
 		swhash = &per_cpu(swevent_htable, cpu);
 		mutex_init(&swhash->hlist_mutex);
@@ -10935,7 +10977,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-int perf_event_init_cpu(unsigned int cpu)
+void perf_swevent_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10948,7 +10990,6 @@ int perf_event_init_cpu(unsigned int cpu)
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
-	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10966,19 +11007,22 @@ static void __perf_event_exit_context(void *__info)
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 	struct pmu *pmu;
-	int idx;
 
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+	mutex_lock(&pmus_lock);
+	list_for_each_entry(pmu, &pmus, entry) {
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+		cpuctx->online = 0;
 		mutex_unlock(&ctx->mutex);
 	}
-	srcu_read_unlock(&pmus_srcu, idx);
+	cpumask_clear_cpu(cpu, perf_online_mask);
+	mutex_unlock(&pmus_lock);
 }
 #else
 
@@ -10986,6 +11030,29 @@ static void perf_event_exit_cpu_context(int cpu) { }
 
 #endif
 
+int perf_event_init_cpu(unsigned int cpu)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+
+	perf_swevent_init_cpu(cpu);
+
+	mutex_lock(&pmus_lock);
+	cpumask_set_cpu(cpu, perf_online_mask);
+	list_for_each_entry(pmu, &pmus, entry) {
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		mutex_lock(&ctx->mutex);
+		cpuctx->online = 1;
+		mutex_unlock(&ctx->mutex);
+	}
+	mutex_unlock(&pmus_lock);
+
+	return 0;
+}
+
 int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 2831480..ee97196 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	int ret = -ENOMEM, max_order = 0;
 
 	if (!has_aux(event))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
 		/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 516acdb..c632262 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -318,19 +318,6 @@ void rcuwait_wake_up(struct rcuwait *w)
 	rcu_read_unlock();
 }
 
-struct task_struct *try_get_task_struct(struct task_struct **ptask)
-{
-	struct task_struct *task;
-
-	rcu_read_lock();
-	task = task_rcu_dereference(ptask);
-	if (task)
-		get_task_struct(task);
-	rcu_read_unlock();
-
-	return task;
-}
-
 /*
  * Determine if a process group is "orphaned", according to the POSIX
  * definition in 2.2.2.52.  Orphaned process groups are not to be affected
@@ -1004,7 +991,7 @@ struct wait_opts {
 	int __user		*wo_stat;
 	struct rusage __user	*wo_rusage;
 
-	wait_queue_t		child_wait;
+	wait_queue_entry_t		child_wait;
 	int			notask_error;
 };
 
@@ -1541,7 +1528,7 @@ static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
 	return 0;
 }
 
-static int child_wait_callback(wait_queue_t *wait, unsigned mode,
+static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
 				int sync, void *key)
 {
 	struct wait_opts *wo = container_of(wait, struct wait_opts,
diff --git a/kernel/extable.c b/kernel/extable.c
index 2676d7f..0fbdd85 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -75,7 +75,7 @@ int core_kernel_text(unsigned long addr)
 	    addr < (unsigned long)_etext)
 		return 1;
 
-	if (system_state == SYSTEM_BOOTING &&
+	if (system_state < SYSTEM_RUNNING &&
 	    init_kernel_text(addr))
 		return 1;
 	return 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 357348a..c934689 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -225,7 +225,7 @@ struct futex_pi_state {
  * @requeue_pi_key:	the requeue_pi target futex key
  * @bitset:		bitset for the optional bitmasked wakeup
  *
- * We use this hashed waitqueue, instead of a normal wait_queue_t, so
+ * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
@@ -488,7 +488,7 @@ static void drop_futex_key_refs(union futex_key *key)
  *
  * Return: a negative error code or 0
  *
- * The key words are stored in *key on success.
+ * The key words are stored in @key on success.
  *
  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
@@ -1259,9 +1259,9 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
  *
  * Return:
- *  0 - ready to wait;
- *  1 - acquired the lock;
- * <0 - error
+ *  -  0 - ready to wait;
+ *  -  1 - acquired the lock;
+ *  - <0 - error
  *
  * The hb->lock and futex_key refs shall be held by the caller.
  */
@@ -1717,9 +1717,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  * hb1 and hb2 must be held by the caller.
  *
  * Return:
- *  0 - failed to acquire the lock atomically;
- * >0 - acquired the lock, return value is vpid of the top_waiter
- * <0 - error
+ *  -  0 - failed to acquire the lock atomically;
+ *  - >0 - acquired the lock, return value is vpid of the top_waiter
+ *  - <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 				 struct futex_hash_bucket *hb1,
@@ -1785,8 +1785,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
  * uaddr2 atomically on behalf of the top waiter.
  *
  * Return:
- * >=0 - on success, the number of tasks requeued or woken;
- *  <0 - on error
+ *  - >=0 - on success, the number of tasks requeued or woken;
+ *  -  <0 - on error
  */
 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
@@ -2142,8 +2142,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  * be paired with exactly one earlier call to queue_me().
  *
  * Return:
- *   1 - if the futex_q was still queued (and we removed unqueued it);
- *   0 - if the futex_q was already removed by the waking thread
+ *  - 1 - if the futex_q was still queued (and we removed it);
+ *  - 0 - if the futex_q was already removed by the waking thread
  */
 static int unqueue_me(struct futex_q *q)
 {
@@ -2333,9 +2333,9 @@ static long futex_wait_restart(struct restart_block *restart);
  * acquire the lock. Must be called with the hb lock held.
  *
  * Return:
- *  1 - success, lock taken;
- *  0 - success, lock not taken;
- * <0 - on error (-EFAULT)
+ *  -  1 - success, lock taken;
+ *  -  0 - success, lock not taken;
+ *  - <0 - on error (-EFAULT)
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
@@ -2422,8 +2422,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  * with no q.key reference on failure.
  *
  * Return:
- *  0 - uaddr contains val and hb has been locked;
- * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
+ *  -  0 - uaddr contains val and hb has been locked;
+ *  - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
  */
 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
 			   struct futex_q *q, struct futex_hash_bucket **hb)
@@ -2895,8 +2895,8 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
  * called with the hb lock held.
  *
  * Return:
- *  0 = no early wakeup detected;
- * <0 = -ETIMEDOUT or -ERESTARTNOINTR
+ *  -  0 = no early wakeup detected;
+ *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
@@ -2968,8 +2968,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
  *
  * Return:
- *  0 - On success;
- * <0 - On error
+ *  -  0 - On success;
+ *  - <0 - On error
  */
 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 				 u32 val, ktime_t *abs_time, u32 bitset,
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 3bbfd6a..27c4e77 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -21,6 +21,10 @@
 config GENERIC_IRQ_SHOW_LEVEL
        bool
 
+# Supports effective affinity mask
+config GENERIC_IRQ_EFFECTIVE_AFF_MASK
+       bool
+
 # Facility to allocate a hardware interrupt. This is legacy support
 # and should not be used in new code. Use irq domains instead.
 config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
@@ -81,6 +85,9 @@
 config HANDLE_DOMAIN_IRQ
 	bool
 
+config IRQ_TIMINGS
+	bool
+
 config IRQ_DOMAIN_DEBUG
 	bool "Expose hardware/virtual IRQ mapping via debugfs"
 	depends on IRQ_DOMAIN && DEBUG_FS
@@ -108,4 +115,15 @@
 
 	  If you don't know what to do here, say N.
 
+config GENERIC_IRQ_DEBUGFS
+	bool "Expose irq internals in debugfs"
+	depends on DEBUG_FS
+	default n
+	---help---
+
+	  Exposes internal state information through debugfs. Mostly for
+	  developers and debugging of hard-to-diagnose interrupt problems.
+
+	  If you don't know what to do here, say N.
+
 endmenu
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 1d3ee31..e4aef735 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,6 @@
 
 obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
+obj-$(CONFIG_IRQ_TIMINGS) += timings.o
 obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
@@ -10,3 +11,4 @@
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
 obj-$(CONFIG_SMP) += affinity.o
+obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e2d356d..d2747f9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -1,4 +1,7 @@
-
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -35,13 +38,54 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	}
 }
 
-static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
+static cpumask_var_t *alloc_node_to_present_cpumask(void)
+{
+	cpumask_var_t *masks;
+	int node;
+
+	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+	if (!masks)
+		return NULL;
+
+	for (node = 0; node < nr_node_ids; node++) {
+		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+			goto out_unwind;
+	}
+
+	return masks;
+
+out_unwind:
+	while (--node >= 0)
+		free_cpumask_var(masks[node]);
+	kfree(masks);
+	return NULL;
+}
+
+static void free_node_to_present_cpumask(cpumask_var_t *masks)
+{
+	int node;
+
+	for (node = 0; node < nr_node_ids; node++)
+		free_cpumask_var(masks[node]);
+	kfree(masks);
+}
+
+static void build_node_to_present_cpumask(cpumask_var_t *masks)
+{
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+}
+
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+				const struct cpumask *mask, nodemask_t *nodemsk)
 {
 	int n, nodes = 0;
 
 	/* Calculate the number of nodes in the supplied affinity mask */
-	for_each_online_node(n) {
-		if (cpumask_intersects(mask, cpumask_of_node(n))) {
+	for_each_node(n) {
+		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
 			node_set(n, *nodemsk);
 			nodes++;
 		}
@@ -64,7 +108,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
-	cpumask_var_t nmsk;
+	cpumask_var_t nmsk, *node_to_present_cpumask;
 
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return NULL;
@@ -73,13 +117,19 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (!masks)
 		goto out;
 
+	node_to_present_cpumask = alloc_node_to_present_cpumask();
+	if (!node_to_present_cpumask)
+		goto out;
+
 	/* Fill out vectors at the beginning that don't need affinity */
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
+	build_node_to_present_cpumask(node_to_present_cpumask);
+	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+				     &nodemsk);
 
 	/*
 	 * If the number of nodes in the mask is greater than or equal the
@@ -87,7 +137,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	 */
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_copy(masks + curvec, cpumask_of_node(n));
+			cpumask_copy(masks + curvec,
+				     node_to_present_cpumask[n]);
 			if (++curvec == last_affv)
 				break;
 		}
@@ -101,7 +152,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
+		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -133,6 +184,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	/* Fill out vectors at the end that don't need affinity */
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
+	free_node_to_present_cpumask(node_to_present_cpumask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
@@ -147,12 +199,10 @@ int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
 {
 	int resv = affd->pre_vectors + affd->post_vectors;
 	int vecs = maxvec - resv;
-	int cpus;
+	int ret;
 
-	/* Stabilize the cpumasks */
 	get_online_cpus();
-	cpus = cpumask_weight(cpu_online_mask);
+	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
 	put_online_cpus();
-
-	return min(cpus, vecs) + resv;
+	return ret;
 }
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 0119b9d..d30a0dd 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
 			if (desc->irq_data.chip->irq_set_type)
 				desc->irq_data.chip->irq_set_type(&desc->irq_data,
 							 IRQ_TYPE_PROBE);
-			irq_startup(desc, false);
+			irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
 		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && irq_settings_can_probe(desc)) {
 			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc, false))
+			if (irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE))
 				desc->istate |= IRQS_PENDING;
 		}
 		raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c94da68..ad43468 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -7,7 +7,7 @@
  * This file contains the core interrupt handling code, for irq-chip
  * based architectures.
  *
- * Detailed information is available in Documentation/DocBook/genericirq
+ * Detailed information is available in Documentation/core-api/genericirq.rst
  */
 
 #include <linux/irq.h>
@@ -185,47 +185,162 @@ static void irq_state_set_masked(struct irq_desc *desc)
 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc, bool resend)
+static void irq_state_clr_started(struct irq_desc *desc)
 {
+	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
+}
+
+static void irq_state_set_started(struct irq_desc *desc)
+{
+	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
+}
+
+enum {
+	IRQ_STARTUP_NORMAL,
+	IRQ_STARTUP_MANAGED,
+	IRQ_STARTUP_ABORT,
+};
+
+#ifdef CONFIG_SMP
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+	if (!irqd_affinity_is_managed(d))
+		return IRQ_STARTUP_NORMAL;
+
+	irqd_clr_managed_shutdown(d);
+
+	if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
+		/*
+		 * Catch code which fiddles with enable_irq() on a managed
+		 * and potentially shutdown IRQ. Chained interrupt
+		 * installment or irq auto probing should not happen on
+		 * managed irqs either. Emit a warning, break the affinity
+		 * and start it up as a normal interrupt.
+		 */
+		if (WARN_ON_ONCE(force))
+			return IRQ_STARTUP_NORMAL;
+		/*
+		 * The interrupt was requested, but there is no online CPU
+		 * in its affinity mask. Put it into managed shutdown
+		 * state and let the cpu hotplug mechanism start it up once
+		 * a CPU in the mask becomes available.
+		 */
+		irqd_set_managed_shutdown(d);
+		return IRQ_STARTUP_ABORT;
+	}
+	return IRQ_STARTUP_MANAGED;
+}
+#else
+static int
+__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+{
+	return IRQ_STARTUP_NORMAL;
+}
+#endif
+
+static int __irq_startup(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
 	int ret = 0;
 
-	irq_state_clr_disabled(desc);
-	desc->depth = 0;
-
-	irq_domain_activate_irq(&desc->irq_data);
-	if (desc->irq_data.chip->irq_startup) {
-		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+	irq_domain_activate_irq(d);
+	if (d->chip->irq_startup) {
+		ret = d->chip->irq_startup(d);
+		irq_state_clr_disabled(desc);
 		irq_state_clr_masked(desc);
 	} else {
 		irq_enable(desc);
 	}
-	if (resend)
-		check_irq_resend(desc);
+	irq_state_set_started(desc);
 	return ret;
 }
 
+int irq_startup(struct irq_desc *desc, bool resend, bool force)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct cpumask *aff = irq_data_get_affinity_mask(d);
+	int ret = 0;
+
+	desc->depth = 0;
+
+	if (irqd_is_started(d)) {
+		irq_enable(desc);
+	} else {
+		switch (__irq_startup_managed(desc, aff, force)) {
+		case IRQ_STARTUP_NORMAL:
+			ret = __irq_startup(desc);
+			irq_setup_affinity(desc);
+			break;
+		case IRQ_STARTUP_MANAGED:
+			ret = __irq_startup(desc);
+			irq_set_affinity_locked(d, aff, false);
+			break;
+		case IRQ_STARTUP_ABORT:
+			return 0;
+		}
+	}
+	if (resend)
+		check_irq_resend(desc);
+
+	return ret;
+}
+
+static void __irq_disable(struct irq_desc *desc, bool mask);
+
 void irq_shutdown(struct irq_desc *desc)
 {
-	irq_state_set_disabled(desc);
-	desc->depth = 1;
-	if (desc->irq_data.chip->irq_shutdown)
-		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	else if (desc->irq_data.chip->irq_disable)
-		desc->irq_data.chip->irq_disable(&desc->irq_data);
-	else
-		desc->irq_data.chip->irq_mask(&desc->irq_data);
+	if (irqd_is_started(&desc->irq_data)) {
+		desc->depth = 1;
+		if (desc->irq_data.chip->irq_shutdown) {
+			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
+			irq_state_set_disabled(desc);
+			irq_state_set_masked(desc);
+		} else {
+			__irq_disable(desc, true);
+		}
+		irq_state_clr_started(desc);
+	}
+	/*
+	 * This must be called even if the interrupt was never started up,
+	 * because the activation can happen before the interrupt is
+	 * available for request/startup. It has its own state tracking so
+	 * it's safe to call it unconditionally.
+	 */
 	irq_domain_deactivate_irq(&desc->irq_data);
-	irq_state_set_masked(desc);
 }
 
 void irq_enable(struct irq_desc *desc)
 {
-	irq_state_clr_disabled(desc);
-	if (desc->irq_data.chip->irq_enable)
-		desc->irq_data.chip->irq_enable(&desc->irq_data);
-	else
-		desc->irq_data.chip->irq_unmask(&desc->irq_data);
-	irq_state_clr_masked(desc);
+	if (!irqd_irq_disabled(&desc->irq_data)) {
+		unmask_irq(desc);
+	} else {
+		irq_state_clr_disabled(desc);
+		if (desc->irq_data.chip->irq_enable) {
+			desc->irq_data.chip->irq_enable(&desc->irq_data);
+			irq_state_clr_masked(desc);
+		} else {
+			unmask_irq(desc);
+		}
+	}
+}
+
+static void __irq_disable(struct irq_desc *desc, bool mask)
+{
+	if (irqd_irq_disabled(&desc->irq_data)) {
+		if (mask)
+			mask_irq(desc);
+	} else {
+		irq_state_set_disabled(desc);
+		if (desc->irq_data.chip->irq_disable) {
+			desc->irq_data.chip->irq_disable(&desc->irq_data);
+			irq_state_set_masked(desc);
+		} else if (mask) {
+			mask_irq(desc);
+		}
+	}
 }
 
 /**
@@ -250,13 +365,7 @@ void irq_enable(struct irq_desc *desc)
  */
 void irq_disable(struct irq_desc *desc)
 {
-	irq_state_set_disabled(desc);
-	if (desc->irq_data.chip->irq_disable) {
-		desc->irq_data.chip->irq_disable(&desc->irq_data);
-		irq_state_set_masked(desc);
-	} else if (irq_settings_disable_unlazy(desc)) {
-		mask_irq(desc);
-	}
+	__irq_disable(desc, irq_settings_disable_unlazy(desc));
 }
 
 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
@@ -279,18 +388,21 @@ void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
-	if (desc->irq_data.chip->irq_mask_ack)
+	if (desc->irq_data.chip->irq_mask_ack) {
 		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
-	else {
-		desc->irq_data.chip->irq_mask(&desc->irq_data);
+		irq_state_set_masked(desc);
+	} else {
+		mask_irq(desc);
 		if (desc->irq_data.chip->irq_ack)
 			desc->irq_data.chip->irq_ack(&desc->irq_data);
 	}
-	irq_state_set_masked(desc);
 }
 
 void mask_irq(struct irq_desc *desc)
 {
+	if (irqd_irq_masked(&desc->irq_data))
+		return;
+
 	if (desc->irq_data.chip->irq_mask) {
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
 		irq_state_set_masked(desc);
@@ -299,6 +411,9 @@ void mask_irq(struct irq_desc *desc)
 
 void unmask_irq(struct irq_desc *desc)
 {
+	if (!irqd_irq_masked(&desc->irq_data))
+		return;
+
 	if (desc->irq_data.chip->irq_unmask) {
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 		irq_state_clr_masked(desc);
@@ -312,10 +427,7 @@ void unmask_threaded_irq(struct irq_desc *desc)
 	if (chip->flags & IRQCHIP_EOI_THREADED)
 		chip->irq_eoi(&desc->irq_data);
 
-	if (chip->irq_unmask) {
-		chip->irq_unmask(&desc->irq_data);
-		irq_state_clr_masked(desc);
-	}
+	unmask_irq(desc);
 }
 
 /*
@@ -851,7 +963,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
 		desc->action = &chained_action;
-		irq_startup(desc, true);
+		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 	}
 }
 
@@ -903,6 +1015,13 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 
 	if (!desc)
 		return;
+
+	/*
+	 * Warn when a driver sets the no autoenable flag on an already
+	 * active interrupt.
+	 */
+	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
+
 	irq_settings_clr_and_set(desc, clr, set);
 
 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 011f8c4..aee8f7e 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -14,37 +14,99 @@
 
 #include "internals.h"
 
+/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
+static inline bool irq_needs_fixup(struct irq_data *d)
+{
+	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
+
+	return cpumask_test_cpu(smp_processor_id(), m);
+}
+
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->common->affinity;
-	struct irq_chip *c;
-	bool ret = false;
+	struct irq_chip *chip = irq_data_get_irq_chip(d);
+	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
+	const struct cpumask *affinity;
+	bool brokeaff = false;
+	int err;
 
 	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
+	 * IRQ chip might be already torn down, but the irq descriptor is
+	 * still in the radix tree. Also if the chip has no affinity setter,
+	 * nothing can be done here.
 	 */
-	if (irqd_is_per_cpu(d) ||
-	    !cpumask_test_cpu(smp_processor_id(), affinity))
+	if (!chip || !chip->irq_set_affinity) {
+		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
 		return false;
+	}
+
+	/*
+	 * No move required, if:
+	 * - Interrupt is per cpu
+	 * - Interrupt is not started
+	 * - Affinity mask does not include this CPU.
+	 *
+	 * Note: Do not check desc->action as this might be a chained
+	 * interrupt.
+	 */
+	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
+		/*
+		 * If an irq move is pending, abort it if the dying CPU is
+		 * the sole target.
+		 */
+		irq_fixup_move_pending(desc, false);
+		return false;
+	}
+
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
+	/*
+	 * If there is a setaffinity pending, then try to reuse the pending
+	 * mask, so the last change of the affinity does not get lost. If
+	 * there is no move pending or the pending mask does not contain
+	 * any online CPU, use the current affinity mask.
+	 */
+	if (irq_fixup_move_pending(desc, true))
+		affinity = irq_desc_get_pending_mask(desc);
+	else
+		affinity = irq_data_get_affinity_mask(d);
+
+	/* Mask the chip for interrupts which cannot move in process context */
+	if (maskchip && chip->irq_mask)
+		chip->irq_mask(d);
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		/*
+		 * If the interrupt is managed, then shut it down and leave
+		 * the affinity untouched.
+		 */
+		if (irqd_affinity_is_managed(d)) {
+			irqd_set_managed_shutdown(d);
+			irq_shutdown(desc);
+			return false;
+		}
 		affinity = cpu_online_mask;
-		ret = true;
+		brokeaff = true;
 	}
 
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity) {
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	} else {
-		int r = irq_do_set_affinity(d, affinity, false);
-		if (r)
-			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
-					    d->irq, r);
+	err = irq_do_set_affinity(d, affinity, true);
+	if (err) {
+		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
+				    d->irq, err);
+		brokeaff = false;
 	}
 
-	return ret;
+	if (maskchip && chip->irq_unmask)
+		chip->irq_unmask(d);
+
+	return brokeaff;
 }
 
 /**
@@ -59,11 +121,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
  */
 void irq_migrate_all_off_this_cpu(void)
 {
-	unsigned int irq;
 	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
+	unsigned int irq;
 
 	for_each_active_irq(irq) {
 		bool affinity_broken;
@@ -73,10 +132,53 @@ void irq_migrate_all_off_this_cpu(void)
 		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
 
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+		if (affinity_broken) {
+			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					    irq, smp_processor_id());
+		}
+	}
+}
+
+static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = irq_data_get_affinity_mask(data);
+
+	if (!irqd_affinity_is_managed(data) || !desc->action ||
+	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
+		return;
+
+	if (irqd_is_managed_and_shutdown(data)) {
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		return;
 	}
 
-	local_irq_restore(flags);
+	/*
+	 * If the interrupt can only be directed to a single target
+	 * CPU then it is already assigned to a CPU in the affinity
+	 * mask. No point in trying to move it around.
+	 */
+	if (!irqd_is_single_target(data))
+		irq_set_affinity_locked(data, affinity, false);
+}
+
+/**
+ * irq_affinity_online_cpu - Restore affinity for managed interrupts
+ * @cpu:	Upcoming CPU for which interrupts should be restored
+ */
+int irq_affinity_online_cpu(unsigned int cpu)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+
+	irq_lock_sparse();
+	for_each_active_irq(irq) {
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		irq_restore_affinity_of_irq(desc, cpu);
+		raw_spin_unlock_irq(&desc->lock);
+	}
+	irq_unlock_sparse();
+
+	return 0;
 }
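
As a sketch only (assumed wiring, not shown in this excerpt): the new irq_affinity_online_cpu() callback has the signature expected by the CPU hotplug state machine, so it could be registered roughly as below; the dynamic state and the "irq/affinity:online" name are illustrative assumptions.

	#include <linux/cpuhotplug.h>
	#include <linux/irq.h>

	/* Assumed to be declared in a header; the prototype matches the patch. */
	extern int irq_affinity_online_cpu(unsigned int cpu);

	static int __init irq_affinity_hotplug_init(void)
	{
		int ret;

		/* Run on each CPU as it comes online, no teardown callback */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irq/affinity:online",
					irq_affinity_online_cpu, NULL);
		return ret < 0 ? ret : 0;
	}
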
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
new file mode 100644
index 0000000..4d384ed
--- /dev/null
+++ b/kernel/irq/debugfs.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2017 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is licensed under the GPL V2.
+ */
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+static struct dentry *irq_dir;
+
+struct irq_bit_descr {
+	unsigned int	mask;
+	char		*name;
+};
+#define BIT_MASK_DESCR(m)	{ .mask = m, .name = #m }
+
+static void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
+				const struct irq_bit_descr *sd, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++, sd++) {
+		if (state & sd->mask)
+			seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
+	}
+}
+
+#ifdef CONFIG_SMP
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct cpumask *msk;
+
+	msk = irq_data_get_affinity_mask(data);
+	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	msk = irq_data_get_effective_affinity_mask(data);
+	seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(msk));
+#endif
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	msk = desc->pending_mask;
+	seq_printf(m, "pending:  %*pbl\n", cpumask_pr_args(msk));
+#endif
+}
+#else
+static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
+#endif
+
+static const struct irq_bit_descr irqchip_flags[] = {
+	BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
+	BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
+	BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
+	BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
+	BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
+	BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
+	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+};
+
+static void
+irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
+{
+	struct irq_chip *chip = data->chip;
+
+	if (!chip) {
+		seq_printf(m, "chip: None\n");
+		return;
+	}
+	seq_printf(m, "%*schip:    %s\n", ind, "", chip->name);
+	seq_printf(m, "%*sflags:   0x%lx\n", ind + 1, "", chip->flags);
+	irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
+			    ARRAY_SIZE(irqchip_flags));
+}
+
+static void
+irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
+{
+	seq_printf(m, "%*sdomain:  %s\n", ind, "",
+		   data->domain ? data->domain->name : "");
+	seq_printf(m, "%*shwirq:   0x%lx\n", ind + 1, "", data->hwirq);
+	irq_debug_show_chip(m, data, ind + 1);
+#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
+	if (!data->parent_data)
+		return;
+	seq_printf(m, "%*sparent:\n", ind + 1, "");
+	irq_debug_show_data(m, data->parent_data, ind + 4);
+#endif
+}
+
+static const struct irq_bit_descr irqdata_states[] = {
+	BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
+	BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
+	BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
+	BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
+	BIT_MASK_DESCR(IRQD_LEVEL),
+
+	BIT_MASK_DESCR(IRQD_ACTIVATED),
+	BIT_MASK_DESCR(IRQD_IRQ_STARTED),
+	BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
+	BIT_MASK_DESCR(IRQD_IRQ_MASKED),
+	BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
+
+	BIT_MASK_DESCR(IRQD_PER_CPU),
+	BIT_MASK_DESCR(IRQD_NO_BALANCING),
+
+	BIT_MASK_DESCR(IRQD_SINGLE_TARGET),
+	BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
+	BIT_MASK_DESCR(IRQD_AFFINITY_SET),
+	BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
+	BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
+	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+
+	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
+
+	BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
+	BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
+};
+
+static const struct irq_bit_descr irqdesc_states[] = {
+	BIT_MASK_DESCR(_IRQ_NOPROBE),
+	BIT_MASK_DESCR(_IRQ_NOREQUEST),
+	BIT_MASK_DESCR(_IRQ_NOTHREAD),
+	BIT_MASK_DESCR(_IRQ_NOAUTOEN),
+	BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
+	BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
+	BIT_MASK_DESCR(_IRQ_IS_POLLED),
+	BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
+};
+
+static const struct irq_bit_descr irqdesc_istates[] = {
+	BIT_MASK_DESCR(IRQS_AUTODETECT),
+	BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
+	BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
+	BIT_MASK_DESCR(IRQS_ONESHOT),
+	BIT_MASK_DESCR(IRQS_REPLAY),
+	BIT_MASK_DESCR(IRQS_WAITING),
+	BIT_MASK_DESCR(IRQS_PENDING),
+	BIT_MASK_DESCR(IRQS_SUSPENDED),
+};
+
+
+static int irq_debug_show(struct seq_file *m, void *p)
+{
+	struct irq_desc *desc = m->private;
+	struct irq_data *data;
+
+	raw_spin_lock_irq(&desc->lock);
+	data = irq_desc_get_irq_data(desc);
+	seq_printf(m, "handler:  %pf\n", desc->handle_irq);
+	seq_printf(m, "status:   0x%08x\n", desc->status_use_accessors);
+	irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
+			    ARRAY_SIZE(irqdesc_states));
+	seq_printf(m, "istate:   0x%08x\n", desc->istate);
+	irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
+			    ARRAY_SIZE(irqdesc_istates));
+	seq_printf(m, "ddepth:   %u\n", desc->depth);
+	seq_printf(m, "wdepth:   %u\n", desc->wake_depth);
+	seq_printf(m, "dstate:   0x%08x\n", irqd_get(data));
+	irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
+			    ARRAY_SIZE(irqdata_states));
+	seq_printf(m, "node:     %d\n", irq_data_get_node(data));
+	irq_debug_show_masks(m, desc);
+	irq_debug_show_data(m, data, 0);
+	raw_spin_unlock_irq(&desc->lock);
+	return 0;
+}
+
+static int irq_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_irq_ops = {
+	.open		= irq_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
+{
+	char name [10];
+
+	if (!irq_dir || !desc || desc->debugfs_file)
+		return;
+
+	sprintf(name, "%d", irq);
+	desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
+						 &dfs_irq_ops);
+}
+
+static int __init irq_debugfs_init(void)
+{
+	struct dentry *root_dir;
+	int irq;
+
+	root_dir = debugfs_create_dir("irq", NULL);
+	if (!root_dir)
+		return -ENOMEM;
+
+	irq_domain_debugfs_init(root_dir);
+
+	irq_dir = debugfs_create_dir("irqs", root_dir);
+
+	irq_lock_sparse();
+	for_each_active_irq(irq)
+		irq_add_debugfs_entry(irq, irq_to_desc(irq));
+	irq_unlock_sparse();
+
+	return 0;
+}
+__initcall(irq_debugfs_init);
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 1613bfd..194c506 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -4,6 +4,8 @@
 #include <linux/gfp.h>
 #include <linux/irq.h>
 
+#include "internals.h"
+
 /*
  * Device resource management aware IRQ request/free implementation.
  */
@@ -198,3 +200,87 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
 	return base;
 }
 EXPORT_SYMBOL_GPL(__devm_irq_alloc_descs);
+
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+/**
+ * devm_irq_alloc_generic_chip - Allocate and initialize a generic chip
+ *                               for a managed device
+ * @dev:	Device to allocate the generic chip for
+ * @name:	Name of the irq chip
+ * @num_ct:	Number of irq_chip_type instances associated with this chip
+ * @irq_base:	Interrupt base nr for this chip
+ * @reg_base:	Register base address (virtual)
+ * @handler:	Default flow handler associated with this chip
+ *
+ * Returns an initialized irq_chip_generic structure. The chip defaults
+ * to the primary (index 0) irq_chip_type and @handler
+ */
+struct irq_chip_generic *
+devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
+			    unsigned int irq_base, void __iomem *reg_base,
+			    irq_flow_handler_t handler)
+{
+	struct irq_chip_generic *gc;
+	unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+
+	gc = devm_kzalloc(dev, sz, GFP_KERNEL);
+	if (gc)
+		irq_init_generic_chip(gc, name, num_ct,
+				      irq_base, reg_base, handler);
+
+	return gc;
+}
+EXPORT_SYMBOL_GPL(devm_irq_alloc_generic_chip);
+
+struct irq_generic_chip_devres {
+	struct irq_chip_generic *gc;
+	u32 msk;
+	unsigned int clr;
+	unsigned int set;
+};
+
+static void devm_irq_remove_generic_chip(struct device *dev, void *res)
+{
+	struct irq_generic_chip_devres *this = res;
+
+	irq_remove_generic_chip(this->gc, this->msk, this->clr, this->set);
+}
+
+/**
+ * devm_irq_setup_generic_chip - Setup a range of interrupts with a generic
+ *                               chip for a managed device
+ *
+ * @dev:	Device to setup the generic chip for
+ * @gc:		Generic irq chip holding all data
+ * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
+ * @flags:	Flags for initialization
+ * @clr:	IRQ_* bits to clear
+ * @set:	IRQ_* bits to set
+ *
+ * Set up max. 32 interrupts starting from gc->irq_base. Note, this
+ * initializes all interrupts to the primary irq_chip_type and its
+ * associated handler.
+ */
+int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
+				u32 msk, enum irq_gc_flags flags,
+				unsigned int clr, unsigned int set)
+{
+	struct irq_generic_chip_devres *dr;
+
+	dr = devres_alloc(devm_irq_remove_generic_chip,
+			  sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	irq_setup_generic_chip(gc, msk, flags, clr, set);
+
+	dr->gc = gc;
+	dr->msk = msk;
+	dr->clr = clr;
+	dr->set = set;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_irq_setup_generic_chip);
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
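
A minimal sketch of how a driver might consume the two new devres helpers (hypothetical "foo" device; the register offset and mask are made up). The lifetime of the generic chip and its interrupts is then tied to the device:

	#include <linux/device.h>
	#include <linux/irq.h>

	static int foo_init_irqchip(struct device *dev, void __iomem *regs,
				    unsigned int irq_base)
	{
		struct irq_chip_generic *gc;

		gc = devm_irq_alloc_generic_chip(dev, "foo", 1, irq_base, regs,
						 handle_level_irq);
		if (!gc)
			return -ENOMEM;

		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
		gc->chip_types[0].regs.mask = 0x04;	/* made-up register offset */

		/* Removed automatically via devres when the device goes away */
		return devm_irq_setup_generic_chip(dev, gc, 0xff,
						   IRQ_GC_INIT_MASK_CACHE,
						   IRQ_NOREQUEST, 0);
	}
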
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index ee32870..f7086b7 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -201,10 +201,9 @@ static void irq_writel_be(u32 val, void __iomem *addr)
 	iowrite32be(val, addr);
 }
 
-static void
-irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
-		      int num_ct, unsigned int irq_base,
-		      void __iomem *reg_base, irq_flow_handler_t handler)
+void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+			   int num_ct, unsigned int irq_base,
+			   void __iomem *reg_base, irq_flow_handler_t handler)
 {
 	raw_spin_lock_init(&gc->lock);
 	gc->num_ct = num_ct;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d3f2490..79f987b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -6,7 +6,7 @@
  *
  * This file contains the core interrupt handling code.
  *
- * Detailed information is available in Documentation/DocBook/genericirq
+ * Detailed information is available in Documentation/core-api/genericirq.rst
  *
  */
 
@@ -138,6 +138,8 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
 	unsigned int irq = desc->irq_data.irq;
 	struct irqaction *action;
 
+	record_irq_time(desc);
+
 	for_each_action_of_desc(desc, action) {
 		irqreturn_t res;
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index bc226e7..9da14d1 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -8,6 +8,7 @@
 #include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched/clock.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
@@ -57,6 +58,7 @@ enum {
 	IRQS_WAITING		= 0x00000080,
 	IRQS_PENDING		= 0x00000200,
 	IRQS_SUSPENDED		= 0x00000800,
+	IRQS_TIMINGS		= 0x00001000,
 };
 
 #include "debug.h"
@@ -66,7 +68,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc);
 extern void __enable_irq(struct irq_desc *desc);
 
-extern int irq_startup(struct irq_desc *desc, bool resend);
+#define IRQ_RESEND	true
+#define IRQ_NORESEND	false
+
+#define IRQ_START_FORCE	true
+#define IRQ_START_COND	false
+
+extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
@@ -109,13 +118,19 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern bool irq_can_set_affinity_usr(unsigned int irq);
 
-extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
+extern int irq_select_affinity_usr(unsigned int irq);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 extern int irq_do_set_affinity(struct irq_data *data,
 			       const struct cpumask *dest, bool force);
 
+#ifdef CONFIG_SMP
+extern int irq_setup_affinity(struct irq_desc *desc);
+#else
+static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
+#endif
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
@@ -169,6 +184,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
+static inline unsigned int irqd_get(struct irq_data *d)
+{
+	return __irqd_to_state(d);
+}
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -182,6 +202,16 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
 }
 
+static inline void irqd_set_managed_shutdown(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
+}
+
+static inline void irqd_clr_managed_shutdown(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
+}
+
 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
 {
 	__irqd_to_state(d) &= ~mask;
@@ -226,3 +256,194 @@ irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
 static inline void
 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
 #endif
+
+#ifdef CONFIG_IRQ_TIMINGS
+
+#define IRQ_TIMINGS_SHIFT	5
+#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
+#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)
+
+/**
+ * struct irq_timings - irq timings storing structure
+ * @values: a circular buffer of u64 encoded <timestamp,irq> values
+ * @count: the number of elements in the array
+ */
+struct irq_timings {
+	u64	values[IRQ_TIMINGS_SIZE];
+	int	count;
+};
+
+DECLARE_PER_CPU(struct irq_timings, irq_timings);
+
+extern void irq_timings_free(int irq);
+extern int irq_timings_alloc(int irq);
+
+static inline void irq_remove_timings(struct irq_desc *desc)
+{
+	desc->istate &= ~IRQS_TIMINGS;
+
+	irq_timings_free(irq_desc_get_irq(desc));
+}
+
+static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
+{
+	int irq = irq_desc_get_irq(desc);
+	int ret;
+
+	/*
+	 * We don't need the measurement because the idle code already
+	 * knows the next expiry event.
+	 */
+	if (act->flags & __IRQF_TIMER)
+		return;
+
+	/*
+	 * If the timing allocation fails, we just want to warn, not
+	 * fail, so let the system boot anyway.
+	 */
+	ret = irq_timings_alloc(irq);
+	if (ret) {
+		pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
+			irq, ret);
+		return;
+	}
+
+	desc->istate |= IRQS_TIMINGS;
+}
+
+extern void irq_timings_enable(void);
+extern void irq_timings_disable(void);
+
+DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
+
+/*
+ * The interrupt number and the timestamp are encoded into a single
+ * u64 variable to optimize the size.
+ * A 48 bit time stamp and a 16 bit IRQ number are more than sufficient.
+ * Who cares about an IRQ after 78 hours of idle time?
+ */
+static inline u64 irq_timing_encode(u64 timestamp, int irq)
+{
+	return (timestamp << 16) | irq;
+}
+
+static inline int irq_timing_decode(u64 value, u64 *timestamp)
+{
+	*timestamp = value >> 16;
+	return value & U16_MAX;
+}
+
+/*
+ * The function record_irq_time is only called in one place in the
+ * interrupt handler. We want this function always inlined so the code
+ * inside is embedded in the caller and the static key branching code
+ * can act at the higher level. Without the explicit __always_inline we
+ * can end up with a function call and a small overhead in the hot path
+ * for nothing.
+ */
+static __always_inline void record_irq_time(struct irq_desc *desc)
+{
+	if (!static_branch_likely(&irq_timing_enabled))
+		return;
+
+	if (desc->istate & IRQS_TIMINGS) {
+		struct irq_timings *timings = this_cpu_ptr(&irq_timings);
+
+		timings->values[timings->count & IRQ_TIMINGS_MASK] =
+			irq_timing_encode(local_clock(),
+					  irq_desc_get_irq(desc));
+
+		timings->count++;
+	}
+}
+#else
+static inline void irq_remove_timings(struct irq_desc *desc) {}
+static inline void irq_setup_timings(struct irq_desc *desc,
+				     struct irqaction *act) {};
+static inline void record_irq_time(struct irq_desc *desc) {}
+#endif /* CONFIG_IRQ_TIMINGS */
+
+
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+			   int num_ct, unsigned int irq_base,
+			   void __iomem *reg_base, irq_flow_handler_t handler);
+#else
+static inline void
+irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+		      int num_ct, unsigned int irq_base,
+		      void __iomem *reg_base, irq_flow_handler_t handler) { }
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+	return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+	return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+	cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+	cpumask_copy(mask, desc->pending_mask);
+}
+static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+{
+	return desc->pending_mask;
+}
+bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
+#else /* CONFIG_GENERIC_PENDING_IRQ */
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+	return true;
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+	return false;
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+}
+static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+{
+	return NULL;
+}
+static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
+{
+	return false;
+}
+#endif /* !CONFIG_GENERIC_PENDING_IRQ */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+#include <linux/debugfs.h>
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
+static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
+{
+	debugfs_remove(desc->debugfs_file);
+}
+# ifdef CONFIG_IRQ_DOMAIN
+void irq_domain_debugfs_init(struct dentry *root);
+# else
+static inline void irq_domain_debugfs_init(struct dentry *root) { }
+# endif
+#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
+static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
+{
+}
+static inline void irq_remove_debugfs_entry(struct irq_desc *d)
+{
+}
+#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
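
A short worked example of the 48/16 bit packing done by irq_timing_encode()/irq_timing_decode() above (illustrative only, the constants are arbitrary):

	#include <linux/kernel.h>
	#include <linux/bug.h>

	static void irq_timing_packing_example(void)
	{
		u64 ts = 123456789012345ULL;	/* ~34 hours expressed in ns */
		int irq = 29;
		u64 enc, ts2;
		int irq2;

		enc = (ts << 16) | irq;		/* what irq_timing_encode() does */
		irq2 = enc & U16_MAX;		/* what irq_timing_decode() does */
		ts2  = enc >> 16;

		/* irq2 == 29 and ts2 == ts hold as long as ts fits in 48 bits */
		WARN_ON(irq2 != irq || ts2 != ts);
	}
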
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 00bb0ae..8bbd064 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -4,7 +4,7 @@
  *
  * This file contains the interrupt descriptor management code
  *
- * Detailed information is available in Documentation/DocBook/genericirq
+ * Detailed information is available in Documentation/core-api/genericirq.rst
  *
  */
 #include <linux/irq.h>
@@ -54,14 +54,25 @@ static void __init init_irq_default_affinity(void)
 #endif
 
 #ifdef CONFIG_SMP
-static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
+static int alloc_masks(struct irq_desc *desc, int node)
 {
 	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
-				     gfp, node))
+				     GFP_KERNEL, node))
 		return -ENOMEM;
 
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
+				     GFP_KERNEL, node)) {
+		free_cpumask_var(desc->irq_common_data.affinity);
+		return -ENOMEM;
+	}
+#endif
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+		free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
 		free_cpumask_var(desc->irq_common_data.affinity);
 		return -ENOMEM;
 	}
@@ -86,7 +97,7 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 
 #else
 static inline int
-alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
+alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
 #endif
@@ -105,6 +116,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 	desc->irq_data.chip_data = NULL;
 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
+	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
 	desc->irq_count = 0;
@@ -324,6 +336,9 @@ static void free_masks(struct irq_desc *desc)
 	free_cpumask_var(desc->pending_mask);
 #endif
 	free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
@@ -344,9 +359,8 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 				   struct module *owner)
 {
 	struct irq_desc *desc;
-	gfp_t gfp = GFP_KERNEL;
 
-	desc = kzalloc_node(sizeof(*desc), gfp, node);
+	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
 	if (!desc)
 		return NULL;
 	/* allocate based on nr_cpu_ids */
@@ -354,7 +368,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	if (!desc->kstat_irqs)
 		goto err_desc;
 
-	if (alloc_masks(desc, gfp, node))
+	if (alloc_masks(desc, node))
 		goto err_kstat;
 
 	raw_spin_lock_init(&desc->lock);
@@ -394,6 +408,7 @@ static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	irq_remove_debugfs_entry(desc);
 	unregister_irq_proc(irq, desc);
 
 	/*
@@ -480,7 +495,8 @@ int __init early_irq_init(void)
 
 	/* Let arch update nr_irqs and return the nr of preallocated irqs */
 	initcnt = arch_probe_nr_irqs();
-	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
+	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
+	       NR_IRQS, nr_irqs, initcnt);
 
 	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
 		nr_irqs = IRQ_BITMAP_BITS;
@@ -516,14 +532,14 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
-	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
 		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		alloc_masks(&desc[i], GFP_KERNEL, node);
+		alloc_masks(&desc[i], node);
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 31805f2..14fe862 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -26,39 +26,69 @@ static struct irq_domain *irq_default_domain;
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
 
 struct irqchip_fwid {
-	struct fwnode_handle fwnode;
-	char *name;
+	struct fwnode_handle	fwnode;
+	unsigned int		type;
+	char			*name;
 	void *data;
 };
 
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static void debugfs_add_domain_dir(struct irq_domain *d);
+static void debugfs_remove_domain_dir(struct irq_domain *d);
+#else
+static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
+static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
+#endif
+
 /**
  * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  *                           identifying an irq domain
- * @data: optional user-provided data
+ * @type:	Type of irqchip_fwnode. See linux/irqdomain.h
+ * @name:	Optional user provided domain name
+ * @id:		Optional user provided id if name != NULL
+ * @data:	Optional user-provided data
  *
- * Allocate a struct device_node, and return a poiner to the embedded
+ * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  * fwnode_handle (or NULL on failure).
+ *
+ * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
+ * solely to transport name information to irqdomain creation code. The
+ * node is not stored. For other types the pointer is kept in the irq
+ * domain struct.
  */
-struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
+						const char *name, void *data)
 {
 	struct irqchip_fwid *fwid;
-	char *name;
+	char *n;
 
 	fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
-	name = kasprintf(GFP_KERNEL, "irqchip@%p", data);
 
-	if (!fwid || !name) {
+	switch (type) {
+	case IRQCHIP_FWNODE_NAMED:
+		n = kasprintf(GFP_KERNEL, "%s", name);
+		break;
+	case IRQCHIP_FWNODE_NAMED_ID:
+		n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
+		break;
+	default:
+		n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
+		break;
+	}
+
+	if (!fwid || !n) {
 		kfree(fwid);
-		kfree(name);
+		kfree(n);
 		return NULL;
 	}
 
-	fwid->name = name;
+	fwid->type = type;
+	fwid->name = n;
 	fwid->data = data;
 	fwid->fwnode.type = FWNODE_IRQCHIP;
 	return &fwid->fwnode;
 }
-EXPORT_SYMBOL_GPL(irq_domain_alloc_fwnode);
+EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
 
 /**
  * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
@@ -97,26 +127,82 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 				    void *host_data)
 {
 	struct device_node *of_node = to_of_node(fwnode);
+	struct irqchip_fwid *fwid;
 	struct irq_domain *domain;
 
+	static atomic_t unknown_domains;
+
 	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
 			      GFP_KERNEL, of_node_to_nid(of_node));
 	if (WARN_ON(!domain))
 		return NULL;
 
+	if (fwnode && is_fwnode_irqchip(fwnode)) {
+		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+
+		switch (fwid->type) {
+		case IRQCHIP_FWNODE_NAMED:
+		case IRQCHIP_FWNODE_NAMED_ID:
+			domain->name = kstrdup(fwid->name, GFP_KERNEL);
+			if (!domain->name) {
+				kfree(domain);
+				return NULL;
+			}
+			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+			break;
+		default:
+			domain->fwnode = fwnode;
+			domain->name = fwid->name;
+			break;
+		}
+	} else if (of_node) {
+		char *name;
+
+		/*
+		 * DT paths contain '/', which debugfs is legitimately
+		 * unhappy about. Replace them with ':', which does
+		 * the trick and is not as offensive as '\'...
+		 */
+		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
+		if (!name) {
+			kfree(domain);
+			return NULL;
+		}
+
+		strreplace(name, '/', ':');
+
+		domain->name = name;
+		domain->fwnode = fwnode;
+		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+	}
+
+	if (!domain->name) {
+		if (fwnode) {
+			pr_err("Invalid fwnode type (%d) for irqdomain\n",
+			       fwnode->type);
+		}
+		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
+					 atomic_inc_return(&unknown_domains));
+		if (!domain->name) {
+			kfree(domain);
+			return NULL;
+		}
+		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+	}
+
 	of_node_get(of_node);
 
 	/* Fill structure */
 	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 	domain->ops = ops;
 	domain->host_data = host_data;
-	domain->fwnode = fwnode;
 	domain->hwirq_max = hwirq_max;
 	domain->revmap_size = size;
 	domain->revmap_direct_max_irq = direct_max;
 	irq_domain_check_hierarchy(domain);
 
 	mutex_lock(&irq_domain_mutex);
+	debugfs_add_domain_dir(domain);
 	list_add(&domain->link, &irq_domain_list);
 	mutex_unlock(&irq_domain_mutex);
 
@@ -136,6 +222,7 @@ EXPORT_SYMBOL_GPL(__irq_domain_add);
 void irq_domain_remove(struct irq_domain *domain)
 {
 	mutex_lock(&irq_domain_mutex);
+	debugfs_remove_domain_dir(domain);
 
 	WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 
@@ -152,10 +239,43 @@ void irq_domain_remove(struct irq_domain *domain)
 	pr_debug("Removed domain %s\n", domain->name);
 
 	of_node_put(irq_domain_get_of_node(domain));
+	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
+		kfree(domain->name);
 	kfree(domain);
 }
 EXPORT_SYMBOL_GPL(irq_domain_remove);
 
+void irq_domain_update_bus_token(struct irq_domain *domain,
+				 enum irq_domain_bus_token bus_token)
+{
+	char *name;
+
+	if (domain->bus_token == bus_token)
+		return;
+
+	mutex_lock(&irq_domain_mutex);
+
+	domain->bus_token = bus_token;
+
+	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
+	if (!name) {
+		mutex_unlock(&irq_domain_mutex);
+		return;
+	}
+
+	debugfs_remove_domain_dir(domain);
+
+	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
+		kfree(domain->name);
+	else
+		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
+
+	domain->name = name;
+	debugfs_add_domain_dir(domain);
+
+	mutex_unlock(&irq_domain_mutex);
+}
+
 /**
  * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  * @of_node: pointer to interrupt controller's device tree node.
@@ -344,6 +464,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 
 	irq_data->domain = NULL;
 	irq_data->hwirq = 0;
+	domain->mapcount--;
 
 	/* Clear reverse map for this hwirq */
 	if (hwirq < domain->revmap_size) {
@@ -395,6 +516,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 			domain->name = irq_data->chip->name;
 	}
 
+	domain->mapcount++;
 	if (hwirq < domain->revmap_size) {
 		domain->linear_revmap[hwirq] = virq;
 	} else {
@@ -746,13 +868,54 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
+static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
+{
+	struct irq_domain *domain;
+	struct irq_data *data;
+
+	domain = desc->irq_data.domain;
+	data = &desc->irq_data;
+
+	while (domain) {
+		unsigned int irq = data->irq;
+		unsigned long hwirq = data->hwirq;
+		struct irq_chip *chip;
+		bool direct;
+
+		if (data == &desc->irq_data)
+			seq_printf(m, "%5d  ", irq);
+		else
+			seq_printf(m, "%5d+ ", irq);
+		seq_printf(m, "0x%05lx  ", hwirq);
+
+		chip = irq_data_get_irq_chip(data);
+		seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");
+
+		seq_printf(m, data ? "0x%p  " : "  %p  ",
+			   irq_data_get_irq_chip_data(data));
+
+		seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
+		direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
+		seq_printf(m, "%6s%-8s  ",
+			   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
+			   direct ? "(DIRECT)" : "");
+		seq_printf(m, "%s\n", domain->name);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+		domain = domain->parent;
+		data = data->parent_data;
+#else
+		domain = NULL;
+#endif
+	}
+}
+
 static int virq_debug_show(struct seq_file *m, void *private)
 {
 	unsigned long flags;
 	struct irq_desc *desc;
 	struct irq_domain *domain;
 	struct radix_tree_iter iter;
-	void *data, **slot;
+	void **slot;
 	int i;
 
 	seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
@@ -760,15 +923,26 @@ static int virq_debug_show(struct seq_file *m, void *private)
 	mutex_lock(&irq_domain_mutex);
 	list_for_each_entry(domain, &irq_domain_list, link) {
 		struct device_node *of_node;
+		const char *name;
+
 		int count = 0;
+
 		of_node = irq_domain_get_of_node(domain);
+		if (of_node)
+			name = of_node_full_name(of_node);
+		else if (is_fwnode_irqchip(domain->fwnode))
+			name = container_of(domain->fwnode, struct irqchip_fwid,
+					    fwnode)->name;
+		else
+			name = "";
+
 		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
 			count++;
 		seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
 			   domain == irq_default_domain ? '*' : ' ', domain->name,
 			   domain->revmap_size + count, domain->revmap_size,
 			   domain->revmap_direct_max_irq,
-			   of_node ? of_node_full_name(of_node) : "");
+			   name);
 	}
 	mutex_unlock(&irq_domain_mutex);
 
@@ -782,30 +956,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		domain = desc->irq_data.domain;
-
-		if (domain) {
-			struct irq_chip *chip;
-			int hwirq = desc->irq_data.hwirq;
-			bool direct;
-
-			seq_printf(m, "%5d  ", i);
-			seq_printf(m, "0x%05x  ", hwirq);
-
-			chip = irq_desc_get_chip(desc);
-			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");
-
-			data = irq_desc_get_chip_data(desc);
-			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
-
-			seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
-			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
-			seq_printf(m, "%6s%-8s  ",
-				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
-				   direct ? "(DIRECT)" : "");
-			seq_printf(m, "%s\n", desc->irq_data.domain->name);
-		}
-
+		virq_debug_show_one(m, desc);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
@@ -973,6 +1124,7 @@ static void irq_domain_insert_irq(int virq)
 		struct irq_domain *domain = data->domain;
 		irq_hw_number_t hwirq = data->hwirq;
 
+		domain->mapcount++;
 		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = virq;
 		} else {
@@ -1002,6 +1154,7 @@ static void irq_domain_remove_irq(int virq)
 		struct irq_domain *domain = data->domain;
 		irq_hw_number_t hwirq = data->hwirq;
 
+		domain->mapcount--;
 		if (hwirq < domain->revmap_size) {
 			domain->linear_revmap[hwirq] = 0;
 		} else {
@@ -1189,43 +1342,18 @@ void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
 
-static bool irq_domain_is_auto_recursive(struct irq_domain *domain)
-{
-	return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE;
-}
-
-static void irq_domain_free_irqs_recursive(struct irq_domain *domain,
+static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
 	domain->ops->free(domain, irq_base, nr_irqs);
-	if (irq_domain_is_auto_recursive(domain)) {
-		BUG_ON(!domain->parent);
-		irq_domain_free_irqs_recursive(domain->parent, irq_base,
-					       nr_irqs);
-	}
 }
 
-int irq_domain_alloc_irqs_recursive(struct irq_domain *domain,
+int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
 				    unsigned int irq_base,
 				    unsigned int nr_irqs, void *arg)
 {
-	int ret = 0;
-	struct irq_domain *parent = domain->parent;
-	bool recursive = irq_domain_is_auto_recursive(domain);
-
-	BUG_ON(recursive && !parent);
-	if (recursive)
-		ret = irq_domain_alloc_irqs_recursive(parent, irq_base,
-						      nr_irqs, arg);
-	if (ret < 0)
-		return ret;
-
-	ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg);
-	if (ret < 0 && recursive)
-		irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs);
-
-	return ret;
+	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
 }
 
 /**
@@ -1286,7 +1414,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 	}
 
 	mutex_lock(&irq_domain_mutex);
-	ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg);
+	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
 	if (ret < 0) {
 		mutex_unlock(&irq_domain_mutex);
 		goto out_free_irq_data;
@@ -1321,7 +1449,7 @@ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 	mutex_lock(&irq_domain_mutex);
 	for (i = 0; i < nr_irqs; i++)
 		irq_domain_remove_irq(virq + i);
-	irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs);
+	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_domain_free_irq_data(virq, nr_irqs);
@@ -1341,15 +1469,11 @@ int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
 				 unsigned int irq_base, unsigned int nr_irqs,
 				 void *arg)
 {
-	/* irq_domain_alloc_irqs_recursive() has called parent's alloc() */
-	if (irq_domain_is_auto_recursive(domain))
-		return 0;
+	if (!domain->parent)
+		return -ENOSYS;
 
-	domain = domain->parent;
-	if (domain)
-		return irq_domain_alloc_irqs_recursive(domain, irq_base,
-						       nr_irqs, arg);
-	return -ENOSYS;
+	return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
+					       nr_irqs, arg);
 }
 EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 
@@ -1364,10 +1488,10 @@ EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
 void irq_domain_free_irqs_parent(struct irq_domain *domain,
 				 unsigned int irq_base, unsigned int nr_irqs)
 {
-	/* irq_domain_free_irqs_recursive() will call parent's free */
-	if (!irq_domain_is_auto_recursive(domain) && domain->parent)
-		irq_domain_free_irqs_recursive(domain->parent, irq_base,
-					       nr_irqs);
+	if (!domain->parent)
+		return;
+
+	irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
@@ -1487,3 +1611,78 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
 {
 }
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static struct dentry *domain_dir;
+
+static void
+irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
+{
+	seq_printf(m, "%*sname:   %s\n", ind, "", d->name);
+	seq_printf(m, "%*ssize:   %u\n", ind + 1, "",
+		   d->revmap_size + d->revmap_direct_max_irq);
+	seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
+	seq_printf(m, "%*sflags:  0x%08x\n", ind +1 , "", d->flags);
+#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
+	if (!d->parent)
+		return;
+	seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
+	irq_domain_debug_show_one(m, d->parent, ind + 4);
+#endif
+}
+
+static int irq_domain_debug_show(struct seq_file *m, void *p)
+{
+	struct irq_domain *d = m->private;
+
+	/* Default domain? Might be NULL */
+	if (!d) {
+		if (!irq_default_domain)
+			return 0;
+		d = irq_default_domain;
+	}
+	irq_domain_debug_show_one(m, d, 0);
+	return 0;
+}
+
+static int irq_domain_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_domain_debug_show, inode->i_private);
+}
+
+static const struct file_operations dfs_domain_ops = {
+	.open		= irq_domain_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void debugfs_add_domain_dir(struct irq_domain *d)
+{
+	if (!d->name || !domain_dir || d->debugfs_file)
+		return;
+	d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
+					      &dfs_domain_ops);
+}
+
+static void debugfs_remove_domain_dir(struct irq_domain *d)
+{
+	if (d->debugfs_file)
+		debugfs_remove(d->debugfs_file);
+}
+
+void __init irq_domain_debugfs_init(struct dentry *root)
+{
+	struct irq_domain *d;
+
+	domain_dir = debugfs_create_dir("domains", root);
+	if (!domain_dir)
+		return;
+
+	debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+	mutex_lock(&irq_domain_mutex);
+	list_for_each_entry(d, &irq_domain_list, link)
+		debugfs_add_domain_dir(d);
+	mutex_unlock(&irq_domain_mutex);
+}
+#endif
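
For context, a hedged sketch of how an irqchip without a firmware node might use the new typed fwnode allocator to give its domain a readable name. The "foo-msi" name and the helper are hypothetical; per the comment in __irq_domain_alloc_fwnode(), a named fwnode only transports the name and is not stored by the domain, which duplicates the string:

	#include <linux/irqdomain.h>

	static struct irq_domain *
	foo_create_named_domain(unsigned int nr_irqs,
				const struct irq_domain_ops *ops, void *host_data)
	{
		struct fwnode_handle *fwnode;
		struct irq_domain *domain;

		fwnode = __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0,
						   "foo-msi", NULL);
		if (!fwnode)
			return NULL;

		domain = irq_domain_create_linear(fwnode, nr_irqs, ops, host_data);

		/* The domain copied the name, so the transport node can go */
		irq_domain_free_fwnode(fwnode);
		return domain;
	}
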
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 070be98..5c11c17 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -168,34 +168,6 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 }
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-static inline bool irq_can_move_pcntxt(struct irq_data *data)
-{
-	return irqd_can_move_in_process_context(data);
-}
-static inline bool irq_move_pending(struct irq_data *data)
-{
-	return irqd_is_setaffinity_pending(data);
-}
-static inline void
-irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
-{
-	cpumask_copy(desc->pending_mask, mask);
-}
-static inline void
-irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
-{
-	cpumask_copy(mask, desc->pending_mask);
-}
-#else
-static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
-static inline bool irq_move_pending(struct irq_data *data) { return false; }
-static inline void
-irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
-static inline void
-irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
-#endif
-
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
@@ -345,15 +317,18 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+int irq_setup_affinity(struct irq_desc *desc)
 {
 	struct cpumask *set = irq_default_affinity;
-	int node = irq_desc_get_node(desc);
+	int ret, node = irq_desc_get_node(desc);
+	static DEFINE_RAW_SPINLOCK(mask_lock);
+	static struct cpumask mask;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!__irq_can_set_affinity(desc))
 		return 0;
 
+	raw_spin_lock(&mask_lock);
 	/*
 	 * Preserve the managed affinity setting and a userspace affinity
 	 * setup, but make sure that one of the targets is online.
@@ -367,46 +342,40 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 	}
 
-	cpumask_and(mask, cpu_online_mask, set);
+	cpumask_and(&mask, cpu_online_mask, set);
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
 		/* make sure at least one of the cpus in nodemask is online */
-		if (cpumask_intersects(mask, nodemask))
-			cpumask_and(mask, mask, nodemask);
+		if (cpumask_intersects(&mask, nodemask))
+			cpumask_and(&mask, &mask, nodemask);
 	}
-	irq_do_set_affinity(&desc->irq_data, mask, false);
-	return 0;
+	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
+	raw_spin_unlock(&mask_lock);
+	return ret;
 }
 #else
 /* Wrapper for ALPHA specific affinity selector magic */
-static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
+int irq_setup_affinity(struct irq_desc *desc)
 {
-	return irq_select_affinity(irq_desc_get_irq(d));
+	return irq_select_affinity(irq_desc_get_irq(desc));
 }
 #endif
 
 /*
- * Called when affinity is set via /proc/irq
+ * Called when a bogus affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
+int irq_select_affinity_usr(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(desc, mask);
+	ret = irq_setup_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
-
-#else
-static inline int
-setup_affinity(struct irq_desc *desc, struct cpumask *mask)
-{
-	return 0;
-}
 #endif
 
 /**
@@ -533,9 +502,15 @@ void __enable_irq(struct irq_desc *desc)
 			goto err_out;
 		/* Prevent probing on this irq: */
 		irq_settings_set_noprobe(desc);
-		irq_enable(desc);
-		check_irq_resend(desc);
-		/* fall-through */
+		/*
+		 * Call irq_startup() not irq_enable() here because the
+		 * interrupt might be marked NOAUTOEN. So irq_startup()
+		 * needs to be invoked when it gets enabled the first
+		 * time. If it was already started up, then irq_startup()
+		 * will invoke irq_enable() under the hood.
+		 */
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		break;
 	}
 	default:
 		desc->depth--;
@@ -1122,7 +1097,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
-	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -1181,11 +1155,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 	}
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto out_thread;
-	}
-
 	/*
 	 * Drivers are often written to work w/o knowledge about the
 	 * underlying irq chip implementation, so a request for a
@@ -1250,7 +1219,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		if (thread_mask == ~0UL) {
 			ret = -EBUSY;
-			goto out_mask;
+			goto out_unlock;
 		}
 		/*
 		 * The thread_mask for the action is or'ed to
@@ -1294,7 +1263,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
 		       irq);
 		ret = -EINVAL;
-		goto out_mask;
+		goto out_unlock;
 	}
 
 	if (!shared) {
@@ -1302,7 +1271,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (ret) {
 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
 			       new->name, irq, desc->irq_data.chip->name);
-			goto out_mask;
+			goto out_unlock;
 		}
 
 		init_waitqueue_head(&desc->wait_for_threads);
@@ -1312,8 +1281,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			ret = __irq_set_trigger(desc,
 						new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret)
-				goto out_mask;
+			if (ret) {
+				irq_release_resources(desc);
+				goto out_unlock;
+			}
 		}
 
 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
@@ -1328,20 +1299,25 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (new->flags & IRQF_ONESHOT)
 			desc->istate |= IRQS_ONESHOT;
 
-		if (irq_settings_can_autoenable(desc))
-			irq_startup(desc, true);
-		else
-			/* Undo nested disables: */
-			desc->depth = 1;
-
 		/* Exclude IRQ from balancing if requested */
 		if (new->flags & IRQF_NOBALANCING) {
 			irq_settings_set_no_balancing(desc);
 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 		}
 
-		/* Set default affinity mask once everything is setup */
-		setup_affinity(desc, mask);
+		if (irq_settings_can_autoenable(desc)) {
+			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		} else {
+			/*
+			 * Shared interrupts do not go well with disabling
+			 * auto enable. A sharing user might request the
+			 * interrupt while it is still disabled and then
+			 * wait for interrupts forever.
+			 */
+			WARN_ON_ONCE(new->flags & IRQF_SHARED);
+			/* Undo nested disables: */
+			desc->depth = 1;
+		}
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1372,6 +1348,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
+	irq_setup_timings(desc, new);
+
 	/*
 	 * Strictly no need to wake it up, but hung_task complains
 	 * when no hard interrupt wakes the thread up.
@@ -1382,10 +1360,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		wake_up_process(new->secondary->thread);
 
 	register_irq_proc(irq, desc);
+	irq_add_debugfs_entry(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
-	free_cpumask_var(mask);
-
 	return 0;
 
 mismatch:
@@ -1398,9 +1375,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 	ret = -EBUSY;
 
-out_mask:
+out_unlock:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	free_cpumask_var(mask);
 
 out_thread:
 	if (new->thread) {
@@ -1500,6 +1476,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		irq_settings_clr_disable_unlazy(desc);
 		irq_shutdown(desc);
 		irq_release_resources(desc);
+		irq_remove_timings(desc);
 	}
 
 #ifdef CONFIG_SMP
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 37ddb7b..6ca054a 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,6 +4,36 @@
 
 #include "internals.h"
 
+/**
+ * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
+ * @desc:		Interrupt descriptor to clean up
+ * @force_clear:	If set, clear the move pending bit unconditionally.
+ *			If not set, clear it only when the dying CPU is the
+ *			last one in the pending mask.
+ *
+ * Returns true if the pending bit was set and the pending mask contains an
+ * online CPU other than the dying CPU.
+ */
+bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+
+	if (!irqd_is_setaffinity_pending(data))
+		return false;
+
+	/*
+	 * The outgoing CPU might be the last online target in a pending
+	 * interrupt move. If that's the case, clear the pending move bit.
+	 */
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
+		irqd_clr_move_pending(data);
+		return false;
+	}
+	if (force_clear)
+		irqd_clr_move_pending(data);
+	return true;
+}
+
 void irq_move_masked_irq(struct irq_data *idata)
 {
 	struct irq_desc *desc = irq_data_to_desc(idata);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index ddc2f54..48eadf4 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -265,13 +265,20 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
 					 struct msi_domain_info *info,
 					 struct irq_domain *parent)
 {
+	struct irq_domain *domain;
+
 	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
 		msi_domain_update_dom_ops(info);
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		msi_domain_update_chip_ops(info);
 
-	return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
-					   fwnode, &msi_domain_ops, info);
+	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
+					     fwnode, &msi_domain_ops, info);
+
+	if (domain && !domain->name && info->chip)
+		domain->name = info->chip->name;
+
+	return domain;
 }
 
 int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
@@ -308,7 +315,7 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 
 		ops->set_desc(arg, desc);
 		/* Assumes the domain mutex is held! */
-		ret = irq_domain_alloc_irqs_recursive(domain, virq, 1, arg);
+		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
 		if (ret)
 			break;
 
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c53edad..7f9642a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -37,19 +37,47 @@ static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
 
-static int show_irq_affinity(int type, struct seq_file *m, void *v)
+enum {
+	AFFINITY,
+	AFFINITY_LIST,
+	EFFECTIVE,
+	EFFECTIVE_LIST,
+};
+
+static int show_irq_affinity(int type, struct seq_file *m)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = desc->irq_common_data.affinity;
+	const struct cpumask *mask;
 
+	switch (type) {
+	case AFFINITY:
+	case AFFINITY_LIST:
+		mask = desc->irq_common_data.affinity;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (irqd_is_setaffinity_pending(&desc->irq_data))
-		mask = desc->pending_mask;
+		if (irqd_is_setaffinity_pending(&desc->irq_data))
+			mask = desc->pending_mask;
 #endif
-	if (type)
+		break;
+	case EFFECTIVE:
+	case EFFECTIVE_LIST:
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+		mask = desc->irq_common_data.effective_affinity;
+		break;
+#else
+		return -EINVAL;
+#endif
+	}
+
+	switch (type) {
+	case AFFINITY_LIST:
+	case EFFECTIVE_LIST:
 		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
-	else
+		break;
+	case AFFINITY:
+	case EFFECTIVE:
 		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
+		break;
+	}
 	return 0;
 }
 
@@ -80,12 +108,12 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
 int no_irq_affinity;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
-	return show_irq_affinity(0, m, v);
+	return show_irq_affinity(AFFINITY, m);
 }
 
 static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
 {
-	return show_irq_affinity(1, m, v);
+	return show_irq_affinity(AFFINITY_LIST, m);
 }
 
 
@@ -120,9 +148,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	 * one online CPU still has to be targeted.
 	 */
 	if (!cpumask_intersects(new_value, cpu_online_mask)) {
-		/* Special case for empty set - allow the architecture
-		   code to set default SMP affinity. */
-		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
+		/*
+		 * Special case for empty set - allow the architecture code
+		 * to set default SMP affinity.
+		 */
+		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
 	} else {
 		irq_set_affinity(irq, new_value);
 		err = count;
@@ -183,6 +213,44 @@ static const struct file_operations irq_affinity_list_proc_fops = {
 	.write		= irq_affinity_list_proc_write,
 };
 
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
+{
+	return show_irq_affinity(EFFECTIVE, m);
+}
+
+static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
+{
+	return show_irq_affinity(EFFECTIVE_LIST, m);
+}
+
+static int irq_effective_aff_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_effective_aff_proc_show, PDE_DATA(inode));
+}
+
+static int irq_effective_aff_list_proc_open(struct inode *inode,
+					    struct file *file)
+{
+	return single_open(file, irq_effective_aff_list_proc_show,
+			   PDE_DATA(inode));
+}
+
+static const struct file_operations irq_effective_aff_proc_fops = {
+	.open		= irq_effective_aff_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct file_operations irq_effective_aff_list_proc_fops = {
+	.open		= irq_effective_aff_list_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
 static int default_affinity_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
@@ -324,6 +392,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
 	static DEFINE_MUTEX(register_lock);
+	void __maybe_unused *irqp = (void *)(unsigned long) irq;
 	char name [MAX_NAMELEN];
 
 	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
@@ -349,20 +418,25 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
 	proc_create_data("smp_affinity", 0644, desc->dir,
-			 &irq_affinity_proc_fops, (void *)(long)irq);
+			 &irq_affinity_proc_fops, irqp);
 
 	/* create /proc/irq/<irq>/affinity_hint */
 	proc_create_data("affinity_hint", 0444, desc->dir,
-			 &irq_affinity_hint_proc_fops, (void *)(long)irq);
+			 &irq_affinity_hint_proc_fops, irqp);
 
 	/* create /proc/irq/<irq>/smp_affinity_list */
 	proc_create_data("smp_affinity_list", 0644, desc->dir,
-			 &irq_affinity_list_proc_fops, (void *)(long)irq);
+			 &irq_affinity_list_proc_fops, irqp);
 
 	proc_create_data("node", 0444, desc->dir,
-			 &irq_node_proc_fops, (void *)(long)irq);
+			 &irq_node_proc_fops, irqp);
+# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	proc_create_data("effective_affinity", 0444, desc->dir,
+			 &irq_effective_aff_proc_fops, irqp);
+	proc_create_data("effective_affinity_list", 0444, desc->dir,
+			 &irq_effective_aff_list_proc_fops, irqp);
+# endif
 #endif
-
 	proc_create_data("spurious", 0444, desc->dir,
 			 &irq_spurious_proc_fops, (void *)(long)irq);
 
@@ -381,6 +455,10 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
 	remove_proc_entry("affinity_hint", desc->dir);
 	remove_proc_entry("smp_affinity_list", desc->dir);
 	remove_proc_entry("node", desc->dir);
+# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	remove_proc_entry("effective_affinity", desc->dir);
+	remove_proc_entry("effective_affinity_list", desc->dir);
+# endif
 #endif
 	remove_proc_entry("spurious", desc->dir);
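
For illustration only, a minimal userspace sketch (not part of the patch) that dumps the per-IRQ affinity files handled above; the IRQ number is an arbitrary example, and effective_affinity/effective_affinity_list exist only when CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK is enabled:

/* dump_irq_affinity.c - print the affinity files for one IRQ (IRQ 19 is just an example) */
#include <stdio.h>

static void dump(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-40s %s", path, buf);
	fclose(f);
}

int main(void)
{
	dump("/proc/irq/19/smp_affinity");
	dump("/proc/irq/19/smp_affinity_list");
	dump("/proc/irq/19/effective_affinity");
	dump("/proc/irq/19/effective_affinity_list");
	return 0;
}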
 
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
new file mode 100644
index 0000000..c8c1d07
--- /dev/null
+++ b/kernel/irq/timings.c
@@ -0,0 +1,369 @@
+/*
+ * linux/kernel/irq/timings.c
+ *
+ * Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/interrupt.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+
+#include <trace/events/irq.h>
+
+#include "internals.h"
+
+DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
+
+DEFINE_PER_CPU(struct irq_timings, irq_timings);
+
+struct irqt_stat {
+	u64	next_evt;
+	u64	last_ts;
+	u64	variance;
+	u32	avg;
+	u32	nr_samples;
+	int	anomalies;
+	int	valid;
+};
+
+static DEFINE_IDR(irqt_stats);
+
+void irq_timings_enable(void)
+{
+	static_branch_enable(&irq_timing_enabled);
+}
+
+void irq_timings_disable(void)
+{
+	static_branch_disable(&irq_timing_enabled);
+}
+
+/**
+ * irqs_update - update the irq timing statistics with a new timestamp
+ *
+ * @irqs: an irqt_stat struct pointer
+ * @ts: the new timestamp
+ *
+ * The statistics are computed online, in other words, the code is
+ * designed to compute the statistics on a stream of values rather
+ * than doing multiple passes on the values to compute the average,
+ * then the variance. The integer division introduces a loss of
+ * precision, but the error margin is acceptable compared with the
+ * results double-precision floating point would give: we are dealing
+ * with nanoseconds, hence large numbers, so the lost mantissa bits
+ * are negligible, especially when converting the time to usec
+ * afterwards.
+ *
+ * The computation happens at idle time. When the CPU is not idle, the
+ * interrupts' timestamps are stored in the circular buffer; when the
+ * CPU goes idle and this routine is called, all the buffered values
+ * are injected into the statistical model, continuing to extend the
+ * statistics from the previous busy-idle cycle.
+ *
+ * Observation shows that a device will trigger a burst of periodic
+ * interrupts followed by one or two longer intervals, for instance
+ * when an SD card device flushes its cache, after which the periodic
+ * intervals occur again. A one-second inactivity period resets the
+ * stats, which guarantees the statistical values won't exceed
+ * 1x10^9, thus the computation won't overflow.
+ *
+ * Basically, the purpose of the algorithm is to watch the periodic
+ * interrupts and eliminate the peaks.
+ *
+ * An interrupt is considered periodically stable if the intervals
+ * between its occurrences follow the normal distribution, thus the
+ * values comply with:
+ *
+ *      avg - 3 x stddev < value < avg + 3 x stddev
+ *
+ * Which can be simplified to:
+ *
+ *      -3 x stddev < value - avg < 3 x stddev
+ *
+ *      abs(value - avg) < 3 x stddev
+ *
+ * In order to save a costly square root computation, we use the
+ * variance. For the record, stddev = sqrt(variance). The equation
+ * above becomes:
+ *
+ *      abs(value - avg) < 3 x sqrt(variance)
+ *
+ * And finally we square it:
+ *
+ *      (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2
+ *
+ *      (value - avg) x (value - avg) < 9 x variance
+ *
+ * Statistically speaking, any value outside this interval is
+ * considered an anomaly and is discarded. However, a normal
+ * distribution only emerges once there are about 30 samples (the
+ * usual rule of thumb in statistics). When there are three
+ * consecutive anomalies, the statistics are reset.
+ *
+ */
+static void irqs_update(struct irqt_stat *irqs, u64 ts)
+{
+	u64 old_ts = irqs->last_ts;
+	u64 variance = 0;
+	u64 interval;
+	s64 diff;
+
+	/*
+	 * The timestamps are absolute time values, we need to compute
+	 * the timing interval between two interrupts.
+	 */
+	irqs->last_ts = ts;
+
+	/*
+	 * The interval type is u64 in order to deal with the same
+	 * type in our computation, which prevents subtle issues with
+	 * overflow, sign and division.
+	 */
+	interval = ts - old_ts;
+
+	/*
+	 * If the interrupt triggered more than one second after the
+	 * previous one, the sequence is no longer predictable for our
+	 * purpose. In this case, assume we are at the beginning of a
+	 * new sequence and this timestamp is its first value. As it is
+	 * impossible to predict anything at this point, return.
+	 *
+	 * Note the first timestamp of the sequence will always fall
+	 * in this test because the old_ts is zero. That is what we
+	 * want as we need another timestamp to compute an interval.
+	 */
+	if (interval >= NSEC_PER_SEC) {
+		memset(irqs, 0, sizeof(*irqs));
+		irqs->last_ts = ts;
+		return;
+	}
+
+	/*
+	 * Pre-compute the delta with the average as the result is
+	 * used several times in this function.
+	 */
+	diff = interval - irqs->avg;
+
+	/*
+	 * Increment the number of samples.
+	 */
+	irqs->nr_samples++;
+
+	/*
+	 * Online variance divided by the number of elements if there
+	 * is more than one sample.  Normally the formula divides by
+	 * nr_samples - 1, but we assume the number of elements will be
+	 * more than 32, and dividing by 32 instead of 31 is precise
+	 * enough.
+	 */
+	if (likely(irqs->nr_samples > 1))
+		variance = irqs->variance >> IRQ_TIMINGS_SHIFT;
+
+	/*
+	 * The rule of thumb in statistics for the normal distribution
+	 * is having at least 30 samples in order to have the model to
+	 * apply. Values outside the interval are considered as an
+	 * anomaly.
+	 */
+	if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) {
+		/*
+		 * After three consecutive anomalies, we reset the
+		 * stats as the interrupt is no longer stable enough.
+		 */
+		if (irqs->anomalies++ >= 3) {
+			memset(irqs, 0, sizeof(*irqs));
+			irqs->last_ts = ts;
+			return;
+		}
+	} else {
+		/*
+		 * The anomalies must be consecutive, so at this
+		 * point, we reset the anomaly counter.
+		 */
+		irqs->anomalies = 0;
+	}
+
+	/*
+	 * The interrupt is considered stable enough to try to predict
+	 * the next event on it.
+	 */
+	irqs->valid = 1;
+
+	/*
+	 * Online average algorithm:
+	 *
+	 *  new_average = average + ((value - average) / count)
+	 *
+	 * The variance computation depends on the new average
+	 * being computed here first.
+	 *
+	 */
+	irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);
+
+	/*
+	 * Online variance algorithm:
+	 *
+	 *  new_variance = variance + (value - average) x (value - new_average)
+	 *
+	 * Warning: irqs->avg is updated with the line above, hence
+	 * 'interval - irqs->avg' is no longer equal to 'diff'
+	 */
+	irqs->variance = irqs->variance + (diff * (interval - irqs->avg));
+
+	/*
+	 * Update the next event
+	 */
+	irqs->next_evt = ts + irqs->avg;
+}
+
+/**
+ * irq_timings_next_event - Return when the next event is supposed to arrive
+ *
+ * During the last busy cycle, the number of interrupts is incremented
+ * and stored in the irq_timings structure. This information is
+ * necessary to:
+ *
+ * - know if the index in the table wrapped up:
+ *
+ *      If more interrupts than the array size happened during the
+ *      last busy/idle cycle, the index wrapped around and we have to
+ *      begin with the next element in the array, which is the oldest
+ *      one in the sequence; otherwise we begin at index 0.
+ *
+ * - have an indication of the interrupts activity on this CPU
+ *   (eg. irq/sec)
+ *
+ * The values are 'consumed' after being inserted into the statistical
+ * model, thus the count is reinitialized.
+ *
+ * The array of values **must** be browsed in the time direction, the
+ * timestamp must increase between an element and the next one.
+ *
+ * Returns a nanosecond-based estimate of the time of the earliest
+ * expected interrupt, or U64_MAX otherwise.
+ */
+u64 irq_timings_next_event(u64 now)
+{
+	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
+	struct irqt_stat *irqs;
+	struct irqt_stat __percpu *s;
+	u64 ts, next_evt = U64_MAX;
+	int i, irq = 0;
+
+	/*
+	 * This function must be called with the local irq disabled in
+	 * order to prevent the timings circular buffer from being
+	 * updated while we are reading it.
+	 */
+	WARN_ON_ONCE(!irqs_disabled());
+
+	/*
+	 * Number of elements in the circular buffer: if the buffer
+	 * happens to have been flushed before, the number of elements
+	 * could be smaller than IRQ_TIMINGS_SIZE, so the count is used;
+	 * otherwise the array size is used, as we wrapped. The index
+	 * begins at zero when we did not wrap. This could be done more
+	 * elegantly with a proper circular array structure type, but at
+	 * the cost of extra computation in the interrupt handler hot
+	 * path. We choose efficiency.
+	 *
+	 * Inject measured irq/timestamp to the statistical model
+	 * while decrementing the counter because we consume the data
+	 * from our circular buffer.
+	 */
+	for (i = irqts->count & IRQ_TIMINGS_MASK,
+		     irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
+	     irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
+
+		irq = irq_timing_decode(irqts->values[i], &ts);
+
+		s = idr_find(&irqt_stats, irq);
+		if (s) {
+			irqs = this_cpu_ptr(s);
+			irqs_update(irqs, ts);
+		}
+	}
+
+	/*
+	 * Look through the list of interrupts' statistics for the
+	 * earliest next event.
+	 */
+	idr_for_each_entry(&irqt_stats, s, i) {
+
+		irqs = this_cpu_ptr(s);
+
+		if (!irqs->valid)
+			continue;
+
+		if (irqs->next_evt <= now) {
+			irq = i;
+			next_evt = now;
+
+			/*
+			 * This interrupt must not be used in the
+			 * future until new events occur and update
+			 * the statistics.
+			 */
+			irqs->valid = 0;
+			break;
+		}
+
+		if (irqs->next_evt < next_evt) {
+			irq = i;
+			next_evt = irqs->next_evt;
+		}
+	}
+
+	return next_evt;
+}
+
+void irq_timings_free(int irq)
+{
+	struct irqt_stat __percpu *s;
+
+	s = idr_find(&irqt_stats, irq);
+	if (s) {
+		free_percpu(s);
+		idr_remove(&irqt_stats, irq);
+	}
+}
+
+int irq_timings_alloc(int irq)
+{
+	struct irqt_stat __percpu *s;
+	int id;
+
+	/*
+	 * Some platforms can have the same private interrupt per cpu,
+	 * so this function may be called several times with the
+	 * same interrupt number. Just bail out in case the per cpu
+	 * stat structure is already allocated.
+	 */
+	s = idr_find(&irqt_stats, irq);
+	if (s)
+		return 0;
+
+	s = alloc_percpu(*s);
+	if (!s)
+		return -ENOMEM;
+
+	idr_preload(GFP_KERNEL);
+	id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
+	idr_preload_end();
+
+	if (id < 0) {
+		free_percpu(s);
+		return id;
+	}
+
+	return 0;
+}
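
To make the statistics described in the comment block of irqs_update() concrete, here is a small standalone toy (not part of the patch) that runs an online mean/variance update and the (value - avg)^2 < 9 * variance test on a handful of made-up intervals. Floating point and a tiny sample threshold are used for readability; the kernel code uses integer arithmetic, a fixed IRQ_TIMINGS_SHIFT shift instead of a division, waits for 30 samples, and resets the stats only after three consecutive anomalies, whereas this toy merely flags them:

/* online_stats.c - toy model of the online average/variance and the
 * 9 * variance anomaly test described above.
 */
#include <stdio.h>

int main(void)
{
	/* made-up intervals in ns: periodic traffic plus one outlier */
	double samples[] = { 1000, 1010, 990, 1005, 995, 8000, 1000 };
	double avg = 0, m2 = 0;		/* m2: sum of squared deviations */
	int n = 0;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		double x = samples[i];
		double diff = x - avg;	/* deviation from the old average */
		double variance = n > 1 ? m2 / n : 0;

		if (n > 1 && diff * diff > 9 * variance)
			printf("%6.0f ns: anomaly, outside avg +/- 3*stddev\n", x);

		n++;
		avg += diff / n;		/* online average */
		m2 += diff * (x - avg);		/* online variance accumulator */
		printf("%6.0f ns: avg=%.1f variance=%.1f\n", x, avg, m2 / n);
	}
	return 0;
}
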
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6c9cb20..d11c506 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
 #include <linux/bug.h>
+#include <linux/cpu.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_key *key)
 			return;
 	}
 
+	cpus_read_lock();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_key *key)
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	cpus_read_lock();
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a
@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct static_key *key,
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
+		cpus_read_unlock();
 		return;
 	}
 
@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct static_key *key,
 		jump_label_update(key);
 	}
 	jump_label_unlock();
+	cpus_read_unlock();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
@@ -334,6 +340,7 @@ void __init jump_label_init(void)
 	if (static_key_initialized)
 		return;
 
+	cpus_read_lock();
 	jump_label_lock();
 	jump_label_sort_entries(iter_start, iter_stop);
 
@@ -353,6 +360,7 @@ void __init jump_label_init(void)
 	}
 	static_key_initialized = true;
 	jump_label_unlock();
+	cpus_read_unlock();
 }
 
 #ifdef CONFIG_MODULES
@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 	struct module *mod = data;
 	int ret = 0;
 
+	cpus_read_lock();
+	jump_label_lock();
+
 	switch (val) {
 	case MODULE_STATE_COMING:
-		jump_label_lock();
 		ret = jump_label_add_module(mod);
 		if (ret) {
 			WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
 			jump_label_del_module(mod);
 		}
-		jump_label_unlock();
 		break;
 	case MODULE_STATE_GOING:
-		jump_label_lock();
 		jump_label_del_module(mod);
-		jump_label_unlock();
 		break;
 	case MODULE_STATE_LIVE:
-		jump_label_lock();
 		jump_label_invalidate_module_init(mod);
-		jump_label_unlock();
 		break;
 	}
 
+	jump_label_unlock();
+	cpus_read_unlock();
+
 	return notifier_from_errno(ret);
 }
 
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index ae1a3ba..154ffb4 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -38,6 +38,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
 #include <linux/hugetlb.h>
+#include <linux/frame.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -874,7 +875,7 @@ int kexec_load_disabled;
  * only when panic_cpu holds the current CPU number; this is the only CPU
  * which processes crash_kexec routines.
  */
-void __crash_kexec(struct pt_regs *regs)
+void __noclone __crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
@@ -896,6 +897,7 @@ void __crash_kexec(struct pt_regs *regs)
 		mutex_unlock(&kexec_mutex);
 	}
 }
+STACK_FRAME_NON_STANDARD(__crash_kexec);
 
 void crash_kexec(struct pt_regs *regs)
 {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index adfe3b4..6756d75 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -483,11 +483,6 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
-	/* Optimization never be done when disarmed */
-	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
-	    list_empty(&optimizing_list))
-		return;
-
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -495,14 +490,19 @@ static void do_optimize_kprobes(void)
 	 * This combination can cause a deadlock (cpu-hotplug try to lock
 	 * text_mutex but stop_machine can not be done because online_cpus
 	 * has been changed)
-	 * To avoid this deadlock, we need to call get_online_cpus()
+	 * To avoid this deadlock, caller must have locked cpu hotplug
 	 * for preventing cpu-hotplug outside of text_mutex locking.
 	 */
-	get_online_cpus();
+	lockdep_assert_cpus_held();
+
+	/* Optimization is never done when disarmed */
+	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
+	    list_empty(&optimizing_list))
+		return;
+
 	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 /*
@@ -513,12 +513,13 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	/* See comment in do_optimize_kprobes() */
+	lockdep_assert_cpus_held();
+
 	/* Unoptimization must be done anytime */
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	/* Ditto to do_optimize_kprobes */
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
@@ -537,7 +538,6 @@ static void do_unoptimize_kprobes(void)
 			list_del_init(&op->list);
 	}
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -562,6 +562,7 @@ static void kick_kprobe_optimizer(void)
 static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
+	cpus_read_lock();
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -587,6 +588,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
@@ -650,9 +652,8 @@ static void optimize_kprobe(struct kprobe *p)
 /* Short cut to direct unoptimizing */
 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
-	get_online_cpus();
+	lockdep_assert_cpus_held();
 	arch_unoptimize_kprobe(op);
-	put_online_cpus();
 	if (kprobe_disabled(&op->kp))
 		arch_disarm_kprobe(&op->kp);
 }
@@ -791,6 +792,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* For preparing optimization, jump_label_text_reserved() is called */
+	cpus_read_lock();
 	jump_label_lock();
 	mutex_lock(&text_mutex);
 
@@ -812,6 +814,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 out:
 	mutex_unlock(&text_mutex);
 	jump_label_unlock();
+	cpus_read_unlock();
 }
 
 #ifdef CONFIG_SYSCTL
@@ -826,6 +829,7 @@ static void optimize_all_kprobes(void)
 	if (kprobes_allow_optimization)
 		goto out;
 
+	cpus_read_lock();
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
@@ -833,6 +837,7 @@ static void optimize_all_kprobes(void)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
+	cpus_read_unlock();
 	printk(KERN_INFO "Kprobes globally optimized\n");
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -851,6 +856,7 @@ static void unoptimize_all_kprobes(void)
 		return;
 	}
 
+	cpus_read_lock();
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
@@ -859,6 +865,7 @@ static void unoptimize_all_kprobes(void)
 				unoptimize_kprobe(p, false);
 		}
 	}
+	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for unoptimizing completion */
@@ -1010,14 +1017,11 @@ static void arm_kprobe(struct kprobe *kp)
 		arm_kprobe_ftrace(kp);
 		return;
 	}
-	/*
-	 * Here, since __arm_kprobe() doesn't use stop_machine(),
-	 * this doesn't cause deadlock on text_mutex. So, we don't
-	 * need get_online_cpus().
-	 */
+	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 }
 
 /* Disarm a kprobe with text_mutex */
@@ -1027,10 +1031,12 @@ static void disarm_kprobe(struct kprobe *kp, bool reopt)
 		disarm_kprobe_ftrace(kp);
 		return;
 	}
-	/* Ditto */
+
+	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 }
 
 /*
@@ -1298,13 +1304,10 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
+	cpus_read_lock();
+
 	/* For preparing optimization, jump_label_text_reserved() is called */
 	jump_label_lock();
-	/*
-	 * Get online CPUs to avoid text_mutex deadlock.with stop machine,
-	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
-	 */
-	get_online_cpus();
 	mutex_lock(&text_mutex);
 
 	if (!kprobe_aggrprobe(orig_p)) {
@@ -1352,8 +1355,8 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 
 out:
 	mutex_unlock(&text_mutex);
-	put_online_cpus();
 	jump_label_unlock();
+	cpus_read_unlock();
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1555,9 +1558,12 @@ int register_kprobe(struct kprobe *p)
 		goto out;
 	}
 
-	mutex_lock(&text_mutex);	/* Avoiding text modification */
+	cpus_read_lock();
+	/* Prevent text modification */
+	mutex_lock(&text_mutex);
 	ret = prepare_kprobe(p);
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 	if (ret)
 		goto out;
 
@@ -1570,7 +1576,6 @@ int register_kprobe(struct kprobe *p)
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
-
 out:
 	mutex_unlock(&kprobe_mutex);
 
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
index 0450225..ec45651 100644
--- a/kernel/livepatch/Kconfig
+++ b/kernel/livepatch/Kconfig
@@ -10,6 +10,7 @@
 	depends on SYSFS
 	depends on KALLSYMS_ALL
 	depends on HAVE_LIVEPATCH
+	depends on !TRIM_UNUSED_KSYMS
 	help
 	  Say Y here if you want to support kernel live patching.
 	  This option has no runtime impact until a kernel "patch"
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index f826903..52c4e90 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -59,7 +59,11 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	ops = container_of(fops, struct klp_ops, fops);
 
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
@@ -115,7 +119,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index adc0cc6..b004a1f 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -49,6 +49,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
 /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We also allow patching functions where RCU is not watching,
+ * e.g. before user_exit(). We cannot rely on the RCU infrastructure
+ * to do the synchronization. Instead, hard-force the sched synchronization.
+ *
+ * This approach makes it possible to use RCU functions for manipulating
+ * func_stack safely.
+ */
+static void klp_synchronize_transition(void)
+{
+	schedule_on_each_cpu(klp_sync);
+}
+
+/*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
  */
@@ -73,7 +95,7 @@ static void klp_complete_transition(void)
 		 * func->transition gets cleared, the handler may choose a
 		 * removed function.
 		 */
-		synchronize_rcu();
+		klp_synchronize_transition();
 	}
 
 	if (klp_transition_patch->immediate)
@@ -92,7 +114,7 @@ static void klp_complete_transition(void)
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
-		synchronize_rcu();
+		klp_synchronize_transition();
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, task) {
@@ -136,7 +158,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-	rcu_read_lock();
+	/*
+	 * A variant of synchronize_sched() is used to allow patching functions
+	 * where RCU is not watching, see klp_synchronize_transition().
+	 */
+	preempt_disable_notrace();
 
 	/*
 	 * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +179,7 @@ void klp_update_patch_state(struct task_struct *task)
 	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
 		task->patch_state = READ_ONCE(klp_target_state);
 
-	rcu_read_unlock();
+	preempt_enable_notrace();
 }
 
 /*
@@ -539,7 +565,7 @@ void klp_reverse_transition(void)
 		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
 	/* Let any remaining calls to klp_update_patch_state() complete */
-	synchronize_rcu();
+	klp_synchronize_transition();
 
 	klp_start_transition();
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c0e31bf..7d2499b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1157,18 +1157,18 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("======================================================\n");
 	pr_warn("WARNING: possible circular locking dependency detected\n");
 	print_kernel_ident();
 	pr_warn("------------------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(check_src);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(check_tgt);
-	printk("\nwhich lock already depends on the new lock.\n\n");
-	printk("\nthe existing dependency chain (in reverse order) is:\n");
+	pr_warn("\nwhich lock already depends on the new lock.\n\n");
+	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
 	print_circular_bug_entry(entry, depth);
 
@@ -1495,13 +1495,13 @@ print_bad_irq_dependency(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
 		irqclass, irqclass);
 	print_kernel_ident();
 	pr_warn("-----------------------------------------------------\n");
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
@@ -1509,46 +1509,46 @@ print_bad_irq_dependency(struct task_struct *curr,
 		curr->softirqs_enabled);
 	print_lock(next);
 
-	printk("\nand this task is already holding:\n");
+	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
-	printk("which would create a new lock dependency:\n");
+	pr_warn("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(KERN_CONT " ->");
+	pr_cont(" ->");
 	print_lock_name(hlock_class(next));
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
-	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
+	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
 	print_lock_name(backwards_entry->class);
-	printk("\n... which became %s-irq-safe at:\n", irqclass);
+	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
-	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
-	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
-	printk("...");
+	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
+	pr_warn("...");
 
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
-	printk("\nother info that might help us debug this:\n\n");
+	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
 				hlock_class(prev), hlock_class(next));
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
+	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe dependencies between the lock to be acquired");
-	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nthe dependencies between the lock to be acquired");
+	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	if (!save_trace(&next_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -1724,22 +1724,22 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(next);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2074,21 +2074,21 @@ static void print_collision(struct task_struct *curr,
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");
 	print_kernel_ident();
 	pr_warn("----------------------------\n");
-	printk("%s/%d: ", current->comm, task_pid_nr(current));
-	printk("Hash chain already cached but the contents don't match!\n");
+	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
+	pr_warn("Hash chain already cached but the contents don't match!\n");
 
-	printk("Held locks:");
+	pr_warn("Held locks:");
 	print_chain_keys_held_locks(curr, hlock_next);
 
-	printk("Locks in cached chain:");
+	pr_warn("Locks in cached chain:");
 	print_chain_keys_chain(chain);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 #endif
@@ -2373,16 +2373,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("================================\n");
 	pr_warn("WARNING: inconsistent lock state\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------\n");
 
-	printk("inconsistent {%s} -> {%s} usage.\n",
+	pr_warn("inconsistent {%s} -> {%s} usage.\n",
 		usage_str[prev_bit], usage_str[new_bit]);
 
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
@@ -2390,16 +2390,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		trace_softirqs_enabled(curr));
 	print_lock(this);
 
-	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_usage_bug_scenario(this);
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2438,28 +2438,28 @@ print_irq_inversion_bug(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------------------\n");
-	printk("%s/%d just changed the state of lock:\n",
+	pr_warn("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
+		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
+		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other->class);
-	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
+	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 
 	/* Find a middle lock (if one exists) */
 	depth = get_lock_depth(other);
 	do {
 		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
 			break;
 		}
 		middle = entry;
@@ -2475,12 +2475,12 @@ print_irq_inversion_bug(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	if (!save_trace(&root->trace))
 		return 0;
 	print_shortest_lock_dependencies(other, root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3189,25 +3189,25 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");
 	print_kernel_ident();
 	pr_warn("----------------------------------\n");
 
-	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
 	print_lock(hlock);
 
-	printk("\nbut this task is not holding:\n");
-	printk("%s\n", hlock->nest_lock->name);
+	pr_warn("\nbut this task is not holding:\n");
+	pr_warn("%s\n", hlock->nest_lock->name);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3402,21 +3402,21 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------------------\n");
-	printk("%s/%d is trying to release lock (",
+	pr_warn("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no more locks to release!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no more locks to release!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3974,21 +3974,21 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");
 	print_kernel_ident();
 	pr_warn("---------------------------------\n");
-	printk("%s/%d is trying to contend lock (",
+	pr_warn("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no locks held!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no locks held!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -4318,17 +4318,17 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------\n");
-	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+	pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4376,14 +4376,14 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",
 	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	pr_warn("------------------------------------\n");
 	lockdep_print_held_locks(current);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4402,10 +4402,10 @@ void debug_show_all_locks(void)
 	int unlock = 1;
 
 	if (unlikely(!debug_locks)) {
-		printk("INFO: lockdep is turned off.\n");
+		pr_warn("INFO: lockdep is turned off.\n");
 		return;
 	}
-	printk("\nShowing all locks held in the system:\n");
+	pr_warn("\nShowing all locks held in the system:\n");
 
 	/*
 	 * Here we try to get the tasklist_lock as hard as possible,
@@ -4416,18 +4416,18 @@ void debug_show_all_locks(void)
 retry:
 	if (!read_trylock(&tasklist_lock)) {
 		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
+			pr_warn("hm, tasklist_lock locked, retrying... ");
 		if (count) {
 			count--;
-			printk(" #%d", 10-count);
+			pr_cont(" #%d", 10-count);
 			mdelay(200);
 			goto retry;
 		}
-		printk(" ignoring it.\n");
+		pr_cont(" ignoring it.\n");
 		unlock = 0;
 	} else {
 		if (count != 10)
-			printk(KERN_CONT " locked it.\n");
+			pr_cont(" locked it.\n");
 	}
 
 	do_each_thread(g, p) {
@@ -4445,7 +4445,7 @@ void debug_show_all_locks(void)
 				unlock = 1;
 	} while_each_thread(g, p);
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================================\n\n");
 
 	if (unlock)
@@ -4475,12 +4475,12 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
-		printk("\n");
+		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");
 		print_kernel_ident();
 		pr_warn("------------------------------------------------\n");
-		printk("%s/%d is leaving the kernel with locks still held!\n",
+		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 				curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
 	}
@@ -4490,19 +4490,15 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 {
 	struct task_struct *curr = current;
 
-#ifndef CONFIG_PROVE_RCU_REPEATEDLY
-	if (!debug_locks_off())
-		return;
-#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
 	/* Note: the following can be executed concurrently, so be careful. */
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");
 	print_kernel_ident();
 	pr_warn("-----------------------------\n");
-	printk("%s:%d %s!\n", file, line, s);
-	printk("\nother info that might help us debug this:\n\n");
-	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+	pr_warn("%s:%d %s!\n", file, line, s);
+	pr_warn("\nother info that might help us debug this:\n\n");
+	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
 			: !rcu_is_watching()
@@ -4529,10 +4525,10 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
 	if (!rcu_is_watching())
-		printk("RCU used illegally from extended quiescent state!\n");
+		pr_warn("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 198527a..858a075 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -227,9 +227,9 @@ static void __sched __mutex_lock_slowpath(struct mutex *lock);
  * (or statically defined) before it can be locked. memset()-ing
  * the mutex to 0 is not allowed.
  *
- * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
- *   checks that will enforce the restrictions and will also do
- *   deadlock debugging. )
+ * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
+ * checks that will enforce the restrictions and will also do
+ * deadlock debugging)
  *
  * This function is similar to (but not equivalent to) down().
  */
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 58e366a..ac35e64 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
-void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
 {
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->name = name;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
 }
 
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index b585af9..5078c6d 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -11,7 +11,7 @@
 
 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
-extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void debug_rt_mutex_lock(struct rt_mutex *lock);
 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 28cd09e..7806989 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	int ret;
+
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
@@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 	lock->magic = NULL;
 #endif
 }
-
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
@@ -1632,14 +1653,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  *
  * Initializing of a locked rt lock is not allowed
  */
-void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name,
+		     struct lock_class_key *key)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
 	lock->waiters = RB_ROOT;
 	lock->waiters_leftmost = NULL;
 
-	debug_rt_mutex_init(lock, name);
+	if (name && key)
+		debug_rt_mutex_init(lock, name, key);
 }
 EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
@@ -1660,7 +1683,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	__rt_mutex_init(lock, NULL, NULL);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 }
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index 6607802..5c253ca 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -17,7 +17,7 @@
 #define debug_rt_mutex_proxy_lock(l,p)			do { } while (0)
 #define debug_rt_mutex_proxy_unlock(l)			do { } while (0)
 #define debug_rt_mutex_unlock(l)			do { } while (0)
-#define debug_rt_mutex_init(m, n)			do { } while (0)
+#define debug_rt_mutex_init(m, n, k)			do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)		do { } while (0)
 #define debug_rt_mutex_print_deadlock(w)		do { } while (0)
 #define debug_rt_mutex_reset_waiter(w)			do { } while (0)
diff --git a/kernel/module.c b/kernel/module.c
index 4a3665f..d7eb41d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1202,10 +1202,7 @@ static ssize_t store_uevent(struct module_attribute *mattr,
 			    struct module_kobject *mk,
 			    const char *buffer, size_t count)
 {
-	enum kobject_action action;
-
-	if (kobject_action_type(buffer, count, &action) == 0)
-		kobject_uevent(&mk->kobj, action);
+	kobject_synth_uevent(&mk->kobj, buffer, count);
 	return count;
 }
 
diff --git a/kernel/padata.c b/kernel/padata.c
index ac8f1e5..868f947 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -934,29 +934,18 @@ static struct kobj_type padata_attr_type = {
 };
 
 /**
- * padata_alloc_possible - Allocate and initialize padata instance.
- *                         Use the cpu_possible_mask for serial and
- *                         parallel workers.
- *
- * @wq: workqueue to use for the allocated padata instance
- */
-struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
-{
-	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
-}
-EXPORT_SYMBOL(padata_alloc_possible);
-
-/**
  * padata_alloc - allocate and initialize a padata instance and specify
  *                cpumasks for serial and parallel workers.
  *
  * @wq: workqueue to use for the allocated padata instance
  * @pcpumask: cpumask that will be used for padata parallelization
  * @cbcpumask: cpumask that will be used for padata serialization
+ *
+ * Must be called from a cpus_read_lock() protected region
  */
-struct padata_instance *padata_alloc(struct workqueue_struct *wq,
-				     const struct cpumask *pcpumask,
-				     const struct cpumask *cbcpumask)
+static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+					    const struct cpumask *pcpumask,
+					    const struct cpumask *cbcpumask)
 {
 	struct padata_instance *pinst;
 	struct parallel_data *pd = NULL;
@@ -965,7 +954,6 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 	if (!pinst)
 		goto err;
 
-	get_online_cpus();
 	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
 		goto err_free_inst;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
@@ -989,14 +977,12 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 
 	pinst->flags = 0;
 
-	put_online_cpus();
-
 	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
 	kobject_init(&pinst->kobj, &padata_attr_type);
 	mutex_init(&pinst->lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
+	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
 #endif
 	return pinst;
 
@@ -1005,12 +991,27 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 	free_cpumask_var(pinst->cpumask.cbcpu);
 err_free_inst:
 	kfree(pinst);
-	put_online_cpus();
 err:
 	return NULL;
 }
 
 /**
+ * padata_alloc_possible - Allocate and initialize padata instance.
+ *                         Use the cpu_possible_mask for serial and
+ *                         parallel workers.
+ *
+ * @wq: workqueue to use for the allocated padata instance
+ *
+ * Must be called from a cpus_read_lock() protected region
+ */
+struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
+{
+	lockdep_assert_cpus_held();
+	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
+}
+EXPORT_SYMBOL(padata_alloc_possible);
+
+/**
  * padata_free - free a padata instance
  *
  * @padata_inst: padata instance to free
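
A hypothetical caller sketch (not from this patch) showing the calling convention the comments above document: padata_alloc()/padata_alloc_possible() no longer take the CPU hotplug lock themselves, so the caller wraps the allocation in cpus_read_lock()/cpus_read_unlock(). The function and workqueue names and flags are only examples:

static struct padata_instance *my_pinst_setup(void)
{
	struct workqueue_struct *wq;
	struct padata_instance *pinst;

	wq = alloc_workqueue("my_padata_wq", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!wq)
		return NULL;

	/* padata_alloc_possible() must run in a cpus_read_lock() protected region */
	cpus_read_lock();
	pinst = padata_alloc_possible(wq);
	cpus_read_unlock();

	if (!pinst)
		destroy_workqueue(wq);	/* allocation failed, clean up */
	return pinst;
}
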
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a8b978c..e1914c7 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -1108,7 +1108,7 @@ static struct attribute * g[] = {
 };
 
 
-static struct attribute_group attr_group = {
+static const struct attribute_group attr_group = {
 	.attrs = g,
 };
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index fa46606..b7708e3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -36,13 +36,13 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 #include <asm/set_memory.h>
 #endif
 
 #include "power.h"
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
 static bool hibernate_restore_protection;
 static bool hibernate_restore_protection_active;
 
@@ -77,7 +77,7 @@ static inline void hibernate_restore_protection_begin(void) {}
 static inline void hibernate_restore_protection_end(void) {}
 static inline void hibernate_restore_protect_page(void *page_address) {}
 static inline void hibernate_restore_unprotect_page(void *page_address) {}
-#endif /* CONFIG_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
 
 static int swsusp_page_is_free(struct page *);
 static void swsusp_set_page_forbidden(struct page *);
@@ -1929,8 +1929,7 @@ static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
  * also be located in the high memory, because of the way in which
  * copy_data_pages() works.
  */
-static int swsusp_alloc(struct memory_bitmap *orig_bm,
-			struct memory_bitmap *copy_bm,
+static int swsusp_alloc(struct memory_bitmap *copy_bm,
 			unsigned int nr_pages, unsigned int nr_highmem)
 {
 	if (nr_highmem > 0) {
@@ -1976,7 +1975,7 @@ asmlinkage __visible int swsusp_save(void)
 		return -ENOMEM;
 	}
 
-	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
+	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
 		printk(KERN_ERR "PM: Memory allocation failed\n");
 		return -ENOMEM;
 	}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c0248c7..3ecf275 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -86,11 +86,9 @@ static void freeze_enter(void)
 
 	/* Push all the CPUs into the idle loop. */
 	wake_up_all_idle_cpus();
-	pr_debug("PM: suspend-to-idle\n");
 	/* Make the current CPU wait so it can enter the idle loop too. */
 	wait_event(suspend_freeze_wait_head,
 		   suspend_freeze_state == FREEZE_STATE_WAKE);
-	pr_debug("PM: resume from suspend-to-idle\n");
 
 	cpuidle_pause();
 	put_online_cpus();
@@ -106,6 +104,8 @@ static void freeze_enter(void)
 
 static void s2idle_loop(void)
 {
+	pr_debug("PM: suspend-to-idle\n");
+
 	do {
 		freeze_enter();
 
@@ -121,6 +121,8 @@ static void s2idle_loop(void)
 
 		pm_wakeup_clear(false);
 	} while (!dpm_suspend_noirq(PMSG_SUSPEND));
+
+	pr_debug("PM: resume from suspend-to-idle\n");
 }
 
 void freeze_wake(void)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index f80fd33..57d2257 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -225,14 +225,14 @@ static struct block_device *hib_resume_bdev;
 struct hib_bio_batch {
 	atomic_t		count;
 	wait_queue_head_t	wait;
-	int			error;
+	blk_status_t		error;
 };
 
 static void hib_init_batch(struct hib_bio_batch *hb)
 {
 	atomic_set(&hb->count, 0);
 	init_waitqueue_head(&hb->wait);
-	hb->error = 0;
+	hb->error = BLK_STS_OK;
 }
 
 static void hib_end_io(struct bio *bio)
@@ -240,7 +240,7 @@ static void hib_end_io(struct bio *bio)
 	struct hib_bio_batch *hb = bio->bi_private;
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
@@ -253,8 +253,8 @@ static void hib_end_io(struct bio *bio)
 		flush_icache_range((unsigned long)page_address(page),
 				   (unsigned long)page_address(page) + PAGE_SIZE);
 
-	if (bio->bi_error && !hb->error)
-		hb->error = bio->bi_error;
+	if (bio->bi_status && !hb->error)
+		hb->error = bio->bi_status;
 	if (atomic_dec_and_test(&hb->count))
 		wake_up(&hb->wait);
 
@@ -293,10 +293,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 	return error;
 }
 
-static int hib_wait_io(struct hib_bio_batch *hb)
+static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 {
 	wait_event(hb->wait, atomic_read(&hb->count) == 0);
-	return hb->error;
+	return blk_status_to_errno(hb->error);
 }
 
 /*
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 1db044f..2a7d040 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -18,12 +18,14 @@
 
 #ifdef CONFIG_PRINTK
 
-#define PRINTK_SAFE_CONTEXT_MASK	0x7fffffff
-#define PRINTK_NMI_CONTEXT_MASK	0x80000000
+#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffff
+#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
+#define PRINTK_NMI_CONTEXT_MASK		 0x80000000
 
 extern raw_spinlock_t logbuf_lock;
 
 __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
 void __printk_safe_enter(void);
 void __printk_safe_exit(void);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a1aecf4..fc47863 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1176,7 +1175,7 @@ static void boot_delay_msec(int level)
 	unsigned long long k;
 	unsigned long timeout;
 
-	if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
+	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
 		|| suppress_message_printing(level)) {
 		return;
 	}
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
 	 *	See if this tty is not yet registered, and
 	 *	if we have a slot free.
 	 */
-	for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
 		if (strcmp(c->name, name) == 0 && c->index == idx) {
-			if (brl_options)
-				return 0;
-
-			/*
-			 * Maintain an invariant that will help to find if
-			 * the matching console is preferred, see
-			 * register_console():
-			 *
-			 * The last non-braille console is always
-			 * the preferred one.
-			 */
-			if (i != console_cmdline_cnt - 1)
-				swap(console_cmdline[i],
-				     console_cmdline[console_cmdline_cnt - 1]);
-
-			preferred_console = console_cmdline_cnt - 1;
-
+			if (!brl_options)
+				preferred_console = i;
 			return 0;
 		}
 	}
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
 	braille_set_options(c, brl_options);
 
 	c->index = idx;
-	console_cmdline_cnt++;
 	return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
 	}
 
 	/*
-	 * See if this console matches one we selected on the command line.
-	 *
-	 * There may be several entries in the console_cmdline array matching
-	 * with the same console, one with newcon->match(), another by
-	 * name/index:
-	 *
-	 *	pl011,mmio,0x87e024000000,115200 -- added from SPCR
-	 *	ttyAMA0 -- added from command line
-	 *
-	 * Traverse the console_cmdline array in reverse order to be
-	 * sure that if this console is preferred then it will be the first
-	 * matching entry.  We use the invariant that is maintained in
-	 * __add_preferred_console().
+	 *	See if this console matches one we selected on
+	 *	the command line.
 	 */
-	for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-		c = console_cmdline + i;
-
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
 		if (!newcon->match ||
 		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
 			/* default matching */
@@ -2746,20 +2720,29 @@ void wake_up_klogd(void)
 	preempt_enable();
 }
 
+int vprintk_deferred(const char *fmt, va_list args)
+{
+	int r;
+
+	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
+
+	preempt_disable();
+	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
+	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
+	preempt_enable();
+
+	return r;
+}
+
 int printk_deferred(const char *fmt, ...)
 {
 	va_list args;
 	int r;
 
-	preempt_disable();
 	va_start(args, fmt);
-	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
+	r = vprintk_deferred(fmt, args);
 	va_end(args);
 
-	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-	preempt_enable();
-
 	return r;
 }
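A hypothetical call site (not from this patch) showing what the split provides: printk_deferred() remains usable from contexts that must not call console drivers directly, e.g. code that may hold scheduler locks; the message is stored immediately via vprintk_deferred() and the console flush happens later from irq_work.

static void example_report(int cpu)
{
	/* Stores the message now, defers console output to irq_work. */
	printk_deferred(KERN_WARNING "example: deferred warning from CPU %d\n", cpu);
}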
 
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 033e50a..3cdaeae 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -80,8 +80,8 @@ static void queue_flush_work(struct printk_safe_seq_buf *s)
  * happen, printk_safe_log_store() will notice the buffer->len mismatch
  * and repeat the write.
  */
-static int printk_safe_log_store(struct printk_safe_seq_buf *s,
-				 const char *fmt, va_list args)
+static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+						const char *fmt, va_list args)
 {
 	int add;
 	size_t len;
@@ -299,7 +299,7 @@ void printk_safe_flush_on_panic(void)
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
 	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
 
@@ -308,17 +308,29 @@ static int vprintk_nmi(const char *fmt, va_list args)
 
 void printk_nmi_enter(void)
 {
-	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	/*
+	 * The size of the extra per-CPU buffer is limited. Use it only when
+	 * the main one is locked. If this CPU is not in the safe context,
+	 * the lock must be taken on another CPU and we could wait for it.
+	 */
+	if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
+	    raw_spin_is_locked(&logbuf_lock)) {
+		this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	} else {
+		this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
+	}
 }
 
 void printk_nmi_exit(void)
 {
-	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+	this_cpu_and(printk_context,
+		     ~(PRINTK_NMI_CONTEXT_MASK |
+		       PRINTK_NMI_DEFERRED_CONTEXT_MASK));
 }
 
 #else
 
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
 	return 0;
 }
@@ -330,7 +342,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
  * into itself. It uses a per-CPU buffer to store the message, just like
  * NMI.
  */
-static int vprintk_safe(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
 {
 	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
 
@@ -351,12 +363,22 @@ void __printk_safe_exit(void)
 
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
+	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 		return vprintk_nmi(fmt, args);
 
+	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
 		return vprintk_safe(fmt, args);
 
+	/*
+	 * Use the main logbuf when logbuf_lock is available in NMI.
+	 * But avoid calling console drivers that might have their own locks.
+	 */
+	if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
+		return vprintk_deferred(fmt, args);
+
+	/* No obstacles. */
 	return vprintk_default(fmt, args);
 }
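A condensed view of the resulting routing in vprintk_func(), restating the function above in table form:

/*
 *   PRINTK_NMI_CONTEXT_MASK set           -> vprintk_nmi()       per-CPU NMI buffer
 *   PRINTK_SAFE_CONTEXT_MASK nonzero      -> vprintk_safe()      per-CPU safe buffer
 *   PRINTK_NMI_DEFERRED_CONTEXT_MASK set  -> vprintk_deferred()  main log buffer, console
 *                                                                flush deferred to irq_work
 *   otherwise                             -> vprintk_default()   normal path
 */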
 
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
new file mode 100644
index 0000000..be90c94
--- /dev/null
+++ b/kernel/rcu/Kconfig
@@ -0,0 +1,242 @@
+#
+# RCU-related configuration options
+#
+
+menu "RCU Subsystem"
+
+config TREE_RCU
+	bool
+	default y if !PREEMPT && SMP
+	help
+	  This option selects the RCU implementation that is
+	  designed for very large SMP systems with hundreds or
+	  thousands of CPUs.  It also scales down nicely to
+	  smaller systems.
+
+config PREEMPT_RCU
+	bool
+	default y if PREEMPT
+	help
+	  This option selects the RCU implementation that is
+	  designed for very large SMP systems with hundreds or
+	  thousands of CPUs, but for which real-time response
+	  is also required.  It also scales down nicely to
+	  smaller systems.
+
+	  Select this option if you are unsure.
+
+config TINY_RCU
+	bool
+	default y if !PREEMPT && !SMP
+	help
+	  This option selects the RCU implementation that is
+	  designed for UP systems for which real-time response
+	  is not required.  This option greatly reduces the
+	  memory footprint of RCU.
+
+config RCU_EXPERT
+	bool "Make expert-level adjustments to RCU configuration"
+	default n
+	help
+	  This option needs to be enabled if you wish to make
+	  expert-level adjustments to RCU configuration.  By default,
+	  no such adjustments can be made, which has the often-beneficial
+	  side-effect of preventing "make oldconfig" from asking you all
+	  sorts of detailed questions about how you would like numerous
+	  obscure RCU options to be set up.
+
+	  Say Y if you need to make expert-level adjustments to RCU.
+
+	  Say N if you are unsure.
+
+config SRCU
+	bool
+	help
+	  This option selects the sleepable version of RCU. This version
+	  permits arbitrary sleeping or blocking within RCU read-side critical
+	  sections.
+
+config TINY_SRCU
+	bool
+	default y if SRCU && TINY_RCU
+	help
+	  This option selects the single-CPU non-preemptible version of SRCU.
+
+config TREE_SRCU
+	bool
+	default y if SRCU && !TINY_RCU
+	help
+	  This option selects the full-fledged version of SRCU.
+
+config TASKS_RCU
+	bool
+	default n
+	select SRCU
+	help
+	  This option enables a task-based RCU implementation that uses
+	  only voluntary context switch (not preemption!), idle, and
+	  user-mode execution as quiescent states.
+
+config RCU_STALL_COMMON
+	def_bool ( TREE_RCU || PREEMPT_RCU )
+	help
+	  This option enables RCU CPU stall code that is common between
+	  the TINY and TREE variants of RCU.  The purpose is to allow
+	  the tiny variants to disable RCU CPU stall warnings, while
+	  making these warnings mandatory for the tree variants.
+
+config RCU_NEED_SEGCBLIST
+	def_bool ( TREE_RCU || PREEMPT_RCU || TREE_SRCU )
+
+config CONTEXT_TRACKING
+       bool
+
+config CONTEXT_TRACKING_FORCE
+	bool "Force context tracking"
+	depends on CONTEXT_TRACKING
+	default y if !NO_HZ_FULL
+	help
+	  The major prerequisite for full dynticks to work is support
+	  for the context tracking subsystem. But there are also
+	  other dependencies to provide in order to make full
+	  dynticks work.
+
+	  This option is meant for testing when an arch implements the
+	  context tracking backend but doesn't yet fulfill all the
+	  requirements to make the full dynticks feature work.
+	  Without the full dynticks, there is no way to test the support
+	  for context tracking and the subsystems that rely on it: RCU
+	  userspace extended quiescent state and tickless cputime
+	  accounting. This option copes with the absence of the full
+	  dynticks subsystem by forcing the context tracking on all
+	  CPUs in the system.
+
+	  Say Y only if you're working on the development of an
+	  architecture backend for context tracking.
+
+	  Say N otherwise; this option brings overhead that you
+	  don't want in production.
+
+
+config RCU_FANOUT
+	int "Tree-based hierarchical RCU fanout value"
+	range 2 64 if 64BIT
+	range 2 32 if !64BIT
+	depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
+	default 64 if 64BIT
+	default 32 if !64BIT
+	help
+	  This option controls the fanout of hierarchical implementations
+	  of RCU, allowing RCU to work efficiently on machines with
+	  large numbers of CPUs.  This value must be at least the fourth
+	  root of NR_CPUS, which allows NR_CPUS to be insanely large.
+	  The default value of RCU_FANOUT should be used for production
+	  systems, but if you are stress-testing the RCU implementation
+	  itself, small RCU_FANOUT values allow you to test large-system
+	  code paths on small(er) systems.
+
+	  Select a specific number if testing RCU itself.
+	  Take the default if unsure.
+
+config RCU_FANOUT_LEAF
+	int "Tree-based hierarchical RCU leaf-level fanout value"
+	range 2 64 if 64BIT
+	range 2 32 if !64BIT
+	depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
+	default 16
+	help
+	  This option controls the leaf-level fanout of hierarchical
+	  implementations of RCU, and allows trading off cache misses
+	  against lock contention.  Systems that synchronize their
+	  scheduling-clock interrupts for energy-efficiency reasons will
+	  want the default because the smaller leaf-level fanout keeps
+	  lock contention levels acceptably low.  Very large systems
+	  (hundreds or thousands of CPUs) will instead want to set this
+	  value to the maximum value possible in order to reduce the
+	  number of cache misses incurred during RCU's grace-period
+	  initialization.  These systems tend to run CPU-bound, and thus
+	  are not helped by synchronized interrupts, and thus tend to
+	  skew them, which reduces lock contention enough that large
+	  leaf-level fanouts work well.  That said, setting leaf-level
+	  fanout to a large number will likely cause problematic
+	  lock contention on the leaf-level rcu_node structures unless
+	  you boot with the skew_tick kernel parameter.
+
+	  Select a specific number if testing RCU itself.
+
+	  Select the maximum permissible value for large systems, but
+	  please understand that you may also need to set the skew_tick
+	  kernel boot parameter to avoid contention on the rcu_node
+	  structure's locks.
+
+	  Take the default if unsure.
+
+config RCU_FAST_NO_HZ
+	bool "Accelerate last non-dyntick-idle CPU's grace periods"
+	depends on NO_HZ_COMMON && SMP && RCU_EXPERT
+	default n
+	help
+	  This option permits CPUs to enter dynticks-idle state even if
+	  they have RCU callbacks queued, and prevents RCU from waking
+	  these CPUs up more than roughly once every four jiffies (by
+	  default, you can adjust this using the rcutree.rcu_idle_gp_delay
+	  parameter), thus improving energy efficiency.  On the other
+	  hand, this option increases the duration of RCU grace periods,
+	  for example, slowing down synchronize_rcu().
+
+	  Say Y if energy efficiency is critically important, and you
+	  don't care about increased grace-period durations.
+
+	  Say N if you are unsure.
+
+config RCU_BOOST
+	bool "Enable RCU priority boosting"
+	depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
+	default n
+	help
+	  This option boosts the priority of preempted RCU readers that
+	  block the current preemptible RCU grace period for too long.
+	  This option also prevents heavy loads from blocking RCU
+	  callback invocation for all flavors of RCU.
+
+	  Say Y here if you are working with real-time apps or heavy loads.
+	  Say N here if you are unsure.
+
+config RCU_BOOST_DELAY
+	int "Milliseconds to delay boosting after RCU grace-period start"
+	range 0 3000
+	depends on RCU_BOOST
+	default 500
+	help
+	  This option specifies the time to wait after the beginning of
+	  a given grace period before priority-boosting preempted RCU
+	  readers blocking that grace period.  Note that any RCU reader
+	  blocking an expedited RCU grace period is boosted immediately.
+
+	  Accept the default if unsure.
+
+config RCU_NOCB_CPU
+	bool "Offload RCU callback processing from boot-selected CPUs"
+	depends on TREE_RCU || PREEMPT_RCU
+	depends on RCU_EXPERT || NO_HZ_FULL
+	default n
+	help
+	  Use this option to reduce OS jitter for aggressive HPC or
+	  real-time workloads.	It can also be used to offload RCU
+	  callback invocation to energy-efficient CPUs in battery-powered
+	  asymmetric multiprocessors.
+
+	  This option offloads callback invocation from the set of
+	  CPUs specified at boot time by the rcu_nocbs parameter.
+	  For each such CPU, a kthread ("rcuox/N") will be created to
+	  invoke callbacks, where the "N" is the CPU being offloaded,
+	  and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
+	  "s" for RCU-sched.  Nothing prevents this kthread from running
+	  on the specified CPUs, but (1) the kthreads may be preempted
+	  between each callback, and (2) affinity or cgroups can be used
+	  to force the kthreads to run on whatever set of CPUs is desired.
+
+	  Say Y here if you want to help to debug reduced OS jitter.
+	  Say N here if you are unsure.
+
+endmenu # "RCU Subsystem"
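As an example of how these options gate code, CONFIG_RCU_NOCB_CPU is consumed with the usual stub pattern; this mirrors the declarations added to kernel/rcu/rcu.h later in this patch.

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif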
diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug
new file mode 100644
index 0000000..0ec7d1d
--- /dev/null
+++ b/kernel/rcu/Kconfig.debug
@@ -0,0 +1,82 @@
+#
+# RCU-related debugging configuration options
+#
+
+menu "RCU Debugging"
+
+config PROVE_RCU
+	def_bool PROVE_LOCKING
+
+config TORTURE_TEST
+	tristate
+	default n
+
+config RCU_PERF_TEST
+	tristate "performance tests for RCU"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	select SRCU
+	select TASKS_RCU
+	default n
+	help
+	  This option provides a kernel module that runs performance
+	  tests on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU performance tests to be built into
+	  the kernel.
+	  Say M if you want the RCU performance tests to build as a module.
+	  Say N if you are unsure.
+
+config RCU_TORTURE_TEST
+	tristate "torture tests for RCU"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	select SRCU
+	select TASKS_RCU
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU torture tests to be built into
+	  the kernel.
+	  Say M if you want the RCU torture tests to build as a module.
+	  Say N if you are unsure.
+
+config RCU_CPU_STALL_TIMEOUT
+	int "RCU CPU stall timeout in seconds"
+	depends on RCU_STALL_COMMON
+	range 3 300
+	default 21
+	help
+	  If a given RCU grace period extends more than the specified
+	  number of seconds, a CPU stall warning is printed.  If the
+	  RCU grace period persists, additional CPU stall warnings are
+	  printed at more widely spaced intervals.
+
+config RCU_TRACE
+	bool "Enable tracing for RCU"
+	depends on DEBUG_KERNEL
+	default y if TREE_RCU
+	select TRACE_CLOCK
+	help
+	  This option enables additional tracepoints for ftrace-style
+	  event tracing.
+
+	  Say Y here if you want to enable RCU tracing.
+	  Say N if you are unsure.
+
+config RCU_EQS_DEBUG
+	bool "Provide debugging asserts for adding NO_HZ support to an arch"
+	depends on DEBUG_KERNEL
+	help
+	  This option provides consistency checks in RCU's handling of
+	  NO_HZ.  These checks have proven quite helpful in detecting
+	  bugs in arch-specific NO_HZ code.
+
+	  Say N here if you need ultimate kernel/user switch latencies.
+	  Say Y if you are unsure.
+
+endmenu # "RCU Debugging"
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 23803c7..13c0fc8 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -3,13 +3,11 @@
 KCOV_INSTRUMENT := n
 
 obj-y += update.o sync.o
-obj-$(CONFIG_CLASSIC_SRCU) += srcu.o
 obj-$(CONFIG_TREE_SRCU) += srcutree.o
 obj-$(CONFIG_TINY_SRCU) += srcutiny.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
-obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
 obj-$(CONFIG_TINY_RCU) += tiny.o
 obj-$(CONFIG_RCU_NEED_SEGCBLIST) += rcu_segcblist.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 73e16ec4..808b8c8 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -212,6 +212,18 @@ int rcu_jiffies_till_stall_check(void);
  */
 #define TPS(x)  tracepoint_string(x)
 
+/*
+ * Dump the ftrace buffer, but only one time per callsite per boot.
+ */
+#define rcu_ftrace_dump(oops_dump_mode) \
+do { \
+	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
+	\
+	if (!atomic_read(&___rfd_beenhere) && \
+	    !atomic_xchg(&___rfd_beenhere, 1)) \
+		ftrace_dump(oops_dump_mode); \
+} while (0)
+
 void rcu_early_boot_tests(void);
 void rcu_test_sync_prims(void);
 
@@ -291,6 +303,271 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 	     cpu <= rnp->grphi; \
 	     cpu = cpumask_next((cpu), cpu_possible_mask))
 
+/*
+ * Wrappers for the rcu_node::lock acquire and release.
+ *
+ * Because the rcu_nodes form a tree, the tree traversal locking will observe
+ * different lock values; this in turn means that an UNLOCK of one level
+ * followed by a LOCK of another level does not imply a full memory barrier,
+ * and most importantly transitivity is lost.
+ *
+ * In order to restore full ordering between tree levels, augment the regular
+ * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * Because ->lock of struct rcu_node is a __private field, one should use
+ * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
+ */
+#define raw_spin_lock_rcu_node(p)					\
+do {									\
+	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
+
+#define raw_spin_lock_irq_rcu_node(p)					\
+do {									\
+	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_irq_rcu_node(p)					\
+	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
+
+#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
+do {									\
+	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
+	smp_mb__after_unlock_lock();					\
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
+	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\
+
+#define raw_spin_trylock_rcu_node(p)					\
+({									\
+	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
+									\
+	if (___locked)							\
+		smp_mb__after_unlock_lock();				\
+	___locked;							\
+})
+
 #endif /* #if defined(SRCU) || !defined(TINY_RCU) */
 
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
+{
+	return true;
+}
+static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
+{
+	return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_normal(void);     /* Internal RCU use. */
+bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+void rcupdate_announce_bootup_oddness(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+#define RCU_SCHEDULER_INACTIVE	0
+#define RCU_SCHEDULER_INIT	1
+#define RCU_SCHEDULER_RUNNING	2
+
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+#else /* #ifdef CONFIG_TINY_RCU */
+void rcu_request_urgent_qs_task(struct task_struct *t);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+enum rcutorture_type {
+	RCU_FLAVOR,
+	RCU_BH_FLAVOR,
+	RCU_SCHED_FLAVOR,
+	RCU_TASKS_FLAVOR,
+	SRCU_FLAVOR,
+	INVALID_RCU_FLAVOR
+};
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
+			    unsigned long *gpnum, unsigned long *completed);
+void rcutorture_record_test_transition(void);
+void rcutorture_record_progress(unsigned long vernum);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+			       struct rcu_head *rhp,
+			       unsigned long secs,
+			       unsigned long c_old,
+			       unsigned long c);
+#else
+static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
+					  int *flags,
+					  unsigned long *gpnum,
+					  unsigned long *completed)
+{
+	*flags = 0;
+	*gpnum = 0;
+	*completed = 0;
+}
+static inline void rcutorture_record_test_transition(void)
+{
+}
+static inline void rcutorture_record_progress(unsigned long vernum)
+{
+}
+#ifdef CONFIG_RCU_TRACE
+void do_trace_rcu_torture_read(const char *rcutorturename,
+			       struct rcu_head *rhp,
+			       unsigned long secs,
+			       unsigned long c_old,
+			       unsigned long c);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+	do { } while (0)
+#endif
+#endif
+
+#ifdef CONFIG_TINY_SRCU
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+					   struct srcu_struct *sp, int *flags,
+					   unsigned long *gpnum,
+					   unsigned long *completed)
+{
+	if (test_type != SRCU_FLAVOR)
+		return;
+	*flags = 0;
+	*completed = sp->srcu_idx;
+	*gpnum = *completed;
+}
+
+#elif defined(CONFIG_TREE_SRCU)
+
+void srcutorture_get_gp_data(enum rcutorture_type test_type,
+			     struct srcu_struct *sp, int *flags,
+			     unsigned long *gpnum, unsigned long *completed);
+
+#endif
+
+#ifdef CONFIG_TINY_RCU
+
+/*
+ * Return the number of grace periods started.
+ */
+static inline unsigned long rcu_batches_started(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods started.
+ */
+static inline unsigned long rcu_batches_started_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+	return 0;
+}
+
+static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
+{
+	return 0;
+}
+
+static inline void rcu_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_sched_force_quiescent_state(void)
+{
+}
+
+static inline void show_rcu_gp_kthreads(void)
+{
+}
+
+#else /* #ifdef CONFIG_TINY_RCU */
+extern unsigned long rcutorture_testseq;
+extern unsigned long rcutorture_vernum;
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+void show_rcu_gp_kthreads(void);
+void rcu_force_quiescent_state(void);
+void rcu_bh_force_quiescent_state(void);
+void rcu_sched_force_quiescent_state(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+bool rcu_is_nocb_cpu(int cpu);
+#else
+static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+#endif
+
 #endif /* __LINUX_RCU_H */
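A hypothetical usage sketch of the rcu_node lock wrappers declared above (the function and the update it performs are made up for illustration): callers never touch the __private ->lock directly, and the acquire side always gains the extra smp_mb__after_unlock_lock() ordering.

static void example_update_rnp(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* lock + ordering barrier */
	/* ... update fields protected by rnp->lock ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}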
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index a4a86fb..3cc1811 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -48,6 +48,8 @@
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
 
+#include "rcu.h"
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 
@@ -59,12 +61,16 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 #define VERBOSE_PERFOUT_ERRSTRING(s) \
 	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
 
+torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
+torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
 torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
-torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, nreaders, 0, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
-torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
+torture_param(bool, shutdown, !IS_ENABLED(MODULE),
+	      "Shutdown at end of performance tests.");
 torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
 
 static char *perf_type = "rcu";
 module_param(perf_type, charp, 0444);
@@ -86,13 +92,16 @@ static u64 t_rcu_perf_writer_started;
 static u64 t_rcu_perf_writer_finished;
 static unsigned long b_rcu_perf_writer_started;
 static unsigned long b_rcu_perf_writer_finished;
+static DEFINE_PER_CPU(atomic_t, n_async_inflight);
 
 static int rcu_perf_writer_state;
 #define RTWS_INIT		0
-#define RTWS_EXP_SYNC		1
-#define RTWS_SYNC		2
-#define RTWS_IDLE		2
-#define RTWS_STOPPING		3
+#define RTWS_ASYNC		1
+#define RTWS_BARRIER		2
+#define RTWS_EXP_SYNC		3
+#define RTWS_SYNC		4
+#define RTWS_IDLE		5
+#define RTWS_STOPPING		6
 
 #define MAX_MEAS 10000
 #define MIN_MEAS 100
@@ -114,6 +123,8 @@ struct rcu_perf_ops {
 	unsigned long (*started)(void);
 	unsigned long (*completed)(void);
 	unsigned long (*exp_completed)(void);
+	void (*async)(struct rcu_head *head, rcu_callback_t func);
+	void (*gp_barrier)(void);
 	void (*sync)(void);
 	void (*exp_sync)(void);
 	const char *name;
@@ -153,6 +164,8 @@ static struct rcu_perf_ops rcu_ops = {
 	.started	= rcu_batches_started,
 	.completed	= rcu_batches_completed,
 	.exp_completed	= rcu_exp_batches_completed,
+	.async		= call_rcu,
+	.gp_barrier	= rcu_barrier,
 	.sync		= synchronize_rcu,
 	.exp_sync	= synchronize_rcu_expedited,
 	.name		= "rcu"
@@ -181,6 +194,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
 	.started	= rcu_batches_started_bh,
 	.completed	= rcu_batches_completed_bh,
 	.exp_completed	= rcu_exp_batches_completed_sched,
+	.async		= call_rcu_bh,
+	.gp_barrier	= rcu_barrier_bh,
 	.sync		= synchronize_rcu_bh,
 	.exp_sync	= synchronize_rcu_bh_expedited,
 	.name		= "rcu_bh"
@@ -208,6 +223,16 @@ static unsigned long srcu_perf_completed(void)
 	return srcu_batches_completed(srcu_ctlp);
 }
 
+static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
+{
+	call_srcu(srcu_ctlp, head, func);
+}
+
+static void srcu_rcu_barrier(void)
+{
+	srcu_barrier(srcu_ctlp);
+}
+
 static void srcu_perf_synchronize(void)
 {
 	synchronize_srcu(srcu_ctlp);
@@ -226,11 +251,42 @@ static struct rcu_perf_ops srcu_ops = {
 	.started	= NULL,
 	.completed	= srcu_perf_completed,
 	.exp_completed	= srcu_perf_completed,
+	.async		= srcu_call_rcu,
+	.gp_barrier	= srcu_rcu_barrier,
 	.sync		= srcu_perf_synchronize,
 	.exp_sync	= srcu_perf_synchronize_expedited,
 	.name		= "srcu"
 };
 
+static struct srcu_struct srcud;
+
+static void srcu_sync_perf_init(void)
+{
+	srcu_ctlp = &srcud;
+	init_srcu_struct(srcu_ctlp);
+}
+
+static void srcu_sync_perf_cleanup(void)
+{
+	cleanup_srcu_struct(srcu_ctlp);
+}
+
+static struct rcu_perf_ops srcud_ops = {
+	.ptype		= SRCU_FLAVOR,
+	.init		= srcu_sync_perf_init,
+	.cleanup	= srcu_sync_perf_cleanup,
+	.readlock	= srcu_perf_read_lock,
+	.readunlock	= srcu_perf_read_unlock,
+	.started	= NULL,
+	.completed	= srcu_perf_completed,
+	.exp_completed	= srcu_perf_completed,
+	.async		= srcu_call_rcu,
+	.gp_barrier	= srcu_rcu_barrier,
+	.sync		= srcu_perf_synchronize,
+	.exp_sync	= srcu_perf_synchronize_expedited,
+	.name		= "srcud"
+};
+
 /*
  * Definitions for sched perf testing.
  */
@@ -254,6 +310,8 @@ static struct rcu_perf_ops sched_ops = {
 	.started	= rcu_batches_started_sched,
 	.completed	= rcu_batches_completed_sched,
 	.exp_completed	= rcu_exp_batches_completed_sched,
+	.async		= call_rcu_sched,
+	.gp_barrier	= rcu_barrier_sched,
 	.sync		= synchronize_sched,
 	.exp_sync	= synchronize_sched_expedited,
 	.name		= "sched"
@@ -281,6 +339,8 @@ static struct rcu_perf_ops tasks_ops = {
 	.readunlock	= tasks_perf_read_unlock,
 	.started	= rcu_no_completed,
 	.completed	= rcu_no_completed,
+	.async		= call_rcu_tasks,
+	.gp_barrier	= rcu_barrier_tasks,
 	.sync		= synchronize_rcu_tasks,
 	.exp_sync	= synchronize_rcu_tasks,
 	.name		= "tasks"
@@ -344,6 +404,15 @@ rcu_perf_reader(void *arg)
 }
 
 /*
+ * Callback function for asynchronous grace periods from rcu_perf_writer().
+ */
+static void rcu_perf_async_cb(struct rcu_head *rhp)
+{
+	atomic_dec(this_cpu_ptr(&n_async_inflight));
+	kfree(rhp);
+}
+
+/*
  * RCU perf writer kthread.  Repeatedly does a grace period.
  */
 static int
@@ -352,6 +421,7 @@ rcu_perf_writer(void *arg)
 	int i = 0;
 	int i_max;
 	long me = (long)arg;
+	struct rcu_head *rhp = NULL;
 	struct sched_param sp;
 	bool started = false, done = false, alldone = false;
 	u64 t;
@@ -380,9 +450,27 @@ rcu_perf_writer(void *arg)
 	}
 
 	do {
+		if (writer_holdoff)
+			udelay(writer_holdoff);
 		wdp = &wdpp[i];
 		*wdp = ktime_get_mono_fast_ns();
-		if (gp_exp) {
+		if (gp_async) {
+retry:
+			if (!rhp)
+				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
+			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
+				rcu_perf_writer_state = RTWS_ASYNC;
+				atomic_inc(this_cpu_ptr(&n_async_inflight));
+				cur_ops->async(rhp, rcu_perf_async_cb);
+				rhp = NULL;
+			} else if (!kthread_should_stop()) {
+				rcu_perf_writer_state = RTWS_BARRIER;
+				cur_ops->gp_barrier();
+				goto retry;
+			} else {
+				kfree(rhp); /* Because we are stopping. */
+			}
+		} else if (gp_exp) {
 			rcu_perf_writer_state = RTWS_EXP_SYNC;
 			cur_ops->exp_sync();
 		} else {
@@ -429,6 +517,10 @@ rcu_perf_writer(void *arg)
 			i++;
 		rcu_perf_wait_shutdown();
 	} while (!torture_must_stop());
+	if (gp_async) {
+		rcu_perf_writer_state = RTWS_BARRIER;
+		cur_ops->gp_barrier();
+	}
 	rcu_perf_writer_state = RTWS_STOPPING;
 	writer_n_durations[me] = i_max;
 	torture_kthread_stopping("rcu_perf_writer");
@@ -452,6 +544,17 @@ rcu_perf_cleanup(void)
 	u64 *wdp;
 	u64 *wdpp;
 
+	/*
+	 * Would like warning at start, but everything is expedited
+	 * during the mid-boot phase, so have to wait till the end.
+	 */
+	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
+		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+	if (rcu_gp_is_normal() && gp_exp)
+		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+	if (gp_exp && gp_async)
+		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");
+
 	if (torture_cleanup_begin())
 		return;
 
@@ -554,7 +657,7 @@ rcu_perf_init(void)
 	long i;
 	int firsterr = 0;
 	static struct rcu_perf_ops *perf_ops[] = {
-		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+		&rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
 		RCUPERF_TASKS_OPS
 	};
 
@@ -624,16 +727,6 @@ rcu_perf_init(void)
 		firsterr = -ENOMEM;
 		goto unwind;
 	}
-	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp) {
-		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
-		firsterr = -EINVAL;
-		goto unwind;
-	}
-	if (rcu_gp_is_normal() && gp_exp) {
-		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
-		firsterr = -EINVAL;
-		goto unwind;
-	}
 	for (i = 0; i < nrealwriters; i++) {
 		writer_durations[i] =
 			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index ae6e574..b8f7f8c 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -52,6 +52,8 @@
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
 
+#include "rcu.h"
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
@@ -562,31 +564,19 @@ static void srcu_torture_stats(void)
 	int __maybe_unused cpu;
 	int idx;
 
-#if defined(CONFIG_TREE_SRCU) || defined(CONFIG_CLASSIC_SRCU)
 #ifdef CONFIG_TREE_SRCU
 	idx = srcu_ctlp->srcu_idx & 0x1;
-#else /* #ifdef CONFIG_TREE_SRCU */
-	idx = srcu_ctlp->completed & 0x1;
-#endif /* #else #ifdef CONFIG_TREE_SRCU */
 	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):",
 		 torture_type, TORTURE_FLAG, idx);
 	for_each_possible_cpu(cpu) {
 		unsigned long l0, l1;
 		unsigned long u0, u1;
 		long c0, c1;
-#ifdef CONFIG_TREE_SRCU
 		struct srcu_data *counts;
 
 		counts = per_cpu_ptr(srcu_ctlp->sda, cpu);
 		u0 = counts->srcu_unlock_count[!idx];
 		u1 = counts->srcu_unlock_count[idx];
-#else /* #ifdef CONFIG_TREE_SRCU */
-		struct srcu_array *counts;
-
-		counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu);
-		u0 = counts->unlock_count[!idx];
-		u1 = counts->unlock_count[idx];
-#endif /* #else #ifdef CONFIG_TREE_SRCU */
 
 		/*
 		 * Make sure that a lock is always counted if the corresponding
@@ -594,13 +584,8 @@ static void srcu_torture_stats(void)
 		 */
 		smp_rmb();
 
-#ifdef CONFIG_TREE_SRCU
 		l0 = counts->srcu_lock_count[!idx];
 		l1 = counts->srcu_lock_count[idx];
-#else /* #ifdef CONFIG_TREE_SRCU */
-		l0 = counts->lock_count[!idx];
-		l1 = counts->lock_count[idx];
-#endif /* #else #ifdef CONFIG_TREE_SRCU */
 
 		c0 = l0 - u0;
 		c1 = l1 - u1;
@@ -609,7 +594,7 @@ static void srcu_torture_stats(void)
 	pr_cont("\n");
 #elif defined(CONFIG_TINY_SRCU)
 	idx = READ_ONCE(srcu_ctlp->srcu_idx) & 0x1;
-	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%d,%d)\n",
+	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
 		 torture_type, TORTURE_FLAG, idx,
 		 READ_ONCE(srcu_ctlp->srcu_lock_nesting[!idx]),
 		 READ_ONCE(srcu_ctlp->srcu_lock_nesting[idx]));
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
deleted file mode 100644
index 584d8a9..0000000
--- a/kernel/rcu/srcu.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * Sleepable Read-Copy Update mechanism for mutual exclusion.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * Copyright (C) IBM Corporation, 2006
- * Copyright (C) Fujitsu, 2012
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- *	   Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU/ *.txt
- *
- */
-
-#include <linux/export.h>
-#include <linux/mutex.h>
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-#include <linux/rcupdate_wait.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/srcu.h>
-
-#include "rcu.h"
-
-/*
- * Initialize an rcu_batch structure to empty.
- */
-static inline void rcu_batch_init(struct rcu_batch *b)
-{
-	b->head = NULL;
-	b->tail = &b->head;
-}
-
-/*
- * Enqueue a callback onto the tail of the specified rcu_batch structure.
- */
-static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
-{
-	*b->tail = head;
-	b->tail = &head->next;
-}
-
-/*
- * Is the specified rcu_batch structure empty?
- */
-static inline bool rcu_batch_empty(struct rcu_batch *b)
-{
-	return b->tail == &b->head;
-}
-
-/*
- * Remove the callback at the head of the specified rcu_batch structure
- * and return a pointer to it, or return NULL if the structure is empty.
- */
-static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
-{
-	struct rcu_head *head;
-
-	if (rcu_batch_empty(b))
-		return NULL;
-
-	head = b->head;
-	b->head = head->next;
-	if (b->tail == &head->next)
-		rcu_batch_init(b);
-
-	return head;
-}
-
-/*
- * Move all callbacks from the rcu_batch structure specified by "from" to
- * the structure specified by "to".
- */
-static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
-{
-	if (!rcu_batch_empty(from)) {
-		*to->tail = from->head;
-		to->tail = from->tail;
-		rcu_batch_init(from);
-	}
-}
-
-static int init_srcu_struct_fields(struct srcu_struct *sp)
-{
-	sp->completed = 0;
-	spin_lock_init(&sp->queue_lock);
-	sp->running = false;
-	rcu_batch_init(&sp->batch_queue);
-	rcu_batch_init(&sp->batch_check0);
-	rcu_batch_init(&sp->batch_check1);
-	rcu_batch_init(&sp->batch_done);
-	INIT_DELAYED_WORK(&sp->work, process_srcu);
-	sp->per_cpu_ref = alloc_percpu(struct srcu_array);
-	return sp->per_cpu_ref ? 0 : -ENOMEM;
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
-		       struct lock_class_key *key)
-{
-	/* Don't re-initialize a lock while it is held. */
-	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
-	lockdep_init_map(&sp->dep_map, name, key, 0);
-	return init_srcu_struct_fields(sp);
-}
-EXPORT_SYMBOL_GPL(__init_srcu_struct);
-
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
-/**
- * init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
- *
- * Must invoke this on a given srcu_struct before passing that srcu_struct
- * to any other function.  Each srcu_struct represents a separate domain
- * of SRCU protection.
- */
-int init_srcu_struct(struct srcu_struct *sp)
-{
-	return init_srcu_struct_fields(sp);
-}
-EXPORT_SYMBOL_GPL(init_srcu_struct);
-
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
-/*
- * Returns approximate total of the readers' ->lock_count[] values for the
- * rank of per-CPU counters specified by idx.
- */
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
-{
-	int cpu;
-	unsigned long sum = 0;
-
-	for_each_possible_cpu(cpu) {
-		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
-
-		sum += READ_ONCE(cpuc->lock_count[idx]);
-	}
-	return sum;
-}
-
-/*
- * Returns approximate total of the readers' ->unlock_count[] values for the
- * rank of per-CPU counters specified by idx.
- */
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
-{
-	int cpu;
-	unsigned long sum = 0;
-
-	for_each_possible_cpu(cpu) {
-		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
-
-		sum += READ_ONCE(cpuc->unlock_count[idx]);
-	}
-	return sum;
-}
-
-/*
- * Return true if the number of pre-existing readers is determined to
- * be zero.
- */
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
-{
-	unsigned long unlocks;
-
-	unlocks = srcu_readers_unlock_idx(sp, idx);
-
-	/*
-	 * Make sure that a lock is always counted if the corresponding unlock
-	 * is counted. Needs to be a smp_mb() as the read side may contain a
-	 * read from a variable that is written to before the synchronize_srcu()
-	 * in the write side. In this case smp_mb()s A and B act like the store
-	 * buffering pattern.
-	 *
-	 * This smp_mb() also pairs with smp_mb() C to prevent accesses after the
-	 * synchronize_srcu() from being executed before the grace period ends.
-	 */
-	smp_mb(); /* A */
-
-	/*
-	 * If the locks are the same as the unlocks, then there must have
-	 * been no readers on this index at some time in between. This does not
-	 * mean that there are no more readers, as one could have read the
-	 * current index but not have incremented the lock counter yet.
-	 *
-	 * Possible bug: There is no guarantee that there haven't been ULONG_MAX
-	 * increments of ->lock_count[] since the unlocks were counted, meaning
-	 * that this could return true even if there are still active readers.
-	 * Since there are no memory barriers around srcu_flip(), the CPU is not
-	 * required to increment ->completed before running
-	 * srcu_readers_unlock_idx(), which means that there could be an
-	 * arbitrarily large number of critical sections that execute after
-	 * srcu_readers_unlock_idx() but use the old value of ->completed.
-	 */
-	return srcu_readers_lock_idx(sp, idx) == unlocks;
-}
-
-/**
- * srcu_readers_active - returns true if there are readers. and false
- *                       otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
- *
- * Note that this is not an atomic primitive, and can therefore suffer
- * severe errors when invoked on an active srcu_struct.  That said, it
- * can be useful as an error check at cleanup time.
- */
-static bool srcu_readers_active(struct srcu_struct *sp)
-{
-	int cpu;
-	unsigned long sum = 0;
-
-	for_each_possible_cpu(cpu) {
-		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
-
-		sum += READ_ONCE(cpuc->lock_count[0]);
-		sum += READ_ONCE(cpuc->lock_count[1]);
-		sum -= READ_ONCE(cpuc->unlock_count[0]);
-		sum -= READ_ONCE(cpuc->unlock_count[1]);
-	}
-	return sum;
-}
-
-/**
- * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
- *
- * Must invoke this only after you are finished using a given srcu_struct
- * that was initialized via init_srcu_struct().  This code does some
- * probabalistic checking, spotting late uses of srcu_read_lock(),
- * synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
- * If any such late uses are detected, the per-CPU memory associated with
- * the srcu_struct is simply leaked and WARN_ON() is invoked.  If the
- * caller frees the srcu_struct itself, a use-after-free crash will likely
- * ensue, but at least there will be a warning printed.
- */
-void cleanup_srcu_struct(struct srcu_struct *sp)
-{
-	if (WARN_ON(srcu_readers_active(sp)))
-		return; /* Leakage unless caller handles error. */
-	free_percpu(sp->per_cpu_ref);
-	sp->per_cpu_ref = NULL;
-}
-EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
-
-/*
- * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
- */
-int __srcu_read_lock(struct srcu_struct *sp)
-{
-	int idx;
-
-	idx = READ_ONCE(sp->completed) & 0x1;
-	__this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
-	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	return idx;
-}
-EXPORT_SYMBOL_GPL(__srcu_read_lock);
-
-/*
- * Removes the count for the old reader from the appropriate per-CPU
- * element of the srcu_struct.  Note that this may well be a different
- * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
- */
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
-{
-	smp_mb(); /* C */  /* Avoid leaking the critical section. */
-	this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
-}
-EXPORT_SYMBOL_GPL(__srcu_read_unlock);
-
-/*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited().  We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections.  If there are still some readers after 10 microseconds,
- * we repeatedly block for 1-millisecond time periods.  This approach
- * has done well in testing, so there is no need for a config parameter.
- */
-#define SRCU_RETRY_CHECK_DELAY		5
-#define SYNCHRONIZE_SRCU_TRYCOUNT	2
-#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12
-
-/*
- * @@@ Wait until all pre-existing readers complete.  Such readers
- * will have used the index specified by "idx".
- * the caller should ensures the ->completed is not changed while checking
- * and idx = (->completed & 1) ^ 1
- */
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
-{
-	for (;;) {
-		if (srcu_readers_active_idx_check(sp, idx))
-			return true;
-		if (--trycount <= 0)
-			return false;
-		udelay(SRCU_RETRY_CHECK_DELAY);
-	}
-}
-
-/*
- * Increment the ->completed counter so that future SRCU readers will
- * use the other rank of the ->(un)lock_count[] arrays.  This allows
- * us to wait for pre-existing readers in a starvation-free manner.
- */
-static void srcu_flip(struct srcu_struct *sp)
-{
-	WRITE_ONCE(sp->completed, sp->completed + 1);
-
-	/*
-	 * Ensure that if the updater misses an __srcu_read_unlock()
-	 * increment, that task's next __srcu_read_lock() will see the
-	 * above counter update.  Note that both this memory barrier
-	 * and the one in srcu_readers_active_idx_check() provide the
-	 * guarantee for __srcu_read_lock().
-	 */
-	smp_mb(); /* D */  /* Pairs with C. */
-}
-
-/*
- * Enqueue an SRCU callback on the specified srcu_struct structure,
- * initiating grace-period processing if it is not already running.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing SRCU read-side critical section.  On systems with
- * more than one CPU, this means that when "func()" is invoked, each CPU
- * is guaranteed to have executed a full memory barrier since the end of
- * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu().  It also means that each CPU executing
- * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
- * but before the beginning of that SRCU read-side critical section.
- * Note that these guarantees include CPUs that are offline, idle, or
- * executing in user mode, as well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
- * resulting SRCU callback function "func()", then both CPU A and CPU
- * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
- * This guarantee applies even if CPU A and CPU B are the same CPU (but
- * again only if the system has more than one CPU).
- *
- * Of course, these guarantees apply only for invocations of call_srcu(),
- * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
- * srcu_struct structure.
- */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
-	       rcu_callback_t func)
-{
-	unsigned long flags;
-
-	head->next = NULL;
-	head->func = func;
-	spin_lock_irqsave(&sp->queue_lock, flags);
-	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
-	rcu_batch_queue(&sp->batch_queue, head);
-	if (!sp->running) {
-		sp->running = true;
-		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
-	}
-	spin_unlock_irqrestore(&sp->queue_lock, flags);
-}
-EXPORT_SYMBOL_GPL(call_srcu);
-
-static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
-static void srcu_reschedule(struct srcu_struct *sp);
-
-/*
- * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
- */
-static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
-{
-	struct rcu_synchronize rcu;
-	struct rcu_head *head = &rcu.head;
-	bool done = false;
-
-	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
-			 lock_is_held(&rcu_bh_lock_map) ||
-			 lock_is_held(&rcu_lock_map) ||
-			 lock_is_held(&rcu_sched_lock_map),
-			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
-
-	might_sleep();
-	init_completion(&rcu.completion);
-
-	head->next = NULL;
-	head->func = wakeme_after_rcu;
-	spin_lock_irq(&sp->queue_lock);
-	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
-	if (!sp->running) {
-		/* steal the processing owner */
-		sp->running = true;
-		rcu_batch_queue(&sp->batch_check0, head);
-		spin_unlock_irq(&sp->queue_lock);
-
-		srcu_advance_batches(sp, trycount);
-		if (!rcu_batch_empty(&sp->batch_done)) {
-			BUG_ON(sp->batch_done.head != head);
-			rcu_batch_dequeue(&sp->batch_done);
-			done = true;
-		}
-		/* give the processing owner to work_struct */
-		srcu_reschedule(sp);
-	} else {
-		rcu_batch_queue(&sp->batch_queue, head);
-		spin_unlock_irq(&sp->queue_lock);
-	}
-
-	if (!done) {
-		wait_for_completion(&rcu.completion);
-		smp_mb(); /* Caller's later accesses after GP. */
-	}
-
-}
-
-/**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
- *
- * Wait for the count to drain to zero of both indexes. To avoid the
- * possible starvation of synchronize_srcu(), it waits for the count of
- * the index=((->completed & 1) ^ 1) to drain to zero at first,
- * and then flip the completed and wait for the count of the other index.
- *
- * Can block; must be called from process context.
- *
- * Note that it is illegal to call synchronize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section,
- * as long as the resulting graph of srcu_structs is acyclic.
- *
- * There are memory-ordering constraints implied by synchronize_srcu().
- * On systems with more than one CPU, when synchronize_srcu() returns,
- * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU-sched read-side critical section
- * whose beginning preceded the call to synchronize_srcu().  In addition,
- * each CPU having an SRCU read-side critical section that extends beyond
- * the return from synchronize_srcu() is guaranteed to have executed a
- * full memory barrier after the beginning of synchronize_srcu() and before
- * the beginning of that SRCU read-side critical section.  Note that these
- * guarantees include CPUs that are offline, idle, or executing in user mode,
- * as well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_srcu(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
- * are the same CPU, but again only if the system has more than one CPU.
- *
- * Of course, these memory-ordering guarantees apply only when
- * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
- * passed the same srcu_struct structure.
- */
-void synchronize_srcu(struct srcu_struct *sp)
-{
-	__synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
-			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
-			   : SYNCHRONIZE_SRCU_TRYCOUNT);
-}
-EXPORT_SYMBOL_GPL(synchronize_srcu);
-
-/**
- * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
- *
- * Wait for an SRCU grace period to elapse, but be more aggressive about
- * spinning rather than blocking when waiting.
- *
- * Note that synchronize_srcu_expedited() has the same deadlock and
- * memory-ordering properties as does synchronize_srcu().
- */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
-{
-	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
-}
-EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
-
-/**
- * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
- */
-void srcu_barrier(struct srcu_struct *sp)
-{
-	synchronize_srcu(sp);
-}
-EXPORT_SYMBOL_GPL(srcu_barrier);
-
-/**
- * srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
- *
- * Report the number of batches, correlated with, but not necessarily
- * precisely the same as, the number of grace periods that have elapsed.
- */
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
-{
-	return sp->completed;
-}
-EXPORT_SYMBOL_GPL(srcu_batches_completed);
-
-#define SRCU_CALLBACK_BATCH	10
-#define SRCU_INTERVAL		1
-
-/*
- * Move any new SRCU callbacks to the first stage of the SRCU grace
- * period pipeline.
- */
-static void srcu_collect_new(struct srcu_struct *sp)
-{
-	if (!rcu_batch_empty(&sp->batch_queue)) {
-		spin_lock_irq(&sp->queue_lock);
-		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
-		spin_unlock_irq(&sp->queue_lock);
-	}
-}
-
-/*
- * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
- * ->batch_check1 and then to ->batch_done as readers drain.
- */
-static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
-{
-	int idx = 1 ^ (sp->completed & 1);
-
-	/*
-	 * Because readers might be delayed for an extended period after
-	 * fetching ->completed for their index, at any point in time there
-	 * might well be readers using both idx=0 and idx=1.  We therefore
-	 * need to wait for readers to clear from both index values before
-	 * invoking a callback.
-	 */
-
-	if (rcu_batch_empty(&sp->batch_check0) &&
-	    rcu_batch_empty(&sp->batch_check1))
-		return; /* no callbacks need to be advanced */
-
-	if (!try_check_zero(sp, idx, trycount))
-		return; /* failed to advance, will try after SRCU_INTERVAL */
-
-	/*
-	 * The callbacks in ->batch_check1 have already done with their
-	 * first zero check and flip back when they were enqueued on
-	 * ->batch_check0 in a previous invocation of srcu_advance_batches().
-	 * (Presumably try_check_zero() returned false during that
-	 * invocation, leaving the callbacks stranded on ->batch_check1.)
-	 * They are therefore ready to invoke, so move them to ->batch_done.
-	 */
-	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
-
-	if (rcu_batch_empty(&sp->batch_check0))
-		return; /* no callbacks need to be advanced */
-	srcu_flip(sp);
-
-	/*
-	 * The callbacks in ->batch_check0 just finished their
-	 * first check zero and flip, so move them to ->batch_check1
-	 * for future checking on the other idx.
-	 */
-	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);
-
-	/*
-	 * SRCU read-side critical sections are normally short, so check
-	 * at least twice in quick succession after a flip.
-	 */
-	trycount = trycount < 2 ? 2 : trycount;
-	if (!try_check_zero(sp, idx^1, trycount))
-		return; /* failed to advance, will try after SRCU_INTERVAL */
-
-	/*
-	 * The callbacks in ->batch_check1 have now waited for all
-	 * pre-existing readers using both idx values.  They are therefore
-	 * ready to invoke, so move them to ->batch_done.
-	 */
-	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
-}
-
-/*
- * Invoke a limited number of SRCU callbacks that have passed through
- * their grace period.  If there are more to do, SRCU will reschedule
- * the workqueue.  Note that needed memory barriers have been executed
- * in this task's context by srcu_readers_active_idx_check().
- */
-static void srcu_invoke_callbacks(struct srcu_struct *sp)
-{
-	int i;
-	struct rcu_head *head;
-
-	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
-		head = rcu_batch_dequeue(&sp->batch_done);
-		if (!head)
-			break;
-		local_bh_disable();
-		head->func(head);
-		local_bh_enable();
-	}
-}
-
-/*
- * Finished one round of SRCU grace period.  Start another if there are
- * more SRCU callbacks queued, otherwise put SRCU into not-running state.
- */
-static void srcu_reschedule(struct srcu_struct *sp)
-{
-	bool pending = true;
-
-	if (rcu_batch_empty(&sp->batch_done) &&
-	    rcu_batch_empty(&sp->batch_check1) &&
-	    rcu_batch_empty(&sp->batch_check0) &&
-	    rcu_batch_empty(&sp->batch_queue)) {
-		spin_lock_irq(&sp->queue_lock);
-		if (rcu_batch_empty(&sp->batch_done) &&
-		    rcu_batch_empty(&sp->batch_check1) &&
-		    rcu_batch_empty(&sp->batch_check0) &&
-		    rcu_batch_empty(&sp->batch_queue)) {
-			sp->running = false;
-			pending = false;
-		}
-		spin_unlock_irq(&sp->queue_lock);
-	}
-
-	if (pending)
-		queue_delayed_work(system_power_efficient_wq,
-				   &sp->work, SRCU_INTERVAL);
-}
-
-/*
- * This is the work-queue function that handles SRCU grace periods.
- */
-void process_srcu(struct work_struct *work)
-{
-	struct srcu_struct *sp;
-
-	sp = container_of(work, struct srcu_struct, work.work);
-
-	srcu_collect_new(sp);
-	srcu_advance_batches(sp, 1);
-	srcu_invoke_callbacks(sp);
-	srcu_reschedule(sp);
-}
-EXPORT_SYMBOL_GPL(process_srcu);
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82..1a1c104 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -38,8 +38,8 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
 	sp->srcu_lock_nesting[0] = 0;
 	sp->srcu_lock_nesting[1] = 0;
 	init_swait_queue_head(&sp->srcu_wq);
-	sp->srcu_gp_seq = 0;
-	rcu_segcblist_init(&sp->srcu_cblist);
+	sp->srcu_cb_head = NULL;
+	sp->srcu_cb_tail = &sp->srcu_cb_head;
 	sp->srcu_gp_running = false;
 	sp->srcu_gp_waiting = false;
 	sp->srcu_idx = 0;
@@ -88,31 +88,16 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 {
 	WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
 	flush_work(&sp->srcu_work);
-	WARN_ON(rcu_seq_state(sp->srcu_gp_seq));
 	WARN_ON(sp->srcu_gp_running);
 	WARN_ON(sp->srcu_gp_waiting);
-	WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist));
+	WARN_ON(sp->srcu_cb_head);
+	WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
- * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
- */
-int __srcu_read_lock(struct srcu_struct *sp)
-{
-	int idx;
-
-	idx = READ_ONCE(sp->srcu_idx);
-	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
-	return idx;
-}
-EXPORT_SYMBOL_GPL(__srcu_read_lock);
-
-/*
  * Removes the count for the old reader from the appropriate element of
- * the srcu_struct.  Must be called from process context.
+ * the srcu_struct.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
@@ -132,52 +117,44 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 void srcu_drive_gp(struct work_struct *wp)
 {
 	int idx;
-	struct rcu_cblist ready_cbs;
-	struct srcu_struct *sp;
+	struct rcu_head *lh;
 	struct rcu_head *rhp;
+	struct srcu_struct *sp;
 
 	sp = container_of(wp, struct srcu_struct, srcu_work);
-	if (sp->srcu_gp_running || rcu_segcblist_empty(&sp->srcu_cblist))
+	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
 		return; /* Already running or nothing to do. */
 
-	/* Tag recently arrived callbacks and wait for readers. */
+	/* Remove recently arrived callbacks and wait for readers. */
 	WRITE_ONCE(sp->srcu_gp_running, true);
-	rcu_segcblist_accelerate(&sp->srcu_cblist,
-				 rcu_seq_snap(&sp->srcu_gp_seq));
-	rcu_seq_start(&sp->srcu_gp_seq);
+	local_irq_disable();
+	lh = sp->srcu_cb_head;
+	sp->srcu_cb_head = NULL;
+	sp->srcu_cb_tail = &sp->srcu_cb_head;
+	local_irq_enable();
 	idx = sp->srcu_idx;
 	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
 	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
 	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
 	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
-	rcu_seq_end(&sp->srcu_gp_seq);
 
-	/* Update callback list based on GP, and invoke ready callbacks. */
-	rcu_segcblist_advance(&sp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
-	if (rcu_segcblist_ready_cbs(&sp->srcu_cblist)) {
-		rcu_cblist_init(&ready_cbs);
-		local_irq_disable();
-		rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs);
-		local_irq_enable();
-		rhp = rcu_cblist_dequeue(&ready_cbs);
-		for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
-			local_bh_disable();
-			rhp->func(rhp);
-			local_bh_enable();
-		}
-		local_irq_disable();
-		rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs);
-		local_irq_enable();
+	/* Invoke the callbacks we removed above. */
+	while (lh) {
+		rhp = lh;
+		lh = lh->next;
+		local_bh_disable();
+		rhp->func(rhp);
+		local_bh_enable();
 	}
-	WRITE_ONCE(sp->srcu_gp_running, false);
 
 	/*
-	 * If more callbacks, reschedule ourselves.  This can race with
-	 * a call_srcu() at interrupt level, but the ->srcu_gp_running
-	 * checks will straighten that out.
+	 * Enable rescheduling, and if there are more callbacks,
+	 * reschedule ourselves.  This can race with a call_srcu()
+	 * at interrupt level, but the ->srcu_gp_running checks will
+	 * straighten that out.
 	 */
-	if (!rcu_segcblist_empty(&sp->srcu_cblist))
+	WRITE_ONCE(sp->srcu_gp_running, false);
+	if (READ_ONCE(sp->srcu_cb_head))
 		schedule_work(&sp->srcu_work);
 }
 EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -186,14 +163,16 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
  * Enqueue an SRCU callback on the specified srcu_struct structure,
  * initiating grace-period processing if it is not already running.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
 	unsigned long flags;
 
-	head->func = func;
+	rhp->func = func;
+	rhp->next = NULL;
 	local_irq_save(flags);
-	rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
+	*sp->srcu_cb_tail = rhp;
+	sp->srcu_cb_tail = &rhp->next;
 	local_irq_restore(flags);
 	if (!READ_ONCE(sp->srcu_gp_running))
 		schedule_work(&sp->srcu_work);
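
The srcutiny.c change above replaces the rcu_segcblist machinery with a bare singly linked list whose tail is tracked through a pointer-to-pointer, srcu_cb_head/srcu_cb_tail.  For reference, here is a minimal stand-alone sketch of that idiom in plain user-space C; the names are invented for illustration, no locking is shown (the real code disables interrupts around the list updates), and it is not part of the patch:

#include <stdio.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *cb);
};

/* Mirrors sp->srcu_cb_head / sp->srcu_cb_tail; illustrative names only. */
static struct cb *cb_head;
static struct cb **cb_tail = &cb_head;

/* Enqueue in the style of call_srcu(): append through the tail pointer. */
static void cb_enqueue(struct cb *cbp, void (*func)(struct cb *cb))
{
	cbp->func = func;
	cbp->next = NULL;
	*cb_tail = cbp;		/* Old tail's ->next (or the head) now points here. */
	cb_tail = &cbp->next;	/* Tail advances to the new last element. */
}

/* Drain in the style of srcu_drive_gp(): detach the whole list, then invoke. */
static void cb_drain(void)
{
	struct cb *list = cb_head;

	cb_head = NULL;		/* List is now empty... */
	cb_tail = &cb_head;	/* ...so the tail points back at the head. */
	while (list) {
		struct cb *next = list->next;

		list->func(list);
		list = next;
	}
}

static void hello(struct cb *cbp)
{
	printf("invoked callback at %p\n", (void *)cbp);
}

int main(void)
{
	struct cb a, b;

	cb_enqueue(&a, hello);
	cb_enqueue(&b, hello);
	cb_drain();	/* Invokes a then b, preserving enqueue order. */
	return 0;
}

The key property is that both enqueue and whole-list detach are O(1): call_srcu() appends through *srcu_cb_tail, and srcu_drive_gp() snapshots srcu_cb_head and resets the tail to the empty state before invoking anything.
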
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3ae8474..d0ca524 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -40,9 +40,15 @@
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
+/* Holdoff in nanoseconds for auto-expediting. */
+#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
+static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
 module_param(exp_holdoff, ulong, 0444);
 
+/* Overflow-check frequency: an N-bit mask gives a check roughly every 2**N grace periods. */
+static ulong counter_wrap_check = (ULONG_MAX >> 2);
+module_param(counter_wrap_check, ulong, 0444);
+
 static void srcu_invoke_callbacks(struct work_struct *work);
 static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
 
@@ -70,7 +76,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 
 	/* Each pass through this loop initializes one srcu_node structure. */
 	rcu_for_each_node_breadth_first(sp, snp) {
-		spin_lock_init(&snp->lock);
+		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -104,7 +110,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	snp_first = sp->level[level];
 	for_each_possible_cpu(cpu) {
 		sdp = per_cpu_ptr(sp->sda, cpu);
-		spin_lock_init(&sdp->lock);
+		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
 		sdp->srcu_cblist_invoking = false;
 		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -163,7 +169,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 	/* Don't re-initialize a lock while it is held. */
 	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
 	lockdep_init_map(&sp->dep_map, name, key, 0);
-	spin_lock_init(&sp->gp_lock);
+	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -180,7 +186,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
  */
 int init_srcu_struct(struct srcu_struct *sp)
 {
-	spin_lock_init(&sp->gp_lock);
+	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
 	return init_srcu_struct_fields(sp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -191,7 +197,7 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
  * First-use initialization of statically allocated srcu_struct
  * structure.  Wiring up the combining tree is more than can be
  * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
+ * to each update-side SRCU primitive.  Use sp->lock, which -is-
  * compile-time initialized, to resolve races involving multiple
  * CPUs trying to garner first-use privileges.
  */
@@ -203,13 +209,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
 	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		spin_unlock_irqrestore(&sp->gp_lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 		return;
 	}
 	init_srcu_struct_fields(sp, true);
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -275,15 +281,20 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 	 * not mean that there are no more readers, as one could have read
 	 * the current index but not have incremented the lock counter yet.
 	 *
-	 * Possible bug: There is no guarantee that there haven't been
-	 * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
-	 * counted, meaning that this could return true even if there are
-	 * still active readers.  Since there are no memory barriers around
-	 * srcu_flip(), the CPU is not required to increment ->srcu_idx
-	 * before running srcu_readers_unlock_idx(), which means that there
-	 * could be an arbitrarily large number of critical sections that
-	 * execute after srcu_readers_unlock_idx() but use the old value
-	 * of ->srcu_idx.
+	 * So suppose that the updater is preempted here for so long
+	 * that more than ULONG_MAX non-nested readers come and go in
+	 * the meantime.  It turns out that this cannot result in overflow
+	 * because if a reader modifies its unlock count after we read it
+	 * above, then that reader's next load of ->srcu_idx is guaranteed
+	 * to get the new value, which will cause it to operate on the
+	 * other bank of counters, where it cannot contribute to the
+	 * overflow of these counters.  This means that there is a maximum
+	 * of 2*NR_CPUS increments, which cannot overflow given current
+	 * systems, especially not on 64-bit systems.
+	 *
+	 * OK, how about nesting?  This does impose a limit on nesting
+	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
+	 * especially on 64-bit systems.
 	 */
 	return srcu_readers_lock_idx(sp, idx) == unlocks;
 }
@@ -357,7 +368,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +376,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 	int idx;
 
 	idx = READ_ONCE(sp->srcu_idx) & 0x1;
-	__this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -375,7 +386,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
@@ -401,8 +411,7 @@ static void srcu_gp_start(struct srcu_struct *sp)
 	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
 	int state;
 
-	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
-			 "Invoked srcu_gp_start() without ->gp_lock!");
+	lockdep_assert_held(&sp->lock);
 	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
@@ -490,17 +499,20 @@ static void srcu_gp_end(struct srcu_struct *sp)
 {
 	unsigned long cbdelay;
 	bool cbs;
+	int cpu;
+	unsigned long flags;
 	unsigned long gpseq;
 	int idx;
 	int idxnext;
 	unsigned long mask;
+	struct srcu_data *sdp;
 	struct srcu_node *snp;
 
 	/* Prevent more than one additional grace period. */
 	mutex_lock(&sp->srcu_cb_mutex);
 
 	/* End the current grace period. */
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	idx = rcu_seq_state(sp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
 	cbdelay = srcu_get_delay(sp);
@@ -509,7 +521,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
 		sp->srcu_gp_seq_needed_exp = gpseq;
-	spin_unlock_irq(&sp->gp_lock);
+	raw_spin_unlock_irq_rcu_node(sp);
 	mutex_unlock(&sp->srcu_gp_mutex);
 	/* A new grace period can start at this point.  But only one. */
 
@@ -517,7 +529,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
 	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
-		spin_lock_irq(&snp->lock);
+		raw_spin_lock_irq_rcu_node(snp);
 		cbs = false;
 		if (snp >= sp->level[rcu_num_lvls - 1])
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -527,28 +539,37 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			snp->srcu_gp_seq_needed_exp = gpseq;
 		mask = snp->srcu_data_have_cbs[idx];
 		snp->srcu_data_have_cbs[idx] = 0;
-		spin_unlock_irq(&snp->lock);
-		if (cbs) {
-			smp_mb(); /* GP end before CB invocation. */
+		raw_spin_unlock_irq_rcu_node(snp);
+		if (cbs)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
-		}
+
+		/* Occasionally prevent srcu_data counter wrap. */
+		if (!(gpseq & counter_wrap_check))
+			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+				sdp = per_cpu_ptr(sp->sda, cpu);
+				raw_spin_lock_irqsave_rcu_node(sdp, flags);
+				if (ULONG_CMP_GE(gpseq,
+						 sdp->srcu_gp_seq_needed + 100))
+					sdp->srcu_gp_seq_needed = gpseq;
+				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+			}
 	}
 
 	/* Callback initiation done, allow grace periods after next. */
 	mutex_unlock(&sp->srcu_cb_mutex);
 
 	/* Start a new grace period if needed. */
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 		/* Throttle expedited grace periods: Should be rare! */
 		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
 				    ? 0 : SRCU_INTERVAL);
 	} else {
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 	}
 }
 
@@ -568,18 +589,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
-		spin_lock_irqsave(&snp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
-			spin_unlock_irqrestore(&snp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 			return;
 		}
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
-		spin_unlock_irqrestore(&snp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
 		sp->srcu_gp_seq_needed_exp = s;
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -601,14 +622,13 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 	for (; snp != NULL; snp = snp->srcu_parent) {
 		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
-		spin_lock_irqsave(&snp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 			snp_seq = snp->srcu_have_cbs[idx];
 			if (snp == sdp->mynode && snp_seq == s)
 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
-			spin_unlock_irqrestore(&snp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 			if (snp == sdp->mynode && snp_seq != s) {
-				smp_mb(); /* CBs after GP! */
 				srcu_schedule_cbs_sdp(sdp, do_norm
 							   ? SRCU_INTERVAL
 							   : 0);
@@ -623,11 +643,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
 			snp->srcu_gp_seq_needed_exp = s;
-		spin_unlock_irqrestore(&snp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
 
 	/* Top of tree, must ensure the grace period will be started. */
-	spin_lock_irqsave(&sp->gp_lock, flags);
+	raw_spin_lock_irqsave_rcu_node(sp, flags);
 	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
 		/*
 		 * Record need for grace period s.  Pair with load
@@ -646,7 +666,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 		queue_delayed_work(system_power_efficient_wq, &sp->work,
 				   srcu_get_delay(sp));
 	}
-	spin_unlock_irqrestore(&sp->gp_lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
 
 /*
@@ -672,6 +692,16 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
  */
 static void srcu_flip(struct srcu_struct *sp)
 {
+	/*
+	 * Ensure that if this updater saw a given reader's increment
+	 * from __srcu_read_lock(), that reader was using an old value
+	 * of ->srcu_idx.  Also ensure that if a given reader sees the
+	 * new value of ->srcu_idx, this updater's earlier scans cannot
+	 * have seen that reader's increments (which is OK, because this
+	 * grace period need not wait on that reader).
+	 */
+	smp_mb(); /* E */  /* Pairs with B and C. */
+
 	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
 
 	/*
@@ -746,6 +776,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
 }
 
 /*
+ * SRCU callback function to leak a callback.
+ */
+static void srcu_leak_callback(struct rcu_head *rhp)
+{
+}
+
+/*
  * Enqueue an SRCU callback on the srcu_data structure associated with
  * the current CPU and the specified srcu_struct structure, initiating
  * grace-period processing if it is not already running.
@@ -783,10 +820,16 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	struct srcu_data *sdp;
 
 	check_init_srcu_struct(sp);
+	if (debug_rcu_head_queue(rhp)) {
+		/* Probable double call_srcu(), so leak the callback. */
+		WRITE_ONCE(rhp->func, srcu_leak_callback);
+		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
+		return;
+	}
 	rhp->func = func;
 	local_irq_save(flags);
 	sdp = this_cpu_ptr(sp->sda);
-	spin_lock(&sdp->lock);
+	raw_spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
@@ -800,13 +843,30 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
-	spin_unlock_irqrestore(&sdp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
 	if (needgp)
 		srcu_funnel_gp_start(sp, sdp, s, do_norm);
 	else if (needexp)
 		srcu_funnel_exp_start(sp, sdp->mynode, s);
 }
 
+/**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+ * @sp: srcu_struct on which to queue the callback
+ * @rhp: structure to be used for queueing the SRCU callback.
+ * @func: function to be invoked after the SRCU grace period
+ *
+ * The callback function will be invoked some time after a full SRCU
+ * grace period elapses, in other words after all pre-existing SRCU
+ * read-side critical sections have completed.  However, the callback
+ * function might well execute concurrently with other SRCU read-side
+ * critical sections that started after call_srcu() was invoked.  SRCU
+ * read-side critical sections are delimited by srcu_read_lock() and
+ * srcu_read_unlock(), and may be nested.
+ *
+ * The callback will be invoked from process context, but must nevertheless
+ * be fast and must not block.
+ */
 void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
@@ -954,13 +1014,16 @@ void srcu_barrier(struct srcu_struct *sp)
 	 */
 	for_each_possible_cpu(cpu) {
 		sdp = per_cpu_ptr(sp->sda, cpu);
-		spin_lock_irq(&sdp->lock);
+		raw_spin_lock_irq_rcu_node(sdp);
 		atomic_inc(&sp->srcu_barrier_cpu_cnt);
 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
+		debug_rcu_head_queue(&sdp->srcu_barrier_head);
 		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
-					   &sdp->srcu_barrier_head, 0))
+					   &sdp->srcu_barrier_head, 0)) {
+			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
 			atomic_dec(&sp->srcu_barrier_cpu_cnt);
-		spin_unlock_irq(&sdp->lock);
+		}
+		raw_spin_unlock_irq_rcu_node(sdp);
 	}
 
 	/* Remove the initial count, at which point reaching zero can happen. */
@@ -1009,17 +1072,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
 	 */
 	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
 	if (idx == SRCU_STATE_IDLE) {
-		spin_lock_irq(&sp->gp_lock);
+		raw_spin_lock_irq_rcu_node(sp);
 		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-			spin_unlock_irq(&sp->gp_lock);
+			raw_spin_unlock_irq_rcu_node(sp);
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return;
 		}
 		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
 		if (idx == SRCU_STATE_IDLE)
 			srcu_gp_start(sp);
-		spin_unlock_irq(&sp->gp_lock);
+		raw_spin_unlock_irq_rcu_node(sp);
 		if (idx != SRCU_STATE_IDLE) {
 			mutex_unlock(&sp->srcu_gp_mutex);
 			return; /* Someone else started the grace period. */
@@ -1068,22 +1131,22 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	sdp = container_of(work, struct srcu_data, work.work);
 	sp = sdp->sp;
 	rcu_cblist_init(&ready_cbs);
-	spin_lock_irq(&sdp->lock);
-	smp_mb(); /* Old grace periods before callback invocation! */
+	raw_spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
 			      rcu_seq_current(&sp->srcu_gp_seq));
 	if (sdp->srcu_cblist_invoking ||
 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
-		spin_unlock_irq(&sdp->lock);
+		raw_spin_unlock_irq_rcu_node(sdp);
 		return;  /* Someone else on the job or nothing to do. */
 	}
 
 	/* We are on the job!  Extract and invoke ready callbacks. */
 	sdp->srcu_cblist_invoking = true;
 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
-	spin_unlock_irq(&sdp->lock);
+	raw_spin_unlock_irq_rcu_node(sdp);
 	rhp = rcu_cblist_dequeue(&ready_cbs);
 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
+		debug_rcu_head_unqueue(rhp);
 		local_bh_disable();
 		rhp->func(rhp);
 		local_bh_enable();
@@ -1093,13 +1156,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	 * Update counts, accelerate new callbacks, and if needed,
 	 * schedule another round of callback invocation.
 	 */
-	spin_lock_irq(&sdp->lock);
+	raw_spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 				       rcu_seq_snap(&sp->srcu_gp_seq));
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
-	spin_unlock_irq(&sdp->lock);
+	raw_spin_unlock_irq_rcu_node(sdp);
 	if (more)
 		srcu_schedule_cbs_sdp(sdp, 0);
 }
@@ -1112,7 +1175,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 {
 	bool pushgp = true;
 
-	spin_lock_irq(&sp->gp_lock);
+	raw_spin_lock_irq_rcu_node(sp);
 	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
 		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
 			/* All requests fulfilled, time to go idle. */
@@ -1122,7 +1185,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
 		/* Outstanding request and no GP.  Start one. */
 		srcu_gp_start(sp);
 	}
-	spin_unlock_irq(&sp->gp_lock);
+	raw_spin_unlock_irq_rcu_node(sp);
 
 	if (pushgp)
 		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
@@ -1153,3 +1216,12 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,
 	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
+
+static int __init srcu_bootup_announce(void)
+{
+	pr_info("Hierarchical SRCU implementation.\n");
+	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
+		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
+	return 0;
+}
+early_initcall(srcu_bootup_announce);
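
Both parameters added to srcutree.c above, exp_holdoff and counter_wrap_check, are declared with permission 0444, so they are read-only through sysfs at run time and are in practice set on the kernel boot command line.  Assuming the usual KBUILD_MODNAME prefix for a built-in object, which for kernel/rcu/srcutree.c would be "srcutree", a non-default holdoff of 50 microseconds would be requested with:

	srcutree.exp_holdoff=50000

and the new srcu_bootup_announce() initcall would then report the non-default value during early boot.  (The "srcutree." prefix is inferred from the file name, not spelled out in this patch.)
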
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index e538573..f848896 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -35,15 +35,26 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
-#include <linux/trace_events.h>
 
 #include "rcu.h"
 
-/* Forward declarations for tiny_plugin.h. */
-struct rcu_ctrlblk;
-static void __call_rcu(struct rcu_head *head,
-		       rcu_callback_t func,
-		       struct rcu_ctrlblk *rcp);
+/* Global control variables for rcupdate callback mechanism. */
+struct rcu_ctrlblk {
+	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
+	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
+	struct rcu_head **curtail;	/* ->next pointer of last CB. */
+};
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_sched_ctrlblk = {
+	.donetail	= &rcu_sched_ctrlblk.rcucblist,
+	.curtail	= &rcu_sched_ctrlblk.rcucblist,
+};
+
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+	.donetail	= &rcu_bh_ctrlblk.rcucblist,
+	.curtail	= &rcu_bh_ctrlblk.rcucblist,
+};
 
 #include "tiny_plugin.h"
 
@@ -59,19 +70,6 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL(rcu_barrier_sched);
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
-
-/*
- * Test whether RCU thinks that the current CPU is idle.
- */
-bool notrace __rcu_is_watching(void)
-{
-	return true;
-}
-EXPORT_SYMBOL(__rcu_is_watching);
-
-#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
-
 /*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
  * Also irqs are disabled to avoid confusion due to interrupt handlers
@@ -79,7 +77,6 @@ EXPORT_SYMBOL(__rcu_is_watching);
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
-	RCU_TRACE(reset_cpu_stall_ticks(rcp);)
 	if (rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
 		return 1;
@@ -125,7 +122,6 @@ void rcu_bh_qs(void)
  */
 void rcu_check_callbacks(int user)
 {
-	RCU_TRACE(check_cpu_stalls();)
 	if (user)
 		rcu_sched_qs();
 	else if (!in_softirq())
@@ -140,10 +136,8 @@ void rcu_check_callbacks(int user)
  */
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
-	const char *rn = NULL;
 	struct rcu_head *next, *list;
 	unsigned long flags;
-	RCU_TRACE(int cb_count = 0;)
 
 	/* Move the ready-to-invoke callbacks to a local list. */
 	local_irq_save(flags);
@@ -152,7 +146,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 		local_irq_restore(flags);
 		return;
 	}
-	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1);)
 	list = rcp->rcucblist;
 	rcp->rcucblist = *rcp->donetail;
 	*rcp->donetail = NULL;
@@ -162,22 +155,15 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	local_irq_restore(flags);
 
 	/* Invoke the callbacks on the local list. */
-	RCU_TRACE(rn = rcp->name;)
 	while (list) {
 		next = list->next;
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
 		local_bh_disable();
-		__rcu_reclaim(rn, list);
+		__rcu_reclaim("", list);
 		local_bh_enable();
 		list = next;
-		RCU_TRACE(cb_count++;)
 	}
-	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count);)
-	RCU_TRACE(trace_rcu_batch_end(rcp->name,
-				      cb_count, 0, need_resched(),
-				      is_idle_task(current),
-				      false));
 }
 
 static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
@@ -221,7 +207,6 @@ static void __call_rcu(struct rcu_head *head,
 	local_irq_save(flags);
 	*rcp->curtail = head;
 	rcp->curtail = &head->next;
-	RCU_TRACE(rcp->qlen++;)
 	local_irq_restore(flags);
 
 	if (unlikely(is_idle_task(current))) {
@@ -254,8 +239,5 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk);)
-	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk);)
-
 	rcu_early_boot_tests();
 }
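
The rcu_ctrlblk structure moved into tiny.c above runs all of Tiny RCU's callback handling off three pointers: ->rcucblist is the list head, ->curtail is the ->next pointer of the last queued callback, and ->donetail records how far the list had grown as of the last quiescent state.  A quiescent state therefore promotes callbacks simply by copying ->curtail into ->donetail, and __rcu_process_callbacks() splices off only that "done" prefix.  Below is a stand-alone sketch of the same splice, with illustrative names, no interrupt disabling, and assuming nothing beyond what the hunks above show:

#include <stdio.h>
#include <stddef.h>

struct head { struct head *next; const char *name; };

/* Mirrors rcucblist/donetail/curtail; illustrative names only. */
static struct head *cblist;
static struct head **donetail = &cblist;
static struct head **curtail = &cblist;

static void enqueue(struct head *hp, const char *name)
{
	hp->name = name;
	hp->next = NULL;
	*curtail = hp;
	curtail = &hp->next;
}

/* A quiescent state makes everything queued so far invocable. */
static void note_quiescent_state(void)
{
	donetail = curtail;
}

/* Splice off and invoke only the "done" prefix of the list. */
static void process_callbacks(void)
{
	struct head *list = cblist;

	cblist = *donetail;		/* Keep callbacks queued after the QS. */
	*donetail = NULL;
	if (curtail == donetail)
		curtail = &cblist;
	donetail = &cblist;
	while (list) {
		struct head *next = list->next;

		printf("invoking %s\n", list->name);
		list = next;
	}
}

int main(void)
{
	struct head a, b, c;

	enqueue(&a, "a");
	enqueue(&b, "b");
	note_quiescent_state();		/* a and b are now ready. */
	enqueue(&c, "c");		/* c must wait for the next QS. */
	process_callbacks();		/* Prints a and b; c stays queued. */
	return 0;
}
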
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 371034e..f0a01b2 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -22,36 +22,6 @@
  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
-#include <linux/kthread.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
-	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
-	struct rcu_head **curtail;	/* ->next pointer of last CB. */
-	RCU_TRACE(long qlen);		/* Number of pending CBs. */
-	RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
-	RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
-	RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
-	RCU_TRACE(const char *name);	/* Name of RCU type. */
-};
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
-	.donetail	= &rcu_sched_ctrlblk.rcucblist,
-	.curtail	= &rcu_sched_ctrlblk.rcucblist,
-	RCU_TRACE(.name = "rcu_sched")
-};
-
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-	.donetail	= &rcu_bh_ctrlblk.rcucblist,
-	.curtail	= &rcu_bh_ctrlblk.rcucblist,
-	RCU_TRACE(.name = "rcu_bh")
-};
-
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
 #include <linux/kernel_stat.h>
 
@@ -75,96 +45,3 @@ void __init rcu_scheduler_starting(void)
 }
 
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
-
-#ifdef CONFIG_RCU_TRACE
-
-static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	rcp->qlen -= n;
-	local_irq_restore(flags);
-}
-
-/*
- * Dump statistics for TINY_RCU, such as they are.
- */
-static int show_tiny_stats(struct seq_file *m, void *unused)
-{
-	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
-	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
-	return 0;
-}
-
-static int show_tiny_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_tiny_stats, NULL);
-}
-
-static const struct file_operations show_tiny_stats_fops = {
-	.owner = THIS_MODULE,
-	.open = show_tiny_stats_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static struct dentry *rcudir;
-
-static int __init rcutiny_trace_init(void)
-{
-	struct dentry *retval;
-
-	rcudir = debugfs_create_dir("rcu", NULL);
-	if (!rcudir)
-		goto free_out;
-	retval = debugfs_create_file("rcudata", 0444, rcudir,
-				     NULL, &show_tiny_stats_fops);
-	if (!retval)
-		goto free_out;
-	return 0;
-free_out:
-	debugfs_remove_recursive(rcudir);
-	return 1;
-}
-device_initcall(rcutiny_trace_init);
-
-static void check_cpu_stall(struct rcu_ctrlblk *rcp)
-{
-	unsigned long j;
-	unsigned long js;
-
-	if (rcu_cpu_stall_suppress)
-		return;
-	rcp->ticks_this_gp++;
-	j = jiffies;
-	js = READ_ONCE(rcp->jiffies_stall);
-	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
-		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
-		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
-		       jiffies - rcp->gp_start, rcp->qlen);
-		dump_stack();
-		WRITE_ONCE(rcp->jiffies_stall,
-			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	} else if (ULONG_CMP_GE(j, js)) {
-		WRITE_ONCE(rcp->jiffies_stall,
-			   jiffies + rcu_jiffies_till_stall_check());
-	}
-}
-
-static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
-{
-	rcp->ticks_this_gp = 0;
-	rcp->gp_start = jiffies;
-	WRITE_ONCE(rcp->jiffies_stall,
-		   jiffies + rcu_jiffies_till_stall_check());
-}
-
-static void check_cpu_stalls(void)
-{
-	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk);)
-	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk);)
-}
-
-#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e354e47..51d4c3a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -168,35 +168,17 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp,
 static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
-#ifdef CONFIG_RCU_KTHREAD_PRIO
-static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
-#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
-#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
 module_param(kthread_prio, int, 0644);
 
 /* Delay in jiffies for grace-period initialization delays, debug only. */
 
-#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
-static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
-module_param(gp_preinit_delay, int, 0644);
-#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
-static const int gp_preinit_delay;
-#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
-
-#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
-static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
-module_param(gp_init_delay, int, 0644);
-#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
-static const int gp_init_delay;
-#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
-
-#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
-static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
-module_param(gp_cleanup_delay, int, 0644);
-#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
-static const int gp_cleanup_delay;
-#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
+static int gp_preinit_delay;
+module_param(gp_preinit_delay, int, 0444);
+static int gp_init_delay;
+module_param(gp_init_delay, int, 0444);
+static int gp_cleanup_delay;
+module_param(gp_cleanup_delay, int, 0444);
 
 /*
  * Number of grace periods between delays, normalized by the duration of
@@ -250,6 +232,7 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  */
 void rcu_sched_qs(void)
 {
+	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
 	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
 		return;
 	trace_rcu_grace_period(TPS("rcu_sched"),
@@ -265,6 +248,7 @@ void rcu_sched_qs(void)
 
 void rcu_bh_qs(void)
 {
+	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
 	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
 		trace_rcu_grace_period(TPS("rcu_bh"),
 				       __this_cpu_read(rcu_bh_data.gpnum),
@@ -286,10 +270,6 @@ void rcu_bh_qs(void)
 static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
-	.dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
 /*
@@ -478,7 +458,7 @@ void rcu_note_context_switch(bool preempt)
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_sched_qs();
-	rcu_preempt_note_context_switch();
+	rcu_preempt_note_context_switch(preempt);
 	/* Load rcu_urgent_qs before other flags. */
 	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
 		goto out;
@@ -534,9 +514,12 @@ void rcu_all_qs(void)
 }
 EXPORT_SYMBOL_GPL(rcu_all_qs);
 
-static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
-static long qhimark = 10000;	/* If this many pending, ignore blimit. */
-static long qlowmark = 100;	/* Once only this many pending, use blimit. */
+#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
+static long blimit = DEFAULT_RCU_BLIMIT;
+#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
+static long qhimark = DEFAULT_RCU_QHIMARK;
+#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
+static long qlowmark = DEFAULT_RCU_QLOMARK;
 
 module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
@@ -559,10 +542,7 @@ module_param(jiffies_till_sched_qs, ulong, 0644);
 
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 				  struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp,
-			 int (*f)(struct rcu_data *rsp, bool *isidle,
-				  unsigned long *maxj),
-			 bool *isidle, unsigned long *maxj);
+static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
@@ -757,6 +737,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
 	int *fp = &rnp->need_future_gp[idx];
 
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
 	return READ_ONCE(*fp);
 }
 
@@ -768,6 +749,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
 static bool
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
 	if (rcu_gp_in_progress(rsp))
 		return false;  /* No, a grace period is already in progress. */
 	if (rcu_future_needs_gp(rsp))
@@ -794,6 +776,7 @@ static void rcu_eqs_enter_common(bool user)
 	struct rcu_data *rdp;
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
 	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
@@ -864,7 +847,6 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -914,7 +896,6 @@ void rcu_irq_exit(void)
 		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
 		rdtp->dynticks_nesting--;
 	}
-	rcu_sysidle_enter(1);
 }
 
 /*
@@ -967,6 +948,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -995,7 +977,6 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -1047,7 +1028,6 @@ void rcu_irq_enter(void)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_exit_common(oldval, true);
-	rcu_sysidle_exit(1);
 }
 
 /*
@@ -1130,22 +1110,11 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * __rcu_is_watching - are RCU read-side critical sections safe?
- *
- * Return true if RCU is watching the running CPU, which means that
- * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_watching(), the caller of __rcu_is_watching() must have at
- * least disabled preemption.
- */
-bool notrace __rcu_is_watching(void)
-{
-	return !rcu_dynticks_curr_cpu_in_eqs();
-}
-
-/**
  * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
- * If the current CPU is in its idle loop and is neither in an interrupt
+ * Return true if RCU is watching the running CPU, which means that this
+ * CPU can safely enter RCU read-side critical sections.  In other words,
+ * if the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
 bool notrace rcu_is_watching(void)
@@ -1153,7 +1122,7 @@ bool notrace rcu_is_watching(void)
 	bool ret;
 
 	preempt_disable_notrace();
-	ret = __rcu_is_watching();
+	ret = !rcu_dynticks_curr_cpu_in_eqs();
 	preempt_enable_notrace();
 	return ret;
 }
@@ -1237,11 +1206,9 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  * credit them with an implicit quiescent state.  Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
  */
-static int dyntick_save_progress_counter(struct rcu_data *rdp,
-					 bool *isidle, unsigned long *maxj)
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
-	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
@@ -1258,8 +1225,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
  * idle state since the last call to dyntick_save_progress_counter()
  * for this same CPU, or by virtue of having been offline.
  */
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
-				    bool *isidle, unsigned long *maxj)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
 	unsigned long jtsq;
 	bool *rnhqp;
@@ -1674,6 +1640,8 @@ void rcu_cpu_stall_reset(void)
 static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 				       struct rcu_node *rnp)
 {
+	lockdep_assert_held(&rnp->lock);
+
 	/*
 	 * If RCU is idle, we just wait for the next grace period.
 	 * But we can only be sure that RCU is idle if we are looking
@@ -1719,6 +1687,8 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	bool ret = false;
 	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
+	lockdep_assert_held(&rnp->lock);
+
 	/*
 	 * Pick up grace-period number for new callbacks.  If this
 	 * grace period is already marked as needed, return to the caller.
@@ -1845,6 +1815,8 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 {
 	bool ret = false;
 
+	lockdep_assert_held(&rnp->lock);
+
 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
 		return false;
@@ -1883,6 +1855,8 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 			    struct rcu_data *rdp)
 {
+	lockdep_assert_held(&rnp->lock);
+
 	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
 	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
 		return false;
@@ -1909,6 +1883,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 	bool ret;
 	bool need_gp;
 
+	lockdep_assert_held(&rnp->lock);
+
 	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed &&
 	    !unlikely(READ_ONCE(rdp->gpwrap))) {
@@ -2115,25 +2091,16 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
  */
 static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
 {
-	bool isidle = false;
-	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	rsp->n_force_qs++;
 	if (first_time) {
 		/* Collect dyntick-idle snapshots. */
-		if (is_sysidle_rcu_state(rsp)) {
-			isidle = true;
-			maxj = jiffies - ULONG_MAX / 4;
-		}
-		force_qs_rnp(rsp, dyntick_save_progress_counter,
-			     &isidle, &maxj);
-		rcu_sysidle_report_gp(rsp, isidle, maxj);
+		force_qs_rnp(rsp, dyntick_save_progress_counter);
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		isidle = true;
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
+		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -2341,6 +2308,7 @@ static bool
 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		      struct rcu_data *rdp)
 {
+	lockdep_assert_held(&rnp->lock);
 	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
 		/*
 		 * Either we have not yet spawned the grace-period
@@ -2402,6 +2370,7 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
+	lockdep_assert_held(&rcu_get_root(rsp)->lock);
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
@@ -2426,6 +2395,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 	unsigned long oldmask = 0;
 	struct rcu_node *rnp_c;
 
+	lockdep_assert_held(&rnp->lock);
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
@@ -2486,6 +2457,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 	unsigned long mask;
 	struct rcu_node *rnp_p;
 
+	lockdep_assert_held(&rnp->lock);
 	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
 	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2599,6 +2571,8 @@ static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 			  struct rcu_node *rnp, struct rcu_data *rdp)
 {
+	lockdep_assert_held(&rsp->orphan_lock);
+
 	/* No-CBs CPUs do not have orphanable callbacks. */
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
 		return;
@@ -2639,6 +2613,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 {
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
+	lockdep_assert_held(&rsp->orphan_lock);
+
 	/* No-CBs CPUs are handled specially. */
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
 	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
@@ -2705,6 +2681,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
 	long mask;
 	struct rcu_node *rnp = rnp_leaf;
 
+	lockdep_assert_held(&rnp->lock);
 	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
 	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
 		return;
@@ -2895,10 +2872,7 @@ void rcu_check_callbacks(int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp,
-			 int (*f)(struct rcu_data *rsp, bool *isidle,
-				  unsigned long *maxj),
-			 bool *isidle, unsigned long *maxj)
+static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
 {
 	int cpu;
 	unsigned long flags;
@@ -2937,7 +2911,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
 			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
 			if ((rnp->qsmask & bit) != 0) {
-				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
+				if (f(per_cpu_ptr(rsp->rda, cpu)))
 					mask |= bit;
 			}
 		}
@@ -3143,9 +3117,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
 
 	if (debug_rcu_head_queue(head)) {
-		/* Probable double call_rcu(), so leak the callback. */
+		/*
+		 * Probable double call_rcu(), so leak the callback.
+		 * Use rcu:rcu_callback trace event to find the previous
+		 * time callback was passed to __call_rcu().
+		 */
+		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
+			  head, head->func);
 		WRITE_ONCE(head->func, rcu_leak_callback);
-		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
 		return;
 	}
 	head->func = func;
@@ -3194,8 +3173,24 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	local_irq_restore(flags);
 }
 
-/*
- * Queue an RCU-sched callback for invocation after a grace period.
+/**
+ * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by:
+ *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ *  - anything that disables preemption.
+ *
+ *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 {
@@ -3203,8 +3198,26 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
-/*
- * Queue an RCU callback for invocation after a quicker grace period.
+/**
+ * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by:
+ *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ *  OR
+ *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
 {
@@ -3280,12 +3293,6 @@ static inline int rcu_blocking_is_gp(void)
  * to have executed a full memory barrier during the execution of
  * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
  * again only if the system has more than one CPU).
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API.  In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
  */
 void synchronize_sched(void)
 {
@@ -3578,8 +3585,14 @@ static void rcu_barrier_func(void *type)
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
 	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
-	atomic_inc(&rsp->barrier_cpu_count);
-	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
+	rdp->barrier_head.func = rcu_barrier_callback;
+	debug_rcu_head_queue(&rdp->barrier_head);
+	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
+		atomic_inc(&rsp->barrier_cpu_count);
+	} else {
+		debug_rcu_head_unqueue(&rdp->barrier_head);
+		_rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence);
+	}
 }
 
 /*
@@ -3698,6 +3711,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 	long mask;
 	struct rcu_node *rnp = rnp_leaf;
 
+	lockdep_assert_held(&rnp->lock);
 	for (;;) {
 		mask = rnp->grpmask;
 		rnp = rnp->parent;
@@ -3753,7 +3767,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 	    !init_nocb_callback_list(rdp))
 		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	rcu_dynticks_eqs_online();
 	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index ba38262..9af0f31 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -45,14 +45,6 @@ struct rcu_dynticks {
 	bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
 	unsigned long rcu_qs_ctr;   /* Light universal quiescent state ctr. */
 	bool rcu_urgent_qs;	    /* GP old need light quiescent state. */
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-	long long dynticks_idle_nesting;
-				    /* irq/process nesting level from idle. */
-	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
-				    /*  "Idle" excludes userspace execution. */
-	unsigned long dynticks_idle_jiffies;
-				    /* End of last non-NMI non-idle period. */
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 #ifdef CONFIG_RCU_FAST_NO_HZ
 	bool all_lazy;		    /* Are all CPU's CBs lazy? */
 	unsigned long nonlazy_posted;
@@ -160,19 +152,6 @@ struct rcu_node {
 				/* Number of tasks boosted for expedited GP. */
 	unsigned long n_normal_boosts;
 				/* Number of tasks boosted for normal GP. */
-	unsigned long n_balk_blkd_tasks;
-				/* Refused to boost: no blocked tasks. */
-	unsigned long n_balk_exp_gp_tasks;
-				/* Refused to boost: nothing blocking GP. */
-	unsigned long n_balk_boost_tasks;
-				/* Refused to boost: already boosting. */
-	unsigned long n_balk_notblocked;
-				/* Refused to boost: RCU RS CS still running. */
-	unsigned long n_balk_notyet;
-				/* Refused to boost: not yet time. */
-	unsigned long n_balk_nos;
-				/* Refused to boost: not sure why, though. */
-				/*  This can happen due to race conditions. */
 #ifdef CONFIG_RCU_NOCB_CPU
 	struct swait_queue_head nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
@@ -312,9 +291,9 @@ struct rcu_data {
 };
 
 /* Values for nocb_defer_wakeup field in struct rcu_data. */
-#define RCU_NOGP_WAKE_NOT	0
-#define RCU_NOGP_WAKE		1
-#define RCU_NOGP_WAKE_FORCE	2
+#define RCU_NOCB_WAKE_NOT	0
+#define RCU_NOCB_WAKE		1
+#define RCU_NOCB_WAKE_FORCE	2
 
 #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
 					/* For jiffies_till_first_fqs and */
@@ -477,7 +456,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-static void rcu_preempt_note_context_switch(void);
+static void rcu_preempt_note_context_switch(bool preempt);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
@@ -529,15 +508,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
-static void rcu_sysidle_enter(int irq);
-static void rcu_sysidle_exit(int irq);
-static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
-				  unsigned long *maxj);
-static bool is_sysidle_rcu_state(struct rcu_state *rsp);
-static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
-				  unsigned long maxj);
 static void rcu_bind_gp_kthread(void);
-static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
@@ -551,75 +522,3 @@ void srcu_offline_cpu(unsigned int cpu) { }
 #endif /* #else #ifdef CONFIG_SRCU */
 
 #endif /* #ifndef RCU_TREE_NONCORE */
-
-#ifdef CONFIG_RCU_TRACE
-/* Read out queue lengths for tracing. */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
-#ifdef CONFIG_RCU_NOCB_CPU
-	*ql = atomic_long_read(&rdp->nocb_q_count);
-	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
-#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-	*ql = 0;
-	*qll = 0;
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-}
-#endif /* #ifdef CONFIG_RCU_TRACE */
-
-/*
- * Wrappers for the rcu_node::lock acquire and release.
- *
- * Because the rcu_nodes form a tree, the tree traversal locking will observe
- * different lock values, this in turn means that an UNLOCK of one level
- * followed by a LOCK of another level does not imply a full memory barrier;
- * and most importantly transitivity is lost.
- *
- * In order to restore full ordering between tree levels, augment the regular
- * lock acquire functions with smp_mb__after_unlock_lock().
- *
- * As ->lock of struct rcu_node is a __private field, therefore one should use
- * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
- */
-static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
-{
-	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
-	smp_mb__after_unlock_lock();
-}
-
-static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
-{
-	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
-}
-
-static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
-{
-	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
-	smp_mb__after_unlock_lock();
-}
-
-static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
-{
-	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
-}
-
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
-do {									\
-	typecheck(unsigned long, flags);				\
-	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
-	smp_mb__after_unlock_lock();					\
-} while (0)
-
-#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
-do {									\
-	typecheck(unsigned long, flags);				\
-	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
-} while (0)
-
-static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
-{
-	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
-
-	if (locked)
-		smp_mb__after_unlock_lock();
-	return locked;
-}
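
For readers following the ordering argument in the comment above: every acquisition of an rcu_node ->lock goes through these wrappers, so an unlock at one tree level followed by a lock at another still provides full (transitive) ordering. A minimal, hypothetical caller sketch (not part of this patch; field names follow the surrounding code):

static void example_clear_qs_bit(struct rcu_node *rnp, unsigned long mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* implies smp_mb__after_unlock_lock() */
	rnp->qsmask &= ~mask;	/* ordered after work done under previously released rcu_node locks */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}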
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index e513b4a..dd21ca4 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -147,7 +147,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
  *
  * Caller must hold the rcu_state's exp_mutex.
  */
-static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
 	return rnp->exp_tasks == NULL &&
 	       READ_ONCE(rnp->expmask) == 0;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c9a4865..908b309 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -70,7 +70,7 @@ static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static void __init rcu_bootup_announce_oddness(void)
 {
 	if (IS_ENABLED(CONFIG_RCU_TRACE))
-		pr_info("\tRCU debugfs-based tracing is enabled.\n");
+		pr_info("\tRCU event tracing is enabled.\n");
 	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
 	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
 		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
@@ -90,8 +90,32 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-	if (IS_ENABLED(CONFIG_RCU_BOOST))
-		pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
+#ifdef CONFIG_RCU_BOOST
+	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+#endif
+	if (blimit != DEFAULT_RCU_BLIMIT)
+		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
+	if (qhimark != DEFAULT_RCU_QHIMARK)
+		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
+	if (qlowmark != DEFAULT_RCU_QLOMARK)
+		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
+	if (jiffies_till_first_fqs != ULONG_MAX)
+		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
+	if (jiffies_till_next_fqs != ULONG_MAX)
+		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
+	if (rcu_kick_kthreads)
+		pr_info("\tKick kthreads if too-long grace period.\n");
+	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
+		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
+	if (gp_preinit_delay)
+		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
+	if (gp_init_delay)
+		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
+	if (gp_cleanup_delay)
+		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
+	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
+		pr_info("\tRCU debug extended QS entry/exit.\n");
+	rcupdate_announce_bootup_oddness();
 }
 
 #ifdef CONFIG_PREEMPT_RCU
@@ -155,6 +179,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
 	struct task_struct *t = current;
 
+	lockdep_assert_held(&rnp->lock);
+
 	/*
 	 * Decide where to queue the newly blocked task.  In theory,
 	 * this could be an if-statement.  In practice, when I tried
@@ -263,6 +289,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
  */
 static void rcu_preempt_qs(void)
 {
+	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
 	if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
 		trace_rcu_grace_period(TPS("rcu_preempt"),
 				       __this_cpu_read(rcu_data_p->gpnum),
@@ -286,12 +313,14 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable interrupts.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 	struct task_struct *t = current;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
+	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
 	if (t->rcu_read_lock_nesting > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
 
@@ -607,6 +636,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
+	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
 	if (rcu_preempt_has_tasks(rnp))
 		rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -643,8 +673,37 @@ static void rcu_preempt_do_callbacks(void)
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
-/*
- * Queue a preemptible-RCU callback for invocation after a grace period.
+/**
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed.  However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections.  On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu().  It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section.  Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
  */
 void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
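
The kernel-doc above spells out the ordering contract; as a usage sketch (hypothetical structure and helpers, not part of this patch), the typical pattern is to embed an rcu_head in the protected object and free it from the callback:

struct foo {
	int data;
	struct rcu_head rcu;
};

static void free_foo_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void retire_foo(struct foo *fp)
{
	/* Pre-existing readers under rcu_read_lock() may still hold fp,
	 * so defer the free until a full grace period has elapsed. */
	call_rcu(&fp->rcu, free_foo_rcu);
}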
@@ -663,8 +722,13 @@ EXPORT_SYMBOL_GPL(call_rcu);
  * synchronize_rcu() was waiting.  RCU read-side critical sections are
  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
  *
- * See the description of synchronize_sched() for more detailed information
- * on memory ordering guarantees.
+ * See the description of synchronize_sched() for more detailed
+ * information on memory-ordering guarantees.  However, please note
+ * that -only- the memory-ordering guarantees apply.  For example,
+ * synchronize_rcu() is -not- guaranteed to wait on things like code
+ * protected by preempt_disable(); instead, synchronize_rcu() is -only-
+ * guaranteed to wait on RCU read-side critical sections, that is, sections
+ * of code protected by rcu_read_lock().
  */
 void synchronize_rcu(void)
 {
@@ -738,7 +802,7 @@ static void __init rcu_bootup_announce(void)
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(bool preempt)
 {
 }
 
@@ -835,33 +899,6 @@ void exit_rcu(void)
 
 #include "../locking/rtmutex_common.h"
 
-#ifdef CONFIG_RCU_TRACE
-
-static void rcu_initiate_boost_trace(struct rcu_node *rnp)
-{
-	if (!rcu_preempt_has_tasks(rnp))
-		rnp->n_balk_blkd_tasks++;
-	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
-		rnp->n_balk_exp_gp_tasks++;
-	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
-		rnp->n_balk_boost_tasks++;
-	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
-		rnp->n_balk_notblocked++;
-	else if (rnp->gp_tasks != NULL &&
-		 ULONG_CMP_LT(jiffies, rnp->boost_time))
-		rnp->n_balk_notyet++;
-	else
-		rnp->n_balk_nos++;
-}
-
-#else /* #ifdef CONFIG_RCU_TRACE */
-
-static void rcu_initiate_boost_trace(struct rcu_node *rnp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
 static void rcu_wake_cond(struct task_struct *t, int status)
 {
 	/*
@@ -992,8 +1029,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
+	lockdep_assert_held(&rnp->lock);
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
-		rnp->n_balk_exp_gp_tasks++;
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
@@ -1009,7 +1046,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
-		rcu_initiate_boost_trace(rnp);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
@@ -1260,8 +1296,7 @@ static void rcu_prepare_kthreads(int cpu)
 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
 	*nextevt = KTIME_MAX;
-	return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
-	       ? 0 : rcu_cpu_has_callbacks(NULL);
+	return rcu_cpu_has_callbacks(NULL);
 }
 
 /*
@@ -1372,10 +1407,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	unsigned long dj;
 
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) {
-		*nextevt = KTIME_MAX;
-		return 0;
-	}
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!");
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
@@ -1424,8 +1456,8 @@ static void rcu_prepare_for_idle(void)
 	struct rcu_state *rsp;
 	int tne;
 
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
-	    rcu_is_nocb_cpu(smp_processor_id()))
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!");
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1479,8 +1511,8 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
-	    rcu_is_nocb_cpu(smp_processor_id()))
+	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!");
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -1747,7 +1779,6 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
 }
 
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
 /* Is the specified CPU a no-CBs CPU? */
 bool rcu_is_nocb_cpu(int cpu)
 {
@@ -1755,7 +1786,6 @@ bool rcu_is_nocb_cpu(int cpu)
 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
 	return false;
 }
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
  * Kick the leader kthread for this NOCB group.
@@ -1769,6 +1799,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
 		swake_up(&rdp_leader->nocb_wq);
 	}
 }
@@ -1860,7 +1891,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeEmpty"));
 		} else {
-			WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE);
+			WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE);
 			/* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
 			smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
@@ -1874,7 +1905,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("WakeOvf"));
 		} else {
-			WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_FORCE);
+			WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_FORCE);
 			/* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
 			smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
@@ -2023,6 +2054,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp)
 	 * nocb_gp_head, where they await a grace period.
 	 */
 	gotcbs = false;
+	smp_mb(); /* wakeup before ->nocb_head reads. */
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
 		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
 		if (!rdp->nocb_gp_head)
@@ -2201,8 +2233,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 	if (!rcu_nocb_need_deferred_wakeup(rdp))
 		return;
 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
-	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
-	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
+	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+	wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE);
 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
 
@@ -2212,10 +2244,6 @@ void __init rcu_init_nohz(void)
 	bool need_rcu_nocb_mask = true;
 	struct rcu_state *rsp;
 
-#ifdef CONFIG_RCU_NOCB_CPU_NONE
-	need_rcu_nocb_mask = false;
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
-
 #if defined(CONFIG_NO_HZ_FULL)
 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
 		need_rcu_nocb_mask = true;
@@ -2231,14 +2259,6 @@ void __init rcu_init_nohz(void)
 	if (!have_rcu_nocb_mask)
 		return;
 
-#ifdef CONFIG_RCU_NOCB_CPU_ZERO
-	pr_info("\tOffload RCU callbacks from CPU 0\n");
-	cpumask_set_cpu(0, rcu_nocb_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
-#ifdef CONFIG_RCU_NOCB_CPU_ALL
-	pr_info("\tOffload RCU callbacks from all CPUs\n");
-	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
 #if defined(CONFIG_NO_HZ_FULL)
 	if (tick_nohz_full_running)
 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
@@ -2491,421 +2511,6 @@ static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
 }
 
-
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-
-static int full_sysidle_state;		/* Current system-idle state. */
-#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
-#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
-#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
-#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
-#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */
-
-/*
- * Invoked to note exit from irq or task transition to idle.  Note that
- * usermode execution does -not- count as idle here!  After all, we want
- * to detect full-system idle states, not RCU quiescent states and grace
- * periods.  The caller must have disabled interrupts.
- */
-static void rcu_sysidle_enter(int irq)
-{
-	unsigned long j;
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* If there are no nohz_full= CPUs, no need to track this. */
-	if (!tick_nohz_full_enabled())
-		return;
-
-	/* Adjust nesting, check for fully idle. */
-	if (irq) {
-		rdtp->dynticks_idle_nesting--;
-		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
-		if (rdtp->dynticks_idle_nesting != 0)
-			return;  /* Still not fully idle. */
-	} else {
-		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
-		    DYNTICK_TASK_NEST_VALUE) {
-			rdtp->dynticks_idle_nesting = 0;
-		} else {
-			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
-			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
-			return;  /* Still not fully idle. */
-		}
-	}
-
-	/* Record start of fully idle period. */
-	j = jiffies;
-	WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
-	smp_mb__before_atomic();
-	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic();
-	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
-}
-
-/*
- * Unconditionally force exit from full system-idle state.  This is
- * invoked when a normal CPU exits idle, but must be called separately
- * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
- * is that the timekeeping CPU is permitted to take scheduling-clock
- * interrupts while the system is in system-idle state, and of course
- * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
- * interrupt from any other type of interrupt.
- */
-void rcu_sysidle_force_exit(void)
-{
-	int oldstate = READ_ONCE(full_sysidle_state);
-	int newoldstate;
-
-	/*
-	 * Each pass through the following loop attempts to exit full
-	 * system-idle state.  If contention proves to be a problem,
-	 * a trylock-based contention tree could be used here.
-	 */
-	while (oldstate > RCU_SYSIDLE_SHORT) {
-		newoldstate = cmpxchg(&full_sysidle_state,
-				      oldstate, RCU_SYSIDLE_NOT);
-		if (oldstate == newoldstate &&
-		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
-			rcu_kick_nohz_cpu(tick_do_timer_cpu);
-			return; /* We cleared it, done! */
-		}
-		oldstate = newoldstate;
-	}
-	smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
-}
-
-/*
- * Invoked to note entry to irq or task transition from idle.  Note that
- * usermode execution does -not- count as idle here!  The caller must
- * have disabled interrupts.
- */
-static void rcu_sysidle_exit(int irq)
-{
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	/* If there are no nohz_full= CPUs, no need to track this. */
-	if (!tick_nohz_full_enabled())
-		return;
-
-	/* Adjust nesting, check for already non-idle. */
-	if (irq) {
-		rdtp->dynticks_idle_nesting++;
-		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
-		if (rdtp->dynticks_idle_nesting != 1)
-			return; /* Already non-idle. */
-	} else {
-		/*
-		 * Allow for irq misnesting.  Yes, it really is possible
-		 * to enter an irq handler then never leave it, and maybe
-		 * also vice versa.  Handle both possibilities.
-		 */
-		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
-			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
-			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
-			return; /* Already non-idle. */
-		} else {
-			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
-		}
-	}
-
-	/* Record end of idle period. */
-	smp_mb__before_atomic();
-	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic();
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
-
-	/*
-	 * If we are the timekeeping CPU, we are permitted to be non-idle
-	 * during a system-idle state.  This must be the case, because
-	 * the timekeeping CPU has to take scheduling-clock interrupts
-	 * during the time that the system is transitioning to full
-	 * system-idle state.  This means that the timekeeping CPU must
-	 * invoke rcu_sysidle_force_exit() directly if it does anything
-	 * more than take a scheduling-clock interrupt.
-	 */
-	if (smp_processor_id() == tick_do_timer_cpu)
-		return;
-
-	/* Update system-idle state: We are clearly no longer fully idle! */
-	rcu_sysidle_force_exit();
-}
-
-/*
- * Check to see if the current CPU is idle.  Note that usermode execution
- * does not count as idle.  The caller must have disabled interrupts,
- * and must be running on tick_do_timer_cpu.
- */
-static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
-				  unsigned long *maxj)
-{
-	int cur;
-	unsigned long j;
-	struct rcu_dynticks *rdtp = rdp->dynticks;
-
-	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
-	if (!tick_nohz_full_enabled())
-		return;
-
-	/*
-	 * If some other CPU has already reported non-idle, if this is
-	 * not the flavor of RCU that tracks sysidle state, or if this
-	 * is an offline or the timekeeping CPU, nothing to do.
-	 */
-	if (!*isidle || rdp->rsp != rcu_state_p ||
-	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
-		return;
-	/* Verify affinity of current kthread. */
-	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
-
-	/* Pick up current idle and NMI-nesting counter and check. */
-	cur = atomic_read(&rdtp->dynticks_idle);
-	if (cur & 0x1) {
-		*isidle = false; /* We are not idle! */
-		return;
-	}
-	smp_mb(); /* Read counters before timestamps. */
-
-	/* Pick up timestamps. */
-	j = READ_ONCE(rdtp->dynticks_idle_jiffies);
-	/* If this CPU entered idle more recently, update maxj timestamp. */
-	if (ULONG_CMP_LT(*maxj, j))
-		*maxj = j;
-}
-
-/*
- * Is this the flavor of RCU that is handling full-system idle?
- */
-static bool is_sysidle_rcu_state(struct rcu_state *rsp)
-{
-	return rsp == rcu_state_p;
-}
-
-/*
- * Return a delay in jiffies based on the number of CPUs, rcu_node
- * leaf fanout, and jiffies tick rate.  The idea is to allow larger
- * systems more time to transition to full-idle state in order to
- * avoid the cache thrashing that otherwise occur on the state variable.
- * Really small systems (less than a couple of tens of CPUs) should
- * instead use a single global atomically incremented counter, and later
- * versions of this will automatically reconfigure themselves accordingly.
- */
-static unsigned long rcu_sysidle_delay(void)
-{
-	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
-		return 0;
-	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
-}
-
-/*
- * Advance the full-system-idle state.  This is invoked when all of
- * the non-timekeeping CPUs are idle.
- */
-static void rcu_sysidle(unsigned long j)
-{
-	/* Check the current state. */
-	switch (READ_ONCE(full_sysidle_state)) {
-	case RCU_SYSIDLE_NOT:
-
-		/* First time all are idle, so note a short idle period. */
-		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
-		break;
-
-	case RCU_SYSIDLE_SHORT:
-
-		/*
-		 * Idle for a bit, time to advance to next state?
-		 * cmpxchg failure means race with non-idle, let them win.
-		 */
-		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
-			(void)cmpxchg(&full_sysidle_state,
-				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
-		break;
-
-	case RCU_SYSIDLE_LONG:
-
-		/*
-		 * Do an additional check pass before advancing to full.
-		 * cmpxchg failure means race with non-idle, let them win.
-		 */
-		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
-			(void)cmpxchg(&full_sysidle_state,
-				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
-		break;
-
-	default:
-		break;
-	}
-}
-
-/*
- * Found a non-idle non-timekeeping CPU, so kick the system-idle state
- * back to the beginning.
- */
-static void rcu_sysidle_cancel(void)
-{
-	smp_mb();
-	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
-		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
-}
-
-/*
- * Update the sysidle state based on the results of a force-quiescent-state
- * scan of the CPUs' dyntick-idle state.
- */
-static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
-			       unsigned long maxj, bool gpkt)
-{
-	if (rsp != rcu_state_p)
-		return;  /* Wrong flavor, ignore. */
-	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
-		return;  /* Running state machine from timekeeping CPU. */
-	if (isidle)
-		rcu_sysidle(maxj);    /* More idle! */
-	else
-		rcu_sysidle_cancel(); /* Idle is over. */
-}
-
-/*
- * Wrapper for rcu_sysidle_report() when called from the grace-period
- * kthread's context.
- */
-static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
-				  unsigned long maxj)
-{
-	/* If there are no nohz_full= CPUs, no need to track this. */
-	if (!tick_nohz_full_enabled())
-		return;
-
-	rcu_sysidle_report(rsp, isidle, maxj, true);
-}
-
-/* Callback and function for forcing an RCU grace period. */
-struct rcu_sysidle_head {
-	struct rcu_head rh;
-	int inuse;
-};
-
-static void rcu_sysidle_cb(struct rcu_head *rhp)
-{
-	struct rcu_sysidle_head *rshp;
-
-	/*
-	 * The following memory barrier is needed to replace the
-	 * memory barriers that would normally be in the memory
-	 * allocator.
-	 */
-	smp_mb();  /* grace period precedes setting inuse. */
-
-	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
-	WRITE_ONCE(rshp->inuse, 0);
-}
-
-/*
- * Check to see if the system is fully idle, other than the timekeeping CPU.
- * The caller must have disabled interrupts.  This is not intended to be
- * called unless tick_nohz_full_enabled().
- */
-bool rcu_sys_is_idle(void)
-{
-	static struct rcu_sysidle_head rsh;
-	int rss = READ_ONCE(full_sysidle_state);
-
-	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
-		return false;
-
-	/* Handle small-system case by doing a full scan of CPUs. */
-	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
-		int oldrss = rss - 1;
-
-		/*
-		 * One pass to advance to each state up to _FULL.
-		 * Give up if any pass fails to advance the state.
-		 */
-		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
-			int cpu;
-			bool isidle = true;
-			unsigned long maxj = jiffies - ULONG_MAX / 4;
-			struct rcu_data *rdp;
-
-			/* Scan all the CPUs looking for nonidle CPUs. */
-			for_each_possible_cpu(cpu) {
-				rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
-				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
-				if (!isidle)
-					break;
-			}
-			rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
-			oldrss = rss;
-			rss = READ_ONCE(full_sysidle_state);
-		}
-	}
-
-	/* If this is the first observation of an idle period, record it. */
-	if (rss == RCU_SYSIDLE_FULL) {
-		rss = cmpxchg(&full_sysidle_state,
-			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
-		return rss == RCU_SYSIDLE_FULL;
-	}
-
-	smp_mb(); /* ensure rss load happens before later caller actions. */
-
-	/* If already fully idle, tell the caller (in case of races). */
-	if (rss == RCU_SYSIDLE_FULL_NOTED)
-		return true;
-
-	/*
-	 * If we aren't there yet, and a grace period is not in flight,
-	 * initiate a grace period.  Either way, tell the caller that
-	 * we are not there yet.  We use an xchg() rather than an assignment
-	 * to make up for the memory barriers that would otherwise be
-	 * provided by the memory allocator.
-	 */
-	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
-	    !rcu_gp_in_progress(rcu_state_p) &&
-	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
-		call_rcu(&rsh.rh, rcu_sysidle_cb);
-	return false;
-}
-
-/*
- * Initialize dynticks sysidle state for CPUs coming online.
- */
-static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
-{
-	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
-}
-
-#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
-static void rcu_sysidle_enter(int irq)
-{
-}
-
-static void rcu_sysidle_exit(int irq)
-{
-}
-
-static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
-				  unsigned long *maxj)
-{
-}
-
-static bool is_sysidle_rcu_state(struct rcu_state *rsp)
-{
-	return false;
-}
-
-static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
-				  unsigned long maxj)
-{
-}
-
-static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2936,13 +2541,7 @@ static void rcu_bind_gp_kthread(void)
 
 	if (!tick_nohz_full_enabled())
 		return;
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-	cpu = tick_do_timer_cpu;
-	if (cpu >= 0 && cpu < nr_cpu_ids)
-		set_cpus_allowed_ptr(current, cpumask_of(cpu));
-#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 	housekeeping_affine(current);
-#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
 
 /* Record the current task on dyntick-idle entry. */
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
deleted file mode 100644
index 6cea17a..0000000
--- a/kernel/rcu/tree_trace.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Read-Copy Update tracing for hierarchical implementation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * Copyright IBM Corporation, 2008
- * Author: Paul E. McKenney
- *
- * Papers:  http://www.rdrop.com/users/paulmck/RCU
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/prefetch.h>
-
-#define RCU_TREE_NONCORE
-#include "tree.h"
-#include "rcu.h"
-
-static int r_open(struct inode *inode, struct file *file,
-					const struct seq_operations *op)
-{
-	int ret = seq_open(file, op);
-	if (!ret) {
-		struct seq_file *m = (struct seq_file *)file->private_data;
-		m->private = inode->i_private;
-	}
-	return ret;
-}
-
-static void *r_start(struct seq_file *m, loff_t *pos)
-{
-	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	*pos = cpumask_next(*pos - 1, cpu_possible_mask);
-	if ((*pos) < nr_cpu_ids)
-		return per_cpu_ptr(rsp->rda, *pos);
-	return NULL;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	(*pos)++;
-	return r_start(m, pos);
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
-}
-
-static int show_rcubarrier(struct seq_file *m, void *v)
-{
-	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	seq_printf(m, "bcc: %d bseq: %lu\n",
-		   atomic_read(&rsp->barrier_cpu_count),
-		   rsp->barrier_sequence);
-	return 0;
-}
-
-static int rcubarrier_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcubarrier, inode->i_private);
-}
-
-static const struct file_operations rcubarrier_fops = {
-	.owner = THIS_MODULE,
-	.open = rcubarrier_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-};
-
-#ifdef CONFIG_RCU_BOOST
-
-static char convert_kthread_status(unsigned int kthread_status)
-{
-	if (kthread_status > RCU_KTHREAD_MAX)
-		return '?';
-	return "SRWOY"[kthread_status];
-}
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
-static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
-{
-	long ql, qll;
-
-	if (!rdp->beenonline)
-		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld cnq=%d/%d:%d",
-		   rdp->cpu,
-		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
-		   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
-		   rdp->cpu_no_qs.b.norm,
-		   rdp->rcu_qs_ctr_snap == per_cpu(rdp->dynticks->rcu_qs_ctr, rdp->cpu),
-		   rdp->core_needs_qs);
-	seq_printf(m, " dt=%d/%llx/%d df=%lu",
-		   rcu_dynticks_snap(rdp->dynticks),
-		   rdp->dynticks->dynticks_nesting,
-		   rdp->dynticks->dynticks_nmi_nesting,
-		   rdp->dynticks_fqs);
-	seq_printf(m, " of=%lu", rdp->offline_fqs);
-	rcu_nocb_q_lengths(rdp, &ql, &qll);
-	qll += rcu_segcblist_n_lazy_cbs(&rdp->cblist);
-	ql += rcu_segcblist_n_cbs(&rdp->cblist);
-	seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
-		   qll, ql,
-		   ".N"[!rcu_segcblist_segempty(&rdp->cblist, RCU_NEXT_TAIL)],
-		   ".R"[!rcu_segcblist_segempty(&rdp->cblist,
-						RCU_NEXT_READY_TAIL)],
-		   ".W"[!rcu_segcblist_segempty(&rdp->cblist, RCU_WAIT_TAIL)],
-		   ".D"[!rcu_segcblist_segempty(&rdp->cblist, RCU_DONE_TAIL)]);
-#ifdef CONFIG_RCU_BOOST
-	seq_printf(m, " kt=%d/%c ktl=%x",
-		   per_cpu(rcu_cpu_has_work, rdp->cpu),
-		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
-					  rdp->cpu)),
-		   per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
-#endif /* #ifdef CONFIG_RCU_BOOST */
-	seq_printf(m, " b=%ld", rdp->blimit);
-	seq_printf(m, " ci=%lu nci=%lu co=%lu ca=%lu\n",
-		   rdp->n_cbs_invoked, rdp->n_nocbs_invoked,
-		   rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
-}
-
-static int show_rcudata(struct seq_file *m, void *v)
-{
-	print_one_rcu_data(m, (struct rcu_data *)v);
-	return 0;
-}
-
-static const struct seq_operations rcudate_op = {
-	.start = r_start,
-	.next  = r_next,
-	.stop  = r_stop,
-	.show  = show_rcudata,
-};
-
-static int rcudata_open(struct inode *inode, struct file *file)
-{
-	return r_open(inode, file, &rcudate_op);
-}
-
-static const struct file_operations rcudata_fops = {
-	.owner = THIS_MODULE,
-	.open = rcudata_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = seq_release,
-};
-
-static int show_rcuexp(struct seq_file *m, void *v)
-{
-	int cpu;
-	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	struct rcu_data *rdp;
-	unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
-
-	for_each_possible_cpu(cpu) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
-		s0 += atomic_long_read(&rdp->exp_workdone0);
-		s1 += atomic_long_read(&rdp->exp_workdone1);
-		s2 += atomic_long_read(&rdp->exp_workdone2);
-		s3 += atomic_long_read(&rdp->exp_workdone3);
-	}
-	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu enq=%d sc=%lu\n",
-		   rsp->expedited_sequence, s0, s1, s2, s3,
-		   atomic_read(&rsp->expedited_need_qs),
-		   rsp->expedited_sequence / 2);
-	return 0;
-}
-
-static int rcuexp_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcuexp, inode->i_private);
-}
-
-static const struct file_operations rcuexp_fops = {
-	.owner = THIS_MODULE,
-	.open = rcuexp_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-};
-
-#ifdef CONFIG_RCU_BOOST
-
-static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
-{
-	seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ",
-		   rnp->grplo, rnp->grphi,
-		   "T."[list_empty(&rnp->blkd_tasks)],
-		   "N."[!rnp->gp_tasks],
-		   "E."[!rnp->exp_tasks],
-		   "B."[!rnp->boost_tasks],
-		   convert_kthread_status(rnp->boost_kthread_status),
-		   rnp->n_tasks_boosted, rnp->n_exp_boosts,
-		   rnp->n_normal_boosts);
-	seq_printf(m, "j=%04x bt=%04x\n",
-		   (int)(jiffies & 0xffff),
-		   (int)(rnp->boost_time & 0xffff));
-	seq_printf(m, "    balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
-		   rnp->n_balk_blkd_tasks,
-		   rnp->n_balk_exp_gp_tasks,
-		   rnp->n_balk_boost_tasks,
-		   rnp->n_balk_notblocked,
-		   rnp->n_balk_notyet,
-		   rnp->n_balk_nos);
-}
-
-static int show_rcu_node_boost(struct seq_file *m, void *unused)
-{
-	struct rcu_node *rnp;
-
-	rcu_for_each_leaf_node(&rcu_preempt_state, rnp)
-		print_one_rcu_node_boost(m, rnp);
-	return 0;
-}
-
-static int rcu_node_boost_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcu_node_boost, NULL);
-}
-
-static const struct file_operations rcu_node_boost_fops = {
-	.owner = THIS_MODULE,
-	.open = rcu_node_boost_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-};
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
-static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
-{
-	unsigned long gpnum;
-	int level = 0;
-	struct rcu_node *rnp;
-
-	gpnum = rsp->gpnum;
-	seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ",
-		   ulong2long(rsp->completed), ulong2long(gpnum),
-		   rsp->gp_state,
-		   (long)(rsp->jiffies_force_qs - jiffies),
-		   (int)(jiffies & 0xffff));
-	seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
-		   rsp->n_force_qs, rsp->n_force_qs_ngp,
-		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   READ_ONCE(rsp->n_force_qs_lh),
-		   rsp->orphan_done.len_lazy,
-		   rsp->orphan_done.len);
-	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
-		if (rnp->level != level) {
-			seq_puts(m, "\n");
-			level = rnp->level;
-		}
-		seq_printf(m, "%lx/%lx->%lx %c%c>%c %d:%d ^%d    ",
-			   rnp->qsmask, rnp->qsmaskinit, rnp->qsmaskinitnext,
-			   ".G"[rnp->gp_tasks != NULL],
-			   ".E"[rnp->exp_tasks != NULL],
-			   ".T"[!list_empty(&rnp->blkd_tasks)],
-			   rnp->grplo, rnp->grphi, rnp->grpnum);
-	}
-	seq_puts(m, "\n");
-}
-
-static int show_rcuhier(struct seq_file *m, void *v)
-{
-	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	print_one_rcu_state(m, rsp);
-	return 0;
-}
-
-static int rcuhier_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcuhier, inode->i_private);
-}
-
-static const struct file_operations rcuhier_fops = {
-	.owner = THIS_MODULE,
-	.open = rcuhier_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-};
-
-static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
-{
-	unsigned long flags;
-	unsigned long completed;
-	unsigned long gpnum;
-	unsigned long gpage;
-	unsigned long gpmax;
-	struct rcu_node *rnp = &rsp->node[0];
-
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	completed = READ_ONCE(rsp->completed);
-	gpnum = READ_ONCE(rsp->gpnum);
-	if (completed == gpnum)
-		gpage = 0;
-	else
-		gpage = jiffies - rsp->gp_start;
-	gpmax = rsp->gp_max;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	seq_printf(m, "completed=%ld  gpnum=%ld  age=%ld  max=%ld\n",
-		   ulong2long(completed), ulong2long(gpnum), gpage, gpmax);
-}
-
-static int show_rcugp(struct seq_file *m, void *v)
-{
-	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	show_one_rcugp(m, rsp);
-	return 0;
-}
-
-static int rcugp_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcugp, inode->i_private);
-}
-
-static const struct file_operations rcugp_fops = {
-	.owner = THIS_MODULE,
-	.open = rcugp_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-};
-
-static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
-{
-	if (!rdp->beenonline)
-		return;
-	seq_printf(m, "%3d%cnp=%ld ",
-		   rdp->cpu,
-		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
-		   rdp->n_rcu_pending);
-	seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
-		   rdp->n_rp_core_needs_qs,
-		   rdp->n_rp_report_qs,
-		   rdp->n_rp_cb_ready,
-		   rdp->n_rp_cpu_needs_gp);
-	seq_printf(m, "gpc=%ld gps=%ld nn=%ld ndw%ld\n",
-		   rdp->n_rp_gp_completed,
-		   rdp->n_rp_gp_started,
-		   rdp->n_rp_nocb_defer_wakeup,
-		   rdp->n_rp_need_nothing);
-}
-
-static int show_rcu_pending(struct seq_file *m, void *v)
-{
-	print_one_rcu_pending(m, (struct rcu_data *)v);
-	return 0;
-}
-
-static const struct seq_operations rcu_pending_op = {
-	.start = r_start,
-	.next  = r_next,
-	.stop  = r_stop,
-	.show  = show_rcu_pending,
-};
-
-static int rcu_pending_open(struct inode *inode, struct file *file)
-{
-	return r_open(inode, file, &rcu_pending_op);
-}
-
-static const struct file_operations rcu_pending_fops = {
-	.owner = THIS_MODULE,
-	.open = rcu_pending_open,
-	.read = seq_read,
-	.llseek = no_llseek,
-	.release = seq_release,
-};
-
-static int show_rcutorture(struct seq_file *m, void *unused)
-{
-	seq_printf(m, "rcutorture test sequence: %lu %s\n",
-		   rcutorture_testseq >> 1,
-		   (rcutorture_testseq & 0x1) ? "(test in progress)" : "");
-	seq_printf(m, "rcutorture update version number: %lu\n",
-		   rcutorture_vernum);
-	return 0;
-}
-
-static int rcutorture_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, show_rcutorture, NULL);
-}
-
-static const struct file_operations rcutorture_fops = {
-	.owner = THIS_MODULE,
-	.open = rcutorture_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static struct dentry *rcudir;
-
-static int __init rcutree_trace_init(void)
-{
-	struct rcu_state *rsp;
-	struct dentry *retval;
-	struct dentry *rspdir;
-
-	rcudir = debugfs_create_dir("rcu", NULL);
-	if (!rcudir)
-		goto free_out;
-
-	for_each_rcu_flavor(rsp) {
-		rspdir = debugfs_create_dir(rsp->name, rcudir);
-		if (!rspdir)
-			goto free_out;
-
-		retval = debugfs_create_file("rcudata", 0444,
-				rspdir, rsp, &rcudata_fops);
-		if (!retval)
-			goto free_out;
-
-		retval = debugfs_create_file("rcuexp", 0444,
-				rspdir, rsp, &rcuexp_fops);
-		if (!retval)
-			goto free_out;
-
-		retval = debugfs_create_file("rcu_pending", 0444,
-				rspdir, rsp, &rcu_pending_fops);
-		if (!retval)
-			goto free_out;
-
-		retval = debugfs_create_file("rcubarrier", 0444,
-				rspdir, rsp, &rcubarrier_fops);
-		if (!retval)
-			goto free_out;
-
-#ifdef CONFIG_RCU_BOOST
-		if (rsp == &rcu_preempt_state) {
-			retval = debugfs_create_file("rcuboost", 0444,
-				rspdir, NULL, &rcu_node_boost_fops);
-			if (!retval)
-				goto free_out;
-		}
-#endif
-
-		retval = debugfs_create_file("rcugp", 0444,
-				rspdir, rsp, &rcugp_fops);
-		if (!retval)
-			goto free_out;
-
-		retval = debugfs_create_file("rcuhier", 0444,
-				rspdir, rsp, &rcuhier_fops);
-		if (!retval)
-			goto free_out;
-	}
-
-	retval = debugfs_create_file("rcutorture", 0444, rcudir,
-						NULL, &rcutorture_fops);
-	if (!retval)
-		goto free_out;
-	return 0;
-free_out:
-	debugfs_remove_recursive(rcudir);
-	return 1;
-}
-device_initcall(rcutree_trace_init);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 273e869..00e77c4 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -62,7 +62,9 @@
 #define MODULE_PARAM_PREFIX "rcupdate."
 
 #ifndef CONFIG_TINY_RCU
+extern int rcu_expedited; /* from sysctl */
 module_param(rcu_expedited, int, 0);
+extern int rcu_normal; /* from sysctl */
 module_param(rcu_normal, int, 0);
 static int rcu_normal_after_boot;
 module_param(rcu_normal_after_boot, int, 0);
@@ -379,6 +381,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 		   struct rcu_synchronize *rs_array)
 {
 	int i;
+	int j;
 
 	/* Initialize and register callbacks for each flavor specified. */
 	for (i = 0; i < n; i++) {
@@ -390,7 +393,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 		}
 		init_rcu_head_on_stack(&rs_array[i].head);
 		init_completion(&rs_array[i].completion);
-		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
+		for (j = 0; j < i; j++)
+			if (crcu_array[j] == crcu_array[i])
+				break;
+		if (j == i)
+			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
 	}
 
 	/* Wait for all callbacks to be invoked. */
@@ -399,7 +406,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 		    (crcu_array[i] == call_rcu ||
 		     crcu_array[i] == call_rcu_bh))
 			continue;
-		wait_for_completion(&rs_array[i].completion);
+		for (j = 0; j < i; j++)
+			if (crcu_array[j] == crcu_array[i])
+				break;
+		if (j == i)
+			wait_for_completion(&rs_array[i].completion);
 		destroy_rcu_head_on_stack(&rs_array[i].head);
 	}
 }
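
The two added loops skip duplicate callback functions so that each distinct RCU flavor is posted and waited on only once. A hedged example of how duplicates can reach this function (assuming a !PREEMPT configuration in which call_rcu() aliases call_rcu_sched()):

	/* Hypothetical caller: with call_rcu == call_rcu_sched, both slots of
	 * crcu_array[] hold the same function, and the j-loop above makes the
	 * second slot neither post a callback nor wait on its completion. */
	synchronize_rcu_mult(call_rcu, call_rcu_sched);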
@@ -560,15 +571,30 @@ static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 DEFINE_SRCU(tasks_rcu_exit_srcu);
 
 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
-static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
+#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
+static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
 static void rcu_spawn_tasks_kthread(void);
 static struct task_struct *rcu_tasks_kthread_ptr;
 
-/*
- * Post an RCU-tasks callback.  First call must be from process context
- * after the scheduler if fully operational.
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @rhp: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution.  As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
  */
 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
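
As with call_rcu(), a short usage sketch may help (hypothetical structure and caller; tasks RCU is typically used to free code such as tracing trampolines that a task might have been preempted in):

struct tramp {
	void *text;			/* executable stub some task may still be running */
	struct rcu_head rcu;
};

static void free_tramp_cb(struct rcu_head *rhp)
{
	/* Every task has since voluntarily switched, idled, or entered
	 * usermode, so no task can still be executing in ->text. */
	kfree(container_of(rhp, struct tramp, rcu));
}

static void retire_tramp(struct tramp *tp)
{
	call_rcu_tasks(&tp->rcu, free_tramp_cb);
}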
@@ -851,6 +877,23 @@ static void rcu_spawn_tasks_kthread(void)
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
+#ifndef CONFIG_TINY_RCU
+
+/*
+ * Print any non-default Tasks RCU settings.
+ */
+static void __init rcu_tasks_bootup_oddness(void)
+{
+#ifdef CONFIG_TASKS_RCU
+	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
+		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
+	else
+		pr_info("\tTasks RCU enabled.\n");
+#endif /* #ifdef CONFIG_TASKS_RCU */
+}
+
+#endif /* #ifndef CONFIG_TINY_RCU */
+
 #ifdef CONFIG_PROVE_RCU
 
 /*
@@ -935,3 +978,25 @@ late_initcall(rcu_verify_early_boot_tests);
 #else
 void rcu_early_boot_tests(void) {}
 #endif /* CONFIG_PROVE_RCU */
+
+#ifndef CONFIG_TINY_RCU
+
+/*
+ * Print any significant non-default boot-time settings.
+ */
+void __init rcupdate_announce_bootup_oddness(void)
+{
+	if (rcu_normal)
+		pr_info("\tNo expedited grace period (rcu_normal).\n");
+	else if (rcu_normal_after_boot)
+		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
+	else if (rcu_expedited)
+		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
+	if (rcu_cpu_stall_suppress)
+		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
+	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
+		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
+	rcu_tasks_bootup_oddness();
+}
+
+#endif /* #ifndef CONFIG_TINY_RCU */
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 89ab675..53f0164 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -16,9 +16,9 @@
 endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
+obj-y += idle_task.o fair.o rt.o deadline.o
+obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 00a45c4..ca0f8fc 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -64,6 +64,7 @@
 #include <linux/workqueue.h>
 #include <linux/compiler.h>
 #include <linux/tick.h>
+#include <linux/init.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -124,14 +125,27 @@ int sched_clock_stable(void)
 	return static_branch_likely(&__sched_clock_stable);
 }
 
+static void __scd_stamp(struct sched_clock_data *scd)
+{
+	scd->tick_gtod = ktime_get_ns();
+	scd->tick_raw = sched_clock();
+}
+
 static void __set_sched_clock_stable(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 
 	/*
+	 * Since we're still unstable and the tick is already running, we have
+	 * to disable IRQs in order to get a consistent scd->tick* reading.
+	 */
+	local_irq_disable();
+	scd = this_scd();
+	/*
 	 * Attempt to make the (initial) unstable->stable transition continuous.
 	 */
 	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
+	local_irq_enable();
 
 	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
 			scd->tick_gtod, __gtod_offset,
@@ -141,8 +155,38 @@ static void __set_sched_clock_stable(void)
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
+/*
+ * If we ever get here, we're screwed, because we found out -- typically after
+ * the fact -- that TSC wasn't good. This means all our clocksources (including
+ * ktime) could have reported wrong values.
+ *
+ * What we do here is an attempt to fix up and continue sort of where we left
+ * off in a coherent manner.
+ *
+ * The only way to fully avoid random clock jumps is to boot with:
+ * "tsc=unstable".
+ */
 static void __sched_clock_work(struct work_struct *work)
 {
+	struct sched_clock_data *scd;
+	int cpu;
+
+	/* take a current timestamp and set 'now' */
+	preempt_disable();
+	scd = this_scd();
+	__scd_stamp(scd);
+	scd->clock = scd->tick_gtod + __gtod_offset;
+	preempt_enable();
+
+	/* clone to all CPUs */
+	for_each_possible_cpu(cpu)
+		per_cpu(sched_clock_data, cpu) = *scd;
+
+	printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
+	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
+			scd->tick_gtod, __gtod_offset,
+			scd->tick_raw,  __sched_clock_offset);
+
 	static_branch_disable(&__sched_clock_stable);
 }
 
@@ -150,27 +194,11 @@ static DECLARE_WORK(sched_clock_work, __sched_clock_work);
 
 static void __clear_sched_clock_stable(void)
 {
-	struct sched_clock_data *scd = this_scd();
-
-	/*
-	 * Attempt to make the stable->unstable transition continuous.
-	 *
-	 * Trouble is, this is typically called from the TSC watchdog
-	 * timer, which is late per definition. This means the tick
-	 * values can already be screwy.
-	 *
-	 * Still do what we can.
-	 */
-	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
-
-	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
-			scd->tick_gtod, __gtod_offset,
-			scd->tick_raw,  __sched_clock_offset);
+	if (!sched_clock_stable())
+		return;
 
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-
-	if (sched_clock_stable())
-		schedule_work(&sched_clock_work);
+	schedule_work(&sched_clock_work);
 }
 
 void clear_sched_clock_stable(void)
@@ -183,7 +211,11 @@ void clear_sched_clock_stable(void)
 		__clear_sched_clock_stable();
 }
 
-void sched_clock_init_late(void)
+/*
+ * We run this as late_initcall() such that it runs after all built-in drivers,
+ * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
+ */
+static int __init sched_clock_init_late(void)
 {
 	sched_clock_running = 2;
 	/*
@@ -197,7 +229,10 @@ void sched_clock_init_late(void)
 
 	if (__sched_clock_stable_early)
 		__set_sched_clock_stable();
+
+	return 0;
 }
+late_initcall(sched_clock_init_late);
 
 /*
  * min, max except they take wrapping into account
@@ -347,21 +382,38 @@ void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd;
 
+	if (sched_clock_stable())
+		return;
+
+	if (unlikely(!sched_clock_running))
+		return;
+
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/*
-	 * Update these values even if sched_clock_stable(), because it can
-	 * become unstable at any point in time at which point we need some
-	 * values to fall back on.
-	 *
-	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
-	 */
 	scd = this_scd();
-	scd->tick_raw  = sched_clock();
-	scd->tick_gtod = ktime_get_ns();
+	__scd_stamp(scd);
+	sched_clock_local(scd);
+}
 
-	if (!sched_clock_stable() && likely(sched_clock_running))
-		sched_clock_local(scd);
+void sched_clock_tick_stable(void)
+{
+	u64 gtod, clock;
+
+	if (!sched_clock_stable())
+		return;
+
+	/*
+	 * Called under watchdog_lock.
+	 *
+	 * The watchdog just found this TSC to (still) be stable, so now is a
+	 * good moment to update our __gtod_offset. Because once we find the
+	 * TSC to be unstable, any computation will be computing crap.
+	 */
+	local_irq_disable();
+	gtod = ktime_get_ns();
+	clock = sched_clock();
+	__gtod_offset = (clock + __sched_clock_offset) - gtod;
+	local_irq_enable();
 }
 
 /*
@@ -374,15 +426,21 @@ void sched_clock_idle_sleep_event(void)
 EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 
 /*
- * We just idled delta nanoseconds (called with irqs disabled):
+ * We just idled; resync with ktime.
  */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
+void sched_clock_idle_wakeup_event(void)
 {
-	if (timekeeping_suspended)
+	unsigned long flags;
+
+	if (sched_clock_stable())
 		return;
 
+	if (unlikely(timekeeping_suspended))
+		return;
+
+	local_irq_save(flags);
 	sched_clock_tick();
-	touch_softlockup_watchdog_sched();
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
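
The two offsets adjusted in these hunks keep sched_clock_cpu() continuous across stable/unstable transitions; a worked example with made-up numbers (a sketch of the arithmetic, not taken from the patch):

/*
 * Suppose at the moment __set_sched_clock_stable() runs:
 *	tick_raw  = 1000	(raw sched_clock(), ns)
 *	tick_gtod = 4000	(ktime_get_ns()), __gtod_offset = 0
 * Then __sched_clock_offset = (4000 + 0) - 1000 = 3000, so
 *	stable path:   sched_clock() + __sched_clock_offset = 1000 + 3000 = 4000
 *	unstable path: tick_gtod     + __gtod_offset        = 4000 + 0    = 4000
 * Both paths agree at the switch, so the reported clock does not jump.
 * sched_clock_tick_stable() maintains the same identity from the other
 * direction by recomputing __gtod_offset = (clock + __sched_clock_offset) - gtod.
 */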
 
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 53f9558..13fc5ae 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -66,7 +66,7 @@ do_wait_for_common(struct completion *x,
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
-		__add_wait_queue_tail_exclusive(&x->wait, &wait);
+		__add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
 		do {
 			if (signal_pending_state(state, current)) {
 				timeout = -ERESTARTSYS;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 803c3bc..17c667b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10,6 +10,7 @@
 #include <uapi/linux/sched/types.h>
 #include <linux/sched/loadavg.h>
 #include <linux/sched/hotplug.h>
+#include <linux/wait_bit.h>
 #include <linux/cpuset.h>
 #include <linux/delayacct.h>
 #include <linux/init_task.h>
@@ -788,36 +789,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_task(rq, p, flags);
 }
 
-void sched_set_stop_task(int cpu, struct task_struct *stop)
-{
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-	struct task_struct *old_stop = cpu_rq(cpu)->stop;
-
-	if (stop) {
-		/*
-		 * Make it appear like a SCHED_FIFO task, its something
-		 * userspace knows about and won't get confused about.
-		 *
-		 * Also, it will make PI more or less work without too
-		 * much confusion -- but then, stop work should not
-		 * rely on PI working anyway.
-		 */
-		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
-
-		stop->sched_class = &stop_sched_class;
-	}
-
-	cpu_rq(cpu)->stop = stop;
-
-	if (old_stop) {
-		/*
-		 * Reset it back to a normal scheduling class so that
-		 * it can die in pieces.
-		 */
-		old_stop->sched_class = &rt_sched_class;
-	}
-}
-
 /*
  * __normal_prio - return the priority that is based on the static prio
  */
@@ -1588,6 +1559,36 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+	if (stop) {
+		/*
+		 * Make it appear like a SCHED_FIFO task, it's something

+		 * userspace knows about and won't get confused about.
+		 *
+		 * Also, it will make PI more or less work without too
+		 * much confusion -- but then, stop work should not
+		 * rely on PI working anyway.
+		 */
+		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
+
+		stop->sched_class = &stop_sched_class;
+	}
+
+	cpu_rq(cpu)->stop = stop;
+
+	if (old_stop) {
+		/*
+		 * Reset it back to a normal scheduling class so that
+		 * it can die in pieces.
+		 */
+		old_stop->sched_class = &rt_sched_class;
+	}
+}
+
 #else
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
@@ -1731,7 +1732,7 @@ void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
-	struct task_struct *p;
+	struct task_struct *p, *t;
 	struct rq_flags rf;
 
 	if (!llist)
@@ -1740,17 +1741,8 @@ void sched_ttwu_pending(void)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
-	while (llist) {
-		int wake_flags = 0;
-
-		p = llist_entry(llist, struct task_struct, wake_entry);
-		llist = llist_next(llist);
-
-		if (p->sched_remote_wakeup)
-			wake_flags = WF_MIGRATED;
-
-		ttwu_do_activate(rq, p, wake_flags, &rf);
-	}
+	llist_for_each_entry_safe(p, t, llist, wake_entry)
+		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
 
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -2148,23 +2140,6 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 }
 
 /*
- * This function clears the sched_dl_entity static params.
- */
-void __dl_clear_params(struct task_struct *p)
-{
-	struct sched_dl_entity *dl_se = &p->dl;
-
-	dl_se->dl_runtime = 0;
-	dl_se->dl_deadline = 0;
-	dl_se->dl_period = 0;
-	dl_se->flags = 0;
-	dl_se->dl_bw = 0;
-
-	dl_se->dl_throttled = 0;
-	dl_se->dl_yielded = 0;
-}
-
-/*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  *
@@ -2193,6 +2168,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
+	init_dl_inactive_task_timer(&p->dl);
 	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
@@ -2430,7 +2406,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 unsigned long to_ratio(u64 period, u64 runtime)
 {
 	if (runtime == RUNTIME_INF)
-		return 1ULL << 20;
+		return BW_UNIT;
 
 	/*
 	 * Doing this here saves a lot of checks in all
@@ -2440,93 +2416,9 @@ unsigned long to_ratio(u64 period, u64 runtime)
 	if (period == 0)
 		return 0;
 
-	return div64_u64(runtime << 20, period);
+	return div64_u64(runtime << BW_SHIFT, period);
 }
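to_ratio() now expresses bandwidths as fixed-point fractions of BW_UNIT = 1 << BW_SHIFT (BW_SHIFT is 20, as implied by the GRUB overflow remark in the deadline.c changes below). A standalone sketch of the same arithmetic, using an illustrative 10 ms runtime over a 100 ms period:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20
#define BW_UNIT  (1 << BW_SHIFT)

/* Userspace copy of the to_ratio() arithmetic (RUNTIME_INF handling omitted). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t runtime = 10 * 1000 * 1000;   /* 10 ms, in ns */
	uint64_t period  = 100 * 1000 * 1000;  /* 100 ms, in ns */

	/* 0.1 of BW_UNIT: 104857 out of 1048576. */
	printf("bw = %llu / %d\n",
	       (unsigned long long)to_ratio(period, runtime), BW_UNIT);
	return 0;
}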
 
-#ifdef CONFIG_SMP
-inline struct dl_bw *dl_bw_of(int i)
-{
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
-			 "sched RCU must be held");
-	return &cpu_rq(i)->rd->dl_bw;
-}
-
-static inline int dl_bw_cpus(int i)
-{
-	struct root_domain *rd = cpu_rq(i)->rd;
-	int cpus = 0;
-
-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
-			 "sched RCU must be held");
-	for_each_cpu_and(i, rd->span, cpu_active_mask)
-		cpus++;
-
-	return cpus;
-}
-#else
-inline struct dl_bw *dl_bw_of(int i)
-{
-	return &cpu_rq(i)->dl.dl_bw;
-}
-
-static inline int dl_bw_cpus(int i)
-{
-	return 1;
-}
-#endif
-
-/*
- * We must be sure that accepting a new task (or allowing changing the
- * parameters of an existing one) is consistent with the bandwidth
- * constraints. If yes, this function also accordingly updates the currently
- * allocated bandwidth to reflect the new situation.
- *
- * This function is called while holding p's rq->lock.
- *
- * XXX we should delay bw change until the task's 0-lag point, see
- * __setparam_dl().
- */
-static int dl_overflow(struct task_struct *p, int policy,
-		       const struct sched_attr *attr)
-{
-
-	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period ?: attr->sched_deadline;
-	u64 runtime = attr->sched_runtime;
-	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
-	int cpus, err = -1;
-
-	/* !deadline task may carry old deadline bandwidth */
-	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
-		return 0;
-
-	/*
-	 * Either if a task, enters, leave, or stays -deadline but changes
-	 * its parameters, we may need to update accordingly the total
-	 * allocated bandwidth of the container.
-	 */
-	raw_spin_lock(&dl_b->lock);
-	cpus = dl_bw_cpus(task_cpu(p));
-	if (dl_policy(policy) && !task_has_dl_policy(p) &&
-	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
-		__dl_add(dl_b, new_bw);
-		err = 0;
-	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
-		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
-		__dl_clear(dl_b, p->dl.dl_bw);
-		__dl_add(dl_b, new_bw);
-		err = 0;
-	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
-		__dl_clear(dl_b, p->dl.dl_bw);
-		err = 0;
-	}
-	raw_spin_unlock(&dl_b->lock);
-
-	return err;
-}
-
-extern void init_dl_bw(struct dl_bw *dl_b);
-
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
  *
@@ -3687,7 +3579,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	exception_exit(prev_state);
 }
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
 	return try_to_wake_up(curr->private, mode, wake_flags);
@@ -4009,46 +3901,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 }
 
 /*
- * This function initializes the sched_dl_entity of a newly becoming
- * SCHED_DEADLINE task.
- *
- * Only the static values are considered here, the actual runtime and the
- * absolute deadline will be properly calculated when the task is enqueued
- * for the first time with its new policy.
- */
-static void
-__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
-{
-	struct sched_dl_entity *dl_se = &p->dl;
-
-	dl_se->dl_runtime = attr->sched_runtime;
-	dl_se->dl_deadline = attr->sched_deadline;
-	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
-	dl_se->flags = attr->sched_flags;
-	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-
-	/*
-	 * Changing the parameters of a task is 'tricky' and we're not doing
-	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
-	 *
-	 * What we SHOULD do is delay the bandwidth release until the 0-lag
-	 * point. This would include retaining the task_struct until that time
-	 * and change dl_overflow() to not immediately decrement the current
-	 * amount.
-	 *
-	 * Instead we retain the current runtime/deadline and let the new
-	 * parameters take effect after the current reservation period lapses.
-	 * This is safe (albeit pessimistic) because the 0-lag point is always
-	 * before the current scheduling deadline.
-	 *
-	 * We can still have temporary overloads because we do not delay the
-	 * change in bandwidth until that time; so admission control is
-	 * not on the safe side. It does however guarantee tasks will never
-	 * consume more than promised.
-	 */
-}
-
-/*
  * sched_setparam() passes in -1 for its policy, to let the functions
  * it calls know not to change it.
  */
@@ -4101,59 +3953,6 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
 		p->sched_class = &fair_sched_class;
 }
 
-static void
-__getparam_dl(struct task_struct *p, struct sched_attr *attr)
-{
-	struct sched_dl_entity *dl_se = &p->dl;
-
-	attr->sched_priority = p->rt_priority;
-	attr->sched_runtime = dl_se->dl_runtime;
-	attr->sched_deadline = dl_se->dl_deadline;
-	attr->sched_period = dl_se->dl_period;
-	attr->sched_flags = dl_se->flags;
-}
-
-/*
- * This function validates the new parameters of a -deadline task.
- * We ask for the deadline not being zero, and greater or equal
- * than the runtime, as well as the period of being zero or
- * greater than deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution of 1us (we
- * check sched_runtime only since it is always the smaller one) and
- * below 2^63 ns (we have to check both sched_deadline and
- * sched_period, as the latter can be zero).
- */
-static bool
-__checkparam_dl(const struct sched_attr *attr)
-{
-	/* deadline != 0 */
-	if (attr->sched_deadline == 0)
-		return false;
-
-	/*
-	 * Since we truncate DL_SCALE bits, make sure we're at least
-	 * that big.
-	 */
-	if (attr->sched_runtime < (1ULL << DL_SCALE))
-		return false;
-
-	/*
-	 * Since we use the MSB for wrap-around and sign issues, make
-	 * sure it's not set (mind that period can be equal to zero).
-	 */
-	if (attr->sched_deadline & (1ULL << 63) ||
-	    attr->sched_period & (1ULL << 63))
-		return false;
-
-	/* runtime <= deadline <= period (if period != 0) */
-	if ((attr->sched_period != 0 &&
-	     attr->sched_period < attr->sched_deadline) ||
-	    attr->sched_deadline < attr->sched_runtime)
-		return false;
-
-	return true;
-}
-
 /*
  * Check the target process has a UID that matches the current process's:
  */
@@ -4170,19 +3969,6 @@ static bool check_same_owner(struct task_struct *p)
 	return match;
 }
 
-static bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
-{
-	struct sched_dl_entity *dl_se = &p->dl;
-
-	if (dl_se->dl_runtime != attr->sched_runtime ||
-		dl_se->dl_deadline != attr->sched_deadline ||
-		dl_se->dl_period != attr->sched_period ||
-		dl_se->flags != attr->sched_flags)
-		return true;
-
-	return false;
-}
-
 static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
 				bool user, bool pi)
@@ -4197,8 +3983,8 @@ static int __sched_setscheduler(struct task_struct *p,
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct rq *rq;
 
-	/* May grab non-irq protected spin_locks: */
-	BUG_ON(in_interrupt());
+	/* The pi code expects interrupts enabled */
+	BUG_ON(pi && in_interrupt());
 recheck:
 	/* Double check policy once rq lock held: */
 	if (policy < 0) {
@@ -4211,7 +3997,8 @@ static int __sched_setscheduler(struct task_struct *p,
 			return -EINVAL;
 	}
 
-	if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
+	if (attr->sched_flags &
+		~(SCHED_FLAG_RESET_ON_FORK | SCHED_FLAG_RECLAIM))
 		return -EINVAL;
 
 	/*
@@ -4362,7 +4149,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
 	 * is available.
 	 */
-	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
+	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
 		task_rq_unlock(rq, p, &rf);
 		return -EBUSY;
 	}
@@ -5463,26 +5250,17 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+#ifdef CONFIG_SMP
+
 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 			      const struct cpumask *trial)
 {
-	int ret = 1, trial_cpus;
-	struct dl_bw *cur_dl_b;
-	unsigned long flags;
+	int ret = 1;
 
 	if (!cpumask_weight(cur))
 		return ret;
 
-	rcu_read_lock_sched();
-	cur_dl_b = dl_bw_of(cpumask_any(cur));
-	trial_cpus = cpumask_weight(trial);
-
-	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
-	if (cur_dl_b->bw != -1 &&
-	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
-		ret = 0;
-	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
-	rcu_read_unlock_sched();
+	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
 
 	return ret;
 }
@@ -5506,43 +5284,14 @@ int task_can_attach(struct task_struct *p,
 		goto out;
 	}
 
-#ifdef CONFIG_SMP
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed)) {
-		unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
-							cs_cpus_allowed);
-		struct dl_bw *dl_b;
-		bool overflow;
-		int cpus;
-		unsigned long flags;
+					      cs_cpus_allowed))
+		ret = dl_task_can_attach(p, cs_cpus_allowed);
 
-		rcu_read_lock_sched();
-		dl_b = dl_bw_of(dest_cpu);
-		raw_spin_lock_irqsave(&dl_b->lock, flags);
-		cpus = dl_bw_cpus(dest_cpu);
-		overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
-		if (overflow)
-			ret = -EBUSY;
-		else {
-			/*
-			 * We reserve space for this task in the destination
-			 * root_domain, as we can't fail after this point.
-			 * We will free resources in the source root_domain
-			 * later on (see set_cpus_allowed_dl()).
-			 */
-			__dl_add(dl_b, p->dl.dl_bw);
-		}
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-		rcu_read_unlock_sched();
-
-	}
-#endif
 out:
 	return ret;
 }
 
-#ifdef CONFIG_SMP
-
 bool sched_smp_initialized __read_mostly;
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -5605,7 +5354,7 @@ void idle_task_exit(void)
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm_irqs_off(mm, &init_mm, current);
+		switch_mm(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
@@ -5805,23 +5554,8 @@ static void cpuset_cpu_active(void)
 
 static int cpuset_cpu_inactive(unsigned int cpu)
 {
-	unsigned long flags;
-	struct dl_bw *dl_b;
-	bool overflow;
-	int cpus;
-
 	if (!cpuhp_tasks_frozen) {
-		rcu_read_lock_sched();
-		dl_b = dl_bw_of(cpu);
-
-		raw_spin_lock_irqsave(&dl_b->lock, flags);
-		cpus = dl_bw_cpus(cpu);
-		overflow = __dl_overflow(dl_b, cpus, 0, 0);
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-		rcu_read_unlock_sched();
-
-		if (overflow)
+		if (dl_cpu_busy(cpu))
 			return -EBUSY;
 		cpuset_update_active_cpus();
 	} else {
@@ -5874,15 +5608,9 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 * users of this state to go away such that all new such users will
 	 * observe it.
 	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
 	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
+	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
 	if (!sched_smp_initialized)
 		return 0;
@@ -5958,7 +5686,6 @@ void __init sched_init_smp(void)
 	cpumask_var_t non_isolated_cpus;
 
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 	sched_init_numa();
 
@@ -5968,7 +5695,7 @@ void __init sched_init_smp(void)
 	 * happen.
 	 */
 	mutex_lock(&sched_domains_mutex);
-	init_sched_domains(cpu_active_mask);
+	sched_init_domains(cpu_active_mask);
 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
 	if (cpumask_empty(non_isolated_cpus))
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -5984,7 +5711,6 @@ void __init sched_init_smp(void)
 	init_sched_dl_class();
 
 	sched_init_smt();
-	sched_clock_init_late();
 
 	sched_smp_initialized = true;
 }
@@ -6000,7 +5726,6 @@ early_initcall(migration_init);
 void __init sched_init_smp(void)
 {
 	sched_init_granularity();
-	sched_clock_init_late();
 }
 #endif /* CONFIG_SMP */
 
@@ -6026,28 +5751,13 @@ static struct kmem_cache *task_group_cache __read_mostly;
 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
 
-#define WAIT_TABLE_BITS 8
-#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
-static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
-
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
-	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
-	unsigned long val = (unsigned long)word << shift | bit;
-
-	return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
 void __init sched_init(void)
 {
 	int i, j;
 	unsigned long alloc_size = 0, ptr;
 
 	sched_clock_init();
-
-	for (i = 0; i < WAIT_TABLE_SIZE; i++)
-		init_waitqueue_head(bit_wait_table + i);
+	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
@@ -6199,7 +5909,6 @@ void __init sched_init(void)
 	calc_load_update = jiffies + LOAD_FREQ;
 
 #ifdef CONFIG_SMP
-	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 	/* May be allocated at isolcpus cmdline parse time */
 	if (cpu_isolated_map == NULL)
 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
@@ -6251,8 +5960,10 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
 	     !is_idle_task(current)) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+	    oops_in_progress)
 		return;
+
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
 	prev_jiffy = jiffies;
@@ -6507,385 +6218,6 @@ void sched_move_task(struct task_struct *tsk)
 
 	task_rq_unlock(rq, tsk, &rf);
 }
-#endif /* CONFIG_CGROUP_SCHED */
-
-#ifdef CONFIG_RT_GROUP_SCHED
-/*
- * Ensure that the real time constraints are schedulable.
- */
-static DEFINE_MUTEX(rt_constraints_mutex);
-
-/* Must be called with tasklist_lock held */
-static inline int tg_has_rt_tasks(struct task_group *tg)
-{
-	struct task_struct *g, *p;
-
-	/*
-	 * Autogroups do not have RT tasks; see autogroup_create().
-	 */
-	if (task_group_is_autogroup(tg))
-		return 0;
-
-	for_each_process_thread(g, p) {
-		if (rt_task(p) && task_group(p) == tg)
-			return 1;
-	}
-
-	return 0;
-}
-
-struct rt_schedulable_data {
-	struct task_group *tg;
-	u64 rt_period;
-	u64 rt_runtime;
-};
-
-static int tg_rt_schedulable(struct task_group *tg, void *data)
-{
-	struct rt_schedulable_data *d = data;
-	struct task_group *child;
-	unsigned long total, sum = 0;
-	u64 period, runtime;
-
-	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
-	runtime = tg->rt_bandwidth.rt_runtime;
-
-	if (tg == d->tg) {
-		period = d->rt_period;
-		runtime = d->rt_runtime;
-	}
-
-	/*
-	 * Cannot have more runtime than the period.
-	 */
-	if (runtime > period && runtime != RUNTIME_INF)
-		return -EINVAL;
-
-	/*
-	 * Ensure we don't starve existing RT tasks.
-	 */
-	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
-		return -EBUSY;
-
-	total = to_ratio(period, runtime);
-
-	/*
-	 * Nobody can have more than the global setting allows.
-	 */
-	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
-		return -EINVAL;
-
-	/*
-	 * The sum of our children's runtime should not exceed our own.
-	 */
-	list_for_each_entry_rcu(child, &tg->children, siblings) {
-		period = ktime_to_ns(child->rt_bandwidth.rt_period);
-		runtime = child->rt_bandwidth.rt_runtime;
-
-		if (child == d->tg) {
-			period = d->rt_period;
-			runtime = d->rt_runtime;
-		}
-
-		sum += to_ratio(period, runtime);
-	}
-
-	if (sum > total)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
-{
-	int ret;
-
-	struct rt_schedulable_data data = {
-		.tg = tg,
-		.rt_period = period,
-		.rt_runtime = runtime,
-	};
-
-	rcu_read_lock();
-	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
-	rcu_read_unlock();
-
-	return ret;
-}
-
-static int tg_set_rt_bandwidth(struct task_group *tg,
-		u64 rt_period, u64 rt_runtime)
-{
-	int i, err = 0;
-
-	/*
-	 * Disallowing the root group RT runtime is BAD, it would disallow the
-	 * kernel creating (and or operating) RT threads.
-	 */
-	if (tg == &root_task_group && rt_runtime == 0)
-		return -EINVAL;
-
-	/* No period doesn't make any sense. */
-	if (rt_period == 0)
-		return -EINVAL;
-
-	mutex_lock(&rt_constraints_mutex);
-	read_lock(&tasklist_lock);
-	err = __rt_schedulable(tg, rt_period, rt_runtime);
-	if (err)
-		goto unlock;
-
-	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
-	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
-	tg->rt_bandwidth.rt_runtime = rt_runtime;
-
-	for_each_possible_cpu(i) {
-		struct rt_rq *rt_rq = tg->rt_rq[i];
-
-		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_runtime = rt_runtime;
-		raw_spin_unlock(&rt_rq->rt_runtime_lock);
-	}
-	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
-unlock:
-	read_unlock(&tasklist_lock);
-	mutex_unlock(&rt_constraints_mutex);
-
-	return err;
-}
-
-static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
-{
-	u64 rt_runtime, rt_period;
-
-	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
-	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
-	if (rt_runtime_us < 0)
-		rt_runtime = RUNTIME_INF;
-
-	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
-}
-
-static long sched_group_rt_runtime(struct task_group *tg)
-{
-	u64 rt_runtime_us;
-
-	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
-		return -1;
-
-	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
-	do_div(rt_runtime_us, NSEC_PER_USEC);
-	return rt_runtime_us;
-}
-
-static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
-{
-	u64 rt_runtime, rt_period;
-
-	rt_period = rt_period_us * NSEC_PER_USEC;
-	rt_runtime = tg->rt_bandwidth.rt_runtime;
-
-	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
-}
-
-static long sched_group_rt_period(struct task_group *tg)
-{
-	u64 rt_period_us;
-
-	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
-	do_div(rt_period_us, NSEC_PER_USEC);
-	return rt_period_us;
-}
-#endif /* CONFIG_RT_GROUP_SCHED */
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static int sched_rt_global_constraints(void)
-{
-	int ret = 0;
-
-	mutex_lock(&rt_constraints_mutex);
-	read_lock(&tasklist_lock);
-	ret = __rt_schedulable(NULL, 0, 0);
-	read_unlock(&tasklist_lock);
-	mutex_unlock(&rt_constraints_mutex);
-
-	return ret;
-}
-
-static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
-{
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
-		return 0;
-
-	return 1;
-}
-
-#else /* !CONFIG_RT_GROUP_SCHED */
-static int sched_rt_global_constraints(void)
-{
-	unsigned long flags;
-	int i;
-
-	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
-	for_each_possible_cpu(i) {
-		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
-
-		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_runtime = global_rt_runtime();
-		raw_spin_unlock(&rt_rq->rt_runtime_lock);
-	}
-	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
-
-	return 0;
-}
-#endif /* CONFIG_RT_GROUP_SCHED */
-
-static int sched_dl_global_validate(void)
-{
-	u64 runtime = global_rt_runtime();
-	u64 period = global_rt_period();
-	u64 new_bw = to_ratio(period, runtime);
-	struct dl_bw *dl_b;
-	int cpu, ret = 0;
-	unsigned long flags;
-
-	/*
-	 * Here we want to check the bandwidth not being set to some
-	 * value smaller than the currently allocated bandwidth in
-	 * any of the root_domains.
-	 *
-	 * FIXME: Cycling on all the CPUs is overdoing, but simpler than
-	 * cycling on root_domains... Discussion on different/better
-	 * solutions is welcome!
-	 */
-	for_each_possible_cpu(cpu) {
-		rcu_read_lock_sched();
-		dl_b = dl_bw_of(cpu);
-
-		raw_spin_lock_irqsave(&dl_b->lock, flags);
-		if (new_bw < dl_b->total_bw)
-			ret = -EBUSY;
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-		rcu_read_unlock_sched();
-
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-static void sched_dl_do_global(void)
-{
-	u64 new_bw = -1;
-	struct dl_bw *dl_b;
-	int cpu;
-	unsigned long flags;
-
-	def_dl_bandwidth.dl_period = global_rt_period();
-	def_dl_bandwidth.dl_runtime = global_rt_runtime();
-
-	if (global_rt_runtime() != RUNTIME_INF)
-		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
-
-	/*
-	 * FIXME: As above...
-	 */
-	for_each_possible_cpu(cpu) {
-		rcu_read_lock_sched();
-		dl_b = dl_bw_of(cpu);
-
-		raw_spin_lock_irqsave(&dl_b->lock, flags);
-		dl_b->bw = new_bw;
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-		rcu_read_unlock_sched();
-	}
-}
-
-static int sched_rt_global_validate(void)
-{
-	if (sysctl_sched_rt_period <= 0)
-		return -EINVAL;
-
-	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
-		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
-		return -EINVAL;
-
-	return 0;
-}
-
-static void sched_rt_do_global(void)
-{
-	def_rt_bandwidth.rt_runtime = global_rt_runtime();
-	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
-}
-
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
-{
-	int old_period, old_runtime;
-	static DEFINE_MUTEX(mutex);
-	int ret;
-
-	mutex_lock(&mutex);
-	old_period = sysctl_sched_rt_period;
-	old_runtime = sysctl_sched_rt_runtime;
-
-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
-
-	if (!ret && write) {
-		ret = sched_rt_global_validate();
-		if (ret)
-			goto undo;
-
-		ret = sched_dl_global_validate();
-		if (ret)
-			goto undo;
-
-		ret = sched_rt_global_constraints();
-		if (ret)
-			goto undo;
-
-		sched_rt_do_global();
-		sched_dl_do_global();
-	}
-	if (0) {
-undo:
-		sysctl_sched_rt_period = old_period;
-		sysctl_sched_rt_runtime = old_runtime;
-	}
-	mutex_unlock(&mutex);
-
-	return ret;
-}
-
-int sched_rr_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
-{
-	int ret;
-	static DEFINE_MUTEX(mutex);
-
-	mutex_lock(&mutex);
-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
-	/*
-	 * Make sure that internally we keep jiffies.
-	 * Also, writing zero resets the timeslice to default:
-	 */
-	if (!ret && write) {
-		sched_rr_timeslice =
-			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
-			msecs_to_jiffies(sysctl_sched_rr_timeslice);
-	}
-	mutex_unlock(&mutex);
-	return ret;
-}
-
-#ifdef CONFIG_CGROUP_SCHED
 
 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 622eed1..076a2e3 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 	if (sg_policy->next_freq == next_freq)
 		return;
 
-	if (sg_policy->next_freq > next_freq)
-		next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index aea3135..67c70e2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -615,19 +615,13 @@ static void cputime_adjust(struct task_cputime *curr,
 	 * userspace. Once a task gets some ticks, the monotonicy code at
 	 * 'update' will ensure things converge to the observed ratio.
 	 */
-	if (stime == 0) {
-		utime = rtime;
-		goto update;
+	if (stime != 0) {
+		if (utime == 0)
+			stime = rtime;
+		else
+			stime = scale_stime(stime, rtime, stime + utime);
 	}
 
-	if (utime == 0) {
-		stime = rtime;
-		goto update;
-	}
-
-	stime = scale_stime(stime, rtime, stime + utime);
-
-update:
 	/*
 	 * Make sure stime doesn't go backwards; this preserves monotonicity
 	 * for utime because rtime is monotonic.
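The restructured branch keeps the same split rule: when both stime and utime have accumulated ticks, stime becomes the stime/(stime + utime) share of the measured rtime (scale_stime() performs that multiply-divide; the sketch below ignores the overflow care the kernel version needs). A rough userspace illustration of the proportion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative tick counts and measured runtime. */
	uint64_t stime = 2;                   /* system ticks sampled */
	uint64_t utime = 6;                   /* user ticks sampled */
	uint64_t rtime = 100 * 1000 * 1000;   /* 100 ms of real runtime, in ns */

	/* Same proportion as scale_stime(stime, rtime, stime + utime). */
	uint64_t adj_stime = rtime * stime / (stime + utime);
	uint64_t adj_utime = rtime - adj_stime;   /* remainder goes to utime */

	/* 25 ms system time, 75 ms user time. */
	printf("stime = %llu ns, utime = %llu ns\n",
	       (unsigned long long)adj_stime, (unsigned long long)adj_utime);
	return 0;
}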
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a2ce590..a84299f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -17,6 +17,7 @@
 #include "sched.h"
 
 #include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -43,6 +44,254 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 	return !RB_EMPTY_NODE(&dl_se->rb_node);
 }
 
+#ifdef CONFIG_SMP
+static inline struct dl_bw *dl_bw_of(int i)
+{
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
+	return &cpu_rq(i)->rd->dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+	struct root_domain *rd = cpu_rq(i)->rd;
+	int cpus = 0;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
+	for_each_cpu_and(i, rd->span, cpu_active_mask)
+		cpus++;
+
+	return cpus;
+}
+#else
+static inline struct dl_bw *dl_bw_of(int i)
+{
+	return &cpu_rq(i)->dl.dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+	return 1;
+}
+#endif
+
+static inline
+void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->running_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->running_bw += dl_bw;
+	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
+	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+}
+
+static inline
+void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->running_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->running_bw -= dl_bw;
+	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
+	if (dl_rq->running_bw > old)
+		dl_rq->running_bw = 0;
+}
+
+static inline
+void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->this_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->this_bw += dl_bw;
+	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
+}
+
+static inline
+void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->this_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->this_bw -= dl_bw;
+	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
+	if (dl_rq->this_bw > old)
+		dl_rq->this_bw = 0;
+	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+}
+
+void dl_change_utilization(struct task_struct *p, u64 new_bw)
+{
+	struct rq *rq;
+
+	if (task_on_rq_queued(p))
+		return;
+
+	rq = task_rq(p);
+	if (p->dl.dl_non_contending) {
+		sub_running_bw(p->dl.dl_bw, &rq->dl);
+		p->dl.dl_non_contending = 0;
+		/*
+		 * If the timer handler is currently running and the
+		 * timer cannot be cancelled, inactive_task_timer()
+		 * will see that dl_non_contending is not set, and
+		 * will not touch the rq's active utilization,
+		 * so we are still safe.
+		 */
+		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
+			put_task_struct(p);
+	}
+	sub_rq_bw(p->dl.dl_bw, &rq->dl);
+	add_rq_bw(new_bw, &rq->dl);
+}
+
+/*
+ * The utilization of a task cannot be immediately removed from
+ * the rq active utilization (running_bw) when the task blocks.
+ * Instead, we have to wait for the so called "0-lag time".
+ *
+ * If a task blocks before the "0-lag time", a timer (the inactive
+ * timer) is armed, and running_bw is decreased when the timer
+ * fires.
+ *
+ * If the task wakes up again before the inactive timer fires,
+ * the timer is cancelled, whereas if the task wakes up after the
+ * inactive timer fired (and running_bw has been decreased) the
+ * task's utilization has to be added to running_bw again.
+ * A flag in the deadline scheduling entity (dl_non_contending)
+ * is used to avoid race conditions between the inactive timer handler
+ * and task wakeups.
+ *
+ * The following diagram shows how running_bw is updated. A task is
+ * "ACTIVE" when its utilization contributes to running_bw; an
+ * "ACTIVE contending" task is in the TASK_RUNNING state, while an
+ * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
+ * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
+ * time already passed, which does not contribute to running_bw anymore.
+ *                              +------------------+
+ *             wakeup           |    ACTIVE        |
+ *          +------------------>+   contending     |
+ *          | add_running_bw    |                  |
+ *          |                   +----+------+------+
+ *          |                        |      ^
+ *          |                dequeue |      |
+ * +--------+-------+                |      |
+ * |                |   t >= 0-lag   |      | wakeup
+ * |    INACTIVE    |<---------------+      |
+ * |                | sub_running_bw |      |
+ * +--------+-------+                |      |
+ *          ^                        |      |
+ *          |              t < 0-lag |      |
+ *          |                        |      |
+ *          |                        V      |
+ *          |                   +----+------+------+
+ *          | sub_running_bw    |    ACTIVE        |
+ *          +-------------------+                  |
+ *            inactive timer    |  non contending  |
+ *            fired             +------------------+
+ *
+ * The task_non_contending() function is invoked when a task
+ * blocks, and checks if the 0-lag time already passed or
+ * not (in the first case, it directly updates running_bw;
+ * in the second case, it arms the inactive timer).
+ *
+ * The task_contending() function is invoked when a task wakes
+ * up, and checks if the task is still in the "ACTIVE non contending"
+ * state or not (in the second case, it updates running_bw).
+ */
+static void task_non_contending(struct task_struct *p)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+	struct hrtimer *timer = &dl_se->inactive_timer;
+	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+	struct rq *rq = rq_of_dl_rq(dl_rq);
+	s64 zerolag_time;
+
+	/*
+	 * If this is a non-deadline task that has been boosted,
+	 * do nothing
+	 */
+	if (dl_se->dl_runtime == 0)
+		return;
+
+	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
+	WARN_ON(dl_se->dl_non_contending);
+
+	zerolag_time = dl_se->deadline -
+		 div64_long((dl_se->runtime * dl_se->dl_period),
+			dl_se->dl_runtime);
+
+	/*
+	 * Using relative times instead of the absolute "0-lag time"
+	 * allows us to simplify the code
+	 */
+	zerolag_time -= rq_clock(rq);
+
+	/*
+	 * If the "0-lag time" already passed, decrease the active
+	 * utilization now, instead of starting a timer
+	 */
+	if (zerolag_time < 0) {
+		if (dl_task(p))
+			sub_running_bw(dl_se->dl_bw, dl_rq);
+		if (!dl_task(p) || p->state == TASK_DEAD) {
+			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+			if (p->state == TASK_DEAD)
+				sub_rq_bw(p->dl.dl_bw, &rq->dl);
+			raw_spin_lock(&dl_b->lock);
+			__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+			__dl_clear_params(p);
+			raw_spin_unlock(&dl_b->lock);
+		}
+
+		return;
+	}
+
+	dl_se->dl_non_contending = 1;
+	get_task_struct(p);
+	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
+}
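task_non_contending() arms the inactive timer for the task's "0-lag time", computed as deadline - runtime * dl_period / dl_runtime and then made relative to rq_clock(). A worked example of that expression with illustrative parameters (10 ms of runtime every 100 ms, 2 ms of budget left, absolute deadline 50 ms away):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative sched_dl_entity state, all values in ns. */
	int64_t dl_runtime = 10 * 1000 * 1000;        /* reserved 10 ms ... */
	int64_t dl_period  = 100 * 1000 * 1000;       /* ... every 100 ms */
	int64_t runtime    = 2 * 1000 * 1000;         /* 2 ms of budget left */
	int64_t now        = 0;                       /* rq_clock(rq), taken as origin */
	int64_t deadline   = now + 50 * 1000 * 1000;  /* absolute deadline */

	/* Same expression as in task_non_contending(). */
	int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime;
	zerolag -= now;

	/* 50 ms - 20 ms = 30 ms: the inactive timer would fire 30 ms from now. */
	printf("0-lag time in %lld ns\n", (long long)zerolag);
	return 0;
}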
+
+static void task_contending(struct sched_dl_entity *dl_se, int flags)
+{
+	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+	/*
+	 * If this is a non-deadline task that has been boosted,
+	 * do nothing
+	 */
+	if (dl_se->dl_runtime == 0)
+		return;
+
+	if (flags & ENQUEUE_MIGRATED)
+		add_rq_bw(dl_se->dl_bw, dl_rq);
+
+	if (dl_se->dl_non_contending) {
+		dl_se->dl_non_contending = 0;
+		/*
+		 * If the timer handler is currently running and the
+		 * timer cannot be cancelled, inactive_task_timer()
+		 * will see that dl_non_contending is not set, and
+		 * will not touch the rq's active utilization,
+		 * so we are still safe.
+		 */
+		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
+			put_task_struct(dl_task_of(dl_se));
+	} else {
+		/*
+		 * Since "dl_non_contending" is not set, the
+		 * task's utilization has already been removed from
+		 * active utilization (either when the task blocked,
+		 * or when the "inactive timer" fired).
+		 * So, add it back.
+		 */
+		add_running_bw(dl_se->dl_bw, dl_rq);
+	}
+}
+
 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
@@ -83,6 +332,10 @@ void init_dl_rq(struct dl_rq *dl_rq)
 #else
 	init_dl_bw(&dl_rq->dl_bw);
 #endif
+
+	dl_rq->running_bw = 0;
+	dl_rq->this_bw = 0;
+	init_dl_rq_bw_ratio(dl_rq);
 }
 
 #ifdef CONFIG_SMP
@@ -484,13 +737,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 }
 
 /*
- * When a -deadline entity is queued back on the runqueue, its runtime and
- * deadline might need updating.
+ * Revised wakeup rule [1]: For self-suspending tasks, rather than
+ * re-initializing the task's runtime and deadline, the revised wakeup
+ * rule adjusts the task's runtime to avoid the task overrunning its
+ * density.
  *
- * The policy here is that we update the deadline of the entity only if:
- *  - the current deadline is in the past,
- *  - using the remaining runtime with the current deadline would make
- *    the entity exceed its bandwidth.
+ * Reasoning: a task may overrun the density if:
+ *    runtime / (deadline - t) > dl_runtime / dl_deadline
+ *
+ * Therefore, runtime can be adjusted to:
+ *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
+ *
+ * This way, the runtime will be equal to the maximum density the
+ * task can use without breaking any rule.
+ *
+ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
+ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
+ */
+static void
+update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+	u64 laxity = dl_se->deadline - rq_clock(rq);
+
+	/*
+	 * If the task has deadline < period, and the deadline is in the past,
+	 * it should already be throttled before this check.
+	 *
+	 * See update_dl_entity() comments for further details.
+	 */
+	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
+
+	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
+}
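With dl_density = to_ratio(dl_deadline, dl_runtime) (set in __setparam_dl() further down), the revised rule caps the remaining runtime at density * laxity. A small numeric sketch with illustrative parameters: 10 ms of runtime within a 50 ms relative deadline, and 30 ms of laxity left at wakeup:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

int main(void)
{
	uint64_t dl_runtime  = 10 * 1000 * 1000;  /* 10 ms, in ns */
	uint64_t dl_deadline = 50 * 1000 * 1000;  /* 50 ms relative deadline */
	uint64_t laxity      = 30 * 1000 * 1000;  /* deadline - rq_clock(rq) */

	/* dl_density = to_ratio(dl_deadline, dl_runtime): 0.2 in fixed point. */
	uint64_t dl_density = (dl_runtime << BW_SHIFT) / dl_deadline;

	/* Same formula as update_dl_revised_wakeup(). */
	uint64_t runtime = (dl_density * laxity) >> BW_SHIFT;

	/* Roughly 0.2 * 30 ms = 6 ms of runtime granted until the deadline. */
	printf("runtime = %llu ns\n", (unsigned long long)runtime);
	return 0;
}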
+
+/*
+ * Regarding the deadline, a task with implicit deadline has a relative
+ * deadline == relative period. A task with constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * applied only for tasks which do not have an implicit deadline. See
+ * update_dl_entity() to know more about such restrictions.
+ *
+ * The dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+	return dl_se->dl_deadline == dl_se->dl_period;
+}
+
+/*
+ * When a deadline entity is placed in the runqueue, its runtime and deadline
+ * might need to be updated. This is done by a CBS wake up rule. There are two
+ * different rules: 1) the original CBS; and 2) the Revisited CBS.
+ *
+ * When the task is starting a new period, the Original CBS is used. In this
+ * case, the runtime is replenished and a new absolute deadline is set.
+ *
+ * When a task is queued before the beginning of the next period, using
+ * the remaining runtime and deadline could make the entity overflow, see
+ * dl_entity_overflow() to find more about runtime overflow. When such case
+ * is detected, the runtime and deadline need to be updated.
+ *
+ * If the task has an implicit deadline, i.e., deadline == period, the Original
+ * CBS is applied. The runtime is replenished and a new absolute deadline is
+ * set, as in the previous cases.
+ *
+ * However, the Original CBS does not work properly for tasks with
+ * deadline < period, which are said to have a constrained deadline. By
+ * applying the Original CBS, a constrained deadline task would be able to run
+ * runtime/deadline in a period. With deadline < period, the task would
+ * overrun the runtime/period allowed bandwidth, breaking the admission test.
+ *
+ * In order to prevent this misbehavior, the Revisited CBS is used for
+ * constrained deadline tasks when a runtime overflow is detected. In the
+ * Revisited CBS, rather than replenishing & setting a new absolute deadline,
+ * the remaining runtime of the task is reduced to avoid runtime overflow.
+ * Please refer to the comments in the update_dl_revised_wakeup() function
+ * to find out more about the Revisited CBS rule.
  */
 static void update_dl_entity(struct sched_dl_entity *dl_se,
 			     struct sched_dl_entity *pi_se)
@@ -500,6 +824,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
 
 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+
+		if (unlikely(!dl_is_implicit(dl_se) &&
+			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+			     !dl_se->dl_boosted)){
+			update_dl_revised_wakeup(dl_se, rq);
+			return;
+		}
+
 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 		dl_se->runtime = pi_se->dl_runtime;
 	}
@@ -593,10 +925,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	 * The task might have changed its scheduling policy to something
 	 * different than SCHED_DEADLINE (through switched_from_dl()).
 	 */
-	if (!dl_task(p)) {
-		__dl_clear_params(p);
+	if (!dl_task(p))
 		goto unlock;
-	}
 
 	/*
 	 * The task might have been boosted by someone else and might be in the
@@ -723,6 +1053,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
 		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
 			return;
 		dl_se->dl_throttled = 1;
+		if (dl_se->runtime > 0)
+			dl_se->runtime = 0;
 	}
 }
 
@@ -735,6 +1067,47 @@ int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 
 /*
+ * This function implements the GRUB accounting rule:
+ * according to the GRUB reclaiming algorithm, the runtime is
+ * not decreased as "dq = -dt", but as
+ * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
+ * where u is the utilization of the task, Umax is the maximum reclaimable
+ * utilization, Uinact is the (per-runqueue) inactive utilization, computed
+ * as the difference between the "total runqueue utilization" and the
+ * runqueue active utilization, and Uextra is the (per runqueue) extra
+ * reclaimable utilization.
+ * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
+ * multiplied by 2^BW_SHIFT, the result has to be shifted right by
+ * BW_SHIFT.
+ * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
+ * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
+ * Since delta is a 64-bit variable, to have an overflow its value
+ * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
+ * So, overflow is not an issue here.
+ */
+u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
+{
+	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
+	u64 u_act;
+	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
+
+	/*
+	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
+	 * we compare u_inact + rq->dl.extra_bw with
+	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
+	 * u_inact + rq->dl.extra_bw can be larger than
+	 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
+	 * leading to wrong results)
+	 */
+	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
+		u_act = u_act_min;
+	else
+		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
+
+	return (delta * u_act) >> BW_SHIFT;
+}
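As a concrete check of the rule above (BW_SHIFT = 20 and RATIO_SHIFT = 8, as implied by the overflow remark), here is a userspace copy of grub_reclaim() run with illustrative values: a 0.1-utilization task, 0.3 of inactive utilization, 0.05 of extra bandwidth, and Umax = 0.95:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT    20
#define BW_UNIT     (1 << BW_SHIFT)
#define RATIO_SHIFT 8

/* Userspace copy of grub_reclaim(), with the rq fields passed in directly. */
static uint64_t grub_reclaim(uint64_t delta, uint64_t this_bw, uint64_t running_bw,
			     uint64_t extra_bw, uint64_t bw_ratio, uint64_t dl_bw)
{
	uint64_t u_inact = this_bw - running_bw;             /* Utot - Uact */
	uint64_t u_act_min = (dl_bw * bw_ratio) >> RATIO_SHIFT;
	uint64_t u_act;

	if (u_inact + extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}

int main(void)
{
	/* Illustrative utilizations, all as fractions of BW_UNIT. */
	uint64_t dl_bw      = BW_UNIT / 10;                   /* u = 0.1 */
	uint64_t this_bw    = BW_UNIT / 2;                    /* Utot = 0.5 */
	uint64_t running_bw = BW_UNIT / 5;                    /* Uact = 0.2 */
	uint64_t extra_bw   = BW_UNIT / 20;                   /* Uextra = 0.05 */
	uint64_t bw_ratio   = (1 << RATIO_SHIFT) * 100 / 95;  /* ~1/0.95 */
	uint64_t delta      = 1000000;                        /* 1 ms of execution */

	/* u_inact = 0.3, so dq = -(1 - 0.3 - 0.05) dt: ~0.65 ms charged per ms. */
	printf("charged %llu ns for %llu ns of execution\n",
	       (unsigned long long)grub_reclaim(delta, this_bw, running_bw,
						extra_bw, bw_ratio, dl_bw),
	       (unsigned long long)delta);
	return 0;
}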
+
+/*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
  */
@@ -776,6 +1149,8 @@ static void update_curr_dl(struct rq *rq)
 
 	sched_rt_avg_update(rq, delta_exec);
 
+	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
+		delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
 	dl_se->runtime -= delta_exec;
 
 throttle:
@@ -815,6 +1190,56 @@ static void update_curr_dl(struct rq *rq)
 	}
 }
 
+static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
+{
+	struct sched_dl_entity *dl_se = container_of(timer,
+						     struct sched_dl_entity,
+						     inactive_timer);
+	struct task_struct *p = dl_task_of(dl_se);
+	struct rq_flags rf;
+	struct rq *rq;
+
+	rq = task_rq_lock(p, &rf);
+
+	if (!dl_task(p) || p->state == TASK_DEAD) {
+		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+		if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
+			sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
+			sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
+			dl_se->dl_non_contending = 0;
+		}
+
+		raw_spin_lock(&dl_b->lock);
+		__dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		raw_spin_unlock(&dl_b->lock);
+		__dl_clear_params(p);
+
+		goto unlock;
+	}
+	if (dl_se->dl_non_contending == 0)
+		goto unlock;
+
+	sched_clock_tick();
+	update_rq_clock(rq);
+
+	sub_running_bw(dl_se->dl_bw, &rq->dl);
+	dl_se->dl_non_contending = 0;
+unlock:
+	task_rq_unlock(rq, p, &rf);
+	put_task_struct(p);
+
+	return HRTIMER_NORESTART;
+}
+
+void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
+{
+	struct hrtimer *timer = &dl_se->inactive_timer;
+
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	timer->function = inactive_task_timer;
+}
+
 #ifdef CONFIG_SMP
 
 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
@@ -946,10 +1371,12 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (flags & ENQUEUE_WAKEUP)
+	if (flags & ENQUEUE_WAKEUP) {
+		task_contending(dl_se, flags);
 		update_dl_entity(dl_se, pi_se);
-	else if (flags & ENQUEUE_REPLENISH)
+	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se, pi_se);
+	}
 
 	__enqueue_dl_entity(dl_se);
 }
@@ -959,11 +1386,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 	__dequeue_dl_entity(dl_se);
 }
 
-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
-{
-	return dl_se->dl_deadline < dl_se->dl_period;
-}
-
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -995,17 +1417,32 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	 * If that is the case, the task will be throttled and
 	 * the replenishment timer will be set to the next period.
 	 */
-	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+	if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
 		dl_check_constrained_dl(&p->dl);
 
+	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
+		add_rq_bw(p->dl.dl_bw, &rq->dl);
+		add_running_bw(p->dl.dl_bw, &rq->dl);
+	}
+
 	/*
-	 * If p is throttled, we do nothing. In fact, if it exhausted
+	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
 	 * its budget it needs a replenishment and, since it now is on
 	 * its rq, the bandwidth timer callback (which clearly has not
 	 * run yet) will take care of this.
+	 * However, the active utilization does not depend on the fact
+	 * that the task is on the runqueue or not (but depends on the
+	 * task's state - in GRUB parlance, "inactive" vs "active contending").
+	 * In other words, even if a task is throttled its utilization must
+	 * be counted in the active utilization; hence, we need to call
+	 * add_running_bw().
 	 */
-	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
+	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+		if (flags & ENQUEUE_WAKEUP)
+			task_contending(&p->dl, flags);
+
 		return;
+	}
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
@@ -1023,6 +1460,23 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
+
+	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
+		sub_running_bw(p->dl.dl_bw, &rq->dl);
+		sub_rq_bw(p->dl.dl_bw, &rq->dl);
+	}
+
+	/*
+	 * This check allows us to start the inactive timer (or to immediately
+	 * decrease the active utilization, if needed) in two cases:
+	 * when the task blocks and when it is terminating
+	 * (p->state == TASK_DEAD). We can handle the two cases in the same
+	 * way, because from GRUB's point of view the same thing is happening
+	 * (the task moves from "active contending" to "active non contending"
+	 * or "inactive")
+	 */
+	if (flags & DEQUEUE_SLEEP)
+		task_non_contending(p);
 }
 
 /*
@@ -1100,6 +1554,37 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
 	return cpu;
 }
 
+static void migrate_task_rq_dl(struct task_struct *p)
+{
+	struct rq *rq;
+
+	if (p->state != TASK_WAKING)
+		return;
+
+	rq = task_rq(p);
+	/*
+	 * Since p->state == TASK_WAKING, set_task_cpu() has been called
+	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
+	 * rq->lock is not... So, lock it
+	 */
+	raw_spin_lock(&rq->lock);
+	if (p->dl.dl_non_contending) {
+		sub_running_bw(p->dl.dl_bw, &rq->dl);
+		p->dl.dl_non_contending = 0;
+		/*
+		 * If the timer handler is currently running and the
+		 * timer cannot be cancelled, inactive_task_timer()
+		 * will see that dl_non_contending is not set, and
+		 * will not touch the rq's active utilization,
+		 * so we are still safe.
+		 */
+		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
+			put_task_struct(p);
+	}
+	sub_rq_bw(p->dl.dl_bw, &rq->dl);
+	raw_spin_unlock(&rq->lock);
+}
+
 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 {
 	/*
@@ -1255,19 +1740,6 @@ static void task_fork_dl(struct task_struct *p)
 	 */
 }
 
-static void task_dead_dl(struct task_struct *p)
-{
-	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-
-	/*
-	 * Since we are TASK_DEAD we won't slip out of the domain!
-	 */
-	raw_spin_lock_irq(&dl_b->lock);
-	/* XXX we should retain the bw until 0-lag */
-	dl_b->total_bw -= p->dl.dl_bw;
-	raw_spin_unlock_irq(&dl_b->lock);
-}
-
 static void set_curr_task_dl(struct rq *rq)
 {
 	struct task_struct *p = rq->curr;
@@ -1533,7 +2005,7 @@ static int push_dl_task(struct rq *rq)
 		 * then possible that next_task has migrated.
 		 */
 		task = pick_next_pushable_dl_task(rq);
-		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+		if (task == next_task) {
 			/*
 			 * The task is still there. We don't try
 			 * again, some other cpu will pull it when ready.
@@ -1551,7 +2023,11 @@ static int push_dl_task(struct rq *rq)
 	}
 
 	deactivate_task(rq, next_task, 0);
+	sub_running_bw(next_task->dl.dl_bw, &rq->dl);
+	sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
+	add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
+	add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
 	activate_task(later_rq, next_task, 0);
 	ret = 1;
 
@@ -1639,7 +2115,11 @@ static void pull_dl_task(struct rq *this_rq)
 			resched = true;
 
 			deactivate_task(src_rq, p, 0);
+			sub_running_bw(p->dl.dl_bw, &src_rq->dl);
+			sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
 			set_task_cpu(p, this_cpu);
+			add_rq_bw(p->dl.dl_bw, &this_rq->dl);
+			add_running_bw(p->dl.dl_bw, &this_rq->dl);
 			activate_task(this_rq, p, 0);
 			dmin = p->dl.deadline;
 
@@ -1695,7 +2175,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		 * until we complete the update.
 		 */
 		raw_spin_lock(&src_dl_b->lock);
-		__dl_clear(src_dl_b, p->dl.dl_bw);
+		__dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
@@ -1737,13 +2217,26 @@ void __init init_sched_dl_class(void)
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
 	/*
-	 * Start the deadline timer; if we switch back to dl before this we'll
-	 * continue consuming our current CBS slice. If we stay outside of
-	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
-	 * task.
+	 * task_non_contending() can start the "inactive timer" (if the 0-lag
+	 * time is in the future). If the task switches back to dl before
+	 * the "inactive timer" fires, it can continue to consume its current
+	 * runtime using its current deadline. If it stays outside of
+	 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
+	 * will reset the task parameters.
 	 */
-	if (!start_dl_timer(p))
-		__dl_clear_params(p);
+	if (task_on_rq_queued(p) && p->dl.dl_runtime)
+		task_non_contending(p);
+
+	if (!task_on_rq_queued(p))
+		sub_rq_bw(p->dl.dl_bw, &rq->dl);
+
+	/*
+	 * We cannot use inactive_task_timer() to invoke sub_running_bw()
+	 * at the 0-lag time, because the task could have been migrated
+	 * while SCHED_OTHER in the meanwhile.
+	 */
+	if (p->dl.dl_non_contending)
+		p->dl.dl_non_contending = 0;
 
 	/*
 	 * Since this might be the only -deadline task on the rq,
@@ -1762,11 +2255,15 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
+		put_task_struct(p);
 
 	/* If p is not queued we will update its parameters at next wakeup. */
-	if (!task_on_rq_queued(p))
-		return;
+	if (!task_on_rq_queued(p)) {
+		add_rq_bw(p->dl.dl_bw, &rq->dl);
 
+		return;
+	}
 	/*
 	 * If p is boosted we already updated its params in
 	 * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
@@ -1836,6 +2333,7 @@ const struct sched_class dl_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_dl,
+	.migrate_task_rq	= migrate_task_rq_dl,
 	.set_cpus_allowed       = set_cpus_allowed_dl,
 	.rq_online              = rq_online_dl,
 	.rq_offline             = rq_offline_dl,
@@ -1845,7 +2343,6 @@ const struct sched_class dl_sched_class = {
 	.set_curr_task		= set_curr_task_dl,
 	.task_tick		= task_tick_dl,
 	.task_fork              = task_fork_dl,
-	.task_dead		= task_dead_dl,
 
 	.prio_changed           = prio_changed_dl,
 	.switched_from		= switched_from_dl,
@@ -1854,6 +2351,317 @@ const struct sched_class dl_sched_class = {
 	.update_curr		= update_curr_dl,
 };
 
+int sched_dl_global_validate(void)
+{
+	u64 runtime = global_rt_runtime();
+	u64 period = global_rt_period();
+	u64 new_bw = to_ratio(period, runtime);
+	struct dl_bw *dl_b;
+	int cpu, ret = 0;
+	unsigned long flags;
+
+	/*
+	 * Here we want to check the bandwidth not being set to some
+	 * value smaller than the currently allocated bandwidth in
+	 * any of the root_domains.
+	 *
+	 * FIXME: Cycling on all the CPUs is overdoing, but simpler than
+	 * cycling on root_domains... Discussion on different/better
+	 * solutions is welcome!
+	 */
+	for_each_possible_cpu(cpu) {
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
+
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		if (new_bw < dl_b->total_bw)
+			ret = -EBUSY;
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+		rcu_read_unlock_sched();
+
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
+{
+	if (global_rt_runtime() == RUNTIME_INF) {
+		dl_rq->bw_ratio = 1 << RATIO_SHIFT;
+		dl_rq->extra_bw = 1 << BW_SHIFT;
+	} else {
+		dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
+			  global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
+		dl_rq->extra_bw = to_ratio(global_rt_period(),
+						    global_rt_runtime());
+	}
+}
+
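With the default global rt bandwidth (950 ms of runtime per 1 s period, i.e. Umax = 0.95), bw_ratio ends up holding roughly 1/0.95 in 2^RATIO_SHIFT fixed point and extra_bw holds 0.95 in 2^BW_SHIFT fixed point. A quick sketch of those two computations:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT    20
#define RATIO_SHIFT 8

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* Default global rt bandwidth: 950 ms of runtime per 1 s period. */
	uint64_t rt_runtime = 950000ULL * 1000;   /* ns */
	uint64_t rt_period  = 1000000ULL * 1000;  /* ns */

	/* Same expressions as init_dl_rq_bw_ratio(). */
	uint64_t bw_ratio = to_ratio(rt_runtime, rt_period) >> (BW_SHIFT - RATIO_SHIFT);
	uint64_t extra_bw = to_ratio(rt_period, rt_runtime);

	/* ~269 (1/0.95 * 2^8) and ~996147 (0.95 * 2^20). */
	printf("bw_ratio = %llu, extra_bw = %llu\n",
	       (unsigned long long)bw_ratio, (unsigned long long)extra_bw);
	return 0;
}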
+void sched_dl_do_global(void)
+{
+	u64 new_bw = -1;
+	struct dl_bw *dl_b;
+	int cpu;
+	unsigned long flags;
+
+	def_dl_bandwidth.dl_period = global_rt_period();
+	def_dl_bandwidth.dl_runtime = global_rt_runtime();
+
+	if (global_rt_runtime() != RUNTIME_INF)
+		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
+	/*
+	 * FIXME: As above...
+	 */
+	for_each_possible_cpu(cpu) {
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
+
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		dl_b->bw = new_bw;
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+		rcu_read_unlock_sched();
+		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
+	}
+}
+
+/*
+ * We must be sure that accepting a new task (or allowing changing the
+ * parameters of an existing one) is consistent with the bandwidth
+ * constraints. If yes, this function also accordingly updates the currently
+ * allocated bandwidth to reflect the new situation.
+ *
+ * This function is called while holding p's rq->lock.
+ */
+int sched_dl_overflow(struct task_struct *p, int policy,
+		      const struct sched_attr *attr)
+{
+	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+	u64 period = attr->sched_period ?: attr->sched_deadline;
+	u64 runtime = attr->sched_runtime;
+	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
+	int cpus, err = -1;
+
+	/* !deadline task may carry old deadline bandwidth */
+	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
+		return 0;
+
+	/*
+	 * Whether a task enters, leaves, or stays -deadline but changes
+	 * its parameters, we may need to update accordingly the total
+	 * allocated bandwidth of the container.
+	 */
+	raw_spin_lock(&dl_b->lock);
+	cpus = dl_bw_cpus(task_cpu(p));
+	if (dl_policy(policy) && !task_has_dl_policy(p) &&
+	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+		if (hrtimer_active(&p->dl.inactive_timer))
+			__dl_clear(dl_b, p->dl.dl_bw, cpus);
+		__dl_add(dl_b, new_bw, cpus);
+		err = 0;
+	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
+		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+		/*
+		 * XXX this is slightly incorrect: when the task
+		 * utilization decreases, we should delay the total
+		 * utilization change until the task's 0-lag point.
+		 * But this would require setting the task's "inactive
+		 * timer" even when the task is not inactive.
+		 */
+		__dl_clear(dl_b, p->dl.dl_bw, cpus);
+		__dl_add(dl_b, new_bw, cpus);
+		dl_change_utilization(p, new_bw);
+		err = 0;
+	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
+		/*
+		 * Do not decrease the total deadline utilization here,
+		 * switched_from_dl() will take care to do it at the correct
+		 * (0-lag) time.
+		 */
+		err = 0;
+	}
+	raw_spin_unlock(&dl_b->lock);
+
+	return err;
+}
+
+/*
+ * This function initializes the sched_dl_entity of a task that is
+ * becoming SCHED_DEADLINE.
+ *
+ * Only the static values are considered here, the actual runtime and the
+ * absolute deadline will be properly calculated when the task is enqueued
+ * for the first time with its new policy.
+ */
+void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	dl_se->dl_runtime = attr->sched_runtime;
+	dl_se->dl_deadline = attr->sched_deadline;
+	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
+	dl_se->flags = attr->sched_flags;
+	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
+}
+
+void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	attr->sched_priority = p->rt_priority;
+	attr->sched_runtime = dl_se->dl_runtime;
+	attr->sched_deadline = dl_se->dl_deadline;
+	attr->sched_period = dl_se->dl_period;
+	attr->sched_flags = dl_se->flags;
+}
+
+/*
+ * This function validates the new parameters of a -deadline task.
+ * We require the deadline to be non-zero and greater than or equal to
+ * the runtime, and the period to be either zero or greater than or
+ * equal to the deadline. Furthermore, we have to be sure that user
+ * parameters are above the internal resolution of 1us (we check
+ * sched_runtime only, since it is always the smaller one) and below
+ * 2^63 ns (we have to check both sched_deadline and sched_period, as
+ * the latter can be zero).
+ */
+bool __checkparam_dl(const struct sched_attr *attr)
+{
+	/* deadline != 0 */
+	if (attr->sched_deadline == 0)
+		return false;
+
+	/*
+	 * Since we truncate DL_SCALE bits, make sure we're at least
+	 * that big.
+	 */
+	if (attr->sched_runtime < (1ULL << DL_SCALE))
+		return false;
+
+	/*
+	 * Since we use the MSB for wrap-around and sign issues, make
+	 * sure it's not set (mind that period can be equal to zero).
+	 */
+	if (attr->sched_deadline & (1ULL << 63) ||
+	    attr->sched_period & (1ULL << 63))
+		return false;
+
+	/* runtime <= deadline <= period (if period != 0) */
+	if ((attr->sched_period != 0 &&
+	     attr->sched_period < attr->sched_deadline) ||
+	    attr->sched_deadline < attr->sched_runtime)
+		return false;
+
+	return true;
+}
+
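As a quick illustration of those rules, here is a hedged user-space sketch that applies the same four checks to made-up attribute values; the struct and the DL_SCALE constant (10, i.e. a ~1us floor in nanoseconds) only mirror the kernel definitions and are not the real interface.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DL_SCALE 10	/* 2^10 ns ~= 1us, the internal resolution */

struct attr { uint64_t runtime, deadline, period; };

static bool checkparam_dl(const struct attr *a)
{
	if (a->deadline == 0)
		return false;
	if (a->runtime < (1ULL << DL_SCALE))
		return false;
	if ((a->deadline & (1ULL << 63)) || (a->period & (1ULL << 63)))
		return false;
	if ((a->period != 0 && a->period < a->deadline) ||
	    a->deadline < a->runtime)
		return false;
	return true;
}

int main(void)
{
	/* 3ms runtime, 10ms deadline, 10ms period: accepted */
	struct attr ok  = { 3000000, 10000000, 10000000 };
	/* runtime larger than the deadline: rejected */
	struct attr bad = { 12000000, 10000000, 10000000 };

	printf("ok:  %d\nbad: %d\n", checkparam_dl(&ok), checkparam_dl(&bad));
	return 0;
}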
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	dl_se->dl_runtime = 0;
+	dl_se->dl_deadline = 0;
+	dl_se->dl_period = 0;
+	dl_se->flags = 0;
+	dl_se->dl_bw = 0;
+	dl_se->dl_density = 0;
+
+	dl_se->dl_throttled = 0;
+	dl_se->dl_yielded = 0;
+	dl_se->dl_non_contending = 0;
+}
+
+bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	if (dl_se->dl_runtime != attr->sched_runtime ||
+	    dl_se->dl_deadline != attr->sched_deadline ||
+	    dl_se->dl_period != attr->sched_period ||
+	    dl_se->flags != attr->sched_flags)
+		return true;
+
+	return false;
+}
+
+#ifdef CONFIG_SMP
+int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
+{
+	unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+							cs_cpus_allowed);
+	struct dl_bw *dl_b;
+	bool overflow;
+	int cpus, ret;
+	unsigned long flags;
+
+	rcu_read_lock_sched();
+	dl_b = dl_bw_of(dest_cpu);
+	raw_spin_lock_irqsave(&dl_b->lock, flags);
+	cpus = dl_bw_cpus(dest_cpu);
+	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+	if (overflow)
+		ret = -EBUSY;
+	else {
+		/*
+		 * We reserve space for this task in the destination
+		 * root_domain, as we can't fail after this point.
+		 * We will free resources in the source root_domain
+		 * later on (see set_cpus_allowed_dl()).
+		 */
+		__dl_add(dl_b, p->dl.dl_bw, cpus);
+		ret = 0;
+	}
+	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+	rcu_read_unlock_sched();
+	return ret;
+}
+
+int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+				 const struct cpumask *trial)
+{
+	int ret = 1, trial_cpus;
+	struct dl_bw *cur_dl_b;
+	unsigned long flags;
+
+	rcu_read_lock_sched();
+	cur_dl_b = dl_bw_of(cpumask_any(cur));
+	trial_cpus = cpumask_weight(trial);
+
+	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+	if (cur_dl_b->bw != -1 &&
+	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+		ret = 0;
+	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+	rcu_read_unlock_sched();
+	return ret;
+}
+
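The shrink test above is a one-line bandwidth comparison: the per-CPU cap times the number of CPUs left in the trial mask must still cover the deadline bandwidth already admitted. A small, hedged numeric sketch of that check (the 95% cap and the 1.5-CPU total are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20
#define BW_UNIT  (1 << BW_SHIFT)

int main(void)
{
	/* illustrative root-domain state: 95% per-CPU cap, and deadline
	 * tasks worth 1.5 CPUs of bandwidth already admitted */
	int64_t bw = (int64_t)BW_UNIT * 95 / 100;
	int64_t total_bw = (int64_t)BW_UNIT * 3 / 2;

	for (int trial_cpus = 1; trial_cpus <= 3; trial_cpus++)
		printf("shrink to %d CPU(s): %s\n", trial_cpus,
		       bw * trial_cpus < total_bw ? "rejected" : "ok");
	return 0;
}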
+bool dl_cpu_busy(unsigned int cpu)
+{
+	unsigned long flags;
+	struct dl_bw *dl_b;
+	bool overflow;
+	int cpus;
+
+	rcu_read_lock_sched();
+	dl_b = dl_bw_of(cpu);
+	raw_spin_lock_irqsave(&dl_b->lock, flags);
+	cpus = dl_bw_cpus(cpu);
+	overflow = __dl_overflow(dl_b, cpus, 0, 0);
+	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+	rcu_read_unlock_sched();
+	return overflow;
+}
+#endif
+
 #ifdef CONFIG_SCHED_DEBUG
 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 38f0193..4fa66de 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -552,15 +552,21 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
+#define PU(x) \
+	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
 #define PN(x) \
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 
-	P(rt_nr_running);
+	PU(rt_nr_running);
+#ifdef CONFIG_SMP
+	PU(rt_nr_migratory);
+#endif
 	P(rt_throttled);
 	PN(rt_time);
 	PN(rt_runtime);
 
 #undef PN
+#undef PU
 #undef P
 }
 
@@ -569,14 +575,21 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 	struct dl_bw *dl_bw;
 
 	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
-	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
+
+#define PU(x) \
+	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
+
+	PU(dl_nr_running);
 #ifdef CONFIG_SMP
+	PU(dl_nr_migratory);
 	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
 #else
 	dl_bw = &dl_rq->dl_bw;
 #endif
 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
 	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
+
+#undef PU
 }
 
 extern __read_mostly int sched_clock_running;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d711093..008c514 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -369,8 +369,9 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
+	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
+				 leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline struct cfs_rq *
@@ -463,8 +464,8 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 }
 
-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
-		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
+#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
+		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
@@ -1381,7 +1382,6 @@ static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -2469,7 +2469,8 @@ void task_numa_work(struct callback_head *work)
 		return;
 
 
-	down_read(&mm->mmap_sem);
+	if (!down_read_trylock(&mm->mmap_sem))
+		return;
 	vma = find_vma(mm, start);
 	if (!vma) {
 		reset_ptenuma_scan(p);
@@ -2584,6 +2585,60 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
 		}
 	}
 }
+
+/*
+ * Can a task be moved from prev_cpu to this_cpu without causing a load
+ * imbalance that would trigger the load balancer?
+ */
+static inline bool numa_wake_affine(struct sched_domain *sd,
+				    struct task_struct *p, int this_cpu,
+				    int prev_cpu, int sync)
+{
+	struct numa_stats prev_load, this_load;
+	s64 this_eff_load, prev_eff_load;
+
+	update_numa_stats(&prev_load, cpu_to_node(prev_cpu));
+	update_numa_stats(&this_load, cpu_to_node(this_cpu));
+
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync) {
+		unsigned long current_load = task_h_load(current);
+
+		if (this_load.load > current_load)
+			this_load.load -= current_load;
+		else
+			this_load.load = 0;
+	}
+
+	/*
+	 * In low-load situations, where this_cpu's node is idle due to the
+	 * sync cause above having dropped this_load.load to 0, move the task.
+	 * Moving to an idle socket will not create a bad imbalance.
+	 *
+	 * Otherwise check if the nodes are near enough in load to allow this
+	 * task to be woken on this_cpu's node.
+	 */
+	if (this_load.load > 0) {
+		unsigned long task_load = task_h_load(p);
+
+		this_eff_load = 100;
+		this_eff_load *= prev_load.compute_capacity;
+
+		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+		prev_eff_load *= this_load.compute_capacity;
+
+		this_eff_load *= this_load.load + task_load;
+		prev_eff_load *= prev_load.load - task_load;
+
+		return this_eff_load <= prev_eff_load;
+	}
+
+	return true;
+}
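The comparison is the same scaled-percentage test the old per-CPU wake_affine() path used, only applied to whole NUMA nodes: the previous node gets a handicap of half the domain's imbalance_pct. The following is a hedged user-space sketch of that decision; update_numa_stats() and task_h_load() are replaced by made-up numbers, and the imbalance_pct value is purely illustrative.

#include <stdio.h>
#include <stdbool.h>

/* stand-in for struct numa_stats: per-node load and capacity */
struct numa_stats { long load; long compute_capacity; };

static bool numa_wake_affine(int imbalance_pct,
			     struct numa_stats this_load,
			     struct numa_stats prev_load,
			     long task_load, long current_load, bool sync)
{
	long long this_eff_load, prev_eff_load;

	if (sync)	/* waker is about to sleep: discount its load */
		this_load.load -= (current_load < this_load.load) ?
					current_load : this_load.load;

	if (this_load.load <= 0)
		return true;	/* waking node is idle: always affine */

	this_eff_load = 100;
	this_eff_load *= prev_load.compute_capacity;
	prev_eff_load = 100 + (imbalance_pct - 100) / 2;
	prev_eff_load *= this_load.compute_capacity;

	this_eff_load *= this_load.load + task_load;
	prev_eff_load *= prev_load.load - task_load;

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	struct numa_stats this_node = { .load = 2048, .compute_capacity = 4096 };
	struct numa_stats prev_node = { .load = 3072, .compute_capacity = 4096 };

	/* illustrative imbalance_pct; real values come from the sched_domain */
	printf("pull to waking node: %d\n",
	       numa_wake_affine(117, this_node, prev_node, 512, 0, false));
	return 0;
}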
 #else
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
@@ -2596,6 +2651,15 @@ static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
 }
+
+#ifdef CONFIG_SMP
+static inline bool numa_wake_affine(struct sched_domain *sd,
+				    struct task_struct *p, int this_cpu,
+				    int prev_cpu, int sync)
+{
+	return true;
+}
+#endif /* !SMP */
 #endif /* CONFIG_NUMA_BALANCING */
 
 static void
@@ -2916,12 +2980,12 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	/*
 	 * Step 2: update *_avg.
 	 */
-	sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
+	sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib);
 	if (cfs_rq) {
 		cfs_rq->runnable_load_avg =
-			div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
+			div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib);
 	}
-	sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
+	sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib);
 
 	return 1;
 }
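The new divisor removes a systematic under-estimate: the *_sum fields can only ever reach LOAD_AVG_MAX - 1024 + period_contrib, because at most period_contrib microseconds of the current 1024us period have been accumulated, so dividing by the full LOAD_AVG_MAX keeps even a 100%-busy task's average short of the maximum. A hedged user-space illustration of the worst-case error, using the kernel's LOAD_AVG_MAX constant:

#include <stdio.h>

#define LOAD_AVG_MAX	47742	/* maximum attainable *_sum in the PELT series */

int main(void)
{
	unsigned int period_contrib;

	/*
	 * Worst case is just after a period rollover (period_contrib == 0):
	 * the old divisor then under-states a fully busy task by ~2%.
	 */
	for (period_contrib = 0; period_contrib <= 1023; period_contrib += 341) {
		unsigned long max_sum = LOAD_AVG_MAX - 1024 + period_contrib;

		printf("period_contrib=%4u  old avg=%3lu%%  new avg=%3lu%%\n",
		       period_contrib,
		       (100 * max_sum) / LOAD_AVG_MAX,	/* old divisor */
		       (100 * max_sum) / max_sum);	/* new divisor */
	}
	return 0;
}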
@@ -2982,8 +3046,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
  * differential update where we store the last value we propagated. This in
  * turn allows skipping updates if the differential is 'small'.
  *
- * Updating tg's load_avg is necessary before update_cfs_share() (which is
- * done) and effective_load() (which is not done because it is too costly).
+ * Updating tg's load_avg is necessary before update_cfs_share().
  */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {
@@ -3563,7 +3626,7 @@ static inline void check_schedstat_required(void)
 			trace_sched_stat_runtime_enabled())  {
 		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
 			     "stat_blocked and stat_runtime require the "
-			     "kernel parameter schedstats=enabled or "
+			     "kernel parameter schedstats=enable or "
 			     "kernel.sched_schedstats=1\n");
 	}
 #endif
@@ -4642,24 +4705,43 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
+/*
+ * Both these cpu hotplug callbacks race against unregister_fair_sched_group()
+ *
+ * The race is harmless, since modifying bandwidth settings of unhooked group
+ * bits doesn't do much.
+ */
+
+/* cpu online callback */
 static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
 		raw_spin_lock(&cfs_b->lock);
 		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
 		raw_spin_unlock(&cfs_b->lock);
 	}
+	rcu_read_unlock();
 }
 
+/* cpu offline callback */
 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq;
+	struct task_group *tg;
 
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
+	lockdep_assert_held(&rq->lock);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tg, &task_groups, list) {
+		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+
 		if (!cfs_rq->runtime_enabled)
 			continue;
 
@@ -4677,6 +4759,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 		if (cfs_rq_throttled(cfs_rq))
 			unthrottle_cfs_rq(cfs_rq);
 	}
+	rcu_read_unlock();
 }
 
 #else /* CONFIG_CFS_BANDWIDTH */
@@ -5215,126 +5298,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/*
- * effective_load() calculates the load change as seen from the root_task_group
- *
- * Adding load to a group doesn't make a group heavier, but can cause movement
- * of group shares between cpus. Assuming the shares were perfectly aligned one
- * can calculate the shift in shares.
- *
- * Calculate the effective load difference if @wl is added (subtracted) to @tg
- * on this @cpu and results in a total addition (subtraction) of @wg to the
- * total group weight.
- *
- * Given a runqueue weight distribution (rw_i) we can compute a shares
- * distribution (s_i) using:
- *
- *   s_i = rw_i / \Sum rw_j						(1)
- *
- * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
- * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
- * shares distribution (s_i):
- *
- *   rw_i = {   2,   4,   1,   0 }
- *   s_i  = { 2/7, 4/7, 1/7,   0 }
- *
- * As per wake_affine() we're interested in the load of two CPUs (the CPU the
- * task used to run on and the CPU the waker is running on), we need to
- * compute the effect of waking a task on either CPU and, in case of a sync
- * wakeup, compute the effect of the current task going to sleep.
- *
- * So for a change of @wl to the local @cpu with an overall group weight change
- * of @wl we can compute the new shares distribution (s'_i) using:
- *
- *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
- *
- * Suppose we're interested in CPUs 0 and 1, and want to compute the load
- * differences in waking a task to CPU 0. The additional task changes the
- * weight and shares distributions like:
- *
- *   rw'_i = {   3,   4,   1,   0 }
- *   s'_i  = { 3/8, 4/8, 1/8,   0 }
- *
- * We can then compute the difference in effective weight by using:
- *
- *   dw_i = S * (s'_i - s_i)						(3)
- *
- * Where 'S' is the group weight as seen by its parent.
- *
- * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
- * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
- * 4/7) times the weight of the group.
- */
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	struct sched_entity *se = tg->se[cpu];
-
-	if (!tg->parent)	/* the trivial, non-cgroup case */
-		return wl;
-
-	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = se->my_q;
-		long W, w = cfs_rq_load_avg(cfs_rq);
-
-		tg = cfs_rq->tg;
-
-		/*
-		 * W = @wg + \Sum rw_j
-		 */
-		W = wg + atomic_long_read(&tg->load_avg);
-
-		/* Ensure \Sum rw_j >= rw_i */
-		W -= cfs_rq->tg_load_avg_contrib;
-		W += w;
-
-		/*
-		 * w = rw_i + @wl
-		 */
-		w += wl;
-
-		/*
-		 * wl = S * s'_i; see (2)
-		 */
-		if (W > 0 && w < W)
-			wl = (w * (long)scale_load_down(tg->shares)) / W;
-		else
-			wl = scale_load_down(tg->shares);
-
-		/*
-		 * Per the above, wl is the new se->load.weight value; since
-		 * those are clipped to [MIN_SHARES, ...) do so now. See
-		 * calc_cfs_shares().
-		 */
-		if (wl < MIN_SHARES)
-			wl = MIN_SHARES;
-
-		/*
-		 * wl = dw_i = S * (s'_i - s_i); see (3)
-		 */
-		wl -= se->avg.load_avg;
-
-		/*
-		 * Recursively apply this logic to all parent groups to compute
-		 * the final effective load change on the root group. Since
-		 * only the @tg group gets extra weight, all parent groups can
-		 * only redistribute existing shares. @wl is the shift in shares
-		 * resulting from this level per the above.
-		 */
-		wg = 0;
-	}
-
-	return wl;
-}
-#else
-
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	return wl;
-}
-
-#endif
-
 static void record_wakee(struct task_struct *p)
 {
 	/*
@@ -5385,67 +5348,25 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
-	s64 this_load, load;
-	s64 this_eff_load, prev_eff_load;
-	int idx, this_cpu;
-	struct task_group *tg;
-	unsigned long weight;
-	int balanced;
-
-	idx	  = sd->wake_idx;
-	this_cpu  = smp_processor_id();
-	load	  = source_load(prev_cpu, idx);
-	this_load = target_load(this_cpu, idx);
+	int this_cpu = smp_processor_id();
+	bool affine = false;
 
 	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current CPU:
+	 * Common case: CPUs are in the same socket, and select_idle_sibling()
+	 * will do its thing regardless of what we return:
 	 */
-	if (sync) {
-		tg = task_group(current);
-		weight = current->se.avg.load_avg;
-
-		this_load += effective_load(tg, this_cpu, -weight, -weight);
-		load += effective_load(tg, prev_cpu, 0, -weight);
-	}
-
-	tg = task_group(p);
-	weight = p->se.avg.load_avg;
-
-	/*
-	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
-	 * due to the sync cause above having dropped this_load to 0, we'll
-	 * always have an imbalance, but there's really nothing you can do
-	 * about that, so that's good too.
-	 *
-	 * Otherwise check if either cpus are near enough in load to allow this
-	 * task to be woken on this_cpu.
-	 */
-	this_eff_load = 100;
-	this_eff_load *= capacity_of(prev_cpu);
-
-	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-	prev_eff_load *= capacity_of(this_cpu);
-
-	if (this_load > 0) {
-		this_eff_load *= this_load +
-			effective_load(tg, this_cpu, weight, weight);
-
-		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
-	}
-
-	balanced = this_eff_load <= prev_eff_load;
+	if (cpus_share_cache(prev_cpu, this_cpu))
+		affine = true;
+	else
+		affine = numa_wake_affine(sd, p, this_cpu, prev_cpu, sync);
 
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
+	if (affine) {
+		schedstat_inc(sd->ttwu_move_affine);
+		schedstat_inc(p->se.statistics.nr_wakeups_affine);
+	}
 
-	if (!balanced)
-		return 0;
-
-	schedstat_inc(sd->ttwu_move_affine);
-	schedstat_inc(p->se.statistics.nr_wakeups_affine);
-
-	return 1;
+	return affine;
 }
 
 static inline int task_util(struct task_struct *p);
@@ -5484,12 +5405,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		int i;
 
 		/* Skip over this group if it has no CPUs allowed */
-		if (!cpumask_intersects(sched_group_cpus(group),
+		if (!cpumask_intersects(sched_group_span(group),
 					&p->cpus_allowed))
 			continue;
 
 		local_group = cpumask_test_cpu(this_cpu,
-					       sched_group_cpus(group));
+					       sched_group_span(group));
 
 		/*
 		 * Tally up the load of all CPUs in the group and find
@@ -5499,7 +5420,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		runnable_load = 0;
 		max_spare_cap = 0;
 
-		for_each_cpu(i, sched_group_cpus(group)) {
+		for_each_cpu(i, sched_group_span(group)) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -5602,10 +5523,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
 	/* Check if we have any choice: */
 	if (group->group_weight == 1)
-		return cpumask_first(sched_group_cpus(group));
+		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -5640,43 +5561,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-		(cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -5736,7 +5620,7 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
+	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
 		return -1;
@@ -5746,7 +5630,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
 	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5809,27 +5693,38 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct sched_domain *this_sd;
-	u64 avg_cost, avg_idle = this_rq()->avg_idle;
+	u64 avg_cost, avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu, nr = INT_MAX;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
 		return -1;
 
-	avg_cost = this_sd->avg_scan_cost;
-
 	/*
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particular is sensitive here.
 	 */
-	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
+	avg_idle = this_rq()->avg_idle / 512;
+	avg_cost = this_sd->avg_scan_cost + 1;
+
+	if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
 		return -1;
 
+	if (sched_feat(SIS_PROP)) {
+		u64 span_avg = sd->span_weight * avg_idle;
+		if (span_avg > 4*avg_cost)
+			nr = div_u64(span_avg, avg_cost);
+		else
+			nr = 4;
+	}
+
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+		if (!--nr)
+			return -1;
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
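With SIS_PROP the idle-CPU scan is given a budget proportional to how idle this CPU has recently been relative to the measured per-CPU scan cost, instead of the previous all-or-nothing behaviour. A rough, hedged sketch of that budget calculation with invented numbers:

#include <stdio.h>
#include <stdint.h>

/* how many CPUs select_idle_cpu() would be willing to scan */
static int sis_prop_budget(uint64_t avg_idle_ns, uint64_t avg_scan_cost_ns,
			   unsigned int span_weight)
{
	uint64_t avg_idle = avg_idle_ns / 512;		/* fuzz factor */
	uint64_t avg_cost = avg_scan_cost_ns + 1;
	uint64_t span_avg = (uint64_t)span_weight * avg_idle;

	if (span_avg > 4 * avg_cost)
		return span_avg / avg_cost;
	return 4;					/* minimum budget */
}

int main(void)
{
	/* made-up numbers: 80us average idle time, 600ns per-CPU scan cost,
	 * a 32-CPU LLC domain */
	printf("budget = %d CPUs\n", sis_prop_budget(80000, 600, 32));
	return 0;
}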
@@ -6011,11 +5906,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 	if (affine_sd) {
 		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
+		if (cpu == prev_cpu)
+			goto pick_cpu;
+
+		if (wake_affine(affine_sd, p, prev_cpu, sync))
 			new_cpu = cpu;
 	}
 
 	if (!sd) {
+ pick_cpu:
 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
@@ -6168,8 +6067,11 @@ static void set_last_buddy(struct sched_entity *se)
 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
 		return;
 
-	for_each_sched_entity(se)
+	for_each_sched_entity(se) {
+		if (SCHED_WARN_ON(!se->on_rq))
+			return;
 		cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
@@ -6177,8 +6079,11 @@ static void set_next_buddy(struct sched_entity *se)
 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
 		return;
 
-	for_each_sched_entity(se)
+	for_each_sched_entity(se) {
+		if (SCHED_WARN_ON(!se->on_rq))
+			return;
 		cfs_rq_of(se)->next = se;
+	}
 }
 
 static void set_skip_buddy(struct sched_entity *se)
@@ -6686,6 +6591,10 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 	if (dst_nid == p->numa_preferred_nid)
 		return 0;
 
+	/* Leaving a core idle is often worse than degrading locality. */
+	if (env->idle != CPU_NOT_IDLE)
+		return -1;
+
 	if (numa_group) {
 		src_faults = group_faults(p, src_nid);
 		dst_faults = group_faults(p, dst_nid);
@@ -6970,10 +6879,28 @@ static void attach_tasks(struct lb_env *env)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load.weight)
+		return false;
+
+	if (cfs_rq->avg.load_sum)
+		return false;
+
+	if (cfs_rq->avg.util_sum)
+		return false;
+
+	if (cfs_rq->runnable_load_sum)
+		return false;
+
+	return true;
+}
+
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
@@ -6983,7 +6910,7 @@ static void update_blocked_averages(int cpu)
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
+	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
 		struct sched_entity *se;
 
 		/* throttled entities do not contribute to load */
@@ -6997,6 +6924,13 @@ static void update_blocked_averages(int cpu)
 		se = cfs_rq->tg->se[cpu];
 		if (se && !skip_blocked_update(se))
 			update_load_avg(se, 0);
+
+		/*
+		 * There can be a lot of idle CPU cgroups.  Don't let fully
+		 * decayed cfs_rqs linger on the list.
+		 */
+		if (cfs_rq_is_decayed(cfs_rq))
+			list_del_leaf_cfs_rq(cfs_rq);
 	}
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7229,7 +7163,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 		 * span the current group.
 		 */
 
-		for_each_cpu(cpu, sched_group_cpus(sdg)) {
+		for_each_cpu(cpu, sched_group_span(sdg)) {
 			struct sched_group_capacity *sgc;
 			struct rq *rq = cpu_rq(cpu);
 
@@ -7408,7 +7342,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 	memset(sgs, 0, sizeof(*sgs));
 
-	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		/* Bias balancing toward cpus of our domain */
@@ -7572,7 +7506,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		struct sg_lb_stats *sgs = &tmp_sgs;
 		int local_group;
 
-		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
+		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
 		if (local_group) {
 			sds->local = sg;
 			sgs = local;
@@ -7927,7 +7861,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
-	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
 
@@ -8033,7 +7967,6 @@ static int active_load_balance_cpu_stop(void *data);
 static int should_we_balance(struct lb_env *env)
 {
 	struct sched_group *sg = env->sd->groups;
-	struct cpumask *sg_cpus, *sg_mask;
 	int cpu, balance_cpu = -1;
 
 	/*
@@ -8043,11 +7976,9 @@ static int should_we_balance(struct lb_env *env)
 	if (env->idle == CPU_NEWLY_IDLE)
 		return 1;
 
-	sg_cpus = sched_group_cpus(sg);
-	sg_mask = sched_group_mask(sg);
 	/* Try to find first idle cpu */
-	for_each_cpu_and(cpu, sg_cpus, env->cpus) {
-		if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+	for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
+		if (!idle_cpu(cpu))
 			continue;
 
 		balance_cpu = cpu;
@@ -8083,7 +8014,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.sd		= sd,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
-		.dst_grpmask    = sched_group_cpus(sd->groups),
+		.dst_grpmask    = sched_group_span(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
 		.cpus		= cpus,
@@ -8659,6 +8590,10 @@ void nohz_balance_enter_idle(int cpu)
 	if (!cpu_active(cpu))
 		return;
 
+	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
+	if (!is_housekeeping_cpu(cpu))
+		return;
+
 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 		return;
 
@@ -9523,10 +9458,10 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SCHED_DEBUG
 void print_cfs_stats(struct seq_file *m, int cpu)
 {
-	struct cfs_rq *cfs_rq;
+	struct cfs_rq *cfs_rq, *pos;
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
+	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
 		print_cfs_rq(m, cpu, cfs_rq);
 	rcu_read_unlock();
 }
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 11192e0..d3fb155 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -55,6 +55,7 @@ SCHED_FEAT(TTWU_QUEUE, true)
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
  */
 SCHED_FEAT(SIS_AVG_CPU, false)
+SCHED_FEAT(SIS_PROP, true)
 
 /*
  * Issue a WARN when we do multiple update_rq_clock() calls
@@ -76,7 +77,6 @@ SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
 SCHED_FEAT(RT_PUSH_IPI, true)
 #endif
 
-SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ef63adc..6c23e30 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -219,6 +219,7 @@ static void do_idle(void)
 	 */
 
 	__current_set_polling();
+	quiet_vmstat();
 	tick_nohz_idle_enter();
 
 	while (!need_resched()) {
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index f15fb2b..f14716a 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -117,7 +117,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  * load-average relies on per-cpu sampling from the tick, it is affected by
  * NO_HZ.
  *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
  * entering NO_HZ state such that we can include this as an 'extra' cpu delta
  * when we read the global state.
  *
@@ -126,7 +126,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  *  - When we go NO_HZ idle during the window, we can negate our sample
  *    contribution, causing under-accounting.
  *
- *    We avoid this by keeping two idle-delta counters and flipping them
+ *    We avoid this by keeping two NO_HZ-delta counters and flipping them
  *    when the window starts, thus separating old and new NO_HZ load.
  *
  *    The only trick is the slight shift in index flip for read vs write.
@@ -137,22 +137,22 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  *    r:0 0 1           1 0           0 1           1 0
  *    w:0 1 1           0 0           1 1           0 0
  *
- *    This ensures we'll fold the old idle contribution in this window while
+ *    This ensures we'll fold the old NO_HZ contribution in this window while
  *    accumulating the new one.
  *
- *  - When we wake up from NO_HZ idle during the window, we push up our
+ *  - When we wake up from NO_HZ during the window, we push up our
  *    contribution, since we effectively move our sample point to a known
  *    busy state.
  *
  *    This is solved by pushing the window forward, and thus skipping the
- *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    sample, for this cpu (effectively using the NO_HZ-delta for this cpu which
  *    was in effect at the time the window opened). This also solves the issue
- *    of having to deal with a cpu having been in NOHZ idle for multiple
- *    LOAD_FREQ intervals.
+ *    of having to deal with a cpu having been in NO_HZ for multiple LOAD_FREQ
+ *    intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_idle[2];
+static atomic_long_t calc_load_nohz[2];
 static int calc_load_idx;
 
 static inline int calc_load_write_idx(void)
@@ -167,7 +167,7 @@ static inline int calc_load_write_idx(void)
 
 	/*
 	 * If the folding window started, make sure we start writing in the
-	 * next idle-delta.
+	 * next NO_HZ-delta.
 	 */
 	if (!time_before(jiffies, READ_ONCE(calc_load_update)))
 		idx++;
@@ -180,24 +180,24 @@ static inline int calc_load_read_idx(void)
 	return calc_load_idx & 1;
 }
 
-void calc_load_enter_idle(void)
+void calc_load_nohz_start(void)
 {
 	struct rq *this_rq = this_rq();
 	long delta;
 
 	/*
-	 * We're going into NOHZ mode, if there's any pending delta, fold it
-	 * into the pending idle delta.
+	 * We're going into NO_HZ mode, if there's any pending delta, fold it
+	 * into the pending NO_HZ delta.
 	 */
 	delta = calc_load_fold_active(this_rq, 0);
 	if (delta) {
 		int idx = calc_load_write_idx();
 
-		atomic_long_add(delta, &calc_load_idle[idx]);
+		atomic_long_add(delta, &calc_load_nohz[idx]);
 	}
 }
 
-void calc_load_exit_idle(void)
+void calc_load_nohz_stop(void)
 {
 	struct rq *this_rq = this_rq();
 
@@ -217,13 +217,13 @@ void calc_load_exit_idle(void)
 		this_rq->calc_load_update += LOAD_FREQ;
 }
 
-static long calc_load_fold_idle(void)
+static long calc_load_nohz_fold(void)
 {
 	int idx = calc_load_read_idx();
 	long delta = 0;
 
-	if (atomic_long_read(&calc_load_idle[idx]))
-		delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+	if (atomic_long_read(&calc_load_nohz[idx]))
+		delta = atomic_long_xchg(&calc_load_nohz[idx], 0);
 
 	return delta;
 }
@@ -299,9 +299,9 @@ calc_load_n(unsigned long load, unsigned long exp,
 
 /*
  * NO_HZ can leave us missing all per-cpu ticks calling
- * calc_load_account_active(), but since an idle CPU folds its delta into
- * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
- * in the pending idle delta if our idle period crossed a load cycle boundary.
+ * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
+ * calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
+ * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
  *
  * Once we've updated the global active value, we need to apply the exponential
  * weights adjusted to the number of cycles missed.
@@ -330,7 +330,7 @@ static void calc_global_nohz(void)
 	}
 
 	/*
-	 * Flip the idle index...
+	 * Flip the NO_HZ index...
 	 *
 	 * Make sure we first write the new time then flip the index, so that
 	 * calc_load_write_idx() will see the new time when it reads the new
@@ -341,7 +341,7 @@ static void calc_global_nohz(void)
 }
 #else /* !CONFIG_NO_HZ_COMMON */
 
-static inline long calc_load_fold_idle(void) { return 0; }
+static inline long calc_load_nohz_fold(void) { return 0; }
 static inline void calc_global_nohz(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
@@ -362,9 +362,9 @@ void calc_global_load(unsigned long ticks)
 		return;
 
 	/*
-	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
+	 * Fold the 'old' NO_HZ-delta to include all NO_HZ cpus.
 	 */
-	delta = calc_load_fold_idle();
+	delta = calc_load_nohz_fold();
 	if (delta)
 		atomic_long_add(delta, &calc_load_tasks);
 
@@ -378,7 +378,8 @@ void calc_global_load(unsigned long ticks)
 	WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
 
 	/*
-	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+	 * In case we went to NO_HZ for multiple LOAD_FREQ intervals,
+	 * catch up in bulk.
 	 */
 	calc_global_nohz();
 }
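The NO_HZ renames above don't change the mechanism: deltas from CPUs entering NO_HZ go into one of two buckets, the global update drains the other, and flipping the index at the window boundary keeps old and new contributions apart (the r/w table in the comment earlier in this file). Below is a toy user-space model of that double buffer, not the kernel code; the window_started flag stands in for the jiffies/time_before() check.

#include <stdio.h>
#include <stdatomic.h>

static atomic_long calc_load_nohz[2];
static int calc_load_idx;
static int window_started;	/* stand-in for !time_before(jiffies, ...) */

static int write_idx(void) { return (calc_load_idx + window_started) & 1; }
static int read_idx(void)  { return calc_load_idx & 1; }

/* CPU goes into NO_HZ with a pending delta: fold it into the write bucket */
static void cpu_enters_nohz(long delta)
{
	atomic_fetch_add(&calc_load_nohz[write_idx()], delta);
}

/* global load update: drain the read bucket */
static long global_fold(void)
{
	return atomic_exchange(&calc_load_nohz[read_idx()], 0);
}

int main(void)
{
	cpu_enters_nohz(3);	/* idles before the fold window opens */
	window_started = 1;
	cpu_enters_nohz(2);	/* later idlers land in the other bucket */

	printf("folded now:   %ld\n", global_fold());	/* 3 */
	calc_load_idx++;	/* flip at the window start */
	window_started = 0;
	printf("folded later: %ld\n", global_fold());	/* 2 */
	return 0;
}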
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 979b734..45caf93 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -840,6 +840,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
+		int skip;
+
+		/*
+		 * When span == cpu_online_mask, taking each rq->lock
+		 * can be time-consuming. Try to avoid it when possible.
+		 */
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		if (skip)
+			continue;
 
 		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {
@@ -1819,7 +1830,7 @@ static int push_rt_task(struct rq *rq)
 		 * pushing.
 		 */
 		task = pick_next_pushable_task(rq);
-		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+		if (task == next_task) {
 			/*
 			 * The task hasn't migrated, and is still the next
 			 * eligible task, but we failed to find a run-queue
@@ -2438,6 +2449,316 @@ const struct sched_class rt_sched_class = {
 	.update_curr		= update_curr_rt,
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+/*
+ * Ensure that the real time constraints are schedulable.
+ */
+static DEFINE_MUTEX(rt_constraints_mutex);
+
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+	struct task_struct *g, *p;
+
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
+	for_each_process_thread(g, p) {
+		if (rt_task(p) && task_group(p) == tg)
+			return 1;
+	}
+
+	return 0;
+}
+
+struct rt_schedulable_data {
+	struct task_group *tg;
+	u64 rt_period;
+	u64 rt_runtime;
+};
+
+static int tg_rt_schedulable(struct task_group *tg, void *data)
+{
+	struct rt_schedulable_data *d = data;
+	struct task_group *child;
+	unsigned long total, sum = 0;
+	u64 period, runtime;
+
+	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
+	runtime = tg->rt_bandwidth.rt_runtime;
+
+	if (tg == d->tg) {
+		period = d->rt_period;
+		runtime = d->rt_runtime;
+	}
+
+	/*
+	 * Cannot have more runtime than the period.
+	 */
+	if (runtime > period && runtime != RUNTIME_INF)
+		return -EINVAL;
+
+	/*
+	 * Ensure we don't starve existing RT tasks.
+	 */
+	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
+		return -EBUSY;
+
+	total = to_ratio(period, runtime);
+
+	/*
+	 * Nobody can have more than the global setting allows.
+	 */
+	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
+		return -EINVAL;
+
+	/*
+	 * The sum of our children's runtime should not exceed our own.
+	 */
+	list_for_each_entry_rcu(child, &tg->children, siblings) {
+		period = ktime_to_ns(child->rt_bandwidth.rt_period);
+		runtime = child->rt_bandwidth.rt_runtime;
+
+		if (child == d->tg) {
+			period = d->rt_period;
+			runtime = d->rt_runtime;
+		}
+
+		sum += to_ratio(period, runtime);
+	}
+
+	if (sum > total)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
+{
+	int ret;
+
+	struct rt_schedulable_data data = {
+		.tg = tg,
+		.rt_period = period,
+		.rt_runtime = runtime,
+	};
+
+	rcu_read_lock();
+	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_set_rt_bandwidth(struct task_group *tg,
+		u64 rt_period, u64 rt_runtime)
+{
+	int i, err = 0;
+
+	/*
+	 * Disallowing the root group RT runtime is BAD; it would prevent the
+	 * kernel from creating (and/or operating) RT threads.
+	 */
+	if (tg == &root_task_group && rt_runtime == 0)
+		return -EINVAL;
+
+	/* No period doesn't make any sense. */
+	if (rt_period == 0)
+		return -EINVAL;
+
+	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	err = __rt_schedulable(tg, rt_period, rt_runtime);
+	if (err)
+		goto unlock;
+
+	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
+	tg->rt_bandwidth.rt_runtime = rt_runtime;
+
+	for_each_possible_cpu(i) {
+		struct rt_rq *rt_rq = tg->rt_rq[i];
+
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_runtime = rt_runtime;
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+	}
+	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+unlock:
+	read_unlock(&tasklist_lock);
+	mutex_unlock(&rt_constraints_mutex);
+
+	return err;
+}
+
+int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
+{
+	u64 rt_runtime, rt_period;
+
+	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
+	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+	if (rt_runtime_us < 0)
+		rt_runtime = RUNTIME_INF;
+
+	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
+}
+
+long sched_group_rt_runtime(struct task_group *tg)
+{
+	u64 rt_runtime_us;
+
+	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
+		return -1;
+
+	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
+	do_div(rt_runtime_us, NSEC_PER_USEC);
+	return rt_runtime_us;
+}
+
+int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
+{
+	u64 rt_runtime, rt_period;
+
+	rt_period = rt_period_us * NSEC_PER_USEC;
+	rt_runtime = tg->rt_bandwidth.rt_runtime;
+
+	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
+}
+
+long sched_group_rt_period(struct task_group *tg)
+{
+	u64 rt_period_us;
+
+	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
+	do_div(rt_period_us, NSEC_PER_USEC);
+	return rt_period_us;
+}
+
+static int sched_rt_global_constraints(void)
+{
+	int ret = 0;
+
+	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	ret = __rt_schedulable(NULL, 0, 0);
+	read_unlock(&tasklist_lock);
+	mutex_unlock(&rt_constraints_mutex);
+
+	return ret;
+}
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
+#else /* !CONFIG_RT_GROUP_SCHED */
+static int sched_rt_global_constraints(void)
+{
+	unsigned long flags;
+	int i;
+
+	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+	for_each_possible_cpu(i) {
+		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
+
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		rt_rq->rt_runtime = global_rt_runtime();
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+	}
+	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+
+	return 0;
+}
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static int sched_rt_global_validate(void)
+{
+	if (sysctl_sched_rt_period <= 0)
+		return -EINVAL;
+
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void sched_rt_do_global(void)
+{
+	def_rt_bandwidth.rt_runtime = global_rt_runtime();
+	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
+}
+
+int sched_rt_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int old_period, old_runtime;
+	static DEFINE_MUTEX(mutex);
+	int ret;
+
+	mutex_lock(&mutex);
+	old_period = sysctl_sched_rt_period;
+	old_runtime = sysctl_sched_rt_runtime;
+
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	if (!ret && write) {
+		ret = sched_rt_global_validate();
+		if (ret)
+			goto undo;
+
+		ret = sched_dl_global_validate();
+		if (ret)
+			goto undo;
+
+		ret = sched_rt_global_constraints();
+		if (ret)
+			goto undo;
+
+		sched_rt_do_global();
+		sched_dl_do_global();
+	}
+	if (0) {
+undo:
+		sysctl_sched_rt_period = old_period;
+		sysctl_sched_rt_runtime = old_runtime;
+	}
+	mutex_unlock(&mutex);
+
+	return ret;
+}
+
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/*
+	 * Make sure that internally we keep jiffies.
+	 * Also, writing zero resets the timeslice to the default:
+	 */
+	if (!ret && write) {
+		sched_rr_timeslice =
+			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
+			msecs_to_jiffies(sysctl_sched_rr_timeslice);
+	}
+	mutex_unlock(&mutex);
+	return ret;
+}
+
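For reference, sysctl_sched_rr_timeslice is exposed in milliseconds while sched_rr_timeslice is kept in jiffies, and a zero or negative write falls back to RR_TIMESLICE (100ms worth of jiffies). A small, hedged sketch of that conversion; the HZ value and the simplified msecs_to_jiffies() are assumptions of the sketch, not the kernel's implementation.

#include <stdio.h>

#define HZ		250			/* assumed config for this sketch */
#define RR_TIMESLICE	(100 * HZ / 1000)	/* default: 100ms in jiffies */

static int msecs_to_jiffies(int ms)
{
	return (ms * HZ + 999) / 1000;		/* crude round-up, not the kernel's */
}

static int rr_handler_write(int sysctl_ms)
{
	return sysctl_ms <= 0 ? RR_TIMESLICE : msecs_to_jiffies(sysctl_ms);
}

int main(void)
{
	printf("write 0   -> %d jiffies (default)\n", rr_handler_write(0));
	printf("write 30  -> %d jiffies\n", rr_handler_write(30));
	printf("write 400 -> %d jiffies\n", rr_handler_write(400));
	return 0;
}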
 #ifdef CONFIG_SCHED_DEBUG
 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6dda2aa..eeef1a3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -39,9 +39,9 @@
 #include "cpuacct.h"
 
 #ifdef CONFIG_SCHED_DEBUG
-#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
+# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
 #else
-#define SCHED_WARN_ON(x)	((void)(x))
+# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
 #endif
 
 struct rq;
@@ -218,23 +218,25 @@ static inline int dl_bandwidth_enabled(void)
 	return sysctl_sched_rt_runtime >= 0;
 }
 
-extern struct dl_bw *dl_bw_of(int i);
-
 struct dl_bw {
 	raw_spinlock_t lock;
 	u64 bw, total_bw;
 };
 
+static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
+
 static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
+void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 {
 	dl_b->total_bw -= tsk_bw;
+	__dl_update(dl_b, (s32)tsk_bw / cpus);
 }
 
 static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 {
 	dl_b->total_bw += tsk_bw;
+	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
 static inline
@@ -244,7 +246,22 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 }
 
+void dl_change_utilization(struct task_struct *p, u64 new_bw);
 extern void init_dl_bw(struct dl_bw *dl_b);
+extern int sched_dl_global_validate(void);
+extern void sched_dl_do_global(void);
+extern int sched_dl_overflow(struct task_struct *p, int policy,
+			     const struct sched_attr *attr);
+extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
+extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
+extern bool __checkparam_dl(const struct sched_attr *attr);
+extern void __dl_clear_params(struct task_struct *p);
+extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
+extern int dl_task_can_attach(struct task_struct *p,
+			      const struct cpumask *cs_cpus_allowed);
+extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
+					const struct cpumask *trial);
+extern bool dl_cpu_busy(unsigned int cpu);
 
 #ifdef CONFIG_CGROUP_SCHED
 
@@ -366,6 +383,11 @@ extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent
 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 		struct sched_rt_entity *rt_se, int cpu,
 		struct sched_rt_entity *parent);
+extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
+extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
+extern long sched_group_rt_runtime(struct task_group *tg);
+extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_online_group(struct task_group *tg,
@@ -558,6 +580,30 @@ struct dl_rq {
 #else
 	struct dl_bw dl_bw;
 #endif
+	/*
+	 * "Active utilization" for this runqueue: increased when a
+	 * task wakes up (becomes TASK_RUNNING) and decreased when a
+	 * task blocks
+	 */
+	u64 running_bw;
+
+	/*
+	 * Utilization of the tasks "assigned" to this runqueue (including
+	 * the tasks that are in runqueue and the tasks that executed on this
+	 * CPU and blocked). Increased when a task moves to this runqueue, and
+	 * decreased when the task moves away (migrates, changes scheduling
+	 * policy, or terminates).
+	 * This is needed to compute the "inactive utilization" for the
+	 * runqueue (inactive utilization = this_bw - running_bw).
+	 */
+	u64 this_bw;
+	u64 extra_bw;
+
+	/*
+	 * Inverse of the fraction of CPU utilization that can be reclaimed
+	 * by the GRUB algorithm.
+	 */
+	u64 bw_ratio;
 };
 
 #ifdef CONFIG_SMP
@@ -606,11 +652,9 @@ struct root_domain {
 
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
-extern cpumask_var_t fallback_doms;
-extern cpumask_var_t sched_domains_tmpmask;
 
 extern void init_defrootdomain(void);
-extern int init_sched_domains(const struct cpumask *cpu_map);
+extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
@@ -1025,7 +1069,11 @@ struct sched_group_capacity {
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
 
-	unsigned long cpumask[0]; /* iteration mask */
+#ifdef CONFIG_SCHED_DEBUG
+	int id;
+#endif
+
+	unsigned long cpumask[0]; /* balance mask */
 };
 
 struct sched_group {
@@ -1046,16 +1094,15 @@ struct sched_group {
 	unsigned long cpumask[0];
 };
 
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+static inline struct cpumask *sched_group_span(struct sched_group *sg)
 {
 	return to_cpumask(sg->cpumask);
 }
 
 /*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
+ * See build_balance_mask().
  */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
 {
 	return to_cpumask(sg->sgc->cpumask);
 }
@@ -1066,7 +1113,7 @@ static inline struct cpumask *sched_group_mask(struct sched_group *sg)
  */
 static inline unsigned int group_first_cpu(struct sched_group *group)
 {
-	return cpumask_first(sched_group_cpus(group));
+	return cpumask_first(sched_group_span(group));
 }
 
 extern int group_balance_cpu(struct sched_group *sg);
@@ -1422,7 +1469,11 @@ static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
 	curr->sched_class->set_curr_task(rq);
 }
 
+#ifdef CONFIG_SMP
 #define sched_class_highest (&stop_sched_class)
+#else
+#define sched_class_highest (&dl_sched_class)
+#endif
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
 
@@ -1486,7 +1537,12 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
 
+#define BW_SHIFT	20
+#define BW_UNIT		(1 << BW_SHIFT)
+#define RATIO_SHIFT	8
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
@@ -1928,6 +1984,33 @@ extern void nohz_balance_exit_idle(unsigned int cpu);
 static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
+
+#ifdef CONFIG_SMP
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
+	int i;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
+	for_each_cpu_and(i, rd->span, cpu_active_mask) {
+		struct rq *rq = cpu_rq(i);
+
+		rq->dl.extra_bw += bw;
+	}
+}
+#else
+static inline
+void __dl_update(struct dl_bw *dl_b, s64 bw)
+{
+	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
+
+	dl->extra_bw += bw;
+}
+#endif
+
+
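This is where the per-task changes made by __dl_add()/__dl_clear() reach GRUB: each CPU spanned by the root domain absorbs tsk_bw/cpus into its dl_rq->extra_bw (with the sign flipped, since admitted bandwidth is no longer reclaimable). The toy sketch below models only that extra_bw bookkeeping, not the total_bw side or the locking:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20
#define BW_UNIT  (1 << BW_SHIFT)
#define NR_CPUS  4

/* per-CPU reclaimable bandwidth; everything is free initially */
static int64_t extra_bw[NR_CPUS];

static void dl_update(int64_t bw)	/* mirrors __dl_update() over rd->span */
{
	for (int i = 0; i < NR_CPUS; i++)
		extra_bw[i] += bw;
}

static void dl_add(int64_t tsk_bw)   { dl_update(-(tsk_bw / NR_CPUS)); }
static void dl_clear(int64_t tsk_bw) { dl_update(tsk_bw / NR_CPUS); }

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		extra_bw[i] = BW_UNIT;

	dl_add(BW_UNIT / 2);	/* admit a 50%-utilization task */
	printf("after add:   extra_bw[0] = %.3f\n", (double)extra_bw[0] / BW_UNIT);

	dl_clear(BW_UNIT / 2);	/* task leaves -deadline */
	printf("after clear: extra_bw[0] = %.3f\n", (double)extra_bw[0] / BW_UNIT);
	return 0;
}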
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 struct irqtime {
 	u64			total;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 1b0b4fb..79895ae 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -10,6 +10,7 @@ DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
 cpumask_var_t sched_domains_tmpmask;
+cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
@@ -35,7 +36,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 	cpumask_clear(groupmask);
 
-	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
 		printk("does not load-balance\n");
@@ -45,14 +46,14 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %*pbl level %s\n",
+	printk(KERN_CONT "span=%*pbl level=%s\n",
 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
 				"CPU%d\n", cpu);
 	}
-	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
+	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain"
 				" CPU%d\n", cpu);
 	}
@@ -65,29 +66,47 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!cpumask_weight(sched_group_cpus(group))) {
+		if (!cpumask_weight(sched_group_span(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (!(sd->flags & SD_OVERLAP) &&
-		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
+		    cpumask_intersects(groupmask, sched_group_span(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
-		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
+		cpumask_or(groupmask, groupmask, sched_group_span(group));
 
-		printk(KERN_CONT " %*pbl",
-		       cpumask_pr_args(sched_group_cpus(group)));
-		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-			printk(KERN_CONT " (cpu_capacity = %lu)",
-				group->sgc->capacity);
+		printk(KERN_CONT " %d:{ span=%*pbl",
+				group->sgc->id,
+				cpumask_pr_args(sched_group_span(group)));
+
+		if ((sd->flags & SD_OVERLAP) &&
+		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
+			printk(KERN_CONT " mask=%*pbl",
+				cpumask_pr_args(group_balance_mask(group)));
 		}
 
+		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
+			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
+
+		if (group == sd->groups && sd->child &&
+		    !cpumask_equal(sched_domain_span(sd->child),
+				   sched_group_span(group))) {
+			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
+		}
+
+		printk(KERN_CONT " }");
+
 		group = group->next;
+
+		if (group != sd->groups)
+			printk(KERN_CONT ",");
+
 	} while (group != sd->groups);
 	printk(KERN_CONT "\n");
 
@@ -113,7 +132,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		return;
 	}
 
-	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
 
 	for (;;) {
 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
@@ -477,46 +496,214 @@ enum s_alloc {
 };
 
 /*
- * Build an iteration mask that can exclude certain CPUs from the upwards
- * domain traversal.
+ * Return the canonical balance CPU for this group, this is the first CPU
+ * of this group that's also in the balance mask.
  *
- * Asymmetric node setups can result in situations where the domain tree is of
- * unequal depth, make sure to skip domains that already cover the entire
- * range.
+ * The balance mask contains all those CPUs that could actually end up at
+ * this group. See build_balance_mask().
  *
- * In that case build_sched_domains() will have terminated the iteration early
- * and our sibling sd spans will be empty. Domains should always include the
- * CPU they're built on, so check that.
+ * Also see should_we_balance().
  */
-static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+int group_balance_cpu(struct sched_group *sg)
 {
-	const struct cpumask *span = sched_domain_span(sd);
+	return cpumask_first(group_balance_mask(sg));
+}
+
+
+/*
+ * NUMA topology (first read the regular topology blurb below)
+ *
+ * Given a node-distance table, for example:
+ *
+ *   node   0   1   2   3
+ *     0:  10  20  30  20
+ *     1:  20  10  20  30
+ *     2:  30  20  10  20
+ *     3:  20  30  20  10
+ *
+ * which represents a 4 node ring topology like:
+ *
+ *   0 ----- 1
+ *   |       |
+ *   |       |
+ *   |       |
+ *   3 ----- 2
+ *
+ * We want to construct domains and groups to represent this. The way we go
+ * about doing this is to build the domains on 'hops'. For each NUMA level we
+ * construct the mask of all nodes reachable in @level hops.
+ *
+ * For the above NUMA topology that gives 3 levels:
+ *
+ * NUMA-2	0-3		0-3		0-3		0-3
+ *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
+ *
+ * NUMA-1	0-1,3		0-2		1-3		0,2-3
+ *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
+ *
+ * NUMA-0	0		1		2		3
+ *
+ *
+ * As can be seen, things don't line up as nicely as with the regular topology.
+ * When we iterate a domain in child domain chunks, some nodes can be
+ * represented multiple times -- hence the "overlap" naming for this part of
+ * the topology.
+ *
+ * In order to minimize this overlap, we only build enough groups to cover the
+ * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
+ *
+ * Because:
+ *
+ *  - the first group of each domain is its child domain; this
+ *    gets us the first 0-1,3
+ *  - the only uncovered node is 2, whose child domain is 1-3.
+ *
+ * However, because of the overlap, computing a unique CPU for each group is
+ * more complicated. Consider, for instance, the groups of NODE-1 NUMA-2: both
+ * groups include the CPUs of Node-0, while those CPUs would not in fact ever
+ * end up at those groups (they would end up in group: 0-1,3).
+ *
+ * To correct this we have to introduce the group balance mask. This mask
+ * will contain those CPUs in the group that can reach this group given the
+ * (child) domain tree.
+ *
+ * With this we can once again compute balance_cpu and sched_group_capacity
+ * relations.
+ *
+ * XXX include words on how balance_cpu is unique and therefore can be
+ * used for sched_group_capacity links.
+ *
+ *
+ * Another 'interesting' topology is:
+ *
+ *   node   0   1   2   3
+ *     0:  10  20  20  30
+ *     1:  20  10  20  20
+ *     2:  20  20  10  20
+ *     3:  30  20  20  10
+ *
+ * Which looks a little like:
+ *
+ *   0 ----- 1
+ *   |     / |
+ *   |   /   |
+ *   | /     |
+ *   2 ----- 3
+ *
+ * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
+ * are not.
+ *
+ * This leads to a few particularly weird cases where the number of
+ * sched_domain levels is not the same for each CPU. Consider:
+ *
+ * NUMA-2	0-3						0-3
+ *  groups:	{0-2},{1-3}					{1-3},{0-2}
+ *
+ * NUMA-1	0-2		0-3		0-3		1-3
+ *
+ * NUMA-0	0		1		2		3
+ *
+ */
+
+
+/*
+ * Build the balance mask; it contains only those CPUs that can arrive at this
+ * group and should be considered to continue balancing.
+ *
+ * We do this during the group creation pass; therefore the group information
+ * isn't complete yet. However, since each group represents a (child) domain we
+ * can fully construct this using the sched_domain bits (which are already
+ * complete).
+ */
+static void
+build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
+{
+	const struct cpumask *sg_span = sched_group_span(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
 
-	for_each_cpu(i, span) {
+	cpumask_clear(mask);
+
+	for_each_cpu(i, sg_span) {
 		sibling = *per_cpu_ptr(sdd->sd, i);
-		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+		/*
+		 * Can happen in the asymmetric case, where these siblings are
+		 * unused. The mask will not be empty because those CPUs that
+		 * do have the top domain _should_ span the domain.
+		 */
+		if (!sibling->child)
 			continue;
 
-		cpumask_set_cpu(i, sched_group_mask(sg));
+		/* If we would not end up here, we can't continue from here */
+		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
+			continue;
+
+		cpumask_set_cpu(i, mask);
 	}
+
+	/* We must not have empty masks here */
+	WARN_ON_ONCE(cpumask_empty(mask));
 }
 
 /*
- * Return the canonical balance CPU for this group, this is the first CPU
- * of this group that's also in the iteration mask.
+ * XXX: This creates per-node group entries; since the load-balancer will
+ * immediately access remote memory to construct this group's load-balance
+ * statistics, having the groups node-local is of dubious benefit.
  */
-int group_balance_cpu(struct sched_group *sg)
+static struct sched_group *
+build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 {
-	return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
+	struct sched_group *sg;
+	struct cpumask *sg_span;
+
+	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			GFP_KERNEL, cpu_to_node(cpu));
+
+	if (!sg)
+		return NULL;
+
+	sg_span = sched_group_span(sg);
+	if (sd->child)
+		cpumask_copy(sg_span, sched_domain_span(sd->child));
+	else
+		cpumask_copy(sg_span, sched_domain_span(sd));
+
+	return sg;
+}
+
+static void init_overlap_sched_group(struct sched_domain *sd,
+				     struct sched_group *sg)
+{
+	struct cpumask *mask = sched_domains_tmpmask2;
+	struct sd_data *sdd = sd->private;
+	struct cpumask *sg_span;
+	int cpu;
+
+	build_balance_mask(sd, sg, mask);
+	cpu = cpumask_first_and(sched_group_span(sg), mask);
+
+	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+	if (atomic_inc_return(&sg->sgc->ref) == 1)
+		cpumask_copy(group_balance_mask(sg), mask);
+	else
+		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
+
+	/*
+	 * Initialize sgc->capacity such that even if we mess up the
+	 * domains and no possible iteration will get us here, we won't
+	 * die on a /0 trap.
+	 */
+	sg_span = sched_group_span(sg);
+	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 }
 
 static int
 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 {
-	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	struct sched_group *first = NULL, *last = NULL, *sg;
 	const struct cpumask *span = sched_domain_span(sd);
 	struct cpumask *covered = sched_domains_tmpmask;
 	struct sd_data *sdd = sd->private;
@@ -525,7 +712,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 	cpumask_clear(covered);
 
-	for_each_cpu(i, span) {
+	for_each_cpu_wrap(i, span, cpu) {
 		struct cpumask *sg_span;
 
 		if (cpumask_test_cpu(i, covered))
@@ -533,44 +720,27 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sibling = *per_cpu_ptr(sdd->sd, i);
 
-		/* See the comment near build_group_mask(). */
+		/*
+		 * Asymmetric node setups can result in situations where the
+		 * domain tree is of unequal depth; make sure to skip domains
+		 * that already cover the entire range.
+		 *
+		 * In that case build_sched_domains() will have terminated the
+		 * iteration early and our sibling sd spans will be empty.
+		 * Domains should always include the CPU they're built on, so
+		 * check that.
+		 */
 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
 			continue;
 
-		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				GFP_KERNEL, cpu_to_node(cpu));
-
+		sg = build_group_from_child_sched_domain(sibling, cpu);
 		if (!sg)
 			goto fail;
 
-		sg_span = sched_group_cpus(sg);
-		if (sibling->child)
-			cpumask_copy(sg_span, sched_domain_span(sibling->child));
-		else
-			cpumask_set_cpu(i, sg_span);
-
+		sg_span = sched_group_span(sg);
 		cpumask_or(covered, covered, sg_span);
 
-		sg->sgc = *per_cpu_ptr(sdd->sgc, i);
-		if (atomic_inc_return(&sg->sgc->ref) == 1)
-			build_group_mask(sd, sg);
-
-		/*
-		 * Initialize sgc->capacity such that even if we mess up the
-		 * domains and no possible iteration will get us here, we won't
-		 * die on a /0 trap.
-		 */
-		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
-
-		/*
-		 * Make sure the first group of this domain contains the
-		 * canonical balance CPU. Otherwise the sched_domain iteration
-		 * breaks. See update_sg_lb_stats().
-		 */
-		if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
-		    group_balance_cpu(sg) == cpu)
-			groups = sg;
+		init_overlap_sched_group(sd, sg);
 
 		if (!first)
 			first = sg;
@@ -579,7 +749,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		last = sg;
 		last->next = first;
 	}
-	sd->groups = groups;
+	sd->groups = first;
 
 	return 0;
 
@@ -589,23 +759,106 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 	return -ENOMEM;
 }
 
-static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
+
+/*
+ * Package topology (also see the load-balance blurb in fair.c)
+ *
+ * The scheduler builds a tree structure to represent a number of important
+ * topology features. By default (default_topology[]) these include:
+ *
+ *  - Simultaneous multithreading (SMT)
+ *  - Multi-Core Cache (MC)
+ *  - Package (DIE)
+ *
+ * Where the last one more or less denotes everything up to a NUMA node.
+ *
+ * The tree consists of 3 primary data structures:
+ *
+ *	sched_domain -> sched_group -> sched_group_capacity
+ *	    ^ ^             ^ ^
+ *          `-'             `-'
+ *
+ * The sched_domains are per-cpu and have a two-way link (parent & child) and
+ * denote the ever growing mask of CPUs belonging to that level of topology.
+ *
+ * Each sched_domain has a circular (double) linked list of sched_group's, each
+ * denoting the domains of the level below (or individual CPUs in case of the
+ * first domain level). The sched_group linked by a sched_domain includes the
+ * CPU of that sched_domain [*].
+ *
+ * Take for instance a part with 2 threads per core, 2 cores per cache,
+ * and 2 caches:
+ *
+ * CPU   0   1   2   3   4   5   6   7
+ *
+ * DIE  [                             ]
+ * MC   [             ] [             ]
+ * SMT  [     ] [     ] [     ] [     ]
+ *
+ *  - or -
+ *
+ * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
+ * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
+ * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
+ *
+ * CPU   0   1   2   3   4   5   6   7
+ *
+ * One way to think about it is: sched_domain moves you up and down among these
+ * topology levels, while sched_group moves you sideways through it, at child
+ * domain granularity.
+ *
+ * sched_group_capacity ensures each unique sched_group has shared storage.
+ *
+ * There are two related construction problems, both of which require a CPU
+ * that uniquely identifies each group (for a given domain):
+ *
+ *  - The first is the balance_cpu (see should_we_balance() and the
+ *    load-balance blurb in fair.c); for each group we only want 1 CPU to
+ *    continue balancing at a higher domain.
+ *
+ *  - The second is the sched_group_capacity; we want all identical groups
+ *    to share a single sched_group_capacity.
+ *
+ * These topologies are exclusive by construction: it is impossible for an
+ * SMT thread to belong to multiple cores, or for a core to be part of
+ * multiple caches. There is therefore a very clear and unique location
+ * for each CPU in the hierarchy.
+ *
+ * Therefore computing a unique CPU for each group is trivial (the iteration
+ * mask is redundant and set to all 1s; all CPUs in a group will end up at
+ * _that_ group), so we can simply pick the first CPU in each group.
+ *
+ *
+ * [*] in other words, the first group of each domain is its child domain.
+ */
+
+static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
 	struct sched_domain *child = sd->child;
+	struct sched_group *sg;
 
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg) {
-		*sg = *per_cpu_ptr(sdd->sg, cpu);
-		(*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+	sg = *per_cpu_ptr(sdd->sg, cpu);
+	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 
-		/* For claim_allocations: */
-		atomic_set(&(*sg)->sgc->ref, 1);
+	/* For claim_allocations: */
+	atomic_inc(&sg->ref);
+	atomic_inc(&sg->sgc->ref);
+
+	if (child) {
+		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
+		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+	} else {
+		cpumask_set_cpu(cpu, sched_group_span(sg));
+		cpumask_set_cpu(cpu, group_balance_mask(sg));
 	}
 
-	return cpu;
+	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
+	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+
+	return sg;
 }
 
 /*
@@ -624,34 +877,20 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 	struct cpumask *covered;
 	int i;
 
-	get_group(cpu, sdd, &sd->groups);
-	atomic_inc(&sd->groups->ref);
-
-	if (cpu != cpumask_first(span))
-		return 0;
-
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
 	cpumask_clear(covered);
 
-	for_each_cpu(i, span) {
+	for_each_cpu_wrap(i, span, cpu) {
 		struct sched_group *sg;
-		int group, j;
 
 		if (cpumask_test_cpu(i, covered))
 			continue;
 
-		group = get_group(i, sdd, &sg);
-		cpumask_setall(sched_group_mask(sg));
+		sg = get_group(i, sdd);
 
-		for_each_cpu(j, span) {
-			if (get_group(j, sdd, NULL) != group)
-				continue;
-
-			cpumask_set_cpu(j, covered);
-			cpumask_set_cpu(j, sched_group_cpus(sg));
-		}
+		cpumask_or(covered, covered, sched_group_span(sg));
 
 		if (!first)
 			first = sg;
@@ -660,6 +899,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 		last = sg;
 	}
 	last->next = first;
+	sd->groups = first;
 
 	return 0;
 }
@@ -683,12 +923,12 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	do {
 		int cpu, max_cpu = -1;
 
-		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg->group_weight = cpumask_weight(sched_group_span(sg));
 
 		if (!(sd->flags & SD_ASYM_PACKING))
 			goto next;
 
-		for_each_cpu(cpu, sched_group_cpus(sg)) {
+		for_each_cpu(cpu, sched_group_span(sg)) {
 			if (max_cpu < 0)
 				max_cpu = cpu;
 			else if (sched_asym_prefer(cpu, max_cpu))
@@ -1308,6 +1548,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 			if (!sgc)
 				return -ENOMEM;
 
+#ifdef CONFIG_SCHED_DEBUG
+			sgc->id = j;
+#endif
+
 			*per_cpu_ptr(sdd->sgc, j) = sgc;
 		}
 	}
@@ -1407,7 +1651,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
 			if (tl == sched_domain_topology)
 				*per_cpu_ptr(d.sd, i) = sd;
-			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+			if (tl->flags & SDTL_OVERLAP)
 				sd->flags |= SD_OVERLAP;
 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
 				break;
@@ -1478,7 +1722,7 @@ static struct sched_domain_attr		*dattr_cur;
  * cpumask) fails, then fallback to a single sched domain,
  * as determined by the single cpumask fallback_doms.
  */
-cpumask_var_t				fallback_doms;
+static cpumask_var_t			fallback_doms;
 
 /*
  * arch_update_cpu_topology lets virtualized architectures update the
@@ -1520,10 +1764,14 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  * For now this just excludes isolated CPUs, but could be used to
  * exclude other special cases in the future.
  */
-int init_sched_domains(const struct cpumask *cpu_map)
+int sched_init_domains(const struct cpumask *cpu_map)
 {
 	int err;
 
+	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
+	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = alloc_sched_domains(ndoms_cur);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index b8c84c6..17f11c6 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -12,44 +12,44 @@
 #include <linux/hash.h>
 #include <linux/kthread.h>
 
-void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
+void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 {
-	spin_lock_init(&q->lock);
-	lockdep_set_class_and_name(&q->lock, key, name);
-	INIT_LIST_HEAD(&q->task_list);
+	spin_lock_init(&wq_head->lock);
+	lockdep_set_class_and_name(&wq_head->lock, key, name);
+	INIT_LIST_HEAD(&wq_head->head);
 }
 
 EXPORT_SYMBOL(__init_waitqueue_head);
 
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
 	unsigned long flags;
 
-	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&q->lock, flags);
-	__add_wait_queue(q, wait);
-	spin_unlock_irqrestore(&q->lock, flags);
+	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
+	spin_lock_irqsave(&wq_head->lock, flags);
+	__add_wait_queue_entry_tail(wq_head, wq_entry);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
 	unsigned long flags;
 
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&q->lock, flags);
-	__add_wait_queue_tail(q, wait);
-	spin_unlock_irqrestore(&q->lock, flags);
+	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+	spin_lock_irqsave(&wq_head->lock, flags);
+	__add_wait_queue_entry_tail(wq_head, wq_entry);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
-	__remove_wait_queue(q, wait);
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_lock_irqsave(&wq_head->lock, flags);
+	__remove_wait_queue(wq_head, wq_entry);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(remove_wait_queue);
 
@@ -63,12 +63,12 @@ EXPORT_SYMBOL(remove_wait_queue);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 			int nr_exclusive, int wake_flags, void *key)
 {
-	wait_queue_t *curr, *next;
+	wait_queue_entry_t *curr, *next;
 
-	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+	list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
 		unsigned flags = curr->flags;
 
 		if (curr->func(curr, mode, wake_flags, key) &&
@@ -79,7 +79,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 
 /**
  * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
+ * @wq_head: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
@@ -87,35 +87,35 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
  */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0, key);
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_lock_irqsave(&wq_head->lock, flags);
+	__wake_up_common(wq_head, mode, nr_exclusive, 0, key);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(__wake_up);
 
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
 {
-	__wake_up_common(q, mode, nr, 0, NULL);
+	__wake_up_common(wq_head, mode, nr, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
 {
-	__wake_up_common(q, mode, 1, 0, key);
+	__wake_up_common(wq_head, mode, 1, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
+ * @wq_head: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: opaque value to be passed to wakeup targets
@@ -130,30 +130,30 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key);
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
  */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
 	int wake_flags = 1; /* XXX WF_SYNC */
 
-	if (unlikely(!q))
+	if (unlikely(!wq_head))
 		return;
 
 	if (unlikely(nr_exclusive != 1))
 		wake_flags = 0;
 
-	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_lock_irqsave(&wq_head->lock, flags);
+	__wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
 
 /*
  * __wake_up_sync - see __wake_up_sync_key()
  */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
 {
-	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
+	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
@@ -170,48 +170,48 @@ EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
  * loads to move into the critical region).
  */
 void
-prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
 	unsigned long flags;
 
-	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&q->lock, flags);
-	if (list_empty(&wait->task_list))
-		__add_wait_queue(q, wait);
+	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
+	spin_lock_irqsave(&wq_head->lock, flags);
+	if (list_empty(&wq_entry->entry))
+		__add_wait_queue(wq_head, wq_entry);
 	set_current_state(state);
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait);
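
(For illustration only, not part of this diff: a minimal sketch of the classic open-coded wait loop that prepare_to_wait()/finish_wait() support, written against the renamed wait_queue_head/wait_queue_entry types. The waitqueue head my_wq and the flag my_condition are hypothetical placeholders.)

/* assumes <linux/wait.h> and <linux/sched/signal.h> */
static struct wait_queue_head my_wq;	/* hypothetical waitqueue */
static bool my_condition;		/* hypothetical wake-up condition */

static void wait_for_my_condition(void)
{
	DEFINE_WAIT(wait);		/* declares a struct wait_queue_entry */

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_condition || signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);
}
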
 
 void
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
 	unsigned long flags;
 
-	wait->flags |= WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&q->lock, flags);
-	if (list_empty(&wait->task_list))
-		__add_wait_queue_tail(q, wait);
+	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+	spin_lock_irqsave(&wq_head->lock, flags);
+	if (list_empty(&wq_entry->entry))
+		__add_wait_queue_entry_tail(wq_head, wq_entry);
 	set_current_state(state);
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void init_wait_entry(wait_queue_t *wait, int flags)
+void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
 {
-	wait->flags = flags;
-	wait->private = current;
-	wait->func = autoremove_wake_function;
-	INIT_LIST_HEAD(&wait->task_list);
+	wq_entry->flags = flags;
+	wq_entry->private = current;
+	wq_entry->func = autoremove_wake_function;
+	INIT_LIST_HEAD(&wq_entry->entry);
 }
 EXPORT_SYMBOL(init_wait_entry);
 
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
 	unsigned long flags;
 	long ret = 0;
 
-	spin_lock_irqsave(&q->lock, flags);
+	spin_lock_irqsave(&wq_head->lock, flags);
 	if (unlikely(signal_pending_state(state, current))) {
 		/*
 		 * Exclusive waiter must not fail if it was selected by wakeup,
@@ -219,24 +219,24 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 		 *
 		 * The caller will recheck the condition and return success if
 		 * we were already woken up, we can not miss the event because
-		 * wakeup locks/unlocks the same q->lock.
+		 * wakeup locks/unlocks the same wq_head->lock.
 		 *
 		 * But we need to ensure that set-condition + wakeup after that
 		 * can't see us, it should wake up another exclusive waiter if
 		 * we fail.
 		 */
-		list_del_init(&wait->task_list);
+		list_del_init(&wq_entry->entry);
 		ret = -ERESTARTSYS;
 	} else {
-		if (list_empty(&wait->task_list)) {
-			if (wait->flags & WQ_FLAG_EXCLUSIVE)
-				__add_wait_queue_tail(q, wait);
+		if (list_empty(&wq_entry->entry)) {
+			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
+				__add_wait_queue_entry_tail(wq_head, wq_entry);
 			else
-				__add_wait_queue(q, wait);
+				__add_wait_queue(wq_head, wq_entry);
 		}
 		set_current_state(state);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
+	spin_unlock_irqrestore(&wq_head->lock, flags);
 
 	return ret;
 }
@@ -249,10 +249,10 @@ EXPORT_SYMBOL(prepare_to_wait_event);
  * condition in the caller before they add the wait
  * entry to the wake queue.
  */
-int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
-	if (likely(list_empty(&wait->task_list)))
-		__add_wait_queue_tail(wq, wait);
+	if (likely(list_empty(&wait->entry)))
+		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	if (signal_pending(current))
@@ -265,10 +265,10 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(do_wait_intr);
 
-int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
-	if (likely(list_empty(&wait->task_list)))
-		__add_wait_queue_tail(wq, wait);
+	if (likely(list_empty(&wait->entry)))
+		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	if (signal_pending(current))
@@ -283,14 +283,14 @@ EXPORT_SYMBOL(do_wait_intr_irq);
 
 /**
  * finish_wait - clean up after waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
+ * @wq_head: waitqueue waited on
+ * @wq_entry: wait descriptor
  *
  * Sets current thread back to running state and removes
  * the wait descriptor from the given waitqueue if still
  * queued.
  */
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
 	unsigned long flags;
 
@@ -308,20 +308,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	 *    have _one_ other CPU that looks at or modifies
 	 *    the list).
 	 */
-	if (!list_empty_careful(&wait->task_list)) {
-		spin_lock_irqsave(&q->lock, flags);
-		list_del_init(&wait->task_list);
-		spin_unlock_irqrestore(&q->lock, flags);
+	if (!list_empty_careful(&wq_entry->entry)) {
+		spin_lock_irqsave(&wq_head->lock, flags);
+		list_del_init(&wq_entry->entry);
+		spin_unlock_irqrestore(&wq_head->lock, flags);
 	}
 }
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
 {
-	int ret = default_wake_function(wait, mode, sync, key);
+	int ret = default_wake_function(wq_entry, mode, sync, key);
 
 	if (ret)
-		list_del_init(&wait->task_list);
+		list_del_init(&wq_entry->entry);
 	return ret;
 }
 EXPORT_SYMBOL(autoremove_wake_function);
@@ -334,24 +334,24 @@ static inline bool is_kthread_should_stop(void)
 /*
  * DEFINE_WAIT_FUNC(wait, woken_wake_func);
  *
- * add_wait_queue(&wq, &wait);
+ * add_wait_queue(&wq_head, &wait);
  * for (;;) {
  *     if (condition)
  *         break;
  *
  *     p->state = mode;				condition = true;
  *     smp_mb(); // A				smp_wmb(); // C
- *     if (!wait->flags & WQ_FLAG_WOKEN)	wait->flags |= WQ_FLAG_WOKEN;
+ *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	wq_entry->flags |= WQ_FLAG_WOKEN;
  *         schedule()				try_to_wake_up();
  *     p->state = TASK_RUNNING;		    ~~~~~~~~~~~~~~~~~~
- *     wait->flags &= ~WQ_FLAG_WOKEN;		condition = true;
+ *     wq_entry->flags &= ~WQ_FLAG_WOKEN;		condition = true;
  *     smp_mb() // B				smp_wmb(); // C
- *						wait->flags |= WQ_FLAG_WOKEN;
+ *						wq_entry->flags |= WQ_FLAG_WOKEN;
  * }
- * remove_wait_queue(&wq, &wait);
+ * remove_wait_queue(&wq_head, &wait);
  *
  */
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
+long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
 {
 	set_current_state(mode); /* A */
 	/*
@@ -359,7 +359,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
 	 * also observe all state before the wakeup.
 	 */
-	if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
+	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
 		timeout = schedule_timeout(timeout);
 	__set_current_state(TASK_RUNNING);
 
@@ -369,13 +369,13 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
 	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
 	 * an event.
 	 */
-	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
 
 	return timeout;
 }
 EXPORT_SYMBOL(wait_woken);
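
(Illustration, not from the diff: the wait_woken()/woken_wake_function() pairing described in the comment above is typically used as sketched below; my_wq and data_ready are hypothetical, and the 100ms timeout is arbitrary.)

/* assumes <linux/wait.h> and <linux/jiffies.h> */
static int wait_for_data(struct wait_queue_head *my_wq, bool *data_ready)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = msecs_to_jiffies(100);

	add_wait_queue(my_wq, &wait);
	while (!READ_ONCE(*data_ready) && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(my_wq, &wait);

	return READ_ONCE(*data_ready) ? 0 : -ETIMEDOUT;
}

Unlike the prepare_to_wait() loop, this pattern never misses a wake-up that races with the condition check, because WQ_FLAG_WOKEN is only cleared by wait_woken() itself.
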
 
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
 {
 	/*
 	 * Although this function is called under waitqueue lock, LOCK
@@ -385,267 +385,8 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 	 * and is paired with smp_store_mb() in wait_woken().
 	 */
 	smp_wmb(); /* C */
-	wait->flags |= WQ_FLAG_WOKEN;
+	wq_entry->flags |= WQ_FLAG_WOKEN;
 
-	return default_wake_function(wait, mode, sync, key);
+	return default_wake_function(wq_entry, mode, sync, key);
 }
 EXPORT_SYMBOL(woken_wake_function);
-
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue *wait_bit
-		= container_of(wait, struct wait_bit_queue, wait);
-
-	if (wait_bit->key.flags != key->flags ||
-			wait_bit->key.bit_nr != key->bit_nr ||
-			test_bit(key->bit_nr, key->flags))
-		return 0;
-	else
-		return autoremove_wake_function(wait, mode, sync, key);
-}
-EXPORT_SYMBOL(wake_bit_function);
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
- * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
- * permitted return codes. Nonzero return codes halt waiting and return.
- */
-int __sched
-__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
-	      wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq, &q->wait, mode);
-		if (test_bit(q->key.bit_nr, q->key.flags))
-			ret = (*action)(&q->key, mode);
-	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
-	finish_wait(wq, &q->wait);
-	return ret;
-}
-EXPORT_SYMBOL(__wait_on_bit);
-
-int __sched out_of_line_wait_on_bit(void *word, int bit,
-				    wait_bit_action_f *action, unsigned mode)
-{
-	wait_queue_head_t *wq = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wait, word, bit);
-
-	return __wait_on_bit(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit);
-
-int __sched out_of_line_wait_on_bit_timeout(
-	void *word, int bit, wait_bit_action_f *action,
-	unsigned mode, unsigned long timeout)
-{
-	wait_queue_head_t *wq = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wait, word, bit);
-
-	wait.key.timeout = jiffies + timeout;
-	return __wait_on_bit(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
-
-int __sched
-__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
-			wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	for (;;) {
-		prepare_to_wait_exclusive(wq, &q->wait, mode);
-		if (test_bit(q->key.bit_nr, q->key.flags)) {
-			ret = action(&q->key, mode);
-			/*
-			 * See the comment in prepare_to_wait_event().
-			 * finish_wait() does not necessarily takes wq->lock,
-			 * but test_and_set_bit() implies mb() which pairs with
-			 * smp_mb__after_atomic() before wake_up_page().
-			 */
-			if (ret)
-				finish_wait(wq, &q->wait);
-		}
-		if (!test_and_set_bit(q->key.bit_nr, q->key.flags)) {
-			if (!ret)
-				finish_wait(wq, &q->wait);
-			return 0;
-		} else if (ret) {
-			return ret;
-		}
-	}
-}
-EXPORT_SYMBOL(__wait_on_bit_lock);
-
-int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
-					 wait_bit_action_f *action, unsigned mode)
-{
-	wait_queue_head_t *wq = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wait, word, bit);
-
-	return __wait_on_bit_lock(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-
-void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
-{
-	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, 1, &key);
-}
-EXPORT_SYMBOL(__wake_up_bit);
-
-/**
- * wake_up_bit - wake up a waiter on a bit
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that wakes up waiters
- * on a bit. For instance, if one were to have waiters on a bitflag,
- * one would call wake_up_bit() after clearing the bit.
- *
- * In order for this to function properly, as it uses waitqueue_active()
- * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_atomic(), but in some
- * cases where bitflags are manipulated non-atomically under a lock, one
- * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
- * because spin_unlock() does not guarantee a memory barrier.
- */
-void wake_up_bit(void *word, int bit)
-{
-	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
-}
-EXPORT_SYMBOL(wake_up_bit);
-
-/*
- * Manipulate the atomic_t address to produce a better bit waitqueue table hash
- * index (we're keying off bit -1, but that would produce a horrible hash
- * value).
- */
-static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
-{
-	if (BITS_PER_LONG == 64) {
-		unsigned long q = (unsigned long)p;
-		return bit_waitqueue((void *)(q & ~1), q & 1);
-	}
-	return bit_waitqueue(p, 0);
-}
-
-static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
-				  void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue *wait_bit
-		= container_of(wait, struct wait_bit_queue, wait);
-	atomic_t *val = key->flags;
-
-	if (wait_bit->key.flags != key->flags ||
-	    wait_bit->key.bit_nr != key->bit_nr ||
-	    atomic_read(val) != 0)
-		return 0;
-	return autoremove_wake_function(wait, mode, sync, key);
-}
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
- * the actions of __wait_on_atomic_t() are permitted return codes.  Nonzero
- * return codes halt waiting and return.
- */
-static __sched
-int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
-		       int (*action)(atomic_t *), unsigned mode)
-{
-	atomic_t *val;
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq, &q->wait, mode);
-		val = q->key.flags;
-		if (atomic_read(val) == 0)
-			break;
-		ret = (*action)(val);
-	} while (!ret && atomic_read(val) != 0);
-	finish_wait(wq, &q->wait);
-	return ret;
-}
-
-#define DEFINE_WAIT_ATOMIC_T(name, p)					\
-	struct wait_bit_queue name = {					\
-		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
-		.wait	= {						\
-			.private	= current,			\
-			.func		= wake_atomic_t_function,	\
-			.task_list	=				\
-				LIST_HEAD_INIT((name).wait.task_list),	\
-		},							\
-	}
-
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
-					 unsigned mode)
-{
-	wait_queue_head_t *wq = atomic_t_waitqueue(p);
-	DEFINE_WAIT_ATOMIC_T(wait, p);
-
-	return __wait_on_atomic_t(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
-
-/**
- * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @p: The atomic_t being waited on, a kernel virtual address
- *
- * Wake up anyone waiting for the atomic_t to go to zero.
- *
- * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
- * check is done by the waiter's wake function, not the by the waker itself).
- */
-void wake_up_atomic_t(atomic_t *p)
-{
-	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
-}
-EXPORT_SYMBOL(wake_up_atomic_t);
-
-__sched int bit_wait(struct wait_bit_key *word, int mode)
-{
-	schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait);
-
-__sched int bit_wait_io(struct wait_bit_key *word, int mode)
-{
-	io_schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait_io);
-
-__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_timeout);
-
-__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	io_schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
new file mode 100644
index 0000000..f815969
--- /dev/null
+++ b/kernel/sched/wait_bit.c
@@ -0,0 +1,286 @@
+/*
+ * The implementation of the wait_bit*() and related waiting APIs:
+ */
+#include <linux/wait_bit.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+#include <linux/hash.h>
+
+#define WAIT_TABLE_BITS 8
+#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
+
+static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+	unsigned long val = (unsigned long)word << shift | bit;
+
+	return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
+}
+EXPORT_SYMBOL(bit_waitqueue);
+
+int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+
+	if (wait_bit->key.flags != key->flags ||
+			wait_bit->key.bit_nr != key->bit_nr ||
+			test_bit(key->bit_nr, key->flags))
+		return 0;
+	else
+		return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+EXPORT_SYMBOL(wake_bit_function);
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * waiting, the @action passed to __wait_on_bit() and __wait_on_bit_lock()
+ * may return a nonzero code; a nonzero return halts waiting and is passed
+ * back to the caller.
+ */
+int __sched
+__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+	      wait_bit_action_f *action, unsigned mode)
+{
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
+		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
+			ret = (*action)(&wbq_entry->key, mode);
+	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	finish_wait(wq_head, &wbq_entry->wq_entry);
+	return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit);
+
+int __sched out_of_line_wait_on_bit(void *word, int bit,
+				    wait_bit_action_f *action, unsigned mode)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	return __wait_on_bit(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit);
+
+int __sched out_of_line_wait_on_bit_timeout(
+	void *word, int bit, wait_bit_action_f *action,
+	unsigned mode, unsigned long timeout)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	wq_entry.key.timeout = jiffies + timeout;
+	return __wait_on_bit(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
+
+int __sched
+__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+			wait_bit_action_f *action, unsigned mode)
+{
+	int ret = 0;
+
+	for (;;) {
+		prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
+		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
+			ret = action(&wbq_entry->key, mode);
+			/*
+			 * See the comment in prepare_to_wait_event().
+			 * finish_wait() does not necessarily take wq_head->lock,
+			 * but test_and_set_bit() implies mb() which pairs with
+			 * smp_mb__after_atomic() before wake_up_page().
+			 */
+			if (ret)
+				finish_wait(wq_head, &wbq_entry->wq_entry);
+		}
+		if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
+			if (!ret)
+				finish_wait(wq_head, &wbq_entry->wq_entry);
+			return 0;
+		} else if (ret) {
+			return ret;
+		}
+	}
+}
+EXPORT_SYMBOL(__wait_on_bit_lock);
+
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
+					 wait_bit_action_f *action, unsigned mode)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
+
+void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
+{
+	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+	if (waitqueue_active(wq_head))
+		__wake_up(wq_head, TASK_NORMAL, 1, &key);
+}
+EXPORT_SYMBOL(__wake_up_bit);
+
+/**
+ * wake_up_bit - wake up a waiter on a bit
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that wakes up waiters
+ * on a bit. For instance, if one were to have waiters on a bitflag,
+ * one would call wake_up_bit() after clearing the bit.
+ *
+ * In order for this to function properly, as it uses waitqueue_active()
+ * internally, some kind of memory barrier must be done prior to calling
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
+ * cases where bitflags are manipulated non-atomically under a lock, one
+ * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
+ * because spin_unlock() does not guarantee a memory barrier.
+ */
+void wake_up_bit(void *word, int bit)
+{
+	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
+}
+EXPORT_SYMBOL(wake_up_bit);
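
(Illustration, not part of the diff: a sketch of the waker/waiter pairing the wake_up_bit() comment above requires, including the barrier between clearing the bit and the wake-up. The flag word my_flags and bit MY_BIT_BUSY are hypothetical.)

/* assumes <linux/wait_bit.h> and <linux/bitops.h> */
static unsigned long my_flags;		/* hypothetical flag word */
#define MY_BIT_BUSY	0		/* hypothetical bit number */

static void my_op_done(void)
{
	clear_bit(MY_BIT_BUSY, &my_flags);
	smp_mb__after_atomic();	/* order the clear before waitqueue_active() */
	wake_up_bit(&my_flags, MY_BIT_BUSY);
}

static int my_op_wait(void)
{
	/* sleeps until MY_BIT_BUSY is clear; returns -EINTR if interrupted */
	return wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_INTERRUPTIBLE);
}
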
+
+/*
+ * Manipulate the atomic_t address to produce a better bit waitqueue table hash
+ * index (we're keying off bit -1, but that would produce a horrible hash
+ * value).
+ */
+static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
+{
+	if (BITS_PER_LONG == 64) {
+		unsigned long q = (unsigned long)p;
+		return bit_waitqueue((void *)(q & ~1), q & 1);
+	}
+	return bit_waitqueue(p, 0);
+}
+
+static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
+				  void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+	atomic_t *val = key->flags;
+
+	if (wait_bit->key.flags != key->flags ||
+	    wait_bit->key.bit_nr != key->bit_nr ||
+	    atomic_read(val) != 0)
+		return 0;
+	return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
+ * the @action passed to __wait_on_atomic_t() may return a nonzero code;
+ * a nonzero return halts waiting and is passed back to the caller.
+ */
+static __sched
+int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+		       int (*action)(atomic_t *), unsigned mode)
+{
+	atomic_t *val;
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
+		val = wbq_entry->key.flags;
+		if (atomic_read(val) == 0)
+			break;
+		ret = (*action)(val);
+	} while (!ret && atomic_read(val) != 0);
+	finish_wait(wq_head, &wbq_entry->wq_entry);
+	return ret;
+}
+
+#define DEFINE_WAIT_ATOMIC_T(name, p)					\
+	struct wait_bit_queue_entry name = {				\
+		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
+		.wq_entry = {						\
+			.private	= current,			\
+			.func		= wake_atomic_t_function,	\
+			.entry		=				\
+				LIST_HEAD_INIT((name).wq_entry.entry),	\
+		},							\
+	}
+
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
+					 unsigned mode)
+{
+	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
+	DEFINE_WAIT_ATOMIC_T(wq_entry, p);
+
+	return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
+
+/**
+ * wake_up_atomic_t - Wake up a waiter on an atomic_t
+ * @p: The atomic_t being waited on, a kernel virtual address
+ *
+ * Wake up anyone waiting for the atomic_t to go to zero.
+ *
+ * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
+ * check is done by the waiter's wake function, not by the waker itself).
+ */
+void wake_up_atomic_t(atomic_t *p)
+{
+	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
+}
+EXPORT_SYMBOL(wake_up_atomic_t);
+
+__sched int bit_wait(struct wait_bit_key *word, int mode)
+{
+	schedule();
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait);
+
+__sched int bit_wait_io(struct wait_bit_key *word, int mode)
+{
+	io_schedule();
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait_io);
+
+__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
+{
+	unsigned long now = READ_ONCE(jiffies);
+	if (time_after_eq(now, word->timeout))
+		return -EAGAIN;
+	schedule_timeout(word->timeout - now);
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bit_wait_timeout);
+
+__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
+{
+	unsigned long now = READ_ONCE(jiffies);
+	if (time_after_eq(now, word->timeout))
+		return -EAGAIN;
+	io_schedule_timeout(word->timeout - now);
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
+
+void __init wait_bit_init(void)
+{
+	int i;
+
+	for (i = 0; i < WAIT_TABLE_SIZE; i++)
+		init_waitqueue_head(bit_wait_table + i);
+}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 65f6107..98b59b5 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -13,7 +13,7 @@
  *        of Berkeley Packet Filters/Linux Socket Filters.
  */
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/coredump.h>
@@ -56,7 +56,7 @@
  * to a task_struct (other than @usage).
  */
 struct seccomp_filter {
-	atomic_t usage;
+	refcount_t usage;
 	struct seccomp_filter *prev;
 	struct bpf_prog *prog;
 };
@@ -378,7 +378,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 		return ERR_PTR(ret);
 	}
 
-	atomic_set(&sfilter->usage, 1);
+	refcount_set(&sfilter->usage, 1);
 
 	return sfilter;
 }
@@ -465,7 +465,7 @@ void get_seccomp_filter(struct task_struct *tsk)
 	if (!orig)
 		return;
 	/* Reference count is bounded by the number of total processes. */
-	atomic_inc(&orig->usage);
+	refcount_inc(&orig->usage);
 }
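
(Illustration only, not in the diff: the refcount_t API used in the conversion above is a drop-in replacement for the atomic_t calls, but it saturates and WARNs on overflow/underflow instead of wrapping. struct my_obj is a hypothetical structure.)

/* assumes <linux/refcount.h> */
struct my_obj {
	refcount_t usage;
};

static void my_obj_init(struct my_obj *o)
{
	refcount_set(&o->usage, 1);		/* initial reference */
}

static void my_obj_get(struct my_obj *o)
{
	refcount_inc(&o->usage);		/* WARNs and saturates on overflow */
}

static bool my_obj_put(struct my_obj *o)
{
	return refcount_dec_and_test(&o->usage); /* true when the last ref is dropped */
}

For seccomp filters, whose reference count is only bounded by the number of processes, saturation turns a hypothetical counter overflow into a warning and a leak rather than a use-after-free.
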
 
 static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -481,7 +481,7 @@ void put_seccomp_filter(struct task_struct *tsk)
 {
 	struct seccomp_filter *orig = tsk->seccomp.filter;
 	/* Clean up single-reference branches iteratively. */
-	while (orig && atomic_dec_and_test(&orig->usage)) {
+	while (orig && refcount_dec_and_test(&orig->usage)) {
 		struct seccomp_filter *freeme = orig;
 		orig = orig->prev;
 		seccomp_filter_free(freeme);
@@ -641,11 +641,12 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 		return 0;
 
 	case SECCOMP_RET_KILL:
-	default: {
-		siginfo_t info;
+	default:
 		audit_seccomp(this_syscall, SIGSYS, action);
 		/* Dump core only if this is the last remaining thread. */
 		if (get_nr_threads(current) == 1) {
+			siginfo_t info;
+
 			/* Show the original registers in the dump. */
 			syscall_rollback(current, task_pt_regs(current));
 			/* Trigger a manual coredump since do_exit skips it. */
@@ -654,7 +655,6 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 		}
 		do_exit(SIGSYS);
 	}
-	}
 
 	unreachable();
 
diff --git a/kernel/signal.c b/kernel/signal.c
index ca92bcf..35a570f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -39,6 +39,7 @@
 #include <linux/compat.h>
 #include <linux/cn_proc.h>
 #include <linux/compiler.h>
+#include <linux/posix-timers.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -510,7 +511,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 	return !tsk->ptrace;
 }
 
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+			   bool *resched_timer)
 {
 	struct sigqueue *q, *first = NULL;
 
@@ -532,6 +534,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 still_pending:
 		list_del_init(&first->list);
 		copy_siginfo(info, &first->info);
+
+		*resched_timer =
+			(first->flags & SIGQUEUE_PREALLOC) &&
+			(info->si_code == SI_TIMER) &&
+			(info->si_sys_private);
+
 		__sigqueue_free(first);
 	} else {
 		/*
@@ -548,12 +556,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 }
 
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
-			siginfo_t *info)
+			siginfo_t *info, bool *resched_timer)
 {
 	int sig = next_signal(pending, mask);
 
 	if (sig)
-		collect_signal(sig, pending, info);
+		collect_signal(sig, pending, info, resched_timer);
 	return sig;
 }
 
@@ -565,15 +573,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
+	bool resched_timer = false;
 	int signr;
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
 	 */
-	signr = __dequeue_signal(&tsk->pending, mask, info);
+	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
 	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
-					 mask, info);
+					 mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
 		/*
 		 * itimer signal ?
@@ -621,7 +630,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		current->jobctl |= JOBCTL_STOP_DEQUEUED;
 	}
 #ifdef CONFIG_POSIX_TIMERS
-	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+	if (resched_timer) {
 		/*
 		 * Release the siglock to ensure proper locking order
 		 * of timer locks outside of siglocks.  Note, we leave
@@ -629,7 +638,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * about to disable them again anyway.
 		 */
 		spin_unlock(&tsk->sighand->siglock);
-		do_schedule_next_timer(info);
+		posixtimer_rearm(info);
 		spin_lock(&tsk->sighand->siglock);
 	}
 #endif
@@ -2092,7 +2101,6 @@ static void do_jobctl_trap(void)
 
 static int ptrace_signal(int signr, siginfo_t *info)
 {
-	ptrace_signal_deliver();
 	/*
 	 * We do not check sig_kernel_stop(signr) but set this marker
 	 * unconditionally because we do not know whether debugger will
diff --git a/kernel/smp.c b/kernel/smp.c
index a817769..3061483 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -30,6 +30,7 @@ enum {
 struct call_function_data {
 	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
+	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -45,9 +46,15 @@ int smpcfd_prepare_cpu(unsigned int cpu)
 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				     cpu_to_node(cpu)))
 		return -ENOMEM;
+	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				     cpu_to_node(cpu))) {
+		free_cpumask_var(cfd->cpumask);
+		return -ENOMEM;
+	}
 	cfd->csd = alloc_percpu(struct call_single_data);
 	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
+		free_cpumask_var(cfd->cpumask_ipi);
 		return -ENOMEM;
 	}
 
@@ -59,6 +66,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
 	free_cpumask_var(cfd->cpumask);
+	free_cpumask_var(cfd->cpumask_ipi);
 	free_percpu(cfd->csd);
 	return 0;
 }
@@ -428,12 +436,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	cfd = this_cpu_ptr(&cfd_data);
 
 	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(this_cpu, cfd->cpumask);
+	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
+	cpumask_clear(cfd->cpumask_ipi);
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 
@@ -442,11 +451,12 @@ void smp_call_function_many(const struct cpumask *mask,
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
 		csd->func = func;
 		csd->info = info;
-		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
+		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask);
+	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 1eb8266..b759126 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -552,7 +552,8 @@ static int __init cpu_stop_init(void)
 }
 early_initcall(cpu_stop_init);
 
-static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
+			    const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = {
 		.fn = fn,
@@ -561,6 +562,8 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
 		.active_cpus = cpus,
 	};
 
+	lockdep_assert_cpus_held();
+
 	if (!stop_machine_initialized) {
 		/*
 		 * Handle the case where stop_machine() is called
@@ -590,9 +593,9 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 	int ret;
 
 	/* No CPUs can come up or down during this. */
-	get_online_cpus();
-	ret = __stop_machine(fn, data, cpus);
-	put_online_cpus();
+	cpus_read_lock();
+	ret = stop_machine_cpuslocked(fn, data, cpus);
+	cpus_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
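
(Illustration, not from the diff: the new stop_machine_cpuslocked() variant is for callers that already hold the CPU hotplug read lock, where calling stop_machine() would try to take cpus_read_lock() again. my_update() and my_update_cpuslocked() are hypothetical.)

/* assumes <linux/stop_machine.h> and <linux/cpu.h> */
static int my_update(void *data)
{
	/* runs on one CPU while every other online CPU spins in the stopper */
	return 0;
}

/* caller is expected to hold cpus_read_lock() already */
static int my_update_cpuslocked(void *data)
{
	lockdep_assert_cpus_held();
	return stop_machine_cpuslocked(my_update, data, NULL);
}
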
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index ece4b17..939a158 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1119,7 +1119,7 @@ static ssize_t bin_uuid(struct file *file,
 	/* Only supports reads */
 	if (oldval && oldlen) {
 		char buf[UUID_STRING_LEN + 1];
-		uuid_be uuid;
+		uuid_t uuid;
 
 		result = kernel_read(file, 0, buf, sizeof(buf) - 1);
 		if (result < 0)
@@ -1128,7 +1128,7 @@ static ssize_t bin_uuid(struct file *file,
 		buf[result] = '\0';
 
 		result = -EIO;
-		if (uuid_be_to_bin(buf, &uuid))
+		if (uuid_parse(buf, &uuid))
 			goto out;
 
 		if (oldlen > 16)
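
(Illustration, not from the diff: uuid_parse() replaces uuid_be_to_bin() above; it takes the canonical 36-character string form and returns 0 on success or a negative error for malformed input. check_uuid_string() is a hypothetical helper mirroring the hunk's -EIO handling.)

/* assumes <linux/uuid.h> */
static int check_uuid_string(const char *buf)
{
	uuid_t uuid;

	if (uuid_parse(buf, &uuid))	/* non-zero means the string is malformed */
		return -EIO;
	return 0;
}
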
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 4008d9f..ac09bc2 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -126,56 +126,6 @@
 	 Note the boot CPU will still be kept outside the range to
 	 handle the timekeeping duty.
 
-config NO_HZ_FULL_SYSIDLE
-	bool "Detect full-system idle state for full dynticks system"
-	depends on NO_HZ_FULL
-	default n
-	help
-	 At least one CPU must keep the scheduling-clock tick running for
-	 timekeeping purposes whenever there is a non-idle CPU, where
-	 "non-idle" also includes dynticks CPUs as long as they are
-	 running non-idle tasks.  Because the underlying adaptive-tick
-	 support cannot distinguish between all CPUs being idle and
-	 all CPUs each running a single task in dynticks mode, the
-	 underlying support simply ensures that there is always a CPU
-	 handling the scheduling-clock tick, whether or not all CPUs
-	 are idle.  This Kconfig option enables scalable detection of
-	 the all-CPUs-idle state, thus allowing the scheduling-clock
-	 tick to be disabled when all CPUs are idle.  Note that scalable
-	 detection of the all-CPUs-idle state means that larger systems
-	 will be slower to declare the all-CPUs-idle state.
-
-	 Say Y if you would like to help debug all-CPUs-idle detection.
-
-	 Say N if you are unsure.
-
-config NO_HZ_FULL_SYSIDLE_SMALL
-	int "Number of CPUs above which large-system approach is used"
-	depends on NO_HZ_FULL_SYSIDLE
-	range 1 NR_CPUS
-	default 8
-	help
-	 The full-system idle detection mechanism takes a lazy approach
-	 on large systems, as is required to attain decent scalability.
-	 However, on smaller systems, scalability is not anywhere near as
-	 large a concern as is energy efficiency.  The sysidle subsystem
-	 therefore uses a fast but non-scalable algorithm for small
-	 systems and a lazier but scalable algorithm for large systems.
-	 This Kconfig parameter defines the number of CPUs in the largest
-	 system that will be considered to be "small".
-
-	 The default value will be fine in most cases.	Battery-powered
-	 systems that (1) enable NO_HZ_FULL_SYSIDLE, (2) have larger
-	 numbers of CPUs, and (3) are suffering from battery-lifetime
-	 problems due to long sysidle latencies might wish to experiment
-	 with larger values for this Kconfig parameter.  On the other
-	 hand, they might be even better served by disabling NO_HZ_FULL
-	 entirely, given that NO_HZ_FULL is intended for HPC and
-	 real-time workloads that at present do not tend to be run on
-	 battery-powered systems.
-
-	 Take the default if you are unsure.
-
 config NO_HZ
 	bool "Old Idle dynticks config"
 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
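
NO_HZ_FULL_SYSIDLE and NO_HZ_FULL_SYSIDLE_SMALL are removed outright along with the full-system-idle detection machinery they controlled. Existing configurations keep working; a .config fragment that set them simply loses the stale symbols, e.g. (assuming a NO_HZ_FULL build):

    # before this change
    CONFIG_NO_HZ_FULL=y
    CONFIG_NO_HZ_FULL_SYSIDLE=y
    CONFIG_NO_HZ_FULL_SYSIDLE_SMALL=8

    # after "make olddefconfig" against a tree with the symbols removed
    CONFIG_NO_HZ_FULL=y
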
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5cb5b00..c991cf2 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -27,6 +27,9 @@
 #include <linux/posix-timers.h>
 #include <linux/workqueue.h>
 #include <linux/freezer.h>
+#include <linux/compat.h>
+
+#include "posix-timers.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/alarmtimer.h>
@@ -45,11 +48,13 @@ static struct alarm_base {
 	clockid_t		base_clockid;
 } alarm_bases[ALARM_NUMTYPE];
 
+#if defined(CONFIG_POSIX_TIMERS) || defined(CONFIG_RTC_CLASS)
 /* freezer information to handle clock_nanosleep triggered wakeups */
 static enum alarmtimer_type freezer_alarmtype;
 static ktime_t freezer_expires;
 static ktime_t freezer_delta;
 static DEFINE_SPINLOCK(freezer_delta_lock);
+#endif
 
 static struct wakeup_source *ws;
 
@@ -307,38 +312,6 @@ static int alarmtimer_resume(struct device *dev)
 }
 #endif
 
-static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
-{
-	struct alarm_base *base;
-	unsigned long flags;
-	ktime_t delta;
-
-	switch(type) {
-	case ALARM_REALTIME:
-		base = &alarm_bases[ALARM_REALTIME];
-		type = ALARM_REALTIME_FREEZER;
-		break;
-	case ALARM_BOOTTIME:
-		base = &alarm_bases[ALARM_BOOTTIME];
-		type = ALARM_BOOTTIME_FREEZER;
-		break;
-	default:
-		WARN_ONCE(1, "Invalid alarm type: %d\n", type);
-		return;
-	}
-
-	delta = ktime_sub(absexp, base->gettime());
-
-	spin_lock_irqsave(&freezer_delta_lock, flags);
-	if (!freezer_delta || (delta < freezer_delta)) {
-		freezer_delta = delta;
-		freezer_expires = absexp;
-		freezer_alarmtype = type;
-	}
-	spin_unlock_irqrestore(&freezer_delta_lock, flags);
-}
-
-
 /**
  * alarm_init - Initialize an alarm structure
  * @alarm: ptr to alarm to be initialized
@@ -387,7 +360,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
 	struct alarm_base *base = &alarm_bases[alarm->type];
 
-	start = ktime_add(start, base->gettime());
+	start = ktime_add_safe(start, base->gettime());
 	alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -475,7 +448,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
 		overrun++;
 	}
 
-	alarm->node.expires = ktime_add(alarm->node.expires, interval);
+	alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
 	return overrun;
 }
 EXPORT_SYMBOL_GPL(alarm_forward);
@@ -488,6 +461,38 @@ u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
 }
 EXPORT_SYMBOL_GPL(alarm_forward_now);
 
+#ifdef CONFIG_POSIX_TIMERS
+
+static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
+{
+	struct alarm_base *base;
+	unsigned long flags;
+	ktime_t delta;
+
+	switch(type) {
+	case ALARM_REALTIME:
+		base = &alarm_bases[ALARM_REALTIME];
+		type = ALARM_REALTIME_FREEZER;
+		break;
+	case ALARM_BOOTTIME:
+		base = &alarm_bases[ALARM_BOOTTIME];
+		type = ALARM_BOOTTIME_FREEZER;
+		break;
+	default:
+		WARN_ONCE(1, "Invalid alarm type: %d\n", type);
+		return;
+	}
+
+	delta = ktime_sub(absexp, base->gettime());
+
+	spin_lock_irqsave(&freezer_delta_lock, flags);
+	if (!freezer_delta || (delta < freezer_delta)) {
+		freezer_delta = delta;
+		freezer_expires = absexp;
+		freezer_alarmtype = type;
+	}
+	spin_unlock_irqrestore(&freezer_delta_lock, flags);
+}
 
 /**
  * clock2alarm - helper that converts from clockid to alarmtypes
@@ -511,22 +516,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
 static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 							ktime_t now)
 {
-	unsigned long flags;
 	struct k_itimer *ptr = container_of(alarm, struct k_itimer,
-						it.alarm.alarmtimer);
+					    it.alarm.alarmtimer);
 	enum alarmtimer_restart result = ALARMTIMER_NORESTART;
+	unsigned long flags;
+	int si_private = 0;
 
 	spin_lock_irqsave(&ptr->it_lock, flags);
-	if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
-		if (IS_ENABLED(CONFIG_POSIX_TIMERS) &&
-		    posix_timer_event(ptr, 0) != 0)
-			ptr->it_overrun++;
-	}
 
-	/* Re-add periodic timers */
-	if (ptr->it.alarm.interval) {
-		ptr->it_overrun += alarm_forward(alarm, now,
-						ptr->it.alarm.interval);
+	ptr->it_active = 0;
+	if (ptr->it_interval)
+		si_private = ++ptr->it_requeue_pending;
+
+	if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
+		/*
+		 * Handle ignored signals and rearm the timer. This will go
+		 * away once we handle ignored signals properly.
+		 */
+		ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
+		++ptr->it_requeue_pending;
+		ptr->it_active = 1;
 		result = ALARMTIMER_RESTART;
 	}
 	spin_unlock_irqrestore(&ptr->it_lock, flags);
@@ -535,6 +544,72 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 }
 
 /**
+ * alarm_timer_rearm - Posix timer callback for rearming timer
+ * @timr:	Pointer to the posixtimer data struct
+ */
+static void alarm_timer_rearm(struct k_itimer *timr)
+{
+	struct alarm *alarm = &timr->it.alarm.alarmtimer;
+
+	timr->it_overrun += alarm_forward_now(alarm, timr->it_interval);
+	alarm_start(alarm, alarm->node.expires);
+}
+
+/**
+ * alarm_timer_forward - Posix timer callback for forwarding timer
+ * @timr:	Pointer to the posixtimer data struct
+ * @now:	Current time to forward the timer against
+ */
+static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+{
+	struct alarm *alarm = &timr->it.alarm.alarmtimer;
+
+	return (int) alarm_forward(alarm, timr->it_interval, now);
+}
+
+/**
+ * alarm_timer_remaining - Posix timer callback to retrieve remaining time
+ * @timr:	Pointer to the posixtimer data struct
+ * @now:	Current time to calculate against
+ */
+static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
+{
+	struct alarm *alarm = &timr->it.alarm.alarmtimer;
+
+	return ktime_sub(now, alarm->node.expires);
+}
+
+/**
+ * alarm_timer_try_to_cancel - Posix timer callback to cancel a timer
+ * @timr:	Pointer to the posixtimer data struct
+ */
+static int alarm_timer_try_to_cancel(struct k_itimer *timr)
+{
+	return alarm_try_to_cancel(&timr->it.alarm.alarmtimer);
+}
+
+/**
+ * alarm_timer_arm - Posix timer callback to arm a timer
+ * @timr:	Pointer to the posixtimer data struct
+ * @expires:	The new expiry time
+ * @absolute:	Expiry value is absolute time
+ * @sigev_none:	Posix timer does not deliver signals
+ */
+static void alarm_timer_arm(struct k_itimer *timr, ktime_t expires,
+			    bool absolute, bool sigev_none)
+{
+	struct alarm *alarm = &timr->it.alarm.alarmtimer;
+	struct alarm_base *base = &alarm_bases[alarm->type];
+
+	if (!absolute)
+		expires = ktime_add_safe(expires, base->gettime());
+	if (sigev_none)
+		alarm->node.expires = expires;
+	else
+		alarm_start(&timr->it.alarm.alarmtimer, expires);
+}
+
+/**
  * alarm_clock_getres - posix getres interface
  * @which_clock: clockid
  * @tp: timespec to fill
@@ -591,89 +666,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 }
 
 /**
- * alarm_timer_get - posix timer_get interface
- * @new_timer: k_itimer pointer
- * @cur_setting: itimerspec data to fill
- *
- * Copies out the current itimerspec data
- */
-static void alarm_timer_get(struct k_itimer *timr,
-			    struct itimerspec64 *cur_setting)
-{
-	ktime_t relative_expiry_time =
-		alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
-
-	if (ktime_to_ns(relative_expiry_time) > 0) {
-		cur_setting->it_value = ktime_to_timespec64(relative_expiry_time);
-	} else {
-		cur_setting->it_value.tv_sec = 0;
-		cur_setting->it_value.tv_nsec = 0;
-	}
-
-	cur_setting->it_interval = ktime_to_timespec64(timr->it.alarm.interval);
-}
-
-/**
- * alarm_timer_del - posix timer_del interface
- * @timr: k_itimer pointer to be deleted
- *
- * Cancels any programmed alarms for the given timer.
- */
-static int alarm_timer_del(struct k_itimer *timr)
-{
-	if (!rtcdev)
-		return -ENOTSUPP;
-
-	if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
-		return TIMER_RETRY;
-
-	return 0;
-}
-
-/**
- * alarm_timer_set - posix timer_set interface
- * @timr: k_itimer pointer to be deleted
- * @flags: timer flags
- * @new_setting: itimerspec to be used
- * @old_setting: itimerspec being replaced
- *
- * Sets the timer to new_setting, and starts the timer.
- */
-static int alarm_timer_set(struct k_itimer *timr, int flags,
-			   struct itimerspec64 *new_setting,
-			   struct itimerspec64 *old_setting)
-{
-	ktime_t exp;
-
-	if (!rtcdev)
-		return -ENOTSUPP;
-
-	if (flags & ~TIMER_ABSTIME)
-		return -EINVAL;
-
-	if (old_setting)
-		alarm_timer_get(timr, old_setting);
-
-	/* If the timer was already set, cancel it */
-	if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
-		return TIMER_RETRY;
-
-	/* start the timer */
-	timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval);
-	exp = timespec64_to_ktime(new_setting->it_value);
-	/* Convert (if necessary) to absolute time */
-	if (flags != TIMER_ABSTIME) {
-		ktime_t now;
-
-		now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
-		exp = ktime_add(now, exp);
-	}
-
-	alarm_start(&timr->it.alarm.alarmtimer, exp);
-	return 0;
-}
-
-/**
  * alarmtimer_nsleep_wakeup - Wakeup function for alarm_timer_nsleep
  * @alarm: ptr to alarm that fired
  *
@@ -697,8 +689,10 @@ static enum alarmtimer_restart alarmtimer_nsleep_wakeup(struct alarm *alarm,
  *
  * Sets the alarm timer and sleeps until it is fired or interrupted.
  */
-static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp)
+static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
+				enum alarmtimer_type type)
 {
+	struct restart_block *restart;
 	alarm->data = (void *)current;
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -711,36 +705,25 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp)
 
 	__set_current_state(TASK_RUNNING);
 
-	return (alarm->data == NULL);
-}
-
-
-/**
- * update_rmtp - Update remaining timespec value
- * @exp: expiration time
- * @type: timer type
- * @rmtp: user pointer to remaining timepsec value
- *
- * Helper function that fills in rmtp value with time between
- * now and the exp value
- */
-static int update_rmtp(ktime_t exp, enum  alarmtimer_type type,
-			struct timespec __user *rmtp)
-{
-	struct timespec rmt;
-	ktime_t rem;
-
-	rem = ktime_sub(exp, alarm_bases[type].gettime());
-
-	if (rem <= 0)
+	if (!alarm->data)
 		return 0;
-	rmt = ktime_to_timespec(rem);
 
-	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
-		return -EFAULT;
+	if (freezing(current))
+		alarmtimer_freezerset(absexp, type);
+	restart = &current->restart_block;
+	if (restart->nanosleep.type != TT_NONE) {
+		struct timespec rmt;
+		ktime_t rem;
 
-	return 1;
+		rem = ktime_sub(absexp, alarm_bases[type].gettime());
 
+		if (rem <= 0)
+			return 0;
+		rmt = ktime_to_timespec(rem);
+
+		return nanosleep_copyout(restart, &rmt);
+	}
+	return -ERESTART_RESTARTBLOCK;
 }
 
 /**
@@ -752,32 +735,12 @@ static int update_rmtp(ktime_t exp, enum  alarmtimer_type type,
 static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
 {
 	enum  alarmtimer_type type = restart->nanosleep.clockid;
-	ktime_t exp;
-	struct timespec __user  *rmtp;
+	ktime_t exp = restart->nanosleep.expires;
 	struct alarm alarm;
-	int ret = 0;
 
-	exp = restart->nanosleep.expires;
 	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
 
-	if (alarmtimer_do_nsleep(&alarm, exp))
-		goto out;
-
-	if (freezing(current))
-		alarmtimer_freezerset(exp, type);
-
-	rmtp = restart->nanosleep.rmtp;
-	if (rmtp) {
-		ret = update_rmtp(exp, type, rmtp);
-		if (ret <= 0)
-			goto out;
-	}
-
-
-	/* The other values in restart are already filled in */
-	ret = -ERESTART_RESTARTBLOCK;
-out:
-	return ret;
+	return alarmtimer_do_nsleep(&alarm, exp, type);
 }
 
 /**
@@ -790,11 +753,10 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
  * Handles clock_nanosleep calls against _ALARM clockids
  */
 static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
-			      struct timespec64 *tsreq,
-			      struct timespec __user *rmtp)
+			      const struct timespec64 *tsreq)
 {
 	enum  alarmtimer_type type = clock2alarm(which_clock);
-	struct restart_block *restart;
+	struct restart_block *restart = &current->restart_block;
 	struct alarm alarm;
 	ktime_t exp;
 	int ret = 0;
@@ -817,35 +779,36 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 		exp = ktime_add(now, exp);
 	}
 
-	if (alarmtimer_do_nsleep(&alarm, exp))
-		goto out;
-
-	if (freezing(current))
-		alarmtimer_freezerset(exp, type);
+	ret = alarmtimer_do_nsleep(&alarm, exp, type);
+	if (ret != -ERESTART_RESTARTBLOCK)
+		return ret;
 
 	/* abs timers don't set remaining time or restart */
-	if (flags == TIMER_ABSTIME) {
-		ret = -ERESTARTNOHAND;
-		goto out;
-	}
+	if (flags == TIMER_ABSTIME)
+		return -ERESTARTNOHAND;
 
-	if (rmtp) {
-		ret = update_rmtp(exp, type, rmtp);
-		if (ret <= 0)
-			goto out;
-	}
-
-	restart = &current->restart_block;
 	restart->fn = alarm_timer_nsleep_restart;
 	restart->nanosleep.clockid = type;
 	restart->nanosleep.expires = exp;
-	restart->nanosleep.rmtp = rmtp;
-	ret = -ERESTART_RESTARTBLOCK;
-
-out:
 	return ret;
 }
 
+const struct k_clock alarm_clock = {
+	.clock_getres		= alarm_clock_getres,
+	.clock_get		= alarm_clock_get,
+	.timer_create		= alarm_timer_create,
+	.timer_set		= common_timer_set,
+	.timer_del		= common_timer_del,
+	.timer_get		= common_timer_get,
+	.timer_arm		= alarm_timer_arm,
+	.timer_rearm		= alarm_timer_rearm,
+	.timer_forward		= alarm_timer_forward,
+	.timer_remaining	= alarm_timer_remaining,
+	.timer_try_to_cancel	= alarm_timer_try_to_cancel,
+	.nsleep			= alarm_timer_nsleep,
+};
+#endif /* CONFIG_POSIX_TIMERS */
+
 
 /* Suspend hook structures */
 static const struct dev_pm_ops alarmtimer_pm_ops = {
@@ -871,23 +834,9 @@ static int __init alarmtimer_init(void)
 	struct platform_device *pdev;
 	int error = 0;
 	int i;
-	struct k_clock alarm_clock = {
-		.clock_getres	= alarm_clock_getres,
-		.clock_get	= alarm_clock_get,
-		.timer_create	= alarm_timer_create,
-		.timer_set	= alarm_timer_set,
-		.timer_del	= alarm_timer_del,
-		.timer_get	= alarm_timer_get,
-		.nsleep		= alarm_timer_nsleep,
-	};
 
 	alarmtimer_rtc_timer_init();
 
-	if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
-		posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
-		posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
-	}
-
 	/* Initialize alarm bases */
 	alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
 	alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
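
The alarmtimer rework has two visible threads: the CLOCK_*_ALARM clocks now go through the common posix-timer set/get/del code via the new timer_arm/rearm/forward/remaining callbacks, and relative arming plus periodic forwarding switch from ktime_add() to ktime_add_safe(), which saturates instead of wrapping when userspace passes an enormous expiry. A tiny illustration of that difference (values chosen only to force the overflow):

    ktime_t base = ktime_set(KTIME_SEC_MAX - 1, 0);
    ktime_t big  = ktime_set(1000, 0);

    ktime_add(base, big);           /* can wrap into a negative ktime_t */
    ktime_add_safe(base, big);      /* saturates at KTIME_MAX instead */
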
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 93621ae..03918a1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -233,6 +233,9 @@ static void clocksource_watchdog(unsigned long data)
 			continue;
 		}
 
+		if (cs == curr_clocksource && cs->tick_stable)
+			cs->tick_stable(cs);
+
 		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
 		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
 		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
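
The clocksource watchdog now gives the in-use clocksource a chance to note that it just passed a stability check, through an optional tick_stable() callback (the struct clocksource member is added elsewhere in this series; x86's TSC driver is the expected user, feeding sched_clock). A hedged sketch of a clocksource that hooks it, with hypothetical my_cs_* names:

    static void my_cs_tick_stable(struct clocksource *cs)
    {
            /* Called from clocksource_watchdog() while @cs is the current
             * clocksource and the periodic sanity check passed. */
    }

    static struct clocksource my_cs = {
            .name           = "my_cs",
            .rating         = 250,
            .read           = my_cs_read,           /* hypothetical read hook */
            .mask           = CLOCKSOURCE_MASK(64),
            .flags          = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
            .tick_stable    = my_cs_tick_stable,
    };
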
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ac053bb..81da124 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -51,6 +51,7 @@
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -1439,8 +1440,29 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 
+int nanosleep_copyout(struct restart_block *restart, struct timespec *ts)
+{
+	switch(restart->nanosleep.type) {
+#ifdef CONFIG_COMPAT
+	case TT_COMPAT:
+		if (compat_put_timespec(ts, restart->nanosleep.compat_rmtp))
+			return -EFAULT;
+		break;
+#endif
+	case TT_NATIVE:
+		if (copy_to_user(restart->nanosleep.rmtp, ts, sizeof(struct timespec)))
+			return -EFAULT;
+		break;
+	default:
+		BUG();
+	}
+	return -ERESTART_RESTARTBLOCK;
+}
+
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
 {
+	struct restart_block *restart;
+
 	hrtimer_init_sleeper(t, current);
 
 	do {
@@ -1457,53 +1479,38 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
 
 	__set_current_state(TASK_RUNNING);
 
-	return t->task == NULL;
-}
-
-static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
-{
-	struct timespec rmt;
-	ktime_t rem;
-
-	rem = hrtimer_expires_remaining(timer);
-	if (rem <= 0)
+	if (!t->task)
 		return 0;
-	rmt = ktime_to_timespec(rem);
 
-	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
-		return -EFAULT;
+	restart = &current->restart_block;
+	if (restart->nanosleep.type != TT_NONE) {
+		ktime_t rem = hrtimer_expires_remaining(&t->timer);
+		struct timespec rmt;
 
-	return 1;
+		if (rem <= 0)
+			return 0;
+		rmt = ktime_to_timespec(rem);
+
+		return nanosleep_copyout(restart, &rmt);
+	}
+	return -ERESTART_RESTARTBLOCK;
 }
 
-long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 {
 	struct hrtimer_sleeper t;
-	struct timespec __user  *rmtp;
-	int ret = 0;
+	int ret;
 
 	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
 				HRTIMER_MODE_ABS);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
-	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
-		goto out;
-
-	rmtp = restart->nanosleep.rmtp;
-	if (rmtp) {
-		ret = update_rmtp(&t.timer, rmtp);
-		if (ret <= 0)
-			goto out;
-	}
-
-	/* The other values in restart are already filled in */
-	ret = -ERESTART_RESTARTBLOCK;
-out:
+	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
 	destroy_hrtimer_on_stack(&t.timer);
 	return ret;
 }
 
-long hrtimer_nanosleep(struct timespec64 *rqtp, struct timespec __user *rmtp,
+long hrtimer_nanosleep(const struct timespec64 *rqtp,
 		       const enum hrtimer_mode mode, const clockid_t clockid)
 {
 	struct restart_block *restart;
@@ -1517,7 +1524,8 @@ long hrtimer_nanosleep(struct timespec64 *rqtp, struct timespec __user *rmtp,
 
 	hrtimer_init_on_stack(&t.timer, clockid, mode);
 	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
-	if (do_nanosleep(&t, mode))
+	ret = do_nanosleep(&t, mode);
+	if (ret != -ERESTART_RESTARTBLOCK)
 		goto out;
 
 	/* Absolute timers do not update the rmtp value and restart: */
@@ -1526,19 +1534,10 @@ long hrtimer_nanosleep(struct timespec64 *rqtp, struct timespec __user *rmtp,
 		goto out;
 	}
 
-	if (rmtp) {
-		ret = update_rmtp(&t.timer, rmtp);
-		if (ret <= 0)
-			goto out;
-	}
-
 	restart = &current->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
 	restart->nanosleep.clockid = t.timer.base->clockid;
-	restart->nanosleep.rmtp = rmtp;
 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
-
-	ret = -ERESTART_RESTARTBLOCK;
 out:
 	destroy_hrtimer_on_stack(&t.timer);
 	return ret;
@@ -1557,9 +1556,32 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 	if (!timespec64_valid(&tu64))
 		return -EINVAL;
 
-	return hrtimer_nanosleep(&tu64, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+	current->restart_block.nanosleep.rmtp = rmtp;
+	return hrtimer_nanosleep(&tu64, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
 }
 
+#ifdef CONFIG_COMPAT
+
+COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
+		       struct compat_timespec __user *, rmtp)
+{
+	struct timespec64 tu64;
+	struct timespec tu;
+
+	if (compat_get_timespec(&tu, rqtp))
+		return -EFAULT;
+
+	tu64 = timespec_to_timespec64(tu);
+	if (!timespec64_valid(&tu64))
+		return -EINVAL;
+
+	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+	current->restart_block.nanosleep.compat_rmtp = rmtp;
+	return hrtimer_nanosleep(&tu64, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+}
+#endif
+
 /*
  * Functions related to boot-time initialization:
  */
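
In hrtimer.c the rmtp copy-out moves into do_nanosleep() itself: the syscall entry points only record where the result should go (restart_block.nanosleep.type set to TT_NONE, TT_NATIVE or TT_COMPAT) and nanosleep_copyout() picks the right copy routine. Userspace behaviour is unchanged; the remaining time is still written only when the sleep is interrupted, as this small standalone program (not part of the patch) demonstrates:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static void on_alrm(int sig) { (void)sig; }

    int main(void)
    {
            struct timespec req = { .tv_sec = 5 }, rem = { 0 };

            signal(SIGALRM, on_alrm);
            alarm(1);                       /* interrupt the sleep after ~1s */
            if (nanosleep(&req, &rem) == -1)
                    printf("interrupted with %ld.%09ld s left\n",
                           (long)rem.tv_sec, rem.tv_nsec);
            return 0;
    }
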
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 087d6a1..2ef98a0 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -15,6 +15,7 @@
 #include <linux/posix-timers.h>
 #include <linux/hrtimer.h>
 #include <trace/events/timer.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -116,6 +117,19 @@ SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
+		       struct compat_itimerval __user *, it)
+{
+	struct itimerval kit;
+	int error = do_getitimer(which, &kit);
+
+	if (!error && put_compat_itimerval(it, &kit))
+		error = -EFAULT;
+	return error;
+}
+#endif
+
 
 /*
  * The timer is automagically restarted, when interval != 0
@@ -138,8 +152,12 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	u64 oval, nval, ointerval, ninterval;
 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
-	nval = timeval_to_ns(&value->it_value);
-	ninterval = timeval_to_ns(&value->it_interval);
+	/*
+	 * Use the to_ktime conversion because that clamps the maximum
+	 * value to KTIME_MAX and avoids multiplication overflows.
+	 */
+	nval = ktime_to_ns(timeval_to_ktime(value->it_value));
+	ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));
 
 	spin_lock_irq(&tsk->sighand->siglock);
 
@@ -294,3 +312,27 @@ SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
 		return -EFAULT;
 	return 0;
 }
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
+		       struct compat_itimerval __user *, in,
+		       struct compat_itimerval __user *, out)
+{
+	struct itimerval kin, kout;
+	int error;
+
+	if (in) {
+		if (get_compat_itimerval(&kin, in))
+			return -EFAULT;
+	} else {
+		memset(&kin, 0, sizeof(kin));
+	}
+
+	error = do_setitimer(which, &kin, out ? &kout : NULL);
+	if (error || !out)
+		return error;
+	if (put_compat_itimerval(out, &kout))
+		return -EFAULT;
+	return 0;
+}
+#endif
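
Besides the compat getitimer()/setitimer() entry points, set_cpu_itimer() now converts the user-supplied timevals through ktime so that absurdly large it_value/it_interval settings are clamped rather than overflowing the nanosecond multiplication. Roughly (illustrative values only):

    struct timeval tv = { .tv_sec = 1LL << 60 };

    timeval_to_ns(&tv);                     /* tv_sec * NSEC_PER_SEC overflows 64 bits */
    ktime_to_ns(timeval_to_ktime(tv));      /* ktime_set() clamps this to KTIME_MAX */
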
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 31d588d..17cdc55 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -25,6 +25,8 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 
+#include "posix-timers.h"
+
 static void delete_clock(struct kref *kref);
 
 /*
@@ -82,38 +84,6 @@ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
 	return result;
 }
 
-static int posix_clock_fasync(int fd, struct file *fp, int on)
-{
-	struct posix_clock *clk = get_posix_clock(fp);
-	int err = 0;
-
-	if (!clk)
-		return -ENODEV;
-
-	if (clk->ops.fasync)
-		err = clk->ops.fasync(clk, fd, fp, on);
-
-	put_posix_clock(clk);
-
-	return err;
-}
-
-static int posix_clock_mmap(struct file *fp, struct vm_area_struct *vma)
-{
-	struct posix_clock *clk = get_posix_clock(fp);
-	int err = -ENODEV;
-
-	if (!clk)
-		return -ENODEV;
-
-	if (clk->ops.mmap)
-		err = clk->ops.mmap(clk, vma);
-
-	put_posix_clock(clk);
-
-	return err;
-}
-
 static long posix_clock_ioctl(struct file *fp,
 			      unsigned int cmd, unsigned long arg)
 {
@@ -199,8 +169,6 @@ static const struct file_operations posix_clock_file_operations = {
 	.unlocked_ioctl	= posix_clock_ioctl,
 	.open		= posix_clock_open,
 	.release	= posix_clock_release,
-	.fasync		= posix_clock_fasync,
-	.mmap		= posix_clock_mmap,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= posix_clock_compat_ioctl,
 #endif
@@ -359,88 +327,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
 	return err;
 }
 
-static int pc_timer_create(struct k_itimer *kit)
-{
-	clockid_t id = kit->it_clock;
-	struct posix_clock_desc cd;
-	int err;
-
-	err = get_clock_desc(id, &cd);
-	if (err)
-		return err;
-
-	if (cd.clk->ops.timer_create)
-		err = cd.clk->ops.timer_create(cd.clk, kit);
-	else
-		err = -EOPNOTSUPP;
-
-	put_clock_desc(&cd);
-
-	return err;
-}
-
-static int pc_timer_delete(struct k_itimer *kit)
-{
-	clockid_t id = kit->it_clock;
-	struct posix_clock_desc cd;
-	int err;
-
-	err = get_clock_desc(id, &cd);
-	if (err)
-		return err;
-
-	if (cd.clk->ops.timer_delete)
-		err = cd.clk->ops.timer_delete(cd.clk, kit);
-	else
-		err = -EOPNOTSUPP;
-
-	put_clock_desc(&cd);
-
-	return err;
-}
-
-static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec64 *ts)
-{
-	clockid_t id = kit->it_clock;
-	struct posix_clock_desc cd;
-
-	if (get_clock_desc(id, &cd))
-		return;
-
-	if (cd.clk->ops.timer_gettime)
-		cd.clk->ops.timer_gettime(cd.clk, kit, ts);
-
-	put_clock_desc(&cd);
-}
-
-static int pc_timer_settime(struct k_itimer *kit, int flags,
-			    struct itimerspec64 *ts, struct itimerspec64 *old)
-{
-	clockid_t id = kit->it_clock;
-	struct posix_clock_desc cd;
-	int err;
-
-	err = get_clock_desc(id, &cd);
-	if (err)
-		return err;
-
-	if (cd.clk->ops.timer_settime)
-		err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old);
-	else
-		err = -EOPNOTSUPP;
-
-	put_clock_desc(&cd);
-
-	return err;
-}
-
-struct k_clock clock_posix_dynamic = {
+const struct k_clock clock_posix_dynamic = {
 	.clock_getres	= pc_clock_getres,
 	.clock_set	= pc_clock_settime,
 	.clock_get	= pc_clock_gettime,
 	.clock_adj	= pc_clock_adjtime,
-	.timer_create	= pc_timer_create,
-	.timer_set	= pc_timer_settime,
-	.timer_del	= pc_timer_delete,
-	.timer_get	= pc_timer_gettime,
 };
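
The posix-clock.c hunks drop the fasync/mmap file operations and the pc_timer_* forwarding stubs, apparently because no posix_clock driver ever provided the corresponding ops, leaving clock_posix_dynamic with only the clock get/set/adj/getres operations. Dynamic clocks are still addressed from userspace by turning a character-device file descriptor into a clockid, e.g. for a PTP clock (sketch; /dev/ptp0 assumed to exist):

    #define FD_TO_CLOCKID(fd)       ((~(clockid_t)(fd) << 3) | 3)   /* 3 == CLOCKFD */

    int fd = open("/dev/ptp0", O_RDONLY);
    struct timespec ts;

    if (fd >= 0 && !clock_gettime(FD_TO_CLOCKID(fd), &ts))
            printf("ptp time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
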
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d2a1e6d..60cb24a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -12,6 +12,11 @@
 #include <trace/events/timer.h>
 #include <linux/tick.h>
 #include <linux/workqueue.h>
+#include <linux/compat.h>
+
+#include "posix-timers.h"
+
+static void posix_cpu_timer_rearm(struct k_itimer *timer);
 
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -322,6 +327,8 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
 		return -EINVAL;
 
+	new_timer->kclock = &clock_posix_cpu;
+
 	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
 
 	rcu_read_lock();
@@ -524,7 +531,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 		 * reload the timer.  But we need to keep it
 		 * ticking in case the signal is deliverable next time.
 		 */
-		posix_cpu_timer_schedule(timer);
+		posix_cpu_timer_rearm(timer);
+		++timer->it_requeue_pending;
 	}
 }
 
@@ -572,7 +580,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 	WARN_ON_ONCE(p == NULL);
 
-	new_expires = timespec64_to_ns(&new->it_value);
+	/*
+	 * Use the to_ktime conversion because that clamps the maximum
+	 * value to KTIME_MAX and avoids multiplication overflows.
+	 */
+	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
 
 	/*
 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -712,10 +724,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
 	 */
 	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);
 
-	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
-		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
+	if (!timer->it.cpu.expires)
 		return;
-	}
 
 	/*
 	 * Sample the clock to take the difference with the expiry time.
@@ -739,7 +749,6 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
 			 * Call the timer disarmed, nothing else to do.
 			 */
 			timer->it.cpu.expires = 0;
-			itp->it_value = ns_to_timespec64(timer->it.cpu.expires);
 			return;
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -976,10 +985,10 @@ static void check_process_timers(struct task_struct *tsk,
 }
 
 /*
- * This is called from the signal code (via do_schedule_next_timer)
+ * This is called from the signal code (via posixtimer_rearm)
  * when the last timer signal was delivered and we have to reload the timer.
  */
-void posix_cpu_timer_schedule(struct k_itimer *timer)
+static void posix_cpu_timer_rearm(struct k_itimer *timer)
 {
 	struct sighand_struct *sighand;
 	unsigned long flags;
@@ -995,12 +1004,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		if (unlikely(p->exit_state))
-			goto out;
+			return;
 
 		/* Protect timer list r/w in arm_timer() */
 		sighand = lock_task_sighand(p, &flags);
 		if (!sighand)
-			goto out;
+			return;
 	} else {
 		/*
 		 * Protect arm_timer() and timer sampling in case of call to
@@ -1013,11 +1022,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			goto out;
+			return;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			unlock_task_sighand(p, &flags);
-			/* Optimizations: if the process is dying, no need to rearm */
-			goto out;
+			/* If the process is dying, no need to rearm */
+			goto unlock;
 		}
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
@@ -1029,12 +1037,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	 */
 	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
+unlock:
 	unlock_task_sighand(p, &flags);
-
-out:
-	timer->it_overrun_last = timer->it_overrun;
-	timer->it_overrun = -1;
-	++timer->it_requeue_pending;
 }
 
 /**
@@ -1227,9 +1231,11 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 }
 
 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
-			    struct timespec64 *rqtp, struct itimerspec64 *it)
+			    const struct timespec64 *rqtp)
 {
+	struct itimerspec64 it;
 	struct k_itimer timer;
+	u64 expires;
 	int error;
 
 	/*
@@ -1243,12 +1249,13 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	timer.it_process = current;
 	if (!error) {
 		static struct itimerspec64 zero_it;
+		struct restart_block *restart;
 
-		memset(it, 0, sizeof *it);
-		it->it_value = *rqtp;
+		memset(&it, 0, sizeof(it));
+		it.it_value = *rqtp;
 
 		spin_lock_irq(&timer.it_lock);
-		error = posix_cpu_timer_set(&timer, flags, it, NULL);
+		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
 		if (error) {
 			spin_unlock_irq(&timer.it_lock);
 			return error;
@@ -1277,8 +1284,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		/*
 		 * We were interrupted by a signal.
 		 */
-		*rqtp = ns_to_timespec64(timer.it.cpu.expires);
-		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
+		expires = timer.it.cpu.expires;
+		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
 		if (!error) {
 			/*
 			 * Timer is now unarmed, deletion can not fail.
@@ -1298,7 +1305,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 			spin_unlock_irq(&timer.it_lock);
 		}
 
-		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
 			/*
 			 * It actually did fire already.
 			 */
@@ -1306,6 +1313,17 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		}
 
 		error = -ERESTART_RESTARTBLOCK;
+		/*
+		 * Report back to the user the time still remaining.
+		 */
+		restart = &current->restart_block;
+		restart->nanosleep.expires = expires;
+		if (restart->nanosleep.type != TT_NONE) {
+			struct timespec ts;
+
+			ts = timespec64_to_timespec(it.it_value);
+			error = nanosleep_copyout(restart, &ts);
+		}
 	}
 
 	return error;
@@ -1314,11 +1332,9 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
 
 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-			    struct timespec64 *rqtp, struct timespec __user *rmtp)
+			    const struct timespec64 *rqtp)
 {
 	struct restart_block *restart_block = &current->restart_block;
-	struct itimerspec64 it;
-	struct timespec ts;
 	int error;
 
 	/*
@@ -1329,23 +1345,15 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
 		return -EINVAL;
 
-	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
+	error = do_cpu_nanosleep(which_clock, flags, rqtp);
 
 	if (error == -ERESTART_RESTARTBLOCK) {
 
 		if (flags & TIMER_ABSTIME)
 			return -ERESTARTNOHAND;
-		/*
-		 * Report back to the user the time still remaining.
-		 */
-		ts = timespec64_to_timespec(it.it_value);
-		if (rmtp && copy_to_user(rmtp, &ts, sizeof(*rmtp)))
-			return -EFAULT;
 
 		restart_block->fn = posix_cpu_nsleep_restart;
 		restart_block->nanosleep.clockid = which_clock;
-		restart_block->nanosleep.rmtp = rmtp;
-		restart_block->nanosleep.expires = timespec64_to_ns(rqtp);
 	}
 	return error;
 }
@@ -1353,28 +1361,11 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
 {
 	clockid_t which_clock = restart_block->nanosleep.clockid;
-	struct itimerspec64 it;
 	struct timespec64 t;
-	struct timespec tmp;
-	int error;
 
 	t = ns_to_timespec64(restart_block->nanosleep.expires);
 
-	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
-
-	if (error == -ERESTART_RESTARTBLOCK) {
-		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
-		/*
-		 * Report back to the user the time still remaining.
-		 */
-		 tmp = timespec64_to_timespec(it.it_value);
-		if (rmtp && copy_to_user(rmtp, &tmp, sizeof(*rmtp)))
-			return -EFAULT;
-
-		restart_block->nanosleep.expires = timespec64_to_ns(&t);
-	}
-	return error;
-
+	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
 }
 
 #define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
@@ -1396,14 +1387,9 @@ static int process_cpu_timer_create(struct k_itimer *timer)
 	return posix_cpu_timer_create(timer);
 }
 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
-			      struct timespec64 *rqtp,
-			      struct timespec __user *rmtp)
+			      const struct timespec64 *rqtp)
 {
-	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
-}
-static long process_cpu_nsleep_restart(struct restart_block *restart_block)
-{
-	return -EINVAL;
+	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
 }
 static int thread_cpu_clock_getres(const clockid_t which_clock,
 				   struct timespec64 *tp)
@@ -1421,36 +1407,27 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
 	return posix_cpu_timer_create(timer);
 }
 
-struct k_clock clock_posix_cpu = {
+const struct k_clock clock_posix_cpu = {
 	.clock_getres	= posix_cpu_clock_getres,
 	.clock_set	= posix_cpu_clock_set,
 	.clock_get	= posix_cpu_clock_get,
 	.timer_create	= posix_cpu_timer_create,
 	.nsleep		= posix_cpu_nsleep,
-	.nsleep_restart	= posix_cpu_nsleep_restart,
 	.timer_set	= posix_cpu_timer_set,
 	.timer_del	= posix_cpu_timer_del,
 	.timer_get	= posix_cpu_timer_get,
+	.timer_rearm	= posix_cpu_timer_rearm,
 };
 
-static __init int init_posix_cpu_timers(void)
-{
-	struct k_clock process = {
-		.clock_getres	= process_cpu_clock_getres,
-		.clock_get	= process_cpu_clock_get,
-		.timer_create	= process_cpu_timer_create,
-		.nsleep		= process_cpu_nsleep,
-		.nsleep_restart	= process_cpu_nsleep_restart,
-	};
-	struct k_clock thread = {
-		.clock_getres	= thread_cpu_clock_getres,
-		.clock_get	= thread_cpu_clock_get,
-		.timer_create	= thread_cpu_timer_create,
-	};
+const struct k_clock clock_process = {
+	.clock_getres	= process_cpu_clock_getres,
+	.clock_get	= process_cpu_clock_get,
+	.timer_create	= process_cpu_timer_create,
+	.nsleep		= process_cpu_nsleep,
+};
 
-	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
-	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
-
-	return 0;
-}
-__initcall(init_posix_cpu_timers);
+const struct k_clock clock_thread = {
+	.clock_getres	= thread_cpu_clock_getres,
+	.clock_get	= thread_cpu_clock_get,
+	.timer_create	= thread_cpu_timer_create,
+};
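
clock_process and clock_thread become statically defined const k_clock tables instead of being registered at boot, and requeueing of expired CPU timers moves into the timer_rearm() callback driven from posixtimer_rearm(). Userspace sees no change; for example a periodic per-thread CPU-time timer still works as before (sketch, error handling omitted):

    timer_t tid;
    struct sigevent sev = {
            .sigev_notify = SIGEV_SIGNAL,
            .sigev_signo  = SIGALRM,
    };
    struct itimerspec its = {
            .it_value    = { .tv_nsec = 100 * 1000 * 1000 },    /* first fire: 100ms of CPU */
            .it_interval = { .tv_nsec = 100 * 1000 * 1000 },    /* then every further 100ms */
    };

    timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
    timer_settime(tid, 0, &its, NULL);
    /* Each periodic expiry is re-armed through timr->kclock->timer_rearm(),
     * i.e. posix_cpu_timer_rearm() for this clock. */
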
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index c0cd53e..38f3b20 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -17,6 +17,7 @@
 #include <linux/ktime.h>
 #include <linux/timekeeping.h>
 #include <linux/posix-timers.h>
+#include <linux/compat.h>
 
 asmlinkage long sys_ni_posix_timers(void)
 {
@@ -27,6 +28,7 @@ asmlinkage long sys_ni_posix_timers(void)
 }
 
 #define SYS_NI(name)  SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
+#define COMPAT_SYS_NI(name)  SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers)
 
 SYS_NI(timer_create);
 SYS_NI(timer_gettime);
@@ -39,6 +41,12 @@ SYS_NI(setitimer);
 #ifdef __ARCH_WANT_SYS_ALARM
 SYS_NI(alarm);
 #endif
+COMPAT_SYS_NI(timer_create);
+COMPAT_SYS_NI(clock_adjtime);
+COMPAT_SYS_NI(timer_settime);
+COMPAT_SYS_NI(timer_gettime);
+COMPAT_SYS_NI(getitimer);
+COMPAT_SYS_NI(setitimer);
 
 /*
  * We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC
@@ -110,22 +118,106 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
 	case CLOCK_REALTIME:
 	case CLOCK_MONOTONIC:
 	case CLOCK_BOOTTIME:
-		if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
+		return -EFAULT;
+	t64 = timespec_to_timespec64(t);
+	if (!timespec64_valid(&t64))
+		return -EINVAL;
+	if (flags & TIMER_ABSTIME)
+		rmtp = NULL;
+	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+	current->restart_block.nanosleep.rmtp = rmtp;
+	return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ?
+				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+				 which_clock);
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+		       struct compat_timespec __user *, tp)
+{
+	struct timespec64 new_tp64;
+	struct timespec new_tp;
+
+	if (which_clock != CLOCK_REALTIME)
+		return -EINVAL;
+	if (compat_get_timespec(&new_tp, tp))
+		return -EFAULT;
+
+	new_tp64 = timespec_to_timespec64(new_tp);
+	return do_sys_settimeofday64(&new_tp64, NULL);
+}
+
+COMPAT_SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+		       struct compat_timespec __user *,tp)
+{
+	struct timespec64 kernel_tp64;
+	struct timespec kernel_tp;
+
+	switch (which_clock) {
+	case CLOCK_REALTIME: ktime_get_real_ts64(&kernel_tp64); break;
+	case CLOCK_MONOTONIC: ktime_get_ts64(&kernel_tp64); break;
+	case CLOCK_BOOTTIME: get_monotonic_boottime64(&kernel_tp64); break;
+	default: return -EINVAL;
+	}
+
+	kernel_tp = timespec64_to_timespec(kernel_tp64);
+	if (compat_put_timespec(&kernel_tp, tp))
+		return -EFAULT;
+	return 0;
+}
+
+COMPAT_SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+		       struct compat_timespec __user *, tp)
+{
+	struct timespec rtn_tp = {
+		.tv_sec = 0,
+		.tv_nsec = hrtimer_resolution,
+	};
+
+	switch (which_clock) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_BOOTTIME:
+		if (compat_put_timespec(&rtn_tp, tp))
 			return -EFAULT;
-		t64 = timespec_to_timespec64(t);
-		if (!timespec64_valid(&t64))
-			return -EINVAL;
-		return hrtimer_nanosleep(&t64, rmtp, flags & TIMER_ABSTIME ?
-					 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
-					 which_clock);
+		return 0;
 	default:
 		return -EINVAL;
 	}
 }
-
-#ifdef CONFIG_COMPAT
-long clock_nanosleep_restart(struct restart_block *restart_block)
+COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
+		       struct compat_timespec __user *, rqtp,
+		       struct compat_timespec __user *, rmtp)
 {
-	return hrtimer_nanosleep_restart(restart_block);
+	struct timespec64 t64;
+	struct timespec t;
+
+	switch (which_clock) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_BOOTTIME:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (compat_get_timespec(&t, rqtp))
+		return -EFAULT;
+	t64 = timespec_to_timespec64(t);
+	if (!timespec64_valid(&t64))
+		return -EINVAL;
+	if (flags & TIMER_ABSTIME)
+		rmtp = NULL;
+	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+	current->restart_block.nanosleep.compat_rmtp = rmtp;
+	return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ?
+				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+				 which_clock);
 }
 #endif
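
The CONFIG_POSIX_TIMERS=n stub of clock_nanosleep() now mirrors the full implementation: validate the clock, stash rmtp in the restart block, and drop rmtp entirely for TIMER_ABSTIME, since absolute sleeps never report remaining time. From userspace an absolute sleep therefore looks like this (sketch):

    struct timespec deadline;

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 2;
    /* TIMER_ABSTIME never reports remaining time, so rmtp can be NULL;
     * the kernel NULLs it out internally anyway. */
    clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
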
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 4d7b2ce..82d67be 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -49,8 +49,10 @@
 #include <linux/workqueue.h>
 #include <linux/export.h>
 #include <linux/hashtable.h>
+#include <linux/compat.h>
 
 #include "timekeeping.h"
+#include "posix-timers.h"
 
 /*
  * Management arrays for POSIX timers. Timers are now kept in static hash table
@@ -69,6 +71,10 @@ static struct kmem_cache *posix_timers_cache;
 static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
 static DEFINE_SPINLOCK(hash_lock);
 
+static const struct k_clock * const posix_clocks[];
+static const struct k_clock *clockid_to_kclock(const clockid_t id);
+static const struct k_clock clock_realtime, clock_monotonic;
+
 /*
  * we assume that the new SIGEV_THREAD_ID shares no bits with the other
  * SIGEV values.  Here we put out an error if this assumption fails.
@@ -124,22 +130,6 @@ static DEFINE_SPINLOCK(hash_lock);
  *	    have is CLOCK_REALTIME and its high res counter part, both of
  *	    which we beg off on and pass to do_sys_settimeofday().
  */
-
-static struct k_clock posix_clocks[MAX_CLOCKS];
-
-/*
- * These ones are defined below.
- */
-static int common_nsleep(const clockid_t, int flags, struct timespec64 *t,
-			 struct timespec __user *rmtp);
-static int common_timer_create(struct k_itimer *new_timer);
-static void common_timer_get(struct k_itimer *, struct itimerspec64 *);
-static int common_timer_set(struct k_itimer *, int,
-			    struct itimerspec64 *, struct itimerspec64 *);
-static int common_timer_del(struct k_itimer *timer);
-
-static enum hrtimer_restart posix_timer_fn(struct hrtimer *data);
-
 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
 
 #define lock_timer(tid, flags)						   \
@@ -285,91 +275,23 @@ static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
  */
 static __init int init_posix_timers(void)
 {
-	struct k_clock clock_realtime = {
-		.clock_getres	= posix_get_hrtimer_res,
-		.clock_get	= posix_clock_realtime_get,
-		.clock_set	= posix_clock_realtime_set,
-		.clock_adj	= posix_clock_realtime_adj,
-		.nsleep		= common_nsleep,
-		.nsleep_restart	= hrtimer_nanosleep_restart,
-		.timer_create	= common_timer_create,
-		.timer_set	= common_timer_set,
-		.timer_get	= common_timer_get,
-		.timer_del	= common_timer_del,
-	};
-	struct k_clock clock_monotonic = {
-		.clock_getres	= posix_get_hrtimer_res,
-		.clock_get	= posix_ktime_get_ts,
-		.nsleep		= common_nsleep,
-		.nsleep_restart	= hrtimer_nanosleep_restart,
-		.timer_create	= common_timer_create,
-		.timer_set	= common_timer_set,
-		.timer_get	= common_timer_get,
-		.timer_del	= common_timer_del,
-	};
-	struct k_clock clock_monotonic_raw = {
-		.clock_getres	= posix_get_hrtimer_res,
-		.clock_get	= posix_get_monotonic_raw,
-	};
-	struct k_clock clock_realtime_coarse = {
-		.clock_getres	= posix_get_coarse_res,
-		.clock_get	= posix_get_realtime_coarse,
-	};
-	struct k_clock clock_monotonic_coarse = {
-		.clock_getres	= posix_get_coarse_res,
-		.clock_get	= posix_get_monotonic_coarse,
-	};
-	struct k_clock clock_tai = {
-		.clock_getres	= posix_get_hrtimer_res,
-		.clock_get	= posix_get_tai,
-		.nsleep		= common_nsleep,
-		.nsleep_restart	= hrtimer_nanosleep_restart,
-		.timer_create	= common_timer_create,
-		.timer_set	= common_timer_set,
-		.timer_get	= common_timer_get,
-		.timer_del	= common_timer_del,
-	};
-	struct k_clock clock_boottime = {
-		.clock_getres	= posix_get_hrtimer_res,
-		.clock_get	= posix_get_boottime,
-		.nsleep		= common_nsleep,
-		.nsleep_restart	= hrtimer_nanosleep_restart,
-		.timer_create	= common_timer_create,
-		.timer_set	= common_timer_set,
-		.timer_get	= common_timer_get,
-		.timer_del	= common_timer_del,
-	};
-
-	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
-	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
-	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
-	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
-	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
-	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
-	posix_timers_register_clock(CLOCK_TAI, &clock_tai);
-
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
 					NULL);
 	return 0;
 }
-
 __initcall(init_posix_timers);
 
-static void schedule_next_timer(struct k_itimer *timr)
+static void common_hrtimer_rearm(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	if (timr->it.real.interval == 0)
+	if (!timr->it_interval)
 		return;
 
 	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
 						timer->base->get_time(),
-						timr->it.real.interval);
-
-	timr->it_overrun_last = timr->it_overrun;
-	timr->it_overrun = -1;
-	++timr->it_requeue_pending;
+						timr->it_interval);
 	hrtimer_restart(timer);
 }
 
@@ -384,24 +306,27 @@ static void schedule_next_timer(struct k_itimer *timr)
  * To protect against the timer going away while the interrupt is queued,
  * we require that the it_requeue_pending flag be set.
  */
-void do_schedule_next_timer(struct siginfo *info)
+void posixtimer_rearm(struct siginfo *info)
 {
 	struct k_itimer *timr;
 	unsigned long flags;
 
 	timr = lock_timer(info->si_tid, &flags);
+	if (!timr)
+		return;
 
-	if (timr && timr->it_requeue_pending == info->si_sys_private) {
-		if (timr->it_clock < 0)
-			posix_cpu_timer_schedule(timr);
-		else
-			schedule_next_timer(timr);
+	if (timr->it_requeue_pending == info->si_sys_private) {
+		timr->kclock->timer_rearm(timr);
+
+		timr->it_active = 1;
+		timr->it_overrun_last = timr->it_overrun;
+		timr->it_overrun = -1;
+		++timr->it_requeue_pending;
 
 		info->si_overrun += timr->it_overrun_last;
 	}
 
-	if (timr)
-		unlock_timer(timr, flags);
+	unlock_timer(timr, flags);
 }
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
@@ -410,12 +335,12 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	int shared, ret = -1;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
-	 * dequeue_signal()->do_schedule_next_timer().
+	 * dequeue_signal()->posixtimer_rearm().
 	 *
 	 * If dequeue_signal() sees the "right" value of
-	 * si_sys_private it calls do_schedule_next_timer().
+	 * si_sys_private it calls posixtimer_rearm().
 	 * We re-queue ->sigq and drop ->it_lock().
-	 * do_schedule_next_timer() locks the timer
+	 * posixtimer_rearm() locks the timer
 	 * and re-schedules it while ->sigq is pending.
 	 * Not really bad, but not that we want.
 	 */
@@ -431,7 +356,6 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	/* If we failed to send the signal the timer stops. */
 	return ret > 0;
 }
-EXPORT_SYMBOL_GPL(posix_timer_event);
 
 /*
  * This function gets called when a POSIX.1b interval timer expires.  It
@@ -450,7 +374,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	timr = container_of(timer, struct k_itimer, it.real.timer);
 	spin_lock_irqsave(&timr->it_lock, flags);
 
-	if (timr->it.real.interval != 0)
+	timr->it_active = 0;
+	if (timr->it_interval != 0)
 		si_private = ++timr->it_requeue_pending;
 
 	if (posix_timer_event(timr, si_private)) {
@@ -459,7 +384,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 		 * we will not get a call back to restart it AND
 		 * it should be restarted.
 		 */
-		if (timr->it.real.interval != 0) {
+		if (timr->it_interval != 0) {
 			ktime_t now = hrtimer_cb_get_time(timer);
 
 			/*
@@ -488,15 +413,16 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 			{
 				ktime_t kj = NSEC_PER_SEC / HZ;
 
-				if (timr->it.real.interval < kj)
+				if (timr->it_interval < kj)
 					now = ktime_add(now, kj);
 			}
 #endif
 			timr->it_overrun += (unsigned int)
 				hrtimer_forward(timer, now,
-						timr->it.real.interval);
+						timr->it_interval);
 			ret = HRTIMER_RESTART;
 			++timr->it_requeue_pending;
+			timr->it_active = 1;
 		}
 	}
 
@@ -521,30 +447,6 @@ static struct pid *good_sigevent(sigevent_t * event)
 	return task_pid(rtn);
 }
 
-void posix_timers_register_clock(const clockid_t clock_id,
-				 struct k_clock *new_clock)
-{
-	if ((unsigned) clock_id >= MAX_CLOCKS) {
-		printk(KERN_WARNING "POSIX clock register failed for clock_id %d\n",
-		       clock_id);
-		return;
-	}
-
-	if (!new_clock->clock_get) {
-		printk(KERN_WARNING "POSIX clock id %d lacks clock_get()\n",
-		       clock_id);
-		return;
-	}
-	if (!new_clock->clock_getres) {
-		printk(KERN_WARNING "POSIX clock id %d lacks clock_getres()\n",
-		       clock_id);
-		return;
-	}
-
-	posix_clocks[clock_id] = *new_clock;
-}
-EXPORT_SYMBOL_GPL(posix_timers_register_clock);
-
 static struct k_itimer * alloc_posix_timer(void)
 {
 	struct k_itimer *tmr;
@@ -581,17 +483,6 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
 }
 
-static struct k_clock *clockid_to_kclock(const clockid_t id)
-{
-	if (id < 0)
-		return (id & CLOCKFD_MASK) == CLOCKFD ?
-			&clock_posix_dynamic : &clock_posix_cpu;
-
-	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
-		return NULL;
-	return &posix_clocks[id];
-}
-
 static int common_timer_create(struct k_itimer *new_timer)
 {
 	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
@@ -599,15 +490,12 @@ static int common_timer_create(struct k_itimer *new_timer)
 }
 
 /* Create a POSIX.1b interval timer. */
-
-SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
-		struct sigevent __user *, timer_event_spec,
-		timer_t __user *, created_timer_id)
+static int do_timer_create(clockid_t which_clock, struct sigevent *event,
+			   timer_t __user *created_timer_id)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
-	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
 	if (!kc)
@@ -629,31 +517,28 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 	it_id_set = IT_ID_SET;
 	new_timer->it_id = (timer_t) new_timer_id;
 	new_timer->it_clock = which_clock;
+	new_timer->kclock = kc;
 	new_timer->it_overrun = -1;
 
-	if (timer_event_spec) {
-		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
-			error = -EFAULT;
-			goto out;
-		}
+	if (event) {
 		rcu_read_lock();
-		new_timer->it_pid = get_pid(good_sigevent(&event));
+		new_timer->it_pid = get_pid(good_sigevent(event));
 		rcu_read_unlock();
 		if (!new_timer->it_pid) {
 			error = -EINVAL;
 			goto out;
 		}
+		new_timer->it_sigev_notify     = event->sigev_notify;
+		new_timer->sigq->info.si_signo = event->sigev_signo;
+		new_timer->sigq->info.si_value = event->sigev_value;
 	} else {
-		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
-		event.sigev_notify = SIGEV_SIGNAL;
-		event.sigev_signo = SIGALRM;
-		event.sigev_value.sival_int = new_timer->it_id;
+		new_timer->it_sigev_notify     = SIGEV_SIGNAL;
+		new_timer->sigq->info.si_signo = SIGALRM;
+		memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
+		new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
 		new_timer->it_pid = get_pid(task_tgid(current));
 	}
 
-	new_timer->it_sigev_notify     = event.sigev_notify;
-	new_timer->sigq->info.si_signo = event.sigev_signo;
-	new_timer->sigq->info.si_value = event.sigev_value;
 	new_timer->sigq->info.si_tid   = new_timer->it_id;
 	new_timer->sigq->info.si_code  = SI_TIMER;
 
@@ -684,6 +569,36 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 	return error;
 }
 
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+		struct sigevent __user *, timer_event_spec,
+		timer_t __user *, created_timer_id)
+{
+	if (timer_event_spec) {
+		sigevent_t event;
+
+		if (copy_from_user(&event, timer_event_spec, sizeof (event)))
+			return -EFAULT;
+		return do_timer_create(which_clock, &event, created_timer_id);
+	}
+	return do_timer_create(which_clock, NULL, created_timer_id);
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
+		       struct compat_sigevent __user *, timer_event_spec,
+		       timer_t __user *, created_timer_id)
+{
+	if (timer_event_spec) {
+		sigevent_t event;
+
+		if (get_compat_sigevent(&event, timer_event_spec))
+			return -EFAULT;
+		return do_timer_create(which_clock, &event, created_timer_id);
+	}
+	return do_timer_create(which_clock, NULL, created_timer_id);
+}
+#endif
+
 /*
  * Locking issues: We need to protect the result of the id look up until
  * we get the timer locked down so it is not deleted under us.  The
@@ -717,6 +632,20 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 	return NULL;
 }
 
+static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
+{
+	struct hrtimer *timer = &timr->it.real.timer;
+
+	return __hrtimer_expires_remaining_adjusted(timer, now);
+}
+
+static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+{
+	struct hrtimer *timer = &timr->it.real.timer;
+
+	return (int)hrtimer_forward(timer, now, timr->it_interval);
+}
+
 /*
  * Get the time remaining on a POSIX.1b interval timer.  This function
  * is ALWAYS called with spin_lock_irq on the timer, thus it must not
@@ -733,55 +662,61 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
  * it is the same as a requeue pending timer WRT to what we should
  * report.
  */
-static void
-common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
+void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
 {
+	const struct k_clock *kc = timr->kclock;
 	ktime_t now, remaining, iv;
-	struct hrtimer *timer = &timr->it.real.timer;
+	struct timespec64 ts64;
+	bool sig_none;
 
-	memset(cur_setting, 0, sizeof(*cur_setting));
-
-	iv = timr->it.real.interval;
+	sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+	iv = timr->it_interval;
 
 	/* interval timer ? */
-	if (iv)
+	if (iv) {
 		cur_setting->it_interval = ktime_to_timespec64(iv);
-	else if (!hrtimer_active(timer) &&
-		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
-		return;
-
-	now = timer->base->get_time();
+	} else if (!timr->it_active) {
+		/*
+		 * SIGEV_NONE oneshot timers are never queued. Check them
+		 * below.
+		 */
+		if (!sig_none)
+			return;
+	}
 
 	/*
-	 * When a requeue is pending or this is a SIGEV_NONE
-	 * timer move the expiry time forward by intervals, so
-	 * expiry is > now.
+	 * The timespec64 based conversion is suboptimal, but it's not
+	 * worth to implement yet another callback.
 	 */
-	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
-		   (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
-		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
+	kc->clock_get(timr->it_clock, &ts64);
+	now = timespec64_to_ktime(ts64);
 
-	remaining = __hrtimer_expires_remaining_adjusted(timer, now);
+	/*
+	 * When a requeue is pending or this is a SIGEV_NONE timer move the
+	 * expiry time forward by intervals, so expiry is > now.
+	 */
+	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
+		timr->it_overrun += kc->timer_forward(timr, now);
+
+	remaining = kc->timer_remaining(timr, now);
 	/* Return 0 only, when the timer is expired and not pending */
 	if (remaining <= 0) {
 		/*
 		 * A single shot SIGEV_NONE timer must return 0, when
 		 * it is expired !
 		 */
-		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
+		if (!sig_none)
 			cur_setting->it_value.tv_nsec = 1;
-	} else
+	} else {
 		cur_setting->it_value = ktime_to_timespec64(remaining);
+	}
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-		struct itimerspec __user *, setting)
+static int do_timer_gettime(timer_t timer_id,  struct itimerspec64 *setting)
 {
-	struct itimerspec64 cur_setting64;
-	struct itimerspec cur_setting;
 	struct k_itimer *timr;
-	struct k_clock *kc;
+	const struct k_clock *kc;
 	unsigned long flags;
 	int ret = 0;
 
@@ -789,28 +724,57 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
 	if (!timr)
 		return -EINVAL;
 
-	kc = clockid_to_kclock(timr->it_clock);
+	memset(setting, 0, sizeof(*setting));
+	kc = timr->kclock;
 	if (WARN_ON_ONCE(!kc || !kc->timer_get))
 		ret = -EINVAL;
 	else
-		kc->timer_get(timr, &cur_setting64);
+		kc->timer_get(timr, setting);
 
 	unlock_timer(timr, flags);
-
-	cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
-	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
-		return -EFAULT;
-
 	return ret;
 }
 
+/* Get the time remaining on a POSIX.1b interval timer. */
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		struct itimerspec __user *, setting)
+{
+	struct itimerspec64 cur_setting64;
+
+	int ret = do_timer_gettime(timer_id, &cur_setting64);
+	if (!ret) {
+		struct itimerspec cur_setting;
+		cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
+		if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
+			ret = -EFAULT;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		       struct compat_itimerspec __user *, setting)
+{
+	struct itimerspec64 cur_setting64;
+
+	int ret = do_timer_gettime(timer_id, &cur_setting64);
+	if (!ret) {
+		struct itimerspec cur_setting;
+		cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
+		if (put_compat_itimerspec(setting, &cur_setting))
+			ret = -EFAULT;
+	}
+	return ret;
+}
+#endif
+
 /*
  * Get the number of overruns of a POSIX.1b interval timer.  This is to
  * be the overrun of the timer last delivered.  At the same time we are
  * accumulating overruns on the next timer.  The overrun is frozen when
  * the signal is delivered, either at the notify time (if the info block
  * is not queued) or at the actual delivery time (as we are informed by
- * the call back to do_schedule_next_timer().  So all we need to do is
+ * the call back to posixtimer_rearm().  So all we need to do is
  * to pick up the frozen overrun.
  */
 SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
@@ -829,55 +793,113 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 	return overrun;
 }
 
-/* Set a POSIX.1b interval timer. */
-/* timr->it_lock is taken. */
-static int
-common_timer_set(struct k_itimer *timr, int flags,
-		 struct itimerspec64 *new_setting, struct itimerspec64 *old_setting)
+static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
+			       bool absolute, bool sigev_none)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 	enum hrtimer_mode mode;
 
-	if (old_setting)
-		common_timer_get(timr, old_setting);
-
-	/* disable the timer */
-	timr->it.real.interval = 0;
+	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
 	/*
-	 * careful here.  If smp we could be in the "fire" routine which will
-	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
+	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
+	 * clock modifications, so they become CLOCK_MONOTONIC based under the
+	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
+	 * functions which use timr->kclock->clock_get() work.
+	 *
+	 * Note: it_clock stays unmodified, because the next timer_set() might
+	 * use ABSTIME, so it needs to switch back.
 	 */
-	if (hrtimer_try_to_cancel(timer) < 0)
-		return TIMER_RETRY;
+	if (timr->it_clock == CLOCK_REALTIME)
+		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;
 
-	timr->it_requeue_pending = (timr->it_requeue_pending + 2) & 
-		~REQUEUE_PENDING;
-	timr->it_overrun_last = 0;
-
-	/* switch off the timer when it_value is zero */
-	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
-		return 0;
-
-	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
 	timr->it.real.timer.function = posix_timer_fn;
 
-	hrtimer_set_expires(timer, timespec64_to_ktime(new_setting->it_value));
+	if (!absolute)
+		expires = ktime_add_safe(expires, timer->base->get_time());
+	hrtimer_set_expires(timer, expires);
 
-	/* Convert interval */
-	timr->it.real.interval = timespec64_to_ktime(new_setting->it_interval);
+	if (!sigev_none)
+		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+}
 
-	/* SIGEV_NONE timers are not queued ! See common_timer_get */
-	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
-		/* Setup correct expiry time for relative timers */
-		if (mode == HRTIMER_MODE_REL) {
-			hrtimer_add_expires(timer, timer->base->get_time());
-		}
+static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
+{
+	return hrtimer_try_to_cancel(&timr->it.real.timer);
+}
+
+/* Set a POSIX.1b interval timer. */
+int common_timer_set(struct k_itimer *timr, int flags,
+		     struct itimerspec64 *new_setting,
+		     struct itimerspec64 *old_setting)
+{
+	const struct k_clock *kc = timr->kclock;
+	bool sigev_none;
+	ktime_t expires;
+
+	if (old_setting)
+		common_timer_get(timr, old_setting);
+
+	/* Prevent rearming by clearing the interval */
+	timr->it_interval = 0;
+	/*
+	 * Careful here. On SMP systems the timer expiry function could be
+	 * active and spinning on timr->it_lock.
+	 */
+	if (kc->timer_try_to_cancel(timr) < 0)
+		return TIMER_RETRY;
+
+	timr->it_active = 0;
+	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
+		~REQUEUE_PENDING;
+	timr->it_overrun_last = 0;
+
+	/* Switch off the timer when it_value is zero */
+	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
 		return 0;
+
+	timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
+	expires = timespec64_to_ktime(new_setting->it_value);
+	sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+
+	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
+	timr->it_active = !sigev_none;
+	return 0;
+}
+
+static int do_timer_settime(timer_t timer_id, int flags,
+			    struct itimerspec64 *new_spec64,
+			    struct itimerspec64 *old_spec64)
+{
+	const struct k_clock *kc;
+	struct k_itimer *timr;
+	unsigned long flag;
+	int error = 0;
+
+	if (!timespec64_valid(&new_spec64->it_interval) ||
+	    !timespec64_valid(&new_spec64->it_value))
+		return -EINVAL;
+
+	if (old_spec64)
+		memset(old_spec64, 0, sizeof(*old_spec64));
+retry:
+	timr = lock_timer(timer_id, &flag);
+	if (!timr)
+		return -EINVAL;
+
+	kc = timr->kclock;
+	if (WARN_ON_ONCE(!kc || !kc->timer_set))
+		error = -EINVAL;
+	else
+		error = kc->timer_set(timr, flags, new_spec64, old_spec64);
+
+	unlock_timer(timr, flag);
+	if (error == TIMER_RETRY) {
+		old_spec64 = NULL;	// We already got the old time...
+		goto retry;
 	}
 
-	hrtimer_start_expires(timer, mode);
-	return 0;
+	return error;
 }
 
 /* Set a POSIX.1b interval timer */
@@ -887,10 +909,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 {
 	struct itimerspec64 new_spec64, old_spec64;
 	struct itimerspec64 *rtn = old_setting ? &old_spec64 : NULL;
-	struct itimerspec new_spec, old_spec;
-	struct k_itimer *timr;
-	unsigned long flag;
-	struct k_clock *kc;
+	struct itimerspec new_spec;
 	int error = 0;
 
 	if (!new_setting)
@@ -900,46 +919,57 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 		return -EFAULT;
 	new_spec64 = itimerspec_to_itimerspec64(&new_spec);
 
-	if (!timespec64_valid(&new_spec64.it_interval) ||
-	    !timespec64_valid(&new_spec64.it_value))
-		return -EINVAL;
-retry:
-	timr = lock_timer(timer_id, &flag);
-	if (!timr)
-		return -EINVAL;
-
-	kc = clockid_to_kclock(timr->it_clock);
-	if (WARN_ON_ONCE(!kc || !kc->timer_set))
-		error = -EINVAL;
-	else
-		error = kc->timer_set(timr, flags, &new_spec64, rtn);
-
-	unlock_timer(timr, flag);
-	if (error == TIMER_RETRY) {
-		rtn = NULL;	// We already got the old time...
-		goto retry;
+	error = do_timer_settime(timer_id, flags, &new_spec64, rtn);
+	if (!error && old_setting) {
+		struct itimerspec old_spec;
+		old_spec = itimerspec64_to_itimerspec(&old_spec64);
+		if (copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+			error = -EFAULT;
 	}
-
-	old_spec = itimerspec64_to_itimerspec(&old_spec64);
-	if (old_setting && !error &&
-	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-		error = -EFAULT;
-
 	return error;
 }
 
-static int common_timer_del(struct k_itimer *timer)
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+		       struct compat_itimerspec __user *, new,
+		       struct compat_itimerspec __user *, old)
 {
-	timer->it.real.interval = 0;
+	struct itimerspec64 new_spec64, old_spec64;
+	struct itimerspec64 *rtn = old ? &old_spec64 : NULL;
+	struct itimerspec new_spec;
+	int error = 0;
 
-	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
+	if (!new)
+		return -EINVAL;
+	if (get_compat_itimerspec(&new_spec, new))
+		return -EFAULT;
+
+	new_spec64 = itimerspec_to_itimerspec64(&new_spec);
+	error = do_timer_settime(timer_id, flags, &new_spec64, rtn);
+	if (!error && old) {
+		struct itimerspec old_spec;
+		old_spec = itimerspec64_to_itimerspec(&old_spec64);
+		if (put_compat_itimerspec(old, &old_spec))
+			error = -EFAULT;
+	}
+	return error;
+}
+#endif
+
+int common_timer_del(struct k_itimer *timer)
+{
+	const struct k_clock *kc = timer->kclock;
+
+	timer->it_interval = 0;
+	if (kc->timer_try_to_cancel(timer) < 0)
 		return TIMER_RETRY;
+	timer->it_active = 0;
 	return 0;
 }
 
 static inline int timer_delete_hook(struct k_itimer *timer)
 {
-	struct k_clock *kc = clockid_to_kclock(timer->it_clock);
+	const struct k_clock *kc = timer->kclock;
 
 	if (WARN_ON_ONCE(!kc || !kc->timer_del))
 		return -EINVAL;
@@ -1018,7 +1048,7 @@ void exit_itimers(struct signal_struct *sig)
 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
 		const struct timespec __user *, tp)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct timespec64 new_tp64;
 	struct timespec new_tp;
 
@@ -1035,7 +1065,7 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
 		struct timespec __user *,tp)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct timespec64 kernel_tp64;
 	struct timespec kernel_tp;
 	int error;
@@ -1055,7 +1085,7 @@ SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
 SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
 		struct timex __user *, utx)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct timex ktx;
 	int err;
 
@@ -1078,7 +1108,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
 		struct timespec __user *, tp)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct timespec64 rtn_tp64;
 	struct timespec rtn_tp;
 	int error;
@@ -1095,13 +1125,98 @@ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
 	return error;
 }
 
+#ifdef CONFIG_COMPAT
+
+COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
+		       struct compat_timespec __user *, tp)
+{
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timespec64 new_tp64;
+	struct timespec new_tp;
+
+	if (!kc || !kc->clock_set)
+		return -EINVAL;
+
+	if (compat_get_timespec(&new_tp, tp))
+		return -EFAULT;
+
+	new_tp64 = timespec_to_timespec64(new_tp);
+
+	return kc->clock_set(which_clock, &new_tp64);
+}
+
+COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
+		       struct compat_timespec __user *, tp)
+{
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timespec64 kernel_tp64;
+	struct timespec kernel_tp;
+	int error;
+
+	if (!kc)
+		return -EINVAL;
+
+	error = kc->clock_get(which_clock, &kernel_tp64);
+	kernel_tp = timespec64_to_timespec(kernel_tp64);
+
+	if (!error && compat_put_timespec(&kernel_tp, tp))
+		error = -EFAULT;
+
+	return error;
+}
+
+COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
+		       struct compat_timex __user *, utp)
+{
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timex ktx;
+	int err;
+
+	if (!kc)
+		return -EINVAL;
+	if (!kc->clock_adj)
+		return -EOPNOTSUPP;
+
+	err = compat_get_timex(&ktx, utp);
+	if (err)
+		return err;
+
+	err = kc->clock_adj(which_clock, &ktx);
+
+	if (err >= 0)
+		err = compat_put_timex(utp, &ktx);
+
+	return err;
+}
+
+COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
+		       struct compat_timespec __user *, tp)
+{
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timespec64 rtn_tp64;
+	struct timespec rtn_tp;
+	int error;
+
+	if (!kc)
+		return -EINVAL;
+
+	error = kc->clock_getres(which_clock, &rtn_tp64);
+	rtn_tp = timespec64_to_timespec(rtn_tp64);
+
+	if (!error && tp && compat_put_timespec(&rtn_tp, tp))
+		error = -EFAULT;
+
+	return error;
+}
+#endif
+
 /*
  * nanosleep for monotonic and realtime clocks
  */
 static int common_nsleep(const clockid_t which_clock, int flags,
-			 struct timespec64 *tsave, struct timespec __user *rmtp)
+			 const struct timespec64 *rqtp)
 {
-	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
+	return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
 				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
 				 which_clock);
 }
@@ -1110,7 +1225,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
 		const struct timespec __user *, rqtp,
 		struct timespec __user *, rmtp)
 {
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct timespec64 t64;
 	struct timespec t;
 
@@ -1125,21 +1240,141 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
 	t64 = timespec_to_timespec64(t);
 	if (!timespec64_valid(&t64))
 		return -EINVAL;
+	if (flags & TIMER_ABSTIME)
+		rmtp = NULL;
+	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+	current->restart_block.nanosleep.rmtp = rmtp;
 
-	return kc->nsleep(which_clock, flags, &t64, rmtp);
+	return kc->nsleep(which_clock, flags, &t64);
 }
 
-/*
- * This will restart clock_nanosleep. This is required only by
- * compat_clock_nanosleep_restart for now.
- */
-long clock_nanosleep_restart(struct restart_block *restart_block)
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
+		       struct compat_timespec __user *, rqtp,
+		       struct compat_timespec __user *, rmtp)
 {
-	clockid_t which_clock = restart_block->nanosleep.clockid;
-	struct k_clock *kc = clockid_to_kclock(which_clock);
+	const struct k_clock *kc = clockid_to_kclock(which_clock);
+	struct timespec64 t64;
+	struct timespec t;
 
-	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
+	if (!kc)
 		return -EINVAL;
+	if (!kc->nsleep)
+		return -ENANOSLEEP_NOTSUP;
 
-	return kc->nsleep_restart(restart_block);
+	if (compat_get_timespec(&t, rqtp))
+		return -EFAULT;
+
+	t64 = timespec_to_timespec64(t);
+	if (!timespec64_valid(&t64))
+		return -EINVAL;
+	if (flags & TIMER_ABSTIME)
+		rmtp = NULL;
+	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+	current->restart_block.nanosleep.compat_rmtp = rmtp;
+
+	return kc->nsleep(which_clock, flags, &t64);
+}
+#endif
+
+static const struct k_clock clock_realtime = {
+	.clock_getres		= posix_get_hrtimer_res,
+	.clock_get		= posix_clock_realtime_get,
+	.clock_set		= posix_clock_realtime_set,
+	.clock_adj		= posix_clock_realtime_adj,
+	.nsleep			= common_nsleep,
+	.timer_create		= common_timer_create,
+	.timer_set		= common_timer_set,
+	.timer_get		= common_timer_get,
+	.timer_del		= common_timer_del,
+	.timer_rearm		= common_hrtimer_rearm,
+	.timer_forward		= common_hrtimer_forward,
+	.timer_remaining	= common_hrtimer_remaining,
+	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
+	.timer_arm		= common_hrtimer_arm,
+};
+
+static const struct k_clock clock_monotonic = {
+	.clock_getres		= posix_get_hrtimer_res,
+	.clock_get		= posix_ktime_get_ts,
+	.nsleep			= common_nsleep,
+	.timer_create		= common_timer_create,
+	.timer_set		= common_timer_set,
+	.timer_get		= common_timer_get,
+	.timer_del		= common_timer_del,
+	.timer_rearm		= common_hrtimer_rearm,
+	.timer_forward		= common_hrtimer_forward,
+	.timer_remaining	= common_hrtimer_remaining,
+	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
+	.timer_arm		= common_hrtimer_arm,
+};
+
+static const struct k_clock clock_monotonic_raw = {
+	.clock_getres		= posix_get_hrtimer_res,
+	.clock_get		= posix_get_monotonic_raw,
+};
+
+static const struct k_clock clock_realtime_coarse = {
+	.clock_getres		= posix_get_coarse_res,
+	.clock_get		= posix_get_realtime_coarse,
+};
+
+static const struct k_clock clock_monotonic_coarse = {
+	.clock_getres		= posix_get_coarse_res,
+	.clock_get		= posix_get_monotonic_coarse,
+};
+
+static const struct k_clock clock_tai = {
+	.clock_getres		= posix_get_hrtimer_res,
+	.clock_get		= posix_get_tai,
+	.nsleep			= common_nsleep,
+	.timer_create		= common_timer_create,
+	.timer_set		= common_timer_set,
+	.timer_get		= common_timer_get,
+	.timer_del		= common_timer_del,
+	.timer_rearm		= common_hrtimer_rearm,
+	.timer_forward		= common_hrtimer_forward,
+	.timer_remaining	= common_hrtimer_remaining,
+	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
+	.timer_arm		= common_hrtimer_arm,
+};
+
+static const struct k_clock clock_boottime = {
+	.clock_getres		= posix_get_hrtimer_res,
+	.clock_get		= posix_get_boottime,
+	.nsleep			= common_nsleep,
+	.timer_create		= common_timer_create,
+	.timer_set		= common_timer_set,
+	.timer_get		= common_timer_get,
+	.timer_del		= common_timer_del,
+	.timer_rearm		= common_hrtimer_rearm,
+	.timer_forward		= common_hrtimer_forward,
+	.timer_remaining	= common_hrtimer_remaining,
+	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
+	.timer_arm		= common_hrtimer_arm,
+};
+
+static const struct k_clock * const posix_clocks[] = {
+	[CLOCK_REALTIME]		= &clock_realtime,
+	[CLOCK_MONOTONIC]		= &clock_monotonic,
+	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
+	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
+	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
+	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
+	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
+	[CLOCK_BOOTTIME]		= &clock_boottime,
+	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
+	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
+	[CLOCK_TAI]			= &clock_tai,
+};
+
+static const struct k_clock *clockid_to_kclock(const clockid_t id)
+{
+	if (id < 0)
+		return (id & CLOCKFD_MASK) == CLOCKFD ?
+			&clock_posix_dynamic : &clock_posix_cpu;
+
+	if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id])
+		return NULL;
+	return posix_clocks[id];
 }
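
The reworked common_timer_get() above special-cases one-shot SIGEV_NONE
timers: they are never queued, so the remaining time is derived from the
stored expiry via the per-clock timer_remaining()/timer_forward() callbacks,
and once expired they must report zero.  A small user-space sketch of that
visible contract, using the standard POSIX timer API (a hedged illustration,
not kernel code; error handling trimmed, link with -lrt):

    /* One-shot SIGEV_NONE timer: timer_gettime() reports the remaining
     * time while armed and 0 once the expiry has passed. */
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        struct sigevent sev = { .sigev_notify = SIGEV_NONE };
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };
        struct itimerspec cur;
        timer_t tid;

        timer_create(CLOCK_MONOTONIC, &sev, &tid);
        timer_settime(tid, 0, &its, NULL);

        timer_gettime(tid, &cur);       /* still armed: remaining > 0 */
        printf("remaining: %ld.%09ld\n",
               (long)cur.it_value.tv_sec, cur.it_value.tv_nsec);

        sleep(2);                       /* let the one-shot timer expire */

        timer_gettime(tid, &cur);       /* expired SIGEV_NONE timer: 0 */
        printf("after expiry: %ld.%09ld\n",
               (long)cur.it_value.tv_sec, cur.it_value.tv_nsec);

        timer_delete(tid);
        return 0;
    }
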
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
new file mode 100644
index 0000000..fb303c3
--- /dev/null
+++ b/kernel/time/posix-timers.h
@@ -0,0 +1,40 @@
+#define TIMER_RETRY 1
+
+struct k_clock {
+	int	(*clock_getres)(const clockid_t which_clock,
+				struct timespec64 *tp);
+	int	(*clock_set)(const clockid_t which_clock,
+			     const struct timespec64 *tp);
+	int	(*clock_get)(const clockid_t which_clock,
+			     struct timespec64 *tp);
+	int	(*clock_adj)(const clockid_t which_clock, struct timex *tx);
+	int	(*timer_create)(struct k_itimer *timer);
+	int	(*nsleep)(const clockid_t which_clock, int flags,
+			  const struct timespec64 *);
+	int	(*timer_set)(struct k_itimer *timr, int flags,
+			     struct itimerspec64 *new_setting,
+			     struct itimerspec64 *old_setting);
+	int	(*timer_del)(struct k_itimer *timr);
+	void	(*timer_get)(struct k_itimer *timr,
+			     struct itimerspec64 *cur_setting);
+	void	(*timer_rearm)(struct k_itimer *timr);
+	int	(*timer_forward)(struct k_itimer *timr, ktime_t now);
+	ktime_t	(*timer_remaining)(struct k_itimer *timr, ktime_t now);
+	int	(*timer_try_to_cancel)(struct k_itimer *timr);
+	void	(*timer_arm)(struct k_itimer *timr, ktime_t expires,
+			     bool absolute, bool sigev_none);
+};
+
+extern const struct k_clock clock_posix_cpu;
+extern const struct k_clock clock_posix_dynamic;
+extern const struct k_clock clock_process;
+extern const struct k_clock clock_thread;
+extern const struct k_clock alarm_clock;
+
+int posix_timer_event(struct k_itimer *timr, int si_private);
+
+void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting);
+int common_timer_set(struct k_itimer *timr, int flags,
+		     struct itimerspec64 *new_setting,
+		     struct itimerspec64 *old_setting);
+int common_timer_del(struct k_itimer *timer);
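
Every clock in the posix_clocks[] table is now described purely by this const
k_clock structure; clocks that omit the timer_* callbacks (CLOCK_MONOTONIC_RAW
and the coarse clocks) can be read but cannot back a POSIX timer.  A small
user-space illustration of that split (hedged: the exact errno returned for a
missing timer_create hook is not guaranteed by this sketch):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;
        timer_t tid;

        clock_gettime(CLOCK_MONOTONIC_RAW, &ts);         /* uses .clock_get */
        printf("raw: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

        if (timer_create(CLOCK_MONOTONIC_RAW, NULL, &tid) < 0)
            printf("timer_create: %s\n", strerror(errno)); /* no .timer_create */

        return 0;
    }
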
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 987e496..b398c2e 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -37,9 +37,11 @@ static int tick_broadcast_forced;
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 #else
+static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
 #endif
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
  */
-void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
 	int cpu = smp_processor_id();
 
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f738251..be0ac01 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 
 /* Functions related to oneshot broadcasting */
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
-extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
 extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 64c97fc..c7a899c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -150,6 +150,12 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 		touch_softlockup_watchdog_sched();
 		if (is_idle_task(current))
 			ts->idle_jiffies++;
+		/*
+		 * In case the current tick fired too early past its expected
+		 * expiration, make sure we don't bypass the next clock reprogramming
+		 * to the same deadline.
+		 */
+		ts->next_tick = 0;
 	}
 #endif
 	update_process_times(user_mode(regs));
@@ -554,7 +560,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 	ts->idle_active = 0;
 
-	sched_clock_idle_wakeup_event(0);
+	sched_clock_idle_wakeup_event();
 }
 
 static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -660,6 +666,12 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
 	else
 		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+
+	/*
+	 * Reset to make sure next tick stop doesn't get fooled by past
+	 * cached clock deadline.
+	 */
+	ts->next_tick = 0;
 }
 
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
@@ -701,8 +713,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	 */
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
-		tick = 0;
-
 		/*
 		 * Tell the timer code that the base is not idle, i.e. undo
 		 * the effect of get_next_timer_interrupt():
@@ -712,23 +722,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
 		 */
-		if (!ts->tick_stopped)
-			goto out;
-
-		/*
-		 * If, OTOH, we did stop it, but there's a pending (expired)
-		 * timer reprogram the timer hardware to fire now.
-		 *
-		 * We will not restart the tick proper, just prod the timer
-		 * hardware into firing an interrupt to process the pending
-		 * timers. Just like tick_irq_exit() will not restart the tick
-		 * for 'normal' interrupts.
-		 *
-		 * Only once we exit the idle loop will we re-enable the tick,
-		 * see tick_nohz_idle_exit().
-		 */
-		if (delta == 0) {
-			tick_nohz_restart(ts, now);
+		if (!ts->tick_stopped) {
+			tick = 0;
 			goto out;
 		}
 	}
@@ -771,8 +766,16 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	tick = expires;
 
 	/* Skip reprogram of event if its not changed */
-	if (ts->tick_stopped && (expires == dev->next_event))
-		goto out;
+	if (ts->tick_stopped && (expires == ts->next_tick)) {
+		/* Sanity check: make sure clockevent is actually programmed */
+		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
+			goto out;
+
+		WARN_ON_ONCE(1);
+		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
+			    basemono, ts->next_tick, dev->next_event,
+			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
+	}
 
 	/*
 	 * nohz_stop_sched_tick can be called several times before
@@ -782,8 +785,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	 * the scheduler tick in nohz_restart_sched_tick.
 	 */
 	if (!ts->tick_stopped) {
-		nohz_balance_enter_idle(cpu);
-		calc_load_enter_idle();
+		calc_load_nohz_start();
 		cpu_load_update_nohz_start();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
@@ -791,6 +793,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		trace_tick_stop(1, TICK_DEP_MASK_NONE);
 	}
 
+	ts->next_tick = tick;
+
 	/*
 	 * If the expiration time == KTIME_MAX, then we simply stop
 	 * the tick timer.
@@ -801,12 +805,17 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		goto out;
 	}
 
+	hrtimer_set_expires(&ts->sched_timer, tick);
+
 	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
 	else
 		tick_program_event(tick, 1);
 out:
-	/* Update the estimated sleep length */
+	/*
+	 * Update the estimated sleep length until the next timer
+	 * (not only the tick).
+	 */
 	ts->sleep_length = ktime_sub(dev->next_event, now);
 	return tick;
 }
@@ -823,7 +832,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	 */
 	timer_clear_idle();
 
-	calc_load_exit_idle();
+	calc_load_nohz_stop();
 	touch_softlockup_watchdog_sched();
 	/*
 	 * Cancel the scheduled timer and restore the tick
@@ -864,6 +873,11 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 	if (unlikely(!cpu_online(cpu))) {
 		if (cpu == tick_do_timer_cpu)
 			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+		/*
+		 * Make sure the CPU doesn't get fooled by obsolete tick
+		 * deadline if it comes back online later.
+		 */
+		ts->next_tick = 0;
 		return false;
 	}
 
@@ -923,8 +937,10 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 			ts->idle_expires = expires;
 		}
 
-		if (!was_stopped && ts->tick_stopped)
+		if (!was_stopped && ts->tick_stopped) {
 			ts->idle_jiffies = ts->last_jiffies;
+			nohz_balance_enter_idle(cpu);
+		}
 	}
 }
 
@@ -1172,6 +1188,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 */
 	if (regs)
 		tick_sched_handle(ts, regs);
+	else
+		ts->next_tick = 0;
 
 	/* No need to reprogram if we are in idle or full dynticks mode */
 	if (unlikely(ts->tick_stopped))
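
The ts->next_tick field added above caches the deadline most recently handed
to the clockevent, so a repeated stop-tick call with an unchanged expiry can
skip the hardware reprogram; every path that invalidates the programmed state
(the tick firing, a restart, the CPU going offline) resets it to 0.  A minimal
sketch of the same caching idea, with hypothetical names rather than the
kernel's API:

    /* program_hw() stands in for tick_program_event()/hrtimer_start_expires(). */
    struct deadline_cache {
        unsigned long long next;    /* 0: nothing known to be programmed */
    };

    static void program_deadline(struct deadline_cache *c,
                                 unsigned long long expires,
                                 void (*program_hw)(unsigned long long))
    {
        if (c->next && c->next == expires)
            return;                 /* hardware already armed for this expiry */
        program_hw(expires);
        c->next = expires;
    }

    static void invalidate_deadline(struct deadline_cache *c)
    {
        c->next = 0;                /* e.g. after the tick fired */
    }
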
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index bf38226..075444e 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -27,6 +27,7 @@ enum tick_nohz_mode {
  *			timer is modified for nohz sleeps. This is necessary
  *			to resume the tick timer operation in the timeline
  *			when the CPU returns from nohz sleep.
+ * @next_tick:		Next tick to be fired when in dynticks mode.
  * @tick_stopped:	Indicator that the idle tick has been stopped
  * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
  * @idle_calls:		Total number of idle calls
@@ -44,6 +45,7 @@ struct tick_sched {
 	unsigned long			check_clocks;
 	enum tick_nohz_mode		nohz_mode;
 	ktime_t				last_tick;
+	ktime_t				next_tick;
 	int				inidle;
 	int				tick_stopped;
 	unsigned long			idle_jiffies;
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 49c73c6..7c89e43 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -39,6 +39,7 @@
 #include <linux/ptrace.h>
 
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <asm/unistd.h>
 
 #include <generated/timeconst.h>
@@ -99,6 +100,47 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 
 #endif /* __ARCH_WANT_SYS_TIME */
 
+#ifdef CONFIG_COMPAT
+#ifdef __ARCH_WANT_COMPAT_SYS_TIME
+
+/* compat_time_t is a 32 bit "long" and needs to get converted. */
+COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
+{
+	struct timeval tv;
+	compat_time_t i;
+
+	do_gettimeofday(&tv);
+	i = tv.tv_sec;
+
+	if (tloc) {
+		if (put_user(i,tloc))
+			return -EFAULT;
+	}
+	force_successful_syscall_return();
+	return i;
+}
+
+COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
+{
+	struct timespec tv;
+	int err;
+
+	if (get_user(tv.tv_sec, tptr))
+		return -EFAULT;
+
+	tv.tv_nsec = 0;
+
+	err = security_settime(&tv, NULL);
+	if (err)
+		return err;
+
+	do_settimeofday(&tv);
+	return 0;
+}
+
+#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
+#endif
+
 SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
 		struct timezone __user *, tz)
 {
@@ -215,6 +257,47 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
 	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
+		       struct timezone __user *, tz)
+{
+	if (tv) {
+		struct timeval ktv;
+
+		do_gettimeofday(&ktv);
+		if (compat_put_timeval(&ktv, tv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
+		       struct timezone __user *, tz)
+{
+	struct timespec64 new_ts;
+	struct timeval user_tv;
+	struct timezone new_tz;
+
+	if (tv) {
+		if (compat_get_timeval(&user_tv, tv))
+			return -EFAULT;
+		new_ts.tv_sec = user_tv.tv_sec;
+		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
+	}
+	if (tz) {
+		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
+			return -EFAULT;
+	}
+
+	return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+}
+#endif
+
 SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
 {
 	struct timex txc;		/* Local copy of parameter */
@@ -224,12 +307,33 @@ SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
 	 * structure. But bear in mind that the structures
 	 * may change
 	 */
-	if(copy_from_user(&txc, txc_p, sizeof(struct timex)))
+	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
 		return -EFAULT;
 	ret = do_adjtimex(&txc);
 	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
 }
 
+#ifdef CONFIG_COMPAT
+
+COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
+{
+	struct timex txc;
+	int err, ret;
+
+	err = compat_get_timex(&txc, utp);
+	if (err)
+		return err;
+
+	ret = do_adjtimex(&txc);
+
+	err = compat_put_timex(utp, &txc);
+	if (err)
+		return err;
+
+	return ret;
+}
+#endif
+
 /*
  * Convert jiffies to milliseconds and back.
  *
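
The new COMPAT_SYSCALL_DEFINE entries above all follow the same shape: fetch a
32-bit structure from the compat task, widen it into the native layout, and
hand it to the existing implementation.  A stand-alone sketch of that widening
step (the *_example types are illustrative only, not the kernel's definitions):

    #include <stdint.h>

    struct compat_timeval_example {     /* layout seen by a 32-bit task */
        int32_t tv_sec;
        int32_t tv_usec;
    };

    struct native_timeval_example {     /* layout used internally */
        int64_t tv_sec;
        int64_t tv_usec;
    };

    static void widen_timeval(const struct compat_timeval_example *in,
                              struct native_timeval_example *out)
    {
        out->tv_sec  = in->tv_sec;      /* sign-extend 32 -> 64 bit */
        out->tv_usec = in->tv_usec;
    }
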
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 9652bc5..cedafa0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -72,6 +72,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
 		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 		tk->xtime_sec++;
 	}
+	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
+		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+		tk->raw_sec++;
+	}
 }
 
 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -118,6 +122,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function.  This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This helper is not needed when holding the timekeeper_lock or when doing
+ * a read of the fast-timekeeper tkrs (which are protected by their own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+	struct clocksource *clock = READ_ONCE(tkr->clock);
+
+	return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 
@@ -175,7 +199,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tkr->read(tkr->clock);
+		now = tk_clock_read(tkr);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -209,7 +233,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	u64 cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tkr->read(tkr->clock);
+	cycle_now = tk_clock_read(tkr);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +262,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	++tk->cs_was_changed_seq;
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
-	tk->tkr_mono.read = clock->read;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
 	tk->tkr_raw.clock = clock;
-	tk->tkr_raw.read = clock->read;
 	tk->tkr_raw.mask = clock->mask;
 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -262,17 +284,19 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	/* Go back from cycles -> shifted ns */
 	tk->xtime_interval = interval * clock->mult;
 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-	tk->raw_interval = (interval * clock->mult) >> clock->shift;
+	tk->raw_interval = interval * clock->mult;
 
 	 /* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
 		int shift_change = clock->shift - old_clock->shift;
-		if (shift_change < 0)
+		if (shift_change < 0) {
 			tk->tkr_mono.xtime_nsec >>= -shift_change;
-		else
+			tk->tkr_raw.xtime_nsec >>= -shift_change;
+		} else {
 			tk->tkr_mono.xtime_nsec <<= shift_change;
+			tk->tkr_raw.xtime_nsec <<= shift_change;
+		}
 	}
-	tk->tkr_raw.xtime_nsec = 0;
 
 	tk->tkr_mono.shift = clock->shift;
 	tk->tkr_raw.shift = clock->shift;
@@ -404,7 +428,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tkr->read(tkr->clock),
+					tk_clock_read(tkr),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +485,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
 	return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+	.read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -477,17 +505,18 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tkr->read(tkr->clock);
-	tkr_dummy.read = dummy_clock_read;
+	cycles_at_suspend = tk_clock_read(tkr);
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
 	tkr = &tk->tkr_raw;
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	tkr_dummy.read = dummy_clock_read;
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
+#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibility will disappear soon.
 
 static inline void update_vsyscall(struct timekeeper *tk)
 {
@@ -597,9 +626,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
-	/* Update the monotonic raw base */
-	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
-
 	/*
 	 * The sum of the nanoseconds portions of xtime and
 	 * wall_to_monotonic can be greater/equal one second. Take
@@ -609,6 +635,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	if (nsec >= NSEC_PER_SEC)
 		seconds++;
 	tk->ktime_sec = seconds;
+
+	/* Update the monotonic raw base */
+	seconds = tk->raw_sec;
+	nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
+	tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 }
 
 /* must hold timekeeper_lock */
@@ -649,11 +680,9 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr_mono.clock;
 	u64 cycle_now, delta;
-	u64 nsec;
 
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last  = cycle_now;
@@ -663,10 +692,13 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	/* If arch requires, add in get_arch_timeoffset() */
 	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
-	tk_normalize_xtime(tk);
 
-	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
-	timespec64_add_ns(&tk->raw_time, nsec);
+	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+
+	/* If arch requires, add in get_arch_timeoffset() */
+	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
+
+	tk_normalize_xtime(tk);
 }
 
 /**
@@ -929,8 +961,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 		base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1139,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
 		 * Check whether the system counter value provided by the
 		 * device driver is on the current timekeeping interval.
 		 */
-		now = tk->tkr_mono.read(tk->tkr_mono.clock);
+		now = tk_clock_read(&tk->tkr_mono);
 		interval_start = tk->tkr_mono.cycle_last;
 		if (!cycle_between(interval_start, cycles, now)) {
 			clock_was_set_seq = tk->clock_was_set_seq;
@@ -1353,19 +1384,18 @@ int timekeeping_notify(struct clocksource *clock)
 void getrawmonotonic64(struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	struct timespec64 ts64;
 	unsigned long seq;
 	u64 nsecs;
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
+		ts->tv_sec = tk->raw_sec;
 		nsecs = timekeeping_get_ns(&tk->tkr_raw);
-		ts64 = tk->raw_time;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	timespec64_add_ns(&ts64, nsecs);
-	*ts = ts64;
+	ts->tv_nsec = 0;
+	timespec64_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getrawmonotonic64);
 
@@ -1489,8 +1519,7 @@ void __init timekeeping_init(void)
 	tk_setup_internals(tk, clock);
 
 	tk_set_xtime(tk, &now);
-	tk->raw_time.tv_sec = 0;
-	tk->raw_time.tv_nsec = 0;
+	tk->raw_sec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
 		boot = tk_xtime(tk);
 
@@ -1629,7 +1658,7 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 nsec, cyc_delta;
@@ -1976,7 +2005,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 				    u32 shift, unsigned int *clock_set)
 {
 	u64 interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;
 
 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -1991,14 +2020,12 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_sec++;
 	}
-	tk->raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2057,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 152a706..71ce3f4 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(jiffies_64);
 #endif
 
 struct timer_base {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct timer_list	*running_timer;
 	unsigned long		clk;
 	unsigned long		next_expiry;
@@ -913,10 +913,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
 
 		if (!(tf & TIMER_MIGRATING)) {
 			base = get_timer_base(tf);
-			spin_lock_irqsave(&base->lock, *flags);
+			raw_spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
-			spin_unlock_irqrestore(&base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->lock, *flags);
 		}
 		cpu_relax();
 	}
@@ -986,9 +986,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 			/* See the comment in lock_timer_base() */
 			timer->flags |= TIMER_MIGRATING;
 
-			spin_unlock(&base->lock);
+			raw_spin_unlock(&base->lock);
 			base = new_base;
-			spin_lock(&base->lock);
+			raw_spin_lock(&base->lock);
 			WRITE_ONCE(timer->flags,
 				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
 		}
@@ -1013,7 +1013,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&base->lock, flags);
+	raw_spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
@@ -1106,16 +1106,16 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	if (base != new_base) {
 		timer->flags |= TIMER_MIGRATING;
 
-		spin_unlock(&base->lock);
+		raw_spin_unlock(&base->lock);
 		base = new_base;
-		spin_lock(&base->lock);
+		raw_spin_lock(&base->lock);
 		WRITE_ONCE(timer->flags,
 			   (timer->flags & ~TIMER_BASEMASK) | cpu);
 	}
 
 	debug_activate(timer, timer->expires);
 	internal_add_timer(base, timer);
-	spin_unlock_irqrestore(&base->lock, flags);
+	raw_spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
@@ -1141,7 +1141,7 @@ int del_timer(struct timer_list *timer)
 	if (timer_pending(timer)) {
 		base = lock_timer_base(timer, &flags);
 		ret = detach_if_pending(timer, base, true);
-		spin_unlock_irqrestore(&base->lock, flags);
+		raw_spin_unlock_irqrestore(&base->lock, flags);
 	}
 
 	return ret;
@@ -1150,7 +1150,7 @@ EXPORT_SYMBOL(del_timer);
 
 /**
  * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer do del
+ * @timer: timer to delete
  *
  * This function tries to deactivate a timer. Upon successful (ret >= 0)
  * exit the timer is not queued and the handler is not running on any CPU.
@@ -1168,7 +1168,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	if (base->running_timer != timer)
 		ret = detach_if_pending(timer, base, true);
 
-	spin_unlock_irqrestore(&base->lock, flags);
+	raw_spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
@@ -1299,13 +1299,13 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 		data = timer->data;
 
 		if (timer->flags & TIMER_IRQSAFE) {
-			spin_unlock(&base->lock);
+			raw_spin_unlock(&base->lock);
 			call_timer_fn(timer, fn, data);
-			spin_lock(&base->lock);
+			raw_spin_lock(&base->lock);
 		} else {
-			spin_unlock_irq(&base->lock);
+			raw_spin_unlock_irq(&base->lock);
 			call_timer_fn(timer, fn, data);
-			spin_lock_irq(&base->lock);
+			raw_spin_lock_irq(&base->lock);
 		}
 	}
 }
@@ -1474,7 +1474,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	if (cpu_is_offline(smp_processor_id()))
 		return expires;
 
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	nextevt = __next_timer_interrupt(base);
 	is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
 	base->next_expiry = nextevt;
@@ -1502,7 +1502,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 		if ((expires - basem) > TICK_NSEC)
 			base->is_idle = true;
 	}
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 
 	return cmp_next_hrtimer_event(basem, expires);
 }
@@ -1590,7 +1590,7 @@ static inline void __run_timers(struct timer_base *base)
 	if (!time_after_eq(jiffies, base->clk))
 		return;
 
-	spin_lock_irq(&base->lock);
+	raw_spin_lock_irq(&base->lock);
 
 	while (time_after_eq(jiffies, base->clk)) {
 
@@ -1601,7 +1601,7 @@ static inline void __run_timers(struct timer_base *base)
 			expire_timers(base, heads + levels);
 	}
 	base->running_timer = NULL;
-	spin_unlock_irq(&base->lock);
+	raw_spin_unlock_irq(&base->lock);
 }
 
 /*
@@ -1786,16 +1786,16 @@ int timers_dead_cpu(unsigned int cpu)
 		 * The caller is globally serialized and nobody else
 		 * takes two locks at once, deadlock is not possible.
 		 */
-		spin_lock_irq(&new_base->lock);
-		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_irq(&new_base->lock);
+		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 		BUG_ON(old_base->running_timer);
 
 		for (i = 0; i < WHEEL_SIZE; i++)
 			migrate_timer_list(new_base, old_base->vectors + i);
 
-		spin_unlock(&old_base->lock);
-		spin_unlock_irq(&new_base->lock);
+		raw_spin_unlock(&old_base->lock);
+		raw_spin_unlock_irq(&new_base->lock);
 		put_cpu_ptr(&timer_bases);
 	}
 	return 0;
@@ -1811,7 +1811,7 @@ static void __init init_timer_cpu(int cpu)
 	for (i = 0; i < NR_BASES; i++) {
 		base = per_cpu_ptr(&timer_bases[i], cpu);
 		base->cpu = cpu;
-		spin_lock_init(&base->lock);
+		raw_spin_lock_init(&base->lock);
 		base->clk = jiffies;
 	}
 }
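
The timer_base lock conversion above swaps spinlock_t for raw_spinlock_t.
Presumably the point is that on PREEMPT_RT a spinlock_t may become a sleeping
lock, while raw_spinlock_t always spins with preemption (and, in the _irqsave
variants, interrupts) disabled; a lock that has to be taken in contexts where
sleeping is not allowed, such as the nohz idle path, must stay a true spinning
lock.  A minimal kernel-style usage sketch with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_critical_section(void)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... touch data shared with contexts that may not sleep ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
    }
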
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 193c5f5..bc364f8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -867,7 +867,7 @@ static void blk_add_trace_split(void *ignore,
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-				BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
+				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
 				&rpdu);
 	}
 }
@@ -900,7 +900,7 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
+			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
 			sizeof(r), &r);
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9e5841d..b308be3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4337,9 +4337,6 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 
 	command = strsep(&next, ":");
 
-	if (WARN_ON_ONCE(!tr))
-		return -EINVAL;
-
 	mutex_lock(&ftrace_cmd_mutex);
 	list_for_each_entry(p, &ftrace_commands, list) {
 		if (strcmp(p->name, command) == 0) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1122f15..091e801 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6881,6 +6881,9 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 	char *number;
 	int ret;
 
+	if (!tr)
+		return -ENODEV;
+
 	/* hash funcs only work with set_ftrace_filter */
 	if (!enable)
 		return -EINVAL;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a3bddbf..a0910c0 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -654,6 +654,9 @@ ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	/* we register both traceon and traceoff to this callback */
 	if (strcmp(cmd, "traceon") == 0)
 		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
@@ -670,6 +673,9 @@ ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 
 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
@@ -682,6 +688,9 @@ ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = &dump_probe_ops;
 
 	/* Only dump once. */
@@ -695,6 +704,9 @@ ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 {
 	struct ftrace_probe_ops *ops;
 
+	if (!tr)
+		return -ENODEV;
+
 	ops = &cpudump_probe_ops;
 
 	/* Only dump once. */
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c129fca..b53c8d3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -707,20 +707,16 @@ static int create_trace_kprobe(int argc, char **argv)
 		pr_info("Probe point is not specified.\n");
 		return -EINVAL;
 	}
-	if (isdigit(argv[1][0])) {
-		/* an address specified */
-		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
-		if (ret) {
-			pr_info("Failed to parse address.\n");
-			return ret;
-		}
-	} else {
+
+	/* Try to parse an address. If that fails, try to read the
+	 * input as a symbol. */
+	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 		/* a symbol specified */
 		symbol = argv[1];
 		/* TODO: support .init module functions */
 		ret = traceprobe_split_symbol_offset(symbol, &offset);
 		if (ret) {
-			pr_info("Failed to parse symbol.\n");
+			pr_info("Failed to parse either an address or a symbol.\n");
 			return ret;
 		}
 		if (offset && is_return &&
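
The rewritten parsing above simply attempts a numeric conversion of the probe
location first and falls back to symbol handling when that fails; kstrtoul
rejects trailing characters, so a string like "do_sys_open+16" cannot be
misread as an address.  A hedged user-space sketch of the same
try-number-then-symbol fallback (strtoul stands in for the kernel-only
kstrtoul):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void parse_probe_point(const char *arg, unsigned long *addr,
                                  const char **symbol)
    {
        char *end;

        errno = 0;
        *addr = strtoul(arg, &end, 0);
        if (errno == 0 && end != arg && *end == '\0') {
            *symbol = NULL;             /* a plain numeric address */
            return;
        }
        *addr = 0;
        *symbol = arg;                  /* a symbol, e.g. "do_sys_open+16" */
    }

    int main(void)
    {
        unsigned long addr;
        const char *sym;

        parse_probe_point("0xffffffff81000000", &addr, &sym);
        printf("addr=%#lx sym=%s\n", addr, sym ? sym : "(none)");

        parse_probe_point("do_sys_open", &addr, &sym);
        printf("addr=%#lx sym=%s\n", addr, sym ? sym : "(none)");
        return 0;
    }
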
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 76aa04d..b4a751e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -409,7 +409,9 @@ static const struct file_operations stack_trace_fops = {
 static int
 stack_trace_filter_open(struct inode *inode, struct file *file)
 {
-	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
+	struct ftrace_ops *ops = inode->i_private;
+
+	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
 				 inode, file);
 }
 
@@ -476,7 +478,7 @@ static __init int stack_trace_init(void)
 			NULL, &stack_trace_fops);
 
 	trace_create_file("stack_trace_filter", 0444, d_tracer,
-			NULL, &stack_trace_filter_fops);
+			  &trace_ops, &stack_trace_filter_fops);
 
 	if (stack_trace_filter_buf[0])
 		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c74bf39..a86688fa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2864,11 +2864,11 @@ bool flush_work(struct work_struct *work)
 EXPORT_SYMBOL_GPL(flush_work);
 
 struct cwt_wait {
-	wait_queue_t		wait;
+	wait_queue_entry_t		wait;
 	struct work_struct	*work;
 };
 
-static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
 
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a..d2fd262 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@
 
 endchoice
 
+config CRC4
+	tristate "CRC4 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC4 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC4
+	  functions require M here.
+
 config CRC7
 	tristate "CRC7 functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4587eb..ca9460f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -286,7 +286,7 @@
 	  write to these files.
 
 	  For detailed documentation on the debugfs API, see
-	  Documentation/DocBook/filesystems.
+	  Documentation/filesystems/.
 
 	  If unsure, say N.
 
@@ -1052,6 +1052,7 @@
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select LOCKDEP
 	help
 	 This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1068,7 @@
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
@@ -1121,6 +1123,7 @@
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	default n
 	help
@@ -1301,189 +1304,7 @@
 
 	  If unsure, say N.
 
-menu "RCU Debugging"
-
-config PROVE_RCU
-	def_bool PROVE_LOCKING
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	 By itself, PROVE_RCU will disable checking upon issuing the
-	 first warning (or "splat").  This feature prevents such
-	 disabling, allowing multiple RCU-lockdep warnings to be printed
-	 on a single reboot.
-
-	 Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	 Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	 This feature enables the __rcu sparse annotation for
-	 RCU-protected pointers.  This annotation will cause sparse
-	 to flag any non-RCU use of annotated pointers.  This can be
-	 helpful when debugging RCU usage.  Please note that this feature
-	 is not intended to enforce code cleanliness; it is instead merely
-	 a debugging aid.
-
-	 Say Y to make sparse flag questionable use of RCU-protected pointers
-
-	 Say N if you are unsure.
-
-config TORTURE_TEST
-	tristate
-	default n
-
-config RCU_PERF_TEST
-	tristate "performance tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs performance
-	  tests on the RCU infrastructure.  The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU performance tests to be built into
-	  the kernel.
-	  Say M if you want the RCU performance tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST
-	tristate "torture tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs torture tests
-	  on the RCU infrastructure.  The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU torture tests to be built into
-	  the kernel.
-	  Say M if you want the RCU torture tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT
-	bool "Slow down RCU grace-period pre-initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period pre-initialization (the
-	  propagation of CPU-hotplug changes up the rcu_node combining
-	  tree) for a few jiffies between initializing each pair of
-	  consecutive rcu_node structures.  This helps to expose races
-	  involving grace-period pre-initialization, in other words, it
-	  makes your kernel less stable.  It can also greatly increase
-	  grace-period latency, especially on systems with large numbers
-	  of CPUs.  This is useful when torture-testing RCU, but in
-	  almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
-	int "How much to slow down RCU grace-period pre-initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_PREINIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure pre-initialization step.
-
-config RCU_TORTURE_TEST_SLOW_INIT
-	bool "Slow down RCU grace-period initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period initialization for a few
-	  jiffies between initializing each pair of consecutive
-	  rcu_node structures.	This helps to expose races involving
-	  grace-period initialization, in other words, it makes your
-	  kernel less stable.  It can also greatly increase grace-period
-	  latency, especially on systems with large numbers of CPUs.
-	  This is useful when torture-testing RCU, but in almost no
-	  other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_INIT_DELAY
-	int "How much to slow down RCU grace-period initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_INIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure initialization.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP
-	bool "Slow down RCU grace-period cleanup to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period cleanup for a few jiffies
-	  between cleaning up each pair of consecutive rcu_node
-	  structures.  This helps to expose races involving grace-period
-	  cleanup, in other words, it makes your kernel less stable.
-	  It can also greatly increase grace-period latency, especially
-	  on systems with large numbers of CPUs.  This is useful when
-	  torture-testing RCU, but in almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
-	int "How much to slow down RCU grace-period cleanup"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_CLEANUP
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure cleanup operation.
-
-config RCU_CPU_STALL_TIMEOUT
-	int "RCU CPU stall timeout in seconds"
-	depends on RCU_STALL_COMMON
-	range 3 300
-	default 21
-	help
-	  If a given RCU grace period extends more than the specified
-	  number of seconds, a CPU stall warning is printed.  If the
-	  RCU grace period persists, additional CPU stall warnings are
-	  printed at more widely spaced intervals.
-
-config RCU_TRACE
-	bool "Enable tracing for RCU"
-	depends on DEBUG_KERNEL
-	default y if TREE_RCU
-	select TRACE_CLOCK
-	help
-	  This option provides tracing in RCU which presents stats
-	  in debugfs for debugging RCU implementation.  It also enables
-	  additional tracepoints for ftrace-style event tracing.
-
-	  Say Y here if you want to enable RCU tracing
-	  Say N if you are unsure.
-
-config RCU_EQS_DEBUG
-	bool "Provide debugging asserts for adding NO_HZ support to an arch"
-	depends on DEBUG_KERNEL
-	help
-	  This option provides consistency checks in RCU's handling of
-	  NO_HZ.  These checks have proven quite helpful in detecting
-	  bugs in arch-specific NO_HZ code.
-
-	  Say N here if you need ultimate kernel/user switch latencies
-	  Say Y if you are unsure
-
-endmenu # "RCU Debugging"
+source "kernel/rcu/Kconfig.debug"
 
 config DEBUG_WQ_FORCE_RR_CPU
 	bool "Force round-robin CPU selection for unbound work items"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 533f912..ab4ff0e 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -13,7 +13,7 @@
 	  CONFIG_FRAME_POINTER to aid in producing more reliable stack
 	  backtraces in the external debugger.  Documentation of
 	  kernel debugger is available at http://kgdb.sourceforge.net
-	  as well as in DocBook form in Documentation/DocBook/.  If
+	  as well as in Documentation/dev-tools/kgdb.rst.  If
 	  unsure, say N.
 
 if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0166fbc..7fb6ab7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,9 +25,6 @@
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
-CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
-CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
-
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
@@ -99,6 +96,7 @@
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC32_SELFTEST)	+= crc32test.o
+obj-$(CONFIG_CRC4)	+= crc4.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df..4c0888c 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
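A minimal sketch of the get_options() calling convention that the bounded get_range() above protects; the option string, array size and wrapper function here are illustrative, not taken from this series:

#include <linux/kernel.h>	/* get_options(), ARRAY_SIZE() */

/*
 * Illustrative only: get_options() stores the number of parsed values
 * in ints[0] and the values themselves in ints[1..].  With get_range()
 * now bounded by the remaining slots, a range such as "1-100" can no
 * longer write past the end of the caller's array.
 */
static void __maybe_unused get_options_example(void)
{
	int ints[9];	/* ints[0] holds the count, up to 8 values follow */
	char *rest;

	rest = get_options("1-3,7", ARRAY_SIZE(ints), ints);
	/* ints[0] == 4, ints[1..4] == 1, 2, 3, 7; *rest == '\0' */
}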
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaa..4731a08 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
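The kernel-doc above describes cpumask_next_wrap() as the helper behind for_each_cpu_wrap(); a short sketch of the loop that macro expands to, wrapping once around the mask from an arbitrary starting CPU (the wrapper function name is illustrative):

#include <linux/cpumask.h>
#include <linux/printk.h>

/*
 * Illustrative sketch: visit every CPU set in @mask exactly once,
 * starting at @start and wrapping back to the low CPU numbers.
 * Passing wrap=false on the first call and wrap=true afterwards is
 * what makes the walk terminate once it crosses @start again.
 */
static void __maybe_unused visit_cpus_from(const struct cpumask *mask, int start)
{
	int cpu;

	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next_wrap(cpu, mask, start, true))
		pr_info("visiting cpu %d\n", cpu);
}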
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 0000000..cf6db46
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+	0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+	0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @crc:  starting crc4
+ * @x:    value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The @x value is treated as left-aligned, and bits above @bits are ignored
+ * in the crc calculations.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+	int i;
+
+	/* mask off anything above the top bit */
+	x &= (1ull << bits) - 1;
+
+	/* Align to 4-bits */
+	bits = (bits + 3) & ~0x3;
+
+	/* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+	for (i = bits - 4; i >= 0; i -= 4)
+		c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+	return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9a2b811..719c155 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/uuid.h>
+#include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
@@ -52,19 +54,13 @@ static const char *kobject_actions[] = {
 	[KOBJ_OFFLINE] =	"offline",
 };
 
-/**
- * kobject_action_type - translate action string to numeric type
- *
- * @buf: buffer containing the action string, newline is ignored
- * @count: length of buffer
- * @type: pointer to the location to store the action type
- *
- * Returns 0 if the action string was recognized.
- */
-int kobject_action_type(const char *buf, size_t count,
-			enum kobject_action *type)
+static int kobject_action_type(const char *buf, size_t count,
+			       enum kobject_action *type,
+			       const char **args)
 {
 	enum kobject_action action;
+	size_t count_first;
+	const char *args_start;
 	int ret = -EINVAL;
 
 	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
@@ -73,11 +69,20 @@ int kobject_action_type(const char *buf, size_t count,
 	if (!count)
 		goto out;
 
+	args_start = strnchr(buf, count, ' ');
+	if (args_start) {
+		count_first = args_start - buf;
+		args_start = args_start + 1;
+	} else
+		count_first = count;
+
 	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
-		if (strncmp(kobject_actions[action], buf, count) != 0)
+		if (strncmp(kobject_actions[action], buf, count_first) != 0)
 			continue;
-		if (kobject_actions[action][count] != '\0')
+		if (kobject_actions[action][count_first] != '\0')
 			continue;
+		if (args)
+			*args = args_start;
 		*type = action;
 		ret = 0;
 		break;
@@ -86,6 +91,142 @@ int kobject_action_type(const char *buf, size_t count,
 	return ret;
 }
 
+static const char *action_arg_word_end(const char *buf, const char *buf_end,
+				       char delim)
+{
+	const char *next = buf;
+
+	while (next <= buf_end && *next != delim)
+		if (!isalnum(*next++))
+			return NULL;
+
+	if (next == buf)
+		return NULL;
+
+	return next;
+}
+
+static int kobject_action_args(const char *buf, size_t count,
+			       struct kobj_uevent_env **ret_env)
+{
+	struct kobj_uevent_env *env = NULL;
+	const char *next, *buf_end, *key;
+	int key_len;
+	int r = -EINVAL;
+
+	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+		count--;
+
+	if (!count)
+		return -EINVAL;
+
+	env = kzalloc(sizeof(*env), GFP_KERNEL);
+	if (!env)
+		return -ENOMEM;
+
+	/* first arg is UUID */
+	if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
+	    add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
+		goto out;
+
+	/*
+	 * the rest are custom environment variables in KEY=VALUE
+	 * format with ' ' delimiter between each KEY=VALUE pair
+	 */
+	next = buf + UUID_STRING_LEN;
+	buf_end = buf + count - 1;
+
+	while (next <= buf_end) {
+		if (*next != ' ')
+			goto out;
+
+		/* skip the ' ', key must follow */
+		key = ++next;
+		if (key > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, '=');
+		if (!next || next > buf_end || *next != '=')
+			goto out;
+		key_len = next - buf;
+
+		/* skip the '=', value must follow */
+		if (++next > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, ' ');
+		if (!next)
+			goto out;
+
+		if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
+				   key_len, key, (int) (next - buf), buf))
+			goto out;
+	}
+
+	r = 0;
+out:
+	if (r)
+		kfree(env);
+	else
+		*ret_env = env;
+	return r;
+}
+
+/**
+ * kobject_synth_uevent - send synthetic uevent with arguments
+ *
+ * @kobj: struct kobject for which synthetic uevent is to be generated
+ * @buf: buffer containing action type and action args, newline is ignored
+ * @count: length of buffer
+ *
+ * Returns 0 if kobject_synth_uevent() completes successfully, or the
+ * corresponding error when it fails.
+ */
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
+{
+	char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
+	enum kobject_action action;
+	const char *action_args;
+	struct kobj_uevent_env *env;
+	const char *msg = NULL, *devpath;
+	int r;
+
+	r = kobject_action_type(buf, count, &action, &action_args);
+	if (r) {
+		msg = "unknown uevent action string\n";
+		goto out;
+	}
+
+	if (!action_args) {
+		r = kobject_uevent_env(kobj, action, no_uuid_envp);
+		goto out;
+	}
+
+	r = kobject_action_args(action_args,
+				count - (action_args - buf), &env);
+	if (r == -EINVAL) {
+		msg = "incorrect uevent action arguments\n";
+		goto out;
+	}
+
+	if (r)
+		goto out;
+
+	r = kobject_uevent_env(kobj, action, env->envp);
+	kfree(env);
+out:
+	if (r) {
+		devpath = kobject_get_path(kobj, GFP_KERNEL);
+		printk(KERN_WARNING "synth uevent: %s: %s",
+		       devpath ?: "unknown device",
+		       msg ?: "failed to send uevent");
+		kfree(devpath);
+	}
+	return r;
+}
+
 #ifdef CONFIG_NET
 static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
 {
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7..9f79547 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h
new file mode 100644
index 0000000..e3cb839
--- /dev/null
+++ b/lib/locking-selftest-rtmutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK		RTL
+
+#undef UNLOCK
+#define UNLOCK		RTU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		RTI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index f3a217e..6f2b135 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/rtmutex.h>
 
 /*
  * Change this to 1 if you want to see the failure printouts:
@@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_MUTEX	0x4
 #define LOCKTYPE_RWSEM	0x8
 #define LOCKTYPE_WW	0x10
+#define LOCKTYPE_RTMUTEX 0x20
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
 static DECLARE_RWSEM(rwsem_C);
 static DECLARE_RWSEM(rwsem_D);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_A);
+static DEFINE_RT_MUTEX(rtmutex_B);
+static DEFINE_RT_MUTEX(rtmutex_C);
+static DEFINE_RT_MUTEX(rtmutex_D);
+
+#endif
+
 /*
  * Locks that we initialize dynamically as well so that
  * e.g. X1 and X2 becomes two instances of the same class,
@@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
 static DECLARE_RWSEM(rwsem_Z1);
 static DECLARE_RWSEM(rwsem_Z2);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_X1);
+static DEFINE_RT_MUTEX(rtmutex_X2);
+static DEFINE_RT_MUTEX(rtmutex_Y1);
+static DEFINE_RT_MUTEX(rtmutex_Y2);
+static DEFINE_RT_MUTEX(rtmutex_Z1);
+static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+#endif
+
 /*
  * non-inlined runtime initializers, to let separate locks share
  * the same lock-class:
@@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
 
 static void init_shared_classes(void)
 {
+#ifdef CONFIG_RT_MUTEXES
+	static struct lock_class_key rt_X, rt_Y, rt_Z;
+
+	__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
+	__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
+#endif
+
 	init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
 	init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
 
@@ -193,6 +226,10 @@ static void init_shared_classes(void)
 #define MU(x)			mutex_unlock(&mutex_##x)
 #define MI(x)			mutex_init(&mutex_##x)
 
+#define RTL(x)			rt_mutex_lock(&rtmutex_##x)
+#define RTU(x)			rt_mutex_unlock(&rtmutex_##x)
+#define RTI(x)			rt_mutex_init(&rtmutex_##x)
+
 #define WSL(x)			down_write(&rwsem_##x)
 #define WSU(x)			up_write(&rwsem_##x)
 
@@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(AA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(AA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBCCA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCABC_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCABC_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBCCDDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCDDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCDBDDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBDDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCDBCDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBCDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(double_unlock_rsem)
 
-#undef E
-
-/*
- * Bad unlock ordering:
- */
-#define E()					\
-						\
-	LOCK(A);				\
-	LOCK(B);				\
-	UNLOCK(A); /* fail */			\
-	UNLOCK(B);
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(bad_unlock_order_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(bad_unlock_order_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(bad_unlock_order_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(bad_unlock_order_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(bad_unlock_order_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(bad_unlock_order_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(double_unlock_rtmutex);
+#endif
 
 #undef E
 
@@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(init_held_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(init_held_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
+#ifdef CONFIG_RT_MUTEXES
+# define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
+#endif
 #else
 # define I_SPINLOCK(x)
 # define I_RWLOCK(x)
@@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 # define I_WW(x)
 #endif
 
+#ifndef I_RTMUTEX
+# define I_RTMUTEX(x)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+#define I2_RTMUTEX(x)	rt_mutex_init(&rtmutex_##x)
+#else
+#define I2_RTMUTEX(x)
+#endif
+
 #define I1(x)					\
 	do {					\
 		I_SPINLOCK(x);			\
 		I_RWLOCK(x);			\
 		I_MUTEX(x);			\
 		I_RWSEM(x);			\
+		I_RTMUTEX(x);			\
 	} while (0)
 
 #define I2(x)					\
@@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 		rwlock_init(&rwlock_##x);	\
 		mutex_init(&mutex_##x);		\
 		init_rwsem(&rwsem_##x);		\
+		I2_RTMUTEX(x);			\
 	} while (0)
 
 static void reset_locks(void)
@@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	reset_locks();
 }
 
+#ifdef CONFIG_RT_MUTEXES
+#define dotest_rt(fn, e, m)	dotest((fn), (e), (m))
+#else
+#define dotest_rt(fn, e, m)
+#endif
+
 static inline void print_testname(const char *testname)
 {
 	printk("%33s:", testname);
@@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 #define DO_TESTCASE_6_SUCCESS(desc, name)			\
@@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 /*
@@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 #define DO_TESTCASE_2I(desc, name, nr)				\
@@ -1825,7 +1903,6 @@ void locking_selftest(void)
 	DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
 	DO_TESTCASE_6("double unlock", double_unlock);
 	DO_TESTCASE_6("initialize held", init_held);
-	DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
 
 	printk("  --------------------------------------------------------------------------\n");
 	print_testname("recursive read-lock");
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d..0bc0a35 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		arch_spin_lock(&lock);
 		if (regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
 				cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
+		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
diff --git a/lib/refcount.c b/lib/refcount.c
index 9f90678..5d0582a 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,6 +37,8 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+#ifdef CONFIG_REFCOUNT_FULL
+
 /**
  * refcount_add_not_zero - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
@@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r)
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL(refcount_dec);
+#endif /* CONFIG_REFCOUNT_FULL */
 
 /**
  * refcount_dec_if_one - decrement a refcount if it is 1
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c6cf822..be7b4dd 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -751,3 +751,38 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+/**
+ * sg_zero_buffer - Zero-out a part of a SG list
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buflen:		 The number of bytes to zero out
+ * @skip:		 Number of bytes to skip before zeroing
+ *
+ * Returns the number of bytes zeroed.
+ **/
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+		       size_t buflen, off_t skip)
+{
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	if (!sg_miter_skip(&miter, skip))
+		return false;
+
+	while (offset < buflen && sg_miter_next(&miter)) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+		memset(miter.addr, 0, len);
+
+		offset += len;
+	}
+
+	sg_miter_stop(&miter);
+	return offset;
+}
+EXPORT_SYMBOL(sg_zero_buffer);
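sg_zero_buffer() mirrors the sg_copy_buffer() helpers directly above it; a minimal sketch of clearing a sub-range of an already built SG list (the wrapper and its use case are illustrative):

#include <linux/scatterlist.h>

/*
 * Illustrative sketch: blank @len bytes of the list starting @skip
 * bytes in, e.g. to scrub the unused tail of a buffer before handing
 * it to hardware.  The return value is how much was actually zeroed,
 * which falls short only if the range runs off the end of the list.
 */
static void __maybe_unused zero_sg_range(struct scatterlist *sgl,
					 unsigned int nents,
					 size_t skip, size_t len)
{
	size_t cleared = sg_zero_buffer(sgl, nents, len, skip);

	WARN_ON(cleared != len);
}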
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 690d75b..2fb007b 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -28,7 +28,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
 	/*
 	 * It is valid to assume CPU-locality during early bootup:
 	 */
-	if (system_state != SYSTEM_RUNNING)
+	if (system_state < SYSTEM_SCHEDULING)
 		goto out;
 
 	/*
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index 547d312..478c049 100644
--- a/lib/test_uuid.c
+++ b/lib/test_uuid.c
@@ -11,25 +11,25 @@
 
 struct test_uuid_data {
 	const char *uuid;
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 };
 
 static const struct test_uuid_data test_uuid_test_data[] = {
 	{
 		.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
-		.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
-		.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
 	},
 	{
 		.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
-		.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
-		.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
 	},
 	{
 		.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
-		.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
-		.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
 	},
 };
 
@@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
 
 static void __init test_uuid_test(const struct test_uuid_data *data)
 {
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 	char buf[48];
 
 	/* LE */
 	total_tests++;
-	if (uuid_le_to_bin(data->uuid, &le))
+	if (guid_parse(data->uuid, &le))
 		test_uuid_failed("conversion", false, false, data->uuid, NULL);
 
 	total_tests++;
-	if (uuid_le_cmp(data->le, le)) {
+	if (!guid_equal(&data->le, &le)) {
 		sprintf(buf, "%pUl", &le);
 		test_uuid_failed("cmp", false, false, data->uuid, buf);
 	}
 
 	/* BE */
 	total_tests++;
-	if (uuid_be_to_bin(data->uuid, &be))
+	if (uuid_parse(data->uuid, &be))
 		test_uuid_failed("conversion", false, true, data->uuid, NULL);
 
 	total_tests++;
-	if (uuid_be_cmp(data->be, be)) {
+	if (!uuid_equal(&data->be, &be)) {
 		sprintf(buf, "%pUb", &be);
 		test_uuid_failed("cmp", false, true, data->uuid, buf);
 	}
@@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
 
 static void __init test_uuid_wrong(const char *data)
 {
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 
 	/* LE */
 	total_tests++;
-	if (!uuid_le_to_bin(data, &le))
+	if (!guid_parse(data, &le))
 		test_uuid_failed("negative", true, false, data, NULL);
 
 	/* BE */
 	total_tests++;
-	if (!uuid_be_to_bin(data, &be))
+	if (!uuid_parse(data, &be))
 		test_uuid_failed("negative", true, true, data, NULL);
 }
 
diff --git a/lib/uuid.c b/lib/uuid.c
index 37687af..680b9fb 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -21,10 +21,13 @@
 #include <linux/uuid.h>
 #include <linux/random.h>
 
-const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_le_index);
-const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_be_index);
+const guid_t guid_null;
+EXPORT_SYMBOL(guid_null);
+const uuid_t uuid_null;
+EXPORT_SYMBOL(uuid_null);
+
+const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
 
 /***************************************************************
  * Random UUID interface
@@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16])
 	b[8] = (b[8] & 0x3F) | 0x80;
 }
 
-void uuid_le_gen(uuid_le *lu)
+void guid_gen(guid_t *lu)
 {
 	__uuid_gen_common(lu->b);
 	/* version 4 : random generation */
 	lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_le_gen);
+EXPORT_SYMBOL_GPL(guid_gen);
 
-void uuid_be_gen(uuid_be *bu)
+void uuid_gen(uuid_t *bu)
 {
 	__uuid_gen_common(bu->b);
 	/* version 4 : random generation */
 	bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_be_gen);
+EXPORT_SYMBOL_GPL(uuid_gen);
 
 /**
   * uuid_is_valid - checks if UUID string valid
@@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid)
 }
 EXPORT_SYMBOL(uuid_is_valid);
 
-static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
+static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
 {
 	static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
 	unsigned int i;
@@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
 	return 0;
 }
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u)
+int guid_parse(const char *uuid, guid_t *u)
 {
-	return __uuid_to_bin(uuid, u->b, uuid_le_index);
+	return __uuid_parse(uuid, u->b, guid_index);
 }
-EXPORT_SYMBOL(uuid_le_to_bin);
+EXPORT_SYMBOL(guid_parse);
 
-int uuid_be_to_bin(const char *uuid, uuid_be *u)
+int uuid_parse(const char *uuid, uuid_t *u)
 {
-	return __uuid_to_bin(uuid, u->b, uuid_be_index);
+	return __uuid_parse(uuid, u->b, uuid_index);
 }
-EXPORT_SYMBOL(uuid_be_to_bin);
+EXPORT_SYMBOL(uuid_parse);
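A short sketch of the renamed API above: uuid_t/uuid_parse() for the big-endian form and guid_t/guid_parse() for the little-endian form, plus uuid_gen() for a random v4 UUID. The example string is one of the vectors from lib/test_uuid.c; the wrapper function itself is illustrative:

#include <linux/errno.h>
#include <linux/uuid.h>

/*
 * Illustrative sketch of the uuid_le/uuid_be -> guid_t/uuid_t rename:
 * parse the canonical string form into both representations and
 * generate a fresh random (v4) UUID.
 */
static int __maybe_unused uuid_api_example(void)
{
	uuid_t uu, random;
	guid_t guid;

	if (uuid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &uu))
		return -EINVAL;
	if (guid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &guid))
		return -EINVAL;

	uuid_gen(&random);
	return uuid_equal(&uu, &random) ? -EEXIST : 0;	/* almost surely 0 */
}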
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2d41de3..9f37d62 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1308,14 +1308,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
 	char uuid[UUID_STRING_LEN + 1];
 	char *p = uuid;
 	int i;
-	const u8 *index = uuid_be_index;
+	const u8 *index = uuid_index;
 	bool uc = false;
 
 	switch (*(++fmt)) {
 	case 'L':
 		uc = true;		/* fall-through */
 	case 'l':
-		index = uuid_le_index;
+		index = guid_index;
 		break;
 	case 'B':
 		uc = true;
diff --git a/mm/Kconfig b/mm/Kconfig
index beb7a45..398b460 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -137,7 +137,7 @@
 config HAVE_MEMBLOCK_PHYS_MAP
 	bool
 
-config HAVE_GENERIC_RCU_GUP
+config HAVE_GENERIC_GUP
 	bool
 
 config ARCH_DISCARD_MEMBLOCK
diff --git a/mm/cleancache.c b/mm/cleancache.c
index ba5d8f3..f7b9fdc 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -130,7 +130,7 @@ void __cleancache_init_shared_fs(struct super_block *sb)
 	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
 
 	if (cleancache_ops) {
-		pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
+		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
 		if (pool_id < 0)
 			pool_id = CLEANCACHE_NO_POOL;
 	}
diff --git a/mm/filemap.c b/mm/filemap.c
index 6f1be57..aea58e9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -376,6 +376,38 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);
 
+/**
+ * filemap_range_has_page - check if a page exists in range.
+ * @mapping:           address space within which to check
+ * @start_byte:        offset in bytes where the range starts
+ * @end_byte:          offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied, usually used to check if
+ * direct writing in this range will trigger a writeback.
+ */
+bool filemap_range_has_page(struct address_space *mapping,
+			   loff_t start_byte, loff_t end_byte)
+{
+	pgoff_t index = start_byte >> PAGE_SHIFT;
+	pgoff_t end = end_byte >> PAGE_SHIFT;
+	struct pagevec pvec;
+	bool ret;
+
+	if (end_byte < start_byte)
+		return false;
+
+	if (mapping->nrpages == 0)
+		return false;
+
+	pagevec_init(&pvec, 0);
+	if (!pagevec_lookup(&pvec, mapping, index, 1))
+		return false;
+	ret = (pvec.pages[0]->index <= end);
+	pagevec_release(&pvec);
+	return ret;
+}
+EXPORT_SYMBOL(filemap_range_has_page);
+
 static int __filemap_fdatawait_range(struct address_space *mapping,
 				     loff_t start_byte, loff_t end_byte)
 {
@@ -768,10 +800,10 @@ struct wait_page_key {
 struct wait_page_queue {
 	struct page *page;
 	int bit_nr;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 };
 
-static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
 	struct wait_page_key *key = arg;
 	struct wait_page_queue *wait_page
@@ -834,7 +866,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 		struct page *page, int bit_nr, int state, bool lock)
 {
 	struct wait_page_queue wait_page;
-	wait_queue_t *wait = &wait_page.wait;
+	wait_queue_entry_t *wait = &wait_page.wait;
 	int ret = 0;
 
 	init_wait(wait);
@@ -845,9 +877,9 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 	for (;;) {
 		spin_lock_irq(&q->lock);
 
-		if (likely(list_empty(&wait->task_list))) {
+		if (likely(list_empty(&wait->entry))) {
 			if (lock)
-				__add_wait_queue_tail_exclusive(q, wait);
+				__add_wait_queue_entry_tail_exclusive(q, wait);
 			else
 				__add_wait_queue(q, wait);
 			SetPageWaiters(page);
@@ -907,7 +939,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
  *
  * Add an arbitrary @waiter to the wait queue for the nominated @page.
  */
-void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	unsigned long flags;
@@ -2038,10 +2070,17 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 		loff_t size;
 
 		size = i_size_read(inode);
-		retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-					iocb->ki_pos + count - 1);
-		if (retval < 0)
-			goto out;
+		if (iocb->ki_flags & IOCB_NOWAIT) {
+			if (filemap_range_has_page(mapping, iocb->ki_pos,
+						   iocb->ki_pos + count - 1))
+				return -EAGAIN;
+		} else {
+			retval = filemap_write_and_wait_range(mapping,
+						iocb->ki_pos,
+					        iocb->ki_pos + count - 1);
+			if (retval < 0)
+				goto out;
+		}
 
 		file_accessed(file);
 
@@ -2642,6 +2681,9 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
 
 	pos = iocb->ki_pos;
 
+	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+		return -EINVAL;
+
 	if (limit != RLIM_INFINITY) {
 		if (iocb->ki_pos >= limit) {
 			send_sig(SIGXFSZ, current, 0);
@@ -2710,9 +2752,17 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	write_len = iov_iter_count(from);
 	end = (pos + write_len - 1) >> PAGE_SHIFT;
 
-	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
-	if (written)
-		goto out;
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		/* If there are pages to writeback, return */
+		if (filemap_range_has_page(inode->i_mapping, pos,
+					   pos + iov_iter_count(from)))
+			return -EAGAIN;
+	} else {
+		written = filemap_write_and_wait_range(mapping, pos,
+							pos + write_len - 1);
+		if (written)
+			goto out;
+	}
 
 	/*
 	 * After a write we want buffered reads to be sure to go to disk to get
diff --git a/mm/gup.c b/mm/gup.c
index d9e6fdd..3ab78dc 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	/* mlock all present pages, but do not fault in new pages */
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
-	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) &&
-			(stack_guard_page_start(vma, address) ||
-			 stack_guard_page_end(vma, address + PAGE_SIZE)))
-		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (*flags & FOLL_REMOTE)
@@ -407,12 +402,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 
 	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
-		if (ret & VM_FAULT_OOM)
-			return -ENOMEM;
-		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-			return -EFAULT;
+		int err = vm_fault_to_errno(ret, *flags);
+
+		if (err)
+			return err;
 		BUG();
 	}
 
@@ -723,12 +716,10 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 	ret = handle_mm_fault(vma, address, fault_flags);
 	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
-		if (ret & VM_FAULT_OOM)
-			return -ENOMEM;
-		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
-			return -EHWPOISON;
-		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
-			return -EFAULT;
+		int err = vm_fault_to_errno(ret, 0);
+
+		if (err)
+			return err;
 		BUG();
 	}
 
@@ -1155,7 +1146,7 @@ struct page *get_dump_page(unsigned long addr)
 #endif /* CONFIG_ELF_CORE */
 
 /*
- * Generic RCU Fast GUP
+ * Generic Fast GUP
  *
  * get_user_pages_fast attempts to pin user pages by walking the page
  * tables directly and avoids taking locks. Thus the walker needs to be
@@ -1176,8 +1167,8 @@ struct page *get_dump_page(unsigned long addr)
  * Before activating this code, please be aware that the following assumptions
  * are currently made:
  *
- *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
- *      pages containing page tables.
+ *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
+ *  free pages containing page tables or TLB flushing requires IPI broadcast.
  *
  *  *) ptes can be read atomically by the architecture.
  *
@@ -1187,7 +1178,7 @@ struct page *get_dump_page(unsigned long addr)
  *
  * This code is based heavily on the PowerPC implementation by Nick Piggin.
  */
-#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
+#ifdef CONFIG_HAVE_GENERIC_GUP
 
 #ifndef gup_get_pte
 /*
@@ -1677,4 +1668,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	return ret;
 }
 
-#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
+#endif /* CONFIG_HAVE_GENERIC_GUP */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a84909c..88c6167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
 		page = pmd_page(*vmf->pmd);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}
 
@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		page_nid = -1;
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
-		page_nid = -1;
+		put_page(page);
 		goto out;
 	}
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e582887..3eedb18 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
 			if (ret & VM_FAULT_ERROR) {
+				int err = vm_fault_to_errno(ret, flags);
+
+				if (err)
+					return err;
+
 				remainder = 0;
 				break;
 			}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1c..df4ebdb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			spin_unlock(ptl);
 			free_page_and_swap_cache(src_page);
 		}
-		cond_resched();
 	}
 }
 
diff --git a/mm/ksm.c b/mm/ksm.c
index d9fc0e456..216184a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 		goto out;
 
 	if (PageTransCompound(page)) {
-		err = split_huge_page(page);
-		if (err)
+		if (split_huge_page(page))
 			goto out_unlock;
 	}
 
diff --git a/mm/memblock.c b/mm/memblock.c
index b049c9b..7b8a5db 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
 	}
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+	struct memblock_region *rgn;
+	unsigned long size = 0;
+	int idx;
+
+	for_each_memblock_type((&memblock.reserved), rgn) {
+		phys_addr_t start, end;
+
+		if (rgn->base + rgn->size < start_addr)
+			continue;
+		if (rgn->base > end_addr)
+			continue;
+
+		start = rgn->base;
+		end = start + rgn->size;
+		size += end - start;
+	}
+
+	return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
 	pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9417208..d75b38b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -170,7 +170,7 @@ struct mem_cgroup_event {
 	 */
 	poll_table pt;
 	wait_queue_head_t *wqh;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	struct work_struct remove;
 };
 
@@ -1479,10 +1479,10 @@ static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
 	struct mem_cgroup *memcg;
-	wait_queue_t	wait;
+	wait_queue_entry_t	wait;
 };
 
-static int memcg_oom_wake_function(wait_queue_t *wait,
+static int memcg_oom_wake_function(wait_queue_entry_t *wait,
 	unsigned mode, int sync, void *arg)
 {
 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
@@ -1570,7 +1570,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
 	owait.wait.flags = 0;
 	owait.wait.func = memcg_oom_wake_function;
 	owait.wait.private = current;
-	INIT_LIST_HEAD(&owait.wait.task_list);
+	INIT_LIST_HEAD(&owait.wait.entry);
 
 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
 	mem_cgroup_mark_under_oom(memcg);
@@ -3725,7 +3725,7 @@ static void memcg_event_remove(struct work_struct *work)
  *
  * Called with wqh->lock held and interrupts disabled.
  */
-static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
+static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
 			    int sync, void *key)
 {
 	struct mem_cgroup_event *event =
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2527dfe..ecc183f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 	 * correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	if (PageHuge(p))
+		page_flags = hpage->flags;
+	else
+		page_flags = p->flags;
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
@@ -1595,12 +1598,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
 			pfn, ret, page->flags, &page->flags);
-		/*
-		 * We know that soft_offline_huge_page() tries to migrate
-		 * only one hugepage pointed to by hpage, so we need not
-		 * run through the pagelist here.
-		 */
-		putback_active_hugepage(hpage);
+		if (!list_empty(&pagelist))
+			putback_movable_pages(&pagelist);
 		if (ret > 0)
 			ret = -EIO;
 	} else {
diff --git a/mm/memory.c b/mm/memory.c
index 6ff5d72..bb11c47 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2855,40 +2855,6 @@ int do_swap_page(struct vm_fault *vmf)
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		return expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		return expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, vmf->address) < 0)
-		return VM_FAULT_SIGSEGV;
-
 	/*
 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
 	 * pte_offset_map() on pmds where a huge pmd might be created
@@ -3029,6 +2991,17 @@ static int __do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 static int pte_alloc_one_map(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -3052,18 +3025,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 map_pte:
 	/*
 	 * If a huge pmd materialized under us just retry later.  Use
-	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
-	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
-	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
-	 * in a different thread of this mm, in turn leading to a misleading
-	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
-	 * regular pmd that we can walk with pte_offset_map() and we can do that
-	 * through an atomic read in C, which is what pmd_trans_unstable()
-	 * provides.
+	 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
+	 * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
+	 * under us and then back to pmd_none, as a result of MADV_DONTNEED
+	 * running immediately after a huge pmd fault in a different thread of
+	 * this mm, in turn leading to a misleading pmd_trans_huge() retval.
+	 * All we have to ensure is that it is a regular pmd that we can walk
+	 * with pte_offset_map() and we can do that through an atomic read in
+	 * C, which is what pmd_trans_unstable() provides.
 	 */
-	if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+	if (pmd_devmap_trans_unstable(vmf->pmd))
 		return VM_FAULT_NOPAGE;
 
+	/*
+	 * At this point we know that our vmf->pmd points to a page of ptes
+	 * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
+	 * for the duration of the fault.  If a racing MADV_DONTNEED runs and
+	 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
+	 * be valid and we will re-check to make sure the vmf->pte isn't
+	 * pte_none() under vmf->ptl protection when we return to
+	 * alloc_set_pte().
+	 */
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
 	return 0;
@@ -3690,7 +3672,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
 		vmf->pte = NULL;
 	} else {
 		/* See comment in pte_alloc_one_map() */
-		if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
+		if (pmd_devmap_trans_unstable(vmf->pmd))
 			return 0;
 		/*
 		 * A regular pmd is established and it can't morph into a huge
diff --git a/mm/mempool.c b/mm/mempool.c
index 47a659d..1c02948 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -312,7 +312,7 @@ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
 	void *element;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	gfp_t gfp_temp;
 
 	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
diff --git a/mm/mlock.c b/mm/mlock.c
index c483c5c..b562b55 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
 	int i;
 	int nr = pagevec_count(pvec);
-	int delta_munlocked;
+	int delta_munlocked = -nr;
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 				continue;
 			else
 				__munlock_isolation_failed(page);
+		} else {
+			delta_munlocked++;
 		}
 
 		/*
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 		pagevec_add(&pvec_putback, pvec->pages[i]);
 		pvec->pages[i] = NULL;
 	}
-	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
 	spin_unlock_irq(zone_lru_lock(zone));
 
diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e..a5e3dcd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 	bool populate;
 	LIST_HEAD(uf);
@@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -253,10 +255,22 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, subtree_gap;
-	max = vma->vm_start;
-	if (vma->vm_prev)
-		max -= vma->vm_prev->vm_end;
+	unsigned long max, prev_end, subtree_gap;
+
+	/*
+	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+	 * allow two stack_guard_gaps between them here, and when choosing
+	 * an unmapped area; whereas when expanding we only require one.
+	 * That's a little inconsistent, but keeps the code here simpler.
+	 */
+	max = vm_start_gap(vma);
+	if (vma->vm_prev) {
+		prev_end = vm_end_gap(vma->vm_prev);
+		if (max > prev_end)
+			max -= prev_end;
+		else
+			max = 0;
+	}
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm)
 			anon_vma_unlock_read(anon_vma);
 		}
 
-		highest_address = vma->vm_end;
+		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
 		i++;
 	}
@@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
 	else
-		mm->highest_vm_end = vma->vm_end;
+		mm->highest_vm_end = vm_end_gap(vma);
 
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -856,7 +870,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			vma_gap_update(vma);
 		if (end_changed) {
 			if (!next)
-				mm->highest_vm_end = end;
+				mm->highest_vm_end = vm_end_gap(vma);
 			else if (!adjust_next)
 				vma_gap_update(next);
 		}
@@ -941,7 +955,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 			 * mm->highest_vm_end doesn't need any update
 			 * in remove_next == 1 case.
 			 */
-			VM_WARN_ON(mm->highest_vm_end != end);
+			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
 	if (insert && file)
@@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit left subtree if it looks promising */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
 			struct vm_area_struct *left =
 				rb_entry(vma->vm_rb.rb_left,
@@ -1798,12 +1812,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
-		if (gap_end >= low_limit && gap_end - gap_start >= length)
+		if (gap_end >= low_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit right subtree if it looks promising */
@@ -1825,8 +1840,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 			vma = rb_entry(rb_parent(prev),
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_left) {
-				gap_start = vma->vm_prev->vm_end;
-				gap_end = vma->vm_start;
+				gap_start = vm_end_gap(vma->vm_prev);
+				gap_end = vm_start_gap(vma);
 				goto check_current;
 			}
 		}
@@ -1890,7 +1905,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit right subtree if it looks promising */
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
 			struct vm_area_struct *right =
 				rb_entry(vma->vm_rb.rb_right,
@@ -1903,10 +1918,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
 		/* Check if current node has a suitable gap */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
-		if (gap_start <= high_limit && gap_end - gap_start >= length)
+		if (gap_start <= high_limit &&
+		    gap_end > gap_start && gap_end - gap_start >= length)
 			goto found;
 
 		/* Visit left subtree if it looks promising */
@@ -1929,7 +1945,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_right) {
 				gap_start = vma->vm_prev ?
-					vma->vm_prev->vm_end : 0;
+					vm_end_gap(vma->vm_prev) : 0;
 				goto check_current;
 			}
 		}
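
A minimal user-space demonstration of why the gap checks above gained the
extra "gap_end > gap_start" condition: with unsigned arithmetic the
subtraction wraps around once the guard-gap-adjusted gap_start overtakes
gap_end, and the wrapped value would spuriously satisfy ">= length"
(the addresses below are purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long gap_start = 0x7f0000201000UL;	/* prev end + guard gap */
	unsigned long gap_end   = 0x7f0000200000UL;	/* next VMA's start */
	unsigned long length    = 0x10000UL;

	/* Old test: the underflow makes this pass although there is no gap. */
	printf("old: %d\n", gap_end - gap_start >= length);		/* 1 */
	/* New test: rejected, as intended. */
	printf("new: %d\n", gap_end > gap_start &&
			    gap_end - gap_start >= length);		/* 0 */
	return 0;
}
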
@@ -1967,7 +1983,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1978,9 +1994,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2003,7 +2020,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -2018,9 +2035,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				(!vma || addr + len <= vm_start_gap(vma)) &&
+				(!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2155,21 +2173,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, vma->vm_flags, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2207,16 +2223,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
 	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/* Guard against wrapping around to address 0. */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else
+	/* Guard against exceeding limits of the address space. */
+	address &= PAGE_MASK;
+	if (address >= TASK_SIZE)
 		return -ENOMEM;
+	address += PAGE_SIZE;
+
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+
+	/* Guard against overflow */
+	if (gap_addr < address || gap_addr > TASK_SIZE)
+		gap_addr = TASK_SIZE;
+
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
 
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
@@ -2261,7 +2293,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				if (vma->vm_next)
 					vma_gap_update(vma->vm_next);
 				else
-					mm->highest_vm_end = address;
+					mm->highest_vm_end = vm_end_gap(vma);
 				spin_unlock(&mm->page_table_lock);
 
 				perf_event_mmap(vma);
@@ -2282,6 +2314,8 @@ int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2289,6 +2323,17 @@ int expand_downwards(struct vm_area_struct *vma,
 	if (error)
 		return error;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2343,28 +2388,25 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *next;
-
-	address &= PAGE_MASK;
-	next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
 	return expand_upwards(vma, address);
 }
 
@@ -2386,14 +2428,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *prev;
-
-	address &= PAGE_MASK;
-	prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
 	return expand_downwards(vma, address);
 }
 
@@ -2491,7 +2525,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma->vm_prev = prev;
 		vma_gap_update(vma);
 	} else
-		mm->highest_vm_end = prev ? prev->vm_end : 0;
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
 	tail_vma->vm_next = NULL;
 
 	/* Kill the cache */
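
The mm/mmap.c changes above can be summarised with a small user-space model
of the expand_downwards() gap test (a sketch only: 4 KiB pages and the
default 256-page gap are assumed, and struct vma here is illustrative, not
the kernel's vm_area_struct):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT	12

static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;	/* 1 MiB */

struct vma { unsigned long start, end; bool growsdown; };

/* Refuse to grow a stack down to 'address' if a non-stack mapping ends
 * inside the guard gap below it, mirroring the new expand_downwards(). */
static bool may_expand_down(const struct vma *prev, unsigned long address)
{
	unsigned long gap_addr = address - stack_guard_gap;

	if (gap_addr > address)			/* gap computation wrapped */
		return false;
	if (prev && prev->end > gap_addr && !prev->growsdown)
		return false;
	return true;
}

int main(void)
{
	struct vma heap = { 0x00400000, 0x00500000, false };

	/* One page above the heap: inside the 1 MiB gap, refused. */
	printf("%d\n", may_expand_down(&heap, 0x00501000));	/* 0 */
	/* 1 MiB plus one page above the heap: allowed. */
	printf("%d\n", may_expand_down(&heap, 0x00601000));	/* 1 */
	return 0;
}
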
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9e450c..2302f25 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node, but also take into account
+	 * the two large system hashes that can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate for all the memblock reservations (e.g. crash kernel)
+	 * from the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 				unsigned long pfn, unsigned long zone_end,
 				unsigned long *nr_initialised)
 {
-	unsigned long max_initialise;
-
 	/* Always populate low zones for address-constrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-	/*
-	 * Initialise at least 2G of a node but also take into account that
-	 * two large system hashes that can take up 1GB for 0.25TB/node.
-	 */
-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
-		(pgdat->node_spanned_pages >> 8));
-
 	(*nr_initialised)++;
-	if ((*nr_initialised > max_initialise) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -3870,7 +3881,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto got_pg;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
-	if (test_thread_flag(TIF_MEMDIE))
+	if (test_thread_flag(TIF_MEMDIE) &&
+	    (alloc_flags == ALLOC_NO_WATERMARKS ||
+	     (gfp_mask & __GFP_NOMEMALLOC)))
 		goto nopage;
 
 	/* Retry as long as the OOM killer is making progress */
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }
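
The new reset_deferred_meminit() sizing above reduces to a small arithmetic
helper; a user-space sketch (4 KiB pages assumed, and the reserved-page
count stands in for memblock_reserved_memory_within()):

#include <stdio.h>

#define PAGE_SHIFT	12

static unsigned long static_init_pages(unsigned long spanned_pages,
				       unsigned long reserved_pages)
{
	/* At least 2G of a node, or 1/256th of it, whichever is larger. */
	unsigned long max_initialise = 2UL << (30 - PAGE_SHIFT);

	if ((spanned_pages >> 8) > max_initialise)
		max_initialise = spanned_pages >> 8;

	/* Compensate for memblock reservations such as the crash kernel. */
	max_initialise += reserved_pages;

	return max_initialise < spanned_pages ? max_initialise : spanned_pages;
}

int main(void)
{
	/* A 1 TiB node with 512 MiB of reserved low memory. */
	unsigned long spanned  = 1UL << (40 - PAGE_SHIFT);
	unsigned long reserved = 512UL << (20 - PAGE_SHIFT);

	printf("%lu pages initialised at boot\n",
	       static_init_pages(spanned, reserved));
	return 0;
}
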
 
diff --git a/mm/page_io.c b/mm/page_io.c
index 23f6d0d..2da71e6 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -45,7 +45,7 @@ void end_swap_bio_write(struct bio *bio)
 {
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		SetPageError(page);
 		/*
 		 * We failed to write the page out to swap-space.
@@ -118,7 +118,7 @@ static void end_swap_bio_read(struct bio *bio)
 {
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		SetPageError(page);
 		ClearPageUptodate(page);
 		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
diff --git a/mm/rmap.c b/mm/rmap.c
index d405f0e..130c238 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -579,25 +579,13 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 void try_to_unmap_flush(void)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
-	int cpu;
 
 	if (!tlb_ubc->flush_required)
 		return;
 
-	cpu = get_cpu();
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
-		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-		local_flush_tlb();
-		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
-	}
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
-		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
-	cpumask_clear(&tlb_ubc->cpumask);
+	arch_tlbbatch_flush(&tlb_ubc->arch);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
-	put_cpu();
 }
 
 /* Flush iff there are potentially writable TLB entries that can race with IO */
@@ -613,7 +601,7 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
-	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
 	tlb_ubc->flush_required = true;
 
 	/*
diff --git a/mm/shmem.c b/mm/shmem.c
index e67d6ba..9100c49 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,6 +75,7 @@ static struct vfsmount *shm_mnt;
 #include <uapi/linux/memfd.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/rmap.h>
+#include <linux/uuid.h>
 
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -1902,10 +1903,10 @@ alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
  * entry unconditionally - even if something else had already woken the
  * target.
  */
-static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	int ret = default_wake_function(wait, mode, sync, key);
-	list_del_init(&wait->task_list);
+	list_del_init(&wait->entry);
 	return ret;
 }
 
@@ -2840,7 +2841,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		spin_lock(&inode->i_lock);
 		inode->i_private = NULL;
 		wake_up_all(&shmem_falloc_waitq);
-		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list));
+		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
 		spin_unlock(&inode->i_lock);
 		error = 0;
 		goto out;
@@ -3761,6 +3762,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
+	uuid_gen(&sb->s_uuid);
 
 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
 	if (!inode)
diff --git a/mm/slub.c b/mm/slub.c
index 57e5156..8addc53 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 		char mbuf[64];
 		char *buf;
 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+		ssize_t len;
 
 		if (!attr || !attr->store || !attr->show)
 			continue;
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 			buf = buffer;
 		}
 
-		attr->show(root_cache, buf);
-		attr->store(s, buf, strlen(buf));
+		len = attr->show(root_cache, buf);
+		if (len > 0)
+			attr->store(s, buf, len);
 	}
 
 	if (buffer)
@@ -5623,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
 	return name;
 }
 
+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+	struct kmem_cache *s =
+		container_of(work, struct kmem_cache, kobj_remove_work);
+
+	if (!s->kobj.state_in_sysfs)
+		/*
+		 * For a memcg cache, this may be called during
+		 * deactivation and again on shutdown.  Remove only once.
+		 * A cache is never shut down before deactivation is
+		 * complete, so no need to worry about synchronization.
+		 */
+		return;
+
+#ifdef CONFIG_MEMCG
+	kset_unregister(s->memcg_kset);
+#endif
+	kobject_uevent(&s->kobj, KOBJ_REMOVE);
+	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5630,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);
 
+	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;
@@ -5693,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 		 */
 		return;
 
-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown.  Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		return;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
+	kobject_get(&s->kobj);
+	schedule_work(&s->kobj_remove_work);
 }
 
 void sysfs_slab_release(struct kmem_cache *s)
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index ac6318a..3405b4e 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
diff --git a/mm/util.c b/mm/util.c
index 464df34..26be640 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
 
 	/*
-	 * Make sure that larger requests are not too disruptive - no OOM
-	 * killer and no allocation failure warnings as we have a fallback
+	 * We want to attempt a large physically contiguous block first because
+	 * it is less likely to fragment multiple larger blocks and therefore
+	 * contributes less to long-term fragmentation than the vmalloc fallback.
+	 * However, make sure that larger requests are not too disruptive - no
+	 * OOM killer and no allocation failure warnings, as we have a fallback.
 	 */
 	if (size > PAGE_SIZE) {
 		kmalloc_flags |= __GFP_NOWARN;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 34a1c3e..ecc97f7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 6063581..ce0618b 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
 	unsigned long pressure = 0;
 
 	/*
-	 * reclaimed can be greater than scanned in cases
-	 * like THP, where the scanned is 1 and reclaimed
-	 * could be 512
+	 * reclaimed can be greater than scanned for things such as reclaimed
+	 * slab pages. shrink_node() just adds reclaimed pages without a
+	 * related increment to scanned pages.
 	 */
 	if (reclaimed >= scanned)
 		goto out;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ad39bb..c3c1c6a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3652,7 +3652,7 @@ int kswapd_run(int nid)
 	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
 	if (IS_ERR(pgdat->kswapd)) {
 		/* failure at boot is fatal */
-		BUG_ON(system_state == SYSTEM_BOOTING);
+		BUG_ON(system_state < SYSTEM_RUNNING);
 		pr_err("Failed to start kswapd on node %d\n", nid);
 		ret = PTR_ERR(pgdat->kswapd);
 		pgdat->kswapd = NULL;
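
For context on the vmscan.c change above: this cycle inserts a new
SYSTEM_SCHEDULING state between SYSTEM_BOOTING and SYSTEM_RUNNING, so the
boot-time check is widened from an equality test to "< SYSTEM_RUNNING".
A tiny sketch (the enum below mirrors include/linux/kernel.h as an
assumption, not a verbatim copy):

#include <stdio.h>

enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_SCHEDULING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
};

int main(void)
{
	enum system_states state = SYSTEM_SCHEDULING;

	printf("old test fatal: %d\n", state == SYSTEM_BOOTING);	/* 0 */
	printf("new test fatal: %d\n", state < SYSTEM_RUNNING);	/* 1 */
	return 0;
}
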
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 467069b..9649579 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	return 0;
 
 out_free_newdev:
-	free_netdev(new_dev);
+	if (new_dev->reg_state == NETREG_UNINITIALIZED)
+		free_netdev(new_dev);
 	return err;
 }
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 953b672..abc5f40 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -813,7 +813,6 @@ static void vlan_dev_free(struct net_device *dev)
 
 	free_percpu(vlan->vlan_pcpu_stats);
 	vlan->vlan_pcpu_stats = NULL;
-	free_netdev(dev);
 }
 
 void vlan_setup(struct net_device *dev)
@@ -826,7 +825,8 @@ void vlan_setup(struct net_device *dev)
 	netif_keep_dst(dev);
 
 	dev->netdev_ops		= &vlan_netdev_ops;
-	dev->destructor		= vlan_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= vlan_dev_free;
 	dev->ethtool_ops	= &vlan_ethtool_ops;
 
 	dev->min_mtu		= 0;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 7bc2208..dca3cdd 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -95,7 +95,7 @@ enum {
 
 struct p9_poll_wait {
 	struct p9_conn *conn;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	wait_queue_head_t *wait_addr;
 };
 
@@ -522,7 +522,7 @@ static void p9_write_work(struct work_struct *work)
 	clear_bit(Wworksched, &m->wsched);
 }
 
-static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
+static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
 {
 	struct p9_poll_wait *pwait =
 		container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 013e970..000ca2f 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1064,8 +1064,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 
 		skb_new->protocol = eth_type_trans(skb_new, soft_iface);
 
-		soft_iface->stats.rx_packets++;
-		soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+		batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+		batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+				   skb->len + ETH_HLEN + hdr_size);
 
 		netif_rx(skb_new);
 		batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index e1ebe14..ae9f4d3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -987,7 +987,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 				batadv_dbg(BATADV_DBG_BLA, bat_priv,
 					   "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n",
 					   orig_addr_gw);
-				return NET_RX_DROP;
+				goto free_skb;
 			}
 		}
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b25789a..10f7edf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1034,8 +1034,6 @@ static void batadv_softif_free(struct net_device *dev)
 	 * netdev and its private data (bat_priv)
 	 */
 	rcu_barrier();
-
-	free_netdev(dev);
 }
 
 /**
@@ -1047,7 +1045,8 @@ static void batadv_softif_init_early(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &batadv_netdev_ops;
-	dev->destructor = batadv_softif_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = batadv_softif_free;
 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
 	dev->priv_flags |= IFF_NO_QUEUE;
 
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 6089599..ab3b654 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -598,7 +598,7 @@ static void netdev_setup(struct net_device *dev)
 
 	dev->netdev_ops		= &netdev_ops;
 	dev->header_ops		= &header_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static struct device_type bt_type = {
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index fbf251f..5c4808b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,7 +484,7 @@ static int bnep_session(void *arg)
 	struct net_device *dev = s->dev;
 	struct sock *sk = s->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	BT_DBG("");
 
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 9e59b66..14f7c81 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -280,7 +280,7 @@ static int cmtp_session(void *arg)
 	struct cmtp_session *session = arg;
 	struct sock *sk = session->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	BT_DBG("session %p", session);
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0bec458..fc31161 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1244,7 +1244,7 @@ static void hidp_session_run(struct hidp_session *session)
 static int hidp_session_thread(void *arg)
 {
 	struct hidp_session *session = arg;
-	wait_queue_t ctrl_wait, intr_wait;
+	wait_queue_entry_t ctrl_wait, intr_wait;
 
 	BT_DBG("session %p", session);
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 430b53e..f0f3447 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -379,7 +379,7 @@ void br_dev_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->needs_free_netdev = true;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 574f788..32bd3ea 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
 		err = 0;
 		switch (nla_type(attr)) {
 		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-			if (!(p->flags & BR_VLAN_TUNNEL))
+			if (!p || !(p->flags & BR_VLAN_TUNNEL))
 				return -EINVAL;
 			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
 			if (err)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0db8102..6f12a52 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
-		mod_timer(&br->hello_timer, jiffies + br->hello_time);
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad344..21f18ea 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -754,6 +754,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	lock_sock(sk);
 
+	err = -EINVAL;
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	err = -EAFNOSUPPORT;
 	if (uaddr->sa_family != AF_CAIF)
 		goto out;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fc..71b6ab2 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
 	struct sk_buff *skb;
 
-	if (likely(in_interrupt()))
-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
-	else
-		skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+	skb = alloc_skb(len + pfx, GFP_ATOMIC);
 	if (unlikely(skb == NULL))
 		return NULL;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 1816fc9..fe3c53e 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -392,14 +392,14 @@ static void chnl_net_destructor(struct net_device *dev)
 {
 	struct chnl_net *priv = netdev_priv(dev);
 	caif_free_client(&priv->chnl);
-	free_netdev(dev);
 }
 
 static void ipcaif_net_setup(struct net_device *dev)
 {
 	struct chnl_net *priv;
 	dev->netdev_ops = &netdev_ops;
-	dev->destructor = chnl_net_destructor;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = chnl_net_destructor;
 	dev->flags |= IFF_NOARP;
 	dev->flags |= IFF_POINTOPOINT;
 	dev->mtu = GPRS_PDP_MTU;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index b6406fe..88edac0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -872,8 +872,7 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
 static int can_pernet_init(struct net *net)
 {
-	net->can.can_rcvlists_lock =
-		__SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock);
+	spin_lock_init(&net->can.can_rcvlists_lock);
 	net->can.can_rx_alldev_list =
 		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index db1866f2..f965398 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -68,7 +68,7 @@ static inline int connection_based(struct sock *sk)
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
-static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
+static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
 				  void *key)
 {
 	unsigned long bits = (unsigned long)key;
@@ -181,7 +181,7 @@ static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
  *
  *	This function will lock the socket if a skb is returned, so
  *	the caller needs to unlock the socket in that case (usually by
- *	calling skb_free_datagram). Returns NULL with *err set to
+ *	calling skb_free_datagram). Returns NULL with @err set to
  *	-EAGAIN if no data was available or to some other value if an
  *	error was detected.
  *
diff --git a/net/core/dev.c b/net/core/dev.c
index fca407b..416137c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1253,8 +1253,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 	if (!new_ifalias)
 		return -ENOMEM;
 	dev->ifalias = new_ifalias;
+	memcpy(dev->ifalias, alias, len);
+	dev->ifalias[len] = 0;
 
-	strlcpy(dev->ifalias, alias, len+1);
 	return len;
 }
 
@@ -4766,6 +4767,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+	skb_dst_drop(skb);
+	secpath_reset(skb);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
@@ -4779,13 +4787,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 		break;
 
 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-			skb_dst_drop(skb);
-			secpath_reset(skb);
-			kmem_cache_free(skbuff_head_cache, skb);
-		} else {
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
 			__kfree_skb(skb);
-		}
 		break;
 
 	case GRO_HELD:
@@ -4857,10 +4862,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
+			napi_reuse_skb(napi, skb);
+		break;
+
 	case GRO_MERGED:
 	case GRO_CONSUMED:
 		break;
@@ -4948,6 +4959,19 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
 
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+	while (remsd) {
+		struct softnet_data *next = remsd->rps_ipi_next;
+
+		if (cpu_online(remsd->cpu))
+			smp_call_function_single_async(remsd->cpu, &remsd->csd);
+		remsd = next;
+	}
+#endif
+}
+
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -4963,14 +4987,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 		local_irq_enable();
 
 		/* Send pending IPI's to kick RPS processing on remote cpus. */
-		while (remsd) {
-			struct softnet_data *next = remsd->rps_ipi_next;
-
-			if (cpu_online(remsd->cpu))
-				smp_call_function_single_async(remsd->cpu,
-							   &remsd->csd);
-			remsd = next;
-		}
+		net_rps_send_ipi(remsd);
 	} else
 #endif
 		local_irq_enable();
@@ -5199,8 +5216,6 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	if (rc == BUSY_POLL_BUDGET)
 		__napi_schedule(napi);
 	local_bh_enable();
-	if (local_softirq_pending())
-		do_softirq();
 }
 
 void napi_busy_loop(unsigned int napi_id,
@@ -7501,6 +7516,8 @@ int register_netdevice(struct net_device *dev)
 err_uninit:
 	if (dev->netdev_ops->ndo_uninit)
 		dev->netdev_ops->ndo_uninit(dev);
+	if (dev->priv_destructor)
+		dev->priv_destructor(dev);
 	goto out;
 }
 EXPORT_SYMBOL(register_netdevice);
@@ -7708,8 +7725,10 @@ void netdev_run_todo(void)
 		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 		WARN_ON(dev->dn_ptr);
 
-		if (dev->destructor)
-			dev->destructor(dev);
+		if (dev->priv_destructor)
+			dev->priv_destructor(dev);
+		if (dev->needs_free_netdev)
+			free_netdev(dev);
 
 		/* Report a network device has been unregistered */
 		rtnl_lock();
@@ -7774,9 +7793,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-	storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
@@ -8192,7 +8211,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	struct sk_buff **list_skb;
 	struct sk_buff *skb;
 	unsigned int cpu;
-	struct softnet_data *sd, *oldsd;
+	struct softnet_data *sd, *oldsd, *remsd = NULL;
 
 	local_irq_disable();
 	cpu = smp_processor_id();
@@ -8233,6 +8252,13 @@ static int dev_cpu_dead(unsigned int oldcpu)
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
+#ifdef CONFIG_RPS
+	remsd = oldsd->rps_ipi_list;
+	oldsd->rps_ipi_list = NULL;
+#endif
+	/* send out pending IPI's on offline CPU */
+	net_rps_send_ipi(remsd);
+
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
 		netif_rx_ni(skb);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d2..27fad31 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -410,6 +410,22 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	if (cmd == SIOCGIFNAME)
 		return dev_ifname(net, (struct ifreq __user *)arg);
 
+	/*
+	 * Take care of Wireless Extensions. Unfortunately struct iwreq
+	 * isn't a proper subset of struct ifreq (it's 8 bytes shorter)
+	 * so we need to treat it specially, otherwise applications may
+	 * fault if the struct they're passing happens to land at the
+	 * end of a mapped page.
+	 */
+	if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+		struct iwreq iwr;
+
+		if (copy_from_user(&iwr, arg, sizeof(iwr)))
+			return -EFAULT;
+
+		return wext_handle_ioctl(net, &iwr, cmd, arg);
+	}
+
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
@@ -559,9 +575,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 				ret = -EFAULT;
 			return ret;
 		}
-		/* Take care of Wireless Extensions */
-		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-			return wext_handle_ioctl(net, &ifr, cmd, arg);
 		return -ENOTTY;
 	}
 }
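
The size mismatch that the dev_ioctl() comment above describes is easy to
confirm from user space (Linux-only; the 8-byte difference assumes a 64-bit
build):

#include <stdio.h>
#include <linux/wireless.h>	/* pulls in linux/if.h for struct ifreq */

int main(void)
{
	printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));	/* 40 */
	printf("sizeof(struct iwreq) = %zu\n", sizeof(struct iwreq));	/* 32 */
	return 0;
}
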
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a2..a0adfc3 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ static int devlink_dpipe_tables_fill(struct genl_info *info,
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
@@ -2098,8 +2100,10 @@ static int devlink_dpipe_headers_fill(struct genl_info *info,
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
diff --git a/net/core/dst.c b/net/core/dst.c
index 6192f11..13ba4a0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -469,6 +469,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 		spin_lock_bh(&dst_garbage.lock);
 		dst = dst_garbage.list;
 		dst_garbage.list = NULL;
+		/* The code in dst_ifdown places a hold on the loopback device.
+		 * If the gc entry processing is set to expire after a lengthy
+		 * interval, this hold can cause netdev_wait_allrefs() to hang
+		 * out and wait for a long time -- until the loopback
+		 * interface is released.  If we're really unlucky, it'll emit
+		 * pr_emerg messages to console too.  Reset the interval here,
+		 * so dst cleanups occur in a more timely fashion.
+		 */
+		if (dst_garbage.timer_inc > DST_GC_INC) {
+			dst_garbage.timer_inc = DST_GC_INC;
+			dst_garbage.timer_expires = DST_GC_MIN;
+			mod_delayed_work(system_wq, &dst_gc_work,
+					 dst_garbage.timer_expires);
+		}
 		spin_unlock_bh(&dst_garbage.lock);
 
 		if (last)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f21c4d3..3bba291 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -568,7 +568,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct net *net = sock_net(skb->sk);
 	struct fib_rule_hdr *frh = nlmsg_data(nlh);
 	struct fib_rules_ops *ops = NULL;
-	struct fib_rule *rule, *tmp;
+	struct fib_rule *rule, *r;
 	struct nlattr *tb[FRA_MAX+1];
 	struct fib_kuid_range range;
 	int err = -EINVAL;
@@ -668,16 +668,23 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 		/*
 		 * Check if this rule is a target to any of them. If so,
+		 * adjust to the next one with the same preference or
 		 * disable them. As this operation is eventually very
-		 * expensive, it is only performed if goto rules have
-		 * actually been added.
+		 * expensive, it is only performed if goto rules (other than
+		 * the current rule, if it is itself a goto rule) have
+		 * actually been added.
 		 */
 		if (ops->nr_goto_rules > 0) {
-			list_for_each_entry(tmp, &ops->rules_list, list) {
-				if (rtnl_dereference(tmp->ctarget) == rule) {
-					RCU_INIT_POINTER(tmp->ctarget, NULL);
+			struct fib_rule *n;
+
+			n = list_next_entry(rule, list);
+			if (&n->list == &ops->rules_list || n->pref != rule->pref)
+				n = NULL;
+			list_for_each_entry(r, &ops->rules_list, list) {
+				if (rtnl_dereference(r->ctarget) != rule)
+					continue;
+				rcu_assign_pointer(r->ctarget, n);
+				if (!n)
 					ops->unresolved_rules++;
-				}
 			}
 		}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e2c0a7..467a2f4 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -931,6 +931,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
+	       + nla_total_size(4) /* IFLA_GROUP */
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1124,6 +1125,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_info ivi;
 
+	memset(&ivi, 0, sizeof(ivi));
+
 	/* Not all SR-IOV capable drivers support the
 	 * spoofcheck and "RSS query enable" query.  Preset to
 	 * -1 so the user space tool can detect that the driver
@@ -1132,7 +1135,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	ivi.spoofchk = -1;
 	ivi.rss_query_en = -1;
 	ivi.trusted = -1;
-	memset(ivi.mac, 0, sizeof(ivi.mac));
 	/* The default value for VF link state is "auto"
 	 * IFLA_VF_LINK_STATE_AUTO which equals zero
 	 */
@@ -1467,6 +1469,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
 	[IFLA_XDP]		= { .type = NLA_NESTED },
+	[IFLA_GROUP]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e8..b1be7c0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
 	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
-	if (skb && (skb_next = skb_peek(q)))
+	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
+		if (icmp_next)
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
 	if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/core/sock.c b/net/core/sock.c
index 727f924..0c3fc16 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2675,9 +2675,12 @@ EXPORT_SYMBOL(release_sock);
  * @sk: socket
  *
  * This version should be used for a very small section, where the process won't block
- * return false if fast path is taken
+ * return false if fast path is taken:
+ *
  *   sk_lock.slock locked, owned = 0, BH disabled
- * return true if slow path is taken
+ *
+ * return true if slow path is taken:
+ *
  *   sk_lock.slock unlocked, owned = 1, BH enabled
  */
 bool lock_sock_fast(struct sock *sk)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 4b9518a..6f95612 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
-static inline void dnrt_drop(struct dn_route *rt)
-{
-	dst_release(&rt->dst);
-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
 			}
 			*rtp = rt->dst.dn_next;
 			rt->dst.dn_next = NULL;
-			dnrt_drop(rt);
+			dnrt_free(rt);
 			break;
 		}
 		spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			dst_use(&rth->dst, now);
 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
 
-			dnrt_drop(rt);
+			dst_free(&rt->dst);
 			*rp = rth;
 			return 0;
 		}
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
 		for(; rt; rt = next) {
 			next = rcu_dereference_raw(rt->dst.dn_next);
 			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
-			dst_free((struct dst_entry *)rt);
+			dnrt_free(rt);
 		}
 
 nothing_to_declare:
@@ -1187,7 +1181,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ed81ac..aa8ffec 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 
-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+	if (skb->len < sizeof(*nlh) ||
+	    nlh->nlmsg_len < sizeof(*nlh) ||
+	    skb->len < nlh->nlmsg_len)
 		return;
 
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae..90038d4 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	/* Suspend slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_suspend(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	if (ds->ops->suspend)
+		ret = ds->ops->suspend(ds);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+	int i, ret = 0;
+
+	if (ds->ops->resume)
+		ret = ds->ops->resume(ds);
+
+	if (ret)
+		return ret;
+
+	/* Resume slave network devices */
+	for (i = 0; i < ds->num_ports; i++) {
+		if (!dsa_is_port_initialized(ds, i))
+			continue;
+
+		ret = dsa_slave_resume(ds->ports[i].netdev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
 	.type	= cpu_to_be16(ETH_P_XDSA),
 	.func	= dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bf..7796580 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	if (dst->cpu_switch)
+	if (dst->cpu_switch) {
 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+		dst->cpu_switch = NULL;
+	}
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8..7281098 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 	dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	/* Suspend slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_suspend(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	if (ds->ops->suspend)
-		ret = ds->ops->suspend(ds);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-	int i, ret = 0;
-
-	if (ds->ops->resume)
-		ret = ds->ops->resume(ds);
-
-	if (ret)
-		return ret;
-
-	/* Resume slave network devices */
-	for (i = 0; i < ds->num_ports; i++) {
-		if (!dsa_is_port_initialized(ds, i))
-			continue;
-
-		ret = dsa_slave_resume(ds->ports[i].netdev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c73160f..0a0a392 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -378,7 +378,6 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 	del_timer_sync(&hsr->announce_timer);
 
 	synchronize_rcu();
-	free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -404,7 +403,8 @@ void hsr_dev_setup(struct net_device *dev)
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
 	dev->priv_flags |= IFF_NO_QUEUE;
 
-	dev->destructor = hsr_dev_destroy;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = hsr_dev_destroy;
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 4ebe2aa..04b5450 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame,
 	unsigned long irqflags;
 
 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
-	frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
-				       frame->is_supervision);
+	frame->node_src = hsr_get_node(port, skb, frame->is_supervision);
 	if (frame->node_src == NULL)
 		return -1; /* Unknown node and !is_supervision, or no mem */
 
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ea9258..284a9b8 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 
 /* Get the hsr_node from which 'skb' was sent.
  */
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup)
 {
+	struct list_head *node_db = &port->hsr->node_db;
 	struct hsr_node *node;
 	struct ethhdr *ethhdr;
 	u16 seq_out;
@@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
-		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+		/* This is also called for frames from the master port, so
+		 * warn only for non-master ports.
+		 */
+		if (port->type != HSR_PT_MASTER)
+			WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
 		seq_out = HSR_SEQNR_START;
 	}
 
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f..4e04f0e 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -18,7 +18,7 @@ struct hsr_node;
 
 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
 			      u16 seq_out);
-struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
 			      bool is_sup);
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0..0a866f33 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -107,7 +107,7 @@ static void lowpan_setup(struct net_device *ldev)
 
 	ldev->netdev_ops	= &lowpan_netdev_ops;
 	ldev->header_ops	= &lowpan_header_ops;
-	ldev->destructor	= free_netdev;
+	ldev->needs_free_netdev	= true;
 	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad16..58925b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
 		.type =       SOCK_DGRAM,
 		.protocol =   IPPROTO_ICMP,
 		.prot =       &ping_prot,
-		.ops =        &inet_dgram_ops,
+		.ops =        &inet_sockraw_ops,
 		.flags =      INET_PROTOSW_REUSE,
        },
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5..9144fa7 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -657,8 +657,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	/* Needed by both icmp_global_allow and icmp_xmit_lock */
 	local_bh_disable();
 
-	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv4_global_allow(net, type, code))
+	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless
+	 * incoming dev is loopback.  If the outgoing dev changes to a
+	 * non-loopback device, the peer ratelimit still works (in
+	 * icmpv4_xrlim_allow).
+	 */
+	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+	      !icmpv4_global_allow(net, type, code))
 		goto out_bh_enable;
 
 	sk = icmp_xmit_lock(net);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44fd86d..ec9a396 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
 	if (!pmc)
 		return;
+	spin_lock_init(&pmc->lock);
 	spin_lock_bh(&im->lock);
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
@@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 
 static void ip_mc_clear_src(struct ip_mc_list *pmc)
 {
-	struct ip_sf_list *psf, *nextpsf;
+	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
 
-	for (psf = pmc->tomb; psf; psf = nextpsf) {
-		nextpsf = psf->sf_next;
-		kfree(psf);
-	}
+	spin_lock_bh(&pmc->lock);
+	tomb = pmc->tomb;
 	pmc->tomb = NULL;
-	for (psf = pmc->sources; psf; psf = nextpsf) {
-		nextpsf = psf->sf_next;
-		kfree(psf);
-	}
+	sources = pmc->sources;
 	pmc->sources = NULL;
 	pmc->sfmode = MCAST_EXCLUDE;
 	pmc->sfcount[MCAST_INCLUDE] = 0;
 	pmc->sfcount[MCAST_EXCLUDE] = 1;
+	spin_unlock_bh(&pmc->lock);
+
+	for (psf = tomb; psf; psf = nextpsf) {
+		nextpsf = psf->sf_next;
+		kfree(psf);
+	}
+	for (psf = sources; psf; psf = nextpsf) {
+		nextpsf = psf->sf_next;
+		kfree(psf);
+	}
 }
 
 /* Join a multicast group
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7a3fd25..532b36e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b878ecb..129d1a3 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -446,6 +446,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
@@ -967,7 +969,6 @@ static void ip_tunnel_dev_free(struct net_device *dev)
 	gro_cells_destroy(&tunnel->gro_cells);
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
@@ -1155,7 +1156,8 @@ int ip_tunnel_init(struct net_device *dev)
 	struct iphdr *iph = &tunnel->parms.iph;
 	int err;
 
-	dev->destructor	= ip_tunnel_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip_tunnel_dev_free;
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 551de4d..8ae425c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -101,8 +101,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
 static void ipmr_free_table(struct mr_table *mrt);
 
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local);
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -501,7 +501,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
@@ -988,7 +988,7 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 
 			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
 		} else {
-			ip_mr_forward(net, mrt, skb, c, 0);
+			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
 		}
 	}
 }
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
 
 /* Queue a packet for resolution. It gets locked cache entry! */
 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
-				 struct sk_buff *skb)
+				 struct sk_buff *skb, struct net_device *dev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct mfc_cache *c;
@@ -1130,6 +1130,10 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 		kfree_skb(skb);
 		err = -ENOBUFS;
 	} else {
+		if (dev) {
+			skb->dev = dev;
+			skb->skb_iif = dev->ifindex;
+		}
 		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
 		err = 0;
 	}
@@ -1828,10 +1832,10 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
-			  struct sk_buff *skb, struct mfc_cache *cache,
-			  int local)
+			  struct net_device *dev, struct sk_buff *skb,
+			  struct mfc_cache *cache, int local)
 {
-	int true_vifi = ipmr_find_vif(mrt, skb->dev);
+	int true_vifi = ipmr_find_vif(mrt, dev);
 	int psend = -1;
 	int vif, ct;
 
@@ -1853,13 +1857,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
 	}
 
 	/* Wrong interface: drop packet and (maybe) send PIM assert. */
-	if (mrt->vif_table[vif].dev != skb->dev) {
-		struct net_device *mdev;
-
-		mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
-		if (mdev == skb->dev)
-			goto forward;
-
+	if (mrt->vif_table[vif].dev != dev) {
 		if (rt_is_output_route(skb_rtable(skb))) {
 			/* It is our own packet, looped back.
 			 * Very complicated situation...
@@ -2053,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb)
 		read_lock(&mrt_lock);
 		vif = ipmr_find_vif(mrt, dev);
 		if (vif >= 0) {
-			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
+			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
 			read_unlock(&mrt_lock);
 
 			return err2;
@@ -2064,7 +2062,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	ip_mr_forward(net, mrt, skb, cache, local);
+	ip_mr_forward(net, mrt, dev, skb, cache, local);
 	read_unlock(&mrt_lock);
 
 	if (local)
@@ -2238,7 +2236,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		iph->saddr = saddr;
 		iph->daddr = daddr;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(mrt, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
 		read_unlock(&mrt_lock);
 		rcu_read_unlock();
 		return err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59792d2..40aca78 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2330,6 +2330,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	dst_release(sk->sk_rx_dst);
+	sk->sk_rx_dst = NULL;
 	tcp_saved_syn_free(tp);
 
 	/* Clean up fastopen related fields */
@@ -2381,9 +2383,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
 	return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
 		struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_repair_opt opt;
 
 	while (len >= sizeof(opt)) {
@@ -2396,6 +2399,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
 		switch (opt.opt_code) {
 		case TCPOPT_MSS:
 			tp->rx_opt.mss_clamp = opt.opt_val;
+			tcp_mtup_init(sk);
 			break;
 		case TCPOPT_WINDOW:
 			{
@@ -2555,7 +2559,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EINVAL;
 		else if (sk->sk_state == TCP_ESTABLISHED)
-			err = tcp_repair_options_est(tp,
+			err = tcp_repair_options_est(sk,
 					(struct tcp_repair_opt __user *)optval,
 					optlen);
 		else
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512..324c9bc 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
+	tcp_sk(sk)->prior_ssthresh = 0;
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
 	if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a4fb1e..1d2dbac 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -332,9 +332,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
 				   unsigned long delay)
 {
-	if (!delayed_work_pending(&ifp->dad_work))
-		in6_ifa_hold(ifp);
-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+	in6_ifa_hold(ifp);
+	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+		in6_ifa_put(ifp);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -3369,6 +3369,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
+	struct net *net = dev_net(dev);
 	int run_pending = 0;
 	int err;
 
@@ -3384,7 +3385,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGEMTU:
 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
 		if (dev->mtu < IPV6_MIN_MTU) {
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 			break;
 		}
 
@@ -3500,7 +3501,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			 * IPV6_MIN_MTU stop IPv6 on this interface.
 			 */
 			if (dev->mtu < IPV6_MIN_MTU)
-				addrconf_ifdown(dev, 1);
+				addrconf_ifdown(dev, dev != net->loopback_dev);
 		}
 		break;
 
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de..8d772fe 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	struct ipv6hdr *ip6_hdr;
 	struct ipv6_opt_hdr *hop;
 	unsigned char buf[CALIPSO_MAX_BUFFER];
-	int len_delta, new_end, pad;
+	int len_delta, new_end, pad, payload;
 	unsigned int start, end;
 
 	ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	if (ret_val < 0)
 		return ret_val;
 
+	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
 	if (len_delta) {
 		if (len_delta > 0)
 			skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 			sizeof(*ip6_hdr) + start);
 		skb_reset_network_header(skb);
 		ip6_hdr = ipv6_hdr(skb);
+		payload = ntohs(ip6_hdr->payload_len);
+		ip6_hdr->payload_len = htons(payload + len_delta);
 	}
 
 	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e011122..5c786f5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -250,8 +250,14 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 	 */
 
 	err = ip6_datagram_dst_update(sk, true);
-	if (err)
+	if (err) {
+		/* Reset daddr and dport so that udp_v6_early_demux()
+		 * fails to find this socket
+		 */
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+		inet->inet_dport = 0;
 		goto out;
+	}
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index d950d43..f02f131 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -30,6 +30,25 @@
 #include <net/ipv6.h>
 #include <linux/icmpv6.h>
 
+static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
+{
+	int off = sizeof(struct ipv6hdr);
+	struct ipv6_opt_hdr *exthdr;
+
+	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
+		return offsetof(struct ipv6hdr, nexthdr);
+
+	while (off < nhlen) {
+		exthdr = (void *)ipv6_hdr + off;
+		if (exthdr->nexthdr == NEXTHDR_ESP)
+			return off;
+
+		off += ipv6_optlen(exthdr);
+	}
+
+	return 0;
+}
+
 static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
@@ -38,6 +57,7 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 	struct xfrm_state *x;
 	__be32 seq;
 	__be32 spi;
+	int nhoff;
 	int err;
 
 	skb_pull(skb, offset);
@@ -72,6 +92,11 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 
 	xo->flags |= XFRM_GRO;
 
+	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
+	if (!nhoff)
+		goto out;
+
+	IP6CB(skb)->nhoff = nhoff;
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index eea23b5..ec849d8 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
-	struct rt6_info *rt;
 	struct fib_lookup_arg arg = {
 		.lookup_ptr = lookup,
 		.flags = FIB_LOOKUP_NOREF,
@@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	fib_rules_lookup(net->ipv6.fib6_rules_ops,
 			 flowi6_to_flowi(fl6), flags, &arg);
 
-	rt = arg.result;
+	if (arg.result)
+		return arg.result;
 
-	if (!rt) {
-		dst_hold(&net->ipv6.ip6_null_entry->dst);
-		return &net->ipv6.ip6_null_entry->dst;
-	}
-
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
-		ip6_rt_put(rt);
-		rt = net->ipv6.ip6_null_entry;
-		dst_hold(&rt->dst);
-	}
-
-	return &rt->dst;
+	dst_hold(&net->ipv6.ip6_null_entry->dst);
+	return &net->ipv6.ip6_null_entry->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 			flp6->saddr = saddr;
 		}
 		err = rt->dst.error;
-		goto out;
+		if (err != -EAGAIN)
+			goto out;
 	}
 again:
 	ip6_rt_put(rt);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 230b5aa..8d7b113 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -491,7 +491,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	local_bh_disable();
 
 	/* Check global sysctl_icmp_msgs_per_sec ratelimit */
-	if (!icmpv6_global_allow(type))
+	if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type))
 		goto out_bh_enable;
 
 	mip6_addr_swap(skb);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 2fd5ca1..77f7f8c 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -62,6 +62,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 {
 	u32 *v = (u32 *)loc.v32;
 
+	__ila_hash_secret_init();
 	return jhash_2words(v[0], v[1], hashrnd);
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c6..e6b78ba 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 	struct rt6_info *rt;
 
 	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
-	if (rt->rt6i_flags & RTF_REJECT &&
-	    rt->dst.error == -EAGAIN) {
+	if (rt->dst.error == -EAGAIN) {
 		ip6_rt_put(rt);
 		rt = net->ipv6.ip6_null_entry;
 		dst_hold(&rt->dst);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0c5b4caa..64eea396 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -991,13 +991,13 @@ static void ip6gre_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ip6gre_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->type = ARPHRD_IP6GRE;
 
@@ -1148,7 +1148,7 @@ static int __net_init ip6gre_init_net(struct net *net)
 	return 0;
 
 err_reg_dev:
-	ip6gre_dev_free(ign->fb_tunnel_dev);
+	free_netdev(ign->fb_tunnel_dev);
 err_alloc_dev:
 	return err;
 }
@@ -1300,7 +1300,8 @@ static void ip6gre_tap_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops = &ip6gre_tap_netdev_ops;
-	dev->destructor = ip6gre_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
 
 	dev->features |= NETIF_F_NETNS_LOCAL;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 280268f..cdb3728 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 		if (udpfrag) {
 			int err = ip6_find_1stfragopt(skb, &prevhdr);
-			if (err < 0)
+			if (err < 0) {
+				kfree_skb_list(segs);
 				return ERR_PTR(err);
+			}
 			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 			fptr->frag_off = htons(offset);
 			if (skb->next)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bf8a58a..1699acb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1390,7 +1390,7 @@ static int __ip6_append_data(struct sock *sk,
 	 */
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) ||
+	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7ae6c50..8c6c3c8 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -254,7 +254,6 @@ static void ip6_dev_free(struct net_device *dev)
 	gro_cells_destroy(&t->gro_cells);
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int ip6_tnl_create2(struct net_device *dev)
@@ -322,7 +321,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 	return t;
 
 failed_free:
-	ip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return ERR_PTR(err);
 }
@@ -859,6 +858,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	return 0;
 
 drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1095,6 +1096,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
 	if (!dst) {
 route_lookup:
+		/* add dsfield to flowlabel for route lookup */
+		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
 		dst = ip6_route_output(net, NULL, fl6);
 
 		if (dst->error)
@@ -1244,7 +1248,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPIP;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
-		dsfield = ip6_tclass(key->label);
+		dsfield =  key->tos;
 	} else {
 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 			encap_limit = t->parms.encap_limit;
@@ -1315,7 +1319,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		fl6.flowi6_proto = IPPROTO_IPV6;
 		fl6.daddr = key->u.ipv6.dst;
 		fl6.flowlabel = key->label;
-		dsfield = ip6_tclass(key->label);
+		dsfield = key->tos;
 	} else {
 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1774,7 +1778,8 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &ip6_tnl_netdev_ops;
-	dev->destructor = ip6_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->flags |= IFF_NOARP;
@@ -2221,7 +2226,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 	return 0;
 
 err_register:
-	ip6_dev_free(ip6n->fb_tnl_dev);
+	free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d67ef56..837ea1e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -180,7 +180,6 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
 static void vti6_dev_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static int vti6_tnl_create2(struct net_device *dev)
@@ -235,7 +234,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
 	return t;
 
 failed_free:
-	vti6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -842,7 +841,8 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
 	dev->netdev_ops = &vti6_netdev_ops;
-	dev->destructor = vti6_dev_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = vti6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
@@ -1100,7 +1100,7 @@ static int __net_init vti6_init_net(struct net *net)
 	return 0;
 
 err_register:
-	vti6_dev_free(ip6n->fb_tnl_dev);
+	free_netdev(ip6n->fb_tnl_dev);
 err_alloc_dev:
 	return err;
 }
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 374997d..2ecb39b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -733,7 +733,7 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
 	dev->flags		= IFF_NOARP;
 	dev->netdev_ops		= &reg_vif_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa..ac826dd 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
 	.type =      SOCK_DGRAM,
 	.protocol =  IPPROTO_ICMPV6,
 	.prot =      &pingv6_prot,
-	.ops =       &inet6_dgram_ops,
+	.ops =       &inet6_sockraw_ops,
 	.flags =     INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index cc8e3ae..e88bcb8 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
 	u64 buff64[SNMP_MIB_MAX];
 	int i;
 
-	memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+	memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
 
 	snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
 	for (i = 0; itemlist[i].name; i++)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9..60be012 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif	/* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
 	.family		   = PF_INET6,
 	.owner		   = THIS_MODULE,
 	.release	   = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b..322bd62 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2804,6 +2804,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
 	if ((rt->dst.dev == dev || !dev) &&
 	    rt != adn->net->ipv6.ip6_null_entry &&
 	    (rt->rt6i_nsiblings == 0 ||
+	     (dev && netdev_unregistering(dev)) ||
 	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
 		return -1;
 
@@ -3721,7 +3722,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-	 } else if (event == NETDEV_UNREGISTER) {
+	 } else if (event == NETDEV_UNREGISTER &&
+		    dev->reg_state != NETREG_UNREGISTERED) {
+		/* NETDEV_UNREGISTER could be fired for multiple times by
+		 * netdev_wait_allrefs(). Make sure we only call this once.
+		 */
 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 61e5902..f8ad158 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -265,7 +265,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 	return nt;
 
 failed_free:
-	ipip6_dev_free(dev);
+	free_netdev(dev);
 failed:
 	return NULL;
 }
@@ -305,7 +305,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 	 * we try harder to allocate.
 	 */
 	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
-		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
+		kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
 		NULL;
 
 	rcu_read_lock();
@@ -1336,7 +1336,6 @@ static void ipip6_dev_free(struct net_device *dev)
 
 	dst_cache_destroy(&tunnel->dst_cache);
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 #define SIT_FEATURES (NETIF_F_SG	   | \
@@ -1351,7 +1350,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 	dev->netdev_ops		= &ipip6_netdev_ops;
-	dev->destructor		= ipip6_dev_free;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= ipip6_dev_free;
 
 	dev->type		= ARPHRD_SIT;
 	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 06ec39b..75703fd 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -879,7 +879,8 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	struct sock *sk;
 
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+		if (sk->sk_state == TCP_ESTABLISHED &&
+		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
 			return sk;
 		/* Only check first socket in chain */
 		break;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 08a807b..3ef5d91 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 		return 1;
 #endif
 
-	ipv6_hdr(skb)->payload_len = htons(skb->len);
 	__skb_push(skb, skb->data - skb_network_header(skb));
+	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
 
 	if (xo && (xo->flags & XFRM_GRO)) {
 		skb_mac_header_rebuild(skb);
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e01590..07d3657 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f..9ad07a9 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index f180701..171c3de 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -97,33 +97,7 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self,
 		self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
 	}
 	tty_port_set_check_carrier(&self->port, ~cflag & CLOCAL);
-#if 0
-	/*
-	 * Set up parity check flag
-	 */
 
-	if (I_INPCK(self->tty))
-		driver->read_status_mask |= LSR_FE | LSR_PE;
-	if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty))
-		driver->read_status_mask |= LSR_BI;
-
-	/*
-	 * Characters to ignore
-	 */
-	driver->ignore_status_mask = 0;
-	if (I_IGNPAR(driver->tty))
-		driver->ignore_status_mask |= LSR_PE | LSR_FE;
-
-	if (I_IGNBRK(self->tty)) {
-		self->ignore_status_mask |= LSR_BI;
-		/*
-		 * If we're ignore parity and break indicators, ignore
-		 * overruns too. (For real raw support).
-		 */
-		if (I_IGNPAR(self->tty))
-			self->ignore_status_mask |= LSR_OE;
-	}
-#endif
 	self->settings.data_format = cval;
 
 	ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
@@ -271,67 +245,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
 static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self,
 				      struct serial_struct __user *new_info)
 {
-#if 0
-	struct serial_struct new_serial;
-	struct ircomm_tty_cb old_state, *state;
-
-	if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
-		return -EFAULT;
-
-
-	state = self
-	old_state = *self;
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		if ((new_serial.baud_base != state->settings.data_rate) ||
-		    (new_serial.close_delay != state->close_delay) ||
-		    ((new_serial.flags & ~ASYNC_USR_MASK) !=
-		     (self->flags & ~ASYNC_USR_MASK)))
-			return -EPERM;
-		state->flags = ((state->flags & ~ASYNC_USR_MASK) |
-				 (new_serial.flags & ASYNC_USR_MASK));
-		self->flags = ((self->flags & ~ASYNC_USR_MASK) |
-			       (new_serial.flags & ASYNC_USR_MASK));
-		/* self->custom_divisor = new_serial.custom_divisor; */
-		goto check_and_exit;
-	}
-
-	/*
-	 * OK, past this point, all the error checking has been done.
-	 * At this point, we start making changes.....
-	 */
-
-	if (self->settings.data_rate != new_serial.baud_base) {
-		self->settings.data_rate = new_serial.baud_base;
-		ircomm_param_request(self, IRCOMM_DATA_RATE, TRUE);
-	}
-
-	self->close_delay = new_serial.close_delay * HZ/100;
-	self->closing_wait = new_serial.closing_wait * HZ/100;
-	/* self->custom_divisor = new_serial.custom_divisor; */
-
-	self->flags = ((self->flags & ~ASYNC_FLAGS) |
-		       (new_serial.flags & ASYNC_FLAGS));
-	self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-
- check_and_exit:
-
-	if (tty_port_initialized(self)) {
-		if (((old_state.flags & ASYNC_SPD_MASK) !=
-		     (self->flags & ASYNC_SPD_MASK)) ||
-		    (old_driver.custom_divisor != driver->custom_divisor)) {
-			if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-				driver->tty->alt_speed = 57600;
-			if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-				driver->tty->alt_speed = 115200;
-			if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-				driver->tty->alt_speed = 230400;
-			if ((driver->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-				driver->tty->alt_speed = 460800;
-			ircomm_tty_change_speed(driver);
-		}
-	}
-#endif
 	return 0;
 }
 
@@ -367,24 +280,6 @@ int ircomm_tty_ioctl(struct tty_struct *tty,
 
 	case TIOCGICOUNT:
 		pr_debug("%s(), TIOCGICOUNT not impl!\n", __func__);
-#if 0
-		save_flags(flags); cli();
-		cnow = driver->icount;
-		restore_flags(flags);
-		p_cuser = (struct serial_icounter_struct __user *) arg;
-		if (put_user(cnow.cts, &p_cuser->cts) ||
-		    put_user(cnow.dsr, &p_cuser->dsr) ||
-		    put_user(cnow.rng, &p_cuser->rng) ||
-		    put_user(cnow.dcd, &p_cuser->dcd) ||
-		    put_user(cnow.rx, &p_cuser->rx) ||
-		    put_user(cnow.tx, &p_cuser->tx) ||
-		    put_user(cnow.frame, &p_cuser->frame) ||
-		    put_user(cnow.overrun, &p_cuser->overrun) ||
-		    put_user(cnow.parity, &p_cuser->parity) ||
-		    put_user(cnow.brk, &p_cuser->brk) ||
-		    put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
-			return -EFAULT;
-#endif
 		return 0;
 	default:
 		ret = -ENOIOCTLCMD;  /* ioctls which we must ignore */
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 74d09f9..3be8528 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -65,7 +65,7 @@ static void irlan_eth_setup(struct net_device *dev)
 	ether_setup(dev);
 
 	dev->netdev_ops		= &irlan_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 	dev->min_mtu		= 0;
 	dev->max_mtu		= ETH_MAX_MTU;
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 512dc43..b1432b6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1157,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			goto out;
 	}
 
+	err = -ENOBUFS;
 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
@@ -1168,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-		if (!x->aalg)
+		if (!x->aalg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->aalg->alg_name, a->name);
 		x->aalg->alg_key_len = 0;
 		if (key) {
@@ -1188,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 				goto out;
 			}
 			x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-			if (!x->calg)
+			if (!x->calg) {
+				err = -ENOMEM;
 				goto out;
+			}
 			strcpy(x->calg->alg_name, a->name);
 			x->props.calgo = sa->sadb_sa_encrypt;
 		} else {
@@ -1203,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			if (key)
 				keysize = (key->sadb_key_bits + 7) / 8;
 			x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-			if (!x->ealg)
+			if (!x->ealg) {
+				err = -ENOMEM;
 				goto out;
+			}
 			strcpy(x->ealg->alg_name, a->name);
 			x->ealg->alg_key_len = 0;
 			if (key) {
@@ -1249,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		struct xfrm_encap_tmpl *natt;
 
 		x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-		if (!x->encap)
+		if (!x->encap) {
+			err = -ENOMEM;
 			goto out;
+		}
 
 		natt = x->encap;
 		n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -2755,6 +2764,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 	int err, err2;
 
 	err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
+	if (!err)
+		xfrm_garbage_collect(net);
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - old silent behavior */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8b21af7..4de2ec9 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -114,12 +114,13 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
 	struct l2tp_eth *priv = netdev_priv(dev);
 
-	stats->tx_bytes   = atomic_long_read(&priv->tx_bytes);
-	stats->tx_packets = atomic_long_read(&priv->tx_packets);
-	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
-	stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
-	stats->rx_packets = atomic_long_read(&priv->rx_packets);
-	stats->rx_errors  = atomic_long_read(&priv->rx_errors);
+	stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
+	stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
+	stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
+	stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
+	stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
+	stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);
+
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -141,7 +142,7 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	dev->features		|= NETIF_F_LLTX;
 	dev->netdev_ops		= &l2tp_eth_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62..cf2392b2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,7 +741,47 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+
+	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+		return;
+
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+		ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+			const u8 *ra, u16 tid, struct sta_info **sta)
+{
+	struct tid_ampdu_tx *tid_tx;
+
+	if (tid >= IEEE80211_NUM_TIDS) {
+		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+		       tid, IEEE80211_NUM_TIDS);
+		return NULL;
+	}
+
+	*sta = sta_info_get_bss(sdata, ra);
+	if (!*sta) {
+		ht_dbg(sdata, "Could not find station: %pM\n", ra);
+		return NULL;
+	}
+
+	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
+
+	if (WARN_ON(!tid_tx))
+		ht_dbg(sdata, "addBA was not requested!\n");
+
+	return tid_tx;
+}
+
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
+				      const u8 *ra, u16 tid)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
@@ -750,57 +790,15 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-	if (tid >= IEEE80211_NUM_TIDS) {
-		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, IEEE80211_NUM_TIDS);
-		return;
-	}
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	mutex_lock(&local->sta_mtx);
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		mutex_unlock(&local->sta_mtx);
-		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		return;
-	}
-
-	mutex_lock(&sta->ampdu_mlme.mtx);
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
-	if (WARN_ON(!tid_tx)) {
-		ht_dbg(sdata, "addBA was not requested!\n");
-		goto unlock;
-	}
-
-	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-		goto unlock;
-
-	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-		ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-	mutex_unlock(&sta->ampdu_mlme.mtx);
-	mutex_unlock(&local->sta_mtx);
-}
-
-void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
-				      const u8 *ra, u16 tid)
-{
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
-
-	if (unlikely(!skb))
-		return;
-
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
-
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-	struct ieee80211_local *local = sdata->local;
-	struct sta_info *sta;
-	struct tid_ampdu_tx *tid_tx;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	bool send_delba = false;
 
-	trace_api_stop_tx_ba_cb(sdata, ra, tid);
+	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
-	if (tid >= IEEE80211_NUM_TIDS) {
-		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-		       tid, IEEE80211_NUM_TIDS);
-		return;
-	}
-
-	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-	mutex_lock(&local->sta_mtx);
-
-	sta = sta_info_get_bss(sdata, ra);
-	if (!sta) {
-		ht_dbg(sdata, "Could not find station: %pM\n", ra);
-		goto unlock;
-	}
-
-	mutex_lock(&sta->ampdu_mlme.mtx);
 	spin_lock_bh(&sta->lock);
-	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sdata,
 		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
 		       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	spin_unlock_bh(&sta->lock);
 
 	if (send_delba)
-		ieee80211_send_delba(sdata, ra, tid,
+		ieee80211_send_delba(sdata, sta->sta.addr, tid,
 			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-	mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-	mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_ra_tid *ra_tid;
-	struct sk_buff *skb = dev_alloc_skb(0);
+	struct sta_info *sta;
+	struct tid_ampdu_tx *tid_tx;
 
-	if (unlikely(!skb))
-		return;
+	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-	memcpy(&ra_tid->ra, ra, ETH_ALEN);
-	ra_tid->tid = tid;
+	rcu_read_lock();
+	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+	if (!tid_tx)
+		goto out;
 
-	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-	skb_queue_tail(&sdata->skb_queue, skb);
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6c2e606..4a388fe 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -902,6 +902,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	default:
 		return -EINVAL;
 	}
+	sdata->u.ap.req_smps = sdata->smps_mode;
+
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a5287..6ca5442 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
 	int i;
 
-	cancel_work_sync(&sta->ampdu_mlme.work);
-
 	for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, reason);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 					       reason != AGG_STOP_DESTROY_STA &&
 					       reason != AGG_STOP_PEER_REQUEST);
 	}
+
+	/* stopping might queue the work again - so cancel only afterwards */
+	cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
 		spin_unlock_bh(&sta->lock);
 
 		tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-		if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-						 &tid_tx->state))
+		if (!tid_tx)
+			continue;
+
+		if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+			ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+		if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
 			___ieee80211_stop_tx_ba_session(sta, tid,
 							AGG_STOP_LOCAL_REQUEST);
+		if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+			ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c14..5e002f6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
 	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
-	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
-	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
 	IEEE80211_SDATA_QUEUE_RX_AGG_START	= 3,
 	IEEE80211_SDATA_QUEUE_RX_AGG_STOP	= 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
 	return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-	u8 ra[ETH_ALEN];
-	u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE  */
 struct ieee80211_csa_ie {
 	struct cfg80211_chan_def chandef;
@@ -1539,7 +1531,7 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 		return true;
 	/* can't handle non-legacy preamble yet */
 	if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
-	    status->encoding != RX_ENC_LEGACY)
+	    status->encoding == RX_ENC_LEGACY)
 		return true;
 	return false;
 }
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+			      struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81..f5f5015 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1213,7 +1213,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
 static void ieee80211_if_free(struct net_device *dev)
 {
 	free_percpu(dev->tstats);
-	free_netdev(dev);
 }
 
 static void ieee80211_if_setup(struct net_device *dev)
@@ -1221,7 +1220,8 @@ static void ieee80211_if_setup(struct net_device *dev)
 	ether_setup(dev);
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->netdev_ops = &ieee80211_dataif_ops;
-	dev->destructor = ieee80211_if_free;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ieee80211_if_free;
 }
 
 static void ieee80211_if_setup_no_queue(struct net_device *dev)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct sta_info *sta;
-	struct ieee80211_ra_tid *ra_tid;
 	struct ieee80211_rx_agg *rx_agg;
 
 	if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
 	while ((skb = skb_dequeue(&sdata->skb_queue))) {
 		struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						 ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-			ra_tid = (void *)&skb->cb;
-			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-						ra_tid->tid);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
 			rx_agg = (void *)&skb->cb;
 			mutex_lock(&local->sta_mtx);
 			sta = sta_info_get_bss(sdata, rx_agg->addr);
@@ -1825,6 +1816,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 		ret = dev_alloc_name(ndev, ndev->name);
 		if (ret < 0) {
 			ieee80211_if_free(ndev);
+			free_netdev(ndev);
 			return ret;
 		}
 
@@ -1914,7 +1906,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
 		ret = register_netdevice(ndev);
 		if (ret) {
-			ieee80211_if_free(ndev);
+			free_netdev(ndev);
 			return ret;
 		}
 	}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0ea9712..cc8e6ea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -601,7 +601,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *chan;
-	u32 rate_flags, rates = 0;
+	u32 rates = 0;
 
 	sdata_assert_lock(sdata);
 
@@ -612,7 +612,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 	chan = chanctx_conf->def.chan;
-	rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
 	rcu_read_unlock();
 	sband = local->hw.wiphy->bands[chan->band];
 	shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -636,9 +635,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 		 */
 		rates_len = 0;
 		for (i = 0; i < sband->n_bitrates; i++) {
-			if ((rate_flags & sband->bitrates[i].flags)
-			    != rate_flags)
-				continue;
 			rates |= BIT(i);
 			rates_len++;
 		}
@@ -2818,7 +2814,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 				u32 *rates, u32 *basic_rates,
 				bool *have_higher_than_11mbit,
 				int *min_rate, int *min_rate_index,
-				int shift, u32 rate_flags)
+				int shift)
 {
 	int i, j;
 
@@ -2846,8 +2842,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
 			int brate;
 
 			br = &sband->bitrates[j];
-			if ((rate_flags & br->flags) != rate_flags)
-				continue;
 
 			brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
 			if (brate == rate) {
@@ -4398,40 +4392,32 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 			return -ENOMEM;
 	}
 
-	if (new_sta || override) {
-		err = ieee80211_prep_channel(sdata, cbss);
-		if (err) {
-			if (new_sta)
-				sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-	}
-
+	/*
+	 * Set up the information for the new channel before setting the
+	 * new channel. We can't - completely race-free - change the basic
+	 * rates bitmap and the channel (sband) that it refers to, but if
+	 * we set it up before we at least avoid calling into the driver's
+	 * bss_info_changed() method with invalid information (since we do
+	 * call that from changing the channel - only for IDLE and perhaps
+	 * some others, but ...).
+	 *
+	 * So to avoid that, just set up all the new information before the
+	 * channel, but tell the driver to apply it only afterwards, since
+	 * it might need the new channel for that.
+	 */
 	if (new_sta) {
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
-		struct ieee80211_chanctx_conf *chanctx_conf;
 		const struct cfg80211_bss_ies *ies;
 		int shift = ieee80211_vif_get_shift(&sdata->vif);
-		u32 rate_flags;
-
-		rcu_read_lock();
-		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-		if (WARN_ON(!chanctx_conf)) {
-			rcu_read_unlock();
-			sta_info_free(local, new_sta);
-			return -EINVAL;
-		}
-		rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
-		rcu_read_unlock();
 
 		ieee80211_get_rates(sband, bss->supp_rates,
 				    bss->supp_rates_len,
 				    &rates, &basic_rates,
 				    &have_higher_than_11mbit,
 				    &min_rate, &min_rate_index,
-				    shift, rate_flags);
+				    shift);
 
 		/*
 		 * This used to be a workaround for basic rates missing
@@ -4489,8 +4475,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 			sdata->vif.bss_conf.sync_dtim_count = 0;
 		}
 		rcu_read_unlock();
+	}
 
-		/* tell driver about BSSID, basic rates and timing */
+	if (new_sta || override) {
+		err = ieee80211_prep_channel(sdata, cbss);
+		if (err) {
+			if (new_sta)
+				sta_info_free(local, new_sta);
+			return -EINVAL;
+		}
+	}
+
+	if (new_sta) {
+		/*
+		 * tell driver about BSSID, basic rates and timing
+		 * this was set up above, before setting the channel
+		 */
 		ieee80211_bss_info_change_notify(sdata,
 			BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
 			BSS_CHANGED_BEACON_INT);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1f75280..3674fe3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1613,12 +1613,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 	 */
 	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
+	    !ieee80211_is_back_req(hdr->frame_control) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-	    /* PM bit is only checked in frames where it isn't reserved,
+	    /*
+	     * PM bit is only checked in frames where it isn't reserved,
 	     * in AP mode it's reserved in non-bufferable management frames
 	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+	     * BAR frames should be ignored as specified in
+	     * IEEE 802.11-2012 10.2.1.2.
 	     */
 	    (!ieee80211_is_mgmt(hdr->frame_control) ||
 	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a8..403e3cc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			struct ieee80211_sta_rx_stats *cpurxs;
 
 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-			sinfo->rx_packets += cpurxs->dropped;
+			sinfo->rx_dropped_misc += cpurxs->dropped;
 		}
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cac..ea0747d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING		3
 #define HT_AGG_STATE_WANT_START		4
 #define HT_AGG_STATE_WANT_STOP		5
+#define HT_AGG_STATE_START_CB		6
+#define HT_AGG_STATE_STOP_CB		7
 
 enum ieee80211_agg_stop_reason {
 	AGG_STOP_DECLINED,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c1ef22d..cc19614 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -17,6 +17,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 	key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
-	if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+	if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
 		goto mic_fail;
 
 	/* remove Michael MIC from payload */
@@ -1048,7 +1049,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
 				   skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1098,7 +1099,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
 		bip_aad(skb, aad);
 		ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
 				       skb->data + 24, skb->len - 24, mic);
-		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_cmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -1202,7 +1203,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
 		if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
 				       skb->data + 24, skb->len - 24,
 				       mic) < 0 ||
-		    memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+		    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 			key->u.aes_gmac.icverrors++;
 			return RX_DROP_UNUSABLE;
 		}
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 06019db..bd88a9b 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -526,8 +526,6 @@ static void mac802154_wpan_free(struct net_device *dev)
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 
 	mac802154_llsec_destroy(&sdata->sec);
-
-	free_netdev(dev);
 }
 
 static void ieee802154_if_setup(struct net_device *dev)
@@ -593,7 +591,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 					sdata->dev->dev_addr);
 
 		sdata->dev->header_ops = &mac802154_header_ops;
-		sdata->dev->destructor = mac802154_wpan_free;
+		sdata->dev->needs_free_netdev = true;
+		sdata->dev->priv_destructor = mac802154_wpan_free;
 		sdata->dev->netdev_ops = &mac802154_wpan_ops;
 		sdata->dev->ml_priv = &mac802154_mlme_wpan;
 		wpan_dev->promiscuous_mode = false;
@@ -608,7 +607,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 
 		break;
 	case NL802154_IFTYPE_MONITOR:
-		sdata->dev->destructor = free_netdev;
+		sdata->dev->needs_free_netdev = true;
 		sdata->dev->netdev_ops = &mac802154_monitor_ops;
 		wpan_dev->promiscuous_mode = true;
 		break;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66..7b05fd1 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
 				continue;
 			alive++;
 			nh_flags &= ~flags;
-			WRITE_ONCE(nh->nh_flags, flags);
+			WRITE_ONCE(nh->nh_flags, nh_flags);
 		} endfor_nexthops(rt);
 
 		WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9799a50..a8be9b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -890,8 +890,13 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 out:
 	local_bh_enable();
-	if (last)
+	if (last) {
+		/* nf ct hash resize happened, now clear the leftover. */
+		if ((struct nf_conn *)cb->args[1] == last)
+			cb->args[1] = 0;
+
 		nf_ct_put(last);
+	}
 
 	while (i) {
 		i--;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d5..1c5b14a 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
 		      u8 pf, unsigned int hooknum)
 {
 	const struct sctphdr *sh;
-	struct sctphdr _sctph;
 	const char *logmsg;
 
-	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-	if (!sh) {
+	if (skb->len < dataoff + sizeof(struct sctphdr)) {
 		logmsg = "nf_ct_sctp: short packet ";
 		goto out_invalid;
 	}
 	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
 	    skb->ip_summed == CHECKSUM_NONE) {
+		if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+			logmsg = "nf_ct_sctp: failed to read header ";
+			goto out_invalid;
+		}
+		sh = (const struct sctphdr *)(skb->data + dataoff);
 		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
 			logmsg = "nf_ct_sctp: bad CRC ";
 			goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ef0be32..6c72922 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
-	ct->status &= ~IPS_NAT_DONE_MASK;
+	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
 	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
 			nf_nat_bysource_params);
 
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb..fbdbaa0 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    !nft_rbtree_interval_end(new))
-					p = &parent->rb_left;
-				else if (!nft_rbtree_interval_end(rbe) &&
-					 nft_rbtree_interval_end(new))
-					p = &parent->rb_right;
-				else {
-					*ext = &rbe->ext;
-					return -EEXIST;
-				}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(new)) {
+				p = &parent->rb_left;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(new)) {
+				p = &parent->rb_right;
+			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
+				*ext = &rbe->ext;
+				return -EEXIST;
+			} else {
+				p = &parent->rb_left;
 			}
 		}
 	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f0..7586d44 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
 		goto out;
 	}
 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-	NETLINK_CB(p->skb2).nsid_is_set = true;
+	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+		NETLINK_CB(p->skb2).nsid_is_set = true;
 	val = netlink_broadcast_deliver(sk, p->skb2);
 	if (val < 0) {
 		netlink_overrun(sk);
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 89193a6..04a3128 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -94,7 +94,6 @@ static void internal_dev_destructor(struct net_device *dev)
 	struct vport *vport = ovs_internal_dev_get_vport(dev);
 
 	ovs_vport_free(vport);
-	free_netdev(dev);
 }
 
 static void
@@ -156,7 +155,8 @@ static void do_setup(struct net_device *netdev)
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
 			      IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
-	netdev->destructor = internal_dev_destructor;
+	netdev->needs_free_netdev = true;
+	netdev->priv_destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
 
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 21c28b5..2c93379 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -236,7 +236,7 @@ static void gprs_setup(struct net_device *dev)
 	dev->tx_queue_len	= 10;
 
 	dev->netdev_ops		= &gprs_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->needs_free_netdev	= true;
 }
 
 /*
diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
index 0a4e284..5436922 100644
--- a/net/rxrpc/key.c
+++ b/net/rxrpc/key.c
@@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 				       unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 		toklen -= 4;
 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
 			return -EINVAL;
-		if (tmp > toklen)
+		paddedlen = (tmp + 3) & ~3;
+		if (paddedlen > toklen)
 			return -EINVAL;
 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
 		if (!princ->name_parts[loop])
 			return -ENOMEM;
 		memcpy(princ->name_parts[loop], xdr, tmp);
 		princ->name_parts[loop][tmp] = 0;
-		tmp = (tmp + 3) & ~3;
-		toklen -= tmp;
-		xdr += tmp >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	if (toklen < 4)
@@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 	toklen -= 4;
 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
 		return -EINVAL;
-	if (tmp > toklen)
+	paddedlen = (tmp + 3) & ~3;
+	if (paddedlen > toklen)
 		return -EINVAL;
 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
 	if (!princ->realm)
 		return -ENOMEM;
 	memcpy(princ->realm, xdr, tmp);
 	princ->realm[tmp] = 0;
-	tmp = (tmp + 3) & ~3;
-	toklen -= tmp;
-	xdr += tmp >> 2;
+	toklen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	_debug("%s/...@%s", princ->name_parts[0], princ->realm);
 
@@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 					 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 	toklen -= 8;
 	if (len > max_data_size)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	td->data_len = len;
 
 	if (len > 0) {
 		td->data = kmemdup(xdr, len, GFP_KERNEL);
 		if (!td->data)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	_debug("tag %x len %x", td->tag, td->data_len);
@@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned int toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len, paddedlen;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 	toklen -= 4;
 	if (len > AFSTOKEN_K5_TIX_MAX)
 		return -EINVAL;
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > toklen)
+		return -EINVAL;
 	*_tktlen = len;
 
 	_debug("ticket len %u", len);
@@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
 		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
 		if (!*_ticket)
 			return -ENOMEM;
-		len = (len + 3) & ~3;
-		toklen -= len;
-		xdr += len >> 2;
+		toklen -= paddedlen;
+		xdr += paddedlen >> 2;
 	}
 
 	*_xdr = xdr;
@@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 {
 	const __be32 *xdr = prep->data, *token;
 	const char *cp;
-	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
 	size_t datalen = prep->datalen;
 	int ret;
 
@@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
 		goto not_xdr;
 	datalen -= 4;
-	tmp = (len + 3) & ~3;
-	if (tmp > datalen)
+	paddedlen = (len + 3) & ~3;
+	if (paddedlen > datalen)
 		goto not_xdr;
 
 	cp = (const char *) xdr;
 	for (loop = 0; loop < len; loop++)
 		if (!isprint(cp[loop]))
 			goto not_xdr;
-	if (len < tmp)
-		for (; loop < tmp; loop++)
-			if (cp[loop])
-				goto not_xdr;
+	for (; loop < paddedlen; loop++)
+		if (cp[loop])
+			goto not_xdr;
 	_debug("cellname: [%u/%u] '%*.*s'",
-	       len, tmp, len, len, (const char *) xdr);
-	datalen -= tmp;
-	xdr += tmp >> 2;
+	       len, paddedlen, len, len, (const char *) xdr);
+	datalen -= paddedlen;
+	xdr += paddedlen >> 2;
 
 	/* get the token count */
 	if (datalen < 12)
@@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
 		sec_ix = ntohl(*xdr);
 		datalen -= 4;
 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
-		if (toklen < 20 || toklen > datalen)
+		paddedlen = (toklen + 3) & ~3;
+		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
 			goto not_xdr;
-		datalen -= (toklen + 3) & ~3;
-		xdr += (toklen + 3) >> 2;
+		datalen -= paddedlen;
+		xdr += paddedlen >> 2;
 
 	} while (--loop > 0);
 
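A minimal sketch (not part of the patch) of the bounds check the rxrpc hunks above apply throughout: compute the 4-byte padded length up front and verify it against the remaining token length before allocating or copying, so the padding can never advance the cursor past the buffer. The function example_consume_xdr_string() and its parameters are invented for illustration.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_consume_xdr_string(const __be32 **_xdr,
				      unsigned int *_toklen,
				      unsigned int len, char **out)
{
	const __be32 *xdr = *_xdr;
	unsigned int toklen = *_toklen;
	unsigned int paddedlen = (len + 3) & ~3;	/* round up to a word */

	if (paddedlen > toklen)		/* padded size must still fit */
		return -EINVAL;

	*out = kmalloc(len + 1, GFP_KERNEL);
	if (!*out)
		return -ENOMEM;
	memcpy(*out, xdr, len);
	(*out)[len] = 0;

	*_xdr = xdr + (paddedlen >> 2);	/* advance by whole 32-bit words */
	*_toklen = toklen - paddedlen;
	return 0;
}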
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 164b5ac..7dc5892 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -94,8 +94,10 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 		k++;
 	}
 
-	if (n)
+	if (n) {
+		err = -EINVAL;
 		goto err_out;
+	}
 
 	return keys_ex;
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f42008b..b062bc8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -132,21 +132,21 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 		}
 	}
 
-	spin_lock_bh(&police->tcf_lock);
 	if (est) {
 		err = gen_replace_estimator(&police->tcf_bstats, NULL,
 					    &police->tcf_rate_est,
 					    &police->tcf_lock,
 					    NULL, est);
 		if (err)
-			goto failure_unlock;
+			goto failure;
 	} else if (tb[TCA_POLICE_AVRATE] &&
 		   (ret == ACT_P_CREATED ||
 		    !gen_estimator_active(&police->tcf_rate_est))) {
 		err = -EINVAL;
-		goto failure_unlock;
+		goto failure;
 	}
 
+	spin_lock_bh(&police->tcf_lock);
 	/* No failure allowed after this point */
 	police->tcfp_mtu = parm->mtu;
 	if (police->tcfp_mtu == 0) {
@@ -192,8 +192,6 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 
 	return ret;
 
-failure_unlock:
-	spin_unlock_bh(&police->tcf_lock);
 failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e88342f..cfdbfa1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1019,7 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		return sch;
 	}
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
-	ops->destroy(sch);
+	if (ops->destroy)
+		ops->destroy(sch);
 err_out3:
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8c58923..3dcd0ec 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -275,6 +275,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 		if (sctp_sk(sk)->bind_hash)
 			sctp_put_port(sk);
 
+		sctp_sk(sk)->ep = NULL;
 		sock_put(sk);
 	}
 
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954e..9a64721 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -278,7 +278,6 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
 
 static int sctp_sock_dump(struct sock *sk, void *p)
 {
-	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_comm_param *commp = p;
 	struct sk_buff *skb = commp->skb;
 	struct netlink_callback *cb = commp->cb;
@@ -287,7 +286,9 @@ static int sctp_sock_dump(struct sock *sk, void *p)
 	int err = 0;
 
 	lock_sock(sk);
-	list_for_each_entry(assoc, &ep->asocs, asocs) {
+	if (!sctp_sk(sk)->ep)
+		goto release;
+	list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f16c8d9..3a8318e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4622,13 +4622,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
 
 	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
 	     hash++, head++) {
-		read_lock(&head->lock);
+		read_lock_bh(&head->lock);
 		sctp_for_each_hentry(epb, &head->chain) {
 			err = cb(sctp_ep(epb), p);
 			if (err)
 				break;
 		}
-		read_unlock(&head->lock);
+		read_unlock_bh(&head->lock);
 	}
 
 	return err;
@@ -4666,9 +4666,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 	if (err)
 		return err;
 
-	sctp_transport_get_idx(net, &hti, pos);
-	obj = sctp_transport_get_next(net, &hti);
-	for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+	obj = sctp_transport_get_idx(net, &hti, pos + 1);
+	for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
 		struct sctp_transport *transport = obj;
 
 		if (!sctp_transport_hold(transport))
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 24fedd4b..03f6b58 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 
 	for (i = 0; i < (reqs << 1); i++) {
 		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-		if (!rqst) {
-			pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
-			       __func__);
+		if (!rqst)
 			goto out_free;
-		}
+
 		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
 
 		rqst->rq_xprt = &r_xprt->rx_xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 16aff8d..d5b54c0 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
 	case -ENETUNREACH:
 	case -EADDRINUSE:
 	case -ENOBUFS:
-		/* retry with existing socket, after a delay */
+		/*
+		 * xs_tcp_force_close() wakes tasks with -EIO.
+		 * We need to wake them first to ensure the
+		 * correct error code.
+		 */
+		xprt_wake_pending_tasks(xprt, status);
 		xs_tcp_force_close(xprt);
 		goto out;
 	}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 312ef7d..ab30876 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 	}
 
 	if (skb_cloned(_skb) &&
-	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+	    pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
 		goto exit;
 
 	/* Now reverse the concerned fields */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 6a7fe76..c77ced0 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -343,7 +343,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
  * are still connected to it and there's no way to inform "a polling
  * implementation" that it should let go of a certain wait queue
  *
- * In order to propagate a wake up, a wait_queue_t of the client
+ * In order to propagate a wake up, a wait_queue_entry_t of the client
  * socket is enqueued on the peer_wait queue of the server socket
  * whose wake function does a wake_up on the ordinary client socket
  * wait queue. This connection is established whenever a write (or
@@ -352,7 +352,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
  * was relayed.
  */
 
-static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
+static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
 				      void *key)
 {
 	struct unix_sock *u;
@@ -999,7 +999,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct path path = { };
 
 	err = -EINVAL;
-	if (sunaddr->sun_family != AF_UNIX)
+	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+	    sunaddr->sun_family != AF_UNIX)
 		goto out;
 
 	if (addr_len == sizeof(short)) {
@@ -1110,6 +1111,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
 	unsigned int hash;
 	int err;
 
+	err = -EINVAL;
+	if (alen < offsetofend(struct sockaddr, sa_family))
+		goto out;
+
 	if (addr->sa_family != AF_UNSPEC) {
 		err = unix_mkname(sunaddr, alen, &hash);
 		if (err < 0)
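A minimal sketch (not part of the patch) of the length validation the af_unix hunks above add: check that the caller supplied at least enough bytes to cover the address family field before reading it. example_check_sockaddr_len() is an invented name.

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/un.h>

static int example_check_sockaddr_len(const struct sockaddr_un *sunaddr,
				      int addr_len)
{
	/* too short to even contain sun_family: reject before dereferencing */
	if (addr_len < offsetofend(struct sockaddr_un, sun_family))
		return -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	return 0;
}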
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 1a4db67..6cdb054 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -914,13 +914,12 @@ int call_commit_handler(struct net_device *dev)
  * Main IOCTl dispatcher.
  * Check the type of IOCTL and call the appropriate wrapper...
  */
-static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+static int wireless_process_ioctl(struct net *net, struct iwreq *iwr,
 				  unsigned int cmd,
 				  struct iw_request_info *info,
 				  wext_ioctl_func standard,
 				  wext_ioctl_func private)
 {
-	struct iwreq *iwr = (struct iwreq *) ifr;
 	struct net_device *dev;
 	iw_handler	handler;
 
@@ -928,7 +927,7 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 	 * The copy_to/from_user() of ifr is also dealt with in there */
 
 	/* Make sure the device exist */
-	if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL)
+	if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL)
 		return -ENODEV;
 
 	/* A bunch of special cases, then the generic case...
@@ -957,9 +956,6 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
 		else if (private)
 			return private(dev, iwr, cmd, info, handler);
 	}
-	/* Old driver API : call driver ioctl handler */
-	if (dev->netdev_ops->ndo_do_ioctl)
-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 	return -EOPNOTSUPP;
 }
 
@@ -977,7 +973,7 @@ static int wext_permission_check(unsigned int cmd)
 }
 
 /* entry point from dev ioctl */
-static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
+static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr,
 			       unsigned int cmd, struct iw_request_info *info,
 			       wext_ioctl_func standard,
 			       wext_ioctl_func private)
@@ -987,9 +983,9 @@ static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr,
 	if (ret)
 		return ret;
 
-	dev_load(net, ifr->ifr_name);
+	dev_load(net, iwr->ifr_name);
 	rtnl_lock();
-	ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private);
+	ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private);
 	rtnl_unlock();
 
 	return ret;
@@ -1039,18 +1035,18 @@ static int ioctl_standard_call(struct net_device *	dev,
 }
 
 
-int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd,
 		      void __user *arg)
 {
 	struct iw_request_info info = { .cmd = cmd, .flags = 0 };
 	int ret;
 
-	ret = wext_ioctl_dispatch(net, ifr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, iwr, cmd, &info,
 				  ioctl_standard_call,
 				  ioctl_private_call);
 	if (ret >= 0 &&
 	    IW_IS_GET(cmd) &&
-	    copy_to_user(arg, ifr, sizeof(struct iwreq)))
+	    copy_to_user(arg, iwr, sizeof(struct iwreq)))
 		return -EFAULT;
 
 	return ret;
@@ -1107,7 +1103,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 	info.cmd = cmd;
 	info.flags = IW_REQUEST_FLAG_COMPAT;
 
-	ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info,
+	ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
 				  compat_standard_call,
 				  compat_private_call);
 
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index abf81b3..55b2ac3 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -4,8 +4,7 @@
 
 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
 		      xfrm_input.o xfrm_output.o \
-		      xfrm_sysctl.o xfrm_replay.o
-obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
+		      xfrm_sysctl.o xfrm_replay.o xfrm_device.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 574e6f3..5aba036 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -22,6 +22,7 @@
 #include <net/xfrm.h>
 #include <linux/notifier.h>
 
+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
 	int err;
@@ -137,6 +138,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 	return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+#endif
 
 int xfrm_dev_register(struct net_device *dev)
 {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ed4e52d..643a18f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 		err = -ESRCH;
 out:
 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
-
-	if (cnt)
-		xfrm_garbage_collect(net);
-
 	return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 38614df..86116e9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2027,6 +2027,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 			return 0;
 		return err;
 	}
+	xfrm_garbage_collect(net);
 
 	c.data.type = type;
 	c.event = nlh->nlmsg_type;
diff --git a/scripts/.gitignore b/scripts/.gitignore
index e063daa..0442c06 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -7,7 +7,6 @@
 unifdef
 ihex2fw
 recordmcount
-docproc
 check-lc_ctype
 sortextable
 asn1_compiler
diff --git a/scripts/Makefile b/scripts/Makefile
index 1d80897..c06f499 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -6,8 +6,6 @@
 # pnmttologo:    Convert pnm files to logo files
 # conmakehash:   Create chartable
 # conmakehash:	 Create arrays for initializing the kernel console tables
-# docproc:       Used in Documentation/DocBook
-# check-lc_ctype: Used in Documentation/DocBook
 
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 
@@ -29,16 +27,12 @@
 always		:= $(hostprogs-y) $(hostprogs-m)
 
 # The following hostprogs-y programs are only build on demand
-hostprogs-y += unifdef docproc check-lc_ctype
+hostprogs-y += unifdef
 
 # These targets are used internally to avoid "is up to date" messages
-PHONY += build_unifdef build_docproc build_check-lc_ctype
+PHONY += build_unifdef
 build_unifdef: $(obj)/unifdef
 	@:
-build_docproc: $(obj)/docproc
-	@:
-build_check-lc_ctype: $(obj)/check-lc_ctype
-	@:
 
 subdir-$(CONFIG_MODVERSIONS) += genksyms
 subdir-y                     += mod
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index ce753a4..c583a1e 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -14,7 +14,15 @@
 include scripts/Kbuild.include
 
 srcdir        := $(srctree)/$(obj)
-subdirs       := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
+
+# When make is run under a fakechroot environment, $(wildcard $(srcdir)/*/.)
+# returns not only directories but also regular files. So we use a
+# combination of sort/dir/wildcard instead, which works with fakechroot.
+subdirs       := $(patsubst $(srcdir)/%/,%,\
+		 $(filter-out $(srcdir)/,\
+		 $(sort $(dir $(wildcard $(srcdir)/*/)))))
+
 # caller may set destination dir (when installing to asm/)
 _dst          := $(if $(dst),$(dst),$(obj))
 
diff --git a/scripts/check-lc_ctype.c b/scripts/check-lc_ctype.c
deleted file mode 100644
index 9097ff5..0000000
--- a/scripts/check-lc_ctype.c
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Check that a specified locale works as LC_CTYPE.  Used by the
- * DocBook build system to probe for C.UTF-8 support.
- */
-
-#include <locale.h>
-
-int main(void)
-{
-	return !setlocale(LC_CTYPE, "");
-}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4b9569f..c7e4d73 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5533,23 +5533,6 @@
 			}
 		}
 
-# Check for expedited grace periods that interrupt non-idle non-nohz
-# online CPUs.  These expedited can therefore degrade real-time response
-# if used carelessly, and should be avoided where not absolutely
-# needed.  It is always OK to use synchronize_rcu_expedited() and
-# synchronize_sched_expedited() at boot time (before real-time applications
-# start) and in error situations where real-time response is compromised in
-# any case.  Note that synchronize_srcu_expedited() does -not- interrupt
-# other CPUs, so don't warn on uses of synchronize_srcu_expedited().
-# Of course, nothing comes for free, and srcu_read_lock() and
-# srcu_read_unlock() do contain full memory barriers in payment for
-# synchronize_srcu_expedited() non-interruption properties.
-		if ($line =~ /\b(synchronize_rcu_expedited|synchronize_sched_expedited)\(/) {
-			WARN("EXPEDITED_RCU_GRACE_PERIOD",
-			     "expedited RCU grace periods should be avoided where they can degrade real-time response\n" . $herecurr);
-
-		}
-
 # check of hardware specific defines
 		if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
 			CHK("ARCH_DEFINES",
diff --git a/scripts/docproc.c b/scripts/docproc.c
deleted file mode 100644
index 0a12593b..0000000
--- a/scripts/docproc.c
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- *	docproc is a simple preprocessor for the template files
- *      used as placeholders for the kernel internal documentation.
- *	docproc is used for documentation-frontend and
- *      dependency-generator.
- *	The two usages have in common that they require
- *	some knowledge of the .tmpl syntax, therefore they
- *	are kept together.
- *
- *	documentation-frontend
- *		Scans the template file and call kernel-doc for
- *		all occurrences of ![EIF]file
- *		Beforehand each referenced file is scanned for
- *		any symbols that are exported via these macros:
- *			EXPORT_SYMBOL(), EXPORT_SYMBOL_GPL(), &
- *			EXPORT_SYMBOL_GPL_FUTURE()
- *		This is used to create proper -function and
- *		-nofunction arguments in calls to kernel-doc.
- *		Usage: docproc doc file.tmpl
- *
- *	dependency-generator:
- *		Scans the template file and list all files
- *		referenced in a format recognized by make.
- *		Usage:	docproc depend file.tmpl
- *		Writes dependency information to stdout
- *		in the following format:
- *		file.tmpl src.c	src2.c
- *		The filenames are obtained from the following constructs:
- *		!Efilename
- *		!Ifilename
- *		!Dfilename
- *		!Ffilename
- *		!Pfilename
- *
- */
-
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-#include <unistd.h>
-#include <limits.h>
-#include <errno.h>
-#include <getopt.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <time.h>
-
-/* exitstatus is used to keep track of any failing calls to kernel-doc,
- * but execution continues. */
-int exitstatus = 0;
-
-typedef void DFL(char *);
-DFL *defaultline;
-
-typedef void FILEONLY(char * file);
-FILEONLY *internalfunctions;
-FILEONLY *externalfunctions;
-FILEONLY *symbolsonly;
-FILEONLY *findall;
-
-typedef void FILELINE(char * file, char * line);
-FILELINE * singlefunctions;
-FILELINE * entity_system;
-FILELINE * docsection;
-
-#define MAXLINESZ     2048
-#define MAXFILES      250
-#define KERNELDOCPATH "scripts/"
-#define KERNELDOC     "kernel-doc"
-#define DOCBOOK       "-docbook"
-#define RST           "-rst"
-#define LIST          "-list"
-#define FUNCTION      "-function"
-#define NOFUNCTION    "-nofunction"
-#define NODOCSECTIONS "-no-doc-sections"
-#define SHOWNOTFOUND  "-show-not-found"
-
-enum file_format {
-	FORMAT_AUTO,
-	FORMAT_DOCBOOK,
-	FORMAT_RST,
-};
-
-static enum file_format file_format = FORMAT_AUTO;
-
-#define KERNELDOC_FORMAT	(file_format == FORMAT_RST ? RST : DOCBOOK)
-
-static char *srctree, *kernsrctree;
-
-static char **all_list = NULL;
-static int all_list_len = 0;
-
-static void consume_symbol(const char *sym)
-{
-	int i;
-
-	for (i = 0; i < all_list_len; i++) {
-		if (!all_list[i])
-			continue;
-		if (strcmp(sym, all_list[i]))
-			continue;
-		all_list[i] = NULL;
-		break;
-	}
-}
-
-static void usage (void)
-{
-	fprintf(stderr, "Usage: docproc [{--docbook|--rst}] {doc|depend} file\n");
-	fprintf(stderr, "Input is read from file.tmpl. Output is sent to stdout\n");
-	fprintf(stderr, "doc: frontend when generating kernel documentation\n");
-	fprintf(stderr, "depend: generate list of files referenced within file\n");
-	fprintf(stderr, "Environment variable SRCTREE: absolute path to sources.\n");
-	fprintf(stderr, "                     KBUILD_SRC: absolute path to kernel source tree.\n");
-}
-
-/*
- * Execute kernel-doc with parameters given in svec
- */
-static void exec_kernel_doc(char **svec)
-{
-	pid_t pid;
-	int ret;
-	char real_filename[PATH_MAX + 1];
-	/* Make sure output generated so far are flushed */
-	fflush(stdout);
-	switch (pid=fork()) {
-		case -1:
-			perror("fork");
-			exit(1);
-		case  0:
-			memset(real_filename, 0, sizeof(real_filename));
-			strncat(real_filename, kernsrctree, PATH_MAX);
-			strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
-					PATH_MAX - strlen(real_filename));
-			execvp(real_filename, svec);
-			fprintf(stderr, "exec ");
-			perror(real_filename);
-			exit(1);
-		default:
-			waitpid(pid, &ret ,0);
-	}
-	if (WIFEXITED(ret))
-		exitstatus |= WEXITSTATUS(ret);
-	else
-		exitstatus = 0xff;
-}
-
-/* Types used to create list of all exported symbols in a number of files */
-struct symbols
-{
-	char *name;
-};
-
-struct symfile
-{
-	char *filename;
-	struct symbols *symbollist;
-	int symbolcnt;
-};
-
-struct symfile symfilelist[MAXFILES];
-int symfilecnt = 0;
-
-static void add_new_symbol(struct symfile *sym, char * symname)
-{
-	sym->symbollist =
-	  realloc(sym->symbollist, (sym->symbolcnt + 1) * sizeof(char *));
-	sym->symbollist[sym->symbolcnt++].name = strdup(symname);
-}
-
-/* Add a filename to the list */
-static struct symfile * add_new_file(char * filename)
-{
-	symfilelist[symfilecnt++].filename = strdup(filename);
-	return &symfilelist[symfilecnt - 1];
-}
-
-/* Check if file already are present in the list */
-static struct symfile * filename_exist(char * filename)
-{
-	int i;
-	for (i=0; i < symfilecnt; i++)
-		if (strcmp(symfilelist[i].filename, filename) == 0)
-			return &symfilelist[i];
-	return NULL;
-}
-
-/*
- * List all files referenced within the template file.
- * Files are separated by tabs.
- */
-static void adddep(char * file)		   { printf("\t%s", file); }
-static void adddep2(char * file, char * line)     { line = line; adddep(file); }
-static void noaction(char * line)		   { line = line; }
-static void noaction2(char * file, char * line)   { file = file; line = line; }
-
-/* Echo the line without further action */
-static void printline(char * line)               { printf("%s", line); }
-
-/*
- * Find all symbols in filename that are exported with EXPORT_SYMBOL &
- * EXPORT_SYMBOL_GPL (& EXPORT_SYMBOL_GPL_FUTURE implicitly).
- * All symbols located are stored in symfilelist.
- */
-static void find_export_symbols(char * filename)
-{
-	FILE * fp;
-	struct symfile *sym;
-	char line[MAXLINESZ];
-	if (filename_exist(filename) == NULL) {
-		char real_filename[PATH_MAX + 1];
-		memset(real_filename, 0, sizeof(real_filename));
-		strncat(real_filename, srctree, PATH_MAX);
-		strncat(real_filename, "/", PATH_MAX - strlen(real_filename));
-		strncat(real_filename, filename,
-				PATH_MAX - strlen(real_filename));
-		sym = add_new_file(filename);
-		fp = fopen(real_filename, "r");
-		if (fp == NULL)	{
-			fprintf(stderr, "docproc: ");
-			perror(real_filename);
-			exit(1);
-		}
-		while (fgets(line, MAXLINESZ, fp)) {
-			char *p;
-			char *e;
-			if (((p = strstr(line, "EXPORT_SYMBOL_GPL")) != NULL) ||
-			    ((p = strstr(line, "EXPORT_SYMBOL")) != NULL)) {
-				/* Skip EXPORT_SYMBOL{_GPL} */
-				while (isalnum(*p) || *p == '_')
-					p++;
-				/* Remove parentheses & additional whitespace */
-				while (isspace(*p))
-					p++;
-				if (*p != '(')
-					continue; /* Syntax error? */
-				else
-					p++;
-				while (isspace(*p))
-					p++;
-				e = p;
-				while (isalnum(*e) || *e == '_')
-					e++;
-				*e = '\0';
-				add_new_symbol(sym, p);
-			}
-		}
-		fclose(fp);
-	}
-}
-
-/*
- * Document all external or internal functions in a file.
- * Call kernel-doc with following parameters:
- * kernel-doc [-docbook|-rst] -nofunction function_name1 filename
- * Function names are obtained from all the src files
- * by find_export_symbols.
- * intfunc uses -nofunction
- * extfunc uses -function
- */
-static void docfunctions(char * filename, char * type)
-{
-	int i,j;
-	int symcnt = 0;
-	int idx = 0;
-	char **vec;
-
-	for (i=0; i <= symfilecnt; i++)
-		symcnt += symfilelist[i].symbolcnt;
-	vec = malloc((2 + 2 * symcnt + 3) * sizeof(char *));
-	if (vec == NULL) {
-		perror("docproc: ");
-		exit(1);
-	}
-	vec[idx++] = KERNELDOC;
-	vec[idx++] = KERNELDOC_FORMAT;
-	vec[idx++] = NODOCSECTIONS;
-	for (i=0; i < symfilecnt; i++) {
-		struct symfile * sym = &symfilelist[i];
-		for (j=0; j < sym->symbolcnt; j++) {
-			vec[idx++]     = type;
-			consume_symbol(sym->symbollist[j].name);
-			vec[idx++] = sym->symbollist[j].name;
-		}
-	}
-	vec[idx++]     = filename;
-	vec[idx] = NULL;
-	if (file_format == FORMAT_RST)
-		printf(".. %s\n", filename);
-	else
-		printf("<!-- %s -->\n", filename);
-	exec_kernel_doc(vec);
-	fflush(stdout);
-	free(vec);
-}
-static void intfunc(char * filename) {	docfunctions(filename, NOFUNCTION); }
-static void extfunc(char * filename) { docfunctions(filename, FUNCTION);   }
-
-/*
- * Document specific function(s) in a file.
- * Call kernel-doc with the following parameters:
- * kernel-doc -docbook -function function1 [-function function2]
- */
-static void singfunc(char * filename, char * line)
-{
-	char *vec[200]; /* Enough for specific functions */
-	int i, idx = 0;
-	int startofsym = 1;
-	vec[idx++] = KERNELDOC;
-	vec[idx++] = KERNELDOC_FORMAT;
-	vec[idx++] = SHOWNOTFOUND;
-
-	/* Split line up in individual parameters preceded by FUNCTION */
-	for (i=0; line[i]; i++) {
-		if (isspace(line[i])) {
-			line[i] = '\0';
-			startofsym = 1;
-			continue;
-		}
-		if (startofsym) {
-			startofsym = 0;
-			vec[idx++] = FUNCTION;
-			vec[idx++] = &line[i];
-		}
-	}
-	for (i = 0; i < idx; i++) {
-		if (strcmp(vec[i], FUNCTION))
-			continue;
-		consume_symbol(vec[i + 1]);
-	}
-	vec[idx++] = filename;
-	vec[idx] = NULL;
-	exec_kernel_doc(vec);
-}
-
-/*
- * Insert specific documentation section from a file.
- * Call kernel-doc with the following parameters:
- * kernel-doc -docbook -function "doc section" filename
- */
-static void docsect(char *filename, char *line)
-{
-	/* kerneldoc -docbook -show-not-found -function "section" file NULL */
-	char *vec[7];
-	char *s;
-
-	for (s = line; *s; s++)
-		if (*s == '\n')
-			*s = '\0';
-
-	if (asprintf(&s, "DOC: %s", line) < 0) {
-		perror("asprintf");
-		exit(1);
-	}
-	consume_symbol(s);
-	free(s);
-
-	vec[0] = KERNELDOC;
-	vec[1] = KERNELDOC_FORMAT;
-	vec[2] = SHOWNOTFOUND;
-	vec[3] = FUNCTION;
-	vec[4] = line;
-	vec[5] = filename;
-	vec[6] = NULL;
-	exec_kernel_doc(vec);
-}
-
-static void find_all_symbols(char *filename)
-{
-	char *vec[4]; /* kerneldoc -list file NULL */
-	pid_t pid;
-	int ret, i, count, start;
-	char real_filename[PATH_MAX + 1];
-	int pipefd[2];
-	char *data, *str;
-	size_t data_len = 0;
-
-	vec[0] = KERNELDOC;
-	vec[1] = LIST;
-	vec[2] = filename;
-	vec[3] = NULL;
-
-	if (pipe(pipefd)) {
-		perror("pipe");
-		exit(1);
-	}
-
-	switch (pid=fork()) {
-		case -1:
-			perror("fork");
-			exit(1);
-		case  0:
-			close(pipefd[0]);
-			dup2(pipefd[1], 1);
-			memset(real_filename, 0, sizeof(real_filename));
-			strncat(real_filename, kernsrctree, PATH_MAX);
-			strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
-					PATH_MAX - strlen(real_filename));
-			execvp(real_filename, vec);
-			fprintf(stderr, "exec ");
-			perror(real_filename);
-			exit(1);
-		default:
-			close(pipefd[1]);
-			data = malloc(4096);
-			do {
-				while ((ret = read(pipefd[0],
-						   data + data_len,
-						   4096)) > 0) {
-					data_len += ret;
-					data = realloc(data, data_len + 4096);
-				}
-			} while (ret == -EAGAIN);
-			if (ret != 0) {
-				perror("read");
-				exit(1);
-			}
-			waitpid(pid, &ret ,0);
-	}
-	if (WIFEXITED(ret))
-		exitstatus |= WEXITSTATUS(ret);
-	else
-		exitstatus = 0xff;
-
-	count = 0;
-	/* poor man's strtok, but with counting */
-	for (i = 0; i < data_len; i++) {
-		if (data[i] == '\n') {
-			count++;
-			data[i] = '\0';
-		}
-	}
-	start = all_list_len;
-	all_list_len += count;
-	all_list = realloc(all_list, sizeof(char *) * all_list_len);
-	str = data;
-	for (i = 0; i < data_len && start != all_list_len; i++) {
-		if (data[i] == '\0') {
-			all_list[start] = str;
-			str = data + i + 1;
-			start++;
-		}
-	}
-}
-
-/*
- * Terminate s at first space, if any. If there was a space, return pointer to
- * the character after that. Otherwise, return pointer to the terminating NUL.
- */
-static char *chomp(char *s)
-{
-	while (*s && !isspace(*s))
-		s++;
-
-	if (*s)
-		*s++ = '\0';
-
-	return s;
-}
-
-/* Return pointer to directive content, or NULL if not a directive. */
-static char *is_directive(char *line)
-{
-	if (file_format == FORMAT_DOCBOOK && line[0] == '!')
-		return line + 1;
-	else if (file_format == FORMAT_RST && !strncmp(line, ".. !", 4))
-		return line + 4;
-
-	return NULL;
-}
-
-/*
- * Parse file, calling action specific functions for:
- * 1) Lines containing !E
- * 2) Lines containing !I
- * 3) Lines containing !D
- * 4) Lines containing !F
- * 5) Lines containing !P
- * 6) Lines containing !C
- * 7) Default lines - lines not matching the above
- */
-static void parse_file(FILE *infile)
-{
-	char line[MAXLINESZ];
-	char *p, *s;
-	while (fgets(line, MAXLINESZ, infile)) {
-		p = is_directive(line);
-		if (!p) {
-			defaultline(line);
-			continue;
-		}
-
-		switch (*p++) {
-		case 'E':
-			chomp(p);
-			externalfunctions(p);
-			break;
-		case 'I':
-			chomp(p);
-			internalfunctions(p);
-			break;
-		case 'D':
-			chomp(p);
-			symbolsonly(p);
-			break;
-		case 'F':
-			/* filename */
-			s = chomp(p);
-			/* function names */
-			while (isspace(*s))
-				s++;
-			singlefunctions(p, s);
-			break;
-		case 'P':
-			/* filename */
-			s = chomp(p);
-			/* DOC: section name */
-			while (isspace(*s))
-				s++;
-			docsection(p, s);
-			break;
-		case 'C':
-			chomp(p);
-			if (findall)
-				findall(p);
-			break;
-		default:
-			defaultline(line);
-		}
-	}
-	fflush(stdout);
-}
-
-/*
- * Is this a RestructuredText template?  Answer the question by seeing if its
- * name ends in ".rst".
- */
-static int is_rst(const char *file)
-{
-	char *dot = strrchr(file, '.');
-
-	return dot && !strcmp(dot + 1, "rst");
-}
-
-enum opts {
-	OPT_DOCBOOK,
-	OPT_RST,
-	OPT_HELP,
-};
-
-int main(int argc, char *argv[])
-{
-	const char *subcommand, *filename;
-	FILE * infile;
-	int i;
-
-	srctree = getenv("SRCTREE");
-	if (!srctree)
-		srctree = getcwd(NULL, 0);
-	kernsrctree = getenv("KBUILD_SRC");
-	if (!kernsrctree || !*kernsrctree)
-		kernsrctree = srctree;
-
-	for (;;) {
-		int c;
-		struct option opts[] = {
-			{ "docbook",	no_argument, NULL, OPT_DOCBOOK },
-			{ "rst",	no_argument, NULL, OPT_RST },
-			{ "help",	no_argument, NULL, OPT_HELP },
-			{}
-		};
-
-		c = getopt_long_only(argc, argv, "", opts, NULL);
-		if (c == -1)
-			break;
-
-		switch (c) {
-		case OPT_DOCBOOK:
-			file_format = FORMAT_DOCBOOK;
-			break;
-		case OPT_RST:
-			file_format = FORMAT_RST;
-			break;
-		case OPT_HELP:
-			usage();
-			return 0;
-		default:
-		case '?':
-			usage();
-			return 1;
-		}
-	}
-
-	argc -= optind;
-	argv += optind;
-
-	if (argc != 2) {
-		usage();
-		exit(1);
-	}
-
-	subcommand = argv[0];
-	filename = argv[1];
-
-	if (file_format == FORMAT_AUTO)
-		file_format = is_rst(filename) ? FORMAT_RST : FORMAT_DOCBOOK;
-
-	/* Open file, exit on error */
-	infile = fopen(filename, "r");
-	if (infile == NULL) {
-		fprintf(stderr, "docproc: ");
-		perror(filename);
-		exit(2);
-	}
-
-	if (strcmp("doc", subcommand) == 0) {
-		if (file_format == FORMAT_RST) {
-			time_t t = time(NULL);
-			printf(".. generated from %s by docproc %s\n",
-			       filename, ctime(&t));
-		}
-
-		/* Need to do this in two passes.
-		 * First pass is used to collect all symbols exported
-		 * in the various files;
-		 * Second pass generate the documentation.
-		 * This is required because some functions are declared
-		 * and exported in different files :-((
-		 */
-		/* Collect symbols */
-		defaultline       = noaction;
-		internalfunctions = find_export_symbols;
-		externalfunctions = find_export_symbols;
-		symbolsonly       = find_export_symbols;
-		singlefunctions   = noaction2;
-		docsection        = noaction2;
-		findall           = find_all_symbols;
-		parse_file(infile);
-
-		/* Rewind to start from beginning of file again */
-		fseek(infile, 0, SEEK_SET);
-		defaultline       = printline;
-		internalfunctions = intfunc;
-		externalfunctions = extfunc;
-		symbolsonly       = printline;
-		singlefunctions   = singfunc;
-		docsection        = docsect;
-		findall           = NULL;
-
-		parse_file(infile);
-
-		for (i = 0; i < all_list_len; i++) {
-			if (!all_list[i])
-				continue;
-			fprintf(stderr, "Warning: didn't use docs for %s\n",
-				all_list[i]);
-		}
-	} else if (strcmp("depend", subcommand) == 0) {
-		/* Create first part of dependency chain
-		 * file.tmpl */
-		printf("%s\t", filename);
-		defaultline       = noaction;
-		internalfunctions = adddep;
-		externalfunctions = adddep;
-		symbolsonly       = adddep;
-		singlefunctions   = adddep2;
-		docsection        = adddep2;
-		findall           = adddep;
-		parse_file(infile);
-		printf("\n");
-	} else {
-		fprintf(stderr, "Unknown option: %s\n", subcommand);
-		exit(1);
-	}
-	fclose(infile);
-	fflush(stdout);
-	return exitstatus;
-}
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py
index f9b92ec..5afd109 100644
--- a/scripts/gdb/linux/dmesg.py
+++ b/scripts/gdb/linux/dmesg.py
@@ -23,10 +23,11 @@
         super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
 
     def invoke(self, arg, from_tty):
-        log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
-        log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
-        log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
-        log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
+        log_buf_addr = int(str(gdb.parse_and_eval(
+            "'printk.c'::log_buf")).split()[0], 16)
+        log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx"))
+        log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx"))
+        log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len"))
 
         inf = gdb.inferiors()[0]
         start = log_buf_addr + log_first_idx
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index 3bffdca..b724a02 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start,
 int yylex(void);
 int yyparse(void);
 
-void error_with_pos(const char *, ...);
+void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));
 
 /*----------------------------------------------------------------------*/
 #define xmalloc(size) ({ void *__ptr = malloc(size);		\
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 90a091b..eb81446 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -196,7 +196,7 @@
 
 # Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
 PHONY += $(obj)/dochecklxdialog
-$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog
+$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog
 $(obj)/dochecklxdialog:
 	$(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf)
 
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a9bc533..0031147 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS];
 static int items_num;
 static int global_exit;
 /* the currently selected button */
-const char *current_instructions = menu_instructions;
+static const char *current_instructions = menu_instructions;
 
 static char *dialog_input_result;
 static int dialog_input_result_len;
@@ -305,7 +305,7 @@ struct function_keys {
 };
 
 static const int function_keys_num = 9;
-struct function_keys function_keys[] = {
+static struct function_keys function_keys[] = {
 	{
 		.key_str = "F1",
 		.func = "Help",
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag)
 	index = (index + items_num) % items_num;
 	while (true) {
 		char *str = k_menu_items[index].str;
-		if (strcasestr(str, match_str) != 0)
+		if (strcasestr(str, match_str) != NULL)
 			return index;
 		if (flag == FIND_NEXT_MATCH_UP ||
 		    flag == MATCH_TINKER_PATTERN_UP)
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans)
 
 static void conf(struct menu *menu)
 {
-	struct menu *submenu = 0;
+	struct menu *submenu = NULL;
 	const char *prompt = menu_get_prompt(menu);
 	struct symbol *sym;
 	int res;
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu)
 static void conf_choice(struct menu *menu)
 {
 	const char *prompt = _(menu_get_prompt(menu));
-	struct menu *child = 0;
+	struct menu *child = NULL;
 	struct symbol *active;
 	int selected_index = 0;
 	int last_top_row = 0;
@@ -1456,7 +1456,7 @@ static void conf_save(void)
 	}
 }
 
-void setup_windows(void)
+static void setup_windows(void)
 {
 	int lines, columns;
 
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c..a64b1c3 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -129,7 +129,7 @@ static void no_colors_theme(void)
 	mkattrn(FUNCTION_TEXT, A_REVERSE);
 }
 
-void set_colors()
+void set_colors(void)
 {
 	start_color();
 	use_default_colors();
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no)
 	int lines = 0;
 
 	if (!text)
-		return 0;
+		return NULL;
 
 	for (i = 0; text[i] != '\0' && lines < line_no; i++)
 		if (text[i] == '\n')
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index a26a5f2..c1ffd31 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2189,6 +2189,8 @@
 	$members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos;
 	# replace DECLARE_BITMAP
 	$members =~ s/DECLARE_BITMAP\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
+	# replace DECLARE_HASHTABLE
+	$members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
 
 	create_parameterlist($members, ';', $file);
 	check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
diff --git a/scripts/kernel-doc-xml-ref b/scripts/kernel-doc-xml-ref
deleted file mode 100755
index 104a5a5..0000000
--- a/scripts/kernel-doc-xml-ref
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-
-## Copyright (C) 2015  Intel Corporation                         ##
-#                                                                ##
-## This software falls under the GNU General Public License.     ##
-## Please read the COPYING file for more information             ##
-#
-#
-# This software reads a XML file and a list of valid interal
-# references to replace Docbook tags with links.
-#
-# The list of "valid internal references" must be one-per-line in the following format:
-#      API-struct-foo
-#      API-enum-bar
-#      API-my-function
-#
-# The software walks over the XML file looking for xml tags representing possible references
-# to the Document. Each reference will be cross checked against the "Valid Internal Reference" list. If
-# the referece is found it replaces its content by a <link> tag.
-#
-# usage:
-# kernel-doc-xml-ref -db filename
-#		     xml filename > outputfile
-
-# read arguments
-if ($#ARGV != 2) {
-	usage();
-}
-
-#Holds the database filename
-my $databasefile;
-my @database;
-
-#holds the inputfile
-my $inputfile;
-my $errors = 0;
-
-my %highlights = (
-	"<function>(.*?)</function>",
-	    "\"<function>\" . convert_function(\$1, \$line) . \"</function>\"",
-	"<structname>(.*?)</structname>",
-	    "\"<structname>\" . convert_struct(\$1) . \"</structname>\"",
-	"<funcdef>(.*?)<function>(.*?)</function></funcdef>",
-	    "\"<funcdef>\" . convert_param(\$1) . \"<function>\$2</function></funcdef>\"",
-	"<paramdef>(.*?)<parameter>(.*?)</parameter></paramdef>",
-	    "\"<paramdef>\" . convert_param(\$1) . \"<parameter>\$2</parameter></paramdef>\"");
-
-while($ARGV[0] =~ m/^-(.*)/) {
-	my $cmd = shift @ARGV;
-	if ($cmd eq "-db") {
-		$databasefile = shift @ARGV
-	} else {
-		usage();
-	}
-}
-$inputfile = shift @ARGV;
-
-sub open_database {
-	open (my $handle, '<', $databasefile) or die "Cannot open $databasefile";
-	chomp(my @lines = <$handle>);
-	close $handle;
-
-	@database = @lines;
-}
-
-sub process_file {
-	open_database();
-
-	my $dohighlight;
-	foreach my $pattern (keys %highlights) {
-		$dohighlight .=  "\$line =~ s:$pattern:$highlights{$pattern}:eg;\n";
-	}
-
-	open(FILE, $inputfile) or die("Could not open $inputfile") or die ("Cannot open $inputfile");
-	foreach my $line (<FILE>)  {
-		eval $dohighlight;
-		print $line;
-	}
-}
-
-sub trim($_)
-{
-	my $str = $_[0];
-	$str =~ s/^\s+|\s+$//g;
-	return $str
-}
-
-sub has_key_defined($_)
-{
-	if ( grep( /^$_[0]$/, @database)) {
-		return 1;
-	}
-	return 0;
-}
-
-# Gets a <function> content and add it a hyperlink if possible.
-sub convert_function($_)
-{
-	my $arg = $_[0];
-	my $key = $_[0];
-
-	my $line = $_[1];
-
-	$key = trim($key);
-
-	$key =~ s/[^A-Za-z0-9]/-/g;
-	$key = "API-" . $key;
-
-	# We shouldn't add links to <funcdef> prototype
-	if (!has_key_defined($key) || $line =~ m/\s+<funcdef/i) {
-		return $arg;
-	}
-
-	my $head = $arg;
-	my $tail = "";
-	if ($arg =~ /(.*?)( ?)$/) {
-		$head = $1;
-		$tail = $2;
-	}
-	return "<link linkend=\"$key\">$head</link>$tail";
-}
-
-# Converting a struct text to link
-sub convert_struct($_)
-{
-	my $arg = $_[0];
-	my $key = $_[0];
-	$key =~ s/(struct )?(\w)/$2/g;
-	$key =~ s/[^A-Za-z0-9]/-/g;
-	$key = "API-struct-" . $key;
-
-	if (!has_key_defined($key)) {
-		return $arg;
-	}
-
-	my ($head, $tail) = split_pointer($arg);
-	return "<link linkend=\"$key\">$head</link>$tail";
-}
-
-# Identify "object *" elements
-sub split_pointer($_)
-{
-	my $arg = $_[0];
-	if ($arg =~ /(.*?)( ?\* ?)/) {
-		return ($1, $2);
-	}
-	return ($arg, "");
-}
-
-sub convert_param($_)
-{
-	my $type = $_[0];
-	my $keyname = convert_key_name($type);
-
-	if (!has_key_defined($keyname)) {
-		return $type;
-	}
-
-	my ($head, $tail) = split_pointer($type);
-	return "<link linkend=\"$keyname\">$head</link>$tail";
-
-}
-
-# DocBook links are in the API-<TYPE>-<STRUCT-NAME> format
-# This method gets an element and returns a valid DocBook reference for it.
-sub convert_key_name($_)
-{
-	#Pattern $2 is optional and might be uninitialized
-	no warnings 'uninitialized';
-
-	my $str = $_[0];
-	$str =~ s/(const|static)? ?(struct)? ?([a-zA-Z0-9_]+) ?(\*|&)?/$2 $3/g ;
-
-	# trim
-	$str =~ s/^\s+|\s+$//g;
-
-	# spaces and _ to -
-	$str =~ s/[^A-Za-z0-9]/-/g;
-
-	return "API-" . $str;
-}
-
-sub usage {
-	print "Usage: $0 -db database filename\n";
-	print "         xml source file(s) > outputfile\n";
-	exit 1;
-}
-
-# starting point
-process_file();
-
-if ($errors) {
-	print STDERR "$errors errors\n";
-}
-
-exit($errors);
diff --git a/scripts/selinux/README b/scripts/selinux/README
index 4d020ec..5ba679c5 100644
--- a/scripts/selinux/README
+++ b/scripts/selinux/README
@@ -1,2 +1,2 @@
-Please see Documentation/security/SELinux.txt for information on
+Please see Documentation/admin-guide/LSM/SELinux.rst for information on
 installing a dummy SELinux policy.
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d661f2f..d23dcbf 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@
 		case "$i" in
 			*.[cS])
 				j=${i/\.[cS]/\.o}
+				j="${j#$tree}"
 				if [ -e $j ]; then
 					echo $i
 				fi
diff --git a/security/Kconfig b/security/Kconfig
index 93027fdf..d540bfe 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -54,6 +54,15 @@
 	  implement socket and networking access controls.
 	  If you are unsure how to answer this question, answer N.
 
+config SECURITY_INFINIBAND
+	bool "Infiniband Security Hooks"
+	depends on SECURITY && INFINIBAND
+	help
+	  This enables the Infiniband security hooks.
+	  If enabled, a security module can use these hooks to
+	  implement Infiniband access controls.
+	  If you are unsure how to answer this question, answer N.
+
 config SECURITY_NETWORK_XFRM
 	bool "XFRM (IPSec) Networking Security Hooks"
 	depends on XFRM && SECURITY_NETWORK
@@ -139,7 +148,7 @@
 	  copying memory to/from the kernel (via copy_to_user() and
 	  copy_from_user() functions) by rejecting memory ranges that
 	  are larger than the specified heap object, span multiple
-	  separately allocates pages, are not on the process stack,
+	  separately allocated pages, are not on the process stack,
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index ad369a7..a16b195 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -4,7 +4,7 @@
 
 apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
               path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
-              resource.o secid.o file.o policy_ns.o
+              resource.o secid.o file.o policy_ns.o label.o
 apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
 
 clean-files := capability_names.h rlim_names.h
@@ -20,7 +20,7 @@
 	sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
 	-e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
 	echo "};" >> $@ ;\
-	echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\
+	printf '%s' '\#define AA_SFS_CAPS_MASK "' >> $@ ;\
 	sed $< -r -n -e '/CAP_FS_MASK/d' \
 	    -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
 	     tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
@@ -46,7 +46,7 @@
 #    #define RLIMIT_FSIZE        1   /* Maximum filesize */
 #    #define RLIMIT_STACK		3	/* max stack size */
 # to
-# #define AA_FS_RLIMIT_MASK "fsize stack"
+# #define AA_SFS_RLIMIT_MASK "fsize stack"
 quiet_cmd_make-rlim = GEN     $@
 cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
 	> $@ ;\
@@ -56,7 +56,7 @@
 	echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\
 	sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
 	echo "};" >> $@ ; \
-	echo -n '\#define AA_FS_RLIMIT_MASK "' >> $@ ;\
+	printf '%s' '\#define AA_SFS_RLIMIT_MASK "' >> $@ ;\
 	sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \
 	    tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
 
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 4f6ac9d..853c2ec 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -22,19 +22,52 @@
 #include <linux/namei.h>
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
-#include <uapi/linux/major.h>
 #include <linux/fs.h>
+#include <linux/poll.h>
+#include <uapi/linux/major.h>
+#include <uapi/linux/magic.h>
 
 #include "include/apparmor.h"
 #include "include/apparmorfs.h"
 #include "include/audit.h"
 #include "include/context.h"
 #include "include/crypto.h"
+#include "include/policy_ns.h"
+#include "include/label.h"
 #include "include/policy.h"
 #include "include/policy_ns.h"
 #include "include/resource.h"
 #include "include/policy_unpack.h"
 
+/*
+ * The apparmor filesystem interface used for policy load and introspection
+ * The interface is split into two main components based on their function
+ * a securityfs component:
+ *   used for static files that are always available, and which allows
+ *   userspace to specify the location of the security filesystem.
+ *
+ *   fns and data are prefixed with
+ *      aa_sfs_
+ *
+ * an apparmorfs component:
+ *   used for loaded policy content and introspection. It is not part of a
+ *   regular mounted filesystem and is available only through the magic
+ *   policy symlink in the root of the securityfs apparmor/ directory.
+ *   Task queries will be magically redirected to the correct portion
+ *   of the policy tree based on their confinement.
+ *
+ *   fns and data are prefixed with
+ *      aafs_
+ *
+ * The aa_fs_ prefix is used to indicate the fn is used by both the
+ * securityfs and apparmorfs filesystems.
+ */
+
+
+/*
+ * support fns
+ */
+
 /**
  * aa_mangle_name - mangle a profile name to std profile layout form
  * @name: profile name to mangle  (NOT NULL)
@@ -74,6 +107,265 @@ static int mangle_name(const char *name, char *target)
 	return t - target;
 }
 
+
+/*
+ * aafs - core fns and data for the policy tree
+ */
+
+#define AAFS_NAME		"apparmorfs"
+static struct vfsmount *aafs_mnt;
+static int aafs_count;
+
+
+static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
+{
+	struct inode *inode = d_inode(dentry);
+
+	seq_printf(seq, "%s:[%lu]", AAFS_NAME, inode->i_ino);
+	return 0;
+}
+
+static void aafs_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	clear_inode(inode);
+	if (S_ISLNK(inode->i_mode))
+		kfree(inode->i_link);
+}
+
+static const struct super_operations aafs_super_ops = {
+	.statfs = simple_statfs,
+	.evict_inode = aafs_evict_inode,
+	.show_path = aafs_show_path,
+};
+
+static int fill_super(struct super_block *sb, void *data, int silent)
+{
+	static struct tree_descr files[] = { {""} };
+	int error;
+
+	error = simple_fill_super(sb, AAFS_MAGIC, files);
+	if (error)
+		return error;
+	sb->s_op = &aafs_super_ops;
+
+	return 0;
+}
+
+static struct dentry *aafs_mount(struct file_system_type *fs_type,
+				 int flags, const char *dev_name, void *data)
+{
+	return mount_single(fs_type, flags, data, fill_super);
+}
+
+static struct file_system_type aafs_ops = {
+	.owner = THIS_MODULE,
+	.name = AAFS_NAME,
+	.mount = aafs_mount,
+	.kill_sb = kill_anon_super,
+};
+
+/**
+ * __aafs_setup_d_inode - basic inode setup for apparmorfs
+ * @dir: parent directory for the dentry
+ * @dentry: dentry we are setting the inode up for
+ * @mode: permissions the file should have
+ * @data: data to store on inode.i_private, available in open()
+ * @link: if symlink, symlink target string
+ * @fops: struct file_operations that should be used
+ * @iops: struct of inode_operations that should be used
+ */
+static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
+			       umode_t mode, void *data, char *link,
+			       const struct file_operations *fops,
+			       const struct inode_operations *iops)
+{
+	struct inode *inode = new_inode(dir->i_sb);
+
+	AA_BUG(!dir);
+	AA_BUG(!dentry);
+
+	if (!inode)
+		return -ENOMEM;
+
+	inode->i_ino = get_next_ino();
+	inode->i_mode = mode;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+	inode->i_private = data;
+	if (S_ISDIR(mode)) {
+		inode->i_op = iops ? iops : &simple_dir_inode_operations;
+		inode->i_fop = &simple_dir_operations;
+		inc_nlink(inode);
+		inc_nlink(dir);
+	} else if (S_ISLNK(mode)) {
+		inode->i_op = iops ? iops : &simple_symlink_inode_operations;
+		inode->i_link = link;
+	} else {
+		inode->i_fop = fops;
+	}
+	d_instantiate(dentry, inode);
+	dget(dentry);
+
+	return 0;
+}
+
+/**
+ * aafs_create - create a dentry in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @mode: permissions the file should have
+ * @parent: parent directory for this dentry
+ * @data: data to store on inode.i_private, available in open()
+ * @link: if symlink, symlink target string
+ * @fops: struct file_operations that should be used
+ * @iops: struct of inode_operations that should be used
+ *
+ * This is the basic "create a xxx" function for apparmorfs.
+ *
+ * Returns a pointer to a dentry if it succeeds, which must be freed with
+ * aafs_remove(). Will return ERR_PTR on failure.
+ */
+static struct dentry *aafs_create(const char *name, umode_t mode,
+				  struct dentry *parent, void *data, void *link,
+				  const struct file_operations *fops,
+				  const struct inode_operations *iops)
+{
+	struct dentry *dentry;
+	struct inode *dir;
+	int error;
+
+	AA_BUG(!name);
+	AA_BUG(!parent);
+
+	if (!(mode & S_IFMT))
+		mode = (mode & S_IALLUGO) | S_IFREG;
+
+	error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
+	if (error)
+		return ERR_PTR(error);
+
+	dir = d_inode(parent);
+
+	inode_lock(dir);
+	dentry = lookup_one_len(name, parent, strlen(name));
+	if (IS_ERR(dentry))
+		goto fail_lock;
+
+	if (d_really_is_positive(dentry)) {
+		error = -EEXIST;
+		goto fail_dentry;
+	}
+
+	error = __aafs_setup_d_inode(dir, dentry, mode, data, link, fops, iops);
+	if (error)
+		goto fail_dentry;
+	inode_unlock(dir);
+
+	return dentry;
+
+fail_dentry:
+	dput(dentry);
+
+fail_lock:
+	inode_unlock(dir);
+	simple_release_fs(&aafs_mnt, &aafs_count);
+
+	return ERR_PTR(error);
+}
+
+/**
+ * aafs_create_file - create a file in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @mode: permissions the file should have
+ * @parent: parent directory for this dentry
+ * @data: data to store on inode.i_private, available in open()
+ * @fops: struct file_operations that should be used for the file
+ *
+ * see aafs_create
+ */
+static struct dentry *aafs_create_file(const char *name, umode_t mode,
+				       struct dentry *parent, void *data,
+				       const struct file_operations *fops)
+{
+	return aafs_create(name, mode, parent, data, NULL, fops, NULL);
+}
+
+/**
+ * aafs_create_dir - create a directory in the apparmorfs filesystem
+ *
+ * @name: name of dentry to create
+ * @parent: parent directory for this dentry
+ *
+ * see aafs_create
+ */
+static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
+{
+	return aafs_create(name, S_IFDIR | 0755, parent, NULL, NULL, NULL,
+			   NULL);
+}
+
+/**
+ * aafs_create_symlink - create a symlink in the apparmorfs filesystem
+ * @name: name of dentry to create
+ * @parent: parent directory for this dentry
+ * @target: if symlink, symlink target string
+ * @iops: struct of inode_operations that should be used
+ *
+ * If the @target parameter is %NULL, then the @iops parameter needs to
+ * be set up to handle the .readlink and .get_link inode_operations.
+ */
+static struct dentry *aafs_create_symlink(const char *name,
+					  struct dentry *parent,
+					  const char *target,
+					  const struct inode_operations *iops)
+{
+	struct dentry *dent;
+	char *link = NULL;
+
+	if (target) {
+		link = kstrdup(target, GFP_KERNEL);
+		if (!link)
+			return ERR_PTR(-ENOMEM);
+	}
+	dent = aafs_create(name, S_IFLNK | 0444, parent, NULL, link, NULL,
+			   iops);
+	if (IS_ERR(dent))
+		kfree(link);
+
+	return dent;
+}
+
+/**
+ * aafs_remove - removes a file or directory from the apparmorfs filesystem
+ *
+ * @dentry: dentry of the file/directory/symlink to be removed.
+ */
+static void aafs_remove(struct dentry *dentry)
+{
+	struct inode *dir;
+
+	if (!dentry || IS_ERR(dentry))
+		return;
+
+	dir = d_inode(dentry->d_parent);
+	inode_lock(dir);
+	if (simple_positive(dentry)) {
+		if (d_is_dir(dentry))
+			simple_rmdir(dir, dentry);
+		else
+			simple_unlink(dir, dentry);
+		dput(dentry);
+	}
+	inode_unlock(dir);
+	simple_release_fs(&aafs_mnt, &aafs_count);
+}
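
A rough illustration (not part of the diff) of how the helpers above are meant
to compose inside apparmorfs.c; the example_* names and the "state" file are
hypothetical:

/* hypothetical caller; names are illustrative only */
static struct dentry *example_dir, *example_file;

static int example_populate(struct dentry *parent, void *priv,
			    const struct file_operations *fops)
{
	example_dir = aafs_create_dir("example", parent);
	if (IS_ERR(example_dir))
		return PTR_ERR(example_dir);

	example_file = aafs_create_file("state", S_IFREG | 0444,
					example_dir, priv, fops);
	if (IS_ERR(example_file)) {
		int error = PTR_ERR(example_file);

		/* each aafs_remove() drops the fs pin its create took */
		aafs_remove(example_dir);
		example_dir = example_file = NULL;
		return error;
	}
	return 0;
}

static void example_depopulate(void)
{
	/* remove children before their parent */
	aafs_remove(example_file);
	aafs_remove(example_dir);
	example_dir = example_file = NULL;
}
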
+
+
+/*
+ * aa_fs - policy load/replace/remove
+ */
+
 /**
  * aa_simple_write_to_buffer - common routine for getting policy from user
  * @userbuf: user buffer to copy data from  (NOT NULL)
@@ -98,14 +390,11 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
 		return ERR_PTR(-ESPIPE);
 
 	/* freed by caller to simple_write_to_buffer */
-	data = kvmalloc(sizeof(*data) + alloc_size, GFP_KERNEL);
-	if (data == NULL)
-		return ERR_PTR(-ENOMEM);
-	kref_init(&data->count);
-	data->size = copy_size;
-	data->hash = NULL;
-	data->abi = 0;
+	data = aa_loaddata_alloc(alloc_size);
+	if (IS_ERR(data))
+		return data;
 
+	data->size = copy_size;
 	if (copy_from_user(data->data, userbuf, copy_size)) {
 		kvfree(data);
 		return ERR_PTR(-EFAULT);
@@ -114,27 +403,29 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
 	return data;
 }
 
-static ssize_t policy_update(int binop, const char __user *buf, size_t size,
+static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
 			     loff_t *pos, struct aa_ns *ns)
 {
-	ssize_t error;
 	struct aa_loaddata *data;
-	struct aa_profile *profile = aa_current_profile();
-	const char *op = binop == PROF_ADD ? OP_PROF_LOAD : OP_PROF_REPL;
+	struct aa_label *label;
+	ssize_t error;
+
+	label = begin_current_label_crit_section();
+
 	/* high level check about policy management - fine grained in
 	 * below after unpack
 	 */
-	error = aa_may_manage_policy(profile, ns, op);
+	error = aa_may_manage_policy(label, ns, mask);
 	if (error)
 		return error;
 
 	data = aa_simple_write_to_buffer(buf, size, size, pos);
 	error = PTR_ERR(data);
 	if (!IS_ERR(data)) {
-		error = aa_replace_profiles(ns ? ns : profile->ns, profile,
-					    binop, data);
+		error = aa_replace_profiles(ns, label, mask, data);
 		aa_put_loaddata(data);
 	}
+	end_current_label_crit_section(label);
 
 	return error;
 }
@@ -144,7 +435,7 @@ static ssize_t profile_load(struct file *f, const char __user *buf, size_t size,
 			    loff_t *pos)
 {
 	struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
-	int error = policy_update(PROF_ADD, buf, size, pos, ns);
+	int error = policy_update(AA_MAY_LOAD_POLICY, buf, size, pos, ns);
 
 	aa_put_ns(ns);
 
@@ -161,8 +452,8 @@ static ssize_t profile_replace(struct file *f, const char __user *buf,
 			       size_t size, loff_t *pos)
 {
 	struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
-	int error = policy_update(PROF_REPLACE, buf, size, pos, ns);
-
+	int error = policy_update(AA_MAY_LOAD_POLICY | AA_MAY_REPLACE_POLICY,
+				  buf, size, pos, ns);
 	aa_put_ns(ns);
 
 	return error;
@@ -178,15 +469,15 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
 			      size_t size, loff_t *pos)
 {
 	struct aa_loaddata *data;
-	struct aa_profile *profile;
+	struct aa_label *label;
 	ssize_t error;
 	struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
 
-	profile = aa_current_profile();
+	label = begin_current_label_crit_section();
 	/* high level check about policy management - fine grained in
 	 * below after unpack
 	 */
-	error = aa_may_manage_policy(profile, ns, OP_PROF_RM);
+	error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
 	if (error)
 		goto out;
 
@@ -199,11 +490,11 @@ static ssize_t profile_remove(struct file *f, const char __user *buf,
 	error = PTR_ERR(data);
 	if (!IS_ERR(data)) {
 		data->data[size] = 0;
-		error = aa_remove_profiles(ns ? ns : profile->ns, profile,
-					   data->data, size);
+		error = aa_remove_profiles(ns, label, data->data, size);
 		aa_put_loaddata(data);
 	}
  out:
+	end_current_label_crit_section(label);
 	aa_put_ns(ns);
 	return error;
 }
@@ -213,6 +504,136 @@ static const struct file_operations aa_fs_profile_remove = {
 	.llseek = default_llseek,
 };
 
+struct aa_revision {
+	struct aa_ns *ns;
+	long last_read;
+};
+
+/* revision file hook fn for policy loads */
+static int ns_revision_release(struct inode *inode, struct file *file)
+{
+	struct aa_revision *rev = file->private_data;
+
+	if (rev) {
+		aa_put_ns(rev->ns);
+		kfree(rev);
+	}
+
+	return 0;
+}
+
+static ssize_t ns_revision_read(struct file *file, char __user *buf,
+				size_t size, loff_t *ppos)
+{
+	struct aa_revision *rev = file->private_data;
+	char buffer[32];
+	long last_read;
+	int avail;
+
+	mutex_lock(&rev->ns->lock);
+	last_read = rev->last_read;
+	if (last_read == rev->ns->revision) {
+		mutex_unlock(&rev->ns->lock);
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		if (wait_event_interruptible(rev->ns->wait,
+					     last_read !=
+					     READ_ONCE(rev->ns->revision)))
+			return -ERESTARTSYS;
+		mutex_lock(&rev->ns->lock);
+	}
+
+	avail = sprintf(buffer, "%ld\n", rev->ns->revision);
+	if (*ppos + size > avail) {
+		rev->last_read = rev->ns->revision;
+		*ppos = 0;
+	}
+	mutex_unlock(&rev->ns->lock);
+
+	return simple_read_from_buffer(buf, size, ppos, buffer, avail);
+}
+
+static int ns_revision_open(struct inode *inode, struct file *file)
+{
+	struct aa_revision *rev = kzalloc(sizeof(*rev), GFP_KERNEL);
+
+	if (!rev)
+		return -ENOMEM;
+
+	rev->ns = aa_get_ns(inode->i_private);
+	if (!rev->ns)
+		rev->ns = aa_get_current_ns();
+	file->private_data = rev;
+
+	return 0;
+}
+
+static unsigned int ns_revision_poll(struct file *file, poll_table *pt)
+{
+	struct aa_revision *rev = file->private_data;
+	unsigned int mask = 0;
+
+	if (rev) {
+		mutex_lock(&rev->ns->lock);
+		poll_wait(file, &rev->ns->wait, pt);
+		if (rev->last_read < rev->ns->revision)
+			mask |= POLLIN | POLLRDNORM;
+		mutex_unlock(&rev->ns->lock);
+	}
+
+	return mask;
+}
+
+void __aa_bump_ns_revision(struct aa_ns *ns)
+{
+	ns->revision++;
+	wake_up_interruptible(&ns->wait);
+}
+
+static const struct file_operations aa_fs_ns_revision_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ns_revision_open,
+	.poll		= ns_revision_poll,
+	.read		= ns_revision_read,
+	.llseek		= generic_file_llseek,
+	.release	= ns_revision_release,
+};
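
A rough userspace sketch (not part of the diff) of the blocking-read/poll
semantics implemented above. The path assumes securityfs is mounted at
/sys/kernel/security and that the root namespace's revision file ends up at
apparmor/policy/revision, which this hunk does not itself establish:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = open("/sys/kernel/security/apparmor/policy/revision",
		      O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	/* may block until the first load if the revision is still 0 */
	n = read(pfd.fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("revision now %s", buf);
	}

	/* blocks until __aa_bump_ns_revision() wakes ns->wait */
	if (poll(&pfd, 1, -1) > 0) {
		lseek(pfd.fd, 0, SEEK_SET);
		n = read(pfd.fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("revision now %s", buf);
		}
	}
	close(pfd.fd);
	return 0;
}
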
+
+static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
+			     const char *match_str, size_t match_len)
+{
+	struct aa_perms tmp;
+	struct aa_dfa *dfa;
+	unsigned int state = 0;
+
+	if (profile_unconfined(profile))
+		return;
+	if (profile->file.dfa && *match_str == AA_CLASS_FILE) {
+		dfa = profile->file.dfa;
+		state = aa_dfa_match_len(dfa, profile->file.start,
+					 match_str + 1, match_len - 1);
+		tmp = nullperms;
+		if (state) {
+			struct path_cond cond = { };
+
+			tmp = aa_compute_fperms(dfa, state, &cond);
+		}
+	} else if (profile->policy.dfa) {
+		if (!PROFILE_MEDIATES_SAFE(profile, *match_str))
+			return;	/* no change to current perms */
+		dfa = profile->policy.dfa;
+		state = aa_dfa_match_len(dfa, profile->policy.start[0],
+					 match_str, match_len);
+		if (state)
+			aa_compute_perms(dfa, state, &tmp);
+		else
+			tmp = nullperms;
+	}
+	aa_apply_modes_to_perms(profile, &tmp);
+	aa_perms_accum_raw(perms, &tmp);
+}
+
+
 /**
  * query_data - queries a policy and writes its data to buf
  * @buf: the resulting data is stored here (NOT NULL)
@@ -236,6 +657,8 @@ static ssize_t query_data(char *buf, size_t buf_len,
 {
 	char *out;
 	const char *key;
+	struct label_it i;
+	struct aa_label *label, *curr;
 	struct aa_profile *profile;
 	struct aa_data *data;
 	u32 bytes, blocks;
@@ -253,7 +676,11 @@ static ssize_t query_data(char *buf, size_t buf_len,
 	if (buf_len < sizeof(bytes) + sizeof(blocks))
 		return -EINVAL; /* not enough space */
 
-	profile = aa_current_profile();
+	curr = begin_current_label_crit_section();
+	label = aa_label_parse(curr, query, GFP_KERNEL, false, false);
+	end_current_label_crit_section(curr);
+	if (IS_ERR(label))
+		return PTR_ERR(label);
 
 	/* We are going to leave space for two numbers. The first is the total
 	 * number of bytes we are writing after the first number. This is so
@@ -267,13 +694,19 @@ static ssize_t query_data(char *buf, size_t buf_len,
 	out = buf + sizeof(bytes) + sizeof(blocks);
 
 	blocks = 0;
-	if (profile->data) {
+	label_for_each_confined(i, label, profile) {
+		if (!profile->data)
+			continue;
+
 		data = rhashtable_lookup_fast(profile->data, &key,
 					      profile->data->p);
 
 		if (data) {
-			if (out + sizeof(outle32) + data->size > buf + buf_len)
+			if (out + sizeof(outle32) + data->size > buf +
+			    buf_len) {
+				aa_put_label(label);
 				return -EINVAL; /* not enough space */
+			}
 			outle32 = __cpu_to_le32(data->size);
 			memcpy(out, &outle32, sizeof(outle32));
 			out += sizeof(outle32);
@@ -282,6 +715,7 @@ static ssize_t query_data(char *buf, size_t buf_len,
 			blocks++;
 		}
 	}
+	aa_put_label(label);
 
 	outle32 = __cpu_to_le32(out - buf - sizeof(bytes));
 	memcpy(buf, &outle32, sizeof(outle32));
@@ -291,6 +725,182 @@ static ssize_t query_data(char *buf, size_t buf_len,
 	return out - buf;
 }
 
+/**
+ * query_label - queries a label and writes permissions to buf
+ * @buf: the resulting permissions string is stored here (NOT NULL)
+ * @buf_len: size of buf
+ * @query: binary query string to match against the dfa
+ * @query_len: size of query
+ * @view_only: only compute for querier's view
+ *
+ * The buffers pointed to by buf and query may overlap. The query buffer is
+ * parsed before buf is written to.
+ *
+ * The query should look like "LABEL_NAME\0DFA_STRING" where LABEL_NAME is
+ * the name of the label, in the current namespace, that is to be queried and
+ * DFA_STRING is a binary string to match against the label(s)'s DFA.
+ *
+ * LABEL_NAME must be NUL terminated. DFA_STRING may contain NUL characters
+ * but must *not* be NUL terminated.
+ *
+ * Returns: number of characters written to buf or -errno on failure
+ */
+static ssize_t query_label(char *buf, size_t buf_len,
+			   char *query, size_t query_len, bool view_only)
+{
+	struct aa_profile *profile;
+	struct aa_label *label, *curr;
+	char *label_name, *match_str;
+	size_t label_name_len, match_len;
+	struct aa_perms perms;
+	struct label_it i;
+
+	if (!query_len)
+		return -EINVAL;
+
+	label_name = query;
+	label_name_len = strnlen(query, query_len);
+	if (!label_name_len || label_name_len == query_len)
+		return -EINVAL;
+
+	/*
+	 * The extra byte is to account for the null byte between the
+	 * label name and dfa string. label_name_len is greater
+	 * than zero and less than query_len, so a byte can be safely
+	 * added or subtracted.
+	 */
+	match_str = label_name + label_name_len + 1;
+	match_len = query_len - label_name_len - 1;
+
+	curr = begin_current_label_crit_section();
+	label = aa_label_parse(curr, label_name, GFP_KERNEL, false, false);
+	end_current_label_crit_section(curr);
+	if (IS_ERR(label))
+		return PTR_ERR(label);
+
+	perms = allperms;
+	if (view_only) {
+		label_for_each_in_ns(i, labels_ns(label), label, profile) {
+			profile_query_cb(profile, &perms, match_str, match_len);
+		}
+	} else {
+		label_for_each(i, label, profile) {
+			profile_query_cb(profile, &perms, match_str, match_len);
+		}
+	}
+	aa_put_label(label);
+
+	return scnprintf(buf, buf_len,
+		      "allow 0x%08x\ndeny 0x%08x\naudit 0x%08x\nquiet 0x%08x\n",
+		      perms.allow, perms.deny, perms.audit, perms.quiet);
+}
+
+/*
+ * Transaction based IO.
+ * The file expects a write which triggers the transaction, and then
+ * possibly one or more reads which collect the result - the result is
+ * stored in a file-local buffer. Once a new write is performed, a new
+ * set of results is stored in the file-local buffer.
+ */
+struct multi_transaction {
+	struct kref count;
+	ssize_t size;
+	char data[0];
+};
+
+#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
+/* TODO: replace with per file lock */
+static DEFINE_SPINLOCK(multi_transaction_lock);
+
+static void multi_transaction_kref(struct kref *kref)
+{
+	struct multi_transaction *t;
+
+	t = container_of(kref, struct multi_transaction, count);
+	free_page((unsigned long) t);
+}
+
+static struct multi_transaction *
+get_multi_transaction(struct multi_transaction *t)
+{
+	if  (t)
+		kref_get(&(t->count));
+
+	return t;
+}
+
+static void put_multi_transaction(struct multi_transaction *t)
+{
+	if (t)
+		kref_put(&(t->count), multi_transaction_kref);
+}
+
+/* does not increment @new's count */
+static void multi_transaction_set(struct file *file,
+				  struct multi_transaction *new, size_t n)
+{
+	struct multi_transaction *old;
+
+	AA_BUG(n > MULTI_TRANSACTION_LIMIT);
+
+	new->size = n;
+	spin_lock(&multi_transaction_lock);
+	old = (struct multi_transaction *) file->private_data;
+	file->private_data = new;
+	spin_unlock(&multi_transaction_lock);
+	put_multi_transaction(old);
+}
+
+static struct multi_transaction *multi_transaction_new(struct file *file,
+						       const char __user *buf,
+						       size_t size)
+{
+	struct multi_transaction *t;
+
+	if (size > MULTI_TRANSACTION_LIMIT - 1)
+		return ERR_PTR(-EFBIG);
+
+	t = (struct multi_transaction *)get_zeroed_page(GFP_KERNEL);
+	if (!t)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&t->count);
+	if (copy_from_user(t->data, buf, size)) {
+		put_multi_transaction(t);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return t;
+}
+
+static ssize_t multi_transaction_read(struct file *file, char __user *buf,
+				       size_t size, loff_t *pos)
+{
+	struct multi_transaction *t;
+	ssize_t ret;
+
+	spin_lock(&multi_transaction_lock);
+	t = get_multi_transaction(file->private_data);
+	spin_unlock(&multi_transaction_lock);
+	if (!t)
+		return 0;
+
+	ret = simple_read_from_buffer(buf, size, pos, t->data, t->size);
+	put_multi_transaction(t);
+
+	return ret;
+}
+
+static int multi_transaction_release(struct inode *inode, struct file *file)
+{
+	put_multi_transaction(file->private_data);
+
+	return 0;
+}
+
+#define QUERY_CMD_LABEL		"label\0"
+#define QUERY_CMD_LABEL_LEN	6
+#define QUERY_CMD_PROFILE	"profile\0"
+#define QUERY_CMD_PROFILE_LEN	8
+#define QUERY_CMD_LABELALL	"labelall\0"
+#define QUERY_CMD_LABELALL_LEN	9
 #define QUERY_CMD_DATA		"data\0"
 #define QUERY_CMD_DATA_LEN	5
 
@@ -318,54 +928,72 @@ static ssize_t query_data(char *buf, size_t buf_len,
 static ssize_t aa_write_access(struct file *file, const char __user *ubuf,
 			       size_t count, loff_t *ppos)
 {
-	char *buf;
+	struct multi_transaction *t;
 	ssize_t len;
 
 	if (*ppos)
 		return -ESPIPE;
 
-	buf = simple_transaction_get(file, ubuf, count);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	t = multi_transaction_new(file, ubuf, count);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
 
-	if (count > QUERY_CMD_DATA_LEN &&
-		   !memcmp(buf, QUERY_CMD_DATA, QUERY_CMD_DATA_LEN)) {
-		len = query_data(buf, SIMPLE_TRANSACTION_LIMIT,
-				 buf + QUERY_CMD_DATA_LEN,
+	if (count > QUERY_CMD_PROFILE_LEN &&
+	    !memcmp(t->data, QUERY_CMD_PROFILE, QUERY_CMD_PROFILE_LEN)) {
+		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+				  t->data + QUERY_CMD_PROFILE_LEN,
+				  count - QUERY_CMD_PROFILE_LEN, true);
+	} else if (count > QUERY_CMD_LABEL_LEN &&
+		   !memcmp(t->data, QUERY_CMD_LABEL, QUERY_CMD_LABEL_LEN)) {
+		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+				  t->data + QUERY_CMD_LABEL_LEN,
+				  count - QUERY_CMD_LABEL_LEN, true);
+	} else if (count > QUERY_CMD_LABELALL_LEN &&
+		   !memcmp(t->data, QUERY_CMD_LABELALL,
+			   QUERY_CMD_LABELALL_LEN)) {
+		len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
+				  t->data + QUERY_CMD_LABELALL_LEN,
+				  count - QUERY_CMD_LABELALL_LEN, false);
+	} else if (count > QUERY_CMD_DATA_LEN &&
+		   !memcmp(t->data, QUERY_CMD_DATA, QUERY_CMD_DATA_LEN)) {
+		len = query_data(t->data, MULTI_TRANSACTION_LIMIT,
+				 t->data + QUERY_CMD_DATA_LEN,
 				 count - QUERY_CMD_DATA_LEN);
 	} else
 		len = -EINVAL;
 
-	if (len < 0)
+	if (len < 0) {
+		put_multi_transaction(t);
 		return len;
+	}
 
-	simple_transaction_set(file, len);
+	multi_transaction_set(file, t, len);
 
 	return count;
 }
 
-static const struct file_operations aa_fs_access = {
+static const struct file_operations aa_sfs_access = {
 	.write		= aa_write_access,
-	.read		= simple_transaction_read,
-	.release	= simple_transaction_release,
+	.read		= multi_transaction_read,
+	.release	= multi_transaction_release,
 	.llseek		= generic_file_llseek,
 };
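
A rough userspace sketch (not part of the diff) of one write()/read()
transaction against the .access file handled above. The securityfs path, the
profile name "/usr/bin/foo", and the query path are assumptions; the match
string follows profile_query_cb() (an AA_CLASS_FILE byte, then a pathname),
but whether a bare pathname is a complete file query is a guess at the ABI:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char q[256], r[256];
	size_t len = 0;
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/security/apparmor/.access", O_RDWR);
	if (fd < 0)
		return 1;

	memcpy(q + len, "label", 6);		/* "label\0" command */
	len += 6;
	memcpy(q + len, "/usr/bin/foo", 13);	/* label name, NUL terminated */
	len += 13;
	q[len++] = 2;				/* AA_CLASS_FILE (assumed value) */
	memcpy(q + len, "/etc/passwd", 11);	/* match string, no trailing NUL */
	len += 11;

	if (write(fd, q, len) != (ssize_t)len)	/* runs the transaction */
		return 1;
	n = read(fd, r, sizeof(r) - 1);		/* collect the result */
	if (n > 0) {
		r[n] = '\0';
		fputs(r, stdout);	/* "allow 0x...\ndeny 0x...\n..." */
	}
	close(fd);
	return 0;
}
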
 
-static int aa_fs_seq_show(struct seq_file *seq, void *v)
+static int aa_sfs_seq_show(struct seq_file *seq, void *v)
 {
-	struct aa_fs_entry *fs_file = seq->private;
+	struct aa_sfs_entry *fs_file = seq->private;
 
 	if (!fs_file)
 		return 0;
 
 	switch (fs_file->v_type) {
-	case AA_FS_TYPE_BOOLEAN:
+	case AA_SFS_TYPE_BOOLEAN:
 		seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
 		break;
-	case AA_FS_TYPE_STRING:
+	case AA_SFS_TYPE_STRING:
 		seq_printf(seq, "%s\n", fs_file->v.string);
 		break;
-	case AA_FS_TYPE_U64:
+	case AA_SFS_TYPE_U64:
 		seq_printf(seq, "%#08lx\n", fs_file->v.u64);
 		break;
 	default:
@@ -376,21 +1004,40 @@ static int aa_fs_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static int aa_fs_seq_open(struct inode *inode, struct file *file)
+static int aa_sfs_seq_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, aa_fs_seq_show, inode->i_private);
+	return single_open(file, aa_sfs_seq_show, inode->i_private);
 }
 
-const struct file_operations aa_fs_seq_file_ops = {
+const struct file_operations aa_sfs_seq_file_ops = {
 	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_open,
+	.open		= aa_sfs_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
 };
 
-static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
-				  int (*show)(struct seq_file *, void *))
+/*
+ * profile based file operations
+ *     policy/profiles/XXXX/profiles/ *
+ */
+
+#define SEQ_PROFILE_FOPS(NAME)						      \
+static int seq_profile_ ##NAME ##_open(struct inode *inode, struct file *file)\
+{									      \
+	return seq_profile_open(inode, file, seq_profile_ ##NAME ##_show);    \
+}									      \
+									      \
+static const struct file_operations seq_profile_ ##NAME ##_fops = {	      \
+	.owner		= THIS_MODULE,					      \
+	.open		= seq_profile_ ##NAME ##_open,			      \
+	.read		= seq_read,					      \
+	.llseek		= seq_lseek,					      \
+	.release	= seq_profile_release,				      \
+}									      \
+
+static int seq_profile_open(struct inode *inode, struct file *file,
+			    int (*show)(struct seq_file *, void *))
 {
 	struct aa_proxy *proxy = aa_get_proxy(inode->i_private);
 	int error = single_open(file, show, proxy);
@@ -403,7 +1050,7 @@ static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
 	return error;
 }
 
-static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
+static int seq_profile_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = (struct seq_file *) file->private_data;
 	if (seq)
@@ -411,217 +1058,229 @@ static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
 	return single_release(inode, file);
 }
 
-static int aa_fs_seq_profname_show(struct seq_file *seq, void *v)
+static int seq_profile_name_show(struct seq_file *seq, void *v)
 {
 	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
+	struct aa_label *label = aa_get_label_rcu(&proxy->label);
+	struct aa_profile *profile = labels_profile(label);
 	seq_printf(seq, "%s\n", profile->base.name);
-	aa_put_profile(profile);
+	aa_put_label(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_profname_open(struct inode *inode, struct file *file)
-{
-	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show);
-}
-
-static const struct file_operations aa_fs_profname_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_profname_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= aa_fs_seq_profile_release,
-};
-
-static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v)
+static int seq_profile_mode_show(struct seq_file *seq, void *v)
 {
 	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
+	struct aa_label *label = aa_get_label_rcu(&proxy->label);
+	struct aa_profile *profile = labels_profile(label);
 	seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
-	aa_put_profile(profile);
+	aa_put_label(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file)
-{
-	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show);
-}
-
-static const struct file_operations aa_fs_profmode_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_profmode_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= aa_fs_seq_profile_release,
-};
-
-static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v)
+static int seq_profile_attach_show(struct seq_file *seq, void *v)
 {
 	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
+	struct aa_label *label = aa_get_label_rcu(&proxy->label);
+	struct aa_profile *profile = labels_profile(label);
 	if (profile->attach)
 		seq_printf(seq, "%s\n", profile->attach);
 	else if (profile->xmatch)
 		seq_puts(seq, "<unknown>\n");
 	else
 		seq_printf(seq, "%s\n", profile->base.name);
-	aa_put_profile(profile);
+	aa_put_label(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file)
-{
-	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show);
-}
-
-static const struct file_operations aa_fs_profattach_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_profattach_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= aa_fs_seq_profile_release,
-};
-
-static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+static int seq_profile_hash_show(struct seq_file *seq, void *v)
 {
 	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
+	struct aa_label *label = aa_get_label_rcu(&proxy->label);
+	struct aa_profile *profile = labels_profile(label);
 	unsigned int i, size = aa_hash_size();
 
 	if (profile->hash) {
 		for (i = 0; i < size; i++)
 			seq_printf(seq, "%.2x", profile->hash[i]);
-		seq_puts(seq, "\n");
+		seq_putc(seq, '\n');
 	}
-	aa_put_profile(profile);
+	aa_put_label(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_hash_open(struct inode *inode, struct file *file)
+SEQ_PROFILE_FOPS(name);
+SEQ_PROFILE_FOPS(mode);
+SEQ_PROFILE_FOPS(attach);
+SEQ_PROFILE_FOPS(hash);
+
+/*
+ * namespace based files
+ *     several root files and
+ *     policy/ *
+ */
+
+#define SEQ_NS_FOPS(NAME)						      \
+static int seq_ns_ ##NAME ##_open(struct inode *inode, struct file *file)     \
+{									      \
+	return single_open(file, seq_ns_ ##NAME ##_show, inode->i_private);   \
+}									      \
+									      \
+static const struct file_operations seq_ns_ ##NAME ##_fops = {	      \
+	.owner		= THIS_MODULE,					      \
+	.open		= seq_ns_ ##NAME ##_open,			      \
+	.read		= seq_read,					      \
+	.llseek		= seq_lseek,					      \
+	.release	= single_release,				      \
+}									      \
+
+static int seq_ns_stacked_show(struct seq_file *seq, void *v)
 {
-	return single_open(file, aa_fs_seq_hash_show, inode->i_private);
-}
+	struct aa_label *label;
 
-static const struct file_operations aa_fs_seq_hash_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_hash_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-
-static int aa_fs_seq_show_ns_level(struct seq_file *seq, void *v)
-{
-	struct aa_ns *ns = aa_current_profile()->ns;
-
-	seq_printf(seq, "%d\n", ns->level);
+	label = begin_current_label_crit_section();
+	seq_printf(seq, "%s\n", label->size > 1 ? "yes" : "no");
+	end_current_label_crit_section(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_open_ns_level(struct inode *inode, struct file *file)
+static int seq_ns_nsstacked_show(struct seq_file *seq, void *v)
 {
-	return single_open(file, aa_fs_seq_show_ns_level, inode->i_private);
-}
+	struct aa_label *label;
+	struct aa_profile *profile;
+	struct label_it it;
+	int count = 1;
 
-static const struct file_operations aa_fs_ns_level = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_open_ns_level,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+	label = begin_current_label_crit_section();
 
-static int aa_fs_seq_show_ns_name(struct seq_file *seq, void *v)
-{
-	struct aa_ns *ns = aa_current_profile()->ns;
-
-	seq_printf(seq, "%s\n", ns->base.name);
-
-	return 0;
-}
-
-static int aa_fs_seq_open_ns_name(struct inode *inode, struct file *file)
-{
-	return single_open(file, aa_fs_seq_show_ns_name, inode->i_private);
-}
-
-static const struct file_operations aa_fs_ns_name = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_open_ns_name,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int rawdata_release(struct inode *inode, struct file *file)
-{
-	/* TODO: switch to loaddata when profile switched to symlink */
-	aa_put_loaddata(file->private_data);
-
-	return 0;
-}
-
-static int aa_fs_seq_raw_abi_show(struct seq_file *seq, void *v)
-{
-	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
-
-	if (profile->rawdata->abi) {
-		seq_printf(seq, "v%d", profile->rawdata->abi);
-		seq_puts(seq, "\n");
+	if (label->size > 1) {
+		label_for_each(it, label, profile)
+			if (profile->ns != labels_ns(label)) {
+				count++;
+				break;
+			}
 	}
-	aa_put_profile(profile);
+
+	seq_printf(seq, "%s\n", count > 1 ? "yes" : "no");
+	end_current_label_crit_section(label);
 
 	return 0;
 }
 
-static int aa_fs_seq_raw_abi_open(struct inode *inode, struct file *file)
+static int seq_ns_level_show(struct seq_file *seq, void *v)
 {
-	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_raw_abi_show);
+	struct aa_label *label;
+
+	label = begin_current_label_crit_section();
+	seq_printf(seq, "%d\n", labels_ns(label)->level);
+	end_current_label_crit_section(label);
+
+	return 0;
 }
 
-static const struct file_operations aa_fs_seq_raw_abi_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_raw_abi_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= aa_fs_seq_profile_release,
-};
-
-static int aa_fs_seq_raw_hash_show(struct seq_file *seq, void *v)
+static int seq_ns_name_show(struct seq_file *seq, void *v)
 {
-	struct aa_proxy *proxy = seq->private;
-	struct aa_profile *profile = aa_get_profile_rcu(&proxy->profile);
+	struct aa_label *label = begin_current_label_crit_section();
+
+	seq_printf(seq, "%s\n", aa_ns_name(labels_ns(label),
+					   labels_ns(label), true));
+	end_current_label_crit_section(label);
+
+	return 0;
+}
+
+SEQ_NS_FOPS(stacked);
+SEQ_NS_FOPS(nsstacked);
+SEQ_NS_FOPS(level);
+SEQ_NS_FOPS(name);
+
+
+/* policy/raw_data/ * file ops */
+
+#define SEQ_RAWDATA_FOPS(NAME)						      \
+static int seq_rawdata_ ##NAME ##_open(struct inode *inode, struct file *file)\
+{									      \
+	return seq_rawdata_open(inode, file, seq_rawdata_ ##NAME ##_show);    \
+}									      \
+									      \
+static const struct file_operations seq_rawdata_ ##NAME ##_fops = {	      \
+	.owner		= THIS_MODULE,					      \
+	.open		= seq_rawdata_ ##NAME ##_open,			      \
+	.read		= seq_read,					      \
+	.llseek		= seq_lseek,					      \
+	.release	= seq_rawdata_release,				      \
+}									      \
+
+static int seq_rawdata_open(struct inode *inode, struct file *file,
+			    int (*show)(struct seq_file *, void *))
+{
+	struct aa_loaddata *data = __aa_get_loaddata(inode->i_private);
+	int error;
+
+	if (!data)
+		/* lost race: this entry is being reaped */
+		return -ENOENT;
+
+	error = single_open(file, show, data);
+	if (error) {
+		AA_BUG(file->private_data &&
+		       ((struct seq_file *)file->private_data)->private);
+		aa_put_loaddata(data);
+	}
+
+	return error;
+}
+
+static int seq_rawdata_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = (struct seq_file *) file->private_data;
+
+	if (seq)
+		aa_put_loaddata(seq->private);
+
+	return single_release(inode, file);
+}
+
+static int seq_rawdata_abi_show(struct seq_file *seq, void *v)
+{
+	struct aa_loaddata *data = seq->private;
+
+	seq_printf(seq, "v%d\n", data->abi);
+
+	return 0;
+}
+
+static int seq_rawdata_revision_show(struct seq_file *seq, void *v)
+{
+	struct aa_loaddata *data = seq->private;
+
+	seq_printf(seq, "%ld\n", data->revision);
+
+	return 0;
+}
+
+static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
+{
+	struct aa_loaddata *data = seq->private;
 	unsigned int i, size = aa_hash_size();
 
-	if (profile->rawdata->hash) {
+	if (data->hash) {
 		for (i = 0; i < size; i++)
-			seq_printf(seq, "%.2x", profile->rawdata->hash[i]);
-		seq_puts(seq, "\n");
+			seq_printf(seq, "%.2x", data->hash[i]);
+		seq_putc(seq, '\n');
 	}
-	aa_put_profile(profile);
 
 	return 0;
 }
 
-static int aa_fs_seq_raw_hash_open(struct inode *inode, struct file *file)
-{
-	return aa_fs_seq_profile_open(inode, file, aa_fs_seq_raw_hash_show);
-}
-
-static const struct file_operations aa_fs_seq_raw_hash_fops = {
-	.owner		= THIS_MODULE,
-	.open		= aa_fs_seq_raw_hash_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= aa_fs_seq_profile_release,
-};
+SEQ_RAWDATA_FOPS(abi);
+SEQ_RAWDATA_FOPS(revision);
+SEQ_RAWDATA_FOPS(hash);
 
 static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
 			    loff_t *ppos)
@@ -632,29 +1291,127 @@ static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
 				       rawdata->size);
 }
 
-static int rawdata_open(struct inode *inode, struct file *file)
+static int rawdata_release(struct inode *inode, struct file *file)
 {
-	struct aa_proxy *proxy = inode->i_private;
-	struct aa_profile *profile;
-
-	if (!policy_view_capable(NULL))
-		return -EACCES;
-	profile = aa_get_profile_rcu(&proxy->profile);
-	file->private_data = aa_get_loaddata(profile->rawdata);
-	aa_put_profile(profile);
+	aa_put_loaddata(file->private_data);
 
 	return 0;
 }
 
-static const struct file_operations aa_fs_rawdata_fops = {
+static int rawdata_open(struct inode *inode, struct file *file)
+{
+	if (!policy_view_capable(NULL))
+		return -EACCES;
+	file->private_data = __aa_get_loaddata(inode->i_private);
+	if (!file->private_data)
+		/* lost race: this entry is being reaped */
+		return -ENOENT;
+
+	return 0;
+}
+
+static const struct file_operations rawdata_fops = {
 	.open = rawdata_open,
 	.read = rawdata_read,
 	.llseek = generic_file_llseek,
 	.release = rawdata_release,
 };
 
+static void remove_rawdata_dents(struct aa_loaddata *rawdata)
+{
+	int i;
+
+	for (i = 0; i < AAFS_LOADDATA_NDENTS; i++) {
+		if (!IS_ERR_OR_NULL(rawdata->dents[i])) {
+			/* no refcounts on i_private */
+			aafs_remove(rawdata->dents[i]);
+			rawdata->dents[i] = NULL;
+		}
+	}
+}
+
+void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata)
+{
+	AA_BUG(rawdata->ns && !mutex_is_locked(&rawdata->ns->lock));
+
+	if (rawdata->ns) {
+		remove_rawdata_dents(rawdata);
+		list_del_init(&rawdata->list);
+		aa_put_ns(rawdata->ns);
+		rawdata->ns = NULL;
+	}
+}
+
+int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
+{
+	struct dentry *dent, *dir;
+
+	AA_BUG(!ns);
+	AA_BUG(!rawdata);
+	AA_BUG(!mutex_is_locked(&ns->lock));
+	AA_BUG(!ns_subdata_dir(ns));
+
+	/*
+	 * just use the ns revision the dir was originally created at. This
+	 * is done under ns->lock, and if the load is successful the
+	 * revision will be bumped, so the name is guaranteed to be unique.
+	 */
+	rawdata->name = kasprintf(GFP_KERNEL, "%ld", ns->revision);
+	if (!rawdata->name)
+		return -ENOMEM;
+
+	dir = aafs_create_dir(rawdata->name, ns_subdata_dir(ns));
+	if (IS_ERR(dir))
+		/* ->name freed when rawdata freed */
+		return PTR_ERR(dir);
+	rawdata->dents[AAFS_LOADDATA_DIR] = dir;
+
+	dent = aafs_create_file("abi", S_IFREG | 0444, dir, rawdata,
+				      &seq_rawdata_abi_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	rawdata->dents[AAFS_LOADDATA_ABI] = dent;
+
+	dent = aafs_create_file("revision", S_IFREG | 0444, dir, rawdata,
+				      &seq_rawdata_revision_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	rawdata->dents[AAFS_LOADDATA_REVISION] = dent;
+
+	if (aa_g_hash_policy) {
+		dent = aafs_create_file("sha1", S_IFREG | 0444, dir,
+					      rawdata, &seq_rawdata_hash_fops);
+		if (IS_ERR(dent))
+			goto fail;
+		rawdata->dents[AAFS_LOADDATA_HASH] = dent;
+	}
+
+	dent = aafs_create_file("raw_data", S_IFREG | 0444,
+				      dir, rawdata, &rawdata_fops);
+	if (IS_ERR(dent))
+		goto fail;
+	rawdata->dents[AAFS_LOADDATA_DATA] = dent;
+	d_inode(dent)->i_size = rawdata->size;
+
+	rawdata->ns = aa_get_ns(ns);
+	list_add(&rawdata->list, &ns->rawdata_list);
+	/* no refcount on inode rawdata */
+
+	return 0;
+
+fail:
+	remove_rawdata_dents(rawdata);
+
+	return PTR_ERR(dent);
+}
+
 /** fns to setup dynamic per profile/namespace files **/
-void __aa_fs_profile_rmdir(struct aa_profile *profile)
+
+/**
+ * __aafs_profile_rmdir - recursively remove a profile's apparmorfs subtree
+ * @profile: profile whose dentries are being removed  (NOT NULL)
+ *
+ * Requires: @profile->ns->lock held
+ */
+void __aafs_profile_rmdir(struct aa_profile *profile)
 {
 	struct aa_profile *child;
 	int i;
@@ -663,7 +1420,7 @@ void __aa_fs_profile_rmdir(struct aa_profile *profile)
 		return;
 
 	list_for_each_entry(child, &profile->base.profiles, base.list)
-		__aa_fs_profile_rmdir(child);
+		__aafs_profile_rmdir(child);
 
 	for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
 		struct aa_proxy *proxy;
@@ -671,14 +1428,18 @@ void __aa_fs_profile_rmdir(struct aa_profile *profile)
 			continue;
 
 		proxy = d_inode(profile->dents[i])->i_private;
-		securityfs_remove(profile->dents[i]);
+		aafs_remove(profile->dents[i]);
 		aa_put_proxy(proxy);
 		profile->dents[i] = NULL;
 	}
 }
 
-void __aa_fs_profile_migrate_dents(struct aa_profile *old,
-				   struct aa_profile *new)
+/**
+ * __aafs_profile_migrate_dents - migrate dents from @old to @new
+ * @old: profile to migrate dentries from  (NOT NULL)
+ * @new: profile to migrate dentries to  (NOT NULL)
+ *
+ * Requires: @old->ns->lock held
+ */
+void __aafs_profile_migrate_dents(struct aa_profile *old,
+				  struct aa_profile *new)
 {
 	int i;
 
@@ -694,18 +1455,52 @@ static struct dentry *create_profile_file(struct dentry *dir, const char *name,
 					  struct aa_profile *profile,
 					  const struct file_operations *fops)
 {
-	struct aa_proxy *proxy = aa_get_proxy(profile->proxy);
+	struct aa_proxy *proxy = aa_get_proxy(profile->label.proxy);
 	struct dentry *dent;
 
-	dent = securityfs_create_file(name, S_IFREG | 0444, dir, proxy, fops);
+	dent = aafs_create_file(name, S_IFREG | 0444, dir, proxy, fops);
 	if (IS_ERR(dent))
 		aa_put_proxy(proxy);
 
 	return dent;
 }
 
-/* requires lock be held */
-int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+static int profile_depth(struct aa_profile *profile)
+{
+	int depth = 0;
+
+	rcu_read_lock();
+	for (depth = 0; profile; profile = rcu_access_pointer(profile->parent))
+		depth++;
+	rcu_read_unlock();
+
+	return depth;
+}
+
+static int gen_symlink_name(char *buffer, size_t bsize, int depth,
+			    const char *dirname, const char *fname)
+{
+	int error;
+
+	for (; depth > 0; depth--) {
+		if (bsize < 7)
+			return -ENAMETOOLONG;
+		strcpy(buffer, "../../");
+		buffer += 6;
+		bsize -= 6;
+	}
+
+	error = snprintf(buffer, bsize, "raw_data/%s/%s", dirname, fname);
+	if (error >= bsize || error < 0)
+		return -ENAMETOOLONG;
+
+	return 0;
+}
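
A small worked example of the relative targets this produces: a child
profile's directory sits at policy/profiles/<parent>/profiles/<child>/, so
profile_depth() is 2, and with a hypothetical rawdata directory named "7":

	char buf[64];

	gen_symlink_name(buf, sizeof(buf), 2, "7", "sha1");
	/* buf now holds "../../../../raw_data/7/sha1" */
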
+
+/*
+ * Requires: @profile->ns->lock held
+ */
+int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
 {
 	struct aa_profile *child;
 	struct dentry *dent = NULL, *dir;
@@ -716,7 +1511,7 @@ int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
 		p = aa_deref_parent(profile);
 		dent = prof_dir(p);
 		/* adding to parent that previously didn't have children */
-		dent = securityfs_create_dir("profiles", dent);
+		dent = aafs_create_dir("profiles", dent);
 		if (IS_ERR(dent))
 			goto fail;
 		prof_child_dir(p) = parent = dent;
@@ -728,67 +1523,80 @@ int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
 		id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
 
 		profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
-		if (!profile->dirname)
-			goto fail;
+		if (!profile->dirname) {
+			error = -ENOMEM;
+			goto fail2;
+		}
 
 		mangle_name(profile->base.name, profile->dirname);
 		sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
 	}
 
-	dent = securityfs_create_dir(profile->dirname, parent);
+	dent = aafs_create_dir(profile->dirname, parent);
 	if (IS_ERR(dent))
 		goto fail;
 	prof_dir(profile) = dir = dent;
 
-	dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops);
+	dent = create_profile_file(dir, "name", profile,
+				   &seq_profile_name_fops);
 	if (IS_ERR(dent))
 		goto fail;
 	profile->dents[AAFS_PROF_NAME] = dent;
 
-	dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops);
+	dent = create_profile_file(dir, "mode", profile,
+				   &seq_profile_mode_fops);
 	if (IS_ERR(dent))
 		goto fail;
 	profile->dents[AAFS_PROF_MODE] = dent;
 
 	dent = create_profile_file(dir, "attach", profile,
-				   &aa_fs_profattach_fops);
+				   &seq_profile_attach_fops);
 	if (IS_ERR(dent))
 		goto fail;
 	profile->dents[AAFS_PROF_ATTACH] = dent;
 
 	if (profile->hash) {
 		dent = create_profile_file(dir, "sha1", profile,
-					   &aa_fs_seq_hash_fops);
+					   &seq_profile_hash_fops);
 		if (IS_ERR(dent))
 			goto fail;
 		profile->dents[AAFS_PROF_HASH] = dent;
 	}
 
 	if (profile->rawdata) {
-		dent = create_profile_file(dir, "raw_sha1", profile,
-					   &aa_fs_seq_raw_hash_fops);
+		char target[64];
+		int depth = profile_depth(profile);
+
+		error = gen_symlink_name(target, sizeof(target), depth,
+					 profile->rawdata->name, "sha1");
+		if (error < 0)
+			goto fail2;
+		dent = aafs_create_symlink("raw_sha1", dir, target, NULL);
 		if (IS_ERR(dent))
 			goto fail;
 		profile->dents[AAFS_PROF_RAW_HASH] = dent;
 
-		dent = create_profile_file(dir, "raw_abi", profile,
-					   &aa_fs_seq_raw_abi_fops);
+		error = gen_symlink_name(target, sizeof(target), depth,
+					 profile->rawdata->name, "abi");
+		if (error < 0)
+			goto fail2;
+		dent = aafs_create_symlink("raw_abi", dir, target, NULL);
 		if (IS_ERR(dent))
 			goto fail;
 		profile->dents[AAFS_PROF_RAW_ABI] = dent;
 
-		dent = securityfs_create_file("raw_data", S_IFREG | 0444, dir,
-					      profile->proxy,
-					      &aa_fs_rawdata_fops);
+		error = gen_symlink_name(target, sizeof(target), depth,
+					 profile->rawdata->name, "raw_data");
+		if (error < 0)
+			goto fail2;
+		dent = aafs_create_symlink("raw_data", dir, target, NULL);
 		if (IS_ERR(dent))
 			goto fail;
 		profile->dents[AAFS_PROF_RAW_DATA] = dent;
-		d_inode(dent)->i_size = profile->rawdata->size;
-		aa_get_proxy(profile->proxy);
 	}
 
 	list_for_each_entry(child, &profile->base.profiles, base.list) {
-		error = __aa_fs_profile_mkdir(child, prof_child_dir(profile));
+		error = __aafs_profile_mkdir(child, prof_child_dir(profile));
 		if (error)
 			goto fail2;
 	}
@@ -799,12 +1607,123 @@ int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
 	error = PTR_ERR(dent);
 
 fail2:
-	__aa_fs_profile_rmdir(profile);
+	__aafs_profile_rmdir(profile);
 
 	return error;
 }
 
-void __aa_fs_ns_rmdir(struct aa_ns *ns)
+static int ns_mkdir_op(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	struct aa_ns *ns, *parent;
+	/* TODO: improve permission check */
+	struct aa_label *label;
+	int error;
+
+	label = begin_current_label_crit_section();
+	error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+	end_current_label_crit_section(label);
+	if (error)
+		return error;
+
+	parent = aa_get_ns(dir->i_private);
+	AA_BUG(d_inode(ns_subns_dir(parent)) != dir);
+
+	/* we have to unlock and then relock to get locking order right
+	 * for pin_fs
+	 */
+	inode_unlock(dir);
+	error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
+	mutex_lock(&parent->lock);
+	inode_lock_nested(dir, I_MUTEX_PARENT);
+	if (error)
+		goto out;
+
+	error = __aafs_setup_d_inode(dir, dentry, mode | S_IFDIR,  NULL,
+				     NULL, NULL, NULL);
+	if (error)
+		goto out_pin;
+
+	ns = __aa_find_or_create_ns(parent, READ_ONCE(dentry->d_name.name),
+				    dentry);
+	if (IS_ERR(ns)) {
+		error = PTR_ERR(ns);
+		ns = NULL;
+	}
+
+	aa_put_ns(ns);		/* list ref remains */
+out_pin:
+	if (error)
+		simple_release_fs(&aafs_mnt, &aafs_count);
+out:
+	mutex_unlock(&parent->lock);
+	aa_put_ns(parent);
+
+	return error;
+}
+
+static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
+{
+	struct aa_ns *ns, *parent;
+	/* TODO: improve permission check */
+	struct aa_label *label;
+	int error;
+
+	label = begin_current_label_crit_section();
+	error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
+	end_current_label_crit_section(label);
+	if (error)
+		return error;
+
+	parent = aa_get_ns(dir->i_private);
+	/* rmdir calls the generic securityfs functions to remove files
+	 * from the apparmor dir. It is up to the apparmor ns locking
+	 * to avoid races.
+	 */
+	inode_unlock(dir);
+	inode_unlock(dentry->d_inode);
+
+	mutex_lock(&parent->lock);
+	ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
+				     dentry->d_name.len));
+	if (!ns) {
+		error = -ENOENT;
+		goto out;
+	}
+	AA_BUG(ns_dir(ns) != dentry);
+
+	__aa_remove_ns(ns);
+	aa_put_ns(ns);
+
+out:
+	mutex_unlock(&parent->lock);
+	inode_lock_nested(dir, I_MUTEX_PARENT);
+	inode_lock(dentry->d_inode);
+	aa_put_ns(parent);
+
+	return error;
+}
+
+static const struct inode_operations ns_dir_inode_operations = {
+	.lookup		= simple_lookup,
+	.mkdir		= ns_mkdir_op,
+	.rmdir		= ns_rmdir_op,
+};
+
+static void __aa_fs_list_remove_rawdata(struct aa_ns *ns)
+{
+	struct aa_loaddata *ent, *tmp;
+
+	AA_BUG(!mutex_is_locked(&ns->lock));
+
+	list_for_each_entry_safe(ent, tmp, &ns->rawdata_list, list)
+		__aa_fs_remove_rawdata(ent);
+}
+
+/**
+ * __aafs_ns_rmdir - recursively remove a namespace's apparmorfs subtree
+ * @ns: namespace whose dentries are being removed  (NOT NULL)
+ *
+ * Requires: @ns->lock held
+ */
+void __aafs_ns_rmdir(struct aa_ns *ns)
 {
 	struct aa_ns *sub;
 	struct aa_profile *child;
@@ -814,14 +1733,16 @@ void __aa_fs_ns_rmdir(struct aa_ns *ns)
 		return;
 
 	list_for_each_entry(child, &ns->base.profiles, base.list)
-		__aa_fs_profile_rmdir(child);
+		__aafs_profile_rmdir(child);
 
 	list_for_each_entry(sub, &ns->sub_ns, base.list) {
 		mutex_lock(&sub->lock);
-		__aa_fs_ns_rmdir(sub);
+		__aafs_ns_rmdir(sub);
 		mutex_unlock(&sub->lock);
 	}
 
+	__aa_fs_list_remove_rawdata(ns);
+
 	if (ns_subns_dir(ns)) {
 		sub = d_inode(ns_subns_dir(ns))->i_private;
 		aa_put_ns(sub);
@@ -838,53 +1759,66 @@ void __aa_fs_ns_rmdir(struct aa_ns *ns)
 		sub = d_inode(ns_subremove(ns))->i_private;
 		aa_put_ns(sub);
 	}
+	if (ns_subrevision(ns)) {
+		sub = d_inode(ns_subrevision(ns))->i_private;
+		aa_put_ns(sub);
+	}
 
 	for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
-		securityfs_remove(ns->dents[i]);
+		aafs_remove(ns->dents[i]);
 		ns->dents[i] = NULL;
 	}
 }
 
 /* assumes cleanup in caller */
-static int __aa_fs_ns_mkdir_entries(struct aa_ns *ns, struct dentry *dir)
+static int __aafs_ns_mkdir_entries(struct aa_ns *ns, struct dentry *dir)
 {
 	struct dentry *dent;
 
 	AA_BUG(!ns);
 	AA_BUG(!dir);
 
-	dent = securityfs_create_dir("profiles", dir);
+	dent = aafs_create_dir("profiles", dir);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	ns_subprofs_dir(ns) = dent;
 
-	dent = securityfs_create_dir("raw_data", dir);
+	dent = aafs_create_dir("raw_data", dir);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	ns_subdata_dir(ns) = dent;
 
-	dent = securityfs_create_file(".load", 0640, dir, ns,
+	dent = aafs_create_file("revision", 0444, dir, ns,
+				&aa_fs_ns_revision_fops);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+	aa_get_ns(ns);
+	ns_subrevision(ns) = dent;
+
+	dent = aafs_create_file(".load", 0640, dir, ns,
 				      &aa_fs_profile_load);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	aa_get_ns(ns);
 	ns_subload(ns) = dent;
 
-	dent = securityfs_create_file(".replace", 0640, dir, ns,
+	dent = aafs_create_file(".replace", 0640, dir, ns,
 				      &aa_fs_profile_replace);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	aa_get_ns(ns);
 	ns_subreplace(ns) = dent;
 
-	dent = securityfs_create_file(".remove", 0640, dir, ns,
+	dent = aafs_create_file(".remove", 0640, dir, ns,
 				      &aa_fs_profile_remove);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	aa_get_ns(ns);
 	ns_subremove(ns) = dent;
 
-	dent = securityfs_create_dir("namespaces", dir);
+	/* use aafs_create() so we can supply private data */
+	dent = aafs_create("namespaces", S_IFDIR | 0755, dir, ns, NULL, NULL,
+			   &ns_dir_inode_operations);
 	if (IS_ERR(dent))
 		return PTR_ERR(dent);
 	aa_get_ns(ns);
@@ -893,11 +1827,15 @@ static int __aa_fs_ns_mkdir_entries(struct aa_ns *ns, struct dentry *dir)
 	return 0;
 }
 
-int __aa_fs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name)
+/*
+ * Requires: @ns->lock held
+ */
+int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
+		    struct dentry *dent)
 {
 	struct aa_ns *sub;
 	struct aa_profile *child;
-	struct dentry *dent, *dir;
+	struct dentry *dir;
 	int error;
 
 	AA_BUG(!ns);
@@ -907,19 +1845,21 @@ int __aa_fs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name)
 	if (!name)
 		name = ns->base.name;
 
-	/* create ns dir if it doesn't already exist */
-	dent = securityfs_create_dir(name, parent);
-	if (IS_ERR(dent))
-		goto fail;
-
+	if (!dent) {
+		/* create ns dir if it doesn't already exist */
+		dent = aafs_create_dir(name, parent);
+		if (IS_ERR(dent))
+			goto fail;
+	} else
+		dget(dent);
 	ns_dir(ns) = dir = dent;
-	error = __aa_fs_ns_mkdir_entries(ns, dir);
+	error = __aafs_ns_mkdir_entries(ns, dir);
 	if (error)
 		goto fail2;
 
 	/* profiles */
 	list_for_each_entry(child, &ns->base.profiles, base.list) {
-		error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns));
+		error = __aafs_profile_mkdir(child, ns_subprofs_dir(ns));
 		if (error)
 			goto fail2;
 	}
@@ -927,7 +1867,7 @@ int __aa_fs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name)
 	/* subnamespaces */
 	list_for_each_entry(sub, &ns->sub_ns, base.list) {
 		mutex_lock(&sub->lock);
-		error = __aa_fs_ns_mkdir(sub, ns_subns_dir(ns), NULL);
+		error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
 		mutex_unlock(&sub->lock);
 		if (error)
 			goto fail2;
@@ -939,7 +1879,7 @@ int __aa_fs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name)
 	error = PTR_ERR(dent);
 
 fail2:
-	__aa_fs_ns_rmdir(ns);
+	__aafs_ns_rmdir(ns);
 
 	return error;
 }
@@ -1074,10 +2014,9 @@ static struct aa_profile *next_profile(struct aa_ns *root,
 static void *p_start(struct seq_file *f, loff_t *pos)
 {
 	struct aa_profile *profile = NULL;
-	struct aa_ns *root = aa_current_profile()->ns;
+	struct aa_ns *root = aa_get_current_ns();
 	loff_t l = *pos;
-	f->private = aa_get_ns(root);
-
+	f->private = root;
 
 	/* find the first profile */
 	mutex_lock(&root->lock);
@@ -1141,15 +2080,14 @@ static int seq_show_profile(struct seq_file *f, void *p)
 	struct aa_profile *profile = (struct aa_profile *)p;
 	struct aa_ns *root = f->private;
 
-	if (profile->ns != root)
-		seq_printf(f, ":%s://", aa_ns_name(root, profile->ns, true));
-	seq_printf(f, "%s (%s)\n", profile->base.hname,
-		   aa_profile_mode_names[profile->mode]);
+	aa_label_seq_xprint(f, root, &profile->label,
+			    FLAG_SHOW_MODE | FLAG_VIEW_SUBNS, GFP_KERNEL);
+	seq_putc(f, '\n');
 
 	return 0;
 }
 
-static const struct seq_operations aa_fs_profiles_op = {
+static const struct seq_operations aa_sfs_profiles_op = {
 	.start = p_start,
 	.next = p_next,
 	.stop = p_stop,
@@ -1161,7 +2099,7 @@ static int profiles_open(struct inode *inode, struct file *file)
 	if (!policy_view_capable(NULL))
 		return -EACCES;
 
-	return seq_open(file, &aa_fs_profiles_op);
+	return seq_open(file, &aa_sfs_profiles_op);
 }
 
 static int profiles_release(struct inode *inode, struct file *file)
@@ -1169,7 +2107,7 @@ static int profiles_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-static const struct file_operations aa_fs_profiles_fops = {
+static const struct file_operations aa_sfs_profiles_fops = {
 	.open = profiles_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1178,64 +2116,94 @@ static const struct file_operations aa_fs_profiles_fops = {
 
 
 /** Base file system setup **/
-static struct aa_fs_entry aa_fs_entry_file[] = {
-	AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \
-				  "link lock"),
+static struct aa_sfs_entry aa_sfs_entry_file[] = {
+	AA_SFS_FILE_STRING("mask",
+			   "create read write exec append mmap_exec link lock"),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry_domain[] = {
-	AA_FS_FILE_BOOLEAN("change_hat",	1),
-	AA_FS_FILE_BOOLEAN("change_hatv",	1),
-	AA_FS_FILE_BOOLEAN("change_onexec",	1),
-	AA_FS_FILE_BOOLEAN("change_profile",	1),
-	AA_FS_FILE_BOOLEAN("fix_binfmt_elf_mmap",	1),
-	AA_FS_FILE_STRING("version", "1.2"),
+static struct aa_sfs_entry aa_sfs_entry_ptrace[] = {
+	AA_SFS_FILE_STRING("mask", "read trace"),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry_versions[] = {
-	AA_FS_FILE_BOOLEAN("v5",	1),
+static struct aa_sfs_entry aa_sfs_entry_domain[] = {
+	AA_SFS_FILE_BOOLEAN("change_hat",	1),
+	AA_SFS_FILE_BOOLEAN("change_hatv",	1),
+	AA_SFS_FILE_BOOLEAN("change_onexec",	1),
+	AA_SFS_FILE_BOOLEAN("change_profile",	1),
+	AA_SFS_FILE_BOOLEAN("stack",		1),
+	AA_SFS_FILE_BOOLEAN("fix_binfmt_elf_mmap",	1),
+	AA_SFS_FILE_STRING("version", "1.2"),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry_policy[] = {
-	AA_FS_DIR("versions",                   aa_fs_entry_versions),
-	AA_FS_FILE_BOOLEAN("set_load",		1),
+static struct aa_sfs_entry aa_sfs_entry_versions[] = {
+	AA_SFS_FILE_BOOLEAN("v5",	1),
+	AA_SFS_FILE_BOOLEAN("v6",	1),
+	AA_SFS_FILE_BOOLEAN("v7",	1),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry_features[] = {
-	AA_FS_DIR("policy",			aa_fs_entry_policy),
-	AA_FS_DIR("domain",			aa_fs_entry_domain),
-	AA_FS_DIR("file",			aa_fs_entry_file),
-	AA_FS_FILE_U64("capability",		VFS_CAP_FLAGS_MASK),
-	AA_FS_DIR("rlimit",			aa_fs_entry_rlimit),
-	AA_FS_DIR("caps",			aa_fs_entry_caps),
+static struct aa_sfs_entry aa_sfs_entry_policy[] = {
+	AA_SFS_DIR("versions",			aa_sfs_entry_versions),
+	AA_SFS_FILE_BOOLEAN("set_load",		1),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry_apparmor[] = {
-	AA_FS_FILE_FOPS(".access", 0640, &aa_fs_access),
-	AA_FS_FILE_FOPS(".ns_level", 0666, &aa_fs_ns_level),
-	AA_FS_FILE_FOPS(".ns_name", 0640, &aa_fs_ns_name),
-	AA_FS_FILE_FOPS("profiles", 0440, &aa_fs_profiles_fops),
-	AA_FS_DIR("features", aa_fs_entry_features),
+static struct aa_sfs_entry aa_sfs_entry_ns[] = {
+	AA_SFS_FILE_BOOLEAN("profile",		1),
+	AA_SFS_FILE_BOOLEAN("pivot_root",	1),
 	{ }
 };
 
-static struct aa_fs_entry aa_fs_entry =
-	AA_FS_DIR("apparmor", aa_fs_entry_apparmor);
+static struct aa_sfs_entry aa_sfs_entry_query_label[] = {
+	AA_SFS_FILE_STRING("perms", "allow deny audit quiet"),
+	AA_SFS_FILE_BOOLEAN("data",		1),
+	AA_SFS_FILE_BOOLEAN("multi_transaction",	1),
+	{ }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_query[] = {
+	AA_SFS_DIR("label",			aa_sfs_entry_query_label),
+	{ }
+};
+static struct aa_sfs_entry aa_sfs_entry_features[] = {
+	AA_SFS_DIR("policy",			aa_sfs_entry_policy),
+	AA_SFS_DIR("domain",			aa_sfs_entry_domain),
+	AA_SFS_DIR("file",			aa_sfs_entry_file),
+	AA_SFS_DIR("namespaces",		aa_sfs_entry_ns),
+	AA_SFS_FILE_U64("capability",		VFS_CAP_FLAGS_MASK),
+	AA_SFS_DIR("rlimit",			aa_sfs_entry_rlimit),
+	AA_SFS_DIR("caps",			aa_sfs_entry_caps),
+	AA_SFS_DIR("ptrace",			aa_sfs_entry_ptrace),
+	AA_SFS_DIR("query",			aa_sfs_entry_query),
+	{ }
+};
+
+static struct aa_sfs_entry aa_sfs_entry_apparmor[] = {
+	AA_SFS_FILE_FOPS(".access", 0640, &aa_sfs_access),
+	AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops),
+	AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops),
+	AA_SFS_FILE_FOPS(".ns_level", 0666, &seq_ns_level_fops),
+	AA_SFS_FILE_FOPS(".ns_name", 0640, &seq_ns_name_fops),
+	AA_SFS_FILE_FOPS("profiles", 0440, &aa_sfs_profiles_fops),
+	AA_SFS_DIR("features", aa_sfs_entry_features),
+	{ }
+};
+
+static struct aa_sfs_entry aa_sfs_entry =
+	AA_SFS_DIR("apparmor", aa_sfs_entry_apparmor);
 
 /**
- * aafs_create_file - create a file entry in the apparmor securityfs
- * @fs_file: aa_fs_entry to build an entry for (NOT NULL)
+ * entry_create_file - create a file entry in the apparmor securityfs
+ * @fs_file: aa_sfs_entry to build an entry for (NOT NULL)
  * @parent: the parent dentry in the securityfs
  *
- * Use aafs_remove_file to remove entries created with this fn.
+ * Use entry_remove_file to remove entries created with this fn.
  */
-static int __init aafs_create_file(struct aa_fs_entry *fs_file,
-				   struct dentry *parent)
+static int __init entry_create_file(struct aa_sfs_entry *fs_file,
+				    struct dentry *parent)
 {
 	int error = 0;
 
@@ -1250,18 +2218,18 @@ static int __init aafs_create_file(struct aa_fs_entry *fs_file,
 	return error;
 }
 
-static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir);
+static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir);
 /**
- * aafs_create_dir - recursively create a directory entry in the securityfs
- * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL)
+ * entry_create_dir - recursively create a directory entry in the securityfs
+ * @fs_dir: aa_sfs_entry (and all child entries) to build (NOT NULL)
  * @parent: the parent dentry in the securityfs
  *
- * Use aafs_remove_dir to remove entries created with this fn.
+ * Use entry_remove_dir to remove entries created with this fn.
  */
-static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
-				  struct dentry *parent)
+static int __init entry_create_dir(struct aa_sfs_entry *fs_dir,
+				   struct dentry *parent)
 {
-	struct aa_fs_entry *fs_file;
+	struct aa_sfs_entry *fs_file;
 	struct dentry *dir;
 	int error;
 
@@ -1271,10 +2239,10 @@ static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
 	fs_dir->dentry = dir;
 
 	for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
-		if (fs_file->v_type == AA_FS_TYPE_DIR)
-			error = aafs_create_dir(fs_file, fs_dir->dentry);
+		if (fs_file->v_type == AA_SFS_TYPE_DIR)
+			error = entry_create_dir(fs_file, fs_dir->dentry);
 		else
-			error = aafs_create_file(fs_file, fs_dir->dentry);
+			error = entry_create_file(fs_file, fs_dir->dentry);
 		if (error)
 			goto failed;
 	}
@@ -1282,16 +2250,16 @@ static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
 	return 0;
 
 failed:
-	aafs_remove_dir(fs_dir);
+	entry_remove_dir(fs_dir);
 
 	return error;
 }
 
 /**
- * aafs_remove_file - drop a single file entry in the apparmor securityfs
- * @fs_file: aa_fs_entry to detach from the securityfs (NOT NULL)
+ * entry_remove_file - drop a single file entry in the apparmor securityfs
+ * @fs_file: aa_sfs_entry to detach from the securityfs (NOT NULL)
  */
-static void __init aafs_remove_file(struct aa_fs_entry *fs_file)
+static void __init entry_remove_file(struct aa_sfs_entry *fs_file)
 {
 	if (!fs_file->dentry)
 		return;
@@ -1301,21 +2269,21 @@ static void __init aafs_remove_file(struct aa_fs_entry *fs_file)
 }
 
 /**
- * aafs_remove_dir - recursively drop a directory entry from the securityfs
- * @fs_dir: aa_fs_entry (and all child entries) to detach (NOT NULL)
+ * entry_remove_dir - recursively drop a directory entry from the securityfs
+ * @fs_dir: aa_sfs_entry (and all child entries) to detach (NOT NULL)
  */
-static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
+static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir)
 {
-	struct aa_fs_entry *fs_file;
+	struct aa_sfs_entry *fs_file;
 
 	for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
-		if (fs_file->v_type == AA_FS_TYPE_DIR)
-			aafs_remove_dir(fs_file);
+		if (fs_file->v_type == AA_SFS_TYPE_DIR)
+			entry_remove_dir(fs_file);
 		else
-			aafs_remove_file(fs_file);
+			entry_remove_file(fs_file);
 	}
 
-	aafs_remove_file(fs_dir);
+	entry_remove_file(fs_dir);
 }
 
 /**
@@ -1325,7 +2293,7 @@ static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
  */
 void __init aa_destroy_aafs(void)
 {
-	aafs_remove_dir(&aa_fs_entry);
+	entry_remove_dir(&aa_sfs_entry);
 }
 
 
@@ -1374,6 +2342,59 @@ static int aa_mk_null_file(struct dentry *parent)
 	return error;
 }
 
+
+
+static const char *policy_get_link(struct dentry *dentry,
+				   struct inode *inode,
+				   struct delayed_call *done)
+{
+	struct aa_ns *ns;
+	struct path path;
+
+	if (!dentry)
+		return ERR_PTR(-ECHILD);
+	ns = aa_get_current_ns();
+	path.mnt = mntget(aafs_mnt);
+	path.dentry = dget(ns_dir(ns));
+	nd_jump_link(&path);
+	aa_put_ns(ns);
+
+	return NULL;
+}
+
+static int ns_get_name(char *buf, size_t size, struct aa_ns *ns,
+		       struct inode *inode)
+{
+	int res = snprintf(buf, size, "%s:[%lu]", AAFS_NAME, inode->i_ino);
+
+	if (res < 0 || res >= size)
+		res = -ENOENT;
+
+	return res;
+}
+
+static int policy_readlink(struct dentry *dentry, char __user *buffer,
+			   int buflen)
+{
+	struct aa_ns *ns;
+	char name[32];
+	int res;
+
+	ns = aa_get_current_ns();
+	res = ns_get_name(name, sizeof(name), ns, d_inode(dentry));
+	if (res >= 0)
+		res = readlink_copy(buffer, buflen, name);
+	aa_put_ns(ns);
+
+	return res;
+}
+
+static const struct inode_operations policy_link_iops = {
+	.readlink	= policy_readlink,
+	.get_link	= policy_get_link,
+};
+
+
 /**
  * aa_create_aafs - create the apparmor security filesystem
  *
@@ -1389,17 +2410,23 @@ static int __init aa_create_aafs(void)
 	if (!apparmor_initialized)
 		return 0;
 
-	if (aa_fs_entry.dentry) {
+	if (aa_sfs_entry.dentry) {
 		AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
 		return -EEXIST;
 	}
 
+	/* setup apparmorfs used to virtualize policy/ */
+	aafs_mnt = kern_mount(&aafs_ops);
+	if (IS_ERR(aafs_mnt))
+		panic("can't set apparmorfs up\n");
+	aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+
 	/* Populate fs tree. */
-	error = aafs_create_dir(&aa_fs_entry, NULL);
+	error = entry_create_dir(&aa_sfs_entry, NULL);
 	if (error)
 		goto error;
 
-	dent = securityfs_create_file(".load", 0666, aa_fs_entry.dentry,
+	dent = securityfs_create_file(".load", 0666, aa_sfs_entry.dentry,
 				      NULL, &aa_fs_profile_load);
 	if (IS_ERR(dent)) {
 		error = PTR_ERR(dent);
@@ -1407,7 +2434,7 @@ static int __init aa_create_aafs(void)
 	}
 	ns_subload(root_ns) = dent;
 
-	dent = securityfs_create_file(".replace", 0666, aa_fs_entry.dentry,
+	dent = securityfs_create_file(".replace", 0666, aa_sfs_entry.dentry,
 				      NULL, &aa_fs_profile_replace);
 	if (IS_ERR(dent)) {
 		error = PTR_ERR(dent);
@@ -1415,7 +2442,7 @@ static int __init aa_create_aafs(void)
 	}
 	ns_subreplace(root_ns) = dent;
 
-	dent = securityfs_create_file(".remove", 0666, aa_fs_entry.dentry,
+	dent = securityfs_create_file(".remove", 0666, aa_sfs_entry.dentry,
 				      NULL, &aa_fs_profile_remove);
 	if (IS_ERR(dent)) {
 		error = PTR_ERR(dent);
@@ -1423,14 +2450,31 @@ static int __init aa_create_aafs(void)
 	}
 	ns_subremove(root_ns) = dent;
 
-	mutex_lock(&root_ns->lock);
-	error = __aa_fs_ns_mkdir(root_ns, aa_fs_entry.dentry, "policy");
-	mutex_unlock(&root_ns->lock);
+	dent = securityfs_create_file("revision", 0444, aa_sfs_entry.dentry,
+				      NULL, &aa_fs_ns_revision_fops);
+	if (IS_ERR(dent)) {
+		error = PTR_ERR(dent);
+		goto error;
+	}
+	ns_subrevision(root_ns) = dent;
 
+	/* policy tree referenced by magic policy symlink */
+	mutex_lock(&root_ns->lock);
+	error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
+				aafs_mnt->mnt_root);
+	mutex_unlock(&root_ns->lock);
 	if (error)
 		goto error;
 
-	error = aa_mk_null_file(aa_fs_entry.dentry);
+	/* magic symlink similar to nsfs redirects based on task policy */
+	dent = securityfs_create_symlink("policy", aa_sfs_entry.dentry,
+					 NULL, &policy_link_iops);
+	if (IS_ERR(dent)) {
+		error = PTR_ERR(dent);
+		goto error;
+	}
+
+	error = aa_mk_null_file(aa_sfs_entry.dentry);
 	if (error)
 		goto error;
 
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 87f40fa..8f9ecac 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -77,14 +77,24 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
 			audit_log_format(ab, " error=%d", aad(sa)->error);
 	}
 
-	if (aad(sa)->profile) {
-		struct aa_profile *profile = aad(sa)->profile;
-		if (profile->ns != root_ns) {
-			audit_log_format(ab, " namespace=");
-			audit_log_untrustedstring(ab, profile->ns->base.hname);
+	if (aad(sa)->label) {
+		struct aa_label *label = aad(sa)->label;
+
+		if (label_isprofile(label)) {
+			struct aa_profile *profile = labels_profile(label);
+
+			if (profile->ns != root_ns) {
+				audit_log_format(ab, " namespace=");
+				audit_log_untrustedstring(ab,
+						       profile->ns->base.hname);
+			}
+			audit_log_format(ab, " profile=");
+			audit_log_untrustedstring(ab, profile->base.hname);
+		} else {
+			audit_log_format(ab, " label=");
+			aa_label_xaudit(ab, root_ns, label, FLAG_VIEW_SUBNS,
+					GFP_ATOMIC);
 		}
-		audit_log_format(ab, " profile=");
-		audit_log_untrustedstring(ab, profile->base.hname);
 	}
 
 	if (aad(sa)->name) {
@@ -139,8 +149,7 @@ int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
 	if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
 		type = AUDIT_APPARMOR_KILL;
 
-	if (!unconfined(profile))
-		aad(sa)->profile = profile;
+	aad(sa)->label = &profile->label;
 
 	aa_audit_msg(type, sa, cb);
 
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index ed0a3e6..67e3471 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -28,8 +28,8 @@
  */
 #include "capability_names.h"
 
-struct aa_fs_entry aa_fs_entry_caps[] = {
-	AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK),
+struct aa_sfs_entry aa_sfs_entry_caps[] = {
+	AA_SFS_FILE_STRING("mask", AA_SFS_CAPS_MASK),
 	{ }
 };
 
@@ -48,15 +48,16 @@ static DEFINE_PER_CPU(struct audit_cache, audit_cache);
 static void audit_cb(struct audit_buffer *ab, void *va)
 {
 	struct common_audit_data *sa = va;
+
 	audit_log_format(ab, " capname=");
 	audit_log_untrustedstring(ab, capability_names[sa->u.cap]);
 }
 
 /**
  * audit_caps - audit a capability
+ * @sa: audit data
  * @profile: profile being tested for confinement (NOT NULL)
  * @cap: capability tested
- @audit: whether an audit record should be generated
  * @error: error code returned by test
  *
  * Do auditing of capability and handle, audit/complain/kill modes switching
@@ -64,16 +65,13 @@ static void audit_cb(struct audit_buffer *ab, void *va)
  *
  * Returns: 0 or sa->error on success,  error code on failure
  */
-static int audit_caps(struct aa_profile *profile, int cap, int audit,
-		      int error)
+static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
+		      int cap, int error)
 {
 	struct audit_cache *ent;
 	int type = AUDIT_APPARMOR_AUTO;
-	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, OP_CAPABLE);
-	sa.u.cap = cap;
-	aad(&sa)->error = error;
-	if (audit == SECURITY_CAP_NOAUDIT)
-		aad(&sa)->info = "optional: no audit";
+
+	aad(sa)->error = error;
 
 	if (likely(!error)) {
 		/* test if auditing is being forced */
@@ -105,24 +103,44 @@ static int audit_caps(struct aa_profile *profile, int cap, int audit,
 	}
 	put_cpu_var(audit_cache);
 
-	return aa_audit(type, profile, &sa, audit_cb);
+	return aa_audit(type, profile, sa, audit_cb);
 }
 
 /**
  * profile_capable - test if profile allows use of capability @cap
  * @profile: profile being enforced    (NOT NULL, NOT unconfined)
  * @cap: capability to test if allowed
+ * @audit: whether an audit record should be generated
+ * @sa: audit data (MAY BE NULL indicating no auditing)
  *
  * Returns: 0 if allowed else -EPERM
  */
-static int profile_capable(struct aa_profile *profile, int cap)
+static int profile_capable(struct aa_profile *profile, int cap, int audit,
+			   struct common_audit_data *sa)
 {
-	return cap_raised(profile->caps.allow, cap) ? 0 : -EPERM;
+	int error;
+
+	if (cap_raised(profile->caps.allow, cap) &&
+	    !cap_raised(profile->caps.denied, cap))
+		error = 0;
+	else
+		error = -EPERM;
+
+	if (audit == SECURITY_CAP_NOAUDIT) {
+		if (!COMPLAIN_MODE(profile))
+			return error;
+		/* audit the cap request in complain mode but note that it
+		 * should be optional.
+		 */
+		aad(sa)->info = "optional: no audit";
+	}
+
+	return audit_caps(sa, profile, cap, error);
 }
 
 /**
  * aa_capable - test permission to use capability
- * @profile: profile being tested against (NOT NULL)
+ * @label: label being tested for capability (NOT NULL)
  * @cap: capability to be tested
  * @audit: whether an audit record should be generated
  *
@@ -130,14 +148,15 @@ static int profile_capable(struct aa_profile *profile, int cap)
  *
  * Returns: 0 on success, or else an error code.
  */
-int aa_capable(struct aa_profile *profile, int cap, int audit)
+int aa_capable(struct aa_label *label, int cap, int audit)
 {
-	int error = profile_capable(profile, cap);
+	struct aa_profile *profile;
+	int error = 0;
+	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, OP_CAPABLE);
 
-	if (audit == SECURITY_CAP_NOAUDIT) {
-		if (!COMPLAIN_MODE(profile))
-			return error;
-	}
+	sa.u.cap = cap;
+	error = fn_for_each_confined(label, profile,
+			profile_capable(profile, cap, audit, &sa));
 
-	return audit_caps(profile, cap, audit, error);
+	return error;
 }
diff --git a/security/apparmor/context.c b/security/apparmor/context.c
index 1fc16b8..c95f1ac 100644
--- a/security/apparmor/context.c
+++ b/security/apparmor/context.c
@@ -14,9 +14,9 @@
  *
  *
  * AppArmor sets confinement on every task, via the the aa_task_ctx and
- * the aa_task_ctx.profile, both of which are required and are not allowed
+ * the aa_task_ctx.label, both of which are required and are not allowed
  * to be NULL.  The aa_task_ctx is not reference counted and is unique
- * to each cred (which is reference count).  The profile pointed to by
+ * to each cred (which is reference counted).  The label pointed to by
  * the task_ctx is reference counted.
  *
  * TODO
@@ -47,9 +47,9 @@ struct aa_task_ctx *aa_alloc_task_context(gfp_t flags)
 void aa_free_task_context(struct aa_task_ctx *ctx)
 {
 	if (ctx) {
-		aa_put_profile(ctx->profile);
-		aa_put_profile(ctx->previous);
-		aa_put_profile(ctx->onexec);
+		aa_put_label(ctx->label);
+		aa_put_label(ctx->previous);
+		aa_put_label(ctx->onexec);
 
 		kzfree(ctx);
 	}
@@ -63,41 +63,41 @@ void aa_free_task_context(struct aa_task_ctx *ctx)
 void aa_dup_task_context(struct aa_task_ctx *new, const struct aa_task_ctx *old)
 {
 	*new = *old;
-	aa_get_profile(new->profile);
-	aa_get_profile(new->previous);
-	aa_get_profile(new->onexec);
+	aa_get_label(new->label);
+	aa_get_label(new->previous);
+	aa_get_label(new->onexec);
 }
 
 /**
- * aa_get_task_profile - Get another task's profile
+ * aa_get_task_label - Get another task's label
  * @task: task to query  (NOT NULL)
  *
- * Returns: counted reference to @task's profile
+ * Returns: counted reference to @task's label
  */
-struct aa_profile *aa_get_task_profile(struct task_struct *task)
+struct aa_label *aa_get_task_label(struct task_struct *task)
 {
-	struct aa_profile *p;
+	struct aa_label *p;
 
 	rcu_read_lock();
-	p = aa_get_profile(__aa_task_profile(task));
+	p = aa_get_newest_label(__aa_task_raw_label(task));
 	rcu_read_unlock();
 
 	return p;
 }
 
 /**
- * aa_replace_current_profile - replace the current tasks profiles
- * @profile: new profile  (NOT NULL)
+ * aa_replace_current_label - replace the current tasks label
+ * @label: new label  (NOT NULL)
  *
  * Returns: 0 or error on failure
  */
-int aa_replace_current_profile(struct aa_profile *profile)
+int aa_replace_current_label(struct aa_label *label)
 {
 	struct aa_task_ctx *ctx = current_ctx();
 	struct cred *new;
-	AA_BUG(!profile);
+	AA_BUG(!label);
 
-	if (ctx->profile == profile)
+	if (ctx->label == label)
 		return 0;
 
 	if (current_cred() != current_real_cred())
@@ -108,8 +108,8 @@ int aa_replace_current_profile(struct aa_profile *profile)
 		return -ENOMEM;
 
 	ctx = cred_ctx(new);
-	if (unconfined(profile) || (ctx->profile->ns != profile->ns))
-		/* if switching to unconfined or a different profile namespace
+	if (unconfined(label) || (labels_ns(ctx->label) != labels_ns(label)))
+		/* if switching to unconfined or a different label namespace
 		 * clear out context state
 		 */
 		aa_clear_task_ctx_trans(ctx);
@@ -120,9 +120,9 @@ int aa_replace_current_profile(struct aa_profile *profile)
 	 * keeping @profile valid, so make sure to get its reference before
 	 * dropping the reference on ctx->profile
 	 */
-	aa_get_profile(profile);
-	aa_put_profile(ctx->profile);
-	ctx->profile = profile;
+	aa_get_label(label);
+	aa_put_label(ctx->label);
+	ctx->label = label;
 
 	commit_creds(new);
 	return 0;
@@ -130,11 +130,11 @@ int aa_replace_current_profile(struct aa_profile *profile)
 
 /**
  * aa_set_current_onexec - set the tasks change_profile to happen onexec
- * @profile: system profile to set at exec  (MAYBE NULL to clear value)
- *
+ * @label: system label to set at exec  (MAYBE NULL to clear value)
+ * @stack: whether stacking should be done
  * Returns: 0 or error on failure
  */
-int aa_set_current_onexec(struct aa_profile *profile)
+int aa_set_current_onexec(struct aa_label *label, bool stack)
 {
 	struct aa_task_ctx *ctx;
 	struct cred *new = prepare_creds();
@@ -142,9 +142,10 @@ int aa_set_current_onexec(struct aa_profile *profile)
 		return -ENOMEM;
 
 	ctx = cred_ctx(new);
-	aa_get_profile(profile);
-	aa_put_profile(ctx->onexec);
-	ctx->onexec = profile;
+	aa_get_label(label);
+	aa_clear_task_ctx_trans(ctx);
+	ctx->onexec = label;
+	ctx->token = stack;
 
 	commit_creds(new);
 	return 0;
@@ -152,7 +153,7 @@ int aa_set_current_onexec(struct aa_profile *profile)
 
 /**
  * aa_set_current_hat - set the current tasks hat
- * @profile: profile to set as the current hat  (NOT NULL)
+ * @label: label to set as the current hat  (NOT NULL)
  * @token: token value that must be specified to change from the hat
  *
  * Do switch of tasks hat.  If the task is currently in a hat
@@ -160,29 +161,29 @@ int aa_set_current_onexec(struct aa_profile *profile)
  *
  * Returns: 0 or error on failure
  */
-int aa_set_current_hat(struct aa_profile *profile, u64 token)
+int aa_set_current_hat(struct aa_label *label, u64 token)
 {
 	struct aa_task_ctx *ctx;
 	struct cred *new = prepare_creds();
 	if (!new)
 		return -ENOMEM;
-	AA_BUG(!profile);
+	AA_BUG(!label);
 
 	ctx = cred_ctx(new);
 	if (!ctx->previous) {
 		/* transfer refcount */
-		ctx->previous = ctx->profile;
+		ctx->previous = ctx->label;
 		ctx->token = token;
 	} else if (ctx->token == token) {
-		aa_put_profile(ctx->profile);
+		aa_put_label(ctx->label);
 	} else {
 		/* previous_profile && ctx->token != token */
 		abort_creds(new);
 		return -EACCES;
 	}
-	ctx->profile = aa_get_newest_profile(profile);
+	ctx->label = aa_get_newest_label(label);
 	/* clear exec on switching context */
-	aa_put_profile(ctx->onexec);
+	aa_put_label(ctx->onexec);
 	ctx->onexec = NULL;
 
 	commit_creds(new);
@@ -190,15 +191,15 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token)
 }
 
 /**
- * aa_restore_previous_profile - exit from hat context restoring the profile
+ * aa_restore_previous_label - exit from hat context restoring previous label
  * @token: the token that must be matched to exit hat context
  *
- * Attempt to return out of a hat to the previous profile.  The token
+ * Attempt to return out of a hat to the previous label.  The token
  * must match the stored token value.
  *
  * Returns: 0 or error of failure
  */
-int aa_restore_previous_profile(u64 token)
+int aa_restore_previous_label(u64 token)
 {
 	struct aa_task_ctx *ctx;
 	struct cred *new = prepare_creds();
@@ -210,15 +211,15 @@ int aa_restore_previous_profile(u64 token)
 		abort_creds(new);
 		return -EACCES;
 	}
-	/* ignore restores when there is no saved profile */
+	/* ignore restores when there is no saved label */
 	if (!ctx->previous) {
 		abort_creds(new);
 		return 0;
 	}
 
-	aa_put_profile(ctx->profile);
-	ctx->profile = aa_get_newest_profile(ctx->previous);
-	AA_BUG(!ctx->profile);
+	aa_put_label(ctx->label);
+	ctx->label = aa_get_newest_label(ctx->previous);
+	AA_BUG(!ctx->label);
 	/* clear exec && prev information when restoring to previous context */
 	aa_clear_task_ctx_trans(ctx);
 
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 001e133..d059444 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -51,76 +51,254 @@ void aa_free_domain_entries(struct aa_domain *domain)
 
 /**
  * may_change_ptraced_domain - check if can change profile on ptraced task
- * @to_profile: profile to change to  (NOT NULL)
+ * @to_label: label to change to  (NOT NULL)
+ * @info: message if there is an error
  *
  * Check if current is ptraced and if so if the tracing task is allowed
  * to trace the new domain
  *
  * Returns: %0 or error if change not allowed
  */
-static int may_change_ptraced_domain(struct aa_profile *to_profile)
+static int may_change_ptraced_domain(struct aa_label *to_label,
+				     const char **info)
 {
 	struct task_struct *tracer;
-	struct aa_profile *tracerp = NULL;
+	struct aa_label *tracerl = NULL;
 	int error = 0;
 
 	rcu_read_lock();
 	tracer = ptrace_parent(current);
 	if (tracer)
 		/* released below */
-		tracerp = aa_get_task_profile(tracer);
+		tracerl = aa_get_task_label(tracer);
 
 	/* not ptraced */
-	if (!tracer || unconfined(tracerp))
+	if (!tracer || unconfined(tracerl))
 		goto out;
 
-	error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
+	error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
 
 out:
 	rcu_read_unlock();
-	aa_put_profile(tracerp);
+	aa_put_label(tracerl);
 
+	if (error)
+		*info = "ptrace prevents transition";
 	return error;
 }
 
+/**** TODO: dedup to aa_label_match - needs perm and dfa, merging
+ * specifically this is an exact copy of aa_label_match except
+ * aa_compute_perms is replaced with aa_compute_fperms
+ * and policy.dfa with file.dfa
+ ****/
+/* match a profile and its associated ns component if needed
+ * Assumes visibility test has already been done.
+ * If a subns profile is not to be matched, it should be prescreened with
+ * the visibility test.
+ */
+static inline unsigned int match_component(struct aa_profile *profile,
+					   struct aa_profile *tp,
+					   bool stack, unsigned int state)
+{
+	const char *ns_name;
+
+	if (stack)
+		state = aa_dfa_match(profile->file.dfa, state, "&");
+	if (profile->ns == tp->ns)
+		return aa_dfa_match(profile->file.dfa, state, tp->base.hname);
+
+	/* try matching with namespace name and then profile */
+	ns_name = aa_ns_name(profile->ns, tp->ns, true);
+	state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+	state = aa_dfa_match(profile->file.dfa, state, ns_name);
+	state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+	return aa_dfa_match(profile->file.dfa, state, tp->base.hname);
+}
+
+/**
+ * label_compound_match - find perms for full compound label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @stack: whether this is a stacking request
+ * @state: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: perms struct to set
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for A//&B//&C
+ * @perms should be preinitialized with allperms OR a previous permission
+ *        check to be stacked.
+ */
+static int label_compound_match(struct aa_profile *profile,
+				struct aa_label *label, bool stack,
+				unsigned int state, bool subns, u32 request,
+				struct aa_perms *perms)
+{
+	struct aa_profile *tp;
+	struct label_it i;
+	struct path_cond cond = { };
+
+	/* find first subcomponent that is visible */
+	label_for_each(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, stack, state);
+		if (!state)
+			goto fail;
+		goto next;
+	}
+
+	/* no component visible */
+	*perms = allperms;
+	return 0;
+
+next:
+	label_for_each_cont(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = aa_dfa_match(profile->file.dfa, state, "//&");
+		state = match_component(profile, tp, false, state);
+		if (!state)
+			goto fail;
+	}
+	*perms = aa_compute_fperms(profile->file.dfa, state, &cond);
+	aa_apply_modes_to_perms(profile, perms);
+	if ((perms->allow & request) != request)
+		return -EACCES;
+
+	return 0;
+
+fail:
+	*perms = nullperms;
+	return -EACCES;
+}
+
+/**
+ * label_components_match - find perms for all subcomponents of a label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @stack: whether this is a stacking request
+ * @start: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: an initialized perms struct to add accumulation to
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for each of A and B and C
+ * @perms should be preinitialized with allperms OR a previous permission
+ *        check to be stacked.
+ */
+static int label_components_match(struct aa_profile *profile,
+				  struct aa_label *label, bool stack,
+				  unsigned int start, bool subns, u32 request,
+				  struct aa_perms *perms)
+{
+	struct aa_profile *tp;
+	struct label_it i;
+	struct aa_perms tmp;
+	struct path_cond cond = { };
+	unsigned int state = 0;
+
+	/* find first subcomponent to test */
+	label_for_each(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, stack, start);
+		if (!state)
+			goto fail;
+		goto next;
+	}
+
+	/* no subcomponents visible - no change in perms */
+	return 0;
+
+next:
+	tmp = aa_compute_fperms(profile->file.dfa, state, &cond);
+	aa_apply_modes_to_perms(profile, &tmp);
+	aa_perms_accum(perms, &tmp);
+	label_for_each_cont(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, stack, start);
+		if (!state)
+			goto fail;
+		tmp = aa_compute_fperms(profile->file.dfa, state, &cond);
+		aa_apply_modes_to_perms(profile, &tmp);
+		aa_perms_accum(perms, &tmp);
+	}
+
+	if ((perms->allow & request) != request)
+		return -EACCES;
+
+	return 0;
+
+fail:
+	*perms = nullperms;
+	return -EACCES;
+}
+
+/**
+ * label_match - do a multi-component label match
+ * @profile: profile to match against (NOT NULL)
+ * @label: label to match (NOT NULL)
+ * @stack: whether this is a stacking request
+ * @state: state to start in
+ * @subns: whether to match subns components
+ * @request: permission request
+ * @perms: Returns computed perms (NOT NULL)
+ *
+ * Returns: 0 on success else ERROR
+ */
+static int label_match(struct aa_profile *profile, struct aa_label *label,
+		       bool stack, unsigned int state, bool subns, u32 request,
+		       struct aa_perms *perms)
+{
+	int error;
+
+	*perms = nullperms;
+	error = label_compound_match(profile, label, stack, state, subns,
+				     request, perms);
+	if (!error)
+		return error;
+
+	*perms = allperms;
+	return label_components_match(profile, label, stack, state, subns,
+				      request, perms);
+}
+
+/******* end TODO: dedup *****/
+
 /**
  * change_profile_perms - find permissions for change_profile
  * @profile: the current profile  (NOT NULL)
- * @ns: the namespace being switched to  (NOT NULL)
- * @name: the name of the profile to change to  (NOT NULL)
+ * @target: label to transition to (NOT NULL)
+ * @stack: whether this is a stacking request
  * @request: requested perms
  * @start: state to start matching in
  *
+ *
  * Returns: permission set
+ *
+ * Currently this only matches the full label A//&B//&C or the individual
+ * components A, B, and C, not arbitrary combinations, e.g. A//&B, C.
  */
-static struct file_perms change_profile_perms(struct aa_profile *profile,
-					      struct aa_ns *ns,
-					      const char *name, u32 request,
-					      unsigned int start)
+static int change_profile_perms(struct aa_profile *profile,
+				struct aa_label *target, bool stack,
+				u32 request, unsigned int start,
+				struct aa_perms *perms)
 {
-	struct file_perms perms;
-	struct path_cond cond = { };
-	unsigned int state;
-
-	if (unconfined(profile)) {
-		perms.allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
-		perms.audit = perms.quiet = perms.kill = 0;
-		return perms;
-	} else if (!profile->file.dfa) {
-		return nullperms;
-	} else if ((ns == profile->ns)) {
-		/* try matching against rules with out namespace prepended */
-		aa_str_perms(profile->file.dfa, start, name, &cond, &perms);
-		if (COMBINED_PERM_MASK(perms) & request)
-			return perms;
+	if (profile_unconfined(profile)) {
+		perms->allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
+		perms->audit = perms->quiet = perms->kill = 0;
+		return 0;
 	}
 
-	/* try matching with namespace name and then profile */
-	state = aa_dfa_match(profile->file.dfa, start, ns->base.name);
-	state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
-	aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
-
-	return perms;
+	/* TODO: add profile in ns screening */
+	return label_match(profile, target, stack, start, true, request, perms);
 }
 
 /**
@@ -144,7 +322,7 @@ static struct aa_profile *__attach_match(const char *name,
 	struct aa_profile *profile, *candidate = NULL;
 
 	list_for_each_entry_rcu(profile, head, base.list) {
-		if (profile->flags & PFLAG_NULL)
+		if (profile->label.flags & FLAG_NULL)
 			continue;
 		if (profile->xmatch && profile->xmatch_len > len) {
 			unsigned int state = aa_dfa_match(profile->xmatch,
@@ -169,10 +347,10 @@ static struct aa_profile *__attach_match(const char *name,
  * @list: list to search  (NOT NULL)
  * @name: the executable name to match against  (NOT NULL)
  *
- * Returns: profile or NULL if no match found
+ * Returns: label or NULL if no match found
  */
-static struct aa_profile *find_attach(struct aa_ns *ns,
-				      struct list_head *list, const char *name)
+static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list,
+				    const char *name)
 {
 	struct aa_profile *profile;
 
@@ -180,49 +358,7 @@ static struct aa_profile *find_attach(struct aa_ns *ns,
 	profile = aa_get_profile(__attach_match(name, list));
 	rcu_read_unlock();
 
-	return profile;
-}
-
-/**
- * separate_fqname - separate the namespace and profile names
- * @fqname: the fqname name to split  (NOT NULL)
- * @ns_name: the namespace name if it exists  (NOT NULL)
- *
- * This is the xtable equivalent routine of aa_split_fqname.  It finds the
- * split in an xtable fqname which contains an embedded \0 instead of a :
- * if a namespace is specified.  This is done so the xtable is constant and
- * isn't re-split on every lookup.
- *
- * Either the profile or namespace name may be optional but if the namespace
- * is specified the profile name termination must be present.  This results
- * in the following possible encodings:
- * profile_name\0
- * :ns_name\0profile_name\0
- * :ns_name\0\0
- *
- * NOTE: the xtable fqname is pre-validated at load time in unpack_trans_table
- *
- * Returns: profile name if it is specified else NULL
- */
-static const char *separate_fqname(const char *fqname, const char **ns_name)
-{
-	const char *name;
-
-	if (fqname[0] == ':') {
-		/* In this case there is guaranteed to be two \0 terminators
-		 * in the string.  They are verified at load time by
-		 * by unpack_trans_table
-		 */
-		*ns_name = fqname + 1;		/* skip : */
-		name = *ns_name + strlen(*ns_name) + 1;
-		if (!*name)
-			name = NULL;
-	} else {
-		*ns_name = NULL;
-		name = fqname;
-	}
-
-	return name;
+	return profile ? &profile->label : NULL;
 }
 
 static const char *next_name(int xtype, const char *name)
@@ -234,99 +370,370 @@ static const char *next_name(int xtype, const char *name)
  * x_table_lookup - lookup an x transition name via transition table
  * @profile: current profile (NOT NULL)
  * @xindex: index into x transition table
+ * @name: returns: name tested to find label (NOT NULL)
  *
- * Returns: refcounted profile, or NULL on failure (MAYBE NULL)
+ * Returns: refcounted label, or NULL on failure (MAYBE NULL)
  */
-static struct aa_profile *x_table_lookup(struct aa_profile *profile, u32 xindex)
+static struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
+				       const char **name)
 {
-	struct aa_profile *new_profile = NULL;
-	struct aa_ns *ns = profile->ns;
+	struct aa_label *label = NULL;
 	u32 xtype = xindex & AA_X_TYPE_MASK;
 	int index = xindex & AA_X_INDEX_MASK;
-	const char *name;
+
+	AA_BUG(!name);
 
 	/* index is guaranteed to be in range, validated at load time */
-	for (name = profile->file.trans.table[index]; !new_profile && name;
-	     name = next_name(xtype, name)) {
-		struct aa_ns *new_ns;
-		const char *xname = NULL;
-
-		new_ns = NULL;
+	/* TODO: move lookup parsing to unpack time so this is a straight
+	 *       index into the resultant label
+	 */
+	for (*name = profile->file.trans.table[index]; !label && *name;
+	     *name = next_name(xtype, *name)) {
 		if (xindex & AA_X_CHILD) {
+			struct aa_profile *new_profile;
 			/* release by caller */
-			new_profile = aa_find_child(profile, name);
+			new_profile = aa_find_child(profile, *name);
+			if (new_profile)
+				label = &new_profile->label;
 			continue;
-		} else if (*name == ':') {
-			/* switching namespace */
-			const char *ns_name;
-			xname = name = separate_fqname(name, &ns_name);
-			if (!xname)
-				/* no name so use profile name */
-				xname = profile->base.hname;
-			if (*ns_name == '@') {
-				/* TODO: variable support */
-				;
-			}
-			/* released below */
-			new_ns = aa_find_ns(ns, ns_name);
-			if (!new_ns)
-				continue;
-		} else if (*name == '@') {
-			/* TODO: variable support */
-			continue;
-		} else {
-			/* basic namespace lookup */
-			xname = name;
 		}
-
-		/* released by caller */
-		new_profile = aa_lookup_profile(new_ns ? new_ns : ns, xname);
-		aa_put_ns(new_ns);
+		label = aa_label_parse(&profile->label, *name, GFP_ATOMIC,
+				       true, false);
+		if (IS_ERR(label))
+			label = NULL;
 	}
 
 	/* released by caller */
-	return new_profile;
+
+	return label;
 }
 
 /**
- * x_to_profile - get target profile for a given xindex
+ * x_to_label - get target label for a given xindex
  * @profile: current profile  (NOT NULL)
  * @name: name to lookup (NOT NULL)
  * @xindex: index into x transition table
+ * @lookupname: returns: name used in lookup if one was specified (NOT NULL)
  *
- * find profile for a transition index
+ * find label for a transition index
  *
- * Returns: refcounted profile or NULL if not found available
+ * Returns: refcounted label or NULL if not found available
  */
-static struct aa_profile *x_to_profile(struct aa_profile *profile,
-				       const char *name, u32 xindex)
+static struct aa_label *x_to_label(struct aa_profile *profile,
+				   const char *name, u32 xindex,
+				   const char **lookupname,
+				   const char **info)
 {
-	struct aa_profile *new_profile = NULL;
+	struct aa_label *new = NULL;
 	struct aa_ns *ns = profile->ns;
 	u32 xtype = xindex & AA_X_TYPE_MASK;
+	const char *stack = NULL;
 
 	switch (xtype) {
 	case AA_X_NONE:
 		/* fail exec unless ix || ux fallback - handled by caller */
-		return NULL;
+		*lookupname = NULL;
+		break;
+	case AA_X_TABLE:
+		/* TODO: fix when perm mapping done at unload */
+		stack = profile->file.trans.table[xindex & AA_X_INDEX_MASK];
+		if (*stack != '&') {
+			/* released by caller */
+			new = x_table_lookup(profile, xindex, lookupname);
+			stack = NULL;
+			break;
+		}
+		/* fall through to X_NAME */
 	case AA_X_NAME:
 		if (xindex & AA_X_CHILD)
 			/* released by caller */
-			new_profile = find_attach(ns, &profile->base.profiles,
-						  name);
+			new = find_attach(ns, &profile->base.profiles,
+						name);
 		else
 			/* released by caller */
-			new_profile = find_attach(ns, &ns->base.profiles,
-						  name);
-		break;
-	case AA_X_TABLE:
-		/* released by caller */
-		new_profile = x_table_lookup(profile, xindex);
+			new = find_attach(ns, &ns->base.profiles,
+						name);
+		*lookupname = name;
 		break;
 	}
 
+	if (!new) {
+		if (xindex & AA_X_INHERIT) {
+			/* (p|c|n)ix - don't change profile but do
+			 * use the newest version
+			 */
+			*info = "ix fallback";
+			/* no profile && no error */
+			new = aa_get_newest_label(&profile->label);
+		} else if (xindex & AA_X_UNCONFINED) {
+			new = aa_get_newest_label(ns_unconfined(profile->ns));
+			*info = "ux fallback";
+		}
+	}
+
+	if (new && stack) {
+		/* base the stack on post domain transition */
+		struct aa_label *base = new;
+
+		new = aa_label_parse(base, stack, GFP_ATOMIC, true, false);
+		if (IS_ERR(new))
+			new = NULL;
+		aa_put_label(base);
+	}
+
 	/* released by caller */
-	return new_profile;
+	return new;
+}
+
+static struct aa_label *profile_transition(struct aa_profile *profile,
+					   const struct linux_binprm *bprm,
+					   char *buffer, struct path_cond *cond,
+					   bool *secure_exec)
+{
+	struct aa_label *new = NULL;
+	const char *info = NULL, *name = NULL, *target = NULL;
+	unsigned int state = profile->file.start;
+	struct aa_perms perms = {};
+	bool nonewprivs = false;
+	int error = 0;
+
+	AA_BUG(!profile);
+	AA_BUG(!bprm);
+	AA_BUG(!buffer);
+
+	error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
+			     &name, &info, profile->disconnected);
+	if (error) {
+		if (profile_unconfined(profile) ||
+		    (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
+			AA_DEBUG("name lookup ix on error");
+			error = 0;
+			new = aa_get_newest_label(&profile->label);
+		}
+		name = bprm->filename;
+		goto audit;
+	}
+
+	if (profile_unconfined(profile)) {
+		new = find_attach(profile->ns, &profile->ns->base.profiles,
+				  name);
+		if (new) {
+			AA_DEBUG("unconfined attached to new label");
+			return new;
+		}
+		AA_DEBUG("unconfined exec no attachment");
+		return aa_get_newest_label(&profile->label);
+	}
+
+	/* find exec permissions for name */
+	state = aa_str_perms(profile->file.dfa, state, name, cond, &perms);
+	if (perms.allow & MAY_EXEC) {
+		/* exec permission determines how to transition */
+		new = x_to_label(profile, name, perms.xindex, &target, &info);
+		if (new && new->proxy == profile->label.proxy && info) {
+			/* hack ix fallback - improve how this is detected */
+			goto audit;
+		} else if (!new) {
+			error = -EACCES;
+			info = "profile transition not found";
+			/* remove MAY_EXEC to audit as failure */
+			perms.allow &= ~MAY_EXEC;
+		}
+	} else if (COMPLAIN_MODE(profile)) {
+		/* no exec permission - learning mode */
+		struct aa_profile *new_profile = aa_new_null_profile(profile,
+							      false, name,
+							      GFP_ATOMIC);
+		if (!new_profile) {
+			error = -ENOMEM;
+			info = "could not create null profile";
+		} else {
+			error = -EACCES;
+			new = &new_profile->label;
+		}
+		perms.xindex |= AA_X_UNSAFE;
+	} else
+		/* fail exec */
+		error = -EACCES;
+
+	if (!new)
+		goto audit;
+
+	/* Policy has specified a domain transition. If no_new_privs and
+	 * confined and not transitioning to the current domain, fail.
+	 *
+	 * NOTE: Domain transitions from unconfined and to strictly stacked
+	 * subsets are allowed even when no_new_privs is set because this
+	 * always results in a further reduction of permissions.
+	 */
+	if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
+	    !profile_unconfined(profile) &&
+	    !aa_label_is_subset(new, &profile->label)) {
+		error = -EPERM;
+		info = "no new privs";
+		nonewprivs = true;
+		perms.allow &= ~MAY_EXEC;
+		goto audit;
+	}
+
+	if (!(perms.xindex & AA_X_UNSAFE)) {
+		if (DEBUG_ON) {
+			dbg_printk("apparmor: scrubbing environment variables"
+				   " for %s profile=", name);
+			aa_label_printk(new, GFP_ATOMIC);
+			dbg_printk("\n");
+		}
+		*secure_exec = true;
+	}
+
+audit:
+	aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
+		      cond->uid, info, error);
+	if (!new || nonewprivs) {
+		aa_put_label(new);
+		return ERR_PTR(error);
+	}
+
+	return new;
+}
+
+static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
+			  bool stack, const struct linux_binprm *bprm,
+			  char *buffer, struct path_cond *cond,
+			  bool *secure_exec)
+{
+	unsigned int state = profile->file.start;
+	struct aa_perms perms = {};
+	const char *xname = NULL, *info = "change_profile onexec";
+	int error = -EACCES;
+
+	AA_BUG(!profile);
+	AA_BUG(!onexec);
+	AA_BUG(!bprm);
+	AA_BUG(!buffer);
+
+	if (profile_unconfined(profile)) {
+		/* change_profile on exec already granted */
+		/*
+		 * NOTE: Domain transitions from unconfined are allowed
+		 * even when no_new_privs is set because this always results
+		 * in a further reduction of permissions.
+		 */
+		return 0;
+	}
+
+	error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
+			     &xname, &info, profile->disconnected);
+	if (error) {
+		if (profile_unconfined(profile) ||
+		    (profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
+			AA_DEBUG("name lookup ix on error");
+			error = 0;
+		}
+		xname = bprm->filename;
+		goto audit;
+	}
+
+	/* find exec permissions for name */
+	state = aa_str_perms(profile->file.dfa, state, xname, cond, &perms);
+	if (!(perms.allow & AA_MAY_ONEXEC)) {
+		info = "no change_onexec valid for executable";
+		goto audit;
+	}
+	/* test if this exec can be paired with change_profile onexec.
+	 * onexec permission is linked to exec with a standard pairing
+	 * exec\0change_profile
+	 */
+	state = aa_dfa_null_transition(profile->file.dfa, state);
+	error = change_profile_perms(profile, onexec, stack, AA_MAY_ONEXEC,
+				     state, &perms);
+	if (error) {
+		perms.allow &= ~AA_MAY_ONEXEC;
+		goto audit;
+	}
+	/* Policy has specified a domain transition. If no_new_privs and
+	 * confined and not transitioning to the current domain, fail.
+	 *
+	 * NOTE: Domain transitions from unconfined and to strictly stacked
+	 * subsets are allowed even when no_new_privs is set because this
+	 * always results in a further reduction of permissions.
+	 */
+	if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
+	    !profile_unconfined(profile) &&
+	    !aa_label_is_subset(onexec, &profile->label)) {
+		error = -EPERM;
+		info = "no new privs";
+		perms.allow &= ~AA_MAY_ONEXEC;
+		goto audit;
+	}
+
+	if (!(perms.xindex & AA_X_UNSAFE)) {
+		if (DEBUG_ON) {
+			dbg_printk("apparmor: scrubbing environment "
+				   "variables for %s label=", xname);
+			aa_label_printk(onexec, GFP_ATOMIC);
+			dbg_printk("\n");
+		}
+		*secure_exec = true;
+	}
+
+audit:
+	return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
+			     NULL, onexec, cond->uid, info, error);
+}
+
+/* ensure non-ns domain transitions are correctly applied with onexec */
+
+static struct aa_label *handle_onexec(struct aa_label *label,
+				      struct aa_label *onexec, bool stack,
+				      const struct linux_binprm *bprm,
+				      char *buffer, struct path_cond *cond,
+				      bool *unsafe)
+{
+	struct aa_profile *profile;
+	struct aa_label *new;
+	int error;
+
+	AA_BUG(!label);
+	AA_BUG(!onexec);
+	AA_BUG(!bprm);
+	AA_BUG(!buffer);
+
+	if (!stack) {
+		error = fn_for_each_in_ns(label, profile,
+				profile_onexec(profile, onexec, stack,
+					       bprm, buffer, cond, unsafe));
+		if (error)
+			return ERR_PTR(error);
+		new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+				aa_get_newest_label(onexec),
+				profile_transition(profile, bprm, buffer,
+						   cond, unsafe));
+
+	} else {
+		/* TODO: determine how much we want to loosen this */
+		error = fn_for_each_in_ns(label, profile,
+				profile_onexec(profile, onexec, stack, bprm,
+					       buffer, cond, unsafe));
+		if (error)
+			return ERR_PTR(error);
+		new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+				aa_label_merge(&profile->label, onexec,
+					       GFP_ATOMIC),
+				profile_transition(profile, bprm, buffer,
+						   cond, unsafe));
+	}
+
+	if (new)
+		return new;
+
+	/* TODO: get rid of GLOBAL_ROOT_UID */
+	error = fn_for_each_in_ns(label, profile,
+			aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
+				      AA_MAY_ONEXEC, bprm->filename, NULL,
+				      onexec, GLOBAL_ROOT_UID,
+				      "failed to build target label", -ENOMEM));
+	return ERR_PTR(error);
 }
 
 /**
@@ -334,21 +741,22 @@ static struct aa_profile *x_to_profile(struct aa_profile *profile,
  * @bprm: binprm for the exec  (NOT NULL)
  *
  * Returns: %0 or error on failure
+ *
+ * TODO: once the other paths are done see if we can't refactor into a fn
  */
 int apparmor_bprm_set_creds(struct linux_binprm *bprm)
 {
 	struct aa_task_ctx *ctx;
-	struct aa_profile *profile, *new_profile = NULL;
-	struct aa_ns *ns;
+	struct aa_label *label, *new = NULL;
+	struct aa_profile *profile;
 	char *buffer = NULL;
-	unsigned int state;
-	struct file_perms perms = {};
+	const char *info = NULL;
+	int error = 0;
+	bool unsafe = false;
 	struct path_cond cond = {
 		file_inode(bprm->file)->i_uid,
 		file_inode(bprm->file)->i_mode
 	};
-	const char *name = NULL, *info = NULL;
-	int error = 0;
 
 	if (bprm->cred_prepared)
 		return 0;
@@ -356,168 +764,83 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
 	ctx = cred_ctx(bprm->cred);
 	AA_BUG(!ctx);
 
-	profile = aa_get_newest_profile(ctx->profile);
-	/*
-	 * get the namespace from the replacement profile as replacement
-	 * can change the namespace
-	 */
-	ns = profile->ns;
-	state = profile->file.start;
+	label = aa_get_newest_label(ctx->label);
 
 	/* buffer freed below, name is pointer into buffer */
-	error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
-			     &name, &info);
-	if (error) {
-		if (unconfined(profile) ||
-		    (profile->flags & PFLAG_IX_ON_NAME_ERROR))
-			error = 0;
-		name = bprm->filename;
-		goto audit;
+	get_buffers(buffer);
+	/* Test for onexec first as onexec overrides other x transitions. */
+	if (ctx->onexec)
+		new = handle_onexec(label, ctx->onexec, ctx->token,
+				    bprm, buffer, &cond, &unsafe);
+	else
+		new = fn_label_build(label, profile, GFP_ATOMIC,
+				profile_transition(profile, bprm, buffer,
+						   &cond, &unsafe));
+
+	AA_BUG(!new);
+	if (IS_ERR(new)) {
+		error = PTR_ERR(new);
+		goto done;
+	} else if (!new) {
+		error = -ENOMEM;
+		goto done;
 	}
 
-	/* Test for onexec first as onexec directives override other
-	 * x transitions.
-	 */
-	if (unconfined(profile)) {
-		/* unconfined task */
-		if (ctx->onexec)
-			/* change_profile on exec already been granted */
-			new_profile = aa_get_profile(ctx->onexec);
-		else
-			new_profile = find_attach(ns, &ns->base.profiles, name);
-		if (!new_profile)
-			goto cleanup;
-		/*
-		 * NOTE: Domain transitions from unconfined are allowed
-		 * even when no_new_privs is set because this aways results
-		 * in a further reduction of permissions.
-		 */
-		goto apply;
-	}
-
-	/* find exec permissions for name */
-	state = aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
-	if (ctx->onexec) {
-		struct file_perms cp;
-		info = "change_profile onexec";
-		new_profile = aa_get_newest_profile(ctx->onexec);
-		if (!(perms.allow & AA_MAY_ONEXEC))
-			goto audit;
-
-		/* test if this exec can be paired with change_profile onexec.
-		 * onexec permission is linked to exec with a standard pairing
-		 * exec\0change_profile
-		 */
-		state = aa_dfa_null_transition(profile->file.dfa, state);
-		cp = change_profile_perms(profile, ctx->onexec->ns,
-					  ctx->onexec->base.name,
-					  AA_MAY_ONEXEC, state);
-
-		if (!(cp.allow & AA_MAY_ONEXEC))
-			goto audit;
-		goto apply;
-	}
-
-	if (perms.allow & MAY_EXEC) {
-		/* exec permission determine how to transition */
-		new_profile = x_to_profile(profile, name, perms.xindex);
-		if (!new_profile) {
-			if (perms.xindex & AA_X_INHERIT) {
-				/* (p|c|n)ix - don't change profile but do
-				 * use the newest version, which was picked
-				 * up above when getting profile
-				 */
-				info = "ix fallback";
-				new_profile = aa_get_profile(profile);
-				goto x_clear;
-			} else if (perms.xindex & AA_X_UNCONFINED) {
-				new_profile = aa_get_newest_profile(ns->unconfined);
-				info = "ux fallback";
-			} else {
-				error = -EACCES;
-				info = "profile not found";
-				/* remove MAY_EXEC to audit as failure */
-				perms.allow &= ~MAY_EXEC;
-			}
-		}
-	} else if (COMPLAIN_MODE(profile)) {
-		/* no exec permission - are we in learning mode */
-		new_profile = aa_new_null_profile(profile, false, name,
-						  GFP_ATOMIC);
-		if (!new_profile) {
-			error = -ENOMEM;
-			info = "could not create null profile";
-		} else
-			error = -EACCES;
-		perms.xindex |= AA_X_UNSAFE;
-	} else
-		/* fail exec */
-		error = -EACCES;
-
-	/*
-	 * Policy has specified a domain transition, if no_new_privs then
-	 * fail the exec.
-	 */
-	if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
-		error = -EPERM;
-		goto cleanup;
-	}
-
-	if (!new_profile)
-		goto audit;
+	/* TODO: Add ns level no_new_privs subset test */
 
 	if (bprm->unsafe & LSM_UNSAFE_SHARE) {
 		/* FIXME: currently don't mediate shared state */
 		;
 	}
 
-	if (bprm->unsafe & LSM_UNSAFE_PTRACE) {
-		error = may_change_ptraced_domain(new_profile);
+	if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
+		/* TODO: test needs to be profile of label to new */
+		error = may_change_ptraced_domain(new, &info);
 		if (error)
 			goto audit;
 	}
 
-	/* Determine if secure exec is needed.
-	 * Can be at this point for the following reasons:
-	 * 1. unconfined switching to confined
-	 * 2. confined switching to different confinement
-	 * 3. confined switching to unconfined
-	 *
-	 * Cases 2 and 3 are marked as requiring secure exec
-	 * (unless policy specified "unsafe exec")
-	 *
-	 * bprm->unsafe is used to cache the AA_X_UNSAFE permission
-	 * to avoid having to recompute in secureexec
-	 */
-	if (!(perms.xindex & AA_X_UNSAFE)) {
-		AA_DEBUG("scrubbing environment variables for %s profile=%s\n",
-			 name, new_profile->base.hname);
+	if (unsafe) {
+		if (DEBUG_ON) {
+			dbg_printk("scrubbing environment variables for %s "
+				   "label=", bprm->filename);
+			aa_label_printk(new, GFP_ATOMIC);
+			dbg_printk("\n");
+		}
 		bprm->unsafe |= AA_SECURE_X_NEEDED;
 	}
-apply:
-	/* when transitioning profiles clear unsafe personality bits */
-	bprm->per_clear |= PER_CLEAR_ON_SETID;
 
-x_clear:
-	aa_put_profile(ctx->profile);
-	/* transfer new profile reference will be released when ctx is freed */
-	ctx->profile = new_profile;
-	new_profile = NULL;
+	if (label->proxy != new->proxy) {
+		/* when transitioning clear unsafe personality bits */
+		if (DEBUG_ON) {
+			dbg_printk("apparmor: clearing unsafe personality "
+				   "bits. %s label=", bprm->filename);
+			aa_label_printk(new, GFP_ATOMIC);
+			dbg_printk("\n");
+		}
+		bprm->per_clear |= PER_CLEAR_ON_SETID;
+	}
+	aa_put_label(ctx->label);
+	/* transfer reference, released when ctx is freed */
+	ctx->label = new;
 
-	/* clear out all temporary/transitional state from the context */
+done:
+	/* clear out temporary/transitional state from the context */
 	aa_clear_task_ctx_trans(ctx);
 
-audit:
-	error = aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name,
-			      new_profile ? new_profile->base.hname : NULL,
-			      cond.uid, info, error);
-
-cleanup:
-	aa_put_profile(new_profile);
-	aa_put_profile(profile);
-	kfree(buffer);
+	aa_put_label(label);
+	put_buffers(buffer);
 
 	return error;
+
+audit:
+	error = fn_for_each(label, profile,
+			aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
+				      bprm->filename, NULL, new,
+				      file_inode(bprm->file)->i_uid, info,
+				      error));
+	aa_put_label(new);
+	goto done;
 }
 
 /**
@@ -537,53 +860,157 @@ int apparmor_bprm_secureexec(struct linux_binprm *bprm)
 	return 0;
 }
 
-/**
- * apparmor_bprm_committing_creds - do task cleanup on committing new creds
- * @bprm: binprm for the exec  (NOT NULL)
- */
-void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
-{
-	struct aa_profile *profile = __aa_current_profile();
-	struct aa_task_ctx *new_ctx = cred_ctx(bprm->cred);
-
-	/* bail out if unconfined or not changing profile */
-	if ((new_ctx->profile == profile) ||
-	    (unconfined(new_ctx->profile)))
-		return;
-
-	current->pdeath_signal = 0;
-
-	/* reset soft limits and set hard limits for the new profile */
-	__aa_transition_rlimits(profile, new_ctx->profile);
-}
-
-/**
- * apparmor_bprm_commited_cred - do cleanup after new creds committed
- * @bprm: binprm for the exec  (NOT NULL)
- */
-void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
-{
-	/* TODO: cleanup signals - ipc mediation */
-	return;
-}
-
 /*
  * Functions for self directed profile change
  */
 
-/**
- * new_compound_name - create an hname with @n2 appended to @n1
- * @n1: base of hname  (NOT NULL)
- * @n2: name to append (NOT NULL)
+
+/* helper fn for change_hat
  *
- * Returns: new name or NULL on error
+ * Returns: label for hat transition OR ERR_PTR.  Does NOT return NULL
  */
-static char *new_compound_name(const char *n1, const char *n2)
+static struct aa_label *build_change_hat(struct aa_profile *profile,
+					 const char *name, bool sibling)
 {
-	char *name = kmalloc(strlen(n1) + strlen(n2) + 3, GFP_KERNEL);
-	if (name)
-		sprintf(name, "%s//%s", n1, n2);
-	return name;
+	struct aa_profile *root, *hat = NULL;
+	const char *info = NULL;
+	int error = 0;
+
+	if (sibling && PROFILE_IS_HAT(profile)) {
+		root = aa_get_profile_rcu(&profile->parent);
+	} else if (!sibling && !PROFILE_IS_HAT(profile)) {
+		root = aa_get_profile(profile);
+	} else {
+		info = "conflicting target types";
+		error = -EPERM;
+		goto audit;
+	}
+
+	hat = aa_find_child(root, name);
+	if (!hat) {
+		error = -ENOENT;
+		if (COMPLAIN_MODE(profile)) {
+			hat = aa_new_null_profile(profile, true, name,
+						  GFP_KERNEL);
+			if (!hat) {
+				info = "failed null profile create";
+				error = -ENOMEM;
+			}
+		}
+	}
+	aa_put_profile(root);
+
+audit:
+	aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
+		      name, hat ? hat->base.hname : NULL,
+		      hat ? &hat->label : NULL, GLOBAL_ROOT_UID, NULL,
+		      error);
+	if (!hat || (error && error != -ENOENT))
+		return ERR_PTR(error);
+	/* if hat && error - complain mode, already audited and we adjust for
+	 * complain mode allow by returning hat->label
+	 */
+	return &hat->label;
+}
+
+/* helper fn for changing into a hat
+ *
+ * Returns: label for hat transition or ERR_PTR. Does not return NULL
+ */
+static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
+				   int count, int flags)
+{
+	struct aa_profile *profile, *root, *hat = NULL;
+	struct aa_label *new;
+	struct label_it it;
+	bool sibling = false;
+	const char *name, *info = NULL;
+	int i, error;
+
+	AA_BUG(!label);
+	AA_BUG(!hats);
+	AA_BUG(count < 1);
+
+	if (PROFILE_IS_HAT(labels_profile(label)))
+		sibling = true;
+
+	/* find first matching hat */
+	for (i = 0; i < count && !hat; i++) {
+		name = hats[i];
+		label_for_each_in_ns(it, labels_ns(label), label, profile) {
+			if (sibling && PROFILE_IS_HAT(profile)) {
+				root = aa_get_profile_rcu(&profile->parent);
+			} else if (!sibling && !PROFILE_IS_HAT(profile)) {
+				root = aa_get_profile(profile);
+			} else {	/* conflicting change type */
+				info = "conflicting target types";
+				error = -EPERM;
+				goto fail;
+			}
+			hat = aa_find_child(root, name);
+			aa_put_profile(root);
+			if (!hat) {
+				if (!COMPLAIN_MODE(profile))
+					goto outer_continue;
+				/* complain mode succeeds as if hat found */
+			} else if (!PROFILE_IS_HAT(hat)) {
+				info = "target not hat";
+				error = -EPERM;
+				aa_put_profile(hat);
+				goto fail;
+			}
+			aa_put_profile(hat);
+		}
+		/* found a hat for all profiles in ns */
+		goto build;
+outer_continue:
+	;
+	}
+	/* no hats that match, find appropriate error
+	 *
+	 * In complain mode audit of the failure is based off of the first
+	 * hat supplied.  This is done due to how userspace interacts with
+	 * change_hat.
+	 */
+	name = NULL;
+	label_for_each_in_ns(it, labels_ns(label), label, profile) {
+		if (!list_empty(&profile->base.profiles)) {
+			info = "hat not found";
+			error = -ENOENT;
+			goto fail;
+		}
+	}
+	info = "no hats defined";
+	error = -ECHILD;
+
+fail:
+	label_for_each_in_ns(it, labels_ns(label), label, profile) {
+		/*
+		 * no target as it has failed to be found or built
+		 *
+		 * change_hat uses probing and should not log failures
+		 * related to missing hats
+		 */
+		/* TODO: get rid of GLOBAL_ROOT_UID */
+		if (count > 1 || COMPLAIN_MODE(profile)) {
+			aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
+				      AA_MAY_CHANGEHAT, name, NULL, NULL,
+				      GLOBAL_ROOT_UID, info, error);
+		}
+	}
+	return ERR_PTR(error);
+
+build:
+	new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+				   build_change_hat(profile, name, sibling),
+				   aa_get_label(&profile->label));
+	if (!new) {
+		info = "label build failed";
+		error = -ENOMEM;
+		goto fail;
+	} /* else if (IS_ERR) build_change_hat has logged error so return new */
+
+	return new;
 }
 
 /**
@@ -591,24 +1018,26 @@ static char *new_compound_name(const char *n1, const char *n2)
  * @hats: vector of hat names to try changing into (MAYBE NULL if @count == 0)
  * @count: number of hat names in @hats
  * @token: magic value to validate the hat change
- * @permtest: true if this is just a permission test
+ * @flags: flags affecting behavior of the change
+ *
+ * Returns %0 on success, error otherwise.
  *
  * Change to the first profile specified in @hats that exists, and store
  * the @hat_magic in the current task context.  If the count == 0 and the
  * @token matches that stored in the current task context, return to the
  * top level profile.
  *
- * Returns %0 on success, error otherwise.
+ * change_hat only applies to profiles in the current ns, and each profile
+ * in the ns must make the same transition or change_hat will fail.
  */
-int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+int aa_change_hat(const char *hats[], int count, u64 token, int flags)
 {
 	const struct cred *cred;
 	struct aa_task_ctx *ctx;
-	struct aa_profile *profile, *previous_profile, *hat = NULL;
-	char *name = NULL;
-	int i;
-	struct file_perms perms = {};
-	const char *target = NULL, *info = NULL;
+	struct aa_label *label, *previous, *new = NULL, *target = NULL;
+	struct aa_profile *profile;
+	struct aa_perms perms = {};
+	const char *info = NULL;
 	int error = 0;
 
 	/*
@@ -616,122 +1045,120 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
 	 * There is no exception for unconfined as change_hat is not
 	 * available.
 	 */
-	if (task_no_new_privs(current))
+	if (task_no_new_privs(current)) {
+		/* not an apparmor denial per se, so don't log it */
+		AA_DEBUG("no_new_privs - change_hat denied");
 		return -EPERM;
+	}
 
 	/* released below */
 	cred = get_current_cred();
 	ctx = cred_ctx(cred);
-	profile = aa_get_newest_profile(aa_cred_profile(cred));
-	previous_profile = aa_get_newest_profile(ctx->previous);
+	label = aa_get_newest_cred_label(cred);
+	previous = aa_get_newest_label(ctx->previous);
 
-	if (unconfined(profile)) {
-		info = "unconfined";
+	if (unconfined(label)) {
+		info = "unconfined can not change_hat";
 		error = -EPERM;
-		goto audit;
+		goto fail;
 	}
 
 	if (count) {
-		/* attempting to change into a new hat or switch to a sibling */
-		struct aa_profile *root;
-		if (PROFILE_IS_HAT(profile))
-			root = aa_get_profile_rcu(&profile->parent);
-		else
-			root = aa_get_profile(profile);
-
-		/* find first matching hat */
-		for (i = 0; i < count && !hat; i++)
-			/* released below */
-			hat = aa_find_child(root, hats[i]);
-		if (!hat) {
-			if (!COMPLAIN_MODE(root) || permtest) {
-				if (list_empty(&root->base.profiles))
-					error = -ECHILD;
-				else
-					error = -ENOENT;
-				aa_put_profile(root);
-				goto out;
-			}
-
-			/*
-			 * In complain mode and failed to match any hats.
-			 * Audit the failure is based off of the first hat
-			 * supplied.  This is done due how userspace
-			 * interacts with change_hat.
-			 *
-			 * TODO: Add logging of all failed hats
-			 */
-
-			/* freed below */
-			name = new_compound_name(root->base.hname, hats[0]);
-			aa_put_profile(root);
-			target = name;
-			/* released below */
-			hat = aa_new_null_profile(profile, true, hats[0],
-						  GFP_KERNEL);
-			if (!hat) {
-				info = "failed null profile create";
-				error = -ENOMEM;
-				goto audit;
-			}
-		} else {
-			aa_put_profile(root);
-			target = hat->base.hname;
-			if (!PROFILE_IS_HAT(hat)) {
-				info = "target not hat";
-				error = -EPERM;
-				goto audit;
-			}
+		new = change_hat(label, hats, count, flags);
+		AA_BUG(!new);
+		if (IS_ERR(new)) {
+			error = PTR_ERR(new);
+			new = NULL;
+			/* already audited */
+			goto out;
 		}
 
-		error = may_change_ptraced_domain(hat);
-		if (error) {
-			info = "ptraced";
-			error = -EPERM;
-			goto audit;
-		}
+		error = may_change_ptraced_domain(new, &info);
+		if (error)
+			goto fail;
 
-		if (!permtest) {
-			error = aa_set_current_hat(hat, token);
-			if (error == -EACCES)
-				/* kill task in case of brute force attacks */
-				perms.kill = AA_MAY_CHANGEHAT;
-			else if (name && !error)
-				/* reset error for learning of new hats */
-				error = -ENOENT;
-		}
-	} else if (previous_profile) {
-		/* Return to saved profile.  Kill task if restore fails
+		if (flags & AA_CHANGE_TEST)
+			goto out;
+
+		target = new;
+		error = aa_set_current_hat(new, token);
+		if (error == -EACCES)
+			/* kill task in case of brute force attacks */
+			goto kill;
+	} else if (previous && !(flags & AA_CHANGE_TEST)) {
+		/* Return to saved label.  Kill task if restore fails
 		 * to avoid brute force attacks
 		 */
-		target = previous_profile->base.hname;
-		error = aa_restore_previous_profile(token);
-		perms.kill = AA_MAY_CHANGEHAT;
-	} else
-		/* ignore restores when there is no saved profile */
-		goto out;
-
-audit:
-	if (!permtest)
-		error = aa_audit_file(profile, &perms, OP_CHANGE_HAT,
-				      AA_MAY_CHANGEHAT, NULL, target,
-				      GLOBAL_ROOT_UID, info, error);
+		target = previous;
+		error = aa_restore_previous_label(token);
+		if (error) {
+			if (error == -EACCES)
+				goto kill;
+			goto fail;
+		}
+	} /* else ignore @flags && restores when there is no saved profile */
 
 out:
-	aa_put_profile(hat);
-	kfree(name);
-	aa_put_profile(profile);
-	aa_put_profile(previous_profile);
+	aa_put_label(new);
+	aa_put_label(previous);
+	aa_put_label(label);
 	put_cred(cred);
 
 	return error;
+
+kill:
+	info = "failed token match";
+	perms.kill = AA_MAY_CHANGEHAT;
+
+fail:
+	fn_for_each_in_ns(label, profile,
+		aa_audit_file(profile, &perms, OP_CHANGE_HAT,
+			      AA_MAY_CHANGEHAT, NULL, NULL, target,
+			      GLOBAL_ROOT_UID, info, error));
+
+	goto out;
+}
+
+
+static int change_profile_perms_wrapper(const char *op, const char *name,
+					struct aa_profile *profile,
+					struct aa_label *target, bool stack,
+					u32 request, struct aa_perms *perms)
+{
+	const char *info = NULL;
+	int error = 0;
+
+	/*
+	 * Fail explicitly requested domain transitions when no_new_privs
+	 * and not unconfined OR the transition results in a stack on
+	 * the current label.
+	 * Stacking domain transitions and transitions from unconfined are
+	 * allowed even when no_new_privs is set because this always results
+	 * in a reduction of permissions.
+	 */
+	if (task_no_new_privs(current) && !stack &&
+	    !profile_unconfined(profile) &&
+	    !aa_label_is_subset(target, &profile->label)) {
+		info = "no new privs";
+		error = -EPERM;
+	}
+
+	if (!error)
+		error = change_profile_perms(profile, target, stack, request,
+					     profile->file.start, perms);
+	if (error)
+		error = aa_audit_file(profile, perms, op, request, name,
+				      NULL, target, GLOBAL_ROOT_UID, info,
+				      error);
+
+	return error;
 }
 
 /**
  * aa_change_profile - perform a one-way profile transition
  * @fqname: name of profile may include namespace (NOT NULL)
  * @onexec: whether this transition is to take place immediately or at exec
- * @permtest: true if this is just a permission test
+ * @flags: flags affecting change behavior
  *
  * Change to new profile @name.  Unlike with hats, there is no way
  * to change back.  If @name isn't specified the current profile name is
@@ -741,14 +1168,16 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
  *
  * Returns %0 on success, error otherwise.
  */
-int aa_change_profile(const char *fqname, bool onexec,
-		      bool permtest, bool stack)
+int aa_change_profile(const char *fqname, int flags)
 {
-	const struct cred *cred;
-	struct aa_profile *profile, *target = NULL;
-	struct file_perms perms = {};
-	const char *info = NULL, *op;
+	struct aa_label *label, *new = NULL, *target = NULL;
+	struct aa_profile *profile;
+	struct aa_perms perms = {};
+	const char *info = NULL;
+	const char *auditname = fqname;		/* retain leading & if stack */
+	bool stack = flags & AA_CHANGE_STACK;
 	int error = 0;
+	char *op;
 	u32 request;
 
 	if (!fqname || !*fqname) {
@@ -756,74 +1185,118 @@ int aa_change_profile(const char *fqname, bool onexec,
 		return -EINVAL;
 	}
 
-	if (onexec) {
+	if (flags & AA_CHANGE_ONEXEC) {
 		request = AA_MAY_ONEXEC;
-		op = OP_CHANGE_ONEXEC;
+		if (stack)
+			op = OP_STACK_ONEXEC;
+		else
+			op = OP_CHANGE_ONEXEC;
 	} else {
 		request = AA_MAY_CHANGE_PROFILE;
-		op = OP_CHANGE_PROFILE;
+		if (stack)
+			op = OP_STACK;
+		else
+			op = OP_CHANGE_PROFILE;
 	}
 
-	cred = get_current_cred();
-	profile = aa_cred_profile(cred);
+	label = aa_get_current_label();
 
-	/*
-	 * Fail explicitly requested domain transitions if no_new_privs
-	 * and not unconfined.
-	 * Domain transitions from unconfined are allowed even when
-	 * no_new_privs is set because this aways results in a reduction
-	 * of permissions.
-	 */
-	if (task_no_new_privs(current) && !unconfined(profile)) {
-		put_cred(cred);
-		return -EPERM;
+	if (*fqname == '&') {
+		stack = true;
+		/* don't have label_parse() do stacking */
+		fqname++;
 	}
+	target = aa_label_parse(label, fqname, GFP_KERNEL, true, false);
+	if (IS_ERR(target)) {
+		struct aa_profile *tprofile;
 
-	target = aa_fqlookupn_profile(profile, fqname, strlen(fqname));
-	if (!target) {
-		info = "profile not found";
-		error = -ENOENT;
-		if (permtest || !COMPLAIN_MODE(profile))
+		info = "label not found";
+		error = PTR_ERR(target);
+		target = NULL;
+		/*
+		 * TODO: fixme using labels_profile is not right - do profile
+		 * per complain profile
+		 */
+		if ((flags & AA_CHANGE_TEST) ||
+		    !COMPLAIN_MODE(labels_profile(label)))
 			goto audit;
 		/* released below */
-		target = aa_new_null_profile(profile, false, fqname,
-					     GFP_KERNEL);
-		if (!target) {
+		tprofile = aa_new_null_profile(labels_profile(label), false,
+					       fqname, GFP_KERNEL);
+		if (!tprofile) {
 			info = "failed null profile create";
 			error = -ENOMEM;
 			goto audit;
 		}
+		target = &tprofile->label;
+		goto check;
 	}
 
-	perms = change_profile_perms(profile, target->ns, target->base.hname,
-				     request, profile->file.start);
-	if (!(perms.allow & request)) {
-		error = -EACCES;
-		goto audit;
-	}
+	/*
+	 * self directed transitions only apply to current policy ns
+	 * TODO: currently requiring perms for stacking and straight change
+	 *       stacking doesn't strictly need this. Determine how much
+	 *       we want to loosen this restriction for stacking
+	 *
+	 * if (!stack) {
+	 */
+	error = fn_for_each_in_ns(label, profile,
+			change_profile_perms_wrapper(op, auditname,
+						     profile, target, stack,
+						     request, &perms));
+	if (error)
+		/* auditing done in change_profile_perms_wrapper */
+		goto out;
 
+	/* } */
+
+check:
 	/* check if tracing task is allowed to trace target domain */
-	error = may_change_ptraced_domain(target);
-	if (error) {
-		info = "ptrace prevents transition";
-		goto audit;
-	}
-
-	if (permtest)
+	error = may_change_ptraced_domain(target, &info);
+	if (error && !fn_for_each_in_ns(label, profile,
+					COMPLAIN_MODE(profile)))
 		goto audit;
 
-	if (onexec)
-		error = aa_set_current_onexec(target);
-	else
-		error = aa_replace_current_profile(target);
+	/* TODO: add permission check to allow this
+	 * if ((flags & AA_CHANGE_ONEXEC) && !current_is_single_threaded()) {
+	 *      info = "not a single threaded task";
+	 *      error = -EACCES;
+	 *      goto audit;
+	 * }
+	 */
+	if (flags & AA_CHANGE_TEST)
+		goto out;
+
+	if (!(flags & AA_CHANGE_ONEXEC)) {
+		/* only transition profiles in the current ns */
+		if (stack)
+			new = aa_label_merge(label, target, GFP_KERNEL);
+		else
+			new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
+					aa_get_label(target),
+					aa_get_label(&profile->label));
+		if (IS_ERR_OR_NULL(new)) {
+			info = "failed to build target label";
+			error = PTR_ERR(new);
+			new = NULL;
+			perms.allow = 0;
+			goto audit;
+		}
+		error = aa_replace_current_label(new);
+	} else
+		/* full transition will be built in exec path */
+		error = aa_set_current_onexec(target, stack);
 
 audit:
-	if (!permtest)
-		error = aa_audit_file(profile, &perms, op, request, NULL,
-				      fqname, GLOBAL_ROOT_UID, info, error);
+	error = fn_for_each_in_ns(label, profile,
+			aa_audit_file(profile, &perms, op, request, auditname,
+				      NULL, new ? new : target,
+				      GLOBAL_ROOT_UID, info, error));
 
-	aa_put_profile(target);
-	put_cred(cred);
+out:
+	aa_put_label(new);
+	aa_put_label(target);
+	aa_put_label(label);
 
 	return error;
 }
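
For reference, the boolean permtest/onexec/stack parameters removed above are
folded into a single flags argument.  A minimal sketch, assuming only the
AA_CHANGE_* flag values introduced later in this diff (include/domain.h); the
helper name is hypothetical and not part of this patch:

static int example_change_profile(const char *fqname, bool onexec,
				  bool permtest, bool stack)
{
	int flags = AA_CHANGE_NOFLAGS;

	/* map the old boolean arguments onto the new flag bits */
	if (onexec)
		flags |= AA_CHANGE_ONEXEC;
	if (permtest)
		flags |= AA_CHANGE_TEST;
	if (stack)
		flags |= AA_CHANGE_STACK;

	return aa_change_profile(fqname, flags);
}
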
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 750564c..3382518 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -12,15 +12,30 @@
  * License.
  */
 
+#include <linux/tty.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+
 #include "include/apparmor.h"
 #include "include/audit.h"
+#include "include/context.h"
 #include "include/file.h"
 #include "include/match.h"
 #include "include/path.h"
 #include "include/policy.h"
+#include "include/label.h"
 
-struct file_perms nullperms;
+static u32 map_mask_to_chr_mask(u32 mask)
+{
+	u32 m = mask & PERMS_CHRS_MASK;
 
+	if (mask & AA_MAY_GETATTR)
+		m |= MAY_READ;
+	if (mask & (AA_MAY_SETATTR | AA_MAY_CHMOD | AA_MAY_CHOWN))
+		m |= MAY_WRITE;
+
+	return m;
+}
 
 /**
  * audit_file_mask - convert mask to permission string
@@ -31,29 +46,7 @@ static void audit_file_mask(struct audit_buffer *ab, u32 mask)
 {
 	char str[10];
 
-	char *m = str;
-
-	if (mask & AA_EXEC_MMAP)
-		*m++ = 'm';
-	if (mask & (MAY_READ | AA_MAY_META_READ))
-		*m++ = 'r';
-	if (mask & (MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CHMOD |
-		    AA_MAY_CHOWN))
-		*m++ = 'w';
-	else if (mask & MAY_APPEND)
-		*m++ = 'a';
-	if (mask & AA_MAY_CREATE)
-		*m++ = 'c';
-	if (mask & AA_MAY_DELETE)
-		*m++ = 'd';
-	if (mask & AA_MAY_LINK)
-		*m++ = 'l';
-	if (mask & AA_MAY_LOCK)
-		*m++ = 'k';
-	if (mask & MAY_EXEC)
-		*m++ = 'x';
-	*m = '\0';
-
+	aa_perm_mask_to_str(str, aa_file_perm_chrs, map_mask_to_chr_mask(mask));
 	audit_log_string(ab, str);
 }
 
@@ -67,22 +60,26 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
 	struct common_audit_data *sa = va;
 	kuid_t fsuid = current_fsuid();
 
-	if (aad(sa)->fs.request & AA_AUDIT_FILE_MASK) {
+	if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " requested_mask=");
-		audit_file_mask(ab, aad(sa)->fs.request);
+		audit_file_mask(ab, aad(sa)->request);
 	}
-	if (aad(sa)->fs.denied & AA_AUDIT_FILE_MASK) {
+	if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " denied_mask=");
-		audit_file_mask(ab, aad(sa)->fs.denied);
+		audit_file_mask(ab, aad(sa)->denied);
 	}
-	if (aad(sa)->fs.request & AA_AUDIT_FILE_MASK) {
+	if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
 		audit_log_format(ab, " fsuid=%d",
 				 from_kuid(&init_user_ns, fsuid));
 		audit_log_format(ab, " ouid=%d",
 				 from_kuid(&init_user_ns, aad(sa)->fs.ouid));
 	}
 
-	if (aad(sa)->fs.target) {
+	if (aad(sa)->peer) {
+		audit_log_format(ab, " target=");
+		aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+				FLAG_VIEW_SUBNS, GFP_ATOMIC);
+	} else if (aad(sa)->fs.target) {
 		audit_log_format(ab, " target=");
 		audit_log_untrustedstring(ab, aad(sa)->fs.target);
 	}
@@ -92,28 +89,30 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
  * aa_audit_file - handle the auditing of file operations
  * @profile: the profile being enforced  (NOT NULL)
  * @perms: the permissions computed for the request (NOT NULL)
- * @gfp: allocation flags
  * @op: operation being mediated
  * @request: permissions requested
  * @name: name of object being mediated (MAYBE NULL)
  * @target: name of target (MAYBE NULL)
+ * @tlabel: target label (MAYBE NULL)
  * @ouid: object uid
  * @info: extra information message (MAYBE NULL)
  * @error: 0 if operation allowed else failure error code
  *
  * Returns: %0 or error on failure
  */
-int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
 		  const char *op, u32 request, const char *name,
-		  const char *target, kuid_t ouid, const char *info, int error)
+		  const char *target, struct aa_label *tlabel,
+		  kuid_t ouid, const char *info, int error)
 {
 	int type = AUDIT_APPARMOR_AUTO;
 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, op);
 
 	sa.u.tsk = NULL;
-	aad(&sa)->fs.request = request;
+	aad(&sa)->request = request;
 	aad(&sa)->name = name;
 	aad(&sa)->fs.target = target;
+	aad(&sa)->peer = tlabel;
 	aad(&sa)->fs.ouid = ouid;
 	aad(&sa)->info = info;
 	aad(&sa)->error = error;
@@ -126,34 +125,67 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
 			mask = 0xffff;
 
 		/* mask off perms that are not being force audited */
-		aad(&sa)->fs.request &= mask;
+		aad(&sa)->request &= mask;
 
-		if (likely(!aad(&sa)->fs.request))
+		if (likely(!aad(&sa)->request))
 			return 0;
 		type = AUDIT_APPARMOR_AUDIT;
 	} else {
 		/* only report permissions that were denied */
-		aad(&sa)->fs.request = aad(&sa)->fs.request & ~perms->allow;
-		AA_BUG(!aad(&sa)->fs.request);
+		aad(&sa)->request = aad(&sa)->request & ~perms->allow;
+		AA_BUG(!aad(&sa)->request);
 
-		if (aad(&sa)->fs.request & perms->kill)
+		if (aad(&sa)->request & perms->kill)
 			type = AUDIT_APPARMOR_KILL;
 
 		/* quiet known rejects, assumes quiet and kill do not overlap */
-		if ((aad(&sa)->fs.request & perms->quiet) &&
+		if ((aad(&sa)->request & perms->quiet) &&
 		    AUDIT_MODE(profile) != AUDIT_NOQUIET &&
 		    AUDIT_MODE(profile) != AUDIT_ALL)
-			aad(&sa)->fs.request &= ~perms->quiet;
+			aad(&sa)->request &= ~perms->quiet;
 
-		if (!aad(&sa)->fs.request)
-			return COMPLAIN_MODE(profile) ? 0 : aad(&sa)->error;
+		if (!aad(&sa)->request)
+			return aad(&sa)->error;
 	}
 
-	aad(&sa)->fs.denied = aad(&sa)->fs.request & ~perms->allow;
+	aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
 	return aa_audit(type, profile, &sa, file_audit_cb);
 }
 
 /**
+ * is_deleted - test if a file has been completely unlinked
+ * @dentry: dentry of file to test for deletion  (NOT NULL)
+ *
+ * Returns: %1 if deleted else %0
+ */
+static inline bool is_deleted(struct dentry *dentry)
+{
+	if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
+		return 1;
+	return 0;
+}
+
+static int path_name(const char *op, struct aa_label *label,
+		     const struct path *path, int flags, char *buffer,
+		     const char **name, struct path_cond *cond, u32 request)
+{
+	struct aa_profile *profile;
+	const char *info = NULL;
+	int error;
+
+	error = aa_path_name(path, flags, buffer, name, &info,
+			     labels_profile(label)->disconnected);
+	if (error) {
+		fn_for_each_confined(label, profile,
+			aa_audit_file(profile, &nullperms, op, request, *name,
+				      NULL, NULL, cond->uid, info, error));
+		return error;
+	}
+
+	return 0;
+}
+
+/**
  * map_old_perms - map old file perms layout to the new layout
  * @old: permission set in old mapping
  *
@@ -163,10 +195,10 @@ static u32 map_old_perms(u32 old)
 {
 	u32 new = old & 0xf;
 	if (old & MAY_READ)
-		new |= AA_MAY_META_READ;
+		new |= AA_MAY_GETATTR | AA_MAY_OPEN;
 	if (old & MAY_WRITE)
-		new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE |
-			AA_MAY_CHMOD | AA_MAY_CHOWN;
+		new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
+		       AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
 	if (old & 0x10)
 		new |= AA_MAY_LINK;
 	/* the old mapping lock and link_subset flags where overlaid
@@ -181,7 +213,7 @@ static u32 map_old_perms(u32 old)
 }
 
 /**
- * compute_perms - convert dfa compressed perms to internal perms
+ * aa_compute_fperms - convert dfa compressed perms to internal perms
  * @dfa: dfa to compute perms for   (NOT NULL)
  * @state: state in dfa
  * @cond:  conditions to consider  (NOT NULL)
@@ -191,17 +223,21 @@ static u32 map_old_perms(u32 old)
  *
  * Returns: computed permission set
  */
-static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
-				       struct path_cond *cond)
+struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
+				  struct path_cond *cond)
 {
-	struct file_perms perms;
+	struct aa_perms perms;
 
 	/* FIXME: change over to new dfa format
 	 * currently file perms are encoded in the dfa, new format
 	 * splits the permissions from the dfa.  This mapping can be
 	 * done at profile load
 	 */
-	perms.kill = 0;
+	perms.deny = 0;
+	perms.kill = perms.stop = 0;
+	perms.complain = perms.cond = 0;
+	perms.hide = 0;
+	perms.prompt = 0;
 
 	if (uid_eq(current_fsuid(), cond->uid)) {
 		perms.allow = map_old_perms(dfa_user_allow(dfa, state));
@@ -214,7 +250,7 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
 		perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
 		perms.xindex = dfa_other_xindex(dfa, state);
 	}
-	perms.allow |= AA_MAY_META_READ;
+	perms.allow |= AA_MAY_GETATTR;
 
 	/* change_profile wasn't determined by ownership in old mapping */
 	if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
@@ -237,37 +273,55 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
  */
 unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
 			  const char *name, struct path_cond *cond,
-			  struct file_perms *perms)
+			  struct aa_perms *perms)
 {
 	unsigned int state;
-	if (!dfa) {
-		*perms = nullperms;
-		return DFA_NOMATCH;
-	}
-
 	state = aa_dfa_match(dfa, start, name);
-	*perms = compute_perms(dfa, state, cond);
+	*perms = aa_compute_fperms(dfa, state, cond);
 
 	return state;
 }
 
-/**
- * is_deleted - test if a file has been completely unlinked
- * @dentry: dentry of file to test for deletion  (NOT NULL)
- *
- * Returns: %1 if deleted else %0
- */
-static inline bool is_deleted(struct dentry *dentry)
+int __aa_path_perm(const char *op, struct aa_profile *profile, const char *name,
+		   u32 request, struct path_cond *cond, int flags,
+		   struct aa_perms *perms)
 {
-	if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
-		return 1;
-	return 0;
+	int e = 0;
+
+	if (profile_unconfined(profile))
+		return 0;
+	aa_str_perms(profile->file.dfa, profile->file.start, name, cond, perms);
+	if (request & ~perms->allow)
+		e = -EACCES;
+	return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
+			     cond->uid, NULL, e);
+}
+
+
+static int profile_path_perm(const char *op, struct aa_profile *profile,
+			     const struct path *path, char *buffer, u32 request,
+			     struct path_cond *cond, int flags,
+			     struct aa_perms *perms)
+{
+	const char *name;
+	int error;
+
+	if (profile_unconfined(profile))
+		return 0;
+
+	error = path_name(op, &profile->label, path,
+			  flags | profile->path_flags, buffer, &name, cond,
+			  request);
+	if (error)
+		return error;
+	return __aa_path_perm(op, profile, name, request, cond, flags,
+			      perms);
 }
 
 /**
  * aa_path_perm - do permissions check & audit for @path
  * @op: operation being checked
- * @profile: profile being enforced  (NOT NULL)
+ * @label: label being enforced  (NOT NULL)
  * @path: path to check permissions of  (NOT NULL)
  * @flags: any additional path flags beyond what the profile specifies
  * @request: requested permissions
@@ -275,35 +329,23 @@ static inline bool is_deleted(struct dentry *dentry)
  *
  * Returns: %0 else error if access denied or other error
  */
-int aa_path_perm(const char *op, struct aa_profile *profile,
+int aa_path_perm(const char *op, struct aa_label *label,
 		 const struct path *path, int flags, u32 request,
 		 struct path_cond *cond)
 {
+	struct aa_perms perms = {};
+	struct aa_profile *profile;
 	char *buffer = NULL;
-	struct file_perms perms = {};
-	const char *name, *info = NULL;
 	int error;
 
-	flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0);
-	error = aa_path_name(path, flags, &buffer, &name, &info);
-	if (error) {
-		if (error == -ENOENT && is_deleted(path->dentry)) {
-			/* Access to open files that are deleted are
-			 * give a pass (implicit delegation)
-			 */
-			error = 0;
-			info = NULL;
-			perms.allow = request;
-		}
-	} else {
-		aa_str_perms(profile->file.dfa, profile->file.start, name, cond,
-			     &perms);
-		if (request & ~perms.allow)
-			error = -EACCES;
-	}
-	error = aa_audit_file(profile, &perms, op, request, name, NULL,
-			      cond->uid, info, error);
-	kfree(buffer);
+	flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
+								0);
+	get_buffers(buffer);
+	error = fn_for_each_confined(label, profile,
+			profile_path_perm(op, profile, path, buffer, request,
+					  cond, flags, &perms));
+
+	put_buffers(buffer);
 
 	return error;
 }
@@ -328,65 +370,40 @@ static inline bool xindex_is_subset(u32 link, u32 target)
 	return 1;
 }
 
-/**
- * aa_path_link - Handle hard link permission check
- * @profile: the profile being enforced  (NOT NULL)
- * @old_dentry: the target dentry  (NOT NULL)
- * @new_dir: directory the new link will be created in  (NOT NULL)
- * @new_dentry: the link being created  (NOT NULL)
- *
- * Handle the permission test for a link & target pair.  Permission
- * is encoded as a pair where the link permission is determined
- * first, and if allowed, the target is tested.  The target test
- * is done from the point of the link match (not start of DFA)
- * making the target permission dependent on the link permission match.
- *
- * The subset test if required forces that permissions granted
- * on link are a subset of the permission granted to target.
- *
- * Returns: %0 if allowed else error
- */
-int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
-		 const struct path *new_dir, struct dentry *new_dentry)
+static int profile_path_link(struct aa_profile *profile,
+			     const struct path *link, char *buffer,
+			     const struct path *target, char *buffer2,
+			     struct path_cond *cond)
 {
-	struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
-	struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
-	struct path_cond cond = {
-		d_backing_inode(old_dentry)->i_uid,
-		d_backing_inode(old_dentry)->i_mode
-	};
-	char *buffer = NULL, *buffer2 = NULL;
-	const char *lname, *tname = NULL, *info = NULL;
-	struct file_perms lperms, perms;
+	const char *lname, *tname = NULL;
+	struct aa_perms lperms = {}, perms;
+	const char *info = NULL;
 	u32 request = AA_MAY_LINK;
 	unsigned int state;
 	int error;
 
-	lperms = nullperms;
-
-	/* buffer freed below, lname is pointer in buffer */
-	error = aa_path_name(&link, profile->path_flags, &buffer, &lname,
-			     &info);
+	error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
+			  buffer, &lname, cond, AA_MAY_LINK);
 	if (error)
 		goto audit;
 
 	/* buffer2 freed below, tname is pointer in buffer2 */
-	error = aa_path_name(&target, profile->path_flags, &buffer2, &tname,
-			     &info);
+	error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
+			  buffer2, &tname, cond, AA_MAY_LINK);
 	if (error)
 		goto audit;
 
 	error = -EACCES;
 	/* aa_str_perms - handles the case of the dfa being NULL */
 	state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
-			     &cond, &lperms);
+			     cond, &lperms);
 
 	if (!(lperms.allow & AA_MAY_LINK))
 		goto audit;
 
 	/* test to see if target can be paired with link */
 	state = aa_dfa_null_transition(profile->file.dfa, state);
-	aa_str_perms(profile->file.dfa, state, tname, &cond, &perms);
+	aa_str_perms(profile->file.dfa, state, tname, cond, &perms);
 
 	/* force audit/quiet masks for link are stored in the second entry
 	 * in the link pair.
@@ -397,6 +414,7 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
 
 	if (!(perms.allow & AA_MAY_LINK)) {
 		info = "target restricted";
+		lperms = perms;
 		goto audit;
 	}
 
@@ -404,10 +422,10 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
 	if (!(perms.allow & AA_LINK_SUBSET))
 		goto done_tests;
 
-	/* Do link perm subset test requiring allowed permission on link are a
-	 * subset of the allowed permissions on target.
+	/* Do link perm subset test requiring the permissions allowed on link
+	 * to be a subset of the permissions allowed on target.
 	 */
-	aa_str_perms(profile->file.dfa, profile->file.start, tname, &cond,
+	aa_str_perms(profile->file.dfa, profile->file.start, tname, cond,
 		     &perms);
 
 	/* AA_MAY_LINK is not considered in the subset test */
@@ -429,10 +447,121 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
 	error = 0;
 
 audit:
-	error = aa_audit_file(profile, &lperms, OP_LINK, request,
-			      lname, tname, cond.uid, info, error);
-	kfree(buffer);
-	kfree(buffer2);
+	return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
+			     NULL, cond->uid, info, error);
+}
+
+/**
+ * aa_path_link - Handle hard link permission check
+ * @label: the label being enforced  (NOT NULL)
+ * @old_dentry: the target dentry  (NOT NULL)
+ * @new_dir: directory the new link will be created in  (NOT NULL)
+ * @new_dentry: the link being created  (NOT NULL)
+ *
+ * Handle the permission test for a link & target pair.  Permission
+ * is encoded as a pair where the link permission is determined
+ * first, and if allowed, the target is tested.  The target test
+ * is done from the point of the link match (not start of DFA)
+ * making the target permission dependent on the link permission match.
+ *
+ * The subset test, if required, forces the permissions granted
+ * on link to be a subset of the permissions granted to target.
+ *
+ * Returns: %0 if allowed else error
+ */
+int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
+		 const struct path *new_dir, struct dentry *new_dentry)
+{
+	struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+	struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
+	struct path_cond cond = {
+		d_backing_inode(old_dentry)->i_uid,
+		d_backing_inode(old_dentry)->i_mode
+	};
+	char *buffer = NULL, *buffer2 = NULL;
+	struct aa_profile *profile;
+	int error;
+
+	/* buffer freed below, lname is pointer in buffer */
+	get_buffers(buffer, buffer2);
+	error = fn_for_each_confined(label, profile,
+			profile_path_link(profile, &link, buffer, &target,
+					  buffer2, &cond));
+	put_buffers(buffer, buffer2);
+
+	return error;
+}
+
+static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
+			    u32 request)
+{
+	struct aa_label *l, *old;
+
+	/* update caching of label on file_ctx */
+	spin_lock(&fctx->lock);
+	old = rcu_dereference_protected(fctx->label,
+					spin_is_locked(&fctx->lock));
+	l = aa_label_merge(old, label, GFP_ATOMIC);
+	if (l) {
+		if (l != old) {
+			rcu_assign_pointer(fctx->label, l);
+			aa_put_label(old);
+		} else
+			aa_put_label(l);
+		fctx->allow |= request;
+	}
+	spin_unlock(&fctx->lock);
+}
+
+static int __file_path_perm(const char *op, struct aa_label *label,
+			    struct aa_label *flabel, struct file *file,
+			    u32 request, u32 denied)
+{
+	struct aa_profile *profile;
+	struct aa_perms perms = {};
+	struct path_cond cond = {
+		.uid = file_inode(file)->i_uid,
+		.mode = file_inode(file)->i_mode
+	};
+	char *buffer;
+	int flags, error;
+
+	/* revalidation due to label out of date. No revocation at this time */
+	if (!denied && aa_label_is_subset(flabel, label))
+		/* TODO: check for revocation on stale profiles */
+		return 0;
+
+	flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
+	get_buffers(buffer);
+
+	/* check every profile in task label not in current cache */
+	error = fn_for_each_not_in_set(flabel, label, profile,
+			profile_path_perm(op, profile, &file->f_path, buffer,
+					  request, &cond, flags, &perms));
+	if (denied && !error) {
+		/*
+		 * check every profile in file label that was not tested
+		 * in the initial check above.
+		 *
+		 * TODO: cache full perms so this only happens because of
+		 * conditionals
+		 * TODO: don't audit here
+		 */
+		if (label == flabel)
+			error = fn_for_each(label, profile,
+				profile_path_perm(op, profile, &file->f_path,
+						  buffer, request, &cond, flags,
+						  &perms));
+		else
+			error = fn_for_each_not_in_set(label, flabel, profile,
+				profile_path_perm(op, profile, &file->f_path,
+						  buffer, request, &cond, flags,
+						  &perms));
+	}
+	if (!error)
+		update_file_ctx(file_ctx(file), label, request);
+
+	put_buffers(buffer);
 
 	return error;
 }
@@ -440,20 +569,114 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
 /**
  * aa_file_perm - do permission revalidation check & audit for @file
  * @op: operation being checked
- * @profile: profile being enforced   (NOT NULL)
+ * @label: label being enforced   (NOT NULL)
  * @file: file to revalidate access permissions on  (NOT NULL)
  * @request: requested permissions
  *
  * Returns: %0 if access allowed else error
  */
-int aa_file_perm(const char *op, struct aa_profile *profile, struct file *file,
+int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
 		 u32 request)
 {
-	struct path_cond cond = {
-		.uid = file_inode(file)->i_uid,
-		.mode = file_inode(file)->i_mode
-	};
+	struct aa_file_ctx *fctx;
+	struct aa_label *flabel;
+	u32 denied;
+	int error = 0;
 
-	return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
-			    request, &cond);
+	AA_BUG(!label);
+	AA_BUG(!file);
+
+	fctx = file_ctx(file);
+
+	rcu_read_lock();
+	flabel  = rcu_dereference(fctx->label);
+	AA_BUG(!flabel);
+
+	/* revalidate access, if task is unconfined, or the cached cred
+	 * doesn't match or if the request is for more permissions than
+	 * were granted.
+	 *
+	 * Note: the test for !unconfined(flabel) is to handle file
+	 *       delegation from unconfined tasks
+	 */
+	denied = request & ~fctx->allow;
+	if (unconfined(label) || unconfined(flabel) ||
+	    (!denied && aa_label_is_subset(flabel, label)))
+		goto done;
+
+	/* TODO: label cross check */
+
+	if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
+		error = __file_path_perm(op, label, flabel, file, request,
+					 denied);
+
+done:
+	rcu_read_unlock();
+
+	return error;
+}
+
+static void revalidate_tty(struct aa_label *label)
+{
+	struct tty_struct *tty;
+	int drop_tty = 0;
+
+	tty = get_current_tty();
+	if (!tty)
+		return;
+
+	spin_lock(&tty->files_lock);
+	if (!list_empty(&tty->tty_files)) {
+		struct tty_file_private *file_priv;
+		struct file *file;
+		/* TODO: Revalidate access to controlling tty. */
+		file_priv = list_first_entry(&tty->tty_files,
+					     struct tty_file_private, list);
+		file = file_priv->file;
+
+		if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE))
+			drop_tty = 1;
+	}
+	spin_unlock(&tty->files_lock);
+	tty_kref_put(tty);
+
+	if (drop_tty)
+		no_tty();
+}
+
+static int match_file(const void *p, struct file *file, unsigned int fd)
+{
+	struct aa_label *label = (struct aa_label *)p;
+
+	if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file)))
+		return fd + 1;
+	return 0;
+}
+
+
+/* based on selinux's flush_unauthorized_files */
+void aa_inherit_files(const struct cred *cred, struct files_struct *files)
+{
+	struct aa_label *label = aa_get_newest_cred_label(cred);
+	struct file *devnull = NULL;
+	unsigned int n;
+
+	revalidate_tty(label);
+
+	/* Revalidate access to inherited open files. */
+	n = iterate_fd(files, 0, match_file, label);
+	if (!n) /* none found? */
+		goto out;
+
+	devnull = dentry_open(&aa_null, O_RDWR, cred);
+	if (IS_ERR(devnull))
+		devnull = NULL;
+	/* replace all the matching ones with this */
+	do {
+		replace_fd(n - 1, devnull, 0);
+	} while ((n = iterate_fd(files, n, match_file, label)) != 0);
+	if (devnull)
+		fput(devnull);
+out:
+	aa_put_label(label);
 }
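
As an illustration only, a sketch of how the revalidation entry point
aa_file_perm() might be driven from a file hook, in the same way match_file()
above uses it; the hook name is hypothetical, and the label critical-section
helpers come from include/context.h later in this diff:

static int example_file_permission(struct file *file, int mask)
{
	struct aa_label *label;
	int error;

	if (!mask)
		return 0;

	label = __begin_current_label_crit_section();
	error = aa_file_perm(OP_FPERM, label, file,
			     aa_map_file_to_perms(file));
	__end_current_label_crit_section(label);

	return error;
}
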
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 1750cc0..aaf893f4 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -4,7 +4,7 @@
  * This file contains AppArmor basic global
  *
  * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2017 Canonical Ltd.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -27,8 +27,10 @@
 #define AA_CLASS_NET		4
 #define AA_CLASS_RLIMITS	5
 #define AA_CLASS_DOMAIN		6
+#define AA_CLASS_PTRACE		9
+#define AA_CLASS_LABEL		16
 
-#define AA_CLASS_LAST		AA_CLASS_DOMAIN
+#define AA_CLASS_LAST		AA_CLASS_LABEL
 
 /* Control parameters settable through module/boot flags */
 extern enum audit_mode aa_g_audit;
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
index 120a798..bd68911 100644
--- a/security/apparmor/include/apparmorfs.h
+++ b/security/apparmor/include/apparmorfs.h
@@ -17,49 +17,49 @@
 
 extern struct path aa_null;
 
-enum aa_fs_type {
-	AA_FS_TYPE_BOOLEAN,
-	AA_FS_TYPE_STRING,
-	AA_FS_TYPE_U64,
-	AA_FS_TYPE_FOPS,
-	AA_FS_TYPE_DIR,
+enum aa_sfs_type {
+	AA_SFS_TYPE_BOOLEAN,
+	AA_SFS_TYPE_STRING,
+	AA_SFS_TYPE_U64,
+	AA_SFS_TYPE_FOPS,
+	AA_SFS_TYPE_DIR,
 };
 
-struct aa_fs_entry;
+struct aa_sfs_entry;
 
-struct aa_fs_entry {
+struct aa_sfs_entry {
 	const char *name;
 	struct dentry *dentry;
 	umode_t mode;
-	enum aa_fs_type v_type;
+	enum aa_sfs_type v_type;
 	union {
 		bool boolean;
 		char *string;
 		unsigned long u64;
-		struct aa_fs_entry *files;
+		struct aa_sfs_entry *files;
 	} v;
 	const struct file_operations *file_ops;
 };
 
-extern const struct file_operations aa_fs_seq_file_ops;
+extern const struct file_operations aa_sfs_seq_file_ops;
 
-#define AA_FS_FILE_BOOLEAN(_name, _value) \
+#define AA_SFS_FILE_BOOLEAN(_name, _value) \
 	{ .name = (_name), .mode = 0444, \
-	  .v_type = AA_FS_TYPE_BOOLEAN, .v.boolean = (_value), \
-	  .file_ops = &aa_fs_seq_file_ops }
-#define AA_FS_FILE_STRING(_name, _value) \
+	  .v_type = AA_SFS_TYPE_BOOLEAN, .v.boolean = (_value), \
+	  .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_STRING(_name, _value) \
 	{ .name = (_name), .mode = 0444, \
-	  .v_type = AA_FS_TYPE_STRING, .v.string = (_value), \
-	  .file_ops = &aa_fs_seq_file_ops }
-#define AA_FS_FILE_U64(_name, _value) \
+	  .v_type = AA_SFS_TYPE_STRING, .v.string = (_value), \
+	  .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_U64(_name, _value) \
 	{ .name = (_name), .mode = 0444, \
-	  .v_type = AA_FS_TYPE_U64, .v.u64 = (_value), \
-	  .file_ops = &aa_fs_seq_file_ops }
-#define AA_FS_FILE_FOPS(_name, _mode, _fops) \
-	{ .name = (_name), .v_type = AA_FS_TYPE_FOPS, \
+	  .v_type = AA_SFS_TYPE_U64, .v.u64 = (_value), \
+	  .file_ops = &aa_sfs_seq_file_ops }
+#define AA_SFS_FILE_FOPS(_name, _mode, _fops) \
+	{ .name = (_name), .v_type = AA_SFS_TYPE_FOPS, \
 	  .mode = (_mode), .file_ops = (_fops) }
-#define AA_FS_DIR(_name, _value) \
-	{ .name = (_name), .v_type = AA_FS_TYPE_DIR, .v.files = (_value) }
+#define AA_SFS_DIR(_name, _value) \
+	{ .name = (_name), .v_type = AA_SFS_TYPE_DIR, .v.files = (_value) }
 
 extern void __init aa_destroy_aafs(void);
 
@@ -74,6 +74,7 @@ enum aafs_ns_type {
 	AAFS_NS_LOAD,
 	AAFS_NS_REPLACE,
 	AAFS_NS_REMOVE,
+	AAFS_NS_REVISION,
 	AAFS_NS_COUNT,
 	AAFS_NS_MAX_COUNT,
 	AAFS_NS_SIZE,
@@ -102,16 +103,22 @@ enum aafs_prof_type {
 #define ns_subload(X) ((X)->dents[AAFS_NS_LOAD])
 #define ns_subreplace(X) ((X)->dents[AAFS_NS_REPLACE])
 #define ns_subremove(X) ((X)->dents[AAFS_NS_REMOVE])
+#define ns_subrevision(X) ((X)->dents[AAFS_NS_REVISION])
 
 #define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
 #define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
 
-void __aa_fs_profile_rmdir(struct aa_profile *profile);
-void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+void __aa_bump_ns_revision(struct aa_ns *ns);
+void __aafs_profile_rmdir(struct aa_profile *profile);
+void __aafs_profile_migrate_dents(struct aa_profile *old,
 				   struct aa_profile *new);
-int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
-void __aa_fs_ns_rmdir(struct aa_ns *ns);
-int __aa_fs_ns_mkdir(struct aa_ns *ns, struct dentry *parent,
-		     const char *name);
+int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aafs_ns_rmdir(struct aa_ns *ns);
+int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
+		     struct dentry *dent);
+
+struct aa_loaddata;
+void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata);
+int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata);
 
 #endif /* __AA_APPARMORFS_H */
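
To show the renamed initializers in use, a hedged sketch of a securityfs
feature table built with the AA_SFS_* macros above; the entry names and values
are purely illustrative:

static struct aa_sfs_entry example_features[] = {
	AA_SFS_FILE_BOOLEAN("change_hat", 1),
	AA_SFS_FILE_STRING("version", "1.2"),
	AA_SFS_FILE_U64("mask", 0xffff),
	{ }
};

static struct aa_sfs_entry example_dir[] = {
	AA_SFS_DIR("policy", example_features),
	{ }
};
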
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index fdc4774..c68839a 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -22,8 +22,7 @@
 #include <linux/slab.h>
 
 #include "file.h"
-
-struct aa_profile;
+#include "label.h"
 
 extern const char *const audit_mode_names[];
 #define AUDIT_MAX_INDEX 5
@@ -65,10 +64,12 @@ enum audit_type {
 #define OP_GETATTR "getattr"
 #define OP_OPEN "open"
 
+#define OP_FRECEIVE "file_receive"
 #define OP_FPERM "file_perm"
 #define OP_FLOCK "file_lock"
 #define OP_FMMAP "file_mmap"
 #define OP_FMPROT "file_mprotect"
+#define OP_INHERIT "file_inherit"
 
 #define OP_CREATE "create"
 #define OP_POST_CREATE "post_create"
@@ -91,6 +92,8 @@ enum audit_type {
 #define OP_CHANGE_HAT "change_hat"
 #define OP_CHANGE_PROFILE "change_profile"
 #define OP_CHANGE_ONEXEC "change_onexec"
+#define OP_STACK "stack"
+#define OP_STACK_ONEXEC "stack_onexec"
 
 #define OP_SETPROCATTR "setprocattr"
 #define OP_SETRLIMIT "setrlimit"
@@ -102,19 +105,19 @@ enum audit_type {
 
 struct apparmor_audit_data {
 	int error;
-	const char *op;
 	int type;
-	void *profile;
+	const char *op;
+	struct aa_label *label;
 	const char *name;
 	const char *info;
+	u32 request;
+	u32 denied;
 	union {
 		/* these entries require a custom callback fn */
 		struct {
-			struct aa_profile *peer;
+			struct aa_label *peer;
 			struct {
 				const char *target;
-				u32 request;
-				u32 denied;
 				kuid_t ouid;
 			} fs;
 		};
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index fc3fa38..e0304e2 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -19,11 +19,12 @@
 
 #include "apparmorfs.h"
 
-struct aa_profile;
+struct aa_label;
 
 /* aa_caps - confinement data for capabilities
  * @allowed: capabilities mask
  * @audit: caps that are to be audited
+ * @denied: caps that are explicitly denied
  * @quiet: caps that should not be audited
  * @kill: caps that when requested will result in the task being killed
  * @extended: caps that are subject finer grained mediation
@@ -31,14 +32,15 @@ struct aa_profile;
 struct aa_caps {
 	kernel_cap_t allow;
 	kernel_cap_t audit;
+	kernel_cap_t denied;
 	kernel_cap_t quiet;
 	kernel_cap_t kill;
 	kernel_cap_t extended;
 };
 
-extern struct aa_fs_entry aa_fs_entry_caps[];
+extern struct aa_sfs_entry aa_sfs_entry_caps[];
 
-int aa_capable(struct aa_profile *profile, int cap, int audit);
+int aa_capable(struct aa_label *label, int cap, int audit);
 
 static inline void aa_free_cap_rules(struct aa_caps *caps)
 {
diff --git a/security/apparmor/include/context.h b/security/apparmor/include/context.h
index 5b18fed..6ae07e9 100644
--- a/security/apparmor/include/context.h
+++ b/security/apparmor/include/context.h
@@ -19,60 +19,28 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
-#include "policy.h"
+#include "label.h"
 #include "policy_ns.h"
 
 #define cred_ctx(X) ((X)->security)
 #define current_ctx() cred_ctx(current_cred())
 
-/* struct aa_file_ctx - the AppArmor context the file was opened in
- * @perms: the permission the file was opened with
- *
- * The file_ctx could currently be directly stored in file->f_security
- * as the profile reference is now stored in the f_cred.  However the
- * ctx struct will expand in the future so we keep the struct.
- */
-struct aa_file_ctx {
-	u16 allow;
-};
-
-/**
- * aa_alloc_file_context - allocate file_ctx
- * @gfp: gfp flags for allocation
- *
- * Returns: file_ctx or NULL on failure
- */
-static inline struct aa_file_ctx *aa_alloc_file_context(gfp_t gfp)
-{
-	return kzalloc(sizeof(struct aa_file_ctx), gfp);
-}
-
-/**
- * aa_free_file_context - free a file_ctx
- * @ctx: file_ctx to free  (MAYBE_NULL)
- */
-static inline void aa_free_file_context(struct aa_file_ctx *ctx)
-{
-	if (ctx)
-		kzfree(ctx);
-}
-
 /**
  * struct aa_task_ctx - primary label for confined tasks
- * @profile: the current profile   (NOT NULL)
- * @exec: profile to transition to on next exec  (MAYBE NULL)
- * @previous: profile the task may return to     (MAYBE NULL)
- * @token: magic value the task must know for returning to @previous_profile
+ * @label: the current label   (NOT NULL)
+ * @exec: label to transition to on next exec  (MAYBE NULL)
+ * @previous: label the task may return to     (MAYBE NULL)
+ * @token: magic value the task must know for returning to @previous
  *
- * Contains the task's current profile (which could change due to
+ * Contains the task's current label (which could change due to
  * change_hat).  Plus the hat_magic needed during change_hat.
  *
  * TODO: make so a task can be confined by a stack of contexts
  */
 struct aa_task_ctx {
-	struct aa_profile *profile;
-	struct aa_profile *onexec;
-	struct aa_profile *previous;
+	struct aa_label *label;
+	struct aa_label *onexec;
+	struct aa_label *previous;
 	u64 token;
 };
 
@@ -80,40 +48,51 @@ struct aa_task_ctx *aa_alloc_task_context(gfp_t flags);
 void aa_free_task_context(struct aa_task_ctx *ctx);
 void aa_dup_task_context(struct aa_task_ctx *new,
 			 const struct aa_task_ctx *old);
-int aa_replace_current_profile(struct aa_profile *profile);
-int aa_set_current_onexec(struct aa_profile *profile);
-int aa_set_current_hat(struct aa_profile *profile, u64 token);
-int aa_restore_previous_profile(u64 cookie);
-struct aa_profile *aa_get_task_profile(struct task_struct *task);
+int aa_replace_current_label(struct aa_label *label);
+int aa_set_current_onexec(struct aa_label *label, bool stack);
+int aa_set_current_hat(struct aa_label *label, u64 token);
+int aa_restore_previous_label(u64 cookie);
+struct aa_label *aa_get_task_label(struct task_struct *task);
 
 
 /**
- * aa_cred_profile - obtain cred's profiles
- * @cred: cred to obtain profiles from  (NOT NULL)
+ * aa_cred_raw_label - obtain cred's label
+ * @cred: cred to obtain label from  (NOT NULL)
  *
- * Returns: confining profile
+ * Returns: confining label
  *
  * does NOT increment reference count
  */
-static inline struct aa_profile *aa_cred_profile(const struct cred *cred)
+static inline struct aa_label *aa_cred_raw_label(const struct cred *cred)
 {
 	struct aa_task_ctx *ctx = cred_ctx(cred);
 
-	AA_BUG(!ctx || !ctx->profile);
-	return ctx->profile;
+	AA_BUG(!ctx || !ctx->label);
+	return ctx->label;
 }
 
 /**
- * __aa_task_profile - retrieve another task's profile
+ * aa_get_newest_cred_label - obtain the newest label on a cred
+ * @cred: cred to obtain label from (NOT NULL)
+ *
+ * Returns: newest version of confining label
+ */
+static inline struct aa_label *aa_get_newest_cred_label(const struct cred *cred)
+{
+	return aa_get_newest_label(aa_cred_raw_label(cred));
+}
+
+/**
+ * __aa_task_raw_label - retrieve another task's label
  * @task: task to query  (NOT NULL)
  *
- * Returns: @task's profile without incrementing its ref count
+ * Returns: @task's label without incrementing its ref count
  *
  * If @task != current needs to be called in RCU safe critical section
  */
-static inline struct aa_profile *__aa_task_profile(struct task_struct *task)
+static inline struct aa_label *__aa_task_raw_label(struct task_struct *task)
 {
-	return aa_cred_profile(__task_cred(task));
+	return aa_cred_raw_label(__task_cred(task));
 }
 
 /**
@@ -124,50 +103,114 @@ static inline struct aa_profile *__aa_task_profile(struct task_struct *task)
  */
 static inline bool __aa_task_is_confined(struct task_struct *task)
 {
-	return !unconfined(__aa_task_profile(task));
+	return !unconfined(__aa_task_raw_label(task));
 }
 
 /**
- * __aa_current_profile - find the current tasks confining profile
+ * aa_current_raw_label - find the current tasks confining label
  *
- * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
  *
  * This fn will not update the tasks cred to the most up to date version
- * of the profile so it is safe to call when inside of locks.
+ * of the label so it is safe to call when inside of locks.
  */
-static inline struct aa_profile *__aa_current_profile(void)
+static inline struct aa_label *aa_current_raw_label(void)
 {
-	return aa_cred_profile(current_cred());
+	return aa_cred_raw_label(current_cred());
 }
 
 /**
- * aa_current_profile - find the current tasks confining profile and do updates
+ * aa_get_current_label - get the newest version of the current tasks label
  *
- * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ * Returns: newest version of confining label (NOT NULL)
  *
- * This fn will update the tasks cred structure if the profile has been
- * replaced.  Not safe to call inside locks
+ * This fn will not update the task's cred, so it is safe inside of locks
+ *
+ * The returned reference must be put with aa_put_label()
  */
-static inline struct aa_profile *aa_current_profile(void)
+static inline struct aa_label *aa_get_current_label(void)
 {
-	const struct aa_task_ctx *ctx = current_ctx();
-	struct aa_profile *profile;
+	struct aa_label *l = aa_current_raw_label();
 
-	AA_BUG(!ctx || !ctx->profile);
+	if (label_is_stale(l))
+		return aa_get_newest_label(l);
+	return aa_get_label(l);
+}
 
-	if (profile_is_stale(ctx->profile)) {
-		profile = aa_get_newest_profile(ctx->profile);
-		aa_replace_current_profile(profile);
-		aa_put_profile(profile);
-		ctx = current_ctx();
+#define __end_current_label_crit_section(X) end_current_label_crit_section(X)
+
+/**
+ * end_current_label_crit_section - put a reference found with begin_current_label..
+ * @label: label reference to put
+ *
+ * Should only be used with a reference obtained with
+ * begin_current_label_crit_section and never used in situations where the
+ * task cred may be updated
+ */
+static inline void end_current_label_crit_section(struct aa_label *label)
+{
+	if (label != aa_current_raw_label())
+		aa_put_label(label);
+}
+
+/**
+ * __begin_current_label_crit_section - current's confining label
+ *
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
+ *
+ * safe to call inside locks
+ *
+ * The returned reference must be put with __end_current_label_crit_section()
+ * This must NOT be used if the task cred could be updated within the
+ * critical section between __begin_current_label_crit_section() ..
+ * __end_current_label_crit_section()
+ */
+static inline struct aa_label *__begin_current_label_crit_section(void)
+{
+	struct aa_label *label = aa_current_raw_label();
+
+	if (label_is_stale(label))
+		label = aa_get_newest_label(label);
+
+	return label;
+}
+
+/**
+ * begin_current_label_crit_section - current's confining label and update it
+ *
+ * Returns: up to date confining label or the ns unconfined label (NOT NULL)
+ *
+ * Not safe to call inside locks
+ *
+ * The returned reference must be put with end_current_label_crit_section()
+ * This must NOT be used if the task cred could be updated within the
+ * critical section between begin_current_label_crit_section() ..
+ * end_current_label_crit_section()
+ */
+static inline struct aa_label *begin_current_label_crit_section(void)
+{
+	struct aa_label *label = aa_current_raw_label();
+
+	if (label_is_stale(label)) {
+		label = aa_get_newest_label(label);
+		if (aa_replace_current_label(label) == 0)
+			/* task cred will keep the reference */
+			aa_put_label(label);
 	}
 
-	return ctx->profile;
+	return label;
 }
 
 static inline struct aa_ns *aa_get_current_ns(void)
 {
-	return aa_get_ns(__aa_current_profile()->ns);
+	struct aa_label *label;
+	struct aa_ns *ns;
+
+	label  = __begin_current_label_crit_section();
+	ns = aa_get_ns(labels_ns(label));
+	__end_current_label_crit_section(label);
+
+	return ns;
 }
 
 /**
@@ -176,8 +219,8 @@ static inline struct aa_ns *aa_get_current_ns(void)
  */
 static inline void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
 {
-	aa_put_profile(ctx->previous);
-	aa_put_profile(ctx->onexec);
+	aa_put_label(ctx->previous);
+	aa_put_label(ctx->onexec);
 	ctx->previous = NULL;
 	ctx->onexec = NULL;
 	ctx->token = 0;
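
A short usage sketch of the critical-section helpers documented above,
assuming the file permission interfaces from elsewhere in this series; it is
not code from this patch:

static int example_path_check(const struct path *path, u32 request,
			      struct path_cond *cond)
{
	struct aa_label *label;
	int error;

	/* returns a counted reference when the cached label is stale */
	label = begin_current_label_crit_section();
	error = aa_path_perm(OP_OPEN, label, path, 0, request, cond);
	end_current_label_crit_section(label);

	return error;
}
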
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h
index 3054472..bab5810 100644
--- a/security/apparmor/include/domain.h
+++ b/security/apparmor/include/domain.h
@@ -23,14 +23,17 @@ struct aa_domain {
 	char **table;
 };
 
+#define AA_CHANGE_NOFLAGS 0
+#define AA_CHANGE_TEST 1
+#define AA_CHANGE_CHILD 2
+#define AA_CHANGE_ONEXEC  4
+#define AA_CHANGE_STACK 8
+
 int apparmor_bprm_set_creds(struct linux_binprm *bprm);
 int apparmor_bprm_secureexec(struct linux_binprm *bprm);
-void apparmor_bprm_committing_creds(struct linux_binprm *bprm);
-void apparmor_bprm_committed_creds(struct linux_binprm *bprm);
 
 void aa_free_domain_entries(struct aa_domain *domain);
-int aa_change_hat(const char *hats[], int count, u64 token, bool permtest);
-int aa_change_profile(const char *fqname, bool onexec, bool permtest,
-		      bool stack);
+int aa_change_hat(const char *hats[], int count, u64 token, int flags);
+int aa_change_profile(const char *fqname, int flags);
 
 #endif /* __AA_DOMAIN_H */
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 38f821b..001e400 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -15,38 +15,73 @@
 #ifndef __AA_FILE_H
 #define __AA_FILE_H
 
+#include <linux/spinlock.h>
+
 #include "domain.h"
 #include "match.h"
+#include "perms.h"
 
 struct aa_profile;
 struct path;
 
-/*
- * We use MAY_EXEC, MAY_WRITE, MAY_READ, MAY_APPEND and the following flags
- * for profile permissions
- */
-#define AA_MAY_CREATE                  0x0010
-#define AA_MAY_DELETE                  0x0020
-#define AA_MAY_META_WRITE              0x0040
-#define AA_MAY_META_READ               0x0080
-
-#define AA_MAY_CHMOD                   0x0100
-#define AA_MAY_CHOWN                   0x0200
-#define AA_MAY_LOCK                    0x0400
-#define AA_EXEC_MMAP                   0x0800
-
-#define AA_MAY_LINK			0x1000
-#define AA_LINK_SUBSET			AA_MAY_LOCK	/* overlaid */
-#define AA_MAY_ONEXEC			0x40000000	/* exec allows onexec */
-#define AA_MAY_CHANGE_PROFILE		0x80000000
-#define AA_MAY_CHANGEHAT		0x80000000	/* ctrl auditing only */
+#define mask_mode_t(X) (X & (MAY_EXEC | MAY_WRITE | MAY_READ | MAY_APPEND))
 
 #define AA_AUDIT_FILE_MASK	(MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND |\
 				 AA_MAY_CREATE | AA_MAY_DELETE |	\
-				 AA_MAY_META_READ | AA_MAY_META_WRITE | \
+				 AA_MAY_GETATTR | AA_MAY_SETATTR | \
 				 AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_LOCK | \
 				 AA_EXEC_MMAP | AA_MAY_LINK)
 
+#define file_ctx(X) ((struct aa_file_ctx *)(X)->f_security)
+
+/* struct aa_file_ctx - the AppArmor context the file was opened in
+ * @lock: lock to update the ctx
+ * @label: label currently cached on the ctx
+ * @allow: mask of the permissions the file was opened with
+ */
+struct aa_file_ctx {
+	spinlock_t lock;
+	struct aa_label __rcu *label;
+	u32 allow;
+};
+
+/**
+ * aa_alloc_file_ctx - allocate file_ctx
+ * @label: initial label of task creating the file
+ * @gfp: gfp flags for allocation
+ *
+ * Returns: file_ctx or NULL on failure
+ */
+static inline struct aa_file_ctx *aa_alloc_file_ctx(struct aa_label *label,
+						    gfp_t gfp)
+{
+	struct aa_file_ctx *ctx;
+
+	ctx = kzalloc(sizeof(struct aa_file_ctx), gfp);
+	if (ctx) {
+		spin_lock_init(&ctx->lock);
+		rcu_assign_pointer(ctx->label, aa_get_label(label));
+	}
+	return ctx;
+}
+
+/**
+ * aa_free_file_ctx - free a file_ctx
+ * @ctx: file_ctx to free  (MAYBE_NULL)
+ */
+static inline void aa_free_file_ctx(struct aa_file_ctx *ctx)
+{
+	if (ctx) {
+		aa_put_label(rcu_access_pointer(ctx->label));
+		kzfree(ctx);
+	}
+}
+
+static inline struct aa_label *aa_get_file_label(struct aa_file_ctx *ctx)
+{
+	return aa_get_label_rcu(&ctx->label);
+}
+
 /*
  * The xindex is broken into 3 parts
  * - index - an index into either the exec name table or the variable table
@@ -75,25 +110,6 @@ struct path_cond {
 	umode_t mode;
 };
 
-/* struct file_perms - file permission
- * @allow: mask of permissions that are allowed
- * @audit: mask of permissions to force an audit message for
- * @quiet: mask of permissions to quiet audit messages for
- * @kill: mask of permissions that when matched will kill the task
- * @xindex: exec transition index if @allow contains MAY_EXEC
- *
- * The @audit and @queit mask should be mutually exclusive.
- */
-struct file_perms {
-	u32 allow;
-	u32 audit;
-	u32 quiet;
-	u32 kill;
-	u16 xindex;
-};
-
-extern struct file_perms nullperms;
-
 #define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
 
 /* FIXME: split perms from dfa and match this to description
@@ -144,9 +160,10 @@ static inline u16 dfa_map_xindex(u16 mask)
 #define dfa_other_xindex(dfa, state) \
 	dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
 
-int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
 		  const char *op, u32 request, const char *name,
-		  const char *target, kuid_t ouid, const char *info, int error);
+		  const char *target, struct aa_label *tlabel, kuid_t ouid,
+		  const char *info, int error);
 
 /**
  * struct aa_file_rules - components used for file rule permissions
@@ -167,20 +184,27 @@ struct aa_file_rules {
 	/* TODO: add delegate table */
 };
 
+struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
+				    struct path_cond *cond);
 unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
 			  const char *name, struct path_cond *cond,
-			  struct file_perms *perms);
+			  struct aa_perms *perms);
 
-int aa_path_perm(const char *op, struct aa_profile *profile,
+int __aa_path_perm(const char *op, struct aa_profile *profile,
+		   const char *name, u32 request, struct path_cond *cond,
+		   int flags, struct aa_perms *perms);
+int aa_path_perm(const char *op, struct aa_label *label,
 		 const struct path *path, int flags, u32 request,
 		 struct path_cond *cond);
 
-int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
+int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
 		 const struct path *new_dir, struct dentry *new_dentry);
 
-int aa_file_perm(const char *op, struct aa_profile *profile, struct file *file,
+int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
 		 u32 request);
 
+void aa_inherit_files(const struct cred *cred, struct files_struct *files);
+
 static inline void aa_free_file_rules(struct aa_file_rules *rules)
 {
 	aa_put_dfa(rules->dfa);
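
The file context helpers above pair an allocation that takes its own reference on the passed-in label with a free that drops it. A minimal sketch of how a hypothetical caller might use them; the hook names and the f_security attachment point are illustrative assumptions, not part of this header:

	/* sketch: allocate a ctx for a newly opened file under @label */
	static int example_file_alloc(struct file *file, struct aa_label *label)
	{
		struct aa_file_ctx *ctx = aa_alloc_file_ctx(label, GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		file->f_security = ctx;		/* assumed attachment point */
		return 0;
	}

	/* sketch: take a counted snapshot of the file's label, then drop it */
	static void example_file_use(struct file *file)
	{
		struct aa_file_ctx *ctx = file->f_security;
		struct aa_label *label = aa_get_file_label(ctx);

		/* ... mediate the access against @label ... */
		aa_put_label(label);
	}
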
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index 288ca76..656fdb8 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -4,7 +4,7 @@
  * This file contains AppArmor ipc mediation function definitions.
  *
  * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2017 Canonical Ltd.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -19,10 +19,16 @@
 
 struct aa_profile;
 
-int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
-		  unsigned int mode);
+#define AA_PTRACE_TRACE		MAY_WRITE
+#define AA_PTRACE_READ		MAY_READ
+#define AA_MAY_BE_TRACED	AA_MAY_APPEND
+#define AA_MAY_BE_READ		AA_MAY_CREATE
+#define PTRACE_PERM_SHIFT	2
 
-int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
-	      unsigned int mode);
+#define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \
+			     AA_MAY_BE_READ | AA_MAY_BE_TRACED)
+
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+		  u32 request);
 
 #endif /* __AA_IPC_H */
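
The new defines split ptrace mediation into a tracer view (trace, read) and a tracee view (be traced, be read) that sit exactly PTRACE_PERM_SHIFT bits apart, which is what the request << PTRACE_PERM_SHIFT cross check in ipc.c below relies on. A sketch of the relationship, assuming the standard MAY_* bit values from <linux/fs.h> (MAY_WRITE 0x2, MAY_READ 0x4, MAY_APPEND 0x8) and AA_MAY_CREATE 0x10 from perms.h:

	/* the tracee-side bits are the tracer-side bits shifted left by 2 */
	BUILD_BUG_ON(AA_PTRACE_TRACE << PTRACE_PERM_SHIFT != AA_MAY_BE_TRACED);
	BUILD_BUG_ON(AA_PTRACE_READ << PTRACE_PERM_SHIFT != AA_MAY_BE_READ);

	/* hypothetical caller checking a PTRACE_MODE_ATTACH style request */
	error = aa_may_ptrace(tracer_label, tracee_label,
			      AA_PTRACE_TRACE | AA_PTRACE_READ);
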
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
new file mode 100644
index 0000000..9a283b7
--- /dev/null
+++ b/security/apparmor/include/label.h
@@ -0,0 +1,441 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_LABEL_H
+#define __AA_LABEL_H
+
+#include <linux/atomic.h>
+#include <linux/audit.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+#include "apparmor.h"
+#include "lib.h"
+
+struct aa_ns;
+
+#define LOCAL_VEC_ENTRIES 8
+#define DEFINE_VEC(T, V)						\
+	struct aa_ ## T *(_ ## V ## _localtmp)[LOCAL_VEC_ENTRIES];	\
+	struct aa_ ## T **(V)
+
+#define vec_setup(T, V, N, GFP)						\
+({									\
+	if ((N) <= LOCAL_VEC_ENTRIES) {					\
+		typeof(N) i;						\
+		(V) = (_ ## V ## _localtmp);				\
+		for (i = 0; i < (N); i++)				\
+			(V)[i] = NULL;					\
+	} else								\
+		(V) = kzalloc(sizeof(struct aa_ ## T *) * (N), (GFP));	\
+	(V) ? 0 : -ENOMEM;						\
+})
+
+#define vec_cleanup(T, V, N)						\
+do {									\
+	int i;								\
+	for (i = 0; i < (N); i++) {					\
+		if (!IS_ERR_OR_NULL((V)[i]))				\
+			aa_put_ ## T((V)[i]);				\
+	}								\
+	if ((V) != _ ## V ## _localtmp)					\
+		kfree(V);						\
+} while (0)
+
+#define vec_last(VEC, SIZE) ((VEC)[(SIZE) - 1])
+#define vec_ns(VEC, SIZE) (vec_last((VEC), (SIZE))->ns)
+#define vec_labelset(VEC, SIZE) (&vec_ns((VEC), (SIZE))->labels)
+#define cleanup_domain_vec(V, L) cleanup_label_vec((V), (L)->size)
+
+struct aa_profile;
+#define VEC_FLAG_TERMINATE 1
+int aa_vec_unique(struct aa_profile **vec, int n, int flags);
+struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
+					     gfp_t gfp);
+#define aa_sort_and_merge_vec(N, V) \
+	aa_sort_and_merge_profiles((N), (struct aa_profile **)(V))
+
+
+/* struct aa_labelset - set of labels for a namespace
+ *
+ * Labels are reference counted; aa_labelset does not contribute to label
+ * reference counts. Once a label's last refcount is put it is removed from
+ * the set.
+ */
+struct aa_labelset {
+	rwlock_t lock;
+
+	struct rb_root root;
+};
+
+#define __labelset_for_each(LS, N) \
+	for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
+
+void aa_labelset_destroy(struct aa_labelset *ls);
+void aa_labelset_init(struct aa_labelset *ls);
+
+
+enum label_flags {
+	FLAG_HAT = 1,			/* profile is a hat */
+	FLAG_UNCONFINED = 2,		/* label unconfined only if all */
+	FLAG_NULL = 4,			/* profile is null learning profile */
+	FLAG_IX_ON_NAME_ERROR = 8,	/* fallback to ix on name lookup fail */
+	FLAG_IMMUTIBLE = 0x10,		/* don't allow changes/replacement */
+	FLAG_USER_DEFINED = 0x20,	/* user based profile - lower privs */
+	FLAG_NO_LIST_REF = 0x40,	/* list doesn't keep profile ref */
+	FLAG_NS_COUNT = 0x80,		/* carries NS ref count */
+	FLAG_IN_TREE = 0x100,		/* label is in tree */
+	FLAG_PROFILE = 0x200,		/* label is a profile */
+	FLAG_EXPLICIT = 0x400,		/* explicit static label */
+	FLAG_STALE = 0x800,		/* replaced/removed */
+	FLAG_RENAMED = 0x1000,		/* label has renaming in it */
+	FLAG_REVOKED = 0x2000,		/* label has revocation in it */
+
+	/* These flags must correspond with PATH_flags */
+	/* TODO: add new path flags */
+};
+
+struct aa_label;
+struct aa_proxy {
+	struct kref count;
+	struct aa_label __rcu *label;
+};
+
+struct label_it {
+	int i, j;
+};
+
+/* struct aa_label - lazy labeling struct
+ * @count: ref count of active users
+ * @node: rbtree position
+ * @rcu: rcu callback struct
+ * @proxy: is set to the label that replaced this label
+ * @hname: text representation of the label (MAYBE_NULL)
+ * @flags: stale and other flags - values may change under label set lock
+ * @secid: secid that references this label
+ * @size: number of entries in @ent[]
+ * @ent: set of profiles for label, actual size determined by @size
+ */
+struct aa_label {
+	struct kref count;
+	struct rb_node node;
+	struct rcu_head rcu;
+	struct aa_proxy *proxy;
+	__counted char *hname;
+	long flags;
+	u32 secid;
+	int size;
+	struct aa_profile *vec[];
+};
+
+#define last_error(E, FN)				\
+do {							\
+	int __subE = (FN);				\
+	if (__subE)					\
+		(E) = __subE;				\
+} while (0)
+
+#define label_isprofile(X) ((X)->flags & FLAG_PROFILE)
+#define label_unconfined(X) ((X)->flags & FLAG_UNCONFINED)
+#define unconfined(X) label_unconfined(X)
+#define label_is_stale(X) ((X)->flags & FLAG_STALE)
+#define __label_make_stale(X) ((X)->flags |= FLAG_STALE)
+#define labels_ns(X) (vec_ns(&((X)->vec[0]), (X)->size))
+#define labels_set(X) (&labels_ns(X)->labels)
+#define labels_profile(X) ((X)->vec[(X)->size - 1])
+
+
+int aa_label_next_confined(struct aa_label *l, int i);
+
+/* for each profile in a label */
+#define label_for_each(I, L, P)						\
+	for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i))
+
+/* assumes break/goto ended label_for_each */
+#define label_for_each_cont(I, L, P)					\
+	for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i))
+
+#define next_comb(I, L1, L2)						\
+do {									\
+	(I).j++;							\
+	if ((I).j >= (L2)->size) {					\
+		(I).i++;						\
+		(I).j = 0;						\
+	}								\
+} while (0)
+
+
+/* for each combination of P1 in L1, and P2 in L2 */
+#define label_for_each_comb(I, L1, L2, P1, P2)				\
+for ((I).i = (I).j = 0;							\
+	((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]);		\
+	(I) = next_comb(I, L1, L2))
+
+#define fn_for_each_comb(L1, L2, P1, P2, FN)				\
+({									\
+	struct label_it i;						\
+	int __E = 0;							\
+	label_for_each_comb(i, (L1), (L2), (P1), (P2)) {		\
+		last_error(__E, (FN));					\
+	}								\
+	__E;								\
+})
+
+/* for each profile that is enforcing confinement in a label */
+#define label_for_each_confined(I, L, P)				\
+	for ((I).i = aa_label_next_confined((L), 0);			\
+	     ((P) = (L)->vec[(I).i]);					\
+	     (I).i = aa_label_next_confined((L), (I).i + 1))
+
+#define label_for_each_in_merge(I, A, B, P)				\
+	for ((I).i = (I).j = 0;						\
+	     ((P) = aa_label_next_in_merge(&(I), (A), (B)));		\
+	     )
+
+#define label_for_each_not_in_set(I, SET, SUB, P)			\
+	for ((I).i = (I).j = 0;						\
+	     ((P) = __aa_label_next_not_in_set(&(I), (SET), (SUB)));	\
+	     )
+
+#define next_in_ns(i, NS, L)						\
+({									\
+	typeof(i) ___i = (i);						\
+	while ((L)->vec[___i] && (L)->vec[___i]->ns != (NS))		\
+		(___i)++;						\
+	(___i);								\
+})
+
+#define label_for_each_in_ns(I, NS, L, P)				\
+	for ((I).i = next_in_ns(0, (NS), (L));				\
+	     ((P) = (L)->vec[(I).i]);					\
+	     (I).i = next_in_ns((I).i + 1, (NS), (L)))
+
+#define fn_for_each_in_ns(L, P, FN)					\
+({									\
+	struct label_it __i;						\
+	struct aa_ns *__ns = labels_ns(L);				\
+	int __E = 0;							\
+	label_for_each_in_ns(__i, __ns, (L), (P)) {			\
+		last_error(__E, (FN));					\
+	}								\
+	__E;								\
+})
+
+
+#define fn_for_each_XXX(L, P, FN, ...)					\
+({									\
+	struct label_it i;						\
+	int __E = 0;							\
+	label_for_each ## __VA_ARGS__(i, (L), (P)) {			\
+		last_error(__E, (FN));					\
+	}								\
+	__E;								\
+})
+
+#define fn_for_each(L, P, FN) fn_for_each_XXX(L, P, FN)
+#define fn_for_each_confined(L, P, FN) fn_for_each_XXX(L, P, FN, _confined)
+
+#define fn_for_each2_XXX(L1, L2, P, FN, ...)				\
+({									\
+	struct label_it i;						\
+	int __E = 0;							\
+	label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) {		\
+		last_error(__E, (FN));					\
+	}								\
+	__E;								\
+})
+
+#define fn_for_each_in_merge(L1, L2, P, FN)				\
+	fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
+#define fn_for_each_not_in_set(L1, L2, P, FN)				\
+	fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set)
+
+#define LABEL_MEDIATES(L, C)						\
+({									\
+	struct aa_profile *profile;					\
+	struct label_it i;						\
+	int ret = 0;							\
+	label_for_each(i, (L), profile) {				\
+		if (PROFILE_MEDIATES(profile, (C))) {			\
+			ret = 1;					\
+			break;						\
+		}							\
+	}								\
+	ret;								\
+})
+
+
+void aa_labelset_destroy(struct aa_labelset *ls);
+void aa_labelset_init(struct aa_labelset *ls);
+void __aa_labelset_update_subtree(struct aa_ns *ns);
+
+void aa_label_free(struct aa_label *label);
+void aa_label_kref(struct kref *kref);
+bool aa_label_init(struct aa_label *label, int size);
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
+
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+					     struct aa_label *set,
+					     struct aa_label *sub);
+bool aa_label_remove(struct aa_label *label);
+struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l);
+bool aa_label_replace(struct aa_label *old, struct aa_label *new);
+bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old,
+			  struct aa_label *new);
+
+struct aa_label *aa_label_find(struct aa_label *l);
+
+struct aa_profile *aa_label_next_in_merge(struct label_it *I,
+					  struct aa_label *a,
+					  struct aa_label *b);
+struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b);
+struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
+				gfp_t gfp);
+
+
+bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
+
+#define FLAGS_NONE 0
+#define FLAG_SHOW_MODE 1
+#define FLAG_VIEW_SUBNS 2
+#define FLAG_HIDDEN_UNCONFINED 4
+int aa_label_snxprint(char *str, size_t size, struct aa_ns *view,
+		      struct aa_label *label, int flags);
+int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
+		      int flags, gfp_t gfp);
+int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
+			 struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
+		     struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
+			 struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
+		      gfp_t gfp);
+void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
+void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
+void aa_label_printk(struct aa_label *label, gfp_t gfp);
+
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+				gfp_t gfp, bool create, bool force_stack);
+
+
+struct aa_perms;
+int aa_label_match(struct aa_profile *profile, struct aa_label *label,
+		   unsigned int state, bool subns, u32 request,
+		   struct aa_perms *perms);
+
+
+/**
+ * __aa_get_label - get a reference count to uncounted label reference
+ * @l: reference to get a count on
+ *
+ * Returns: pointer to reference OR NULL if race is lost and reference is
+ *          being reaped.
+ * Requires: lock held, and the return code MUST be checked
+ */
+static inline struct aa_label *__aa_get_label(struct aa_label *l)
+{
+	if (l && kref_get_unless_zero(&l->count))
+		return l;
+
+	return NULL;
+}
+
+static inline struct aa_label *aa_get_label(struct aa_label *l)
+{
+	if (l)
+		kref_get(&(l->count));
+
+	return l;
+}
+
+
+/**
+ * aa_get_label_rcu - increment refcount on a label that can be replaced
+ * @l: pointer to label that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted label.
+ *     else NULL if no label
+ */
+static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l)
+{
+	struct aa_label *c;
+
+	rcu_read_lock();
+	do {
+		c = rcu_dereference(*l);
+	} while (c && !kref_get_unless_zero(&c->count));
+	rcu_read_unlock();
+
+	return c;
+}
+
+/**
+ * aa_get_newest_label - find the newest version of @l
+ * @l: the label to check for newer versions of
+ *
+ * Returns: refcounted newest version of @l taking into account
+ *          replacement, renames and removals; if @l is not stale a new
+ *          reference to @l itself is returned.
+ */
+static inline struct aa_label *aa_get_newest_label(struct aa_label *l)
+{
+	if (!l)
+		return NULL;
+
+	if (label_is_stale(l)) {
+		struct aa_label *tmp;
+
+		AA_BUG(!l->proxy);
+		AA_BUG(!l->proxy->label);
+		/* BUG: the only way this can happen is if @l's ref count and
+		 * its replacement's ref count have both gone to 0 and are on
+		 * their way to destruction, i.e. we have a refcounting error
+		 */
+		tmp = aa_get_label_rcu(&l->proxy->label);
+		AA_BUG(!tmp);
+
+		return tmp;
+	}
+
+	return aa_get_label(l);
+}
+
+static inline void aa_put_label(struct aa_label *l)
+{
+	if (l)
+		kref_put(&l->count, aa_label_kref);
+}
+
+
+struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
+void aa_proxy_kref(struct kref *kref);
+
+static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *proxy)
+{
+	if (proxy)
+		kref_get(&(proxy->count));
+
+	return proxy;
+}
+
+static inline void aa_put_proxy(struct aa_proxy *proxy)
+{
+	if (proxy)
+		kref_put(&proxy->count, aa_proxy_kref);
+}
+
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new);
+
+#endif /* __AA_LABEL_H */
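
label.h models a label as an ordered, null-terminated vector of profiles plus iteration macros that fold per-profile results into a single error, and refcount helpers that transparently follow proxy redirection for stale labels. A minimal usage sketch; example_profile_check() is a hypothetical per-profile check returning 0 or a negative error:

	/* run a check against every profile in @label, keeping the last error */
	static int example_mediate(struct aa_label *label)
	{
		struct aa_profile *profile;

		return fn_for_each(label, profile,
				   example_profile_check(profile));
	}

	/* swap a held reference for a reference to the newest version */
	static struct aa_label *example_refresh(struct aa_label *label)
	{
		struct aa_label *newest = aa_get_newest_label(label);

		aa_put_label(label);
		return newest;
	}
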
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 550a700..436b3a7 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -60,6 +60,7 @@
 extern int apparmor_initialized;
 
 /* fn's in lib */
+const char *skipn_spaces(const char *str, size_t n);
 char *aa_split_fqname(char *args, char **ns_name);
 const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
 			     size_t *ns_len);
@@ -99,6 +100,36 @@ static inline bool path_mediated_fs(struct dentry *dentry)
 	return !(dentry->d_sb->s_flags & MS_NOUSER);
 }
 
+
+struct counted_str {
+	struct kref count;
+	char name[];
+};
+
+#define str_to_counted(str) \
+	((struct counted_str *)(str - offsetof(struct counted_str, name)))
+
+#define __counted	/* atm just a notation */
+
+void aa_str_kref(struct kref *kref);
+char *aa_str_alloc(int size, gfp_t gfp);
+
+
+static inline __counted char *aa_get_str(__counted char *str)
+{
+	if (str)
+		kref_get(&(str_to_counted(str)->count));
+
+	return str;
+}
+
+static inline void aa_put_str(__counted char *str)
+{
+	if (str)
+		kref_put(&str_to_counted(str)->count, aa_str_kref);
+}
+
+
 /* struct aa_policy - common part of both namespaces and profiles
  * @name: name of the object
  * @hname - The hierarchical name
@@ -107,7 +138,7 @@ static inline bool path_mediated_fs(struct dentry *dentry)
  */
 struct aa_policy {
 	const char *name;
-	const char *hname;
+	__counted char *hname;
 	struct list_head list;
 	struct list_head profiles;
 };
@@ -180,4 +211,89 @@ bool aa_policy_init(struct aa_policy *policy, const char *prefix,
 		    const char *name, gfp_t gfp);
 void aa_policy_destroy(struct aa_policy *policy);
 
-#endif /* AA_LIB_H */
+
+/*
+ * fn_label_build - abstract out the build of a label transition
+ * @L: label the transition is being computed for
+ * @P: profile parameter derived from L by this macro, can be passed to FN
+ * @GFP: memory allocation type to use
+ * @FN: fn to call for each profile transition. @P is set to the profile
+ *
+ * Returns: new label on success
+ *          ERR_PTR if the @FN build step fails
+ *          NULL if label_build fails due to low memory conditions
+ *
+ * @FN must return a label or ERR_PTR on failure. NULL is not allowed
+ */
+#define fn_label_build(L, P, GFP, FN)					\
+({									\
+	__label__ __cleanup, __done;					\
+	struct aa_label *__new_;					\
+									\
+	if ((L)->size > 1) {						\
+		/* TODO: add cache of transitions already done */	\
+		struct label_it __i;					\
+		int __j, __k, __count;					\
+		DEFINE_VEC(label, __lvec);				\
+		DEFINE_VEC(profile, __pvec);				\
+		if (vec_setup(label, __lvec, (L)->size, (GFP)))	{	\
+			__new_ = NULL;					\
+			goto __done;					\
+		}							\
+		__j = 0;						\
+		label_for_each(__i, (L), (P)) {				\
+			__new_ = (FN);					\
+			AA_BUG(!__new_);				\
+			if (IS_ERR(__new_))				\
+				goto __cleanup;				\
+			__lvec[__j++] = __new_;				\
+		}							\
+		for (__j = __count = 0; __j < (L)->size; __j++)		\
+			__count += __lvec[__j]->size;			\
+		if (!vec_setup(profile, __pvec, __count, (GFP))) {	\
+			for (__j = __k = 0; __j < (L)->size; __j++) {	\
+				label_for_each(__i, __lvec[__j], (P))	\
+					__pvec[__k++] = aa_get_profile(P); \
+			}						\
+			__count -= aa_vec_unique(__pvec, __count, 0);	\
+			if (__count > 1) {				\
+				__new_ = aa_vec_find_or_create_label(__pvec,\
+						     __count, (GFP));	\
+				/* only fails if out of Mem */		\
+				if (!__new_)				\
+					__new_ = NULL;			\
+			} else						\
+				__new_ = aa_get_label(&__pvec[0]->label); \
+			vec_cleanup(profile, __pvec, __count);		\
+		} else							\
+			__new_ = NULL;					\
+__cleanup:								\
+		vec_cleanup(label, __lvec, (L)->size);			\
+	} else {							\
+		(P) = labels_profile(L);				\
+		__new_ = (FN);						\
+	}								\
+__done:									\
+	if (!__new_)							\
+		AA_DEBUG("label build failed\n");			\
+	(__new_);							\
+})
+
+
+#define __fn_build_in_ns(NS, P, NS_FN, OTHER_FN)			\
+({									\
+	struct aa_label *__new;						\
+	if ((P)->ns != (NS))						\
+		__new = (OTHER_FN);					\
+	else								\
+		__new = (NS_FN);					\
+	(__new);							\
+})
+
+#define fn_label_build_in_ns(L, P, GFP, NS_FN, OTHER_FN)		\
+({									\
+	fn_label_build((L), (P), (GFP),					\
+		__fn_build_in_ns(labels_ns(L), (P), (NS_FN), (OTHER_FN))); \
+})
+
+#endif /* __AA_LIB_H */
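
The counted string helpers give hname strings their own kref so several profiles (and the labels built from them) can share one allocation. A sketch of the intended pattern, assuming the size passed to aa_str_alloc() must cover the terminating NUL and that @a and @b are hypothetical objects owning an hname:

	__counted char *hname = aa_str_alloc(strlen(name) + 1, GFP_KERNEL);

	if (!hname)
		return -ENOMEM;
	strcpy(hname, name);
	a->hname = hname;			/* first reference from alloc */
	b->hname = aa_get_str(hname);		/* second, counted reference */

	/* each owner drops its own reference when done */
	aa_put_str(a->hname);
	aa_put_str(b->hname);
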
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 0444fdd..05fb330 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -23,11 +23,12 @@ enum path_flags {
 	PATH_CHROOT_NSCONNECT = 0x10,	/* connect paths that are at ns root */
 
 	PATH_DELEGATE_DELETED = 0x08000, /* delegate deleted files */
-	PATH_MEDIATE_DELETED = 0x10000,	/* mediate deleted paths */
+	PATH_MEDIATE_DELETED = 0x10000,	 /* mediate deleted paths */
 };
 
-int aa_path_name(const struct path *path, int flags, char **buffer,
-		 const char **name, const char **info);
+int aa_path_name(const struct path *path, int flags, char *buffer,
+		 const char **name, const char **info,
+		 const char *disconnected);
 
 #define MAX_PATH_BUFFERS 2
 
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
new file mode 100644
index 0000000..2b27bb7
--- /dev/null
+++ b/security/apparmor/include/perms.h
@@ -0,0 +1,155 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic permission sets definitions.
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_PERM_H
+#define __AA_PERM_H
+
+#include <linux/fs.h>
+#include "label.h"
+
+#define AA_MAY_EXEC		MAY_EXEC
+#define AA_MAY_WRITE		MAY_WRITE
+#define AA_MAY_READ		MAY_READ
+#define AA_MAY_APPEND		MAY_APPEND
+
+#define AA_MAY_CREATE		0x0010
+#define AA_MAY_DELETE		0x0020
+#define AA_MAY_OPEN		0x0040
+#define AA_MAY_RENAME		0x0080		/* pair */
+
+#define AA_MAY_SETATTR		0x0100		/* meta write */
+#define AA_MAY_GETATTR		0x0200		/* meta read */
+#define AA_MAY_SETCRED		0x0400		/* security cred/attr */
+#define AA_MAY_GETCRED		0x0800
+
+#define AA_MAY_CHMOD		0x1000		/* pair */
+#define AA_MAY_CHOWN		0x2000		/* pair */
+#define AA_MAY_CHGRP		0x4000		/* pair */
+#define AA_MAY_LOCK		0x8000		/* LINK_SUBSET overlaid */
+
+#define AA_EXEC_MMAP		0x00010000
+#define AA_MAY_MPROT		0x00020000	/* extend conditions */
+#define AA_MAY_LINK		0x00040000	/* pair */
+#define AA_MAY_SNAPSHOT		0x00080000	/* pair */
+
+#define AA_MAY_DELEGATE
+#define AA_CONT_MATCH		0x08000000
+
+#define AA_MAY_STACK		0x10000000
+#define AA_MAY_ONEXEC		0x20000000 /* either stack or change_profile */
+#define AA_MAY_CHANGE_PROFILE	0x40000000
+#define AA_MAY_CHANGEHAT	0x80000000
+
+#define AA_LINK_SUBSET		AA_MAY_LOCK	/* overlaid */
+
+
+#define PERMS_CHRS_MASK (MAY_READ | MAY_WRITE | AA_MAY_CREATE |		\
+			 AA_MAY_DELETE | AA_MAY_LINK | AA_MAY_LOCK |	\
+			 AA_MAY_EXEC | AA_EXEC_MMAP | AA_MAY_APPEND)
+
+#define PERMS_NAMES_MASK (PERMS_CHRS_MASK | AA_MAY_OPEN | AA_MAY_RENAME |     \
+			  AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_SETCRED | \
+			  AA_MAY_GETCRED | AA_MAY_CHMOD | AA_MAY_CHOWN | \
+			  AA_MAY_CHGRP | AA_MAY_MPROT | AA_MAY_SNAPSHOT | \
+			  AA_MAY_STACK | AA_MAY_ONEXEC |		\
+			  AA_MAY_CHANGE_PROFILE | AA_MAY_CHANGEHAT)
+
+extern const char aa_file_perm_chrs[];
+extern const char *aa_file_perm_names[];
+
+struct aa_perms {
+	u32 allow;
+	u32 audit;	/* set only when allow is set */
+
+	u32 deny;	/* explicit deny, or conflict if allow also set */
+	u32 quiet;	/* set only when ~allow | deny */
+	u32 kill;	/* set only when ~allow | deny */
+	u32 stop;	/* set only when ~allow | deny */
+
+	u32 complain;	/* accumulates; only used when ~allow & ~deny */
+	u32 cond;	/* set only when ~allow and ~deny */
+
+	u32 hide;	/* set only when ~allow | deny */
+	u32 prompt;	/* accumulates; only used when ~allow & ~deny */
+
+	/* Reserved:
+	 * u32 subtree;	/ * set only when allow is set * /
+	 */
+	u16 xindex;
+};
+
+#define ALL_PERMS_MASK 0xffffffff
+extern struct aa_perms nullperms;
+extern struct aa_perms allperms;
+
+
+#define xcheck(FN1, FN2)	\
+({				\
+	int e, error = FN1;	\
+	e = FN2;		\
+	if (e)			\
+		error = e;	\
+	error;			\
+})
+
+
+/*
+ * TODO: update for labels pointing to labels instead of profiles
+ * TODO: optimize the walk, currently does subwalk of L2 for each P in L1
+ * TODO: this does not allow for a compound label check
+ */
+#define xcheck_ns_profile_profile(P1, P2, FN, args...)		\
+({								\
+	int ____e = 0;						\
+	if (P1->ns == P2->ns)					\
+		____e = FN((P1), (P2), args);			\
+	(____e);						\
+})
+
+#define xcheck_ns_profile_label(P, L, FN, args...)		\
+({								\
+	struct aa_profile *__p2;				\
+	fn_for_each((L), __p2,					\
+		    xcheck_ns_profile_profile((P), __p2, (FN), args));	\
+})
+
+#define xcheck_ns_labels(L1, L2, FN, args...)			\
+({								\
+	struct aa_profile *__p1;				\
+	fn_for_each((L1), __p1, FN(__p1, (L2), args));		\
+})
+
+/* Do the cross check but applying FN at the profiles level */
+#define xcheck_labels_profiles(L1, L2, FN, args...)		\
+	xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
+
+
+void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
+void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+			u32 chrsmask, const char **names, u32 namesmask);
+void aa_apply_modes_to_perms(struct aa_profile *profile,
+			     struct aa_perms *perms);
+void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
+		      struct aa_perms *perms);
+void aa_perms_accum(struct aa_perms *accum, struct aa_perms *addend);
+void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend);
+void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label,
+			    int type, u32 request, struct aa_perms *perms);
+int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+			  u32 request, int type, u32 *deny,
+			  struct common_audit_data *sa);
+int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+		   u32 request, struct common_audit_data *sa,
+		   void (*cb)(struct audit_buffer *, void *));
+#endif /* __AA_PERM_H */
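
The helpers at the bottom of perms.h are meant to be chained: compute an aa_perms set for a request, fold the profile's enforcement mode into it, then hand the result to aa_check_perms() which audits and returns the error. A sketch of that sequence, mirroring the ptrace path added to ipc.c further down; the audit callback name is a placeholder:

	struct aa_perms perms = { };

	aa_profile_match_label(profile, peer_label, AA_CLASS_PTRACE, request,
			       &perms);
	aa_apply_modes_to_perms(profile, &perms);
	return aa_check_perms(profile, &perms, request, sa, example_audit_cb);
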
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 67bc96a..17fe41a 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -29,6 +29,8 @@
 #include "domain.h"
 #include "file.h"
 #include "lib.h"
+#include "label.h"
+#include "perms.h"
 #include "resource.h"
 
 
@@ -47,9 +49,9 @@ extern const char *const aa_profile_mode_names[];
 
 #define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL)
 
-#define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT)
+#define PROFILE_IS_HAT(_profile) ((_profile)->label.flags & FLAG_HAT)
 
-#define profile_is_stale(_profile) ((_profile)->flags & PFLAG_STALE)
+#define profile_is_stale(_profile) (label_is_stale(&(_profile)->label))
 
 #define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
 
@@ -66,22 +68,6 @@ enum profile_mode {
 	APPARMOR_UNCONFINED,	/* profile set to unconfined */
 };
 
-enum profile_flags {
-	PFLAG_HAT = 1,			/* profile is a hat */
-	PFLAG_NULL = 4,			/* profile is null learning profile */
-	PFLAG_IX_ON_NAME_ERROR = 8,	/* fallback to ix on name lookup fail */
-	PFLAG_IMMUTABLE = 0x10,		/* don't allow changes/replacement */
-	PFLAG_USER_DEFINED = 0x20,	/* user based profile - lower privs */
-	PFLAG_NO_LIST_REF = 0x40,	/* list doesn't keep profile ref */
-	PFLAG_OLD_NULL_TRANS = 0x100,	/* use // as the null transition */
-	PFLAG_STALE = 0x200,		/* profile replaced/removed */
-	PFLAG_NS_COUNT = 0x400,		/* carries NS ref count */
-
-	/* These flags must correspond with PATH_flags */
-	PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead delegate deleted */
-};
-
-struct aa_profile;
 
 /* struct aa_policydb - match engine for a policy
  * dfa: dfa pattern match
@@ -94,11 +80,6 @@ struct aa_policydb {
 
 };
 
-struct aa_proxy {
-	struct kref count;
-	struct aa_profile __rcu *profile;
-};
-
 /* struct aa_data - generic data structure
  * key: name for retrieving this data
  * size: size of data in bytes
@@ -115,19 +96,17 @@ struct aa_data {
 
 /* struct aa_profile - basic confinement data
  * @base - base components of the profile (name, refcount, lists, lock ...)
- * @count: reference count of the obj
- * @rcu: rcu head used when removing from @list
+ * @label - label this profile is an extension of
  * @parent: parent of profile
  * @ns: namespace the profile is in
- * @proxy: is set to the profile that replaced this profile
  * @rename: optional profile name that this profile renamed
  * @attach: human readable attachment string
  * @xmatch: optional extended matching for unconfined executables names
  * @xmatch_len: xmatch prefix len, used to determine xmatch priority
  * @audit: the auditing mode of the profile
  * @mode: the enforcement mode of the profile
- * @flags: flags controlling profile behavior
  * @path_flags: flags controlling path generation behavior
+ * @disconnected: what to prepend if attach_disconnected is specified
  * @size: the memory consumed by this profiles rules
  * @policy: general match rules governing policy
  * @file: The set of rules governing basic file access and domain transitions
@@ -143,8 +122,6 @@ struct aa_data {
  * used to determine profile attachment against unconfined tasks.  All other
  * attachments are determined by profile X transition rules.
  *
- * The @proxy struct is write protected by the profile lock.
- *
  * Profiles have a hierarchy where hats and children profiles keep
  * a reference to their parent.
  *
@@ -154,12 +131,9 @@ struct aa_data {
  */
 struct aa_profile {
 	struct aa_policy base;
-	struct kref count;
-	struct rcu_head rcu;
 	struct aa_profile __rcu *parent;
 
 	struct aa_ns *ns;
-	struct aa_proxy *proxy;
 	const char *rename;
 
 	const char *attach;
@@ -167,8 +141,8 @@ struct aa_profile {
 	int xmatch_len;
 	enum audit_mode audit;
 	long mode;
-	long flags;
 	u32 path_flags;
+	const char *disconnected;
 	int size;
 
 	struct aa_policydb policy;
@@ -181,17 +155,24 @@ struct aa_profile {
 	char *dirname;
 	struct dentry *dents[AAFS_PROF_SIZEOF];
 	struct rhashtable *data;
+	struct aa_label label;
 };
 
 extern enum profile_mode aa_g_profile_mode;
 
-void __aa_update_proxy(struct aa_profile *orig, struct aa_profile *new);
+#define AA_MAY_LOAD_POLICY	AA_MAY_APPEND
+#define AA_MAY_REPLACE_POLICY	AA_MAY_WRITE
+#define AA_MAY_REMOVE_POLICY	AA_MAY_DELETE
+
+#define profiles_ns(P) ((P)->ns)
+#define name_is_shared(A, B) ((A)->hname && (A)->hname == (B)->hname)
 
 void aa_add_profile(struct aa_policy *common, struct aa_profile *profile);
 
 
 void aa_free_proxy_kref(struct kref *kref);
-struct aa_profile *aa_alloc_profile(const char *name, gfp_t gfp);
+struct aa_profile *aa_alloc_profile(const char *name, struct aa_proxy *proxy,
+				    gfp_t gfp);
 struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 				       const char *base, gfp_t gfp);
 void aa_free_profile(struct aa_profile *profile);
@@ -200,21 +181,44 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
 struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname,
 				      size_t n);
 struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *name);
-struct aa_profile *aa_fqlookupn_profile(struct aa_profile *base,
+struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
 					const char *fqname, size_t n);
 struct aa_profile *aa_match_profile(struct aa_ns *ns, const char *name);
 
-ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
-			    bool noreplace, struct aa_loaddata *udata);
-ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_profile *profile,
-			    char *name, size_t size);
+ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_label *label,
+			    u32 mask, struct aa_loaddata *udata);
+ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_label *label,
+			   char *name, size_t size);
 void __aa_profile_list_release(struct list_head *head);
 
 #define PROF_ADD 1
 #define PROF_REPLACE 0
 
-#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
+#define profile_unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
 
+/**
+ * aa_get_newest_profile - simple wrapper fn to wrap the label version
+ * @p: profile (NOT NULL)
+ *
+ * Returns refcount to newest version of the profile (maybe @p)
+ *
+ * Requires: @p must be held with a valid refcount
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+	return labels_profile(aa_get_newest_label(&p->label));
+}
+
+#define PROFILE_MEDIATES(P, T)  ((P)->policy.start[(T)])
+/* safe version of POLICY_MEDIATES for full range input */
+static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
+						 unsigned char class)
+{
+	if (profile->policy.dfa)
+		return aa_dfa_match_len(profile->policy.dfa,
+					profile->policy.start[0], &class, 1);
+	return 0;
+}
 
 /**
  * aa_get_profile - increment refcount on profile @p
@@ -226,7 +230,7 @@ void __aa_profile_list_release(struct list_head *head);
 static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
 {
 	if (p)
-		kref_get(&(p->count));
+		kref_get(&(p->label.count));
 
 	return p;
 }
@@ -240,7 +244,7 @@ static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
  */
 static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
 {
-	if (p && kref_get_unless_zero(&p->count))
+	if (p && kref_get_unless_zero(&p->label.count))
 		return p;
 
 	return NULL;
@@ -260,53 +264,20 @@ static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
 	rcu_read_lock();
 	do {
 		c = rcu_dereference(*p);
-	} while (c && !kref_get_unless_zero(&c->count));
+	} while (c && !kref_get_unless_zero(&c->label.count));
 	rcu_read_unlock();
 
 	return c;
 }
 
 /**
- * aa_get_newest_profile - find the newest version of @profile
- * @profile: the profile to check for newer versions of
- *
- * Returns: refcounted newest version of @profile taking into account
- *          replacement, renames and removals
- *          return @profile.
- */
-static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
-{
-	if (!p)
-		return NULL;
-
-	if (profile_is_stale(p))
-		return aa_get_profile_rcu(&p->proxy->profile);
-
-	return aa_get_profile(p);
-}
-
-/**
  * aa_put_profile - decrement refcount on profile @p
  * @p: profile  (MAYBE NULL)
  */
 static inline void aa_put_profile(struct aa_profile *p)
 {
 	if (p)
-		kref_put(&p->count, aa_free_profile_kref);
-}
-
-static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *p)
-{
-	if (p)
-		kref_get(&(p->count));
-
-	return p;
-}
-
-static inline void aa_put_proxy(struct aa_proxy *p)
-{
-	if (p)
-		kref_put(&p->count, aa_free_proxy_kref);
+		kref_put(&p->label.count, aa_label_kref);
 }
 
 static inline int AUDIT_MODE(struct aa_profile *profile)
@@ -319,7 +290,7 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
 
 bool policy_view_capable(struct aa_ns *ns);
 bool policy_admin_capable(struct aa_ns *ns);
-int aa_may_manage_policy(struct aa_profile *profile, struct aa_ns *ns,
-			 const char *op);
+int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns,
+			 u32 mask);
 
 #endif /* __AA_POLICY_H */
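
With struct aa_label embedded in struct aa_profile, a profile's lifetime is simply the lifetime of its one-entry label: aa_get_profile()/aa_put_profile() now operate on p->label.count, and labels_profile() goes back from that label to the profile. A small sketch of moving between the two views; the profile name is illustrative only:

	struct aa_profile *p = aa_lookup_profile(ns, "example_profile");

	if (p) {
		/* &p->label and p refer to the same refcounted object */
		struct aa_label *l = aa_get_label(&p->label);

		AA_BUG(labels_profile(l) != p);
		aa_put_label(l);
		aa_put_profile(p);
	}
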
diff --git a/security/apparmor/include/policy_ns.h b/security/apparmor/include/policy_ns.h
index 89cffdd..9605f18 100644
--- a/security/apparmor/include/policy_ns.h
+++ b/security/apparmor/include/policy_ns.h
@@ -19,6 +19,7 @@
 
 #include "apparmor.h"
 #include "apparmorfs.h"
+#include "label.h"
 #include "policy.h"
 
 
@@ -68,6 +69,11 @@ struct aa_ns {
 	atomic_t uniq_null;
 	long uniq_id;
 	int level;
+	long revision;
+	wait_queue_head_t wait;
+
+	struct aa_labelset labels;
+	struct list_head rawdata_list;
 
 	struct dentry *dents[AAFS_NS_SIZEOF];
 };
@@ -76,6 +82,8 @@ extern struct aa_ns *root_ns;
 
 extern const char *aa_hidden_ns_name;
 
+#define ns_unconfined(NS) (&(NS)->unconfined->label)
+
 bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns);
 const char *aa_ns_name(struct aa_ns *parent, struct aa_ns *child, bool subns);
 void aa_free_ns(struct aa_ns *ns);
@@ -85,6 +93,8 @@ void aa_free_ns_kref(struct kref *kref);
 
 struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name);
 struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n);
+struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n);
+struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n);
 struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
 				     struct dentry *dir);
 struct aa_ns *aa_prepare_ns(struct aa_ns *root, const char *name);
@@ -144,4 +154,15 @@ static inline struct aa_ns *__aa_find_ns(struct list_head *head,
 	return __aa_findn_ns(head, name, strlen(name));
 }
 
+static inline struct aa_ns *__aa_lookup_ns(struct aa_ns *base,
+					   const char *hname)
+{
+	return __aa_lookupn_ns(base, hname, strlen(hname));
+}
+
+static inline struct aa_ns *aa_lookup_ns(struct aa_ns *view, const char *name)
+{
+	return aa_lookupn_ns(view, name, strlen(name));
+}
+
 #endif /* AA_NAMESPACE_H */
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index 4c1319e..be6cd69 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -17,6 +17,8 @@
 
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/dcache.h>
+#include <linux/workqueue.h>
 
 struct aa_load_ent {
 	struct list_head list;
@@ -36,26 +38,84 @@ struct aa_load_ent *aa_load_ent_alloc(void);
 #define PACKED_MODE_KILL	2
 #define PACKED_MODE_UNCONFINED	3
 
-/* struct aa_loaddata - buffer of policy load data set */
+struct aa_ns;
+
+enum {
+	AAFS_LOADDATA_ABI = 0,
+	AAFS_LOADDATA_REVISION,
+	AAFS_LOADDATA_HASH,
+	AAFS_LOADDATA_DATA,
+	AAFS_LOADDATA_DIR,		/* must be last actual entry */
+	AAFS_LOADDATA_NDENTS		/* count of entries */
+};
+
+/*
+ * struct aa_loaddata - buffer of policy raw_data set
+ *
+ * There is no loaddata ref for being on the ns list, nor a ref from
+ * d_inode(@dentry). When grabbing a ref from either of these, @ns->lock
+ * must be held and __aa_get_loaddata() must be used, with its return
+ * value checked; if NULL the loaddata is already being reaped and should
+ * be considered dead.
+ */
 struct aa_loaddata {
 	struct kref count;
+	struct list_head list;
+	struct work_struct work;
+	struct dentry *dents[AAFS_LOADDATA_NDENTS];
+	struct aa_ns *ns;
+	char *name;
 	size_t size;
+	long revision;			/* the ns policy revision this caused */
 	int abi;
 	unsigned char *hash;
+
 	char data[];
 };
 
 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh, const char **ns);
 
+/**
+ * __aa_get_loaddata - get a reference count to uncounted data reference
+ * @data: reference to get a count on
+ *
+ * Returns: pointer to reference OR NULL if race is lost and reference is
+ *          being reaped.
+ * Requires: @data->ns->lock held, and the return code MUST be checked
+ *
+ * Use only from inode->i_private and @data->list found references
+ */
+static inline struct aa_loaddata *
+__aa_get_loaddata(struct aa_loaddata *data)
+{
+	if (data && kref_get_unless_zero(&(data->count)))
+		return data;
+
+	return NULL;
+}
+
+/**
+ * aa_get_loaddata - get a reference count from a counted data reference
+ * @data: reference to get a count on
+ *
+ * Returns: pointer to reference
+ * Requires: @data to have a valid reference count on it. It is a bug
+ *           if the race to reap can be encountered when it is used.
+ */
 static inline struct aa_loaddata *
 aa_get_loaddata(struct aa_loaddata *data)
 {
-	if (data)
-		kref_get(&(data->count));
-	return data;
+	struct aa_loaddata *tmp = __aa_get_loaddata(data);
+
+	AA_BUG(data && !tmp);
+
+	return tmp;
 }
 
+void __aa_loaddata_update(struct aa_loaddata *data, long revision);
+bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r);
 void aa_loaddata_kref(struct kref *kref);
+struct aa_loaddata *aa_loaddata_alloc(size_t size);
 static inline void aa_put_loaddata(struct aa_loaddata *data)
 {
 	if (data)
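
Raw policy load data is now kept on a per-namespace list without an implicit reference, so lookups must hold @ns->lock and use the checked __aa_get_loaddata(). A sketch of that pattern; the mutex_lock() call assumes @ns->lock is a mutex and the surrounding function is hypothetical:

	struct aa_loaddata *ent, *found = NULL;

	mutex_lock(&ns->lock);
	list_for_each_entry(ent, &ns->rawdata_list, list) {
		found = __aa_get_loaddata(ent);
		if (found)	/* NULL means the entry is being reaped */
			break;
	}
	mutex_unlock(&ns->lock);

	if (found) {
		/* ... use the counted reference ... */
		aa_put_loaddata(found);
	}
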
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
index 6bd5f33..c8fd99c 100644
--- a/security/apparmor/include/procattr.h
+++ b/security/apparmor/include/procattr.h
@@ -15,11 +15,7 @@
 #ifndef __AA_PROCATTR_H
 #define __AA_PROCATTR_H
 
-#define AA_DO_TEST 1
-#define AA_ONEXEC  1
-
-int aa_getprocattr(struct aa_profile *profile, char **string);
-int aa_setprocattr_changehat(char *args, size_t size, int test);
-int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test);
+int aa_getprocattr(struct aa_label *label, char **string);
+int aa_setprocattr_changehat(char *args, size_t size, int flags);
 
 #endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index d3f4cf0..76f1586 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -34,13 +34,13 @@ struct aa_rlimit {
 	struct rlimit limits[RLIM_NLIMITS];
 };
 
-extern struct aa_fs_entry aa_fs_entry_rlimit[];
+extern struct aa_sfs_entry aa_sfs_entry_rlimit[];
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
 		      unsigned int resource, struct rlimit *new_rlim);
 
-void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
+void __aa_transition_rlimits(struct aa_label *old, struct aa_label *new);
 
 static inline void aa_free_rlimit_rules(struct aa_rlimit *rlims)
 {
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index edac790..11e66b5 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -4,7 +4,7 @@
  * This file contains AppArmor ipc mediation
  *
  * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2017 Canonical Ltd.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -21,87 +21,103 @@
 #include "include/policy.h"
 #include "include/ipc.h"
 
-/* call back to audit ptrace fields */
-static void audit_cb(struct audit_buffer *ab, void *va)
+/**
+ * audit_ptrace_mask - convert mask to permission string
+ * @buffer: buffer to write string to (NOT NULL)
+ * @mask: permission mask to convert
+ */
+static void audit_ptrace_mask(struct audit_buffer *ab, u32 mask)
 {
-	struct common_audit_data *sa = va;
-	audit_log_format(ab, " peer=");
-	audit_log_untrustedstring(ab, aad(sa)->peer->base.hname);
+	switch (mask) {
+	case MAY_READ:
+		audit_log_string(ab, "read");
+		break;
+	case MAY_WRITE:
+		audit_log_string(ab, "trace");
+		break;
+	case AA_MAY_BE_READ:
+		audit_log_string(ab, "readby");
+		break;
+	case AA_MAY_BE_TRACED:
+		audit_log_string(ab, "tracedby");
+		break;
+	}
 }
 
-/**
- * aa_audit_ptrace - do auditing for ptrace
- * @profile: profile being enforced  (NOT NULL)
- * @target: profile being traced (NOT NULL)
- * @error: error condition
- *
- * Returns: %0 or error code
- */
-static int aa_audit_ptrace(struct aa_profile *profile,
-			   struct aa_profile *target, int error)
+/* call back to audit ptrace fields */
+static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
 {
-	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
+	struct common_audit_data *sa = va;
 
-	aad(&sa)->peer = target;
-	aad(&sa)->error = error;
+	if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
+		audit_log_format(ab, " requested_mask=");
+		audit_ptrace_mask(ab, aad(sa)->request);
 
-	return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
+		if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
+			audit_log_format(ab, " denied_mask=");
+			audit_ptrace_mask(ab, aad(sa)->denied);
+		}
+	}
+	audit_log_format(ab, " peer=");
+	aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+			FLAGS_NONE, GFP_ATOMIC);
+}
+
+/* TODO: conditionals */
+static int profile_ptrace_perm(struct aa_profile *profile,
+			       struct aa_profile *peer, u32 request,
+			       struct common_audit_data *sa)
+{
+	struct aa_perms perms = { };
+
+	/* need because of peer in cross check */
+	if (profile_unconfined(profile) ||
+	    !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE))
+		return 0;
+
+	aad(sa)->peer = &peer->label;
+	aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request,
+			       &perms);
+	aa_apply_modes_to_perms(profile, &perms);
+	return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
+}
+
+static int cross_ptrace_perm(struct aa_profile *tracer,
+			     struct aa_profile *tracee, u32 request,
+			     struct common_audit_data *sa)
+{
+	if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE))
+		return xcheck(profile_ptrace_perm(tracer, tracee, request, sa),
+			      profile_ptrace_perm(tracee, tracer,
+						  request << PTRACE_PERM_SHIFT,
+						  sa));
+	/* policy uses the old style capability check for ptrace */
+	if (profile_unconfined(tracer) || tracer == tracee)
+		return 0;
+
+	aad(sa)->label = &tracer->label;
+	aad(sa)->peer = &tracee->label;
+	aad(sa)->request = 0;
+	aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1);
+
+	return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
 }
 
 /**
  * aa_may_ptrace - test if tracer task can trace the tracee
- * @tracer: profile of the task doing the tracing  (NOT NULL)
- * @tracee: task to be traced
- * @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH
+ * @tracer: label of the task doing the tracing  (NOT NULL)
+ * @tracee: task label to be traced
+ * @request: permission request
  *
  * Returns: %0 else error code if permission denied or error
  */
-int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
-		  unsigned int mode)
+int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
+		  u32 request)
 {
-	/* TODO: currently only based on capability, not extended ptrace
-	 *       rules,
-	 *       Test mode for PTRACE_MODE_READ || PTRACE_MODE_ATTACH
-	 */
+	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE);
 
-	if (unconfined(tracer) || tracer == tracee)
-		return 0;
-	/* log this capability request */
-	return aa_capable(tracer, CAP_SYS_PTRACE, 1);
+	return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm,
+				      request, &sa);
 }
 
-/**
- * aa_ptrace - do ptrace permission check and auditing
- * @tracer: task doing the tracing (NOT NULL)
- * @tracee: task being traced (NOT NULL)
- * @mode: ptrace mode either PTRACE_MODE_READ || PTRACE_MODE_ATTACH
- *
- * Returns: %0 else error code if permission denied or error
- */
-int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
-	      unsigned int mode)
-{
-	/*
-	 * tracer can ptrace tracee when
-	 * - tracer is unconfined ||
-	 *   - tracer is in complain mode
-	 *   - tracer has rules allowing it to trace tracee currently this is:
-	 *       - confined by the same profile ||
-	 *       - tracer profile has CAP_SYS_PTRACE
-	 */
 
-	struct aa_profile *tracer_p = aa_get_task_profile(tracer);
-	int error = 0;
-
-	if (!unconfined(tracer_p)) {
-		struct aa_profile *tracee_p = aa_get_task_profile(tracee);
-
-		error = aa_may_ptrace(tracer_p, tracee_p, mode);
-		error = aa_audit_ptrace(tracer_p, tracee_p, error);
-
-		aa_put_profile(tracee_p);
-	}
-	aa_put_profile(tracer_p);
-
-	return error;
-}
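
The rewritten aa_may_ptrace() cross checks both directions of the relationship: the tracer's profiles must allow tracing/reading the tracee, and the tracee's profiles must allow being traced/read by the tracer, with the tracee-side request derived via PTRACE_PERM_SHIFT. For one profile pair, the check cross_ptrace_perm() performs is roughly the following sketch (tracer_p and tracee_p are hypothetical profile pointers):

	error = xcheck(profile_ptrace_perm(tracer_p, tracee_p,
					   request, &sa),
		       profile_ptrace_perm(tracee_p, tracer_p,
					   request << PTRACE_PERM_SHIFT, &sa));
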
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
new file mode 100644
index 0000000..e052eab
--- /dev/null
+++ b/security/apparmor/label.c
@@ -0,0 +1,2120 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+#include <linux/seq_file.h>
+#include <linux/sort.h>
+
+#include "include/apparmor.h"
+#include "include/context.h"
+#include "include/label.h"
+#include "include/policy.h"
+#include "include/secid.h"
+
+
+/*
+ * the aa_label represents the set of profiles confining an object
+ *
+ * Labels maintain a reference count to the set of pointers they reference
+ * Labels are ref counted by
+ *   tasks and object via the security field/security context off the field
+ *   code - will take a ref count on a label if it needs the label
+ *          beyond what is possible with an rcu_read_lock.
+ *   profiles - each profile is a label
+ *   secids - a pinned secid will keep a refcount of the label it is
+ *          referencing
+ *   objects - inode, files, sockets, ...
+ *
+ * Labels are not ref counted by the label set, so they may be removed and
+ * freed when no longer in use.
+ *
+ */
+
+#define PROXY_POISON 97
+#define LABEL_POISON 100
+
+static void free_proxy(struct aa_proxy *proxy)
+{
+	if (proxy) {
+		/* proxy->label will not be updated any more as proxy is dead */
+		aa_put_label(rcu_dereference_protected(proxy->label, true));
+		memset(proxy, 0, sizeof(*proxy));
+		proxy->label = (struct aa_label *) PROXY_POISON;
+		kfree(proxy);
+	}
+}
+
+void aa_proxy_kref(struct kref *kref)
+{
+	struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);
+
+	free_proxy(proxy);
+}
+
+struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
+{
+	struct aa_proxy *new;
+
+	new = kzalloc(sizeof(struct aa_proxy), gfp);
+	if (new) {
+		kref_init(&new->count);
+		rcu_assign_pointer(new->label, aa_get_label(label));
+	}
+	return new;
+}
+
+/* requires profile list write lock held */
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
+{
+	struct aa_label *tmp;
+
+	AA_BUG(!orig);
+	AA_BUG(!new);
+	AA_BUG(!write_is_locked(&labels_set(orig)->lock));
+
+	tmp = rcu_dereference_protected(orig->proxy->label,
+					&labels_ns(orig)->lock);
+	rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
+	orig->flags |= FLAG_STALE;
+	aa_put_label(tmp);
+}
+
+static void __proxy_share(struct aa_label *old, struct aa_label *new)
+{
+	struct aa_proxy *proxy = new->proxy;
+
+	new->proxy = aa_get_proxy(old->proxy);
+	__aa_proxy_redirect(old, new);
+	aa_put_proxy(proxy);
+}
+
+
+/**
+ * ns_cmp - compare ns for label set ordering
+ * @a: ns to compare (NOT NULL)
+ * @b: ns to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ *          ==0 if a == b
+ *          >0  if a > b
+ */
+static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
+{
+	int res;
+
+	AA_BUG(!a);
+	AA_BUG(!b);
+	AA_BUG(!a->base.hname);
+	AA_BUG(!b->base.hname);
+
+	if (a == b)
+		return 0;
+
+	res = a->level - b->level;
+	if (res)
+		return res;
+
+	return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * profile_cmp - profile comparison for set ordering
+ * @a: profile to compare (NOT NULL)
+ * @b: profile to compare (NOT NULL)
+ *
+ * Returns: <0  if a < b
+ *          ==0 if a == b
+ *          >0  if a > b
+ */
+static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
+{
+	int res;
+
+	AA_BUG(!a);
+	AA_BUG(!b);
+	AA_BUG(!a->ns);
+	AA_BUG(!b->ns);
+	AA_BUG(!a->base.hname);
+	AA_BUG(!b->base.hname);
+
+	if (a == b || a->base.hname == b->base.hname)
+		return 0;
+	res = ns_cmp(a->ns, b->ns);
+	if (res)
+		return res;
+
+	return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * vec_cmp - profile vector comparison for set ordering
+ * @a: profile vector to compare (NOT NULL)
+ * @an: length of @a
+ * @b: profile vector to compare (NOT NULL)
+ * @bn: length of @b
+ *
+ * Returns: <0  if a < b
+ *          ==0 if a == b
+ *          >0  if a > b
+ */
+static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
+{
+	int i;
+
+	AA_BUG(!a);
+	AA_BUG(!*a);
+	AA_BUG(!b);
+	AA_BUG(!*b);
+	AA_BUG(an <= 0);
+	AA_BUG(bn <= 0);
+
+	for (i = 0; i < an && i < bn; i++) {
+		int res = profile_cmp(a[i], b[i]);
+
+		if (res != 0)
+			return res;
+	}
+
+	return an - bn;
+}
+
+static bool vec_is_stale(struct aa_profile **vec, int n)
+{
+	int i;
+
+	AA_BUG(!vec);
+
+	for (i = 0; i < n; i++) {
+		if (profile_is_stale(vec[i]))
+			return true;
+	}
+
+	return false;
+}
+
+static bool vec_unconfined(struct aa_profile **vec, int n)
+{
+	int i;
+
+	AA_BUG(!vec);
+
+	for (i = 0; i < n; i++) {
+		if (!profile_unconfined(vec[i]))
+			return false;
+	}
+
+	return true;
+}
+
+static int sort_cmp(const void *a, const void *b)
+{
+	return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
+}
+
+/*
+ * assumes vec is sorted
+ * Assumes @vec has null terminator at vec[n], and will null terminate
+ * vec[n - dups]
+ */
+static inline int unique(struct aa_profile **vec, int n)
+{
+	int i, pos, dups = 0;
+
+	AA_BUG(n < 1);
+	AA_BUG(!vec);
+
+	pos = 0;
+	for (i = 1; i < n; i++) {
+		int res = profile_cmp(vec[pos], vec[i]);
+
+		AA_BUG(res > 0, "vec not sorted");
+		if (res == 0) {
+			/* drop duplicate */
+			aa_put_profile(vec[i]);
+			dups++;
+			continue;
+		}
+		pos++;
+		if (dups)
+			vec[pos] = vec[i];
+	}
+
+	AA_BUG(dups < 0);
+
+	return dups;
+}
+
+/**
+ * aa_vec_unique - canonical sort and unique a list of profiles
+ * @vec: list of profiles to sort and merge
+ * @n: number of refcounted profiles in the list (@n > 0)
+ * @flags: flags controlling behavior (e.g. VEC_FLAG_TERMINATE)
+ *
+ * Returns: the number of duplicates eliminated == references put
+ *
+ * If @flags & VEC_FLAG_TERMINATE @vec has null terminator at vec[n], and will
+ * null terminate vec[n - dups]
+ */
+int aa_vec_unique(struct aa_profile **vec, int n, int flags)
+{
+	int i, dups = 0;
+
+	AA_BUG(n < 1);
+	AA_BUG(!vec);
+
+	/* vecs are usually small and inorder, have a fallback for larger */
+	if (n > 8) {
+		sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
+		dups = unique(vec, n);
+		goto out;
+	}
+
+	/* insertion sort + unique in one */
+	for (i = 1; i < n; i++) {
+		struct aa_profile *tmp = vec[i];
+		int pos, j;
+
+		for (pos = i - 1 - dups; pos >= 0; pos--) {
+			int res = profile_cmp(vec[pos], tmp);
+
+			if (res == 0) {
+				/* drop duplicate entry */
+				aa_put_profile(tmp);
+				dups++;
+				goto continue_outer;
+			} else if (res < 0)
+				break;
+		}
+		/* pos is at entry < tmp, or index -1. Set to insert pos */
+		pos++;
+
+		for (j = i - dups; j > pos; j--)
+			vec[j] = vec[j - 1];
+		vec[pos] = tmp;
+continue_outer:
+		;
+	}
+
+	AA_BUG(dups < 0);
+
+out:
+	if (flags & VEC_FLAG_TERMINATE)
+		vec[n - dups] = NULL;
+
+	return dups;
+}
+
+
+static void label_destroy(struct aa_label *label)
+{
+	struct aa_label *tmp;
+
+	AA_BUG(!label);
+
+	if (!label_isprofile(label)) {
+		struct aa_profile *profile;
+		struct label_it i;
+
+		aa_put_str(label->hname);
+
+		label_for_each(i, label, profile) {
+			aa_put_profile(profile);
+			label->vec[i.i] = (struct aa_profile *)
+					   (LABEL_POISON + (long) i.i);
+		}
+	}
+
+	if (rcu_dereference_protected(label->proxy->label, true) == label)
+		rcu_assign_pointer(label->proxy->label, NULL);
+
+	aa_free_secid(label->secid);
+
+	tmp = rcu_dereference_protected(label->proxy->label, true);
+	if (tmp == label)
+		rcu_assign_pointer(label->proxy->label, NULL);
+
+	aa_put_proxy(label->proxy);
+	label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
+}
+
+void aa_label_free(struct aa_label *label)
+{
+	if (!label)
+		return;
+
+	label_destroy(label);
+	kfree(label);
+}
+
+static void label_free_switch(struct aa_label *label)
+{
+	if (label->flags & FLAG_NS_COUNT)
+		aa_free_ns(labels_ns(label));
+	else if (label_isprofile(label))
+		aa_free_profile(labels_profile(label));
+	else
+		aa_label_free(label);
+}
+
+static void label_free_rcu(struct rcu_head *head)
+{
+	struct aa_label *label = container_of(head, struct aa_label, rcu);
+
+	if (label->flags & FLAG_IN_TREE)
+		(void) aa_label_remove(label);
+	label_free_switch(label);
+}
+
+void aa_label_kref(struct kref *kref)
+{
+	struct aa_label *label = container_of(kref, struct aa_label, count);
+	struct aa_ns *ns = labels_ns(label);
+
+	if (!ns) {
+		/* never live, no rcu callback needed, just using the fn */
+		label_free_switch(label);
+		return;
+	}
+	/* TODO: update labels_profile macro so it works here */
+	AA_BUG(label_isprofile(label) &&
+	       on_list_rcu(&label->vec[0]->base.profiles));
+	AA_BUG(label_isprofile(label) &&
+	       on_list_rcu(&label->vec[0]->base.list));
+
+	/* TODO: if compound label and not stale add to reclaim cache */
+	call_rcu(&label->rcu, label_free_rcu);
+}
+
+static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
+{
+	if (label != new)
+		/* need to free directly to break circular ref with proxy */
+		aa_label_free(new);
+	else
+		aa_put_label(new);
+}
+
+bool aa_label_init(struct aa_label *label, int size)
+{
+	AA_BUG(!label);
+	AA_BUG(size < 1);
+
+	label->secid = aa_alloc_secid();
+	if (label->secid == AA_SECID_INVALID)
+		return false;
+
+	label->size = size;			/* doesn't include null */
+	label->vec[size] = NULL;		/* null terminate */
+	kref_init(&label->count);
+	RB_CLEAR_NODE(&label->node);
+
+	return true;
+}
+
+/**
+ * aa_label_alloc - allocate a label with a profile vector of @size length
+ * @size: size of profile vector in the label
+ * @proxy: proxy to use OR null if to allocate a new one
+ * @gfp: memory allocation type
+ *
+ * Returns: new label
+ *     else NULL if failed
+ */
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
+{
+	struct aa_label *new;
+
+	AA_BUG(size < 1);
+
+	/*  + 1 for null terminator entry on vec */
+	new = kzalloc(sizeof(*new) + sizeof(struct aa_profile *) * (size + 1),
+			gfp);
+	AA_DEBUG("%s (%p)\n", __func__, new);
+	if (!new)
+		goto fail;
+
+	if (!aa_label_init(new, size))
+		goto fail;
+
+	if (!proxy) {
+		proxy = aa_alloc_proxy(new, gfp);
+		if (!proxy)
+			goto fail;
+	} else
+		aa_get_proxy(proxy);
+	/* just set new's proxy, don't redirect proxy if it was passed in */
+	new->proxy = proxy;
+
+	return new;
+
+fail:
+	kfree(new);
+
+	return NULL;
+}
+
+
+/**
+ * label_cmp - label comparison for set ordering
+ * @a: label to compare (NOT NULL)
+ * @b: label to compare (NOT NULL)
+ *
+ * Returns: <0  if a < b
+ *          ==0 if a == b
+ *          >0  if a > b
+ */
+static int label_cmp(struct aa_label *a, struct aa_label *b)
+{
+	AA_BUG(!b);
+
+	if (a == b)
+		return 0;
+
+	return vec_cmp(a->vec, a->size, b->vec, b->size);
+}
+
+/* helper fn for label_for_each_confined */
+int aa_label_next_confined(struct aa_label *label, int i)
+{
+	AA_BUG(!label);
+	AA_BUG(i < 0);
+
+	for (; i < label->size; i++) {
+		if (!profile_unconfined(label->vec[i]))
+			return i;
+	}
+
+	return i;
+}
+
+/**
+ * __aa_label_next_not_in_set - return the next profile of @sub not in @set
+ * @I: label iterator
+ * @set: label to test against
+ * @sub: label to test if it is a subset of @set
+ *
+ * Returns: profile in @sub that is not in @set, with iterator set pos after
+ *     else NULL if @sub is a subset of @set
+ */
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+					      struct aa_label *set,
+					      struct aa_label *sub)
+{
+	AA_BUG(!set);
+	AA_BUG(!I);
+	AA_BUG(I->i < 0);
+	AA_BUG(I->i > set->size);
+	AA_BUG(!sub);
+	AA_BUG(I->j < 0);
+	AA_BUG(I->j > sub->size);
+
+	while (I->j < sub->size && I->i < set->size) {
+		int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);
+
+		if (res == 0) {
+			(I->j)++;
+			(I->i)++;
+		} else if (res > 0)
+			(I->i)++;
+		else
+			return sub->vec[(I->j)++];
+	}
+
+	if (I->j < sub->size)
+		return sub->vec[(I->j)++];
+
+	return NULL;
+}
+
+/**
+ * aa_label_is_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * Returns: true if @sub is subset of @set
+ *     else false
+ */
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
+{
+	struct label_it i = { };
+
+	AA_BUG(!set);
+	AA_BUG(!sub);
+
+	if (sub == set)
+		return true;
+
+	return __aa_label_next_not_in_set(&i, set, sub) == NULL;
+}
+
+
+
+/**
+ * __label_remove - remove @label from the label set
+ * @label: label to remove
+ * @new: label to redirect to
+ *
+ * Requires: labels_set(@label)->lock write_lock
+ * Returns:  true if the label was in the tree and removed
+ */
+static bool __label_remove(struct aa_label *label, struct aa_label *new)
+{
+	struct aa_labelset *ls = labels_set(label);
+
+	AA_BUG(!ls);
+	AA_BUG(!label);
+	AA_BUG(!write_is_locked(&ls->lock));
+
+	if (new)
+		__aa_proxy_redirect(label, new);
+
+	if (!label_is_stale(label))
+		__label_make_stale(label);
+
+	if (label->flags & FLAG_IN_TREE) {
+		rb_erase(&label->node, &ls->root);
+		label->flags &= ~FLAG_IN_TREE;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * __label_replace - replace @old with @new in label set
+ * @old: label to remove from label set
+ * @new: label to replace @old with
+ *
+ * Requires: labels_set(@old)->lock write_lock
+ *           valid ref count be held on @new
+ * Returns: true if @old was in set and replaced by @new
+ *
+ * Note: current implementation requires the label set be ordered in such a
+ *       way that @new directly replaces @old's position in the set (i.e.
+ *       using pointer comparison of the label address would not work)
+ */
+static bool __label_replace(struct aa_label *old, struct aa_label *new)
+{
+	struct aa_labelset *ls = labels_set(old);
+
+	AA_BUG(!ls);
+	AA_BUG(!old);
+	AA_BUG(!new);
+	AA_BUG(!write_is_locked(&ls->lock));
+	AA_BUG(new->flags & FLAG_IN_TREE);
+
+	if (!label_is_stale(old))
+		__label_make_stale(old);
+
+	if (old->flags & FLAG_IN_TREE) {
+		rb_replace_node(&old->node, &new->node, &ls->root);
+		old->flags &= ~FLAG_IN_TREE;
+		new->flags |= FLAG_IN_TREE;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * __label_insert - attempt to insert @label into a label set
+ * @ls: set of labels to insert @l into (NOT NULL)
+ * @label: new label to insert (NOT NULL)
+ * @replace: whether insertion should replace existing entry that is not stale
+ *
+ * Requires: @ls->lock
+ *           caller to hold a valid ref on @label
+ *           if @replace is true, @label has a preallocated proxy associated
+ * Returns: @label if successful in inserting @label - with additional refcount
+ *          else ref counted equivalent label that is already in the set;
+ *          the else condition only happens if @replace is false
+ */
+static struct aa_label *__label_insert(struct aa_labelset *ls,
+				       struct aa_label *label, bool replace)
+{
+	struct rb_node **new, *parent = NULL;
+
+	AA_BUG(!ls);
+	AA_BUG(!label);
+	AA_BUG(labels_set(label) != ls);
+	AA_BUG(!write_is_locked(&ls->lock));
+	AA_BUG(label->flags & FLAG_IN_TREE);
+
+	/* Figure out where to put new node */
+	new = &ls->root.rb_node;
+	while (*new) {
+		struct aa_label *this = rb_entry(*new, struct aa_label, node);
+		int result = label_cmp(label, this);
+
+		parent = *new;
+		if (result == 0) {
+			/* !__aa_get_label means queued for destruction,
+			 * so replace in place, however the label has
+			 * died before the replacement so do not share
+			 * the proxy
+			 */
+			if (!replace && !label_is_stale(this)) {
+				if (__aa_get_label(this))
+					return this;
+			} else
+				__proxy_share(this, label);
+			AA_BUG(!__label_replace(this, label));
+			return aa_get_label(label);
+		} else if (result < 0)
+			new = &((*new)->rb_left);
+		else /* (result > 0) */
+			new = &((*new)->rb_right);
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&label->node, parent, new);
+	rb_insert_color(&label->node, &ls->root);
+	label->flags |= FLAG_IN_TREE;
+
+	return aa_get_label(label);
+}
+
+/**
+ * __vec_find - find label that matches @vec in label set
+ * @vec: vec of profiles to find matching label for (NOT NULL)
+ * @n: length of @vec
+ *
+ * Requires: vec_labelset(@vec)->lock held
+ *           caller to hold valid refs on the profiles in @vec
+ *
+ * Returns: ref counted label if a label matching @vec is in the tree
+ *     else NULL if no matching label is in the tree
+ */
+static struct aa_label *__vec_find(struct aa_profile **vec, int n)
+{
+	struct rb_node *node;
+
+	AA_BUG(!vec);
+	AA_BUG(!*vec);
+	AA_BUG(n <= 0);
+
+	node = vec_labelset(vec, n)->root.rb_node;
+	while (node) {
+		struct aa_label *this = rb_entry(node, struct aa_label, node);
+		int result = vec_cmp(this->vec, this->size, vec, n);
+
+		if (result > 0)
+			node = node->rb_left;
+		else if (result < 0)
+			node = node->rb_right;
+		else
+			return __aa_get_label(this);
+	}
+
+	return NULL;
+}
+
+/**
+ * __label_find - find label @label in label set
+ * @label: label to find (NOT NULL)
+ *
+ * Requires: labels_set(@label)->lock held
+ *           caller to hold a valid ref on @label
+ *
+ * Returns: ref counted @label if @label is in tree OR
+ *          ref counted label that is equiv to @label in tree
+ *     else NULL if @label or equiv is not in tree
+ */
+static struct aa_label *__label_find(struct aa_label *label)
+{
+	AA_BUG(!label);
+
+	return __vec_find(label->vec, label->size);
+}
+
+
+/**
+ * aa_label_remove - remove a label from the labelset
+ * @label: label to remove
+ *
+ * Returns: true if @label was removed from the tree
+ *     else @label was not in tree so it could not be removed
+ */
+bool aa_label_remove(struct aa_label *label)
+{
+	struct aa_labelset *ls = labels_set(label);
+	unsigned long flags;
+	bool res;
+
+	AA_BUG(!ls);
+
+	write_lock_irqsave(&ls->lock, flags);
+	res = __label_remove(label, ns_unconfined(labels_ns(label)));
+	write_unlock_irqrestore(&ls->lock, flags);
+
+	return res;
+}
+
+/**
+ * aa_label_replace - replace a label @old with a new version @new
+ * @old: label to replace
+ * @new: label replacing @old
+ *
+ * Returns: true if @old was in tree and replaced
+ *     else @old was not in tree, and @new was not inserted
+ */
+bool aa_label_replace(struct aa_label *old, struct aa_label *new)
+{
+	unsigned long flags;
+	bool res;
+
+	if (name_is_shared(old, new) && labels_ns(old) == labels_ns(new)) {
+		write_lock_irqsave(&labels_set(old)->lock, flags);
+		if (old->proxy != new->proxy)
+			__proxy_share(old, new);
+		else
+			__aa_proxy_redirect(old, new);
+		res = __label_replace(old, new);
+		write_unlock_irqrestore(&labels_set(old)->lock, flags);
+	} else {
+		struct aa_label *l;
+		struct aa_labelset *ls = labels_set(old);
+
+		write_lock_irqsave(&ls->lock, flags);
+		res = __label_remove(old, new);
+		if (labels_ns(old) != labels_ns(new)) {
+			write_unlock_irqrestore(&ls->lock, flags);
+			ls = labels_set(new);
+			write_lock_irqsave(&ls->lock, flags);
+		}
+		l = __label_insert(ls, new, true);
+		res = (l == new);
+		write_unlock_irqrestore(&ls->lock, flags);
+		aa_put_label(l);
+	}
+
+	return res;
+}
+
+/**
+ * vec_find - find a label matching the profile vector @vec in the label set
+ * @vec: array of profiles to find equiv label for (NOT NULL)
+ * @n: length of @vec
+ *
+ * Returns: refcounted label if @vec equiv is in tree
+ *     else NULL if @vec equiv is not in tree
+ */
+static struct aa_label *vec_find(struct aa_profile **vec, int n)
+{
+	struct aa_labelset *ls;
+	struct aa_label *label;
+	unsigned long flags;
+
+	AA_BUG(!vec);
+	AA_BUG(!*vec);
+	AA_BUG(n <= 0);
+
+	ls = vec_labelset(vec, n);
+	read_lock_irqsave(&ls->lock, flags);
+	label = __vec_find(vec, n);
+	read_unlock_irqrestore(&ls->lock, flags);
+
+	return label;
+}
+
+/* requires sort and merge done first */
+static struct aa_label *vec_create_and_insert_label(struct aa_profile **vec,
+						    int len, gfp_t gfp)
+{
+	struct aa_label *label = NULL;
+	struct aa_labelset *ls;
+	unsigned long flags;
+	struct aa_label *new;
+	int i;
+
+	AA_BUG(!vec);
+
+	if (len == 1)
+		return aa_get_label(&vec[0]->label);
+
+	ls = labels_set(&vec[len - 1]->label);
+
+	/* TODO: enable when read side is lockless
+	 * check if label exists before taking locks
+	 */
+	new = aa_label_alloc(len, NULL, gfp);
+	if (!new)
+		return NULL;
+
+	for (i = 0; i < len; i++)
+		new->vec[i] = aa_get_profile(vec[i]);
+
+	write_lock_irqsave(&ls->lock, flags);
+	label = __label_insert(ls, new, false);
+	write_unlock_irqrestore(&ls->lock, flags);
+	label_free_or_put_new(label, new);
+
+	return label;
+}
+
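+/**
+ * aa_vec_find_or_create_label - find label for @vec, creating it if needed
+ * @vec: sorted and merged profile vector (NOT NULL)
+ * @len: length of @vec
+ * @gfp: memory allocation type
+ *
+ * Returns: refcounted label matching @vec
+ *     else NULL on failure
+ */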
+struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
+					     gfp_t gfp)
+{
+	struct aa_label *label = vec_find(vec, len);
+
+	if (label)
+		return label;
+
+	return vec_create_and_insert_label(vec, len, gfp);
+}
+
+/**
+ * aa_label_find - find label @label in label set
+ * @label: label to find (NOT NULL)
+ *
+ * Requires: caller to hold a valid ref on @label
+ *
+ * Returns: refcounted @label if @label is in tree
+ *          refcounted label that is equiv to @label in tree
+ *     else NULL if @label or equiv is not in tree
+ */
+struct aa_label *aa_label_find(struct aa_label *label)
+{
+	AA_BUG(!label);
+
+	return vec_find(label->vec, label->size);
+}
+
+
+/**
+ * aa_label_insert - insert label @label into @ls or return existing label
+ * @ls: labelset to insert @label into
+ * @label: label to insert
+ *
+ * Requires: caller to hold a valid ref on @label
+ *
+ * Returns: ref counted @label if successful in inserting @label
+ *     else ref counted equivalent label that is already in the set
+ */
+struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label)
+{
+	struct aa_label *l;
+	unsigned long flags;
+
+	AA_BUG(!ls);
+	AA_BUG(!label);
+
+	/* check if label exists before taking lock */
+	if (!label_is_stale(label)) {
+		read_lock_irqsave(&ls->lock, flags);
+		l = __label_find(label);
+		read_unlock_irqrestore(&ls->lock, flags);
+		if (l)
+			return l;
+	}
+
+	write_lock_irqsave(&ls->lock, flags);
+	l = __label_insert(ls, label, false);
+	write_unlock_irqrestore(&ls->lock, flags);
+
+	return l;
+}
+
+
+/**
+ * aa_label_next_in_merge - find the next profile when merging @a and @b
+ * @I: label iterator
+ * @a: label to merge
+ * @b: label to merge
+ *
+ * Returns: next profile
+ *     else null if no more profiles
+ */
+struct aa_profile *aa_label_next_in_merge(struct label_it *I,
+					  struct aa_label *a,
+					  struct aa_label *b)
+{
+	AA_BUG(!a);
+	AA_BUG(!b);
+	AA_BUG(!I);
+	AA_BUG(I->i < 0);
+	AA_BUG(I->i > a->size);
+	AA_BUG(I->j < 0);
+	AA_BUG(I->j > b->size);
+
+	if (I->i < a->size) {
+		if (I->j < b->size) {
+			int res = profile_cmp(a->vec[I->i], b->vec[I->j]);
+
+			if (res > 0)
+				return b->vec[(I->j)++];
+			if (res == 0)
+				(I->j)++;
+		}
+
+		return a->vec[(I->i)++];
+	}
+
+	if (I->j < b->size)
+		return b->vec[(I->j)++];
+
+	return NULL;
+}
+
+/**
+ * label_merge_cmp - cmp of @a merging with @b against @z for set ordering
+ * @a: label to merge then compare (NOT NULL)
+ * @b: label to merge then compare (NOT NULL)
+ * @z: label to compare merge against (NOT NULL)
+ *
+ * Assumes: using the most recent versions of @a, @b, and @z
+ *
+ * Returns: <0  if a < b
+ *          ==0 if a == b
+ *          >0  if a > b
+ */
+static int label_merge_cmp(struct aa_label *a, struct aa_label *b,
+			   struct aa_label *z)
+{
+	struct aa_profile *p = NULL;
+	struct label_it i = { };
+	int k;
+
+	AA_BUG(!a);
+	AA_BUG(!b);
+	AA_BUG(!z);
+
+	for (k = 0;
+	     k < z->size && (p = aa_label_next_in_merge(&i, a, b));
+	     k++) {
+		int res = profile_cmp(p, z->vec[k]);
+
+		if (res != 0)
+			return res;
+	}
+
+	if (p)
+		return 1;
+	else if (k < z->size)
+		return -1;
+	return 0;
+}
+
+/**
+ * label_merge_insert - create a new label by merging @a and @b
+ * @new: preallocated label to merge into (NOT NULL)
+ * @a: label to merge with @b  (NOT NULL)
+ * @b: label to merge with @a  (NOT NULL)
+ *
+ * Requires: preallocated proxy
+ *
+ * Returns: ref counted label either @new if merge is unique
+ *          @a if @b is a subset of @a
+ *          @b if @a is a subset of @b
+ *
+ * NOTE: will not use @new if the merge results in @new == @a or @b
+ *
+ *       Must be used within labelset write lock to avoid racing with
+ *       setting labels stale.
+ */
+static struct aa_label *label_merge_insert(struct aa_label *new,
+					   struct aa_label *a,
+					   struct aa_label *b)
+{
+	struct aa_label *label;
+	struct aa_labelset *ls;
+	struct aa_profile *next;
+	struct label_it i;
+	unsigned long flags;
+	int k = 0, invcount = 0;
+	bool stale = false;
+
+	AA_BUG(!a);
+	AA_BUG(a->size < 0);
+	AA_BUG(!b);
+	AA_BUG(b->size < 0);
+	AA_BUG(!new);
+	AA_BUG(new->size < a->size + b->size);
+
+	label_for_each_in_merge(i, a, b, next) {
+		AA_BUG(!next);
+		if (profile_is_stale(next)) {
+			new->vec[k] = aa_get_newest_profile(next);
+			AA_BUG(!new->vec[k]->label.proxy);
+			AA_BUG(!new->vec[k]->label.proxy->label);
+			if (next->label.proxy != new->vec[k]->label.proxy)
+				invcount++;
+			k++;
+			stale = true;
+		} else
+			new->vec[k++] = aa_get_profile(next);
+	}
+	/* set to actual size which is <= allocated len */
+	new->size = k;
+	new->vec[k] = NULL;
+
+	if (invcount) {
+		new->size -= aa_vec_unique(&new->vec[0], new->size,
+					   VEC_FLAG_TERMINATE);
+		/* TODO: deal with reference labels */
+		if (new->size == 1) {
+			label = aa_get_label(&new->vec[0]->label);
+			return label;
+		}
+	} else if (!stale) {
+		/*
+		 * merge could be same as a || b, note: it is not possible
+		 * for new->size == a->size == b->size unless a == b
+		 */
+		if (k == a->size)
+			return aa_get_label(a);
+		else if (k == b->size)
+			return aa_get_label(b);
+	}
+	if (vec_unconfined(new->vec, new->size))
+		new->flags |= FLAG_UNCONFINED;
+	ls = labels_set(new);
+	write_lock_irqsave(&ls->lock, flags);
+	label = __label_insert(labels_set(new), new, false);
+	write_unlock_irqrestore(&ls->lock, flags);
+
+	return label;
+}
+
+/**
+ * labelset_of_merge - find which labelset a merged label should be inserted into
+ * @a: label to merge and insert
+ * @b: label to merge and insert
+ *
+ * Returns: labelset that the merged label should be inserted into
+ */
+static struct aa_labelset *labelset_of_merge(struct aa_label *a,
+					     struct aa_label *b)
+{
+	struct aa_ns *nsa = labels_ns(a);
+	struct aa_ns *nsb = labels_ns(b);
+
+	if (ns_cmp(nsa, nsb) <= 0)
+		return &nsa->labels;
+	return &nsb->labels;
+}
+
+/**
+ * __label_find_merge - find label that is equiv to merge of @a and @b
+ * @ls: set of labels to search (NOT NULL)
+ * @a: label to merge with @b  (NOT NULL)
+ * @b: label to merge with @a  (NOT NULL)
+ *
+ * Requires: ls->lock read_lock held
+ *
+ * Returns: ref counted label that is equiv to merge of @a and @b
+ *     else NULL if merge of @a and @b is not in set
+ */
+static struct aa_label *__label_find_merge(struct aa_labelset *ls,
+					   struct aa_label *a,
+					   struct aa_label *b)
+{
+	struct rb_node *node;
+
+	AA_BUG(!ls);
+	AA_BUG(!a);
+	AA_BUG(!b);
+
+	if (a == b)
+		return __label_find(a);
+
+	node  = ls->root.rb_node;
+	while (node) {
+		struct aa_label *this = container_of(node, struct aa_label,
+						     node);
+		int result = label_merge_cmp(a, b, this);
+
+		if (result < 0)
+			node = node->rb_left;
+		else if (result > 0)
+			node = node->rb_right;
+		else
+			return __aa_get_label(this);
+	}
+
+	return NULL;
+}
+
+
+/**
+ * aa_label_find_merge - find label that is equiv to merge of @a and @b
+ * @a: label to merge with @b  (NOT NULL)
+ * @b: label to merge with @a  (NOT NULL)
+ *
+ * Requires: labels be fully constructed with a valid ns
+ *
+ * Returns: ref counted label that is equiv to merge of @a and @b
+ *     else NULL if merge of @a and @b is not in set
+ */
+struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
+{
+	struct aa_labelset *ls;
+	struct aa_label *label, *ar = NULL, *br = NULL;
+	unsigned long flags;
+
+	AA_BUG(!a);
+	AA_BUG(!b);
+
+	if (label_is_stale(a))
+		a = ar = aa_get_newest_label(a);
+	if (label_is_stale(b))
+		b = br = aa_get_newest_label(b);
+	ls = labelset_of_merge(a, b);
+	read_lock_irqsave(&ls->lock, flags);
+	label = __label_find_merge(ls, a, b);
+	read_unlock_irqrestore(&ls->lock, flags);
+	aa_put_label(ar);
+	aa_put_label(br);
+
+	return label;
+}
+
+/**
+ * aa_label_merge - attempt to insert new merged label of @a and @b
+ * @ls: set of labels to insert label into (NOT NULL)
+ * @a: label to merge with @b  (NOT NULL)
+ * @b: label to merge with @a  (NOT NULL)
+ * @gfp: memory allocation type
+ *
+ * Requires: caller to hold valid refs on @a and @b
+ *           labels be fully constructed with a valid ns
+ *
+ * Returns: ref counted new label if successful in inserting merge of a & b
+ *     else ref counted equivalent label that is already in the set.
+ *     else NULL if could not create label (-ENOMEM)
+ */
+struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
+				gfp_t gfp)
+{
+	struct aa_label *label = NULL;
+
+	AA_BUG(!a);
+	AA_BUG(!b);
+
+	if (a == b)
+		return aa_get_newest_label(a);
+
+	/* TODO: enable when read side is lockless
+	 * check if label exists before taking locks
+	if (!label_is_stale(a) && !label_is_stale(b))
+		label = aa_label_find_merge(a, b);
+	*/
+
+	if (!label) {
+		struct aa_label *new;
+
+		a = aa_get_newest_label(a);
+		b = aa_get_newest_label(b);
+
+		/* could use label_merge_len(a, b), but requires double
+		 * comparison for small savings
+		 */
+		new = aa_label_alloc(a->size + b->size, NULL, gfp);
+		if (!new)
+			goto out;
+
+		label = label_merge_insert(new, a, b);
+		label_free_or_put_new(label, new);
+out:
+		aa_put_label(a);
+		aa_put_label(b);
+	}
+
+	return label;
+}
+
+static inline bool label_is_visible(struct aa_profile *profile,
+				    struct aa_label *label)
+{
+	return aa_ns_visible(profile->ns, labels_ns(label), true);
+}
+
+/* match a profile and its associated ns component if needed
+ * Assumes the visibility test has already been done.
+ * If a subns profile is not to be matched, it should be prescreened with
+ * the visibility test.
+ */
+static inline unsigned int match_component(struct aa_profile *profile,
+					   struct aa_profile *tp,
+					   unsigned int state)
+{
+	const char *ns_name;
+
+	if (profile->ns == tp->ns)
+		return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
+
+	/* try matching with namespace name and then profile */
+	ns_name = aa_ns_name(profile->ns, tp->ns, true);
+	state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
+	state = aa_dfa_match(profile->policy.dfa, state, ns_name);
+	state = aa_dfa_match_len(profile->policy.dfa, state, ":", 1);
+	return aa_dfa_match(profile->policy.dfa, state, tp->base.hname);
+}
+
+/**
+ * label_compound_match - find perms for full compound label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @state: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: perms struct to set
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for A//&B//&C
+ * @perms should be preinitialized with allperms OR a previous permission
+ *        check to be stacked.
+ */
+static int label_compound_match(struct aa_profile *profile,
+				struct aa_label *label,
+				unsigned int state, bool subns, u32 request,
+				struct aa_perms *perms)
+{
+	struct aa_profile *tp;
+	struct label_it i;
+
+	/* find first subcomponent that is visible */
+	label_for_each(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, state);
+		if (!state)
+			goto fail;
+		goto next;
+	}
+
+	/* no component visible */
+	*perms = allperms;
+	return 0;
+
+next:
+	label_for_each_cont(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = aa_dfa_match(profile->policy.dfa, state, "//&");
+		state = match_component(profile, tp, state);
+		if (!state)
+			goto fail;
+	}
+	aa_compute_perms(profile->policy.dfa, state, perms);
+	aa_apply_modes_to_perms(profile, perms);
+	if ((perms->allow & request) != request)
+		return -EACCES;
+
+	return 0;
+
+fail:
+	*perms = nullperms;
+	return state;
+}
+
+/**
+ * label_components_match - find perms for all subcomponents of a label
+ * @profile: profile to find perms for
+ * @label: label to check access permissions for
+ * @start: state to start match in
+ * @subns: whether to do permission checks on components in a subns
+ * @request: permissions to request
+ * @perms: an initialized perms struct to add accumulation to
+ *
+ * Returns: 0 on success else ERROR
+ *
+ * For the label A//&B//&C this does the perm match for each of A and B and C
+ * @perms should be preinitialized with allperms OR a previous permission
+ *        check to be stacked.
+ */
+static int label_components_match(struct aa_profile *profile,
+				  struct aa_label *label, unsigned int start,
+				  bool subns, u32 request,
+				  struct aa_perms *perms)
+{
+	struct aa_profile *tp;
+	struct label_it i;
+	struct aa_perms tmp;
+	unsigned int state = 0;
+
+	/* find first subcomponent to test */
+	label_for_each(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, start);
+		if (!state)
+			goto fail;
+		goto next;
+	}
+
+	/* no subcomponents visible - no change in perms */
+	return 0;
+
+next:
+	aa_compute_perms(profile->policy.dfa, state, &tmp);
+	aa_apply_modes_to_perms(profile, &tmp);
+	aa_perms_accum(perms, &tmp);
+	label_for_each_cont(i, label, tp) {
+		if (!aa_ns_visible(profile->ns, tp->ns, subns))
+			continue;
+		state = match_component(profile, tp, start);
+		if (!state)
+			goto fail;
+		aa_compute_perms(profile->policy.dfa, state, &tmp);
+		aa_apply_modes_to_perms(profile, &tmp);
+		aa_perms_accum(perms, &tmp);
+	}
+
+	if ((perms->allow & request) != request)
+		return -EACCES;
+
+	return 0;
+
+fail:
+	*perms = nullperms;
+	return -EACCES;
+}
+
+/**
+ * aa_label_match - do a multi-component label match
+ * @profile: profile to match against (NOT NULL)
+ * @label: label to match (NOT NULL)
+ * @state: state to start in
+ * @subns: whether to match subns components
+ * @request: permission request
+ * @perms: Returns computed perms (NOT NULL)
+ *
+ * Returns: the state the match finished in, may be the none matching state
+ */
+int aa_label_match(struct aa_profile *profile, struct aa_label *label,
+		   unsigned int state, bool subns, u32 request,
+		   struct aa_perms *perms)
+{
+	int error = label_compound_match(profile, label, state, subns, request,
+					 perms);
+	if (!error)
+		return error;
+
+	*perms = allperms;
+	return label_components_match(profile, label, state, subns, request,
+				      perms);
+}
+
+
+/**
+ * aa_update_label_name - update a label to have a stored name
+ * @ns: ns being viewed from (NOT NULL)
+ * @label: label to update (NOT NULL)
+ * @gfp: type of memory allocation
+ *
+ * Requires: labels_set(label) not locked in caller
+ *
+ * note: only updates the label name if it does not have a name already
+ *       and if it is in the labelset
+ */
+bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
+{
+	struct aa_labelset *ls;
+	unsigned long flags;
+	char __counted *name;
+	bool res = false;
+
+	AA_BUG(!ns);
+	AA_BUG(!label);
+
+	if (label->hname || labels_ns(label) != ns)
+		return res;
+
+	if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) == -1)
+		return res;
+
+	ls = labels_set(label);
+	write_lock_irqsave(&ls->lock, flags);
+	if (!label->hname && label->flags & FLAG_IN_TREE) {
+		label->hname = name;
+		res = true;
+	} else
+		aa_put_str(name);
+	write_unlock_irqrestore(&ls->lock, flags);
+
+	return res;
+}
+
+/*
+ * cached label name is present and visible
+ * @label->hname only exists if the label is namespace hierarchical
+ */
+static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label)
+{
+	if (label->hname && labels_ns(label) == ns)
+		return true;
+
+	return false;
+}
+
+/* helper macro for snprint routines */
+#define update_for_len(total, len, size, str)	\
+do {					\
+	AA_BUG(len < 0);		\
+	total += len;			\
+	len = min(len, size);		\
+	size -= len;			\
+	str += len;			\
+} while (0)
+
+/**
+ * aa_profile_snxprint - print a profile name to a buffer
+ * @str: buffer to write to. (MAY BE NULL if @size == 0)
+ * @size: size of buffer
+ * @view: namespace profile is being viewed from
+ * @profile: profile to view (NOT NULL)
+ * @flags: whether to include the mode string
+ * @prev_ns: last ns printed when used in compound print
+ *
+ * Returns: size of name written or would be written if larger than
+ *          available buffer
+ *
+ * Note: will not print anything if the profile is not visible
+ */
+static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
+			       struct aa_profile *profile, int flags,
+			       struct aa_ns **prev_ns)
+{
+	const char *ns_name = NULL;
+
+	AA_BUG(!str && size != 0);
+	AA_BUG(!profile);
+
+	if (!view)
+		view = profiles_ns(profile);
+
+	if (view != profile->ns &&
+	    (!prev_ns || (prev_ns && *prev_ns != profile->ns))) {
+		if (prev_ns)
+			*prev_ns = profile->ns;
+		ns_name = aa_ns_name(view, profile->ns,
+				     flags & FLAG_VIEW_SUBNS);
+		if (ns_name == aa_hidden_ns_name) {
+			if (flags & FLAG_HIDDEN_UNCONFINED)
+				return snprintf(str, size, "%s", "unconfined");
+			return snprintf(str, size, "%s", ns_name);
+		}
+	}
+
+	if ((flags & FLAG_SHOW_MODE) && profile != profile->ns->unconfined) {
+		const char *modestr = aa_profile_mode_names[profile->mode];
+
+		if (ns_name)
+			return snprintf(str, size, ":%s:%s (%s)", ns_name,
+					profile->base.hname, modestr);
+		return snprintf(str, size, "%s (%s)", profile->base.hname,
+				modestr);
+	}
+
+	if (ns_name)
+		return snprintf(str, size, ":%s:%s", ns_name,
+				profile->base.hname);
+	return snprintf(str, size, "%s", profile->base.hname);
+}
+
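+/* compute the mode name to display for @label as seen from @ns */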
+static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
+				  int flags)
+{
+	struct aa_profile *profile;
+	struct label_it i;
+	int mode = -1, count = 0;
+
+	label_for_each(i, label, profile) {
+		if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
+			if (profile->mode == APPARMOR_UNCONFINED)
+				/* special case unconfined so stacks with
+				 * unconfined don't report as mixed. ie.
+				 * profile_foo//&:ns1:unconfined (mixed)
+				 */
+				continue;
+			count++;
+			if (mode == -1)
+				mode = profile->mode;
+			else if (mode != profile->mode)
+				return "mixed";
+		}
+	}
+
+	if (count == 0)
+		return "-";
+	if (mode == -1)
+		/* everything was unconfined */
+		mode = APPARMOR_UNCONFINED;
+
+	return aa_profile_mode_names[mode];
+}
+
+/* if any visible label is not unconfined the display_mode returns true */
+static inline bool display_mode(struct aa_ns *ns, struct aa_label *label,
+				int flags)
+{
+	if ((flags & FLAG_SHOW_MODE)) {
+		struct aa_profile *profile;
+		struct label_it i;
+
+		label_for_each(i, label, profile) {
+			if (aa_ns_visible(ns, profile->ns,
+					  flags & FLAG_VIEW_SUBNS) &&
+			    profile != profile->ns->unconfined)
+				return true;
+		}
+		/* only ns->unconfined in set of profiles in ns */
+		return false;
+	}
+
+	return false;
+}
+
+/**
+ * aa_label_snxprint - print a label name to a string buffer
+ * @str: buffer to write to. (MAY BE NULL if @size == 0)
+ * @size: size of buffer
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: whether to include the mode string
+ *
+ * Returns: size of name written or would be written if larger than
+ *          available buffer
+ *
+ * Note: labels do not have to be strictly hierarchical to the ns as
+ *       objects may be shared across different namespaces and thus
+ *       pickup labeling from each ns.  If a particular part of the
+ *       label is not visible it will just be excluded.  And if none
+ *       of the label is visible "---" will be used.
+ */
+int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
+		      struct aa_label *label, int flags)
+{
+	struct aa_profile *profile;
+	struct aa_ns *prev_ns = NULL;
+	struct label_it i;
+	int count = 0, total = 0;
+	size_t len;
+
+	AA_BUG(!str && size != 0);
+	AA_BUG(!label);
+
+	if (!ns)
+		ns = labels_ns(label);
+
+	label_for_each(i, label, profile) {
+		if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
+			if (count > 0) {
+				len = snprintf(str, size, "//&");
+				update_for_len(total, len, size, str);
+			}
+			len = aa_profile_snxprint(str, size, ns, profile,
+						  flags & FLAG_VIEW_SUBNS,
+						  &prev_ns);
+			update_for_len(total, len, size, str);
+			count++;
+		}
+	}
+
+	if (count == 0) {
+		if (flags & FLAG_HIDDEN_UNCONFINED)
+			return snprintf(str, size, "%s", "unconfined");
+		return snprintf(str, size, "%s", aa_hidden_ns_name);
+	}
+
+	/* count == 1 && ... is for backwards compat where the mode
+	 * is not displayed for 'unconfined' in the current ns
+	 */
+	if (display_mode(ns, label, flags)) {
+		len = snprintf(str, size, " (%s)",
+			       label_modename(ns, label, flags));
+		update_for_len(total, len, size, str);
+	}
+
+	return total;
+}
+#undef update_for_len
+
+/**
+ * aa_label_asxprint - allocate a string buffer and print label into it
+ * @strp: Returns - the allocated buffer with the label name. (NOT NULL)
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: flags controlling what label info is printed
+ * @gfp: kernel memory allocation type
+ *
+ * Returns: size of name written or would be written if larger than
+ *          available buffer
+ */
+int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
+		      int flags, gfp_t gfp)
+{
+	int size;
+
+	AA_BUG(!strp);
+	AA_BUG(!label);
+
+	size = aa_label_snxprint(NULL, 0, ns, label, flags);
+	if (size < 0)
+		return size;
+
+	*strp = kmalloc(size + 1, gfp);
+	if (!*strp)
+		return -ENOMEM;
+	return aa_label_snxprint(*strp, size + 1, ns, label, flags);
+}
+
+/**
+ * aa_label_acntsxprint - allocate a __counted string buffer and print label
+ * @strp: Returns - allocated __counted buffer with the label name (NOT NULL)
+ * @ns: namespace profile is being viewed from
+ * @label: label to view (NOT NULL)
+ * @flags: flags controlling what label info is printed
+ * @gfp: kernel memory allocation type
+ *
+ * Returns: size of name written or would be written if larger than
+ *          available buffer
+ */
+int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
+			 struct aa_label *label, int flags, gfp_t gfp)
+{
+	int size;
+
+	AA_BUG(!strp);
+	AA_BUG(!label);
+
+	size = aa_label_snxprint(NULL, 0, ns, label, flags);
+	if (size < 0)
+		return size;
+
+	*strp = aa_str_alloc(size + 1, gfp);
+	if (!*strp)
+		return -ENOMEM;
+	return aa_label_snxprint(*strp, size + 1, ns, label, flags);
+}
+
+
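+/**
+ * aa_label_xaudit - print a label name into an audit buffer
+ * @ab: audit buffer (NOT NULL)
+ * @ns: namespace the label is being viewed from
+ * @label: label to print (NOT NULL)
+ * @flags: flags controlling what label info is printed
+ * @gfp: kernel memory allocation type
+ */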
+void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
+		     struct aa_label *label, int flags, gfp_t gfp)
+{
+	const char *str;
+	char *name = NULL;
+	int len;
+
+	AA_BUG(!ab);
+	AA_BUG(!label);
+
+	if (!ns)
+		ns = labels_ns(label);
+
+	if (!use_label_hname(ns, label) || display_mode(ns, label, flags)) {
+		len  = aa_label_asxprint(&name, ns, label, flags, gfp);
+		if (len == -1) {
+			AA_DEBUG("label print error");
+			return;
+		}
+		str = name;
+	} else {
+		str = (char *) label->hname;
+		len = strlen(str);
+	}
+	if (audit_string_contains_control(str, len))
+		audit_log_n_hex(ab, str, len);
+	else
+		audit_log_n_string(ab, str, len);
+
+	kfree(name);
+}
+
+void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
+			 struct aa_label *label, int flags, gfp_t gfp)
+{
+	AA_BUG(!f);
+	AA_BUG(!label);
+
+	if (!ns)
+		ns = labels_ns(label);
+
+	if (!use_label_hname(ns, label)) {
+		char *str;
+		int len;
+
+		len = aa_label_asxprint(&str, ns, label, flags, gfp);
+		if (len == -1) {
+			AA_DEBUG("label print error");
+			return;
+		}
+		seq_printf(f, "%s", str);
+		kfree(str);
+	} else if (display_mode(ns, label, flags))
+		seq_printf(f, "%s (%s)", label->hname,
+			   label_modename(ns, label, flags));
+	else
+		seq_printf(f, "%s", label->hname);
+}
+
+void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
+		      gfp_t gfp)
+{
+	AA_BUG(!label);
+
+	if (!ns)
+		ns = labels_ns(label);
+
+	if (!use_label_hname(ns, label)) {
+		char *str;
+		int len;
+
+		len = aa_label_asxprint(&str, ns, label, flags, gfp);
+		if (len == -1) {
+			AA_DEBUG("label print error");
+			return;
+		}
+		pr_info("%s", str);
+		kfree(str);
+	} else if (display_mode(ns, label, flags))
+		pr_info("%s (%s)", label->hname,
+		       label_modename(ns, label, flags));
+	else
+		pr_info("%s", label->hname);
+}
+
+void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp)
+{
+	struct aa_ns *ns = aa_get_current_ns();
+
+	aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp);
+	aa_put_ns(ns);
+}
+
+void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp)
+{
+	struct aa_ns *ns = aa_get_current_ns();
+
+	aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp);
+	aa_put_ns(ns);
+}
+
+void aa_label_printk(struct aa_label *label, gfp_t gfp)
+{
+	struct aa_ns *ns = aa_get_current_ns();
+
+	aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp);
+	aa_put_ns(ns);
+}
+
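+/* count the number of "//&" separated profile entries in @str */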
+static int label_count_str_entries(const char *str)
+{
+	const char *split;
+	int count = 1;
+
+	AA_BUG(!str);
+
+	for (split = strstr(str, "//&"); split; split = strstr(str, "//&")) {
+		count++;
+		str = split + 3;
+	}
+
+	return count;
+}
+
+/*
+ * ensure stacks with components like
+ *   :ns:A//&B
+ * have :ns: applied to both 'A' and 'B' by making the lookup relative
+ * to the base if the lookup specifies an ns, else making the stacked lookup
+ * relative to the last embedded ns in the string.
+ */
+static struct aa_profile *fqlookupn_profile(struct aa_label *base,
+					    struct aa_label *currentbase,
+					    const char *str, size_t n)
+{
+	const char *first = skipn_spaces(str, n);
+
+	if (first && *first == ':')
+		return aa_fqlookupn_profile(base, str, n);
+
+	return aa_fqlookupn_profile(currentbase, str, n);
+}
+
+/**
+ * aa_label_parse - parse, validate and convert a text string to a label
+ * @base: base label to use for lookups (NOT NULL)
+ * @str: null terminated text string (NOT NULL)
+ * @gfp: allocation type
+ * @create: true if should create compound labels if they don't exist
+ * @force_stack: true if should stack even if no leading &
+ *
+ * Returns: the matching refcounted label if present
+ *     else ERRPTR
+ */
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+				gfp_t gfp, bool create, bool force_stack)
+{
+	DEFINE_VEC(profile, vec);
+	struct aa_label *label, *currbase = base;
+	int i, len, stack = 0, error;
+	char *split;
+
+	AA_BUG(!base);
+	AA_BUG(!str);
+
+	str = skip_spaces(str);
+	len = label_count_str_entries(str);
+	if (*str == '&' || force_stack) {
+		/* stack on top of base */
+		stack = base->size;
+		len += stack;
+		if (*str == '&')
+			str++;
+	}
+	error = vec_setup(profile, vec, len, gfp);
+	if (error)
+		return ERR_PTR(error);
+
+	for (i = 0; i < stack; i++)
+		vec[i] = aa_get_profile(base->vec[i]);
+
+	for (split = strstr(str, "//&"), i = stack; split && i < len; i++) {
+		vec[i] = fqlookupn_profile(base, currbase, str, split - str);
+		if (!vec[i])
+			goto fail;
+		/*
+		 * if component specified a new ns it becomes the new base
+		 * so that subsequent lookups are relative to it
+		 */
+		if (vec[i]->ns != labels_ns(currbase))
+			currbase = &vec[i]->label;
+		str = split + 3;
+		split = strstr(str, "//&");
+	}
+	/* last element doesn't have a split */
+	if (i < len) {
+		vec[i] = fqlookupn_profile(base, currbase, str, strlen(str));
+		if (!vec[i])
+			goto fail;
+	}
+	if (len == 1)
+		/* no need to free vec as len < LOCAL_VEC_ENTRIES */
+		return &vec[0]->label;
+
+	len -= aa_vec_unique(vec, len, VEC_FLAG_TERMINATE);
+	/* TODO: deal with reference labels */
+	if (len == 1) {
+		label = aa_get_label(&vec[0]->label);
+		goto out;
+	}
+
+	if (create)
+		label = aa_vec_find_or_create_label(vec, len, gfp);
+	else
+		label = vec_find(vec, len);
+	if (!label)
+		goto fail;
+
+out:
+	/* use adjusted len from after vec_unique, not original */
+	vec_cleanup(profile, vec, len);
+	return label;
+
+fail:
+	label = ERR_PTR(-ENOENT);
+	goto out;
+}
+
+
+/**
+ * aa_labelset_destroy - remove all labels from the label set
+ * @ls: label set to cleanup (NOT NULL)
+ *
+ * Labels that are removed from the set may still exist beyond the set
+ * being destroyed depending on their reference counting
+ */
+void aa_labelset_destroy(struct aa_labelset *ls)
+{
+	struct rb_node *node;
+	unsigned long flags;
+
+	AA_BUG(!ls);
+
+	write_lock_irqsave(&ls->lock, flags);
+	for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
+		struct aa_label *this = rb_entry(node, struct aa_label, node);
+
+		if (labels_ns(this) != root_ns)
+			__label_remove(this,
+				       ns_unconfined(labels_ns(this)->parent));
+		else
+			__label_remove(this, NULL);
+	}
+	write_unlock_irqrestore(&ls->lock, flags);
+}
+
+/**
+ * aa_labelset_init - initialize a label set
+ * @ls: labelset to init (NOT NULL)
+ */
+void aa_labelset_init(struct aa_labelset *ls)
+{
+	AA_BUG(!ls);
+
+	rwlock_init(&ls->lock);
+	ls->root = RB_ROOT;
+}
+
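+/*
+ * return a refcounted label from @ls that is stale or has a stale
+ * component, else NULL if there are none
+ */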
+static struct aa_label *labelset_next_stale(struct aa_labelset *ls)
+{
+	struct aa_label *label;
+	struct rb_node *node;
+	unsigned long flags;
+
+	AA_BUG(!ls);
+
+	read_lock_irqsave(&ls->lock, flags);
+
+	__labelset_for_each(ls, node) {
+		label = rb_entry(node, struct aa_label, node);
+		if ((label_is_stale(label) ||
+		     vec_is_stale(label->vec, label->size)) &&
+		    __aa_get_label(label))
+			goto out;
+
+	}
+	label = NULL;
+
+out:
+	read_unlock_irqrestore(&ls->lock, flags);
+
+	return label;
+}
+
+/**
+ * __label_update - insert updated version of @label into labelset
+ * @label: label to update/replace
+ *
+ * Returns: new label that is up to date
+ *     else NULL on failure
+ *
+ * Requires: @ns lock be held
+ *
+ * Note: worst case is the stale @label does not get updated and has
+ *       to be updated at a later time.
+ */
+static struct aa_label *__label_update(struct aa_label *label)
+{
+	struct aa_label *new, *tmp;
+	struct aa_labelset *ls;
+	unsigned long flags;
+	int i, invcount = 0;
+
+	AA_BUG(!label);
+	AA_BUG(!mutex_is_locked(&labels_ns(label)->lock));
+
+	new = aa_label_alloc(label->size, label->proxy, GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	/*
+	 * holding the ns_lock stops profile replacement, removal, and
+	 * label updates, but label merging and removal can still be
+	 * occurring
+	 */
+	ls = labels_set(label);
+	write_lock_irqsave(&ls->lock, flags);
+	for (i = 0; i < label->size; i++) {
+		AA_BUG(!label->vec[i]);
+		new->vec[i] = aa_get_newest_profile(label->vec[i]);
+		AA_BUG(!new->vec[i]);
+		AA_BUG(!new->vec[i]->label.proxy);
+		AA_BUG(!new->vec[i]->label.proxy->label);
+		if (new->vec[i]->label.proxy != label->vec[i]->label.proxy)
+			invcount++;
+	}
+
+	/* updated stale label by being removed/renamed from labelset */
+	if (invcount) {
+		new->size -= aa_vec_unique(&new->vec[0], new->size,
+					   VEC_FLAG_TERMINATE);
+		/* TODO: deal with reference labels */
+		if (new->size == 1) {
+			tmp = aa_get_label(&new->vec[0]->label);
+			AA_BUG(tmp == label);
+			goto remove;
+		}
+		if (labels_set(label) != labels_set(new)) {
+			write_unlock_irqrestore(&ls->lock, flags);
+			tmp = aa_label_insert(labels_set(new), new);
+			write_lock_irqsave(&ls->lock, flags);
+			goto remove;
+		}
+	} else
+		AA_BUG(labels_ns(label) != labels_ns(new));
+
+	tmp = __label_insert(labels_set(label), new, true);
+remove:
+	/* ensure label is removed, and redirected correctly */
+	__label_remove(label, tmp);
+	write_unlock_irqrestore(&ls->lock, flags);
+	label_free_or_put_new(tmp, new);
+
+	return tmp;
+}
+
+/**
+ * __labelset_update - update labels in @ns
+ * @ns: namespace to update labels in  (NOT NULL)
+ *
+ * Requires: @ns lock be held
+ *
+ * Walk the labelset ensuring that all labels are up to date and valid
+ * Any label that has a stale component is marked stale and replaced and
+ * by an updated version.
+ *
+ * If failures happen due to memory pressures then stale labels will
+ * be left in place until the next pass.
+ */
+static void __labelset_update(struct aa_ns *ns)
+{
+	struct aa_label *label;
+
+	AA_BUG(!ns);
+	AA_BUG(!mutex_is_locked(&ns->lock));
+
+	do {
+		label = labelset_next_stale(&ns->labels);
+		if (label) {
+			struct aa_label *l = __label_update(label);
+
+			aa_put_label(l);
+			aa_put_label(label);
+		}
+	} while (label);
+}
+
+/**
+ * __aa_labelset_update_subtree - update all labels with a stale component
+ * @ns: ns to start update at (NOT NULL)
+ *
+ * Requires: @ns lock be held
+ *
+ * Updates any label with a stale component in @ns and any child namespaces.
+ */
+void __aa_labelset_update_subtree(struct aa_ns *ns)
+{
+	struct aa_ns *child;
+
+	AA_BUG(!ns);
+	AA_BUG(!mutex_is_locked(&ns->lock));
+
+	__labelset_update(ns);
+
+	list_for_each_entry(child, &ns->sub_ns, base.list) {
+		mutex_lock(&child->lock);
+		__aa_labelset_update_subtree(child);
+		mutex_unlock(&child->lock);
+	}
+}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 7cd788a..08ca26b 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -21,8 +21,14 @@
 #include "include/audit.h"
 #include "include/apparmor.h"
 #include "include/lib.h"
+#include "include/perms.h"
 #include "include/policy.h"
 
+struct aa_perms nullperms;
+struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
+			     .quiet = ALL_PERMS_MASK,
+			     .hide = ALL_PERMS_MASK };
+
 /**
  * aa_split_fqname - split a fqname into a profile and namespace name
  * @fqname: a full qualified name in namespace profile format (NOT NULL)
@@ -69,7 +75,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
  * if all whitespace will return NULL
  */
 
-static const char *skipn_spaces(const char *str, size_t n)
+const char *skipn_spaces(const char *str, size_t n)
 {
 	for (; n && isspace(*str); --n)
 		++str;
@@ -128,11 +134,350 @@ void aa_info_message(const char *str)
 	printk(KERN_INFO "AppArmor: %s\n", str);
 }
 
+__counted char *aa_str_alloc(int size, gfp_t gfp)
+{
+	struct counted_str *str;
+
+	str = kmalloc(sizeof(struct counted_str) + size, gfp);
+	if (!str)
+		return NULL;
+
+	kref_init(&str->count);
+	return str->name;
+}
+
+void aa_str_kref(struct kref *kref)
+{
+	kfree(container_of(kref, struct counted_str, count));
+}
+
+
+const char aa_file_perm_chrs[] = "xwracd         km l     ";
+const char *aa_file_perm_names[] = {
+	"exec",
+	"write",
+	"read",
+	"append",
+
+	"create",
+	"delete",
+	"open",
+	"rename",
+
+	"setattr",
+	"getattr",
+	"setcred",
+	"getcred",
+
+	"chmod",
+	"chown",
+	"chgrp",
+	"lock",
+
+	"mmap",
+	"mprot",
+	"link",
+	"snapshot",
+
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+
+	"unknown",
+	"unknown",
+	"unknown",
+	"unknown",
+
+	"stack",
+	"change_onexec",
+	"change_profile",
+	"change_hat",
+};
+
+/**
+ * aa_perm_mask_to_str - convert a perm mask to its short string
+ * @str: character buffer to store string in (at least 10 characters)
+ * @chrs: character mapping for the permission bits
+ * @mask: permission mask to convert
+ */
+void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
+{
+	unsigned int i, perm = 1;
+
+	for (i = 0; i < 32; perm <<= 1, i++) {
+		if (mask & perm)
+			*str++ = chrs[i];
+	}
+	*str = '\0';
+}
+
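+/* write out the permission names for the bits set in @mask */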
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
+{
+	const char *fmt = "%s";
+	unsigned int i, perm = 1;
+	bool prev = false;
+
+	for (i = 0; i < 32; perm <<= 1, i++) {
+		if (mask & perm) {
+			audit_log_format(ab, fmt, names[i]);
+			if (!prev) {
+				prev = true;
+				fmt = " %s";
+			}
+		}
+	}
+}
+
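+/*
+ * audit @mask as a quoted string, using permission chars for bits in
+ * @chrsmask and permission names for bits in @namesmask
+ */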
+void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
+			u32 chrsmask, const char **names, u32 namesmask)
+{
+	char str[33];
+
+	audit_log_format(ab, "\"");
+	if ((mask & chrsmask) && chrs) {
+		aa_perm_mask_to_str(str, chrs, mask & chrsmask);
+		mask &= ~chrsmask;
+		audit_log_format(ab, "%s", str);
+		if (mask & namesmask)
+			audit_log_format(ab, " ");
+	}
+	if ((mask & namesmask) && names)
+		aa_audit_perm_names(ab, names, mask & namesmask);
+	audit_log_format(ab, "\"");
+}
+
+/**
+ * aa_audit_perms_cb - generic callback fn for auditing perms
+ * @ab: audit buffer (NOT NULL)
+ * @va: audit struct to audit values of (NOT NULL)
+ */
+static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
+{
+	struct common_audit_data *sa = va;
+
+	if (aad(sa)->request) {
+		audit_log_format(ab, " requested_mask=");
+		aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
+				   PERMS_CHRS_MASK, aa_file_perm_names,
+				   PERMS_NAMES_MASK);
+	}
+	if (aad(sa)->denied) {
+		audit_log_format(ab, " denied_mask=");
+		aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
+				   PERMS_CHRS_MASK, aa_file_perm_names,
+				   PERMS_NAMES_MASK);
+	}
+	audit_log_format(ab, " peer=");
+	aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+				      FLAGS_NONE, GFP_ATOMIC);
+}
+
+/**
+ * aa_apply_modes_to_perms - apply namespace and profile flags to perms
+ * @profile: profile the perms were computed from
+ * @perms: perms to apply mode modifiers to
+ *
+ * TODO: split into profile and ns based flags for when accumulating perms
+ */
+void aa_apply_modes_to_perms(struct aa_profile *profile, struct aa_perms *perms)
+{
+	switch (AUDIT_MODE(profile)) {
+	case AUDIT_ALL:
+		perms->audit = ALL_PERMS_MASK;
+		/* fall through */
+	case AUDIT_NOQUIET:
+		perms->quiet = 0;
+		break;
+	case AUDIT_QUIET:
+		perms->audit = 0;
+		/* fall through */
+	case AUDIT_QUIET_DENIED:
+		perms->quiet = ALL_PERMS_MASK;
+		break;
+	}
+
+	if (KILL_MODE(profile))
+		perms->kill = ALL_PERMS_MASK;
+	else if (COMPLAIN_MODE(profile))
+		perms->complain = ALL_PERMS_MASK;
+/*
+ *  TODO:
+ *	else if (PROMPT_MODE(profile))
+ *		perms->prompt = ALL_PERMS_MASK;
+ */
+}
+
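+/* map the dfa's "other" permission bits onto their extended positions */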
+static u32 map_other(u32 x)
+{
+	return ((x & 0x3) << 8) |	/* SETATTR/GETATTR */
+		((x & 0x1c) << 18) |	/* ACCEPT/BIND/LISTEN */
+		((x & 0x60) << 19);	/* SETOPT/GETOPT */
+}
+
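+/**
+ * aa_compute_perms - convert dfa compressed perms to internal perms
+ * @dfa: dfa to compute perms for (NOT NULL)
+ * @state: state in dfa
+ * @perms: perms to fill in (NOT NULL)
+ */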
+void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
+		      struct aa_perms *perms)
+{
+	perms->deny = 0;
+	perms->kill = perms->stop = 0;
+	perms->complain = perms->cond = 0;
+	perms->hide = 0;
+	perms->prompt = 0;
+	perms->allow = dfa_user_allow(dfa, state);
+	perms->audit = dfa_user_audit(dfa, state);
+	perms->quiet = dfa_user_quiet(dfa, state);
+
+	/* for v5 perm mapping in the policydb, the other set is used
+	 * to extend the general perm set
+	 */
+	perms->allow |= map_other(dfa_other_allow(dfa, state));
+	perms->audit |= map_other(dfa_other_audit(dfa, state));
+	perms->quiet |= map_other(dfa_other_quiet(dfa, state));
+//	perms->xindex = dfa_user_xindex(dfa, state);
+}
+
+/**
+ * aa_perms_accum_raw - accumulate perms without masking off overlapping perms
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
+ */
+void aa_perms_accum_raw(struct aa_perms *accum, struct aa_perms *addend)
+{
+	accum->deny |= addend->deny;
+	accum->allow &= addend->allow & ~addend->deny;
+	accum->audit |= addend->audit & addend->allow;
+	accum->quiet &= addend->quiet & ~addend->allow;
+	accum->kill |= addend->kill & ~addend->allow;
+	accum->stop |= addend->stop & ~addend->allow;
+	accum->complain |= addend->complain & ~addend->allow & ~addend->deny;
+	accum->cond |= addend->cond & ~addend->allow & ~addend->deny;
+	accum->hide &= addend->hide & ~addend->allow;
+	accum->prompt |= addend->prompt & ~addend->allow & ~addend->deny;
+}
+
+/**
+ * aa_perms_accum - accumulate perms, masking off overlapping perms
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
+ */
+void aa_perms_accum(struct aa_perms *accum, struct aa_perms *addend)
+{
+	accum->deny |= addend->deny;
+	accum->allow &= addend->allow & ~accum->deny;
+	accum->audit |= addend->audit & accum->allow;
+	accum->quiet &= addend->quiet & ~accum->allow;
+	accum->kill |= addend->kill & ~accum->allow;
+	accum->stop |= addend->stop & ~accum->allow;
+	accum->complain |= addend->complain & ~accum->allow & ~accum->deny;
+	accum->cond |= addend->cond & ~accum->allow & ~accum->deny;
+	accum->hide &= addend->hide & ~accum->allow;
+	accum->prompt |= addend->prompt & ~accum->allow & ~accum->deny;
+}
+
+void aa_profile_match_label(struct aa_profile *profile, struct aa_label *label,
+			    int type, u32 request, struct aa_perms *perms)
+{
+	/* TODO: doesn't yet handle extended types */
+	unsigned int state;
+
+	state = aa_dfa_next(profile->policy.dfa,
+			    profile->policy.start[AA_CLASS_LABEL],
+			    type);
+	aa_label_match(profile, label, state, false, request, perms);
+}
+
+
+/* currently unused */
+int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
+			  u32 request, int type, u32 *deny,
+			  struct common_audit_data *sa)
+{
+	struct aa_perms perms;
+
+	aad(sa)->label = &profile->label;
+	aad(sa)->peer = &target->label;
+	aad(sa)->request = request;
+
+	aa_profile_match_label(profile, &target->label, type, request, &perms);
+	aa_apply_modes_to_perms(profile, &perms);
+	*deny |= request & perms.deny;
+	return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
+}
+
+/**
+ * aa_check_perms - do audit mode selection based on perms set
+ * @profile: profile being checked
+ * @perms: perms computed for the request
+ * @request: requested perms
+ * @sa: initialized audit structure (MAY BE NULL if not auditing)
+ * @cb: callback fn for type specific fields (MAY BE NULL)
+ *
+ * Returns: 0 if permission is granted else error code
+ *
+ * Note: profile audit modes need to be set before calling by setting the
+ *       perm masks appropriately.
+ *
+ *       If not auditing then complain mode is not enabled and the
+ *       error code will indicate whether there was an explicit deny
+ *	 with a positive value.
+ */
+int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
+		   u32 request, struct common_audit_data *sa,
+		   void (*cb)(struct audit_buffer *, void *))
+{
+	int type, error;
+	bool stop = false;
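+	/* denied: requested perms that are not allowed or are explicitly denied */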
+	u32 denied = request & (~perms->allow | perms->deny);
+
+	if (likely(!denied)) {
+		/* mask off perms that are not being force audited */
+		request &= perms->audit;
+		if (!request || !sa)
+			return 0;
+
+		type = AUDIT_APPARMOR_AUDIT;
+		error = 0;
+	} else {
+		error = -EACCES;
+
+		if (denied & perms->kill)
+			type = AUDIT_APPARMOR_KILL;
+		else if (denied == (denied & perms->complain))
+			type = AUDIT_APPARMOR_ALLOWED;
+		else
+			type = AUDIT_APPARMOR_DENIED;
+
+		if (denied & perms->stop)
+			stop = true;
+		if (denied == (denied & perms->hide))
+			error = -ENOENT;
+
+		denied &= ~perms->quiet;
+		if (!sa || !denied)
+			return error;
+	}
+
+	if (sa) {
+		aad(sa)->label = &profile->label;
+		aad(sa)->request = request;
+		aad(sa)->denied = denied;
+		aad(sa)->error = error;
+		aa_audit_msg(type, sa, cb);
+	}
+
+	if (type == AUDIT_APPARMOR_ALLOWED)
+		error = 0;
+
+	return error;
+}
+
+
 /**
  * aa_policy_init - initialize a policy structure
  * @policy: policy to initialize  (NOT NULL)
  * @prefix: prefix name if any is required.  (MAYBE NULL)
  * @name: name of the policy, init will make a copy of it  (NOT NULL)
+ * @gfp: allocation mode
  *
  * Note: this fn creates a copy of strings passed in
  *
@@ -141,16 +486,21 @@ void aa_info_message(const char *str)
 bool aa_policy_init(struct aa_policy *policy, const char *prefix,
 		    const char *name, gfp_t gfp)
 {
+	char *hname;
+
 	/* freed by policy_free */
 	if (prefix) {
-		policy->hname = kmalloc(strlen(prefix) + strlen(name) + 3,
-					gfp);
-		if (policy->hname)
-			sprintf((char *)policy->hname, "%s//%s", prefix, name);
-	} else
-		policy->hname = kstrdup(name, gfp);
-	if (!policy->hname)
+		hname = aa_str_alloc(strlen(prefix) + strlen(name) + 3, gfp);
+		if (hname)
+			sprintf(hname, "%s//%s", prefix, name);
+	} else {
+		hname = aa_str_alloc(strlen(name) + 1, gfp);
+		if (hname)
+			strcpy(hname, name);
+	}
+	if (!hname)
 		return false;
+	policy->hname = hname;
 	/* base.name is a substring of fqname */
 	policy->name = basename(policy->hname);
 	INIT_LIST_HEAD(&policy->list);
@@ -169,5 +519,5 @@ void aa_policy_destroy(struct aa_policy *policy)
 	AA_BUG(on_list_rcu(&policy->list));
 
 	/* don't free name as its a subset of hname */
-	kzfree(policy->hname);
+	aa_put_str(policy->hname);
 }
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8f3c0f7..867bcd1 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -34,6 +34,7 @@
 #include "include/file.h"
 #include "include/ipc.h"
 #include "include/path.h"
+#include "include/label.h"
 #include "include/policy.h"
 #include "include/policy_ns.h"
 #include "include/procattr.h"
@@ -49,7 +50,7 @@ DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
  */
 
 /*
- * free the associated aa_task_ctx and put its profiles
+ * free the associated aa_task_ctx and put its labels
  */
 static void apparmor_cred_free(struct cred *cred)
 {
@@ -103,34 +104,63 @@ static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
 static int apparmor_ptrace_access_check(struct task_struct *child,
 					unsigned int mode)
 {
-	return aa_ptrace(current, child, mode);
+	struct aa_label *tracer, *tracee;
+	int error;
+
+	tracer = begin_current_label_crit_section();
+	tracee = aa_get_task_label(child);
+	error = aa_may_ptrace(tracer, tracee,
+		  mode == PTRACE_MODE_READ ? AA_PTRACE_READ : AA_PTRACE_TRACE);
+	aa_put_label(tracee);
+	end_current_label_crit_section(tracer);
+
+	return error;
 }
 
 static int apparmor_ptrace_traceme(struct task_struct *parent)
 {
-	return aa_ptrace(parent, current, PTRACE_MODE_ATTACH);
+	struct aa_label *tracer, *tracee;
+	int error;
+
+	tracee = begin_current_label_crit_section();
+	tracer = aa_get_task_label(parent);
+	error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
+	aa_put_label(tracer);
+	end_current_label_crit_section(tracee);
+
+	return error;
 }
 
 /* Derived from security/commoncap.c:cap_capget */
 static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
 			   kernel_cap_t *inheritable, kernel_cap_t *permitted)
 {
-	struct aa_profile *profile;
+	struct aa_label *label;
 	const struct cred *cred;
 
 	rcu_read_lock();
 	cred = __task_cred(target);
-	profile = aa_cred_profile(cred);
+	label = aa_get_newest_cred_label(cred);
 
 	/*
 	 * cap_capget is stacked ahead of this and will
 	 * initialize effective and permitted.
 	 */
-	if (!unconfined(profile) && !COMPLAIN_MODE(profile)) {
-		*effective = cap_intersect(*effective, profile->caps.allow);
-		*permitted = cap_intersect(*permitted, profile->caps.allow);
+	if (!unconfined(label)) {
+		struct aa_profile *profile;
+		struct label_it i;
+
+		label_for_each_confined(i, label, profile) {
+			if (COMPLAIN_MODE(profile))
+				continue;
+			*effective = cap_intersect(*effective,
+						   profile->caps.allow);
+			*permitted = cap_intersect(*permitted,
+						   profile->caps.allow);
+		}
 	}
 	rcu_read_unlock();
+	aa_put_label(label);
 
 	return 0;
 }
@@ -138,12 +168,14 @@ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
 static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
 			    int cap, int audit)
 {
-	struct aa_profile *profile;
+	struct aa_label *label;
 	int error = 0;
 
-	profile = aa_cred_profile(cred);
-	if (!unconfined(profile))
-		error = aa_capable(profile, cap, audit);
+	label = aa_get_newest_cred_label(cred);
+	if (!unconfined(label))
+		error = aa_capable(label, cap, audit);
+	aa_put_label(label);
+
 	return error;
 }
 
@@ -159,12 +191,13 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
 static int common_perm(const char *op, const struct path *path, u32 mask,
 		       struct path_cond *cond)
 {
-	struct aa_profile *profile;
+	struct aa_label *label;
 	int error = 0;
 
-	profile = __aa_current_profile();
-	if (!unconfined(profile))
-		error = aa_path_perm(op, profile, path, 0, mask, cond);
+	label = __begin_current_label_crit_section();
+	if (!unconfined(label))
+		error = aa_path_perm(op, label, path, 0, mask, cond);
+	__end_current_label_crit_section(label);
 
 	return error;
 }
@@ -278,7 +311,7 @@ static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry,
 
 static int apparmor_path_truncate(const struct path *path)
 {
-	return common_perm_cond(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE);
+	return common_perm_cond(OP_TRUNC, path, MAY_WRITE | AA_MAY_SETATTR);
 }
 
 static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
@@ -291,29 +324,31 @@ static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
 static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir,
 			      struct dentry *new_dentry)
 {
-	struct aa_profile *profile;
+	struct aa_label *label;
 	int error = 0;
 
 	if (!path_mediated_fs(old_dentry))
 		return 0;
 
-	profile = aa_current_profile();
-	if (!unconfined(profile))
-		error = aa_path_link(profile, old_dentry, new_dir, new_dentry);
+	label = begin_current_label_crit_section();
+	if (!unconfined(label))
+		error = aa_path_link(label, old_dentry, new_dir, new_dentry);
+	end_current_label_crit_section(label);
+
 	return error;
 }
 
 static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry,
 				const struct path *new_dir, struct dentry *new_dentry)
 {
-	struct aa_profile *profile;
+	struct aa_label *label;
 	int error = 0;
 
 	if (!path_mediated_fs(old_dentry))
 		return 0;
 
-	profile = aa_current_profile();
-	if (!unconfined(profile)) {
+	label = begin_current_label_crit_section();
+	if (!unconfined(label)) {
 		struct path old_path = { .mnt = old_dir->mnt,
 					 .dentry = old_dentry };
 		struct path new_path = { .mnt = new_dir->mnt,
@@ -322,16 +357,18 @@ static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_d
 					  d_backing_inode(old_dentry)->i_mode
 		};
 
-		error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
-				     MAY_READ | AA_MAY_META_READ | MAY_WRITE |
-				     AA_MAY_META_WRITE | AA_MAY_DELETE,
+		error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
+				     MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
+				     AA_MAY_SETATTR | AA_MAY_DELETE,
 				     &cond);
 		if (!error)
-			error = aa_path_perm(OP_RENAME_DEST, profile, &new_path,
-					     0, MAY_WRITE | AA_MAY_META_WRITE |
+			error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
+					     0, MAY_WRITE | AA_MAY_SETATTR |
 					     AA_MAY_CREATE, &cond);
 
 	}
+	end_current_label_crit_section(label);
+
 	return error;
 }
 
@@ -347,13 +384,13 @@ static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 
 static int apparmor_inode_getattr(const struct path *path)
 {
-	return common_perm_cond(OP_GETATTR, path, AA_MAY_META_READ);
+	return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
 }
 
 static int apparmor_file_open(struct file *file, const struct cred *cred)
 {
-	struct aa_file_ctx *fctx = file->f_security;
-	struct aa_profile *profile;
+	struct aa_file_ctx *fctx = file_ctx(file);
+	struct aa_label *label;
 	int error = 0;
 
 	if (!path_mediated_fs(file->f_path.dentry))
@@ -369,65 +406,61 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
 		return 0;
 	}
 
-	profile = aa_cred_profile(cred);
-	if (!unconfined(profile)) {
+	label = aa_get_newest_cred_label(cred);
+	if (!unconfined(label)) {
 		struct inode *inode = file_inode(file);
 		struct path_cond cond = { inode->i_uid, inode->i_mode };
 
-		error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
+		error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
 				     aa_map_file_to_perms(file), &cond);
 		/* todo cache full allowed permissions set and state */
 		fctx->allow = aa_map_file_to_perms(file);
 	}
+	aa_put_label(label);
 
 	return error;
 }
 
 static int apparmor_file_alloc_security(struct file *file)
 {
-	/* freed by apparmor_file_free_security */
-	file->f_security = aa_alloc_file_context(GFP_KERNEL);
-	if (!file->f_security)
-		return -ENOMEM;
-	return 0;
+	int error = 0;
 
+	/* freed by apparmor_file_free_security */
+	struct aa_label *label = begin_current_label_crit_section();
+	file->f_security = aa_alloc_file_ctx(label, GFP_KERNEL);
+	if (!file_ctx(file))
+		error = -ENOMEM;
+	end_current_label_crit_section(label);
+
+	return error;
 }
 
 static void apparmor_file_free_security(struct file *file)
 {
-	struct aa_file_ctx *ctx = file->f_security;
-
-	aa_free_file_context(ctx);
+	aa_free_file_ctx(file_ctx(file));
 }
 
 static int common_file_perm(const char *op, struct file *file, u32 mask)
 {
-	struct aa_file_ctx *fctx = file->f_security;
-	struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred);
+	struct aa_label *label;
 	int error = 0;
 
-	AA_BUG(!fprofile);
+	/* don't reaudit files closed during inheritance */
+	if (file->f_path.dentry == aa_null.dentry)
+		return -EACCES;
 
-	if (!file->f_path.mnt ||
-	    !path_mediated_fs(file->f_path.dentry))
-		return 0;
-
-	profile = __aa_current_profile();
-
-	/* revalidate access, if task is unconfined, or the cached cred
-	 * doesn't match or if the request is for more permissions than
-	 * was granted.
-	 *
-	 * Note: the test for !unconfined(fprofile) is to handle file
-	 *       delegation from unconfined tasks
-	 */
-	if (!unconfined(profile) && !unconfined(fprofile) &&
-	    ((fprofile != profile) || (mask & ~fctx->allow)))
-		error = aa_file_perm(op, profile, file, mask);
+	label = __begin_current_label_crit_section();
+	error = aa_file_perm(op, label, file, mask);
+	__end_current_label_crit_section(label);
 
 	return error;
 }
 
+static int apparmor_file_receive(struct file *file)
+{
+	return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file));
+}
+
 static int apparmor_file_permission(struct file *file, int mask)
 {
 	return common_file_perm(OP_FPERM, file, mask);
@@ -448,7 +481,7 @@ static int common_mmap(const char *op, struct file *file, unsigned long prot,
 {
 	int mask = 0;
 
-	if (!file || !file->f_security)
+	if (!file || !file_ctx(file))
 		return 0;
 
 	if (prot & PROT_READ)
@@ -485,21 +518,21 @@ static int apparmor_getprocattr(struct task_struct *task, char *name,
 	/* released below */
 	const struct cred *cred = get_task_cred(task);
 	struct aa_task_ctx *ctx = cred_ctx(cred);
-	struct aa_profile *profile = NULL;
+	struct aa_label *label = NULL;
 
 	if (strcmp(name, "current") == 0)
-		profile = aa_get_newest_profile(ctx->profile);
+		label = aa_get_newest_label(ctx->label);
 	else if (strcmp(name, "prev") == 0  && ctx->previous)
-		profile = aa_get_newest_profile(ctx->previous);
+		label = aa_get_newest_label(ctx->previous);
 	else if (strcmp(name, "exec") == 0 && ctx->onexec)
-		profile = aa_get_newest_profile(ctx->onexec);
+		label = aa_get_newest_label(ctx->onexec);
 	else
 		error = -EINVAL;
 
-	if (profile)
-		error = aa_getprocattr(profile, value);
+	if (label)
+		error = aa_getprocattr(label, value);
 
-	aa_put_profile(profile);
+	aa_put_label(label);
 	put_cred(cred);
 
 	return error;
@@ -539,22 +572,24 @@ static int apparmor_setprocattr(const char *name, void *value,
 	if (strcmp(name, "current") == 0) {
 		if (strcmp(command, "changehat") == 0) {
 			error = aa_setprocattr_changehat(args, arg_size,
-							 !AA_DO_TEST);
+							 AA_CHANGE_NOFLAGS);
 		} else if (strcmp(command, "permhat") == 0) {
 			error = aa_setprocattr_changehat(args, arg_size,
-							 AA_DO_TEST);
+							 AA_CHANGE_TEST);
 		} else if (strcmp(command, "changeprofile") == 0) {
-			error = aa_change_profile(args, !AA_ONEXEC,
-						  !AA_DO_TEST, false);
+			error = aa_change_profile(args, AA_CHANGE_NOFLAGS);
 		} else if (strcmp(command, "permprofile") == 0) {
-			error = aa_change_profile(args, !AA_ONEXEC, AA_DO_TEST,
-						  false);
+			error = aa_change_profile(args, AA_CHANGE_TEST);
+		} else if (strcmp(command, "stack") == 0) {
+			error = aa_change_profile(args, AA_CHANGE_STACK);
 		} else
 			goto fail;
 	} else if (strcmp(name, "exec") == 0) {
 		if (strcmp(command, "exec") == 0)
-			error = aa_change_profile(args, AA_ONEXEC, !AA_DO_TEST,
-						  false);
+			error = aa_change_profile(args, AA_CHANGE_ONEXEC);
+		else if (strcmp(command, "stack") == 0)
+			error = aa_change_profile(args, (AA_CHANGE_ONEXEC |
+							 AA_CHANGE_STACK));
 		else
 			goto fail;
 	} else
@@ -568,21 +603,55 @@ static int apparmor_setprocattr(const char *name, void *value,
 	return error;
 
 fail:
-	aad(&sa)->profile = aa_current_profile();
+	aad(&sa)->label = begin_current_label_crit_section();
 	aad(&sa)->info = name;
 	aad(&sa)->error = error = -EINVAL;
 	aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+	end_current_label_crit_section(aad(&sa)->label);
 	goto out;
 }
 
+/**
+ * apparmor_bprm_committing_creds - do task cleanup on committing new creds
+ * @bprm: binprm for the exec  (NOT NULL)
+ */
+static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
+{
+	struct aa_label *label = aa_current_raw_label();
+	struct aa_task_ctx *new_ctx = cred_ctx(bprm->cred);
+
+	/* bail out if unconfined or not changing profile */
+	if ((new_ctx->label->proxy == label->proxy) ||
+	    (unconfined(new_ctx->label)))
+		return;
+
+	aa_inherit_files(bprm->cred, current->files);
+
+	current->pdeath_signal = 0;
+
+	/* reset soft limits and set hard limits for the new label */
+	__aa_transition_rlimits(label, new_ctx->label);
+}
+
+/**
+ * apparmor_bprm_committed_creds - do cleanup after new creds committed
+ * @bprm: binprm for the exec  (NOT NULL)
+ */
+static void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
+{
+	/* TODO: cleanup signals - ipc mediation */
+	return;
+}
+
 static int apparmor_task_setrlimit(struct task_struct *task,
 		unsigned int resource, struct rlimit *new_rlim)
 {
-	struct aa_profile *profile = __aa_current_profile();
+	struct aa_label *label = __begin_current_label_crit_section();
 	int error = 0;
 
-	if (!unconfined(profile))
-		error = aa_task_setrlimit(profile, task, resource, new_rlim);
+	if (!unconfined(label))
+		error = aa_task_setrlimit(label, task, resource, new_rlim);
+	__end_current_label_crit_section(label);
 
 	return error;
 }
@@ -606,6 +675,7 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
 	LSM_HOOK_INIT(inode_getattr, apparmor_inode_getattr),
 
 	LSM_HOOK_INIT(file_open, apparmor_file_open),
+	LSM_HOOK_INIT(file_receive, apparmor_file_receive),
 	LSM_HOOK_INIT(file_permission, apparmor_file_permission),
 	LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security),
 	LSM_HOOK_INIT(file_free_security, apparmor_file_free_security),
@@ -774,11 +844,18 @@ static int param_get_aabool(char *buffer, const struct kernel_param *kp)
 
 static int param_set_aauint(const char *val, const struct kernel_param *kp)
 {
+	int error;
+
 	if (!apparmor_enabled)
 		return -EINVAL;
-	if (apparmor_initialized && !policy_admin_capable(NULL))
+	/* file is ro but enforce a second line of defense check */
+	if (apparmor_initialized)
 		return -EPERM;
-	return param_set_uint(val, kp);
+
+	error = param_set_uint(val, kp);
+	pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
+
+	return error;
 }
 
 static int param_get_aauint(char *buffer, const struct kernel_param *kp)
@@ -869,7 +946,7 @@ static int __init set_init_ctx(void)
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->profile = aa_get_profile(root_ns->unconfined);
+	ctx->label = aa_get_label(ns_unconfined(root_ns));
 	cred_ctx(cred) = ctx;
 
 	return 0;
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 960c913..72c6043 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -226,7 +226,7 @@ void aa_dfa_free_kref(struct kref *kref)
  * @flags: flags controlling what type of accept tables are acceptable
  *
  * Unpack a dfa that has been serialized.  To find information on the dfa
- * format look in Documentation/security/apparmor.txt
+ * format look in Documentation/admin-guide/LSM/apparmor.rst
  * Assumes the dfa @blob stream has been aligned on a 8 byte boundary
  *
  * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index a8fc7d0..9d5de1d 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -50,7 +50,7 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
  *     namespace root.
  */
 static int disconnect(const struct path *path, char *buf, char **name,
-		      int flags)
+		      int flags, const char *disconnected)
 {
 	int error = 0;
 
@@ -63,9 +63,14 @@ static int disconnect(const struct path *path, char *buf, char **name,
 		error = -EACCES;
 		if (**name == '/')
 			*name = *name + 1;
-	} else if (**name != '/')
-		/* CONNECT_PATH with missing root */
-		error = prepend(name, *name - buf, "/", 1);
+	} else {
+		if (**name != '/')
+			/* CONNECT_PATH with missing root */
+			error = prepend(name, *name - buf, "/", 1);
+		if (!error && disconnected)
+			error = prepend(name, *name - buf, disconnected,
+					strlen(disconnected));
+	}
 
 	return error;
 }
@@ -74,9 +79,9 @@ static int disconnect(const struct path *path, char *buf, char **name,
  * d_namespace_path - lookup a name associated with a given path
  * @path: path to lookup  (NOT NULL)
  * @buf:  buffer to store path to  (NOT NULL)
- * @buflen: length of @buf
  * @name: Returns - pointer for start of path name with in @buf (NOT NULL)
  * @flags: flags controlling path lookup
+ * @disconnected: string to prefix to disconnected paths
  *
  * Handle path name lookup.
  *
@@ -84,12 +89,14 @@ static int disconnect(const struct path *path, char *buf, char **name,
  *          When no error the path name is returned in @name which points to
  *          to a position in @buf
  */
-static int d_namespace_path(const struct path *path, char *buf, int buflen,
-			    char **name, int flags)
+static int d_namespace_path(const struct path *path, char *buf, char **name,
+			    int flags, const char *disconnected)
 {
 	char *res;
 	int error = 0;
 	int connected = 1;
+	int isdir = (flags & PATH_IS_DIR) ? 1 : 0;
+	int buflen = aa_g_path_max - isdir;
 
 	if (path->mnt->mnt_flags & MNT_INTERNAL) {
 		/* it's not mounted anywhere */
@@ -104,10 +111,12 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen,
 			/* TODO: convert over to using a per namespace
 			 * control instead of hard coded /proc
 			 */
-			return prepend(name, *name - buf, "/proc", 5);
+			error = prepend(name, *name - buf, "/proc", 5);
+			goto out;
 		} else
-			return disconnect(path, buf, name, flags);
-		return 0;
+			error = disconnect(path, buf, name, flags,
+					   disconnected);
+		goto out;
 	}
 
 	/* resolve paths relative to chroot?*/
@@ -126,8 +135,11 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen,
 	 * be returned.
 	 */
 	if (!res || IS_ERR(res)) {
-		if (PTR_ERR(res) == -ENAMETOOLONG)
-			return -ENAMETOOLONG;
+		if (PTR_ERR(res) == -ENAMETOOLONG) {
+			error = -ENAMETOOLONG;
+			*name = buf;
+			goto out;
+		}
 		connected = 0;
 		res = dentry_path_raw(path->dentry, buf, buflen);
 		if (IS_ERR(res)) {
@@ -140,6 +152,9 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen,
 
 	*name = res;
 
+	if (!connected)
+		error = disconnect(path, buf, name, flags, disconnected);
+
 	/* Handle two cases:
 	 * 1. A deleted dentry && profile is not allowing mediation of deleted
 	 * 2. On some filesystems, newly allocated dentries appear to the
@@ -147,62 +162,30 @@ static int d_namespace_path(const struct path *path, char *buf, int buflen,
 	 *    allocated.
 	 */
 	if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
-	    !(flags & PATH_MEDIATE_DELETED)) {
+	    !(flags & (PATH_MEDIATE_DELETED | PATH_DELEGATE_DELETED))) {
 			error = -ENOENT;
 			goto out;
 	}
 
-	if (!connected)
-		error = disconnect(path, buf, name, flags);
-
 out:
-	return error;
-}
-
-/**
- * get_name_to_buffer - get the pathname to a buffer ensure dir / is appended
- * @path: path to get name for  (NOT NULL)
- * @flags: flags controlling path lookup
- * @buffer: buffer to put name in  (NOT NULL)
- * @size: size of buffer
- * @name: Returns - contains position of path name in @buffer (NOT NULL)
- *
- * Returns: %0 else error on failure
- */
-static int get_name_to_buffer(const struct path *path, int flags, char *buffer,
-			      int size, char **name, const char **info)
-{
-	int adjust = (flags & PATH_IS_DIR) ? 1 : 0;
-	int error = d_namespace_path(path, buffer, size - adjust, name, flags);
-
-	if (!error && (flags & PATH_IS_DIR) && (*name)[1] != '\0')
-		/*
-		 * Append "/" to the pathname.  The root directory is a special
-		 * case; it already ends in slash.
-		 */
-		strcpy(&buffer[size - 2], "/");
-
-	if (info && error) {
-		if (error == -ENOENT)
-			*info = "Failed name lookup - deleted entry";
-		else if (error == -EACCES)
-			*info = "Failed name lookup - disconnected path";
-		else if (error == -ENAMETOOLONG)
-			*info = "Failed name lookup - name too long";
-		else
-			*info = "Failed name lookup";
-	}
+	/*
+	 * Append "/" to the pathname.  The root directory is a special
+	 * case; it already ends in slash.
+	 */
+	if (!error && isdir && ((*name)[1] != '\0' || (*name)[0] != '/'))
+		strcpy(&buf[aa_g_path_max - 2], "/");
 
 	return error;
 }
 
 /**
- * aa_path_name - compute the pathname of a file
+ * aa_path_name - get the pathname to a buffer, ensure dir / is appended
  * @path: path the file  (NOT NULL)
  * @flags: flags controlling path name generation
- * @buffer: buffer that aa_get_name() allocated  (NOT NULL)
+ * @buffer: buffer to put name in (NOT NULL)
  * @name: Returns - the generated path name if !error (NOT NULL)
  * @info: Returns - information on why the path lookup failed (MAYBE NULL)
+ * @disconnected: string to prepend to disconnected paths
  *
  * @name is a pointer to the beginning of the pathname (which usually differs
  * from the beginning of the buffer), or NULL.  If there is an error @name
@@ -215,32 +198,23 @@ static int get_name_to_buffer(const struct path *path, int flags, char *buffer,
  *
  * Returns: %0 else error code if could retrieve name
  */
-int aa_path_name(const struct path *path, int flags, char **buffer,
-		 const char **name, const char **info)
+int aa_path_name(const struct path *path, int flags, char *buffer,
+		 const char **name, const char **info, const char *disconnected)
 {
-	char *buf, *str = NULL;
-	int size = 256;
-	int error;
+	char *str = NULL;
+	int error = d_namespace_path(path, buffer, &str, flags, disconnected);
 
-	*name = NULL;
-	*buffer = NULL;
-	for (;;) {
-		/* freed by caller */
-		buf = kmalloc(size, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-
-		error = get_name_to_buffer(path, flags, buf, size, &str, info);
-		if (error != -ENAMETOOLONG)
-			break;
-
-		kfree(buf);
-		size <<= 1;
-		if (size > aa_g_path_max)
-			return -ENAMETOOLONG;
-		*info = NULL;
+	if (info && error) {
+		if (error == -ENOENT)
+			*info = "Failed name lookup - deleted entry";
+		else if (error == -EACCES)
+			*info = "Failed name lookup - disconnected path";
+		else if (error == -ENAMETOOLONG)
+			*info = "Failed name lookup - name too long";
+		else
+			*info = "Failed name lookup";
 	}
-	*buffer = buf;
+
 	*name = str;
 
 	return error;
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index cf9d670..244ea4a 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -101,20 +101,9 @@ const char *const aa_profile_mode_names[] = {
 	"unconfined",
 };
 
-/* requires profile list write lock held */
-void __aa_update_proxy(struct aa_profile *orig, struct aa_profile *new)
-{
-	struct aa_profile *tmp;
-
-	tmp = rcu_dereference_protected(orig->proxy->profile,
-					mutex_is_locked(&orig->ns->lock));
-	rcu_assign_pointer(orig->proxy->profile, aa_get_profile(new));
-	orig->flags |= PFLAG_STALE;
-	aa_put_profile(tmp);
-}
 
 /**
- * __list_add_profile - add a profile to a list
+ * __add_profile - add a profiles to list and label tree
  * @list: list to add it to  (NOT NULL)
  * @profile: the profile to add  (NOT NULL)
  *
@@ -122,12 +111,21 @@ void __aa_update_proxy(struct aa_profile *orig, struct aa_profile *new)
  *
  * Requires: namespace lock be held, or list not be shared
  */
-static void __list_add_profile(struct list_head *list,
-			       struct aa_profile *profile)
+static void __add_profile(struct list_head *list, struct aa_profile *profile)
 {
+	struct aa_label *l;
+
+	AA_BUG(!list);
+	AA_BUG(!profile);
+	AA_BUG(!profile->ns);
+	AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
 	list_add_rcu(&profile->base.list, list);
 	/* get list reference */
 	aa_get_profile(profile);
+	l = aa_label_insert(&profile->ns->labels, &profile->label);
+	AA_BUG(l != &profile->label);
+	aa_put_label(l);
 }
 
 /**
@@ -144,6 +142,10 @@ static void __list_add_profile(struct list_head *list,
  */
 static void __list_remove_profile(struct aa_profile *profile)
 {
+	AA_BUG(!profile);
+	AA_BUG(!profile->ns);
+	AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
 	list_del_rcu(&profile->base.list);
 	aa_put_profile(profile);
 }
@@ -156,11 +158,15 @@ static void __list_remove_profile(struct aa_profile *profile)
  */
 static void __remove_profile(struct aa_profile *profile)
 {
+	AA_BUG(!profile);
+	AA_BUG(!profile->ns);
+	AA_BUG(!mutex_is_locked(&profile->ns->lock));
+
 	/* release any children lists first */
 	__aa_profile_list_release(&profile->base.profiles);
 	/* released by free_profile */
-	__aa_update_proxy(profile, profile->ns->unconfined);
-	__aa_fs_profile_rmdir(profile);
+	aa_label_remove(&profile->label);
+	__aafs_profile_rmdir(profile);
 	__list_remove_profile(profile);
 }
 
@@ -177,24 +183,6 @@ void __aa_profile_list_release(struct list_head *head)
 		__remove_profile(profile);
 }
 
-
-static void free_proxy(struct aa_proxy *p)
-{
-	if (p) {
-		/* r->profile will not be updated any more as r is dead */
-		aa_put_profile(rcu_dereference_protected(p->profile, true));
-		kzfree(p);
-	}
-}
-
-
-void aa_free_proxy_kref(struct kref *kref)
-{
-	struct aa_proxy *p = container_of(kref, struct aa_proxy, count);
-
-	free_proxy(p);
-}
-
 /**
  * aa_free_data - free a data blob
  * @ptr: data to free
@@ -242,7 +230,6 @@ void aa_free_profile(struct aa_profile *profile)
 	kzfree(profile->dirname);
 	aa_put_dfa(profile->xmatch);
 	aa_put_dfa(profile->policy.dfa);
-	aa_put_proxy(profile->proxy);
 
 	if (profile->data) {
 		rht = profile->data;
@@ -253,63 +240,51 @@ void aa_free_profile(struct aa_profile *profile)
 
 	kzfree(profile->hash);
 	aa_put_loaddata(profile->rawdata);
+
 	kzfree(profile);
 }
 
 /**
- * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref)
- * @head: rcu_head callback for freeing of a profile  (NOT NULL)
- */
-static void aa_free_profile_rcu(struct rcu_head *head)
-{
-	struct aa_profile *p = container_of(head, struct aa_profile, rcu);
-	if (p->flags & PFLAG_NS_COUNT)
-		aa_free_ns(p->ns);
-	else
-		aa_free_profile(p);
-}
-
-/**
- * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
- * @kr: kref callback for freeing of a profile  (NOT NULL)
- */
-void aa_free_profile_kref(struct kref *kref)
-{
-	struct aa_profile *p = container_of(kref, struct aa_profile, count);
-	call_rcu(&p->rcu, aa_free_profile_rcu);
-}
-
-/**
  * aa_alloc_profile - allocate, initialize and return a new profile
  * @hname: name of the profile  (NOT NULL)
  * @gfp: allocation type
  *
  * Returns: refcount profile or NULL on failure
  */
-struct aa_profile *aa_alloc_profile(const char *hname, gfp_t gfp)
+struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
+				    gfp_t gfp)
 {
 	struct aa_profile *profile;
 
 	/* freed by free_profile - usually through aa_put_profile */
-	profile = kzalloc(sizeof(*profile), gfp);
+	profile = kzalloc(sizeof(*profile) + sizeof(struct aa_profile *) * 2,
+			  gfp);
 	if (!profile)
 		return NULL;
 
-	profile->proxy = kzalloc(sizeof(struct aa_proxy), gfp);
-	if (!profile->proxy)
-		goto fail;
-	kref_init(&profile->proxy->count);
-
 	if (!aa_policy_init(&profile->base, NULL, hname, gfp))
 		goto fail;
-	kref_init(&profile->count);
+	if (!aa_label_init(&profile->label, 1))
+		goto fail;
+
+	/* update being set needed by fs interface */
+	if (!proxy) {
+		proxy = aa_alloc_proxy(&profile->label, gfp);
+		if (!proxy)
+			goto fail;
+	} else
+		aa_get_proxy(proxy);
+	profile->label.proxy = proxy;
+
+	profile->label.hname = profile->base.hname;
+	profile->label.flags |= FLAG_PROFILE;
+	profile->label.vec[0] = profile;
 
 	/* refcount released by caller */
 	return profile;
 
 fail:
-	kzfree(profile->proxy);
-	kzfree(profile);
+	aa_free_profile(profile);
 
 	return NULL;
 }
@@ -362,14 +337,14 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 	if (profile)
 		goto out;
 
-	profile = aa_alloc_profile(name, gfp);
+	profile = aa_alloc_profile(name, NULL, gfp);
 	if (!profile)
 		goto fail;
 
 	profile->mode = APPARMOR_COMPLAIN;
-	profile->flags |= PFLAG_NULL;
+	profile->label.flags |= FLAG_NULL;
 	if (hat)
-		profile->flags |= PFLAG_HAT;
+		profile->label.flags |= FLAG_HAT;
 	profile->path_flags = parent->path_flags;
 
 	/* released on free_profile */
@@ -379,7 +354,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 	profile->policy.dfa = aa_get_dfa(nulldfa);
 
 	mutex_lock(&profile->ns->lock);
-	__list_add_profile(&parent->base.profiles, profile);
+	__add_profile(&parent->base.profiles, profile);
 	mutex_unlock(&profile->ns->lock);
 
 	/* refcount released by caller */
@@ -389,7 +364,6 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 	return profile;
 
 fail:
-	kfree(name);
 	aa_free_profile(profile);
 	return NULL;
 }
@@ -397,20 +371,6 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat,
 /* TODO: profile accounting - setup in remove */
 
 /**
- * __find_child - find a profile on @head list with a name matching @name
- * @head: list to search  (NOT NULL)
- * @name: name of profile (NOT NULL)
- *
- * Requires: rcu_read_lock be held
- *
- * Returns: unrefcounted profile ptr, or NULL if not found
- */
-static struct aa_profile *__find_child(struct list_head *head, const char *name)
-{
-	return (struct aa_profile *)__policy_find(head, name);
-}
-
-/**
  * __strn_find_child - find a profile on @head list using substring of @name
  * @head: list to search  (NOT NULL)
  * @name: name of profile (NOT NULL)
@@ -427,6 +387,20 @@ static struct aa_profile *__strn_find_child(struct list_head *head,
 }
 
 /**
+ * __find_child - find a profile on @head list with a name matching @name
+ * @head: list to search  (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__find_child(struct list_head *head, const char *name)
+{
+	return __strn_find_child(head, name, strlen(name));
+}
+
+/**
  * aa_find_child - find a profile by @name in @parent
  * @parent: profile to search  (NOT NULL)
  * @name: profile name to search for  (NOT NULL)
@@ -556,7 +530,7 @@ struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *hname)
 	return aa_lookupn_profile(ns, hname, strlen(hname));
 }
 
-struct aa_profile *aa_fqlookupn_profile(struct aa_profile *base,
+struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
 					const char *fqname, size_t n)
 {
 	struct aa_profile *profile;
@@ -566,11 +540,11 @@ struct aa_profile *aa_fqlookupn_profile(struct aa_profile *base,
 
 	name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len);
 	if (ns_name) {
-		ns = aa_findn_ns(base->ns, ns_name, ns_len);
+		ns = aa_lookupn_ns(labels_ns(base), ns_name, ns_len);
 		if (!ns)
 			return NULL;
 	} else
-		ns = aa_get_ns(base->ns);
+		ns = aa_get_ns(labels_ns(base));
 
 	if (name)
 		profile = aa_lookupn_profile(ns, name, n - (name - fqname));
@@ -596,7 +570,7 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
 			       const char **info)
 {
 	if (profile) {
-		if (profile->flags & PFLAG_IMMUTABLE) {
+		if (profile->label.flags & FLAG_IMMUTIBLE) {
 			*info = "cannot replace immutible profile";
 			return -EPERM;
 		} else if (noreplace) {
@@ -619,29 +593,31 @@ static void audit_cb(struct audit_buffer *ab, void *va)
 }
 
 /**
- * aa_audit_policy - Do auditing of policy changes
- * @profile: profile to check if it can manage policy
+ * audit_policy - Do auditing of policy changes
+ * @label: label performing the policy change
  * @op: policy operation being performed
- * @gfp: memory allocation flags
- * @nsname: name of the ns being manipulated (MAY BE NULL)
+ * @ns_name: name of namespace being manipulated
  * @name: name of profile being manipulated (NOT NULL)
  * @info: any extra information to be audited (MAYBE NULL)
  * @error: error code
  *
  * Returns: the error to be returned after audit is done
  */
-static int audit_policy(struct aa_profile *profile, const char *op,
-			const char *nsname, const char *name,
+static int audit_policy(struct aa_label *label, const char *op,
+			const char *ns_name, const char *name,
 			const char *info, int error)
 {
 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, op);
 
-	aad(&sa)->iface.ns = nsname;
+	aad(&sa)->iface.ns = ns_name;
 	aad(&sa)->name = name;
 	aad(&sa)->info = info;
 	aad(&sa)->error = error;
+	aad(&sa)->label = label;
 
-	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
+	aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
+
+	return error;
 }
 
 /**
@@ -685,22 +661,30 @@ bool policy_admin_capable(struct aa_ns *ns)
 
 /**
  * aa_may_manage_policy - can the current task manage policy
- * @profile: profile to check if it can manage policy
+ * @label: label to check if it can manage policy
  * @op: the policy manipulation operation being done
  *
  * Returns: 0 if the task is allowed to manipulate policy else error
  */
-int aa_may_manage_policy(struct aa_profile *profile, struct aa_ns *ns,
-			 const char *op)
+int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
 {
+	const char *op;
+
+	if (mask & AA_MAY_REMOVE_POLICY)
+		op = OP_PROF_RM;
+	else if (mask & AA_MAY_REPLACE_POLICY)
+		op = OP_PROF_REPL;
+	else
+		op = OP_PROF_LOAD;
+
 	/* check if loading policy is locked out */
 	if (aa_g_lock_policy)
-		return audit_policy(profile, op, NULL, NULL,
-			     "policy_locked", -EACCES);
+		return audit_policy(label, op, NULL, NULL, "policy_locked",
+				    -EACCES);
 
 	if (!policy_admin_capable(ns))
-		return audit_policy(profile, op, NULL, NULL,
-				    "not policy admin", -EACCES);
+		return audit_policy(label, op, NULL, NULL, "not policy admin",
+				    -EACCES);
 
 	/* TODO: add fine grained mediation of policy loads */
 	return 0;
@@ -742,8 +726,7 @@ static struct aa_profile *__list_lookup_parent(struct list_head *lh,
  *
  * Requires: namespace list lock be held, or list not be shared
  */
-static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
-			      bool share_proxy)
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
 {
 	struct aa_profile *child, *tmp;
 
@@ -758,7 +741,7 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
 			p = __find_child(&new->base.profiles, child->base.name);
 			if (p) {
 				/* @p replaces @child  */
-				__replace_profile(child, p, share_proxy);
+				__replace_profile(child, p);
 				continue;
 			}
 
@@ -776,15 +759,9 @@ static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
 		struct aa_profile *parent = aa_deref_parent(old);
 		rcu_assign_pointer(new->parent, aa_get_profile(parent));
 	}
-	__aa_update_proxy(old, new);
-	if (share_proxy) {
-		aa_put_proxy(new->proxy);
-		new->proxy = aa_get_proxy(old->proxy);
-	} else if (!rcu_access_pointer(new->proxy->profile))
-		/* aafs interface uses proxy */
-		rcu_assign_pointer(new->proxy->profile,
-				   aa_get_profile(new));
-	__aa_fs_profile_migrate_dents(old, new);
+	aa_label_replace(&old->label, &new->label);
+	/* migrate dents must come after label replacement b/c update */
+	__aafs_profile_migrate_dents(old, new);
 
 	if (list_empty(&new->base.list)) {
 		/* new is not on a list already */
@@ -821,11 +798,41 @@ static int __lookup_replace(struct aa_ns *ns, const char *hname,
 	return 0;
 }
 
+static void share_name(struct aa_profile *old, struct aa_profile *new)
+{
+	aa_put_str(new->base.hname);
+	aa_get_str(old->base.hname);
+	new->base.hname = old->base.hname;
+	new->base.name = old->base.name;
+	new->label.hname = old->label.hname;
+}
+
+/* Update to newest version of parent after previous replacements
+ * Returns: unrefcounted newest version of parent
+ */
+static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
+{
+	struct aa_profile *parent, *newest;
+
+	parent = rcu_dereference_protected(new->parent,
+					   mutex_is_locked(&new->ns->lock));
+	newest = aa_get_newest_profile(parent);
+
+	/* parent replaced in this atomic set? */
+	if (newest != parent) {
+		aa_put_profile(parent);
+		rcu_assign_pointer(new->parent, newest);
+	} else
+		aa_put_profile(newest);
+
+	return newest;
+}
+
 /**
  * aa_replace_profiles - replace profile(s) on the profile list
- * @view: namespace load is viewed from
+ * @policy_ns: namespace load is occurring on
  * @label: label that is attempting to load/replace policy
- * @noreplace: true if only doing addition, no replacement allowed
+ * @mask: permission mask
  * @udata: serialized data stream  (NOT NULL)
  *
  * unpack and replace a profile on the profile list and uses of that profile
@@ -834,16 +841,19 @@ static int __lookup_replace(struct aa_ns *ns, const char *hname,
  *
  * Returns: size of data consumed else error code on failure.
  */
-ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
-			    bool noreplace, struct aa_loaddata *udata)
+ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
+			    u32 mask, struct aa_loaddata *udata)
 {
 	const char *ns_name, *info = NULL;
 	struct aa_ns *ns = NULL;
 	struct aa_load_ent *ent, *tmp;
-	const char *op = OP_PROF_REPL;
+	struct aa_loaddata *rawdata_ent;
+	const char *op;
 	ssize_t count, error;
 	LIST_HEAD(lh);
 
+	op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD;
+	aa_get_loaddata(udata);
 	/* released below */
 	error = aa_unpack(udata, &lh, &ns_name);
 	if (error)
@@ -874,7 +884,8 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 			count++;
 	}
 	if (ns_name) {
-		ns = aa_prepare_ns(view, ns_name);
+		ns = aa_prepare_ns(policy_ns ? policy_ns : labels_ns(label),
+				   ns_name);
 		if (IS_ERR(ns)) {
 			op = OP_PROF_LOAD;
 			info = "failed to prepare namespace";
@@ -884,22 +895,38 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 			goto fail;
 		}
 	} else
-		ns = aa_get_ns(view);
+		ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
 
 	mutex_lock(&ns->lock);
+	/* check for duplicate rawdata blobs: space and file dedup */
+	list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
+		if (aa_rawdata_eq(rawdata_ent, udata)) {
+			struct aa_loaddata *tmp;
+
+			tmp = __aa_get_loaddata(rawdata_ent);
+			/* check we didn't fail the race */
+			if (tmp) {
+				aa_put_loaddata(udata);
+				udata = tmp;
+				break;
+			}
+		}
+	}
 	/* setup parent and ns info */
 	list_for_each_entry(ent, &lh, list) {
 		struct aa_policy *policy;
+
 		ent->new->rawdata = aa_get_loaddata(udata);
-		error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+		error = __lookup_replace(ns, ent->new->base.hname,
+					 !(mask & AA_MAY_REPLACE_POLICY),
 					 &ent->old, &info);
 		if (error)
 			goto fail_lock;
 
 		if (ent->new->rename) {
 			error = __lookup_replace(ns, ent->new->rename,
-						 noreplace, &ent->rename,
-						 &info);
+						!(mask & AA_MAY_REPLACE_POLICY),
+						&ent->rename, &info);
 			if (error)
 				goto fail_lock;
 		}
@@ -929,15 +956,16 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 	}
 
 	/* create new fs entries for introspection if needed */
+	if (!udata->dents[AAFS_LOADDATA_DIR]) {
+		error = __aa_fs_create_rawdata(ns, udata);
+		if (error) {
+			info = "failed to create raw_data dir and files";
+			ent = NULL;
+			goto fail_lock;
+		}
+	}
 	list_for_each_entry(ent, &lh, list) {
-		if (ent->old) {
-			/* inherit old interface files */
-
-			/* if (ent->rename)
-				TODO: support rename */
-		/* } else if (ent->rename) {
-			TODO: support rename */
-		} else {
+		if (!ent->old) {
 			struct dentry *parent;
 			if (rcu_access_pointer(ent->new->parent)) {
 				struct aa_profile *p;
@@ -945,65 +973,61 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 				parent = prof_child_dir(p);
 			} else
 				parent = ns_subprofs_dir(ent->new->ns);
-			error = __aa_fs_profile_mkdir(ent->new, parent);
+			error = __aafs_profile_mkdir(ent->new, parent);
 		}
 
 		if (error) {
-			info = "failed to create ";
+			info = "failed to create";
 			goto fail_lock;
 		}
 	}
 
 	/* Done with checks that may fail - do actual replacement */
+	__aa_bump_ns_revision(ns);
+	__aa_loaddata_update(udata, ns->revision);
 	list_for_each_entry_safe(ent, tmp, &lh, list) {
 		list_del_init(&ent->list);
 		op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
 
-		audit_policy(profile, op, NULL, ent->new->base.hname,
-			     NULL, error);
+		if (ent->old && ent->old->rawdata == ent->new->rawdata) {
+			/* dedup actual profile replacement */
+			audit_policy(label, op, ns_name, ent->new->base.hname,
+				     "same as current profile, skipping",
+				     error);
+			goto skip;
+		}
+
+		/*
+		 * TODO: finer dedup based on profile range in data. Load set
+		 * can differ but profile may remain unchanged
+		 */
+		audit_policy(label, op, ns_name, ent->new->base.hname, NULL,
+			     error);
 
 		if (ent->old) {
-			__replace_profile(ent->old, ent->new, 1);
-			if (ent->rename) {
-				/* aafs interface uses proxy */
-				struct aa_proxy *r = ent->new->proxy;
-				rcu_assign_pointer(r->profile,
-						   aa_get_profile(ent->new));
-				__replace_profile(ent->rename, ent->new, 0);
-			}
-		} else if (ent->rename) {
-			/* aafs interface uses proxy */
-			rcu_assign_pointer(ent->new->proxy->profile,
-					   aa_get_profile(ent->new));
-			__replace_profile(ent->rename, ent->new, 0);
-		} else if (ent->new->parent) {
-			struct aa_profile *parent, *newest;
-			parent = aa_deref_parent(ent->new);
-			newest = aa_get_newest_profile(parent);
-
-			/* parent replaced in this atomic set? */
-			if (newest != parent) {
-				aa_get_profile(newest);
-				rcu_assign_pointer(ent->new->parent, newest);
-				aa_put_profile(parent);
-			}
-			/* aafs interface uses proxy */
-			rcu_assign_pointer(ent->new->proxy->profile,
-					   aa_get_profile(ent->new));
-			__list_add_profile(&newest->base.profiles, ent->new);
-			aa_put_profile(newest);
+			share_name(ent->old, ent->new);
+			__replace_profile(ent->old, ent->new);
 		} else {
-			/* aafs interface uses proxy */
-			rcu_assign_pointer(ent->new->proxy->profile,
-					   aa_get_profile(ent->new));
-			__list_add_profile(&ns->base.profiles, ent->new);
+			struct list_head *lh;
+
+			if (rcu_access_pointer(ent->new->parent)) {
+				struct aa_profile *parent;
+
+				parent = update_to_newest_parent(ent->new);
+				lh = &parent->base.profiles;
+			} else
+				lh = &ns->base.profiles;
+			__add_profile(lh, ent->new);
 		}
+	skip:
 		aa_load_ent_free(ent);
 	}
+	__aa_labelset_update_subtree(ns);
 	mutex_unlock(&ns->lock);
 
 out:
 	aa_put_ns(ns);
+	aa_put_loaddata(udata);
 
 	if (error)
 		return error;
@@ -1013,10 +1037,10 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 	mutex_unlock(&ns->lock);
 
 	/* audit cause of failure */
-	op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
+	op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
 fail:
-	audit_policy(profile, op, ns_name, ent ? ent->new->base.hname : NULL,
-		     info, error);
+	audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL,
+		     info, error);
 	/* audit status that rest of profiles in the atomic set failed too */
 	info = "valid profile in failed atomic policy load";
 	list_for_each_entry(tmp, &lh, list) {
@@ -1026,8 +1050,8 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 			continue;
 		}
 		op = (!tmp->old) ? OP_PROF_LOAD : OP_PROF_REPL;
-		audit_policy(profile, op, ns_name,
-			     tmp->new->base.hname, info, error);
+		audit_policy(label, op, ns_name, tmp->new->base.hname, info,
+			     error);
 	}
 	list_for_each_entry_safe(ent, tmp, &lh, list) {
 		list_del_init(&ent->list);
@@ -1039,8 +1063,8 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
 
 /**
  * aa_remove_profiles - remove profile(s) from the system
- * @view: namespace the remove is being done from
- * @subj: profile attempting to remove policy
+ * @policy_ns: namespace the remove is being done from
+ * @subj: label attempting to remove policy
  * @fqname: name of the profile or namespace to remove  (NOT NULL)
  * @size: size of the name
  *
@@ -1051,13 +1075,13 @@ ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_profile *profile,
  *
  * Returns: size of data consume else error code if fails
  */
-ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_profile *subj,
+ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
 			   char *fqname, size_t size)
 {
-	struct aa_ns *root = NULL, *ns = NULL;
+	struct aa_ns *ns = NULL;
 	struct aa_profile *profile = NULL;
 	const char *name = fqname, *info = NULL;
-	char *ns_name = NULL;
+	const char *ns_name = NULL;
 	ssize_t error = 0;
 
 	if (*fqname == 0) {
@@ -1066,12 +1090,13 @@ ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_profile *subj,
 		goto fail;
 	}
 
-	root = view;
-
 	if (fqname[0] == ':') {
-		name = aa_split_fqname(fqname, &ns_name);
+		size_t ns_len;
+
+		name = aa_splitn_fqname(fqname, size, &ns_name, &ns_len);
 		/* released below */
-		ns = aa_find_ns(root, ns_name);
+		ns = aa_lookupn_ns(policy_ns ? policy_ns : labels_ns(subj),
+				   ns_name, ns_len);
 		if (!ns) {
 			info = "namespace does not exist";
 			error = -ENOENT;
@@ -1079,12 +1104,13 @@ ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_profile *subj,
 		}
 	} else
 		/* released below */
-		ns = aa_get_ns(root);
+		ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(subj));
 
 	if (!name) {
 		/* remove namespace - can only happen if fqname[0] == ':' */
 		mutex_lock(&ns->parent->lock);
 		__aa_remove_ns(ns);
+		__aa_bump_ns_revision(ns);
 		mutex_unlock(&ns->parent->lock);
 	} else {
 		/* remove profile */
@@ -1097,6 +1123,8 @@ ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_profile *subj,
 		}
 		name = profile->base.hname;
 		__remove_profile(profile);
+		__aa_labelset_update_subtree(ns);
+		__aa_bump_ns_revision(ns);
 		mutex_unlock(&ns->lock);
 	}
 
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index 93d1826..351d3ba 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -23,6 +23,7 @@
 #include "include/apparmor.h"
 #include "include/context.h"
 #include "include/policy_ns.h"
+#include "include/label.h"
 #include "include/policy.h"
 
 /* root profile namespace */
@@ -99,15 +100,17 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
 		goto fail_ns;
 
 	INIT_LIST_HEAD(&ns->sub_ns);
+	INIT_LIST_HEAD(&ns->rawdata_list);
 	mutex_init(&ns->lock);
+	init_waitqueue_head(&ns->wait);
 
 	/* released by aa_free_ns() */
-	ns->unconfined = aa_alloc_profile("unconfined", GFP_KERNEL);
+	ns->unconfined = aa_alloc_profile("unconfined", NULL, GFP_KERNEL);
 	if (!ns->unconfined)
 		goto fail_unconfined;
 
-	ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR |
-		PFLAG_IMMUTABLE | PFLAG_NS_COUNT;
+	ns->unconfined->label.flags |= FLAG_IX_ON_NAME_ERROR |
+		FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
 	ns->unconfined->mode = APPARMOR_UNCONFINED;
 
 	/* ns and ns->unconfined share ns->unconfined refcount */
@@ -115,6 +118,8 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
 
 	atomic_set(&ns->uniq_null, 0);
 
+	aa_labelset_init(&ns->labels);
+
 	return ns;
 
 fail_unconfined:
@@ -137,6 +142,7 @@ void aa_free_ns(struct aa_ns *ns)
 		return;
 
 	aa_policy_destroy(&ns->base);
+	aa_labelset_destroy(&ns->labels);
 	aa_put_ns(ns->parent);
 
 	ns->unconfined->ns = NULL;
@@ -181,6 +187,60 @@ struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name)
 	return aa_findn_ns(root, name, strlen(name));
 }
 
+/**
+ * __aa_lookupn_ns - lookup the namespace matching @hname
+ * @view: namespace to start the lookup from  (NOT NULL)
+ * @hname: hierarchical ns name  (NOT NULL)
+ * @n: length of @hname
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted ns pointer or NULL if not found
+ *
+ * Do a relative name lookup, recursing through the namespace tree.
+ */
+struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n)
+{
+	struct aa_ns *ns = view;
+	const char *split;
+
+	for (split = strnstr(hname, "//", n); split;
+	     split = strnstr(hname, "//", n)) {
+		ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname);
+		if (!ns)
+			return NULL;
+
+		n -= split + 2 - hname;
+		hname = split + 2;
+	}
+
+	if (n)
+		return __aa_findn_ns(&ns->sub_ns, hname, n);
+	return NULL;
+}
+
+/**
+ * aa_lookupn_ns  -  look up a policy namespace relative to @view
+ * @view: namespace to search in  (NOT NULL)
+ * @name: name of namespace to find  (NOT NULL)
+ * @n: length of @name
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ *          called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n)
+{
+	struct aa_ns *ns = NULL;
+
+	rcu_read_lock();
+	ns = aa_get_ns(__aa_lookupn_ns(view, name, n));
+	rcu_read_unlock();
+
+	return ns;
+}
+
 static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
 				    struct dentry *dir)
 {
@@ -195,7 +255,7 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
 	if (!ns)
 		return NULL;
 	mutex_lock(&ns->lock);
-	error = __aa_fs_ns_mkdir(ns, ns_subns_dir(parent), name);
+	error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
 	if (error) {
 		AA_ERROR("Failed to create interface for ns %s\n",
 			 ns->base.name);
@@ -281,9 +341,15 @@ static void destroy_ns(struct aa_ns *ns)
 	/* release all sub namespaces */
 	__ns_list_release(&ns->sub_ns);
 
-	if (ns->parent)
-		__aa_update_proxy(ns->unconfined, ns->parent->unconfined);
-	__aa_fs_ns_rmdir(ns);
+	if (ns->parent) {
+		unsigned long flags;
+
+		write_lock_irqsave(&ns->labels.lock, flags);
+		__aa_proxy_redirect(ns_unconfined(ns),
+				    ns_unconfined(ns->parent));
+		write_unlock_irqrestore(&ns->labels.lock, flags);
+	}
+	__aafs_ns_rmdir(ns);
 	mutex_unlock(&ns->lock);
 }
 
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index f3422a9..c600f4d 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -13,7 +13,7 @@
  * License.
  *
  * AppArmor uses a serialized binary format for loading policy. To find
- * policy format documentation look in Documentation/security/apparmor.txt
+ * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
  * All policy is validated before it is used.
  */
 
@@ -26,6 +26,7 @@
 #include "include/context.h"
 #include "include/crypto.h"
 #include "include/match.h"
+#include "include/path.h"
 #include "include/policy.h"
 #include "include/policy_unpack.h"
 
@@ -107,7 +108,7 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
 		       const char *name, const char *info, struct aa_ext *e,
 		       int error)
 {
-	struct aa_profile *profile = __aa_current_profile();
+	struct aa_profile *profile = labels_profile(aa_current_raw_label());
 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
 	if (e)
 		aad(&sa)->iface.pos = e->pos - e->start;
@@ -122,16 +123,73 @@ static int audit_iface(struct aa_profile *new, const char *ns_name,
 	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
 }
 
+void __aa_loaddata_update(struct aa_loaddata *data, long revision)
+{
+	AA_BUG(!data);
+	AA_BUG(!data->ns);
+	AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
+	AA_BUG(!mutex_is_locked(&data->ns->lock));
+	AA_BUG(data->revision > revision);
+
+	data->revision = revision;
+	d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
+		current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
+	d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
+		current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
+}
+
+bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
+{
+	if (l->size != r->size)
+		return false;
+	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
+		return false;
+	return memcmp(l->data, r->data, r->size) == 0;
+}
+
+/*
+ * need to take the ns mutex lock which is NOT safe in most places
+ * where put_loaddata is called, so we have to delay freeing it
+ */
+static void do_loaddata_free(struct work_struct *work)
+{
+	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
+	struct aa_ns *ns = aa_get_ns(d->ns);
+
+	if (ns) {
+		mutex_lock(&ns->lock);
+		__aa_fs_remove_rawdata(d);
+		mutex_unlock(&ns->lock);
+		aa_put_ns(ns);
+	}
+
+	kzfree(d->hash);
+	kfree(d->name);
+	kvfree(d);
+}
+
 void aa_loaddata_kref(struct kref *kref)
 {
 	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
 
 	if (d) {
-		kzfree(d->hash);
-		kvfree(d);
+		INIT_WORK(&d->work, do_loaddata_free);
+		schedule_work(&d->work);
 	}
 }
 
+struct aa_loaddata *aa_loaddata_alloc(size_t size)
+{
+	struct aa_loaddata *d = kvzalloc(sizeof(*d) + size, GFP_KERNEL);
+
+	if (d == NULL)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&d->count);
+	INIT_LIST_HEAD(&d->list);
+
+	return d;
+}
+
 /* test if read will be in packed data bounds */
 static bool inbounds(struct aa_ext *e, size_t size)
 {
@@ -408,7 +466,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
 		profile->file.trans.size = size;
 		for (i = 0; i < size; i++) {
 			char *str;
-			int c, j, size2 = unpack_strdup(e, &str, NULL);
+			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
 			/* unpack_strdup verifies that the last character is
 			 * null termination byte.
 			 */
@@ -420,19 +478,25 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
 				goto fail;
 
 			/* count internal #  of internal \0 */
-			for (c = j = 0; j < size2 - 2; j++) {
-				if (!str[j])
+			for (c = j = 0; j < size2 - 1; j++) {
+				if (!str[j]) {
+					pos = j;
 					c++;
+				}
 			}
 			if (*str == ':') {
+				/* first character after : must be valid */
+				if (!str[1])
+					goto fail;
 				/* beginning with : requires an embedded \0,
 				 * verify that exactly 1 internal \0 exists
 				 * trailing \0 already verified by unpack_strdup
+				 *
+				 * convert \0 back to : for label_parse
 				 */
-				if (c != 1)
-					goto fail;
-				/* first character after : must be valid */
-				if (!str[1])
+				if (c == 1)
+					str[pos] = ':';
+				else if (c > 1)
 					goto fail;
 			} else if (c)
 				/* fail - all other cases with embedded \0 */
@@ -545,7 +609,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 		name = tmpname;
 	}
 
-	profile = aa_alloc_profile(name, GFP_KERNEL);
+	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
 	if (!profile)
 		return ERR_PTR(-ENOMEM);
 
@@ -569,13 +633,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 		profile->xmatch_len = tmp;
 	}
 
+	/* disconnected attachment string is optional */
+	(void) unpack_str(e, &profile->disconnected, "disconnected");
+
 	/* per profile debug flags (complain, audit) */
 	if (!unpack_nameX(e, AA_STRUCT, "flags"))
 		goto fail;
 	if (!unpack_u32(e, &tmp, NULL))
 		goto fail;
 	if (tmp & PACKED_FLAG_HAT)
-		profile->flags |= PFLAG_HAT;
+		profile->label.flags |= FLAG_HAT;
 	if (!unpack_u32(e, &tmp, NULL))
 		goto fail;
 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
@@ -594,10 +661,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 
 	/* path_flags is optional */
 	if (unpack_u32(e, &profile->path_flags, "path_flags"))
-		profile->path_flags |= profile->flags & PFLAG_MEDIATE_DELETED;
+		profile->path_flags |= profile->label.flags &
+			PATH_MEDIATE_DELETED;
 	else
 		/* set a default value if path_flags field is not present */
-		profile->path_flags = PFLAG_MEDIATE_DELETED;
+		profile->path_flags = PATH_MEDIATE_DELETED;
 
 	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
 		goto fail;
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 3466a27..d816173 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -34,50 +34,41 @@
  *
  * Returns: size of string placed in @string else error code on failure
  */
-int aa_getprocattr(struct aa_profile *profile, char **string)
+int aa_getprocattr(struct aa_label *label, char **string)
 {
-	char *str;
-	int len = 0, mode_len = 0, ns_len = 0, name_len;
-	const char *mode_str = aa_profile_mode_names[profile->mode];
-	const char *ns_name = NULL;
-	struct aa_ns *ns = profile->ns;
-	struct aa_ns *current_ns = __aa_current_profile()->ns;
-	char *s;
+	struct aa_ns *ns = labels_ns(label);
+	struct aa_ns *current_ns = aa_get_current_ns();
+	int len;
 
-	if (!aa_ns_visible(current_ns, ns, true))
+	if (!aa_ns_visible(current_ns, ns, true)) {
+		aa_put_ns(current_ns);
 		return -EACCES;
-
-	ns_name = aa_ns_name(current_ns, ns, true);
-	ns_len = strlen(ns_name);
-
-	/* if the visible ns_name is > 0 increase size for : :// seperator */
-	if (ns_len)
-		ns_len += 4;
-
-	/* unconfined profiles don't have a mode string appended */
-	if (!unconfined(profile))
-		mode_len = strlen(mode_str) + 3;	/* + 3 for _() */
-
-	name_len = strlen(profile->base.hname);
-	len = mode_len + ns_len + name_len + 1;	    /* + 1 for \n */
-	s = str = kmalloc(len + 1, GFP_KERNEL);	    /* + 1 \0 */
-	if (!str)
-		return -ENOMEM;
-
-	if (ns_len) {
-		/* skip over prefix current_ns->base.hname and separating // */
-		sprintf(s, ":%s://", ns_name);
-		s += ns_len;
 	}
-	if (unconfined(profile))
-		/* mode string not being appended */
-		sprintf(s, "%s\n", profile->base.hname);
-	else
-		sprintf(s, "%s (%s)\n", profile->base.hname, mode_str);
-	*string = str;
 
-	/* NOTE: len does not include \0 of string, not saved as part of file */
-	return len;
+	len = aa_label_snxprint(NULL, 0, current_ns, label,
+				FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+				FLAG_HIDDEN_UNCONFINED);
+	AA_BUG(len < 0);
+
+	*string = kmalloc(len + 2, GFP_KERNEL);
+	if (!*string) {
+		aa_put_ns(current_ns);
+		return -ENOMEM;
+	}
+
+	len = aa_label_snxprint(*string, len + 2, current_ns, label,
+				FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+				FLAG_HIDDEN_UNCONFINED);
+	if (len < 0) {
+		aa_put_ns(current_ns);
+		return len;
+	}
+
+	(*string)[len] = '\n';
+	(*string)[len + 1] = 0;
+
+	aa_put_ns(current_ns);
+	return len + 1;
 }
 
 /**
@@ -108,11 +99,11 @@ static char *split_token_from_name(const char *op, char *args, u64 *token)
  * aa_setprocattr_chagnehat - handle procattr interface to change_hat
  * @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
  * @size: size of the args
- * @test: true if this is a test of change_hat permissions
+ * @flags: set of flags governing behavior
  *
  * Returns: %0 or error code if change_hat fails
  */
-int aa_setprocattr_changehat(char *args, size_t size, int test)
+int aa_setprocattr_changehat(char *args, size_t size, int flags)
 {
 	char *hat;
 	u64 token;
@@ -147,5 +138,5 @@ int aa_setprocattr_changehat(char *args, size_t size, int test)
 		AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n",
 			 __func__, current->pid, token, count, "<NULL>");
 
-	return aa_change_hat(hats, count, token, test);
+	return aa_change_hat(hats, count, token, flags);
 }
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 86a941a..d8bc842 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/audit.h>
+#include <linux/security.h>
 
 #include "include/audit.h"
 #include "include/context.h"
@@ -24,8 +25,8 @@
  */
 #include "rlim_names.h"
 
-struct aa_fs_entry aa_fs_entry_rlimit[] = {
-	AA_FS_FILE_STRING("mask", AA_FS_RLIMIT_MASK),
+struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
+	AA_SFS_FILE_STRING("mask", AA_SFS_RLIMIT_MASK),
 	{ }
 };
 
@@ -36,6 +37,11 @@ static void audit_cb(struct audit_buffer *ab, void *va)
 
 	audit_log_format(ab, " rlimit=%s value=%lu",
 			 rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
+	if (aad(sa)->peer) {
+		audit_log_format(ab, " peer=");
+		aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
+				FLAGS_NONE, GFP_ATOMIC);
+	}
 }
 
 /**
@@ -48,13 +54,17 @@ static void audit_cb(struct audit_buffer *ab, void *va)
  * Returns: 0 or sa->error else other error code on failure
  */
 static int audit_resource(struct aa_profile *profile, unsigned int resource,
-			  unsigned long value, int error)
+			  unsigned long value, struct aa_label *peer,
+			  const char *info, int error)
 {
 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SETRLIMIT);
 
 	aad(&sa)->rlim.rlim = resource;
 	aad(&sa)->rlim.max = value;
+	aad(&sa)->peer = peer;
+	aad(&sa)->info = info;
 	aad(&sa)->error = error;
+
 	return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
 }
 
@@ -72,9 +82,21 @@ int aa_map_resource(int resource)
 	return rlim_map[resource];
 }
 
+static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
+			     struct rlimit *new_rlim)
+{
+	int e = 0;
+
+	if (profile->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
+	    profile->rlimits.limits[resource].rlim_max)
+		e = -EACCES;
+	return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
+			      e);
+}
+
 /**
  * aa_task_setrlimit - test permission to set an rlimit
- * @profile - profile confining the task  (NOT NULL)
+ * @label - label confining the task  (NOT NULL)
  * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
@@ -83,14 +105,15 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
 		      unsigned int resource, struct rlimit *new_rlim)
 {
-	struct aa_profile *task_profile;
+	struct aa_profile *profile;
+	struct aa_label *peer;
 	int error = 0;
 
 	rcu_read_lock();
-	task_profile = aa_get_profile(aa_cred_profile(__task_cred(task)));
+	peer = aa_get_newest_cred_label(__task_cred(task));
 	rcu_read_unlock();
 
 	/* TODO: extend resource control to handle other (non current)
@@ -99,53 +122,70 @@ int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
 	 * the same profile or that the task setting the resource of another
 	 * task has CAP_SYS_RESOURCE.
 	 */
-	if ((profile != task_profile &&
-	     aa_capable(profile, CAP_SYS_RESOURCE, 1)) ||
-	    (profile->rlimits.mask & (1 << resource) &&
-	     new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
-		error = -EACCES;
 
-	aa_put_profile(task_profile);
+	if (label != peer &&
+	    !aa_capable(label, CAP_SYS_RESOURCE, SECURITY_CAP_NOAUDIT))
+		error = fn_for_each(label, profile,
+				audit_resource(profile, resource,
+					       new_rlim->rlim_max, peer,
+					       "cap_sys_resource", -EACCES));
+	else
+		error = fn_for_each_confined(label, profile,
+				profile_setrlimit(profile, resource, new_rlim));
+	aa_put_label(peer);
 
-	return audit_resource(profile, resource, new_rlim->rlim_max, error);
+	return error;
 }
 
 /**
  * __aa_transition_rlimits - apply new profile rlimits
- * @old: old profile on task  (NOT NULL)
- * @new: new profile with rlimits to apply  (NOT NULL)
+ * @old_l: old label on task  (NOT NULL)
+ * @new_l: new label with rlimits to apply  (NOT NULL)
  */
-void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new)
+void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l)
 {
 	unsigned int mask = 0;
 	struct rlimit *rlim, *initrlim;
-	int i;
+	struct aa_profile *old, *new;
+	struct label_it i;
 
-	/* for any rlimits the profile controlled reset the soft limit
-	 * to the less of the tasks hard limit and the init tasks soft limit
+	old = labels_profile(old_l);
+	new = labels_profile(new_l);
+
+	/* for any rlimits the profile controlled, reset the soft limit
+	 * to the lesser of the task's hard limit and the init task's soft limit
 	 */
-	if (old->rlimits.mask) {
-		for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
-			if (old->rlimits.mask & mask) {
-				rlim = current->signal->rlim + i;
-				initrlim = init_task.signal->rlim + i;
-				rlim->rlim_cur = min(rlim->rlim_max,
-						     initrlim->rlim_cur);
+	label_for_each_confined(i, old_l, old) {
+		if (old->rlimits.mask) {
+			int j;
+
+			for (j = 0, mask = 1; j < RLIM_NLIMITS; j++,
+				     mask <<= 1) {
+				if (old->rlimits.mask & mask) {
+					rlim = current->signal->rlim + j;
+					initrlim = init_task.signal->rlim + j;
+					rlim->rlim_cur = min(rlim->rlim_max,
+							    initrlim->rlim_cur);
+				}
 			}
 		}
 	}
 
 	/* set any new hard limits as dictated by the new profile */
-	if (!new->rlimits.mask)
-		return;
-	for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
-		if (!(new->rlimits.mask & mask))
-			continue;
+	label_for_each_confined(i, new_l, new) {
+		int j;
 
-		rlim = current->signal->rlim + i;
-		rlim->rlim_max = min(rlim->rlim_max,
-				     new->rlimits.limits[i].rlim_max);
-		/* soft limit should not exceed hard limit */
-		rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
+		if (!new->rlimits.mask)
+			continue;
+		for (j = 0, mask = 1; j < RLIM_NLIMITS; j++, mask <<= 1) {
+			if (!(new->rlimits.mask & mask))
+				continue;
+
+			rlim = current->signal->rlim + j;
+			rlim->rlim_max = min(rlim->rlim_max,
+					     new->rlimits.limits[j].rlim_max);
+			/* soft limit should not exceed hard limit */
+			rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
+		}
 	}
 }
diff --git a/security/inode.c b/security/inode.c
index eccd58e..8dd9ca8 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -26,11 +26,31 @@
 static struct vfsmount *mount;
 static int mount_count;
 
+static void securityfs_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	clear_inode(inode);
+	if (S_ISLNK(inode->i_mode))
+		kfree(inode->i_link);
+}
+
+static const struct super_operations securityfs_super_operations = {
+	.statfs		= simple_statfs,
+	.evict_inode	= securityfs_evict_inode,
+};
+
 static int fill_super(struct super_block *sb, void *data, int silent)
 {
 	static const struct tree_descr files[] = {{""}};
+	int error;
 
-	return simple_fill_super(sb, SECURITYFS_MAGIC, files);
+	error = simple_fill_super(sb, SECURITYFS_MAGIC, files);
+	if (error)
+		return error;
+
+	sb->s_op = &securityfs_super_operations;
+
+	return 0;
 }
 
 static struct dentry *get_sb(struct file_system_type *fs_type,
@@ -48,7 +68,7 @@ static struct file_system_type fs_type = {
 };
 
 /**
- * securityfs_create_file - create a file in the securityfs filesystem
+ * securityfs_create_dentry - create a dentry in the securityfs filesystem
  *
  * @name: a pointer to a string containing the name of the file to create.
  * @mode: the permission that the file should have
@@ -60,34 +80,35 @@ static struct file_system_type fs_type = {
  *        the open() call.
  * @fops: a pointer to a struct file_operations that should be used for
  *        this file.
+ * @iops: a pointer to the struct inode_operations that should be used for
+ *        this file/dir
  *
- * This is the basic "create a file" function for securityfs.  It allows for a
- * wide range of flexibility in creating a file, or a directory (if you
- * want to create a directory, the securityfs_create_dir() function is
- * recommended to be used instead).
+ * This is the basic "create a file/dir/symlink" function for
+ * securityfs.  It allows for a wide range of flexibility in creating
+ * a file, or a directory (if you want to create a directory, the
+ * securityfs_create_dir() function is recommended to be used
+ * instead).
  *
  * This function returns a pointer to a dentry if it succeeds.  This
- * pointer must be passed to the securityfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here).  If an error occurs, the function will return
- * the error value (via ERR_PTR).
+ * pointer must be passed to the securityfs_remove() function when the
+ * file is to be removed (no automatic cleanup happens if your module
+ * is unloaded, you are responsible here).  If an error occurs, the
+ * function will return the error value (via ERR_PTR).
  *
  * If securityfs is not enabled in the kernel, the value %-ENODEV is
  * returned.
  */
-struct dentry *securityfs_create_file(const char *name, umode_t mode,
-				   struct dentry *parent, void *data,
-				   const struct file_operations *fops)
+static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
+					struct dentry *parent, void *data,
+					const struct file_operations *fops,
+					const struct inode_operations *iops)
 {
 	struct dentry *dentry;
-	int is_dir = S_ISDIR(mode);
 	struct inode *dir, *inode;
 	int error;
 
-	if (!is_dir) {
-		BUG_ON(!fops);
+	if (!(mode & S_IFMT))
 		mode = (mode & S_IALLUGO) | S_IFREG;
-	}
 
 	pr_debug("securityfs: creating file '%s'\n",name);
 
@@ -120,11 +141,14 @@ struct dentry *securityfs_create_file(const char *name, umode_t mode,
 	inode->i_mode = mode;
 	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 	inode->i_private = data;
-	if (is_dir) {
+	if (S_ISDIR(mode)) {
 		inode->i_op = &simple_dir_inode_operations;
 		inode->i_fop = &simple_dir_operations;
 		inc_nlink(inode);
 		inc_nlink(dir);
+	} else if (S_ISLNK(mode)) {
+		inode->i_op = iops ? iops : &simple_symlink_inode_operations;
+		inode->i_link = data;
 	} else {
 		inode->i_fop = fops;
 	}
@@ -141,6 +165,38 @@ struct dentry *securityfs_create_file(const char *name, umode_t mode,
 	simple_release_fs(&mount, &mount_count);
 	return dentry;
 }
+
+/**
+ * securityfs_create_file - create a file in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          file will be created in the root of the securityfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ *        on.  The inode.i_private pointer will point to this value on
+ *        the open() call.
+ * @fops: a pointer to a struct file_operations that should be used for
+ *        this file.
+ *
+ * This function creates a file in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here).  If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
+				      struct dentry *parent, void *data,
+				      const struct file_operations *fops)
+{
+	return securityfs_create_dentry(name, mode, parent, data, fops, NULL);
+}
 EXPORT_SYMBOL_GPL(securityfs_create_file);
 
 /**
@@ -165,13 +221,59 @@ EXPORT_SYMBOL_GPL(securityfs_create_file);
  */
 struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
 {
-	return securityfs_create_file(name,
-				      S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
-				      parent, NULL, NULL);
+	return securityfs_create_file(name, S_IFDIR | 0755, parent, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(securityfs_create_dir);
 
 /**
+ * securityfs_create_symlink - create a symlink in the securityfs filesystem
+ *
+ * @name: a pointer to a string containing the name of the symlink to
+ *        create.
+ * @parent: a pointer to the parent dentry for the symlink.  This should be a
+ *          directory dentry if set.  If this parameter is %NULL, then the
+ *          symlink will be created in the root of the securityfs filesystem.
+ * @target: a pointer to a string containing the name of the symlink's target.
+ *          If this parameter is %NULL, then the @iops parameter needs to be
+ *          set up to handle .readlink and .get_link inode_operations.
+ * @iops: a pointer to the struct inode_operations to use for the symlink. If
+ *        this parameter is %NULL, then the default
+ *        simple_symlink_inode_operations will be used.
+ *
+ * This function creates a symlink in securityfs with the given @name.
+ *
+ * This function returns a pointer to a dentry if it succeeds.  This
+ * pointer must be passed to the securityfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here).  If an error occurs, the function will return
+ * the error value (via ERR_PTR).
+ *
+ * If securityfs is not enabled in the kernel, the value %-ENODEV is
+ * returned.
+ */
+struct dentry *securityfs_create_symlink(const char *name,
+					 struct dentry *parent,
+					 const char *target,
+					 const struct inode_operations *iops)
+{
+	struct dentry *dent;
+	char *link = NULL;
+
+	if (target) {
+		link = kstrdup(target, GFP_KERNEL);
+		if (!link)
+			return ERR_PTR(-ENOMEM);
+	}
+	dent = securityfs_create_dentry(name, S_IFLNK | 0444, parent,
+					link, NULL, iops);
+	if (IS_ERR(dent))
+		kfree(link);
+
+	return dent;
+}
+EXPORT_SYMBOL_GPL(securityfs_create_symlink);
+
+/**
  * securityfs_remove - removes a file or directory from the securityfs filesystem
  *
 * @dentry: a pointer to the dentry of the file or directory to be removed.
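
securityfs gains symlink support above: securityfs_create_symlink() duplicates a literal @target with kstrdup() (the copy is later freed by securityfs_evict_inode() via inode->i_link), or the caller can supply its own @iops for dynamically generated targets. A hypothetical kernel-style caller might look like the sketch below; the "example" names are made up and the snippet is illustrative only, not buildable on its own.

/* Hypothetical caller of the new securityfs symlink API -- a sketch only. */
static struct dentry *example_dir, *example_link;

static int __init example_securityfs_init(void)
{
	example_dir = securityfs_create_dir("example", NULL);
	if (IS_ERR(example_dir))
		return PTR_ERR(example_dir);

	/* literal target: securityfs keeps its own copy and frees it when
	 * the symlink's inode is evicted */
	example_link = securityfs_create_symlink("policy", example_dir,
						 "example/policy_current", NULL);
	if (IS_ERR(example_link)) {
		securityfs_remove(example_dir);
		return PTR_ERR(example_link);
	}
	return 0;
}
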
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 80052ed..ab6a029 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -92,13 +92,13 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
 	siglen -= sizeof(*hdr);
 
-	if (siglen != __be16_to_cpu(hdr->sig_size))
+	if (siglen != be16_to_cpu(hdr->sig_size))
 		return -EBADMSG;
 
 	if (hdr->hash_algo >= HASH_ALGO__LAST)
 		return -ENOPKG;
 
-	key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid));
+	key = request_asymmetric_key(keyring, be32_to_cpu(hdr->keyid));
 	if (IS_ERR(key))
 		return PTR_ERR(key);
 
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index d7f282d..1d32cd2 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -164,7 +164,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
 	hmac_misc.mode = inode->i_mode;
 	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
 	if (evm_hmac_attrs & EVM_ATTR_FSUUID)
-		crypto_shash_update(desc, inode->i_sb->s_uuid,
+		crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
 				    sizeof(inode->i_sb->s_uuid));
 	crypto_shash_final(desc, digest);
 }
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index c710d22..6fc888c 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -182,7 +182,7 @@ security_initcall(integrity_iintcache_init);
  *
  */
 int integrity_kernel_read(struct file *file, loff_t offset,
-			  char *addr, unsigned long count)
+			  void *addr, unsigned long count)
 {
 	mm_segment_t old_fs;
 	char __user *buf = (char __user *)addr;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 370eb2f..35ef693 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -96,19 +96,19 @@
 
 	config IMA_DEFAULT_HASH_SHA1
 		bool "SHA1 (default)"
-		depends on CRYPTO_SHA1
+		depends on CRYPTO_SHA1=y
 
 	config IMA_DEFAULT_HASH_SHA256
 		bool "SHA256"
-		depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+		depends on CRYPTO_SHA256=y && !IMA_TEMPLATE
 
 	config IMA_DEFAULT_HASH_SHA512
 		bool "SHA512"
-		depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+		depends on CRYPTO_SHA512=y && !IMA_TEMPLATE
 
 	config IMA_DEFAULT_HASH_WP512
 		bool "WP512"
-		depends on CRYPTO_WP512 && !IMA_TEMPLATE
+		depends on CRYPTO_WP512=y && !IMA_TEMPLATE
 endchoice
 
 config IMA_DEFAULT_HASH
@@ -155,6 +155,14 @@
 	  <http://linux-ima.sourceforge.net>
 	  If unsure, say N.
 
+config IMA_APPRAISE_BOOTPARAM
+	bool "ima_appraise boot parameter"
+	depends on IMA_APPRAISE
+	default y
+	help
+	  This option enables the different "ima_appraise=" modes
+	  (e.g. fix, log) from the boot command line.
+
 config IMA_TRUSTED_KEYRING
 	bool "Require all keys on the .ima keyring be signed (deprecated)"
 	depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index b563fbd..d52b487 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -172,17 +172,22 @@ static inline unsigned long ima_hash_key(u8 *digest)
 	return hash_long(*digest, IMA_HASH_BITS);
 }
 
+#define __ima_hooks(hook)		\
+	hook(NONE)			\
+	hook(FILE_CHECK)		\
+	hook(MMAP_CHECK)		\
+	hook(BPRM_CHECK)		\
+	hook(POST_SETATTR)		\
+	hook(MODULE_CHECK)		\
+	hook(FIRMWARE_CHECK)		\
+	hook(KEXEC_KERNEL_CHECK)	\
+	hook(KEXEC_INITRAMFS_CHECK)	\
+	hook(POLICY_CHECK)		\
+	hook(MAX_CHECK)
+#define __ima_hook_enumify(ENUM)	ENUM,
+
 enum ima_hooks {
-	FILE_CHECK = 1,
-	MMAP_CHECK,
-	BPRM_CHECK,
-	POST_SETATTR,
-	MODULE_CHECK,
-	FIRMWARE_CHECK,
-	KEXEC_KERNEL_CHECK,
-	KEXEC_INITRAMFS_CHECK,
-	POLICY_CHECK,
-	MAX_CHECK
+	__ima_hooks(__ima_hook_enumify)
 };
 
 /* LIM API function definitions */
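
The __ima_hooks() macro above is an X-macro: the same hook list expands once with __ima_hook_enumify to produce the enum and once with __ima_hook_stringify (in the ima_policy.c hunk further down) to produce func_tokens[], so the enum and the string table can no longer drift apart. A standalone illustration of the pattern, compileable as ordinary C:

/* Standalone illustration of the X-macro pattern used by __ima_hooks(). */
#include <stdio.h>

#define HOOK_LIST(hook)		\
	hook(NONE)		\
	hook(FILE_CHECK)	\
	hook(MMAP_CHECK)	\
	hook(MAX_CHECK)

#define ENUMIFY(e)	e,
#define STRINGIFY(e)	#e,

enum hooks { HOOK_LIST(ENUMIFY) };
static const char *const hook_names[] = { HOOK_LIST(STRINGIFY) };

int main(void)
{
	int h;

	for (h = NONE; h < MAX_CHECK; h++)
		printf("%d -> %s\n", h, hook_names[h]);
	return 0;
}
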
@@ -284,7 +289,7 @@ static inline int ima_read_xattr(struct dentry *dentry,
 	return 0;
 }
 
-#endif
+#endif /* CONFIG_IMA_APPRAISE */
 
 /* LSM based policy rules require audit */
 #ifdef CONFIG_IMA_LSM_RULES
@@ -306,12 +311,12 @@ static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
 {
 	return -EINVAL;
 }
-#endif /* CONFIG_IMA_TRUSTED_KEYRING */
+#endif /* CONFIG_IMA_LSM_RULES */
 
 #ifdef	CONFIG_IMA_READ_POLICY
 #define	POLICY_FILE_FLAGS	(S_IWUSR | S_IRUSR)
 #else
 #define	POLICY_FILE_FLAGS	S_IWUSR
-#endif /* CONFIG_IMA_WRITE_POLICY */
+#endif /* CONFIG_IMA_READ_POLICY */
 
 #endif /* __LINUX_IMA_H */
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 5d0785c..809ba70 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -20,18 +20,30 @@
 
 static int __init default_appraise_setup(char *str)
 {
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
 	if (strncmp(str, "off", 3) == 0)
 		ima_appraise = 0;
 	else if (strncmp(str, "log", 3) == 0)
 		ima_appraise = IMA_APPRAISE_LOG;
 	else if (strncmp(str, "fix", 3) == 0)
 		ima_appraise = IMA_APPRAISE_FIX;
+#endif
 	return 1;
 }
 
 __setup("ima_appraise=", default_appraise_setup);
 
 /*
+ * is_ima_appraise_enabled - return appraise status
+ *
+ * Only return enabled if not in ima_appraise="fix" or "log" modes.
+ */
+bool is_ima_appraise_enabled(void)
+{
+	return (ima_appraise & IMA_APPRAISE_ENFORCE) ? 1 : 0;
+}
+
+/*
  * ima_must_appraise - set appraise flag
  *
  * Return 1 to appraise
@@ -205,7 +217,8 @@ int ima_appraise_measurement(enum ima_hooks func,
 		if (rc && rc != -ENODATA)
 			goto out;
 
-		cause = "missing-hash";
+		cause = iint->flags & IMA_DIGSIG_REQUIRED ?
+				"IMA-signature-required" : "missing-hash";
 		status = INTEGRITY_NOLABEL;
 		if (opened & FILE_CREATED)
 			iint->flags |= IMA_NEW_FILE;
@@ -228,6 +241,7 @@ int ima_appraise_measurement(enum ima_hooks func,
 	case IMA_XATTR_DIGEST_NG:
 		/* first byte contains algorithm id */
 		hash_start = 1;
+		/* fall through */
 	case IMA_XATTR_DIGEST:
 		if (iint->flags & IMA_DIGSIG_REQUIRED) {
 			cause = "IMA-signature-required";
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ca303e5..ad491c5 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -323,16 +323,11 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
 	if (*ppos != 0)
 		goto out;
 
-	result = -ENOMEM;
-	data = kmalloc(datalen + 1, GFP_KERNEL);
-	if (!data)
+	data = memdup_user_nul(buf, datalen);
+	if (IS_ERR(data)) {
+		result = PTR_ERR(data);
 		goto out;
-
-	*(data + datalen) = '\0';
-
-	result = -EFAULT;
-	if (copy_from_user(data, buf, datalen))
-		goto out_free;
+	}
 
 	result = mutex_lock_interruptible(&ima_write_mutex);
 	if (result < 0)
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 3ab1067..95209a5 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -61,7 +61,7 @@ struct ima_rule_entry {
 	enum ima_hooks func;
 	int mask;
 	unsigned long fsmagic;
-	u8 fsuuid[16];
+	uuid_t fsuuid;
 	kuid_t uid;
 	kuid_t fowner;
 	bool (*uid_op)(kuid_t, kuid_t);    /* Handlers for operators       */
@@ -96,6 +96,8 @@ static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
 	{.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
 	 .flags = IMA_FSMAGIC},
+	{.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC,
+	 .flags = IMA_FSMAGIC},
 	{.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
 };
 
@@ -139,6 +141,7 @@ static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
 	{.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
 	{.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
 	{.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+	{.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC},
 #ifdef CONFIG_IMA_WRITE_POLICY
 	{.action = APPRAISE, .func = POLICY_CHECK,
 	.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
@@ -153,6 +156,17 @@ static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
 #endif
 };
 
+static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
+	{.action = APPRAISE, .func = MODULE_CHECK,
+	 .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+	{.action = APPRAISE, .func = FIRMWARE_CHECK,
+	 .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+	{.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+	 .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+	{.action = APPRAISE, .func = POLICY_CHECK,
+	 .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+};
+
 static LIST_HEAD(ima_default_rules);
 static LIST_HEAD(ima_policy_rules);
 static LIST_HEAD(ima_temp_rules);
@@ -170,19 +184,27 @@ static int __init default_measure_policy_setup(char *str)
 }
 __setup("ima_tcb", default_measure_policy_setup);
 
+static bool ima_use_appraise_tcb __initdata;
+static bool ima_use_secure_boot __initdata;
 static int __init policy_setup(char *str)
 {
-	if (ima_policy)
-		return 1;
+	char *p;
 
-	if (strcmp(str, "tcb") == 0)
-		ima_policy = DEFAULT_TCB;
+	while ((p = strsep(&str, " |\n")) != NULL) {
+		if (*p == ' ')
+			continue;
+		if ((strcmp(p, "tcb") == 0) && !ima_policy)
+			ima_policy = DEFAULT_TCB;
+		else if (strcmp(p, "appraise_tcb") == 0)
+			ima_use_appraise_tcb = 1;
+		else if (strcmp(p, "secure_boot") == 0)
+			ima_use_secure_boot = 1;
+	}
 
 	return 1;
 }
 __setup("ima_policy=", policy_setup);
 
-static bool ima_use_appraise_tcb __initdata;
 static int __init default_appraise_policy_setup(char *str)
 {
 	ima_use_appraise_tcb = 1;
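
With the change above, policy_setup() accepts several values in a single ima_policy= option, split on '|' (and space/newline), e.g. ima_policy="tcb|appraise_tcb|secure_boot". The strsep() loop behaves as in this userspace sketch, where the flag variables are local stand-ins for the __initdata booleans:

/* Userspace sketch of the strsep() loop in policy_setup(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char arg[] = "tcb|secure_boot";	/* as in ima_policy="tcb|secure_boot" */
	char *str = arg, *p;
	int use_tcb = 0, use_appraise_tcb = 0, use_secure_boot = 0;

	while ((p = strsep(&str, " |\n")) != NULL) {
		if (*p == '\0')		/* skip empty tokens between separators */
			continue;
		if (strcmp(p, "tcb") == 0)
			use_tcb = 1;
		else if (strcmp(p, "appraise_tcb") == 0)
			use_appraise_tcb = 1;
		else if (strcmp(p, "secure_boot") == 0)
			use_secure_boot = 1;
	}
	printf("tcb=%d appraise_tcb=%d secure_boot=%d\n",
	       use_tcb, use_appraise_tcb, use_secure_boot);
	return 0;
}
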
@@ -244,7 +266,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
 	    && rule->fsmagic != inode->i_sb->s_magic)
 		return false;
 	if ((rule->flags & IMA_FSUUID) &&
-	    memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+	    !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
 		return false;
 	if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
 		return false;
@@ -405,12 +427,14 @@ void ima_update_policy_flag(void)
  */
 void __init ima_init_policy(void)
 {
-	int i, measure_entries, appraise_entries;
+	int i, measure_entries, appraise_entries, secure_boot_entries;
 
 	/* if !ima_policy set entries = 0 so we load NO default rules */
 	measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
 	appraise_entries = ima_use_appraise_tcb ?
 			 ARRAY_SIZE(default_appraise_rules) : 0;
+	secure_boot_entries = ima_use_secure_boot ?
+			ARRAY_SIZE(secure_boot_rules) : 0;
 
 	for (i = 0; i < measure_entries; i++)
 		list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
@@ -429,6 +453,14 @@ void __init ima_init_policy(void)
 		break;
 	}
 
+	/*
+	 * Insert the appraise rules requiring file signatures, prior to
+	 * any other appraise rules.
+	 */
+	for (i = 0; i < secure_boot_entries; i++)
+		list_add_tail(&secure_boot_rules[i].list,
+			      &ima_default_rules);
+
 	for (i = 0; i < appraise_entries; i++) {
 		list_add_tail(&default_appraise_rules[i].list,
 			      &ima_default_rules);
@@ -711,14 +743,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
 		case Opt_fsuuid:
 			ima_log_string(ab, "fsuuid", args[0].from);
 
-			if (memchr_inv(entry->fsuuid, 0x00,
-				       sizeof(entry->fsuuid))) {
+			if (!uuid_is_null(&entry->fsuuid)) {
 				result = -EINVAL;
 				break;
 			}
 
-			result = blk_part_pack_uuid(args[0].from,
-						    entry->fsuuid);
+			result = uuid_parse(args[0].from, &entry->fsuuid);
 			if (!result)
 				entry->flags |= IMA_FSUUID;
 			break;
@@ -933,30 +963,17 @@ enum {
 	mask_exec = 0, mask_write, mask_read, mask_append
 };
 
-static char *mask_tokens[] = {
+static const char *const mask_tokens[] = {
 	"MAY_EXEC",
 	"MAY_WRITE",
 	"MAY_READ",
 	"MAY_APPEND"
 };
 
-enum {
-	func_file = 0, func_mmap, func_bprm,
-	func_module, func_firmware, func_post,
-	func_kexec_kernel, func_kexec_initramfs,
-	func_policy
-};
+#define __ima_hook_stringify(str)	(#str),
 
-static char *func_tokens[] = {
-	"FILE_CHECK",
-	"MMAP_CHECK",
-	"BPRM_CHECK",
-	"MODULE_CHECK",
-	"FIRMWARE_CHECK",
-	"POST_SETATTR",
-	"KEXEC_KERNEL_CHECK",
-	"KEXEC_INITRAMFS_CHECK",
-	"POLICY_CHECK"
+static const char *const func_tokens[] = {
+	__ima_hooks(__ima_hook_stringify)
 };
 
 void *ima_policy_start(struct seq_file *m, loff_t *pos)
@@ -993,49 +1010,16 @@ void ima_policy_stop(struct seq_file *m, void *v)
 
 #define pt(token)	policy_tokens[token + Opt_err].pattern
 #define mt(token)	mask_tokens[token]
-#define ft(token)	func_tokens[token]
 
 /*
  * policy_func_show - display the ima_hooks policy rule
  */
 static void policy_func_show(struct seq_file *m, enum ima_hooks func)
 {
-	char tbuf[64] = {0,};
-
-	switch (func) {
-	case FILE_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_file));
-		break;
-	case MMAP_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_mmap));
-		break;
-	case BPRM_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_bprm));
-		break;
-	case MODULE_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_module));
-		break;
-	case FIRMWARE_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_firmware));
-		break;
-	case POST_SETATTR:
-		seq_printf(m, pt(Opt_func), ft(func_post));
-		break;
-	case KEXEC_KERNEL_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_kexec_kernel));
-		break;
-	case KEXEC_INITRAMFS_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_kexec_initramfs));
-		break;
-	case POLICY_CHECK:
-		seq_printf(m, pt(Opt_func), ft(func_policy));
-		break;
-	default:
-		snprintf(tbuf, sizeof(tbuf), "%d", func);
-		seq_printf(m, pt(Opt_func), tbuf);
-		break;
-	}
-	seq_puts(m, " ");
+	if (func > 0 && func < MAX_CHECK)
+		seq_printf(m, "func=%s ", func_tokens[func]);
+	else
+		seq_printf(m, "func=%d ", func);
 }
 
 int ima_policy_show(struct seq_file *m, void *v)
@@ -1087,7 +1071,7 @@ int ima_policy_show(struct seq_file *m, void *v)
 	}
 
 	if (entry->flags & IMA_FSUUID) {
-		seq_printf(m, "fsuuid=%pU", entry->fsuuid);
+		seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
 		seq_puts(m, " ");
 	}
 
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index d9aa5ab..a02a86d 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -81,7 +81,7 @@ static int get_binary_runtime_size(struct ima_template_entry *entry)
 	size += sizeof(u32);	/* pcr */
 	size += sizeof(entry->digest);
 	size += sizeof(int);	/* template name size field */
-	size += strlen(entry->template_desc->name) + 1;
+	size += strlen(entry->template_desc->name);
 	size += sizeof(entry->template_data_len);
 	size += entry->template_data_len;
 	return size;
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index cebb37c..7412d02 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -19,6 +19,9 @@
 #include "ima.h"
 #include "ima_template_lib.h"
 
+enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME,
+		     HDR_TEMPLATE_DATA, HDR__LAST };
+
 static struct ima_template_desc builtin_templates[] = {
 	{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
 	{.name = "ima-ng", .fmt = "d-ng|n-ng"},
@@ -274,13 +277,6 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
 				     int template_data_size,
 				     struct ima_template_entry **entry)
 {
-	struct binary_field_data {
-		u32 len;
-		u8 data[0];
-	} __packed;
-
-	struct binary_field_data *field_data;
-	int offset = 0;
 	int ret = 0;
 	int i;
 
@@ -290,30 +286,19 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
 	if (!*entry)
 		return -ENOMEM;
 
+	ret = ima_parse_buf(template_data, template_data + template_data_size,
+			    NULL, template_desc->num_fields,
+			    (*entry)->template_data, NULL, NULL,
+			    ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
+	if (ret < 0) {
+		kfree(*entry);
+		return ret;
+	}
+
 	(*entry)->template_desc = template_desc;
 	for (i = 0; i < template_desc->num_fields; i++) {
-		field_data = template_data + offset;
-
-		/* Each field of the template data is prefixed with a length. */
-		if (offset > (template_data_size - sizeof(*field_data))) {
-			pr_err("Restoring the template field failed\n");
-			ret = -EINVAL;
-			break;
-		}
-		offset += sizeof(*field_data);
-
-		if (ima_canonical_fmt)
-			field_data->len = le32_to_cpu(field_data->len);
-
-		if (offset > (template_data_size - field_data->len)) {
-			pr_err("Restoring the template field data failed\n");
-			ret = -EINVAL;
-			break;
-		}
-		offset += field_data->len;
-
-		(*entry)->template_data[i].len = field_data->len;
-		(*entry)->template_data_len += sizeof(field_data->len);
+		struct ima_field_data *field_data = &(*entry)->template_data[i];
+		u8 *data = field_data->data;
 
 		(*entry)->template_data[i].data =
 			kzalloc(field_data->len + 1, GFP_KERNEL);
@@ -321,8 +306,8 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
 			ret = -ENOMEM;
 			break;
 		}
-		memcpy((*entry)->template_data[i].data, field_data->data,
-			field_data->len);
+		memcpy((*entry)->template_data[i].data, data, field_data->len);
+		(*entry)->template_data_len += sizeof(field_data->len);
 		(*entry)->template_data_len += field_data->len;
 	}
 
@@ -337,27 +322,19 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
 /* Restore the serialized binary measurement list without extending PCRs. */
 int ima_restore_measurement_list(loff_t size, void *buf)
 {
-	struct binary_hdr_v1 {
-		u32 pcr;
-		u8 digest[TPM_DIGEST_SIZE];
-		u32 template_name_len;
-		char template_name[0];
-	} __packed;
 	char template_name[MAX_TEMPLATE_NAME_LEN];
 
-	struct binary_data_v1 {
-		u32 template_data_size;
-		char template_data[0];
-	} __packed;
-
 	struct ima_kexec_hdr *khdr = buf;
-	struct binary_hdr_v1 *hdr_v1;
-	struct binary_data_v1 *data_v1;
+	struct ima_field_data hdr[HDR__LAST] = {
+		[HDR_PCR] = {.len = sizeof(u32)},
+		[HDR_DIGEST] = {.len = TPM_DIGEST_SIZE},
+	};
 
 	void *bufp = buf + sizeof(*khdr);
 	void *bufendp;
 	struct ima_template_entry *entry;
 	struct ima_template_desc *template_desc;
+	DECLARE_BITMAP(hdr_mask, HDR__LAST);
 	unsigned long count = 0;
 	int ret = 0;
 
@@ -380,6 +357,10 @@ int ima_restore_measurement_list(loff_t size, void *buf)
 		return -EINVAL;
 	}
 
+	bitmap_zero(hdr_mask, HDR__LAST);
+	bitmap_set(hdr_mask, HDR_PCR, 1);
+	bitmap_set(hdr_mask, HDR_DIGEST, 1);
+
 	/*
 	 * ima kexec buffer prefix: version, buffer size, count
 	 * v1 format: pcr, digest, template-name-len, template-name,
@@ -387,31 +368,25 @@ int ima_restore_measurement_list(loff_t size, void *buf)
 	 */
 	bufendp = buf + khdr->buffer_size;
 	while ((bufp < bufendp) && (count++ < khdr->count)) {
-		hdr_v1 = bufp;
-		if (bufp > (bufendp - sizeof(*hdr_v1))) {
-			pr_err("attempting to restore partial measurement\n");
-			ret = -EINVAL;
+		int enforce_mask = ENFORCE_FIELDS;
+
+		enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0;
+		ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL,
+				    hdr_mask, enforce_mask, "entry header");
+		if (ret < 0)
 			break;
-		}
-		bufp += sizeof(*hdr_v1);
 
-		if (ima_canonical_fmt)
-			hdr_v1->template_name_len =
-			    le32_to_cpu(hdr_v1->template_name_len);
-
-		if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) ||
-		    (bufp > (bufendp - hdr_v1->template_name_len))) {
+		if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
 			pr_err("attempting to restore a template name \
 				that is too long\n");
 			ret = -EINVAL;
 			break;
 		}
-		data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len;
 
 		/* template name is not null terminated */
-		memcpy(template_name, hdr_v1->template_name,
-		       hdr_v1->template_name_len);
-		template_name[hdr_v1->template_name_len] = 0;
+		memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data,
+		       hdr[HDR_TEMPLATE_NAME].len);
+		template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
 
 		if (strcmp(template_name, "ima") == 0) {
 			pr_err("attempting to restore an unsupported \
@@ -441,34 +416,17 @@ int ima_restore_measurement_list(loff_t size, void *buf)
 			break;
 		}
 
-		if (bufp > (bufendp - sizeof(data_v1->template_data_size))) {
-			pr_err("restoring the template data size failed\n");
-			ret = -EINVAL;
-			break;
-		}
-		bufp += (u_int8_t) sizeof(data_v1->template_data_size);
-
-		if (ima_canonical_fmt)
-			data_v1->template_data_size =
-			    le32_to_cpu(data_v1->template_data_size);
-
-		if (bufp > (bufendp - data_v1->template_data_size)) {
-			pr_err("restoring the template data failed\n");
-			ret = -EINVAL;
-			break;
-		}
-		bufp += data_v1->template_data_size;
-
 		ret = ima_restore_template_data(template_desc,
-						data_v1->template_data,
-						data_v1->template_data_size,
+						hdr[HDR_TEMPLATE_DATA].data,
+						hdr[HDR_TEMPLATE_DATA].len,
 						&entry);
 		if (ret < 0)
 			break;
 
-		memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE);
-		entry->pcr =
-		    !ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr);
+		memcpy(entry->digest, hdr[HDR_DIGEST].data,
+		       hdr[HDR_DIGEST].len);
+		entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
+			     le32_to_cpu(*(hdr[HDR_PCR].data));
 		ret = ima_restore_measurement_entry(entry);
 		if (ret < 0)
 			break;
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index f9ba37b..28af43f 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -159,6 +159,67 @@ void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
 	ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
 }
 
+/**
+ * ima_parse_buf() - Parses lengths and data from an input buffer
+ * @bufstartp:       Buffer start address.
+ * @bufendp:         Buffer end address.
+ * @bufcurp:         Pointer to remaining (non-parsed) data.
+ * @maxfields:       Length of fields array.
+ * @fields:          Array containing lengths and pointers of parsed data.
+ * @curfields:       Number of array items containing parsed data.
+ * @len_mask:        Bitmap (if bit is set, data length should not be parsed).
+ * @enforce_mask:    Check if curfields == maxfields and/or bufcurp == bufendp.
+ * @bufname:         String identifier of the input buffer.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+		  int maxfields, struct ima_field_data *fields, int *curfields,
+		  unsigned long *len_mask, int enforce_mask, char *bufname)
+{
+	void *bufp = bufstartp;
+	int i;
+
+	for (i = 0; i < maxfields; i++) {
+		if (len_mask == NULL || !test_bit(i, len_mask)) {
+			if (bufp > (bufendp - sizeof(u32)))
+				break;
+
+			fields[i].len = *(u32 *)bufp;
+			if (ima_canonical_fmt)
+				fields[i].len = le32_to_cpu(fields[i].len);
+
+			bufp += sizeof(u32);
+		}
+
+		if (bufp > (bufendp - fields[i].len))
+			break;
+
+		fields[i].data = bufp;
+		bufp += fields[i].len;
+	}
+
+	if ((enforce_mask & ENFORCE_FIELDS) && i != maxfields) {
+		pr_err("%s: nr of fields mismatch: expected: %d, current: %d\n",
+		       bufname, maxfields, i);
+		return -EINVAL;
+	}
+
+	if ((enforce_mask & ENFORCE_BUFEND) && bufp != bufendp) {
+		pr_err("%s: buf end mismatch: expected: %p, current: %p\n",
+		       bufname, bufendp, bufp);
+		return -EINVAL;
+	}
+
+	if (curfields)
+		*curfields = i;
+
+	if (bufcurp)
+		*bufcurp = bufp;
+
+	return 0;
+}
+
 static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
 				       struct ima_field_data *field_data)
 {
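
ima_parse_buf() above centralizes the "read a u32 length, then that many bytes, never past the buffer end" walk that the old restore code repeated for every header and template field. A self-contained sketch of the same bounds-checked walk (simplified: host-endian lengths, no len_mask or enforce flags; the example data assumes a little-endian host):

/* Self-contained sketch of the length-prefixed walk done by ima_parse_buf(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct field {
	uint32_t len;
	const uint8_t *data;
};

/* returns the number of fields parsed, or -1 if a length runs past the end */
static int parse_fields(const uint8_t *buf, size_t size,
			struct field *fields, int maxfields)
{
	const uint8_t *p = buf, *end = buf + size;
	int i;

	for (i = 0; i < maxfields; i++) {
		uint32_t len;

		if ((size_t)(end - p) < sizeof(len))
			return -1;
		memcpy(&len, p, sizeof(len));	/* kernel uses le32_to_cpu() for canonical format */
		p += sizeof(len);

		if ((size_t)(end - p) < len)
			return -1;
		fields[i].len = len;
		fields[i].data = p;
		p += len;
	}
	return i;
}

int main(void)
{
	/* two fields: "ima" (len 3) and "ng" (len 2) */
	const uint8_t buf[] = { 3, 0, 0, 0, 'i', 'm', 'a',
				2, 0, 0, 0, 'n', 'g' };
	struct field f[2];

	if (parse_fields(buf, sizeof(buf), f, 2) == 2)
		printf("%.*s %.*s\n", (int)f[0].len, (const char *)f[0].data,
		       (int)f[1].len, (const char *)f[1].data);
	return 0;
}
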
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
index c344530..6a3d8b8 100644
--- a/security/integrity/ima/ima_template_lib.h
+++ b/security/integrity/ima/ima_template_lib.h
@@ -18,6 +18,9 @@
 #include <linux/seq_file.h>
 #include "ima.h"
 
+#define ENFORCE_FIELDS 0x00000001
+#define ENFORCE_BUFEND 0x00000002
+
 void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
 			      struct ima_field_data *field_data);
 void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
@@ -26,6 +29,9 @@ void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
 			      struct ima_field_data *field_data);
 void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
 			   struct ima_field_data *field_data);
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+		  int maxfields, struct ima_field_data *fields, int *curfields,
+		  unsigned long *len_mask, int enforce_mask, char *bufname);
 int ima_eventdigest_init(struct ima_event_data *event_data,
 			 struct ima_field_data *field_data);
 int ima_eventname_init(struct ima_event_data *event_data,
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 24520b4..a53e7e4 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -92,8 +92,8 @@ struct signature_v2_hdr {
 	uint8_t type;		/* xattr type */
 	uint8_t version;	/* signature format version */
 	uint8_t	hash_algo;	/* Digest algorithm [enum hash_algo] */
-	uint32_t keyid;		/* IMA key identifier - not X509/PGP specific */
-	uint16_t sig_size;	/* signature size */
+	__be32 keyid;		/* IMA key identifier - not X509/PGP specific */
+	__be16 sig_size;	/* signature size */
 	uint8_t sig[0];		/* signature payload */
 } __packed;
 
@@ -118,7 +118,8 @@ struct integrity_iint_cache {
 struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
 
 int integrity_kernel_read(struct file *file, loff_t offset,
-			  char *addr, unsigned long count);
+			  void *addr, unsigned long count);
+
 int __init integrity_read_file(const char *path, char **data);
 
 #define INTEGRITY_KEYRING_EVM		0
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 6fd95f7..a7a23b5 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -20,6 +20,10 @@
 
 	  If you are unsure as to whether this is required, answer N.
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 config PERSISTENT_KEYRINGS
 	bool "Enable register of persistent per-UID keyrings"
 	depends on KEYS
@@ -89,9 +93,9 @@
 config KEY_DH_OPERATIONS
        bool "Diffie-Hellman operations on retained keys"
        depends on KEYS
-       select MPILIB
        select CRYPTO
        select CRYPTO_HASH
+       select CRYPTO_DH
        help
 	 This option provides support for calculating Diffie-Hellman
 	 public keys and shared secrets using values stored as keys
diff --git a/security/keys/dh.c b/security/keys/dh.c
index e603bd9..4755d4b 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -8,34 +8,17 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/mpi.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/scatterlist.h>
 #include <linux/crypto.h>
 #include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
 #include <keys/user-type.h>
 #include "internal.h"
 
-/*
- * Public key or shared secret generation function [RFC2631 sec 2.1.1]
- *
- * ya = g^xa mod p;
- * or
- * ZZ = yb^xa mod p;
- *
- * where xa is the local private key, ya is the local public key, g is
- * the generator, p is the prime, yb is the remote public key, and ZZ
- * is the shared secret.
- *
- * Both are the same calculation, so g or yb are the "base" and ya or
- * ZZ are the "result".
- */
-static int do_dh(MPI result, MPI base, MPI xa, MPI p)
-{
-	return mpi_powm(result, base, xa, p);
-}
-
-static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
+static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
 {
 	struct key *key;
 	key_ref_t key_ref;
@@ -56,19 +39,17 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
 		status = key_validate(key);
 		if (status == 0) {
 			const struct user_key_payload *payload;
+			uint8_t *duplicate;
 
 			payload = user_key_payload_locked(key);
 
-			if (maxlen == 0) {
-				*mpi = NULL;
+			duplicate = kmemdup(payload->data, payload->datalen,
+					    GFP_KERNEL);
+			if (duplicate) {
+				*data = duplicate;
 				ret = payload->datalen;
-			} else if (payload->datalen <= maxlen) {
-				*mpi = mpi_read_raw_data(payload->data,
-							 payload->datalen);
-				if (*mpi)
-					ret = payload->datalen;
 			} else {
-				ret = -EINVAL;
+				ret = -ENOMEM;
 			}
 		}
 		up_read(&key->sem);
@@ -79,6 +60,29 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
 	return ret;
 }
 
+static void dh_free_data(struct dh *dh)
+{
+	kzfree(dh->key);
+	kzfree(dh->p);
+	kzfree(dh->g);
+}
+
+struct dh_completion {
+	struct completion completion;
+	int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+	struct dh_completion *compl = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	compl->err = err;
+	complete(&compl->completion);
+}
+
 struct kdf_sdesc {
 	struct shash_desc shash;
 	char ctx[];
@@ -89,6 +93,7 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 	struct crypto_shash *tfm;
 	struct kdf_sdesc *sdesc;
 	int size;
+	int err;
 
 	/* allocate synchronous hash */
 	tfm = crypto_alloc_shash(hashname, 0, 0);
@@ -97,16 +102,25 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 		return PTR_ERR(tfm);
 	}
 
+	err = -EINVAL;
+	if (crypto_shash_digestsize(tfm) == 0)
+		goto out_free_tfm;
+
+	err = -ENOMEM;
 	size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
 	sdesc = kmalloc(size, GFP_KERNEL);
 	if (!sdesc)
-		return -ENOMEM;
+		goto out_free_tfm;
 	sdesc->shash.tfm = tfm;
 	sdesc->shash.flags = 0x0;
 
 	*sdesc_ret = sdesc;
 
 	return 0;
+
+out_free_tfm:
+	crypto_free_shash(tfm);
+	return err;
 }
 
 static void kdf_dealloc(struct kdf_sdesc *sdesc)
@@ -120,14 +134,6 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
 	kzfree(sdesc);
 }
 
-/* convert 32 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
-{
-	__be32 *a = (__be32 *)buf;
-
-	*a = cpu_to_be32(val);
-}
-
 /*
  * Implementation of the KDF in counter mode according to SP800-108 section 5.1
  * as well as SP800-56A section 5.8.1 (Single-step KDF).
@@ -138,25 +144,39 @@ static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
  * 5.8.1.2).
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
-		   u8 *dst, unsigned int dlen)
+		   u8 *dst, unsigned int dlen, unsigned int zlen)
 {
 	struct shash_desc *desc = &sdesc->shash;
 	unsigned int h = crypto_shash_digestsize(desc->tfm);
 	int err = 0;
 	u8 *dst_orig = dst;
-	u32 i = 1;
-	u8 iteration[sizeof(u32)];
+	__be32 counter = cpu_to_be32(1);
 
 	while (dlen) {
 		err = crypto_shash_init(desc);
 		if (err)
 			goto err;
 
-		crypto_kw_cpu_to_be32(i, iteration);
-		err = crypto_shash_update(desc, iteration, sizeof(u32));
+		err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
 		if (err)
 			goto err;
 
+		if (zlen && h) {
+			u8 tmpbuffer[h];
+			size_t chunk = min_t(size_t, zlen, h);
+			memset(tmpbuffer, 0, chunk);
+
+			do {
+				err = crypto_shash_update(desc, tmpbuffer,
+							  chunk);
+				if (err)
+					goto err;
+
+				zlen -= chunk;
+				chunk = min_t(size_t, zlen, h);
+			} while (zlen);
+		}
+
 		if (src && slen) {
 			err = crypto_shash_update(desc, src, slen);
 			if (err)
@@ -179,7 +199,7 @@ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
 
 			dlen -= h;
 			dst += h;
-			i++;
+			counter = cpu_to_be32(be32_to_cpu(counter) + 1);
 		}
 	}
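
kdf_ctr() implements the SP800-108 counter-mode construction: output block i is H(be32(i) || Z || otherinfo), the blocks are concatenated and the last one truncated to the requested length; the new zlen argument feeds in the leading zero bytes that the KPP output omits, so Z keeps its full fixed length without being copied. The sketch below shows only that loop structure; mix() is a placeholder standing in for the crypto_shash digest and is NOT a real hash, and it folds Z's zero padding into the input buffer for brevity.

/* Structure-only sketch of the counter-mode KDF loop in kdf_ctr(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define H 8	/* pretend digest size in bytes */

/* placeholder mixing function -- NOT a cryptographic hash */
static void mix(const uint8_t *in, size_t inlen, uint8_t out[H])
{
	size_t i;

	memset(out, 0, H);
	for (i = 0; i < inlen; i++)
		out[i % H] ^= (uint8_t)(in[i] + i);
}

/* sketch assumes zlen <= 64 so the block buffer below is large enough */
static void kdf_ctr_sketch(const uint8_t *z, size_t zlen, uint8_t *dst, size_t dlen)
{
	uint32_t counter = 1;
	uint8_t block[4 + 64];	/* be32 counter || Z (otherinfo omitted) */

	while (dlen) {
		uint8_t digest[H];
		size_t chunk = dlen < H ? dlen : H;

		block[0] = counter >> 24;
		block[1] = counter >> 16;
		block[2] = counter >> 8;
		block[3] = counter;
		memcpy(block + 4, z, zlen);

		mix(block, 4 + zlen, digest);
		memcpy(dst, digest, chunk);	/* last block is truncated */
		dst += chunk;
		dlen -= chunk;
		counter++;
	}
}

int main(void)
{
	uint8_t z[16] = { 1, 2, 3 }, out[20];
	size_t i;

	kdf_ctr_sketch(z, sizeof(z), out, sizeof(out));
	for (i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	putchar('\n');
	return 0;
}
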
 
@@ -192,7 +212,7 @@ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
 
 static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 				 char __user *buffer, size_t buflen,
-				 uint8_t *kbuf, size_t kbuflen)
+				 uint8_t *kbuf, size_t kbuflen, size_t lzero)
 {
 	uint8_t *outbuf = NULL;
 	int ret;
@@ -203,7 +223,7 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 		goto err;
 	}
 
-	ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen);
+	ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero);
 	if (ret)
 		goto err;
 
@@ -221,21 +241,26 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
 			 struct keyctl_kdf_params *kdfcopy)
 {
 	long ret;
-	MPI base, private, prime, result;
-	unsigned nbytes;
+	ssize_t dlen;
+	int secretlen;
+	int outlen;
 	struct keyctl_dh_params pcopy;
-	uint8_t *kbuf;
-	ssize_t keylen;
-	size_t resultlen;
+	struct dh dh_inputs;
+	struct scatterlist outsg;
+	struct dh_completion compl;
+	struct crypto_kpp *tfm;
+	struct kpp_request *req;
+	uint8_t *secret;
+	uint8_t *outbuf;
 	struct kdf_sdesc *sdesc = NULL;
 
 	if (!params || (!buffer && buflen)) {
 		ret = -EINVAL;
-		goto out;
+		goto out1;
 	}
 	if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
 		ret = -EFAULT;
-		goto out;
+		goto out1;
 	}
 
 	if (kdfcopy) {
@@ -244,104 +269,147 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
 		if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
 		    kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
 			ret = -EMSGSIZE;
-			goto out;
+			goto out1;
 		}
 
 		/* get KDF name string */
 		hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
 		if (IS_ERR(hashname)) {
 			ret = PTR_ERR(hashname);
-			goto out;
+			goto out1;
 		}
 
 		/* allocate KDF from the kernel crypto API */
 		ret = kdf_alloc(&sdesc, hashname);
 		kfree(hashname);
 		if (ret)
-			goto out;
+			goto out1;
 	}
 
-	/*
-	 * If the caller requests postprocessing with a KDF, allow an
-	 * arbitrary output buffer size since the KDF ensures proper truncation.
-	 */
-	keylen = mpi_from_key(pcopy.prime, kdfcopy ? SIZE_MAX : buflen, &prime);
-	if (keylen < 0 || !prime) {
-		/* buflen == 0 may be used to query the required buffer size,
-		 * which is the prime key length.
-		 */
-		ret = keylen;
-		goto out;
+	memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+	dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out1;
 	}
+	dh_inputs.p_size = dlen;
 
-	/* The result is never longer than the prime */
-	resultlen = keylen;
-
-	keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base);
-	if (keylen < 0 || !base) {
-		ret = keylen;
-		goto error1;
+	dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out2;
 	}
+	dh_inputs.g_size = dlen;
 
-	keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private);
-	if (keylen < 0 || !private) {
-		ret = keylen;
-		goto error2;
+	dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+	if (dlen < 0) {
+		ret = dlen;
+		goto out2;
 	}
+	dh_inputs.key_size = dlen;
 
-	result = mpi_alloc(0);
-	if (!result) {
+	secretlen = crypto_dh_key_len(&dh_inputs);
+	secret = kmalloc(secretlen, GFP_KERNEL);
+	if (!secret) {
 		ret = -ENOMEM;
-		goto error3;
+		goto out2;
 	}
-
-	/* allocate space for DH shared secret and SP800-56A otherinfo */
-	kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen,
-		       GFP_KERNEL);
-	if (!kbuf) {
-		ret = -ENOMEM;
-		goto error4;
-	}
-
-	/*
-	 * Concatenate SP800-56A otherinfo past DH shared secret -- the
-	 * input to the KDF is (DH shared secret || otherinfo)
-	 */
-	if (kdfcopy && kdfcopy->otherinfo &&
-	    copy_from_user(kbuf + resultlen, kdfcopy->otherinfo,
-			   kdfcopy->otherinfolen) != 0) {
-		ret = -EFAULT;
-		goto error5;
-	}
-
-	ret = do_dh(result, base, private, prime);
+	ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
 	if (ret)
-		goto error5;
+		goto out3;
 
-	ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
-	if (ret != 0)
-		goto error5;
+	tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+	if (IS_ERR(tfm)) {
+		ret = PTR_ERR(tfm);
+		goto out3;
+	}
+
+	ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+	if (ret)
+		goto out4;
+
+	outlen = crypto_kpp_maxsize(tfm);
+
+	if (!kdfcopy) {
+		/*
+		 * When not using a KDF, buflen 0 is used to read the
+		 * required buffer length
+		 */
+		if (buflen == 0) {
+			ret = outlen;
+			goto out4;
+		} else if (outlen > buflen) {
+			ret = -EOVERFLOW;
+			goto out4;
+		}
+	}
+
+	outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+			 GFP_KERNEL);
+	if (!outbuf) {
+		ret = -ENOMEM;
+		goto out4;
+	}
+
+	sg_init_one(&outsg, outbuf, outlen);
+
+	req = kpp_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		ret = -ENOMEM;
+		goto out5;
+	}
+
+	kpp_request_set_input(req, NULL, 0);
+	kpp_request_set_output(req, &outsg, outlen);
+	init_completion(&compl.completion);
+	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+				 CRYPTO_TFM_REQ_MAY_SLEEP,
+				 dh_crypto_done, &compl);
+
+	/*
+	 * For DH, generate_public_key and generate_shared_secret are
+	 * the same calculation
+	 */
+	ret = crypto_kpp_generate_public_key(req);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&compl.completion);
+		ret = compl.err;
+		if (ret)
+			goto out6;
+	}
 
 	if (kdfcopy) {
-		ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf,
-					    resultlen + kdfcopy->otherinfolen);
-	} else {
-		ret = nbytes;
-		if (copy_to_user(buffer, kbuf, nbytes) != 0)
+		/*
+		 * Concatenate SP800-56A otherinfo past DH shared secret -- the
+		 * input to the KDF is (DH shared secret || otherinfo)
+		 */
+		if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+				   kdfcopy->otherinfolen) != 0) {
 			ret = -EFAULT;
+			goto out6;
+		}
+
+		ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
+					    req->dst_len + kdfcopy->otherinfolen,
+					    outlen - req->dst_len);
+	} else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+		ret = req->dst_len;
+	} else {
+		ret = -EFAULT;
 	}
 
-error5:
-	kzfree(kbuf);
-error4:
-	mpi_free(result);
-error3:
-	mpi_free(private);
-error2:
-	mpi_free(base);
-error1:
-	mpi_free(prime);
-out:
+out6:
+	kpp_request_free(req);
+out5:
+	kzfree(outbuf);
+out4:
+	crypto_free_kpp(tfm);
+out3:
+	kzfree(secret);
+out2:
+	dh_free_data(&dh_inputs);
+out1:
 	kdf_dealloc(sdesc);
 	return ret;
 }
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 0010955..69855ba 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -11,7 +11,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/security/keys-trusted-encrypted.txt
+ * See Documentation/security/keys/trusted-encrypted.rst
  */
 
 #include <linux/uaccess.h>
@@ -30,6 +30,7 @@
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -54,13 +55,7 @@ static int blksize;
 #define MAX_DATA_SIZE 4096
 #define MIN_DATA_SIZE  20
 
-struct sdesc {
-	struct shash_desc shash;
-	char ctx[];
-};
-
-static struct crypto_shash *hashalg;
-static struct crypto_shash *hmacalg;
+static struct crypto_shash *hash_tfm;
 
 enum {
 	Opt_err = -1, Opt_new, Opt_load, Opt_update
@@ -141,23 +136,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
  */
 static int valid_master_desc(const char *new_desc, const char *orig_desc)
 {
-	if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
-		if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
-			goto out;
-		if (orig_desc)
-			if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
-				goto out;
-	} else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
-		if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
-			goto out;
-		if (orig_desc)
-			if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
-				goto out;
-	} else
-		goto out;
+	int prefix_len;
+
+	if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+		prefix_len = KEY_TRUSTED_PREFIX_LEN;
+	else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+		prefix_len = KEY_USER_PREFIX_LEN;
+	else
+		return -EINVAL;
+
+	if (!new_desc[prefix_len])
+		return -EINVAL;
+
+	if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+		return -EINVAL;
+
 	return 0;
-out:
-	return -EINVAL;
 }
 
 /*
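
The rewritten valid_master_desc() reduces to three checks: the description must begin with one of the recognized prefixes, must name a key after the prefix, and an update must keep the original prefix. A userspace sketch of the same checks, assuming the usual "trusted:"/"user:" values of KEY_TRUSTED_PREFIX/KEY_USER_PREFIX:

/* Userspace sketch of the prefix checks in valid_master_desc(). */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int check_master_desc(const char *new_desc, const char *orig_desc)
{
	size_t prefix_len;

	if (!strncmp(new_desc, "trusted:", 8))
		prefix_len = 8;
	else if (!strncmp(new_desc, "user:", 5))
		prefix_len = 5;
	else
		return -EINVAL;

	if (!new_desc[prefix_len])		/* prefix alone is not enough */
		return -EINVAL;

	if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
		return -EINVAL;		/* updates may not switch key types */

	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_master_desc("trusted:kmk", NULL),		/* accepted */
	       check_master_desc("user:", NULL),		/* rejected */
	       check_master_desc("user:foo", "trusted:kmk"));	/* rejected */
	return 0;
}
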
@@ -321,53 +315,38 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
 	return ukey;
 }
 
-static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
+static int calc_hash(struct crypto_shash *tfm, u8 *digest,
+		     const u8 *buf, unsigned int buflen)
 {
-	struct sdesc *sdesc;
-	int size;
+	SHASH_DESC_ON_STACK(desc, tfm);
+	int err;
 
-	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
-	sdesc = kmalloc(size, GFP_KERNEL);
-	if (!sdesc)
-		return ERR_PTR(-ENOMEM);
-	sdesc->shash.tfm = alg;
-	sdesc->shash.flags = 0x0;
-	return sdesc;
+	desc->tfm = tfm;
+	desc->flags = 0;
+
+	err = crypto_shash_digest(desc, buf, buflen, digest);
+	shash_desc_zero(desc);
+	return err;
 }
 
 static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
 		     const u8 *buf, unsigned int buflen)
 {
-	struct sdesc *sdesc;
-	int ret;
+	struct crypto_shash *tfm;
+	int err;
 
-	sdesc = alloc_sdesc(hmacalg);
-	if (IS_ERR(sdesc)) {
-		pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
-		return PTR_ERR(sdesc);
+	tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+		       hmac_alg, PTR_ERR(tfm));
+		return PTR_ERR(tfm);
 	}
 
-	ret = crypto_shash_setkey(hmacalg, key, keylen);
-	if (!ret)
-		ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-	kfree(sdesc);
-	return ret;
-}
-
-static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
-{
-	struct sdesc *sdesc;
-	int ret;
-
-	sdesc = alloc_sdesc(hashalg);
-	if (IS_ERR(sdesc)) {
-		pr_info("encrypted_key: can't alloc %s\n", hash_alg);
-		return PTR_ERR(sdesc);
-	}
-
-	ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-	kfree(sdesc);
-	return ret;
+	err = crypto_shash_setkey(tfm, key, keylen);
+	if (!err)
+		err = calc_hash(tfm, digest, buf, buflen);
+	crypto_free_shash(tfm);
+	return err;
 }
 
 enum derived_key_type { ENC_KEY, AUTH_KEY };
@@ -385,10 +364,9 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
 		derived_buf_len = HASH_SIZE;
 
 	derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
-	if (!derived_buf) {
-		pr_err("encrypted_key: out of memory\n");
+	if (!derived_buf)
 		return -ENOMEM;
-	}
+
 	if (key_type)
 		strcpy(derived_buf, "AUTH_KEY");
 	else
@@ -396,8 +374,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
 
 	memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
 	       master_keylen);
-	ret = calc_hash(derived_key, derived_buf, derived_buf_len);
-	kfree(derived_buf);
+	ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
+	kzfree(derived_buf);
 	return ret;
 }
 
@@ -480,12 +458,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	unsigned int padlen;
-	char pad[16];
 	int ret;
 
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-	padlen = encrypted_datalen - epayload->decrypted_datalen;
 
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -493,11 +468,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_decrypted_data(epayload);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 2);
 	sg_set_buf(&sg_in[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_in[1], pad, padlen);
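+	/* pad with the shared zero page rather than a stack buffer */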
+	sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
 
 	sg_init_table(sg_out, 1);
 	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -533,6 +507,7 @@ static int datablob_hmac_append(struct encrypted_key_payload *epayload,
 	if (!ret)
 		dump_hmac(NULL, digest, HASH_SIZE);
 out:
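+	/* wipe the derived key material from the stack before returning */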
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -561,8 +536,8 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
 	ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
 	if (ret < 0)
 		goto out;
-	ret = memcmp(digest, epayload->format + epayload->datablob_len,
-		     sizeof digest);
+	ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+			    sizeof(digest));
 	if (ret) {
 		ret = -EINVAL;
 		dump_hmac("datablob",
@@ -571,6 +546,7 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
 		dump_hmac("calc", digest, HASH_SIZE);
 	}
 out:
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -584,9 +560,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	char pad[16];
+	u8 *pad;
 	int ret;
 
+	/* Throwaway buffer to hold the unused zero padding at the end */
+	pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+	if (!pad)
+		return -ENOMEM;
+
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -594,13 +575,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_encrypted_data(epayload, encrypted_datalen);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 1);
 	sg_init_table(sg_out, 2);
 	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
 	sg_set_buf(&sg_out[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_out[1], pad, sizeof pad);
+	sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
 
 	memcpy(iv, epayload->iv, sizeof(iv));
 	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +592,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		goto out;
 	dump_decrypted_data(epayload);
 out:
+	kfree(pad);
 	return ret;
 }
 
@@ -722,6 +703,7 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
 out:
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
@@ -828,13 +810,13 @@ static int encrypted_instantiate(struct key *key,
 	ret = encrypted_init(epayload, key->description, format, master_desc,
 			     decrypted_datalen, hex_encoded_iv);
 	if (ret < 0) {
-		kfree(epayload);
+		kzfree(epayload);
 		goto out;
 	}
 
 	rcu_assign_keypointer(key, epayload);
 out:
-	kfree(datablob);
+	kzfree(datablob);
 	return ret;
 }
 
@@ -843,8 +825,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
 	struct encrypted_key_payload *epayload;
 
 	epayload = container_of(rcu, struct encrypted_key_payload, rcu);
-	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
-	kfree(epayload);
+	kzfree(epayload);
 }
 
 /*
@@ -902,7 +883,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
 	rcu_assign_keypointer(key, new_epayload);
 	call_rcu(&epayload->rcu, encrypted_rcu_free);
 out:
-	kfree(buf);
+	kzfree(buf);
 	return ret;
 }
 
@@ -960,33 +941,26 @@ static long encrypted_read(const struct key *key, char __user *buffer,
 
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 
 	if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
 		ret = -EFAULT;
-	kfree(ascii_buf);
+	kzfree(ascii_buf);
 
 	return asciiblob_len;
 out:
 	up_read(&mkey->sem);
 	key_put(mkey);
+	memzero_explicit(derived_key, sizeof(derived_key));
 	return ret;
 }
 
 /*
- * encrypted_destroy - before freeing the key, clear the decrypted data
- *
- * Before freeing the key, clear the memory containing the decrypted
- * key data.
+ * encrypted_destroy - clear and free the key's payload
  */
 static void encrypted_destroy(struct key *key)
 {
-	struct encrypted_key_payload *epayload = key->payload.data[0];
-
-	if (!epayload)
-		return;
-
-	memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen);
-	kfree(key->payload.data[0]);
+	kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_encrypted = {
@@ -999,47 +973,17 @@ struct key_type key_type_encrypted = {
 };
 EXPORT_SYMBOL_GPL(key_type_encrypted);
 
-static void encrypted_shash_release(void)
-{
-	if (hashalg)
-		crypto_free_shash(hashalg);
-	if (hmacalg)
-		crypto_free_shash(hmacalg);
-}
-
-static int __init encrypted_shash_alloc(void)
-{
-	int ret;
-
-	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hmacalg)) {
-		pr_info("encrypted_key: could not allocate crypto %s\n",
-			hmac_alg);
-		return PTR_ERR(hmacalg);
-	}
-
-	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hashalg)) {
-		pr_info("encrypted_key: could not allocate crypto %s\n",
-			hash_alg);
-		ret = PTR_ERR(hashalg);
-		goto hashalg_fail;
-	}
-
-	return 0;
-
-hashalg_fail:
-	crypto_free_shash(hmacalg);
-	return ret;
-}
-
 static int __init init_encrypted(void)
 {
 	int ret;
 
-	ret = encrypted_shash_alloc();
-	if (ret < 0)
-		return ret;
+	hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hash_tfm)) {
+		pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+		       hash_alg, PTR_ERR(hash_tfm));
+		return PTR_ERR(hash_tfm);
+	}
+
 	ret = aes_get_sizes();
 	if (ret < 0)
 		goto out;
@@ -1048,14 +992,14 @@ static int __init init_encrypted(void)
 		goto out;
 	return 0;
 out:
-	encrypted_shash_release();
+	crypto_free_shash(hash_tfm);
 	return ret;
 
 }
 
 static void __exit cleanup_encrypted(void)
 {
-	encrypted_shash_release();
+	crypto_free_shash(hash_tfm);
 	unregister_key_type(&key_type_encrypted);
 }
 
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
index b5b4812..cbf0bc1 100644
--- a/security/keys/encrypted-keys/masterkey_trusted.c
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -11,7 +11,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/security/keys-trusted-encrypted.txt
+ * See Documentation/security/keys/trusted-encrypted.rst
  */
 
 #include <linux/uaccess.h>
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 595becc..87cb260 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -158,9 +158,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
 
 		kfree(key->description);
 
-#ifdef KEY_DEBUGGING
-		key->magic = KEY_DEBUG_MAGIC_X;
-#endif
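+		/* wipe the whole key structure before returning it to the slab */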
+		memzero_explicit(key, sizeof(*key));
 		kmem_cache_free(key_jar, key);
 	}
 }
diff --git a/security/keys/internal.h b/security/keys/internal.h
index c0f8682..91bc621 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -13,6 +13,7 @@
 #define _INTERNAL_H
 
 #include <linux/sched.h>
+#include <linux/wait_bit.h>
 #include <linux/cred.h>
 #include <linux/key-type.h>
 #include <linux/task_work.h>
diff --git a/security/keys/key.c b/security/keys/key.c
index 455c04d..83da68d 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -660,14 +660,11 @@ struct key *key_lookup(key_serial_t id)
 	goto error;
 
 found:
-	/* pretend it doesn't exist if it is awaiting deletion */
-	if (refcount_read(&key->usage) == 0)
-		goto not_found;
-
-	/* this races with key_put(), but that doesn't matter since key_put()
-	 * doesn't actually change the key
+	/* A key is allowed to be looked up only if someone still owns a
+	 * reference to it - otherwise it's awaiting the gc.
 	 */
-	__key_get(key);
+	if (!refcount_inc_not_zero(&key->usage))
+		goto not_found;
 
 error:
 	spin_unlock(&key_serial_lock);
@@ -966,12 +963,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 	/* the key must be writable */
 	ret = key_permission(key_ref, KEY_NEED_WRITE);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	/* attempt to update it if supported */
-	ret = -EOPNOTSUPP;
 	if (!key->type->update)
-		goto error;
+		return -EOPNOTSUPP;
 
 	memset(&prep, 0, sizeof(prep));
 	prep.data = payload;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 447a7d5..ab0b337 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 	/* pull the payload in if one was supplied */
 	payload = NULL;
 
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kvmalloc(plen, GFP_KERNEL);
 		if (!payload)
@@ -132,7 +132,10 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
 	key_ref_put(keyring_ref);
  error3:
-	kvfree(payload);
+	if (payload) {
+		memzero_explicit(payload, plen);
+		kvfree(payload);
+	}
  error2:
 	kfree(description);
  error:
@@ -324,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
 
 	/* pull the payload in if one was supplied */
 	payload = NULL;
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL);
 		if (!payload)
@@ -347,7 +350,7 @@ long keyctl_update_key(key_serial_t id,
 
 	key_ref_put(key_ref);
 error2:
-	kfree(payload);
+	kzfree(payload);
 error:
 	return ret;
 }
@@ -1093,7 +1096,10 @@ long keyctl_instantiate_key_common(key_serial_t id,
 		keyctl_change_reqkey_auth(NULL);
 
 error2:
-	kvfree(payload);
+	if (payload) {
+		memzero_explicit(payload, plen);
+		kvfree(payload);
+	}
 error:
 	return ret;
 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4d1678e4..de81793 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,7 +706,7 @@ static bool search_nested_keyrings(struct key *keyring,
 	 * Non-keyrings avoid the leftmost branch of the root entirely (root
 	 * slots 1-15).
 	 */
-	ptr = ACCESS_ONCE(keyring->keys.root);
+	ptr = READ_ONCE(keyring->keys.root);
 	if (!ptr)
 		goto not_this_keyring;
 
@@ -720,7 +720,7 @@ static bool search_nested_keyrings(struct key *keyring,
 		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
 			goto not_this_keyring;
 
-		ptr = ACCESS_ONCE(shortcut->next_node);
+		ptr = READ_ONCE(shortcut->next_node);
 		node = assoc_array_ptr_to_node(ptr);
 		goto begin_node;
 	}
@@ -740,7 +740,7 @@ static bool search_nested_keyrings(struct key *keyring,
 	if (assoc_array_ptr_is_shortcut(ptr)) {
 		shortcut = assoc_array_ptr_to_shortcut(ptr);
 		smp_read_barrier_depends();
-		ptr = ACCESS_ONCE(shortcut->next_node);
+		ptr = READ_ONCE(shortcut->next_node);
 		BUG_ON(!assoc_array_ptr_is_node(ptr));
 	}
 	node = assoc_array_ptr_to_node(ptr);
@@ -752,7 +752,7 @@ static bool search_nested_keyrings(struct key *keyring,
 ascend_to_node:
 	/* Go through the slots in a node */
 	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-		ptr = ACCESS_ONCE(node->slots[slot]);
+		ptr = READ_ONCE(node->slots[slot]);
 
 		if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
 			goto descend_to_node;
@@ -790,13 +790,13 @@ static bool search_nested_keyrings(struct key *keyring,
 	/* We've dealt with all the slots in the current node, so now we need
 	 * to ascend to the parent and continue processing there.
 	 */
-	ptr = ACCESS_ONCE(node->back_pointer);
+	ptr = READ_ONCE(node->back_pointer);
 	slot = node->parent_slot;
 
 	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
 		shortcut = assoc_array_ptr_to_shortcut(ptr);
 		smp_read_barrier_depends();
-		ptr = ACCESS_ONCE(shortcut->back_pointer);
+		ptr = READ_ONCE(shortcut->back_pointer);
 		slot = shortcut->parent_slot;
 	}
 	if (!ptr)
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2217dfec..86bced9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -809,15 +809,14 @@ long join_session_keyring(const char *name)
 		ret = PTR_ERR(keyring);
 		goto error2;
 	} else if (keyring == new->session_keyring) {
-		key_put(keyring);
 		ret = 0;
-		goto error2;
+		goto error3;
 	}
 
 	/* we've got a keyring - now to install it */
 	ret = install_session_keyring_to_cred(new, keyring);
 	if (ret < 0)
-		goto error2;
+		goto error3;
 
 	commit_creds(new);
 	mutex_unlock(&key_session_mutex);
@@ -827,6 +826,8 @@ long join_session_keyring(const char *name)
 okay:
 	return ret;
 
+error3:
+	key_put(keyring);
 error2:
 	mutex_unlock(&key_session_mutex);
 error:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 9822e50..63e63a4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/security/keys-request-key.txt
+ * See Documentation/security/keys/request-key.rst
  */
 
 #include <linux/module.h>
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 0f06215..afe9d22 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/security/keys-request-key.txt
+ * See Documentation/security/keys/request-key.rst
  */
 
 #include <linux/module.h>
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 2ae31c5..ddfaebf 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -8,7 +8,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/security/keys-trusted-encrypted.txt
+ * See Documentation/security/keys/trusted-encrypted.rst
  */
 
 #include <crypto/hash_info.h>
@@ -70,7 +70,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
 	}
 
 	ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -114,7 +114,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
 	if (!ret)
 		ret = crypto_shash_final(&sdesc->shash, digest);
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -165,7 +165,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
 				  paramdigest, TPM_NONCE_SIZE, h1,
 				  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -246,7 +246,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
 	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -347,7 +347,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
 	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
 		ret = -EINVAL;
 out:
-	kfree(sdesc);
+	kzfree(sdesc);
 	return ret;
 }
 
@@ -564,7 +564,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
 		*bloblen = storedsize;
 	}
 out:
-	kfree(td);
+	kzfree(td);
 	return ret;
 }
 
@@ -678,7 +678,7 @@ static int key_seal(struct trusted_key_payload *p,
 	if (ret < 0)
 		pr_info("trusted_key: srkseal failed (%d)\n", ret);
 
-	kfree(tb);
+	kzfree(tb);
 	return ret;
 }
 
@@ -703,7 +703,7 @@ static int key_unseal(struct trusted_key_payload *p,
 		/* pull migratable flag out of sealed key */
 		p->migratable = p->key[--p->key_len];
 
-	kfree(tb);
+	kzfree(tb);
 	return ret;
 }
 
@@ -1037,12 +1037,12 @@ static int trusted_instantiate(struct key *key,
 	if (!ret && options->pcrlock)
 		ret = pcrlock(options->pcrlock);
 out:
-	kfree(datablob);
-	kfree(options);
+	kzfree(datablob);
+	kzfree(options);
 	if (!ret)
 		rcu_assign_keypointer(key, payload);
 	else
-		kfree(payload);
+		kzfree(payload);
 	return ret;
 }
 
@@ -1051,8 +1051,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
 	struct trusted_key_payload *p;
 
 	p = container_of(rcu, struct trusted_key_payload, rcu);
-	memset(p->key, 0, p->key_len);
-	kfree(p);
+	kzfree(p);
 }
 
 /*
@@ -1094,13 +1093,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 	ret = datablob_parse(datablob, new_p, new_o);
 	if (ret != Opt_update) {
 		ret = -EINVAL;
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 
 	if (!new_o->keyhandle) {
 		ret = -EINVAL;
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 
@@ -1114,22 +1113,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
 	ret = key_seal(new_p, new_o);
 	if (ret < 0) {
 		pr_info("trusted_key: key_seal failed (%d)\n", ret);
-		kfree(new_p);
+		kzfree(new_p);
 		goto out;
 	}
 	if (new_o->pcrlock) {
 		ret = pcrlock(new_o->pcrlock);
 		if (ret < 0) {
 			pr_info("trusted_key: pcrlock failed (%d)\n", ret);
-			kfree(new_p);
+			kzfree(new_p);
 			goto out;
 		}
 	}
 	rcu_assign_keypointer(key, new_p);
 	call_rcu(&p->rcu, trusted_rcu_free);
 out:
-	kfree(datablob);
-	kfree(new_o);
+	kzfree(datablob);
+	kzfree(new_o);
 	return ret;
 }
 
@@ -1158,24 +1157,19 @@ static long trusted_read(const struct key *key, char __user *buffer,
 	for (i = 0; i < p->blob_len; i++)
 		bufp = hex_byte_pack(bufp, p->blob[i]);
 	if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
-		kfree(ascii_buf);
+		kzfree(ascii_buf);
 		return -EFAULT;
 	}
-	kfree(ascii_buf);
+	kzfree(ascii_buf);
 	return 2 * p->blob_len;
 }
 
 /*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
  */
 static void trusted_destroy(struct key *key)
 {
-	struct trusted_key_payload *p = key->payload.data[0];
-
-	if (!p)
-		return;
-	memset(p->key, 0, p->key_len);
-	kfree(key->payload.data[0]);
+	kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_trusted = {
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 2660513..3d8c68e 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -86,10 +86,18 @@ EXPORT_SYMBOL_GPL(user_preparse);
  */
 void user_free_preparse(struct key_preparsed_payload *prep)
 {
-	kfree(prep->payload.data[0]);
+	kzfree(prep->payload.data[0]);
 }
 EXPORT_SYMBOL_GPL(user_free_preparse);
 
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+	struct user_key_payload *payload;
+
+	payload = container_of(head, struct user_key_payload, rcu);
+	kzfree(payload);
+}
+
 /*
  * update a user defined key
  * - the key's semaphore is write-locked
@@ -112,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
 	prep->payload.data[0] = NULL;
 
 	if (zap)
-		kfree_rcu(zap, rcu);
+		call_rcu(&zap->rcu, user_free_payload_rcu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(user_update);
@@ -130,7 +138,7 @@ void user_revoke(struct key *key)
 
 	if (upayload) {
 		rcu_assign_keypointer(key, NULL);
-		kfree_rcu(upayload, rcu);
+		call_rcu(&upayload->rcu, user_free_payload_rcu);
 	}
 }
 
@@ -143,7 +151,7 @@ void user_destroy(struct key *key)
 {
 	struct user_key_payload *upayload = key->payload.data[0];
 
-	kfree(upayload);
+	kzfree(upayload);
 }
 
 EXPORT_SYMBOL_GPL(user_destroy);
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 37f04da..28d4c3a 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -410,6 +410,22 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 		audit_log_format(ab, " kmod=");
 		audit_log_untrustedstring(ab, a->u.kmod_name);
 		break;
+	case LSM_AUDIT_DATA_IBPKEY: {
+		struct in6_addr sbn_pfx;
+
+		memset(&sbn_pfx.s6_addr, 0,
+		       sizeof(sbn_pfx.s6_addr));
+		memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
+		       sizeof(a->u.ibpkey->subnet_prefix));
+		audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
+				 a->u.ibpkey->pkey, &sbn_pfx);
+		break;
+	}
+	case LSM_AUDIT_DATA_IBENDPORT:
+		audit_log_format(ab, " device=%s port_num=%u",
+				 a->u.ibendport->dev_name,
+				 a->u.ibendport->port);
+		break;
 	} /* switch (a->type) */
 }
 
diff --git a/security/security.c b/security/security.c
index b9fea39..3013237 100644
--- a/security/security.c
+++ b/security/security.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
  * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
  * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2016 Mellanox Technologies
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License as published by
@@ -25,6 +26,7 @@
 #include <linux/mount.h>
 #include <linux/personality.h>
 #include <linux/backing-dev.h>
+#include <linux/string.h>
 #include <net/flow.h>
 
 #define MAX_LSM_EVM_XATTR	2
@@ -33,6 +35,8 @@
 #define SECURITY_NAME_MAX	10
 
 struct security_hook_heads security_hook_heads __lsm_ro_after_init;
+static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
+
 char *lsm_names;
 /* Boot-time LSM user choice */
 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
@@ -86,6 +90,21 @@ static int __init choose_lsm(char *str)
 }
 __setup("security=", choose_lsm);
 
+static bool match_last_lsm(const char *list, const char *lsm)
+{
+	const char *last;
+
+	if (WARN_ON(!list || !lsm))
+		return false;
+	last = strrchr(list, ',');
+	if (last)
+		/* Pass the comma, strcmp() will check for '\0' */
+		last++;
+	else
+		last = list;
+	return !strcmp(last, lsm);
+}
+
 static int lsm_append(char *new, char **result)
 {
 	char *cp;
@@ -93,6 +112,9 @@ static int lsm_append(char *new, char **result)
 	if (*result == NULL) {
 		*result = kstrdup(new, GFP_KERNEL);
 	} else {
+		/* Check if it is the last registered name */
+		if (match_last_lsm(*result, new))
+			return 0;
 		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
 		if (cp == NULL)
 			return -ENOMEM;
@@ -146,6 +168,24 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count,
 		panic("%s - Cannot get early memory.\n", __func__);
 }
 
+int call_lsm_notifier(enum lsm_event event, void *data)
+{
+	return atomic_notifier_call_chain(&lsm_notifier_chain, event, data);
+}
+EXPORT_SYMBOL(call_lsm_notifier);
+
+int register_lsm_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&lsm_notifier_chain, nb);
+}
+EXPORT_SYMBOL(register_lsm_notifier);
+
+int unregister_lsm_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb);
+}
+EXPORT_SYMBOL(unregister_lsm_notifier);
+
 /*
  * Hook list operation macros.
  *
@@ -380,9 +420,12 @@ int security_sb_set_mnt_opts(struct super_block *sb,
 EXPORT_SYMBOL(security_sb_set_mnt_opts);
 
 int security_sb_clone_mnt_opts(const struct super_block *oldsb,
-				struct super_block *newsb)
+				struct super_block *newsb,
+				unsigned long kern_flags,
+				unsigned long *set_kern_flags)
 {
-	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb);
+	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
+				kern_flags, set_kern_flags);
 }
 EXPORT_SYMBOL(security_sb_clone_mnt_opts);
 
@@ -1496,6 +1539,33 @@ EXPORT_SYMBOL(security_tun_dev_open);
 
 #endif	/* CONFIG_SECURITY_NETWORK */
 
+#ifdef CONFIG_SECURITY_INFINIBAND
+
+int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
+{
+	return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
+}
+EXPORT_SYMBOL(security_ib_pkey_access);
+
+int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
+{
+	return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
+}
+EXPORT_SYMBOL(security_ib_endport_manage_subnet);
+
+int security_ib_alloc_security(void **sec)
+{
+	return call_int_hook(ib_alloc_security, 0, sec);
+}
+EXPORT_SYMBOL(security_ib_alloc_security);
+
+void security_ib_free_security(void *sec)
+{
+	call_void_hook(ib_free_security, sec);
+}
+EXPORT_SYMBOL(security_ib_free_security);
+#endif	/* CONFIG_SECURITY_INFINIBAND */
+
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 
 int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 3411c33..ff5895e 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
 
 selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
-	     netnode.o netport.o exports.o \
+	     netnode.o netport.o ibpkey.o exports.o \
 	     ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
 	     ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e67a526..33fd061 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -17,6 +17,7 @@
  *	Paul Moore <paul@paul-moore.com>
  *  Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
  *		       Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *  Copyright (C) 2016 Mellanox Technologies
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License version 2,
@@ -90,6 +91,7 @@
 #include "netif.h"
 #include "netnode.h"
 #include "netport.h"
+#include "ibpkey.h"
 #include "xfrm.h"
 #include "netlabel.h"
 #include "audit.h"
@@ -171,6 +173,16 @@ static int selinux_netcache_avc_callback(u32 event)
 	return 0;
 }
 
+static int selinux_lsm_notifier_avc_callback(u32 event)
+{
+	if (event == AVC_CALLBACK_RESET) {
+		sel_ib_pkey_flush();
+		call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
+	}
+
+	return 0;
+}
+
 /*
  * initialise the security for the init task
  */
@@ -398,18 +410,6 @@ static void superblock_free_security(struct super_block *sb)
 	kfree(sbsec);
 }
 
-/* The file system's label must be initialized prior to use. */
-
-static const char *labeling_behaviors[7] = {
-	"uses xattr",
-	"uses transition SIDs",
-	"uses task SIDs",
-	"uses genfs_contexts",
-	"not configured for labeling",
-	"uses mountpoint labeling",
-	"uses native labeling",
-};
-
 static inline int inode_doinit(struct inode *inode)
 {
 	return inode_doinit_with_dentry(inode, NULL);
@@ -524,13 +524,17 @@ static int sb_finish_set_opts(struct super_block *sb)
 		}
 	}
 
-	if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
-		printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
-		       sb->s_id, sb->s_type->name);
-
 	sbsec->flags |= SE_SBINITIALIZED;
+
+	/*
+	 * Explicitly set or clear SBLABEL_MNT.  It's not sufficient to simply
+	 * leave the flag untouched because sb_clone_mnt_opts might be handing
+	 * us a superblock that needs the flag to be cleared.
+	 */
 	if (selinux_is_sblabel_mnt(sb))
 		sbsec->flags |= SBLABEL_MNT;
+	else
+		sbsec->flags &= ~SBLABEL_MNT;
 
 	/* Initialize the root inode. */
 	rc = inode_doinit_with_dentry(root_inode, root);
@@ -809,6 +813,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
 		sbsec->flags |= SE_SBPROC | SE_SBGENFS;
 
 	if (!strcmp(sb->s_type->name, "debugfs") ||
+	    !strcmp(sb->s_type->name, "tracefs") ||
 	    !strcmp(sb->s_type->name, "sysfs") ||
 	    !strcmp(sb->s_type->name, "pstore"))
 		sbsec->flags |= SE_SBGENFS;
@@ -963,8 +968,11 @@ static int selinux_cmp_sb_context(const struct super_block *oldsb,
 }
 
 static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
-					struct super_block *newsb)
+					struct super_block *newsb,
+					unsigned long kern_flags,
+					unsigned long *set_kern_flags)
 {
+	int rc = 0;
 	const struct superblock_security_struct *oldsbsec = oldsb->s_security;
 	struct superblock_security_struct *newsbsec = newsb->s_security;
 
@@ -979,6 +987,13 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
 	if (!ss_initialized)
 		return 0;
 
+	/*
+	 * Specifying internal flags without providing a place to
+	 * put the results is not allowed.
+	 */
+	if (kern_flags && !set_kern_flags)
+		return -EINVAL;
+
 	/* how can we clone if the old one wasn't set up?? */
 	BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
 
@@ -994,6 +1009,18 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
 	newsbsec->def_sid = oldsbsec->def_sid;
 	newsbsec->behavior = oldsbsec->behavior;
 
+	if (newsbsec->behavior == SECURITY_FS_USE_NATIVE &&
+		!(kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context) {
+		rc = security_fs_use(newsb);
+		if (rc)
+			goto out;
+	}
+
+	if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !set_context) {
+		newsbsec->behavior = SECURITY_FS_USE_NATIVE;
+		*set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+	}
+
 	if (set_context) {
 		u32 sid = oldsbsec->mntpoint_sid;
 
@@ -1013,8 +1040,9 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
 	}
 
 	sb_finish_set_opts(newsb);
+out:
 	mutex_unlock(&newsbsec->lock);
-	return 0;
+	return rc;
 }
 
 static int selinux_parse_opts_str(char *options,
@@ -1106,10 +1134,8 @@ static int selinux_parse_opts_str(char *options,
 
 	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
 				       GFP_KERNEL);
-	if (!opts->mnt_opts_flags) {
-		kfree(opts->mnt_opts);
+	if (!opts->mnt_opts_flags)
 		goto out_err;
-	}
 
 	if (fscontext) {
 		opts->mnt_opts[num_mnt_opts] = fscontext;
@@ -1132,6 +1158,7 @@ static int selinux_parse_opts_str(char *options,
 	return 0;
 
 out_err:
+	security_free_mnt_opts(opts);
 	kfree(context);
 	kfree(defcontext);
 	kfree(fscontext);
@@ -2063,8 +2090,9 @@ static inline u32 file_to_av(struct file *file)
 static inline u32 open_file_to_av(struct file *file)
 {
 	u32 av = file_to_av(file);
+	struct inode *inode = file_inode(file);
 
-	if (selinux_policycap_openperm)
+	if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
 		av |= FILE__OPEN;
 
 	return av;
@@ -3059,6 +3087,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
 static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 {
 	const struct cred *cred = current_cred();
+	struct inode *inode = d_backing_inode(dentry);
 	unsigned int ia_valid = iattr->ia_valid;
 	__u32 av = FILE__WRITE;
 
@@ -3074,8 +3103,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
 			ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
 		return dentry_has_perm(cred, dentry, FILE__SETATTR);
 
-	if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)
-			&& !(ia_valid & ATTR_FILE))
+	if (selinux_policycap_openperm &&
+	    inode->i_sb->s_magic != SOCKFS_MAGIC &&
+	    (ia_valid & ATTR_SIZE) &&
+	    !(ia_valid & ATTR_FILE))
 		av |= FILE__OPEN;
 
 	return dentry_has_perm(cred, dentry, av);
@@ -3107,6 +3138,18 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
 	return dentry_has_perm(cred, dentry, FILE__SETATTR);
 }
 
+static bool has_cap_mac_admin(bool audit)
+{
+	const struct cred *cred = current_cred();
+	int cap_audit = audit ? SECURITY_CAP_AUDIT : SECURITY_CAP_NOAUDIT;
+
+	if (cap_capable(cred, &init_user_ns, CAP_MAC_ADMIN, cap_audit))
+		return false;
+	if (cred_has_capability(cred, CAP_MAC_ADMIN, cap_audit, true))
+		return false;
+	return true;
+}
+
 static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
 				  const void *value, size_t size, int flags)
 {
@@ -3138,7 +3181,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
 
 	rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
 	if (rc == -EINVAL) {
-		if (!capable(CAP_MAC_ADMIN)) {
+		if (!has_cap_mac_admin(true)) {
 			struct audit_buffer *ab;
 			size_t audit_size;
 			const char *str;
@@ -3264,13 +3307,8 @@ static int selinux_inode_getsecurity(struct inode *inode, const char *name, void
 	 * and lack of permission just means that we fall back to the
 	 * in-core context value, not a denial.
 	 */
-	error = cap_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN,
-			    SECURITY_CAP_NOAUDIT);
-	if (!error)
-		error = cred_has_capability(current_cred(), CAP_MAC_ADMIN,
-					    SECURITY_CAP_NOAUDIT, true);
 	isec = inode_security(inode);
-	if (!error)
+	if (has_cap_mac_admin(false))
 		error = security_sid_to_context_force(isec->sid, &context,
 						      &size);
 	else
@@ -3550,6 +3588,18 @@ static int selinux_mmap_addr(unsigned long addr)
 static int selinux_mmap_file(struct file *file, unsigned long reqprot,
 			     unsigned long prot, unsigned long flags)
 {
+	struct common_audit_data ad;
+	int rc;
+
+	if (file) {
+		ad.type = LSM_AUDIT_DATA_FILE;
+		ad.u.file = file;
+		rc = inode_has_perm(current_cred(), file_inode(file),
+				    FILE__MAP, &ad);
+		if (rc)
+			return rc;
+	}
+
 	if (selinux_checkreqprot)
 		prot = reqprot;
 
@@ -3710,7 +3760,8 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
 
 /* task security operations */
 
-static int selinux_task_create(unsigned long clone_flags)
+static int selinux_task_alloc(struct task_struct *task,
+			      unsigned long clone_flags)
 {
 	u32 sid = current_sid();
 
@@ -5918,7 +5969,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
 		}
 		error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
 		if (error == -EINVAL && !strcmp(name, "fscreate")) {
-			if (!capable(CAP_MAC_ADMIN)) {
+			if (!has_cap_mac_admin(true)) {
 				struct audit_buffer *ab;
 				size_t audit_size;
 
@@ -6128,7 +6179,70 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
 	*_buffer = context;
 	return rc;
 }
+#endif
 
+#ifdef CONFIG_SECURITY_INFINIBAND
+static int selinux_ib_pkey_access(void *ib_sec, u64 subnet_prefix, u16 pkey_val)
+{
+	struct common_audit_data ad;
+	int err;
+	u32 sid = 0;
+	struct ib_security_struct *sec = ib_sec;
+	struct lsm_ibpkey_audit ibpkey;
+
+	err = sel_ib_pkey_sid(subnet_prefix, pkey_val, &sid);
+	if (err)
+		return err;
+
+	ad.type = LSM_AUDIT_DATA_IBPKEY;
+	ibpkey.subnet_prefix = subnet_prefix;
+	ibpkey.pkey = pkey_val;
+	ad.u.ibpkey = &ibpkey;
+	return avc_has_perm(sec->sid, sid,
+			    SECCLASS_INFINIBAND_PKEY,
+			    INFINIBAND_PKEY__ACCESS, &ad);
+}
+
+static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name,
+					    u8 port_num)
+{
+	struct common_audit_data ad;
+	int err;
+	u32 sid = 0;
+	struct ib_security_struct *sec = ib_sec;
+	struct lsm_ibendport_audit ibendport;
+
+	err = security_ib_endport_sid(dev_name, port_num, &sid);
+
+	if (err)
+		return err;
+
+	ad.type = LSM_AUDIT_DATA_IBENDPORT;
+	strncpy(ibendport.dev_name, dev_name, sizeof(ibendport.dev_name));
+	ibendport.port = port_num;
+	ad.u.ibendport = &ibendport;
+	return avc_has_perm(sec->sid, sid,
+			    SECCLASS_INFINIBAND_ENDPORT,
+			    INFINIBAND_ENDPORT__MANAGE_SUBNET, &ad);
+}
+
+static int selinux_ib_alloc_security(void **ib_sec)
+{
+	struct ib_security_struct *sec;
+
+	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	sec->sid = current_sid();
+
+	*ib_sec = sec;
+	return 0;
+}
+
+static void selinux_ib_free_security(void *ib_sec)
+{
+	kfree(ib_sec);
+}
 #endif
 
 static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
@@ -6213,7 +6327,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
 
 	LSM_HOOK_INIT(file_open, selinux_file_open),
 
-	LSM_HOOK_INIT(task_create, selinux_task_create),
+	LSM_HOOK_INIT(task_alloc, selinux_task_alloc),
 	LSM_HOOK_INIT(cred_alloc_blank, selinux_cred_alloc_blank),
 	LSM_HOOK_INIT(cred_free, selinux_cred_free),
 	LSM_HOOK_INIT(cred_prepare, selinux_cred_prepare),
@@ -6315,7 +6429,13 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
 	LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue),
 	LSM_HOOK_INIT(tun_dev_attach, selinux_tun_dev_attach),
 	LSM_HOOK_INIT(tun_dev_open, selinux_tun_dev_open),
-
+#ifdef CONFIG_SECURITY_INFINIBAND
+	LSM_HOOK_INIT(ib_pkey_access, selinux_ib_pkey_access),
+	LSM_HOOK_INIT(ib_endport_manage_subnet,
+		      selinux_ib_endport_manage_subnet),
+	LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
+	LSM_HOOK_INIT(ib_free_security, selinux_ib_free_security),
+#endif
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
 	LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
@@ -6379,6 +6499,9 @@ static __init int selinux_init(void)
 	if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
 		panic("SELinux: Unable to register AVC netcache callback\n");
 
+	if (avc_add_callback(selinux_lsm_notifier_avc_callback, AVC_CALLBACK_RESET))
+		panic("SELinux: Unable to register AVC LSM notifier callback\n");
+
 	if (selinux_enforcing)
 		printk(KERN_DEBUG "SELinux:  Starting in enforcing mode\n");
 	else
@@ -6448,6 +6571,23 @@ static struct nf_hook_ops selinux_nf_ops[] = {
 #endif	/* IPV6 */
 };
 
+static int __net_init selinux_nf_register(struct net *net)
+{
+	return nf_register_net_hooks(net, selinux_nf_ops,
+				     ARRAY_SIZE(selinux_nf_ops));
+}
+
+static void __net_exit selinux_nf_unregister(struct net *net)
+{
+	nf_unregister_net_hooks(net, selinux_nf_ops,
+				ARRAY_SIZE(selinux_nf_ops));
+}
+
+static struct pernet_operations selinux_net_ops = {
+	.init = selinux_nf_register,
+	.exit = selinux_nf_unregister,
+};
+
 static int __init selinux_nf_ip_init(void)
 {
 	int err;
@@ -6457,13 +6597,12 @@ static int __init selinux_nf_ip_init(void)
 
 	printk(KERN_DEBUG "SELinux:  Registering netfilter hooks\n");
 
-	err = nf_register_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
+	err = register_pernet_subsys(&selinux_net_ops);
 	if (err)
-		panic("SELinux: nf_register_hooks: error %d\n", err);
+		panic("SELinux: register_pernet_subsys: error %d\n", err);
 
 	return 0;
 }
-
 __initcall(selinux_nf_ip_init);
 
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
@@ -6471,7 +6610,7 @@ static void selinux_nf_ip_exit(void)
 {
 	printk(KERN_DEBUG "SELinux:  Unregistering netfilter hooks\n");
 
-	nf_unregister_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
+	unregister_pernet_subsys(&selinux_net_ops);
 }
 #endif
 
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
new file mode 100644
index 0000000..e3614ee
--- /dev/null
+++ b/security/selinux/ibpkey.c
@@ -0,0 +1,245 @@
+/*
+ * Pkey table
+ *
+ * SELinux must keep a mapping of InfiniBand PKEYs to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ * This code is heavily based on the "netif" and "netport" concept originally
+ * developed by
+ * James Morris <jmorris@redhat.com> and
+ * Paul Moore <paul@paul-moore.com>
+ *   (see security/selinux/netif.c and security/selinux/netport.c for more
+ *   information)
+ *
+ */
+
+/*
+ * (c) Mellanox Technologies, 2016
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "ibpkey.h"
+#include "objsec.h"
+
+#define SEL_PKEY_HASH_SIZE       256
+#define SEL_PKEY_HASH_BKT_LIMIT   16
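+/* the hash size must be a power of two; sel_ib_pkey_hashfn() relies on it */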
+
+struct sel_ib_pkey_bkt {
+	int size;
+	struct list_head list;
+};
+
+struct sel_ib_pkey {
+	struct pkey_security_struct psec;
+	struct list_head list;
+	struct rcu_head rcu;
+};
+
+static LIST_HEAD(sel_ib_pkey_list);
+static DEFINE_SPINLOCK(sel_ib_pkey_lock);
+static struct sel_ib_pkey_bkt sel_ib_pkey_hash[SEL_PKEY_HASH_SIZE];
+
+/**
+ * sel_ib_pkey_hashfn - Hashing function for the pkey table
+ * @pkey: pkey number
+ *
+ * Description:
+ * This is the hashing function for the pkey table, it returns the bucket
+ * number for the given pkey.
+ *
+ */
+static unsigned int sel_ib_pkey_hashfn(u16 pkey)
+{
+	return (pkey & (SEL_PKEY_HASH_SIZE - 1));
+}
+
+/**
+ * sel_ib_pkey_find - Search for a pkey record
+ * @subnet_prefix: subnet_prefix
+ * @pkey_num: pkey_num
+ *
+ * Description:
+ * Search the pkey table and return the matching record.  If an entry
+ * cannot be found in the table return NULL.
+ *
+ */
+static struct sel_ib_pkey *sel_ib_pkey_find(u64 subnet_prefix, u16 pkey_num)
+{
+	unsigned int idx;
+	struct sel_ib_pkey *pkey;
+
+	idx = sel_ib_pkey_hashfn(pkey_num);
+	list_for_each_entry_rcu(pkey, &sel_ib_pkey_hash[idx].list, list) {
+		if (pkey->psec.pkey == pkey_num &&
+		    pkey->psec.subnet_prefix == subnet_prefix)
+			return pkey;
+	}
+
+	return NULL;
+}
+
+/**
+ * sel_ib_pkey_insert - Insert a new pkey into the table
+ * @pkey: the new pkey record
+ *
+ * Description:
+ * Add a new pkey record to the hash table.
+ *
+ */
+static void sel_ib_pkey_insert(struct sel_ib_pkey *pkey)
+{
+	unsigned int idx;
+
+	/* we need to impose a limit on the growth of the hash table so check
+	 * this bucket to make sure it is within the specified bounds
+	 */
+	idx = sel_ib_pkey_hashfn(pkey->psec.pkey);
+	list_add_rcu(&pkey->list, &sel_ib_pkey_hash[idx].list);
+	if (sel_ib_pkey_hash[idx].size == SEL_PKEY_HASH_BKT_LIMIT) {
+		struct sel_ib_pkey *tail;
+
+		tail = list_entry(
+			rcu_dereference_protected(
+				sel_ib_pkey_hash[idx].list.prev,
+				lockdep_is_held(&sel_ib_pkey_lock)),
+			struct sel_ib_pkey, list);
+		list_del_rcu(&tail->list);
+		kfree_rcu(tail, rcu);
+	} else {
+		sel_ib_pkey_hash[idx].size++;
+	}
+}
+
+/**
+ * sel_ib_pkey_sid_slow - Lookup the SID of a pkey using the policy
+ * @subnet_prefix: subnet prefix
+ * @pkey_num: pkey number
+ * @sid: pkey SID
+ *
+ * Description:
+ * This function determines the SID of a pkey by querying the security
+ * policy.  The result is added to the pkey table to speed up future
+ * queries.  Returns zero on success, negative values on failure.
+ *
+ */
+static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
+{
+	int ret;
+	struct sel_ib_pkey *pkey;
+	struct sel_ib_pkey *new = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sel_ib_pkey_lock, flags);
+	pkey = sel_ib_pkey_find(subnet_prefix, pkey_num);
+	if (pkey) {
+		*sid = pkey->psec.sid;
+		spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+		return 0;
+	}
+
+	ret = security_ib_pkey_sid(subnet_prefix, pkey_num, sid);
+	if (ret)
+		goto out;
+
+	/* If this memory allocation fails, still return 0. The SID
+	 * is valid, it just won't be added to the cache.
+	 */
+	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	if (!new)
+		goto out;
+
+	new->psec.subnet_prefix = subnet_prefix;
+	new->psec.pkey = pkey_num;
+	new->psec.sid = *sid;
+	sel_ib_pkey_insert(new);
+
+out:
+	spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+	return ret;
+}
+
+/**
+ * sel_ib_pkey_sid - Lookup the SID of a PKEY
+ * @subnet_prefix: subnet_prefix
+ * @pkey_num: pkey number
+ * @sid: pkey SID
+ *
+ * Description:
+ * This function determines the SID of a PKEY using the fastest method
+ * possible.  First the pkey table is queried, but if an entry can't be found
+ * then the policy is queried and the result is added to the table to speed up
+ * future queries.  Returns zero on success, negative values on failure.
+ *
+ */
+int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *sid)
+{
+	struct sel_ib_pkey *pkey;
+
+	rcu_read_lock();
+	pkey = sel_ib_pkey_find(subnet_prefix, pkey_num);
+	if (pkey) {
+		*sid = pkey->psec.sid;
+		rcu_read_unlock();
+		return 0;
+	}
+	rcu_read_unlock();
+
+	return sel_ib_pkey_sid_slow(subnet_prefix, pkey_num, sid);
+}
+
+/**
+ * sel_ib_pkey_flush - Flush the entire pkey table
+ *
+ * Description:
+ * Remove all entries from the pkey table
+ *
+ */
+void sel_ib_pkey_flush(void)
+{
+	unsigned int idx;
+	struct sel_ib_pkey *pkey, *pkey_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sel_ib_pkey_lock, flags);
+	for (idx = 0; idx < SEL_PKEY_HASH_SIZE; idx++) {
+		list_for_each_entry_safe(pkey, pkey_tmp,
+					 &sel_ib_pkey_hash[idx].list, list) {
+			list_del_rcu(&pkey->list);
+			kfree_rcu(pkey, rcu);
+		}
+		sel_ib_pkey_hash[idx].size = 0;
+	}
+	spin_unlock_irqrestore(&sel_ib_pkey_lock, flags);
+}
+
+static __init int sel_ib_pkey_init(void)
+{
+	int iter;
+
+	if (!selinux_enabled)
+		return 0;
+
+	for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
+		INIT_LIST_HEAD(&sel_ib_pkey_hash[iter].list);
+		sel_ib_pkey_hash[iter].size = 0;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sel_ib_pkey_init);
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 1e0cc9b..b9fe343 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -1,7 +1,7 @@
 #include <linux/capability.h>
 
 #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
-    "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
+    "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
 
 #define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
     "rename", "execute", "quotaon", "mounton", "audit_access", \
@@ -231,6 +231,10 @@ struct security_class_mapping secclass_map[] = {
 	  { COMMON_SOCK_PERMS, NULL } },
 	{ "smc_socket",
 	  { COMMON_SOCK_PERMS, NULL } },
+	{ "infiniband_pkey",
+	  { "access", NULL } },
+	{ "infiniband_endport",
+	  { "manage_subnet", NULL } },
 	{ NULL }
   };
 
diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h
new file mode 100644
index 0000000..b17a19e
--- /dev/null
+++ b/security/selinux/include/ibpkey.h
@@ -0,0 +1,31 @@
+/*
+ * pkey table
+ *
+ * SELinux must keep a mapping of pkeys to labels/SIDs.  This
+ * mapping is maintained as part of the normal policy but a fast cache is
+ * needed to reduce the lookup overhead.
+ *
+ */
+
+/*
+ * (c) Mellanox Technologies, 2016
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SELINUX_IB_PKEY_H
+#define _SELINUX_IB_PKEY_H
+
+void sel_ib_pkey_flush(void);
+
+int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid);
+
+#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c03cdcd..6ebc61e 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -10,6 +10,7 @@
  *
  *  Copyright (C) 2001,2002 Networks Associates Technology, Inc.
  *  Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ *  Copyright (C) 2016 Mellanox Technologies
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License version 2,
@@ -139,6 +140,16 @@ struct key_security_struct {
 	u32 sid;	/* SID of key */
 };
 
+struct ib_security_struct {
+	u32 sid;        /* SID of the queue pair or MAD agent */
+};
+
+struct pkey_security_struct {
+	u64	subnet_prefix; /* Port subnet prefix */
+	u16	pkey;	/* PKey number */
+	u32	sid;	/* SID of pkey */
+};
+
 extern unsigned int selinux_checkreqprot;
 
 #endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index f979c35..e91f08c 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -36,10 +36,11 @@
 #define POLICYDB_VERSION_DEFAULT_TYPE	28
 #define POLICYDB_VERSION_CONSTRAINT_NAMES	29
 #define POLICYDB_VERSION_XPERMS_IOCTL	30
+#define POLICYDB_VERSION_INFINIBAND		31
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
-#define POLICYDB_VERSION_MAX	POLICYDB_VERSION_XPERMS_IOCTL
+#define POLICYDB_VERSION_MAX   POLICYDB_VERSION_INFINIBAND
 
 /* Mask for just the mount related flags */
 #define SE_MNTMASK	0x0f
@@ -76,6 +77,8 @@ enum {
 };
 #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
 
+extern char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX];
+
 extern int selinux_policycap_netpeer;
 extern int selinux_policycap_openperm;
 extern int selinux_policycap_extsockclass;
@@ -178,6 +181,10 @@ int security_get_user_sids(u32 callsid, char *username,
 
 int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
 
+int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid);
+
+int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid);
+
 int security_netif_sid(char *name, u32 *if_sid);
 
 int security_node_sid(u16 domain, void *addr, u32 addrlen,
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 50062e70..9010a36 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -41,15 +41,6 @@
 #include "objsec.h"
 #include "conditional.h"
 
-/* Policy capability filenames */
-static char *policycap_names[] = {
-	"network_peer_controls",
-	"open_perms",
-	"extended_socket_class",
-	"always_check_network",
-	"cgroup_seclabel"
-};
-
 unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
 
 static int __init checkreqprot_setup(char *str)
@@ -163,6 +154,8 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
 			avc_ss_reset(0);
 		selnl_notify_setenforce(selinux_enforcing);
 		selinux_status_update_setenforce(selinux_enforcing);
+		if (!selinux_enforcing)
+			call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
 	}
 	length = count;
 out:
@@ -1750,9 +1743,9 @@ static int sel_make_policycap(void)
 	sel_remove_entries(policycap_dir);
 
 	for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
-		if (iter < ARRAY_SIZE(policycap_names))
+		if (iter < ARRAY_SIZE(selinux_policycap_names))
 			dentry = d_alloc_name(policycap_dir,
-					      policycap_names[iter]);
+					      selinux_policycap_names[iter]);
 		else
 			dentry = d_alloc_name(policycap_dir, "unknown");
 
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 9db4709a..ad38299 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -24,6 +24,8 @@
 
 #define BITS_PER_U64	(sizeof(u64) * 8)
 
+static struct kmem_cache *ebitmap_node_cachep;
+
 int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
 {
 	struct ebitmap_node *n1, *n2;
@@ -54,7 +56,7 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
 	n = src->node;
 	prev = NULL;
 	while (n) {
-		new = kzalloc(sizeof(*new), GFP_ATOMIC);
+		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
 		if (!new) {
 			ebitmap_destroy(dst);
 			return -ENOMEM;
@@ -162,7 +164,7 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
 		if (e_iter == NULL ||
 		    offset >= e_iter->startbit + EBITMAP_SIZE) {
 			e_prev = e_iter;
-			e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
+			e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
 			if (e_iter == NULL)
 				goto netlbl_import_failure;
 			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
@@ -288,7 +290,7 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
 					prev->next = n->next;
 				else
 					e->node = n->next;
-				kfree(n);
+				kmem_cache_free(ebitmap_node_cachep, n);
 			}
 			return 0;
 		}
@@ -299,7 +301,7 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
 	if (!value)
 		return 0;
 
-	new = kzalloc(sizeof(*new), GFP_ATOMIC);
+	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
 
@@ -332,7 +334,7 @@ void ebitmap_destroy(struct ebitmap *e)
 	while (n) {
 		temp = n;
 		n = n->next;
-		kfree(temp);
+		kmem_cache_free(ebitmap_node_cachep, temp);
 	}
 
 	e->highbit = 0;
@@ -400,7 +402,7 @@ int ebitmap_read(struct ebitmap *e, void *fp)
 
 		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
 			struct ebitmap_node *tmp;
-			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+			tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
 			if (!tmp) {
 				printk(KERN_ERR
 				       "SELinux: ebitmap: out of memory\n");
@@ -519,3 +521,15 @@ int ebitmap_write(struct ebitmap *e, void *fp)
 	}
 	return 0;
 }
+
+void ebitmap_cache_init(void)
+{
+	ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
+							sizeof(struct ebitmap_node),
+							0, SLAB_PANIC, NULL);
+}
+
+void ebitmap_cache_destroy(void)
+{
+	kmem_cache_destroy(ebitmap_node_cachep);
+}
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 9637b8c..6d5a9ac 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -130,6 +130,9 @@ void ebitmap_destroy(struct ebitmap *e);
 int ebitmap_read(struct ebitmap *e, void *fp);
 int ebitmap_write(struct ebitmap *e, void *fp);
 
+void ebitmap_cache_init(void);
+void ebitmap_cache_destroy(void);
+
 #ifdef CONFIG_NETLABEL
 int ebitmap_netlbl_export(struct ebitmap *ebmap,
 			  struct netlbl_lsm_catmap **catmap);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0080122..aa6500a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -17,6 +17,11 @@
  *
  *      Added support for the policy capability bitmap
  *
+ * Update: Mellanox Technologies
+ *
+ *	Added InfiniBand support
+ *
+ * Copyright (C) 2016 Mellanox Technologies
  * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
  * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
  * Copyright (C) 2003 - 2004 Tresys Technology, LLC
@@ -76,81 +81,86 @@ static struct policydb_compat_info policydb_compat[] = {
 	{
 		.version	= POLICYDB_VERSION_BASE,
 		.sym_num	= SYM_NUM - 3,
-		.ocon_num	= OCON_NUM - 1,
+		.ocon_num	= OCON_NUM - 3,
 	},
 	{
 		.version	= POLICYDB_VERSION_BOOL,
 		.sym_num	= SYM_NUM - 2,
-		.ocon_num	= OCON_NUM - 1,
+		.ocon_num	= OCON_NUM - 3,
 	},
 	{
 		.version	= POLICYDB_VERSION_IPV6,
 		.sym_num	= SYM_NUM - 2,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_NLCLASS,
 		.sym_num	= SYM_NUM - 2,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_MLS,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_AVTAB,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_RANGETRANS,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_POLCAP,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_PERMISSIVE,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_BOUNDARY,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_FILENAME_TRANS,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_ROLETRANS,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_DEFAULT_TYPE,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_CONSTRAINT_NAMES,
 		.sym_num	= SYM_NUM,
-		.ocon_num	= OCON_NUM,
+		.ocon_num	= OCON_NUM - 2,
 	},
 	{
 		.version	= POLICYDB_VERSION_XPERMS_IOCTL,
 		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM - 2,
+	},
+	{
+		.version	= POLICYDB_VERSION_INFINIBAND,
+		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
 	},
 };
@@ -538,34 +548,30 @@ static int policydb_index(struct policydb *p)
 	symtab_hash_eval(p->symtab);
 #endif
 
-	rc = -ENOMEM;
 	p->class_val_to_struct = kcalloc(p->p_classes.nprim,
 					 sizeof(*p->class_val_to_struct),
 					 GFP_KERNEL);
 	if (!p->class_val_to_struct)
-		goto out;
+		return -ENOMEM;
 
-	rc = -ENOMEM;
 	p->role_val_to_struct = kcalloc(p->p_roles.nprim,
 					sizeof(*p->role_val_to_struct),
 					GFP_KERNEL);
 	if (!p->role_val_to_struct)
-		goto out;
+		return -ENOMEM;
 
-	rc = -ENOMEM;
 	p->user_val_to_struct = kcalloc(p->p_users.nprim,
 					sizeof(*p->user_val_to_struct),
 					GFP_KERNEL);
 	if (!p->user_val_to_struct)
-		goto out;
+		return -ENOMEM;
 
 	/* Yes, I want the sizeof the pointer, not the structure */
-	rc = -ENOMEM;
 	p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *),
 						       p->p_types.nprim,
 						       GFP_KERNEL | __GFP_ZERO);
 	if (!p->type_val_to_struct_array)
-		goto out;
+		return -ENOMEM;
 
 	rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
 				 p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
@@ -577,12 +583,11 @@ static int policydb_index(struct policydb *p)
 		goto out;
 
 	for (i = 0; i < SYM_NUM; i++) {
-		rc = -ENOMEM;
 		p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
 							 p->symtab[i].nprim,
 							 GFP_KERNEL | __GFP_ZERO);
 		if (!p->sym_val_to_name[i])
-			goto out;
+			return -ENOMEM;
 
 		rc = flex_array_prealloc(p->sym_val_to_name[i],
 					 0, p->symtab[i].nprim,
@@ -2211,6 +2216,51 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
 					goto out;
 				break;
 			}
+			case OCON_IBPKEY:
+				rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
+				if (rc)
+					goto out;
+
+				c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
+
+				if (nodebuf[2] > 0xffff ||
+				    nodebuf[3] > 0xffff) {
+					rc = -EINVAL;
+					goto out;
+				}
+
+				c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
+				c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
+
+				rc = context_read_and_validate(&c->context[0],
+							       p,
+							       fp);
+				if (rc)
+					goto out;
+				break;
+			case OCON_IBENDPORT:
+				rc = next_entry(buf, fp, sizeof(u32) * 2);
+				if (rc)
+					goto out;
+				len = le32_to_cpu(buf[0]);
+
+				rc = str_read(&c->u.ibendport.dev_name, GFP_KERNEL, fp, len);
+				if (rc)
+					goto out;
+
+				if (buf[1] > 0xff || buf[1] == 0) {
+					rc = -EINVAL;
+					goto out;
+				}
+
+				c->u.ibendport.port = le32_to_cpu(buf[1]);
+
+				rc = context_read_and_validate(&c->context[0],
+							       p,
+							       fp);
+				if (rc)
+					goto out;
+				break;
 			}
 		}
 	}
@@ -3140,6 +3190,33 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
 				if (rc)
 					return rc;
 				break;
+			case OCON_IBPKEY:
+				*((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+
+				nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
+				nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+				rc = put_entry(nodebuf, sizeof(u32), 4, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
+			case OCON_IBENDPORT:
+				len = strlen(c->u.ibendport.dev_name);
+				buf[0] = cpu_to_le32(len);
+				buf[1] = cpu_to_le32(c->u.ibendport.port);
+				rc = put_entry(buf, sizeof(u32), 2, fp);
+				if (rc)
+					return rc;
+				rc = put_entry(c->u.ibendport.dev_name, 1, len, fp);
+				if (rc)
+					return rc;
+				rc = context_write(p, &c->context[0], fp);
+				if (rc)
+					return rc;
+				break;
 			}
 		}
 	}
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 725d594..5d23eed 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -187,6 +187,15 @@ struct ocontext {
 			u32 addr[4];
 			u32 mask[4];
 		} node6;        /* IPv6 node information */
+		struct {
+			u64 subnet_prefix;
+			u16 low_pkey;
+			u16 high_pkey;
+		} ibpkey;
+		struct {
+			char *dev_name;
+			u8 port;
+		} ibendport;
 	} u;
 	union {
 		u32 sclass;  /* security class for genfs */
@@ -215,14 +224,16 @@ struct genfs {
 #define SYM_NUM     8
 
 /* object context array indices */
-#define OCON_ISID  0	/* initial SIDs */
-#define OCON_FS    1	/* unlabeled file systems */
-#define OCON_PORT  2	/* TCP and UDP port numbers */
-#define OCON_NETIF 3	/* network interfaces */
-#define OCON_NODE  4	/* nodes */
-#define OCON_FSUSE 5	/* fs_use */
-#define OCON_NODE6 6	/* IPv6 nodes */
-#define OCON_NUM   7
+#define OCON_ISID	0 /* initial SIDs */
+#define OCON_FS		1 /* unlabeled file systems */
+#define OCON_PORT	2 /* TCP and UDP port numbers */
+#define OCON_NETIF	3 /* network interfaces */
+#define OCON_NODE	4 /* nodes */
+#define OCON_FSUSE	5 /* fs_use */
+#define OCON_NODE6	6 /* IPv6 nodes */
+#define OCON_IBPKEY	7 /* Infiniband PKeys */
+#define OCON_IBENDPORT	8 /* Infiniband end ports */
+#define OCON_NUM	9
 
 /* The policy database */
 struct policydb {
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 60d9b02..2f02fa6 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -70,6 +70,15 @@
 #include "ebitmap.h"
 #include "audit.h"
 
+/* Policy capability names */
+char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
+	"network_peer_controls",
+	"open_perms",
+	"extended_socket_class",
+	"always_check_network",
+	"cgroup_seclabel"
+};
+
 int selinux_policycap_netpeer;
 int selinux_policycap_openperm;
 int selinux_policycap_extsockclass;
@@ -1986,6 +1995,9 @@ static int convert_context(u32 key,
 
 static void security_load_policycaps(void)
 {
+	unsigned int i;
+	struct ebitmap_node *node;
+
 	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
 						  POLICYDB_CAPABILITY_NETPEER);
 	selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
@@ -1997,6 +2009,17 @@ static void security_load_policycaps(void)
 	selinux_policycap_cgroupseclabel =
 		ebitmap_get_bit(&policydb.policycaps,
 				POLICYDB_CAPABILITY_CGROUPSECLABEL);
+
+	for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++)
+		pr_info("SELinux:  policy capability %s=%d\n",
+			selinux_policycap_names[i],
+			ebitmap_get_bit(&policydb.policycaps, i));
+
+	ebitmap_for_each_positive_bit(&policydb.policycaps, node, i) {
+		if (i >= ARRAY_SIZE(selinux_policycap_names))
+			pr_info("SELinux:  unknown policy capability %u\n",
+				i);
+	}
 }
 
 static int security_preserve_bools(struct policydb *p);
@@ -2031,9 +2054,11 @@ int security_load_policy(void *data, size_t len)
 
 	if (!ss_initialized) {
 		avtab_cache_init();
+		ebitmap_cache_init();
 		rc = policydb_read(&policydb, fp);
 		if (rc) {
 			avtab_cache_destroy();
+			ebitmap_cache_destroy();
 			goto out;
 		}
 
@@ -2044,6 +2069,7 @@ int security_load_policy(void *data, size_t len)
 		if (rc) {
 			policydb_destroy(&policydb);
 			avtab_cache_destroy();
+			ebitmap_cache_destroy();
 			goto out;
 		}
 
@@ -2051,6 +2077,7 @@ int security_load_policy(void *data, size_t len)
 		if (rc) {
 			policydb_destroy(&policydb);
 			avtab_cache_destroy();
+			ebitmap_cache_destroy();
 			goto out;
 		}
 
@@ -2210,6 +2237,87 @@ int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
 }
 
 /**
+ * security_ib_pkey_sid - Obtain the SID for a pkey.
+ * @subnet_prefix: Subnet Prefix
+ * @pkey_num: pkey number
+ * @out_sid: security identifier
+ */
+int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
+{
+	struct ocontext *c;
+	int rc = 0;
+
+	read_lock(&policy_rwlock);
+
+	c = policydb.ocontexts[OCON_IBPKEY];
+	while (c) {
+		if (c->u.ibpkey.low_pkey <= pkey_num &&
+		    c->u.ibpkey.high_pkey >= pkey_num &&
+		    c->u.ibpkey.subnet_prefix == subnet_prefix)
+			break;
+
+		c = c->next;
+	}
+
+	if (c) {
+		if (!c->sid[0]) {
+			rc = sidtab_context_to_sid(&sidtab,
+						   &c->context[0],
+						   &c->sid[0]);
+			if (rc)
+				goto out;
+		}
+		*out_sid = c->sid[0];
+	} else
+		*out_sid = SECINITSID_UNLABELED;
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+/**
+ * security_ib_endport_sid - Obtain the SID for a subnet management interface.
+ * @dev_name: device name
+ * @port_num: port number
+ * @out_sid: security identifier
+ */
+int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid)
+{
+	struct ocontext *c;
+	int rc = 0;
+
+	read_lock(&policy_rwlock);
+
+	c = policydb.ocontexts[OCON_IBENDPORT];
+	while (c) {
+		if (c->u.ibendport.port == port_num &&
+		    !strncmp(c->u.ibendport.dev_name,
+			     dev_name,
+			     IB_DEVICE_NAME_MAX))
+			break;
+
+		c = c->next;
+	}
+
+	if (c) {
+		if (!c->sid[0]) {
+			rc = sidtab_context_to_sid(&sidtab,
+						   &c->context[0],
+						   &c->sid[0]);
+			if (rc)
+				goto out;
+		}
+		*out_sid = c->sid[0];
+	} else
+		*out_sid = SECINITSID_UNLABELED;
+
+out:
+	read_unlock(&policy_rwlock);
+	return rc;
+}
+
+/**
  * security_netif_sid - Obtain the SID for a network interface.
  * @name: interface name
  * @if_sid: interface SID
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index f6915f2..c5f436b 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -32,13 +32,11 @@ int sidtab_init(struct sidtab *s)
 
 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
 {
-	int hvalue, rc = 0;
+	int hvalue;
 	struct sidtab_node *prev, *cur, *newnode;
 
-	if (!s) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!s)
+		return -ENOMEM;
 
 	hvalue = SIDTAB_HASH(sid);
 	prev = NULL;
@@ -48,21 +46,17 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
 		cur = cur->next;
 	}
 
-	if (cur && sid == cur->sid) {
-		rc = -EEXIST;
-		goto out;
-	}
+	if (cur && sid == cur->sid)
+		return -EEXIST;
 
 	newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
-	if (!newnode) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!newnode)
+		return -ENOMEM;
+
 	newnode->sid = sid;
 	if (context_cpy(&newnode->context, context)) {
 		kfree(newnode);
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	if (prev) {
@@ -78,8 +72,7 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
 	s->nel++;
 	if (sid >= s->next_sid)
 		s->next_sid = sid + 1;
-out:
-	return rc;
+	return 0;
 }
 
 static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 612b810..6a71fc7 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -320,7 +320,7 @@ int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
 struct smack_known *smk_import_entry(const char *, int);
 void smk_insert_entry(struct smack_known *skp);
 struct smack_known *smk_find_entry(const char *);
-int smack_privileged(int cap);
+bool smack_privileged(int cap);
 void smk_destroy_label_list(struct list_head *list);
 
 /*
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index a4b2e6b..1a30041 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -627,35 +627,38 @@ DEFINE_MUTEX(smack_onlycap_lock);
  * Is the task privileged and allowed to be privileged
  * by the onlycap rule.
  *
- * Returns 1 if the task is allowed to be privileged, 0 if it's not.
+ * Returns true if the task is allowed to be privileged, false if it's not.
  */
-int smack_privileged(int cap)
+bool smack_privileged(int cap)
 {
 	struct smack_known *skp = smk_of_current();
 	struct smack_known_list_elem *sklep;
+	int rc;
 
 	/*
 	 * All kernel tasks are privileged
 	 */
 	if (unlikely(current->flags & PF_KTHREAD))
-		return 1;
+		return true;
 
-	if (!capable(cap))
-		return 0;
+	rc = cap_capable(current_cred(), &init_user_ns, cap,
+				SECURITY_CAP_AUDIT);
+	if (rc)
+		return false;
 
 	rcu_read_lock();
 	if (list_empty(&smack_onlycap_list)) {
 		rcu_read_unlock();
-		return 1;
+		return true;
 	}
 
 	list_for_each_entry_rcu(sklep, &smack_onlycap_list, list) {
 		if (sklep->smk_label == skp) {
 			rcu_read_unlock();
-			return 1;
+			return true;
 		}
 	}
 	rcu_read_unlock();
 
-	return 0;
+	return false;
 }
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 658f5d8..463af86 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1915,7 +1915,7 @@ static int smack_file_receive(struct file *file)
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
 	smk_ad_setfield_u_fs_path(&ad, file->f_path);
 
-	if (S_ISSOCK(inode->i_mode)) {
+	if (inode->i_sb->s_magic == SOCKFS_MAGIC) {
 		sock = SOCKET_I(inode);
 		ssp = sock->sk->sk_security;
 		tsp = current_security();
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index 205b785..cdeb0f3 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -18,6 +18,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netdevice.h>
 #include <net/inet_sock.h>
+#include <net/net_namespace.h>
 #include "smack.h"
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -74,20 +75,29 @@ static struct nf_hook_ops smack_nf_ops[] = {
 #endif	/* IPV6 */
 };
 
+static int __net_init smack_nf_register(struct net *net)
+{
+	return nf_register_net_hooks(net, smack_nf_ops,
+				     ARRAY_SIZE(smack_nf_ops));
+}
+
+static void __net_exit smack_nf_unregister(struct net *net)
+{
+	nf_unregister_net_hooks(net, smack_nf_ops, ARRAY_SIZE(smack_nf_ops));
+}
+
+static struct pernet_operations smack_net_ops = {
+	.init = smack_nf_register,
+	.exit = smack_nf_unregister,
+};
+
 static int __init smack_nf_ip_init(void)
 {
-	int err;
-
 	if (smack_enabled == 0)
 		return 0;
 
 	printk(KERN_DEBUG "Smack: Registering netfilter hooks\n");
-
-	err = nf_register_hooks(smack_nf_ops, ARRAY_SIZE(smack_nf_ops));
-	if (err)
-		pr_info("Smack: nf_register_hooks: error %d\n", err);
-
-	return 0;
+	return register_pernet_subsys(&smack_net_ops);
 }
 
 __initcall(smack_nf_ip_init);
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
index 90c605e..96b2740 100644
--- a/security/yama/Kconfig
+++ b/security/yama/Kconfig
@@ -7,6 +7,7 @@
 	  system-wide security settings beyond regular Linux discretionary
 	  access controls. Currently available is ptrace scope restriction.
 	  Like capabilities, this security module stacks with other LSMs.
-	  Further information can be found in Documentation/security/Yama.txt.
+	  Further information can be found in
+	  Documentation/admin-guide/LSM/Yama.rst.
 
 	  If you are unsure how to answer this question, answer N.
diff --git a/sound/core/control.c b/sound/core/control.c
index c109b82..6362da1 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1577,7 +1577,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
 		struct snd_ctl_event ev;
 		struct snd_kctl_event *kev;
 		while (list_empty(&ctl->events)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				err = -EAGAIN;
 				goto __end_lock;
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 9602a7e..a73baa1 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -85,7 +85,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
 	int major = imajor(inode);
 	struct snd_hwdep *hw;
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (major == snd_major) {
 		hw = snd_lookup_minor_data(iminor(inode),
diff --git a/sound/core/init.c b/sound/core/init.c
index 6bda843..d61d2b3 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -989,7 +989,7 @@ EXPORT_SYMBOL(snd_card_file_remove);
  */
 int snd_power_wait(struct snd_card *card, unsigned int power_state)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int result = 0;
 
 	/* fastpath */
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 36baf96..cd8b7be 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1554,7 +1554,7 @@ static int snd_pcm_oss_sync1(struct snd_pcm_substream *substream, size_t size)
 	ssize_t result = 0;
 	snd_pcm_state_t state;
 	long res;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	runtime = substream->runtime;
 	init_waitqueue_entry(&wait, current);
@@ -2387,7 +2387,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
 	struct snd_pcm_oss_file *pcm_oss_file;
 	struct snd_pcm_oss_setup setup[2];
 	int nonblock;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	err = nonseekable_open(inode, file);
 	if (err < 0)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 5088d4b..8771760 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1904,7 +1904,7 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int err = 0;
 	snd_pcm_uframes_t avail = 0;
 	long wait_time, tout;
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	struct snd_pcm_substream *substream;
 	const struct snd_pcm_chmap_elem *map;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	substream = snd_pcm_chmap_substream(info, idx);
 	if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 	unsigned int __user *dst;
 	int c, count = 0;
 
-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	if (size < 8)
 		return -ENOMEM;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 13dec5e..faa2e2b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1652,7 +1652,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 	struct snd_card *card;
 	struct snd_pcm_runtime *runtime;
 	struct snd_pcm_substream *s;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int result = 0;
 	int nonblock = 0;
 
@@ -2353,7 +2353,7 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
 {
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (pcm == NULL) {
 		err = -ENODEV;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index ab89033..32588ad 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -368,7 +368,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
 	int err;
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_file *rawmidi_file = NULL;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK)) 
 		return -EINVAL;		/* invalid combination */
@@ -1002,7 +1002,7 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
 		while (!snd_rawmidi_ready(substream)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				spin_unlock_irq(&runtime->lock);
 				return result > 0 ? result : -EAGAIN;
@@ -1306,7 +1306,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 	while (count > 0) {
 		spin_lock_irq(&runtime->lock);
 		while (!snd_rawmidi_ready_append(substream, count)) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			if (file->f_flags & O_NONBLOCK) {
 				spin_unlock_irq(&runtime->lock);
 				return result > 0 ? result : -EAGAIN;
@@ -1338,7 +1338,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
 	if (file->f_flags & O_DSYNC) {
 		spin_lock_irq(&runtime->lock);
 		while (runtime->avail != runtime->buffer_size) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 			unsigned int last_avail = runtime->avail;
 			init_waitqueue_entry(&wait, current);
 			add_wait_queue(&runtime->sleep, &wait);
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 01c4cfe..a8c2822 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -179,7 +179,7 @@ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
 {
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (snd_BUG_ON(!f))
 		return -EINVAL;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index d4c61ec..d6e9aac 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -227,7 +227,7 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
 	int err = -EAGAIN;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	if (pool == NULL)
 		return -EINVAL;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 2f836ca..884c306 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
 	if (err < 0)
 		goto __err;
 
+	tu->qhead = tu->qtail = tu->qused = 0;
 	kfree(tu->queue);
 	tu->queue = NULL;
 	kfree(tu->tqueue);
@@ -1959,10 +1960,11 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
 	tu = file->private_data;
 	unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+	mutex_lock(&tu->ioctl_lock);
 	spin_lock_irq(&tu->qlock);
 	while ((long)count - result >= unit) {
 		while (!tu->qused) {
-			wait_queue_t wait;
+			wait_queue_entry_t wait;
 
 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
 				err = -EAGAIN;
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 			add_wait_queue(&tu->qchange_sleep, &wait);
 
 			spin_unlock_irq(&tu->qlock);
+			mutex_unlock(&tu->ioctl_lock);
 			schedule();
+			mutex_lock(&tu->ioctl_lock);
 			spin_lock_irq(&tu->qlock);
 
 			remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 		tu->qused--;
 		spin_unlock_irq(&tu->qlock);
 
-		mutex_lock(&tu->ioctl_lock);
 		if (tu->tread) {
 			if (copy_to_user(buffer, &tu->tqueue[qhead],
 					 sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 					 sizeof(struct snd_timer_read)))
 				err = -EFAULT;
 		}
-		mutex_unlock(&tu->ioctl_lock);
 
 		spin_lock_irq(&tu->qlock);
 		if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 	}
  _error:
 	spin_unlock_irq(&tu->qlock);
+	mutex_unlock(&tu->ioctl_lock);
 	return result > 0 ? result : err;
 }
 
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9e6f54f..1e26854 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 		cycle = increment_cycle_count(cycle, 1);
 		if (s->handle_packet(s, 0, cycle, i) < 0) {
 			s->packet_index = -1;
-			amdtp_stream_pcm_abort(s);
+			if (in_interrupt())
+				amdtp_stream_pcm_abort(s);
+			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 			return;
 		}
 	}
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 	/* Queueing error or detecting invalid payload. */
 	if (i < packets) {
 		s->packet_index = -1;
-		amdtp_stream_pcm_abort(s);
+		if (in_interrupt())
+			amdtp_stream_pcm_abort(s);
+		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 		return;
 	}
 
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 7e88317..ea1a91e 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -135,7 +135,7 @@ struct amdtp_stream {
 	/* For a PCM substream processing. */
 	struct snd_pcm_substream *pcm;
 	struct tasklet_struct period_tasklet;
-	unsigned int pcm_buffer_pointer;
+	snd_pcm_uframes_t pcm_buffer_pointer;
 	unsigned int pcm_period_pointer;
 
 	/* To wait for first packet. */
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 4dae9ff..0b1e4b3 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1782,7 +1782,7 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev,
 				  int val, int port, unsigned long timeout)
 
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 
 	init_waitqueue_entry(&wait, current);
 	spin_lock_irq(&dev->irq_lock);
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index d6fb2d5..60ce1cf 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -295,6 +295,8 @@ struct hda_codec {
 
 #define list_for_each_codec(c, bus) \
 	list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus)				\
+	list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
 
 /* snd_hda_codec_read/write optional flags */
 #define HDA_RW_NO_RESPONSE_FALLBACK	(1 << 0)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 3715a57..1c60beb 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1337,8 +1337,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
 /* configure each codec instance */
 int azx_codec_configure(struct azx *chip)
 {
-	struct hda_codec *codec;
-	list_for_each_codec(codec, &chip->bus) {
+	struct hda_codec *codec, *next;
+
+	/* use _safe version here since snd_hda_codec_configure() deregisters
+	 * the device upon error and deletes itself from the bus list.
+	 */
+	list_for_each_codec_safe(codec, next, &chip->bus) {
 		snd_hda_codec_configure(codec);
 	}
 	return 0;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2842c82..71545b5 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3174,6 +3174,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
 						spec->input_paths[i][nums]);
 					spec->input_paths[i][nums] =
 						spec->input_paths[i][n];
+					spec->input_paths[i][n] = 0;
 				}
 			}
 			nums++;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1770f08..01eb1dc 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -370,10 +370,12 @@ enum {
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)	|| \
-			IS_GLK(pci)
+#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+			  IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
+			  IS_KBL_H(pci)	|| IS_GLK(pci) || IS_CFL(pci))
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-H */
 	{ PCI_DEVICE(0x8086, 0xa2f0),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Coffeelake */
+	{ PCI_DEVICE(0x8086, 0xa348),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 918e452..cbeebc0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2324,11 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
-	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
-	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
-	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
index dccf3db4..8bf2ce3 100644
--- a/sound/pci/mixart/mixart_core.c
+++ b/sound/pci/mixart/mixart_core.c
@@ -239,7 +239,7 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
 	struct mixart_msg resp;
 	u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	long timeout;
 
 	init_waitqueue_entry(&wait, current);
@@ -284,7 +284,7 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
 				   struct mixart_msg *request, u32 notif_event)
 {
 	int err;
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	long timeout;
 
 	if (snd_BUG_ON(!notif_event))
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index fe4ba46..1114166 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -781,7 +781,7 @@ static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *su
 
 static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
 {
-	wait_queue_t wait;
+	wait_queue_entry_t wait;
 	int loops = 4;
 
 	while (loops-- > 0) {
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 7ae46c2..b7ef8c5 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
 	return 0;
 }
 
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+	struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+	struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+	return regcache_sync(dd->regmap);
+}
+
 static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 {
 	return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
 	.probe		= atmel_classd_codec_probe,
+	.resume		= atmel_classd_codec_resume,
 	.get_regmap	= atmel_classd_codec_get_remap,
 	.component_driver = {
 		.controls		= atmel_classd_snd_controls,
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 6dd7578..024d83f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
 				++i;
 				msleep(50);
 			}
-		} while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+		} while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
 
 		if (!srm_lock)
 			dev_warn(codec->dev, "SRM failed to lock\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7..7899a2c 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
 		}
 	},
+	{
+		.ident = "Thinkpad Helix 2nd",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+		}
+	},
 
 	{ }
 };
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2c9deda..bc136d2 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
 	if (ret < 0)
 		return ret;
 
-	ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+	ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index e3f0667..e7d766d 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -21,8 +21,9 @@
 #include "skl.h"
 
 /* Unique identification for getting NHLT blobs */
-static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
-				0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+static guid_t osc_guid =
+	GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
+		  0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53);
 
 struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 {
@@ -37,7 +38,7 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 		return NULL;
 	}
 
-	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+	obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
 	if (obj && obj->type == ACPI_TYPE_BUFFER) {
 		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
 		nhlt_table = (struct nhlt_acpi_table *)
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 58c5250..498b153 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
 	u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
 	u64 *ipc_header = (u64 *)(&header);
 	struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
 	msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 	if (msg == NULL) {
 		dev_dbg(ipc->dev, "ipc: rx list is empty\n");
 		return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
 		}
 	}
 
+	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
 	list_del(&msg->list);
 	sst_ipc_tx_msg_reply_complete(ipc, msg);
+	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 }
 
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3a99712..64a0f8e 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
 
 			if (ret < 0)
 				return ret;
-			tkn_count += ret;
+			tkn_count = ret;
 
 			tuple_size += tkn_count *
 				sizeof(struct snd_soc_tplg_vendor_string_elem);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 6df3b31..4c9b578 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
 	struct skl *skl  = ebus_to_skl(ebus);
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-	skl->init_failed = 1; /* to be sure */
+	skl->init_done = 0; /* to be sure */
 
 	snd_hdac_ext_stop_streams(ebus);
 
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
 
 	snd_hdac_ext_bus_exit(ebus);
 
+	cancel_work_sync(&skl->probe_work);
 	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
 		snd_hdac_i915_exit(&ebus->bus);
+
 	return 0;
 }
 
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
 	.get_response = snd_hdac_bus_get_response,
 };
 
+static int skl_i915_init(struct hdac_bus *bus)
+{
+	int err;
+
+	/*
+	 * The HDMI codec is in GPU so we need to ensure that it is powered
+	 * up and ready for probe
+	 */
+	err = snd_hdac_i915_init(bus);
+	if (err < 0)
+		return err;
+
+	err = snd_hdac_display_power(bus, true);
+	if (err < 0)
+		dev_err(bus->dev, "Cannot turn on display power on i915\n");
+
+	return err;
+}
+
+static void skl_probe_work(struct work_struct *work)
+{
+	struct skl *skl = container_of(work, struct skl, probe_work);
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_link *hlink = NULL;
+	int err;
+
+	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+		err = skl_i915_init(bus);
+		if (err < 0)
+			return;
+	}
+
+	err = skl_init_chip(bus, true);
+	if (err < 0) {
+		dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+		goto out_err;
+	}
+
+	/* codec detection */
+	if (!bus->codec_mask)
+		dev_info(bus->dev, "no hda codecs found!\n");
+
+	/* create codec instances */
+	err = skl_codec_create(ebus);
+	if (err < 0)
+		goto out_err;
+
+	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+		err = snd_hdac_display_power(bus, false);
+		if (err < 0) {
+			dev_err(bus->dev, "Cannot turn off display power on i915\n");
+			return;
+		}
+	}
+
+	/* register platform dai and controls */
+	err = skl_platform_register(bus->dev);
+	if (err < 0)
+		return;
+	/*
+	 * we are done probing so decrement link counts
+	 */
+	list_for_each_entry(hlink, &ebus->hlink_list, list)
+		snd_hdac_ext_bus_link_put(ebus, hlink);
+
+	/* configure PM */
+	pm_runtime_put_noidle(bus->dev);
+	pm_runtime_allow(bus->dev);
+	skl->init_done = 1;
+
+	return;
+
+out_err:
+	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+		err = snd_hdac_display_power(bus, false);
+}
+
 /*
  * constructor
  */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
 	snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
 	ebus->bus.use_posbuf = 1;
 	skl->pci = pci;
+	INIT_WORK(&skl->probe_work, skl_probe_work);
 
 	ebus->bus.bdl_pos_adj = 0;
 
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
 	return 0;
 }
 
-static int skl_i915_init(struct hdac_bus *bus)
-{
-	int err;
-
-	/*
-	 * The HDMI codec is in GPU so we need to ensure that it is powered
-	 * up and ready for probe
-	 */
-	err = snd_hdac_i915_init(bus);
-	if (err < 0)
-		return err;
-
-	err = snd_hdac_display_power(bus, true);
-	if (err < 0) {
-		dev_err(bus->dev, "Cannot turn on display power on i915\n");
-		return err;
-	}
-
-	return err;
-}
-
 static int skl_first_init(struct hdac_ext_bus *ebus)
 {
 	struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
 	/* initialize chip */
 	skl_init_pci(skl);
 
-	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-		err = skl_i915_init(bus);
-		if (err < 0)
-			return err;
-	}
-
-	skl_init_chip(bus, true);
-
-	/* codec detection */
-	if (!bus->codec_mask) {
-		dev_info(bus->dev, "no hda codecs found!\n");
-	}
-
-	return 0;
+	return skl_init_chip(bus, true);
 }
 
 static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
 	struct skl *skl;
 	struct hdac_ext_bus *ebus = NULL;
 	struct hdac_bus *bus = NULL;
-	struct hdac_ext_link *hlink = NULL;
 	int err;
 
 	/* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
 
 	if (skl->nhlt == NULL) {
 		err = -ENODEV;
-		goto out_display_power_off;
+		goto out_free;
 	}
 
 	err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
 	if (bus->mlcap)
 		snd_hdac_ext_bus_get_ml_capabilities(ebus);
 
+	snd_hdac_bus_stop_chip(bus);
+
 	/* create device for soc dmic */
 	err = skl_dmic_device_register(skl);
 	if (err < 0)
 		goto out_dsp_free;
 
-	/* register platform dai and controls */
-	err = skl_platform_register(bus->dev);
-	if (err < 0)
-		goto out_dmic_free;
-
-	/* create codec instances */
-	err = skl_codec_create(ebus);
-	if (err < 0)
-		goto out_unregister;
-
-	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-		err = snd_hdac_display_power(bus, false);
-		if (err < 0) {
-			dev_err(bus->dev, "Cannot turn off display power on i915\n");
-			return err;
-		}
-	}
-
-	/*
-	 * we are done probling so decrement link counts
-	 */
-	list_for_each_entry(hlink, &ebus->hlink_list, list)
-		snd_hdac_ext_bus_link_put(ebus, hlink);
-
-	/* configure PM */
-	pm_runtime_put_noidle(bus->dev);
-	pm_runtime_allow(bus->dev);
+	schedule_work(&skl->probe_work);
 
 	return 0;
 
-out_unregister:
-	skl_platform_unregister(bus->dev);
-out_dmic_free:
-	skl_dmic_device_unregister(skl);
 out_dsp_free:
 	skl_free_dsp(skl);
 out_mach_free:
 	skl_machine_device_unregister(skl);
 out_nhlt_free:
 	skl_nhlt_free(skl->nhlt);
-out_display_power_off:
-	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-		snd_hdac_display_power(bus, false);
 out_free:
-	skl->init_failed = 1;
 	skl_free(ebus);
 
 	return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
 
 	skl = ebus_to_skl(ebus);
 
-	if (skl->init_failed)
+	if (!skl->init_done)
 		return;
 
 	snd_hdac_ext_stop_streams(ebus);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index a454f60..2a630fc 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -46,7 +46,7 @@ struct skl {
 	struct hdac_ext_bus ebus;
 	struct pci_dev *pci;
 
-	unsigned int init_failed:1; /* delayed init failed */
+	unsigned int init_done:1; /* delayed init status */
 	struct platform_device *dmic_dev;
 	struct platform_device *i2s_dev;
 	struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
 	const struct firmware *tplg;
 
 	int supend_active;
+
+	struct work_struct probe_work;
 };
 
 #define skl_to_ebus(s)	(&(s)->ebus)
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 66203d1..d3b0dc1 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 				rbga = rbgx;
 				adg->rbga_rate_for_441khz = rate / div;
 				ckr |= brg_table[i] << 20;
-				if (req_441kHz_rate)
+				if (req_441kHz_rate &&
+				    !(adg_mode_flags(adg) & AUDIO_OUT_48))
 					parent_clk_name = __clk_get_name(clk);
 			}
 		}
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 				rbgb = rbgx;
 				adg->rbgb_rate_for_48khz = rate / div;
 				ckr |= brg_table[i] << 16;
-				if (req_48kHz_rate)
+				if (req_48kHz_rate &&
+				    (adg_mode_flags(adg) & AUDIO_OUT_48))
 					parent_clk_name = __clk_get_name(clk);
 			}
 		}
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24..d879c01 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
 	dev_dbg(dev, "ctu/mix path = 0x%08x", data);
 
 	rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
+	rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
 	rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
 	rsnd_adg_set_cmd_timsel_gen2(mod, io);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1744015..8c1f4e2 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 		return 0x76543210;
 }
 
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+{
+	enum rsnd_mod_type playback_mods[] = {
+		RSND_MOD_SRC,
+		RSND_MOD_CMD,
+		RSND_MOD_SSIU,
+	};
+	enum rsnd_mod_type capture_mods[] = {
+		RSND_MOD_CMD,
+		RSND_MOD_SRC,
+		RSND_MOD_SSIU,
+	};
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct rsnd_mod *tmod = NULL;
+	enum rsnd_mod_type *mods =
+		rsnd_io_is_play(io) ?
+		playback_mods : capture_mods;
+	int i;
+
+	/*
+	 * This is needed for 24bit data.
+	 * We need to shift it by 8 bits.
+	 *
+	 * Linux 24bit data is located as 0x00******
+	 * HW    24bit data is located as 0x******00
+	 *
+	 */
+	switch (runtime->sample_bits) {
+	case 16:
+		return 0;
+	case 32:
+		break;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
+		tmod = rsnd_io_to_mod(io, mods[i]);
+		if (tmod)
+			break;
+	}
+
+	if (tmod != mod)
+		return 0;
+
+	if (rsnd_io_is_play(io))
+		return  (0 << 20) | /* shift to Left */
+			(8 << 16);  /* 8bit */
+	else
+		return  (1 << 20) | /* shift to Right */
+			(8 << 16);  /* 8bit */
+}
+
 /*
  *	rsnd_dai functions
  */
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 63b6d3c..4b09807 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
 		RSND_GEN_M_REG(SRC_ROUTE_MODE0,	0xc,	0x20),
 		RSND_GEN_M_REG(SRC_CTRL,	0x10,	0x20),
 		RSND_GEN_M_REG(SRC_INT_ENABLE0,	0x18,	0x20),
+		RSND_GEN_M_REG(CMD_BUSIF_MODE,	0x184,	0x20),
 		RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188,	0x20),
 		RSND_GEN_M_REG(CMD_ROUTE_SLCT,	0x18c,	0x20),
 		RSND_GEN_M_REG(CMD_CTRL,	0x190,	0x20),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index dbf4163..323af41 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -73,6 +73,7 @@ enum rsnd_reg {
 	RSND_REG_SCU_SYS_INT_EN0,
 	RSND_REG_SCU_SYS_INT_EN1,
 	RSND_REG_CMD_CTRL,
+	RSND_REG_CMD_BUSIF_MODE,
 	RSND_REG_CMD_BUSIF_DALIGN,
 	RSND_REG_CMD_ROUTE_SLCT,
 	RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
 		    u32 mask, u32 data);
 u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
 
 /*
  *	R-Car DMA
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 20b5b2e..76a477a 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct device *dev = rsnd_priv_to_dev(priv);
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	int is_play = rsnd_io_is_play(io);
 	int use_src = 0;
 	u32 fin, fout;
 	u32 ifscr, fsrate, adinr;
 	u32 cr, route;
 	u32 bsdsr, bsisr;
+	u32 i_busif, o_busif, tmp;
 	uint ratio;
 
 	if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 		break;
 	}
 
+	/* BUSIF_MODE */
+	tmp = rsnd_get_busif_shift(io, mod);
+	i_busif = ( is_play ? tmp : 0) | 1;
+	o_busif = (!is_play ? tmp : 0) | 1;
+
 	rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
 
 	rsnd_mod_write(mod, SRC_SRCIR, 1);	/* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 	rsnd_mod_write(mod, SRC_BSISR, bsisr);
 	rsnd_mod_write(mod, SRC_SRCIR, 0);	/* cancel initialize */
 
-	rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1);
-	rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1);
+	rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
+	rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
+
 	rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
 	rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 135c566..91e5c07 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
 	 * always use 32bit system word.
 	 * see also rsnd_ssi_master_clk_enable()
 	 */
-	cr_own = FORCE | SWL_32 | PDTA;
+	cr_own = FORCE | SWL_32;
 
 	if (rdai->bit_clk_inv)
 		cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
 		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 		u32 *buf = (u32 *)(runtime->dma_area +
 				   rsnd_dai_pointer_offset(io, 0));
+		int shift = 0;
+
+		switch (runtime->sample_bits) {
+		case 32:
+			shift = 8;
+			break;
+		}
 
 		/*
 		 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
 		 * see rsnd_ssi_init()
 		 */
 		if (rsnd_io_is_play(io))
-			rsnd_mod_write(mod, SSITDR, *buf);
+			rsnd_mod_write(mod, SSITDR, (*buf) << shift);
 		else
-			*buf = rsnd_mod_read(mod, SSIRDR);
+			*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
 
 		elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
 	}
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
 			       struct rsnd_priv *priv)
 {
 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+	struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+
+	/* Do nothing for SSI parent mod */
+	if (ssi_parent_mod == mod)
+		return 0;
 
 	/* PIO will request IRQ again */
 	free_irq(ssi->irq, mod);
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 14fafda..512d238 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
 			       (rsnd_io_is_play(io) ?
 				rsnd_runtime_channel_after_ctu(io) :
 				rsnd_runtime_channel_original(io)));
-		rsnd_mod_write(mod, SSI_BUSIF_MODE,  1);
+		rsnd_mod_write(mod, SSI_BUSIF_MODE,
+			       rsnd_get_busif_shift(io, mod) | 1);
 		rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
 			       rsnd_get_dalign(mod, io));
 	}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae099c..754e3ef 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
 	list_for_each_entry(rtd, &card->rtd_list, list)
 		flush_delayed_work(&rtd->delayed_work);
 
+	/* free the ALSA card first; this syncs with pending operations */
+	snd_card_free(card->snd_card);
+
 	/* remove and free each DAI */
 	soc_remove_dai_links(card);
 	soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
 	if (card->remove)
 		card->remove(card);
 
-	snd_card_free(card->snd_card);
 	return 0;
-
 }
 
 /* removes a socdev */
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
index dc48eed..26ed23b 100644
--- a/sound/usb/mixer_us16x08.c
+++ b/sound/usb/mixer_us16x08.c
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
 	struct snd_usb_audio *chip = elem->head.mixer->chip;
 	struct snd_us16x08_meter_store *store = elem->private_data;
 	u8 meter_urb[64];
-	char tmp[sizeof(mix_init_msg2)] = {0};
 
 	switch (kcontrol->private_value) {
-	case 0:
-		snd_us16x08_send_urb(chip, (char *)mix_init_msg1,
-				     sizeof(mix_init_msg1));
+	case 0: {
+		char tmp[sizeof(mix_init_msg1)];
+
+		memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
+		snd_us16x08_send_urb(chip, tmp, 4);
 		snd_us16x08_recv_urb(chip, meter_urb,
 			sizeof(meter_urb));
 		kcontrol->private_value++;
 		break;
+	}
 	case 1:
 		snd_us16x08_recv_urb(chip, meter_urb,
 			sizeof(meter_urb));
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
 			sizeof(meter_urb));
 		kcontrol->private_value++;
 		break;
-	case 3:
+	case 3: {
+		char tmp[sizeof(mix_init_msg2)];
+
 		memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
 		tmp[2] = snd_get_meter_comp_index(store);
-		snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2));
+		snd_us16x08_send_urb(chip, tmp, 10);
 		snd_us16x08_recv_urb(chip, meter_urb,
 			sizeof(meter_urb));
 		kcontrol->private_value = 0;
 		break;
 	}
+	}
 
 	for (set = 0; set < 6; set++)
 		get_meter_levels_from_urb(set, store, meter_urb);
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = {
 		.control_id = SND_US16X08_ID_EQLOWMIDWIDTH,
 		.type = USB_MIXER_U8,
 		.num_channels = 16,
-		.name = "EQ MidQLow Q",
+		.name = "EQ MidLow Q",
 	},
 	{ /* EQ mid high gain */
 		.kcontrol_new = &snd_us16x08_eq_gain_ctl,
diff --git a/tools/Makefile b/tools/Makefile
index c8a90d0..221e1ce 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -19,6 +19,7 @@
 	@echo '  kvm_stat               - top-like utility for displaying kvm statistics'
 	@echo '  leds                   - LEDs  tools'
 	@echo '  lguest                 - a minimal 32-bit x86 hypervisor'
+	@echo '  liblockdep             - user-space wrapper for kernel locking-validator'
 	@echo '  net                    - misc networking tools'
 	@echo '  perf                   - Linux performance measurement and analysis tool'
 	@echo '  selftests              - various kernel selftests'
@@ -89,7 +90,7 @@
 kvm_stat: FORCE
 	$(call descend,kvm/$@)
 
-all: acpi cgroup cpupower gpio hv firewire lguest \
+all: acpi cgroup cpupower gpio hv firewire lguest liblockdep \
 		perf selftests turbostat usb \
 		virtio vm net x86_energy_perf_policy \
 		tmon freefall objtool kvm_stat
@@ -103,6 +104,9 @@
 cgroup_install firewire_install gpio_install hv_install lguest_install perf_install usb_install virtio_install vm_install net_install objtool_install:
 	$(call descend,$(@:_install=),install)
 
+liblockdep_install:
+	$(call descend,lib/lockdep,install)
+
 selftests_install:
 	$(call descend,testing/$(@:_install=),install)
 
@@ -119,7 +123,7 @@
 	$(call descend,kvm/$(@:_install=),install)
 
 install: acpi_install cgroup_install cpupower_install gpio_install \
-		hv_install firewire_install lguest_install \
+		hv_install firewire_install lguest_install liblockdep_install \
 		perf_install selftests_install turbostat_install usb_install \
 		virtio_install vm_install net_install x86_energy_perf_policy_install \
 		tmon_install freefall_install objtool_install kvm_stat_install
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index f1758fc..88b20e0 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -39,6 +39,7 @@
 #include <fcntl.h>
 #include <dirent.h>
 #include <net/if.h>
+#include <limits.h>
 #include <getopt.h>
 
 /*
@@ -97,6 +98,8 @@ static struct utsname uts_buf;
 #define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/"
 #endif
 
+#define KVP_NET_DIR "/sys/class/net/"
+
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
 
@@ -596,26 +599,21 @@ static char *kvp_get_if_name(char *guid)
 	DIR *dir;
 	struct dirent *entry;
 	FILE    *file;
-	char    *p, *q, *x;
+	char    *p, *x;
 	char    *if_name = NULL;
 	char    buf[256];
-	char *kvp_net_dir = "/sys/class/net/";
-	char dev_id[256];
+	char dev_id[PATH_MAX];
 
-	dir = opendir(kvp_net_dir);
+	dir = opendir(KVP_NET_DIR);
 	if (dir == NULL)
 		return NULL;
 
-	snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
-	q = dev_id + strlen(kvp_net_dir);
-
 	while ((entry = readdir(dir)) != NULL) {
 		/*
 		 * Set the state for the next pass.
 		 */
-		*q = '\0';
-		strcat(dev_id, entry->d_name);
-		strcat(dev_id, "/device/device_id");
+		snprintf(dev_id, sizeof(dev_id), "%s%s/device/device_id",
+			 KVP_NET_DIR, entry->d_name);
 
 		file = fopen(dev_id, "r");
 		if (file == NULL)
@@ -653,12 +651,12 @@ static char *kvp_if_name_to_mac(char *if_name)
 	FILE    *file;
 	char    *p, *x;
 	char    buf[256];
-	char addr_file[256];
+	char addr_file[PATH_MAX];
 	unsigned int i;
 	char *mac_addr = NULL;
 
-	snprintf(addr_file, sizeof(addr_file), "%s%s%s", "/sys/class/net/",
-		if_name, "/address");
+	snprintf(addr_file, sizeof(addr_file), "%s%s%s", KVP_NET_DIR,
+		 if_name, "/address");
 
 	file = fopen(addr_file, "r");
 	if (file == NULL)
@@ -688,28 +686,22 @@ static char *kvp_mac_to_if_name(char *mac)
 	DIR *dir;
 	struct dirent *entry;
 	FILE    *file;
-	char    *p, *q, *x;
+	char    *p, *x;
 	char    *if_name = NULL;
 	char    buf[256];
-	char *kvp_net_dir = "/sys/class/net/";
-	char dev_id[256];
+	char dev_id[PATH_MAX];
 	unsigned int i;
 
-	dir = opendir(kvp_net_dir);
+	dir = opendir(KVP_NET_DIR);
 	if (dir == NULL)
 		return NULL;
 
-	snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
-	q = dev_id + strlen(kvp_net_dir);
-
 	while ((entry = readdir(dir)) != NULL) {
 		/*
 		 * Set the state for the next pass.
 		 */
-		*q = '\0';
-
-		strcat(dev_id, entry->d_name);
-		strcat(dev_id, "/address");
+		snprintf(dev_id, sizeof(dev_id), "%s%s/address", KVP_NET_DIR,
+			 entry->d_name);
 
 		file = fopen(dev_id, "r");
 		if (file == NULL)
@@ -1218,9 +1210,9 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
 static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 {
 	int error = 0;
-	char if_file[128];
+	char if_file[PATH_MAX];
 	FILE *file;
-	char cmd[512];
+	char cmd[PATH_MAX];
 	char *mac_addr;
 
 	/*
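
The kvp daemon hunks above replace strcat() into a fixed 256-byte buffer
with a single bounded snprintf() into a PATH_MAX buffer.  A minimal
standalone sketch of that idiom (KVP_NET_DIR is taken from the patch; the
"eth0" entry name is only an example):

	#include <limits.h>
	#include <stdio.h>

	#define KVP_NET_DIR "/sys/class/net/"

	int main(void)
	{
		char dev_id[PATH_MAX];

		/* one call builds the whole path, truncating instead of overflowing */
		snprintf(dev_id, sizeof(dev_id), "%s%s/device/device_id",
			 KVP_NET_DIR, "eth0");
		puts(dev_id);
		return 0;
	}
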
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index e082980..7ba5419 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -261,7 +261,9 @@ int main(int argc, char *argv[])
 		if (len != sizeof(struct hv_vss_msg)) {
 			syslog(LOG_ERR, "write failed; error: %d %s", errno,
 			       strerror(errno));
-			exit(EXIT_FAILURE);
+
+			if (op == VSS_OP_FREEZE)
+				vss_operate(VSS_OP_THAW);
 		}
 	}
 
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index 5446d62..8f08e03 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -1,5 +1,5 @@
 CC = $(CROSS_COMPILE)gcc
-CFLAGS += -Wall -g -D_GNU_SOURCE
+CFLAGS += -Wall -g -D_GNU_SOURCE -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
 BINDIR=usr/bin
 INSTALL_PROGRAM=install -m 755 -p
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index 780f201..8b379da 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -13,7 +13,7 @@
 #include <stdint.h>
 
 /* Made up value to limit allocation sizes */
-#define IIO_MAX_NAME_LENGTH 30
+#define IIO_MAX_NAME_LENGTH 64
 
 #define FORMAT_SCAN_ELEMENTS_DIR "%s/scan_elements"
 #define FORMAT_TYPE_FILE "%s_type"
diff --git a/tools/include/asm/sections.h b/tools/include/asm/sections.h
new file mode 100644
index 0000000..a80643d
--- /dev/null
+++ b/tools/include/asm/sections.h
@@ -0,0 +1,4 @@
+#ifndef __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H
+#define __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H
+
+#endif /* __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H */
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 1aecad3..969db19 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -61,4 +61,14 @@ static inline unsigned fls_long(unsigned long l)
 	return fls64(l);
 }
 
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
 #endif
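
The rol32() helper added above masks the right-shift count with
((-shift) & 31), which equals (32 - shift) % 32, so a shift of 0 never
produces an undefined 32-bit shift.  A quick standalone check:

	#include <stdio.h>

	typedef unsigned int __u32;

	static inline __u32 rol32(__u32 word, unsigned int shift)
	{
		return (word << shift) | (word >> ((-shift) & 31));
	}

	int main(void)
	{
		/* 0x80000001 rotated left by 4 is 0x00000018 */
		printf("%08x\n", rol32(0x80000001u, 4));
		return 0;
	}
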
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 825d44f..bd39b20 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -19,3 +19,13 @@
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+
+#define  noinline	__attribute__((noinline))
+
+#define __packed	__attribute__((packed))
+
+#define __noreturn	__attribute__((noreturn))
+
+#define __aligned(x)	__attribute__((aligned(x)))
+#define __printf(a, b)	__attribute__((format(printf, a, b)))
+#define __scanf(a, b)	__attribute__((format(scanf, a, b)))
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 23299d7..d7a5604 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -17,6 +17,10 @@
 # define __always_inline	inline __attribute__((always_inline))
 #endif
 
+#ifndef noinline
+#define noinline
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #ifndef __same_type
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
@@ -45,6 +49,10 @@
 # define __maybe_unused		__attribute__((unused))
 #endif
 
+#ifndef __used
+# define __used		__attribute__((__unused__))
+#endif
+
 #ifndef __packed
 # define __packed		__attribute__((__packed__))
 #endif
@@ -65,6 +73,14 @@
 # define unlikely(x)		__builtin_expect(!!(x), 0)
 #endif
 
+#ifndef __init
+# define __init
+#endif
+
+#ifndef noinline
+# define noinline
+#endif
+
 #define uninitialized_var(x) x = *(&(x))
 
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
diff --git a/tools/lib/lockdep/uinclude/linux/debug_locks.h b/tools/include/linux/debug_locks.h
similarity index 74%
rename from tools/lib/lockdep/uinclude/linux/debug_locks.h
rename to tools/include/linux/debug_locks.h
index f38eb64..61cc7f5 100644
--- a/tools/lib/lockdep/uinclude/linux/debug_locks.h
+++ b/tools/include/linux/debug_locks.h
@@ -3,8 +3,9 @@
 
 #include <stddef.h>
 #include <linux/compiler.h>
+#include <asm/bug.h>
 
-#define DEBUG_LOCKS_WARN_ON(x) (x)
+#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
 
 extern bool debug_locks;
 extern bool debug_locks_silent;
diff --git a/tools/include/linux/delay.h b/tools/include/linux/delay.h
new file mode 100644
index 0000000..55aa417
--- /dev/null
+++ b/tools/include/linux/delay.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_DELAY_H
+#define _TOOLS_INCLUDE_LINUX_DELAY_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_DELAY_H */
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index bdc3dd8..abf0478 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -46,4 +46,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
 	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
+static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
+{
+	return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
+}
+
 #endif /* _LINUX_ERR_H */
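
A hedged usage sketch for the new IS_ERR_OR_NULL() helper, assuming the
usual tools/ include paths so the existing ERR_PTR()/PTR_ERR() helpers
from this header resolve; lookup_thing() is a made-up function used only
for illustration:

	#include <linux/err.h>
	#include <errno.h>
	#include <stddef.h>

	static void *lookup_thing(int id)
	{
		return id < 0 ? ERR_PTR(-EINVAL) : NULL;	/* pretend lookup */
	}

	int use_thing(int id)
	{
		void *t = lookup_thing(id);

		/* one test covers both "not found" (NULL) and "hard error" (ERR_PTR) */
		if (IS_ERR_OR_NULL(t))
			return t ? (int)PTR_ERR(t) : -ENOENT;
		return 0;
	}
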
diff --git a/tools/include/linux/ftrace.h b/tools/include/linux/ftrace.h
new file mode 100644
index 0000000..949f541
--- /dev/null
+++ b/tools/include/linux/ftrace.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_FTRACE_H
+#define _TOOLS_INCLUDE_LINUX_FTRACE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_FTRACE_H */
diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h
new file mode 100644
index 0000000..2203075
--- /dev/null
+++ b/tools/include/linux/gfp.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_GFP_H
+#define _TOOLS_INCLUDE_LINUX_GFP_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */
diff --git a/tools/lib/lockdep/uinclude/linux/hardirq.h b/tools/include/linux/hardirq.h
similarity index 100%
rename from tools/lib/lockdep/uinclude/linux/hardirq.h
rename to tools/include/linux/hardirq.h
diff --git a/tools/include/linux/interrupt.h b/tools/include/linux/interrupt.h
new file mode 100644
index 0000000..6be25bb
--- /dev/null
+++ b/tools/include/linux/interrupt.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_INTERRUPT_H
+#define _TOOLS_INCLUDE_LINUX_INTERRUPT_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_INTERRUPT_H */
diff --git a/tools/lib/lockdep/uinclude/linux/irqflags.h b/tools/include/linux/irqflags.h
similarity index 84%
rename from tools/lib/lockdep/uinclude/linux/irqflags.h
rename to tools/include/linux/irqflags.h
index 6cc296f..df77669 100644
--- a/tools/lib/lockdep/uinclude/linux/irqflags.h
+++ b/tools/include/linux/irqflags.h
@@ -17,19 +17,19 @@
 #define raw_local_irq_disable() do { } while (0)
 #define raw_local_irq_enable() do { } while (0)
 #define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) do { } while (0)
+#define raw_local_irq_restore(flags) ((void)(flags))
 #define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) do { } while (0)
+#define raw_irqs_disabled_flags(flags) ((void)(flags))
 #define raw_irqs_disabled() 0
 #define raw_safe_halt()
 
 #define local_irq_enable() do { } while (0)
 #define local_irq_disable() do { } while (0)
 #define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) do { } while (0)
+#define local_irq_restore(flags) ((void)(flags))
 #define local_save_flags(flags)	((flags) = 0)
 #define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) (0)
+#define irqs_disabled_flags(flags) ((void)(flags), 0)
 #define safe_halt() do { } while (0)
 
 #define trace_lock_release(x, y)
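
The switch from "do { } while (0)" to "((void)(flags))" in the stubs
above silences set-but-unused warnings once liblockdep is built with
-Wall (added to its Makefile later in this series): a macro body that
never mentions its argument leaves the caller's flags variable unused.
A small illustration of the idiom, with made-up macro names:

	#define restore_old(flags)	do { } while (0)	/* 'flags' never referenced */
	#define restore_new(flags)	((void)(flags))		/* evaluated, result discarded */

	void demo(void)
	{
		unsigned long flags = 0;

		restore_new(flags);	/* no -Wunused-but-set-variable warning */
	}
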
diff --git a/tools/include/linux/jhash.h b/tools/include/linux/jhash.h
new file mode 100644
index 0000000..348c6f4
--- /dev/null
+++ b/tools/include/linux/jhash.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_JHASH_H
+#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions.  Routines to test the hash are included
+ * if SELF_TEST is defined.  You can use this free for any purpose.  It's in
+ * the public domain.  It has no warranty.
+ *
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are my fault.
+ * Jozsef
+ */
+#include <linux/bitops.h>
+#include <linux/unaligned/packed_struct.h>
+
+/* Best hash sizes are of power of two */
+#define jhash_size(n)   ((u32)1<<(n))
+/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
+#define jhash_mask(n)   (jhash_size(n)-1)
+
+/* __jhash_mix -- mix 3 32-bit values reversibly. */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+/* An arbitrary initial parameter */
+#define JHASH_INITVAL		0xdeadbeef
+
+/* jhash - hash an arbitrary key
+ * @k: sequence of bytes as key
+ * @length: the length of the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * The generic version, hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key.
+ *
+ * Returns the hash value of the key. The result depends on endianness.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const u8 *k = key;
+
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	/* All but the last block: affect some 32 bits of (a,b,c) */
+	while (length > 12) {
+		a += __get_unaligned_cpu32(k);
+		b += __get_unaligned_cpu32(k + 4);
+		c += __get_unaligned_cpu32(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
+	/* Last block: affect all 32 bits of (c) */
+	/* All the case statements fall through */
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+/* jhash2 - hash an array of u32's
+ * @k: the key which must be an array of u32's
+ * @length: the number of u32's in the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * Returns the hash value of the key.
+ */
+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+	u32 a, b, c;
+
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+	/* Handle most of the key */
+	while (length > 3) {
+		a += k[0];
+		b += k[1];
+		c += k[2];
+		__jhash_mix(a, b, c);
+		length -= 3;
+		k += 3;
+	}
+
+	/* Handle the last 3 u32's: all the case statements fall through */
+	switch (length) {
+	case 3: c += k[2];
+	case 2: b += k[1];
+	case 1: a += k[0];
+		__jhash_final(a, b, c);
+	case 0:	/* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+
+	__jhash_final(a, b, c);
+
+	return c;
+}
+
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+	return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+	return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
+}
+
+#endif /* _LINUX_JHASH_H */
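
A hedged usage sketch for the imported jhash(): hash a short byte key and
fold the result into a power-of-two table with jhash_mask().  It assumes
the usual tools/ include paths so <linux/jhash.h> and the u32 typedef
from <linux/types.h> resolve:

	#include <linux/types.h>
	#include <linux/jhash.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *name = "eth0";
		u32 hash = jhash(name, strlen(name), 0);	/* 0 is the seed (initval) */

		printf("bucket %u\n", hash & jhash_mask(10));	/* 1024-entry table */
		return 0;
	}
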
diff --git a/tools/lib/lockdep/uinclude/linux/kallsyms.h b/tools/include/linux/kallsyms.h
similarity index 89%
rename from tools/lib/lockdep/uinclude/linux/kallsyms.h
rename to tools/include/linux/kallsyms.h
index b0f2dbd..582cc1e 100644
--- a/tools/lib/lockdep/uinclude/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <stdio.h>
+#include <unistd.h>
 
 #define KSYM_NAME_LEN 128
 
@@ -24,7 +25,7 @@ static inline void print_ip_sym(unsigned long ip)
 
 	name = backtrace_symbols((void **)&ip, 1);
 
-	printf("%s\n", *name);
+	dprintf(STDOUT_FILENO, "%s\n", *name);
 
 	free(name);
 }
diff --git a/tools/lib/lockdep/uinclude/linux/kern_levels.h b/tools/include/linux/kern_levels.h
similarity index 100%
rename from tools/lib/lockdep/uinclude/linux/kern_levels.h
rename to tools/include/linux/kern_levels.h
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 73ccc48..77d2e94 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -5,6 +5,8 @@
 #include <stddef.h>
 #include <assert.h>
 #include <linux/compiler.h>
+#include <endian.h>
+#include <byteswap.h>
 
 #ifndef UINT_MAX
 #define UINT_MAX	(~0U)
@@ -32,6 +34,7 @@
 	(type *)((char *)__mptr - offsetof(type, member)); })
 #endif
 
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 
 #ifndef max
@@ -67,12 +70,33 @@
 #endif
 #endif
 
-/*
- * Both need more care to handle endianness
- * (Don't use bitmap_copy_le() for now)
- */
-#define cpu_to_le64(x)	(x)
-#define cpu_to_le32(x)	(x)
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define cpu_to_le16 bswap_16
+#define cpu_to_le32 bswap_32
+#define cpu_to_le64 bswap_64
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define cpu_to_be16
+#define cpu_to_be32
+#define cpu_to_be64
+#define be16_to_cpu
+#define be32_to_cpu
+#define be64_to_cpu
+#else
+#define cpu_to_le16
+#define cpu_to_le32
+#define cpu_to_le64
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define cpu_to_be16 bswap_16
+#define cpu_to_be32 bswap_32
+#define cpu_to_be64 bswap_64
+#define be16_to_cpu bswap_16
+#define be32_to_cpu bswap_32
+#define be64_to_cpu bswap_64
+#endif
 
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
 int scnprintf(char * buf, size_t size, const char * fmt, ...);
@@ -89,4 +113,7 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
 #define round_down(x, y) ((x) & ~__round_mask(x, y))
 
+#define current_gfp_context(k) 0
+#define synchronize_sched()
+
 #endif
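
BUILD_BUG_ON(), added above, is the classic negative-array-size trick: a
true condition makes the array size 1 - 2 = -1 and compilation fails, so
layout assumptions are caught at build time rather than at run time.  A
small illustration:

	#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

	struct rec {
		unsigned int a;
		unsigned int b;
	};

	void check_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct rec) != 8);		/* false: compiles */
		/* BUILD_BUG_ON(sizeof(struct rec) != 16);	   true: build error */
	}
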
diff --git a/tools/lib/lockdep/uinclude/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
similarity index 100%
rename from tools/lib/lockdep/uinclude/linux/kmemcheck.h
rename to tools/include/linux/kmemcheck.h
diff --git a/tools/include/linux/linkage.h b/tools/include/linux/linkage.h
new file mode 100644
index 0000000..bc763d5
--- /dev/null
+++ b/tools/include/linux/linkage.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H
+#define _TOOLS_INCLUDE_LINUX_LINKAGE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/include/linux/lockdep.h
similarity index 63%
rename from tools/lib/lockdep/uinclude/linux/lockdep.h
rename to tools/include/linux/lockdep.h
index c808c7d..8da3e8e 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -7,8 +7,15 @@
 #include <limits.h>
 #include <linux/utsname.h>
 #include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/kern_levels.h>
+#include <linux/err.h>
+#include <linux/rcu.h>
+#include <linux/list.h>
+#include <linux/hardirq.h>
+#include <unistd.h>
 
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL
 
 #define asmlinkage
 #define __visible
@@ -29,31 +36,32 @@ extern struct task_struct *__curr(void);
 
 #define current (__curr())
 
-#define debug_locks_off() 1
+static inline int debug_locks_off(void)
+{
+	return 1;
+}
+
 #define task_pid_nr(tsk) ((tsk)->pid)
 
 #define KSYM_NAME_LEN 128
-#define printk printf
+#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
+#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
+#define pr_warn pr_err
 
 #define list_del_rcu list_del
 
 #define atomic_t unsigned long
 #define atomic_inc(x) ((*(x))++)
 
-static struct new_utsname *init_utsname(void)
-{
-	static struct new_utsname n = (struct new_utsname) {
-		.release = "liblockdep",
-		.version = LIBLOCKDEP_VERSION,
-	};
-
-	return &n;
-}
-
 #define print_tainted() ""
 #define static_obj(x) 1
 
 #define debug_show_all_locks()
 extern void debug_check_no_locks_held(void);
 
+static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
+{
+	return false;
+}
+
 #endif
diff --git a/tools/include/linux/module.h b/tools/include/linux/module.h
new file mode 100644
index 0000000..07055db
--- /dev/null
+++ b/tools/include/linux/module.h
@@ -0,0 +1,11 @@
+#ifndef _LIBLOCKDEP_LINUX_MODULE_H_
+#define _LIBLOCKDEP_LINUX_MODULE_H_
+
+#define module_param(name, type, perm)
+
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+	return false;
+}
+
+#endif
diff --git a/tools/include/linux/mutex.h b/tools/include/linux/mutex.h
new file mode 100644
index 0000000..a8180d2
--- /dev/null
+++ b/tools/include/linux/mutex.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_MUTEX_H
+#define _TOOLS_INCLUDE_LINUX_MUTEX_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_MUTEX_H */
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
new file mode 100644
index 0000000..8b3b03b
--- /dev/null
+++ b/tools/include/linux/proc_fs.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
+#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/include/linux/rcu.h
similarity index 76%
rename from tools/lib/lockdep/uinclude/linux/rcu.h
rename to tools/include/linux/rcu.h
index 042ee8e..5080649 100644
--- a/tools/lib/lockdep/uinclude/linux/rcu.h
+++ b/tools/include/linux/rcu.h
@@ -18,4 +18,7 @@ static inline bool rcu_is_watching(void)
 	return false;
 }
 
+#define rcu_assign_pointer(p, v) ((p) = (v))
+#define RCU_INIT_POINTER(p, v) p=(v)
+
 #endif
diff --git a/tools/include/linux/sched/clock.h b/tools/include/linux/sched/clock.h
new file mode 100644
index 0000000..5837d17
--- /dev/null
+++ b/tools/include/linux/sched/clock.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_CLOCK_H
+#define _TOOLS_PERF_LINUX_SCHED_CLOCK_H
+
+#endif  /* _TOOLS_PERF_LINUX_SCHED_CLOCK_H */
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
new file mode 100644
index 0000000..c8d9f19
--- /dev/null
+++ b/tools/include/linux/sched/mm.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
+#define _TOOLS_PERF_LINUX_SCHED_MM_H
+
+#endif  /* _TOOLS_PERF_LINUX_SCHED_MM_H */
diff --git a/tools/include/linux/sched/task.h b/tools/include/linux/sched/task.h
new file mode 100644
index 0000000..a97890ec
--- /dev/null
+++ b/tools/include/linux/sched/task.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_PERF_LINUX_SCHED_TASK_H
+#define _TOOLS_PERF_LINUX_SCHED_TASK_H
+
+#endif  /* _TOOLS_PERF_LINUX_SCHED_TASK_H */
diff --git a/tools/include/linux/seq_file.h b/tools/include/linux/seq_file.h
new file mode 100644
index 0000000..102fd92
--- /dev/null
+++ b/tools/include/linux/seq_file.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+#define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H
+
+#endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index 58397dc..417cda4 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -1,5 +1,31 @@
+#ifndef __LINUX_SPINLOCK_H_
+#define __LINUX_SPINLOCK_H_
+
+#include <pthread.h>
+#include <stdbool.h>
+
 #define spinlock_t		pthread_mutex_t
 #define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
 
 #define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
 #define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
+
+#define arch_spinlock_t pthread_mutex_t
+#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+static inline void arch_spin_lock(arch_spinlock_t *mutex)
+{
+	pthread_mutex_lock(mutex);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *mutex)
+{
+	pthread_mutex_unlock(mutex);
+}
+
+static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
+{
+	return true;
+}
+
+#endif
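
A minimal sketch of the pthread-backed arch_spinlock API now living in
this shared header (note the arch_spin_is_locked() stub above always
returns true); it assumes the usual tools/ include paths:

	#include <linux/spinlock.h>

	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static int counter;

	void bump(void)
	{
		arch_spin_lock(&lock);		/* pthread_mutex_lock() underneath */
		counter++;
		arch_spin_unlock(&lock);	/* pthread_mutex_unlock() underneath */
	}
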
diff --git a/tools/lib/lockdep/uinclude/linux/stacktrace.h b/tools/include/linux/stacktrace.h
similarity index 100%
rename from tools/lib/lockdep/uinclude/linux/stacktrace.h
rename to tools/include/linux/stacktrace.h
diff --git a/tools/include/linux/unaligned/packed_struct.h b/tools/include/linux/unaligned/packed_struct.h
new file mode 100644
index 0000000..c0d817d
--- /dev/null
+++ b/tools/include/linux/unaligned/packed_struct.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
+#define _LINUX_UNALIGNED_PACKED_STRUCT_H
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
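
These packed-struct helpers are what let jhash() read 32-bit words from
arbitrary byte offsets: going through a __packed struct tells the
compiler the pointer may be unaligned, so it emits a safe load instead of
faulting on strict-alignment CPUs.  A small check, assuming the usual
tools/ include paths and a little-endian host for the printed value:

	#include <linux/types.h>
	#include <linux/unaligned/packed_struct.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

		/* read a u32 starting at the misaligned offset 1 */
		printf("%08x\n", __get_unaligned_cpu32(buf + 1));	/* 12345678 */
		return 0;
	}
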
diff --git a/tools/include/trace/events/lock.h b/tools/include/trace/events/lock.h
new file mode 100644
index 0000000..5b15fd5
--- /dev/null
+++ b/tools/include/trace/events/lock.h
@@ -0,0 +1,4 @@
+#ifndef _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H
+#define _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H
+
+#endif /* _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H */
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 809c772..a7ecf8f 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -387,6 +387,22 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
 	return err;
 }
 
+int filename__write_int(const char *filename, int value)
+{
+	int fd = open(filename, O_WRONLY), err = -1;
+	char buf[64];
+
+	if (fd < 0)
+		return err;
+
+	sprintf(buf, "%d", value);
+	if (write(fd, buf, sizeof(buf)) == sizeof(buf))
+		err = 0;
+
+	close(fd);
+	return err;
+}
+
 int procfs__read_str(const char *entry, char **buf, size_t *sizep)
 {
 	char path[PATH_MAX];
@@ -480,3 +496,17 @@ int sysctl__read_int(const char *sysctl, int *value)
 
 	return filename__read_int(path, value);
 }
+
+int sysfs__write_int(const char *entry, int value)
+{
+	char path[PATH_MAX];
+	const char *sysfs = sysfs__mountpoint();
+
+	if (!sysfs)
+		return -1;
+
+	if (snprintf(path, sizeof(path), "%s/%s", sysfs, entry) >= PATH_MAX)
+		return -1;
+
+	return filename__write_int(path, value);
+}
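
A hedged usage sketch for the new sysfs__write_int() helper; the
cpu-hotplug attribute path is only an example, not something this patch
touches, and the build is assumed to link against tools/lib/api with its
include path set up:

	#include <api/fs/fs.h>
	#include <stdio.h>

	int main(void)
	{
		/* resolves the sysfs mountpoint, then writes "1" to the attribute */
		if (sysfs__write_int("devices/system/cpu/cpu1/online", 1) < 0)
			fprintf(stderr, "sysfs write failed\n");
		return 0;
	}
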
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 956c211..4560534 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -31,6 +31,8 @@ int filename__read_int(const char *filename, int *value);
 int filename__read_ull(const char *filename, unsigned long long *value);
 int filename__read_str(const char *filename, char **buf, size_t *sizep);
 
+int filename__write_int(const char *filename, int value);
+
 int procfs__read_str(const char *entry, char **buf, size_t *sizep);
 
 int sysctl__read_int(const char *sysctl, int *value);
@@ -38,4 +40,6 @@ int sysfs__read_int(const char *entry, int *value);
 int sysfs__read_ull(const char *entry, unsigned long long *value);
 int sysfs__read_str(const char *entry, char **buf, size_t *sizep);
 int sysfs__read_bool(const char *entry, bool *value);
+
+int sysfs__write_int(const char *entry, int value);
 #endif /* __API_FS__ */
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 3bc0ef9..ed9ace5 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -79,6 +79,7 @@
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
 CFLAGS += -fPIC
+CFLAGS += -Wall
 
 override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
 
@@ -100,7 +101,7 @@
 
 do_compile_shared_library =			\
 	($(print_shared_lib_compile)		\
-	$(CC) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='"$@"';$(shell ln -sf $@ liblockdep.so))
+	$(CC) $(LDFLAGS) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='$(@F)';$(shell ln -sf $(@F) $(@D)/liblockdep.so))
 
 do_build_static_lib =				\
 	($(print_static_lib_build)		\
@@ -118,10 +119,10 @@
 $(LIB_IN): force
 	$(Q)$(MAKE) $(build)=liblockdep
 
-liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
+$(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
 	$(Q)$(do_compile_shared_library)
 
-liblockdep.a: $(LIB_IN)
+$(OUTPUT)liblockdep.a: $(LIB_IN)
 	$(Q)$(do_build_static_lib)
 
 tags:	force
@@ -149,7 +150,7 @@
 install: install_lib
 
 clean:
-	$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d .*.cmd
+	$(RM) $(OUTPUT)*.o *~ $(TARGETS) $(OUTPUT)*.a $(OUTPUT)*liblockdep*.so* $(VERSION_FILES) $(OUTPUT).*.d $(OUTPUT).*.cmd
 	$(RM) tags TAGS
 
 PHONY += force
diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c
index a0a2e3a..ced6d74 100644
--- a/tools/lib/lockdep/lockdep.c
+++ b/tools/lib/lockdep/lockdep.c
@@ -1,8 +1,27 @@
 #include <linux/lockdep.h>
+#include <stdlib.h>
 
 /* Trivial API wrappers, we don't (yet) have RCU in user-space: */
 #define hlist_for_each_entry_rcu	hlist_for_each_entry
 #define hlist_add_head_rcu		hlist_add_head
 #define hlist_del_rcu			hlist_del
+#define list_for_each_entry_rcu		list_for_each_entry
+#define list_add_tail_rcu		list_add_tail
+
+u32 prandom_u32(void)
+{
+	/* Used only by lock_pin_lock() which is dead code */
+	abort();
+}
+
+static struct new_utsname *init_utsname(void)
+{
+	static struct new_utsname n = (struct new_utsname) {
+		.release = "liblockdep",
+		.version = LIBLOCKDEP_VERSION,
+	};
+
+	return &n;
+}
 
 #include "../../../kernel/locking/lockdep.c"
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 5284484..6a2d3c5 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -4,6 +4,7 @@
 #include <dlfcn.h>
 #include <stdlib.h>
 #include <sysexits.h>
+#include <unistd.h>
 #include "include/liblockdep/mutex.h"
 #include "../../include/linux/rbtree.h"
 
@@ -122,8 +123,6 @@ static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
 #define LIBLOCKDEP_STATIC_ENTRIES	1024
 #endif
 
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
 static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
 static int __locks_nr;
 
@@ -149,7 +148,7 @@ static struct lock_lookup *alloc_lock(void)
 
 		int idx = __locks_nr++;
 		if (idx >= ARRAY_SIZE(__locks)) {
-			fprintf(stderr,
+			dprintf(STDERR_FILENO,
 		"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
 			exit(EX_UNAVAILABLE);
 		}
diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c
index f7f4303..297c304 100644
--- a/tools/lib/lockdep/rbtree.c
+++ b/tools/lib/lockdep/rbtree.c
@@ -1 +1 @@
-#include "../../../lib/rbtree.c"
+#include "../../lib/rbtree.c"
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 1069d96..f9b9409 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -4,9 +4,9 @@
 
 for i in `ls tests/*.c`; do
 	testname=$(basename "$i" .c)
-	gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
+	gcc -o tests/$testname -pthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
 	echo -ne "$testname... "
-	if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
+	if [ $(timeout 1 ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
 		echo "PASSED!"
 	else
 		echo "FAILED!"
@@ -18,9 +18,9 @@
 
 for i in `ls tests/*.c`; do
 	testname=$(basename "$i" .c)
-	gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
+	gcc -o tests/$testname -pthread -Iinclude $i &> /dev/null
 	echo -ne "(PRELOAD) $testname... "
-	if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
+	if [ $(timeout 1 ./lockdep ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
 		echo "PASSED!"
 	else
 		echo "FAILED!"
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
deleted file mode 100644
index d82b170b..0000000
--- a/tools/lib/lockdep/uinclude/asm/hash.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_GENERIC_HASH_H
-#define __ASM_GENERIC_HASH_H
-
-/* Stub */
-
-#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/lib/lockdep/uinclude/asm/hweight.h b/tools/lib/lockdep/uinclude/asm/hweight.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/asm/hweight.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/asm/sections.h b/tools/lib/lockdep/uinclude/asm/sections.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/asm/sections.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/bitops.h b/tools/lib/lockdep/uinclude/linux/bitops.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/bitops.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/compiler.h b/tools/lib/lockdep/uinclude/linux/compiler.h
deleted file mode 100644
index fd3e56a..0000000
--- a/tools/lib/lockdep/uinclude/linux/compiler.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_
-#define _LIBLOCKDEP_LINUX_COMPILER_H_
-
-#define __used		__attribute__((__unused__))
-#define unlikely
-#define READ_ONCE(x) (x)
-#define WRITE_ONCE(x, val) x=(val)
-#define RCU_INIT_POINTER(p, v) p=(v)
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/linux/delay.h b/tools/lib/lockdep/uinclude/linux/delay.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/delay.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/ftrace.h b/tools/lib/lockdep/uinclude/linux/ftrace.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/ftrace.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/gfp.h b/tools/lib/lockdep/uinclude/linux/gfp.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/gfp.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/hash.h b/tools/lib/lockdep/uinclude/linux/hash.h
deleted file mode 100644
index 0f84798..0000000
--- a/tools/lib/lockdep/uinclude/linux/hash.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../include/linux/hash.h"
diff --git a/tools/lib/lockdep/uinclude/linux/interrupt.h b/tools/lib/lockdep/uinclude/linux/interrupt.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/interrupt.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h
deleted file mode 100644
index 276c7a8..0000000
--- a/tools/lib/lockdep/uinclude/linux/kernel.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_
-#define _LIBLOCKDEP_LINUX_KERNEL_H_
-
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/rcu.h>
-#include <linux/hardirq.h>
-#include <linux/kern_levels.h>
-
-#ifndef container_of
-#define container_of(ptr, type, member) ({			\
-	const typeof(((type *)0)->member) * __mptr = (ptr);	\
-	(type *)((char *)__mptr - offsetof(type, member)); })
-#endif
-
-#define max(x, y) ({				\
-	typeof(x) _max1 = (x);			\
-	typeof(y) _max2 = (y);			\
-	(void) (&_max1 == &_max2);		\
-	_max1 > _max2 ? _max1 : _max2; })
-
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define WARN_ON(x) (x)
-#define WARN_ON_ONCE(x) (x)
-#define likely(x) (x)
-#define WARN(x, y...) (x)
-#define uninitialized_var(x) x
-#define __init
-#define noinline
-#define list_add_tail_rcu list_add_tail
-#define list_for_each_entry_rcu list_for_each_entry
-#define barrier() 
-#define synchronize_sched()
-
-#ifndef CALLER_ADDR0
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#endif
-
-#ifndef _RET_IP_
-#define _RET_IP_ CALLER_ADDR0
-#endif
-
-#ifndef _THIS_IP_
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#endif
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/linux/linkage.h b/tools/lib/lockdep/uinclude/linux/linkage.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/linkage.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/list.h b/tools/lib/lockdep/uinclude/linux/list.h
deleted file mode 100644
index 6e9ef31..0000000
--- a/tools/lib/lockdep/uinclude/linux/list.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../include/linux/list.h"
diff --git a/tools/lib/lockdep/uinclude/linux/module.h b/tools/lib/lockdep/uinclude/linux/module.h
deleted file mode 100644
index 09c7a7b..0000000
--- a/tools/lib/lockdep/uinclude/linux/module.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _LIBLOCKDEP_LINUX_MODULE_H_
-#define _LIBLOCKDEP_LINUX_MODULE_H_
-
-#define module_param(name, type, perm)
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/linux/mutex.h b/tools/lib/lockdep/uinclude/linux/mutex.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/mutex.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/poison.h b/tools/lib/lockdep/uinclude/linux/poison.h
deleted file mode 100644
index 0c27bdf..0000000
--- a/tools/lib/lockdep/uinclude/linux/poison.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../include/linux/poison.h"
diff --git a/tools/lib/lockdep/uinclude/linux/prefetch.h b/tools/lib/lockdep/uinclude/linux/prefetch.h
deleted file mode 100644
index d73fe6f..0000000
--- a/tools/lib/lockdep/uinclude/linux/prefetch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_
-#define _LIBLOCKDEP_LINUX_PREFETCH_H
-
-static inline void prefetch(void *a __attribute__((unused))) { }
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/linux/proc_fs.h b/tools/lib/lockdep/uinclude/linux/proc_fs.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/proc_fs.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h
deleted file mode 100644
index c375947..0000000
--- a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define __always_inline
-#include "../../../include/linux/rbtree_augmented.h"
diff --git a/tools/lib/lockdep/uinclude/linux/seq_file.h b/tools/lib/lockdep/uinclude/linux/seq_file.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/linux/seq_file.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/lib/lockdep/uinclude/linux/spinlock.h b/tools/lib/lockdep/uinclude/linux/spinlock.h
deleted file mode 100644
index 68c1aa2..0000000
--- a/tools/lib/lockdep/uinclude/linux/spinlock.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _LIBLOCKDEP_SPINLOCK_H_
-#define _LIBLOCKDEP_SPINLOCK_H_
-
-#include <pthread.h>
-#include <stdbool.h>
-
-#define arch_spinlock_t pthread_mutex_t
-#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
-
-static inline void arch_spin_lock(arch_spinlock_t *mutex)
-{
-	pthread_mutex_lock(mutex);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *mutex)
-{
-	pthread_mutex_unlock(mutex);
-}
-
-static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
-{
-	return true;
-}
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/linux/stringify.h b/tools/lib/lockdep/uinclude/linux/stringify.h
deleted file mode 100644
index 05dfcd1..0000000
--- a/tools/lib/lockdep/uinclude/linux/stringify.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _LIBLOCKDEP_LINUX_STRINGIFY_H_
-#define _LIBLOCKDEP_LINUX_STRINGIFY_H_
-
-#define __stringify_1(x...)	#x
-#define __stringify(x...)	__stringify_1(x)
-
-#endif
diff --git a/tools/lib/lockdep/uinclude/trace/events/lock.h b/tools/lib/lockdep/uinclude/trace/events/lock.h
deleted file mode 100644
index fab00ff..0000000
--- a/tools/lib/lockdep/uinclude/trace/events/lock.h
+++ /dev/null
@@ -1,3 +0,0 @@
-
-/* empty file */
-
diff --git a/tools/objtool/Build b/tools/objtool/Build
index d6cdece..6f2e198 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -1,5 +1,6 @@
 objtool-y += arch/$(SRCARCH)/
 objtool-y += builtin-check.o
+objtool-y += check.o
 objtool-y += elf.o
 objtool-y += special.o
 objtool-y += objtool.o
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 55a60d3..17c1195 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -127,28 +127,13 @@
 
 c) Higher live patching compatibility rate
 
-   (NOTE: This is not yet implemented)
+   Livepatch has an optional "consistency model", which is needed for
+   more complex patches.  In order for the consistency model to work,
+   stack traces need to be reliable (or an unreliable condition needs to
+   be detectable).  Objtool makes that possible.
 
-   Currently with CONFIG_LIVEPATCH there's a basic live patching
-   framework which is safe for roughly 85-90% of "security" fixes.  But
-   patches can't have complex features like function dependency or
-   prototype changes, or data structure changes.
-
-   There's a strong need to support patches which have the more complex
-   features so that the patch compatibility rate for security fixes can
-   eventually approach something resembling 100%.  To achieve that, a
-   "consistency model" is needed, which allows tasks to be safely
-   transitioned from an unpatched state to a patched state.
-
-   One of the key requirements of the currently proposed livepatch
-   consistency model [*] is that it needs to walk the stack of each
-   sleeping task to determine if it can be transitioned to the patched
-   state.  If objtool can ensure that stack traces are reliable, this
-   consistency model can be used and the live patching compatibility
-   rate can be improved significantly.
-
-   [*] https://lkml.kernel.org/r/cover.1423499826.git.jpoimboe@redhat.com
-
+   For more details, see the livepatch documentation in the Linux kernel
+   source tree at Documentation/livepatch/livepatch.txt.
 
 Rules
 -----
@@ -201,80 +186,84 @@
    return normally.
 
 
-Errors in .S files
-------------------
+Objtool warnings
+----------------
 
-If you're getting an error in a compiled .S file which you don't
-understand, first make sure that the affected code follows the above
-rules.
+For asm files, if you're getting an error which doesn't make sense,
+first make sure that the affected code follows the above rules.
+
+For C files, the common culprits are inline asm statements and calls to
+"noreturn" functions.  See below for more details.
+
+Another possible cause for errors in C code is if the Makefile removes
+-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Here are some examples of common warnings reported by objtool, what
 they mean, and suggestions for how to fix them.
 
 
-1. asm_file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
+1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
 
    The func() function made a function call without first saving and/or
-   updating the frame pointer.
+   updating the frame pointer, and CONFIG_FRAME_POINTER is enabled.
 
-   If func() is indeed a callable function, add proper frame pointer
-   logic using the FRAME_BEGIN and FRAME_END macros.  Otherwise, remove
-   its ELF function annotation by changing ENDPROC to END.
+   If the error is for an asm file, and func() is indeed a callable
+   function, add proper frame pointer logic using the FRAME_BEGIN and
+   FRAME_END macros.  Otherwise, if it's not a callable function, remove
+   its ELF function annotation by changing ENDPROC to END, and instead
+   use the manual CFI hint macros in asm/undwarf.h.
 
-   If you're getting this error in a .c file, see the "Errors in .c
-   files" section.
+   If it's a GCC-compiled .c file, the error may be because the function
+   uses an inline asm() statement which has a "call" instruction.  An
+   asm() statement with a call instruction must declare the use of the
+   stack pointer in its output operand.  For example, on x86_64:
+
+     register void *__sp asm("rsp");
+     asm volatile("call func" : "+r" (__sp));
+
+   Otherwise the stack frame may not get created before the call.
 
 
-2. asm_file.o: warning: objtool: .text+0x53: return instruction outside of a callable function
+2. file.o: warning: objtool: .text+0x53: unreachable instruction
 
-   A return instruction was detected, but objtool couldn't find a way
-   for a callable function to reach the instruction.
+   Objtool couldn't find a code path to reach the instruction.
 
-   If the return instruction is inside (or reachable from) a callable
-   function, the function needs to be annotated with the ENTRY/ENDPROC
-   macros.
+   If the error is for an asm file, and the instruction is inside (or
+   reachable from) a callable function, the function should be annotated
+   with the ENTRY/ENDPROC macros (ENDPROC is the important one).
+   Otherwise, the code should probably be annotated with the CFI hint
+   macros in asm/undwarf.h so objtool and the unwinder can know the
+   stack state associated with the code.
 
-   If you _really_ need a return instruction outside of a function, and
-   are 100% sure that it won't affect stack traces, you can tell
-   objtool to ignore it.  See the "Adding exceptions" section below.
-
-
-3. asm_file.o: warning: objtool: func()+0x9: function has unreachable instruction
-
-   The instruction lives inside of a callable function, but there's no
-   possible control flow path from the beginning of the function to the
-   instruction.
-
-   If the instruction is actually needed, and it's actually in a
-   callable function, ensure that its function is properly annotated
-   with ENTRY/ENDPROC.
+   If you're 100% sure the code won't affect stack traces, or if you're
+   just a bad person, you can tell objtool to ignore it.  See the
+   "Adding exceptions" section below.
 
    If it's not actually in a callable function (e.g. kernel entry code),
    change ENDPROC to END.
 
 
-4. asm_file.o: warning: objtool: func(): can't find starting instruction
+4. file.o: warning: objtool: func(): can't find starting instruction
    or
-   asm_file.o: warning: objtool: func()+0x11dd: can't decode instruction
+   file.o: warning: objtool: func()+0x11dd: can't decode instruction
 
-   Did you put data in a text section?  If so, that can confuse
+   Does the file have data in a text section?  If so, that can confuse
    objtool's instruction decoder.  Move the data to a more appropriate
    section like .data or .rodata.
 
 
-5. asm_file.o: warning: objtool: func()+0x6: kernel entry/exit from callable instruction
+5. file.o: warning: objtool: func()+0x6: unsupported instruction in callable function
 
-   This is a kernel entry/exit instruction like sysenter or sysret.
-   Such instructions aren't allowed in a callable function, and are most
-   likely part of the kernel entry code.
-
-   If the instruction isn't actually in a callable function, change
-   ENDPROC to END.
+   This is a kernel entry/exit instruction like sysenter or iret.  Such
+   instructions aren't allowed in a callable function, and are most
+   likely part of the kernel entry code.  They should usually not have
+   the callable function annotation (ENDPROC) and should always be
+   annotated with the CFI hint macros in asm/undwarf.h.
 
 
-6. asm_file.o: warning: objtool: func()+0x26: sibling call from callable instruction with changed frame pointer
+6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
 
-   This is a dynamic jump or a jump to an undefined symbol.  Stacktool
+   This is a dynamic jump or a jump to an undefined symbol.  Objtool
    assumed it's a sibling call and detected that the frame pointer
    wasn't first restored to its original state.
 
@@ -282,24 +271,28 @@
    destination code to the local file.
 
    If the instruction is not actually in a callable function (e.g.
-   kernel entry code), change ENDPROC to END.
+   kernel entry code), change ENDPROC to END and annotate manually with
+   the CFI hint macros in asm/undwarf.h.
 
 
-7. asm_file: warning: objtool: func()+0x5c: frame pointer state mismatch
+7. file: warning: objtool: func()+0x5c: stack state mismatch
 
    The instruction's frame pointer state is inconsistent, depending on
    which execution path was taken to reach the instruction.
 
-   Make sure the function pushes and sets up the frame pointer (for
-   x86_64, this means rbp) at the beginning of the function and pops it
-   at the end of the function.  Also make sure that no other code in the
-   function touches the frame pointer.
+   Make sure that, when CONFIG_FRAME_POINTER is enabled, the function
+   pushes and sets up the frame pointer (for x86_64, this means rbp) at
+   the beginning of the function and pops it at the end of the function.
+   Also make sure that no other code in the function touches the frame
+   pointer.
+
+   Another possibility is that the code has some asm or inline asm which
+   does some unusual things to the stack or the frame pointer.  In such
+   cases it's probably appropriate to use the CFI hint macros in
+   asm/undwarf.h.
 
 
-Errors in .c files
-------------------
-
-1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
+8. file.o: warning: objtool: funcA() falls through to next function funcB()
 
    This means that funcA() doesn't end with a return instruction or an
    unconditional jump, and that objtool has determined that the function
@@ -318,22 +311,6 @@
       might be corrupt due to a gcc bug.  For more details, see:
       https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
 
-2. If you're getting any other objtool error in a compiled .c file, it
-   may be because the file uses an asm() statement which has a "call"
-   instruction.  An asm() statement with a call instruction must declare
-   the use of the stack pointer in its output operand.  For example, on
-   x86_64:
-
-     register void *__sp asm("rsp");
-     asm volatile("call func" : "+r" (__sp));
-
-   Otherwise the stack frame may not get created before the call.
-
-3. Another possible cause for errors in C code is if the Makefile removes
-   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
-
-Also see the above section for .S file errors for more information what
-the individual error messages mean.
 
 If the error doesn't seem to make sense, it could be a bug in objtool.
 Feel free to ask the objtool maintainer for help.
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 27e019c..0e2765e 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,7 +25,7 @@
 all: $(OBJTOOL)
 
 INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
-CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
 # Allow old libelf to be used:
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index a59e061..21aeca8 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -19,25 +19,63 @@
 #define _ARCH_H
 
 #include <stdbool.h>
+#include <linux/list.h>
 #include "elf.h"
+#include "cfi.h"
 
-#define INSN_FP_SAVE		1
-#define INSN_FP_SETUP		2
-#define INSN_FP_RESTORE		3
-#define INSN_JUMP_CONDITIONAL	4
-#define INSN_JUMP_UNCONDITIONAL	5
-#define INSN_JUMP_DYNAMIC	6
-#define INSN_CALL		7
-#define INSN_CALL_DYNAMIC	8
-#define INSN_RETURN		9
-#define INSN_CONTEXT_SWITCH	10
-#define INSN_NOP		11
-#define INSN_OTHER		12
+#define INSN_JUMP_CONDITIONAL	1
+#define INSN_JUMP_UNCONDITIONAL	2
+#define INSN_JUMP_DYNAMIC	3
+#define INSN_CALL		4
+#define INSN_CALL_DYNAMIC	5
+#define INSN_RETURN		6
+#define INSN_CONTEXT_SWITCH	7
+#define INSN_STACK		8
+#define INSN_NOP		9
+#define INSN_OTHER		10
 #define INSN_LAST		INSN_OTHER
 
+enum op_dest_type {
+	OP_DEST_REG,
+	OP_DEST_REG_INDIRECT,
+	OP_DEST_MEM,
+	OP_DEST_PUSH,
+	OP_DEST_LEAVE,
+};
+
+struct op_dest {
+	enum op_dest_type type;
+	unsigned char reg;
+	int offset;
+};
+
+enum op_src_type {
+	OP_SRC_REG,
+	OP_SRC_REG_INDIRECT,
+	OP_SRC_CONST,
+	OP_SRC_POP,
+	OP_SRC_ADD,
+	OP_SRC_AND,
+};
+
+struct op_src {
+	enum op_src_type type;
+	unsigned char reg;
+	int offset;
+};
+
+struct stack_op {
+	struct op_dest dest;
+	struct op_src src;
+};
+
+void arch_initial_func_cfi_state(struct cfi_state *state);
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *displacement);
+			    unsigned long *immediate, struct stack_op *op);
+
+bool arch_callee_saved_reg(unsigned char reg);
 
 #endif /* _ARCH_H */
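
An illustration of the struct stack_op model introduced above, filled in
the way the x86 decoder in the next diff handles a plain "push %rbp"
(opcode 0x55); it assumes the objtool build environment for "arch.h" and
the CFI_* register constants from cfi.h:

	#include "arch.h"

	static void describe_push_rbp(struct stack_op *op, unsigned char *type)
	{
		*type = INSN_STACK;		/* the instruction manipulates the stack */
		op->src.type  = OP_SRC_REG;	/* the pushed value comes from a register */
		op->src.reg   = CFI_BP;		/* ... namely %rbp */
		op->dest.type = OP_DEST_PUSH;	/* ... and lands on the stack */
	}
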
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 6ac99e3..a36c2eb 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -27,6 +27,17 @@
 #include "../../arch.h"
 #include "../../warn.h"
 
+static unsigned char op_to_cfi_reg[][2] = {
+	{CFI_AX, CFI_R8},
+	{CFI_CX, CFI_R9},
+	{CFI_DX, CFI_R10},
+	{CFI_BX, CFI_R11},
+	{CFI_SP, CFI_R12},
+	{CFI_BP, CFI_R13},
+	{CFI_SI, CFI_R14},
+	{CFI_DI, CFI_R15},
+};
+
 static int is_x86_64(struct elf *elf)
 {
 	switch (elf->ehdr.e_machine) {
@@ -40,24 +51,50 @@ static int is_x86_64(struct elf *elf)
 	}
 }
 
+bool arch_callee_saved_reg(unsigned char reg)
+{
+	switch (reg) {
+	case CFI_BP:
+	case CFI_BX:
+	case CFI_R12:
+	case CFI_R13:
+	case CFI_R14:
+	case CFI_R15:
+		return true;
+
+	case CFI_AX:
+	case CFI_CX:
+	case CFI_DX:
+	case CFI_SI:
+	case CFI_DI:
+	case CFI_SP:
+	case CFI_R8:
+	case CFI_R9:
+	case CFI_R10:
+	case CFI_R11:
+	case CFI_RA:
+	default:
+		return false;
+	}
+}
+
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
 			    unsigned int *len, unsigned char *type,
-			    unsigned long *immediate)
+			    unsigned long *immediate, struct stack_op *op)
 {
 	struct insn insn;
-	int x86_64;
-	unsigned char op1, op2, ext;
+	int x86_64, sign;
+	unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
+		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
+		      sib = 0;
 
 	x86_64 = is_x86_64(elf);
 	if (x86_64 == -1)
 		return -1;
 
-	insn_init(&insn, (void *)(sec->data + offset), maxlen, x86_64);
+	insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
 	insn_get_length(&insn);
-	insn_get_opcode(&insn);
-	insn_get_modrm(&insn);
-	insn_get_immediate(&insn);
 
 	if (!insn_complete(&insn)) {
 		WARN_FUNC("can't decode instruction", sec, offset);
@@ -73,67 +110,323 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 	op1 = insn.opcode.bytes[0];
 	op2 = insn.opcode.bytes[1];
 
+	if (insn.rex_prefix.nbytes) {
+		rex = insn.rex_prefix.bytes[0];
+		rex_w = X86_REX_W(rex) >> 3;
+		rex_r = X86_REX_R(rex) >> 2;
+		rex_b = X86_REX_B(rex);
+	}
+
+	if (insn.modrm.nbytes) {
+		modrm = insn.modrm.bytes[0];
+		modrm_mod = X86_MODRM_MOD(modrm);
+		modrm_reg = X86_MODRM_REG(modrm);
+		modrm_rm = X86_MODRM_RM(modrm);
+	}
+
+	if (insn.sib.nbytes)
+		sib = insn.sib.bytes[0];
+
 	switch (op1) {
-	case 0x55:
-		if (!insn.rex_prefix.nbytes)
-			/* push rbp */
-			*type = INSN_FP_SAVE;
+
+	case 0x1:
+	case 0x29:
+		if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+
+			/* add/sub reg, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_SRC_REG;
+			op->dest.reg = CFI_SP;
+		}
 		break;
 
-	case 0x5d:
-		if (!insn.rex_prefix.nbytes)
-			/* pop rbp */
-			*type = INSN_FP_RESTORE;
+	case 0x50 ... 0x57:
+
+		/* push reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_REG;
+		op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+		op->dest.type = OP_DEST_PUSH;
+
+		break;
+
+	case 0x58 ... 0x5f:
+
+		/* pop reg */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+
+		break;
+
+	case 0x68:
+	case 0x6a:
+		/* push immediate */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
 		break;
 
 	case 0x70 ... 0x7f:
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
+	case 0x81:
+	case 0x83:
+		if (rex != 0x48)
+			break;
+
+		if (modrm == 0xe4) {
+			/* and imm, %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_AND;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.immediate.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (modrm == 0xc4)
+			sign = 1;
+		else if (modrm == 0xec)
+			sign = -1;
+		else
+			break;
+
+		/* add/sub imm, %rsp */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_ADD;
+		op->src.reg = CFI_SP;
+		op->src.offset = insn.immediate.value * sign;
+		op->dest.type = OP_DEST_REG;
+		op->dest.reg = CFI_SP;
+		break;
+
 	case 0x89:
-		if (insn.rex_prefix.nbytes == 1 &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0xe5)
-			/* mov rsp, rbp */
-			*type = INSN_FP_SETUP;
+		if (rex == 0x48 && modrm == 0xe5) {
+
+			/* mov %rsp, %rbp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_BP;
+			break;
+		}
+		/* fallthrough */
+	case 0x88:
+		if (!rex_b &&
+		    (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+
+			/* mov reg, disp(%rbp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_BP;
+			op->dest.offset = insn.displacement.value;
+
+		} else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+
+			/* mov reg, disp(%rsp) */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
+			op->dest.type = OP_DEST_REG_INDIRECT;
+			op->dest.reg = CFI_SP;
+			op->dest.offset = insn.displacement.value;
+		}
+
+		break;
+
+	case 0x8b:
+		if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+
+			/* mov disp(%rbp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+
+		} else if (rex_w && !rex_b && sib == 0x24 &&
+			   modrm_mod != 3 && modrm_rm == 4) {
+
+			/* mov disp(%rsp), reg */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG_INDIRECT;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+		}
+
 		break;
 
 	case 0x8d:
-		if (insn.rex_prefix.nbytes &&
-		    insn.rex_prefix.bytes[0] == 0x48 &&
-		    insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
-		    insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
-			/* lea %(rsp), %rbp */
-			*type = INSN_FP_SETUP;
+		if (rex == 0x48 && modrm == 0x65) {
+
+			/* lea -disp(%rbp), %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_BP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (rex == 0x4c && modrm == 0x54 && sib == 0x24 &&
+		    insn.displacement.value == 8) {
+
+			/*
+			 * lea 0x8(%rsp), %r10
+			 *
+			 * Here r10 is the "drap" pointer, used as a stack
+			 * pointer helper when the stack gets realigned.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_SP;
+			op->src.offset = 8;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_R10;
+			break;
+		}
+
+		if (rex == 0x4c && modrm == 0x6c && sib == 0x24 &&
+		    insn.displacement.value == 16) {
+
+			/*
+			 * lea 0x10(%rsp), %r13
+			 *
+			 * Here r13 is the "drap" pointer, used as a stack
+			 * pointer helper when the stack gets realigned.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_SP;
+			op->src.offset = 16;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_R13;
+			break;
+		}
+
+		if (rex == 0x49 && modrm == 0x62 &&
+		    insn.displacement.value == -8) {
+
+			/*
+			 * lea -0x8(%r10), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R10;
+			op->src.offset = -8;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (rex == 0x49 && modrm == 0x65 &&
+		    insn.displacement.value == -16) {
+
+			/*
+			 * lea -0x10(%r13), %rsp
+			 *
+			 * Restoring rsp back to its original value after a
+			 * stack realignment.
+			 */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_R13;
+			op->src.offset = -16;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		break;
+
+	case 0x8f:
+		/* pop to mem */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
 		break;
 
 	case 0x90:
 		*type = INSN_NOP;
 		break;
 
+	case 0x9c:
+		/* pushf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_CONST;
+		op->dest.type = OP_DEST_PUSH;
+		break;
+
+	case 0x9d:
+		/* popf */
+		*type = INSN_STACK;
+		op->src.type = OP_SRC_POP;
+		op->dest.type = OP_DEST_MEM;
+		break;
+
 	case 0x0f:
+
 		if (op2 >= 0x80 && op2 <= 0x8f)
 			*type = INSN_JUMP_CONDITIONAL;
 		else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
 			 op2 == 0x35)
+
 			/* syscall, sysret, sysenter, sysexit */
 			*type = INSN_CONTEXT_SWITCH;
+
 		else if (op2 == 0x0d || op2 == 0x1f)
+
 			/* nopl/nopw */
 			*type = INSN_NOP;
-		else if (op2 == 0x01 && insn.modrm.nbytes &&
-			 (insn.modrm.bytes[0] == 0xc2 ||
-			  insn.modrm.bytes[0] == 0xd8))
-			/* vmlaunch, vmrun */
-			*type = INSN_CONTEXT_SWITCH;
+
+		else if (op2 == 0xa0 || op2 == 0xa8) {
+
+			/* push fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+
+		} else if (op2 == 0xa1 || op2 == 0xa9) {
+
+			/* pop fs/gs */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_POP;
+			op->dest.type = OP_DEST_MEM;
+		}
 
 		break;
 
-	case 0xc9: /* leave */
-		*type = INSN_FP_RESTORE;
+	case 0xc9:
+		/*
+		 * leave
+		 *
+		 * equivalent to:
+		 * mov bp, sp
+		 * pop bp
+		 */
+		*type = INSN_STACK;
+		op->dest.type = OP_DEST_LEAVE;
+
 		break;
 
-	case 0xe3: /* jecxz/jrcxz */
+	case 0xe3:
+		/* jecxz/jrcxz */
 		*type = INSN_JUMP_CONDITIONAL;
 		break;
 
@@ -158,14 +451,27 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		break;
 
 	case 0xff:
-		ext = X86_MODRM_REG(insn.modrm.bytes[0]);
-		if (ext == 2 || ext == 3)
+		if (modrm_reg == 2 || modrm_reg == 3)
+
 			*type = INSN_CALL_DYNAMIC;
-		else if (ext == 4)
+
+		else if (modrm_reg == 4)
+
 			*type = INSN_JUMP_DYNAMIC;
-		else if (ext == 5) /*jmpf */
+
+		else if (modrm_reg == 5)
+
+			/* jmpf */
 			*type = INSN_CONTEXT_SWITCH;
 
+		else if (modrm_reg == 6) {
+
+			/* push from mem */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_CONST;
+			op->dest.type = OP_DEST_PUSH;
+		}
+
 		break;
 
 	default:
@@ -176,3 +482,21 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 
 	return 0;
 }
+
+void arch_initial_func_cfi_state(struct cfi_state *state)
+{
+	int i;
+
+	for (i = 0; i < CFI_NUM_REGS; i++) {
+		state->regs[i].base = CFI_UNDEFINED;
+		state->regs[i].offset = 0;
+	}
+
+	/* initial CFA (call frame address) */
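+	/* the caller's CALL pushed the return address, so CFA = %rsp + 8 */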
+	state->cfa.base = CFI_SP;
+	state->cfa.offset = 8;
+
+	/* initial RA (return address) */
+	state->regs[CFI_RA].base = CFI_CFA;
+	state->regs[CFI_RA].offset = -8;
+}
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
index 767be7c..12e3771 100644
--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
@@ -1009,7 +1009,7 @@
 1: fxstor | RDGSBASE Ry (F3),(11B)
 2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE
+4: XSAVE | ptwrite Ey (F3),(11B)
 5: XRSTOR | lfence (11B)
 6: XSAVEOPT | clwb (66) | mfence (11B)
 7: clflush | clflushopt (66) | sfence (11B)
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 282a603..365c34e 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,1286 +25,32 @@
  * For more information, see tools/objtool/Documentation/stack-validation.txt.
  */
 
-#include <string.h>
-#include <stdlib.h>
 #include <subcmd/parse-options.h>
-
 #include "builtin.h"
-#include "elf.h"
-#include "special.h"
-#include "arch.h"
-#include "warn.h"
+#include "check.h"
 
-#include <linux/hashtable.h>
-#include <linux/kernel.h>
+bool nofp;
 
-#define STATE_FP_SAVED		0x1
-#define STATE_FP_SETUP		0x2
-#define STATE_FENTRY		0x4
-
-struct instruction {
-	struct list_head list;
-	struct hlist_node hash;
-	struct section *sec;
-	unsigned long offset;
-	unsigned int len, state;
-	unsigned char type;
-	unsigned long immediate;
-	bool alt_group, visited, dead_end;
-	struct symbol *call_dest;
-	struct instruction *jump_dest;
-	struct list_head alts;
-	struct symbol *func;
-};
-
-struct alternative {
-	struct list_head list;
-	struct instruction *insn;
-};
-
-struct objtool_file {
-	struct elf *elf;
-	struct list_head insn_list;
-	DECLARE_HASHTABLE(insn_hash, 16);
-	struct section *rodata, *whitelist;
-	bool ignore_unreachables, c_file;
-};
-
-const char *objname;
-static bool nofp;
-
-static struct instruction *find_insn(struct objtool_file *file,
-				     struct section *sec, unsigned long offset)
-{
-	struct instruction *insn;
-
-	hash_for_each_possible(file->insn_hash, insn, hash, offset)
-		if (insn->sec == sec && insn->offset == offset)
-			return insn;
-
-	return NULL;
-}
-
-static struct instruction *next_insn_same_sec(struct objtool_file *file,
-					      struct instruction *insn)
-{
-	struct instruction *next = list_next_entry(insn, list);
-
-	if (&next->list == &file->insn_list || next->sec != insn->sec)
-		return NULL;
-
-	return next;
-}
-
-static bool gcov_enabled(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *sym;
-
-	list_for_each_entry(sec, &file->elf->sections, list)
-		list_for_each_entry(sym, &sec->symbol_list, list)
-			if (!strncmp(sym->name, "__gcov_.", 8))
-				return true;
-
-	return false;
-}
-
-#define for_each_insn(file, insn)					\
-	list_for_each_entry(insn, &file->insn_list, list)
-
-#define func_for_each_insn(file, func, insn)				\
-	for (insn = find_insn(file, func->sec, func->offset);		\
-	     insn && &insn->list != &file->insn_list &&			\
-		insn->sec == func->sec &&				\
-		insn->offset < func->offset + func->len;		\
-	     insn = list_next_entry(insn, list))
-
-#define func_for_each_insn_continue_reverse(file, func, insn)		\
-	for (insn = list_prev_entry(insn, list);			\
-	     &insn->list != &file->insn_list &&				\
-		insn->sec == func->sec && insn->offset >= func->offset;	\
-	     insn = list_prev_entry(insn, list))
-
-#define sec_for_each_insn_from(file, insn)				\
-	for (; insn; insn = next_insn_same_sec(file, insn))
-
-
-/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
-	struct rela *rela;
-	struct instruction *insn;
-
-	/* check for STACK_FRAME_NON_STANDARD */
-	if (file->whitelist && file->whitelist->rela)
-		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
-			if (rela->sym->type == STT_SECTION &&
-			    rela->sym->sec == func->sec &&
-			    rela->addend == func->offset)
-				return true;
-			if (rela->sym->type == STT_FUNC && rela->sym == func)
-				return true;
-		}
-
-	/* check if it has a context switching instruction */
-	func_for_each_insn(file, func, insn)
-		if (insn->type == INSN_CONTEXT_SWITCH)
-			return true;
-
-	return false;
-}
-
-/*
- * This checks to see if the given function is a "noreturn" function.
- *
- * For global functions which are outside the scope of this object file, we
- * have to keep a manual list of them.
- *
- * For local functions, we have to detect them manually by simply looking for
- * the lack of a return instruction.
- *
- * Returns:
- *  -1: error
- *   0: no dead end
- *   1: dead end
- */
-static int __dead_end_function(struct objtool_file *file, struct symbol *func,
-			       int recursion)
-{
-	int i;
-	struct instruction *insn;
-	bool empty = true;
-
-	/*
-	 * Unfortunately these have to be hard coded because the noreturn
-	 * attribute isn't provided in ELF data.
-	 */
-	static const char * const global_noreturns[] = {
-		"__stack_chk_fail",
-		"panic",
-		"do_exit",
-		"do_task_dead",
-		"__module_put_and_exit",
-		"complete_and_exit",
-		"kvm_spurious_fault",
-		"__reiserfs_panic",
-		"lbug_with_loc"
-	};
-
-	if (func->bind == STB_WEAK)
-		return 0;
-
-	if (func->bind == STB_GLOBAL)
-		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
-			if (!strcmp(func->name, global_noreturns[i]))
-				return 1;
-
-	if (!func->sec)
-		return 0;
-
-	func_for_each_insn(file, func, insn) {
-		empty = false;
-
-		if (insn->type == INSN_RETURN)
-			return 0;
-	}
-
-	if (empty)
-		return 0;
-
-	/*
-	 * A function can have a sibling call instead of a return.  In that
-	 * case, the function's dead-end status depends on whether the target
-	 * of the sibling call returns.
-	 */
-	func_for_each_insn(file, func, insn) {
-		if (insn->sec != func->sec ||
-		    insn->offset >= func->offset + func->len)
-			break;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
-			struct instruction *dest = insn->jump_dest;
-			struct symbol *dest_func;
-
-			if (!dest)
-				/* sibling call to another file */
-				return 0;
-
-			if (dest->sec != func->sec ||
-			    dest->offset < func->offset ||
-			    dest->offset >= func->offset + func->len) {
-				/* local sibling call */
-				dest_func = find_symbol_by_offset(dest->sec,
-								  dest->offset);
-				if (!dest_func)
-					continue;
-
-				if (recursion == 5) {
-					WARN_FUNC("infinite recursion (objtool bug!)",
-						  dest->sec, dest->offset);
-					return -1;
-				}
-
-				return __dead_end_function(file, dest_func,
-							   recursion + 1);
-			}
-		}
-
-		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
-			/* sibling call */
-			return 0;
-	}
-
-	return 1;
-}
-
-static int dead_end_function(struct objtool_file *file, struct symbol *func)
-{
-	return __dead_end_function(file, func, 0);
-}
-
-/*
- * Call the arch-specific instruction decoder for all the instructions and add
- * them to the global instruction list.
- */
-static int decode_instructions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	unsigned long offset;
-	struct instruction *insn;
-	int ret;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-
-		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
-			continue;
-
-		for (offset = 0; offset < sec->len; offset += insn->len) {
-			insn = malloc(sizeof(*insn));
-			memset(insn, 0, sizeof(*insn));
-
-			INIT_LIST_HEAD(&insn->alts);
-			insn->sec = sec;
-			insn->offset = offset;
-
-			ret = arch_decode_instruction(file->elf, sec, offset,
-						      sec->len - offset,
-						      &insn->len, &insn->type,
-						      &insn->immediate);
-			if (ret)
-				return ret;
-
-			if (!insn->type || insn->type > INSN_LAST) {
-				WARN_FUNC("invalid instruction type %d",
-					  insn->sec, insn->offset, insn->type);
-				return -1;
-			}
-
-			hash_add(file->insn_hash, &insn->hash, insn->offset);
-			list_add_tail(&insn->list, &file->insn_list);
-		}
-
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!find_insn(file, sec, func->offset)) {
-				WARN("%s(): can't find starting instruction",
-				     func->name);
-				return -1;
-			}
-
-			func_for_each_insn(file, func, insn)
-				if (!insn->func)
-					insn->func = func;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Find all uses of the unreachable() macro, which are code path dead ends.
- */
-static int add_dead_ends(struct objtool_file *file)
-{
-	struct section *sec;
-	struct rela *rela;
-	struct instruction *insn;
-	bool found;
-
-	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
-	if (!sec)
-		return 0;
-
-	list_for_each_entry(rela, &sec->rela_list, list) {
-		if (rela->sym->type != STT_SECTION) {
-			WARN("unexpected relocation symbol type in %s", sec->name);
-			return -1;
-		}
-		insn = find_insn(file, rela->sym->sec, rela->addend);
-		if (insn)
-			insn = list_prev_entry(insn, list);
-		else if (rela->addend == rela->sym->sec->len) {
-			found = false;
-			list_for_each_entry_reverse(insn, &file->insn_list, list) {
-				if (insn->sec == rela->sym->sec) {
-					found = true;
-					break;
-				}
-			}
-
-			if (!found) {
-				WARN("can't find unreachable insn at %s+0x%x",
-				     rela->sym->sec->name, rela->addend);
-				return -1;
-			}
-		} else {
-			WARN("can't find unreachable insn at %s+0x%x",
-			     rela->sym->sec->name, rela->addend);
-			return -1;
-		}
-
-		insn->dead_end = true;
-	}
-
-	return 0;
-}
-
-/*
- * Warnings shouldn't be reported for ignored functions.
- */
-static void add_ignores(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			if (!ignore_func(file, func))
-				continue;
-
-			func_for_each_insn(file, func, insn)
-				insn->visited = true;
-		}
-	}
-}
-
-/*
- * Find the destination instructions for all jumps.
- */
-static int add_jump_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	struct rela *rela;
-	struct section *dest_sec;
-	unsigned long dest_off;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		/* skip ignores */
-		if (insn->visited)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_sec = insn->sec;
-			dest_off = insn->offset + insn->len + insn->immediate;
-		} else if (rela->sym->type == STT_SECTION) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->addend + 4;
-		} else if (rela->sym->sec->idx) {
-			dest_sec = rela->sym->sec;
-			dest_off = rela->sym->sym.st_value + rela->addend + 4;
-		} else {
-			/* sibling call */
-			insn->jump_dest = 0;
-			continue;
-		}
-
-		insn->jump_dest = find_insn(file, dest_sec, dest_off);
-		if (!insn->jump_dest) {
-
-			/*
-			 * This is a special case where an alt instruction
-			 * jumps past the end of the section.  These are
-			 * handled later in handle_group_alt().
-			 */
-			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
-				continue;
-
-			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
-				  insn->sec, insn->offset, dest_sec->name,
-				  dest_off);
-			return -1;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Find the destination instructions for all calls.
- */
-static int add_call_destinations(struct objtool_file *file)
-{
-	struct instruction *insn;
-	unsigned long dest_off;
-	struct rela *rela;
-
-	for_each_insn(file, insn) {
-		if (insn->type != INSN_CALL)
-			continue;
-
-		rela = find_rela_by_dest_range(insn->sec, insn->offset,
-					       insn->len);
-		if (!rela) {
-			dest_off = insn->offset + insn->len + insn->immediate;
-			insn->call_dest = find_symbol_by_offset(insn->sec,
-								dest_off);
-			if (!insn->call_dest) {
-				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
-					  insn->sec, insn->offset, dest_off);
-				return -1;
-			}
-		} else if (rela->sym->type == STT_SECTION) {
-			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
-								rela->addend+4);
-			if (!insn->call_dest ||
-			    insn->call_dest->type != STT_FUNC) {
-				WARN_FUNC("can't find call dest symbol at %s+0x%x",
-					  insn->sec, insn->offset,
-					  rela->sym->sec->name,
-					  rela->addend + 4);
-				return -1;
-			}
-		} else
-			insn->call_dest = rela->sym;
-	}
-
-	return 0;
-}
-
-/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- *    instruction at the end so that validate_branch() skips all the original
- *    replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero.  In
- *    that case the old instructions are replaced with noops.  We simulate that
- *    by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- *    conditionally jumps to the _end_ of the entry.  We have to modify these
- *    jumps' destinations to point back to .text rather than the end of the
- *    entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
- */
-static int handle_group_alt(struct objtool_file *file,
-			    struct special_alt *special_alt,
-			    struct instruction *orig_insn,
-			    struct instruction **new_insn)
-{
-	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
-	unsigned long dest_off;
-
-	last_orig_insn = NULL;
-	insn = orig_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
-			break;
-
-		if (special_alt->skip_orig)
-			insn->type = INSN_NOP;
-
-		insn->alt_group = true;
-		last_orig_insn = insn;
-	}
-
-	if (!next_insn_same_sec(file, last_orig_insn)) {
-		WARN("%s: don't know how to handle alternatives at end of section",
-		     special_alt->orig_sec->name);
-		return -1;
-	}
-
-	fake_jump = malloc(sizeof(*fake_jump));
-	if (!fake_jump) {
-		WARN("malloc failed");
-		return -1;
-	}
-	memset(fake_jump, 0, sizeof(*fake_jump));
-	INIT_LIST_HEAD(&fake_jump->alts);
-	fake_jump->sec = special_alt->new_sec;
-	fake_jump->offset = -1;
-	fake_jump->type = INSN_JUMP_UNCONDITIONAL;
-	fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
-
-	if (!special_alt->new_len) {
-		*new_insn = fake_jump;
-		return 0;
-	}
-
-	last_new_insn = NULL;
-	insn = *new_insn;
-	sec_for_each_insn_from(file, insn) {
-		if (insn->offset >= special_alt->new_off + special_alt->new_len)
-			break;
-
-		last_new_insn = insn;
-
-		if (insn->type != INSN_JUMP_CONDITIONAL &&
-		    insn->type != INSN_JUMP_UNCONDITIONAL)
-			continue;
-
-		if (!insn->immediate)
-			continue;
-
-		dest_off = insn->offset + insn->len + insn->immediate;
-		if (dest_off == special_alt->new_off + special_alt->new_len)
-			insn->jump_dest = fake_jump;
-
-		if (!insn->jump_dest) {
-			WARN_FUNC("can't find alternative jump destination",
-				  insn->sec, insn->offset);
-			return -1;
-		}
-	}
-
-	if (!last_new_insn) {
-		WARN_FUNC("can't find last new alternative instruction",
-			  special_alt->new_sec, special_alt->new_off);
-		return -1;
-	}
-
-	list_add(&fake_jump->list, &last_new_insn->list);
-
-	return 0;
-}
-
-/*
- * A jump table entry can either convert a nop to a jump or a jump to a nop.
- * If the original instruction is a jump, make the alt entry an effective nop
- * by just skipping the original instruction.
- */
-static int handle_jump_alt(struct objtool_file *file,
-			   struct special_alt *special_alt,
-			   struct instruction *orig_insn,
-			   struct instruction **new_insn)
-{
-	if (orig_insn->type == INSN_NOP)
-		return 0;
-
-	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
-		WARN_FUNC("unsupported instruction at jump label",
-			  orig_insn->sec, orig_insn->offset);
-		return -1;
-	}
-
-	*new_insn = list_next_entry(orig_insn, list);
-	return 0;
-}
-
-/*
- * Read all the special sections which have alternate instructions which can be
- * patched in or redirected to at runtime.  Each instruction having alternate
- * instruction(s) has them added to its insn->alts list, which will be
- * traversed in validate_branch().
- */
-static int add_special_section_alts(struct objtool_file *file)
-{
-	struct list_head special_alts;
-	struct instruction *orig_insn, *new_insn;
-	struct special_alt *special_alt, *tmp;
-	struct alternative *alt;
-	int ret;
-
-	ret = special_get_alts(file->elf, &special_alts);
-	if (ret)
-		return ret;
-
-	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			ret = -1;
-			goto out;
-		}
-
-		orig_insn = find_insn(file, special_alt->orig_sec,
-				      special_alt->orig_off);
-		if (!orig_insn) {
-			WARN_FUNC("special: can't find orig instruction",
-				  special_alt->orig_sec, special_alt->orig_off);
-			ret = -1;
-			goto out;
-		}
-
-		new_insn = NULL;
-		if (!special_alt->group || special_alt->new_len) {
-			new_insn = find_insn(file, special_alt->new_sec,
-					     special_alt->new_off);
-			if (!new_insn) {
-				WARN_FUNC("special: can't find new instruction",
-					  special_alt->new_sec,
-					  special_alt->new_off);
-				ret = -1;
-				goto out;
-			}
-		}
-
-		if (special_alt->group) {
-			ret = handle_group_alt(file, special_alt, orig_insn,
-					       &new_insn);
-			if (ret)
-				goto out;
-		} else if (special_alt->jump_or_nop) {
-			ret = handle_jump_alt(file, special_alt, orig_insn,
-					      &new_insn);
-			if (ret)
-				goto out;
-		}
-
-		alt->insn = new_insn;
-		list_add_tail(&alt->list, &orig_insn->alts);
-
-		list_del(&special_alt->list);
-		free(special_alt);
-	}
-
-out:
-	return ret;
-}
-
-static int add_switch_table(struct objtool_file *file, struct symbol *func,
-			    struct instruction *insn, struct rela *table,
-			    struct rela *next_table)
-{
-	struct rela *rela = table;
-	struct instruction *alt_insn;
-	struct alternative *alt;
-
-	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
-		if (rela == next_table)
-			break;
-
-		if (rela->sym->sec != insn->sec ||
-		    rela->addend <= func->offset ||
-		    rela->addend >= func->offset + func->len)
-			break;
-
-		alt_insn = find_insn(file, insn->sec, rela->addend);
-		if (!alt_insn) {
-			WARN("%s: can't find instruction at %s+0x%x",
-			     file->rodata->rela->name, insn->sec->name,
-			     rela->addend);
-			return -1;
-		}
-
-		alt = malloc(sizeof(*alt));
-		if (!alt) {
-			WARN("malloc failed");
-			return -1;
-		}
-
-		alt->insn = alt_insn;
-		list_add_tail(&alt->list, &insn->alts);
-	}
-
-	return 0;
-}
-
-/*
- * find_switch_table() - Given a dynamic jump, find the switch jump table in
- * .rodata associated with it.
- *
- * There are 3 basic patterns:
- *
- * 1. jmpq *[rodata addr](,%reg,8)
- *
- *    This is the most common case by far.  It jumps to an address in a simple
- *    jump table which is stored in .rodata.
- *
- * 2. jmpq *[rodata addr](%rip)
- *
- *    This is caused by a rare GCC quirk, currently only seen in three driver
- *    functions in the kernel, only with certain obscure non-distro configs.
- *
- *    As part of an optimization, GCC makes a copy of an existing switch jump
- *    table, modifies it, and then hard-codes the jump (albeit with an indirect
- *    jump) to use a single entry in the table.  The rest of the jump table and
- *    some of its jump targets remain as dead code.
- *
- *    In such a case we can just crudely ignore all unreachable instruction
- *    warnings for the entire object file.  Ideally we would just ignore them
- *    for the function, but that would require redesigning the code quite a
- *    bit.  And honestly that's just not worth doing: unreachable instruction
- *    warnings are of questionable value anyway, and this is such a rare issue.
- *
- * 3. mov [rodata addr],%reg1
- *    ... some instructions ...
- *    jmpq *(%reg1,%reg2,8)
- *
- *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
- *    writing, there are 11 occurrences of it in the allmodconfig kernel.
- *
- *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
- *    ensure the same register is used in the mov and jump instructions.
- */
-static struct rela *find_switch_table(struct objtool_file *file,
-				      struct symbol *func,
-				      struct instruction *insn)
-{
-	struct rela *text_rela, *rodata_rela;
-	struct instruction *orig_insn = insn;
-
-	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
-	if (text_rela && text_rela->sym == file->rodata->sym) {
-		/* case 1 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend);
-		if (rodata_rela)
-			return rodata_rela;
-
-		/* case 2 */
-		rodata_rela = find_rela_by_dest(file->rodata,
-						text_rela->addend + 4);
-		if (!rodata_rela)
-			return NULL;
-		file->ignore_unreachables = true;
-		return rodata_rela;
-	}
-
-	/* case 3 */
-	func_for_each_insn_continue_reverse(file, func, insn) {
-		if (insn->type == INSN_JUMP_DYNAMIC)
-			break;
-
-		/* allow small jumps within the range */
-		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
-		    insn->jump_dest &&
-		    (insn->jump_dest->offset <= insn->offset ||
-		     insn->jump_dest->offset > orig_insn->offset))
-		    break;
-
-		/* look for a relocation which references .rodata */
-		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
-						    insn->len);
-		if (!text_rela || text_rela->sym != file->rodata->sym)
-			continue;
-
-		/*
-		 * Make sure the .rodata address isn't associated with a
-		 * symbol.  gcc jump tables are anonymous data.
-		 */
-		if (find_symbol_containing(file->rodata, text_rela->addend))
-			continue;
-
-		return find_rela_by_dest(file->rodata, text_rela->addend);
-	}
-
-	return NULL;
-}
-
-static int add_func_switch_tables(struct objtool_file *file,
-				  struct symbol *func)
-{
-	struct instruction *insn, *prev_jump = NULL;
-	struct rela *rela, *prev_rela = NULL;
-	int ret;
-
-	func_for_each_insn(file, func, insn) {
-		if (insn->type != INSN_JUMP_DYNAMIC)
-			continue;
-
-		rela = find_switch_table(file, func, insn);
-		if (!rela)
-			continue;
-
-		/*
-		 * We found a switch table, but we don't know yet how big it
-		 * is.  Don't add it until we reach the end of the function or
-		 * the beginning of another switch table in the same function.
-		 */
-		if (prev_jump) {
-			ret = add_switch_table(file, func, prev_jump, prev_rela,
-					       rela);
-			if (ret)
-				return ret;
-		}
-
-		prev_jump = insn;
-		prev_rela = rela;
-	}
-
-	if (prev_jump) {
-		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*
- * For some switch statements, gcc generates a jump table in the .rodata
- * section which contains a list of addresses within the function to jump to.
- * This finds these jump tables and adds them to the insn->alts lists.
- */
-static int add_switch_table_alts(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	int ret;
-
-	if (!file->rodata || !file->rodata->rela)
-		return 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			ret = add_func_switch_tables(file, func);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int decode_sections(struct objtool_file *file)
-{
-	int ret;
-
-	ret = decode_instructions(file);
-	if (ret)
-		return ret;
-
-	ret = add_dead_ends(file);
-	if (ret)
-		return ret;
-
-	add_ignores(file);
-
-	ret = add_jump_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_call_destinations(file);
-	if (ret)
-		return ret;
-
-	ret = add_special_section_alts(file);
-	if (ret)
-		return ret;
-
-	ret = add_switch_table_alts(file);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static bool is_fentry_call(struct instruction *insn)
-{
-	if (insn->type == INSN_CALL &&
-	    insn->call_dest->type == STT_NOTYPE &&
-	    !strcmp(insn->call_dest->name, "__fentry__"))
-		return true;
-
-	return false;
-}
-
-static bool has_modified_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) ||
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static bool has_valid_stack_frame(struct instruction *insn)
-{
-	return (insn->state & STATE_FP_SAVED) &&
-	       (insn->state & STATE_FP_SETUP);
-}
-
-static unsigned int frame_state(unsigned long state)
-{
-	return (state & (STATE_FP_SAVED | STATE_FP_SETUP));
-}
-
-/*
- * Follow the branch starting at the given instruction, and recursively follow
- * any other branches (jumps).  Meanwhile, track the frame pointer state at
- * each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
- */
-static int validate_branch(struct objtool_file *file,
-			   struct instruction *first, unsigned char first_state)
-{
-	struct alternative *alt;
-	struct instruction *insn;
-	struct section *sec;
-	struct symbol *func = NULL;
-	unsigned char state;
-	int ret;
-
-	insn = first;
-	sec = insn->sec;
-	state = first_state;
-
-	if (insn->alt_group && list_empty(&insn->alts)) {
-		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
-			  sec, insn->offset);
-		return 1;
-	}
-
-	while (1) {
-		if (file->c_file && insn->func) {
-			if (func && func != insn->func) {
-				WARN("%s() falls through to next function %s()",
-				     func->name, insn->func->name);
-				return 1;
-			}
-
-			func = insn->func;
-		}
-
-		if (insn->visited) {
-			if (frame_state(insn->state) != frame_state(state)) {
-				WARN_FUNC("frame pointer state mismatch",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-		}
-
-		insn->visited = true;
-		insn->state = state;
-
-		list_for_each_entry(alt, &insn->alts, list) {
-			ret = validate_branch(file, alt->insn, state);
-			if (ret)
-				return 1;
-		}
-
-		switch (insn->type) {
-
-		case INSN_FP_SAVE:
-			if (!nofp) {
-				if (state & STATE_FP_SAVED) {
-					WARN_FUNC("duplicate frame pointer save",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_FP_SETUP:
-			if (!nofp) {
-				if (state & STATE_FP_SETUP) {
-					WARN_FUNC("duplicate frame pointer setup",
-						  sec, insn->offset);
-					return 1;
-				}
-				state |= STATE_FP_SETUP;
-			}
-			break;
-
-		case INSN_FP_RESTORE:
-			if (!nofp) {
-				if (has_valid_stack_frame(insn))
-					state &= ~STATE_FP_SETUP;
-
-				state &= ~STATE_FP_SAVED;
-			}
-			break;
-
-		case INSN_RETURN:
-			if (!nofp && has_modified_stack_frame(insn)) {
-				WARN_FUNC("return without frame pointer restore",
-					  sec, insn->offset);
-				return 1;
-			}
-			return 0;
-
-		case INSN_CALL:
-			if (is_fentry_call(insn)) {
-				state |= STATE_FENTRY;
-				break;
-			}
-
-			ret = dead_end_function(file, insn->call_dest);
-			if (ret == 1)
-				return 0;
-			if (ret == -1)
-				return 1;
-
-			/* fallthrough */
-		case INSN_CALL_DYNAMIC:
-			if (!nofp && !has_valid_stack_frame(insn)) {
-				WARN_FUNC("call without frame pointer save/setup",
-					  sec, insn->offset);
-				return 1;
-			}
-			break;
-
-		case INSN_JUMP_CONDITIONAL:
-		case INSN_JUMP_UNCONDITIONAL:
-			if (insn->jump_dest) {
-				ret = validate_branch(file, insn->jump_dest,
-						      state);
-				if (ret)
-					return 1;
-			} else if (has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			} /* else it's a sibling call */
-
-			if (insn->type == INSN_JUMP_UNCONDITIONAL)
-				return 0;
-
-			break;
-
-		case INSN_JUMP_DYNAMIC:
-			if (list_empty(&insn->alts) &&
-			    has_modified_stack_frame(insn)) {
-				WARN_FUNC("sibling call from callable instruction with changed frame pointer",
-					  sec, insn->offset);
-				return 1;
-			}
-
-			return 0;
-
-		default:
-			break;
-		}
-
-		if (insn->dead_end)
-			return 0;
-
-		insn = next_insn_same_sec(file, insn);
-		if (!insn) {
-			WARN("%s: unexpected end of section", sec->name);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-static bool is_kasan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
-}
-
-static bool is_ubsan_insn(struct instruction *insn)
-{
-	return (insn->type == INSN_CALL &&
-		!strcmp(insn->call_dest->name,
-			"__ubsan_handle_builtin_unreachable"));
-}
-
-static bool ignore_unreachable_insn(struct symbol *func,
-				    struct instruction *insn)
-{
-	int i;
-
-	if (insn->type == INSN_NOP)
-		return true;
-
-	/*
-	 * Check if this (or a subsequent) instruction is related to
-	 * CONFIG_UBSAN or CONFIG_KASAN.
-	 *
-	 * End the search at 5 instructions to avoid going into the weeds.
-	 */
-	for (i = 0; i < 5; i++) {
-
-		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
-			return true;
-
-		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
-			insn = insn->jump_dest;
-			continue;
-		}
-
-		if (insn->offset + insn->len >= func->offset + func->len)
-			break;
-		insn = list_next_entry(insn, list);
-	}
-
-	return false;
-}
-
-static int validate_functions(struct objtool_file *file)
-{
-	struct section *sec;
-	struct symbol *func;
-	struct instruction *insn;
-	int ret, warnings = 0;
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			insn = find_insn(file, sec, func->offset);
-			if (!insn)
-				continue;
-
-			ret = validate_branch(file, insn, 0);
-			warnings += ret;
-		}
-	}
-
-	list_for_each_entry(sec, &file->elf->sections, list) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
-
-			func_for_each_insn(file, func, insn) {
-				if (insn->visited)
-					continue;
-
-				insn->visited = true;
-
-				if (file->ignore_unreachables || warnings ||
-				    ignore_unreachable_insn(func, insn))
-					continue;
-
-				/*
-				 * gcov produces a lot of unreachable
-				 * instructions.  If we get an unreachable
-				 * warning and the file has gcov enabled, just
-				 * ignore it, and all other such warnings for
-				 * the file.
-				 */
-				if (!file->ignore_unreachables &&
-				    gcov_enabled(file)) {
-					file->ignore_unreachables = true;
-					continue;
-				}
-
-				WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-				warnings++;
-			}
-		}
-	}
-
-	return warnings;
-}
-
-static int validate_uncallable_instructions(struct objtool_file *file)
-{
-	struct instruction *insn;
-	int warnings = 0;
-
-	for_each_insn(file, insn) {
-		if (!insn->visited && insn->type == INSN_RETURN) {
-			WARN_FUNC("return instruction outside of a callable function",
-				  insn->sec, insn->offset);
-			warnings++;
-		}
-	}
-
-	return warnings;
-}
-
-static void cleanup(struct objtool_file *file)
-{
-	struct instruction *insn, *tmpinsn;
-	struct alternative *alt, *tmpalt;
-
-	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
-		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
-			list_del(&alt->list);
-			free(alt);
-		}
-		list_del(&insn->list);
-		hash_del(&insn->hash);
-		free(insn);
-	}
-	elf_close(file->elf);
-}
-
-const char * const check_usage[] = {
+static const char * const check_usage[] = {
 	"objtool check [<options>] file.o",
 	NULL,
 };
 
+const struct option check_options[] = {
+	OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
+	OPT_END(),
+};
+
 int cmd_check(int argc, const char **argv)
 {
-	struct objtool_file file;
-	int ret, warnings = 0;
+	const char *objname;
 
-	const struct option options[] = {
-		OPT_BOOLEAN('f', "no-fp", &nofp, "Skip frame pointer validation"),
-		OPT_END(),
-	};
-
-	argc = parse_options(argc, argv, options, check_usage, 0);
+	argc = parse_options(argc, argv, check_options, check_usage, 0);
 
 	if (argc != 1)
-		usage_with_options(check_usage, options);
+		usage_with_options(check_usage, check_options);
 
 	objname = argv[0];
 
-	file.elf = elf_open(objname);
-	if (!file.elf) {
-		fprintf(stderr, "error reading elf file %s\n", objname);
-		return 1;
-	}
-
-	INIT_LIST_HEAD(&file.insn_list);
-	hash_init(file.insn_hash);
-	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
-	file.rodata = find_section_by_name(file.elf, ".rodata");
-	file.ignore_unreachables = false;
-	file.c_file = find_section_by_name(file.elf, ".comment");
-
-	ret = decode_sections(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_functions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-	ret = validate_uncallable_instructions(&file);
-	if (ret < 0)
-		goto out;
-	warnings += ret;
-
-out:
-	cleanup(&file);
-
-	/* ignore warnings for now until we get all the code cleaned up */
-	if (ret || warnings)
-		return 0;
-	return 0;
+	return check(objname, nofp);
 }
diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
new file mode 100644
index 0000000..443ab2c
--- /dev/null
+++ b/tools/objtool/cfi.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _OBJTOOL_CFI_H
+#define _OBJTOOL_CFI_H
+
+#define CFI_UNDEFINED		-1
+#define CFI_CFA			-2
+#define CFI_SP_INDIRECT		-3
+#define CFI_BP_INDIRECT		-4
+
+#define CFI_AX			0
+#define CFI_DX			1
+#define CFI_CX			2
+#define CFI_BX			3
+#define CFI_SI			4
+#define CFI_DI			5
+#define CFI_BP			6
+#define CFI_SP			7
+#define CFI_R8			8
+#define CFI_R9			9
+#define CFI_R10			10
+#define CFI_R11			11
+#define CFI_R12			12
+#define CFI_R13			13
+#define CFI_R14			14
+#define CFI_R15			15
+#define CFI_RA			16
+#define CFI_NUM_REGS	17
+
+struct cfi_reg {
+	int base;
+	int offset;
+};
+
+struct cfi_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+};
+
+#endif /* _OBJTOOL_CFI_H */
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
new file mode 100644
index 0000000..fea2221
--- /dev/null
+++ b/tools/objtool/check.c
@@ -0,0 +1,1655 @@
+/*
+ * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "check.h"
+#include "elf.h"
+#include "special.h"
+#include "arch.h"
+#include "warn.h"
+
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+
+struct alternative {
+	struct list_head list;
+	struct instruction *insn;
+};
+
+const char *objname;
+static bool nofp;
+struct cfi_state initial_func_cfi;
+
+static struct instruction *find_insn(struct objtool_file *file,
+				     struct section *sec, unsigned long offset)
+{
+	struct instruction *insn;
+
+	hash_for_each_possible(file->insn_hash, insn, hash, offset)
+		if (insn->sec == sec && insn->offset == offset)
+			return insn;
+
+	return NULL;
+}
+
+static struct instruction *next_insn_same_sec(struct objtool_file *file,
+					      struct instruction *insn)
+{
+	struct instruction *next = list_next_entry(insn, list);
+
+	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+		return NULL;
+
+	return next;
+}
+
+static bool gcov_enabled(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *sym;
+
+	for_each_sec(file, sec)
+		list_for_each_entry(sym, &sec->symbol_list, list)
+			if (!strncmp(sym->name, "__gcov_.", 8))
+				return true;
+
+	return false;
+}
+
+#define func_for_each_insn(file, func, insn)				\
+	for (insn = find_insn(file, func->sec, func->offset);		\
+	     insn && &insn->list != &file->insn_list &&			\
+		insn->sec == func->sec &&				\
+		insn->offset < func->offset + func->len;		\
+	     insn = list_next_entry(insn, list))
+
+#define func_for_each_insn_continue_reverse(file, func, insn)		\
+	for (insn = list_prev_entry(insn, list);			\
+	     &insn->list != &file->insn_list &&				\
+		insn->sec == func->sec && insn->offset >= func->offset;	\
+	     insn = list_prev_entry(insn, list))
+
+#define sec_for_each_insn_from(file, insn)				\
+	for (; insn; insn = next_insn_same_sec(file, insn))
+
+#define sec_for_each_insn_continue(file, insn)				\
+	for (insn = next_insn_same_sec(file, insn); insn;		\
+	     insn = next_insn_same_sec(file, insn))
+
+/*
+ * Check if the function has been manually whitelisted with the
+ * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
+ * due to its use of a context switching instruction.
+ */
+static bool ignore_func(struct objtool_file *file, struct symbol *func)
+{
+	struct rela *rela;
+	struct instruction *insn;
+
+	/* check for STACK_FRAME_NON_STANDARD */
+	if (file->whitelist && file->whitelist->rela)
+		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+			if (rela->sym->type == STT_SECTION &&
+			    rela->sym->sec == func->sec &&
+			    rela->addend == func->offset)
+				return true;
+			if (rela->sym->type == STT_FUNC && rela->sym == func)
+				return true;
+		}
+
+	/* check if it has a context switching instruction */
+	func_for_each_insn(file, func, insn)
+		if (insn->type == INSN_CONTEXT_SWITCH)
+			return true;
+
+	return false;
+}
+
+/*
+ * This checks to see if the given function is a "noreturn" function.
+ *
+ * For global functions which are outside the scope of this object file, we
+ * have to keep a manual list of them.
+ *
+ * For local functions, we have to detect them manually by simply looking for
+ * the lack of a return instruction.
+ *
+ * Returns:
+ *  -1: error
+ *   0: no dead end
+ *   1: dead end
+ */
+static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+			       int recursion)
+{
+	int i;
+	struct instruction *insn;
+	bool empty = true;
+
+	/*
+	 * Unfortunately these have to be hard coded because the noreturn
+	 * attribute isn't provided in ELF data.
+	 */
+	static const char * const global_noreturns[] = {
+		"__stack_chk_fail",
+		"panic",
+		"do_exit",
+		"do_task_dead",
+		"__module_put_and_exit",
+		"complete_and_exit",
+		"kvm_spurious_fault",
+		"__reiserfs_panic",
+		"lbug_with_loc",
+		"fortify_panic",
+	};
+
+	if (func->bind == STB_WEAK)
+		return 0;
+
+	if (func->bind == STB_GLOBAL)
+		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
+			if (!strcmp(func->name, global_noreturns[i]))
+				return 1;
+
+	if (!func->sec)
+		return 0;
+
+	func_for_each_insn(file, func, insn) {
+		empty = false;
+
+		if (insn->type == INSN_RETURN)
+			return 0;
+	}
+
+	if (empty)
+		return 0;
+
+	/*
+	 * A function can have a sibling call instead of a return.  In that
+	 * case, the function's dead-end status depends on whether the target
+	 * of the sibling call returns.
+	 */
+	func_for_each_insn(file, func, insn) {
+		if (insn->sec != func->sec ||
+		    insn->offset >= func->offset + func->len)
+			break;
+
+		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+			struct instruction *dest = insn->jump_dest;
+			struct symbol *dest_func;
+
+			if (!dest)
+				/* sibling call to another file */
+				return 0;
+
+			if (dest->sec != func->sec ||
+			    dest->offset < func->offset ||
+			    dest->offset >= func->offset + func->len) {
+				/* local sibling call */
+				dest_func = find_symbol_by_offset(dest->sec,
+								  dest->offset);
+				if (!dest_func)
+					continue;
+
+				if (recursion == 5) {
+					WARN_FUNC("infinite recursion (objtool bug!)",
+						  dest->sec, dest->offset);
+					return -1;
+				}
+
+				return __dead_end_function(file, dest_func,
+							   recursion + 1);
+			}
+		}
+
+		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
+			/* sibling call */
+			return 0;
+	}
+
+	return 1;
+}
+
+static int dead_end_function(struct objtool_file *file, struct symbol *func)
+{
+	return __dead_end_function(file, func, 0);
+}
+
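+/* Reset the tracked CFA, register and DRAP state to "undefined". */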
+static void clear_insn_state(struct insn_state *state)
+{
+	int i;
+
+	memset(state, 0, sizeof(*state));
+	state->cfa.base = CFI_UNDEFINED;
+	for (i = 0; i < CFI_NUM_REGS; i++)
+		state->regs[i].base = CFI_UNDEFINED;
+	state->drap_reg = CFI_UNDEFINED;
+}
+
+/*
+ * Call the arch-specific instruction decoder for all the instructions and add
+ * them to the global instruction list.
+ */
+static int decode_instructions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	unsigned long offset;
+	struct instruction *insn;
+	int ret;
+
+	for_each_sec(file, sec) {
+
+		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+			continue;
+
+		for (offset = 0; offset < sec->len; offset += insn->len) {
+			insn = malloc(sizeof(*insn));
+			if (!insn) {
+				WARN("malloc failed");
+				return -1;
+			}
+			memset(insn, 0, sizeof(*insn));
+			INIT_LIST_HEAD(&insn->alts);
+			clear_insn_state(&insn->state);
+
+			insn->sec = sec;
+			insn->offset = offset;
+
+			ret = arch_decode_instruction(file->elf, sec, offset,
+						      sec->len - offset,
+						      &insn->len, &insn->type,
+						      &insn->immediate,
+						      &insn->stack_op);
+			if (ret)
+				return ret;
+
+			if (!insn->type || insn->type > INSN_LAST) {
+				WARN_FUNC("invalid instruction type %d",
+					  insn->sec, insn->offset, insn->type);
+				return -1;
+			}
+
+			hash_add(file->insn_hash, &insn->hash, insn->offset);
+			list_add_tail(&insn->list, &file->insn_list);
+		}
+
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!find_insn(file, sec, func->offset)) {
+				WARN("%s(): can't find starting instruction",
+				     func->name);
+				return -1;
+			}
+
+			func_for_each_insn(file, func, insn)
+				if (!insn->func)
+					insn->func = func;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find all uses of the unreachable() macro, which are code path dead ends.
+ */
+static int add_dead_ends(struct objtool_file *file)
+{
+	struct section *sec;
+	struct rela *rela;
+	struct instruction *insn;
+	bool found;
+
+	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
+	if (!sec)
+		return 0;
+
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		if (rela->sym->type != STT_SECTION) {
+			WARN("unexpected relocation symbol type in %s", sec->name);
+			return -1;
+		}
+		insn = find_insn(file, rela->sym->sec, rela->addend);
+		if (insn)
+			insn = list_prev_entry(insn, list);
+		else if (rela->addend == rela->sym->sec->len) {
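+			/* annotation points past the section end; fall back to its last insn */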
+			found = false;
+			list_for_each_entry_reverse(insn, &file->insn_list, list) {
+				if (insn->sec == rela->sym->sec) {
+					found = true;
+					break;
+				}
+			}
+
+			if (!found) {
+				WARN("can't find unreachable insn at %s+0x%x",
+				     rela->sym->sec->name, rela->addend);
+				return -1;
+			}
+		} else {
+			WARN("can't find unreachable insn at %s+0x%x",
+			     rela->sym->sec->name, rela->addend);
+			return -1;
+		}
+
+		insn->dead_end = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Warnings shouldn't be reported for ignored functions.
+ */
+static void add_ignores(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct section *sec;
+	struct symbol *func;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!ignore_func(file, func))
+				continue;
+
+			func_for_each_insn(file, func, insn)
+				insn->ignore = true;
+		}
+	}
+}
+
+/*
+ * Find the destination instructions for all jumps.
+ */
+static int add_jump_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	struct rela *rela;
+	struct section *dest_sec;
+	unsigned long dest_off;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (insn->ignore)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_sec = insn->sec;
+			dest_off = insn->offset + insn->len + insn->immediate;
+		} else if (rela->sym->type == STT_SECTION) {
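+			/* rel32 addends are biased by -4, hence the +4 */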
+			dest_sec = rela->sym->sec;
+			dest_off = rela->addend + 4;
+		} else if (rela->sym->sec->idx) {
+			dest_sec = rela->sym->sec;
+			dest_off = rela->sym->sym.st_value + rela->addend + 4;
+		} else {
+			/* sibling call */
+			insn->jump_dest = 0;
+			continue;
+		}
+
+		insn->jump_dest = find_insn(file, dest_sec, dest_off);
+		if (!insn->jump_dest) {
+
+			/*
+			 * This is a special case where an alt instruction
+			 * jumps past the end of the section.  These are
+			 * handled later in handle_group_alt().
+			 */
+			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+				continue;
+
+			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
+				  insn->sec, insn->offset, dest_sec->name,
+				  dest_off);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Find the destination instructions for all calls.
+ */
+static int add_call_destinations(struct objtool_file *file)
+{
+	struct instruction *insn;
+	unsigned long dest_off;
+	struct rela *rela;
+
+	for_each_insn(file, insn) {
+		if (insn->type != INSN_CALL)
+			continue;
+
+		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+					       insn->len);
+		if (!rela) {
+			dest_off = insn->offset + insn->len + insn->immediate;
+			insn->call_dest = find_symbol_by_offset(insn->sec,
+								dest_off);
+			if (!insn->call_dest) {
+				WARN_FUNC("can't find call dest symbol at offset 0x%lx",
+					  insn->sec, insn->offset, dest_off);
+				return -1;
+			}
+		} else if (rela->sym->type == STT_SECTION) {
+			insn->call_dest = find_symbol_by_offset(rela->sym->sec,
+								rela->addend+4);
+			if (!insn->call_dest ||
+			    insn->call_dest->type != STT_FUNC) {
+				WARN_FUNC("can't find call dest symbol at %s+0x%x",
+					  insn->sec, insn->offset,
+					  rela->sym->sec->name,
+					  rela->addend + 4);
+				return -1;
+			}
+		} else
+			insn->call_dest = rela->sym;
+	}
+
+	return 0;
+}
+
+/*
+ * The .alternatives section requires some extra special care, over and above
+ * what other special sections require:
+ *
+ * 1. Because alternatives are patched in-place, we need to insert a fake jump
+ *    instruction at the end so that validate_branch() skips all the original
+ *    replaced instructions when validating the new instruction path.
+ *
+ * 2. An added wrinkle is that the new instruction length might be zero.  In
+ *    that case the old instructions are replaced with noops.  We simulate that
+ *    by creating a fake jump as the only new instruction.
+ *
+ * 3. In some cases, the alternative section includes an instruction which
+ *    conditionally jumps to the _end_ of the entry.  We have to modify these
+ *    jumps' destinations to point back to .text rather than the end of the
+ *    entry in .altinstr_replacement.
+ *
+ * 4. It has been requested that we don't validate the !POPCNT feature path
+ *    which is a "very very small percentage of machines".
+ */
+static int handle_group_alt(struct objtool_file *file,
+			    struct special_alt *special_alt,
+			    struct instruction *orig_insn,
+			    struct instruction **new_insn)
+{
+	struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump;
+	unsigned long dest_off;
+
+	last_orig_insn = NULL;
+	insn = orig_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+			break;
+
+		if (special_alt->skip_orig)
+			insn->type = INSN_NOP;
+
+		insn->alt_group = true;
+		last_orig_insn = insn;
+	}
+
+	if (!next_insn_same_sec(file, last_orig_insn)) {
+		WARN("%s: don't know how to handle alternatives at end of section",
+		     special_alt->orig_sec->name);
+		return -1;
+	}
+
+	fake_jump = malloc(sizeof(*fake_jump));
+	if (!fake_jump) {
+		WARN("malloc failed");
+		return -1;
+	}
+	memset(fake_jump, 0, sizeof(*fake_jump));
+	INIT_LIST_HEAD(&fake_jump->alts);
+	clear_insn_state(&fake_jump->state);
+
+	fake_jump->sec = special_alt->new_sec;
+	fake_jump->offset = -1;
+	fake_jump->type = INSN_JUMP_UNCONDITIONAL;
+	fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
+	fake_jump->ignore = true;
+
+	if (!special_alt->new_len) {
+		*new_insn = fake_jump;
+		return 0;
+	}
+
+	last_new_insn = NULL;
+	insn = *new_insn;
+	sec_for_each_insn_from(file, insn) {
+		if (insn->offset >= special_alt->new_off + special_alt->new_len)
+			break;
+
+		last_new_insn = insn;
+
+		if (insn->type != INSN_JUMP_CONDITIONAL &&
+		    insn->type != INSN_JUMP_UNCONDITIONAL)
+			continue;
+
+		if (!insn->immediate)
+			continue;
+
+		dest_off = insn->offset + insn->len + insn->immediate;
+		if (dest_off == special_alt->new_off + special_alt->new_len)
+			insn->jump_dest = fake_jump;
+
+		if (!insn->jump_dest) {
+			WARN_FUNC("can't find alternative jump destination",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+	}
+
+	if (!last_new_insn) {
+		WARN_FUNC("can't find last new alternative instruction",
+			  special_alt->new_sec, special_alt->new_off);
+		return -1;
+	}
+
+	list_add(&fake_jump->list, &last_new_insn->list);
+
+	return 0;
+}
+
+/*
+ * A jump table entry can either convert a nop to a jump or a jump to a nop.
+ * If the original instruction is a jump, make the alt entry an effective nop
+ * by just skipping the original instruction.
+ */
+static int handle_jump_alt(struct objtool_file *file,
+			   struct special_alt *special_alt,
+			   struct instruction *orig_insn,
+			   struct instruction **new_insn)
+{
+	if (orig_insn->type == INSN_NOP)
+		return 0;
+
+	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
+		WARN_FUNC("unsupported instruction at jump label",
+			  orig_insn->sec, orig_insn->offset);
+		return -1;
+	}
+
+	*new_insn = list_next_entry(orig_insn, list);
+	return 0;
+}
+
+/*
+ * Read all the special sections which have alternate instructions which can be
+ * patched in or redirected to at runtime.  Each instruction having alternate
+ * instruction(s) has them added to its insn->alts list, which will be
+ * traversed in validate_branch().
+ */
+static int add_special_section_alts(struct objtool_file *file)
+{
+	struct list_head special_alts;
+	struct instruction *orig_insn, *new_insn;
+	struct special_alt *special_alt, *tmp;
+	struct alternative *alt;
+	int ret;
+
+	ret = special_get_alts(file->elf, &special_alts);
+	if (ret)
+		return ret;
+
+	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			ret = -1;
+			goto out;
+		}
+
+		orig_insn = find_insn(file, special_alt->orig_sec,
+				      special_alt->orig_off);
+		if (!orig_insn) {
+			WARN_FUNC("special: can't find orig instruction",
+				  special_alt->orig_sec, special_alt->orig_off);
+			ret = -1;
+			goto out;
+		}
+
+		new_insn = NULL;
+		if (!special_alt->group || special_alt->new_len) {
+			new_insn = find_insn(file, special_alt->new_sec,
+					     special_alt->new_off);
+			if (!new_insn) {
+				WARN_FUNC("special: can't find new instruction",
+					  special_alt->new_sec,
+					  special_alt->new_off);
+				ret = -1;
+				goto out;
+			}
+		}
+
+		if (special_alt->group) {
+			ret = handle_group_alt(file, special_alt, orig_insn,
+					       &new_insn);
+			if (ret)
+				goto out;
+		} else if (special_alt->jump_or_nop) {
+			ret = handle_jump_alt(file, special_alt, orig_insn,
+					      &new_insn);
+			if (ret)
+				goto out;
+		}
+
+		alt->insn = new_insn;
+		list_add_tail(&alt->list, &orig_insn->alts);
+
+		list_del(&special_alt->list);
+		free(special_alt);
+	}
+
+out:
+	return ret;
+}
+
+static int add_switch_table(struct objtool_file *file, struct symbol *func,
+			    struct instruction *insn, struct rela *table,
+			    struct rela *next_table)
+{
+	struct rela *rela = table;
+	struct instruction *alt_insn;
+	struct alternative *alt;
+
+	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+		if (rela == next_table)
+			break;
+
+		if (rela->sym->sec != insn->sec ||
+		    rela->addend <= func->offset ||
+		    rela->addend >= func->offset + func->len)
+			break;
+
+		alt_insn = find_insn(file, insn->sec, rela->addend);
+		if (!alt_insn) {
+			WARN("%s: can't find instruction at %s+0x%x",
+			     file->rodata->rela->name, insn->sec->name,
+			     rela->addend);
+			return -1;
+		}
+
+		alt = malloc(sizeof(*alt));
+		if (!alt) {
+			WARN("malloc failed");
+			return -1;
+		}
+
+		alt->insn = alt_insn;
+		list_add_tail(&alt->list, &insn->alts);
+	}
+
+	return 0;
+}
+
+/*
+ * find_switch_table() - Given a dynamic jump, find the switch jump table in
+ * .rodata associated with it.
+ *
+ * There are 3 basic patterns:
+ *
+ * 1. jmpq *[rodata addr](,%reg,8)
+ *
+ *    This is the most common case by far.  It jumps to an address in a simple
+ *    jump table which is stored in .rodata.
+ *
+ * 2. jmpq *[rodata addr](%rip)
+ *
+ *    This is caused by a rare GCC quirk, currently only seen in three driver
+ *    functions in the kernel, only with certain obscure non-distro configs.
+ *
+ *    As part of an optimization, GCC makes a copy of an existing switch jump
+ *    table, modifies it, and then hard-codes the jump (albeit with an indirect
+ *    jump) to use a single entry in the table.  The rest of the jump table and
+ *    some of its jump targets remain as dead code.
+ *
+ *    In such a case we can just crudely ignore all unreachable instruction
+ *    warnings for the entire object file.  Ideally we would just ignore them
+ *    for the function, but that would require redesigning the code quite a
+ *    bit.  And honestly that's just not worth doing: unreachable instruction
+ *    warnings are of questionable value anyway, and this is such a rare issue.
+ *
+ * 3. mov [rodata addr],%reg1
+ *    ... some instructions ...
+ *    jmpq *(%reg1,%reg2,8)
+ *
+ *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
+ *    writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
+ *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ *    ensure the same register is used in the mov and jump instructions.
+ */
+static struct rela *find_switch_table(struct objtool_file *file,
+				      struct symbol *func,
+				      struct instruction *insn)
+{
+	struct rela *text_rela, *rodata_rela;
+	struct instruction *orig_insn = insn;
+
+	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
+	if (text_rela && text_rela->sym == file->rodata->sym) {
+		/* case 1 */
+		rodata_rela = find_rela_by_dest(file->rodata,
+						text_rela->addend);
+		if (rodata_rela)
+			return rodata_rela;
+
+		/* case 2 */
+		rodata_rela = find_rela_by_dest(file->rodata,
+						text_rela->addend + 4);
+		if (!rodata_rela)
+			return NULL;
+		file->ignore_unreachables = true;
+		return rodata_rela;
+	}
+
+	/* case 3 */
+	func_for_each_insn_continue_reverse(file, func, insn) {
+		if (insn->type == INSN_JUMP_DYNAMIC)
+			break;
+
+		/* allow small jumps within the range */
+		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+		    insn->jump_dest &&
+		    (insn->jump_dest->offset <= insn->offset ||
+		     insn->jump_dest->offset > orig_insn->offset))
+		    break;
+
+		/* look for a relocation which references .rodata */
+		text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
+						    insn->len);
+		if (!text_rela || text_rela->sym != file->rodata->sym)
+			continue;
+
+		/*
+		 * Make sure the .rodata address isn't associated with a
+		 * symbol.  gcc jump tables are anonymous data.
+		 */
+		if (find_symbol_containing(file->rodata, text_rela->addend))
+			continue;
+
+		return find_rela_by_dest(file->rodata, text_rela->addend);
+	}
+
+	return NULL;
+}
+
+static int add_func_switch_tables(struct objtool_file *file,
+				  struct symbol *func)
+{
+	struct instruction *insn, *prev_jump = NULL;
+	struct rela *rela, *prev_rela = NULL;
+	int ret;
+
+	func_for_each_insn(file, func, insn) {
+		if (insn->type != INSN_JUMP_DYNAMIC)
+			continue;
+
+		rela = find_switch_table(file, func, insn);
+		if (!rela)
+			continue;
+
+		/*
+		 * We found a switch table, but we don't know yet how big it
+		 * is.  Don't add it until we reach the end of the function or
+		 * the beginning of another switch table in the same function.
+		 */
+		if (prev_jump) {
+			ret = add_switch_table(file, func, prev_jump, prev_rela,
+					       rela);
+			if (ret)
+				return ret;
+		}
+
+		prev_jump = insn;
+		prev_rela = rela;
+	}
+
+	if (prev_jump) {
+		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * For some switch statements, gcc generates a jump table in the .rodata
+ * section which contains a list of addresses within the function to jump to.
+ * This finds these jump tables and adds them to the insn->alts lists.
+ */
+static int add_switch_table_alts(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	int ret;
+
+	if (!file->rodata || !file->rodata->rela)
+		return 0;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			ret = add_func_switch_tables(file, func);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int decode_sections(struct objtool_file *file)
+{
+	int ret;
+
+	ret = decode_instructions(file);
+	if (ret)
+		return ret;
+
+	ret = add_dead_ends(file);
+	if (ret)
+		return ret;
+
+	add_ignores(file);
+
+	ret = add_jump_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_call_destinations(file);
+	if (ret)
+		return ret;
+
+	ret = add_special_section_alts(file);
+	if (ret)
+		return ret;
+
+	ret = add_switch_table_alts(file);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static bool is_fentry_call(struct instruction *insn)
+{
+	if (insn->type == INSN_CALL &&
+	    insn->call_dest->type == STT_NOTYPE &&
+	    !strcmp(insn->call_dest->name, "__fentry__"))
+		return true;
+
+	return false;
+}
+
+static bool has_modified_stack_frame(struct insn_state *state)
+{
+	int i;
+
+	if (state->cfa.base != initial_func_cfi.cfa.base ||
+	    state->cfa.offset != initial_func_cfi.cfa.offset ||
+	    state->stack_size != initial_func_cfi.cfa.offset ||
+	    state->drap)
+		return true;
+
+	for (i = 0; i < CFI_NUM_REGS; i++)
+		if (state->regs[i].base != initial_func_cfi.regs[i].base ||
+		    state->regs[i].offset != initial_func_cfi.regs[i].offset)
+			return true;
+
+	return false;
+}
+
+static bool has_valid_stack_frame(struct insn_state *state)
+{
+	if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
+	    state->regs[CFI_BP].offset == -16)
+		return true;
+
+	if (state->drap && state->regs[CFI_BP].base == CFI_BP)
+		return true;
+
+	return false;
+}
+
+static void save_reg(struct insn_state *state, unsigned char reg, int base,
+		     int offset)
+{
+	if ((arch_callee_saved_reg(reg) ||
+	    (state->drap && reg == state->drap_reg)) &&
+	    state->regs[reg].base == CFI_UNDEFINED) {
+		state->regs[reg].base = base;
+		state->regs[reg].offset = offset;
+	}
+}
+
+static void restore_reg(struct insn_state *state, unsigned char reg)
+{
+	state->regs[reg].base = CFI_UNDEFINED;
+	state->regs[reg].offset = 0;
+}
+
+/*
+ * A note about DRAP stack alignment:
+ *
+ * GCC has the concept of a DRAP register, which is used to help keep track of
+ * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
+ * register.  The typical DRAP pattern is:
+ *
+ *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
+ *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
+ *   41 ff 72 f8		pushq  -0x8(%r10)
+ *   55				push   %rbp
+ *   48 89 e5			mov    %rsp,%rbp
+ *				(more pushes)
+ *   41 52			push   %r10
+ *				...
+ *   41 5a			pop    %r10
+ *				(more pops)
+ *   5d				pop    %rbp
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * There are some variations in the epilogues, like:
+ *
+ *   5b				pop    %rbx
+ *   41 5a			pop    %r10
+ *   41 5c			pop    %r12
+ *   41 5d			pop    %r13
+ *   41 5e			pop    %r14
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * and:
+ *
+ *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
+ *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
+ *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
+ *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
+ *   c9				leaveq
+ *   49 8d 62 f8		lea    -0x8(%r10),%rsp
+ *   c3				retq
+ *
+ * Sometimes r13 is used as the DRAP register, in which case it's saved and
+ * restored beforehand:
+ *
+ *   41 55			push   %r13
+ *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
+ *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
+ *				...
+ *   49 8d 65 f0		lea    -0x10(%r13),%rsp
+ *   41 5d			pop    %r13
+ *   c3				retq
+ */
+static int update_insn_state(struct instruction *insn, struct insn_state *state)
+{
+	struct stack_op *op = &insn->stack_op;
+	struct cfi_reg *cfa = &state->cfa;
+	struct cfi_reg *regs = state->regs;
+
+	/* stack operations don't make sense with an undefined CFA */
+	if (cfa->base == CFI_UNDEFINED) {
+		if (insn->func) {
+			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
+			return -1;
+		}
+		return 0;
+	}
+
+	switch (op->dest.type) {
+
+	case OP_DEST_REG:
+		switch (op->src.type) {
+
+		case OP_SRC_REG:
+			if (cfa->base == op->src.reg && cfa->base == CFI_SP &&
+			    op->dest.reg == CFI_BP && regs[CFI_BP].base == CFI_CFA &&
+			    regs[CFI_BP].offset == -cfa->offset) {
+
+				/* mov %rsp, %rbp */
+				cfa->base = op->dest.reg;
+				state->bp_scratch = false;
+			} else if (state->drap) {
+
+				/* drap: mov %rsp, %rbp */
+				regs[CFI_BP].base = CFI_BP;
+				regs[CFI_BP].offset = -state->stack_size;
+				state->bp_scratch = false;
+			} else if (!nofp) {
+
+				WARN_FUNC("unknown stack-related register move",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			break;
+
+		case OP_SRC_ADD:
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
+
+				/* add imm, %rsp */
+				state->stack_size -= op->src.offset;
+				if (cfa->base == CFI_SP)
+					cfa->offset -= op->src.offset;
+				break;
+			}
+
+			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
+
+				/* lea disp(%rbp), %rsp */
+				state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+				break;
+			}
+
+			if (op->dest.reg != CFI_BP && op->src.reg == CFI_SP &&
+			    cfa->base == CFI_SP) {
+
+				/* drap: lea disp(%rsp), %drap */
+				state->drap_reg = op->dest.reg;
+				break;
+			}
+
+			if (state->drap && op->dest.reg == CFI_SP &&
+			    op->src.reg == state->drap_reg) {
+
+				 /* drap: lea disp(%drap), %rsp */
+				cfa->base = CFI_SP;
+				cfa->offset = state->stack_size = -op->src.offset;
+				state->drap_reg = CFI_UNDEFINED;
+				state->drap = false;
+				break;
+			}
+
+			if (op->dest.reg == state->cfa.base) {
+				WARN_FUNC("unsupported stack register modification",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			break;
+
+		case OP_SRC_AND:
+			if (op->dest.reg != CFI_SP ||
+			    (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
+			    (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
+				WARN_FUNC("unsupported stack pointer realignment",
+					  insn->sec, insn->offset);
+				return -1;
+			}
+
+			if (state->drap_reg != CFI_UNDEFINED) {
+				/* drap: and imm, %rsp */
+				cfa->base = state->drap_reg;
+				cfa->offset = state->stack_size = 0;
+				state->drap = true;
+
+			}
+
+			/*
+			 * Older versions of GCC (4.8ish) realign the stack
+			 * without DRAP, with a frame pointer.
+			 */
+
+			break;
+
+		case OP_SRC_POP:
+			if (!state->drap && op->dest.type == OP_DEST_REG &&
+			    op->dest.reg == cfa->base) {
+
+				/* pop %rbp */
+				cfa->base = CFI_SP;
+			}
+
+			if (regs[op->dest.reg].offset == -state->stack_size) {
+
+				if (state->drap && cfa->base == CFI_BP_INDIRECT &&
+				    op->dest.type == OP_DEST_REG &&
+				    op->dest.reg == state->drap_reg) {
+
+					/* drap: pop %drap */
+					cfa->base = state->drap_reg;
+					cfa->offset = 0;
+				}
+
+				restore_reg(state, op->dest.reg);
+			}
+
+			state->stack_size -= 8;
+			if (cfa->base == CFI_SP)
+				cfa->offset -= 8;
+
+			break;
+
+		case OP_SRC_REG_INDIRECT:
+			if (state->drap && op->src.reg == CFI_BP &&
+			    op->src.offset == regs[op->dest.reg].offset) {
+
+				/* drap: mov disp(%rbp), %reg */
+				if (op->dest.reg == state->drap_reg) {
+					cfa->base = state->drap_reg;
+					cfa->offset = 0;
+				}
+
+				restore_reg(state, op->dest.reg);
+
+			} else if (op->src.reg == cfa->base &&
+			    op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
+
+				/* mov disp(%rbp), %reg */
+				/* mov disp(%rsp), %reg */
+				restore_reg(state, op->dest.reg);
+			}
+
+			break;
+
+		default:
+			WARN_FUNC("unknown stack-related instruction",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		break;
+
+	case OP_DEST_PUSH:
+		state->stack_size += 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset += 8;
+
+		if (op->src.type != OP_SRC_REG)
+			break;
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: push %drap */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = -state->stack_size;
+
+				/* save drap so we know when to undefine it */
+				save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+
+			} else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
+
+				/* drap: push %rbp */
+				state->stack_size = 0;
+
+			} else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: push %reg */
+				save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
+			}
+
+		} else {
+
+			/* push %reg */
+			save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+		}
+
+		/* detect when asm code uses rbp as a scratch register */
+		if (!nofp && insn->func && op->src.reg == CFI_BP &&
+		    cfa->base != CFI_BP)
+			state->bp_scratch = true;
+		break;
+
+	case OP_DEST_REG_INDIRECT:
+
+		if (state->drap) {
+			if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+
+				/* drap: mov %drap, disp(%rbp) */
+				cfa->base = CFI_BP_INDIRECT;
+				cfa->offset = op->dest.offset;
+
+				/* save drap so we know when to undefine it */
+				save_reg(state, op->src.reg, CFI_CFA, op->dest.offset);
+			}
+
+			else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+
+				/* drap: mov reg, disp(%rbp) */
+				save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
+			}
+
+		} else if (op->dest.reg == cfa->base) {
+
+			/* mov reg, disp(%rbp) */
+			/* mov reg, disp(%rsp) */
+			save_reg(state, op->src.reg, CFI_CFA,
+				 op->dest.offset - state->cfa.offset);
+		}
+
+		break;
+
+	case OP_DEST_LEAVE:
+		if ((!state->drap && cfa->base != CFI_BP) ||
+		    (state->drap && cfa->base != state->drap_reg)) {
+			WARN_FUNC("leave instruction with modified stack frame",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* leave (mov %rbp, %rsp; pop %rbp) */
+
+		state->stack_size = -state->regs[CFI_BP].offset - 8;
+		restore_reg(state, CFI_BP);
+
+		if (!state->drap) {
+			cfa->base = CFI_SP;
+			cfa->offset -= 8;
+		}
+
+		break;
+
+	case OP_DEST_MEM:
+		if (op->src.type != OP_SRC_POP) {
+			WARN_FUNC("unknown stack-related memory operation",
+				  insn->sec, insn->offset);
+			return -1;
+		}
+
+		/* pop mem */
+		state->stack_size -= 8;
+		if (cfa->base == CFI_SP)
+			cfa->offset -= 8;
+
+		break;
+
+	default:
+		WARN_FUNC("unknown stack-related instruction",
+			  insn->sec, insn->offset);
+		return -1;
+	}
+
+	return 0;
+}
+
+static bool insn_state_match(struct instruction *insn, struct insn_state *state)
+{
+	struct insn_state *state1 = &insn->state, *state2 = state;
+	int i;
+
+	if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
+		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
+			  insn->sec, insn->offset,
+			  state1->cfa.base, state1->cfa.offset,
+			  state2->cfa.base, state2->cfa.offset);
+
+	} else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
+		for (i = 0; i < CFI_NUM_REGS; i++) {
+			if (!memcmp(&state1->regs[i], &state2->regs[i],
+				    sizeof(struct cfi_reg)))
+				continue;
+
+			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
+				  insn->sec, insn->offset,
+				  i, state1->regs[i].base, state1->regs[i].offset,
+				  i, state2->regs[i].base, state2->regs[i].offset);
+			break;
+		}
+
+	} else if (state1->drap != state2->drap ||
+		 (state1->drap && state1->drap_reg != state2->drap_reg)) {
+		WARN_FUNC("stack state mismatch: drap1=%d(%d) drap2=%d(%d)",
+			  insn->sec, insn->offset,
+			  state1->drap, state1->drap_reg,
+			  state2->drap, state2->drap_reg);
+
+	} else
+		return true;
+
+	return false;
+}
+
+/*
+ * Follow the branch starting at the given instruction, and recursively follow
+ * any other branches (jumps).  Meanwhile, track the frame pointer state at
+ * each instruction and validate all the rules described in
+ * tools/objtool/Documentation/stack-validation.txt.
+ */
+static int validate_branch(struct objtool_file *file, struct instruction *first,
+			   struct insn_state state)
+{
+	struct alternative *alt;
+	struct instruction *insn;
+	struct section *sec;
+	struct symbol *func = NULL;
+	int ret;
+
+	insn = first;
+	sec = insn->sec;
+
+	if (insn->alt_group && list_empty(&insn->alts)) {
+		WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
+			  sec, insn->offset);
+		return -1;
+	}
+
+	while (1) {
+		if (file->c_file && insn->func) {
+			if (func && func != insn->func) {
+				WARN("%s() falls through to next function %s()",
+				     func->name, insn->func->name);
+				return 1;
+			}
+		}
+
+		func = insn->func;
+
+		if (insn->visited) {
+			if (!insn_state_match(insn, &state))
+				return 1;
+
+			return 0;
+		}
+
+		insn->state = state;
+
+		insn->visited = true;
+
+		list_for_each_entry(alt, &insn->alts, list) {
+			ret = validate_branch(file, alt->insn, state);
+			if (ret)
+				return 1;
+		}
+
+		switch (insn->type) {
+
+		case INSN_RETURN:
+			if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("return with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			if (state.bp_scratch) {
+				WARN("%s uses BP as a scratch register",
+				     insn->func->name);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_CALL:
+			if (is_fentry_call(insn))
+				break;
+
+			ret = dead_end_function(file, insn->call_dest);
+			if (ret == 1)
+				return 0;
+			if (ret == -1)
+				return 1;
+
+			/* fallthrough */
+		case INSN_CALL_DYNAMIC:
+			if (!nofp && func && !has_valid_stack_frame(&state)) {
+				WARN_FUNC("call without frame pointer save/setup",
+					  sec, insn->offset);
+				return 1;
+			}
+			break;
+
+		case INSN_JUMP_CONDITIONAL:
+		case INSN_JUMP_UNCONDITIONAL:
+			if (insn->jump_dest) {
+				ret = validate_branch(file, insn->jump_dest,
+						      state);
+				if (ret)
+					return 1;
+			} else if (func && has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			} /* else it's a sibling call */
+
+			if (insn->type == INSN_JUMP_UNCONDITIONAL)
+				return 0;
+
+			break;
+
+		case INSN_JUMP_DYNAMIC:
+			if (func && list_empty(&insn->alts) &&
+			    has_modified_stack_frame(&state)) {
+				WARN_FUNC("sibling call from callable instruction with modified stack frame",
+					  sec, insn->offset);
+				return 1;
+			}
+
+			return 0;
+
+		case INSN_STACK:
+			if (update_insn_state(insn, &state))
+				return -1;
+
+			break;
+
+		default:
+			break;
+		}
+
+		if (insn->dead_end)
+			return 0;
+
+		insn = next_insn_same_sec(file, insn);
+		if (!insn) {
+			WARN("%s: unexpected end of section", sec->name);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static bool is_kasan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+}
+
+static bool is_ubsan_insn(struct instruction *insn)
+{
+	return (insn->type == INSN_CALL &&
+		!strcmp(insn->call_dest->name,
+			"__ubsan_handle_builtin_unreachable"));
+}
+
+static bool ignore_unreachable_insn(struct instruction *insn)
+{
+	int i;
+
+	if (insn->ignore || insn->type == INSN_NOP)
+		return true;
+
+	/*
+	 * Ignore any unused exceptions.  This can happen when a whitelisted
+	 * function has an exception table entry.
+	 */
+	if (!strcmp(insn->sec->name, ".fixup"))
+		return true;
+
+	/*
+	 * Check if this (or a subsequent) instruction is related to
+	 * CONFIG_UBSAN or CONFIG_KASAN.
+	 *
+	 * End the search at 5 instructions to avoid going into the weeds.
+	 */
+	if (!insn->func)
+		return false;
+	for (i = 0; i < 5; i++) {
+
+		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
+			return true;
+
+		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
+			insn = insn->jump_dest;
+			continue;
+		}
+
+		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+			break;
+		insn = list_next_entry(insn, list);
+	}
+
+	return false;
+}
+
+static int validate_functions(struct objtool_file *file)
+{
+	struct section *sec;
+	struct symbol *func;
+	struct instruction *insn;
+	struct insn_state state;
+	int ret, warnings = 0;
+
+	clear_insn_state(&state);
+
+	state.cfa = initial_func_cfi.cfa;
+	memcpy(&state.regs, &initial_func_cfi.regs,
+	       CFI_NUM_REGS * sizeof(struct cfi_reg));
+	state.stack_size = initial_func_cfi.cfa.offset;
+
+	for_each_sec(file, sec) {
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			insn = find_insn(file, sec, func->offset);
+			if (!insn || insn->ignore)
+				continue;
+
+			ret = validate_branch(file, insn, state);
+			warnings += ret;
+		}
+	}
+
+	return warnings;
+}
+
+static int validate_reachable_instructions(struct objtool_file *file)
+{
+	struct instruction *insn;
+
+	if (file->ignore_unreachables)
+		return 0;
+
+	for_each_insn(file, insn) {
+		if (insn->visited || ignore_unreachable_insn(insn))
+			continue;
+
+		/*
+		 * gcov produces a lot of unreachable instructions.  If we get
+		 * an unreachable warning and the file has gcov enabled, just
+		 * ignore it, and all other such warnings for the file.  Do
+		 * this here because this is an expensive function.
+		 */
+		if (gcov_enabled(file))
+			return 0;
+
+		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void cleanup(struct objtool_file *file)
+{
+	struct instruction *insn, *tmpinsn;
+	struct alternative *alt, *tmpalt;
+
+	list_for_each_entry_safe(insn, tmpinsn, &file->insn_list, list) {
+		list_for_each_entry_safe(alt, tmpalt, &insn->alts, list) {
+			list_del(&alt->list);
+			free(alt);
+		}
+		list_del(&insn->list);
+		hash_del(&insn->hash);
+		free(insn);
+	}
+	elf_close(file->elf);
+}
+
+int check(const char *_objname, bool _nofp)
+{
+	struct objtool_file file;
+	int ret, warnings = 0;
+
+	objname = _objname;
+	nofp = _nofp;
+
+	file.elf = elf_open(objname);
+	if (!file.elf)
+		return 1;
+
+	INIT_LIST_HEAD(&file.insn_list);
+	hash_init(file.insn_hash);
+	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
+	file.rodata = find_section_by_name(file.elf, ".rodata");
+	file.ignore_unreachables = false;
+	file.c_file = find_section_by_name(file.elf, ".comment");
+
+	arch_initial_func_cfi_state(&initial_func_cfi);
+
+	ret = decode_sections(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (list_empty(&file.insn_list))
+		goto out;
+
+	ret = validate_functions(&file);
+	if (ret < 0)
+		goto out;
+	warnings += ret;
+
+	if (!warnings) {
+		ret = validate_reachable_instructions(&file);
+		if (ret < 0)
+			goto out;
+		warnings += ret;
+	}
+
+out:
+	cleanup(&file);
+
+	/* ignore warnings for now until we get all the code cleaned up */
+	if (ret || warnings)
+		return 0;
+	return 0;
+}
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
new file mode 100644
index 0000000..da85f5b
--- /dev/null
+++ b/tools/objtool/check.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CHECK_H
+#define _CHECK_H
+
+#include <stdbool.h>
+#include "elf.h"
+#include "cfi.h"
+#include "arch.h"
+#include <linux/hashtable.h>
+
+struct insn_state {
+	struct cfi_reg cfa;
+	struct cfi_reg regs[CFI_NUM_REGS];
+	int stack_size;
+	bool bp_scratch;
+	bool drap;
+	int drap_reg;
+};
+
+struct instruction {
+	struct list_head list;
+	struct hlist_node hash;
+	struct section *sec;
+	unsigned long offset;
+	unsigned int len;
+	unsigned char type;
+	unsigned long immediate;
+	bool alt_group, visited, dead_end, ignore;
+	struct symbol *call_dest;
+	struct instruction *jump_dest;
+	struct list_head alts;
+	struct symbol *func;
+	struct stack_op stack_op;
+	struct insn_state state;
+};
+
+struct objtool_file {
+	struct elf *elf;
+	struct list_head insn_list;
+	DECLARE_HASHTABLE(insn_hash, 16);
+	struct section *rodata, *whitelist;
+	bool ignore_unreachables, c_file;
+};
+
+int check(const char *objname, bool nofp);
+
+#define for_each_insn(file, insn)					\
+	list_for_each_entry(insn, &file->insn_list, list)
+
+#endif /* _CHECK_H */
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index d897702..1a7e8aa 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -37,6 +37,9 @@
 #define ELF_C_READ_MMAP ELF_C_READ
 #endif
 
+#define WARN_ELF(format, ...)					\
+	WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
 	struct section *sec;
@@ -139,12 +142,12 @@ static int read_sections(struct elf *elf)
 	int i;
 
 	if (elf_getshdrnum(elf->elf, &sections_nr)) {
-		perror("elf_getshdrnum");
+		WARN_ELF("elf_getshdrnum");
 		return -1;
 	}
 
 	if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
-		perror("elf_getshdrstrndx");
+		WARN_ELF("elf_getshdrstrndx");
 		return -1;
 	}
 
@@ -165,37 +168,36 @@ static int read_sections(struct elf *elf)
 
 		s = elf_getscn(elf->elf, i);
 		if (!s) {
-			perror("elf_getscn");
+			WARN_ELF("elf_getscn");
 			return -1;
 		}
 
 		sec->idx = elf_ndxscn(s);
 
 		if (!gelf_getshdr(s, &sec->sh)) {
-			perror("gelf_getshdr");
+			WARN_ELF("gelf_getshdr");
 			return -1;
 		}
 
 		sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
 		if (!sec->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			return -1;
 		}
 
-		sec->elf_data = elf_getdata(s, NULL);
-		if (!sec->elf_data) {
-			perror("elf_getdata");
+		sec->data = elf_getdata(s, NULL);
+		if (!sec->data) {
+			WARN_ELF("elf_getdata");
 			return -1;
 		}
 
-		if (sec->elf_data->d_off != 0 ||
-		    sec->elf_data->d_size != sec->sh.sh_size) {
+		if (sec->data->d_off != 0 ||
+		    sec->data->d_size != sec->sh.sh_size) {
 			WARN("unexpected data attributes for %s", sec->name);
 			return -1;
 		}
 
-		sec->data = (unsigned long)sec->elf_data->d_buf;
-		sec->len = sec->elf_data->d_size;
+		sec->len = sec->data->d_size;
 	}
 
 	/* sanity check, one more call to elf_nextscn() should return NULL */
@@ -232,15 +234,15 @@ static int read_symbols(struct elf *elf)
 
 		sym->idx = i;
 
-		if (!gelf_getsym(symtab->elf_data, i, &sym->sym)) {
-			perror("gelf_getsym");
+		if (!gelf_getsym(symtab->data, i, &sym->sym)) {
+			WARN_ELF("gelf_getsym");
 			goto err;
 		}
 
 		sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
 				       sym->sym.st_name);
 		if (!sym->name) {
-			perror("elf_strptr");
+			WARN_ELF("elf_strptr");
 			goto err;
 		}
 
@@ -322,8 +324,8 @@ static int read_relas(struct elf *elf)
 			}
 			memset(rela, 0, sizeof(*rela));
 
-			if (!gelf_getrela(sec->elf_data, i, &rela->rela)) {
-				perror("gelf_getrela");
+			if (!gelf_getrela(sec->data, i, &rela->rela)) {
+				WARN_ELF("gelf_getrela");
 				return -1;
 			}
 
@@ -362,12 +364,6 @@ struct elf *elf_open(const char *name)
 
 	INIT_LIST_HEAD(&elf->sections);
 
-	elf->name = strdup(name);
-	if (!elf->name) {
-		perror("strdup");
-		goto err;
-	}
-
 	elf->fd = open(name, O_RDONLY);
 	if (elf->fd == -1) {
 		perror("open");
@@ -376,12 +372,12 @@ struct elf *elf_open(const char *name)
 
 	elf->elf = elf_begin(elf->fd, ELF_C_READ_MMAP, NULL);
 	if (!elf->elf) {
-		perror("elf_begin");
+		WARN_ELF("elf_begin");
 		goto err;
 	}
 
 	if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
-		perror("gelf_getehdr");
+		WARN_ELF("gelf_getehdr");
 		goto err;
 	}
 
@@ -407,6 +403,12 @@ void elf_close(struct elf *elf)
 	struct symbol *sym, *tmpsym;
 	struct rela *rela, *tmprela;
 
+	if (elf->elf)
+		elf_end(elf->elf);
+
+	if (elf->fd > 0)
+		close(elf->fd);
+
 	list_for_each_entry_safe(sec, tmpsec, &elf->sections, list) {
 		list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
 			list_del(&sym->list);
@@ -421,11 +423,6 @@ void elf_close(struct elf *elf)
 		list_del(&sec->list);
 		free(sec);
 	}
-	if (elf->name)
-		free(elf->name);
-	if (elf->fd > 0)
-		close(elf->fd);
-	if (elf->elf)
-		elf_end(elf->elf);
+
 	free(elf);
 }
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 731973e..343968b 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -37,10 +37,9 @@ struct section {
 	DECLARE_HASHTABLE(rela_hash, 16);
 	struct section *base, *rela;
 	struct symbol *sym;
-	Elf_Data *elf_data;
+	Elf_Data *data;
 	char *name;
 	int idx;
-	unsigned long data;
 	unsigned int len;
 };
 
@@ -86,6 +85,7 @@ struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
 struct symbol *find_containing_func(struct section *sec, unsigned long offset);
 void elf_close(struct elf *elf);
 
-
+#define for_each_sec(file, sec)						\
+	list_for_each_entry(sec, &file->elf->sections, list)
 
 #endif /* _OBJTOOL_ELF_H */
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bff8abb..84f001d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -91,16 +91,16 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
 	alt->jump_or_nop = entry->jump_or_nop;
 
 	if (alt->group) {
-		alt->orig_len = *(unsigned char *)(sec->data + offset +
+		alt->orig_len = *(unsigned char *)(sec->data->d_buf + offset +
 						   entry->orig_len);
-		alt->new_len = *(unsigned char *)(sec->data + offset +
+		alt->new_len = *(unsigned char *)(sec->data->d_buf + offset +
 						  entry->new_len);
 	}
 
 	if (entry->feature) {
 		unsigned short feature;
 
-		feature = *(unsigned short *)(sec->data + offset +
+		feature = *(unsigned short *)(sec->data->d_buf + offset +
 					      entry->feature);
 
 		/*
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index ac7e075..afd9f7a 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -18,6 +18,13 @@
 #ifndef _WARN_H
 #define _WARN_H
 
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "elf.h"
+
 extern const char *objname;
 
 static inline char *offstr(struct section *sec, unsigned long offset)
@@ -57,4 +64,7 @@ static inline char *offstr(struct section *sec, unsigned long offset)
 	free(_str);					\
 })
 
+#define WARN_ELF(format, ...)				\
+	WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+
 #endif /* _WARN_H */
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index b0b3007..4b6cdbf 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -108,6 +108,9 @@
 script export-to-postgresql.py for more details, and to script
 call-graph-from-postgresql.py for an example of using the database.
 
+There is also script intel-pt-events.py which provides an example of how to
+unpack the raw data for power events and PTWRITE.
+
 As mentioned above, it is easy to capture too much data.  One way to limit the
 data captured is to use 'snapshot' mode which is explained further below.
 Refer to 'new snapshot option' and 'Intel PT modes of operation' further below.
@@ -364,6 +367,42 @@
 
 		CYC packets are not requested by default.
 
+pt		Specifies pass-through which enables the 'branch' config term.
+
+		The default config selects 'pt' if it is available, so a user will
+		never need to specify this term.
+
+branch		Enable branch tracing.  Branch tracing is enabled by default so to
+		disable branch tracing use 'branch=0'.
+
+		The default config selects 'branch' if it is available.
+
+ptw		Enable PTWRITE packets which are produced when a ptwrite instruction
+		is executed.
+
+		Support for this feature is indicated by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/ptwrite
+
+		which contains "1" if the feature is supported and
+		"0" otherwise.
+
+fup_on_ptw	Enable a FUP packet to follow the PTWRITE packet.  The FUP packet
+		provides the address of the ptwrite instruction.  In the absence of
+		fup_on_ptw, the decoder will use the address of the previous branch
+		if branch tracing is enabled, otherwise the address will be zero.
+		Note that fup_on_ptw will work even when branch tracing is disabled.
+
+pwr_evt		Enable power events.  The power events provide information about
+		changes to the CPU C-state.
+
+		Support for this feature is indicated by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/power_event_trace
+
+		which contains "1" if the feature is supported and
+		"0" otherwise.
+
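+For example, assuming the hardware reports "1" in both capability files above,
+PTWRITE packets and power event packets could be requested together with
+something like:
+
+	perf record -e intel_pt/ptw,pwr_evt/u -a sleep 1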
 
 new snapshot option
 -------------------
@@ -674,13 +713,15 @@
 
 which, in turn, is the same as
 
-	--itrace=ibxe
+	--itrace=ibxwpe
 
 The letters are:
 
 	i	synthesize "instructions" events
 	b	synthesize "branches" events
 	x	synthesize "transactions" events
+	w	synthesize "ptwrite" events
+	p	synthesize "power" events
 	c	synthesize branches events (calls only)
 	r	synthesize branches events (returns only)
 	e	synthesize tracing error events
@@ -699,7 +740,40 @@
 'flags' field can be used in perf script to determine whether the event is a
 transaction start, commit or abort.
 
-Error events are new.  They show where the decoder lost the trace.  Error events
+Note that "instructions", "branches" and "transactions" events depend on code
+flow packets which can be disabled by using the config term "branch=0".  Refer
+to the config terms section above.
+
+"ptwrite" events record the payload of the ptwrite instruction and whether
+"fup_on_ptw" was used.  "ptwrite" events depend on PTWRITE packets which are
+recorded only if the "ptw" config term was used.  Refer to the config terms
+section above.  perf script "synth" field displays "ptwrite" information like
+this: "ip: 0 payload: 0x123456789abcdef0"  where "ip" is 1 if "fup_on_ptw" was
+used.
+
+"Power" events correspond to power event packets and CBR (core-to-bus ratio)
+packets.  While CBR packets are always recorded when tracing is enabled, power
+event packets are recorded only if the "pwr_evt" config term was used.  Refer to
+the config terms section above.  The power events record information about
+C-state changes, whereas CBR is indicative of CPU frequency.  perf script
+"event,synth" fields display information like this:
+	cbr:  cbr: 22 freq: 2189 MHz (200%)
+	mwait:  hints: 0x60 extensions: 0x1
+	pwre:  hw: 0 cstate: 2 sub-cstate: 0
+	exstop:  ip: 1
+	pwrx:  deepest cstate: 2 last cstate: 2 wake reason: 0x4
+Where:
+	"cbr" includes the frequency and the percentage of maximum non-turbo
+	"mwait" shows mwait hints and extensions
+	"pwre" shows C-state transitions (to a C-state deeper than C0) and
+	whether initiated by hardware
+	"exstop" indicates execution stopped and whether the IP was recorded
+	exactly
+	"pwrx" indicates return to C0
+For more details refer to the Intel 64 and IA-32 Architectures Software
+Developer Manuals.
+
+Error events show where the decoder lost the trace.  Error events
 are quite important.  Users must know if what they are seeing is a complete
 picture or not.
 
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index e2a4c5e..a3abe04 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -3,13 +3,15 @@
 		c	synthesize branches events (calls only)
 		r	synthesize branches events (returns only)
 		x	synthesize transactions events
+		w	synthesize ptwrite events
+		p	synthesize power events
 		e	synthesize error events
 		d	create a debug log
 		g	synthesize a call chain (use with i or x)
 		l	synthesize last branch entries (use with i or x)
 		s       skip initial number of events
 
-	The default is all events i.e. the same as --itrace=ibxe
+	The default is all events i.e. the same as --itrace=ibxwpe
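+
+	For example, --itrace=wp would synthesize only the new ptwrite and
+	power events.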
 
 	In addition, the period (default 100000) for instructions events
 	can be specified in units of:
@@ -26,8 +28,8 @@
 	Also the number of last branch entries (default 64, max. 1024) for
 	instructions or transactions events can be specified.
 
-	It is also possible to skip events generated (instructions, branches, transactions)
-	at the beginning. This is useful to ignore initialization code.
+	It is also possible to skip events generated (instructions, branches, transactions,
+	ptwrite, power) at the beginning. This is useful to ignore initialization code.
 
 	--itrace=i0nss1000000
 
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index 6e6a8b2..721a447 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -48,6 +48,39 @@
 	Ranges of CPUs are specified with -: 0-2.
 	Default is to trace on all online CPUs.
 
+-T::
+--trace-funcs=::
+	Only trace functions given by the argument.  Multiple functions
+	can be given by using this option more than once.  The function
+	argument also can be a glob pattern.  It will be passed to
+	'set_ftrace_filter' in tracefs.
+
+-N::
+--notrace-funcs=::
+	Do not trace functions given by the argument.  Like the -T option,
+	this can be used more than once to specify multiple functions
+	(or glob patterns).  It will be passed to 'set_ftrace_notrace'
+	in tracefs.
+
+-G::
+--graph-funcs=::
+	Set graph filter on the given function (or a glob pattern).
+	This is useful for the function_graph tracer only and enables
+	tracing for functions executed from the given function.
+	This can be used more than once to specify multiple functions.
+	It will be passed to 'set_graph_function' in tracefs.
+
+-g::
+--nograph-funcs=::
+	Set graph notrace filter on the given function (or a glob pattern).
+	Like the -G option, this is useful for the function_graph tracer only
+	and disables tracing for functions executed from the given function.
+	This can be used more than once to specify multiple functions.
+	It will be passed to 'set_graph_notrace' in tracefs.
+
+-D::
+--graph-depth=::
+	Set the maximum depth for the function graph tracer to follow.
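+
+	An illustrative combination of the new options (the glob pattern here
+	is only an example):
+
+	  perf ftrace -t function_graph -G 'kmem_cache_*' -D 3 sleep 1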
 
 SEE ALSO
 --------
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index e6c9902..165c2b1 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -240,9 +240,13 @@
  or
  ./perf probe --add='schedule:12 cpu'
 
- this will add one or more probes which has the name start with "schedule".
+Add one or more probes which have names starting with "schedule".
 
- Add probes on lines in schedule() function which calls update_rq_clock().
+ ./perf probe schedule*
+ or
+ ./perf probe --add='schedule*'
+
+Add probes on lines in the schedule() function which call update_rq_clock().
 
  ./perf probe 'schedule;update_rq_clock*'
  or
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index dfbb506..142606c 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -39,7 +39,7 @@
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 54acba2..51ec2d2 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -149,10 +149,8 @@
 		print "id=%d, args=%s\n" % \
 		(id, args),
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-		common_pid, common_comm):
-		print_header(event_name, common_cpu, common_secs, common_nsecs,
-		common_pid, common_comm)
+def trace_unhandled(event_name, context, event_fields_dict):
+		print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
 
 def print_header(event_name, cpu, secs, nsecs, pid, comm):
 	print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@
 process can be generalized to any tracepoint or set of tracepoints
 you're interested in - basically find the tracepoint(s) you're
 interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing events for
+'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
 detailed event and field info, record the corresponding trace data
 using 'perf record', passing it the list of interesting events,
 generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@
 scripts listed by the 'perf script -l' command e.g.:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -383,8 +381,6 @@
 
 ----
 # ls -al kernel-source/tools/perf/scripts/python
-
-root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
 total 32
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@
 should show a new entry for your script:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -437,7 +433,7 @@
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@
 gives scripts a chance to do setup tasks:
 
 ----
-def trace_begin:
+def trace_begin():
     pass
 ----
 
@@ -541,7 +537,7 @@
  as display results:
 
 ----
-def trace_end:
+def trace_end():
     pass
 ----
 
@@ -550,8 +546,7 @@
  of common arguments are passed into it:
 
 ----
-def trace_unhandled(event_name, context, common_cpu, common_secs,
-        common_nsecs, common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
     pass
 ----
 
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 3517e20..5ee8796 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,8 +116,9 @@
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-        srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
-        callindent, insn, insnlen. Field list can be prepended with the type, trace, sw or hw,
+        srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn, brstackoff,
+        callindent, insn, insnlen, synth.
+        Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -F sw:comm,tid,time,ip,sym  and -F trace:time,cpu,trace
 
@@ -130,6 +131,14 @@
 	i.e., the specified fields apply to all event types if the type string
 	is not given.
 
+	In addition to overriding fields, it is also possible to add or remove
+	fields from the defaults. For example
+
+		-F -cpu,+insn
+
+	removes the cpu field and adds the insn field. Adding/removing fields
+	cannot be mixed with normal overriding.
+
 	The arguments are processed in the order received. A later usage can
 	reset a prior request. e.g.:
 
@@ -185,6 +194,9 @@
 	instruction bytes and the instruction length of the current
 	instruction.
 
+	The synth field is used by synthesized events, which may be created
+	during Instruction Trace decoding.
+
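+	For example, the synthesized event payloads could be displayed with
+	something like:
+
+		perf script -F event,synth
+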
 	Finally, a user may not set fields to none for all event types.
 	i.e., -F "" is not allowed.
 
@@ -203,6 +215,8 @@
 	is printed. This is the full execution path leading to the sample. This is only supported when the
 	sample was recorded with perf record -b or -j any.
 
+	The brstackoff field will print an offset into a specific dso/binary.
+
 -k::
 --vmlinux=<file>::
         vmlinux pathname
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index bd0e441..6980763 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -239,6 +239,20 @@
 --no-merge::
 Do not merge results from same PMUs.
 
+--smi-cost::
+Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
+
+During the measurement, /sys/devices/cpu/freeze_on_smi will be set to
+freeze core counters on SMI.
+The aperf counter will not be affected by the setting.
+The cost of SMI can be measured by (aperf - unhalted core cycles).
+
+In practice, the percentage of SMI cycles is very useful for performance
+oriented analysis. --metric-only will be applied by default.
+The output is SMI cycles%, equal to (aperf - unhalted core cycles) / aperf.
+
+Users who want to get the actual value can apply --no-metric-only.
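+
+For example, system-wide SMI cost could be measured over one second with
+something like:
+
+  perf stat -a --smi-cost -- sleep 1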
+
 EXAMPLES
 --------
 
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8354d04..bdf0e87 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,18 +19,18 @@
 
 include $(srctree)/tools/scripts/Makefile.arch
 
-$(call detected_var,ARCH)
+$(call detected_var,SRCARCH)
 
 NO_PERF_REGS := 1
 
 # Additional ARCH settings for ppc
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
 endif
 
 # Additional ARCH settings for x86
-ifeq ($(ARCH),x86)
+ifeq ($(SRCARCH),x86)
   $(call detected,CONFIG_X86)
   ifeq (${IS_64_BIT}, 1)
     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@
   NO_PERF_REGS := 0
 endif
 
-ifeq ($(ARCH),arm)
+ifeq ($(SRCARCH),arm)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
-ifeq ($(ARCH),arm64)
+ifeq ($(SRCARCH),arm64)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
 endif
@@ -61,7 +61,7 @@
 # Disable it on all other architectures in case libdw unwind
 # support is detected in system. Add supported architectures
 # to the check.
-ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm powerpc))
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
@@ -115,9 +115,9 @@
 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
-FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
 # include ARCH specific config
--include $(src-perf)/arch/$(ARCH)/Makefile
+-include $(src-perf)/arch/$(SRCARCH)/Makefile
 
 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
   CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@
 endif
 
 INC_FLAGS += -I$(src-perf)/util/include
-INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include
+INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
 INC_FLAGS += -I$(srctree)/tools/include/uapi
 INC_FLAGS += -I$(srctree)/tools/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
-INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
 
 # $(obj-perf)      for generated common-cmds.h
 # $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@
 
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
-      msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+      msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
       NO_DWARF := 1
     else
       CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@
         CFLAGS += -DHAVE_BPF_PROLOGUE
         $(call detected,CONFIG_BPF_PROLOGUE)
       else
-        msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
+        msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
       endif
     else
       msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@
   endif
 endif
 
-ifeq ($(ARCH),powerpc)
+ifeq ($(SRCARCH),powerpc)
   ifndef NO_DWARF
     CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
   endif
@@ -487,7 +487,7 @@
 endif
 
 ifndef NO_LOCAL_LIBUNWIND
-  ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
+  ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
     $(call feature_check,libunwind-debug-frame)
     ifneq ($(feature-libunwind-debug-frame), 1)
       msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@
       NO_PERF_READ_VDSO32 := 1
     endif
   endif
-  ifneq ($(ARCH), x86)
+  ifneq ($(SRCARCH), x86)
     NO_PERF_READ_VDSOX32 := 1
   endif
   ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@
 endif
 
 ifndef NO_AUXTRACE
-  ifeq ($(ARCH),x86)
+  ifeq ($(SRCARCH),x86)
     ifeq ($(feature-get_cpuid), 0)
       msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
       NO_AUXTRACE := 1
@@ -872,7 +872,7 @@
 ETC_PERFCONFIG = etc/perfconfig
 endif
 ifndef lib
-ifeq ($(ARCH)$(IS_64_BIT), x861)
+ifeq ($(SRCARCH)$(IS_64_BIT), x861)
 lib = lib64
 else
 lib = lib
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 79fe31f..5008f51 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -226,7 +226,7 @@
 
 ifeq ($(config),0)
 include $(srctree)/tools/scripts/Makefile.arch
--include arch/$(ARCH)/Makefile
+-include arch/$(SRCARCH)/Makefile
 endif
 
 # The FEATURE_DUMP_EXPORT holds location of the actual
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 109eb75..d9b6af83 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
 libperf-y += common.o
-libperf-y += $(ARCH)/
+libperf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 29361d9..7ce3d1a 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -17,6 +17,7 @@
 
 #include <api/fs/fs.h>
 #include <linux/bitops.h>
+#include <linux/compiler.h>
 #include <linux/coresight-pmu.h>
 #include <linux/kernel.h>
 #include <linux/log2.h>
@@ -202,19 +203,18 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
 			  opts->auxtrace_snapshot_size);
 
-	if (cs_etm_evsel) {
-		/*
-		 * To obtain the auxtrace buffer file descriptor, the auxtrace
-		 * event must come first.
-		 */
-		perf_evlist__to_front(evlist, cs_etm_evsel);
-		/*
-		 * In the case of per-cpu mmaps, we need the CPU on the
-		 * AUX event.
-		 */
-		if (!cpu_map__empty(cpus))
-			perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
-	}
+	/*
+	 * To obtain the auxtrace buffer file descriptor, the auxtrace
+	 * event must come first.
+	 */
+	perf_evlist__to_front(evlist, cs_etm_evsel);
+
+	/*
+	 * In the case of per-cpu mmaps, we need the CPU on the
+	 * AUX event.
+	 */
+	if (!cpu_map__empty(cpus))
+		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
 
 	/* Add dummy event to keep tracking */
 	if (opts->full_auxtrace) {
@@ -583,8 +583,7 @@ static FILE *cs_device__open_file(const char *name)
 
 }
 
-static __attribute__((format(printf, 2, 3)))
-int cs_device__print_file(const char *name, const char *fmt, ...)
+static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
 {
 	va_list args;
 	FILE *file;
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 837067f..6b40e9f 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
 
 const char *const powerpc_triplets[] = {
 	"powerpc-unknown-linux-gnu-",
+	"powerpc-linux-gnu-",
 	"powerpc64-unknown-linux-gnu-",
 	"powerpc64-linux-gnu-",
 	"powerpc64le-linux-gnu-",
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 90ad64b..2e659531 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -5,4 +5,6 @@
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+
 libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/unwind-libdw.c b/tools/perf/arch/powerpc/util/unwind-libdw.c
new file mode 100644
index 0000000..3a24b3c4
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/unwind-libdw.c
@@ -0,0 +1,73 @@
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+#include "../../util/event.h"
+
+/* See backends/ppc_initreg.c and backends/ppc_regs.c in elfutils.  */
+static const int special_regs[3][2] = {
+	{ 65, PERF_REG_POWERPC_LINK },
+	{ 101, PERF_REG_POWERPC_XER },
+	{ 109, PERF_REG_POWERPC_CTR },
+};
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+	struct unwind_info *ui = arg;
+	struct regs_dump *user_regs = &ui->sample->user_regs;
+	Dwarf_Word dwarf_regs[32], dwarf_nip;
+	size_t i;
+
+#define REG(r) ({						\
+	Dwarf_Word val = 0;					\
+	perf_reg_value(&val, user_regs, PERF_REG_POWERPC_##r);	\
+	val;							\
+})
+
+	dwarf_regs[0]  = REG(R0);
+	dwarf_regs[1]  = REG(R1);
+	dwarf_regs[2]  = REG(R2);
+	dwarf_regs[3]  = REG(R3);
+	dwarf_regs[4]  = REG(R4);
+	dwarf_regs[5]  = REG(R5);
+	dwarf_regs[6]  = REG(R6);
+	dwarf_regs[7]  = REG(R7);
+	dwarf_regs[8]  = REG(R8);
+	dwarf_regs[9]  = REG(R9);
+	dwarf_regs[10] = REG(R10);
+	dwarf_regs[11] = REG(R11);
+	dwarf_regs[12] = REG(R12);
+	dwarf_regs[13] = REG(R13);
+	dwarf_regs[14] = REG(R14);
+	dwarf_regs[15] = REG(R15);
+	dwarf_regs[16] = REG(R16);
+	dwarf_regs[17] = REG(R17);
+	dwarf_regs[18] = REG(R18);
+	dwarf_regs[19] = REG(R19);
+	dwarf_regs[20] = REG(R20);
+	dwarf_regs[21] = REG(R21);
+	dwarf_regs[22] = REG(R22);
+	dwarf_regs[23] = REG(R23);
+	dwarf_regs[24] = REG(R24);
+	dwarf_regs[25] = REG(R25);
+	dwarf_regs[26] = REG(R26);
+	dwarf_regs[27] = REG(R27);
+	dwarf_regs[28] = REG(R28);
+	dwarf_regs[29] = REG(R29);
+	dwarf_regs[30] = REG(R30);
+	dwarf_regs[31] = REG(R31);
+	if (!dwfl_thread_state_registers(thread, 0, 32, dwarf_regs))
+		return false;
+
+	dwarf_nip = REG(NIP);
+	dwfl_thread_state_register_pc(thread, dwarf_nip);
+	for (i = 0; i < ARRAY_SIZE(special_regs); i++) {
+		Dwarf_Word val = 0;
+		perf_reg_value(&val, user_regs, special_regs[i][1]);
+		if (!dwfl_thread_state_registers(thread,
+						 special_regs[i][0], 1,
+						 &val))
+			return false;
+	}
+
+	return true;
+}
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
index 0f196ee..3cbf6fa 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-32.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -1664,3 +1664,15 @@
 "0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
 {{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
 "0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
+{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
+"f3 0f ae 20          \tptwritel (%eax)",},
+{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
+{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},
+{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
+"f3 0f ae 20          \tptwritel (%eax)",},
+{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
+"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
+{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
index af25bc8..aa512fa 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-64.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -1696,3 +1696,33 @@
 "0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
 {{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
 "41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
+{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
+"f3 0f ae 20          \tptwritel (%rax)",},
+{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
+"f3 41 0f ae 20       \tptwritel (%r8)",},
+{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
+{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
+{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
+{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
+"f3 0f ae 20          \tptwritel (%rax)",},
+{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
+"f3 41 0f ae 20       \tptwritel (%r8)",},
+{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
+{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
+"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
+{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
+{{0xf3, 0x48, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
+"f3 48 0f ae 20       \tptwriteq (%rax)",},
+{{0xf3, 0x49, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
+"f3 49 0f ae 20       \tptwriteq (%r8)",},
+{{0xf3, 0x48, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 48 0f ae 24 25 78 56 34 12 \tptwriteq 0x12345678",},
+{{0xf3, 0x48, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 48 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%rax,%rcx,8)",},
+{{0xf3, 0x49, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"f3 49 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%r8,%rcx,8)",},
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
index 979487d..6cdb65d 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-src.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -1343,6 +1343,26 @@ int main(void)
 	asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
 	asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
 
+	/* ptwrite */
+
+	asm volatile("ptwrite (%rax)");
+	asm volatile("ptwrite (%r8)");
+	asm volatile("ptwrite (0x12345678)");
+	asm volatile("ptwrite 0x12345678(%rax,%rcx,8)");
+	asm volatile("ptwrite 0x12345678(%r8,%rcx,8)");
+
+	asm volatile("ptwritel (%rax)");
+	asm volatile("ptwritel (%r8)");
+	asm volatile("ptwritel (0x12345678)");
+	asm volatile("ptwritel 0x12345678(%rax,%rcx,8)");
+	asm volatile("ptwritel 0x12345678(%r8,%rcx,8)");
+
+	asm volatile("ptwriteq (%rax)");
+	asm volatile("ptwriteq (%r8)");
+	asm volatile("ptwriteq (0x12345678)");
+	asm volatile("ptwriteq 0x12345678(%rax,%rcx,8)");
+	asm volatile("ptwriteq 0x12345678(%r8,%rcx,8)");
+
 #else  /* #ifdef __x86_64__ */
 
 	/* bound r32, mem (same op code as EVEX prefix) */
@@ -2653,6 +2673,16 @@ int main(void)
 	asm volatile("xrstors (0x12345678)");
 	asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
 
+	/* ptwrite */
+
+	asm volatile("ptwrite (%eax)");
+	asm volatile("ptwrite (0x12345678)");
+	asm volatile("ptwrite 0x12345678(%eax,%ecx,8)");
+
+	asm volatile("ptwritel (%eax)");
+	asm volatile("ptwritel (0x12345678)");
+	asm volatile("ptwritel 0x12345678(%eax,%ecx,8)");
+
 #endif /* #ifndef __x86_64__ */
 
 	/* Following line is a marker for the awk script - do not change */
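
The new table entries and inline asm exercise PTWRITE, whose encoding is f3 0f ae /4 as the byte patterns show. On toolchains whose assembler does not know the mnemonic yet, the same bytes can be emitted by hand; a hypothetical sketch (the instruction raises #UD on CPUs without PTWRITE, so it is only built here, never executed):

    #include <stdio.h>

    /* Hypothetical: hand-encode "ptwritel (%rax)" (f3 0f ae 20) for assemblers
     * that lack the mnemonic. Executing it traps unless the CPU supports
     * PTWRITE, so main() only references the stub without calling it. */
    static void ptwritel_mem(const unsigned int *p)
    {
            asm volatile(".byte 0xf3, 0x0f, 0xae, 0x20"     /* ptwritel (%rax) */
                         : : "a"(p) : "memory");
    }

    int main(void)
    {
            printf("ptwritel stub at %p\n", (void *)ptwritel_mem);
            return 0;
    }
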
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index af2bce7..781df40 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -35,10 +35,6 @@
 #define KiB_MASK(x) (KiB(x) - 1)
 #define MiB_MASK(x) (MiB(x) - 1)
 
-#define INTEL_BTS_DFLT_SAMPLE_SIZE	KiB(4)
-
-#define INTEL_BTS_MAX_SAMPLE_SIZE	KiB(60)
-
 struct intel_bts_snapshot_ref {
 	void	*ref_buf;
 	size_t	ref_offset;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index f630de0..9535be5 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -40,10 +40,6 @@
 #define KiB_MASK(x) (KiB(x) - 1)
 #define MiB_MASK(x) (MiB(x) - 1)
 
-#define INTEL_PT_DEFAULT_SAMPLE_SIZE	KiB(4)
-
-#define INTEL_PT_MAX_SAMPLE_SIZE	KiB(60)
-
 #define INTEL_PT_PSB_PERIOD_NEAR	256
 
 struct intel_pt_snapshot_ref {
@@ -196,6 +192,7 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 	int psb_cyc, psb_periods, psb_period;
 	int pos = 0;
 	u64 config;
+	char c;
 
 	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
 
@@ -229,6 +226,10 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 		}
 	}
 
+	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
+		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
+
 	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
 
 	intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
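
The perf_pmu__scan_file() probes make the default intel_pt config mention ",pt,branch" only when the running kernel actually exposes those format terms, so older kernels keep getting the plain default. Outside the perf tree the probe amounts to an existence check on the PMU's sysfs format files; a hedged sketch (the sysfs path is an assumption about where the intel_pt PMU sits):

    #include <stdio.h>
    #include <unistd.h>

    /* Assumed sysfs location of the intel_pt PMU's format terms. */
    #define PT_FMT "/sys/bus/event_source/devices/intel_pt/format/"

    int main(void)
    {
            /* Mirror the tool's logic: only mention pt,branch if both exist. */
            if (access(PT_FMT "pt", R_OK) == 0 &&
                access(PT_FMT "branch", R_OK) == 0)
                    puts("default config gains: ,pt,branch");
            else
                    puts("default config: tsc only");
            return 0;
    }
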
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 27de0c8..469d65b 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -700,7 +700,7 @@ static inline uint32_t lfsr_32(uint32_t lfsr)
  * kernel (KSM, zero page, etc.) cannot optimize away RAM
  * accesses:
  */
-static inline u64 access_data(u64 *data __attribute__((unused)), u64 val)
+static inline u64 access_data(u64 *data, u64 val)
 {
 	if (g->p.data_reads)
 		val += *data;
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 620a467..475999e 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -1725,10 +1725,10 @@ static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
 				tok; tok = strtok_r(NULL, ", ", &tmp)) {	\
 			ret = _fn(hpp_list, tok);				\
 			if (ret == -EINVAL) {					\
-				error("Invalid --fields key: `%s'", tok);	\
+				pr_err("Invalid --fields key: `%s'", tok);	\
 				break;						\
 			} else if (ret == -ESRCH) {				\
-				error("Unknown --fields key: `%s'", tok);	\
+				pr_err("Unknown --fields key: `%s'", tok);	\
 				break;						\
 			}							\
 		}								\
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index 80668fa..ece4558 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -156,7 +156,7 @@ static int parse_config_arg(char *arg, char **var, char **value)
 
 int cmd_config(int argc, const char **argv)
 {
-	int i, ret = 0;
+	int i, ret = -1;
 	struct perf_config_set *set;
 	char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
 	const char *config_filename;
@@ -186,10 +186,8 @@ int cmd_config(int argc, const char **argv)
 	 * because of reinitializing with options config file location.
 	 */
 	set = perf_config_set__new();
-	if (!set) {
-		ret = -1;
+	if (!set)
 		goto out_err;
-	}
 
 	switch (actions) {
 	case ACTION_LIST:
@@ -197,41 +195,54 @@ int cmd_config(int argc, const char **argv)
 			pr_err("Error: takes no arguments\n");
 			parse_options_usage(config_usage, config_options, "l", 1);
 		} else {
-			ret = show_config(set);
-			if (ret < 0)
+			if (show_config(set) < 0) {
 				pr_err("Nothing configured, "
 				       "please check your %s \n", config_filename);
+				goto out_err;
+			}
 		}
 		break;
 	default:
-		if (argc) {
-			for (i = 0; argv[i]; i++) {
-				char *var, *value;
-				char *arg = strdup(argv[i]);
-
-				if (!arg) {
-					pr_err("%s: strdup failed\n", __func__);
-					ret = -1;
-					break;
-				}
-
-				if (parse_config_arg(arg, &var, &value) < 0) {
-					free(arg);
-					ret = -1;
-					break;
-				}
-
-				if (value == NULL)
-					ret = show_spec_config(set, var);
-				else
-					ret = set_config(set, config_filename, var, value);
-				free(arg);
-			}
-		} else
+		if (!argc) {
 			usage_with_options(config_usage, config_options);
+			break;
+		}
+
+		for (i = 0; argv[i]; i++) {
+			char *var, *value;
+			char *arg = strdup(argv[i]);
+
+			if (!arg) {
+				pr_err("%s: strdup failed\n", __func__);
+				goto out_err;
+			}
+
+			if (parse_config_arg(arg, &var, &value) < 0) {
+				free(arg);
+				goto out_err;
+			}
+
+			if (value == NULL) {
+				if (show_spec_config(set, var) < 0) {
+					pr_err("%s is not configured: %s\n",
+					       var, config_filename);
+					free(arg);
+					goto out_err;
+				}
+			} else {
+				if (set_config(set, config_filename, var, value) < 0) {
+					pr_err("Failed to set '%s=%s' on %s\n",
+					       var, value, config_filename);
+					free(arg);
+					goto out_err;
+				}
+			}
+			free(arg);
+		}
 	}
 
-	perf_config_set__delete(set);
+	ret = 0;
 out_err:
+	perf_config_set__delete(set);
 	return ret;
 }
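
cmd_config() now starts from ret = -1 and funnels every failure through out_err, so perf_config_set__delete() runs on all paths and only the single success point flips ret to 0. A self-contained sketch of that default-to-failure, goto-cleanup idiom (hypothetical names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical resource for illustration. */
    struct cfg { char *name; };

    static int do_work(struct cfg *c) { return c->name[0] ? 0 : -1; }

    static int run(const char *name)
    {
            int ret = -1;                   /* default to failure */
            struct cfg *c = calloc(1, sizeof(*c));

            if (!c)
                    goto out_err;           /* nothing else to unwind yet */

            c->name = strdup(name);
            if (!c->name || do_work(c) < 0)
                    goto out_err;

            ret = 0;                        /* single success point */
    out_err:
            if (c)
                    free(c->name);          /* free(NULL) is harmless */
            free(c);
            return ret;
    }

    int main(void)
    {
            printf("run() = %d\n", run("report.queue-size"));
            return 0;
    }
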
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index eec5df8..0cd4cf6 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1302,7 +1302,10 @@ static int diff__config(const char *var, const char *value,
 			void *cb __maybe_unused)
 {
 	if (!strcmp(var, "diff.order")) {
-		sort_compute = perf_config_int(var, value);
+		int ret;
+		if (perf_config_int(&ret, var, value) < 0)
+			return -1;
+		sort_compute = ret;
 		return 0;
 	}
 	if (!strcmp(var, "diff.compute")) {
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 9e0b35c..dd26c62 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -28,9 +28,19 @@
 #define DEFAULT_TRACER  "function_graph"
 
 struct perf_ftrace {
-	struct perf_evlist *evlist;
-	struct target target;
-	const char *tracer;
+	struct perf_evlist	*evlist;
+	struct target		target;
+	const char		*tracer;
+	struct list_head	filters;
+	struct list_head	notrace;
+	struct list_head	graph_funcs;
+	struct list_head	nograph_funcs;
+	int			graph_depth;
+};
+
+struct filter_entry {
+	struct list_head	list;
+	char			name[];
 };
 
 static bool done;
@@ -61,6 +71,7 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
 	int fd, ret = -1;
 	ssize_t size = strlen(val);
 	int flags = O_WRONLY;
+	char errbuf[512];
 
 	file = get_tracing_file(name);
 	if (!file) {
@@ -75,14 +86,16 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
 
 	fd = open(file, flags);
 	if (fd < 0) {
-		pr_debug("cannot open tracing file: %s\n", name);
+		pr_debug("cannot open tracing file: %s: %s\n",
+			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
 		goto out;
 	}
 
 	if (write(fd, val, size) == size)
 		ret = 0;
 	else
-		pr_debug("write '%s' to tracing/%s failed\n", val, name);
+		pr_debug("write '%s' to tracing/%s failed: %s\n",
+			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
 
 	close(fd);
 out:
@@ -101,6 +114,7 @@ static int append_tracing_file(const char *name, const char *val)
 }
 
 static int reset_tracing_cpu(void);
+static void reset_tracing_filters(void);
 
 static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
 {
@@ -116,6 +130,10 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
 	if (reset_tracing_cpu() < 0)
 		return -1;
 
+	if (write_tracing_file("max_graph_depth", "0") < 0)
+		return -1;
+
+	reset_tracing_filters();
 	return 0;
 }
 
@@ -181,6 +199,68 @@ static int reset_tracing_cpu(void)
 	return ret;
 }
 
+static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
+{
+	struct filter_entry *pos;
+
+	list_for_each_entry(pos, funcs, list) {
+		if (append_tracing_file(filter_file, pos->name) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int set_tracing_filters(struct perf_ftrace *ftrace)
+{
+	int ret;
+
+	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
+	if (ret < 0)
+		return ret;
+
+	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
+	if (ret < 0)
+		return ret;
+
+	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
+	if (ret < 0)
+		return ret;
+
+	/* old kernels do not have this filter */
+	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
+
+	return ret;
+}
+
+static void reset_tracing_filters(void)
+{
+	write_tracing_file("set_ftrace_filter", " ");
+	write_tracing_file("set_ftrace_notrace", " ");
+	write_tracing_file("set_graph_function", " ");
+	write_tracing_file("set_graph_notrace", " ");
+}
+
+static int set_tracing_depth(struct perf_ftrace *ftrace)
+{
+	char buf[16];
+
+	if (ftrace->graph_depth == 0)
+		return 0;
+
+	if (ftrace->graph_depth < 0) {
+		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
+		return -1;
+	}
+
+	snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
+
+	if (write_tracing_file("max_graph_depth", buf) < 0)
+		return -1;
+
+	return 0;
+}
+
 static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
 {
 	char *trace_file;
@@ -223,11 +303,23 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
 		goto out_reset;
 	}
 
+	if (set_tracing_filters(ftrace) < 0) {
+		pr_err("failed to set tracing filters\n");
+		goto out_reset;
+	}
+
+	if (set_tracing_depth(ftrace) < 0) {
+		pr_err("failed to set graph depth\n");
+		goto out_reset;
+	}
+
 	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
 		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
 		goto out_reset;
 	}
 
+	setup_pager();
+
 	trace_file = get_tracing_file("trace_pipe");
 	if (!trace_file) {
 		pr_err("failed to open trace_pipe\n");
@@ -251,8 +343,6 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
 		goto out_close_fd;
 	}
 
-	setup_pager();
-
 	perf_evlist__start_workload(ftrace->evlist);
 
 	while (!done) {
@@ -307,6 +397,32 @@ static int perf_ftrace_config(const char *var, const char *value, void *cb)
 	return -1;
 }
 
+static int parse_filter_func(const struct option *opt, const char *str,
+			     int unset __maybe_unused)
+{
+	struct list_head *head = opt->value;
+	struct filter_entry *entry;
+
+	entry = malloc(sizeof(*entry) + strlen(str) + 1);
+	if (entry == NULL)
+		return -ENOMEM;
+
+	strcpy(entry->name, str);
+	list_add_tail(&entry->list, head);
+
+	return 0;
+}
+
+static void delete_filter_func(struct list_head *head)
+{
+	struct filter_entry *pos, *tmp;
+
+	list_for_each_entry_safe(pos, tmp, head, list) {
+		list_del(&pos->list);
+		free(pos);
+	}
+}
+
 int cmd_ftrace(int argc, const char **argv)
 {
 	int ret;
@@ -330,9 +446,24 @@ int cmd_ftrace(int argc, const char **argv)
 		    "system-wide collection from all CPUs"),
 	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
 		    "list of cpus to monitor"),
+	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
+		     "trace given functions only", parse_filter_func),
+	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
+		     "do not trace given functions", parse_filter_func),
+	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
+		     "Set graph filter on given functions", parse_filter_func),
+	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
+		     "Set nograph filter on given functions", parse_filter_func),
+	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
+		    "Max depth for function graph tracer"),
 	OPT_END()
 	};
 
+	INIT_LIST_HEAD(&ftrace.filters);
+	INIT_LIST_HEAD(&ftrace.notrace);
+	INIT_LIST_HEAD(&ftrace.graph_funcs);
+	INIT_LIST_HEAD(&ftrace.nograph_funcs);
+
 	ret = perf_config(perf_ftrace_config, &ftrace);
 	if (ret < 0)
 		return -1;
@@ -348,12 +479,14 @@ int cmd_ftrace(int argc, const char **argv)
 
 		target__strerror(&ftrace.target, ret, errbuf, 512);
 		pr_err("%s\n", errbuf);
-		return -EINVAL;
+		goto out_delete_filters;
 	}
 
 	ftrace.evlist = perf_evlist__new();
-	if (ftrace.evlist == NULL)
-		return -ENOMEM;
+	if (ftrace.evlist == NULL) {
+		ret = -ENOMEM;
+		goto out_delete_filters;
+	}
 
 	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
 	if (ret < 0)
@@ -364,5 +497,11 @@ int cmd_ftrace(int argc, const char **argv)
 out_delete_evlist:
 	perf_evlist__delete(ftrace.evlist);
 
+out_delete_filters:
+	delete_filter_func(&ftrace.filters);
+	delete_filter_func(&ftrace.notrace);
+	delete_filter_func(&ftrace.graph_funcs);
+	delete_filter_func(&ftrace.nograph_funcs);
+
 	return ret;
 }
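
The new -T/-N/-G/-g options collect function names as filter_entry nodes, a struct whose flexible array member lets each node and its string share one allocation (and one free() in delete_filter_func()). A standalone sketch of the pattern, using plain next pointers instead of perf's list_head purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct filter_entry {
            struct filter_entry *next;
            char name[];            /* flexible array: string lives in-line */
    };

    static struct filter_entry *add_filter(struct filter_entry **head, const char *s)
    {
            struct filter_entry *e = malloc(sizeof(*e) + strlen(s) + 1);

            if (!e)
                    return NULL;
            strcpy(e->name, s);
            e->next = *head;
            *head = e;
            return e;
    }

    int main(void)
    {
            struct filter_entry *head = NULL, *pos, *tmp;

            add_filter(&head, "schedule");
            add_filter(&head, "do_sys_open");

            for (pos = head; pos; pos = pos->next)
                    printf("filter: %s\n", pos->name);

            for (pos = head; pos; pos = tmp) {      /* one free() per node+string */
                    tmp = pos->next;
                    free(pos);
            }
            return 0;
    }
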
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 492f8e1..530a7f2 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -108,10 +108,14 @@ static int check_emacsclient_version(void)
 	return ret;
 }
 
-static void exec_woman_emacs(const char *path, const char *page)
+static void exec_failed(const char *cmd)
 {
 	char sbuf[STRERR_BUFSIZE];
+	pr_warning("failed to exec '%s': %s", cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
+}
 
+static void exec_woman_emacs(const char *path, const char *page)
+{
 	if (!check_emacsclient_version()) {
 		/* This works only with emacsclient version >= 22. */
 		char *man_page;
@@ -122,8 +126,7 @@ static void exec_woman_emacs(const char *path, const char *page)
 			execlp(path, "emacsclient", "-e", man_page, NULL);
 			free(man_page);
 		}
-		warning("failed to exec '%s': %s", path,
-			str_error_r(errno, sbuf, sizeof(sbuf)));
+		exec_failed(path);
 	}
 }
 
@@ -134,7 +137,6 @@ static void exec_man_konqueror(const char *path, const char *page)
 	if (display && *display) {
 		char *man_page;
 		const char *filename = "kfmclient";
-		char sbuf[STRERR_BUFSIZE];
 
 		/* It's simpler to launch konqueror using kfmclient. */
 		if (path) {
@@ -155,33 +157,27 @@ static void exec_man_konqueror(const char *path, const char *page)
 			execlp(path, filename, "newTab", man_page, NULL);
 			free(man_page);
 		}
-		warning("failed to exec '%s': %s", path,
-			str_error_r(errno, sbuf, sizeof(sbuf)));
+		exec_failed(path);
 	}
 }
 
 static void exec_man_man(const char *path, const char *page)
 {
-	char sbuf[STRERR_BUFSIZE];
-
 	if (!path)
 		path = "man";
 	execlp(path, "man", page, NULL);
-	warning("failed to exec '%s': %s", path,
-		str_error_r(errno, sbuf, sizeof(sbuf)));
+	exec_failed(path);
 }
 
 static void exec_man_cmd(const char *cmd, const char *page)
 {
-	char sbuf[STRERR_BUFSIZE];
 	char *shell_cmd;
 
 	if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
 		execl("/bin/sh", "sh", "-c", shell_cmd, NULL);
 		free(shell_cmd);
 	}
-	warning("failed to exec '%s': %s", cmd,
-		str_error_r(errno, sbuf, sizeof(sbuf)));
+	exec_failed(cmd);
 }
 
 static void add_man_viewer(const char *name)
@@ -214,6 +210,12 @@ static void do_add_man_viewer_info(const char *name,
 	man_viewer_info_list = new;
 }
 
+static void unsupported_man_viewer(const char *name, const char *var)
+{
+	pr_warning("'%s': path for unsupported man viewer.\n"
+		   "Please consider using 'man.<tool>.%s' instead.", name, var);
+}
+
 static int add_man_viewer_path(const char *name,
 			       size_t len,
 			       const char *value)
@@ -221,9 +223,7 @@ static int add_man_viewer_path(const char *name,
 	if (supported_man_viewer(name, len))
 		do_add_man_viewer_info(name, len, value);
 	else
-		warning("'%s': path for unsupported man viewer.\n"
-			"Please consider using 'man.<tool>.cmd' instead.",
-			name);
+		unsupported_man_viewer(name, "cmd");
 
 	return 0;
 }
@@ -233,9 +233,7 @@ static int add_man_viewer_cmd(const char *name,
 			      const char *value)
 {
 	if (supported_man_viewer(name, len))
-		warning("'%s': cmd for supported man viewer.\n"
-			"Please consider using 'man.<tool>.path' instead.",
-			name);
+		unsupported_man_viewer(name, "path");
 	else
 		do_add_man_viewer_info(name, len, value);
 
@@ -247,8 +245,10 @@ static int add_man_viewer_info(const char *var, const char *value)
 	const char *name = var + 4;
 	const char *subkey = strrchr(name, '.');
 
-	if (!subkey)
-		return error("Config with no key for man viewer: %s", name);
+	if (!subkey) {
+		pr_err("Config with no key for man viewer: %s", name);
+		return -1;
+	}
 
 	if (!strcmp(subkey, ".path")) {
 		if (!value)
@@ -261,7 +261,7 @@ static int add_man_viewer_info(const char *var, const char *value)
 		return add_man_viewer_cmd(name, subkey - name, value);
 	}
 
-	warning("'%s': unsupported man viewer sub key.", subkey);
+	pr_warning("'%s': unsupported man viewer sub key.", subkey);
 	return 0;
 }
 
@@ -332,7 +332,7 @@ static void setup_man_path(void)
 		setenv("MANPATH", new_path, 1);
 		free(new_path);
 	} else {
-		error("Unable to setup man path");
+		pr_err("Unable to setup man path");
 	}
 }
 
@@ -349,7 +349,7 @@ static void exec_viewer(const char *name, const char *page)
 	else if (info)
 		exec_man_cmd(info, page);
 	else
-		warning("'%s': unknown man viewer.", name);
+		pr_warning("'%s': unknown man viewer.", name);
 }
 
 static int show_man_page(const char *perf_cmd)
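
The repeated warning("failed to exec ...") calls collapse into a single exec_failed() helper, keeping the errno decoding via str_error_r() in one place. str_error_r() is perf's strerror_r() wrapper that always yields a printable string; with plain libc the helper looks roughly like this (XSI strerror_r() assumed):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define STRERR_BUFSIZE 128

    /* Rough stand-in for perf's exec_failed(); assumes the XSI strerror_r(). */
    static void exec_failed(const char *cmd)
    {
            char sbuf[STRERR_BUFSIZE];

            if (strerror_r(errno, sbuf, sizeof(sbuf)) != 0)
                    snprintf(sbuf, sizeof(sbuf), "errno %d", errno);
            fprintf(stderr, "failed to exec '%s': %s\n", cmd, sbuf);
    }

    int main(void)
    {
            execlp("definitely-not-a-real-command", "x", (char *)NULL);
            exec_failed("definitely-not-a-real-command");   /* only reached on failure */
            return 1;
    }
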
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 9409c94..0a8a1c4 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1715,7 +1715,7 @@ static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
 		if (!tok)
 			break;
 		if (slab_sort_dimension__add(tok, sort_list) < 0) {
-			error("Unknown slab --sort key: '%s'", tok);
+			pr_err("Unknown slab --sort key: '%s'", tok);
 			free(str);
 			return -1;
 		}
@@ -1741,7 +1741,7 @@ static int setup_page_sorting(struct list_head *sort_list, const char *arg)
 		if (!tok)
 			break;
 		if (page_sort_dimension__add(tok, sort_list) < 0) {
-			error("Unknown page --sort key: '%s'", tok);
+			pr_err("Unknown page --sort key: '%s'", tok);
 			free(str);
 			return -1;
 		}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ee7d0a8..17a14bc 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -453,7 +453,7 @@ static int record__open(struct record *rec)
 	}
 
 	if (perf_evlist__apply_filters(evlist, &pos)) {
-		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
+		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 			pos->filter, perf_evsel__name(pos), errno,
 			str_error_r(errno, msg, sizeof(msg)));
 		rc = -1;
@@ -461,7 +461,7 @@ static int record__open(struct record *rec)
 	}
 
 	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
-		error("failed to set config \"%s\" on event %s with %d (%s)\n",
+		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
 		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
 		      str_error_r(errno, msg, sizeof(msg)));
 		rc = -1;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 22478ff..79a33eb 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -94,10 +94,9 @@ static int report__config(const char *var, const char *value, void *cb)
 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
 		return 0;
 	}
-	if (!strcmp(var, "report.queue-size")) {
-		rep->queue_size = perf_config_u64(var, value);
-		return 0;
-	}
+	if (!strcmp(var, "report.queue-size"))
+		return perf_config_u64(&rep->queue_size, var, value);
+
 	if (!strcmp(var, "report.sort_order")) {
 		default_sort_order = strdup(value);
 		return 0;
@@ -558,6 +557,7 @@ static int __cmd_report(struct report *rep)
 			ui__error("failed to set cpu bitmap\n");
 			return ret;
 		}
+		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
 	}
 
 	if (rep->show_threads) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 39996c5..322b4de 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2066,7 +2066,7 @@ static void save_task_callchain(struct perf_sched *sched,
 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 				      NULL, NULL, sched->max_stack + 2) != 0) {
 		if (verbose > 0)
-			error("Failed to resolve callchain. Skipping\n");
+			pr_err("Failed to resolve callchain. Skipping\n");
 
 		return;
 	}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 4761b0d..83cdc0a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -85,6 +85,8 @@ enum perf_output_field {
 	PERF_OUTPUT_INSN	    = 1U << 21,
 	PERF_OUTPUT_INSNLEN	    = 1U << 22,
 	PERF_OUTPUT_BRSTACKINSN	    = 1U << 23,
+	PERF_OUTPUT_BRSTACKOFF	    = 1U << 24,
+	PERF_OUTPUT_SYNTH           = 1U << 25,
 };
 
 struct output_option {
@@ -115,6 +117,13 @@ struct output_option {
 	{.str = "insn", .field = PERF_OUTPUT_INSN},
 	{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
 	{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
+	{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
+	{.str = "synth", .field = PERF_OUTPUT_SYNTH},
+};
+
+enum {
+	OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+	OUTPUT_TYPE_MAX
 };
 
 /* default set to maintain compatibility with current format */
@@ -124,7 +133,7 @@ static struct {
 	unsigned int print_ip_opts;
 	u64 fields;
 	u64 invalid_fields;
-} output[PERF_TYPE_MAX] = {
+} output[OUTPUT_TYPE_MAX] = {
 
 	[PERF_TYPE_HARDWARE] = {
 		.user_set = false,
@@ -182,12 +191,44 @@ static struct {
 
 		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
 	},
+
+	[OUTPUT_TYPE_SYNTH] = {
+		.user_set = false,
+
+		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+			      PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+			      PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+			      PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
+			      PERF_OUTPUT_SYNTH,
+
+		.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+	},
 };
 
+static inline int output_type(unsigned int type)
+{
+	switch (type) {
+	case PERF_TYPE_SYNTH:
+		return OUTPUT_TYPE_SYNTH;
+	default:
+		return type;
+	}
+}
+
+static inline unsigned int attr_type(unsigned int type)
+{
+	switch (type) {
+	case OUTPUT_TYPE_SYNTH:
+		return PERF_TYPE_SYNTH;
+	default:
+		return type;
+	}
+}
+
 static bool output_set_by_user(void)
 {
 	int j;
-	for (j = 0; j < PERF_TYPE_MAX; ++j) {
+	for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
 		if (output[j].user_set)
 			return true;
 	}
@@ -208,7 +249,7 @@ static const char *output_field2str(enum perf_output_field field)
 	return str;
 }
 
-#define PRINT_FIELD(x)  (output[attr->type].fields & PERF_OUTPUT_##x)
+#define PRINT_FIELD(x)  (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
 
 static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
 				      u64 sample_type, const char *sample_msg,
@@ -216,7 +257,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
 				      bool allow_user_set)
 {
 	struct perf_event_attr *attr = &evsel->attr;
-	int type = attr->type;
+	int type = output_type(attr->type);
 	const char *evname;
 
 	if (attr->sample_type & sample_type)
@@ -298,10 +339,10 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
 		       "selected.\n");
 		return -EINVAL;
 	}
-	if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
-		pr_err("Display of DSO requested but neither sample IP nor "
-			   "sample address\nis selected. Hence, no addresses to convert "
-		       "to DSO.\n");
+	if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR) &&
+	    !PRINT_FIELD(BRSTACK) && !PRINT_FIELD(BRSTACKSYM) && !PRINT_FIELD(BRSTACKOFF)) {
+		pr_err("Display of DSO requested but no address to convert.  Select\n"
+		       "sample IP, sample address, brstack, brstacksym, or brstackoff.\n");
 		return -EINVAL;
 	}
 	if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) {
@@ -346,7 +387,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
 
 static void set_print_ip_opts(struct perf_event_attr *attr)
 {
-	unsigned int type = attr->type;
+	unsigned int type = output_type(attr->type);
 
 	output[type].print_ip_opts = 0;
 	if (PRINT_FIELD(IP))
@@ -374,16 +415,17 @@ static int perf_session__check_output_opt(struct perf_session *session)
 	unsigned int j;
 	struct perf_evsel *evsel;
 
-	for (j = 0; j < PERF_TYPE_MAX; ++j) {
-		evsel = perf_session__find_first_evtype(session, j);
+	for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
+		evsel = perf_session__find_first_evtype(session, attr_type(j));
 
 		/*
 		 * even if fields is set to 0 (ie., show nothing) event must
 		 * exist if user explicitly includes it on the command line
 		 */
-		if (!evsel && output[j].user_set && !output[j].wildcard_set) {
+		if (!evsel && output[j].user_set && !output[j].wildcard_set &&
+		    j != OUTPUT_TYPE_SYNTH) {
 			pr_err("%s events do not exist. "
-			       "Remove corresponding -f option to proceed.\n",
+			       "Remove corresponding -F option to proceed.\n",
 			       event_type(j));
 			return -1;
 		}
@@ -514,18 +556,43 @@ mispred_str(struct branch_entry *br)
 	return br->flags.predicted ? 'P' : 'M';
 }
 
-static void print_sample_brstack(struct perf_sample *sample)
+static void print_sample_brstack(struct perf_sample *sample,
+				 struct thread *thread,
+				 struct perf_event_attr *attr)
 {
 	struct branch_stack *br = sample->branch_stack;
-	u64 i;
+	struct addr_location alf, alt;
+	u64 i, from, to;
 
 	if (!(br && br->nr))
 		return;
 
 	for (i = 0; i < br->nr; i++) {
-		printf(" 0x%"PRIx64"/0x%"PRIx64"/%c/%c/%c/%d ",
-			br->entries[i].from,
-			br->entries[i].to,
+		from = br->entries[i].from;
+		to   = br->entries[i].to;
+
+		if (PRINT_FIELD(DSO)) {
+			memset(&alf, 0, sizeof(alf));
+			memset(&alt, 0, sizeof(alt));
+			thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
+			thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
+		}
+
+		printf("0x%"PRIx64, from);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alf.map, stdout);
+			printf(")");
+		}
+
+		printf("/0x%"PRIx64, to);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alt.map, stdout);
+			printf(")");
+		}
+
+		printf("/%c/%c/%c/%d ",
 			mispred_str( br->entries + i),
 			br->entries[i].flags.in_tx? 'X' : '-',
 			br->entries[i].flags.abort? 'A' : '-',
@@ -534,7 +601,8 @@ static void print_sample_brstack(struct perf_sample *sample)
 }
 
 static void print_sample_brstacksym(struct perf_sample *sample,
-				    struct thread *thread)
+				    struct thread *thread,
+				    struct perf_event_attr *attr)
 {
 	struct branch_stack *br = sample->branch_stack;
 	struct addr_location alf, alt;
@@ -559,8 +627,18 @@ static void print_sample_brstacksym(struct perf_sample *sample,
 			alt.sym = map__find_symbol(alt.map, alt.addr);
 
 		symbol__fprintf_symname_offs(alf.sym, &alf, stdout);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alf.map, stdout);
+			printf(")");
+		}
 		putchar('/');
 		symbol__fprintf_symname_offs(alt.sym, &alt, stdout);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alt.map, stdout);
+			printf(")");
+		}
 		printf("/%c/%c/%c/%d ",
 			mispred_str( br->entries + i),
 			br->entries[i].flags.in_tx? 'X' : '-',
@@ -569,6 +647,51 @@ static void print_sample_brstacksym(struct perf_sample *sample,
 	}
 }
 
+static void print_sample_brstackoff(struct perf_sample *sample,
+				    struct thread *thread,
+				    struct perf_event_attr *attr)
+{
+	struct branch_stack *br = sample->branch_stack;
+	struct addr_location alf, alt;
+	u64 i, from, to;
+
+	if (!(br && br->nr))
+		return;
+
+	for (i = 0; i < br->nr; i++) {
+
+		memset(&alf, 0, sizeof(alf));
+		memset(&alt, 0, sizeof(alt));
+		from = br->entries[i].from;
+		to   = br->entries[i].to;
+
+		thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
+		if (alf.map && !alf.map->dso->adjust_symbols)
+			from = map__map_ip(alf.map, from);
+
+		thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
+		if (alt.map && !alt.map->dso->adjust_symbols)
+			to = map__map_ip(alt.map, to);
+
+		printf("0x%"PRIx64, from);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alf.map, stdout);
+			printf(")");
+		}
+		printf("/0x%"PRIx64, to);
+		if (PRINT_FIELD(DSO)) {
+			printf("(");
+			map__fprintf_dsoname(alt.map, stdout);
+			printf(")");
+		}
+		printf("/%c/%c/%c/%d ",
+			mispred_str(br->entries + i),
+			br->entries[i].flags.in_tx ? 'X' : '-',
+			br->entries[i].flags.abort ? 'A' : '-',
+			br->entries[i].flags.cycles);
+	}
+}
 #define MAXBB 16384UL
 
 static int grab_bb(u8 *buffer, u64 start, u64 end,
@@ -906,6 +1029,7 @@ static void print_sample_bts(struct perf_sample *sample,
 			     struct machine *machine)
 {
 	struct perf_event_attr *attr = &evsel->attr;
+	unsigned int type = output_type(attr->type);
 	bool print_srcline_last = false;
 
 	if (PRINT_FIELD(CALLINDENT))
@@ -913,7 +1037,7 @@ static void print_sample_bts(struct perf_sample *sample,
 
 	/* print branch_from information */
 	if (PRINT_FIELD(IP)) {
-		unsigned int print_opts = output[attr->type].print_ip_opts;
+		unsigned int print_opts = output[type].print_ip_opts;
 		struct callchain_cursor *cursor = NULL;
 
 		if (symbol_conf.use_callchain && sample->callchain &&
@@ -936,7 +1060,7 @@ static void print_sample_bts(struct perf_sample *sample,
 	/* print branch_to information */
 	if (PRINT_FIELD(ADDR) ||
 	    ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
-	     !output[attr->type].user_set)) {
+	     !output[type].user_set)) {
 		printf(" => ");
 		print_sample_addr(sample, thread, attr);
 	}
@@ -1079,6 +1203,127 @@ static void print_sample_bpf_output(struct perf_sample *sample)
 		       (char *)(sample->raw_data));
 }
 
+static void print_sample_spacing(int len, int spacing)
+{
+	if (len > 0 && len < spacing)
+		printf("%*s", spacing - len, "");
+}
+
+static void print_sample_pt_spacing(int len)
+{
+	print_sample_spacing(len, 34);
+}
+
+static void print_sample_synth_ptwrite(struct perf_sample *sample)
+{
+	struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	len = printf(" IP: %u payload: %#" PRIx64 " ",
+		     data->ip, le64_to_cpu(data->payload));
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth_mwait(struct perf_sample *sample)
+{
+	struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	len = printf(" hints: %#x extensions: %#x ",
+		     data->hints, data->extensions);
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth_pwre(struct perf_sample *sample)
+{
+	struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	len = printf(" hw: %u cstate: %u sub-cstate: %u ",
+		     data->hw, data->cstate, data->subcstate);
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth_exstop(struct perf_sample *sample)
+{
+	struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	len = printf(" IP: %u ", data->ip);
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth_pwrx(struct perf_sample *sample)
+{
+	struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ",
+		     data->deepest_cstate, data->last_cstate,
+		     data->wake_reason);
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth_cbr(struct perf_sample *sample)
+{
+	struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
+	unsigned int percent, freq;
+	int len;
+
+	if (perf_sample__bad_synth_size(sample, *data))
+		return;
+
+	freq = (le32_to_cpu(data->freq) + 500) / 1000;
+	len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq);
+	if (data->max_nonturbo) {
+		percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
+		len += printf("(%3u%%) ", percent);
+	}
+	print_sample_pt_spacing(len);
+}
+
+static void print_sample_synth(struct perf_sample *sample,
+			       struct perf_evsel *evsel)
+{
+	switch (evsel->attr.config) {
+	case PERF_SYNTH_INTEL_PTWRITE:
+		print_sample_synth_ptwrite(sample);
+		break;
+	case PERF_SYNTH_INTEL_MWAIT:
+		print_sample_synth_mwait(sample);
+		break;
+	case PERF_SYNTH_INTEL_PWRE:
+		print_sample_synth_pwre(sample);
+		break;
+	case PERF_SYNTH_INTEL_EXSTOP:
+		print_sample_synth_exstop(sample);
+		break;
+	case PERF_SYNTH_INTEL_PWRX:
+		print_sample_synth_pwrx(sample);
+		break;
+	case PERF_SYNTH_INTEL_CBR:
+		print_sample_synth_cbr(sample);
+		break;
+	default:
+		break;
+	}
+}
+
 struct perf_script {
 	struct perf_tool	tool;
 	struct perf_session	*session;
@@ -1132,8 +1377,9 @@ static void process_event(struct perf_script *script,
 {
 	struct thread *thread = al->thread;
 	struct perf_event_attr *attr = &evsel->attr;
+	unsigned int type = output_type(attr->type);
 
-	if (output[attr->type].fields == 0)
+	if (output[type].fields == 0)
 		return;
 
 	print_sample_start(sample, thread, evsel);
@@ -1162,6 +1408,10 @@ static void process_event(struct perf_script *script,
 	if (PRINT_FIELD(TRACE))
 		event_format__print(evsel->tp_format, sample->cpu,
 				    sample->raw_data, sample->raw_size);
+
+	if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
+		print_sample_synth(sample, evsel);
+
 	if (PRINT_FIELD(ADDR))
 		print_sample_addr(sample, thread, attr);
 
@@ -1180,16 +1430,18 @@ static void process_event(struct perf_script *script,
 			cursor = &callchain_cursor;
 
 		putchar(cursor ? '\n' : ' ');
-		sample__fprintf_sym(sample, al, 0, output[attr->type].print_ip_opts, cursor, stdout);
+		sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout);
 	}
 
 	if (PRINT_FIELD(IREGS))
 		print_sample_iregs(sample, attr);
 
 	if (PRINT_FIELD(BRSTACK))
-		print_sample_brstack(sample);
+		print_sample_brstack(sample, thread, attr);
 	else if (PRINT_FIELD(BRSTACKSYM))
-		print_sample_brstacksym(sample, thread);
+		print_sample_brstacksym(sample, thread, attr);
+	else if (PRINT_FIELD(BRSTACKOFF))
+		print_sample_brstackoff(sample, thread, attr);
 
 	if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
 		print_sample_bpf_output(sample);
@@ -1325,7 +1577,8 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
 	evlist = *pevlist;
 	evsel = perf_evlist__last(*pevlist);
 
-	if (evsel->attr.type >= PERF_TYPE_MAX)
+	if (evsel->attr.type >= PERF_TYPE_MAX &&
+	    evsel->attr.type != PERF_TYPE_SYNTH)
 		return 0;
 
 	evlist__for_each_entry(evlist, pos) {
@@ -1727,6 +1980,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
 	int rc = 0;
 	char *str = strdup(arg);
 	int type = -1;
+	enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
 
 	if (!str)
 		return -ENOMEM;
@@ -1749,6 +2003,8 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
 			type = PERF_TYPE_RAW;
 		else if (!strcmp(str, "break"))
 			type = PERF_TYPE_BREAKPOINT;
+		else if (!strcmp(str, "synth"))
+			type = OUTPUT_TYPE_SYNTH;
 		else {
 			fprintf(stderr, "Invalid event type in field string.\n");
 			rc = -EINVAL;
@@ -1772,23 +2028,44 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
 			goto out;
 		}
 
+		/* Don't override defaults for +- */
+		if (strchr(str, '+') || strchr(str, '-'))
+			goto parse;
+
 		if (output_set_by_user())
 			pr_warning("Overriding previous field request for all events.\n");
 
-		for (j = 0; j < PERF_TYPE_MAX; ++j) {
+		for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
 			output[j].fields = 0;
 			output[j].user_set = true;
 			output[j].wildcard_set = true;
 		}
 	}
 
+parse:
 	for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) {
+		if (*tok == '+') {
+			if (change == SET)
+				goto out_badmix;
+			change = ADD;
+			tok++;
+		} else if (*tok == '-') {
+			if (change == SET)
+				goto out_badmix;
+			change = REMOVE;
+			tok++;
+		} else {
+			if (change != SET && change != DEFAULT)
+				goto out_badmix;
+			change = SET;
+		}
+
 		for (i = 0; i < imax; ++i) {
 			if (strcmp(tok, all_output_options[i].str) == 0)
 				break;
 		}
 		if (i == imax && strcmp(tok, "flags") == 0) {
-			print_flags = true;
+			print_flags = change == REMOVE ? false : true;
 			continue;
 		}
 		if (i == imax) {
@@ -1801,12 +2078,16 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
 			/* add user option to all events types for
 			 * which it is valid
 			 */
-			for (j = 0; j < PERF_TYPE_MAX; ++j) {
+			for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
 				if (output[j].invalid_fields & all_output_options[i].field) {
 					pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
 						   all_output_options[i].str, event_type(j));
-				} else
-					output[j].fields |= all_output_options[i].field;
+				} else {
+					if (change == REMOVE)
+						output[j].fields &= ~all_output_options[i].field;
+					else
+						output[j].fields |= all_output_options[i].field;
+				}
 			}
 		} else {
 			if (output[type].invalid_fields & all_output_options[i].field) {
@@ -1826,7 +2107,11 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
 				 "Events will not be displayed.\n", event_type(type));
 		}
 	}
+	goto out;
 
+out_badmix:
+	fprintf(stderr, "Cannot mix +-field with overridden fields\n");
+	rc = -EINVAL;
 out:
 	free(str);
 	return rc;
@@ -2444,10 +2729,11 @@ int cmd_script(int argc, const char **argv)
 		     symbol__config_symfs),
 	OPT_CALLBACK('F', "fields", NULL, "str",
 		     "comma separated output fields prepend with 'type:'. "
-		     "Valid types: hw,sw,trace,raw. "
+		     "+field to add and -field to remove."
+		     "Valid types: hw,sw,trace,raw,synth. "
 		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
 		     "addr,symoff,period,iregs,brstack,brstacksym,flags,"
-		     "bpf-output,callindent,insn,insnlen,brstackinsn",
+		     "bpf-output,callindent,insn,insnlen,brstackinsn,synth",
 		     parse_output_fields),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
@@ -2706,6 +2992,7 @@ int cmd_script(int argc, const char **argv)
 		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
 		if (err < 0)
 			goto out_delete;
+		itrace_synth_opts.cpu_bitmap = cpu_bitmap;
 	}
 
 	if (!no_callchain)
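
perf script -F now understands "+field" and "-field" to adjust the default field set instead of replacing it, and rejects specs that mix the two styles ("Cannot mix +-field with overridden fields"). A reduced sketch of that parsing over a plain bitmask, with hypothetical field names:

    #include <stdio.h>
    #include <string.h>

    enum { F_COMM = 1u << 0, F_IP = 1u << 1, F_SYM = 1u << 2 };

    static const struct { const char *str; unsigned int field; } opts[] = {
            { "comm", F_COMM }, { "ip", F_IP }, { "sym", F_SYM },
    };

    /* Apply a comma-separated "field" / "+field" / "-field" spec to *fields.
     * Plain names replace the set; +/- adjust it; mixing the styles fails. */
    static int parse_fields(char *spec, unsigned int *fields)
    {
            enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
            char *save, *tok;

            for (tok = strtok_r(spec, ",", &save); tok;
                 tok = strtok_r(NULL, ",", &save)) {
                    unsigned int i, bit = 0;

                    if (*tok == '+' || *tok == '-') {
                            if (change == SET)
                                    return -1;      /* cannot mix with overrides */
                            change = (*tok == '+') ? ADD : REMOVE;
                            tok++;
                    } else {
                            if (change == ADD || change == REMOVE)
                                    return -1;
                            if (change == DEFAULT)
                                    *fields = 0;    /* overriding: start fresh */
                            change = SET;
                    }

                    for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                            if (!strcmp(tok, opts[i].str))
                                    bit = opts[i].field;
                    if (!bit)
                            return -1;              /* unknown field name */

                    if (change == REMOVE)
                            *fields &= ~bit;
                    else
                            *fields |= bit;
            }
            return 0;
    }

    int main(void)
    {
            unsigned int fields = F_COMM | F_IP;    /* pretend defaults */
            char spec[] = "+sym,-comm";

            if (parse_fields(spec, &fields) == 0)
                    printf("fields: %#x\n", fields);        /* F_IP | F_SYM */
            return 0;
    }
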
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a935b50..48ac53b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -86,6 +86,7 @@
 #define DEFAULT_SEPARATOR	" "
 #define CNTR_NOT_SUPPORTED	"<not supported>"
 #define CNTR_NOT_COUNTED	"<not counted>"
+#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
 
 static void print_counters(struct timespec *ts, int argc, const char **argv);
 
@@ -122,6 +123,14 @@ static const char * topdown_attrs[] = {
 	NULL,
 };
 
+static const char *smi_cost_attrs = {
+	"{"
+	"msr/aperf/,"
+	"msr/smi/,"
+	"cycles"
+	"}"
+};
+
 static struct perf_evlist	*evsel_list;
 
 static struct target target = {
@@ -137,6 +146,8 @@ static bool			null_run			=  false;
 static int			detailed_run			=  0;
 static bool			transaction_run;
 static bool			topdown_run			= false;
+static bool			smi_cost			= false;
+static bool			smi_reset			= false;
 static bool			big_num				=  true;
 static int			big_num_opt			=  -1;
 static const char		*csv_sep			= NULL;
@@ -625,14 +636,14 @@ static int __run_perf_stat(int argc, const char **argv)
 	}
 
 	if (perf_evlist__apply_filters(evsel_list, &counter)) {
-		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
+		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 			counter->filter, perf_evsel__name(counter), errno,
 			str_error_r(errno, msg, sizeof(msg)));
 		return -1;
 	}
 
 	if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
-		error("failed to set config \"%s\" on event %s with %d (%s)\n",
+		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
 		      err_term->val.drv_cfg, perf_evsel__name(counter), errno,
 		      str_error_r(errno, msg, sizeof(msg)));
 		return -1;
@@ -1578,6 +1589,7 @@ static void print_header(int argc, const char **argv)
 static void print_footer(void)
 {
 	FILE *output = stat_config.output;
+	int n;
 
 	if (!null_run)
 		fprintf(output, "\n");
@@ -1590,7 +1602,9 @@ static void print_footer(void)
 	}
 	fprintf(output, "\n\n");
 
-	if (print_free_counters_hint)
+	if (print_free_counters_hint &&
+	    sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+	    n > 0)
 		fprintf(output,
 "Some events weren't counted. Try disabling the NMI watchdog:\n"
 "	echo 0 > /proc/sys/kernel/nmi_watchdog\n"
@@ -1779,6 +1793,8 @@ static const struct option stat_options[] = {
 			"Only print computed metrics. No raw values", enable_metric_only),
 	OPT_BOOLEAN(0, "topdown", &topdown_run,
 			"measure topdown level 1 statistics"),
+	OPT_BOOLEAN(0, "smi-cost", &smi_cost,
+			"measure SMI cost"),
 	OPT_END()
 };
 
@@ -2157,6 +2173,39 @@ static int add_default_attributes(void)
 		return 0;
 	}
 
+	if (smi_cost) {
+		int smi;
+
+		if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
+			fprintf(stderr, "freeze_on_smi is not supported.\n");
+			return -1;
+		}
+
+		if (!smi) {
+			if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
+				fprintf(stderr, "Failed to set freeze_on_smi.\n");
+				return -1;
+			}
+			smi_reset = true;
+		}
+
+		if (pmu_have_event("msr", "aperf") &&
+		    pmu_have_event("msr", "smi")) {
+			if (!force_metric_only)
+				metric_only = true;
+			err = parse_events(evsel_list, smi_cost_attrs, NULL);
+		} else {
+			fprintf(stderr, "To measure SMI cost, it needs "
+				"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+			return -1;
+		}
+		if (err) {
+			fprintf(stderr, "Cannot set up SMI cost events\n");
+			return -1;
+		}
+		return 0;
+	}
+
 	if (topdown_run) {
 		char *str = NULL;
 		bool warn = false;
@@ -2739,6 +2788,9 @@ int cmd_stat(int argc, const char **argv)
 	perf_stat__exit_aggr_mode();
 	perf_evlist__free_stats(evsel_list);
 out:
+	if (smi_cost && smi_reset)
+		sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
+
 	perf_evlist__delete(evsel_list);
 	return status;
 }
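
--smi-cost turns on the core PMU's freeze_on_smi attribute for the run, restores it afterwards when smi_reset is set, and counts msr/aperf/, msr/smi/ and cycles as one group. A small standalone sketch of the sysfs toggle; the absolute path is an assumption about where the "cpu" PMU attribute is exposed, and writing it needs root:

    #include <stdio.h>

    /* Assumed sysfs path of the core PMU's freeze_on_smi attribute. */
    #define FREEZE_ON_SMI "/sys/bus/event_source/devices/cpu/freeze_on_smi"

    static int read_int(const char *path, int *val)
    {
            FILE *f = fopen(path, "r");
            int ok = f && fscanf(f, "%d", val) == 1;

            if (f)
                    fclose(f);
            return ok ? 0 : -1;
    }

    static int write_int(const char *path, int val)
    {
            FILE *f = fopen(path, "w");
            int ok = f && fprintf(f, "%d\n", val) > 0;

            if (f)
                    fclose(f);
            return ok ? 0 : -1;
    }

    int main(void)
    {
            int old;

            if (read_int(FREEZE_ON_SMI, &old) < 0) {
                    fprintf(stderr, "freeze_on_smi is not supported\n");
                    return 1;
            }
            if (!old && write_int(FREEZE_ON_SMI, 1) < 0) {
                    fprintf(stderr, "failed to set freeze_on_smi (need root)\n");
                    return 1;
            }

            /* ... run the measurement here ... */

            if (!old)
                    write_int(FREEZE_ON_SMI, 0);    /* restore, as smi_reset does */
            return 0;
    }
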
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 10b6362..6052376 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -134,7 +134,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 		return err;
 	}
 
-	err = symbol__disassemble(sym, map, NULL, 0);
+	err = symbol__disassemble(sym, map, NULL, 0, NULL);
 	if (err == 0) {
 out_assign:
 		top->sym_filter_entry = he;
@@ -958,7 +958,7 @@ static int __cmd_top(struct perf_top *top)
 
 	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
 	if (ret) {
-		error("failed to set config \"%s\" on event %s with %d (%s)\n",
+		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
 			err_term->val.drv_cfg, perf_evsel__name(pos), errno,
 			str_error_r(errno, msg, sizeof(msg)));
 		goto out_delete;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d014350..4b2a5d2 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -681,6 +681,10 @@ static struct syscall_fmt {
 	{ .name	    = "mlockall",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name	    = "mmap",	    .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+	.alias = "old_mmap",
+#endif
 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
 			     [2] = SCA_MMAP_PROT, /* prot */
 			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index e9651a9..cf36de7 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -304,7 +304,7 @@ jvmti_close(void *agent)
 	FILE *fp = agent;
 
 	if (!fp) {
-		warnx("jvmti: incalid fd in close_agent");
+		warnx("jvmti: invalid fd in close_agent");
 		return -1;
 	}
 
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index bedf5d0..c53a41f 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -5,8 +5,6 @@
 #include <stdint.h>
 #include <jvmti.h>
 
-#define __unused __attribute__((unused))
-
 #if defined(__cplusplus)
 extern "C" {
 #endif
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index 5612641..6d71090 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -1,3 +1,4 @@
+#include <linux/compiler.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <string.h>
@@ -238,7 +239,7 @@ code_generated_cb(jvmtiEnv *jvmti,
 }
 
 JNIEXPORT jint JNICALL
-Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __maybe_unused)
 {
 	jvmtiEventCallbacks cb;
 	jvmtiCapabilities caps1;
@@ -313,7 +314,7 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
 }
 
 JNIEXPORT void JNICALL
-Agent_OnUnload(JavaVM *jvm __unused)
+Agent_OnUnload(JavaVM *jvm __maybe_unused)
 {
 	int ret;
 
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 9213a12..999a4e8 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -2,7 +2,7 @@
 
 jevents-y	+= json.o jsmn.o jevents.o
 pmu-events-y	+= pmu-events.o
-JDIR		=  pmu-events/arch/$(ARCH)
+JDIR		=  pmu-events/arch/$(SRCARCH)
 JSON		=  $(shell [ -d $(JDIR) ] &&				\
 			find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
 #
@@ -10,4 +10,4 @@
 # directory and create tables in pmu-events.c.
 #
 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
-	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+	$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index baa073f..bd0aabb 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -48,10 +48,6 @@
 #include "json.h"
 #include "jevents.h"
 
-#ifndef __maybe_unused
-#define __maybe_unused                  __attribute__((unused))
-#endif
-
 int verbose;
 char *prog;
 
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
new file mode 100644
index 0000000..10fe2b6
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+#
+# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
+# to be specified with appropriate config terms.
+#
+if ! echo "$@" | grep -q intel_pt ; then
+	echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
+	echo "and for power events it probably needs to be system wide i.e. -a option"
+	echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
+	exit 1
+fi
+perf record $@
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
new file mode 100644
index 0000000..9a9c92f
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: print Intel PT Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
\ No newline at end of file
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
new file mode 100644
index 0000000..b19172d
--- /dev/null
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -0,0 +1,128 @@
+# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+def trace_begin():
+	print "Intel PT Power Events and PTWRITE"
+
+def trace_end():
+	print "End"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+		print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+
+def print_ptwrite(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	flags = data[0]
+	payload = data[1]
+	exact_ip = flags & 1
+	print "IP: %u payload: %#x" % (exact_ip, payload),
+
+def print_cbr(raw_buf):
+	data = struct.unpack_from("<BBBBII", raw_buf)
+	cbr = data[0]
+	f = (data[4] + 500) / 1000
+	p = ((cbr * 1000 / data[2]) + 5) / 10
+	print "%3u  freq: %4u MHz  (%3u%%)" % (cbr, f, p),
+
+def print_mwait(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hints = payload & 0xff
+	extensions = (payload >> 32) & 0x3
+	print "hints: %#x extensions: %#x" % (hints, extensions),
+
+def print_pwre(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hw = (payload >> 7) & 1
+	cstate = (payload >> 12) & 0xf
+	subcstate = (payload >> 8) & 0xf
+	print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+
+def print_exstop(raw_buf):
+	data = struct.unpack_from("<I", raw_buf)
+	flags = data[0]
+	exact_ip = flags & 1
+	print "IP: %u" % (exact_ip),
+
+def print_pwrx(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	deepest_cstate = payload & 0xf
+	last_cstate = (payload >> 4) & 0xf
+	wake_reason = (payload >> 8) & 0xf
+	print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+
+def print_common_start(comm, sample, name):
+	ts = sample["time"]
+	cpu = sample["cpu"]
+	pid = sample["pid"]
+	tid = sample["tid"]
+	print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+
+def print_common_ip(sample, symbol, dso):
+	ip = sample["ip"]
+	print "%16x %s (%s)" % (ip, symbol, dso)
+
+def process_event(param_dict):
+        event_attr = param_dict["attr"]
+        sample     = param_dict["sample"]
+        raw_buf    = param_dict["raw_buf"]
+        comm       = param_dict["comm"]
+        name       = param_dict["ev_name"]
+
+        # Symbol and dso info are not always resolved
+        if (param_dict.has_key("dso")):
+                dso = param_dict["dso"]
+        else:
+                dso = "[unknown]"
+
+        if (param_dict.has_key("symbol")):
+                symbol = param_dict["symbol"]
+        else:
+                symbol = "[unknown]"
+
+	if name == "ptwrite":
+		print_common_start(comm, sample, name)
+		print_ptwrite(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "cbr":
+		print_common_start(comm, sample, name)
+		print_cbr(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "mwait":
+		print_common_start(comm, sample, name)
+		print_mwait(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwre":
+		print_common_start(comm, sample, name)
+		print_pwre(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "exstop":
+		print_common_start(comm, sample, name)
+		print_exstop(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwrx":
+		print_common_start(comm, sample, name)
+		print_pwrx(raw_buf)
+		print_common_ip(sample, symbol, dso)
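
The new intel-pt-events.py script above unpacks each sample's PERF_SAMPLE_RAW buffer with struct.unpack_from() and then pulls the individual fields out of the 64-bit payload with shifts and masks. For reference, a minimal standalone C sketch of the same bit extraction that print_pwrx() performs; the payload value is made up purely for illustration:

  /*
   * Minimal sketch of the PWRX payload decoding done by print_pwrx()
   * above; the payload value here is an assumed example.
   */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t payload = 0x523;        /* hypothetical PWRX payload */
          unsigned int deepest_cstate = payload & 0xf;
          unsigned int last_cstate    = (payload >> 4) & 0xf;
          unsigned int wake_reason    = (payload >> 8) & 0xf;

          printf("deepest cstate: %u last cstate: %u wake reason: %#x\n",
                 deepest_cstate, last_cstate, wake_reason);
          return 0;
  }
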
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af58ebc..84222bd 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -75,7 +75,7 @@
 	$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
 	$(Q)echo ';' >> $@
 
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
 
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 0dd77494..0e77b2c 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -18,6 +18,7 @@
  * permissions. All the event text files are stored there.
  */
 
+#include <debug.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <stdlib.h>
@@ -29,14 +30,11 @@
 #include <sys/stat.h>
 #include <unistd.h>
 #include "../perf.h"
-#include "util.h"
 #include <subcmd/exec-cmd.h>
 #include "tests.h"
 
 #define ENV "PERF_TEST_ATTR"
 
-extern int verbose;
-
 static char *dir;
 
 void test_attr__init(void)
@@ -138,8 +136,10 @@ void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
 {
 	int errno_saved = errno;
 
-	if (store_event(attr, pid, cpu, fd, group_fd, flags))
-		die("test attr FAILED");
+	if (store_event(attr, pid, cpu, fd, group_fd, flags)) {
+		pr_err("test attr FAILED");
+		exit(128);
+	}
 
 	errno = errno_saved;
 }
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 1091bd4..cdf21a9 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -16,6 +16,13 @@
     def getMsg(self):
         return '\'%s\' - %s' % (self.test.path, self.msg)
 
+class Notest(Exception):
+    def __init__(self, test, arch):
+        self.arch = arch
+        self.test = test
+    def getMsg(self):
+        return '[%s] \'%s\'' % (self.arch, self.test.path)
+
 class Unsup(Exception):
     def __init__(self, test):
         self.test = test
@@ -112,6 +119,9 @@
 #     'command' - perf command name
 #     'args'    - special command arguments
 #     'ret'     - expected command return value (0 by default)
+#     'arch'    - architecture-specific test (optional),
+#                 a comma-separated list; a leading '!'
+#                 negates the list.
 #
 # [eventX:base]
 #   - one or multiple instances in file
@@ -134,6 +144,12 @@
         except:
             self.ret  = 0
 
+        try:
+            self.arch  = parser.get('config', 'arch')
+            log.warning("test limitation '%s'" % self.arch)
+        except:
+            self.arch  = ''
+
         self.expect   = {}
         self.result   = {}
         log.debug("  loading expected events");
@@ -145,6 +161,31 @@
         else:
             return True
 
+    def skip_test(self, myarch):
+        # If the architecture is not set, always run the test
+        if self.arch == '':
+            # log.warning("test for arch %s is ok" % myarch)
+            return False
+
+        # Allow multiple values in assignment separated by ','
+        arch_list = self.arch.split(',')
+
+        # Handle negated list such as !s390x,ppc
+        if arch_list[0][0] == '!':
+            arch_list[0] = arch_list[0][1:]
+            log.warning("excluded architecture list %s" % arch_list)
+            for arch_item in arch_list:
+                # log.warning("test for %s arch is %s" % (arch_item, myarch))
+                if arch_item == myarch:
+                    return True
+            return False
+
+        for arch_item in arch_list:
+            # log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch))
+            if arch_item == myarch:
+                return False
+        return True
+
     def load_events(self, path, events):
         parser_event = ConfigParser.SafeConfigParser()
         parser_event.read(path)
@@ -168,6 +209,11 @@
             events[section] = e
 
     def run_cmd(self, tempdir):
+        junk1, junk2, junk3, junk4, myarch = (os.uname())
+
+        if self.skip_test(myarch):
+            raise Notest(self, myarch)
+
         cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
               self.perf, self.command, tempdir, self.args)
         ret = os.WEXITSTATUS(os.system(cmd))
@@ -265,6 +311,8 @@
             Test(f, options).run()
         except Unsup, obj:
             log.warning("unsupp  %s" % obj.getMsg())
+        except Notest, obj:
+            log.warning("skipped %s" % obj.getMsg())
 
 def setup_log(verbose):
     global log
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index e7664fe..39bbb97 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -62,8 +62,7 @@ static void __test_function(volatile long *ptr)
 }
 #endif
 
-__attribute__ ((noinline))
-static int test_function(void)
+static noinline int test_function(void)
 {
 	__test_function(&the_var);
 	the_var++;
@@ -288,3 +287,17 @@ int test__bp_signal(int subtest __maybe_unused)
 	return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
 		TEST_OK : TEST_FAIL;
 }
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * powerpc does not yet support creating instruction breakpoints
+ * via the perf event interface.
+ * Once that support exists, this restriction can be lifted.
+ */
+#ifdef __powerpc__
+	return false;
+#else
+	return true;
+#endif
+}
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
index 89f92fa..3b1ac6f 100644
--- a/tools/perf/tests/bp_signal_overflow.c
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -28,8 +28,7 @@
 
 static int overflows;
 
-__attribute__ ((noinline))
-static int test_function(void)
+static noinline int test_function(void)
 {
 	return time(NULL);
 }
diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c
index 7230e62..b4ebc75 100644
--- a/tools/perf/tests/bpf-script-test-prologue.c
+++ b/tools/perf/tests/bpf-script-test-prologue.c
@@ -10,6 +10,15 @@
 
 #include <uapi/linux/fs.h>
 
+/*
+ * If CONFIG_PROFILE_ALL_BRANCHES is selected,
+ * 'if' is redefined after the kernel headers are included.
+ * Restore 'if' for the BPF object code.
+ */
+#ifdef if
+# undef if
+#endif
+
 #define FMODE_READ		0x1
 #define FMODE_WRITE		0x2
 
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 9e08d29..3ccfd58 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
 	{
 		.desc = "Breakpoint overflow signal handler",
 		.func = test__bp_signal,
+		.is_supported = test__bp_signal_is_supported,
 	},
 	{
 		.desc = "Breakpoint overflow sampling",
 		.func = test__bp_signal_overflow,
+		.is_supported = test__bp_signal_is_supported,
 	},
 	{
 		.desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 		if (!perf_test__matches(t, curr, argc, argv))
 			continue;
 
+		if (t->is_supported && !t->is_supported()) {
+			pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+			continue;
+		}
+
 		pr_info("%2d: %-*s:", i, width, t->desc);
 
 		if (intlist__find(skiplist, i)) {
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f14e76..94b7c7b 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
 	unsigned char buf2[BUFSZ];
 	size_t ret_len;
 	u64 objdump_addr;
+	const char *objdump_name;
+	char decomp_name[KMOD_DECOMP_LEN];
 	int ret;
 
 	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
 		state->done[state->done_cnt++] = al.map->start;
 	}
 
+	objdump_name = al.map->dso->long_name;
+	if (dso__needs_decompress(al.map->dso)) {
+		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+						 decomp_name,
+						 sizeof(decomp_name)) < 0) {
+			pr_debug("decompression failed\n");
+			return -1;
+		}
+
+		objdump_name = decomp_name;
+	}
+
 	/* Read the object code using objdump */
 	objdump_addr = map__rip_2objdump(al.map, al.addr);
-	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+	if (dso__needs_decompress(al.map->dso))
+		unlink(objdump_name);
+
 	if (ret > 0) {
 		/*
 		 * The kernel maps are inaccurate - assume objdump is right in
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index dfe5c89..3e56d08 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -76,8 +76,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
 	return strcmp((const char *) symbol, funcs[idx]);
 }
 
-__attribute__ ((noinline))
-static int unwind_thread(struct thread *thread)
+static noinline int unwind_thread(struct thread *thread)
 {
 	struct perf_sample sample;
 	unsigned long cnt = 0;
@@ -108,8 +107,7 @@ static int unwind_thread(struct thread *thread)
 
 static int global_unwind_retval = -INT_MAX;
 
-__attribute__ ((noinline))
-static int compare(void *p1, void *p2)
+static noinline int compare(void *p1, void *p2)
 {
 	/* Any possible value should be 'thread' */
 	struct thread *thread = *(struct thread **)p1;
@@ -128,8 +126,7 @@ static int compare(void *p1, void *p2)
 	return p1 - p2;
 }
 
-__attribute__ ((noinline))
-static int krava_3(struct thread *thread)
+static noinline int krava_3(struct thread *thread)
 {
 	struct thread *array[2] = {thread, thread};
 	void *fp = &bsearch;
@@ -147,14 +144,12 @@ static int krava_3(struct thread *thread)
 	return global_unwind_retval;
 }
 
-__attribute__ ((noinline))
-static int krava_2(struct thread *thread)
+static noinline int krava_2(struct thread *thread)
 {
 	return krava_3(thread);
 }
 
-__attribute__ ((noinline))
-static int krava_1(struct thread *thread)
+static noinline int krava_1(struct thread *thread)
 {
 	return krava_2(thread);
 }
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7fad885..812a053 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1810,17 +1810,6 @@ static int test_pmu_events(void)
 	return ret;
 }
 
-static void debug_warn(const char *warn, va_list params)
-{
-	char msg[1024];
-
-	if (verbose <= 0)
-		return;
-
-	vsnprintf(msg, sizeof(msg), warn, params);
-	fprintf(stderr, " Warning: %s\n", msg);
-}
-
 int test__parse_events(int subtest __maybe_unused)
 {
 	int ret1, ret2 = 0;
@@ -1832,8 +1821,6 @@ do {							\
 		ret2 = ret1;				\
 } while (0)
 
-	set_warning_routine(debug_warn);
-
 	TEST_EVENTS(test__events);
 
 	if (test_pmu())
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 32873ec..cf00eba 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused)
 
 	evsel = perf_evlist__first(evlist);
 	evsel->attr.task = 1;
-	evsel->attr.sample_freq = 0;
+	evsel->attr.sample_freq = 1;
 	evsel->attr.inherit = 0;
 	evsel->attr.watermark = 0;
 	evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 6318596..5773638 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -34,6 +34,7 @@ struct test {
 		int (*get_nr)(void);
 		const char *(*get_desc)(int subtest);
 	} subtest;
+	bool (*is_supported)(void);
 };
 
 /* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(int subtest);
 
+bool test__bp_signal_is_supported(void);
+
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index d990ad0..27f41f2 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -46,12 +46,15 @@ static struct annotate_browser_opt {
 	.jump_arrows	= true,
 };
 
+struct arch;
+
 struct annotate_browser {
 	struct ui_browser b;
 	struct rb_root	  entries;
 	struct rb_node	  *curr_hot;
 	struct disasm_line  *selection;
 	struct disasm_line  **offsets;
+	struct arch	    *arch;
 	int		    nr_events;
 	u64		    start;
 	int		    nr_asm_entries;
@@ -125,43 +128,57 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
 	int i, pcnt_width = annotate_browser__pcnt_width(ab);
 	double percent_max = 0.0;
 	char bf[256];
+	bool show_title = false;
 
 	for (i = 0; i < ab->nr_events; i++) {
 		if (bdl->samples[i].percent > percent_max)
 			percent_max = bdl->samples[i].percent;
 	}
 
+	if ((row == 0) && (dl->offset == -1 || percent_max == 0.0)) {
+		if (ab->have_cycles) {
+			if (dl->ipc == 0.0 && dl->cycles == 0)
+				show_title = true;
+		} else
+			show_title = true;
+	}
+
 	if (dl->offset != -1 && percent_max != 0.0) {
-		if (percent_max != 0.0) {
-			for (i = 0; i < ab->nr_events; i++) {
-				ui_browser__set_percent_color(browser,
-							bdl->samples[i].percent,
-							current_entry);
-				if (annotate_browser__opts.show_total_period) {
-					ui_browser__printf(browser, "%6" PRIu64 " ",
-							   bdl->samples[i].nr);
-				} else {
-					ui_browser__printf(browser, "%6.2f ",
-							   bdl->samples[i].percent);
-				}
+		for (i = 0; i < ab->nr_events; i++) {
+			ui_browser__set_percent_color(browser,
+						bdl->samples[i].percent,
+						current_entry);
+			if (annotate_browser__opts.show_total_period) {
+				ui_browser__printf(browser, "%6" PRIu64 " ",
+						   bdl->samples[i].nr);
+			} else {
+				ui_browser__printf(browser, "%6.2f ",
+						   bdl->samples[i].percent);
 			}
-		} else {
-			ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
 		}
 	} else {
 		ui_browser__set_percent_color(browser, 0, current_entry);
-		ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
+
+		if (!show_title)
+			ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
+		else
+			ui_browser__printf(browser, "%*s", 7, "Percent");
 	}
 	if (ab->have_cycles) {
 		if (dl->ipc)
 			ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
-		else
+		else if (!show_title)
 			ui_browser__write_nstring(browser, " ", IPC_WIDTH);
+		else
+			ui_browser__printf(browser, "%*s ", IPC_WIDTH - 1, "IPC");
+
 		if (dl->cycles)
 			ui_browser__printf(browser, "%*" PRIu64 " ",
 					   CYCLES_WIDTH - 1, dl->cycles);
-		else
+		else if (!show_title)
 			ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
+		else
+			ui_browser__printf(browser, "%*s ", CYCLES_WIDTH - 1, "Cycle");
 	}
 
 	SLsmg_write_char(' ');
@@ -1056,7 +1073,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
 		  (nr_pcnt - 1);
 	}
 
-	err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), sizeof_bdl);
+	err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
+				  sizeof_bdl, &browser.arch);
 	if (err) {
 		char msg[BUFSIZ];
 		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index e99ba86..d903fd4 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -168,7 +168,8 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
 	if (map->dso->annotate_warned)
 		return -1;
 
-	err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0);
+	err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
+				  0, NULL);
 	if (err) {
 		char msg[BUFSIZ];
 		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 683f834..be1caab 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 	const char *s = strchr(ops->raw, '+');
 	const char *c = strchr(ops->raw, ',');
 
-	if (c++ != NULL)
+	/*
+	 * Skip over up to two possible operands to get to the address, e.g.:
+	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+	 */
+	if (c++ != NULL) {
 		ops->target.addr = strtoull(c, NULL, 16);
-	else
+		if (!ops->target.addr) {
+			c = strchr(c, ',');
+			if (c++ != NULL)
+				ops->target.addr = strtoull(c, NULL, 16);
+		}
+	} else {
 		ops->target.addr = strtoull(ops->raw, NULL, 16);
+	}
 
 	if (s++ != NULL) {
 		ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
 			   struct ins_operands *ops)
 {
+	const char *c = strchr(ops->raw, ',');
+
 	if (!ops->target.addr || ops->target.offset < 0)
 		return ins__raw_scnprintf(ins, bf, size, ops);
 
-	return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+	if (c != NULL) {
+		const char *c2 = strchr(c + 1, ',');
+
+		/* check for 3-op insn */
+		if (c2 != NULL)
+			c = c2;
+		c++;
+
+		/* mirror arch objdump's space-after-comma style */
+		if (*c == ' ')
+			c++;
+	}
+
+	return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+			 ins->name, c ? c - ops->raw : 0, ops->raw,
+			 ops->target.offset);
 }
 
 static struct ins_ops jump_ops = {
@@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 	char linkname[PATH_MAX];
 	char *build_id_filename;
 	char *build_id_path = NULL;
+	char *pos;
 
 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
 	    !dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 	if (!build_id_path)
 		return -1;
 
-	dirname(build_id_path);
+	/*
+	 * The old-style build-id cache has names of the form XX/XXXXXXX..,
+	 * while the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
+	 * Strip the last path component only for the new style.
+	 */
+	pos = strrchr(build_id_path, '/');
+	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+		dirname(build_id_path);
 
 	if (dso__is_kcore(dso) ||
 	    readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
@@ -1344,7 +1379,9 @@ static const char *annotate__norm_arch(const char *arch_name)
 	return normalize_arch((char *)arch_name);
 }
 
-int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
+int symbol__disassemble(struct symbol *sym, struct map *map,
+			const char *arch_name, size_t privsize,
+			struct arch **parch)
 {
 	struct dso *dso = map->dso;
 	char command[PATH_MAX * 2];
@@ -1370,6 +1407,9 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
 	if (arch == NULL)
 		return -ENOTSUP;
 
+	if (parch)
+		*parch = arch;
+
 	if (arch->init) {
 		err = arch->init(arch);
 		if (err) {
@@ -1396,31 +1436,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
 				sizeof(symfs_filename));
 		}
 	} else if (dso__needs_decompress(dso)) {
-		char tmp[PATH_MAX];
-		struct kmod_path m;
-		int fd;
-		bool ret;
+		char tmp[KMOD_DECOMP_LEN];
 
-		if (kmod_path__parse_ext(&m, symfs_filename))
-			goto out;
-
-		snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
-
-		fd = mkstemp(tmp);
-		if (fd < 0) {
-			free(m.ext);
-			goto out;
-		}
-
-		ret = decompress_to_file(m.ext, symfs_filename, fd);
-
-		if (ret)
-			pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
-
-		free(m.ext);
-		close(fd);
-
-		if (!ret)
+		if (dso__decompress_kmodule_path(dso, symfs_filename,
+						 tmp, sizeof(tmp)) < 0)
 			goto out;
 
 		strcpy(symfs_filename, tmp);
@@ -1429,7 +1448,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
 	snprintf(command, sizeof(command),
 		 "%s %s%s --start-address=0x%016" PRIx64
 		 " --stop-address=0x%016" PRIx64
-		 " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+		 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
 		 objdump_path ? objdump_path : "objdump",
 		 disassembler_style ? "-M " : "",
 		 disassembler_style ? disassembler_style : "",
@@ -1887,7 +1906,8 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
 	struct rb_root source_line = RB_ROOT;
 	u64 len;
 
-	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
+	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
+				0, NULL) < 0)
 		return -1;
 
 	len = symbol__size(sym);
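
The operand-skipping change to jump__parse() above handles AArch64 branches such as "tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>", where the target address only appears after two comma-separated operands. A standalone sketch of that parsing logic, using an assumed raw operand string:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(void)
  {
          /* assumed operand string, as objdump would print it */
          const char *raw = "w0, #26, ffff0000083cd190 <security_file_permission+0xd0>";
          const char *c = strchr(raw, ',');
          unsigned long long addr = 0;

          if (c++ != NULL) {
                  addr = strtoull(c, NULL, 16);
                  if (!addr) {
                          /* first operand was not an address, try the next one */
                          c = strchr(c, ',');
                          if (c++ != NULL)
                                  addr = strtoull(c, NULL, 16);
                  }
          } else {
                  addr = strtoull(raw, NULL, 16);
          }

          printf("branch target: %#llx\n", addr);
          return 0;
  }
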
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 948aa8e..2105503 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -158,7 +158,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
 int symbol__alloc_hist(struct symbol *sym);
 void symbol__annotate_zero_histograms(struct symbol *sym);
 
-int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize);
+int symbol__disassemble(struct symbol *sym, struct map *map,
+			const char *arch_name, size_t privsize,
+			struct arch **parch);
 
 enum symbol_disassemble_errno {
 	SYMBOL_ANNOTATE_ERRNO__SUCCESS		= 0,
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 0daf63b..5547457 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -322,6 +322,13 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
 	return auxtrace_queues__add_buffer(queues, idx, buffer);
 }
 
+static bool filter_cpu(struct perf_session *session, int cpu)
+{
+	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
+
+	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
+}
+
 int auxtrace_queues__add_event(struct auxtrace_queues *queues,
 			       struct perf_session *session,
 			       union perf_event *event, off_t data_offset,
@@ -331,6 +338,9 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
 	unsigned int idx;
 	int err;
 
+	if (filter_cpu(session, event->auxtrace.cpu))
+		return 0;
+
 	buffer = zalloc(sizeof(struct auxtrace_buffer));
 	if (!buffer)
 		return -ENOMEM;
@@ -947,6 +957,8 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
 	synth_opts->instructions = true;
 	synth_opts->branches = true;
 	synth_opts->transactions = true;
+	synth_opts->ptwrites = true;
+	synth_opts->pwr_events = true;
 	synth_opts->errors = true;
 	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
 	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
@@ -1030,6 +1042,12 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
 		case 'x':
 			synth_opts->transactions = true;
 			break;
+		case 'w':
+			synth_opts->ptwrites = true;
+			break;
+		case 'p':
+			synth_opts->pwr_events = true;
+			break;
 		case 'e':
 			synth_opts->errors = true;
 			break;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 9f0de72..33b5e6c 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -59,6 +59,8 @@ enum itrace_period_type {
  * @instructions: whether to synthesize 'instructions' events
  * @branches: whether to synthesize 'branches' events
  * @transactions: whether to synthesize events for transactions
+ * @ptwrites: whether to synthesize events for ptwrites
+ * @pwr_events: whether to synthesize power events
  * @errors: whether to synthesize decoder error events
  * @dont_decode: whether to skip decoding entirely
  * @log: write a decoding log
@@ -72,6 +74,7 @@ enum itrace_period_type {
  * @period: 'instructions' events period
  * @period_type: 'instructions' events period type
  * @initial_skip: skip N events at the beginning.
+ * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
  */
 struct itrace_synth_opts {
 	bool			set;
@@ -79,6 +82,8 @@ struct itrace_synth_opts {
 	bool			instructions;
 	bool			branches;
 	bool			transactions;
+	bool			ptwrites;
+	bool			pwr_events;
 	bool			errors;
 	bool			dont_decode;
 	bool			log;
@@ -92,6 +97,7 @@ struct itrace_synth_opts {
 	unsigned long long	period;
 	enum itrace_period_type	period_type;
 	unsigned long		initial_skip;
+	unsigned long		*cpu_bitmap;
 };
 
 /**
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 168cc49..e0148b0 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
 	return bf;
 }
 
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
-{
-	char *id_name = NULL, *ch;
-	struct stat sb;
-	char sbuild_id[SBUILD_ID_SIZE];
-
-	if (!dso->has_build_id)
-		goto err;
-
-	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
-	id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
-	if (!id_name)
-		goto err;
-	if (access(id_name, F_OK))
-		goto err;
-	if (lstat(id_name, &sb) == -1)
-		goto err;
-	if ((size_t)sb.st_size > size - 1)
-		goto err;
-	if (readlink(id_name, bf, size - 1) < 0)
-		goto err;
-
-	bf[sb.st_size] = '\0';
-
-	/*
-	 * link should be:
-	 * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
-	 */
-	ch = strrchr(bf, '/');
-	if (!ch)
-		goto err;
-	if (ch - 3 < bf)
-		goto err;
-
-	free(id_name);
-	return strncmp(".ko", ch - 3, 3) == 0;
-err:
-	pr_err("Invalid build id: %s\n", id_name ? :
-					 dso->long_name ? :
-					 dso->short_name ? :
-					 "[unknown]");
-	free(id_name);
-	return false;
-}
-
 #define dsos__for_each_with_build_id(pos, head)	\
 	list_for_each_entry(pos, head, node)	\
 		if (!pos->has_build_id)		\
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8a89b19..96690a5 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
 				    size_t size);
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
 
 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
 			   struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 0328f29..0175765 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -5,6 +5,7 @@
 #include <subcmd/pager.h>
 #include "../ui/ui.h"
 
+#include <linux/compiler.h>
 #include <linux/string.h>
 
 #define CMD_EXEC_PATH "--exec-path"
@@ -24,6 +25,6 @@ static inline int is_absolute_path(const char *path)
 	return path[0] == '/';
 }
 
-char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+char *mkpath(const char *fmt, ...) __printf(1, 2);
 
 #endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 8d724f0..31a7dea 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -335,32 +335,42 @@ static int perf_parse_long(const char *value, long *ret)
 	return 0;
 }
 
-static void die_bad_config(const char *name)
+static void bad_config(const char *name)
 {
 	if (config_file_name)
-		die("bad config value for '%s' in %s", name, config_file_name);
-	die("bad config value for '%s'", name);
+		pr_warning("bad config value for '%s' in %s, ignoring...\n", name, config_file_name);
+	else
+		pr_warning("bad config value for '%s', ignoring...\n", name);
 }
 
-u64 perf_config_u64(const char *name, const char *value)
+int perf_config_u64(u64 *dest, const char *name, const char *value)
 {
 	long long ret = 0;
 
-	if (!perf_parse_llong(value, &ret))
-		die_bad_config(name);
-	return (u64) ret;
+	if (!perf_parse_llong(value, &ret)) {
+		bad_config(name);
+		return -1;
+	}
+
+	*dest = ret;
+	return 0;
 }
 
-int perf_config_int(const char *name, const char *value)
+int perf_config_int(int *dest, const char *name, const char *value)
 {
 	long ret = 0;
-	if (!perf_parse_long(value, &ret))
-		die_bad_config(name);
-	return ret;
+	if (!perf_parse_long(value, &ret)) {
+		bad_config(name);
+		return -1;
+	}
+	*dest = ret;
+	return 0;
 }
 
 static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
 {
+	int ret;
+
 	*is_bool = 1;
 	if (!value)
 		return 1;
@@ -371,7 +381,7 @@ static int perf_config_bool_or_int(const char *name, const char *value, int *is_
 	if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
 		return 0;
 	*is_bool = 0;
-	return perf_config_int(name, value);
+	return perf_config_int(&ret, name, value) < 0 ? -1 : ret;
 }
 
 int perf_config_bool(const char *name, const char *value)
@@ -657,8 +667,7 @@ static int perf_config_set__init(struct perf_config_set *set)
 
 	user_config = strdup(mkpath("%s/.perfconfig", home));
 	if (user_config == NULL) {
-		warning("Not enough memory to process %s/.perfconfig, "
-			"ignoring it.", home);
+		pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.", home);
 		goto out;
 	}
 
@@ -671,8 +680,7 @@ static int perf_config_set__init(struct perf_config_set *set)
 	ret = 0;
 
 	if (st.st_uid && (st.st_uid != geteuid())) {
-		warning("File %s not owned by current user or root, "
-			"ignoring it.", user_config);
+		pr_warning("File %s not owned by current user or root, ignoring it.", user_config);
 		goto out_free;
 	}
 
@@ -795,7 +803,8 @@ void perf_config_set__delete(struct perf_config_set *set)
  */
 int config_error_nonbool(const char *var)
 {
-	return error("Missing value for '%s'", var);
+	pr_err("Missing value for '%s'", var);
+	return -1;
 }
 
 void set_buildid_dir(const char *dir)
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index 1a59a6b..b6bb11f 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -27,8 +27,8 @@ extern const char *config_exclusive_filename;
 typedef int (*config_fn_t)(const char *, const char *, void *);
 int perf_default_config(const char *, const char *, void *);
 int perf_config(config_fn_t fn, void *);
-int perf_config_int(const char *, const char *);
-u64 perf_config_u64(const char *, const char *);
+int perf_config_int(int *dest, const char *, const char *);
+int perf_config_u64(u64 *dest, const char *, const char *);
 int perf_config_bool(const char *, const char *);
 int config_error_nonbool(const char *);
 const char *perf_etc_perfconfig(void);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 89d5031..3149b70 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1444,10 +1444,8 @@ static int convert__config(const char *var, const char *value, void *cb)
 {
 	struct convert *c = cb;
 
-	if (!strcmp(var, "convert.queue-size")) {
-		c->queue_size = perf_config_u64(var, value);
-		return 0;
-	}
+	if (!strcmp(var, "convert.queue-size"))
+		return perf_config_u64(&c->queue_size, var, value);
 
 	return 0;
 }
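
With the change above, perf_config_int() and perf_config_u64() no longer die() on a malformed value; they fill an out parameter and report success or failure through their return value, which config callbacks can simply propagate, as convert__config() now does. A minimal sketch of a callback written against the new signatures (the 'example.threads' key and the callback name are hypothetical; this is perf-tree code, not a standalone program):

  #include <string.h>
  #include <linux/compiler.h>
  #include "config.h"

  static int example_threads = 1;

  /* hypothetical handler for an 'example.threads' config key */
  static int example__config(const char *var, const char *value,
                             void *cb __maybe_unused)
  {
          if (!strcmp(var, "example.threads"))
                  return perf_config_int(&example_threads, var, value);

          return 0;        /* unknown keys are ignored */
  }
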
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 8a23ea1..c818bdb 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -4,6 +4,7 @@
 
 #include <stdbool.h>
 #include <string.h>
+#include <linux/compiler.h>
 #include "event.h"
 #include "../ui/helpline.h"
 #include "../ui/progress.h"
@@ -40,16 +41,16 @@ extern int debug_data_convert;
 
 #define STRERR_BUFSIZE	128	/* For the buffer size of str_error_r */
 
-int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+int dump_printf(const char *fmt, ...) __printf(1, 2);
 void trace_event(union perf_event *event);
 
-int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
-int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__error(const char *format, ...) __printf(1, 2);
+int ui__warning(const char *format, ...) __printf(1, 2);
 
 void pr_stat(const char *fmt, ...);
 
-int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5)));
+int eprintf(int level, int var, const char *fmt, ...) __printf(3, 4);
+int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5);
 int veprintf(int level, int var, const char *fmt, va_list args);
 
 int perf_debug_option(const char *str);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index a96a99d..4e7ab61 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 }
 
+static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+{
+	int fd = -1;
+	struct kmod_path m;
+
+	if (!dso__needs_decompress(dso))
+		return -1;
+
+	if (kmod_path__parse_ext(&m, dso->long_name))
+		return -1;
+
+	if (!m.comp)
+		goto out;
+
+	fd = mkstemp(tmpbuf);
+	if (fd < 0) {
+		dso->load_errno = errno;
+		goto out;
+	}
+
+	if (!decompress_to_file(m.ext, name, fd)) {
+		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
+		close(fd);
+		fd = -1;
+	}
+
+out:
+	free(m.ext);
+	return fd;
+}
+
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
+{
+	char tmpbuf[] = KMOD_DECOMP_NAME;
+	int fd;
+
+	fd = decompress_kmodule(dso, name, tmpbuf);
+	unlink(tmpbuf);
+	return fd;
+}
+
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+				 char *pathname, size_t len)
+{
+	char tmpbuf[] = KMOD_DECOMP_NAME;
+	int fd;
+
+	fd = decompress_kmodule(dso, name, tmpbuf);
+	if (fd < 0) {
+		unlink(tmpbuf);
+		return -1;
+	}
+
+	strncpy(pathname, tmpbuf, len);
+	close(fd);
+	return 0;
+}
+
 /*
  * Parses kernel module specified in @path and updates
  * @m argument like:
@@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 	return 0;
 }
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+			  struct machine *machine)
+{
+	if (machine__is_host(machine))
+		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+	else
+		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+	/* _KMODULE_COMP should be next to _KMODULE */
+	if (m->kmod && m->comp)
+		dso->symtab_type++;
+
+	dso__set_short_name(dso, strdup(m->name), true);
+}
+
 /*
  * Global list of open DSOs and the counter.
  */
@@ -381,7 +454,7 @@ static int do_open(char *name)
 
 static int __open_dso(struct dso *dso, struct machine *machine)
 {
-	int fd;
+	int fd = -EINVAL;
 	char *root_dir = (char *)"";
 	char *name = malloc(PATH_MAX);
 
@@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
 		root_dir = machine->root_dir;
 
 	if (dso__read_binary_type_filename(dso, dso->binary_type,
-					    root_dir, name, PATH_MAX)) {
-		free(name);
-		return -EINVAL;
-	}
+					    root_dir, name, PATH_MAX))
+		goto out;
 
 	if (!is_regular_file(name))
-		return -EINVAL;
+		goto out;
+
+	if (dso__needs_decompress(dso)) {
+		char newpath[KMOD_DECOMP_LEN];
+		size_t len = sizeof(newpath);
+
+		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+			fd = -dso->load_errno;
+			goto out;
+		}
+
+		strcpy(name, newpath);
+	}
 
 	fd = do_open(name);
+
+	if (dso__needs_decompress(dso))
+		unlink(name);
+
+out:
 	free(name);
 	return fd;
 }
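
The two helpers added above, dso__decompress_kmodule_fd() and dso__decompress_kmodule_path(), replace the open-coded mkstemp()/decompress_to_file() sequences that callers such as symbol__disassemble() and read_object_code() used to carry. The caller-side pattern is: decompress into a KMOD_DECOMP_LEN buffer when dso__needs_decompress() says so, use the temporary path, then unlink it. A sketch of that pattern (hypothetical helper; perf-tree code that relies on dso.h):

  #include <unistd.h>
  #include "dso.h"

  /* hypothetical caller showing the decompress/use/unlink pattern */
  static int with_decompressed_module(struct dso *dso, const char *name)
  {
          char decomp[KMOD_DECOMP_LEN];
          int err = 0;

          if (dso__needs_decompress(dso)) {
                  if (dso__decompress_kmodule_path(dso, name, decomp,
                                                   sizeof(decomp)) < 0)
                          return -1;
                  name = decomp;
          }

          /* ... hand 'name' to objdump, open it, etc. ... */

          if (dso__needs_decompress(dso))
                  unlink(name);        /* drop the temporary copy */

          return err;
  }
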
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 12350b1..bd061ba 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
 bool is_kernel_module(const char *pathname, int cpumode);
 bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+				 char *pathname, size_t len);
+
+#define KMOD_DECOMP_NAME  "/tmp/perf-kmod-XXXXXX"
+#define KMOD_DECOMP_LEN   sizeof(KMOD_DECOMP_NAME)
 
 struct kmod_path {
 	char *name;
@@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
 #define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+			  struct machine *machine);
+
 /*
  * The dso__data_* external interface provides following functions:
  *   dso__data_get_fd
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 7c3fa1c..9967c87 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -252,6 +252,127 @@ enum auxtrace_error_type {
 	PERF_AUXTRACE_ERROR_MAX
 };
 
+/* Attribute type for custom synthesized events */
+#define PERF_TYPE_SYNTH		(INT_MAX + 1U)
+
+/* Attribute config for custom synthesized events */
+enum perf_synth_id {
+	PERF_SYNTH_INTEL_PTWRITE,
+	PERF_SYNTH_INTEL_MWAIT,
+	PERF_SYNTH_INTEL_PWRE,
+	PERF_SYNTH_INTEL_EXSTOP,
+	PERF_SYNTH_INTEL_PWRX,
+	PERF_SYNTH_INTEL_CBR,
+};
+
+/*
+ * Raw data formats for synthesized events. Note that 4 bytes of padding are
+ * present to match the 'size' member of PERF_SAMPLE_RAW data which is always
+ * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
+ * See perf_sample__synth_ptr() and perf_synth__raw_data().  It also means the
+ * structure sizes are 4 bytes bigger than the raw_size; see
+ * perf_synth__raw_size().
+ */
+
+struct perf_synth_intel_ptwrite {
+	u32 padding;
+	union {
+		struct {
+			u32	ip		:  1,
+				reserved	: 31;
+		};
+		u32	flags;
+	};
+	u64	payload;
+};
+
+struct perf_synth_intel_mwait {
+	u32 padding;
+	u32 reserved;
+	union {
+		struct {
+			u64	hints		:  8,
+				reserved1	: 24,
+				extensions	:  2,
+				reserved2	: 30;
+		};
+		u64	payload;
+	};
+};
+
+struct perf_synth_intel_pwre {
+	u32 padding;
+	u32 reserved;
+	union {
+		struct {
+			u64	reserved1	:  7,
+				hw		:  1,
+				subcstate	:  4,
+				cstate		:  4,
+				reserved2	: 48;
+		};
+		u64	payload;
+	};
+};
+
+struct perf_synth_intel_exstop {
+	u32 padding;
+	union {
+		struct {
+			u32	ip		:  1,
+				reserved	: 31;
+		};
+		u32	flags;
+	};
+};
+
+struct perf_synth_intel_pwrx {
+	u32 padding;
+	u32 reserved;
+	union {
+		struct {
+			u64	deepest_cstate	:  4,
+				last_cstate	:  4,
+				wake_reason	:  4,
+				reserved1	: 52;
+		};
+		u64	payload;
+	};
+};
+
+struct perf_synth_intel_cbr {
+	u32 padding;
+	union {
+		struct {
+			u32	cbr		:  8,
+				reserved1	:  8,
+				max_nonturbo	:  8,
+				reserved2	:  8;
+		};
+		u32	flags;
+	};
+	u32 freq;
+	u32 reserved3;
+};
+
+/*
+ * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
+ * 8-byte alignment.
+ */
+static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
+{
+	return sample->raw_data - 4;
+}
+
+static inline void *perf_synth__raw_data(void *p)
+{
+	return p + 4;
+}
+
+#define perf_synth__raw_size(d) (sizeof(d) - 4)
+
+#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
+
 /*
  * The kernel collects the number of events it couldn't send in a stretch and
  * when possible sends this number in a PERF_RECORD_LOST event. The number of
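
The comment block added above pins down the layout convention for the synthesized-event structs: the leading 4 bytes of padding make the payload line up with PERF_SAMPLE_RAW data, so perf_sample__synth_ptr() steps back by 4 bytes, perf_synth__raw_data() steps forward by 4, and the advertised raw_size is 4 bytes smaller than the struct. A standalone sketch of that arithmetic, with the ptwrite struct copied from the patch (kernel u32/u64 types swapped for stdint ones so it builds on its own):

  #include <stdint.h>
  #include <stdio.h>

  struct perf_synth_intel_ptwrite {
          uint32_t padding;
          union {
                  struct {
                          uint32_t ip       :  1,
                                   reserved : 31;
                  };
                  uint32_t flags;
          };
          uint64_t payload;
  };

  #define perf_synth__raw_size(d) (sizeof(d) - 4)

  int main(void)
  {
          struct perf_synth_intel_ptwrite ptw;

          /* the raw_size seen in the sample excludes the 4 padding bytes */
          printf("struct size: %zu, raw size: %zu\n",
                 sizeof(ptw), perf_synth__raw_size(ptw));
          return 0;
  }
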
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 94cea43..8d601fb 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -1,6 +1,7 @@
 #ifndef __PERF_EVLIST_H
 #define __PERF_EVLIST_H 1
 
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/refcount.h>
 #include <linux/list.h>
@@ -34,7 +35,7 @@ struct perf_mmap {
 	refcount_t	 refcnt;
 	u64		 prev;
 	struct auxtrace_mmap auxtrace_mmap;
-	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
+	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
 };
 
 static inline size_t
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4f7902..6f4882f 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -11,13 +11,17 @@
 #include <errno.h>
 #include <inttypes.h>
 #include <linux/bitops.h>
+#include <api/fs/fs.h>
 #include <api/fs/tracing_path.h>
 #include <traceevent/event-parse.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/perf_event.h>
+#include <linux/compiler.h>
 #include <linux/err.h>
 #include <sys/ioctl.h>
 #include <sys/resource.h>
+#include <sys/types.h>
+#include <dirent.h>
 #include "asm/bug.h"
 #include "callchain.h"
 #include "cgroup.h"
@@ -273,8 +277,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
 	struct perf_evsel *evsel;
 
 	event_attr_init(&attr);
+	/*
+	 * sample_period is an unnamed union member, which cannot be set
+	 * via a designated struct initializer in older compilers such as
+	 * gcc 4.4.7.
+	 *
+	 * Set it here just for probing the precise_ip:
+	 */
+	attr.sample_period = 1;
 
 	perf_event_attr__set_max_precise_ip(&attr);
+	/*
+	 * Now let the usual logic to set up the perf_event_attr defaults
+	 * to kick in when we return and before perf_evsel__open() is called.
+	 */
+	attr.sample_period = 0;
 
 	evsel = perf_evsel__new(&attr);
 	if (evsel == NULL)
@@ -1429,7 +1445,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
 }
 
 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
-				void *priv __attribute__((unused)))
+				void *priv __maybe_unused)
 {
 	return fprintf(fp, "  %-32s %s\n", name, val);
 }
@@ -2459,6 +2475,42 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 	return false;
 }
 
+static bool find_process(const char *name)
+{
+	size_t len = strlen(name);
+	DIR *dir;
+	struct dirent *d;
+	int ret = -1;
+
+	dir = opendir(procfs__mountpoint());
+	if (!dir)
+		return false;
+
+	/* Walk through the directory. */
+	while (ret && (d = readdir(dir)) != NULL) {
+		char path[PATH_MAX];
+		char *data;
+		size_t size;
+
+		if ((d->d_type != DT_DIR) ||
+		     !strcmp(".", d->d_name) ||
+		     !strcmp("..", d->d_name))
+			continue;
+
+		scnprintf(path, sizeof(path), "%s/%s/comm",
+			  procfs__mountpoint(), d->d_name);
+
+		if (filename__read_str(path, &data, &size))
+			continue;
+
+		ret = strncmp(name, data, len);
+		free(data);
+	}
+
+	closedir(dir);
+	return ret ? false : true;
+}
+
 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			      int err, char *msg, size_t size)
 {
diff --git a/tools/perf/util/genelf_debug.c b/tools/perf/util/genelf_debug.c
index 5980f7d..40789d8 100644
--- a/tools/perf/util/genelf_debug.c
+++ b/tools/perf/util/genelf_debug.c
@@ -11,6 +11,7 @@
  * @remark Copyright 2007 OProfile authors
  * @author Philippe Elie
  */
+#include <linux/compiler.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <getopt.h>
@@ -125,7 +126,7 @@ struct debug_line_header {
 	 * and filesize, last entry is followed by an empty string.
 	 */
 	/* follow the first program statement */
-} __attribute__((packed));
+} __packed;
 
 /* DWARF 2 spec talk only about one possible compilation unit header while
  * binutils can handle two flavours of dwarf 2, 32 and 64 bits, this is not
@@ -138,7 +139,7 @@ struct compilation_unit_header {
 	uhalf version;
 	uword debug_abbrev_offset;
 	ubyte pointer_size;
-} __attribute__((packed));
+} __packed;
 
 #define DW_LNS_num_opcode (DW_LNS_set_isa + 1)
 
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 314a071..76ed7d0 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,6 +8,7 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
@@ -841,7 +842,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
 
 /*
  * default get_cpuid(): nothing gets recorded
- * actual implementation must be in arch/$(ARCH)/util/header.c
+ * actual implementation must be in arch/$(SRCARCH)/util/header.c
  */
 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
 {
@@ -1274,7 +1275,7 @@ read_event_desc(struct perf_header *ph, int fd)
 }
 
 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
-				void *priv __attribute__((unused)))
+				void *priv __maybe_unused)
 {
 	return fprintf(fp, ", %s = %s", name, val);
 }
@@ -1469,8 +1470,16 @@ static int __event_process_build_id(struct build_id_event *bev,
 
 		dso__set_build_id(dso, &bev->build_id);
 
-		if (!is_kernel_module(filename, cpumode))
-			dso->kernel = dso_type;
+		if (dso_type != DSO_TYPE_USER) {
+			struct kmod_path m = { .name = NULL, };
+
+			if (!kmod_path__parse_name(&m, filename) && m.kmod)
+				dso__set_module_info(dso, &m, machine);
+			else
+				dso->kernel = dso_type;
+
+			free(m.name);
+		}
 
 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
 				  sbuild_id);
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index 1c88ad6..15b9530 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -12,7 +12,7 @@ static int perf_unknown_cmd_config(const char *var, const char *value,
 				   void *cb __maybe_unused)
 {
 	if (!strcmp(var, "help.autocorrect"))
-		autocorrect = perf_config_int(var,value);
+		return perf_config_int(&autocorrect, var,value);
 
 	return 0;
 }
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index b2834ac..218ee2b 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -866,8 +866,6 @@ static void intel_bts_print_info(u64 *arr, int start, int finish)
 		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
 }
 
-u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE];
-
 int intel_bts_process_auxtrace_info(union perf_event *event,
 				    struct perf_session *session)
 {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 7cf7f7a..aa1593c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -64,6 +64,25 @@ enum intel_pt_pkt_state {
 	INTEL_PT_STATE_FUP_NO_TIP,
 };
 
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+	switch (pkt_state) {
+	case INTEL_PT_STATE_NO_PSB:
+	case INTEL_PT_STATE_NO_IP:
+	case INTEL_PT_STATE_ERR_RESYNC:
+	case INTEL_PT_STATE_IN_SYNC:
+	case INTEL_PT_STATE_TNT:
+		return true;
+	case INTEL_PT_STATE_TIP:
+	case INTEL_PT_STATE_TIP_PGD:
+	case INTEL_PT_STATE_FUP:
+	case INTEL_PT_STATE_FUP_NO_TIP:
+		return false;
+	default:
+		return true;
+	}
+}
+
 #ifdef INTEL_PT_STRICT
 #define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
 #define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
@@ -87,11 +106,13 @@ struct intel_pt_decoder {
 	const unsigned char *buf;
 	size_t len;
 	bool return_compression;
+	bool branch_enable;
 	bool mtc_insn;
 	bool pge;
 	bool have_tma;
 	bool have_cyc;
 	bool fixup_last_mtc;
+	bool have_last_ip;
 	uint64_t pos;
 	uint64_t last_ip;
 	uint64_t ip;
@@ -99,6 +120,7 @@ struct intel_pt_decoder {
 	uint64_t timestamp;
 	uint64_t tsc_timestamp;
 	uint64_t ref_timestamp;
+	uint64_t sample_timestamp;
 	uint64_t ret_addr;
 	uint64_t ctc_timestamp;
 	uint64_t ctc_delta;
@@ -119,6 +141,7 @@ struct intel_pt_decoder {
 	int pkt_len;
 	int last_packet_type;
 	unsigned int cbr;
+	unsigned int cbr_seen;
 	unsigned int max_non_turbo_ratio;
 	double max_non_turbo_ratio_fp;
 	double cbr_cyc_to_tsc;
@@ -136,9 +159,18 @@ struct intel_pt_decoder {
 	bool continuous_period;
 	bool overflow;
 	bool set_fup_tx_flags;
+	bool set_fup_ptw;
+	bool set_fup_mwait;
+	bool set_fup_pwre;
+	bool set_fup_exstop;
 	unsigned int fup_tx_flags;
 	unsigned int tx_flags;
+	uint64_t fup_ptw_payload;
+	uint64_t fup_mwait_payload;
+	uint64_t fup_pwre_payload;
+	uint64_t cbr_payload;
 	uint64_t timestamp_insn_cnt;
+	uint64_t sample_insn_cnt;
 	uint64_t stuck_ip;
 	int no_progress;
 	int stuck_ip_prd;
@@ -192,6 +224,7 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
 	decoder->pgd_ip             = params->pgd_ip;
 	decoder->data               = params->data;
 	decoder->return_compression = params->return_compression;
+	decoder->branch_enable      = params->branch_enable;
 
 	decoder->period             = params->period;
 	decoder->period_type        = params->period_type;
@@ -398,6 +431,7 @@ static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
 {
 	decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+	decoder->have_last_ip = true;
 }
 
 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -635,6 +669,8 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
 	case INTEL_PT_PAD:
 	case INTEL_PT_VMCS:
 	case INTEL_PT_MNT:
+	case INTEL_PT_PTWRITE:
+	case INTEL_PT_PTWRITE_IP:
 		return 0;
 
 	case INTEL_PT_MTC:
@@ -675,6 +711,12 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
 		break;
 
 	case INTEL_PT_TSC:
+		/*
+		 * For now, do not support using TSC packets - see
+		 * intel_pt_calc_cyc_to_tsc().
+		 */
+		if (data->from_mtc)
+			return 1;
 		timestamp = pkt_info->packet.payload |
 			    (data->timestamp & (0xffULL << 56));
 		if (data->from_mtc && timestamp < data->timestamp &&
@@ -733,6 +775,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
 
 	case INTEL_PT_TIP_PGD:
 	case INTEL_PT_TRACESTOP:
+	case INTEL_PT_EXSTOP:
+	case INTEL_PT_EXSTOP_IP:
+	case INTEL_PT_MWAIT:
+	case INTEL_PT_PWRE:
+	case INTEL_PT_PWRX:
 	case INTEL_PT_OVF:
 	case INTEL_PT_BAD: /* Does not happen */
 	default:
@@ -787,6 +834,14 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
 		.cbr_cyc_to_tsc = 0,
 	};
 
+	/*
+	 * For now, do not support using TSC packets, for at least these reasons:
+	 * 1) timing might have stopped
+	 * 2) TSC packets within PSB+ can slip against CYC packets
+	 */
+	if (!from_mtc)
+		return;
+
 	intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
 }
 
@@ -898,6 +953,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
 
 	decoder->tot_insn_cnt += insn_cnt;
 	decoder->timestamp_insn_cnt += insn_cnt;
+	decoder->sample_insn_cnt += insn_cnt;
 	decoder->period_insn_cnt += insn_cnt;
 
 	if (err) {
@@ -990,6 +1046,57 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
 	return err;
 }
 
+static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
+{
+	bool ret = false;
+
+	if (decoder->set_fup_tx_flags) {
+		decoder->set_fup_tx_flags = false;
+		decoder->tx_flags = decoder->fup_tx_flags;
+		decoder->state.type = INTEL_PT_TRANSACTION;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		decoder->state.flags = decoder->fup_tx_flags;
+		return true;
+	}
+	if (decoder->set_fup_ptw) {
+		decoder->set_fup_ptw = false;
+		decoder->state.type = INTEL_PT_PTW;
+		decoder->state.flags |= INTEL_PT_FUP_IP;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		decoder->state.ptw_payload = decoder->fup_ptw_payload;
+		return true;
+	}
+	if (decoder->set_fup_mwait) {
+		decoder->set_fup_mwait = false;
+		decoder->state.type = INTEL_PT_MWAIT_OP;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		decoder->state.mwait_payload = decoder->fup_mwait_payload;
+		ret = true;
+	}
+	if (decoder->set_fup_pwre) {
+		decoder->set_fup_pwre = false;
+		decoder->state.type |= INTEL_PT_PWR_ENTRY;
+		decoder->state.type &= ~INTEL_PT_BRANCH;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		decoder->state.pwre_payload = decoder->fup_pwre_payload;
+		ret = true;
+	}
+	if (decoder->set_fup_exstop) {
+		decoder->set_fup_exstop = false;
+		decoder->state.type |= INTEL_PT_EX_STOP;
+		decoder->state.type &= ~INTEL_PT_BRANCH;
+		decoder->state.flags |= INTEL_PT_FUP_IP;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		ret = true;
+	}
+	return ret;
+}
+
 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
 {
 	struct intel_pt_insn intel_pt_insn;
@@ -1003,15 +1110,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
 		if (err == INTEL_PT_RETURN)
 			return 0;
 		if (err == -EAGAIN) {
-			if (decoder->set_fup_tx_flags) {
-				decoder->set_fup_tx_flags = false;
-				decoder->tx_flags = decoder->fup_tx_flags;
-				decoder->state.type = INTEL_PT_TRANSACTION;
-				decoder->state.from_ip = decoder->ip;
-				decoder->state.to_ip = 0;
-				decoder->state.flags = decoder->fup_tx_flags;
+			if (intel_pt_fup_event(decoder))
 				return 0;
-			}
 			return err;
 		}
 		decoder->set_fup_tx_flags = false;
@@ -1360,7 +1460,9 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
 
 static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
 {
-	unsigned int cbr = decoder->packet.payload;
+	unsigned int cbr = decoder->packet.payload & 0xff;
+
+	decoder->cbr_payload = decoder->packet.payload;
 
 	if (decoder->cbr == cbr)
 		return;
@@ -1417,6 +1519,13 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
 		case INTEL_PT_TRACESTOP:
 		case INTEL_PT_BAD:
 		case INTEL_PT_PSB:
+		case INTEL_PT_PTWRITE:
+		case INTEL_PT_PTWRITE_IP:
+		case INTEL_PT_EXSTOP:
+		case INTEL_PT_EXSTOP_IP:
+		case INTEL_PT_MWAIT:
+		case INTEL_PT_PWRE:
+		case INTEL_PT_PWRX:
 			decoder->have_tma = false;
 			intel_pt_log("ERROR: Unexpected packet\n");
 			return -EAGAIN;
@@ -1446,7 +1555,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
 
 		case INTEL_PT_FUP:
 			decoder->pge = true;
-			intel_pt_set_last_ip(decoder);
+			if (decoder->packet.count)
+				intel_pt_set_last_ip(decoder);
 			break;
 
 		case INTEL_PT_MODE_TSX:
@@ -1497,6 +1607,13 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
 		case INTEL_PT_MODE_TSX:
 		case INTEL_PT_BAD:
 		case INTEL_PT_PSBEND:
+		case INTEL_PT_PTWRITE:
+		case INTEL_PT_PTWRITE_IP:
+		case INTEL_PT_EXSTOP:
+		case INTEL_PT_EXSTOP_IP:
+		case INTEL_PT_MWAIT:
+		case INTEL_PT_PWRE:
+		case INTEL_PT_PWRX:
 			intel_pt_log("ERROR: Missing TIP after FUP\n");
 			decoder->pkt_state = INTEL_PT_STATE_ERR3;
 			return -ENOENT;
@@ -1625,6 +1742,15 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 				break;
 			}
 			intel_pt_set_last_ip(decoder);
+			if (!decoder->branch_enable) {
+				decoder->ip = decoder->last_ip;
+				if (intel_pt_fup_event(decoder))
+					return 0;
+				no_tip = false;
+				break;
+			}
+			if (decoder->set_fup_mwait)
+				no_tip = true;
 			err = intel_pt_walk_fup(decoder);
 			if (err != -EAGAIN) {
 				if (err)
@@ -1650,6 +1776,8 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
 			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psbend(decoder);
 			if (err == -EAGAIN)
@@ -1696,6 +1824,16 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 
 		case INTEL_PT_CBR:
 			intel_pt_calc_cbr(decoder);
+			if (!decoder->branch_enable &&
+			    decoder->cbr != decoder->cbr_seen) {
+				decoder->cbr_seen = decoder->cbr;
+				decoder->state.type = INTEL_PT_CBR_CHG;
+				decoder->state.from_ip = decoder->ip;
+				decoder->state.to_ip = 0;
+				decoder->state.cbr_payload =
+							decoder->packet.payload;
+				return 0;
+			}
 			break;
 
 		case INTEL_PT_MODE_EXEC:
@@ -1722,6 +1860,71 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 		case INTEL_PT_PAD:
 			break;
 
+		case INTEL_PT_PTWRITE_IP:
+			decoder->fup_ptw_payload = decoder->packet.payload;
+			err = intel_pt_get_next_packet(decoder);
+			if (err)
+				return err;
+			if (decoder->packet.type == INTEL_PT_FUP) {
+				decoder->set_fup_ptw = true;
+				no_tip = true;
+			} else {
+				intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
+						decoder->pos);
+			}
+			goto next;
+
+		case INTEL_PT_PTWRITE:
+			decoder->state.type = INTEL_PT_PTW;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			decoder->state.ptw_payload = decoder->packet.payload;
+			return 0;
+
+		case INTEL_PT_MWAIT:
+			decoder->fup_mwait_payload = decoder->packet.payload;
+			decoder->set_fup_mwait = true;
+			break;
+
+		case INTEL_PT_PWRE:
+			if (decoder->set_fup_mwait) {
+				decoder->fup_pwre_payload =
+							decoder->packet.payload;
+				decoder->set_fup_pwre = true;
+				break;
+			}
+			decoder->state.type = INTEL_PT_PWR_ENTRY;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			decoder->state.pwrx_payload = decoder->packet.payload;
+			return 0;
+
+		case INTEL_PT_EXSTOP_IP:
+			err = intel_pt_get_next_packet(decoder);
+			if (err)
+				return err;
+			if (decoder->packet.type == INTEL_PT_FUP) {
+				decoder->set_fup_exstop = true;
+				no_tip = true;
+			} else {
+				intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
+						decoder->pos);
+			}
+			goto next;
+
+		case INTEL_PT_EXSTOP:
+			decoder->state.type = INTEL_PT_EX_STOP;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			return 0;
+
+		case INTEL_PT_PWRX:
+			decoder->state.type = INTEL_PT_PWR_EXIT;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			decoder->state.pwrx_payload = decoder->packet.payload;
+			return 0;
+
 		default:
 			return intel_pt_bug(decoder);
 		}
@@ -1730,8 +1933,9 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 
 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
 {
-	return decoder->last_ip || decoder->packet.count == 0 ||
-	       decoder->packet.count == 3 || decoder->packet.count == 6;
+	return decoder->packet.count &&
+	       (decoder->have_last_ip || decoder->packet.count == 3 ||
+		decoder->packet.count == 6);
 }
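The count checked here is the TIP/FUP IPBytes field: 0 means the IP was suppressed, 3 (48-bit, sign-extended) and 6 (full 64-bit) are self-contained, and the remaining encodings only replace the low bytes of the previous IP, which is why have_last_ip now gates them. A sketch of the reconstruction this relies on, with a hypothetical helper name and assuming the usual IPBytes encodings:

	static uint64_t ip_uncompress(unsigned int count, uint64_t payload,
				      uint64_t last_ip)
	{
		switch (count) {
		case 1: /* low 16 bits updated */
			return (last_ip & ~0xffffULL) | payload;
		case 2: /* low 32 bits updated */
			return (last_ip & ~0xffffffffULL) | payload;
		case 3: /* 48 bits, sign-extended */
			return payload & (1ULL << 47) ?
			       payload | 0xffff000000000000ULL : payload;
		case 4: /* low 48 bits updated */
			return (last_ip & 0xffff000000000000ULL) | payload;
		case 6: /* full 64-bit IP */
			return payload;
		default: /* 0: IP suppressed */
			return 0;
		}
	}
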
 
 /* Walk PSB+ packets to get in sync. */
@@ -1750,6 +1954,13 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
 			__fallthrough;
 		case INTEL_PT_TIP_PGE:
 		case INTEL_PT_TIP:
+		case INTEL_PT_PTWRITE:
+		case INTEL_PT_PTWRITE_IP:
+		case INTEL_PT_EXSTOP:
+		case INTEL_PT_EXSTOP_IP:
+		case INTEL_PT_MWAIT:
+		case INTEL_PT_PWRE:
+		case INTEL_PT_PWRX:
 			intel_pt_log("ERROR: Unexpected packet\n");
 			return -ENOENT;
 
@@ -1854,14 +2065,10 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_FUP:
-			if (decoder->overflow) {
-				if (intel_pt_have_ip(decoder))
-					intel_pt_set_ip(decoder);
-				if (decoder->ip)
-					return 0;
-			}
-			if (decoder->packet.count)
-				intel_pt_set_last_ip(decoder);
+			if (intel_pt_have_ip(decoder))
+				intel_pt_set_ip(decoder);
+			if (decoder->ip)
+				return 0;
 			break;
 
 		case INTEL_PT_MTC:
@@ -1910,6 +2117,9 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
+			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psb(decoder);
 			if (err)
 				return err;
@@ -1925,6 +2135,13 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 		case INTEL_PT_VMCS:
 		case INTEL_PT_MNT:
 		case INTEL_PT_PAD:
+		case INTEL_PT_PTWRITE:
+		case INTEL_PT_PTWRITE_IP:
+		case INTEL_PT_EXSTOP:
+		case INTEL_PT_EXSTOP_IP:
+		case INTEL_PT_MWAIT:
+		case INTEL_PT_PWRE:
+		case INTEL_PT_PWRX:
 		default:
 			break;
 		}
@@ -1935,6 +2152,19 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
 {
 	int err;
 
+	decoder->set_fup_tx_flags = false;
+	decoder->set_fup_ptw = false;
+	decoder->set_fup_mwait = false;
+	decoder->set_fup_pwre = false;
+	decoder->set_fup_exstop = false;
+
+	if (!decoder->branch_enable) {
+		decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+		decoder->overflow = false;
+		decoder->state.type = 0; /* Do not have a sample */
+		return 0;
+	}
+
 	intel_pt_log("Scanning for full IP\n");
 	err = intel_pt_walk_to_ip(decoder);
 	if (err)
@@ -2043,6 +2273,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 
 	decoder->pge = false;
 	decoder->continuous_period = false;
+	decoder->have_last_ip = false;
 	decoder->last_ip = 0;
 	decoder->ip = 0;
 	intel_pt_clear_stack(&decoder->stack);
@@ -2051,6 +2282,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 	if (err)
 		return err;
 
+	decoder->have_last_ip = true;
 	decoder->pkt_state = INTEL_PT_STATE_NO_IP;
 
 	err = intel_pt_walk_psb(decoder);
@@ -2069,7 +2301,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 
 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
 {
-	uint64_t est = decoder->timestamp_insn_cnt << 1;
+	uint64_t est = decoder->sample_insn_cnt << 1;
 
 	if (!decoder->cbr || !decoder->max_non_turbo_ratio)
 		goto out;
@@ -2077,7 +2309,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
 	est *= decoder->max_non_turbo_ratio;
 	est /= decoder->cbr;
 out:
-	return decoder->timestamp + est;
+	return decoder->sample_timestamp + est;
 }
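The estimate is now anchored to the last timestamp actually reported in a sample (sample_timestamp/sample_insn_cnt) instead of the last one decoded, so est_timestamp cannot run ahead of what the user has seen. The arithmetic itself is unchanged: roughly two TSC ticks per instruction at maximum non-turbo speed, scaled by the current core-to-bus ratio. For example, assuming max_non_turbo_ratio = 24 and cbr = 12, then 1000 instructions since the last sample give est = (1000 << 1) * 24 / 12 = 4000 extra TSC ticks.
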
 
 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2093,8 +2325,10 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 			err = intel_pt_sync(decoder);
 			break;
 		case INTEL_PT_STATE_NO_IP:
+			decoder->have_last_ip = false;
 			decoder->last_ip = 0;
-			/* Fall through */
+			decoder->ip = 0;
+			__fallthrough;
 		case INTEL_PT_STATE_ERR_RESYNC:
 			err = intel_pt_sync_ip(decoder);
 			break;
@@ -2130,15 +2364,29 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 		}
 	} while (err == -ENOLINK);
 
-	decoder->state.err = err ? intel_pt_ext_err(err) : 0;
-	decoder->state.timestamp = decoder->timestamp;
+	if (err) {
+		decoder->state.err = intel_pt_ext_err(err);
+		decoder->state.from_ip = decoder->ip;
+		decoder->sample_timestamp = decoder->timestamp;
+		decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+	} else {
+		decoder->state.err = 0;
+		if (decoder->cbr != decoder->cbr_seen && decoder->state.type) {
+			decoder->cbr_seen = decoder->cbr;
+			decoder->state.type |= INTEL_PT_CBR_CHG;
+			decoder->state.cbr_payload = decoder->cbr_payload;
+		}
+		if (intel_pt_sample_time(decoder->pkt_state)) {
+			decoder->sample_timestamp = decoder->timestamp;
+			decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+		}
+	}
+
+	decoder->state.timestamp = decoder->sample_timestamp;
 	decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
 	decoder->state.cr3 = decoder->cr3;
 	decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
 
-	if (err)
-		decoder->state.from_ip = decoder->ip;
-
 	return &decoder->state;
 }
 
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index e90619a..921b22e 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -25,11 +25,18 @@
 #define INTEL_PT_IN_TX		(1 << 0)
 #define INTEL_PT_ABORT_TX	(1 << 1)
 #define INTEL_PT_ASYNC		(1 << 2)
+#define INTEL_PT_FUP_IP		(1 << 3)
 
 enum intel_pt_sample_type {
 	INTEL_PT_BRANCH		= 1 << 0,
 	INTEL_PT_INSTRUCTION	= 1 << 1,
 	INTEL_PT_TRANSACTION	= 1 << 2,
+	INTEL_PT_PTW		= 1 << 3,
+	INTEL_PT_MWAIT_OP	= 1 << 4,
+	INTEL_PT_PWR_ENTRY	= 1 << 5,
+	INTEL_PT_EX_STOP	= 1 << 6,
+	INTEL_PT_PWR_EXIT	= 1 << 7,
+	INTEL_PT_CBR_CHG	= 1 << 8,
 };
 
 enum intel_pt_period_type {
@@ -63,6 +70,11 @@ struct intel_pt_state {
 	uint64_t timestamp;
 	uint64_t est_timestamp;
 	uint64_t trace_nr;
+	uint64_t ptw_payload;
+	uint64_t mwait_payload;
+	uint64_t pwre_payload;
+	uint64_t pwrx_payload;
+	uint64_t cbr_payload;
 	uint32_t flags;
 	enum intel_pt_insn_op insn_op;
 	int insn_len;
@@ -87,6 +99,7 @@ struct intel_pt_params {
 	bool (*pgd_ip)(uint64_t ip, void *data);
 	void *data;
 	bool return_compression;
+	bool branch_enable;
 	uint64_t period;
 	enum intel_pt_period_type period_type;
 	unsigned max_non_turbo_ratio;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index debe751..45b64f9 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -16,6 +16,7 @@
 #ifndef INCLUDE__INTEL_PT_LOG_H__
 #define INCLUDE__INTEL_PT_LOG_H__
 
+#include <linux/compiler.h>
 #include <stdint.h>
 #include <inttypes.h>
 
@@ -34,8 +35,7 @@ void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
 void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
 				 uint64_t ip);
 
-__attribute__((format(printf, 1, 2)))
-void __intel_pt_log(const char *fmt, ...);
+void __intel_pt_log(const char *fmt, ...) __printf(1, 2);
 
 #define intel_pt_log(fmt, ...) \
 	do { \
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 7528ae4..ba4c9dd 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -64,6 +64,13 @@ static const char * const packet_name[] = {
 	[INTEL_PT_PIP]		= "PIP",
 	[INTEL_PT_OVF]		= "OVF",
 	[INTEL_PT_MNT]		= "MNT",
+	[INTEL_PT_PTWRITE]	= "PTWRITE",
+	[INTEL_PT_PTWRITE_IP]	= "PTWRITE",
+	[INTEL_PT_EXSTOP]	= "EXSTOP",
+	[INTEL_PT_EXSTOP_IP]	= "EXSTOP",
+	[INTEL_PT_MWAIT]	= "MWAIT",
+	[INTEL_PT_PWRE]		= "PWRE",
+	[INTEL_PT_PWRX]		= "PWRX",
 };
 
 const char *intel_pt_pkt_name(enum intel_pt_pkt_type type)
@@ -123,7 +130,7 @@ static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
 	if (len < 4)
 		return INTEL_PT_NEED_MORE_BYTES;
 	packet->type = INTEL_PT_CBR;
-	packet->payload = buf[2];
+	packet->payload = le16_to_cpu(*(uint16_t *)(buf + 2));
 	return 4;
 }
 
@@ -217,12 +224,80 @@ static int intel_pt_get_3byte(const unsigned char *buf, size_t len,
 	}
 }
 
+static int intel_pt_get_ptwrite(const unsigned char *buf, size_t len,
+				struct intel_pt_pkt *packet)
+{
+	packet->count = (buf[1] >> 5) & 0x3;
+	packet->type = buf[1] & BIT(7) ? INTEL_PT_PTWRITE_IP :
+					 INTEL_PT_PTWRITE;
+
+	switch (packet->count) {
+	case 0:
+		if (len < 6)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->payload = le32_to_cpu(*(uint32_t *)(buf + 2));
+		return 6;
+	case 1:
+		if (len < 10)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+		return 10;
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
+
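A hand-decoded example of the packet format handled above (raw bytes assumed for illustration):

	/* Bytes: 02 12 78 56 34 12
	 *   buf[1] = 0x12: low 5 bits = 0x12 (PTWRITE opcode), bit 7 clear
	 *   (no FUP/IP follows), bits [6:5] = 0 (4-byte payload).
	 * Result: type = INTEL_PT_PTWRITE, count = 0,
	 *         payload = 0x12345678 (little-endian), 6 bytes consumed.
	 */
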
+static int intel_pt_get_exstop(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_EXSTOP;
+	return 2;
+}
+
+static int intel_pt_get_exstop_ip(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_EXSTOP_IP;
+	return 2;
+}
+
+static int intel_pt_get_mwait(const unsigned char *buf, size_t len,
+			      struct intel_pt_pkt *packet)
+{
+	if (len < 10)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_MWAIT;
+	packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+	return 10;
+}
+
+static int intel_pt_get_pwre(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	if (len < 4)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_PWRE;
+	memcpy_le64(&packet->payload, buf + 2, 2);
+	return 4;
+}
+
+static int intel_pt_get_pwrx(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	if (len < 7)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_PWRX;
+	memcpy_le64(&packet->payload, buf + 2, 5);
+	return 7;
+}
+
 static int intel_pt_get_ext(const unsigned char *buf, size_t len,
 			    struct intel_pt_pkt *packet)
 {
 	if (len < 2)
 		return INTEL_PT_NEED_MORE_BYTES;
 
+	if ((buf[1] & 0x1f) == 0x12)
+		return intel_pt_get_ptwrite(buf, len, packet);
+
 	switch (buf[1]) {
 	case 0xa3: /* Long TNT */
 		return intel_pt_get_long_tnt(buf, len, packet);
@@ -244,6 +319,16 @@ static int intel_pt_get_ext(const unsigned char *buf, size_t len,
 		return intel_pt_get_tma(buf, len, packet);
 	case 0xC3: /* 3-byte header */
 		return intel_pt_get_3byte(buf, len, packet);
+	case 0x62: /* EXSTOP no IP */
+		return intel_pt_get_exstop(packet);
+	case 0xE2: /* EXSTOP with IP */
+		return intel_pt_get_exstop_ip(packet);
+	case 0xC2: /* MWAIT */
+		return intel_pt_get_mwait(buf, len, packet);
+	case 0x22: /* PWRE */
+		return intel_pt_get_pwre(buf, len, packet);
+	case 0xA2: /* PWRX */
+		return intel_pt_get_pwrx(buf, len, packet);
 	default:
 		return INTEL_PT_BAD_PACKET;
 	}
@@ -522,6 +607,29 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
 		ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
 			       name, payload, nr);
 		return ret;
+	case INTEL_PT_PTWRITE:
+		return snprintf(buf, buf_len, "%s 0x%llx IP:0", name, payload);
+	case INTEL_PT_PTWRITE_IP:
+		return snprintf(buf, buf_len, "%s 0x%llx IP:1", name, payload);
+	case INTEL_PT_EXSTOP:
+		return snprintf(buf, buf_len, "%s IP:0", name);
+	case INTEL_PT_EXSTOP_IP:
+		return snprintf(buf, buf_len, "%s IP:1", name);
+	case INTEL_PT_MWAIT:
+		return snprintf(buf, buf_len, "%s 0x%llx Hints 0x%x Extensions 0x%x",
+				name, payload, (unsigned int)(payload & 0xff),
+				(unsigned int)((payload >> 32) & 0x3));
+	case INTEL_PT_PWRE:
+		return snprintf(buf, buf_len, "%s 0x%llx HW:%u CState:%u Sub-CState:%u",
+				name, payload, !!(payload & 0x80),
+				(unsigned int)((payload >> 12) & 0xf),
+				(unsigned int)((payload >> 8) & 0xf));
+	case INTEL_PT_PWRX:
+		return snprintf(buf, buf_len, "%s 0x%llx Last CState:%u Deepest CState:%u Wake Reason 0x%x",
+				name, payload,
+				(unsigned int)((payload >> 4) & 0xf),
+				(unsigned int)(payload & 0xf),
+				(unsigned int)((payload >> 8) & 0xf));
 	default:
 		break;
 	}
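Worked examples of the bit fields the format strings above pull out of the payloads (payload values assumed for illustration):

	/* PWRE payload 0x6280:
	 *   bit 7 = 1 (hardware-initiated), bits [15:12] = 6, bits [11:8] = 2
	 *   -> "PWRE 0x6280 HW:1 CState:6 Sub-CState:2"
	 * PWRX payload 0x466:
	 *   bits [7:4] = 6, bits [3:0] = 6, bits [11:8] = 4
	 *   -> "PWRX 0x466 Last CState:6 Deepest CState:6 Wake Reason 0x4"
	 */
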
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
index 781bb79..73ddc3a 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
@@ -52,6 +52,13 @@ enum intel_pt_pkt_type {
 	INTEL_PT_PIP,
 	INTEL_PT_OVF,
 	INTEL_PT_MNT,
+	INTEL_PT_PTWRITE,
+	INTEL_PT_PTWRITE_IP,
+	INTEL_PT_EXSTOP,
+	INTEL_PT_EXSTOP_IP,
+	INTEL_PT_MWAIT,
+	INTEL_PT_PWRE,
+	INTEL_PT_PWRX,
 };
 
 struct intel_pt_pkt {
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 767be7c..12e3771 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -1009,7 +1009,7 @@
 1: fxstor | RDGSBASE Ry (F3),(11B)
 2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
 3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE
+4: XSAVE | ptwrite Ey (F3),(11B)
 5: XRSTOR | lfence (11B)
 6: XSAVEOPT | clwb (66) | mfence (11B)
 7: clflush | clflushopt (66) | sfence (11B)
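The new map entry is the ptwrite instruction itself, which lets software push a value into the trace as the PTWRITE packets decoded above. With a toolchain that provides the ptwrite intrinsics (an assumption; GCC and clang expose them behind -mptwrite) it can be emitted from C:

	#include <immintrin.h>

	static void trace_value(unsigned long long v)
	{
		_ptwrite64(v);	/* appears in the trace as a PTWRITE packet */
	}
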
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 4c7718f..b58f9fd 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -81,7 +81,6 @@ struct intel_pt {
 
 	bool sample_instructions;
 	u64 instructions_sample_type;
-	u64 instructions_sample_period;
 	u64 instructions_id;
 
 	bool sample_branches;
@@ -93,6 +92,18 @@ struct intel_pt {
 	u64 transactions_sample_type;
 	u64 transactions_id;
 
+	bool sample_ptwrites;
+	u64 ptwrites_sample_type;
+	u64 ptwrites_id;
+
+	bool sample_pwr_events;
+	u64 pwr_events_sample_type;
+	u64 mwait_id;
+	u64 pwre_id;
+	u64 exstop_id;
+	u64 pwrx_id;
+	u64 cbr_id;
+
 	bool synth_needs_swap;
 
 	u64 tsc_bit;
@@ -103,6 +114,7 @@ struct intel_pt {
 	u64 cyc_bit;
 	u64 noretcomp_bit;
 	unsigned max_non_turbo_ratio;
+	unsigned cbr2khz;
 
 	unsigned long num_events;
 
@@ -668,6 +680,19 @@ static bool intel_pt_return_compression(struct intel_pt *pt)
 	return true;
 }
 
+static bool intel_pt_branch_enable(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+	u64 config;
+
+	evlist__for_each_entry(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
+		    (config & 1) && !(config & 0x2000))
+			return false;
+	}
+	return true;
+}
+
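A hedged reading of the constants above: on the intel_pt PMU, config bit 0 is the main trace-enable bit and bit 13 (0x2000) is the branch-enable bit, so the helper returns false as soon as any selected event traces with branch packets turned off. In that case the decoder cannot follow control flow, and the new branch_enable parameter makes it skip IP resynchronization and emit the FUP-carried events directly.

	/* Example: an event opened as intel_pt/branch=0/u has config bit 0
	 * set and bit 13 clear, so intel_pt_branch_enable() returns false
	 * and params.branch_enable below is passed as false.
	 */
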
 static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
 {
 	struct perf_evsel *evsel;
@@ -799,6 +824,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
 	params.walk_insn = intel_pt_walk_next_insn;
 	params.data = ptq;
 	params.return_compression = intel_pt_return_compression(pt);
+	params.branch_enable = intel_pt_branch_enable(pt);
 	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
 	params.mtc_period = intel_pt_mtc_period(pt);
 	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
@@ -1044,6 +1070,36 @@ static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
 		bs->nr += 1;
 }
 
+static inline bool intel_pt_skip_event(struct intel_pt *pt)
+{
+	return pt->synth_opts.initial_skip &&
+	       pt->num_events++ < pt->synth_opts.initial_skip;
+}
+
+static void intel_pt_prep_b_sample(struct intel_pt *pt,
+				   struct intel_pt_queue *ptq,
+				   union perf_event *event,
+				   struct perf_sample *sample)
+{
+	event->sample.header.type = PERF_RECORD_SAMPLE;
+	event->sample.header.misc = PERF_RECORD_MISC_USER;
+	event->sample.header.size = sizeof(struct perf_event_header);
+
+	if (!pt->timeless_decoding)
+		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
+
+	sample->cpumode = PERF_RECORD_MISC_USER;
+	sample->ip = ptq->state->from_ip;
+	sample->pid = ptq->pid;
+	sample->tid = ptq->tid;
+	sample->addr = ptq->state->to_ip;
+	sample->period = 1;
+	sample->cpu = ptq->cpu;
+	sample->flags = ptq->flags;
+	sample->insn_len = ptq->insn_len;
+	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
+}
+
 static int intel_pt_inject_event(union perf_event *event,
 				 struct perf_sample *sample, u64 type,
 				 bool swapped)
@@ -1052,9 +1108,35 @@ static int intel_pt_inject_event(union perf_event *event,
 	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
 }
 
-static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
+static inline int intel_pt_opt_inject(struct intel_pt *pt,
+				      union perf_event *event,
+				      struct perf_sample *sample, u64 type)
+{
+	if (!pt->synth_opts.inject)
+		return 0;
+
+	return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
+}
+
+static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
+					  union perf_event *event,
+					  struct perf_sample *sample, u64 type)
 {
 	int ret;
+
+	ret = intel_pt_opt_inject(pt, event, sample, type);
+	if (ret)
+		return ret;
+
+	ret = perf_session__deliver_synth_event(pt->session, event, sample);
+	if (ret)
+		pr_err("Intel PT: failed to deliver event, error %d\n", ret);
+
+	return ret;
+}
+
+static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
+{
 	struct intel_pt *pt = ptq->pt;
 	union perf_event *event = ptq->event_buf;
 	struct perf_sample sample = { .ip = 0, };
@@ -1066,29 +1148,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
 	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
 		return 0;
 
-	if (pt->synth_opts.initial_skip &&
-	    pt->num_events++ < pt->synth_opts.initial_skip)
+	if (intel_pt_skip_event(pt))
 		return 0;
 
-	event->sample.header.type = PERF_RECORD_SAMPLE;
-	event->sample.header.misc = PERF_RECORD_MISC_USER;
-	event->sample.header.size = sizeof(struct perf_event_header);
+	intel_pt_prep_b_sample(pt, ptq, event, &sample);
 
-	if (!pt->timeless_decoding)
-		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
-
-	sample.cpumode = PERF_RECORD_MISC_USER;
-	sample.ip = ptq->state->from_ip;
-	sample.pid = ptq->pid;
-	sample.tid = ptq->tid;
-	sample.addr = ptq->state->to_ip;
 	sample.id = ptq->pt->branches_id;
 	sample.stream_id = ptq->pt->branches_id;
-	sample.period = 1;
-	sample.cpu = ptq->cpu;
-	sample.flags = ptq->flags;
-	sample.insn_len = ptq->insn_len;
-	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
 
 	/*
 	 * perf report cannot handle events without a branch stack when using
@@ -1105,144 +1171,251 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
 	}
 
-	if (pt->synth_opts.inject) {
-		ret = intel_pt_inject_event(event, &sample,
-					    pt->branches_sample_type,
-					    pt->synth_needs_swap);
-		if (ret)
-			return ret;
+	return intel_pt_deliver_synth_b_event(pt, event, &sample,
+					      pt->branches_sample_type);
+}
+
+static void intel_pt_prep_sample(struct intel_pt *pt,
+				 struct intel_pt_queue *ptq,
+				 union perf_event *event,
+				 struct perf_sample *sample)
+{
+	intel_pt_prep_b_sample(pt, ptq, event, sample);
+
+	if (pt->synth_opts.callchain) {
+		thread_stack__sample(ptq->thread, ptq->chain,
+				     pt->synth_opts.callchain_sz, sample->ip);
+		sample->callchain = ptq->chain;
 	}
 
-	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
-	if (ret)
-		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
-		       ret);
+	if (pt->synth_opts.last_branch) {
+		intel_pt_copy_last_branch_rb(ptq);
+		sample->branch_stack = ptq->last_branch;
+	}
+}
+
+static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
+					       struct intel_pt_queue *ptq,
+					       union perf_event *event,
+					       struct perf_sample *sample,
+					       u64 type)
+{
+	int ret;
+
+	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);
+
+	if (pt->synth_opts.last_branch)
+		intel_pt_reset_last_branch_rb(ptq);
 
 	return ret;
 }
 
 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
 {
-	int ret;
 	struct intel_pt *pt = ptq->pt;
 	union perf_event *event = ptq->event_buf;
 	struct perf_sample sample = { .ip = 0, };
 
-	if (pt->synth_opts.initial_skip &&
-	    pt->num_events++ < pt->synth_opts.initial_skip)
+	if (intel_pt_skip_event(pt))
 		return 0;
 
-	event->sample.header.type = PERF_RECORD_SAMPLE;
-	event->sample.header.misc = PERF_RECORD_MISC_USER;
-	event->sample.header.size = sizeof(struct perf_event_header);
+	intel_pt_prep_sample(pt, ptq, event, &sample);
 
-	if (!pt->timeless_decoding)
-		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
-
-	sample.cpumode = PERF_RECORD_MISC_USER;
-	sample.ip = ptq->state->from_ip;
-	sample.pid = ptq->pid;
-	sample.tid = ptq->tid;
-	sample.addr = ptq->state->to_ip;
 	sample.id = ptq->pt->instructions_id;
 	sample.stream_id = ptq->pt->instructions_id;
 	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
-	sample.cpu = ptq->cpu;
-	sample.flags = ptq->flags;
-	sample.insn_len = ptq->insn_len;
-	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
 
 	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
 
-	if (pt->synth_opts.callchain) {
-		thread_stack__sample(ptq->thread, ptq->chain,
-				     pt->synth_opts.callchain_sz, sample.ip);
-		sample.callchain = ptq->chain;
-	}
-
-	if (pt->synth_opts.last_branch) {
-		intel_pt_copy_last_branch_rb(ptq);
-		sample.branch_stack = ptq->last_branch;
-	}
-
-	if (pt->synth_opts.inject) {
-		ret = intel_pt_inject_event(event, &sample,
-					    pt->instructions_sample_type,
-					    pt->synth_needs_swap);
-		if (ret)
-			return ret;
-	}
-
-	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
-	if (ret)
-		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
-		       ret);
-
-	if (pt->synth_opts.last_branch)
-		intel_pt_reset_last_branch_rb(ptq);
-
-	return ret;
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->instructions_sample_type);
 }
 
 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
 {
-	int ret;
 	struct intel_pt *pt = ptq->pt;
 	union perf_event *event = ptq->event_buf;
 	struct perf_sample sample = { .ip = 0, };
 
-	if (pt->synth_opts.initial_skip &&
-	    pt->num_events++ < pt->synth_opts.initial_skip)
+	if (intel_pt_skip_event(pt))
 		return 0;
 
-	event->sample.header.type = PERF_RECORD_SAMPLE;
-	event->sample.header.misc = PERF_RECORD_MISC_USER;
-	event->sample.header.size = sizeof(struct perf_event_header);
+	intel_pt_prep_sample(pt, ptq, event, &sample);
 
-	if (!pt->timeless_decoding)
-		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
-
-	sample.cpumode = PERF_RECORD_MISC_USER;
-	sample.ip = ptq->state->from_ip;
-	sample.pid = ptq->pid;
-	sample.tid = ptq->tid;
-	sample.addr = ptq->state->to_ip;
 	sample.id = ptq->pt->transactions_id;
 	sample.stream_id = ptq->pt->transactions_id;
-	sample.period = 1;
-	sample.cpu = ptq->cpu;
-	sample.flags = ptq->flags;
-	sample.insn_len = ptq->insn_len;
-	memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
 
-	if (pt->synth_opts.callchain) {
-		thread_stack__sample(ptq->thread, ptq->chain,
-				     pt->synth_opts.callchain_sz, sample.ip);
-		sample.callchain = ptq->chain;
-	}
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->transactions_sample_type);
+}
 
-	if (pt->synth_opts.last_branch) {
-		intel_pt_copy_last_branch_rb(ptq);
-		sample.branch_stack = ptq->last_branch;
-	}
+static void intel_pt_prep_p_sample(struct intel_pt *pt,
+				   struct intel_pt_queue *ptq,
+				   union perf_event *event,
+				   struct perf_sample *sample)
+{
+	intel_pt_prep_sample(pt, ptq, event, sample);
 
-	if (pt->synth_opts.inject) {
-		ret = intel_pt_inject_event(event, &sample,
-					    pt->transactions_sample_type,
-					    pt->synth_needs_swap);
-		if (ret)
-			return ret;
-	}
+	/*
+	 * Zero IP is used to mean "trace start" but that is not the case for
+	 * power or PTWRITE events with no IP, so clear the flags.
+	 */
+	if (!sample->ip)
+		sample->flags = 0;
+}
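The three prep helpers in this patch layer on top of each other, so each sample type only fills in what it needs (a sketch of the call chain, matching the code above):

	/* branches                    -> intel_pt_prep_b_sample()
	 * instructions, transactions  -> intel_pt_prep_sample()
	 *                                  -> intel_pt_prep_b_sample()
	 * ptwrite, power events       -> intel_pt_prep_p_sample()
	 *                                  -> intel_pt_prep_sample() -> ...
	 */
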
 
-	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
-	if (ret)
-		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
-		       ret);
+static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_ptwrite raw;
 
-	if (pt->synth_opts.last_branch)
-		intel_pt_reset_last_branch_rb(ptq);
+	if (intel_pt_skip_event(pt))
+		return 0;
 
-	return ret;
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->ptwrites_id;
+	sample.stream_id = ptq->pt->ptwrites_id;
+
+	raw.flags = 0;
+	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
+	raw.payload = cpu_to_le64(ptq->state->ptw_payload);
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->ptwrites_sample_type);
+}
+
+static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_cbr raw;
+	u32 flags;
+
+	if (intel_pt_skip_event(pt))
+		return 0;
+
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->cbr_id;
+	sample.stream_id = ptq->pt->cbr_id;
+
+	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
+	raw.flags = cpu_to_le32(flags);
+	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
+	raw.reserved3 = 0;
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->pwr_events_sample_type);
+}
+
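raw.cbr is read just above even though only raw.flags was assigned; that works because, in the assumed layout of struct perf_synth_intel_cbr (tools/perf/util/event.h), the bit-fields and flags share a union:

	/* Assumed layout:
	 *   flags bits [ 7: 0] = cbr           (current core-to-bus ratio)
	 *   flags bits [23:16] = max_nonturbo  (from pt->max_non_turbo_ratio)
	 *   freq               = cbr * cbr2khz (effective frequency in kHz)
	 */
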
+static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_mwait raw;
+
+	if (intel_pt_skip_event(pt))
+		return 0;
+
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->mwait_id;
+	sample.stream_id = ptq->pt->mwait_id;
+
+	raw.reserved = 0;
+	raw.payload = cpu_to_le64(ptq->state->mwait_payload);
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->pwr_events_sample_type);
+}
+
+static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_pwre raw;
+
+	if (intel_pt_skip_event(pt))
+		return 0;
+
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->pwre_id;
+	sample.stream_id = ptq->pt->pwre_id;
+
+	raw.reserved = 0;
+	raw.payload = cpu_to_le64(ptq->state->pwre_payload);
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->pwr_events_sample_type);
+}
+
+static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_exstop raw;
+
+	if (intel_pt_skip_event(pt))
+		return 0;
+
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->exstop_id;
+	sample.stream_id = ptq->pt->exstop_id;
+
+	raw.flags = 0;
+	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->pwr_events_sample_type);
+}
+
+static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
+{
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+	struct perf_synth_intel_pwrx raw;
+
+	if (intel_pt_skip_event(pt))
+		return 0;
+
+	intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+	sample.id = ptq->pt->pwrx_id;
+	sample.stream_id = ptq->pt->pwrx_id;
+
+	raw.reserved = 0;
+	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
+
+	sample.raw_size = perf_synth__raw_size(raw);
+	sample.raw_data = perf_synth__raw_data(&raw);
+
+	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
+					    pt->pwr_events_sample_type);
 }
 
 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
@@ -1296,6 +1469,10 @@ static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
 			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
 }
 
+#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
+			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
+			  INTEL_PT_CBR_CHG)
+
 static int intel_pt_sample(struct intel_pt_queue *ptq)
 {
 	const struct intel_pt_state *state = ptq->state;
@@ -1307,24 +1484,52 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
 
 	ptq->have_sample = false;
 
-	if (pt->sample_instructions &&
-	    (state->type & INTEL_PT_INSTRUCTION) &&
-	    (!pt->synth_opts.initial_skip ||
-	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
+	if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
+		if (state->type & INTEL_PT_CBR_CHG) {
+			err = intel_pt_synth_cbr_sample(ptq);
+			if (err)
+				return err;
+		}
+		if (state->type & INTEL_PT_MWAIT_OP) {
+			err = intel_pt_synth_mwait_sample(ptq);
+			if (err)
+				return err;
+		}
+		if (state->type & INTEL_PT_PWR_ENTRY) {
+			err = intel_pt_synth_pwre_sample(ptq);
+			if (err)
+				return err;
+		}
+		if (state->type & INTEL_PT_EX_STOP) {
+			err = intel_pt_synth_exstop_sample(ptq);
+			if (err)
+				return err;
+		}
+		if (state->type & INTEL_PT_PWR_EXIT) {
+			err = intel_pt_synth_pwrx_sample(ptq);
+			if (err)
+				return err;
+		}
+	}
+
+	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
 		err = intel_pt_synth_instruction_sample(ptq);
 		if (err)
 			return err;
 	}
 
-	if (pt->sample_transactions &&
-	    (state->type & INTEL_PT_TRANSACTION) &&
-	    (!pt->synth_opts.initial_skip ||
-	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
+	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
 		err = intel_pt_synth_transaction_sample(ptq);
 		if (err)
 			return err;
 	}
 
+	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
+		err = intel_pt_synth_ptwrite_sample(ptq);
+		if (err)
+			return err;
+	}
+
 	if (!(state->type & INTEL_PT_BRANCH))
 		return 0;
 
@@ -1925,36 +2130,65 @@ static int intel_pt_event_synth(struct perf_tool *tool,
 						 NULL);
 }
 
-static int intel_pt_synth_event(struct perf_session *session,
+static int intel_pt_synth_event(struct perf_session *session, const char *name,
 				struct perf_event_attr *attr, u64 id)
 {
 	struct intel_pt_synth intel_pt_synth;
+	int err;
+
+	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
+		 name, id, (u64)attr->sample_type);
 
 	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
 	intel_pt_synth.session = session;
 
-	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
-					   &id, intel_pt_event_synth);
+	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
+					  &id, intel_pt_event_synth);
+	if (err)
+		pr_err("%s: failed to synthesize '%s' event type\n",
+		       __func__, name);
+
+	return err;
+}
+
+static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
+				    const char *name)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel->id && evsel->id[0] == id) {
+			if (evsel->name)
+				zfree(&evsel->name);
+			evsel->name = strdup(name);
+			break;
+		}
+	}
+}
+
+static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
+					 struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel->attr.type == pt->pmu_type && evsel->ids)
+			return evsel;
+	}
+
+	return NULL;
 }
 
 static int intel_pt_synth_events(struct intel_pt *pt,
 				 struct perf_session *session)
 {
 	struct perf_evlist *evlist = session->evlist;
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
 	struct perf_event_attr attr;
-	bool found = false;
 	u64 id;
 	int err;
 
-	evlist__for_each_entry(evlist, evsel) {
-		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
-			found = true;
-			break;
-		}
-	}
-
-	if (!found) {
+	if (!evsel) {
 		pr_debug("There are no selected events with Intel Processor Trace data\n");
 		return 0;
 	}
@@ -1983,6 +2217,25 @@ static int intel_pt_synth_events(struct intel_pt *pt,
 	if (!id)
 		id = 1;
 
+	if (pt->synth_opts.branches) {
+		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+		attr.sample_period = 1;
+		attr.sample_type |= PERF_SAMPLE_ADDR;
+		err = intel_pt_synth_event(session, "branches", &attr, id);
+		if (err)
+			return err;
+		pt->sample_branches = true;
+		pt->branches_sample_type = attr.sample_type;
+		pt->branches_id = id;
+		id += 1;
+		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
+	}
+
+	if (pt->synth_opts.callchain)
+		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+	if (pt->synth_opts.last_branch)
+		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+
 	if (pt->synth_opts.instructions) {
 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
@@ -1990,70 +2243,90 @@ static int intel_pt_synth_events(struct intel_pt *pt,
 				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
 		else
 			attr.sample_period = pt->synth_opts.period;
-		pt->instructions_sample_period = attr.sample_period;
-		if (pt->synth_opts.callchain)
-			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
-		if (pt->synth_opts.last_branch)
-			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
-		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
-			 id, (u64)attr.sample_type);
-		err = intel_pt_synth_event(session, &attr, id);
-		if (err) {
-			pr_err("%s: failed to synthesize 'instructions' event type\n",
-			       __func__);
+		err = intel_pt_synth_event(session, "instructions", &attr, id);
+		if (err)
 			return err;
-		}
 		pt->sample_instructions = true;
 		pt->instructions_sample_type = attr.sample_type;
 		pt->instructions_id = id;
 		id += 1;
 	}
 
+	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
+	attr.sample_period = 1;
+
 	if (pt->synth_opts.transactions) {
 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
-		attr.sample_period = 1;
-		if (pt->synth_opts.callchain)
-			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
-		if (pt->synth_opts.last_branch)
-			attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
-		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
-			 id, (u64)attr.sample_type);
-		err = intel_pt_synth_event(session, &attr, id);
-		if (err) {
-			pr_err("%s: failed to synthesize 'transactions' event type\n",
-			       __func__);
+		err = intel_pt_synth_event(session, "transactions", &attr, id);
+		if (err)
 			return err;
-		}
 		pt->sample_transactions = true;
+		pt->transactions_sample_type = attr.sample_type;
 		pt->transactions_id = id;
+		intel_pt_set_event_name(evlist, id, "transactions");
 		id += 1;
-		evlist__for_each_entry(evlist, evsel) {
-			if (evsel->id && evsel->id[0] == pt->transactions_id) {
-				if (evsel->name)
-					zfree(&evsel->name);
-				evsel->name = strdup("transactions");
-				break;
-			}
-		}
 	}
 
-	if (pt->synth_opts.branches) {
-		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
-		attr.sample_period = 1;
-		attr.sample_type |= PERF_SAMPLE_ADDR;
-		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
-		attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
-		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
-			 id, (u64)attr.sample_type);
-		err = intel_pt_synth_event(session, &attr, id);
-		if (err) {
-			pr_err("%s: failed to synthesize 'branches' event type\n",
-			       __func__);
+	attr.type = PERF_TYPE_SYNTH;
+	attr.sample_type |= PERF_SAMPLE_RAW;
+
+	if (pt->synth_opts.ptwrites) {
+		attr.config = PERF_SYNTH_INTEL_PTWRITE;
+		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
+		if (err)
 			return err;
-		}
-		pt->sample_branches = true;
-		pt->branches_sample_type = attr.sample_type;
-		pt->branches_id = id;
+		pt->sample_ptwrites = true;
+		pt->ptwrites_sample_type = attr.sample_type;
+		pt->ptwrites_id = id;
+		intel_pt_set_event_name(evlist, id, "ptwrite");
+		id += 1;
+	}
+
+	if (pt->synth_opts.pwr_events) {
+		pt->sample_pwr_events = true;
+		pt->pwr_events_sample_type = attr.sample_type;
+
+		attr.config = PERF_SYNTH_INTEL_CBR;
+		err = intel_pt_synth_event(session, "cbr", &attr, id);
+		if (err)
+			return err;
+		pt->cbr_id = id;
+		intel_pt_set_event_name(evlist, id, "cbr");
+		id += 1;
+	}
+
+	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
+		attr.config = PERF_SYNTH_INTEL_MWAIT;
+		err = intel_pt_synth_event(session, "mwait", &attr, id);
+		if (err)
+			return err;
+		pt->mwait_id = id;
+		intel_pt_set_event_name(evlist, id, "mwait");
+		id += 1;
+
+		attr.config = PERF_SYNTH_INTEL_PWRE;
+		err = intel_pt_synth_event(session, "pwre", &attr, id);
+		if (err)
+			return err;
+		pt->pwre_id = id;
+		intel_pt_set_event_name(evlist, id, "pwre");
+		id += 1;
+
+		attr.config = PERF_SYNTH_INTEL_EXSTOP;
+		err = intel_pt_synth_event(session, "exstop", &attr, id);
+		if (err)
+			return err;
+		pt->exstop_id = id;
+		intel_pt_set_event_name(evlist, id, "exstop");
+		id += 1;
+
+		attr.config = PERF_SYNTH_INTEL_PWRX;
+		err = intel_pt_synth_event(session, "pwrx", &attr, id);
+		if (err)
+			return err;
+		pt->pwrx_id = id;
+		intel_pt_set_event_name(evlist, id, "pwrx");
+		id += 1;
 	}
 
 	pt->synth_needs_swap = evsel->needs_swap;
@@ -2322,6 +2595,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
 		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
 		intel_pt_log("Maximum non-turbo ratio %u\n",
 			     pt->max_non_turbo_ratio);
+		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
 	}
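cbr2khz turns a core-to-bus ratio directly into kHz: tsc_freq / max_non_turbo_ratio is the bus clock in Hz, and dividing by 1000 yields kHz per ratio step. For example, with tsc_freq = 2,400,000,000 and max_non_turbo_ratio = 24, cbr2khz = 100,000, so a cbr of 12 yields freq = 12 * 100,000 = 1,200,000 kHz (1.2 GHz) in the synthesized cbr samples.
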
 
 	if (pt->synth_opts.calls)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index d97e014..5de2b86 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
 		if (dso == NULL)
 			goto out_unlock;
 
-		if (machine__is_host(machine))
-			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
-		else
-			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-
-		/* _KMODULE_COMP should be next to _KMODULE */
-		if (m->kmod && m->comp)
-			dso->symtab_type++;
-
-		dso__set_short_name(dso, strdup(m->name), true);
+		dso__set_module_info(dso, m, machine);
 		dso__set_long_name(dso, strdup(filename), true);
 	}
 
@@ -1218,10 +1209,12 @@ int machine__create_kernel_maps(struct machine *machine)
 	 */
 	map_groups__fixup_end(&machine->kmaps);
 
-	if (machine__get_running_kernel_start(machine, &name, &addr)) {
-	} else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
-		machine__destroy_kernel_maps(machine);
-		return -1;
+	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
+		if (name &&
+		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
+			machine__destroy_kernel_maps(machine);
+			return -1;
+		}
 	}
 
 	return 0;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index ea7f450..389e972 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -2,6 +2,7 @@
 #define __PMU_H
 
 #include <linux/bitmap.h>
+#include <linux/compiler.h>
 #include <linux/perf_event.h>
 #include <stdbool.h>
 #include "evsel.h"
@@ -83,8 +84,7 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
 		      bool long_desc, bool details_flag);
 bool pmu_have_event(const char *pname, const char *name);
 
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
-			...) __attribute__((format(scanf, 3, 4)));
+int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
 
 int perf_pmu__test(void);
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 84e7e69..a2670e9 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
 					   struct map *map, unsigned long offs)
 {
 	struct symbol *sym;
-	u64 addr = tp->address + tp->offset - offs;
+	u64 addr = tp->address - offs;
 
 	sym = map__find_symbol(map, addr);
 	if (!sym)
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 3738426..5812947 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -1,6 +1,7 @@
 #ifndef _PROBE_EVENT_H
 #define _PROBE_EVENT_H
 
+#include <linux/compiler.h>
 #include <stdbool.h>
 #include "intlist.h"
 
@@ -171,8 +172,7 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
 			     struct symbol *sym);
 
 /* If there is no space to write, returns -E2BIG. */
-int e_snprintf(char *str, size_t size, const char *format, ...)
-	__attribute__((format(printf, 3, 4)));
+int e_snprintf(char *str, size_t size, const char *format, ...) __printf(3, 4);
 
 /* Maximum index number of event-name postfix */
 #define MAX_EVENT_INDEX	1024
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9d92af7..57b7a00 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -28,6 +28,7 @@
 #include <stdbool.h>
 #include <errno.h>
 #include <linux/bitmap.h>
+#include <linux/compiler.h>
 #include <linux/time64.h>
 
 #include "../../perf.h"
@@ -84,7 +85,7 @@ struct tables {
 
 static struct tables tables_global;
 
-static void handler_call_die(const char *handler_name) NORETURN;
+static void handler_call_die(const char *handler_name) __noreturn;
 static void handler_call_die(const char *handler_name)
 {
 	PyErr_Print();
@@ -1219,7 +1220,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
 	fprintf(ofp, "# be retrieved using Python functions of the form "
 		"common_*(context).\n");
 
-	fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+	fprintf(ofp, "# See the perf-script-python Documentation for the list "
 		"of available functions.\n\n");
 
 	fprintf(ofp, "import os\n");
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7dc1096..d19c40a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2035,7 +2035,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 
 		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
 			pr_err("File does not contain CPU events. "
-			       "Remove -c option to proceed.\n");
+			       "Remove -C option to proceed.\n");
 			return -1;
 		}
 	}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 5762ae4..8b327c9 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2532,12 +2532,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
 			ret = sort_dimension__add(list, tok, evlist, level);
 			if (ret == -EINVAL) {
 				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
-					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
+					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
 				else
-					error("Invalid --sort key: `%s'", tok);
+					pr_err("Invalid --sort key: `%s'", tok);
 				break;
 			} else if (ret == -ESRCH) {
-				error("Unknown --sort key: `%s'", tok);
+				pr_err("Unknown --sort key: `%s'", tok);
 				break;
 			}
 		}
@@ -2594,7 +2594,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
 		return 0;
 
 	if (sort_order[1] == '\0') {
-		error("Invalid --sort key: `+'");
+		pr_err("Invalid --sort key: `+'");
 		return -EINVAL;
 	}
 
@@ -2604,7 +2604,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
 	 */
 	if (asprintf(&new_sort_order, "%s,%s",
 		     get_default_sort_order(evlist), sort_order + 1) < 0) {
-		error("Not enough memory to set up --sort");
+		pr_err("Not enough memory to set up --sort");
 		return -ENOMEM;
 	}
 
@@ -2668,7 +2668,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
 
 	str = strdup(sort_keys);
 	if (str == NULL) {
-		error("Not enough memory to setup sort keys");
+		pr_err("Not enough memory to setup sort keys");
 		return -ENOMEM;
 	}
 
@@ -2678,7 +2678,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
 	if (!is_strict_order(field_order)) {
 		str = setup_overhead(str);
 		if (str == NULL) {
-			error("Not enough memory to setup overhead keys");
+			pr_err("Not enough memory to setup overhead keys");
 			return -ENOMEM;
 		}
 	}
@@ -2834,10 +2834,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
 		ret = output_field_add(list, tok);
 		if (ret == -EINVAL) {
-			error("Invalid --fields key: `%s'", tok);
+			pr_err("Invalid --fields key: `%s'", tok);
 			break;
 		} else if (ret == -ESRCH) {
-			error("Unknown --fields key: `%s'", tok);
+			pr_err("Unknown --fields key: `%s'", tok);
 			break;
 		}
 	}
@@ -2877,7 +2877,7 @@ static int __setup_output_field(void)
 
 	strp = str = strdup(field_order);
 	if (str == NULL) {
-		error("Not enough memory to setup output fields");
+		pr_err("Not enough memory to setup output fields");
 		return -ENOMEM;
 	}
 
@@ -2885,7 +2885,7 @@ static int __setup_output_field(void)
 		strp++;
 
 	if (!strlen(strp)) {
-		error("Invalid --fields key: `+'");
+		pr_err("Invalid --fields key: `+'");
 		goto out;
 	}
 
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index ac10cc6..719d6cb 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -44,6 +44,8 @@ static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
 static struct rblist runtime_saved_values;
 static bool have_frontend_stalled;
 
@@ -157,6 +159,8 @@ void perf_stat__reset_shadow_stats(void)
 	memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
 	memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
 	memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
+	memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
+	memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));
 
 	next = rb_first(&runtime_saved_values.entries);
 	while (next) {
@@ -217,6 +221,10 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 		update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
 	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
 		update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
+	else if (perf_stat_evsel__is(counter, SMI_NUM))
+		update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]);
+	else if (perf_stat_evsel__is(counter, APERF))
+		update_stats(&runtime_aperf_stats[ctx][cpu], count[0]);
 
 	if (counter->collect_stat) {
 		struct saved_value *v = saved_value_lookup(counter, cpu, ctx,
@@ -592,6 +600,29 @@ static double td_be_bound(int ctx, int cpu)
 	return sanitize_val(1.0 - sum);
 }
 
+static void print_smi_cost(int cpu, struct perf_evsel *evsel,
+			   struct perf_stat_output_ctx *out)
+{
+	double smi_num, aperf, cycles, cost = 0.0;
+	int ctx = evsel_context(evsel);
+	const char *color = NULL;
+
+	smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
+	aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
+	cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+
+	if ((cycles == 0) || (aperf == 0))
+		return;
+
+	if (smi_num)
+		cost = (aperf - cycles) / aperf * 100.00;
+
+	if (cost > 10)
+		color = PERF_COLOR_RED;
+	out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
+	out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
+}
+
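print_smi_cost() reports how many unhalted cycles were lost to System Management Interrupts: with the cycles counter configured to freeze during SMM (which perf stat's SMI-cost mode is assumed to arrange via the freeze_on_smi sysfs control), aperf keeps counting while cycles does not, so (aperf - cycles) / aperf is the stolen fraction. For example, with aperf = 1,000,000, cycles = 950,000 and smi_num = 3, the metrics come out as an SMI cycles% of 5.0 and an SMI# of 3; anything above 10% is colored red.
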
 void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 				   double avg, int cpu,
 				   struct perf_stat_output_ctx *out)
@@ -825,6 +856,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 		}
 		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
 		print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
+	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
+		print_smi_cost(cpu, evsel, out);
 	} else {
 		print_metric(ctxp, NULL, NULL, NULL, 0);
 	}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c581744..53b9a99 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -86,6 +86,8 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
 	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
 	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
 	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
+	ID(SMI_NUM, msr/smi/),
+	ID(APERF, msr/aperf/),
 };
 #undef ID
 
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 0a65ae2..7522bf1 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -22,6 +22,8 @@ enum perf_stat_evsel_id {
 	PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_RETIRED,
 	PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_BUBBLES,
 	PERF_STAT_EVSEL_ID__TOPDOWN_RECOVERY_BUBBLES,
+	PERF_STAT_EVSEL_ID__SMI_NUM,
+	PERF_STAT_EVSEL_ID__APERF,
 	PERF_STAT_EVSEL_ID__MAX,
 };
 
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index 318424e..802d743 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -42,6 +42,7 @@
 #include <stdarg.h>
 #include <stddef.h>
 #include <string.h>
+#include <linux/compiler.h>
 #include <sys/types.h>
 
 extern char strbuf_slopbuf[];
@@ -85,8 +86,7 @@ static inline int strbuf_addstr(struct strbuf *sb, const char *s) {
 	return strbuf_add(sb, s, strlen(s));
 }
 
-__attribute__((format(printf,2,3)))
-int strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+int strbuf_addf(struct strbuf *sb, const char *fmt, ...) __printf(2, 3);
 
 /* XXX: if read fails, any partial read is undone */
 ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index e7ee47f..502505c 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
 	return 0;
 }
 
-static int decompress_kmodule(struct dso *dso, const char *name,
-			      enum dso_binary_type type)
-{
-	int fd = -1;
-	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
-	struct kmod_path m;
-
-	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
-	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
-	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
-		return -1;
-
-	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-		name = dso->long_name;
-
-	if (kmod_path__parse_ext(&m, name) || !m.comp)
-		return -1;
-
-	fd = mkstemp(tmpbuf);
-	if (fd < 0) {
-		dso->load_errno = errno;
-		goto out;
-	}
-
-	if (!decompress_to_file(m.ext, name, fd)) {
-		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
-		close(fd);
-		fd = -1;
-	}
-
-	unlink(tmpbuf);
-
-out:
-	free(m.ext);
-	return fd;
-}
-
 bool symsrc__possibly_runtime(struct symsrc *ss)
 {
 	return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
 	int fd;
 
 	if (dso__needs_decompress(dso)) {
-		fd = decompress_kmodule(dso, name, type);
+		fd = dso__decompress_kmodule_fd(dso, name);
 		if (fd < 0)
 			return -1;
+
+		type = dso->symtab_type;
 	} else {
 		fd = open(name, O_RDONLY);
 		if (fd < 0) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8f2b068..e7a98db 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
 	if (!runtime_ss && syms_ss)
 		runtime_ss = syms_ss;
 
-	if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-		if (dso__build_id_is_kmod(dso, name, PATH_MAX))
-			kmod = true;
-
 	if (syms_ss)
 		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
 	else
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 746bbee..e0a6e9a 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -24,7 +24,7 @@
 #include <errno.h>
 
 #include "../perf.h"
-#include "util.h"
+#include "debug.h"
 #include "trace-event.h"
 
 #include "sane_ctype.h"
@@ -150,7 +150,7 @@ void parse_ftrace_printk(struct pevent *pevent,
 	while (line) {
 		addr_str = strtok_r(line, ":", &fmt);
 		if (!addr_str) {
-			warning("printk format with empty entry");
+			pr_warning("printk format with empty entry");
 			break;
 		}
 		addr = strtoull(addr_str, NULL, 16);
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 943a062..7755a5e0 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
 		return 0;
 
 	mod = dwfl_addrmodule(ui->dwfl, ip);
+	if (mod) {
+		Dwarf_Addr s;
+
+		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+		if (s != al->map->start)
+			mod = 0;
+	}
+
 	if (!mod)
 		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
 				      dso->long_name, -1, al->map->start,
@@ -170,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg)
 	Dwarf_Addr pc;
 	bool isactivation;
 
+	if (!dwfl_frame_pc(state, &pc, NULL)) {
+		pr_err("%s", dwfl_errmsg(-1));
+		return DWARF_CB_ABORT;
+	}
+
+	// report the module before we query for isactivation
+	report_module(pc, ui);
+
 	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
 		pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
@@ -224,7 +240,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 
 	err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
 
-	if (err && !ui->max_stack)
+	if (err && ui->max_stack != max_stack)
 		err = 0;
 
 	/*
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
index 996046a..6cc9d98 100644
--- a/tools/perf/util/usage.c
+++ b/tools/perf/util/usage.c
@@ -9,75 +9,17 @@
 #include "util.h"
 #include "debug.h"
 
-static void report(const char *prefix, const char *err, va_list params)
-{
-	char msg[1024];
-	vsnprintf(msg, sizeof(msg), err, params);
-	fprintf(stderr, " %s%s\n", prefix, msg);
-}
-
-static NORETURN void usage_builtin(const char *err)
+static __noreturn void usage_builtin(const char *err)
 {
 	fprintf(stderr, "\n Usage: %s\n", err);
 	exit(129);
 }
 
-static NORETURN void die_builtin(const char *err, va_list params)
-{
-	report(" Fatal: ", err, params);
-	exit(128);
-}
-
-static void error_builtin(const char *err, va_list params)
-{
-	report(" Error: ", err, params);
-}
-
-static void warn_builtin(const char *warn, va_list params)
-{
-	report(" Warning: ", warn, params);
-}
-
 /* If we are in a dlopen()ed .so write to a global variable would segfault
  * (ugh), so keep things static. */
-static void (*usage_routine)(const char *err) NORETURN = usage_builtin;
-static void (*error_routine)(const char *err, va_list params) = error_builtin;
-static void (*warn_routine)(const char *err, va_list params) = warn_builtin;
-
-void set_warning_routine(void (*routine)(const char *err, va_list params))
-{
-	warn_routine = routine;
-}
+static void (*usage_routine)(const char *err) __noreturn = usage_builtin;
 
 void usage(const char *err)
 {
 	usage_routine(err);
 }
-
-void die(const char *err, ...)
-{
-	va_list params;
-
-	va_start(params, err);
-	die_builtin(err, params);
-	va_end(params);
-}
-
-int error(const char *err, ...)
-{
-	va_list params;
-
-	va_start(params, err);
-	error_routine(err, params);
-	va_end(params);
-	return -1;
-}
-
-void warning(const char *warn, ...)
-{
-	va_list params;
-
-	va_start(params, warn);
-	warn_routine(warn, params);
-	va_end(params);
-}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 28c9f33..988111e 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -343,43 +343,6 @@ int perf_event_paranoid(void)
 
 	return value;
 }
-
-bool find_process(const char *name)
-{
-	size_t len = strlen(name);
-	DIR *dir;
-	struct dirent *d;
-	int ret = -1;
-
-	dir = opendir(procfs__mountpoint());
-	if (!dir)
-		return false;
-
-	/* Walk through the directory. */
-	while (ret && (d = readdir(dir)) != NULL) {
-		char path[PATH_MAX];
-		char *data;
-		size_t size;
-
-		if ((d->d_type != DT_DIR) ||
-		     !strcmp(".", d->d_name) ||
-		     !strcmp("..", d->d_name))
-			continue;
-
-		scnprintf(path, sizeof(path), "%s/%s/comm",
-			  procfs__mountpoint(), d->d_name);
-
-		if (filename__read_str(path, &data, &size))
-			continue;
-
-		ret = strncmp(name, data, len);
-		free(data);
-	}
-
-	closedir(dir);
-	return ret ? false : true;
-}
-
 static int
 fetch_ubuntu_kernel_version(unsigned int *puint)
 {
@@ -387,8 +350,12 @@ fetch_ubuntu_kernel_version(unsigned int *puint)
 	size_t line_len = 0;
 	char *ptr, *line = NULL;
 	int version, patchlevel, sublevel, err;
-	FILE *vsig = fopen("/proc/version_signature", "r");
+	FILE *vsig;
 
+	if (!puint)
+		return 0;
+
+	vsig = fopen("/proc/version_signature", "r");
 	if (!vsig) {
 		pr_debug("Open /proc/version_signature failed: %s\n",
 			 strerror(errno));
@@ -418,8 +385,7 @@ fetch_ubuntu_kernel_version(unsigned int *puint)
 		goto errout;
 	}
 
-	if (puint)
-		*puint = (version << 16) + (patchlevel << 8) + sublevel;
+	*puint = (version << 16) + (patchlevel << 8) + sublevel;
 	err = 0;
 errout:
 	free(line);
@@ -446,6 +412,9 @@ fetch_kernel_version(unsigned int *puint, char *str,
 		str[str_size - 1] = '\0';
 	}
 
+	if (!puint || int_ver_ready)
+		return 0;
+
 	err = sscanf(utsname.release, "%d.%d.%d",
 		     &version, &patchlevel, &sublevel);
 
@@ -455,8 +424,7 @@ fetch_kernel_version(unsigned int *puint, char *str,
 		return -1;
 	}
 
-	if (puint && !int_ver_ready)
-		*puint = (version << 16) + (patchlevel << 8) + sublevel;
+	*puint = (version << 16) + (patchlevel << 8) + sublevel;
 	return 0;
 }
 
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 5dfb9bb..2c9e58a 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -1,7 +1,6 @@
 #ifndef GIT_COMPAT_UTIL_H
 #define GIT_COMPAT_UTIL_H
 
-#define _ALL_SOURCE 1
 #define _BSD_SOURCE 1
 /* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
 #define _DEFAULT_SOURCE 1
@@ -11,24 +10,12 @@
 #include <stddef.h>
 #include <stdlib.h>
 #include <stdarg.h>
+#include <linux/compiler.h>
 #include <linux/types.h>
 
-#ifdef __GNUC__
-#define NORETURN __attribute__((__noreturn__))
-#else
-#define NORETURN
-#ifndef __attribute__
-#define __attribute__(x)
-#endif
-#endif
-
 /* General helper functions */
-void usage(const char *err) NORETURN;
-void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
-int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
-void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
-
-void set_warning_routine(void (*routine)(const char *err, va_list params));
+void usage(const char *err) __noreturn;
+void die(const char *err, ...) __noreturn __printf(1, 2);
 
 static inline void *zalloc(size_t size)
 {
@@ -57,8 +44,6 @@ int hex2u64(const char *ptr, u64 *val);
 extern unsigned int page_size;
 extern int cacheline_size;
 
-bool find_process(const char *name);
-
 int fetch_kernel_version(unsigned int *puint,
 			 char *str, size_t str_sz);
 #define KVER_VERSION(x)		(((x) >> 16) & 0xff)
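As a stand-alone sketch (not part of the patch) of the version packing that fetch_kernel_version() now performs above and that the KVER_* macros unpack; kver_pack() is a hypothetical helper name:

#include <stdio.h>

/* Pack major.minor.sublevel one byte each: major in bits 23:16,
 * patchlevel in 15:8, sublevel in 7:0, matching fetch_kernel_version(). */
static unsigned int kver_pack(int version, int patchlevel, int sublevel)
{
	return (version << 16) + (patchlevel << 8) + sublevel;
}

int main(void)
{
	unsigned int v = kver_pack(4, 13, 2);

	/* Unpack with the same shifts the KVER_VERSION()-style macros use. */
	printf("0x%06x -> %u.%u.%u\n", v,
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;	/* prints: 0x040d02 -> 4.13.2 */
}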
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index c04e8fe..025c1b0 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -750,9 +750,9 @@ acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout)
 {
 	acpi_status status = AE_OK;
 	sem_t *sem = (sem_t *) handle;
+	int ret_val;
 #ifndef ACPI_USE_ALTERNATE_TIMEOUT
 	struct timespec time;
-	int ret_val;
 #endif
 
 	if (!sem) {
@@ -778,7 +778,10 @@ acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout)
 
 	case ACPI_WAIT_FOREVER:
 
-		if (sem_wait(sem)) {
+		while (((ret_val = sem_wait(sem)) == -1) && (errno == EINTR)) {
+			continue;	/* Restart if interrupted */
+		}
+		if (ret_val != 0) {
 			status = (AE_TIME);
 		}
 		break;
@@ -831,7 +834,8 @@ acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout)
 
 		while (((ret_val = sem_timedwait(sem, &time)) == -1)
 		       && (errno == EINTR)) {
-			continue;
+			continue;	/* Restart if interrupted */
+
 		}
 
 		if (ret_val != 0) {
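The sem_wait() change above uses the standard EINTR-retry idiom; a stand-alone sketch (not from the patch, helper name is hypothetical):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

/* Retry sem_wait() when a signal interrupts it, as the ACPI_WAIT_FOREVER
 * case above now does; returns 0 on success, -1 on a real failure. */
static int sem_wait_restart(sem_t *sem)
{
	int ret;

	while ((ret = sem_wait(sem)) == -1 && errno == EINTR)
		continue;	/* restart if interrupted by a signal */

	return ret;
}

int main(void)
{
	sem_t sem;

	sem_init(&sem, 0, 1);	/* one token, so the wait returns immediately */
	printf("sem_wait_restart: %d\n", sem_wait_restart(&sem));
	sem_destroy(&sem);
	return 0;
}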
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index 6437ef3..5fd5c5b 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -26,6 +26,15 @@ union msr_pstate {
 		unsigned res3:21;
 		unsigned en:1;
 	} bits;
+	struct {
+		unsigned fid:8;
+		unsigned did:6;
+		unsigned vid:8;
+		unsigned iddval:8;
+		unsigned idddiv:2;
+		unsigned res1:30;
+		unsigned en:1;
+	} fam17h_bits;
 	unsigned long long val;
 };
 
@@ -35,6 +44,8 @@ static int get_did(int family, union msr_pstate pstate)
 
 	if (family == 0x12)
 		t = pstate.val & 0xf;
+	else if (family == 0x17)
+		t = pstate.fam17h_bits.did;
 	else
 		t = pstate.bits.did;
 
@@ -44,16 +55,20 @@ static int get_did(int family, union msr_pstate pstate)
 static int get_cof(int family, union msr_pstate pstate)
 {
 	int t;
-	int fid, did;
+	int fid, did, cof;
 
 	did = get_did(family, pstate);
-
-	t = 0x10;
-	fid = pstate.bits.fid;
-	if (family == 0x11)
-		t = 0x8;
-
-	return (100 * (fid + t)) >> did;
+	if (family == 0x17) {
+		fid = pstate.fam17h_bits.fid;
+		cof = 200 * fid / did;
+	} else {
+		t = 0x10;
+		fid = pstate.bits.fid;
+		if (family == 0x11)
+			t = 0x8;
+		cof = (100 * (fid + t)) >> did;
+	}
+	return cof;
 }
 
 /* Needs:
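For a quick sanity check of get_cof() above, a stand-alone sketch with made-up P-state field values (not from the patch): family 0x17 uses cof = 200 * fid / did, while older families keep (100 * (fid + t)) >> did.

#include <stdio.h>

int main(void)
{
	int fid17 = 0x90, did17 = 8;	/* hypothetical family-0x17 P-state fields */
	int fid = 0x10, did = 0;	/* hypothetical older-family P-state fields */

	printf("fam17h: %d MHz\n", 200 * fid17 / did17);		/* 3600 MHz */
	printf("older:  %d MHz\n", (100 * (fid + 0x10)) >> did);	/* 3200 MHz */
	return 0;
}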
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index afb66f8..799a18b 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -70,6 +70,8 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
 #define CPUPOWER_CAP_IS_SNB		0x00000020
 #define CPUPOWER_CAP_INTEL_IDA		0x00000040
 
+#define CPUPOWER_AMD_CPBDIS		0x02000000
+
 #define MAX_HW_PSTATES 10
 
 struct cpupower_cpu_info {
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 1609243..601d719 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -2,11 +2,14 @@
 
 #include "helpers/helpers.h"
 
+#define MSR_AMD_HWCR	0xc0010015
+
 int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 			int *states)
 {
 	struct cpupower_cpu_info cpu_info;
 	int ret;
+	unsigned long long val;
 
 	*support = *active = *states = 0;
 
@@ -16,10 +19,22 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 
 	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_CBP) {
 		*support = 1;
-		amd_pci_get_num_boost_states(active, states);
-		if (ret <= 0)
-			return ret;
-		*support = 1;
+
+		/* AMD Family 0x17 does not utilize PCI D18F4 like prior
+		 * families and has no fixed discrete boost states but
+		 * has hardware-determined variable increments instead.

+		 */
+
+		if (cpu_info.family == 0x17) {
+			if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
+				if (!(val & CPUPOWER_AMD_CPBDIS))
+					*active = 1;
+			}
+		} else {
+			ret = amd_pci_get_num_boost_states(active, states);
+			if (ret)
+				return ret;
+		}
 	} else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA)
 		*support = *active = 1;
 	return 0;
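The family-0x17 boost check added above reduces to a single bit test once MSR_AMD_HWCR has been read; a stand-alone sketch (not the cpupower code, the raw MSR value below is hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define AMD_HWCR_CPBDIS	(1ULL << 25)	/* same bit as CPUPOWER_AMD_CPBDIS */

/* Core Performance Boost is active when CPBDIS is clear, mirroring the
 * read_msr(cpu, MSR_AMD_HWCR, ...) path added above. */
static bool amd_boost_active(unsigned long long hwcr)
{
	return !(hwcr & AMD_HWCR_CPBDIS);
}

int main(void)
{
	unsigned long long hwcr = 0x02000000ULL;	/* CPBDIS set: boost disabled */

	printf("boost %s\n", amd_boost_active(hwcr) ? "active" : "disabled");
	return 0;	/* prints: boost disabled */
}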
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index b112947..0dafba2 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -57,7 +57,6 @@ unsigned int list_header_only;
 unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
-unsigned int do_skl_residency;
 unsigned int do_slm_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
@@ -93,6 +92,7 @@ unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
 int base_cpu;
+int do_migrate;
 double discover_bclk(unsigned int family, unsigned int model);
 unsigned int has_hwp;	/* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
 			/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -151,6 +151,8 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
 #define MAX_ADDED_COUNTERS 16
 
 struct thread_data {
+	struct timeval tv_begin;
+	struct timeval tv_end;
 	unsigned long long tsc;
 	unsigned long long aperf;
 	unsigned long long mperf;
@@ -301,6 +303,9 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
 
 int cpu_migrate(int cpu)
 {
+	if (!do_migrate)
+		return 0;
+
 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -384,8 +389,14 @@ struct msr_counter bic[] = {
 	{ 0x0, "CPU" },
 	{ 0x0, "Mod%c6" },
 	{ 0x0, "sysfs" },
+	{ 0x0, "Totl%C0" },
+	{ 0x0, "Any%C0" },
+	{ 0x0, "GFX%C0" },
+	{ 0x0, "CPUGFX%" },
 };
 
+
+
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define	BIC_Package	(1ULL << 0)
 #define	BIC_Avg_MHz	(1ULL << 1)
@@ -426,6 +437,10 @@ struct msr_counter bic[] = {
 #define	BIC_CPU		(1ULL << 36)
 #define	BIC_Mod_c6	(1ULL << 37)
 #define	BIC_sysfs	(1ULL << 38)
+#define	BIC_Totl_c0	(1ULL << 39)
+#define	BIC_Any_c0	(1ULL << 40)
+#define	BIC_GFX_c0	(1ULL << 41)
+#define	BIC_CPUGFX	(1ULL << 42)
 
 unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL;
 unsigned long long bic_present = BIC_sysfs;
@@ -521,6 +536,8 @@ void print_header(char *delim)
 	struct msr_counter *mp;
 	int printed = 0;
 
+	if (debug)
+		outp += sprintf(outp, "usec %s", delim);
 	if (DO_BIC(BIC_Package))
 		outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
 	if (DO_BIC(BIC_Core))
@@ -599,12 +616,14 @@ void print_header(char *delim)
 	if (DO_BIC(BIC_GFXMHz))
 		outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : ""));
 
-	if (do_skl_residency) {
+	if (DO_BIC(BIC_Totl_c0))
 		outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : ""));
+	if (DO_BIC(BIC_Any_c0))
 		outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : ""));
+	if (DO_BIC(BIC_GFX_c0))
 		outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : ""));
+	if (DO_BIC(BIC_CPUGFX))
 		outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : ""));
-	}
 
 	if (DO_BIC(BIC_Pkgpc2))
 		outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : ""));
@@ -771,6 +790,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
 		(cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset)))
 		return 0;
 
+	if (debug) {
+		/* on each row, print how many usec it took to gather the counters */
+		struct timeval tv;
+
+		timersub(&t->tv_end, &t->tv_begin, &tv);
+		outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec);
+	}
+
 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
 
 	tsc = t->tsc * tsc_tweak;
@@ -912,12 +939,14 @@ int format_counters(struct thread_data *t, struct core_data *c,
 		outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz);
 
 	/* Totl%C0, Any%C0 GFX%C0 CPUGFX% */
-	if (do_skl_residency) {
+	if (DO_BIC(BIC_Totl_c0))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0/tsc);
+	if (DO_BIC(BIC_Any_c0))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0/tsc);
+	if (DO_BIC(BIC_GFX_c0))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0/tsc);
+	if (DO_BIC(BIC_CPUGFX))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0/tsc);
-	}
 
 	if (DO_BIC(BIC_Pkgpc2))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2/tsc);
@@ -1038,12 +1067,16 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
 	int i;
 	struct msr_counter *mp;
 
-	if (do_skl_residency) {
+
+	if (DO_BIC(BIC_Totl_c0))
 		old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0;
+	if (DO_BIC(BIC_Any_c0))
 		old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0;
+	if (DO_BIC(BIC_GFX_c0))
 		old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0;
+	if (DO_BIC(BIC_CPUGFX))
 		old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0;
-	}
+
 	old->pc2 = new->pc2 - old->pc2;
 	if (DO_BIC(BIC_Pkgpc3))
 		old->pc3 = new->pc3 - old->pc3;
@@ -1292,12 +1325,14 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
 		return 0;
 
-	if (do_skl_residency) {
+	if (DO_BIC(BIC_Totl_c0))
 		average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0;
+	if (DO_BIC(BIC_Any_c0))
 		average.packages.pkg_any_core_c0 += p->pkg_any_core_c0;
+	if (DO_BIC(BIC_GFX_c0))
 		average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0;
+	if (DO_BIC(BIC_CPUGFX))
 		average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0;
-	}
 
 	average.packages.pc2 += p->pc2;
 	if (DO_BIC(BIC_Pkgpc3))
@@ -1357,12 +1392,14 @@ void compute_average(struct thread_data *t, struct core_data *c,
 	average.cores.c7 /= topo.num_cores;
 	average.cores.mc6_us /= topo.num_cores;
 
-	if (do_skl_residency) {
+	if (DO_BIC(BIC_Totl_c0))
 		average.packages.pkg_wtd_core_c0 /= topo.num_packages;
+	if (DO_BIC(BIC_Any_c0))
 		average.packages.pkg_any_core_c0 /= topo.num_packages;
+	if (DO_BIC(BIC_GFX_c0))
 		average.packages.pkg_any_gfxe_c0 /= topo.num_packages;
+	if (DO_BIC(BIC_CPUGFX))
 		average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages;
-	}
 
 	average.packages.pc2 /= topo.num_packages;
 	if (DO_BIC(BIC_Pkgpc3))
@@ -1482,6 +1519,9 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 	struct msr_counter *mp;
 	int i;
 
+
+	gettimeofday(&t->tv_begin, (struct timezone *)NULL);
+
 	if (cpu_migrate(cpu)) {
 		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
 		return -1;
@@ -1565,7 +1605,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
 	/* collect core counters only for 1st thread in core */
 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
-		return 0;
+		goto done;
 
 	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) {
 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
@@ -1601,15 +1641,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
 	/* collect package counters only for 1st core in package */
 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
-		return 0;
+		goto done;
 
-	if (do_skl_residency) {
+	if (DO_BIC(BIC_Totl_c0)) {
 		if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0))
 			return -10;
+	}
+	if (DO_BIC(BIC_Any_c0)) {
 		if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0))
 			return -11;
+	}
+	if (DO_BIC(BIC_GFX_c0)) {
 		if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0))
 			return -12;
+	}
+	if (DO_BIC(BIC_CPUGFX)) {
 		if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0))
 			return -13;
 	}
@@ -1688,6 +1734,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 		if (get_mp(cpu, mp, &p->counter[i]))
 			return -10;
 	}
+done:
+	gettimeofday(&t->tv_end, (struct timezone *)NULL);
 
 	return 0;
 }
@@ -3895,6 +3943,9 @@ void decode_misc_enable_msr(void)
 {
 	unsigned long long msr;
 
+	if (!genuine_intel)
+		return;
+
 	if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr))
 		fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n",
 			base_cpu, msr,
@@ -4198,7 +4249,12 @@ void process_cpuid()
 		BIC_PRESENT(BIC_Pkgpc10);
 	}
 	do_irtl_hsw = has_hsw_msrs(family, model);
-	do_skl_residency = has_skl_msrs(family, model);
+	if (has_skl_msrs(family, model)) {
+		BIC_PRESENT(BIC_Totl_c0);
+		BIC_PRESENT(BIC_Any_c0);
+		BIC_PRESENT(BIC_GFX_c0);
+		BIC_PRESENT(BIC_CPUGFX);
+	}
 	do_slm_cstates = is_slm(family, model);
 	do_knl_cstates  = is_knl(family, model);
 
@@ -4578,7 +4634,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-	fprintf(outf, "turbostat version 17.04.12"
+	fprintf(outf, "turbostat version 17.06.23"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -4951,6 +5007,7 @@ void cmdline(int argc, char **argv)
 		{"hide",	required_argument,	0, 'H'},	// meh, -h taken by --help
 		{"Joules",	no_argument,		0, 'J'},
 		{"list",	no_argument,		0, 'l'},
+		{"migrate",	no_argument,		0, 'm'},
 		{"out",		required_argument,	0, 'o'},
 		{"quiet",	no_argument,		0, 'q'},
 		{"show",	required_argument,	0, 's'},
@@ -4962,7 +5019,7 @@ void cmdline(int argc, char **argv)
 
 	progname = argv[0];
 
-	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
+	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
 				long_options, &option_index)) != -1) {
 		switch (opt) {
 		case 'a':
@@ -5005,6 +5062,9 @@ void cmdline(int argc, char **argv)
 			list_header_only++;
 			quiet++;
 			break;
+		case 'm':
+			do_migrate = 1;
+			break;
 		case 'o':
 			outf = fopen_or_die(optarg, "w");
 			break;
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 971c9ff..a711eec 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -1,10 +1,27 @@
-DESTDIR ?=
+CC		= $(CROSS_COMPILE)gcc
+BUILD_OUTPUT    := $(CURDIR)
+PREFIX		:= /usr
+DESTDIR		:=
+
+ifeq ("$(origin O)", "command line")
+	BUILD_OUTPUT := $(O)
+endif
 
 x86_energy_perf_policy : x86_energy_perf_policy.c
+CFLAGS +=	-Wall
+CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 
+%: %.c
+	@mkdir -p $(BUILD_OUTPUT)
+	$(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@
+
+.PHONY : clean
 clean :
-	rm -f x86_energy_perf_policy
+	@rm -f $(BUILD_OUTPUT)/x86_energy_perf_policy
 
-install :
-	install x86_energy_perf_policy ${DESTDIR}/usr/bin/
-	install x86_energy_perf_policy.8 ${DESTDIR}/usr/share/man/man8/
+install : x86_energy_perf_policy
+	install -d  $(DESTDIR)$(PREFIX)/bin
+	install $(BUILD_OUTPUT)/x86_energy_perf_policy $(DESTDIR)$(PREFIX)/bin/x86_energy_perf_policy
+	install -d  $(DESTDIR)$(PREFIX)/share/man/man8
+	install x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8
+
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 8eaaad6..17db1c3 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -1,104 +1,213 @@
-.\"  This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
+.\"  This page Copyright (C) 2010 - 2015 Len Brown <len.brown@intel.com>
 .\"  Distributed under the GPL, Copyleft 1994.
 .TH X86_ENERGY_PERF_POLICY 8
 .SH NAME
-x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
+x86_energy_perf_policy \- Manage Energy vs. Performance Policy via x86 Model Specific Registers
 .SH SYNOPSIS
-.ft B
 .B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB "\-r"
+.RB "[ options ] [ scope ] [field \ value]"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'performance'
+.RB "scope: \-\-cpu\ cpu-list | \-\-pkg\ pkg-list"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'normal'
+.RB "cpu-list, pkg-list: # | #,# | #-# | all"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB 'powersave'
+.RB "field: \-\-all | \-\-epb | \-\-hwp-epp | \-\-hwp-min | \-\-hwp-max | \-\-hwp-desired"
 .br
-.B x86_energy_perf_policy
-.RB [ "\-c cpu" ]
-.RB [ "\-v" ]
-.RB n
+.RB "other: (\-\-force | \-\-hwp-enable | \-\-turbo-enable)  value)"
 .br
+.RB "value: # | default | performance | balance-performance | balance-power | power"
 .SH DESCRIPTION
 \fBx86_energy_perf_policy\fP
-allows software to convey
-its policy for the relative importance of performance
-versus energy savings to the processor.
+displays and updates energy-performance policy settings specific to
+Intel Architecture Processors.  Settings are accessed via Model Specific Register (MSR)
+updates, whether or not the Linux cpufreq sub-system is enabled.
 
-The processor uses this information in model-specific ways
-when it must select trade-offs between performance and
-energy efficiency.
+Policy in MSR_IA32_ENERGY_PERF_BIAS (EPB)
+may affect a wide range of hardware decisions,
+such as how aggressively the hardware enters and exits CPU idle states (C-states)
+and Processor Performance States (P-states).
+This policy hint does not replace explicit OS C-state and P-state selection.
+Rather, it tells the hardware how aggressively to implement those selections.
+Further, it allows the OS to influence energy/performance trade-offs where there
+is no software interface, such as in the opportunistic "turbo-mode" P-state range.
+Note that MSR_IA32_ENERGY_PERF_BIAS is defined per CPU,
+but some implementations
+share a single MSR among all CPUs in each processor package.
+On those systems, a write to EPB on one processor will
+be visible, and will have an effect, on all CPUs
+in the same processor package.
 
-This policy hint does not supersede Processor Performance states
-(P-states) or CPU Idle power states (C-states), but allows
-software to have influence where it would otherwise be unable
-to express a preference.
+Hardware P-States (HWP) are effectively an expansion of hardware
+P-state control from the opportunistic turbo-mode P-state range
+to include the entire range of available P-states.
+On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
+That influence was removed in subsequent generations,
+where it was moved to the
+Energy_Performance_Preference (EPP) field in
+a pair of dedicated MSRs -- MSR_IA32_HWP_REQUEST and MSR_IA32_HWP_REQUEST_PKG.
 
-For example, this setting may tell the hardware how
-aggressively or conservatively to control frequency
-in the "turbo range" above the explicitly OS-controlled
-P-state frequency range.  It may also tell the hardware
-how aggressively is should enter the OS requested C-states.
+EPP is the most commonly managed knob in HWP mode,
+but MSR_IA32_HWP_REQUEST also allows the user to specify
+minimum-frequency for Quality-of-Service,
+and maximum-frequency for power-capping.
+MSR_IA32_HWP_REQUEST is defined per-CPU.
 
-Support for this feature is indicated by CPUID.06H.ECX.bit3
-per the Intel Architectures Software Developer's Manual.
+MSR_IA32_HWP_REQUEST_PKG has the same capability as MSR_IA32_HWP_REQUEST,
+but it can simultaneously set the default policy for all CPUs within a package.
+A bit in per-CPU MSR_IA32_HWP_REQUEST indicates whether it is
+overruled by or exempt from MSR_IA32_HWP_REQUEST_PKG.
 
-.SS Options
-\fB-c\fP limits operation to a single CPU.
-The default is to operate on all CPUs.
-Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
-logical processor, but that the initial implementations
-of the MSR were shared among all processors in each package.
+MSR_HWP_CAPABILITIES shows the default values for the fields
+in MSR_IA32_HWP_REQUEST.  It is displayed when no values
+are being written.
+
+.SS SCOPE OPTIONS
 .PP
-\fB-v\fP increases verbosity.  By default
-x86_energy_perf_policy is silent.
+\fB-c, --cpu\fP Operate on the MSR_IA32_HWP_REQUEST for each CPU in a CPU-list.
+The CPU-list may be comma-separated CPU numbers, with dash for range
+or the string "all".  Eg. '--cpu 1,4,6-8' or '--cpu all'.
+When --cpu is used, \fB--hwp-use-pkg\fP is available, which specifies whether the per-cpu
+MSR_IA32_HWP_REQUEST should be over-ruled by MSR_IA32_HWP_REQUEST_PKG (1),
+or exempt from MSR_IA32_HWP_REQUEST_PKG (0).
+
+\fB-p, --pkg\fP Operate on the MSR_IA32_HWP_REQUEST_PKG for each package in the package-list.
+The list is a string of individual package numbers separated
+by commas, and/or ranges of package numbers separated by a dash,
+or the string "all".
+For example '--pkg 1,3' or '--pkg all'.
+
+.SS VALUE OPTIONS
 .PP
-\fB-r\fP is for "read-only" mode - the unchanged state
-is read and displayed.
-.PP
-.I performance
-Set a policy where performance is paramount.
-The processor will be unwilling to sacrifice any performance
-for the sake of energy saving. This is the hardware default.
-.PP
-.I normal
+.I normal | default
 Set a policy with a normal balance between performance and energy efficiency.
 The processor will tolerate minor performance compromise
 for potentially significant energy savings.
-This reasonable default for most desktops and servers.
+This is a reasonable default for most desktops and servers.
+"default" is a synonym for "normal".
 .PP
-.I powersave
+.I performance
+Set a policy for maximum performance,
+accepting no performance sacrifice for the benefit of energy efficiency.
+.PP
+.I balance-performance
+Set a policy with a high priority on performance,
+but allowing some performance loss to benefit energy efficiency.
+.PP
+.I balance-power
+Set a policy where the performance and power are balanced.
+This is the default.
+.PP
+.I power
 Set a policy where the processor can accept
-a measurable performance hit to maximize energy efficiency.
-.PP
-.I n
-Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
-The range of valid numbers is 0-15, where 0 is maximum
-performance and 15 is maximum energy efficiency.
+a measurable performance impact to maximize energy efficiency.
 
+.PP
+The following table shows the mapping from the value strings above to actual MSR values.
+This mapping is defined in the Linux-kernel header, msr-index.h.
+
+.nf
+VALUE STRING      	EPB	EPP
+performance       	0	0
+balance-performance	4	128
+normal, default		6	128
+balance-power	 	8	192
+power       		15	255
+.fi
+.PP
+For MSR_IA32_HWP_REQUEST performance fields
+(--hwp-min, --hwp-max, --hwp-desired), the value option
+is in units of 100 MHz, e.g. 12 signifies 1200 MHz.
+
+.SS FIELD OPTIONS
+\fB-a, --all value-string\fP Sets all EPB and EPP and HWP limit fields to the value associated with
+the value-string.  In addition, enables turbo-mode and HWP-mode, if they were previously disabled.
+Thus "--all normal" will set a system without cpufreq into a well-known configuration.
+.PP
+\fB-B, --epb\fP set EPB per-core or per-package.
+See value strings in the table above.
+.PP
+\fB-d, --debug\fP increases verbosity.  By default
+x86_energy_perf_policy is silent for updates,
+and verbose for read-only mode.
+.PP
+\fB-P, --hwp-epp\fP set HWP.EPP per-core or per-package.
+See value strings in the table above.
+.PP
+\fB-m, --hwp-min\fP request HWP to not go below the specified core/bus ratio.
+The "default" is the value found in IA32_HWP_CAPABILITIES.min.
+.PP
+\fB-M, --hwp-max\fP request HWP to not exceed the specified core/bus ratio.
+The "default" is the value found in IA32_HWP_CAPABILITIES.max.
+.PP
+\fB-D, --hwp-desired\fP request HWP 'desired' frequency.
+The "normal" setting is 0, which
+corresponds to 'full autonomous' HWP control.
+Non-zero performance values request a specific performance
+level on this processor, specified in multiples of 100 MHz.
+.PP
+\fB-w, --hwp-window\fP specify the integer number of microseconds
+in the sliding window that HWP uses to maintain average frequency.
+This parameter is meaningful only when the "desired" field above is non-zero.
+Default is 0, allowing the HW to choose.
+.SH OTHER OPTIONS
+.PP
+\fB-f, --force\fP writes the specified values without bounds checking.
+.PP
+\fB-U, --hwp-use-pkg\fP (0 | 1), when used in conjunction with --cpu,
+indicates whether the per-CPU MSR_IA32_HWP_REQUEST should be overruled (1)
+or exempt (0) from per-Package MSR_IA32_HWP_REQUEST_PKG settings.
+The default is exempt.
+.PP
+\fB-H, --hwp-enable\fP enable Hardware P-state (HWP) mode.  Once enabled, a system RESET is required to disable HWP mode.
+.PP
+\fB-t, --turbo-enable\fP enable (1) or disable (0) turbo mode.
+.PP
+\fB-v, --version\fP print version and exit.
+.PP
+If no request to change policy is made,
+the default behavior is to read
+and display the current system state,
+including the default capabilities.
+.SH WARNING
+.PP
+This utility writes directly to Model Specific Registers.
+There is no locking or coordination should this utility
+be used to modify HWP limit fields at the same time that
+intel_pstate's sysfs attributes access the same MSRs.
+.PP
+Note that --hwp-desired and --hwp-window are considered experimental.
+Future versions of Linux reserve the right to access these
+fields internally -- potentially conflicting with user-space access.
+.SH EXAMPLE
+.nf
+# sudo x86_energy_perf_policy
+cpu0: EPB 6
+cpu0: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu0: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu1: EPB 6
+cpu1: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu1: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu2: EPB 6
+cpu2: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu2: HWP_CAP: low 1 eff 8 guar 27 high 35
+cpu3: EPB 6
+cpu3: HWP_REQ: min 6 max 35 des 0 epp 128 window 0x0 (0*10^0us) use_pkg 0
+cpu3: HWP_CAP: low 1 eff 8 guar 27 high 35
+.fi
 .SH NOTES
-.B "x86_energy_perf_policy "
+.B "x86_energy_perf_policy"
 runs only as root.
 .SH FILES
 .ta
 .nf
 /dev/cpu/*/msr
 .fi
-
 .SH "SEE ALSO"
+.nf
 msr(4)
+Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+.fi
 .PP
 .SH AUTHORS
 .nf
-Written by Len Brown <len.brown@intel.com>
+Len Brown
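The HWP activity window shown in the example output above ("window 0x0 (0*10^0us)") is a 7-bit mantissa plus a 3-bit power-of-ten exponent; a stand-alone sketch (not from the patch) of the encoding that parse_cmdline_hwp_window() performs below:

#include <stdio.h>

/* usec ~= mantissa * 10^exponent; mantissa in bits 6:0, exponent in bits 9:7
 * of the 10-bit window field. */
static unsigned int hwp_window_encode(unsigned int usec)
{
	unsigned int exponent = 0;

	while (usec > 127) {	/* keep the mantissa within 7 bits */
		usec /= 10;	/* dropping precision, as the MSR format does */
		exponent++;
	}
	return (exponent << 7) | usec;
}

int main(void)
{
	unsigned int w = hwp_window_encode(10000);	/* request a 10 ms window */

	printf("0x%x = %u*10^%u us\n", w, w & 0x7f, (w >> 7) & 0x7);
	return 0;	/* prints: 0x164 = 100*10^2 us */
}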
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 40b3e54..65bbe62 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -3,322 +3,1424 @@
  * policy preference bias on recent X86 processors.
  */
 /*
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2010 - 2017 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * This program is released under GPL v2
  */
 
+#define _GNU_SOURCE
+#include MSRHEADER
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/types.h>
+#include <sched.h>
 #include <sys/stat.h>
 #include <sys/resource.h>
+#include <getopt.h>
+#include <err.h>
 #include <fcntl.h>
 #include <signal.h>
 #include <sys/time.h>
+#include <limits.h>
 #include <stdlib.h>
 #include <string.h>
+#include <cpuid.h>
+#include <errno.h>
 
-unsigned int verbose;		/* set with -v */
-unsigned int read_only;		/* set with -r */
+#define	OPTARG_NORMAL			(INT_MAX - 1)
+#define	OPTARG_POWER			(INT_MAX - 2)
+#define	OPTARG_BALANCE_POWER		(INT_MAX - 3)
+#define	OPTARG_BALANCE_PERFORMANCE	(INT_MAX - 4)
+#define	OPTARG_PERFORMANCE		(INT_MAX - 5)
+
+struct msr_hwp_cap {
+	unsigned char highest;
+	unsigned char guaranteed;
+	unsigned char efficient;
+	unsigned char lowest;
+};
+
+struct msr_hwp_request {
+	unsigned char hwp_min;
+	unsigned char hwp_max;
+	unsigned char hwp_desired;
+	unsigned char hwp_epp;
+	unsigned int hwp_window;
+	unsigned char hwp_use_pkg;
+} req_update;
+
+unsigned int debug;
+unsigned int verbose;
+unsigned int force;
 char *progname;
-unsigned long long new_bias;
-int cpu = -1;
+int base_cpu;
+unsigned char update_epb;
+unsigned long long new_epb;
+unsigned char turbo_is_enabled;
+unsigned char update_turbo;
+unsigned char turbo_update_value;
+unsigned char update_hwp_epp;
+unsigned char update_hwp_min;
+unsigned char update_hwp_max;
+unsigned char update_hwp_desired;
+unsigned char update_hwp_window;
+unsigned char update_hwp_use_pkg;
+unsigned char update_hwp_enable;
+#define hwp_update_enabled() (update_hwp_enable | update_hwp_epp | update_hwp_max | update_hwp_min | update_hwp_desired | update_hwp_window | update_hwp_use_pkg)
+int max_cpu_num;
+int max_pkg_num;
+#define MAX_PACKAGES 64
+unsigned int first_cpu_in_pkg[MAX_PACKAGES];
+unsigned long long pkg_present_set;
+unsigned long long pkg_selected_set;
+cpu_set_t *cpu_present_set;
+cpu_set_t *cpu_selected_set;
+int genuine_intel;
+
+size_t cpu_setsize;
+
+char *proc_stat = "/proc/stat";
+
+unsigned int has_epb;	/* MSR_IA32_ENERGY_PERF_BIAS */
+unsigned int has_hwp;	/* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
+			/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
+unsigned int has_hwp_notify;		/* IA32_HWP_INTERRUPT */
+unsigned int has_hwp_activity_window;	/* IA32_HWP_REQUEST[bits 41:32] */
+unsigned int has_hwp_epp;	/* IA32_HWP_REQUEST[bits 31:24] */
+unsigned int has_hwp_request_pkg;	/* IA32_HWP_REQUEST_PKG */
+
+unsigned int bdx_highest_ratio;
 
 /*
- * Usage:
- *
- * -c cpu: limit action to a single CPU (default is all CPUs)
- * -v: verbose output (can invoke more than once)
- * -r: read-only, don't change any settings
- *
- *  performance
- *	Performance is paramount.
- *	Unwilling to sacrifice any performance
- *	for the sake of energy saving. (hardware default)
- *
- *  normal
- *	Can tolerate minor performance compromise
- *	for potentially significant energy savings.
- *	(reasonable default for most desktops and servers)
- *
- *  powersave
- *	Can tolerate significant performance hit
- *	to maximize energy savings.
- *
- * n
- *	a numerical value to write to the underlying MSR.
+ * maintain compatibility with original implementation, but don't document it:
  */
 void usage(void)
 {
-	printf("%s: [-c cpu] [-v] "
-		"(-r | 'performance' | 'normal' | 'powersave' | n)\n",
-		progname);
+	fprintf(stderr, "%s [options] [scope][field value]\n", progname);
+	fprintf(stderr, "scope: --cpu cpu-list [--hwp-use-pkg #] | --pkg pkg-list\n");
+	fprintf(stderr, "field: --all | --epb | --hwp-epp | --hwp-min | --hwp-max | --hwp-desired\n");
+	fprintf(stderr, "other: --hwp-enable | --turbo-enable (0 | 1) | --help | --force\n");
+	fprintf(stderr,
+		"value: ( # | \"normal\" | \"performance\" | \"balance-performance\" | \"balance-power\"| \"power\")\n");
+	fprintf(stderr, "--hwp-window usec\n");
+
+	fprintf(stderr, "Specify only Energy Performance BIAS (legacy usage):\n");
+	fprintf(stderr, "%s: [-c cpu] [-v] (-r | policy-value )\n", progname);
+
 	exit(1);
 }
 
-#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+/*
+ * If bdx_highest_ratio is set,
+ * then we must translate between MSR format and simple ratio
+ * used on the cmdline.
+ */
+int ratio_2_msr_perf(int ratio)
+{
+	int msr_perf;
 
-#define	BIAS_PERFORMANCE		0
-#define BIAS_BALANCE			6
-#define	BIAS_POWERSAVE			15
+	if (!bdx_highest_ratio)
+		return ratio;
+
+	msr_perf = ratio * 255 / bdx_highest_ratio;
+
+	if (debug)
+		fprintf(stderr, "%d = ratio_to_msr_perf(%d)\n", msr_perf, ratio);
+
+	return msr_perf;
+}
+int msr_perf_2_ratio(int msr_perf)
+{
+	int ratio;
+	double d;
+
+	if (!bdx_highest_ratio)
+		return msr_perf;
+
+	d = (double)msr_perf * (double) bdx_highest_ratio / 255.0;
+	d = d + 0.5;	/* round */
+	ratio = (int)d;
+
+	if (debug)
+		fprintf(stderr, "%d = msr_perf_ratio(%d) {%f}\n", ratio, msr_perf, d);
+
+	return ratio;
+}
+int parse_cmdline_epb(int i)
+{
+	if (!has_epb)
+		errx(1, "EPB not enabled on this platform");
+
+	update_epb = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+		return ENERGY_PERF_BIAS_POWERSAVE;
+	case OPTARG_BALANCE_POWER:
+		return ENERGY_PERF_BIAS_BALANCE_POWERSAVE;
+	case OPTARG_NORMAL:
+		return ENERGY_PERF_BIAS_NORMAL;
+	case OPTARG_BALANCE_PERFORMANCE:
+		return ENERGY_PERF_BIAS_BALANCE_PERFORMANCE;
+	case OPTARG_PERFORMANCE:
+		return ENERGY_PERF_BIAS_PERFORMANCE;
+	}
+	if (i < 0 || i > ENERGY_PERF_BIAS_POWERSAVE)
+		errx(1, "--epb must be from 0 to 15");
+	return i;
+}
+
+#define HWP_CAP_LOWEST 0
+#define HWP_CAP_HIGHEST 255
+
+/*
+ * "performance" changes hwp_min to cap.highest
+ * All others leave it at cap.lowest
+ */
+int parse_cmdline_hwp_min(int i)
+{
+	update_hwp_min = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+	case OPTARG_BALANCE_POWER:
+	case OPTARG_NORMAL:
+	case OPTARG_BALANCE_PERFORMANCE:
+		return HWP_CAP_LOWEST;
+	case OPTARG_PERFORMANCE:
+		return HWP_CAP_HIGHEST;
+	}
+	return i;
+}
+/*
+ * "power" changes hwp_max to cap.lowest
+ * All others leave it at cap.highest
+ */
+int parse_cmdline_hwp_max(int i)
+{
+	update_hwp_max = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+		return HWP_CAP_LOWEST;
+	case OPTARG_NORMAL:
+	case OPTARG_BALANCE_POWER:
+	case OPTARG_BALANCE_PERFORMANCE:
+	case OPTARG_PERFORMANCE:
+		return HWP_CAP_HIGHEST;
+	}
+	return i;
+}
+/*
+ * for --hwp-desired, all strings leave it in autonomous mode
+ * If you want to change it, you need to explicitly pick a value
+ */
+int parse_cmdline_hwp_desired(int i)
+{
+	update_hwp_desired = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+	case OPTARG_BALANCE_POWER:
+	case OPTARG_BALANCE_PERFORMANCE:
+	case OPTARG_NORMAL:
+	case OPTARG_PERFORMANCE:
+		return 0;	/* autonomous */
+	}
+	return i;
+}
+
+int parse_cmdline_hwp_window(int i)
+{
+	unsigned int exponent;
+
+	update_hwp_window = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+	case OPTARG_BALANCE_POWER:
+	case OPTARG_NORMAL:
+	case OPTARG_BALANCE_PERFORMANCE:
+	case OPTARG_PERFORMANCE:
+		return 0;
+	}
+	if (i < 0 || i > 1270000000) {
+		fprintf(stderr, "--hwp-window: 0 for auto; 1 - 1270000000 usec for window duration\n");
+		usage();
+	}
+	for (exponent = 0; ; ++exponent) {
+		if (debug)
+			printf("%d 10^%d\n", i, exponent);
+
+		if (i <= 127)
+			break;
+
+		i = i / 10;
+	}
+	if (debug)
+		fprintf(stderr, "%d*10^%d: 0x%x\n", i, exponent, (exponent << 7) | i);
+
+	return (exponent << 7) | i;
+}
+int parse_cmdline_hwp_epp(int i)
+{
+	update_hwp_epp = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+		return HWP_EPP_POWERSAVE;
+	case OPTARG_BALANCE_POWER:
+		return HWP_EPP_BALANCE_POWERSAVE;
+	case OPTARG_NORMAL:
+	case OPTARG_BALANCE_PERFORMANCE:
+		return HWP_EPP_BALANCE_PERFORMANCE;
+	case OPTARG_PERFORMANCE:
+		return HWP_EPP_PERFORMANCE;
+	}
+	if (i < 0 || i > 0xff) {
+		fprintf(stderr, "--hwp-epp must be from 0 to 0xff\n");
+		usage();
+	}
+	return i;
+}
+int parse_cmdline_turbo(int i)
+{
+	update_turbo = 1;
+
+	switch (i) {
+	case OPTARG_POWER:
+		return 0;
+	case OPTARG_NORMAL:
+	case OPTARG_BALANCE_POWER:
+	case OPTARG_BALANCE_PERFORMANCE:
+	case OPTARG_PERFORMANCE:
+		return 1;
+	}
+	if (i < 0 || i > 1) {
+		fprintf(stderr, "--turbo-enable: 1 to enable, 0 to disable\n");
+		usage();
+	}
+	return i;
+}
+
+int parse_optarg_string(char *s)
+{
+	int i;
+	char *endptr;
+
+	if (!strncmp(s, "default", 7))
+		return OPTARG_NORMAL;
+
+	if (!strncmp(s, "normal", 6))
+		return OPTARG_NORMAL;
+
+	if (!strncmp(s, "power", 9))
+		return OPTARG_POWER;
+
+	if (!strncmp(s, "balance-power", 17))
+		return OPTARG_BALANCE_POWER;
+
+	if (!strncmp(s, "balance-performance", 19))
+		return OPTARG_BALANCE_PERFORMANCE;
+
+	if (!strncmp(s, "performance", 11))
+		return OPTARG_PERFORMANCE;
+
+	i = strtol(s, &endptr, 0);
+	if (s == endptr) {
+		fprintf(stderr, "no digits in \"%s\"\n", s);
+		usage();
+	}
+	if (i == LONG_MIN || i == LONG_MAX)
+		errx(-1, "%s", s);
+
+	if (i > 0xFF)
+		errx(-1, "%d (0x%x) must be < 256", i, i);
+
+	if (i < 0)
+		errx(-1, "%d (0x%x) must be >= 0", i, i);
+	return i;
+}
+
+void parse_cmdline_all(char *s)
+{
+	force++;
+	update_hwp_enable = 1;
+	req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(s));
+	req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(s));
+	req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(s));
+	if (has_epb)
+		new_epb = parse_cmdline_epb(parse_optarg_string(s));
+	turbo_update_value = parse_cmdline_turbo(parse_optarg_string(s));
+	req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(s));
+	req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(s));
+}
+
+void validate_cpu_selected_set(void)
+{
+	int cpu;
+
+	if (CPU_COUNT_S(cpu_setsize, cpu_selected_set) == 0)
+		errx(0, "no CPUs requested");
+
+	for (cpu = 0; cpu <= max_cpu_num; ++cpu) {
+		if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set))
+			if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+				errx(1, "Requested cpu%d is not present", cpu);
+	}
+}
+
+void parse_cmdline_cpu(char *s)
+{
+	char *startp, *endp;
+	int cpu = 0;
+
+	if (pkg_selected_set) {
+		usage();
+		errx(1, "--cpu | --pkg");
+	}
+	cpu_selected_set = CPU_ALLOC((max_cpu_num + 1));
+	if (cpu_selected_set == NULL)
+		err(1, "cpu_selected_set");
+	CPU_ZERO_S(cpu_setsize, cpu_selected_set);
+
+	for (startp = s; startp && *startp;) {
+
+		if (*startp == ',') {
+			startp++;
+			continue;
+		}
+
+		if (*startp == '-') {
+			int end_cpu;
+
+			startp++;
+			end_cpu = strtol(startp, &endp, 10);
+			if (startp == endp)
+				continue;
+
+			while (cpu <= end_cpu) {
+				if (cpu > max_cpu_num)
+					errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
+				CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+				cpu++;
+			}
+			startp = endp;
+			continue;
+		}
+
+		if (strncmp(startp, "all", 3) == 0) {
+			for (cpu = 0; cpu <= max_cpu_num; cpu += 1) {
+				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+			}
+			startp += 3;
+			if (*startp == 0)
+				break;
+		}
+		/* "--cpu even" is not documented */
+		if (strncmp(startp, "even", 4) == 0) {
+			for (cpu = 0; cpu <= max_cpu_num; cpu += 2) {
+				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+			}
+			startp += 4;
+			if (*startp == 0)
+				break;
+		}
+
+		/* "--cpu odd" is not documented */
+		if (strncmp(startp, "odd", 3) == 0) {
+			for (cpu = 1; cpu <= max_cpu_num; cpu += 2) {
+				if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set))
+					CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+			}
+			startp += 3;
+			if (*startp == 0)
+				break;
+		}
+
+		cpu = strtol(startp, &endp, 10);
+		if (startp == endp)
+			errx(1, "--cpu cpu-set: confused by '%s'", startp);
+		if (cpu > max_cpu_num)
+			errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num);
+		CPU_SET_S(cpu, cpu_setsize, cpu_selected_set);
+		startp = endp;
+	}
+
+	validate_cpu_selected_set();
+
+}
+
+void parse_cmdline_pkg(char *s)
+{
+	char *startp, *endp;
+	int pkg = 0;
+
+	if (cpu_selected_set) {
+		usage();
+		errx(1, "--pkg | --cpu");
+	}
+	pkg_selected_set = 0;
+
+	for (startp = s; startp && *startp;) {
+
+		if (*startp == ',') {
+			startp++;
+			continue;
+		}
+
+		if (*startp == '-') {
+			int end_pkg;
+
+			startp++;
+			end_pkg = strtol(startp, &endp, 10);
+			if (startp == endp)
+				continue;
+
+			while (pkg <= end_pkg) {
+				if (pkg > max_pkg_num)
+					errx(1, "Requested pkg%d exceeds max pkg%d", pkg, max_pkg_num);
+				pkg_selected_set |= 1 << pkg;
+				pkg++;
+			}
+			startp = endp;
+			continue;
+		}
+
+		if (strncmp(startp, "all", 3) == 0) {
+			pkg_selected_set = pkg_present_set;
+			return;
+		}
+
+		pkg = strtol(startp, &endp, 10);
+		if (pkg > max_pkg_num)
+			errx(1, "Requested pkg%d exceeds max pkg%d", pkg, max_pkg_num);
+		pkg_selected_set |= 1 << pkg;
+		startp = endp;
+	}
+}
+
+void for_packages(unsigned long long pkg_set, int (func)(int))
+{
+	int pkg_num;
+
+	for (pkg_num = 0; pkg_num <= max_pkg_num; ++pkg_num) {
+		if (pkg_set & (1UL << pkg_num))
+			func(pkg_num);
+	}
+}
+
+void print_version(void)
+{
+	printf("x86_energy_perf_policy 17.05.11 (C) Len Brown <len.brown@intel.com>\n");
+}
 
 void cmdline(int argc, char **argv)
 {
 	int opt;
+	int option_index = 0;
+
+	static struct option long_options[] = {
+		{"all",		required_argument,	0, 'a'},
+		{"cpu",		required_argument,	0, 'c'},
+		{"pkg",		required_argument,	0, 'p'},
+		{"debug",	no_argument,		0, 'd'},
+		{"hwp-desired",	required_argument,	0, 'D'},
+		{"epb",	required_argument,	0, 'B'},
+		{"force",	no_argument,	0, 'f'},
+		{"hwp-enable",	no_argument,	0, 'e'},
+		{"help",	no_argument,	0, 'h'},
+		{"hwp-epp",	required_argument,	0, 'P'},
+		{"hwp-min",	required_argument,	0, 'm'},
+		{"hwp-max",	required_argument,	0, 'M'},
+		{"read",	no_argument,		0, 'r'},
+		{"turbo-enable",	required_argument,	0, 't'},
+		{"hwp-use-pkg",	required_argument,	0, 'u'},
+		{"version",	no_argument,		0, 'v'},
+		{"hwp-window",	required_argument,	0, 'w'},
+		{0,		0,			0, 0 }
+	};
 
 	progname = argv[0];
 
-	while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
+	while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
+				long_options, &option_index)) != -1) {
 		switch (opt) {
+		case 'a':
+			parse_cmdline_all(optarg);
+			break;
+		case 'B':
+			new_epb = parse_cmdline_epb(parse_optarg_string(optarg));
+			break;
 		case 'c':
-			cpu = atoi(optarg);
+			parse_cmdline_cpu(optarg);
+			break;
+		case 'e':
+			update_hwp_enable = 1;
+			break;
+		case 'h':
+			usage();
+			break;
+		case 'd':
+			debug++;
+			verbose++;
+			break;
+		case 'f':
+			force++;
+			break;
+		case 'D':
+			req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(optarg));
+			break;
+		case 'm':
+			req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(optarg));
+			break;
+		case 'M':
+			req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(optarg));
+			break;
+		case 'p':
+			parse_cmdline_pkg(optarg);
+			break;
+		case 'P':
+			req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(optarg));
 			break;
 		case 'r':
-			read_only = 1;
+			/* v1 used -r to specify read-only mode, now the default */
+			break;
+		case 't':
+			turbo_update_value = parse_cmdline_turbo(parse_optarg_string(optarg));
+			break;
+		case 'u':
+			update_hwp_use_pkg++;
+			if (atoi(optarg) == 0)
+				req_update.hwp_use_pkg = 0;
+			else
+				req_update.hwp_use_pkg = 1;
 			break;
 		case 'v':
-			verbose++;
+			print_version();
+			exit(0);
+			break;
+		case 'w':
+			req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(optarg));
 			break;
 		default:
 			usage();
 		}
 	}
-	/* if -r, then should be no additional optind */
-	if (read_only && (argc > optind))
-		usage();
-
 	/*
-	 * if no -r , then must be one additional optind
+	 * v1 allowed "performance"|"normal"|"power" with no policy specifier
+	 * to update BIAS.  Continue to support that, even though no longer documented.
 	 */
-	if (!read_only) {
+	if (argc == optind + 1)
+		new_epb = parse_cmdline_epb(parse_optarg_string(argv[optind]));
 
-		if (argc != optind + 1) {
-			printf("must supply -r or policy param\n");
-			usage();
-			}
-
-		if (!strcmp("performance", argv[optind])) {
-			new_bias = BIAS_PERFORMANCE;
-		} else if (!strcmp("normal", argv[optind])) {
-			new_bias = BIAS_BALANCE;
-		} else if (!strcmp("powersave", argv[optind])) {
-			new_bias = BIAS_POWERSAVE;
-		} else {
-			char *endptr;
-
-			new_bias = strtoull(argv[optind], &endptr, 0);
-			if (endptr == argv[optind] ||
-				new_bias > BIAS_POWERSAVE) {
-					fprintf(stderr, "invalid value: %s\n",
-						argv[optind]);
-				usage();
-			}
-		}
+	if (argc > optind + 1) {
+		fprintf(stderr, "stray parameter '%s'\n", argv[optind + 1]);
+		usage();
 	}
 }
 
+
+int get_msr(int cpu, int offset, unsigned long long *msr)
+{
+	int retval;
+	char pathname[32];
+	int fd;
+
+	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+	fd = open(pathname, O_RDONLY);
+	if (fd < 0)
+		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+
+	retval = pread(fd, msr, sizeof(*msr), offset);
+	if (retval != sizeof(*msr))
+		err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
+
+	if (debug > 1)
+		fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
+
+	close(fd);
+	return 0;
+}
+
+int put_msr(int cpu, int offset, unsigned long long new_msr)
+{
+	char pathname[32];
+	int retval;
+	int fd;
+
+	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+	fd = open(pathname, O_RDWR);
+	if (fd < 0)
+		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
+
+	retval = pwrite(fd, &new_msr, sizeof(new_msr), offset);
+	if (retval != sizeof(new_msr))
+		err(-2, "pwrite(cpu%d, offset 0x%x, 0x%llx) = %d", cpu, offset, new_msr, retval);
+
+	close(fd);
+
+	if (debug > 1)
+		fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
+
+	return 0;
+}
+
+void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str)
+{
+	if (cpu != -1)
+		printf("cpu%d: ", cpu);
+
+	printf("HWP_CAP: low %d eff %d guar %d high %d\n",
+		cap->lowest, cap->efficient, cap->guaranteed, cap->highest);
+}
+void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
+{
+	unsigned long long msr;
+
+	get_msr(cpu, msr_offset, &msr);
+
+	cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
+	cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
+	cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
+	cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
+}
+
+void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str)
+{
+	if (cpu != -1)
+		printf("cpu%d: ", cpu);
+
+	if (str)
+		printf("%s", str);
+
+	printf("HWP_REQ: min %d max %d des %d epp %d window 0x%x (%d*10^%dus) use_pkg %d\n",
+		h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
+		h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg);
+}
+void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str)
+{
+	printf("pkg%d: ", pkg);
+
+	if (str)
+		printf("%s", str);
+
+	printf("HWP_REQ_PKG: min %d max %d des %d epp %d window 0x%x (%d*10^%dus)\n",
+		h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp,
+		h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7);
+}
+void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+{
+	unsigned long long msr;
+
+	get_msr(cpu, msr_offset, &msr);
+
+	hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff));
+	hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff));
+	hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff));
+	hwp_req->hwp_epp = (((msr) >> 24) & 0xff);
+	hwp_req->hwp_window = (((msr) >> 32) & 0x3ff);
+	hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1);
+}
+
+void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset)
+{
+	unsigned long long msr = 0;
+
+	if (debug > 1)
+		printf("cpu%d: requesting min %d max %d des %d epp %d window 0x%0x use_pkg %d\n",
+			cpu, hwp_req->hwp_min, hwp_req->hwp_max,
+			hwp_req->hwp_desired, hwp_req->hwp_epp,
+			hwp_req->hwp_window, hwp_req->hwp_use_pkg);
+
+	msr |= HWP_MIN_PERF(ratio_2_msr_perf(hwp_req->hwp_min));
+	msr |= HWP_MAX_PERF(ratio_2_msr_perf(hwp_req->hwp_max));
+	msr |= HWP_DESIRED_PERF(ratio_2_msr_perf(hwp_req->hwp_desired));
+	msr |= HWP_ENERGY_PERF_PREFERENCE(hwp_req->hwp_epp);
+	msr |= HWP_ACTIVITY_WINDOW(hwp_req->hwp_window);
+	msr |= HWP_PACKAGE_CONTROL(hwp_req->hwp_use_pkg);
+
+	put_msr(cpu, msr_offset, msr);
+}
+
+int print_cpu_msrs(int cpu)
+{
+	unsigned long long msr;
+	struct msr_hwp_request req;
+	struct msr_hwp_cap cap;
+
+	if (has_epb) {
+		get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
+
+		printf("cpu%d: EPB %u\n", cpu, (unsigned int) msr);
+	}
+
+	if (!has_hwp)
+		return 0;
+
+	read_hwp_request(cpu, &req, MSR_HWP_REQUEST);
+	print_hwp_request(cpu, &req, "");
+
+	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+	print_hwp_cap(cpu, &cap, "");
+
+	return 0;
+}
+
+int print_pkg_msrs(int pkg)
+{
+	struct msr_hwp_request req;
+	unsigned long long msr;
+
+	if (!has_hwp)
+		return 0;
+
+	read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG);
+	print_hwp_request_pkg(pkg, &req, "");
+
+	if (has_hwp_notify) {
+		get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr);
+		fprintf(stderr,
+		"pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n",
+		pkg, msr,
+		((msr) & 0x2) ? "EN" : "Dis",
+		((msr) & 0x1) ? "EN" : "Dis");
+	}
+	get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr);
+	fprintf(stderr,
+		"pkg%d: MSR_HWP_STATUS: 0x%08llx (%sExcursion_Min, %sGuaranteed_Perf_Change)\n",
+		pkg, msr,
+		((msr) & 0x4) ? "" : "No-",
+		((msr) & 0x1) ? "" : "No-");
+
+	return 0;
+}
+
 /*
- * validate_cpuid()
- * returns on success, quietly exits on failure (make verbose with -v)
+ * Assumption: All HWP systems have 100 MHz bus clock
  */
-void validate_cpuid(void)
+int ratio_2_sysfs_khz(int ratio)
+{
+	int bclk_khz = 100 * 1000;	/* 100,000 KHz = 100 MHz */
+
+	return ratio * bclk_khz;
+}
+/*
+ * If HWP is enabled and cpufreq sysfs attributes are present,
+ * then update sysfs, so that it will not become
+ * stale when we write to MSRs.
+ * (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq,
+ *  so we don't have to touch that.)
+ */
+void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio)
+{
+	char pathname[64];
+	FILE *fp;
+	int retval;
+	int khz;
+
+	sprintf(pathname, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_%s_freq",
+		cpu, is_max ? "max" : "min");
+
+	fp = fopen(pathname, "w");
+	if (!fp) {
+		if (debug)
+			perror(pathname);
+		return;
+	}
+
+	khz = ratio_2_sysfs_khz(ratio);
+	retval = fprintf(fp, "%d", khz);
+	if (retval < 0)
+		if (debug)
+			perror("fprintf");
+	if (debug)
+		printf("echo %d > %s\n", khz, pathname);
+
+	fclose(fp);
+}
+
+/*
+ * We update all sysfs before updating any MSRs because of
+ * bugs in cpufreq/intel_pstate where the sysfs writes
+ * for a CPU may change the min/max values on other CPUs.
+ */
+
+int update_sysfs(int cpu)
+{
+	if (!has_hwp)
+		return 0;
+
+	if (!hwp_update_enabled())
+		return 0;
+
+	if (access("/sys/devices/system/cpu/cpu0/cpufreq", F_OK))
+		return 0;
+
+	if (update_hwp_min)
+		update_cpufreq_scaling_freq(0, cpu, req_update.hwp_min);
+
+	if (update_hwp_max)
+		update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max);
+
+	return 0;
+}
+
+int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req)
+{
+	/* fail if min > max requested */
+	if (req->hwp_min > req->hwp_max) {
+		errx(1, "cpu%d: requested hwp-min %d > hwp_max %d",
+			cpu, req->hwp_min, req->hwp_max);
+	}
+
+	/* fail if desired > max requested */
+	if (req->hwp_desired && (req->hwp_desired > req->hwp_max)) {
+		errx(1, "cpu%d: requested hwp-desired %d > hwp_max %d",
+			cpu, req->hwp_desired, req->hwp_max);
+	}
+	/* fail if desired < min requested */
+	if (req->hwp_desired && (req->hwp_desired < req->hwp_min)) {
+		errx(1, "cpu%d: requested hwp-desired %d < requested hwp_min %d",
+			cpu, req->hwp_desired, req->hwp_min);
+	}
+
+	return 0;
+}
+
+int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap)
+{
+	if (update_hwp_max) {
+		if (req->hwp_max > cap->highest)
+			errx(1, "cpu%d: requested max %d > capabilities highest %d, use --force?",
+				cpu, req->hwp_max, cap->highest);
+		if (req->hwp_max < cap->lowest)
+			errx(1, "cpu%d: requested max %d < capabilities lowest %d, use --force?",
+				cpu, req->hwp_max, cap->lowest);
+	}
+
+	if (update_hwp_min) {
+		if (req->hwp_min > cap->highest)
+			errx(1, "cpu%d: requested min %d > capabilities highest %d, use --force?",
+				cpu, req->hwp_min, cap->highest);
+		if (req->hwp_min < cap->lowest)
+			errx(1, "cpu%d: requested min %d < capabilities lowest %d, use --force?",
+				cpu, req->hwp_min, cap->lowest);
+	}
+
+	if (update_hwp_min && update_hwp_max && (req->hwp_min > req->hwp_max))
+		errx(1, "cpu%d: requested min %d > requested max %d",
+			cpu, req->hwp_min, req->hwp_max);
+
+	if (update_hwp_desired && req->hwp_desired) {
+		if (req->hwp_desired > req->hwp_max)
+			errx(1, "cpu%d: requested desired %d > requested max %d, use --force?",
+				cpu, req->hwp_desired, req->hwp_max);
+		if (req->hwp_desired < req->hwp_min)
+			errx(1, "cpu%d: requested desired %d < requested min %d, use --force?",
+				cpu, req->hwp_desired, req->hwp_min);
+		if (req->hwp_desired < cap->lowest)
+			errx(1, "cpu%d: requested desired %d < capabilities lowest %d, use --force?",
+				cpu, req->hwp_desired, cap->lowest);
+		if (req->hwp_desired > cap->highest)
+			errx(1, "cpu%d: requested desired %d > capabilities highest %d, use --force?",
+				cpu, req->hwp_desired, cap->highest);
+	}
+
+	return 0;
+}
+
+int update_hwp_request(int cpu)
+{
+	struct msr_hwp_request req;
+	struct msr_hwp_cap cap;
+
+	int msr_offset = MSR_HWP_REQUEST;
+
+	read_hwp_request(cpu, &req, msr_offset);
+	if (debug)
+		print_hwp_request(cpu, &req, "old: ");
+
+	if (update_hwp_min)
+		req.hwp_min = req_update.hwp_min;
+
+	if (update_hwp_max)
+		req.hwp_max = req_update.hwp_max;
+
+	if (update_hwp_desired)
+		req.hwp_desired = req_update.hwp_desired;
+
+	if (update_hwp_window)
+		req.hwp_window = req_update.hwp_window;
+
+	if (update_hwp_epp)
+		req.hwp_epp = req_update.hwp_epp;
+
+	req.hwp_use_pkg = req_update.hwp_use_pkg;
+
+	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+	if (debug)
+		print_hwp_cap(cpu, &cap, "");
+
+	if (!force)
+		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
+
+	verify_hwp_req_self_consistency(cpu, &req);
+
+	write_hwp_request(cpu, &req, msr_offset);
+
+	if (debug) {
+		read_hwp_request(cpu, &req, msr_offset);
+		print_hwp_request(cpu, &req, "new: ");
+	}
+	return 0;
+}
+int update_hwp_request_pkg(int pkg)
+{
+	struct msr_hwp_request req;
+	struct msr_hwp_cap cap;
+	int cpu = first_cpu_in_pkg[pkg];
+
+	int msr_offset = MSR_HWP_REQUEST_PKG;
+
+	read_hwp_request(cpu, &req, msr_offset);
+	if (debug)
+		print_hwp_request_pkg(pkg, &req, "old: ");
+
+	if (update_hwp_min)
+		req.hwp_min = req_update.hwp_min;
+
+	if (update_hwp_max)
+		req.hwp_max = req_update.hwp_max;
+
+	if (update_hwp_desired)
+		req.hwp_desired = req_update.hwp_desired;
+
+	if (update_hwp_window)
+		req.hwp_window = req_update.hwp_window;
+
+	if (update_hwp_epp)
+		req.hwp_epp = req_update.hwp_epp;
+
+	read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES);
+	if (debug)
+		print_hwp_cap(cpu, &cap, "");
+
+	if (!force)
+		check_hwp_request_v_hwp_capabilities(cpu, &req, &cap);
+
+	verify_hwp_req_self_consistency(cpu, &req);
+
+	write_hwp_request(cpu, &req, msr_offset);
+
+	if (debug) {
+		read_hwp_request(cpu, &req, msr_offset);
+		print_hwp_request_pkg(pkg, &req, "new: ");
+	}
+	return 0;
+}
+
+int enable_hwp_on_cpu(int cpu)
+{
+	unsigned long long msr;
+
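+	/* MSR_PM_ENABLE[0] enables HWP; once set, it stays set until the next reset */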
+	get_msr(cpu, MSR_PM_ENABLE, &msr);
+	put_msr(cpu, MSR_PM_ENABLE, 1);
+
+	if (verbose)
+		printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1);
+
+	return 0;
+}
+
+int update_cpu_msrs(int cpu)
+{
+	unsigned long long msr;
+
+	if (update_epb) {
+		get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr);
+		put_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, new_epb);
+
+		if (verbose)
+			printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n",
+				cpu, (unsigned int) msr, (unsigned int) new_epb);
+	}
+
+	if (update_turbo) {
+		int turbo_is_present_and_disabled;
+
+		get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
+
+		turbo_is_present_and_disabled = ((msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) != 0);
+
+		if (turbo_update_value == 1)	{
+			if (turbo_is_present_and_disabled) {
+				msr &= ~MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+				put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+				if (verbose)
+					printf("cpu%d: turbo ENABLE\n", cpu);
+			}
+		} else {
+			/*
+			 * If "turbo_is_enabled" were known to describe this cpu,
+			 * we could use it here to skip redundant disable requests.
+			 * But the cpu may be in a different package, so always write.
+			 */
+			msr |= MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+			put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+			if (verbose)
+				printf("cpu%d: turbo DISABLE\n", cpu);
+		}
+	}
+
+	if (!has_hwp)
+		return 0;
+
+	if (!hwp_update_enabled())
+		return 0;
+
+	update_hwp_request(cpu);
+	return 0;
+}
+
+/*
+ * Open a file, and exit on failure
+ */
+FILE *fopen_or_die(const char *path, const char *mode)
+{
+	FILE *filep = fopen(path, mode);
+
+	if (!filep)
+		err(1, "%s: open failed", path);
+	return filep;
+}
+
+unsigned int get_pkg_num(int cpu)
+{
+	FILE *fp;
+	char pathname[128];
+	unsigned int pkg;
+	int retval;
+
+	sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
+
+	fp = fopen_or_die(pathname, "r");
+	retval = fscanf(fp, "%u\n", &pkg);
+	if (retval != 1)
+		errx(1, "%s: failed to parse", pathname);
+	fclose(fp);
+	return pkg;
+}
+
+int set_max_cpu_pkg_num(int cpu)
+{
+	unsigned int pkg;
+
+	if (max_cpu_num < cpu)
+		max_cpu_num = cpu;
+
+	pkg = get_pkg_num(cpu);
+
+	if (pkg >= MAX_PACKAGES)
+		errx(1, "cpu%d: %d >= MAX_PACKAGES (%d)", cpu, pkg, MAX_PACKAGES);
+
+	if (pkg > max_pkg_num)
+		max_pkg_num = pkg;
+
+	if ((pkg_present_set & (1ULL << pkg)) == 0) {
+		pkg_present_set |= (1ULL << pkg);
+		first_cpu_in_pkg[pkg] = cpu;
+	}
+
+	return 0;
+}
+int mark_cpu_present(int cpu)
+{
+	CPU_SET_S(cpu, cpu_setsize, cpu_present_set);
+	return 0;
+}
+
+/*
+ * run func(cpu) on every cpu in /proc/stat
+ * return the first non-zero value returned by func, else 0
+ */
+int for_all_proc_cpus(int (func)(int))
+{
+	FILE *fp;
+	int cpu_num;
+	int retval;
+
+	fp = fopen_or_die(proc_stat, "r");
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0)
+		err(1, "%s: failed to parse format", proc_stat);
+
+	while (1) {
+		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
+		if (retval != 1)
+			break;
+
+		retval = func(cpu_num);
+		if (retval) {
+			fclose(fp);
+			return retval;
+		}
+	}
+	fclose(fp);
+	return 0;
+}
+
+void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int))
+{
+	int cpu_num;
+
+	for (cpu_num = 0; cpu_num <= max_cpu_num; ++cpu_num)
+		if (CPU_ISSET_S(cpu_num, set_size, cpu_set))
+			func(cpu_num);
+}
+
+void init_data_structures(void)
+{
+	for_all_proc_cpus(set_max_cpu_pkg_num);
+
+	cpu_setsize = CPU_ALLOC_SIZE((max_cpu_num + 1));
+
+	cpu_present_set = CPU_ALLOC((max_cpu_num + 1));
+	if (cpu_present_set == NULL)
+		err(3, "CPU_ALLOC");
+	CPU_ZERO_S(cpu_setsize, cpu_present_set);
+	for_all_proc_cpus(mark_cpu_present);
+}
+
+/* clear has_hwp if it is not enabled (or being enabled) */
+
+void verify_hwp_is_enabled(void)
+{
+	unsigned long long msr;
+
+	if (!has_hwp)	/* set in early_cpuid() */
+		return;
+
+	/* MSR_PM_ENABLE[0] == 1 if HWP is enabled and its MSRs are visible */
+	get_msr(base_cpu, MSR_PM_ENABLE, &msr);
+	if ((msr & 1) == 0) {
+		fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
+		has_hwp = 0;
+		return;
+	}
+}
+
+int req_update_bounds_check(void)
+{
+	if (!hwp_update_enabled())
+		return 0;
+
+	/* fail if min > max requested */
+	if ((update_hwp_max && update_hwp_min) &&
+	    (req_update.hwp_min > req_update.hwp_max)) {
+		printf("hwp-min %d > hwp_max %d\n", req_update.hwp_min, req_update.hwp_max);
+		return -EINVAL;
+	}
+
+	/* fail if desired > max requested */
+	if (req_update.hwp_desired && update_hwp_max &&
+	    (req_update.hwp_desired > req_update.hwp_max)) {
+		printf("hwp-desired cannot be greater than hwp_max\n");
+		return -EINVAL;
+	}
+	/* fail if desired < min requested */
+	if (req_update.hwp_desired && update_hwp_min &&
+	    (req_update.hwp_desired < req_update.hwp_min)) {
+		printf("hwp-desired cannot be less than hwp_min\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void set_base_cpu(void)
+{
+	base_cpu = sched_getcpu();
+	if (base_cpu < 0)
+		err(-ENODEV, "No valid cpus found");
+}
+
+
+void probe_dev_msr(void)
+{
+	struct stat sb;
+	char pathname[32];
+
+	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+	if (stat(pathname, &sb))
+		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
+			err(-5, "no %s; try \"modprobe msr\"", pathname);
+}
+/*
+ * early_cpuid()
+ * initialize turbo_is_enabled, has_hwp, has_epb
+ * before cmdline is parsed
+ */
+void early_cpuid(void)
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	unsigned int fms, family, model;
+
+	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+
+	if (max_level < 6)
+		errx(1, "Processor not supported");
+
+	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
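+	/* model 0x4F is Broadwell Xeon: save its 1-core turbo ratio, used when converting HWP performance values */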
+	if (model == 0x4F) {
+		unsigned long long msr;
+
+		get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
+
+		bdx_highest_ratio = msr & 0xFF;
+	}
+
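+	/* CPUID(6): EAX[1] Turbo Boost, EAX[7] HWP base support, ECX[3] ENERGY_PERF_BIAS */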
+	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+	turbo_is_enabled = (eax >> 1) & 1;
+	has_hwp = (eax >> 7) & 1;
+	has_epb = (ecx >> 3) & 1;
+}
+
+/*
+ * parse_cpuid()
+ * set
+ * has_hwp, has_hwp_notify, has_hwp_activity_window, has_hwp_epp, has_hwp_request_pkg, has_epb
+ */
+void parse_cpuid(void)
 {
 	unsigned int eax, ebx, ecx, edx, max_level;
 	unsigned int fms, family, model, stepping;
 
 	eax = ebx = ecx = edx = 0;
 
-	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
-		"=d" (edx) : "a" (0));
+	__get_cpuid(0, &max_level, &ebx, &ecx, &edx);
 
-	if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
-		if (verbose)
-			fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
-				(char *)&ebx, (char *)&edx, (char *)&ecx);
-		exit(1);
-	}
+	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+		genuine_intel = 1;
 
-	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	if (debug)
+		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
+			(char *)&ebx, (char *)&edx, (char *)&ecx);
+
+	__get_cpuid(1, &fms, &ebx, &ecx, &edx);
 	family = (fms >> 8) & 0xf;
 	model = (fms >> 4) & 0xf;
 	stepping = fms & 0xf;
 	if (family == 6 || family == 0xf)
 		model += ((fms >> 16) & 0xf) << 4;
 
-	if (verbose > 1)
-		printf("CPUID %d levels family:model:stepping "
-			"0x%x:%x:%x (%d:%d:%d)\n", max_level,
-			family, model, stepping, family, model, stepping);
-
-	if (!(edx & (1 << 5))) {
-		if (verbose)
-			printf("CPUID: no MSR\n");
-		exit(1);
+	if (debug) {
+		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+			max_level, family, model, stepping, family, model, stepping);
+		fprintf(stderr, "CPUID(1): %s %s %s %s %s %s %s %s\n",
+			ecx & (1 << 0) ? "SSE3" : "-",
+			ecx & (1 << 3) ? "MONITOR" : "-",
+			ecx & (1 << 7) ? "EIST" : "-",
+			ecx & (1 << 8) ? "TM2" : "-",
+			edx & (1 << 4) ? "TSC" : "-",
+			edx & (1 << 5) ? "MSR" : "-",
+			edx & (1 << 22) ? "ACPI-TM" : "-",
+			edx & (1 << 29) ? "TM" : "-");
 	}
 
-	/*
-	 * Support for MSR_IA32_ENERGY_PERF_BIAS
-	 * is indicated by CPUID.06H.ECX.bit3
-	 */
-	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
-	if (verbose)
-		printf("CPUID.06H.ECX: 0x%x\n", ecx);
-	if (!(ecx & (1 << 3))) {
-		if (verbose)
-			printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
-		exit(1);
-	}
+	if (!(edx & (1 << 5)))
+		errx(1, "CPUID: no MSR");
+
+
+	__get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+	/* turbo_is_enabled already set */
+	/* has_hwp already set */
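+	/* CPUID(6).EAX[8..11]: HWP notification, activity window, EPP, package-level request */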
+	has_hwp_notify = eax & (1 << 8);
+	has_hwp_activity_window = eax & (1 << 9);
+	has_hwp_epp = eax & (1 << 10);
+	has_hwp_request_pkg = eax & (1 << 11);
+
+	if (!has_hwp_request_pkg && update_hwp_use_pkg)
+		errx(1, "--hwp-use-pkg is not available on this hardware");
+
+	/* has_epb already set */
+
+	if (debug)
+		fprintf(stderr,
+			"CPUID(6): %sTURBO, %sHWP, %sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n",
+			turbo_is_enabled ? "" : "No-",
+			has_hwp ? "" : "No-",
+			has_hwp_notify ? "" : "No-",
+			has_hwp_activity_window ? "" : "No-",
+			has_hwp_epp ? "" : "No-",
+			has_hwp_request_pkg ? "" : "No-",
+			has_epb ? "" : "No-");
+
 	return;	/* success */
 }
 
-unsigned long long get_msr(int cpu, int offset)
-{
-	unsigned long long msr;
-	char msr_path[32];
-	int retval;
-	int fd;
-
-	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
-	fd = open(msr_path, O_RDONLY);
-	if (fd < 0) {
-		printf("Try \"# modprobe msr\"\n");
-		perror(msr_path);
-		exit(1);
-	}
-
-	retval = pread(fd, &msr, sizeof msr, offset);
-
-	if (retval != sizeof msr) {
-		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
-		exit(-2);
-	}
-	close(fd);
-	return msr;
-}
-
-unsigned long long  put_msr(int cpu, unsigned long long new_msr, int offset)
-{
-	unsigned long long old_msr;
-	char msr_path[32];
-	int retval;
-	int fd;
-
-	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
-	fd = open(msr_path, O_RDWR);
-	if (fd < 0) {
-		perror(msr_path);
-		exit(1);
-	}
-
-	retval = pread(fd, &old_msr, sizeof old_msr, offset);
-	if (retval != sizeof old_msr) {
-		perror("pwrite");
-		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
-		exit(-2);
-	}
-
-	retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
-	if (retval != sizeof new_msr) {
-		perror("pwrite");
-		printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
-		exit(-2);
-	}
-
-	close(fd);
-
-	return old_msr;
-}
-
-void print_msr(int cpu)
-{
-	printf("cpu%d: 0x%016llx\n",
-		cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
-}
-
-void update_msr(int cpu)
-{
-	unsigned long long previous_msr;
-
-	previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
-
-	if (verbose)
-		printf("cpu%d  msr0x%x 0x%016llx -> 0x%016llx\n",
-			cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
-
-	return;
-}
-
-char *proc_stat = "/proc/stat";
-/*
- * run func() on every cpu in /dev/cpu
- */
-void for_every_cpu(void (func)(int))
-{
-	FILE *fp;
-	int retval;
-
-	fp = fopen(proc_stat, "r");
-	if (fp == NULL) {
-		perror(proc_stat);
-		exit(1);
-	}
-
-	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
-	if (retval != 0) {
-		perror("/proc/stat format");
-		exit(1);
-	}
-
-	while (1) {
-		int cpu;
-
-		retval = fscanf(fp,
-			"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
-			&cpu);
-		if (retval != 1)
-			break;
-
-		func(cpu);
-	}
-	fclose(fp);
-}
-
 int main(int argc, char **argv)
 {
+	set_base_cpu();
+	probe_dev_msr();
+	init_data_structures();
+
+	early_cpuid();	/* initial cpuid parse before cmdline */
+
 	cmdline(argc, argv);
 
-	if (verbose > 1)
-		printf("x86_energy_perf_policy Nov 24, 2010"
-				" - Len Brown <lenb@kernel.org>\n");
-	if (verbose > 1 && !read_only)
-		printf("new_bias %lld\n", new_bias);
+	if (debug)
+		print_version();
 
-	validate_cpuid();
+	parse_cpuid();
 
-	if (cpu != -1) {
-		if (read_only)
-			print_msr(cpu);
-		else
-			update_msr(cpu);
-	} else {
-		if (read_only)
-			for_every_cpu(print_msr);
-		else
-			for_every_cpu(update_msr);
+	/* If CPU-set and PKG-set are not initialized, default to all CPUs */
+	if ((cpu_selected_set == 0) && (pkg_selected_set == 0))
+		cpu_selected_set = cpu_present_set;
+
+	/*
+	 * If HWP is being enabled, do it now, so that subsequent operations
+	 * that access HWP registers can work.
+	 */
+	if (update_hwp_enable)
+		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, enable_hwp_on_cpu);
+
+	/* If HWP present, but disabled, warn and ignore from here forward */
+	verify_hwp_is_enabled();
+
+	if (req_update_bounds_check())
+		return -EINVAL;
+
+	/* display information only, no updates to settings */
+	if (!update_epb && !update_turbo && !hwp_update_enabled()) {
+		if (cpu_selected_set)
+			for_all_cpus_in_set(cpu_setsize, cpu_selected_set, print_cpu_msrs);
+
+		if (has_hwp_request_pkg) {
+			if (pkg_selected_set == 0)
+				pkg_selected_set = pkg_present_set;
+
+			for_packages(pkg_selected_set, print_pkg_msrs);
+		}
+
+		return 0;
 	}
 
+	/* update CPU set */
+	if (cpu_selected_set) {
+		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_sysfs);
+		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_msrs);
+	} else if (pkg_selected_set)
+		for_packages(pkg_selected_set, update_hwp_request_pkg);
+
 	return 0;
 }
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 64cae1a..e1f75a1 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -370,7 +370,7 @@ acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
 
-union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
 		u64 rev, u64 func, union acpi_object *argv4)
 {
 	union acpi_object *obj = ERR_PTR(-ENXIO);
@@ -379,11 +379,11 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
 	rcu_read_lock();
 	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
 	if (ops)
-		obj = ops->evaluate_dsm(handle, uuid, rev, func, argv4);
+		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
 	rcu_read_unlock();
 
 	if (IS_ERR(obj))
-		return acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
 	return obj;
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index c218717..28859da 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1559,7 +1559,7 @@ static unsigned long nfit_ctl_handle;
 union acpi_object *result;
 
 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
-		const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
+		const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
 {
 	if (handle != &nfit_ctl_handle)
 		return ERR_PTR(-ENXIO);
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index f54c003..d3d63dd 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -13,6 +13,7 @@
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
 #include <linux/list.h>
+#include <linux/uuid.h>
 #include <linux/ioport.h>
 #include <linux/spinlock_types.h>
 
@@ -36,7 +37,8 @@ typedef void *acpi_handle;
 
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
-		const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4);
+		 const guid_t *guid, u64 rev, u64 func,
+		 union acpi_object *argv4);
 void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
 		unsigned long size);
 void __wrap_iounmap(volatile void __iomem *addr);
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h
index 19d0604..487cbfb 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/testing/selftests/bpf/bpf_endian.h
@@ -1,23 +1,42 @@
 #ifndef __BPF_ENDIAN__
 #define __BPF_ENDIAN__
 
-#include <asm/byteorder.h>
+#include <linux/swab.h>
 
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-# define __bpf_ntohs(x)		__builtin_bswap16(x)
-# define __bpf_htons(x)		__builtin_bswap16(x)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-# define __bpf_ntohs(x)		(x)
-# define __bpf_htons(x)		(x)
+/* LLVM's BPF target selects the endianness of the CPU
+ * it compiles on, or whatever the user specifies as the
+ * target (bpfel/bpfeb). The resulting __BYTE_ORDER__ is
+ * defined by the compiler; we cannot rely on __BYTE_ORDER
+ * from libc headers, since it doesn't reflect the actually
+ * requested byte order.
+ *
+ * Note that LLVM's BPF target has different __builtin_bswapX()
+ * semantics: it maps to BPF_ALU | BPF_END | BPF_TO_BE in both
+ * the bpfel and bpfeb case, which means the mapping below
+ * effectively behaves like cpu_to_be16(). We could use it
+ * unconditionally in the BPF case, but better not to rely on
+ * it, so that this header can be used from both the application
+ * and the BPF program side, which use different targets.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define __bpf_ntohs(x)			__builtin_bswap16(x)
+# define __bpf_htons(x)			__builtin_bswap16(x)
+# define __bpf_constant_ntohs(x)	___constant_swab16(x)
+# define __bpf_constant_htons(x)	___constant_swab16(x)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define __bpf_ntohs(x)			(x)
+# define __bpf_htons(x)			(x)
+# define __bpf_constant_ntohs(x)	(x)
+# define __bpf_constant_htons(x)	(x)
 #else
-# error "Fix your __BYTE_ORDER?!"
+# error "Fix your compiler's __BYTE_ORDER__?!"
 #endif
 
 #define bpf_htons(x)				\
 	(__builtin_constant_p(x) ?		\
-	 __constant_htons(x) : __bpf_htons(x))
+	 __bpf_constant_htons(x) : __bpf_htons(x))
 #define bpf_ntohs(x)				\
 	(__builtin_constant_p(x) ?		\
-	 __constant_ntohs(x) : __bpf_ntohs(x))
+	 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
 
-#endif
+#endif /* __BPF_ENDIAN__ */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index cabb19b..0ff8c55 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3749,6 +3749,72 @@ static struct bpf_test tests[] = {
 		.errstr = "invalid bpf_context access",
 	},
 	{
+		"leak pointer into ctx 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+				      offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 2 },
+		.errstr_unpriv = "R2 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into ctx 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+				      offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into ctx 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+				      offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 1 },
+		.errstr_unpriv = "R2 leaks addr into ctx",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into map val",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr_unpriv = "R6 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
 		"helper access to map: full range",
 		.insns = {
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index a676d3e..13f5198 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -305,7 +305,7 @@
 	echo "Running remote perf test $WITH DMA"
 	write_file "" $REMOTE_PERF/run
 	echo -n "  "
-	read_file $LOCAL_PERF/run
+	read_file $REMOTE_PERF/run
 	echo "  Passed"
 
 	_modprobe -r ntb_perf
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index eee31e2..70fca31 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -27,7 +27,7 @@
 
 cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' |
 awk	'
-BEGIN	{
+{
 		print "if grep -q \"" $0 "\" < '"$T/.config"'";
 		print "then";
 		print "\t:";
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 00cb0db..c29f2ec 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -45,7 +45,7 @@
 trap 'rm -rf $T' 0
 mkdir $T
 
-grep -v 'CONFIG_[A-Z]*_TORTURE_TEST' < ${config_template} > $T/config
+grep -v 'CONFIG_[A-Z]*_TORTURE_TEST=' < ${config_template} > $T/config
 cat << ___EOF___ >> $T/config
 CONFIG_INITRAMFS_SOURCE="$TORTURE_INITRD"
 CONFIG_VIRTIO_PCI=y
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 3b3c1b6..50091de 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -296,10 +296,7 @@
 then
 	git status >> $resdir/$ds/testid.txt
 	git rev-parse HEAD >> $resdir/$ds/testid.txt
-	if ! git diff HEAD > $T/git-diff 2>&1
-	then
-		cp $T/git-diff $resdir/$ds
-	fi
+	git diff HEAD >> $resdir/$ds/testid.txt
 fi
 ___EOF___
 awk < $T/cfgcpu.pack \
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
index a3a1a05..6a0b9f6 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
@@ -9,6 +9,8 @@
 TREE09
 SRCU-N
 SRCU-P
+SRCU-t
+SRCU-u
 TINY01
 TINY02
 TASKS01
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-C.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-C.boot
new file mode 100644
index 0000000..84a7d51
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-C.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=srcud
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
index 1a087c3..2da8b49 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
@@ -5,4 +5,4 @@
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
-CONFIG_RCU_EXPERT=y
+#CHECK#CONFIG_RCU_EXPERT=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
index 4837430..ab7ccd3 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
@@ -2,7 +2,11 @@
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
 CONFIG_HOTPLUG_CPU=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
-#CHECK#CONFIG_RCU_EXPERT=n
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t
new file mode 100644
index 0000000..6c78022
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t
@@ -0,0 +1,10 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TINY_SRCU=y
+CONFIG_RCU_TRACE=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+#CHECK#CONFIG_PREEMPT_COUNT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot
new file mode 100644
index 0000000..238bfe3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=srcu
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u
new file mode 100644
index 0000000..6bc24e9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u
@@ -0,0 +1,9 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TINY_SRCU=y
+CONFIG_RCU_TRACE=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_PREEMPT_COUNT=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
new file mode 100644
index 0000000..84a7d51
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=srcud
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 b/tools/testing/selftests/rcutorture/configs/rcu/TINY02
index a59f768..d867426 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02
@@ -6,10 +6,9 @@
 CONFIG_HZ_PERIODIC=y
 CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=n
-CONFIG_RCU_TRACE=y
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU_REPEATEDLY=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_PREEMPT_COUNT=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 359cb25..b5b5397 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -10,12 +10,9 @@
 CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_MAXSMP=y
+CONFIG_CPUMASK_OFFSTACK=y
 CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ZERO=y
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
index adc3abc..1d14e13 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
@@ -1 +1,5 @@
 rcutorture.torture_type=rcu_bh maxcpus=8
+rcutree.gp_preinit_delay=3
+rcutree.gp_init_delay=3
+rcutree.gp_cleanup_delay=3
+rcu_nocbs=0
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
index c1ab592..35e639e 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -18,9 +18,6 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
 CONFIG_RCU_BOOST=n
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
+CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
deleted file mode 100644
index 917d251..0000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
+++ /dev/null
@@ -1,21 +0,0 @@
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_PREEMPT_NONE=n
-CONFIG_PREEMPT_VOLUNTARY=n
-CONFIG_PREEMPT=y
-#CHECK#CONFIG_PREEMPT_RCU=y
-CONFIG_HZ_PERIODIC=n
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NO_HZ_FULL=n
-CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_RCU_TRACE=y
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
-CONFIG_RCU_FANOUT=3
-CONFIG_RCU_FANOUT_LEAF=3
-CONFIG_RCU_NOCB_CPU=n
-CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_PROVE_LOCKING=n
-CONFIG_RCU_BOOST=n
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
index 3b93ee5..2dc31b1 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
@@ -14,9 +14,5 @@
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_BOOST=y
-CONFIG_RCU_KTHREAD_PRIO=2
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
index 120c0c8..5d2cc0b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
@@ -1 +1,5 @@
 rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=3
+rcutree.gp_init_delay=3
+rcutree.gp_cleanup_delay=3
+rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 5af758e..27d2269 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -15,11 +15,7 @@
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=4
 CONFIG_RCU_FANOUT_LEAF=3
-CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
 CONFIG_RCU_EQS_DEBUG=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
index d4cdc0d..2dde0d9 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
@@ -13,12 +13,8 @@
 CONFIG_RCU_FANOUT=6
 CONFIG_RCU_FANOUT_LEAF=6
 CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_NONE=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
index 15b3e1a..c7fd050 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
@@ -1,2 +1,5 @@
 rcutorture.torture_type=sched
 rcupdate.rcu_self_test_sched=1
+rcutree.gp_preinit_delay=3
+rcutree.gp_init_delay=3
+rcutree.gp_cleanup_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 4cb02bd..05a4eec 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -18,8 +18,6 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
+CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
index dd90f28..ad18b52 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
@@ -2,3 +2,6 @@
 rcupdate.rcu_self_test_bh=1
 rcupdate.rcu_self_test_sched=1
 rcutree.rcu_fanout_exact=1
+rcutree.gp_preinit_delay=3
+rcutree.gp_init_delay=3
+rcutree.gp_cleanup_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index b12a3ea..0f4759f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -1,6 +1,5 @@
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
-CONFIG_CPUMASK_OFFSTACK=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
@@ -9,16 +8,11 @@
 CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=y
 CONFIG_NO_HZ_FULL_ALL=n
-CONFIG_NO_HZ_FULL_SYSIDLE=y
 CONFIG_RCU_FAST_NO_HZ=n
 CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_RCU_FANOUT=2
 CONFIG_RCU_FANOUT_LEAF=2
-CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
index 099cc63..fb1c763 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -15,7 +15,6 @@
 CONFIG_RCU_FANOUT=3
 CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_PROVE_LOCKING=n
 CONFIG_RCU_BOOST=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
deleted file mode 100644
index 2ad13f0..0000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
+++ /dev/null
@@ -1,21 +0,0 @@
-CONFIG_SMP=y
-CONFIG_NR_CPUS=16
-CONFIG_PREEMPT_NONE=n
-CONFIG_PREEMPT_VOLUNTARY=n
-CONFIG_PREEMPT=y
-#CHECK#CONFIG_PREEMPT_RCU=y
-CONFIG_HZ_PERIODIC=n
-CONFIG_NO_HZ_IDLE=y
-CONFIG_NO_HZ_FULL=n
-CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_RCU_TRACE=y
-CONFIG_HOTPLUG_CPU=n
-CONFIG_SUSPEND=n
-CONFIG_HIBERNATION=n
-CONFIG_RCU_FANOUT=3
-CONFIG_RCU_FANOUT_LEAF=2
-CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ALL=y
-CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_BOOST=n
-CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
index fb066dc..1bd8efc 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
@@ -2,3 +2,4 @@
 rcupdate.rcu_self_test=1
 rcupdate.rcu_self_test_sched=1
 rcutree.rcu_fanout_exact=1
+rcu_nocbs=0-7
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TINY b/tools/testing/selftests/rcutorture/configs/rcuperf/TINY
new file mode 100644
index 0000000..fb05ef5
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/TINY
@@ -0,0 +1,16 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+#CHECK#CONFIG_TINY_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
index a312f67..721cfda 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
@@ -7,7 +7,6 @@
 CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_RCU_TRACE=n
 CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54 b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
index 985fb17..7629f5d 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
@@ -8,7 +8,6 @@
 CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_FAST_NO_HZ=n
-CONFIG_RCU_TRACE=n
 CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
diff --git a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
index 24396ae..a75b169 100644
--- a/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
+++ b/tools/testing/selftests/rcutorture/doc/TINY_RCU.txt
@@ -18,7 +18,6 @@
 
 	In common code tested by TREE_RCU test cases.
 
-CONFIG_NO_HZ_FULL_SYSIDLE
 CONFIG_RCU_NOCB_CPU
 
 	Meaningless for TINY_RCU.
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index 364801b..9ad3f89 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -9,28 +9,20 @@
 CONFIG_HOTPLUG_CPU -- Do half.  (Every second.)
 CONFIG_HZ_PERIODIC -- Do one.
 CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.)
-CONFIG_NO_HZ_FULL -- Do two, one with CONFIG_NO_HZ_FULL_SYSIDLE.
-CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
+CONFIG_NO_HZ_FULL -- Do two, one with partial CPU enablement.
 CONFIG_PREEMPT -- Do half.  (First three and #8.)
 CONFIG_PROVE_LOCKING -- Do several, covering CONFIG_DEBUG_LOCK_ALLOC=y and not.
 CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING.
-CONFIG_PROVE_RCU_REPEATEDLY -- Do one.
 CONFIG_RCU_BOOST -- one of PREEMPT_RCU.
-CONFIG_RCU_KTHREAD_PRIO -- set to 2 for _BOOST testing.
 CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others.
 CONFIG_RCU_FANOUT_LEAF -- Do one non-default.
-CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL.
-CONFIG_RCU_NOCB_CPU -- Do three, see below.
-CONFIG_RCU_NOCB_CPU_ALL -- Do one.
-CONFIG_RCU_NOCB_CPU_NONE -- Do one.
-CONFIG_RCU_NOCB_CPU_ZERO -- Do one.
+CONFIG_RCU_FAST_NO_HZ -- Do one, but not with all nohz_full CPUs.
+CONFIG_RCU_NOCB_CPU -- Do three, one with no rcu_nocbs CPUs, one with
+	rcu_nocbs=0, and one with all rcu_nocbs CPUs.
 CONFIG_RCU_TRACE -- Do half.
 CONFIG_SMP -- Need one !SMP for PREEMPT_RCU.
 CONFIG_RCU_EXPERT=n -- Do a few, but these have to be vanilla configurations.
 CONFIG_RCU_EQS_DEBUG -- Do at least one for CONFIG_NO_HZ_FULL and not.
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP -- Do for all but a couple TREE scenarios.
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT -- Do for all but a couple TREE scenarios.
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT -- Do for all but a couple TREE scenarios.
 
 RCU-bh: Do one with PREEMPT and one with !PREEMPT.
 RCU-sched: Do one with PREEMPT but not BOOST.
@@ -52,10 +44,6 @@
 
 	Used only to check CONFIG_RCU_FANOUT value, inspection suffices.
 
-CONFIG_NO_HZ_FULL_SYSIDLE_SMALL
-
-	Defer until Frederic uses this.
-
 CONFIG_PREEMPT_COUNT
 CONFIG_PREEMPT_RCU
 
@@ -78,30 +66,16 @@
 
 	Always used in KVM testing.
 
-CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
-CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
-CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
-
-	Inspection suffices, ignore.
-
 CONFIG_PREEMPT_RCU
 CONFIG_TREE_RCU
 CONFIG_TINY_RCU
 
 	These are controlled by CONFIG_PREEMPT and/or CONFIG_SMP.
 
-CONFIG_SPARSE_RCU_POINTER
-
-	Makes sense only for sparse runs, not for kernel builds.
-
 CONFIG_SRCU
 CONFIG_TASKS_RCU
 
 	Selected by CONFIG_RCU_TORTURE_TEST, so cannot disable.
 
-CONFIG_RCU_TRACE
-
-	Implied by CONFIG_RCU_TRACE for Tree RCU.
-
 
 boot parameters ignored: TBD
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
index 8ff8904..c9e8bc5 100755
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
 
 # Modify SRCU for formal verification. The first argument should be srcu.h and
 # the second should be srcu.c. Outputs modified srcu.h and srcu.c into the
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 03f1fa4..00a928b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1822,6 +1822,23 @@ struct tsync_sibling {
 	struct __test_metadata *metadata;
 };
 
+/*
+ * To avoid joining joined threads (which is not allowed by Bionic),
+ * make sure we both successfully join and clear the tid to skip a
+ * later join attempt during fixture teardown. Any remaining threads
+ * will be directly killed during teardown.
+ */
+#define PTHREAD_JOIN(tid, status)					\
+	do {								\
+		int _rc = pthread_join(tid, status);			\
+		if (_rc) {						\
+			TH_LOG("pthread_join of tid %u failed: %d\n",	\
+				(unsigned int)tid, _rc);		\
+		} else {						\
+			tid = 0;					\
+		}							\
+	} while (0)
+
 FIXTURE_DATA(TSYNC) {
 	struct sock_fprog root_prog, apply_prog;
 	struct tsync_sibling sibling[TSYNC_SIBLINGS];
@@ -1890,14 +1907,14 @@ FIXTURE_TEARDOWN(TSYNC)
 
 	for ( ; sib < self->sibling_count; ++sib) {
 		struct tsync_sibling *s = &self->sibling[sib];
-		void *status;
 
 		if (!s->tid)
 			continue;
-		if (pthread_kill(s->tid, 0)) {
-			pthread_cancel(s->tid);
-			pthread_join(s->tid, &status);
-		}
+		/*
+		 * If a thread is still running, it may be stuck, so hit
+		 * it over the head really hard.
+		 */
+		pthread_kill(s->tid, 9);
 	}
 	pthread_mutex_destroy(&self->mutex);
 	pthread_cond_destroy(&self->cond);
@@ -1987,9 +2004,9 @@ TEST_F(TSYNC, siblings_fail_prctl)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure diverging sibling failed to call prctl. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
 
@@ -2029,9 +2046,9 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
 	}
 	pthread_mutex_unlock(&self->mutex);
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
 
@@ -2055,9 +2072,9 @@ TEST_F(TSYNC, two_sibling_want_nnp)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both upset about lacking nnp. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
 }
 
@@ -2095,9 +2112,9 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both killed and don't exit cleanly. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(0x0, (long)status);
 }
 
@@ -2140,9 +2157,9 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
 	pthread_mutex_unlock(&self->mutex);
 
 	/* Ensure they are both unkilled. */
-	pthread_join(self->sibling[0].tid, &status);
+	PTHREAD_JOIN(self->sibling[0].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
-	pthread_join(self->sibling[1].tid, &status);
+	PTHREAD_JOIN(self->sibling[1].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 }
 
@@ -2199,7 +2216,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))
@@ -2224,7 +2241,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
 		TH_LOG("cond broadcast non-zero");
 	}
 	pthread_mutex_unlock(&self->mutex);
-	pthread_join(self->sibling[sib].tid, &status);
+	PTHREAD_JOIN(self->sibling[sib].tid, &status);
 	EXPECT_EQ(0, (long)status);
 	/* Poll for actual task death. pthread_join doesn't guarantee it. */
 	while (!kill(self->sibling[sib].system_tid, 0))
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 5fa1d7e9..5801bbe 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
 BUILD_FLAGS = -DKTEST
 CFLAGS += -O3 -Wl,-no-as-needed -Wall $(BUILD_FLAGS)
-LDFLAGS += -lrt -lpthread
+LDFLAGS += -lrt -lpthread -lm
 
 # these are all "safe" tests that don't modify
 # system time or require escalated privileges
@@ -8,7 +8,7 @@
 	     inconsistency-check raw_skew threadtest rtctest
 
 TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
-		      skew_consistency clocksource-switch leap-a-day \
+		      skew_consistency clocksource-switch freq-step leap-a-day \
 		      leapcrash set-tai set-2038 set-tz
 
 
@@ -24,6 +24,7 @@
 	./change_skew
 	./skew_consistency
 	./clocksource-switch
+	./freq-step
 	./leap-a-day -s -i 10
 	./leapcrash
 	./set-tz
diff --git a/tools/testing/selftests/timers/freq-step.c b/tools/testing/selftests/timers/freq-step.c
new file mode 100644
index 0000000..e8c6183
--- /dev/null
+++ b/tools/testing/selftests/timers/freq-step.c
@@ -0,0 +1,268 @@
+/*
+ * This test checks the response of the system clock to frequency
+ * steps made with adjtimex(). The frequency error and stability of
+ * the CLOCK_MONOTONIC clock relative to the CLOCK_MONOTONIC_RAW clock
+ * are measured in two intervals following the step. The test fails if
+ * values from the second interval exceed the specified limits.
+ *
+ * Copyright (C) Miroslav Lichvar <mlichvar@redhat.com>  2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <sys/timex.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+#define SAMPLES 100
+#define SAMPLE_READINGS 10
+#define MEAN_SAMPLE_INTERVAL 0.1
+#define STEP_INTERVAL 1.0
+#define MAX_PRECISION 100e-9
+#define MAX_FREQ_ERROR 10e-6
+#define MAX_STDDEV 1000e-9
+
+struct sample {
+	double offset;
+	double time;
+};
+
+static time_t mono_raw_base;
+static time_t mono_base;
+static long user_hz;
+static double precision;
+static double mono_freq_offset;
+
+static double diff_timespec(struct timespec *ts1, struct timespec *ts2)
+{
+	return ts1->tv_sec - ts2->tv_sec + (ts1->tv_nsec - ts2->tv_nsec) / 1e9;
+}
+
+static double get_sample(struct sample *sample)
+{
+	double delay, mindelay = 0.0;
+	struct timespec ts1, ts2, ts3;
+	int i;
+
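+	/* bracket each CLOCK_MONOTONIC read with two CLOCK_MONOTONIC_RAW reads and keep the reading with the shortest bracket */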
+	for (i = 0; i < SAMPLE_READINGS; i++) {
+		clock_gettime(CLOCK_MONOTONIC_RAW, &ts1);
+		clock_gettime(CLOCK_MONOTONIC, &ts2);
+		clock_gettime(CLOCK_MONOTONIC_RAW, &ts3);
+
+		ts1.tv_sec -= mono_raw_base;
+		ts2.tv_sec -= mono_base;
+		ts3.tv_sec -= mono_raw_base;
+
+		delay = diff_timespec(&ts3, &ts1);
+		if (delay <= 1e-9) {
+			i--;
+			continue;
+		}
+
+		if (!i || delay < mindelay) {
+			sample->offset = diff_timespec(&ts2, &ts1);
+			sample->offset -= delay / 2.0;
+			sample->time = ts1.tv_sec + ts1.tv_nsec / 1e9;
+			mindelay = delay;
+		}
+	}
+
+	return mindelay;
+}
+
+static void reset_ntp_error(void)
+{
+	struct timex txc;
+
+	txc.modes = ADJ_SETOFFSET;
+	txc.time.tv_sec = 0;
+	txc.time.tv_usec = 0;
+
+	if (adjtimex(&txc) < 0) {
+		perror("[FAIL] adjtimex");
+		ksft_exit_fail();
+	}
+}
+
+static void set_frequency(double freq)
+{
+	struct timex txc;
+	int tick_offset;
+
+	tick_offset = 1e6 * freq / user_hz;
+
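+	/* split the requested frequency between the coarse tick length (us per tick) and the fine freq field (scaled ppm with a 16-bit fraction) */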
+	txc.modes = ADJ_TICK | ADJ_FREQUENCY;
+	txc.tick = 1000000 / user_hz + tick_offset;
+	txc.freq = (1e6 * freq - user_hz * tick_offset) * (1 << 16);
+
+	if (adjtimex(&txc) < 0) {
+		perror("[FAIL] adjtimex");
+		ksft_exit_fail();
+	}
+}
+
+static void regress(struct sample *samples, int n, double *intercept,
+		    double *slope, double *r_stddev, double *r_max)
+{
+	double x, y, r, x_sum, y_sum, xy_sum, x2_sum, r2_sum;
+	int i;
+
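+	/* ordinary least-squares fit of offset vs. time; residual stddev and max describe the remaining noise */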
+	x_sum = 0.0, y_sum = 0.0, xy_sum = 0.0, x2_sum = 0.0;
+
+	for (i = 0; i < n; i++) {
+		x = samples[i].time;
+		y = samples[i].offset;
+
+		x_sum += x;
+		y_sum += y;
+		xy_sum += x * y;
+		x2_sum += x * x;
+	}
+
+	*slope = (xy_sum - x_sum * y_sum / n) / (x2_sum - x_sum * x_sum / n);
+	*intercept = (y_sum - *slope * x_sum) / n;
+
+	*r_max = 0.0, r2_sum = 0.0;
+
+	for (i = 0; i < n; i++) {
+		x = samples[i].time;
+		y = samples[i].offset;
+		r = fabs(x * *slope + *intercept - y);
+		if (*r_max < r)
+			*r_max = r;
+		r2_sum += r * r;
+	}
+
+	*r_stddev = sqrt(r2_sum / n);
+}
+
+static int run_test(int calibration, double freq_base, double freq_step)
+{
+	struct sample samples[SAMPLES];
+	double intercept, slope, stddev1, max1, stddev2, max2;
+	double freq_error1, freq_error2;
+	int i;
+
+	set_frequency(freq_base);
+
+	for (i = 0; i < 10; i++)
+		usleep(1e6 * MEAN_SAMPLE_INTERVAL / 10);
+
+	reset_ntp_error();
+
+	set_frequency(freq_base + freq_step);
+
+	for (i = 0; i < 10; i++)
+		usleep(rand() % 2000000 * STEP_INTERVAL / 10);
+
+	set_frequency(freq_base);
+
+	for (i = 0; i < SAMPLES; i++) {
+		usleep(rand() % 2000000 * MEAN_SAMPLE_INTERVAL);
+		get_sample(&samples[i]);
+	}
+
+	if (calibration) {
+		regress(samples, SAMPLES, &intercept, &slope, &stddev1, &max1);
+		mono_freq_offset = slope;
+		printf("CLOCK_MONOTONIC_RAW frequency offset: %11.3f ppm\n",
+		       1e6 * mono_freq_offset);
+		return 0;
+	}
+
+	regress(samples, SAMPLES / 2, &intercept, &slope, &stddev1, &max1);
+	freq_error1 = slope * (1.0 - mono_freq_offset) - mono_freq_offset -
+			freq_base;
+
+	regress(samples + SAMPLES / 2, SAMPLES / 2, &intercept, &slope,
+		&stddev2, &max2);
+	freq_error2 = slope * (1.0 - mono_freq_offset) - mono_freq_offset -
+			freq_base;
+
+	printf("%6.0f %+10.3f %6.0f %7.0f %+10.3f %6.0f %7.0f\t",
+	       1e6 * freq_step,
+	       1e6 * freq_error1, 1e9 * stddev1, 1e9 * max1,
+	       1e6 * freq_error2, 1e9 * stddev2, 1e9 * max2);
+
+	if (fabs(freq_error2) > MAX_FREQ_ERROR || stddev2 > MAX_STDDEV) {
+		printf("[FAIL]\n");
+		return 1;
+	}
+
+	printf("[OK]\n");
+	return 0;
+}
+
+static void init_test(void)
+{
+	struct timespec ts;
+	struct sample sample;
+
+	if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts)) {
+		perror("[FAIL] clock_gettime(CLOCK_MONOTONIC_RAW)");
+		ksft_exit_fail();
+	}
+
+	mono_raw_base = ts.tv_sec;
+
+	if (clock_gettime(CLOCK_MONOTONIC, &ts)) {
+		perror("[FAIL] clock_gettime(CLOCK_MONOTONIC)");
+		ksft_exit_fail();
+	}
+
+	mono_base = ts.tv_sec;
+
+	user_hz = sysconf(_SC_CLK_TCK);
+
+	precision = get_sample(&sample) / 2.0;
+	printf("CLOCK_MONOTONIC_RAW+CLOCK_MONOTONIC precision: %.0f ns\t\t",
+	       1e9 * precision);
+
+	if (precision > MAX_PRECISION) {
+		printf("[SKIP]\n");
+		ksft_exit_skip();
+	}
+
+	printf("[OK]\n");
+	srand(ts.tv_sec ^ ts.tv_nsec);
+
+	run_test(1, 0.0, 0.0);
+}
+
+int main(int argc, char **argv)
+{
+	double freq_base, freq_step;
+	int i, j, fails = 0;
+
+	init_test();
+
+	printf("Checking response to frequency step:\n");
+	printf("  Step           1st interval              2nd interval\n");
+	printf("             Freq    Dev     Max       Freq    Dev     Max\n");
+
+	for (i = 2; i >= 0; i--) {
+		for (j = 0; j < 5; j++) {
+			freq_base = (rand() % (1 << 24) - (1 << 23)) / 65536e6;
+			freq_step = 10e-6 * (1 << (6 * i));
+			fails += run_test(0, freq_base, freq_step);
+		}
+	}
+
+	set_frequency(0.0);
+
+	if (fails)
+		ksft_exit_fail();
+
+	ksft_exit_pass();
+}
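For readers tracing the math in freq-step.c above: regress() is an ordinary least-squares fit of offset against time, and run_test() turns the fitted slope into a frequency error in ppm (after removing the CLOCK_MONOTONIC_RAW offset measured in the calibration pass), then judges stability by the residual standard deviation and the maximum residual. A standalone sketch of the same fit on invented data (illustration only, not part of the patch):

	/* Least-squares fit mirroring regress(); the sample data is invented. */
	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		double t[]   = { 0.0, 1.0, 2.0, 3.0, 4.0 };       /* sample times (s) */
		double off[] = { 0.0, 1e-5, 2e-5, 3e-5, 4e-5 };    /* clock offsets (s) */
		double x_sum = 0, y_sum = 0, xy_sum = 0, x2_sum = 0;
		double slope, intercept, r, r2_sum = 0, r_max = 0;
		int i, n = 5;

		for (i = 0; i < n; i++) {
			x_sum += t[i];
			y_sum += off[i];
			xy_sum += t[i] * off[i];
			x2_sum += t[i] * t[i];
		}
		slope = (xy_sum - x_sum * y_sum / n) / (x2_sum - x_sum * x_sum / n);
		intercept = (y_sum - slope * x_sum) / n;

		for (i = 0; i < n; i++) {
			r = fabs(t[i] * slope + intercept - off[i]);
			if (r > r_max)
				r_max = r;
			r2_sum += r * r;
		}
		/* the slope is the frequency error; residuals measure sampling noise */
		printf("freq error %.3f ppm, stddev %.0f ns, max %.0f ns\n",
		       1e6 * slope, 1e9 * sqrt(r2_sum / n), 1e9 * r_max);
		return 0;
	}

Built with something like gcc -o fit fit.c -lm, the invented data fits exactly: a 10.000 ppm slope with zero residual.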
diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c
index caf1bc9..74c60e8 100644
--- a/tools/testing/selftests/timers/inconsistency-check.c
+++ b/tools/testing/selftests/timers/inconsistency-check.c
@@ -118,7 +118,7 @@ int consistency_test(int clock_type, unsigned long seconds)
 	start_str = ctime(&t);
 
 	while (seconds == -1 || now - then < seconds) {
-		inconsistent = 0;
+		inconsistent = -1;
 
 		/* Fill list */
 		for (i = 0; i < CALLS_PER_LOOP; i++)
@@ -130,7 +130,7 @@ int consistency_test(int clock_type, unsigned long seconds)
 				inconsistent = i;
 
 		/* display inconsistency */
-		if (inconsistent) {
+		if (inconsistent >= 0) {
 			unsigned long long delta;
 
 			printf("\%s\n", start_str);
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index 0692d99..2d89b5f 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -387,15 +387,17 @@ int main (int argc, char **argv)
 	/* pick defaults that works with all speeds, without short packets.
 	 *
 	 * Best per-frame data rates:
-	 *     high speed, bulk       512 * 13 * 8 = 53248
-	 *                 interrupt 1024 *  3 * 8 = 24576
-	 *     full speed, bulk/intr   64 * 19     =  1216
-	 *                 interrupt   64 *  1     =    64
-	 *      low speed, interrupt    8 *  1     =     8
+	 *     super speed,bulk      1024 * 16 * 8 = 131072
+	 *                 interrupt 1024 *  3 * 8 =  24576
+	 *     high speed, bulk       512 * 13 * 8 =  53248
+	 *                 interrupt 1024 *  3 * 8 =  24576
+	 *     full speed, bulk/intr   64 * 19     =   1216
+	 *                 interrupt   64 *  1     =     64
+	 *      low speed, interrupt    8 *  1     =      8
 	 */
 	param.iterations = 1000;
 	param.length = 1024;
-	param.vary = 512;
+	param.vary = 1024;
 	param.sglen = 32;
 
 	/* for easy use when hotplugging */
@@ -457,7 +459,7 @@ int main (int argc, char **argv)
 			"\t-c iterations		default 1000\n"
 			"\t-s transfer length	default 1024\n"
 			"\t-g sglen		default 32\n"
-			"\t-v vary			default 512\n",
+			"\t-v vary			default 1024\n",
 			argv[0]);
 		return 1;
 	}
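The updated comment in testusb.c is plain arithmetic: best-case per-frame payload is the maximum packet size times packets per (micro)frame, times eight microframes per frame at high and super speed. A throwaway check of the quoted numbers (illustration only):

	/* Reproduce the per-frame byte counts quoted in the comment above. */
	#include <stdio.h>

	int main(void)
	{
		/* packet size * packets per (micro)frame * microframes per frame */
		printf("super bulk  %6d\n", 1024 * 16 * 8);	/* 131072 */
		printf("super intr  %6d\n", 1024 *  3 * 8);	/*  24576 */
		printf("high  bulk  %6d\n",  512 * 13 * 8);	/*  53248 */
		printf("high  intr  %6d\n", 1024 *  3 * 8);	/*  24576 */
		printf("full  bulk  %6d\n",   64 * 19);		/*   1216 */
		printf("full  intr  %6d\n",   64 *  1);		/*     64 */
		printf("low   intr  %6d\n",    8 *  1);		/*      8 */
		return 0;
	}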
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index f659c14..9bd2cd7 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -7,6 +7,7 @@
 #include <limits.h>
 #include <netdb.h>
 #include <libudev.h>
+#include <dirent.h>
 #include "sysfs_utils.h"
 
 #undef  PROGNAME
@@ -35,18 +36,11 @@ imported_device_init(struct usbip_imported_device *idev, char *busid)
 	return NULL;
 }
 
-
-
 static int parse_status(const char *value)
 {
 	int ret = 0;
 	char *c;
 
-
-	for (int i = 0; i < vhci_driver->nports; i++)
-		memset(&vhci_driver->idev[i], 0, sizeof(vhci_driver->idev[i]));
-
-
 	/* skip a header line */
 	c = strchr(value, '\n');
 	if (!c)
@@ -57,9 +51,11 @@ static int parse_status(const char *value)
 		int port, status, speed, devid;
 		unsigned long socket;
 		char lbusid[SYSFS_BUS_ID_SIZE];
+		struct usbip_imported_device *idev;
+		char hub[3];
 
-		ret = sscanf(c, "%d %d %d %x %lx %31s\n",
-				&port, &status, &speed,
+		ret = sscanf(c, "%2s  %d %d %d %x %lx %31s\n",
+				hub, &port, &status, &speed,
 				&devid, &socket, lbusid);
 
 		if (ret < 5) {
@@ -67,34 +63,36 @@ static int parse_status(const char *value)
 			BUG();
 		}
 
-		dbg("port %d status %d speed %d devid %x",
-				port, status, speed, devid);
+		dbg("hub %s port %d status %d speed %d devid %x",
+				hub, port, status, speed, devid);
 		dbg("socket %lx lbusid %s", socket, lbusid);
 
-
 		/* if a device is connected, look at it */
-		{
-			struct usbip_imported_device *idev = &vhci_driver->idev[port];
+		idev = &vhci_driver->idev[port];
+		memset(idev, 0, sizeof(*idev));
 
-			idev->port	= port;
-			idev->status	= status;
+		if (strncmp("hs", hub, 2) == 0)
+			idev->hub = HUB_SPEED_HIGH;
+		else /* strncmp("ss", hub, 2) == 0 */
+			idev->hub = HUB_SPEED_SUPER;
 
-			idev->devid	= devid;
+		idev->port	= port;
+		idev->status	= status;
 
-			idev->busnum	= (devid >> 16);
-			idev->devnum	= (devid & 0x0000ffff);
+		idev->devid	= devid;
 
-			if (idev->status != VDEV_ST_NULL
-			    && idev->status != VDEV_ST_NOTASSIGNED) {
-				idev = imported_device_init(idev, lbusid);
-				if (!idev) {
-					dbg("imported_device_init failed");
-					return -1;
-				}
+		idev->busnum	= (devid >> 16);
+		idev->devnum	= (devid & 0x0000ffff);
+
+		if (idev->status != VDEV_ST_NULL
+		    && idev->status != VDEV_ST_NOTASSIGNED) {
+			idev = imported_device_init(idev, lbusid);
+			if (!idev) {
+				dbg("imported_device_init failed");
+				return -1;
 			}
 		}
 
-
 		/* go to the next line */
 		c = strchr(c, '\n');
 		if (!c)
@@ -107,18 +105,33 @@ static int parse_status(const char *value)
 	return 0;
 }
 
+#define MAX_STATUS_NAME 16
+
 static int refresh_imported_device_list(void)
 {
 	const char *attr_status;
+	char status[MAX_STATUS_NAME+1] = "status";
+	int i, ret;
 
-	attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
-					       "status");
-	if (!attr_status) {
-		err("udev_device_get_sysattr_value failed");
-		return -1;
+	for (i = 0; i < vhci_driver->ncontrollers; i++) {
+		if (i > 0)
+			snprintf(status, sizeof(status), "status.%d", i);
+
+		attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
+							    status);
+		if (!attr_status) {
+			err("udev_device_get_sysattr_value failed");
+			return -1;
+		}
+
+		dbg("controller %d", i);
+
+		ret = parse_status(attr_status);
+		if (ret != 0)
+			return ret;
 	}
 
-	return parse_status(attr_status);
+	return 0;
 }
 
 static int get_nports(void)
@@ -134,6 +147,33 @@ static int get_nports(void)
 	return (int)strtoul(attr_nports, NULL, 10);
 }
 
+static int vhci_hcd_filter(const struct dirent *dirent)
+{
+	return strcmp(dirent->d_name, "vhci_hcd") >= 0;
+}
+
+static int get_ncontrollers(void)
+{
+	struct dirent **namelist;
+	struct udev_device *platform;
+	int n;
+
+	platform = udev_device_get_parent(vhci_driver->hc_device);
+	if (platform == NULL)
+		return -1;
+
+	n = scandir(udev_device_get_syspath(platform), &namelist, vhci_hcd_filter, NULL);
+	if (n < 0)
+		err("scandir failed");
+	else {
+		for (int i = 0; i < n; i++)
+			free(namelist[i]);
+		free(namelist);
+	}
+
+	return n;
+}
+
 /*
  * Read the given port's record.
  *
@@ -213,16 +253,31 @@ int usbip_vhci_driver_open(void)
 	vhci_driver->hc_device =
 		udev_device_new_from_subsystem_sysname(udev_context,
 						       USBIP_VHCI_BUS_TYPE,
-						       USBIP_VHCI_DRV_NAME);
+						       USBIP_VHCI_DEVICE_NAME);
 	if (!vhci_driver->hc_device) {
 		err("udev_device_new_from_subsystem_sysname failed");
 		goto err;
 	}
 
 	vhci_driver->nports = get_nports();
-
 	dbg("available ports: %d", vhci_driver->nports);
 
+	if (vhci_driver->nports <= 0) {
+		err("no available ports");
+		goto err;
+	} else if (vhci_driver->nports > MAXNPORT) {
+		err("port number exceeds %d", MAXNPORT);
+		goto err;
+	}
+
+	vhci_driver->ncontrollers = get_ncontrollers();
+	dbg("available controllers: %d", vhci_driver->ncontrollers);
+
+	if (vhci_driver->ncontrollers <= 0) {
+		err("no available usb controllers");
+		goto err;
+	}
+
 	if (refresh_imported_device_list())
 		goto err;
 
@@ -270,11 +325,15 @@ int usbip_vhci_refresh_device_list(void)
 }
 
 
-int usbip_vhci_get_free_port(void)
+int usbip_vhci_get_free_port(uint32_t speed)
 {
 	for (int i = 0; i < vhci_driver->nports; i++) {
+		if (speed == USB_SPEED_SUPER &&
+		    vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
+			continue;
+
 		if (vhci_driver->idev[i].status == VDEV_ST_NULL)
-			return i;
+			return vhci_driver->idev[i].port;
 	}
 
 	return -1;
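To picture the data the reworked vhci_driver.c handles: each vhci_hcd controller now exposes its own status file (status, status.1, ...), the controller count is taken from the vhci_hcd platform devices found via scandir(), and every status row gains a leading hub-speed column ("hs" or "ss") ahead of the port number. A hedged sketch of parsing one such row with an sscanf pattern like the one above (the sample line is invented):

	/* Parse one vhci status row of the new layout; the input is made up. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *line = "hs 0000 004 000 00010002 00000000000000a3 1-1";
		char hub[3], lbusid[32];
		int port, status, speed, ret;
		unsigned int devid;
		unsigned long socket;

		ret = sscanf(line, "%2s %d %d %d %x %lx %31s",
			     hub, &port, &status, &speed, &devid, &socket, lbusid);
		if (ret < 5)
			return 1;

		printf("%s-speed port %d: status %d devid %x busid %s\n",
		       strcmp(hub, "ss") ? "high" : "super",
		       port, status, devid, lbusid);
		return 0;
	}

The hub column is what later lets usbip_vhci_get_free_port() hand SuperSpeed imports only to "ss" ports.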
diff --git a/tools/usb/usbip/libsrc/vhci_driver.h b/tools/usb/usbip/libsrc/vhci_driver.h
index fa2316c..4898d3b 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.h
+++ b/tools/usb/usbip/libsrc/vhci_driver.h
@@ -11,9 +11,16 @@
 #include "usbip_common.h"
 
 #define USBIP_VHCI_BUS_TYPE "platform"
+#define USBIP_VHCI_DEVICE_NAME "vhci_hcd.0"
 #define MAXNPORT 128
 
+enum hub_speed {
+	HUB_SPEED_HIGH = 0,
+	HUB_SPEED_SUPER,
+};
+
 struct usbip_imported_device {
+	enum hub_speed hub;
 	uint8_t port;
 	uint32_t status;
 
@@ -31,6 +38,7 @@ struct usbip_vhci_driver {
 	/* /sys/devices/platform/vhci_hcd */
 	struct udev_device *hc_device;
 
+	int ncontrollers;
 	int nports;
 	struct usbip_imported_device idev[MAXNPORT];
 };
@@ -44,7 +52,7 @@ void usbip_vhci_driver_close(void);
 int  usbip_vhci_refresh_device_list(void);
 
 
-int usbip_vhci_get_free_port(void);
+int usbip_vhci_get_free_port(uint32_t speed);
 int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid,
 		uint32_t speed);
 
diff --git a/tools/usb/usbip/src/usbip_attach.c b/tools/usb/usbip/src/usbip_attach.c
index 70a6b50..6e89768 100644
--- a/tools/usb/usbip/src/usbip_attach.c
+++ b/tools/usb/usbip/src/usbip_attach.c
@@ -94,6 +94,7 @@ static int import_device(int sockfd, struct usbip_usb_device *udev)
 {
 	int rc;
 	int port;
+	uint32_t speed = udev->speed;
 
 	rc = usbip_vhci_driver_open();
 	if (rc < 0) {
@@ -101,13 +102,15 @@ static int import_device(int sockfd, struct usbip_usb_device *udev)
 		return -1;
 	}
 
-	port = usbip_vhci_get_free_port();
+	port = usbip_vhci_get_free_port(speed);
 	if (port < 0) {
 		err("no free port");
 		usbip_vhci_driver_close();
 		return -1;
 	}
 
+	dbg("got free port %d", port);
+
 	rc = usbip_vhci_attach_device(port, sockfd, udev->busnum,
 				      udev->devnum, udev->speed);
 	if (rc < 0) {
diff --git a/usr/Kconfig b/usr/Kconfig
index c0c4850..ad0543e 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -220,6 +220,7 @@
 endchoice
 
 config INITRAMFS_COMPRESSION
+	depends on INITRAMFS_SOURCE!=""
 	string
 	default ""      if INITRAMFS_COMPRESSION_NONE
 	default ".gz"   if INITRAMFS_COMPRESSION_GZIP
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 32c3295..8794036 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)		((v) & 0xf)
-#define vtr_to_nr_pre_bits(v)		(((u32)(v) >> 26) + 1)
+#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
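The vtr_to_nr_pre_bits() fix masks the extraction down to the three-bit PREbits field (bits [28:26] of ICH_VTR_EL2) before adding one; without the & 7, the shift also drags in the PRIbits field above it. A throwaway illustration on an invented VTR value:

	/* Show the effect of the mask on a made-up ICH_VTR value where
	 * PRIbits (bits 31:29) and PREbits (bits 28:26) are both non-zero. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t vtr = (4u << 29) | (6u << 26);	/* PRIbits=4, PREbits=6 */

		printf("old: %u\n", (vtr >> 26) + 1);		/* 39: PRIbits leak in */
		printf("new: %u\n", ((vtr >> 26) & 7) + 1);	/* 7: PREbits + 1 */
		return 0;
	}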
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a2d6324..e2e5eff 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 0a4283e..63e0bbd 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		val = vmcr.ctlr;
+		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0xff) {
 	case GIC_CPU_CTRL:
-		vmcr.ctlr = val;
+		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
 		break;
 	case GIC_CPU_PRIMASK:
 		/*
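The vcpuif accessors above stop funnelling GICC_CTLR through one opaque ctlr word and instead pack and unpack each architecturally defined bit, using the usual idiom: !!(val & BIT) folds a masked read to exactly 0 or 1, and field << SHIFT rebuilds the register value for the guest. A self-contained sketch of that idiom (the bit positions below are placeholders, not the GICC_CTLR layout):

	/* Pack/unpack idiom for one-bit register fields; positions are invented. */
	#include <stdio.h>

	#define EN0_SHIFT	0
	#define EN1_SHIFT	1
	#define EOI_SHIFT	9
	#define EN0		(1u << EN0_SHIFT)
	#define EN1		(1u << EN1_SHIFT)
	#define EOI		(1u << EOI_SHIFT)

	struct ctl { unsigned int en0, en1, eoi; };

	static void unpack(struct ctl *c, unsigned int reg)
	{
		c->en0 = !!(reg & EN0);		/* !! folds any set bit to exactly 1 */
		c->en1 = !!(reg & EN1);
		c->eoi = !!(reg & EOI);
	}

	static unsigned int pack(const struct ctl *c)
	{
		return (c->en0 << EN0_SHIFT) |
		       (c->en1 << EN1_SHIFT) |
		       (c->eoi << EOI_SHIFT);
	}

	int main(void)
	{
		struct ctl c;

		unpack(&c, 0x201);	/* en0 and eoi set */
		printf("%u %u %u -> %#x\n", c.en0, c.en1, c.eoi, pack(&c));
		return 0;
	}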
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 504b4bd..e4187e5 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	u32 vmcr;
 
-	vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+		GICH_VMCR_ENABLE_GRP0_MASK;
+	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+		GICH_VMCR_ENABLE_GRP1_MASK;
+	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+		GICH_VMCR_ACK_CTL_MASK;
+	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+		GICH_VMCR_FIQ_EN_MASK;
+	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+		GICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+		GICH_VMCR_EOI_MODE_MASK;
 	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
 		GICH_VMCR_ALIAS_BINPOINT_MASK;
 	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-			GICH_VMCR_CTRL_SHIFT;
+	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+		GICH_VMCR_ENABLE_GRP0_SHIFT;
+	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+		GICH_VMCR_ENABLE_GRP1_SHIFT;
+	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+		GICH_VMCR_ACK_CTL_SHIFT;
+	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+		GICH_VMCR_FIQ_EN_SHIFT;
+	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+		GICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+		GICH_VMCR_EOI_MODE_SHIFT;
+
 	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
 			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
 	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6fe3f00..030248e 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-		 ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-	vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+			ICH_VMCR_ACK_CTL_MASK;
+		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+			ICH_VMCR_FIQ_EN_MASK;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1, the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcr = ICH_VMCR_FIQ_EN_MASK;
+	}
+
+	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
 	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
 	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
 	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u32 vmcr;
 
 	vmcr = cpu_if->vgic_vmcr;
 
-	/*
-	 * Ignore the FIQen bit, because GIC emulation always implies
-	 * SRE=1 which means the vFIQEn bit is also RES1.
-	 */
-	vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-			ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-	vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+			ICH_VMCR_ACK_CTL_SHIFT;
+		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+			ICH_VMCR_FIQ_EN_SHIFT;
+	} else {
+		/*
+		 * When emulating GICv3 on GICv3 with SRE=1, the
+		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+		 */
+		vmcrp->fiqen = 1;
+		vmcrp->ackctl = 0;
+	}
+
+	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
 	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
 	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
 	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
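The conceptual point of the vgic-v3.c hunks: whether AckCtl and FIQEn are real guest state depends on which GIC model the guest is shown. A GICv2 guest can change them, so they must round-trip through ICH_VMCR; a native GICv3 guest runs with SRE=1, where VFIQEn is RES1 and VAckCtl is RES0, so writes force the fixed values and reads report them. A toy model of that branch (the bit positions are placeholders, not the ICH_VMCR layout):

	/* What the guest reads back for FIQEn/AckCtl depends on the GIC model
	 * it is shown; the register layout here is invented for illustration. */
	#include <stdio.h>

	enum gic_model { GUEST_SEES_V2, GUEST_SEES_V3 };

	static void report(enum gic_model m, unsigned int hw_vmcr)
	{
		unsigned int fiqen, ackctl;

		if (m == GUEST_SEES_V2) {	/* bits carry real guest state */
			fiqen  = (hw_vmcr >> 1) & 1;
			ackctl = (hw_vmcr >> 0) & 1;
		} else {			/* SRE=1: VFIQEn RES1, VAckCtl RES0 */
			fiqen  = 1;
			ackctl = 0;
		}
		printf("fiqen=%u ackctl=%u\n", fiqen, ackctl);
	}

	int main(void)
	{
		report(GUEST_SEES_V2, 0x1);	/* fiqen=0 ackctl=1 */
		report(GUEST_SEES_V3, 0x1);	/* fiqen=1 ackctl=0 regardless */
		return 0;
	}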
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index da83e4c..bba7fa2 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-	u32	ctlr;
+	u32	grpen0;
+	u32	grpen1;
+
+	u32	ackctl;
+	u32	fiqen;
+	u32	cbpr;
+	u32	eoim;
+
 	u32	abpr;
 	u32	bpr;
 	u32	pmr;  /* Priority mask field in the GICC_PMR and
 		       * ICC_PMR_EL1 priority field format */
-	/* Below member variable are valid only for GICv3 */
-	u32	grpen0;
-	u32	grpen1;
 };
 
 struct vgic_reg_attr {
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a8d5403..9120edf 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -184,7 +184,7 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic(
  * Called with wqh->lock held and interrupts disabled
  */
 static int
-irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(wait, struct kvm_kernel_irqfd, wait);